Merge tag 'uapi-prep-20121002' of git://git.infradead.org/users/dhowells/linux-headers
author Linus Torvalds <torvalds@linux-foundation.org>
Wed, 3 Oct 2012 20:45:43 +0000 (13:45 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Wed, 3 Oct 2012 20:45:43 +0000 (13:45 -0700)
Pull preparatory patches for user API disintegration from David Howells:
 "The patches herein prepare for the extraction of the Userspace API
  bits from the various header files named in the Kbuild files.

  New subdirectories are created under either include/uapi/ or
  arch/x/include/uapi/ that correspond to the subdirectory containing
  that file under include/ or arch/x/include/.

  The new subdirs under the uapi/ directory are populated with Kbuild
  files that mostly do nothing at this time.  Further patches will
  disintegrate the headers in each original directory and fill in the
  Kbuild files as they do it.

  These patches also:

   (1) fix up #inclusions of "foo.h" rather than <foo.h>.

   (2) Remove some redundant #includes from the DRM code.

   (3) Make the kernel build infrastructure handle Kbuild files both in
       the old places and the new UAPI place that both specify headers
       to be exported.

   (4) Fix some kernel tools that #include kernel headers during their
       build.

  I have compile tested this with allyesconfig against x86_64,
  allmodconfig against i386 and a scattering of additional defconfigs of
  other arches.

Signed-off-by: David Howells <dhowells@redhat.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Michael Kerrisk <mtk.manpages@gmail.com>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Acked-by: Dave Jones <davej@redhat.com>
Acked-by: H. Peter Anvin <hpa@zytor.com>"
* tag 'uapi-prep-20121002' of git://git.infradead.org/users/dhowells/linux-headers:
  UAPI: Plumb the UAPI Kbuilds into the user header installation and checking
  UAPI: x86: Differentiate the generated UAPI and internal headers
  UAPI: Remove the objhdr-y export list
  UAPI: Move linux/version.h
  UAPI: Set up uapi/asm/Kbuild.asm
  UAPI: x86: Fix insn_sanity build failure after UAPI split
  UAPI: x86: Fix the test_get_len tool
  UAPI: (Scripted) Set up UAPI Kbuild files
  UAPI: Partition the header include path sets and add uapi/ header directories
  UAPI: (Scripted) Convert #include "..." to #include <path/...> in kernel system headers
  UAPI: (Scripted) Convert #include "..." to #include <path/...> in drivers/gpu/
  UAPI: (Scripted) Remove redundant DRM UAPI header #inclusions from drivers/gpu/.
  UAPI: Refer to the DRM UAPI headers with <...> and from certain headers only

2106 files changed:
Documentation/ABI/testing/ima_policy
Documentation/ABI/testing/sysfs-devices-system-cpu
Documentation/ABI/testing/sysfs-driver-ppi [new file with mode: 0644]
Documentation/ABI/testing/sysfs-ptp
Documentation/cgroups/cgroups.txt
Documentation/cpu-freq/boost.txt [new file with mode: 0644]
Documentation/cpuidle/sysfs.txt
Documentation/devicetree/bindings/arm/calxeda/combophy.txt [new file with mode: 0644]
Documentation/devicetree/bindings/ata/ahci-platform.txt
Documentation/devicetree/bindings/ata/pata-arasan.txt [new file with mode: 0644]
Documentation/devicetree/bindings/cpufreq/cpufreq-cpu0.txt [new file with mode: 0644]
Documentation/devicetree/bindings/dma/arm-pl330.txt
Documentation/devicetree/bindings/gpio/gpio-74x164.txt [new file with mode: 0644]
Documentation/devicetree/bindings/gpio/gpio-adnp.txt [new file with mode: 0644]
Documentation/devicetree/bindings/gpio/led.txt
Documentation/devicetree/bindings/input/gpio-keys-polled.txt [new file with mode: 0644]
Documentation/devicetree/bindings/input/rotary-encoder.txt [new file with mode: 0644]
Documentation/devicetree/bindings/interrupt-controller/interrupts.txt [new file with mode: 0644]
Documentation/devicetree/bindings/net/calxeda-xgmac.txt
Documentation/devicetree/bindings/net/can/c_can.txt [new file with mode: 0644]
Documentation/devicetree/bindings/net/cpsw.txt [new file with mode: 0644]
Documentation/devicetree/bindings/net/davinci-mdio.txt [new file with mode: 0644]
Documentation/devicetree/bindings/net/mdio-mux-mmioreg.txt [new file with mode: 0644]
Documentation/devicetree/bindings/pinctrl/brcm,bcm2835-gpio.txt [new file with mode: 0644]
Documentation/devicetree/bindings/pinctrl/fsl,imx35-pinctrl.txt [new file with mode: 0644]
Documentation/devicetree/bindings/pinctrl/pinctrl-single.txt
Documentation/devicetree/bindings/power/opp.txt [new file with mode: 0644]
Documentation/devicetree/bindings/pwm/mxs-pwm.txt
Documentation/devicetree/bindings/serial/fsl-imx-uart.txt [new file with mode: 0644]
Documentation/devicetree/bindings/spi/mxs-spi.txt [new file with mode: 0644]
Documentation/devicetree/bindings/spi/spi-bus.txt
Documentation/devicetree/bindings/spi/spi-gpio.txt [new file with mode: 0644]
Documentation/devicetree/bindings/spi/spi-sc18is602.txt [new file with mode: 0644]
Documentation/devicetree/bindings/spi/spi_pl022.txt
Documentation/filesystems/jfs.txt
Documentation/filesystems/nfs/nfsroot.txt
Documentation/infiniband/ipoib.txt
Documentation/kernel-parameters.txt
Documentation/networking/batman-adv.txt
Documentation/networking/bonding.txt
Documentation/networking/ip-sysctl.txt
Documentation/networking/stmmac.txt
Documentation/networking/vxlan.txt [new file with mode: 0644]
Documentation/pinctrl.txt
Documentation/scsi/ChangeLog.megaraid_sas
Documentation/scsi/LICENSE.qla2xxx
Documentation/scsi/LICENSE.qla4xxx
Documentation/scsi/st.txt
Documentation/security/Smack.txt
Documentation/spi/spi-sc18is602 [new file with mode: 0644]
MAINTAINERS
arch/alpha/kernel/osf_sys.c
arch/arm/Kconfig
arch/arm/boot/dts/highbank.dts
arch/arm/configs/kzm9g_defconfig
arch/arm/configs/sam9_l9260_defconfig
arch/arm/include/asm/barrier.h
arch/arm/include/asm/dma-mapping.h
arch/arm/include/asm/memory.h
arch/arm/kernel/smp.c
arch/arm/mach-highbank/highbank.c
arch/arm/mach-nomadik/board-nhk8815.c
arch/arm/mach-nomadik/cpu-8815.c
arch/arm/mach-pxa/sharpsl_pm.c
arch/arm/mach-shmobile/Makefile
arch/arm/mach-shmobile/board-ap4evb.c
arch/arm/mach-shmobile/board-armadillo800eva.c
arch/arm/mach-shmobile/board-kzm9g.c
arch/arm/mach-shmobile/board-mackerel.c
arch/arm/mach-shmobile/common.c [deleted file]
arch/arm/mach-shmobile/cpuidle.c
arch/arm/mach-shmobile/include/mach/common.h
arch/arm/mach-shmobile/include/mach/pm-rmobile.h
arch/arm/mach-shmobile/include/mach/r8a7740.h
arch/arm/mach-shmobile/include/mach/r8a7779.h
arch/arm/mach-shmobile/include/mach/sh7372.h
arch/arm/mach-shmobile/pm-r8a7740.c
arch/arm/mach-shmobile/pm-r8a7779.c
arch/arm/mach-shmobile/pm-rmobile.c
arch/arm/mach-shmobile/pm-sh7372.c
arch/arm/mach-shmobile/setup-r8a7740.c
arch/arm/mach-shmobile/setup-r8a7779.c
arch/arm/mach-shmobile/setup-sh7372.c
arch/arm/mach-tegra/include/mach/kbc.h [deleted file]
arch/arm/mach-u300/core.c
arch/arm/mach-ux500/board-mop500-pins.c
arch/arm/mm/dma-mapping.c
arch/arm/mm/mmu.c
arch/arm/plat-omap/mailbox.c
arch/cris/Kconfig
arch/cris/arch-v32/drivers/axisflashmap.c
arch/cris/arch-v32/drivers/pci/bios.c
arch/cris/arch-v32/kernel/head.S
arch/cris/arch-v32/kernel/kgdb.c
arch/cris/arch-v32/mach-a3/Makefile
arch/cris/arch-v32/mach-a3/vcs_hook.c [deleted file]
arch/cris/arch-v32/mach-a3/vcs_hook.h [deleted file]
arch/cris/arch-v32/mach-fs/Makefile
arch/cris/arch-v32/mach-fs/vcs_hook.c [deleted file]
arch/cris/arch-v32/mach-fs/vcs_hook.h [deleted file]
arch/cris/arch-v32/mm/init.c
arch/cris/include/arch-v32/arch/page.h
arch/cris/include/arch-v32/arch/processor.h
arch/cris/include/arch-v32/mach-fs/mach/startup.inc
arch/cris/include/asm/pci.h
arch/ia64/include/asm/xen/interface.h
arch/ia64/kernel/mca_drv.c
arch/ia64/kernel/perfmon.c
arch/ia64/kernel/signal.c
arch/m68k/configs/amiga_defconfig
arch/m68k/configs/apollo_defconfig
arch/m68k/configs/atari_defconfig
arch/m68k/configs/bvme6000_defconfig
arch/m68k/configs/hp300_defconfig
arch/m68k/configs/mac_defconfig
arch/m68k/configs/multi_defconfig
arch/m68k/configs/mvme147_defconfig
arch/m68k/configs/mvme16x_defconfig
arch/m68k/configs/q40_defconfig
arch/m68k/configs/sun3_defconfig
arch/m68k/configs/sun3x_defconfig
arch/mips/configs/ar7_defconfig
arch/mips/configs/bcm47xx_defconfig
arch/mips/configs/ip22_defconfig
arch/mips/configs/jazz_defconfig
arch/mips/configs/malta_defconfig
arch/mips/configs/markeins_defconfig
arch/mips/configs/nlm_xlp_defconfig
arch/mips/configs/nlm_xlr_defconfig
arch/mips/configs/rm200_defconfig
arch/parisc/hpux/fs.c
arch/powerpc/configs/pmac32_defconfig
arch/powerpc/configs/ppc64_defconfig
arch/powerpc/configs/ppc64e_defconfig
arch/powerpc/configs/ppc6xx_defconfig
arch/powerpc/include/asm/systbl.h
arch/powerpc/include/asm/unistd.h
arch/powerpc/kernel/prom_init.c
arch/powerpc/kernel/sys_ppc32.c
arch/powerpc/mm/fault.c
arch/powerpc/platforms/cell/cpufreq_spudemand.c
arch/powerpc/platforms/cell/spu_syscalls.c
arch/powerpc/platforms/cell/spufs/coredump.c
arch/s390/hypfs/inode.c
arch/s390/kernel/compat_linux.c
arch/sh/drivers/push-switch.c
arch/sparc/Kbuild
arch/sparc/crypto/Makefile [new file with mode: 0644]
arch/sparc/crypto/aes_asm.S [new file with mode: 0644]
arch/sparc/crypto/aes_glue.c [new file with mode: 0644]
arch/sparc/crypto/camellia_asm.S [new file with mode: 0644]
arch/sparc/crypto/camellia_glue.c [new file with mode: 0644]
arch/sparc/crypto/crc32c_asm.S [new file with mode: 0644]
arch/sparc/crypto/crc32c_glue.c [new file with mode: 0644]
arch/sparc/crypto/crop_devid.c [new file with mode: 0644]
arch/sparc/crypto/des_asm.S [new file with mode: 0644]
arch/sparc/crypto/des_glue.c [new file with mode: 0644]
arch/sparc/crypto/md5_asm.S [new file with mode: 0644]
arch/sparc/crypto/md5_glue.c [new file with mode: 0644]
arch/sparc/crypto/opcodes.h [new file with mode: 0644]
arch/sparc/crypto/sha1_asm.S [new file with mode: 0644]
arch/sparc/crypto/sha1_glue.c [new file with mode: 0644]
arch/sparc/crypto/sha256_asm.S [new file with mode: 0644]
arch/sparc/crypto/sha256_glue.c [new file with mode: 0644]
arch/sparc/crypto/sha512_asm.S [new file with mode: 0644]
arch/sparc/crypto/sha512_glue.c [new file with mode: 0644]
arch/sparc/include/asm/asi.h
arch/sparc/include/asm/elf_64.h
arch/sparc/include/asm/hypervisor.h
arch/sparc/include/asm/mdesc.h
arch/sparc/include/asm/oplib_32.h
arch/sparc/include/asm/oplib_64.h
arch/sparc/include/asm/pcr.h
arch/sparc/include/asm/perfctr.h
arch/sparc/include/asm/pstate.h
arch/sparc/include/asm/unistd.h
arch/sparc/kernel/head_64.S
arch/sparc/kernel/hvapi.c
arch/sparc/kernel/hvcalls.S
arch/sparc/kernel/ktlb.S
arch/sparc/kernel/mdesc.c
arch/sparc/kernel/nmi.c
arch/sparc/kernel/pci_sun4v.c
arch/sparc/kernel/pcr.c
arch/sparc/kernel/perf_event.c
arch/sparc/kernel/prom_64.c
arch/sparc/kernel/setup_64.c
arch/sparc/kernel/sys32.S
arch/sparc/kernel/sys_sparc32.c
arch/sparc/kernel/traps_64.c
arch/sparc/lib/Makefile
arch/sparc/lib/NG2memcpy.S
arch/sparc/lib/NG4copy_from_user.S [new file with mode: 0644]
arch/sparc/lib/NG4copy_page.S [new file with mode: 0644]
arch/sparc/lib/NG4copy_to_user.S [new file with mode: 0644]
arch/sparc/lib/NG4memcpy.S [new file with mode: 0644]
arch/sparc/lib/NG4patch.S [new file with mode: 0644]
arch/sparc/lib/NGpage.S
arch/sparc/lib/ksyms.c
arch/sparc/mm/init_64.c
arch/sparc/mm/init_64.h
arch/sparc/mm/iommu.c
arch/sparc/net/bpf_jit_comp.c
arch/tile/configs/tilegx_defconfig
arch/tile/configs/tilepro_defconfig
arch/um/drivers/mconsole_kern.c
arch/x86/include/asm/msr-index.h
arch/x86/include/asm/xen/interface.h
arch/x86/include/asm/xen/swiotlb-xen.h
arch/x86/net/bpf_jit_comp.c
arch/x86/xen/apic.c
arch/x86/xen/enlighten.c
arch/x86/xen/mmu.c
arch/x86/xen/p2m.c
arch/x86/xen/pci-swiotlb-xen.c
arch/x86/xen/platform-pci-unplug.c
arch/x86/xen/setup.c
arch/x86/xen/vga.c
arch/x86/xen/xen-head.S
arch/x86/xen/xen-ops.h
block/blk-cgroup.c
block/blk-core.c
block/blk-throttle.c
block/genhd.c
crypto/Kconfig
crypto/crypto_user.c
drivers/Makefile
drivers/acpi/processor_driver.c
drivers/acpi/processor_idle.c
drivers/acpi/processor_perflib.c
drivers/ata/Kconfig
drivers/ata/Makefile
drivers/ata/ahci.h
drivers/ata/ahci_platform.c
drivers/ata/libahci.c
drivers/ata/libata-core.c
drivers/ata/libata-eh.c
drivers/ata/libata-scsi.c
drivers/ata/libata.h
drivers/ata/pata_arasan_cf.c
drivers/ata/sata_fsl.c
drivers/ata/sata_highbank.c [new file with mode: 0644]
drivers/ata/sata_mv.c
drivers/base/devtmpfs.c
drivers/base/dma-buf.c
drivers/base/dma-contiguous.c
drivers/base/platform.c
drivers/base/power/domain.c
drivers/base/power/main.c
drivers/base/power/opp.c
drivers/base/power/power.h
drivers/base/power/runtime.c
drivers/base/power/wakeup.c
drivers/bcma/Kconfig
drivers/bcma/bcma_private.h
drivers/bcma/core.c
drivers/bcma/driver_chipcommon_nflash.c
drivers/bcma/driver_chipcommon_pmu.c
drivers/bcma/driver_chipcommon_sflash.c
drivers/bcma/driver_pci.c
drivers/bcma/driver_pci_host.c
drivers/bcma/host_pci.c
drivers/bcma/host_soc.c
drivers/bcma/main.c
drivers/bcma/sprom.c
drivers/block/floppy.c
drivers/block/loop.c
drivers/block/xen-blkfront.c
drivers/bluetooth/bcm203x.c
drivers/bluetooth/bfusb.c
drivers/bluetooth/bluecard_cs.c
drivers/bluetooth/bpa10x.c
drivers/bluetooth/bt3c_cs.c
drivers/bluetooth/btmrvl_sdio.c
drivers/bluetooth/btsdio.c
drivers/bluetooth/btuart_cs.c
drivers/bluetooth/btusb.c
drivers/bluetooth/btwilink.c
drivers/bluetooth/dtl1_cs.c
drivers/bluetooth/hci_ldisc.c
drivers/bluetooth/hci_ll.c
drivers/bluetooth/hci_vhci.c
drivers/cdrom/gdrom.c
drivers/char/hw_random/Kconfig
drivers/char/hw_random/Makefile
drivers/char/hw_random/tpm-rng.c [new file with mode: 0644]
drivers/char/sonypi.c
drivers/char/tpm/Kconfig
drivers/char/tpm/Makefile
drivers/char/tpm/tpm.c
drivers/char/tpm/tpm.h
drivers/char/tpm/tpm_acpi.c [new file with mode: 0644]
drivers/char/tpm/tpm_bios.c [deleted file]
drivers/char/tpm/tpm_eventlog.c [new file with mode: 0644]
drivers/char/tpm/tpm_eventlog.h [new file with mode: 0644]
drivers/char/tpm/tpm_i2c_infineon.c [new file with mode: 0644]
drivers/char/tpm/tpm_ibmvtpm.c [new file with mode: 0644]
drivers/char/tpm/tpm_ibmvtpm.h [new file with mode: 0644]
drivers/char/tpm/tpm_of.c [new file with mode: 0644]
drivers/char/tpm/tpm_ppi.c [new file with mode: 0644]
drivers/char/tpm/tpm_tis.c
drivers/clk/mxs/Makefile
drivers/clk/mxs/clk-ssp.c [new file with mode: 0644]
drivers/clocksource/sh_cmt.c
drivers/clocksource/sh_mtu2.c
drivers/clocksource/sh_tmu.c
drivers/connector/cn_proc.c
drivers/connector/connector.c
drivers/cpufreq/Kconfig
drivers/cpufreq/Kconfig.x86
drivers/cpufreq/Makefile
drivers/cpufreq/acpi-cpufreq.c
drivers/cpufreq/cpufreq-cpu0.c [new file with mode: 0644]
drivers/cpufreq/cpufreq_conservative.c
drivers/cpufreq/cpufreq_ondemand.c
drivers/cpufreq/longhaul.h
drivers/cpufreq/omap-cpufreq.c
drivers/cpufreq/powernow-k8.c
drivers/cpufreq/powernow-k8.h
drivers/cpuidle/driver.c
drivers/cpuidle/governors/ladder.c
drivers/crypto/n2_core.c
drivers/devfreq/devfreq.c
drivers/edac/edac_mc.c
drivers/extcon/extcon-adc-jack.c
drivers/gpio/Kconfig
drivers/gpio/Makefile
drivers/gpio/gpio-74x164.c
drivers/gpio/gpio-adnp.c [new file with mode: 0644]
drivers/gpio/gpio-adp5588.c
drivers/gpio/gpio-bt8xx.c
drivers/gpio/gpio-da9052.c
drivers/gpio/gpio-davinci.c
drivers/gpio/gpio-em.c
drivers/gpio/gpio-lpc32xx.c
drivers/gpio/gpio-mc9s08dz60.c
drivers/gpio/gpio-ml-ioh.c
drivers/gpio/gpio-msm-v2.c
drivers/gpio/gpio-pcf857x.c
drivers/gpio/gpio-pch.c
drivers/gpio/gpio-pxa.c
drivers/gpio/gpio-sodaville.c
drivers/gpio/gpio-sx150x.c
drivers/gpio/gpio-tc3589x.c
drivers/gpio/gpio-tps65912.c
drivers/gpio/gpio-wm831x.c
drivers/gpio/gpio-wm8350.c
drivers/gpio/gpiolib.c
drivers/gpu/drm/drm_crtc_helper.c
drivers/gpu/drm/drm_fops.c
drivers/gpu/drm/drm_info.c
drivers/gpu/drm/drm_ioctl.c
drivers/gpu/drm/exynos/exynos_drm_g2d.c
drivers/gpu/drm/nouveau/nouveau_gpio.c
drivers/gpu/drm/radeon/radeon_irq_kms.c
drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
drivers/hid/hid-picolcd_fb.c
drivers/hid/hid-wiimote-ext.c
drivers/ieee802154/Kconfig [deleted file]
drivers/ieee802154/Makefile [deleted file]
drivers/ieee802154/at86rf230.c [deleted file]
drivers/ieee802154/fakehard.c [deleted file]
drivers/ieee802154/fakelb.c [deleted file]
drivers/infiniband/core/addr.c
drivers/infiniband/core/cache.c
drivers/infiniband/core/cma.c
drivers/infiniband/core/device.c
drivers/infiniband/core/mad.c
drivers/infiniband/core/netlink.c
drivers/infiniband/core/ucm.c
drivers/infiniband/core/ucma.c
drivers/infiniband/core/uverbs_cmd.c
drivers/infiniband/core/uverbs_main.c
drivers/infiniband/hw/cxgb4/qp.c
drivers/infiniband/hw/mlx4/Makefile
drivers/infiniband/hw/mlx4/alias_GUID.c [new file with mode: 0644]
drivers/infiniband/hw/mlx4/cm.c [new file with mode: 0644]
drivers/infiniband/hw/mlx4/cq.c
drivers/infiniband/hw/mlx4/mad.c
drivers/infiniband/hw/mlx4/main.c
drivers/infiniband/hw/mlx4/mcg.c [new file with mode: 0644]
drivers/infiniband/hw/mlx4/mlx4_ib.h
drivers/infiniband/hw/mlx4/qp.c
drivers/infiniband/hw/mlx4/sysfs.c [new file with mode: 0644]
drivers/infiniband/hw/nes/nes.h
drivers/infiniband/hw/nes/nes_cm.c
drivers/infiniband/hw/nes/nes_hw.c
drivers/infiniband/hw/nes/nes_nic.c
drivers/infiniband/hw/nes/nes_utils.c
drivers/infiniband/hw/nes/nes_verbs.c
drivers/infiniband/hw/qib/qib_common.h
drivers/infiniband/hw/qib/qib_driver.c
drivers/infiniband/hw/qib/qib_fs.c
drivers/infiniband/hw/qib/qib_keys.c
drivers/infiniband/hw/qib/qib_verbs.c
drivers/infiniband/ulp/ipoib/Makefile
drivers/infiniband/ulp/ipoib/ipoib.h
drivers/infiniband/ulp/ipoib/ipoib_cm.c
drivers/infiniband/ulp/ipoib/ipoib_main.c
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
drivers/infiniband/ulp/ipoib/ipoib_netlink.c [new file with mode: 0644]
drivers/infiniband/ulp/ipoib/ipoib_vlan.c
drivers/infiniband/ulp/srp/ib_srp.c
drivers/input/apm-power.c
drivers/input/ff-core.c
drivers/input/ff-memless.c
drivers/input/input.c
drivers/input/joydev.c
drivers/input/keyboard/gpio_keys.c
drivers/input/keyboard/gpio_keys_polled.c
drivers/input/keyboard/omap-keypad.c
drivers/input/keyboard/qt2160.c
drivers/input/keyboard/samsung-keypad.c
drivers/input/keyboard/tegra-kbc.c
drivers/input/misc/rotary_encoder.c
drivers/input/misc/twl4030-pwrbutton.c
drivers/input/misc/uinput.c
drivers/input/mouse/hgpk.c
drivers/input/mouse/synaptics.c
drivers/input/mouse/synaptics_i2c.c
drivers/input/mousedev.c
drivers/input/sparse-keymap.c
drivers/input/tablet/wacom_wac.c
drivers/input/touchscreen/s3c2410_ts.c
drivers/input/touchscreen/wm831x-ts.c
drivers/isdn/gigaset/common.c
drivers/isdn/mISDN/hwchannel.c
drivers/leds/leds-lm3533.c
drivers/leds/leds-lp8788.c
drivers/leds/leds-wm8350.c
drivers/macintosh/ams/ams-core.c
drivers/md/dm-mpath.c
drivers/md/dm-raid1.c
drivers/md/dm-stripe.c
drivers/media/dvb/dvb-core/dvb_net.c
drivers/media/dvb/mantis/mantis_evm.c
drivers/media/dvb/mantis/mantis_uart.c
drivers/media/video/bt8xx/bttv-driver.c
drivers/media/video/cx18/cx18-driver.c
drivers/media/video/cx231xx/cx231xx-cards.c
drivers/media/video/cx23885/cx23885-input.c
drivers/media/video/cx88/cx88-mpeg.c
drivers/media/video/em28xx/em28xx-cards.c
drivers/media/video/omap24xxcam.c
drivers/media/video/saa7134/saa7134-core.c
drivers/media/video/saa7134/saa7134-empress.c
drivers/media/video/tm6000/tm6000-cards.c
drivers/message/fusion/mptbase.c
drivers/mfd/menelaus.c
drivers/misc/ioc4.c
drivers/mmc/core/host.c
drivers/mmc/host/mxs-mmc.c
drivers/mtd/mtdoops.c
drivers/mtd/mtdpart.c
drivers/mtd/ubi/Kconfig
drivers/mtd/ubi/attach.c
drivers/mtd/ubi/build.c
drivers/mtd/ubi/cdev.c
drivers/mtd/ubi/debug.c
drivers/mtd/ubi/debug.h
drivers/mtd/ubi/eba.c
drivers/mtd/ubi/gluebi.c
drivers/mtd/ubi/io.c
drivers/mtd/ubi/misc.c
drivers/mtd/ubi/ubi.h
drivers/mtd/ubi/vtbl.c
drivers/mtd/ubi/wl.c
drivers/net/Kconfig
drivers/net/Makefile
drivers/net/bonding/bond_main.c
drivers/net/can/c_can/c_can.c
drivers/net/can/c_can/c_can.h
drivers/net/can/c_can/c_can_pci.c
drivers/net/can/c_can/c_can_platform.c
drivers/net/can/flexcan.c
drivers/net/can/mscan/mpc5xxx_can.c
drivers/net/can/sja1000/sja1000.c
drivers/net/can/usb/peak_usb/pcan_usb_core.c
drivers/net/can/usb/peak_usb/pcan_usb_core.h
drivers/net/can/usb/peak_usb/pcan_usb_pro.c
drivers/net/ethernet/Kconfig
drivers/net/ethernet/Makefile
drivers/net/ethernet/broadcom/Kconfig
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
drivers/net/ethernet/broadcom/cnic.c
drivers/net/ethernet/broadcom/cnic.h
drivers/net/ethernet/broadcom/cnic_defs.h
drivers/net/ethernet/broadcom/cnic_if.h
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/broadcom/tg3.h
drivers/net/ethernet/brocade/bna/bnad.c
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
drivers/net/ethernet/chelsio/cxgb4/sge.c
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
drivers/net/ethernet/emulex/benet/be.h
drivers/net/ethernet/emulex/benet/be_cmds.c
drivers/net/ethernet/emulex/benet/be_cmds.h
drivers/net/ethernet/emulex/benet/be_ethtool.c
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/freescale/Kconfig
drivers/net/ethernet/freescale/Makefile
drivers/net/ethernet/freescale/fsl_pq_mdio.c
drivers/net/ethernet/freescale/fsl_pq_mdio.h [deleted file]
drivers/net/ethernet/freescale/gianfar.c
drivers/net/ethernet/freescale/gianfar.h
drivers/net/ethernet/freescale/gianfar_ptp.c
drivers/net/ethernet/freescale/ucc_geth.c
drivers/net/ethernet/freescale/xgmac_mdio.c [new file with mode: 0644]
drivers/net/ethernet/i825xx/Kconfig
drivers/net/ethernet/i825xx/znet.c
drivers/net/ethernet/intel/e1000/e1000_ethtool.c
drivers/net/ethernet/intel/e1000/e1000_main.c
drivers/net/ethernet/intel/e1000e/82571.c
drivers/net/ethernet/intel/e1000e/ethtool.c
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/e1000e/phy.c
drivers/net/ethernet/intel/igb/e1000_82575.c
drivers/net/ethernet/intel/igb/e1000_defines.h
drivers/net/ethernet/intel/igb/e1000_phy.c
drivers/net/ethernet/intel/igb/e1000_phy.h
drivers/net/ethernet/intel/igb/e1000_regs.h
drivers/net/ethernet/intel/igb/igb.h
drivers/net/ethernet/intel/igb/igb_ethtool.c
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/igb/igb_ptp.c
drivers/net/ethernet/intel/ixgbe/Makefile
drivers/net/ethernet/intel/ixgbe/ixgbe.h
drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c [new file with mode: 0644]
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
drivers/net/ethernet/intel/ixgbevf/defines.h
drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
drivers/net/ethernet/intel/ixgbevf/mbx.c
drivers/net/ethernet/intel/ixgbevf/mbx.h
drivers/net/ethernet/intel/ixgbevf/vf.c
drivers/net/ethernet/intel/ixgbevf/vf.h
drivers/net/ethernet/mellanox/mlx4/cmd.c
drivers/net/ethernet/mellanox/mlx4/en_tx.c
drivers/net/ethernet/mellanox/mlx4/eq.c
drivers/net/ethernet/mellanox/mlx4/fw.c
drivers/net/ethernet/mellanox/mlx4/fw.h
drivers/net/ethernet/mellanox/mlx4/main.c
drivers/net/ethernet/mellanox/mlx4/mlx4.h
drivers/net/ethernet/mellanox/mlx4/port.c
drivers/net/ethernet/mellanox/mlx4/qp.c
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
drivers/net/ethernet/mellanox/mlx4/sense.c
drivers/net/ethernet/mipsnet.c [deleted file]
drivers/net/ethernet/neterion/vxge/vxge-main.c
drivers/net/ethernet/nvidia/forcedeth.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/sfc/Kconfig
drivers/net/ethernet/sfc/Makefile
drivers/net/ethernet/sfc/bitfield.h
drivers/net/ethernet/sfc/efx.c
drivers/net/ethernet/sfc/efx.h
drivers/net/ethernet/sfc/ethtool.c
drivers/net/ethernet/sfc/falcon_boards.c
drivers/net/ethernet/sfc/filter.c
drivers/net/ethernet/sfc/filter.h
drivers/net/ethernet/sfc/mcdi.c
drivers/net/ethernet/sfc/mcdi.h
drivers/net/ethernet/sfc/mcdi_pcol.h
drivers/net/ethernet/sfc/mtd.c
drivers/net/ethernet/sfc/net_driver.h
drivers/net/ethernet/sfc/nic.c
drivers/net/ethernet/sfc/nic.h
drivers/net/ethernet/sfc/ptp.c [new file with mode: 0644]
drivers/net/ethernet/sfc/rx.c
drivers/net/ethernet/sfc/selftest.c
drivers/net/ethernet/sfc/siena.c
drivers/net/ethernet/sfc/siena_sriov.c
drivers/net/ethernet/sfc/tx.c
drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
drivers/net/ethernet/sun/cassini.c
drivers/net/ethernet/sun/niu.c
drivers/net/ethernet/sun/sunbmac.c
drivers/net/ethernet/ti/Kconfig
drivers/net/ethernet/ti/cpsw.c
drivers/net/ethernet/ti/davinci_mdio.c
drivers/net/ethernet/tundra/tsi108_eth.c
drivers/net/ethernet/wiznet/w5100.c
drivers/net/ethernet/wiznet/w5300.c
drivers/net/hyperv/hyperv_net.h
drivers/net/hyperv/netvsc.c
drivers/net/hyperv/netvsc_drv.c
drivers/net/hyperv/rndis_filter.c
drivers/net/ieee802154/Kconfig [new file with mode: 0644]
drivers/net/ieee802154/Makefile [new file with mode: 0644]
drivers/net/ieee802154/at86rf230.c [new file with mode: 0644]
drivers/net/ieee802154/fakehard.c [new file with mode: 0644]
drivers/net/ieee802154/fakelb.c [new file with mode: 0644]
drivers/net/ieee802154/mrf24j40.c [new file with mode: 0644]
drivers/net/loopback.c
drivers/net/macvlan.c
drivers/net/phy/Kconfig
drivers/net/phy/Makefile
drivers/net/phy/dp83640.c
drivers/net/phy/lxt.c
drivers/net/phy/mdio-gpio.c
drivers/net/phy/mdio-mux-mmioreg.c [new file with mode: 0644]
drivers/net/phy/phy.c
drivers/net/ppp/ppp_generic.c
drivers/net/team/Kconfig
drivers/net/team/team.c
drivers/net/team/team_mode_broadcast.c
drivers/net/team/team_mode_roundrobin.c
drivers/net/tun.c
drivers/net/usb/asix_devices.c
drivers/net/usb/catc.c
drivers/net/usb/cx82310_eth.c
drivers/net/usb/gl620a.c
drivers/net/usb/kaweth.c
drivers/net/usb/net1080.c
drivers/net/usb/qmi_wwan.c
drivers/net/usb/rtl8150.c
drivers/net/usb/sierra_net.c
drivers/net/usb/smsc75xx.c
drivers/net/usb/smsc95xx.c
drivers/net/usb/smsc95xx.h
drivers/net/veth.c
drivers/net/virtio_net.c
drivers/net/vxlan.c [new file with mode: 0644]
drivers/net/wimax/i2400m/driver.c
drivers/net/wireless/adm8211.c
drivers/net/wireless/airo.c
drivers/net/wireless/at76c50x-usb.c
drivers/net/wireless/ath/ath.h
drivers/net/wireless/ath/ath5k/ath5k.h
drivers/net/wireless/ath/ath5k/base.c
drivers/net/wireless/ath/ath5k/mac80211-ops.c
drivers/net/wireless/ath/ath5k/phy.c
drivers/net/wireless/ath/ath6kl/cfg80211.c
drivers/net/wireless/ath/ath6kl/cfg80211.h
drivers/net/wireless/ath/ath9k/ani.c
drivers/net/wireless/ath/ath9k/antenna.c
drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
drivers/net/wireless/ath/ath9k/ar9003_hw.c
drivers/net/wireless/ath/ath9k/ar9003_mac.c
drivers/net/wireless/ath/ath9k/ar9003_mci.c
drivers/net/wireless/ath/ath9k/ar9003_mci.h
drivers/net/wireless/ath/ath9k/ar9003_phy.c
drivers/net/wireless/ath/ath9k/ar9003_phy.h
drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h [new file with mode: 0644]
drivers/net/wireless/ath/ath9k/ath9k.h
drivers/net/wireless/ath/ath9k/btcoex.c
drivers/net/wireless/ath/ath9k/btcoex.h
drivers/net/wireless/ath/ath9k/debug.c
drivers/net/wireless/ath/ath9k/debug.h
drivers/net/wireless/ath/ath9k/eeprom.h
drivers/net/wireless/ath/ath9k/gpio.c
drivers/net/wireless/ath/ath9k/hif_usb.c
drivers/net/wireless/ath/ath9k/hif_usb.h
drivers/net/wireless/ath/ath9k/htc.h
drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
drivers/net/wireless/ath/ath9k/htc_drv_init.c
drivers/net/wireless/ath/ath9k/htc_drv_main.c
drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
drivers/net/wireless/ath/ath9k/hw-ops.h
drivers/net/wireless/ath/ath9k/hw.c
drivers/net/wireless/ath/ath9k/hw.h
drivers/net/wireless/ath/ath9k/init.c
drivers/net/wireless/ath/ath9k/main.c
drivers/net/wireless/ath/ath9k/mci.c
drivers/net/wireless/ath/ath9k/pci.c
drivers/net/wireless/ath/ath9k/rc.c
drivers/net/wireless/ath/ath9k/rc.h
drivers/net/wireless/ath/ath9k/recv.c
drivers/net/wireless/ath/ath9k/reg.h
drivers/net/wireless/ath/ath9k/wow.c
drivers/net/wireless/ath/ath9k/xmit.c
drivers/net/wireless/ath/carl9170/carl9170.h
drivers/net/wireless/ath/carl9170/fw.c
drivers/net/wireless/ath/carl9170/mac.c
drivers/net/wireless/ath/carl9170/main.c
drivers/net/wireless/ath/carl9170/rx.c
drivers/net/wireless/ath/carl9170/tx.c
drivers/net/wireless/b43/Makefile
drivers/net/wireless/b43/b43.h
drivers/net/wireless/b43/main.c
drivers/net/wireless/b43/phy_common.c
drivers/net/wireless/b43/phy_common.h
drivers/net/wireless/b43/phy_n.c
drivers/net/wireless/b43/phy_n.h
drivers/net/wireless/b43/radio_2057.c [new file with mode: 0644]
drivers/net/wireless/b43/radio_2057.h [new file with mode: 0644]
drivers/net/wireless/b43/tables_nphy.c
drivers/net/wireless/b43/tables_nphy.h
drivers/net/wireless/b43legacy/main.c
drivers/net/wireless/brcm80211/Kconfig
drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
drivers/net/wireless/brcm80211/brcmfmac/dhd.h
drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
drivers/net/wireless/brcm80211/brcmfmac/usb.c
drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
drivers/net/wireless/brcm80211/brcmsmac/main.c
drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
drivers/net/wireless/brcm80211/include/brcmu_wifi.h
drivers/net/wireless/hostap/hostap_ap.c
drivers/net/wireless/hostap/hostap_hw.c
drivers/net/wireless/hostap/hostap_info.c
drivers/net/wireless/hostap/hostap_ioctl.c
drivers/net/wireless/hostap/hostap_main.c
drivers/net/wireless/ipw2x00/ipw2100.c
drivers/net/wireless/ipw2x00/ipw2200.c
drivers/net/wireless/ipw2x00/libipw_wx.c
drivers/net/wireless/iwlegacy/3945-mac.c
drivers/net/wireless/iwlegacy/4965-mac.c
drivers/net/wireless/iwlegacy/4965.h
drivers/net/wireless/iwlegacy/common.c
drivers/net/wireless/iwlegacy/common.h
drivers/net/wireless/iwlwifi/dvm/agn.h
drivers/net/wireless/iwlwifi/dvm/commands.h
drivers/net/wireless/iwlwifi/dvm/debugfs.c
drivers/net/wireless/iwlwifi/dvm/dev.h
drivers/net/wireless/iwlwifi/dvm/mac80211.c
drivers/net/wireless/iwlwifi/dvm/main.c
drivers/net/wireless/iwlwifi/dvm/rx.c
drivers/net/wireless/iwlwifi/dvm/scan.c
drivers/net/wireless/iwlwifi/dvm/sta.c
drivers/net/wireless/iwlwifi/dvm/tx.c
drivers/net/wireless/iwlwifi/dvm/ucode.c
drivers/net/wireless/iwlwifi/iwl-devtrace.h
drivers/net/wireless/iwlwifi/iwl-drv.c
drivers/net/wireless/iwlwifi/iwl-drv.h
drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
drivers/net/wireless/iwlwifi/iwl-fw.h
drivers/net/wireless/iwlwifi/iwl-op-mode.h
drivers/net/wireless/iwlwifi/iwl-trans.h
drivers/net/wireless/iwlwifi/pcie/drv.c
drivers/net/wireless/iwlwifi/pcie/internal.h
drivers/net/wireless/iwlwifi/pcie/rx.c
drivers/net/wireless/iwlwifi/pcie/trans.c
drivers/net/wireless/iwlwifi/pcie/tx.c
drivers/net/wireless/libertas/cmd.c
drivers/net/wireless/libertas/cmd.h
drivers/net/wireless/libertas/main.c
drivers/net/wireless/libertas_tf/main.c
drivers/net/wireless/mac80211_hwsim.c
drivers/net/wireless/mwifiex/11n.c
drivers/net/wireless/mwifiex/11n.h
drivers/net/wireless/mwifiex/11n_aggr.c
drivers/net/wireless/mwifiex/11n_rxreorder.c
drivers/net/wireless/mwifiex/11n_rxreorder.h
drivers/net/wireless/mwifiex/Makefile
drivers/net/wireless/mwifiex/cfg80211.c
drivers/net/wireless/mwifiex/cmdevt.c
drivers/net/wireless/mwifiex/decl.h
drivers/net/wireless/mwifiex/fw.h
drivers/net/wireless/mwifiex/ie.c
drivers/net/wireless/mwifiex/init.c
drivers/net/wireless/mwifiex/ioctl.h
drivers/net/wireless/mwifiex/main.c
drivers/net/wireless/mwifiex/main.h
drivers/net/wireless/mwifiex/scan.c
drivers/net/wireless/mwifiex/sta_cmd.c
drivers/net/wireless/mwifiex/sta_cmdresp.c
drivers/net/wireless/mwifiex/sta_event.c
drivers/net/wireless/mwifiex/sta_ioctl.c
drivers/net/wireless/mwifiex/sta_rx.c
drivers/net/wireless/mwifiex/sta_tx.c
drivers/net/wireless/mwifiex/txrx.c
drivers/net/wireless/mwifiex/uap_cmd.c
drivers/net/wireless/mwifiex/uap_event.c [new file with mode: 0644]
drivers/net/wireless/mwifiex/uap_txrx.c [new file with mode: 0644]
drivers/net/wireless/mwifiex/util.c
drivers/net/wireless/mwifiex/wmm.c
drivers/net/wireless/mwl8k.c
drivers/net/wireless/orinoco/wext.c
drivers/net/wireless/p54/eeprom.c
drivers/net/wireless/p54/eeprom.h
drivers/net/wireless/p54/lmac.h
drivers/net/wireless/p54/main.c
drivers/net/wireless/p54/p54pci.c
drivers/net/wireless/p54/p54pci.h
drivers/net/wireless/p54/txrx.c
drivers/net/wireless/rndis_wlan.c
drivers/net/wireless/rt2x00/rt2400pci.c
drivers/net/wireless/rt2x00/rt2400pci.h
drivers/net/wireless/rt2x00/rt2500pci.c
drivers/net/wireless/rt2x00/rt2500pci.h
drivers/net/wireless/rt2x00/rt2500usb.c
drivers/net/wireless/rt2x00/rt2500usb.h
drivers/net/wireless/rt2x00/rt2800.h
drivers/net/wireless/rt2x00/rt2800lib.c
drivers/net/wireless/rt2x00/rt2800lib.h
drivers/net/wireless/rt2x00/rt2800pci.c
drivers/net/wireless/rt2x00/rt2800usb.c
drivers/net/wireless/rt2x00/rt2x00.h
drivers/net/wireless/rt2x00/rt2x00dev.c
drivers/net/wireless/rt2x00/rt2x00mac.c
drivers/net/wireless/rt2x00/rt2x00queue.c
drivers/net/wireless/rt2x00/rt61pci.c
drivers/net/wireless/rt2x00/rt61pci.h
drivers/net/wireless/rt2x00/rt73usb.c
drivers/net/wireless/rt2x00/rt73usb.h
drivers/net/wireless/rtl818x/rtl8180/dev.c
drivers/net/wireless/rtl818x/rtl8187/dev.c
drivers/net/wireless/rtlwifi/Kconfig
drivers/net/wireless/rtlwifi/base.c
drivers/net/wireless/rtlwifi/core.c
drivers/net/wireless/rtlwifi/pci.c
drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
drivers/net/wireless/rtlwifi/rtl8192ce/trx.h
drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
drivers/net/wireless/rtlwifi/rtl8192cu/trx.h
drivers/net/wireless/rtlwifi/rtl8192de/dm.c
drivers/net/wireless/rtlwifi/rtl8192de/fw.c
drivers/net/wireless/rtlwifi/rtl8192de/phy.c
drivers/net/wireless/rtlwifi/rtl8192de/trx.c
drivers/net/wireless/rtlwifi/rtl8192de/trx.h
drivers/net/wireless/rtlwifi/rtl8192se/trx.c
drivers/net/wireless/rtlwifi/rtl8192se/trx.h
drivers/net/wireless/rtlwifi/usb.c
drivers/net/wireless/rtlwifi/wifi.h
drivers/net/wireless/ti/wl1251/main.c
drivers/net/wireless/ti/wl12xx/main.c
drivers/net/wireless/ti/wl12xx/wl12xx.h
drivers/net/wireless/ti/wl18xx/debugfs.c
drivers/net/wireless/ti/wl18xx/main.c
drivers/net/wireless/ti/wl18xx/wl18xx.h
drivers/net/wireless/ti/wlcore/cmd.c
drivers/net/wireless/ti/wlcore/cmd.h
drivers/net/wireless/ti/wlcore/conf.h
drivers/net/wireless/ti/wlcore/debug.h
drivers/net/wireless/ti/wlcore/debugfs.c
drivers/net/wireless/ti/wlcore/init.c
drivers/net/wireless/ti/wlcore/io.h
drivers/net/wireless/ti/wlcore/main.c
drivers/net/wireless/ti/wlcore/ps.c
drivers/net/wireless/ti/wlcore/rx.c
drivers/net/wireless/ti/wlcore/scan.c
drivers/net/wireless/ti/wlcore/spi.c
drivers/net/wireless/ti/wlcore/testmode.c
drivers/net/wireless/ti/wlcore/tx.c
drivers/net/wireless/ti/wlcore/tx.h
drivers/net/wireless/ti/wlcore/wlcore.h
drivers/net/wireless/ti/wlcore/wlcore_i.h
drivers/net/wireless/wl3501_cs.c
drivers/net/wireless/zd1211rw/zd_mac.c
drivers/net/wireless/zd1211rw/zd_usb.c
drivers/net/xen-netback/netback.c
drivers/net/xen-netfront.c
drivers/nfc/Kconfig
drivers/nfc/Makefile
drivers/nfc/nfcwilink.c
drivers/nfc/pn533.c
drivers/nfc/pn544.c [deleted file]
drivers/nfc/pn544_hci.c
drivers/of/address.c
drivers/of/base.c
drivers/of/irq.c
drivers/of/of_i2c.c
drivers/of/platform.c
drivers/pci/pci-driver.c
drivers/pci/xen-pcifront.c
drivers/pinctrl/Kconfig
drivers/pinctrl/Makefile
drivers/pinctrl/core.c
drivers/pinctrl/pinctrl-bcm2835.c [new file with mode: 0644]
drivers/pinctrl/pinctrl-imx.c
drivers/pinctrl/pinctrl-imx35.c [new file with mode: 0644]
drivers/pinctrl/pinctrl-imx51.c
drivers/pinctrl/pinctrl-imx53.c
drivers/pinctrl/pinctrl-nomadik-db8500.c
drivers/pinctrl/pinctrl-nomadik-db8540.c [new file with mode: 0644]
drivers/pinctrl/pinctrl-nomadik-stn8815.c [new file with mode: 0644]
drivers/pinctrl/pinctrl-nomadik.c
drivers/pinctrl/pinctrl-nomadik.h
drivers/pinctrl/pinctrl-single.c
drivers/pinctrl/pinctrl-sirf.c
drivers/pinctrl/pinmux.c
drivers/platform/x86/thinkpad_acpi.c
drivers/power/ab8500_btemp.c
drivers/power/ab8500_charger.c
drivers/power/ab8500_fg.c
drivers/power/abx500_chargalg.c
drivers/power/charger-manager.c
drivers/power/collie_battery.c
drivers/power/ds2760_battery.c
drivers/power/jz4740-battery.c
drivers/power/max17040_battery.c
drivers/power/tosa_battery.c
drivers/power/wm97xx_battery.c
drivers/power/z2_battery.c
drivers/ptp/ptp_clock.c
drivers/ptp/ptp_ixp46x.c
drivers/ptp/ptp_pch.c
drivers/ptp/ptp_private.h
drivers/regulator/core.c
drivers/s390/net/ctcm_fsms.c
drivers/s390/net/ctcm_main.c
drivers/s390/net/lcs.c
drivers/s390/net/qeth_core_main.c
drivers/s390/net/qeth_l3_main.c
drivers/s390/scsi/zfcp_aux.c
drivers/s390/scsi/zfcp_ccw.c
drivers/s390/scsi/zfcp_cfdc.c
drivers/s390/scsi/zfcp_dbf.c
drivers/s390/scsi/zfcp_dbf.h
drivers/s390/scsi/zfcp_def.h
drivers/s390/scsi/zfcp_erp.c
drivers/s390/scsi/zfcp_ext.h
drivers/s390/scsi/zfcp_fc.c
drivers/s390/scsi/zfcp_fsf.c
drivers/s390/scsi/zfcp_qdio.c
drivers/s390/scsi/zfcp_sysfs.c
drivers/s390/scsi/zfcp_unit.c
drivers/sbus/char/display7seg.c
drivers/sbus/char/envctrl.c
drivers/sbus/char/openprom.c
drivers/scsi/arcmsr/arcmsr_hba.c
drivers/scsi/be2iscsi/be_cmds.c
drivers/scsi/be2iscsi/be_cmds.h
drivers/scsi/be2iscsi/be_iscsi.c
drivers/scsi/be2iscsi/be_main.c
drivers/scsi/be2iscsi/be_main.h
drivers/scsi/be2iscsi/be_mgmt.c
drivers/scsi/be2iscsi/be_mgmt.h
drivers/scsi/bfa/bfa_core.c
drivers/scsi/bfa/bfa_cs.h
drivers/scsi/bfa/bfa_defs_fcs.h
drivers/scsi/bfa/bfa_fc.h
drivers/scsi/bfa/bfa_fcbuild.c
drivers/scsi/bfa/bfa_fcbuild.h
drivers/scsi/bfa/bfa_fcpim.c
drivers/scsi/bfa/bfa_fcs.c
drivers/scsi/bfa/bfa_fcs.h
drivers/scsi/bfa/bfa_fcs_fcpim.c
drivers/scsi/bfa/bfa_fcs_lport.c
drivers/scsi/bfa/bfa_fcs_rport.c
drivers/scsi/bfa/bfa_ioc.c
drivers/scsi/bfa/bfa_ioc.h
drivers/scsi/bfa/bfa_modules.h
drivers/scsi/bfa/bfa_svc.c
drivers/scsi/bfa/bfa_svc.h
drivers/scsi/bfa/bfad.c
drivers/scsi/bfa/bfad_attr.c
drivers/scsi/bfa/bfad_bsg.c
drivers/scsi/bfa/bfad_bsg.h
drivers/scsi/bfa/bfad_drv.h
drivers/scsi/bfa/bfad_im.c
drivers/scsi/bnx2fc/bnx2fc_io.c
drivers/scsi/constants.c
drivers/scsi/device_handler/scsi_dh_alua.c
drivers/scsi/device_handler/scsi_dh_rdac.c
drivers/scsi/hpsa.c
drivers/scsi/ibmvscsi/Makefile
drivers/scsi/ibmvscsi/ibmvfc.c
drivers/scsi/ibmvscsi/ibmvfc.h
drivers/scsi/ibmvscsi/ibmvscsi.c
drivers/scsi/ibmvscsi/ibmvscsi.h
drivers/scsi/ibmvscsi/rpa_vscsi.c [deleted file]
drivers/scsi/ipr.c
drivers/scsi/ipr.h
drivers/scsi/isci/host.c
drivers/scsi/isci/host.h
drivers/scsi/isci/init.c
drivers/scsi/isci/phy.c
drivers/scsi/isci/probe_roms.c
drivers/scsi/isci/remote_node_context.h
drivers/scsi/iscsi_tcp.c
drivers/scsi/libsas/sas_ata.c
drivers/scsi/libsas/sas_discover.c
drivers/scsi/libsas/sas_dump.c
drivers/scsi/libsas/sas_event.c
drivers/scsi/libsas/sas_init.c
drivers/scsi/libsas/sas_internal.h
drivers/scsi/libsas/sas_phy.c
drivers/scsi/libsas/sas_port.c
drivers/scsi/lpfc/lpfc.h
drivers/scsi/lpfc/lpfc_attr.c
drivers/scsi/lpfc/lpfc_bsg.c
drivers/scsi/lpfc/lpfc_crtn.h
drivers/scsi/lpfc/lpfc_ct.c
drivers/scsi/lpfc/lpfc_debugfs.c
drivers/scsi/lpfc/lpfc_debugfs.h
drivers/scsi/lpfc/lpfc_disc.h
drivers/scsi/lpfc/lpfc_els.c
drivers/scsi/lpfc/lpfc_hbadisc.c
drivers/scsi/lpfc/lpfc_hw.h
drivers/scsi/lpfc/lpfc_hw4.h
drivers/scsi/lpfc/lpfc_init.c
drivers/scsi/lpfc/lpfc_mbox.c
drivers/scsi/lpfc/lpfc_mem.c
drivers/scsi/lpfc/lpfc_nportdisc.c
drivers/scsi/lpfc/lpfc_scsi.c
drivers/scsi/lpfc/lpfc_sli.c
drivers/scsi/lpfc/lpfc_sli.h
drivers/scsi/lpfc/lpfc_sli4.h
drivers/scsi/lpfc/lpfc_version.h
drivers/scsi/megaraid/megaraid_sas.h
drivers/scsi/megaraid/megaraid_sas_base.c
drivers/scsi/megaraid/megaraid_sas_fusion.c
drivers/scsi/mpt2sas/Kconfig
drivers/scsi/mpt2sas/mpi/mpi2.h
drivers/scsi/mpt2sas/mpi/mpi2_init.h
drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
drivers/scsi/mpt2sas/mpi/mpi2_raid.h
drivers/scsi/mpt2sas/mpt2sas_base.c
drivers/scsi/mpt2sas/mpt2sas_base.h
drivers/scsi/mpt2sas/mpt2sas_config.c
drivers/scsi/mpt2sas/mpt2sas_ctl.c
drivers/scsi/mpt2sas/mpt2sas_ctl.h
drivers/scsi/mpt2sas/mpt2sas_debug.h
drivers/scsi/mpt2sas/mpt2sas_scsih.c
drivers/scsi/mpt2sas/mpt2sas_transport.c
drivers/scsi/mvsas/mv_sas.c
drivers/scsi/pmcraid.c
drivers/scsi/qla2xxx/qla_attr.c
drivers/scsi/qla2xxx/qla_bsg.c
drivers/scsi/qla2xxx/qla_bsg.h
drivers/scsi/qla2xxx/qla_dbg.c
drivers/scsi/qla2xxx/qla_dbg.h
drivers/scsi/qla2xxx/qla_def.h
drivers/scsi/qla2xxx/qla_dfs.c
drivers/scsi/qla2xxx/qla_fw.h
drivers/scsi/qla2xxx/qla_gbl.h
drivers/scsi/qla2xxx/qla_gs.c
drivers/scsi/qla2xxx/qla_init.c
drivers/scsi/qla2xxx/qla_inline.h
drivers/scsi/qla2xxx/qla_iocb.c
drivers/scsi/qla2xxx/qla_isr.c
drivers/scsi/qla2xxx/qla_mbx.c
drivers/scsi/qla2xxx/qla_mid.c
drivers/scsi/qla2xxx/qla_nx.c
drivers/scsi/qla2xxx/qla_nx.h
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/qla2xxx/qla_settings.h
drivers/scsi/qla2xxx/qla_sup.c
drivers/scsi/qla2xxx/qla_target.c
drivers/scsi/qla2xxx/qla_version.h
drivers/scsi/qla4xxx/Kconfig
drivers/scsi/qla4xxx/Makefile
drivers/scsi/qla4xxx/ql4_83xx.c [new file with mode: 0644]
drivers/scsi/qla4xxx/ql4_83xx.h [new file with mode: 0644]
drivers/scsi/qla4xxx/ql4_attr.c
drivers/scsi/qla4xxx/ql4_dbg.c
drivers/scsi/qla4xxx/ql4_dbg.h
drivers/scsi/qla4xxx/ql4_def.h
drivers/scsi/qla4xxx/ql4_fw.h
drivers/scsi/qla4xxx/ql4_glbl.h
drivers/scsi/qla4xxx/ql4_init.c
drivers/scsi/qla4xxx/ql4_inline.h
drivers/scsi/qla4xxx/ql4_iocb.c
drivers/scsi/qla4xxx/ql4_isr.c
drivers/scsi/qla4xxx/ql4_mbx.c
drivers/scsi/qla4xxx/ql4_nvram.c
drivers/scsi/qla4xxx/ql4_nvram.h
drivers/scsi/qla4xxx/ql4_nx.c
drivers/scsi/qla4xxx/ql4_nx.h
drivers/scsi/qla4xxx/ql4_os.c
drivers/scsi/qla4xxx/ql4_version.h
drivers/scsi/scsi_debug.c
drivers/scsi/scsi_devinfo.c
drivers/scsi/scsi_lib.c
drivers/scsi/scsi_netlink.c
drivers/scsi/scsi_scan.c
drivers/scsi/scsi_sysfs.c
drivers/scsi/scsi_transport_iscsi.c
drivers/scsi/sd.c
drivers/scsi/sd.h
drivers/scsi/sd_dif.c
drivers/scsi/st.c
drivers/scsi/st.h
drivers/spi/Kconfig
drivers/spi/Makefile
drivers/spi/spi-altera.c
drivers/spi/spi-gpio.c
drivers/spi/spi-imx.c
drivers/spi/spi-mpc512x-psc.c
drivers/spi/spi-mpc52xx-psc.c
drivers/spi/spi-mpc52xx.c
drivers/spi/spi-mxs.c [new file with mode: 0644]
drivers/spi/spi-omap2-mcspi.c
drivers/spi/spi-orion.c
drivers/spi/spi-pl022.c
drivers/spi/spi-s3c24xx.c
drivers/spi/spi-s3c64xx.c
drivers/spi/spi-sc18is602.c [new file with mode: 0644]
drivers/spi/spi-sh-hspi.c
drivers/spi/spi-stmp.c
drivers/spi/spi-tegra.c [deleted file]
drivers/spi/spi-tle62x0.c
drivers/spi/spi-topcliff-pch.c
drivers/ssb/driver_mipscore.c
drivers/staging/android/binder.c
drivers/staging/ccg/u_ether.c
drivers/staging/gdm72xx/netlink_k.c
drivers/staging/nvec/nvec.c
drivers/staging/omapdrm/omap_gem.c
drivers/staging/winbond/wbusb.c
drivers/thermal/thermal_sys.c
drivers/tty/hvc/hvc_xen.c
drivers/tty/hvc/hvsi.c
drivers/tty/ipwireless/hardware.c
drivers/tty/ipwireless/network.c
drivers/tty/serial/kgdboc.c
drivers/tty/serial/omap-serial.c
drivers/tty/tty_audit.c
drivers/tty/tty_io.c
drivers/tty/tty_ldisc.c
drivers/usb/atm/speedtch.c
drivers/usb/atm/ueagle-atm.c
drivers/usb/gadget/f_fs.c
drivers/usb/gadget/inode.c
drivers/usb/gadget/u_ether.c
drivers/usb/host/ohci-hcd.c
drivers/usb/otg/isp1301_omap.c
drivers/vfio/vfio.c
drivers/vhost/vhost.c
drivers/video/msm/mdp.c
drivers/video/omap2/displays/panel-taal.c
drivers/video/omap2/dss/dsi.c
drivers/xen/events.c
drivers/xen/gntdev.c
drivers/xen/grant-table.c
drivers/xen/privcmd.c
drivers/xen/swiotlb-xen.c
drivers/xen/sys-hypervisor.c
drivers/xen/tmem.c
drivers/xen/xen-acpi-processor.c
drivers/xen/xen-pciback/pci_stub.c
drivers/xen/xenbus/xenbus_client.c
drivers/xen/xenbus/xenbus_comms.c
drivers/xen/xenbus/xenbus_dev_backend.c
drivers/xen/xenbus/xenbus_probe.c
drivers/xen/xenbus/xenbus_probe_frontend.c
drivers/xen/xenbus/xenbus_xs.c
drivers/xen/xenfs/super.c
firmware/Makefile
firmware/cxgb3/t3fw-7.10.0.bin.ihex [deleted file]
fs/9p/acl.c
fs/9p/v9fs.c
fs/Makefile
fs/adfs/adfs.h
fs/adfs/inode.c
fs/adfs/super.c
fs/affs/affs.h
fs/affs/inode.c
fs/affs/super.c
fs/afs/callback.c
fs/afs/server.c
fs/afs/super.c
fs/afs/vlocation.c
fs/attr.c
fs/autofs4/dev-ioctl.c
fs/autofs4/waitq.c
fs/befs/befs.h
fs/befs/linuxvfs.c
fs/bfs/inode.c
fs/binfmt_elf.c
fs/btrfs/acl.c
fs/btrfs/delayed-inode.c
fs/btrfs/extent_io.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/reada.c
fs/ceph/inode.c
fs/ceph/super.c
fs/cifs/cifsfs.c
fs/coda/inode.c
fs/compat.c
fs/compat_ioctl.c
fs/configfs/inode.c
fs/coredump.c [new file with mode: 0644]
fs/cramfs/inode.c
fs/dcache.c
fs/debugfs/inode.c
fs/dlm/netlink.c
fs/ecryptfs/main.c
fs/ecryptfs/messaging.c
fs/efs/inode.c
fs/efs/super.c
fs/eventpoll.c
fs/exec.c
fs/exofs/inode.c
fs/exofs/super.c
fs/ext2/acl.c
fs/ext2/super.c
fs/ext3/acl.c
fs/ext3/super.c
fs/ext4/acl.c
fs/ext4/ioctl.c
fs/ext4/super.c
fs/fat/fat.h
fs/fat/file.c
fs/fat/inode.c
fs/fcntl.c
fs/fhandle.c
fs/file.c
fs/file_table.c
fs/freevxfs/vxfs_inode.c
fs/freevxfs/vxfs_super.c
fs/fuse/dev.c
fs/fuse/inode.c
fs/generic_acl.c
fs/gfs2/acl.c
fs/gfs2/lock_dlm.c
fs/gfs2/quota.c
fs/gfs2/super.c
fs/hfs/hfs_fs.h
fs/hfs/inode.c
fs/hfs/super.c
fs/hfsplus/catalog.c
fs/hfsplus/hfsplus_fs.h
fs/hfsplus/inode.c
fs/hfsplus/options.c
fs/hfsplus/super.c
fs/hostfs/hostfs_kern.c
fs/hpfs/hpfs_fn.h
fs/hpfs/inode.c
fs/hpfs/namei.c
fs/hpfs/super.c
fs/hugetlbfs/inode.c
fs/ioctl.c
fs/isofs/inode.c
fs/isofs/isofs.h
fs/isofs/rock.c
fs/jffs2/acl.c
fs/jffs2/file.c
fs/jffs2/fs.c
fs/jffs2/os-linux.h
fs/jffs2/super.c
fs/jfs/Makefile
fs/jfs/acl.c
fs/jfs/file.c
fs/jfs/ioctl.c
fs/jfs/jfs_discard.c [new file with mode: 0644]
fs/jfs/jfs_discard.h [new file with mode: 0644]
fs/jfs/jfs_dmap.c
fs/jfs/jfs_dmap.h
fs/jfs/jfs_filsys.h
fs/jfs/jfs_imap.c
fs/jfs/jfs_incore.h
fs/jfs/jfs_txnmgr.c
fs/jfs/super.c
fs/jfs/xattr.c
fs/locks.c
fs/logfs/inode.c
fs/logfs/readwrite.c
fs/minix/inode.c
fs/namei.c
fs/ncpfs/inode.c
fs/nfs/inode.c
fs/nfs/nfs3acl.c
fs/nfs/nfs4renewd.c
fs/nfsd/nfs4state.c
fs/nfsd/vfs.c
fs/nilfs2/inode.c
fs/nilfs2/super.c
fs/notify/fanotify/fanotify_user.c
fs/notify/inotify/inotify_user.c
fs/ntfs/inode.c
fs/ntfs/super.c
fs/ntfs/volume.h
fs/ocfs2/acl.c
fs/ocfs2/cluster/heartbeat.c
fs/ocfs2/cluster/quorum.c
fs/ocfs2/dlmfs/dlmfs.c
fs/ocfs2/file.c
fs/ocfs2/quota_global.c
fs/ocfs2/quota_local.c
fs/ocfs2/super.c
fs/omfs/inode.c
fs/omfs/omfs.h
fs/open.c
fs/openpromfs/inode.c
fs/pipe.c
fs/posix_acl.c
fs/proc/Makefile
fs/proc/base.c
fs/proc/fd.c [new file with mode: 0644]
fs/proc/fd.h [new file with mode: 0644]
fs/proc/internal.h
fs/qnx4/inode.c
fs/qnx6/inode.c
fs/quota/Makefile
fs/quota/dquot.c
fs/quota/kqid.c [new file with mode: 0644]
fs/quota/netlink.c
fs/quota/quota.c
fs/quota/quota_tree.c
fs/quota/quota_v1.c
fs/quota/quota_v2.c
fs/read_write.c
fs/read_write.h
fs/readdir.c
fs/reiserfs/inode.c
fs/reiserfs/super.c
fs/reiserfs/xattr_acl.c
fs/romfs/super.c
fs/select.c
fs/seq_file.c
fs/signalfd.c
fs/splice.c
fs/squashfs/inode.c
fs/squashfs/super.c
fs/stat.c
fs/statfs.c
fs/super.c
fs/sync.c
fs/sysv/inode.c
fs/timerfd.c
fs/ubifs/budget.c
fs/ubifs/commit.c
fs/ubifs/compress.c
fs/ubifs/debug.c
fs/ubifs/debug.h
fs/ubifs/dir.c
fs/ubifs/file.c
fs/ubifs/gc.c
fs/ubifs/journal.c
fs/ubifs/log.c
fs/ubifs/lprops.c
fs/ubifs/lpt.c
fs/ubifs/lpt_commit.c
fs/ubifs/orphan.c
fs/ubifs/recovery.c
fs/ubifs/replay.c
fs/ubifs/sb.c
fs/ubifs/scan.c
fs/ubifs/super.c
fs/ubifs/tnc_misc.c
fs/ubifs/ubifs.h
fs/udf/inode.c
fs/udf/super.c
fs/udf/udf_sb.h
fs/ufs/inode.c
fs/ufs/super.c
fs/utimes.c
fs/xattr.c
fs/xattr_acl.c
fs/xfs/xfs_acl.c
fs/xfs/xfs_dfrag.c
fs/xfs/xfs_file.c
fs/xfs/xfs_ialloc.c
fs/xfs/xfs_ioctl.c
fs/xfs/xfs_mount.c
fs/xfs/xfs_mount.h
fs/xfs/xfs_quotaops.c
fs/xfs/xfs_super.c
fs/xfs/xfs_super.h
fs/xfs/xfs_sync.c
fs/xfs/xfs_trace.h
fs/xfs/xfs_trans_dquot.c
include/acpi/processor.h
include/asm-generic/gpio.h
include/drm/drmP.h
include/linux/Kbuild
include/linux/amba/pl022.h
include/linux/ata.h
include/linux/audit.h
include/linux/bcma/bcma_driver_chipcommon.h
include/linux/bcma/bcma_regs.h
include/linux/cgroup.h
include/linux/cgroup_subsys.h
include/linux/clockchips.h
include/linux/compat.h
include/linux/device.h
include/linux/etherdevice.h
include/linux/ethtool.h
include/linux/fdtable.h
include/linux/file.h
include/linux/filter.h
include/linux/frontswap.h
include/linux/fs.h
include/linux/hash.h
include/linux/i2c/pcf857x.h
include/linux/ieee80211.h
include/linux/if_arp.h
include/linux/if_link.h
include/linux/if_team.h
include/linux/if_tunnel.h
include/linux/if_vlan.h
include/linux/ima.h
include/linux/inet_diag.h
include/linux/inetdevice.h
include/linux/init_task.h
include/linux/input/tegra_kbc.h [new file with mode: 0644]
include/linux/integrity.h
include/linux/ip6_tunnel.h
include/linux/ipc.h
include/linux/ipv6.h
include/linux/jiffies.h
include/linux/key.h
include/linux/libata.h
include/linux/loop.h
include/linux/mdio.h
include/linux/mlx4/device.h
include/linux/mlx4/driver.h
include/linux/mlx4/qp.h
include/linux/mtd/partitions.h
include/linux/net.h
include/linux/netdevice.h
include/linux/netfilter.h
include/linux/netfilter/ipset/ip_set.h
include/linux/netfilter/ipset/ip_set_ahash.h
include/linux/netfilter/nf_conntrack_amanda.h
include/linux/netfilter/nf_conntrack_ftp.h
include/linux/netfilter/nf_conntrack_h323.h
include/linux/netfilter/nf_conntrack_irc.h
include/linux/netfilter/nf_conntrack_pptp.h
include/linux/netfilter/nf_conntrack_sip.h
include/linux/netfilter/nf_nat.h
include/linux/netfilter/nfnetlink_conntrack.h
include/linux/netfilter/nfnetlink_queue.h
include/linux/netfilter/xt_time.h
include/linux/netfilter_ipv4.h
include/linux/netfilter_ipv6/Kbuild
include/linux/netfilter_ipv6/ip6t_NPT.h [new file with mode: 0644]
include/linux/netlink.h
include/linux/nfc.h
include/linux/nl80211.h
include/linux/of.h
include/linux/of_address.h
include/linux/of_mdio.h
include/linux/opp.h
include/linux/packet_diag.h [new file with mode: 0644]
include/linux/pci_ids.h
include/linux/pinctrl/consumer.h
include/linux/pinctrl/pinctrl-state.h
include/linux/platform_data/sc18is602.h [new file with mode: 0644]
include/linux/pm.h
include/linux/pm_domain.h
include/linux/posix_acl.h
include/linux/posix_acl_xattr.h
include/linux/pps_kernel.h
include/linux/projid.h [new file with mode: 0644]
include/linux/ptp_clock_kernel.h
include/linux/ptrace.h
include/linux/quota.h
include/linux/quotaops.h
include/linux/rfkill.h
include/linux/sched.h
include/linux/security.h
include/linux/seq_file.h
include/linux/shmem_fs.h
include/linux/skbuff.h
include/linux/snmp.h
include/linux/spi/mxs-spi.h [new file with mode: 0644]
include/linux/ssb/ssb_driver_chipcommon.h
include/linux/stmmac.h
include/linux/swiotlb.h
include/linux/tcp.h
include/linux/tcp_metrics.h [new file with mode: 0644]
include/linux/tipc_config.h
include/linux/tpm.h
include/linux/tsacct_kern.h
include/linux/tty.h
include/linux/uinput.h
include/linux/user_namespace.h
include/linux/workqueue.h
include/linux/xattr.h
include/mtd/ubi-user.h
include/net/addrconf.h
include/net/arp.h
include/net/ax25.h
include/net/bluetooth/bluetooth.h
include/net/bluetooth/hci.h
include/net/bluetooth/hci_core.h
include/net/bluetooth/l2cap.h
include/net/bluetooth/mgmt.h
include/net/bluetooth/smp.h
include/net/cfg80211.h
include/net/checksum.h
include/net/cls_cgroup.h
include/net/dst.h
include/net/genetlink.h
include/net/gro_cells.h [new file with mode: 0644]
include/net/ieee80211_radiotap.h
include/net/inet_ecn.h
include/net/inet_frag.h
include/net/inet_sock.h
include/net/ip.h
include/net/ip6_fib.h
include/net/ip6_tunnel.h
include/net/ip_vs.h
include/net/ipip.h
include/net/ipv6.h
include/net/llc.h
include/net/mac80211.h
include/net/ndisc.h
include/net/neighbour.h
include/net/net_namespace.h
include/net/netfilter/nf_conntrack_ecache.h
include/net/netfilter/nf_conntrack_expect.h
include/net/netfilter/nf_conntrack_timeout.h
include/net/netfilter/nf_nat.h
include/net/netfilter/nf_nat_core.h
include/net/netfilter/nf_nat_helper.h
include/net/netfilter/nf_nat_l3proto.h [new file with mode: 0644]
include/net/netfilter/nf_nat_l4proto.h [new file with mode: 0644]
include/net/netfilter/nf_nat_protocol.h [deleted file]
include/net/netfilter/nf_nat_rule.h [deleted file]
include/net/netlabel.h
include/net/netlink.h
include/net/netns/conntrack.h
include/net/netns/ipv4.h
include/net/netns/ipv6.h
include/net/netns/packet.h
include/net/netns/sctp.h [new file with mode: 0644]
include/net/netprio_cgroup.h
include/net/nfc/hci.h
include/net/nfc/llc.h [new file with mode: 0644]
include/net/nfc/nci.h
include/net/nfc/nci_core.h
include/net/nfc/nfc.h
include/net/nfc/shdlc.h [deleted file]
include/net/request_sock.h
include/net/sch_generic.h
include/net/scm.h
include/net/sctp/sctp.h
include/net/sctp/sm.h
include/net/sctp/structs.h
include/net/snmp.h
include/net/sock.h
include/net/tcp.h
include/net/xfrm.h
include/rdma/ib_cache.h
include/rdma/ib_verbs.h
include/scsi/libsas.h
include/scsi/sas_ata.h
include/scsi/scsi_bsg_fc.h
include/scsi/scsi_device.h
include/scsi/scsi_devinfo.h
include/scsi/scsi_host.h
include/scsi/scsi_netlink.h
include/xen/grant_table.h
include/xen/interface/grant_table.h
include/xen/interface/memory.h
include/xen/interface/platform.h
include/xen/interface/version.h
include/xen/interface/xen.h
include/xen/privcmd.h
include/xen/swiotlb-xen.h
init/Kconfig
ipc/mqueue.c
ipc/msg.c
ipc/sem.c
ipc/shm.c
ipc/util.c
ipc/util.h
kernel/acct.c
kernel/audit.c
kernel/audit.h
kernel/audit_watch.c
kernel/auditfilter.c
kernel/auditsc.c
kernel/cgroup.c
kernel/cgroup_freezer.c
kernel/cred.c
kernel/events/core.c
kernel/exit.c
kernel/fork.c
kernel/pid.c
kernel/pid_namespace.c
kernel/power/Kconfig
kernel/power/poweroff.c
kernel/power/process.c
kernel/power/qos.c
kernel/ptrace.c
kernel/srcu.c
kernel/sys.c
kernel/taskstats.c
kernel/time/clockevents.c
kernel/time/timekeeping.c
kernel/trace/trace.c
kernel/trace/trace.h
kernel/tsacct.c
kernel/user.c
kernel/user_namespace.c
kernel/workqueue.c
lib/kobject_uevent.c
lib/nlattr.c
lib/swiotlb.c
mm/fadvise.c
mm/fremap.c
mm/frontswap.c
mm/memcontrol.c
mm/mmap.c
mm/nommu.c
mm/readahead.c
mm/shmem.c
mm/slab.c
mm/vmstat.c
net/8021q/vlan_core.c
net/9p/trans_fd.c
net/Kconfig
net/appletalk/atalk_proc.c
net/atm/resources.c
net/ax25/ax25_uid.c
net/batman-adv/bat_iv_ogm.c
net/batman-adv/bridge_loop_avoidance.c
net/batman-adv/bridge_loop_avoidance.h
net/batman-adv/debugfs.c
net/batman-adv/gateway_client.c
net/batman-adv/hard-interface.c
net/batman-adv/main.c
net/batman-adv/main.h
net/batman-adv/packet.h
net/batman-adv/routing.c
net/batman-adv/send.c
net/batman-adv/soft-interface.c
net/batman-adv/soft-interface.h
net/batman-adv/translation-table.c
net/batman-adv/translation-table.h
net/batman-adv/types.h
net/batman-adv/unicast.c
net/batman-adv/vis.c
net/batman-adv/vis.h
net/bluetooth/a2mp.c
net/bluetooth/af_bluetooth.c
net/bluetooth/bnep/sock.c
net/bluetooth/cmtp/sock.c
net/bluetooth/hci_conn.c
net/bluetooth/hci_core.c
net/bluetooth/hci_event.c
net/bluetooth/hci_sock.c
net/bluetooth/hidp/sock.c
net/bluetooth/l2cap_core.c
net/bluetooth/l2cap_sock.c
net/bluetooth/mgmt.c
net/bluetooth/rfcomm/sock.c
net/bluetooth/sco.c
net/bridge/br_fdb.c
net/bridge/br_netlink.c
net/bridge/br_private.h
net/bridge/br_stp_timer.c
net/bridge/netfilter/ebt_ulog.c
net/bridge/netfilter/ebtable_filter.c
net/bridge/netfilter/ebtable_nat.c
net/can/gw.c
net/compat.c
net/core/dev.c
net/core/dev_addr_lists.c
net/core/dst.c
net/core/ethtool.c
net/core/fib_rules.c
net/core/filter.c
net/core/link_watch.c
net/core/neighbour.c
net/core/net-sysfs.c
net/core/netpoll.c
net/core/netprio_cgroup.c
net/core/request_sock.c
net/core/rtnetlink.c
net/core/scm.c
net/core/secure_seq.c
net/core/skbuff.c
net/core/sock.c
net/core/sock_diag.c
net/core/utils.c
net/dcb/dcbnl.c
net/decnet/af_decnet.c
net/decnet/dn_dev.c
net/decnet/dn_route.c
net/decnet/dn_table.c
net/decnet/netfilter/dn_rtmsg.c
net/dns_resolver/dns_key.c
net/dsa/dsa.c
net/ieee802154/6lowpan.c
net/ieee802154/nl-mac.c
net/ieee802154/nl-phy.c
net/ipv4/af_inet.c
net/ipv4/devinet.c
net/ipv4/fib_frontend.c
net/ipv4/fib_semantics.c
net/ipv4/fib_trie.c
net/ipv4/igmp.c
net/ipv4/inet_connection_sock.c
net/ipv4/inet_diag.c
net/ipv4/inet_fragment.c
net/ipv4/inetpeer.c
net/ipv4/ip_fragment.c
net/ipv4/ip_gre.c
net/ipv4/ip_output.c
net/ipv4/ip_vti.c
net/ipv4/ipconfig.c
net/ipv4/ipip.c
net/ipv4/ipmr.c
net/ipv4/netfilter.c
net/ipv4/netfilter/Kconfig
net/ipv4/netfilter/Makefile
net/ipv4/netfilter/ipt_MASQUERADE.c
net/ipv4/netfilter/ipt_NETMAP.c [deleted file]
net/ipv4/netfilter/ipt_REDIRECT.c [deleted file]
net/ipv4/netfilter/ipt_ULOG.c
net/ipv4/netfilter/ipt_rpfilter.c
net/ipv4/netfilter/iptable_filter.c
net/ipv4/netfilter/iptable_mangle.c
net/ipv4/netfilter/iptable_nat.c [new file with mode: 0644]
net/ipv4/netfilter/iptable_raw.c
net/ipv4/netfilter/iptable_security.c
net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
net/ipv4/netfilter/nf_nat_amanda.c [deleted file]
net/ipv4/netfilter/nf_nat_core.c [deleted file]
net/ipv4/netfilter/nf_nat_ftp.c [deleted file]
net/ipv4/netfilter/nf_nat_h323.c
net/ipv4/netfilter/nf_nat_helper.c [deleted file]
net/ipv4/netfilter/nf_nat_irc.c [deleted file]
net/ipv4/netfilter/nf_nat_l3proto_ipv4.c [new file with mode: 0644]
net/ipv4/netfilter/nf_nat_pptp.c
net/ipv4/netfilter/nf_nat_proto_common.c [deleted file]
net/ipv4/netfilter/nf_nat_proto_dccp.c [deleted file]
net/ipv4/netfilter/nf_nat_proto_gre.c
net/ipv4/netfilter/nf_nat_proto_icmp.c
net/ipv4/netfilter/nf_nat_proto_sctp.c [deleted file]
net/ipv4/netfilter/nf_nat_proto_tcp.c [deleted file]
net/ipv4/netfilter/nf_nat_proto_udp.c [deleted file]
net/ipv4/netfilter/nf_nat_proto_udplite.c [deleted file]
net/ipv4/netfilter/nf_nat_proto_unknown.c [deleted file]
net/ipv4/netfilter/nf_nat_rule.c [deleted file]
net/ipv4/netfilter/nf_nat_sip.c [deleted file]
net/ipv4/netfilter/nf_nat_standalone.c [deleted file]
net/ipv4/netfilter/nf_nat_tftp.c [deleted file]
net/ipv4/ping.c
net/ipv4/proc.c
net/ipv4/raw.c
net/ipv4/route.c
net/ipv4/syncookies.c
net/ipv4/sysctl_net_ipv4.c
net/ipv4/tcp.c
net/ipv4/tcp_fastopen.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_metrics.c
net/ipv4/tcp_minisocks.c
net/ipv4/tcp_output.c
net/ipv4/tcp_timer.c
net/ipv4/udp.c
net/ipv4/udp_diag.c
net/ipv6/Kconfig
net/ipv6/Makefile
net/ipv6/addrconf.c
net/ipv6/addrlabel.c
net/ipv6/ip6_fib.c
net/ipv6/ip6_flowlabel.c
net/ipv6/ip6_gre.c [new file with mode: 0644]
net/ipv6/ip6_output.c
net/ipv6/ip6_tunnel.c
net/ipv6/ip6mr.c
net/ipv6/netfilter.c
net/ipv6/netfilter/Kconfig
net/ipv6/netfilter/Makefile
net/ipv6/netfilter/ip6t_MASQUERADE.c [new file with mode: 0644]
net/ipv6/netfilter/ip6t_NPT.c [new file with mode: 0644]
net/ipv6/netfilter/ip6table_filter.c
net/ipv6/netfilter/ip6table_mangle.c
net/ipv6/netfilter/ip6table_nat.c [new file with mode: 0644]
net/ipv6/netfilter/ip6table_raw.c
net/ipv6/netfilter/ip6table_security.c
net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
net/ipv6/netfilter/nf_conntrack_reasm.c
net/ipv6/netfilter/nf_nat_l3proto_ipv6.c [new file with mode: 0644]
net/ipv6/netfilter/nf_nat_proto_icmpv6.c [new file with mode: 0644]
net/ipv6/raw.c
net/ipv6/reassembly.c
net/ipv6/route.c
net/ipv6/sit.c
net/ipv6/syncookies.c
net/ipv6/tcp_ipv6.c
net/ipv6/udp.c
net/ipx/ipx_proc.c
net/irda/irnetlink.c
net/key/af_key.c
net/l2tp/Kconfig
net/l2tp/l2tp_eth.c
net/l2tp/l2tp_netlink.c
net/llc/llc_proc.c
net/llc/llc_station.c
net/llc/sysctl_net_llc.c
net/mac80211/aes_cmac.c
net/mac80211/agg-tx.c
net/mac80211/cfg.c
net/mac80211/chan.c
net/mac80211/debugfs.c
net/mac80211/driver-ops.h
net/mac80211/ibss.c
net/mac80211/ieee80211_i.h
net/mac80211/iface.c
net/mac80211/key.c
net/mac80211/main.c
net/mac80211/mesh.c
net/mac80211/mesh.h
net/mac80211/mesh_hwmp.c
net/mac80211/mesh_pathtbl.c
net/mac80211/mesh_plink.c
net/mac80211/mlme.c
net/mac80211/offchannel.c
net/mac80211/rate.h
net/mac80211/rx.c
net/mac80211/scan.c
net/mac80211/sta_info.c
net/mac80211/sta_info.h
net/mac80211/status.c
net/mac80211/trace.h
net/mac80211/tx.c
net/mac80211/util.c
net/netfilter/Kconfig
net/netfilter/Makefile
net/netfilter/core.c
net/netfilter/ipset/ip_set_bitmap_ip.c
net/netfilter/ipset/ip_set_bitmap_ipmac.c
net/netfilter/ipset/ip_set_bitmap_port.c
net/netfilter/ipset/ip_set_core.c
net/netfilter/ipset/ip_set_hash_ip.c
net/netfilter/ipset/ip_set_hash_ipport.c
net/netfilter/ipset/ip_set_hash_ipportip.c
net/netfilter/ipset/ip_set_hash_ipportnet.c
net/netfilter/ipset/ip_set_hash_net.c
net/netfilter/ipset/ip_set_hash_netiface.c
net/netfilter/ipset/ip_set_hash_netport.c
net/netfilter/ipset/ip_set_list_set.c
net/netfilter/ipvs/Kconfig
net/netfilter/ipvs/ip_vs_app.c
net/netfilter/ipvs/ip_vs_core.c
net/netfilter/ipvs/ip_vs_ctl.c
net/netfilter/ipvs/ip_vs_ftp.c
net/netfilter/ipvs/ip_vs_xmit.c
net/netfilter/nf_conntrack_amanda.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_conntrack_ecache.c
net/netfilter/nf_conntrack_ftp.c
net/netfilter/nf_conntrack_h323_main.c
net/netfilter/nf_conntrack_irc.c
net/netfilter/nf_conntrack_netlink.c
net/netfilter/nf_conntrack_pptp.c
net/netfilter/nf_conntrack_proto.c
net/netfilter/nf_conntrack_proto_tcp.c
net/netfilter/nf_conntrack_sip.c
net/netfilter/nf_internals.h
net/netfilter/nf_nat_amanda.c [new file with mode: 0644]
net/netfilter/nf_nat_core.c [new file with mode: 0644]
net/netfilter/nf_nat_ftp.c [new file with mode: 0644]
net/netfilter/nf_nat_helper.c [new file with mode: 0644]
net/netfilter/nf_nat_irc.c [new file with mode: 0644]
net/netfilter/nf_nat_proto_common.c [new file with mode: 0644]
net/netfilter/nf_nat_proto_dccp.c [new file with mode: 0644]
net/netfilter/nf_nat_proto_sctp.c [new file with mode: 0644]
net/netfilter/nf_nat_proto_tcp.c [new file with mode: 0644]
net/netfilter/nf_nat_proto_udp.c [new file with mode: 0644]
net/netfilter/nf_nat_proto_udplite.c [new file with mode: 0644]
net/netfilter/nf_nat_proto_unknown.c [new file with mode: 0644]
net/netfilter/nf_nat_sip.c [new file with mode: 0644]
net/netfilter/nf_nat_tftp.c [new file with mode: 0644]
net/netfilter/nf_queue.c
net/netfilter/nfnetlink.c
net/netfilter/nfnetlink_acct.c
net/netfilter/nfnetlink_cthelper.c
net/netfilter/nfnetlink_cttimeout.c
net/netfilter/nfnetlink_log.c
net/netfilter/nfnetlink_queue_core.c
net/netfilter/xt_CT.c
net/netfilter/xt_LOG.c
net/netfilter/xt_NETMAP.c [new file with mode: 0644]
net/netfilter/xt_NFQUEUE.c
net/netfilter/xt_NOTRACK.c [deleted file]
net/netfilter/xt_REDIRECT.c [new file with mode: 0644]
net/netfilter/xt_nat.c [new file with mode: 0644]
net/netfilter/xt_osf.c
net/netfilter/xt_owner.c
net/netfilter/xt_recent.c
net/netfilter/xt_set.c
net/netfilter/xt_socket.c
net/netfilter/xt_time.c
net/netlabel/netlabel_cipso_v4.c
net/netlabel/netlabel_mgmt.c
net/netlabel/netlabel_unlabeled.c
net/netlabel/netlabel_user.c
net/netlink/af_netlink.c
net/netlink/genetlink.c
net/nfc/core.c
net/nfc/hci/Makefile
net/nfc/hci/command.c
net/nfc/hci/core.c
net/nfc/hci/hci.h
net/nfc/hci/hcp.c
net/nfc/hci/llc.c [new file with mode: 0644]
net/nfc/hci/llc.h [new file with mode: 0644]
net/nfc/hci/llc_nop.c [new file with mode: 0644]
net/nfc/hci/llc_shdlc.c [new file with mode: 0644]
net/nfc/hci/shdlc.c [deleted file]
net/nfc/llcp/commands.c
net/nfc/llcp/llcp.c
net/nfc/llcp/llcp.h
net/nfc/llcp/sock.c
net/nfc/nci/core.c
net/nfc/nci/ntf.c
net/nfc/nci/rsp.c
net/nfc/netlink.c
net/openvswitch/actions.c
net/openvswitch/datapath.c
net/openvswitch/datapath.h
net/openvswitch/dp_notify.c
net/openvswitch/flow.c
net/openvswitch/flow.h
net/openvswitch/vport-internal_dev.c
net/openvswitch/vport-netdev.c
net/openvswitch/vport.c
net/openvswitch/vport.h
net/packet/Kconfig
net/packet/Makefile
net/packet/af_packet.c
net/packet/diag.c [new file with mode: 0644]
net/packet/internal.h [new file with mode: 0644]
net/phonet/pn_netlink.c
net/phonet/socket.c
net/rds/tcp_connect.c
net/rds/tcp_listen.c
net/rds/tcp_recv.c
net/rds/tcp_send.c
net/rfkill/core.c
net/rfkill/input.c
net/rxrpc/ar-key.c
net/sched/act_api.c
net/sched/cls_api.c
net/sched/cls_basic.c
net/sched/cls_cgroup.c
net/sched/cls_flow.c
net/sched/cls_fw.c
net/sched/cls_route.c
net/sched/cls_rsvp.h
net/sched/cls_tcindex.c
net/sched/cls_u32.c
net/sched/em_meta.c
net/sched/sch_api.c
net/sched/sch_drr.c
net/sched/sch_generic.c
net/sched/sch_qfq.c
net/sctp/associola.c
net/sctp/auth.c
net/sctp/bind_addr.c
net/sctp/chunk.c
net/sctp/endpointola.c
net/sctp/input.c
net/sctp/ipv6.c
net/sctp/objcnt.c
net/sctp/output.c
net/sctp/outqueue.c
net/sctp/primitive.c
net/sctp/proc.c
net/sctp/protocol.c
net/sctp/sm_make_chunk.c
net/sctp/sm_sideeffect.c
net/sctp/sm_statefuns.c
net/sctp/sm_statetable.c
net/sctp/socket.c
net/sctp/sysctl.c
net/sctp/transport.c
net/sctp/ulpqueue.c
net/socket.c
net/sunrpc/cache.c
net/tipc/bearer.c
net/tipc/config.c
net/tipc/core.c
net/tipc/core.h
net/tipc/eth_media.c
net/tipc/handler.c
net/tipc/link.c
net/tipc/name_table.c
net/tipc/net.c
net/tipc/net.h
net/tipc/netlink.c
net/tipc/subscr.c
net/unix/af_unix.c
net/unix/diag.c
net/wireless/chan.c
net/wireless/core.c
net/wireless/core.h
net/wireless/mlme.c
net/wireless/nl80211.c
net/wireless/nl80211.h
net/wireless/radiotap.c
net/wireless/reg.c
net/wireless/scan.c
net/wireless/util.c
net/wireless/wext-core.c
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_state.c
net/xfrm/xfrm_user.c
samples/seccomp/Makefile
samples/seccomp/bpf-helper.h
scripts/dtc/Makefile.dtc
scripts/dtc/checks.c
scripts/dtc/data.c
scripts/dtc/dtc-lexer.l
scripts/dtc/dtc-lexer.lex.c_shipped
scripts/dtc/dtc-parser.tab.c_shipped
scripts/dtc/dtc-parser.tab.h_shipped
scripts/dtc/dtc-parser.y
scripts/dtc/dtc.c
scripts/dtc/dtc.h
scripts/dtc/fdtdump.c [new file with mode: 0644]
scripts/dtc/fdtget.c [new file with mode: 0644]
scripts/dtc/fdtput.c [new file with mode: 0644]
scripts/dtc/flattree.c
scripts/dtc/libfdt/Makefile.libfdt
scripts/dtc/libfdt/fdt.c
scripts/dtc/libfdt/fdt_empty_tree.c [new file with mode: 0644]
scripts/dtc/libfdt/fdt_ro.c
scripts/dtc/libfdt/fdt_rw.c
scripts/dtc/libfdt/fdt_sw.c
scripts/dtc/libfdt/fdt_wip.c
scripts/dtc/libfdt/libfdt.h
scripts/dtc/libfdt/libfdt_env.h
scripts/dtc/libfdt/libfdt_internal.h
scripts/dtc/livetree.c
scripts/dtc/srcpos.c
scripts/dtc/srcpos.h
scripts/dtc/treesource.c
scripts/dtc/util.c
scripts/dtc/util.h
security/apparmor/domain.c
security/apparmor/file.c
security/apparmor/include/audit.h
security/apparmor/include/file.h
security/apparmor/lsm.c
security/capability.c
security/device_cgroup.c
security/integrity/evm/evm_crypto.c
security/integrity/evm/evm_main.c
security/integrity/iint.c
security/integrity/ima/Kconfig
security/integrity/ima/Makefile
security/integrity/ima/ima.h
security/integrity/ima/ima_api.c
security/integrity/ima/ima_appraise.c [new file with mode: 0644]
security/integrity/ima/ima_audit.c
security/integrity/ima/ima_crypto.c
security/integrity/ima/ima_main.c
security/integrity/ima/ima_policy.c
security/integrity/integrity.h
security/keys/gc.c
security/keys/internal.h
security/keys/key.c
security/keys/keyctl.c
security/keys/keyring.c
security/keys/permission.c
security/keys/proc.c
security/keys/process_keys.c
security/keys/request_key.c
security/keys/trusted.c
security/security.c
security/selinux/hooks.c
security/selinux/netlink.c
security/selinux/selinuxfs.c
security/selinux/ss/services.c
security/smack/smack_lsm.c
security/smack/smackfs.c
security/tomoyo/audit.c
security/tomoyo/common.c
security/tomoyo/common.h
security/tomoyo/condition.c
security/tomoyo/tomoyo.c
security/yama/Kconfig
security/yama/yama_lsm.c
sound/core/pcm_native.c
sound/i2c/other/ak4113.c
sound/i2c/other/ak4114.c
sound/pci/oxygen/oxygen_lib.c
sound/soc/codecs/wm8350.c
sound/soc/codecs/wm8753.c
sound/soc/soc-core.c
virt/kvm/eventfd.c

index 6cd6daefaaedeb160a6f1ac1d616de7871b38965..986946613542b4bc1852aaf671e62059d3295cea 100644 (file)
@@ -12,11 +12,14 @@ Description:
                then closing the file.  The new policy takes effect after
                the file ima/policy is closed.
 
+               IMA appraisal, if configured, uses these file measurements
+               for local measurement appraisal.
+
                rule format: action [condition ...]
 
-               action: measure | dont_measure
+               action: measure | dont_measure | appraise | dont_appraise | audit
                condition:= base | lsm
-                       base:   [[func=] [mask=] [fsmagic=] [uid=]]
+                       base:   [[func=] [mask=] [fsmagic=] [uid=] [fowner=]]
                        lsm:    [[subj_user=] [subj_role=] [subj_type=]
                                 [obj_user=] [obj_role=] [obj_type=]]
 
@@ -24,36 +27,50 @@ Description:
                        mask:= [MAY_READ] [MAY_WRITE] [MAY_APPEND] [MAY_EXEC]
                        fsmagic:= hex value
                        uid:= decimal value
+                       fowner:=decimal value
                lsm:    are LSM specific
 
                default policy:
                        # PROC_SUPER_MAGIC
                        dont_measure fsmagic=0x9fa0
+                       dont_appraise fsmagic=0x9fa0
                        # SYSFS_MAGIC
                        dont_measure fsmagic=0x62656572
+                       dont_appraise fsmagic=0x62656572
                        # DEBUGFS_MAGIC
                        dont_measure fsmagic=0x64626720
+                       dont_appraise fsmagic=0x64626720
                        # TMPFS_MAGIC
                        dont_measure fsmagic=0x01021994
+                       dont_appraise fsmagic=0x01021994
+                       # RAMFS_MAGIC
+                       dont_measure fsmagic=0x858458f6
+                       dont_appraise fsmagic=0x858458f6
                        # SECURITYFS_MAGIC
                        dont_measure fsmagic=0x73636673
+                       dont_appraise fsmagic=0x73636673
 
                        measure func=BPRM_CHECK
                        measure func=FILE_MMAP mask=MAY_EXEC
                        measure func=FILE_CHECK mask=MAY_READ uid=0
+                       appraise fowner=0
 
                The default policy measures all executables in bprm_check,
                all files mmapped executable in file_mmap, and all files
-               open for read by root in do_filp_open.
+               open for read by root in do_filp_open.  The default appraisal
+               policy appraises all files owned by root.
 
                Examples of LSM specific definitions:
 
                SELinux:
                        # SELINUX_MAGIC
-                       dont_measure fsmagic=0xF97CFF8C
+                       dont_measure fsmagic=0xf97cff8c
+                       dont_appraise fsmagic=0xf97cff8c
 
                        dont_measure obj_type=var_log_t
+                       dont_appraise obj_type=var_log_t
                        dont_measure obj_type=auditd_log_t
+                       dont_appraise obj_type=auditd_log_t
                        measure subj_user=system_u func=FILE_CHECK mask=MAY_READ
                        measure subj_role=system_r func=FILE_CHECK mask=MAY_READ
 
index 5dab36448b44b84bf869b9316d50af47f00328b6..6943133afcb8388916a87e2bd66923fe26a97906 100644 (file)
@@ -176,3 +176,14 @@ Description:       Disable L3 cache indices
                All AMD processors with L3 caches provide this functionality.
                For details, see BKDGs at
                http://developer.amd.com/documentation/guides/Pages/default.aspx
+
+
+What:          /sys/devices/system/cpu/cpufreq/boost
+Date:          August 2012
+Contact:       Linux kernel mailing list <linux-kernel@vger.kernel.org>
+Description:   Processor frequency boosting control
+
+               This switch controls the boost setting for the whole system.
+               Boosting allows the CPU and the firmware to run at a frequency
+               beyond its nominal limit.
+               More details can be found in Documentation/cpu-freq/boost.txt
diff --git a/Documentation/ABI/testing/sysfs-driver-ppi b/Documentation/ABI/testing/sysfs-driver-ppi
new file mode 100644 (file)
index 0000000..97a003e
--- /dev/null
@@ -0,0 +1,70 @@
+What:          /sys/devices/pnp0/<bus-num>/ppi/
+Date:          August 2012
+Kernel Version:        3.6
+Contact:       xiaoyan.zhang@intel.com
+Description:
+               This folder includes the attributes related with PPI (Physical
+               Presence Interface). Only if TPM is supported by BIOS, this
+               folder makes sense. The folder path can be obtained with the
+               command 'find /sys/ -name 'pcrs''. For detailed information on PPI,
+               please refer to the PPI specification from
+               http://www.trustedcomputinggroup.org/
+
+What:          /sys/devices/pnp0/<bus-num>/ppi/version
+Date:          August 2012
+Contact:       xiaoyan.zhang@intel.com
+Description:
+               This attribute shows the version of the PPI supported by the
+               platform.
+               This file is readonly.
+
+What:          /sys/devices/pnp0/<bus-num>/ppi/request
+Date:          August 2012
+Contact:       xiaoyan.zhang@intel.com
+Description:
+               This attribute shows the request for an operation to be
+               executed in the pre-OS environment. It is the only input from
+               the OS to the pre-OS environment. The request should be an
+               integer value ranging from 1 to 160, and 0 means no request.
+               This file can be read and written.
+
+What:          /sys/devices/pnp0/00:<bus-num>/ppi/response
+Date:          August 2012
+Contact:       xiaoyan.zhang@intel.com
+Description:
+               This attribute shows the response to the most recent operation
+               request it acted upon. The format is "<request> <response num>
+               : <response description>".
+               This file is readonly.
+
+What:          /sys/devices/pnp0/<bus-num>/ppi/transition_action
+Date:          August 2012
+Contact:       xiaoyan.zhang@intel.com
+Description:
+               This attribute shows the platform-specific action that should
+               take place in order to transition to the BIOS for execution of
+               a requested operation. The format is "<action num>: <action
+               description>".
+               This file is readonly.
+
+What:          /sys/devices/pnp0/<bus-num>/ppi/tcg_operations
+Date:          August 2012
+Contact:       xiaoyan.zhang@intel.com
+Description:
+               This attribute shows whether it is allowed to request an
+               operation to be executed in the pre-OS environment by the BIOS
+               for the requests defined by TCG, i.e. requests from 1 to 22.
+               The format is "<request> <status num>: <status description>".
+               This attribute is only supported by PPI version 1.2+.
+               This file is readonly.
+
+What:          /sys/devices/pnp0/<bus-num>/ppi/vs_operations
+Date:          August 2012
+Contact:       xiaoyan.zhang@intel.com
+Description:
+               This attribute shows whether it is allowed to request an
+               operation to be executed in the pre-OS environment by the BIOS
+               for the vendor specific requests, i.e. requests from 128 to
+               255. The format is same with tcg_operations. This attribute
+               is also only supported by PPI version 1.2+.
+               This file is readonly.
index d40d2b55050239cd2669095ab909f287208f8f24..05aeedf177946a9f9eb99a9f6b4b26a488f2f74a 100644 (file)
@@ -19,7 +19,11 @@ Date:                September 2010
 Contact:       Richard Cochran <richardcochran@gmail.com>
 Description:
                This file contains the name of the PTP hardware clock
-               as a human readable string.
+               as a human readable string. The purpose of this
+               attribute is to provide the user with a "friendly
+               name" and to help distinguish PHY based devices from
+               MAC based ones. The string does not necessarily have
+               to be any kind of unique id.
 
 What:          /sys/class/ptp/ptpN/max_adjustment
 Date:          September 2010
index 4a0b64c605fc83cef8c945a76d08554c625dc746..9e04196c4d781f5a411b2afd329876bd255b77e4 100644 (file)
@@ -29,7 +29,8 @@ CONTENTS:
   3.1 Overview
   3.2 Synchronization
   3.3 Subsystem API
-4. Questions
+4. Extended attributes usage
+5. Questions
 
 1. Control Groups
 =================
@@ -62,9 +63,9 @@ an instance of the cgroup virtual filesystem associated with it.
 At any one time there may be multiple active hierarchies of task
 cgroups. Each hierarchy is a partition of all tasks in the system.
 
-User level code may create and destroy cgroups by name in an
+User-level code may create and destroy cgroups by name in an
 instance of the cgroup virtual file system, specify and query to
-which cgroup a task is assigned, and list the task pids assigned to
+which cgroup a task is assigned, and list the task PIDs assigned to
 a cgroup. Those creations and assignments only affect the hierarchy
 associated with that instance of the cgroup file system.
 
@@ -72,7 +73,7 @@ On their own, the only use for cgroups is for simple job
 tracking. The intention is that other subsystems hook into the generic
 cgroup support to provide new attributes for cgroups, such as
 accounting/limiting the resources which processes in a cgroup can
-access. For example, cpusets (see Documentation/cgroups/cpusets.txt) allows
+access. For example, cpusets (see Documentation/cgroups/cpusets.txt) allow
 you to associate a set of CPUs and a set of memory nodes with the
 tasks in each cgroup.
 
@@ -80,11 +81,11 @@ tasks in each cgroup.
 ----------------------------
 
 There are multiple efforts to provide process aggregations in the
-Linux kernel, mainly for resource tracking purposes. Such efforts
+Linux kernel, mainly for resource-tracking purposes. Such efforts
 include cpusets, CKRM/ResGroups, UserBeanCounters, and virtual server
 namespaces. These all require the basic notion of a
 grouping/partitioning of processes, with newly forked processes ending
-in the same group (cgroup) as their parent process.
+up in the same group (cgroup) as their parent process.
 
 The kernel cgroup patch provides the minimum essential kernel
 mechanisms required to efficiently implement such groups. It has
@@ -127,14 +128,14 @@ following lines:
                                / \
                Professors (15%)  students (5%)
 
-Browsers like Firefox/Lynx go into the WWW network class, while (k)nfsd go
-into NFS network class.
+Browsers like Firefox/Lynx go into the WWW network class, while (k)nfsd goes
+into the NFS network class.
 
 At the same time Firefox/Lynx will share an appropriate CPU/Memory class
 depending on who launched it (prof/student).
 
 With the ability to classify tasks differently for different resources
-(by putting those resource subsystems in different hierarchies) then
+(by putting those resource subsystems in different hierarchies),
 the admin can easily set up a script which receives exec notifications
 and depending on who is launching the browser he can
 
@@ -145,19 +146,19 @@ a separate cgroup for every browser launched and associate it with
 appropriate network and other resource class.  This may lead to
 proliferation of such cgroups.
 
-Also lets say that the administrator would like to give enhanced network
+Also let's say that the administrator would like to give enhanced network
 access temporarily to a student's browser (since it is night and the user
-wants to do online gaming :))  OR give one of the students simulation
-apps enhanced CPU power,
+wants to do online gaming :))  OR give one of the student's simulation
+apps enhanced CPU power.
 
-With ability to write pids directly to resource classes, it's just a
-matter of :
+With ability to write PIDs directly to resource classes, it's just a
+matter of:
 
        # echo pid > /sys/fs/cgroup/network/<new_class>/tasks
        (after some time)
        # echo pid > /sys/fs/cgroup/network/<orig_class>/tasks
 
-Without this ability, he would have to split the cgroup into
+Without this ability, the administrator would have to split the cgroup into
 multiple separate ones and then associate the new cgroups with the
 new resource classes.
 
@@ -184,20 +185,20 @@ Control Groups extends the kernel as follows:
    field of each task_struct using the css_set, anchored at
    css_set->tasks.
 
- - A cgroup hierarchy filesystem can be mounted  for browsing and
+ - A cgroup hierarchy filesystem can be mounted for browsing and
    manipulation from user space.
 
- - You can list all the tasks (by pid) attached to any cgroup.
+ - You can list all the tasks (by PID) attached to any cgroup.
 
 The implementation of cgroups requires a few, simple hooks
-into the rest of the kernel, none in performance critical paths:
+into the rest of the kernel, none in performance-critical paths:
 
  - in init/main.c, to initialize the root cgroups and initial
    css_set at system boot.
 
  - in fork and exit, to attach and detach a task from its css_set.
 
-In addition a new file system, of type "cgroup" may be mounted, to
+In addition, a new file system of type "cgroup" may be mounted, to
 enable browsing and modifying the cgroups presently known to the
 kernel.  When mounting a cgroup hierarchy, you may specify a
 comma-separated list of subsystems to mount as the filesystem mount
@@ -230,13 +231,13 @@ as the path relative to the root of the cgroup file system.
 Each cgroup is represented by a directory in the cgroup file system
 containing the following files describing that cgroup:
 
- - tasks: list of tasks (by pid) attached to that cgroup.  This list
-   is not guaranteed to be sorted.  Writing a thread id into this file
+ - tasks: list of tasks (by PID) attached to that cgroup.  This list
+   is not guaranteed to be sorted.  Writing a thread ID into this file
    moves the thread into this cgroup.
- - cgroup.procs: list of tgids in the cgroup.  This list is not
-   guaranteed to be sorted or free of duplicate tgids, and userspace
+ - cgroup.procs: list of thread group IDs in the cgroup.  This list is
+   not guaranteed to be sorted or free of duplicate TGIDs, and userspace
    should sort/uniquify the list if this property is required.
-   Writing a thread group id into this file moves all threads in that
+   Writing a thread group ID into this file moves all threads in that
    group into this cgroup.
  - notify_on_release flag: run the release agent on exit?
  - release_agent: the path to use for release notifications (this file
@@ -261,7 +262,7 @@ cgroup file system directories.
 
 When a task is moved from one cgroup to another, it gets a new
 css_set pointer - if there's an already existing css_set with the
-desired collection of cgroups then that group is reused, else a new
+desired collection of cgroups then that group is reused, otherwise a new
 css_set is allocated. The appropriate existing css_set is located by
 looking into a hash table.
 
@@ -292,7 +293,7 @@ file system) of the abandoned cgroup.  This enables automatic
 removal of abandoned cgroups.  The default value of
 notify_on_release in the root cgroup at system boot is disabled
 (0).  The default value of other cgroups at creation is the current
-value of their parents notify_on_release setting. The default value of
+value of their parents' notify_on_release settings. The default value of
 a cgroup hierarchy's release_agent path is empty.
 
 1.5 What does clone_children do ?
@@ -316,7 +317,7 @@ the "cpuset" cgroup subsystem, the steps are something like:
  4) Create the new cgroup by doing mkdir's and write's (or echo's) in
     the /sys/fs/cgroup virtual file system.
  5) Start a task that will be the "founding father" of the new job.
- 6) Attach that task to the new cgroup by writing its pid to the
+ 6) Attach that task to the new cgroup by writing its PID to the
     /sys/fs/cgroup/cpuset/tasks file for that cgroup.
  7) fork, exec or clone the job tasks from this founding father task.
 
@@ -344,7 +345,7 @@ and then start a subshell 'sh' in that cgroup:
 2.1 Basic Usage
 ---------------
 
-Creating, modifying, using the cgroups can be done through the cgroup
+Creating, modifying, using cgroups can be done through the cgroup
 virtual filesystem.
 
 To mount a cgroup hierarchy with all available subsystems, type:
@@ -441,7 +442,7 @@ You can attach the current shell task by echoing 0:
 # echo 0 > tasks
 
 You can use the cgroup.procs file instead of the tasks file to move all
-threads in a threadgroup at once. Echoing the pid of any task in a
+threads in a threadgroup at once. Echoing the PID of any task in a
 threadgroup to cgroup.procs causes all tasks in that threadgroup to be
 be attached to the cgroup. Writing 0 to cgroup.procs moves all tasks
 in the writing task's threadgroup.
@@ -479,7 +480,7 @@ in /proc/mounts and /proc/<pid>/cgroups.
 There is mechanism which allows to get notifications about changing
 status of a cgroup.
 
-To register new notification handler you need:
+To register a new notification handler you need to:
  - create a file descriptor for event notification using eventfd(2);
  - open a control file to be monitored (e.g. memory.usage_in_bytes);
  - write "<event_fd> <control_fd> <args>" to cgroup.event_control.
@@ -488,7 +489,7 @@ To register new notification handler you need:
 eventfd will be woken up by control file implementation or when the
 cgroup is removed.
 
-To unregister notification handler just close eventfd.
+To unregister notification handler just close eventfd.
 
 NOTE: Support of notifications should be implemented for the control
 file. See documentation for the subsystem.
@@ -502,7 +503,7 @@ file. See documentation for the subsystem.
 Each kernel subsystem that wants to hook into the generic cgroup
 system needs to create a cgroup_subsys object. This contains
 various methods, which are callbacks from the cgroup system, along
-with a subsystem id which will be assigned by the cgroup system.
+with a subsystem ID which will be assigned by the cgroup system.
 
 Other fields in the cgroup_subsys object include:
 
@@ -516,7 +517,7 @@ Other fields in the cgroup_subsys object include:
   at system boot.
 
 Each cgroup object created by the system has an array of pointers,
-indexed by subsystem id; this pointer is entirely managed by the
+indexed by subsystem ID; this pointer is entirely managed by the
 subsystem; the generic cgroup code will never touch this pointer.
 
 3.2 Synchronization
@@ -639,7 +640,7 @@ void post_clone(struct cgroup *cgrp)
 
 Called during cgroup_create() to do any parameter
 initialization which might be required before a task could attach.  For
-example in cpusets, no task may attach before 'cpus' and 'mems' are set
+example, in cpusets, no task may attach before 'cpus' and 'mems' are set
 up.
 
 void bind(struct cgroup *root)
@@ -650,7 +651,26 @@ and root cgroup. Currently this will only involve movement between
 the default hierarchy (which never has sub-cgroups) and a hierarchy
 that is being created/destroyed (and hence has no sub-cgroups).
 
-4. Questions
+4. Extended attribute usage
+===========================
+
+cgroup filesystem supports certain types of extended attributes in its
+directories and files.  The current supported types are:
+       - Trusted (XATTR_TRUSTED)
+       - Security (XATTR_SECURITY)
+
+Both require CAP_SYS_ADMIN capability to set.
+
+Like in tmpfs, the extended attributes in cgroup filesystem are stored
+using kernel memory and it's advised to keep the usage at minimum.  This
+is the reason why user defined extended attributes are not supported, since
+any user can do it and there's no limit in the value size.
+
+The current known users for this feature are SELinux to limit cgroup usage
+in containers and systemd for assorted meta data like main PID in a cgroup
+(systemd creates a cgroup per service).
+
+5. Questions
 ============
 
 Q: what's up with this '/bin/echo' ?
@@ -660,5 +680,5 @@ A: bash's builtin 'echo' command does not check calls to write() against
 
 Q: When I attach processes, only the first of the line gets really attached !
 A: We can only return one error code per call to write(). So you should also
-   put only ONE pid.
+   put only ONE PID.
 
diff --git a/Documentation/cpu-freq/boost.txt b/Documentation/cpu-freq/boost.txt
new file mode 100644 (file)
index 0000000..9b4edfc
--- /dev/null
@@ -0,0 +1,93 @@
+Processor boosting control
+
+       - information for users -
+
+Quick guide for the impatient:
+--------------------
+/sys/devices/system/cpu/cpufreq/boost
+controls the boost setting for the whole system. You can read and write
+that file with either "0" (boosting disabled) or "1" (boosting allowed).
+Reading or writing 1 does not mean that the system is boosting at this
+very moment, but only that the CPU _may_ raise the frequency at its
+discretion.
+--------------------
+
+Introduction
+-------------
+Some CPUs support a functionality to raise the operating frequency of
+some cores in a multi-core package if certain conditions apply, mostly
+if the whole chip is not fully utilized and below its intended thermal
+budget. This is done without operating system control by a combination
+of hardware and firmware.
+On Intel CPUs this is called "Turbo Boost", AMD calls it "Turbo-Core",
+in technical documentation "Core performance boost". In Linux we use
+the term "boost" for convenience.
+
+Rationale for disable switch
+----------------------------
+
+Though the idea is to just give better performance without any user
+intervention, sometimes the need arises to disable this functionality.
+Most systems offer a switch in the (BIOS) firmware to disable the
+functionality altogether, but a more fine-grained and dynamic control would
+be desirable:
+1. While running benchmarks, reproducible results are important. Since
+   the boosting functionality depends on the load of the whole package,
+   single thread performance can vary. By explicitly disabling the boost
+   functionality at least for the benchmark's run-time the system will run
+   at a fixed frequency and results are reproducible again.
+2. To examine the impact of the boosting functionality it is helpful
+   to do tests with and without boosting.
+3. Boosting means overclocking the processor, though under controlled
+   conditions. By raising the frequency and the voltage the processor
+   will consume more power than without the boosting, which may be
+   undesirable for instance for mobile users. Disabling boosting may
+   save power here, though this depends on the workload.
+
+
+User controlled switch
+----------------------
+
+To allow the user to toggle the boosting functionality, the acpi-cpufreq
+driver exports a sysfs knob to disable it. There is a file:
+/sys/devices/system/cpu/cpufreq/boost
+which can either read "0" (boosting disabled) or "1" (boosting enabled).
+Reading the file is always supported, even if the processor does not
+support boosting. In this case the file will be read-only and always
+reads as "0". Explicitly changing the permissions and writing to that
+file anyway will return EINVAL.
+
+On supported CPUs one can write either a "0" or a "1" into this file.
+This will either disable the boost functionality on all cores in the
+whole system (0) or will allow the hardware to boost at will (1).
+
+Writing a "1" does not explicitly boost the system, but just allows the
+CPU (and the firmware) to boost at their discretion. Some implementations
+take external factors like the chip's temperature into account, so
+boosting once does not necessarily mean that it will occur every time
+even using the exact same software setup.
+
+
+AMD legacy cpb switch
+---------------------
+The AMD powernow-k8 driver used to support a very similar switch to
+disable or enable the "Core Performance Boost" feature of some AMD CPUs.
+This switch was instantiated in each CPU's cpufreq directory
+(/sys/devices/system/cpu[0-9]*/cpufreq) and was called "cpb".
+Though the per CPU existence hints at a more fine grained control, the
+actual implementation only supported a system-global switch semantics,
+which was simply reflected into each CPU's file. Writing a 0 or 1 into it
+would pull the other CPUs to the same state.
+For compatibility reasons this file and its behavior is still supported
+on AMD CPUs, though it is now protected by a config switch
+(X86_ACPI_CPUFREQ_CPB). On Intel CPUs this file will never be created,
+even with the config option set.
+This functionality is considered legacy and will be removed in some future
+kernel version.
+
+More fine grained boosting control
+----------------------------------
+
+Technically it is possible to switch the boosting functionality at least
+on a per package basis, for some CPUs even per core. Currently the driver
+does not support it, but this may be implemented in the future.
index 9d28a3406e745589383021732fe7941ed9aad1cb..b6f44f490ed7839f0963acfc0c53ed2537fa35e2 100644 (file)
@@ -76,9 +76,17 @@ total 0
 
 
 * desc : Small description about the idle state (string)
-* disable : Option to disable this idle state (bool)
+* disable : Option to disable this idle state (bool) -> see note below
 * latency : Latency to exit out of this idle state (in microseconds)
 * name : Name of the idle state (string)
 * power : Power consumed while in this idle state (in milliwatts)
 * time : Total time spent in this idle state (in microseconds)
 * usage : Number of times this state was entered (count)
+
+Note:
+The behavior and the effect of the disable variable depends on the
+implementation of a particular governor. In the ladder governor, for
+example, it is not coherent, i.e. if one is disabling a light state,
+then all deeper states are disabled as well, but the disable variable
+does not reflect it. Likewise, if one enables a deep state but a lighter
+state still is disabled, then this has no effect.
diff --git a/Documentation/devicetree/bindings/arm/calxeda/combophy.txt b/Documentation/devicetree/bindings/arm/calxeda/combophy.txt
new file mode 100644 (file)
index 0000000..6622bdb
--- /dev/null
@@ -0,0 +1,17 @@
+Calxeda Highbank Combination Phys for SATA
+
+Properties:
+- compatible : Should be "calxeda,hb-combophy"
+- #phy-cells: Should be 1.
+- reg : Address and size for Combination Phy registers.
+- phydev: device ID for programming the combophy.
+
+Example:
+
+       combophy5: combo-phy@fff5d000 {
+               compatible = "calxeda,hb-combophy";
+               #phy-cells = <1>;
+               reg = <0xfff5d000 0x1000>;
+               phydev = <31>;
+       };
+
index 8bb8a76d42e8c1b9cad1de552d8639045a204992..b519f9b699c30eec22d99b7398691b3b80c4c7a3 100644 (file)
@@ -8,9 +8,18 @@ Required properties:
 - interrupts        : <interrupt mapping for SATA IRQ>
 - reg               : <registers mapping>
 
+Optional properties:
+- calxeda,port-phys: phandle-combophy and lane assignment, which maps each
+                       SATA port to a combophy and a lane within that
+                       combophy
+- dma-coherent      : Present if dma operations are coherent
+
 Example:
         sata@ffe08000 {
                compatible = "calxeda,hb-ahci";
                 reg = <0xffe08000 0x1000>;
                 interrupts = <115>;
+               calxeda,port-phys = <&combophy5 0 &combophy0 0 &combophy0 1
+                                       &combophy0 2 &combophy0 3>;
+
         };
diff --git a/Documentation/devicetree/bindings/ata/pata-arasan.txt b/Documentation/devicetree/bindings/ata/pata-arasan.txt
new file mode 100644 (file)
index 0000000..95ec7f8
--- /dev/null
@@ -0,0 +1,17 @@
+* ARASAN PATA COMPACT FLASH CONTROLLER
+
+Required properties:
+- compatible: "arasan,cf-spear1340"
+- reg: Address range of the CF registers
+- interrupt-parent: Should be the phandle for the interrupt controller
+  that services interrupts for this device
+- interrupts: Should contain the CF interrupt number
+
+Example:
+
+       cf@fc000000 {
+               compatible = "arasan,cf-spear1340";
+               reg = <0xfc000000 0x1000>;
+               interrupt-parent = <&vic1>;
+               interrupts = <12>;
+       };
diff --git a/Documentation/devicetree/bindings/cpufreq/cpufreq-cpu0.txt b/Documentation/devicetree/bindings/cpufreq/cpufreq-cpu0.txt
new file mode 100644 (file)
index 0000000..4416ccc
--- /dev/null
@@ -0,0 +1,55 @@
+Generic CPU0 cpufreq driver
+
+It is a generic cpufreq driver for CPU0 frequency management.  It
+supports both uniprocessor (UP) and symmetric multiprocessor (SMP)
+systems which share clock and voltage across all CPUs.
+
+Both required and optional properties listed below must be defined
+under node /cpus/cpu@0.
+
+Required properties:
+- operating-points: Refer to Documentation/devicetree/bindings/power/opp.txt
+  for details
+
+Optional properties:
+- clock-latency: Specify the possible maximum transition latency for clock,
+  in unit of nanoseconds.
+- voltage-tolerance: Specify the CPU voltage tolerance in percentage.
+
+Examples:
+
+cpus {
+       #address-cells = <1>;
+       #size-cells = <0>;
+
+       cpu@0 {
+               compatible = "arm,cortex-a9";
+               reg = <0>;
+               next-level-cache = <&L2>;
+               operating-points = <
+                       /* kHz    uV */
+                       792000  1100000
+                       396000  950000
+                       198000  850000
+               >;
+               clock-latency = <61036>; /* two CLK32 periods */
+       };
+
+       cpu@1 {
+               compatible = "arm,cortex-a9";
+               reg = <1>;
+               next-level-cache = <&L2>;
+       };
+
+       cpu@2 {
+               compatible = "arm,cortex-a9";
+               reg = <2>;
+               next-level-cache = <&L2>;
+       };
+
+       cpu@3 {
+               compatible = "arm,cortex-a9";
+               reg = <3>;
+               next-level-cache = <&L2>;
+       };
+};
index a4cd273b2a679d0d5c4952d6a576149658570cdd..36e27d54260b37ddde16669c0294474026084b7b 100644 (file)
@@ -9,6 +9,9 @@ Required properties:
     region.
   - interrupts: interrupt number to the cpu.
 
+Optional properties:
+- dma-coherent      : Present if dma operations are coherent
+
 Example:
 
        pdma0: pdma@12680000 {
diff --git a/Documentation/devicetree/bindings/gpio/gpio-74x164.txt b/Documentation/devicetree/bindings/gpio/gpio-74x164.txt
new file mode 100644 (file)
index 0000000..cc26080
--- /dev/null
@@ -0,0 +1,22 @@
+* Generic 8-bits shift register GPIO driver
+
+Required properties:
+- compatible : Should be "fairchild,74hc595"
+- reg : chip select number
+- gpio-controller : Marks the device node as a gpio controller.
+- #gpio-cells : Should be two.  The first cell is the pin number and
+  the second cell is used to specify the gpio polarity:
+      0 = active high
+      1 = active low
+- registers-number: Number of daisy-chained shift registers
+
+Example:
+
+gpio5: gpio5@0 {
+       compatible = "fairchild,74hc595";
+       reg = <0>;
+       gpio-controller;
+       #gpio-cells = <2>;
+       registers-number = <4>;
+       spi-max-frequency = <100000>;
+};
diff --git a/Documentation/devicetree/bindings/gpio/gpio-adnp.txt b/Documentation/devicetree/bindings/gpio/gpio-adnp.txt
new file mode 100644 (file)
index 0000000..af66b27
--- /dev/null
@@ -0,0 +1,34 @@
+Avionic Design N-bit GPIO expander bindings
+
+Required properties:
+- compatible: should be "ad,gpio-adnp"
+- reg: The I2C slave address for this device.
+- interrupt-parent: phandle of the parent interrupt controller.
+- interrupts: Interrupt specifier for the controllers interrupt.
+- #gpio-cells: Should be 2. The first cell is the GPIO number and the
+  second cell is used to specify optional parameters:
+  - bit 0: polarity (0: normal, 1: inverted)
+- gpio-controller: Marks the device as a GPIO controller
+- nr-gpios: The number of pins supported by the controller.
+
+The GPIO expander can optionally be used as an interrupt controller, in
+which case it uses the default two cell specifier as described in
+Documentation/devicetree/bindings/interrupt-controller/interrupts.txt.
+
+Example:
+
+       gpioext: gpio-controller@41 {
+               compatible = "ad,gpio-adnp";
+               reg = <0x41>;
+
+               interrupt-parent = <&gpio>;
+               interrupts = <160 1>;
+
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               #interrupt-cells = <2>;
+
+               nr-gpios = <64>;
+       };
index 9bb308abd2213c3ee061ebbf45389f4643f583ba..edc83c1c0d54bb5f7f3297593eba77f1931e3d12 100644 (file)
@@ -8,7 +8,7 @@ node's name represents the name of the corresponding LED.
 
 LED sub-node properties:
 - gpios :  Should specify the LED's GPIO, see "gpios property" in
-  Documentation/devicetree/gpio.txt.  Active low LEDs should be
+  Documentation/devicetree/bindings/gpio/gpio.txt.  Active low LEDs should be
   indicated using flags in the GPIO specifier.
 - label :  (optional) The label for this LED.  If omitted, the label is
   taken from the node name (excluding the unit address).
diff --git a/Documentation/devicetree/bindings/input/gpio-keys-polled.txt b/Documentation/devicetree/bindings/input/gpio-keys-polled.txt
new file mode 100644 (file)
index 0000000..313abef
--- /dev/null
@@ -0,0 +1,38 @@
+Device-Tree bindings for input/gpio_keys_polled.c keyboard driver
+
+Required properties:
+       - compatible = "gpio-keys-polled";
+       - poll-interval: Poll interval time in milliseconds
+
+Optional properties:
+       - autorepeat: Boolean, Enable auto repeat feature of Linux input
+         subsystem.
+
+Each button (key) is represented as a sub-node of "gpio-keys-polled":
+Subnode properties:
+
+       - gpios: OF device-tree gpio specification.
+       - label: Descriptive name of the key.
+       - linux,code: Keycode to emit.
+
+Optional subnode-properties:
+       - linux,input-type: Specify event type this button/key generates.
+         If not specified defaults to <1> == EV_KEY.
+       - debounce-interval: Debouncing interval time in milliseconds.
+         If not specified defaults to 5.
+       - gpio-key,wakeup: Boolean, button can wake-up the system.
+
+Example nodes:
+
+       gpio_keys_polled {
+                       compatible = "gpio-keys-polled";
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       poll-interval = <100>;
+                       autorepeat;
+                       button@21 {
+                               label = "GPIO Key UP";
+                               linux,code = <103>;
+                               gpios = <&gpio1 0 1>;
+                       };
+                       ...
diff --git a/Documentation/devicetree/bindings/input/rotary-encoder.txt b/Documentation/devicetree/bindings/input/rotary-encoder.txt
new file mode 100644 (file)
index 0000000..3315495
--- /dev/null
@@ -0,0 +1,36 @@
+Rotary encoder DT bindings
+
+Required properties:
+- gpios: a spec for two GPIOs to be used
+
+Optional properties:
+- linux,axis: the input subsystem axis to map to this rotary encoder.
+  Defaults to 0 (ABS_X / REL_X)
+- rotary-encoder,steps: Number of steps in a full turnaround of the
+  encoder. Only relevant for absolute axis. Defaults to 24 which is a
+  typical value for such devices.
+- rotary-encoder,relative-axis: register a relative axis rather than an
+  absolute one. Relative axis will only generate +1/-1 events on the input
+  device, hence no steps need to be passed.
+- rotary-encoder,rollover: Automatic rollover when the rotary value becomes
+  greater than the specified steps or smaller than 0. For absolute axis only.
+- rotary-encoder,half-period: Makes the driver work on half-period mode.
+
+See Documentation/input/rotary-encoder.txt for more information.
+
+Example:
+
+               rotary@0 {
+                       compatible = "rotary-encoder";
+                       gpios = <&gpio 19 1>, <&gpio 20 0>; /* GPIO19 is inverted */
+                       linux,axis = <0>; /* REL_X */
+                       rotary-encoder,relative-axis;
+               };
+
+               rotary@1 {
+                       compatible = "rotary-encoder";
+                       gpios = <&gpio 21 0>, <&gpio 22 0>;
+                       linux,axis = <1>; /* ABS_Y */
+                       rotary-encoder,steps = <24>;
+                       rotary-encoder,rollover;
+               };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/interrupts.txt b/Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
new file mode 100644 (file)
index 0000000..72a06c0
--- /dev/null
@@ -0,0 +1,95 @@
+Specifying interrupt information for devices
+============================================
+
+1) Interrupt client nodes
+-------------------------
+
+Nodes that describe devices which generate interrupts must contain an
+"interrupts" property. This property must contain a list of interrupt
+specifiers, one per output interrupt. The format of the interrupt specifier is
+determined by the interrupt controller to which the interrupts are routed; see
+section 2 below for details.
+
+The "interrupt-parent" property is used to specify the controller to which
+interrupts are routed and contains a single phandle referring to the interrupt
+controller node. This property is inherited, so it may be specified in an
+interrupt client node or in any of its parent nodes.
+
+2) Interrupt controller nodes
+-----------------------------
+
+A device is marked as an interrupt controller with the "interrupt-controller"
+property. This is an empty, boolean property. An additional "#interrupt-cells"
+property defines the number of cells needed to specify a single interrupt.
+
+It is the responsibility of the interrupt controller's binding to define the
+length and format of the interrupt specifier. The following two variants are
+commonly used:
+
+  a) one cell
+  -----------
+  The #interrupt-cells property is set to 1 and the single cell defines the
+  index of the interrupt within the controller.
+
+  Example:
+
+       vic: intc@10140000 {
+               compatible = "arm,versatile-vic";
+               interrupt-controller;
+               #interrupt-cells = <1>;
+               reg = <0x10140000 0x1000>;
+       };
+
+       sic: intc@10003000 {
+               compatible = "arm,versatile-sic";
+               interrupt-controller;
+               #interrupt-cells = <1>;
+               reg = <0x10003000 0x1000>;
+               interrupt-parent = <&vic>;
+               interrupts = <31>; /* Cascaded to vic */
+       };
+
+  b) two cells
+  ------------
+  The #interrupt-cells property is set to 2 and the first cell defines the
+  index of the interrupt within the controller, while the second cell is used
+  to specify any of the following flags:
+    - bits[3:0] trigger type and level flags
+        1 = low-to-high edge triggered
+        2 = high-to-low edge triggered
+        4 = active high level-sensitive
+        8 = active low level-sensitive
+
+  Example:
+
+       i2c@7000c000 {
+               gpioext: gpio-adnp@41 {
+                       compatible = "ad,gpio-adnp";
+                       reg = <0x41>;
+
+                       interrupt-parent = <&gpio>;
+                       interrupts = <160 1>;
+
+                       gpio-controller;
+                       #gpio-cells = <1>;
+
+                       interrupt-controller;
+                       #interrupt-cells = <2>;
+
+                       nr-gpios = <64>;
+               };
+
+               sx8634@2b {
+                       compatible = "smtc,sx8634";
+                       reg = <0x2b>;
+
+                       interrupt-parent = <&gpioext>;
+                       interrupts = <3 0x8>;
+
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       threshold = <0x40>;
+                       sensitivity = <7>;
+               };
+       };
index 411727a3f82d6add5a7c1ca18aa01dbd30fe0ff5..c8ae996bd8f2e4403cd8c1bd7ed7507d9379f7d7 100644 (file)
@@ -6,6 +6,9 @@ Required properties:
 - interrupts : Should contain 3 xgmac interrupts. The 1st is main interrupt.
   The 2nd is pwr mgt interrupt. The 3rd is low power state interrupt.
 
+Optional properties:
+- dma-coherent      : Present if dma operations are coherent
+
 Example:
 
 ethernet@fff50000 {
diff --git a/Documentation/devicetree/bindings/net/can/c_can.txt b/Documentation/devicetree/bindings/net/can/c_can.txt
new file mode 100644 (file)
index 0000000..8f1ae81
--- /dev/null
@@ -0,0 +1,49 @@
+Bosch C_CAN/D_CAN controller Device Tree Bindings
+-------------------------------------------------
+
+Required properties:
+- compatible           : Should be "bosch,c_can" for C_CAN controllers and
+                         "bosch,d_can" for D_CAN controllers.
+- reg                  : physical base address and size of the C_CAN/D_CAN
+                         registers map
+- interrupts           : property with a value describing the interrupt
+                         number
+
+Optional properties:
+- ti,hwmods            : Must be "d_can<n>" or "c_can<n>", n being the
+                         instance number
+
+Note: The "ti,hwmods" field is used to fetch the base address and irq
+resources from the TI OMAP hwmod database during device registration.
+Future plan is to migrate hwmod data base contents into device tree
+blob so that, all the required data will be used from device tree dts
+file.
+
+Example:
+
+Step1: SoC common .dtsi file
+
+       dcan1: d_can@481d0000 {
+               compatible = "bosch,d_can";
+               reg = <0x481d0000 0x2000>;
+               interrupts = <55>;
+               interrupt-parent = <&intc>;
+               status = "disabled";
+       };
+
+(or)
+
+       dcan1: d_can@481d0000 {
+               compatible = "bosch,d_can";
+               ti,hwmods = "d_can1";
+               reg = <0x481d0000 0x2000>;
+               interrupts = <55>;
+               interrupt-parent = <&intc>;
+               status = "disabled";
+       };
+
+Step 2: board specific .dts file
+
+       &dcan1 {
+               status = "okay";
+       };
diff --git a/Documentation/devicetree/bindings/net/cpsw.txt b/Documentation/devicetree/bindings/net/cpsw.txt
new file mode 100644 (file)
index 0000000..dcaabe9
--- /dev/null
@@ -0,0 +1,109 @@
+TI SoC Ethernet Switch Controller Device Tree Bindings
+------------------------------------------------------
+
+Required properties:
+- compatible           : Should be "ti,cpsw"
+- reg                  : physical base address and size of the cpsw
+                         registers map
+- interrupts           : property with a value describing the interrupt
+                         number
+- interrupt-parent     : The parent interrupt controller
+- cpdma_channels       : Specifies number of channels in CPDMA
+- host_port_no         : Specifies host port shift
+- cpdma_reg_ofs                : Specifies CPDMA submodule register offset
+- cpdma_sram_ofs       : Specifies CPDMA SRAM offset
+- ale_reg_ofs          : Specifies ALE submodule register offset
+- ale_entries          : Specifies No of entries ALE can hold
+- host_port_reg_ofs    : Specifies host port register offset
+- hw_stats_reg_ofs     : Specifies hardware statistics register offset
+- bd_ram_ofs           : Specifies internal descriptor RAM offset
+- bd_ram_size          : Specifies internal descriptor RAM size
+- rx_descs             : Specifies number of Rx descriptors
+- mac_control          : Specifies Default MAC control register content
+                         for the specific platform
+- slaves               : Specifies number for slaves
+- slave_reg_ofs                : Specifies slave register offset
+- sliver_reg_ofs       : Specifies slave sliver register offset
+- phy_id               : Specifies slave phy id
+- mac-address          : Specifies slave MAC address
+
+Optional properties:
+- ti,hwmods            : Must be "cpgmac0"
+- no_bd_ram            : Must be 0 or 1
+
+Note: The "ti,hwmods" field is used to fetch the base address and irq
+resources from the TI OMAP hwmod database during device registration.
+Future plan is to migrate hwmod data base contents into device tree
+blob so that, all the required data will be used from device tree dts
+file.
+
+Examples:
+
+       mac: ethernet@4A100000 {
+               compatible = "ti,cpsw";
+               reg = <0x4A100000 0x1000>;
+               interrupts = <55 0x4>;
+               interrupt-parent = <&intc>;
+               cpdma_channels = <8>;
+               host_port_no = <0>;
+               cpdma_reg_ofs = <0x800>;
+               cpdma_sram_ofs = <0xa00>;
+               ale_reg_ofs = <0xd00>;
+               ale_entries = <1024>;
+               host_port_reg_ofs = <0x108>;
+               hw_stats_reg_ofs = <0x900>;
+               bd_ram_ofs = <0x2000>;
+               bd_ram_size = <0x2000>;
+               no_bd_ram = <0>;
+               rx_descs = <64>;
+               mac_control = <0x20>;
+               slaves = <2>;
+               cpsw_emac0: slave@0 {
+                       slave_reg_ofs = <0x208>;
+                       sliver_reg_ofs = <0xd80>;
+                       phy_id = "davinci_mdio.16:00";
+                       /* Filled in by U-Boot */
+                       mac-address = [ 00 00 00 00 00 00 ];
+               };
+               cpsw_emac1: slave@1 {
+                       slave_reg_ofs = <0x308>;
+                       sliver_reg_ofs = <0xdc0>;
+                       phy_id = "davinci_mdio.16:01";
+                       /* Filled in by U-Boot */
+                       mac-address = [ 00 00 00 00 00 00 ];
+               };
+       };
+
+(or)
+       mac: ethernet@4A100000 {
+               compatible = "ti,cpsw";
+               ti,hwmods = "cpgmac0";
+               cpdma_channels = <8>;
+               host_port_no = <0>;
+               cpdma_reg_ofs = <0x800>;
+               cpdma_sram_ofs = <0xa00>;
+               ale_reg_ofs = <0xd00>;
+               ale_entries = <1024>;
+               host_port_reg_ofs = <0x108>;
+               hw_stats_reg_ofs = <0x900>;
+               bd_ram_ofs = <0x2000>;
+               bd_ram_size = <0x2000>;
+               no_bd_ram = <0>;
+               rx_descs = <64>;
+               mac_control = <0x20>;
+               slaves = <2>;
+               cpsw_emac0: slave@0 {
+                       slave_reg_ofs = <0x208>;
+                       sliver_reg_ofs = <0xd80>;
+                       phy_id = "davinci_mdio.16:00";
+                       /* Filled in by U-Boot */
+                       mac-address = [ 00 00 00 00 00 00 ];
+               };
+               cpsw_emac1: slave@1 {
+                       slave_reg_ofs = <0x308>;
+                       sliver_reg_ofs = <0xdc0>;
+                       phy_id = "davinci_mdio.16:01";
+                       /* Filled in by U-Boot */
+                       mac-address = [ 00 00 00 00 00 00 ];
+               };
+       };
diff --git a/Documentation/devicetree/bindings/net/davinci-mdio.txt b/Documentation/devicetree/bindings/net/davinci-mdio.txt
new file mode 100644 (file)
index 0000000..72efaaf
--- /dev/null
@@ -0,0 +1,33 @@
+TI SoC Davinci MDIO Controller Device Tree Bindings
+---------------------------------------------------
+
+Required properties:
+- compatible           : Should be "ti,davinci_mdio"
+- reg                  : physical base address and size of the davinci mdio
+                         registers map
+- bus_freq             : Mdio Bus frequency
+
+Optional properties:
+- ti,hwmods            : Must be "davinci_mdio"
+
+Note: The "ti,hwmods" field is used to fetch the base address and irq
+resources from the TI OMAP hwmod database during device registration.
+Future plan is to migrate hwmod data base contents into device tree
+blob so that, all the required data will be used from device tree dts
+file.
+
+Examples:
+
+       mdio: davinci_mdio@4A101000 {
+               compatible = "ti,davinci_mdio";
+               reg = <0x4A101000 0x1000>;
+               bus_freq = <1000000>;
+       };
+
+(or)
+
+       mdio: davinci_mdio@4A101000 {
+               compatible = "ti,davinci_mdio";
+               ti,hwmods = "davinci_mdio";
+               bus_freq = <1000000>;
+       };
diff --git a/Documentation/devicetree/bindings/net/mdio-mux-mmioreg.txt b/Documentation/devicetree/bindings/net/mdio-mux-mmioreg.txt
new file mode 100644 (file)
index 0000000..8516929
--- /dev/null
@@ -0,0 +1,75 @@
+Properties for an MDIO bus multiplexer controlled by a memory-mapped device
+
+This is a special case of a MDIO bus multiplexer.  A memory-mapped device,
+like an FPGA, is used to control which child bus is connected.  The mdio-mux
+node must be a child of the memory-mapped device.  The driver currently only
+supports devices with eight-bit registers.
+
+Required properties in addition to the generic multiplexer properties:
+
+- compatible : string, must contain "mdio-mux-mmioreg"
+
+- reg : integer, contains the offset of the register that controls the bus
+       multiplexer.  The size field in the 'reg' property is the size of
+       register, and must therefore be 1.
+
+- mux-mask : integer, contains an eight-bit mask that specifies which
+       bits in the register control the actual bus multiplexer.  The
+       'reg' property of each child mdio-mux node must be constrained by
+       this mask.
+
+Example:
+
+The FPGA node defines a memory-mapped FPGA with a register space of 0x30 bytes.
+For the "EMI2" MDIO bus, register 9 (BRDCFG1) controls the mux on that bus.
+A bitmask of 0x6 means that bits 1 and 2 (bit 0 is lsb) are the bits on
+BRDCFG1 that control the actual mux.
+
+       /* The FPGA node */
+       fpga: board-control@3,0 {
+               #address-cells = <1>;
+               #size-cells = <1>;
+               compatible = "fsl,p5020ds-fpga", "fsl,fpga-ngpixis";
+               reg = <3 0 0x30>;
+               ranges = <0 3 0 0x30>;
+
+               mdio-mux-emi2 {
+                       compatible = "mdio-mux-mmioreg", "mdio-mux";
+                       mdio-parent-bus = <&xmdio0>;
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       reg = <9 1>; // BRDCFG1
+                       mux-mask = <0x6>; // EMI2
+
+                       emi2_slot1: mdio@0 {    // Slot 1 XAUI (FM2)
+                               reg = <0>;
+                               #address-cells = <1>;
+                               #size-cells = <0>;
+
+                               phy_xgmii_slot1: ethernet-phy@0 {
+                                       compatible = "ethernet-phy-ieee802.3-c45";
+                                       reg = <4>;
+                               };
+                       };
+
+                       emi2_slot2: mdio@2 {    // Slot 2 XAUI (FM1)
+                               reg = <2>;
+                               #address-cells = <1>;
+                               #size-cells = <0>;
+
+                               phy_xgmii_slot2: ethernet-phy@4 {
+                                       compatible = "ethernet-phy-ieee802.3-c45";
+                                       reg = <0>;
+                               };
+                       };
+               };
+       };
+
+       /* The parent MDIO bus. */
+       xmdio0: mdio@f1000 {
+               #address-cells = <1>;
+               #size-cells = <0>;
+               compatible = "fsl,fman-xmdio";
+               reg = <0xf1000 0x1000>;
+               interrupts = <100 1 0 0>;
+       };
diff --git a/Documentation/devicetree/bindings/pinctrl/brcm,bcm2835-gpio.txt b/Documentation/devicetree/bindings/pinctrl/brcm,bcm2835-gpio.txt
new file mode 100644 (file)
index 0000000..8edc20e
--- /dev/null
@@ -0,0 +1,74 @@
+Broadcom BCM2835 GPIO (and pinmux) controller
+
+The BCM2835 GPIO module is a combined GPIO controller, (GPIO) interrupt
+controller, and pinmux/control device.
+
+Required properties:
+- compatible: "brcm,bcm2835-gpio"
+- reg: Should contain the physical address of the GPIO module's registers.
+- gpio-controller: Marks the device node as a GPIO controller.
+- #gpio-cells : Should be two. The first cell is the pin number and the
+  second cell is used to specify optional parameters:
+  - bit 0 specifies polarity (0 for normal, 1 for inverted)
+- interrupts : The interrupt outputs from the controller. One interrupt per
+  individual bank followed by the "all banks" interrupt.
+- interrupt-controller: Marks the device node as an interrupt controller.
+- #interrupt-cells : Should be 2.
+  The first cell is the GPIO number.
+  The second cell is used to specify flags:
+    bits[3:0] trigger type and level flags:
+      1 = low-to-high edge triggered.
+      2 = high-to-low edge triggered.
+      4 = active high level-sensitive.
+      8 = active low level-sensitive.
+    Valid combinations are 1, 2, 3, 4, 8.
+
+Please refer to ../gpio/gpio.txt for a general description of GPIO bindings.
+
+Please refer to pinctrl-bindings.txt in this directory for details of the
+common pinctrl bindings used by client devices, including the meaning of the
+phrase "pin configuration node".
+
+Each pin configuration node lists the pin(s) to which it applies, and one or
+more of the mux function to select on those pin(s), and pull-up/down
+configuration. Each subnode only affects those parameters that are explicitly
+listed. In other words, a subnode that lists only a mux function implies no
+information about any pull configuration. Similarly, a subnode that lists only
+a pull parameter implies no information about the mux function.
+
+Required subnode-properties:
+- brcm,pins: An array of cells. Each cell contains the ID of a pin. Valid IDs
+  are the integer GPIO IDs; 0==GPIO0, 1==GPIO1, ... 53==GPIO53.
+
+Optional subnode-properties:
+- brcm,function: Integer, containing the function to mux to the pin(s):
+  0: GPIO in
+  1: GPIO out
+  2: alt5
+  3: alt4
+  4: alt0
+  5: alt1
+  6: alt2
+  7: alt3
+- brcm,pull: Integer, representing the pull-down/up to apply to the pin(s):
+  0: none
+  1: down
+  2: up
+
+Each of brcm,function and brcm,pull may contain either a single value which
+will be applied to all pins in brcm,pins, or 1 value for each entry in
+brcm,pins.
+
+Example:
+
+       gpio: gpio {
+               compatible = "brcm,bcm2835-gpio";
+               reg = <0x2200000 0xb4>;
+               interrupts = <2 17>, <2 19>, <2 18>, <2 20>;
+
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
diff --git a/Documentation/devicetree/bindings/pinctrl/fsl,imx35-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/fsl,imx35-pinctrl.txt
new file mode 100644 (file)
index 0000000..1183f1a
--- /dev/null
@@ -0,0 +1,984 @@
+* Freescale IMX35 IOMUX Controller
+
+Please refer to fsl,imx-pinctrl.txt in this directory for common binding part
+and usage.
+
+Required properties:
+- compatible: "fsl,imx35-iomuxc"
+- fsl,pins: an array of integer pairs, each pair representing a pin's mux and
+  config setting. The format is fsl,pins = <PIN_FUNC_ID CONFIG>, where
+  PIN_FUNC_ID selects a pin working on a specific function and CONFIG is the
+  pad setting value, such as pull-up, for this pin. Please refer to the imx35
+  datasheet for the valid pad config settings.
+
+CONFIG bits definition:
+PAD_CTL_DRIVE_VOLTAGE_18       (1 << 13)
+PAD_CTL_DRIVE_VOLTAGE_33       (0 << 13)
+PAD_CTL_HYS                    (1 << 8)
+PAD_CTL_PKE                    (1 << 7)
+PAD_CTL_PUE                    (1 << 6)
+PAD_CTL_PUS_100K_DOWN          (0 << 4)
+PAD_CTL_PUS_47K_UP             (1 << 4)
+PAD_CTL_PUS_100K_UP            (2 << 4)
+PAD_CTL_PUS_22K_UP             (3 << 4)
+PAD_CTL_ODE_CMOS               (0 << 3)
+PAD_CTL_ODE_OPENDRAIN          (1 << 3)
+PAD_CTL_DSE_NOMINAL            (0 << 1)
+PAD_CTL_DSE_HIGH               (1 << 1)
+PAD_CTL_DSE_MAX                        (2 << 1)
+PAD_CTL_SRE_FAST               (1 << 0)
+PAD_CTL_SRE_SLOW               (0 << 0)
+
+See below for available PIN_FUNC_ID for imx35:
+0 MX35_PAD_CAPTURE__GPT_CAPIN1
+1 MX35_PAD_CAPTURE__GPT_CMPOUT2
+2 MX35_PAD_CAPTURE__CSPI2_SS1
+3 MX35_PAD_CAPTURE__EPIT1_EPITO
+4 MX35_PAD_CAPTURE__CCM_CLK32K
+5 MX35_PAD_CAPTURE__GPIO1_4
+6 MX35_PAD_COMPARE__GPT_CMPOUT1
+7 MX35_PAD_COMPARE__GPT_CAPIN2
+8 MX35_PAD_COMPARE__GPT_CMPOUT3
+9 MX35_PAD_COMPARE__EPIT2_EPITO
+10 MX35_PAD_COMPARE__GPIO1_5
+11 MX35_PAD_COMPARE__SDMA_EXTDMA_2
+12 MX35_PAD_WDOG_RST__WDOG_WDOG_B
+13 MX35_PAD_WDOG_RST__IPU_FLASH_STROBE
+14 MX35_PAD_WDOG_RST__GPIO1_6
+15 MX35_PAD_GPIO1_0__GPIO1_0
+16 MX35_PAD_GPIO1_0__CCM_PMIC_RDY
+17 MX35_PAD_GPIO1_0__OWIRE_LINE
+18 MX35_PAD_GPIO1_0__SDMA_EXTDMA_0
+19 MX35_PAD_GPIO1_1__GPIO1_1
+20 MX35_PAD_GPIO1_1__PWM_PWMO
+21 MX35_PAD_GPIO1_1__CSPI1_SS2
+22 MX35_PAD_GPIO1_1__SCC_TAMPER_DETECT
+23 MX35_PAD_GPIO1_1__SDMA_EXTDMA_1
+24 MX35_PAD_GPIO2_0__GPIO2_0
+25 MX35_PAD_GPIO2_0__USB_TOP_USBOTG_CLK
+26 MX35_PAD_GPIO3_0__GPIO3_0
+27 MX35_PAD_GPIO3_0__USB_TOP_USBH2_CLK
+28 MX35_PAD_RESET_IN_B__CCM_RESET_IN_B
+29 MX35_PAD_POR_B__CCM_POR_B
+30 MX35_PAD_CLKO__CCM_CLKO
+31 MX35_PAD_CLKO__GPIO1_8
+32 MX35_PAD_BOOT_MODE0__CCM_BOOT_MODE_0
+33 MX35_PAD_BOOT_MODE1__CCM_BOOT_MODE_1
+34 MX35_PAD_CLK_MODE0__CCM_CLK_MODE_0
+35 MX35_PAD_CLK_MODE1__CCM_CLK_MODE_1
+36 MX35_PAD_POWER_FAIL__CCM_DSM_WAKEUP_INT_26
+37 MX35_PAD_VSTBY__CCM_VSTBY
+38 MX35_PAD_VSTBY__GPIO1_7
+39 MX35_PAD_A0__EMI_EIM_DA_L_0
+40 MX35_PAD_A1__EMI_EIM_DA_L_1
+41 MX35_PAD_A2__EMI_EIM_DA_L_2
+42 MX35_PAD_A3__EMI_EIM_DA_L_3
+43 MX35_PAD_A4__EMI_EIM_DA_L_4
+44 MX35_PAD_A5__EMI_EIM_DA_L_5
+45 MX35_PAD_A6__EMI_EIM_DA_L_6
+46 MX35_PAD_A7__EMI_EIM_DA_L_7
+47 MX35_PAD_A8__EMI_EIM_DA_H_8
+48 MX35_PAD_A9__EMI_EIM_DA_H_9
+49 MX35_PAD_A10__EMI_EIM_DA_H_10
+50 MX35_PAD_MA10__EMI_MA10
+51 MX35_PAD_A11__EMI_EIM_DA_H_11
+52 MX35_PAD_A12__EMI_EIM_DA_H_12
+53 MX35_PAD_A13__EMI_EIM_DA_H_13
+54 MX35_PAD_A14__EMI_EIM_DA_H2_14
+55 MX35_PAD_A15__EMI_EIM_DA_H2_15
+56 MX35_PAD_A16__EMI_EIM_A_16
+57 MX35_PAD_A17__EMI_EIM_A_17
+58 MX35_PAD_A18__EMI_EIM_A_18
+59 MX35_PAD_A19__EMI_EIM_A_19
+60 MX35_PAD_A20__EMI_EIM_A_20
+61 MX35_PAD_A21__EMI_EIM_A_21
+62 MX35_PAD_A22__EMI_EIM_A_22
+63 MX35_PAD_A23__EMI_EIM_A_23
+64 MX35_PAD_A24__EMI_EIM_A_24
+65 MX35_PAD_A25__EMI_EIM_A_25
+66 MX35_PAD_SDBA1__EMI_EIM_SDBA1
+67 MX35_PAD_SDBA0__EMI_EIM_SDBA0
+68 MX35_PAD_SD0__EMI_DRAM_D_0
+69 MX35_PAD_SD1__EMI_DRAM_D_1
+70 MX35_PAD_SD2__EMI_DRAM_D_2
+71 MX35_PAD_SD3__EMI_DRAM_D_3
+72 MX35_PAD_SD4__EMI_DRAM_D_4
+73 MX35_PAD_SD5__EMI_DRAM_D_5
+74 MX35_PAD_SD6__EMI_DRAM_D_6
+75 MX35_PAD_SD7__EMI_DRAM_D_7
+76 MX35_PAD_SD8__EMI_DRAM_D_8
+77 MX35_PAD_SD9__EMI_DRAM_D_9
+78 MX35_PAD_SD10__EMI_DRAM_D_10
+79 MX35_PAD_SD11__EMI_DRAM_D_11
+80 MX35_PAD_SD12__EMI_DRAM_D_12
+81 MX35_PAD_SD13__EMI_DRAM_D_13
+82 MX35_PAD_SD14__EMI_DRAM_D_14
+83 MX35_PAD_SD15__EMI_DRAM_D_15
+84 MX35_PAD_SD16__EMI_DRAM_D_16
+85 MX35_PAD_SD17__EMI_DRAM_D_17
+86 MX35_PAD_SD18__EMI_DRAM_D_18
+87 MX35_PAD_SD19__EMI_DRAM_D_19
+88 MX35_PAD_SD20__EMI_DRAM_D_20
+89 MX35_PAD_SD21__EMI_DRAM_D_21
+90 MX35_PAD_SD22__EMI_DRAM_D_22
+91 MX35_PAD_SD23__EMI_DRAM_D_23
+92 MX35_PAD_SD24__EMI_DRAM_D_24
+93 MX35_PAD_SD25__EMI_DRAM_D_25
+94 MX35_PAD_SD26__EMI_DRAM_D_26
+95 MX35_PAD_SD27__EMI_DRAM_D_27
+96 MX35_PAD_SD28__EMI_DRAM_D_28
+97 MX35_PAD_SD29__EMI_DRAM_D_29
+98 MX35_PAD_SD30__EMI_DRAM_D_30
+99 MX35_PAD_SD31__EMI_DRAM_D_31
+100 MX35_PAD_DQM0__EMI_DRAM_DQM_0
+101 MX35_PAD_DQM1__EMI_DRAM_DQM_1
+102 MX35_PAD_DQM2__EMI_DRAM_DQM_2
+103 MX35_PAD_DQM3__EMI_DRAM_DQM_3
+104 MX35_PAD_EB0__EMI_EIM_EB0_B
+105 MX35_PAD_EB1__EMI_EIM_EB1_B
+106 MX35_PAD_OE__EMI_EIM_OE
+107 MX35_PAD_CS0__EMI_EIM_CS0
+108 MX35_PAD_CS1__EMI_EIM_CS1
+109 MX35_PAD_CS1__EMI_NANDF_CE3
+110 MX35_PAD_CS2__EMI_EIM_CS2
+111 MX35_PAD_CS3__EMI_EIM_CS3
+112 MX35_PAD_CS4__EMI_EIM_CS4
+113 MX35_PAD_CS4__EMI_DTACK_B
+114 MX35_PAD_CS4__EMI_NANDF_CE1
+115 MX35_PAD_CS4__GPIO1_20
+116 MX35_PAD_CS5__EMI_EIM_CS5
+117 MX35_PAD_CS5__CSPI2_SS2
+118 MX35_PAD_CS5__CSPI1_SS2
+119 MX35_PAD_CS5__EMI_NANDF_CE2
+120 MX35_PAD_CS5__GPIO1_21
+121 MX35_PAD_NF_CE0__EMI_NANDF_CE0
+122 MX35_PAD_NF_CE0__GPIO1_22
+123 MX35_PAD_ECB__EMI_EIM_ECB
+124 MX35_PAD_LBA__EMI_EIM_LBA
+125 MX35_PAD_BCLK__EMI_EIM_BCLK
+126 MX35_PAD_RW__EMI_EIM_RW
+127 MX35_PAD_RAS__EMI_DRAM_RAS
+128 MX35_PAD_CAS__EMI_DRAM_CAS
+129 MX35_PAD_SDWE__EMI_DRAM_SDWE
+130 MX35_PAD_SDCKE0__EMI_DRAM_SDCKE_0
+131 MX35_PAD_SDCKE1__EMI_DRAM_SDCKE_1
+132 MX35_PAD_SDCLK__EMI_DRAM_SDCLK
+133 MX35_PAD_SDQS0__EMI_DRAM_SDQS_0
+134 MX35_PAD_SDQS1__EMI_DRAM_SDQS_1
+135 MX35_PAD_SDQS2__EMI_DRAM_SDQS_2
+136 MX35_PAD_SDQS3__EMI_DRAM_SDQS_3
+137 MX35_PAD_NFWE_B__EMI_NANDF_WE_B
+138 MX35_PAD_NFWE_B__USB_TOP_USBH2_DATA_3
+139 MX35_PAD_NFWE_B__IPU_DISPB_D0_VSYNC
+140 MX35_PAD_NFWE_B__GPIO2_18
+141 MX35_PAD_NFWE_B__ARM11P_TOP_TRACE_0
+142 MX35_PAD_NFRE_B__EMI_NANDF_RE_B
+143 MX35_PAD_NFRE_B__USB_TOP_USBH2_DIR
+144 MX35_PAD_NFRE_B__IPU_DISPB_BCLK
+145 MX35_PAD_NFRE_B__GPIO2_19
+146 MX35_PAD_NFRE_B__ARM11P_TOP_TRACE_1
+147 MX35_PAD_NFALE__EMI_NANDF_ALE
+148 MX35_PAD_NFALE__USB_TOP_USBH2_STP
+149 MX35_PAD_NFALE__IPU_DISPB_CS0
+150 MX35_PAD_NFALE__GPIO2_20
+151 MX35_PAD_NFALE__ARM11P_TOP_TRACE_2
+152 MX35_PAD_NFCLE__EMI_NANDF_CLE
+153 MX35_PAD_NFCLE__USB_TOP_USBH2_NXT
+154 MX35_PAD_NFCLE__IPU_DISPB_PAR_RS
+155 MX35_PAD_NFCLE__GPIO2_21
+156 MX35_PAD_NFCLE__ARM11P_TOP_TRACE_3
+157 MX35_PAD_NFWP_B__EMI_NANDF_WP_B
+158 MX35_PAD_NFWP_B__USB_TOP_USBH2_DATA_7
+159 MX35_PAD_NFWP_B__IPU_DISPB_WR
+160 MX35_PAD_NFWP_B__GPIO2_22
+161 MX35_PAD_NFWP_B__ARM11P_TOP_TRCTL
+162 MX35_PAD_NFRB__EMI_NANDF_RB
+163 MX35_PAD_NFRB__IPU_DISPB_RD
+164 MX35_PAD_NFRB__GPIO2_23
+165 MX35_PAD_NFRB__ARM11P_TOP_TRCLK
+166 MX35_PAD_D15__EMI_EIM_D_15
+167 MX35_PAD_D14__EMI_EIM_D_14
+168 MX35_PAD_D13__EMI_EIM_D_13
+169 MX35_PAD_D12__EMI_EIM_D_12
+170 MX35_PAD_D11__EMI_EIM_D_11
+171 MX35_PAD_D10__EMI_EIM_D_10
+172 MX35_PAD_D9__EMI_EIM_D_9
+173 MX35_PAD_D8__EMI_EIM_D_8
+174 MX35_PAD_D7__EMI_EIM_D_7
+175 MX35_PAD_D6__EMI_EIM_D_6
+176 MX35_PAD_D5__EMI_EIM_D_5
+177 MX35_PAD_D4__EMI_EIM_D_4
+178 MX35_PAD_D3__EMI_EIM_D_3
+179 MX35_PAD_D2__EMI_EIM_D_2
+180 MX35_PAD_D1__EMI_EIM_D_1
+181 MX35_PAD_D0__EMI_EIM_D_0
+182 MX35_PAD_CSI_D8__IPU_CSI_D_8
+183 MX35_PAD_CSI_D8__KPP_COL_0
+184 MX35_PAD_CSI_D8__GPIO1_20
+185 MX35_PAD_CSI_D8__ARM11P_TOP_EVNTBUS_13
+186 MX35_PAD_CSI_D9__IPU_CSI_D_9
+187 MX35_PAD_CSI_D9__KPP_COL_1
+188 MX35_PAD_CSI_D9__GPIO1_21
+189 MX35_PAD_CSI_D9__ARM11P_TOP_EVNTBUS_14
+190 MX35_PAD_CSI_D10__IPU_CSI_D_10
+191 MX35_PAD_CSI_D10__KPP_COL_2
+192 MX35_PAD_CSI_D10__GPIO1_22
+193 MX35_PAD_CSI_D10__ARM11P_TOP_EVNTBUS_15
+194 MX35_PAD_CSI_D11__IPU_CSI_D_11
+195 MX35_PAD_CSI_D11__KPP_COL_3
+196 MX35_PAD_CSI_D11__GPIO1_23
+197 MX35_PAD_CSI_D12__IPU_CSI_D_12
+198 MX35_PAD_CSI_D12__KPP_ROW_0
+199 MX35_PAD_CSI_D12__GPIO1_24
+200 MX35_PAD_CSI_D13__IPU_CSI_D_13
+201 MX35_PAD_CSI_D13__KPP_ROW_1
+202 MX35_PAD_CSI_D13__GPIO1_25
+203 MX35_PAD_CSI_D14__IPU_CSI_D_14
+204 MX35_PAD_CSI_D14__KPP_ROW_2
+205 MX35_PAD_CSI_D14__GPIO1_26
+206 MX35_PAD_CSI_D15__IPU_CSI_D_15
+207 MX35_PAD_CSI_D15__KPP_ROW_3
+208 MX35_PAD_CSI_D15__GPIO1_27
+209 MX35_PAD_CSI_MCLK__IPU_CSI_MCLK
+210 MX35_PAD_CSI_MCLK__GPIO1_28
+211 MX35_PAD_CSI_VSYNC__IPU_CSI_VSYNC
+212 MX35_PAD_CSI_VSYNC__GPIO1_29
+213 MX35_PAD_CSI_HSYNC__IPU_CSI_HSYNC
+214 MX35_PAD_CSI_HSYNC__GPIO1_30
+215 MX35_PAD_CSI_PIXCLK__IPU_CSI_PIXCLK
+216 MX35_PAD_CSI_PIXCLK__GPIO1_31
+217 MX35_PAD_I2C1_CLK__I2C1_SCL
+218 MX35_PAD_I2C1_CLK__GPIO2_24
+219 MX35_PAD_I2C1_CLK__CCM_USB_BYP_CLK
+220 MX35_PAD_I2C1_DAT__I2C1_SDA
+221 MX35_PAD_I2C1_DAT__GPIO2_25
+222 MX35_PAD_I2C2_CLK__I2C2_SCL
+223 MX35_PAD_I2C2_CLK__CAN1_TXCAN
+224 MX35_PAD_I2C2_CLK__USB_TOP_USBH2_PWR
+225 MX35_PAD_I2C2_CLK__GPIO2_26
+226 MX35_PAD_I2C2_CLK__SDMA_DEBUG_BUS_DEVICE_2
+227 MX35_PAD_I2C2_DAT__I2C2_SDA
+228 MX35_PAD_I2C2_DAT__CAN1_RXCAN
+229 MX35_PAD_I2C2_DAT__USB_TOP_USBH2_OC
+230 MX35_PAD_I2C2_DAT__GPIO2_27
+231 MX35_PAD_I2C2_DAT__SDMA_DEBUG_BUS_DEVICE_3
+232 MX35_PAD_STXD4__AUDMUX_AUD4_TXD
+233 MX35_PAD_STXD4__GPIO2_28
+234 MX35_PAD_STXD4__ARM11P_TOP_ARM_COREASID0
+235 MX35_PAD_SRXD4__AUDMUX_AUD4_RXD
+236 MX35_PAD_SRXD4__GPIO2_29
+237 MX35_PAD_SRXD4__ARM11P_TOP_ARM_COREASID1
+238 MX35_PAD_SCK4__AUDMUX_AUD4_TXC
+239 MX35_PAD_SCK4__GPIO2_30
+240 MX35_PAD_SCK4__ARM11P_TOP_ARM_COREASID2
+241 MX35_PAD_STXFS4__AUDMUX_AUD4_TXFS
+242 MX35_PAD_STXFS4__GPIO2_31
+243 MX35_PAD_STXFS4__ARM11P_TOP_ARM_COREASID3
+244 MX35_PAD_STXD5__AUDMUX_AUD5_TXD
+245 MX35_PAD_STXD5__SPDIF_SPDIF_OUT1
+246 MX35_PAD_STXD5__CSPI2_MOSI
+247 MX35_PAD_STXD5__GPIO1_0
+248 MX35_PAD_STXD5__ARM11P_TOP_ARM_COREASID4
+249 MX35_PAD_SRXD5__AUDMUX_AUD5_RXD
+250 MX35_PAD_SRXD5__SPDIF_SPDIF_IN1
+251 MX35_PAD_SRXD5__CSPI2_MISO
+252 MX35_PAD_SRXD5__GPIO1_1
+253 MX35_PAD_SRXD5__ARM11P_TOP_ARM_COREASID5
+254 MX35_PAD_SCK5__AUDMUX_AUD5_TXC
+255 MX35_PAD_SCK5__SPDIF_SPDIF_EXTCLK
+256 MX35_PAD_SCK5__CSPI2_SCLK
+257 MX35_PAD_SCK5__GPIO1_2
+258 MX35_PAD_SCK5__ARM11P_TOP_ARM_COREASID6
+259 MX35_PAD_STXFS5__AUDMUX_AUD5_TXFS
+260 MX35_PAD_STXFS5__CSPI2_RDY
+261 MX35_PAD_STXFS5__GPIO1_3
+262 MX35_PAD_STXFS5__ARM11P_TOP_ARM_COREASID7
+263 MX35_PAD_SCKR__ESAI_SCKR
+264 MX35_PAD_SCKR__GPIO1_4
+265 MX35_PAD_SCKR__ARM11P_TOP_EVNTBUS_10
+266 MX35_PAD_FSR__ESAI_FSR
+267 MX35_PAD_FSR__GPIO1_5
+268 MX35_PAD_FSR__ARM11P_TOP_EVNTBUS_11
+269 MX35_PAD_HCKR__ESAI_HCKR
+270 MX35_PAD_HCKR__AUDMUX_AUD5_RXFS
+271 MX35_PAD_HCKR__CSPI2_SS0
+272 MX35_PAD_HCKR__IPU_FLASH_STROBE
+273 MX35_PAD_HCKR__GPIO1_6
+274 MX35_PAD_HCKR__ARM11P_TOP_EVNTBUS_12
+275 MX35_PAD_SCKT__ESAI_SCKT
+276 MX35_PAD_SCKT__GPIO1_7
+277 MX35_PAD_SCKT__IPU_CSI_D_0
+278 MX35_PAD_SCKT__KPP_ROW_2
+279 MX35_PAD_FST__ESAI_FST
+280 MX35_PAD_FST__GPIO1_8
+281 MX35_PAD_FST__IPU_CSI_D_1
+282 MX35_PAD_FST__KPP_ROW_3
+283 MX35_PAD_HCKT__ESAI_HCKT
+284 MX35_PAD_HCKT__AUDMUX_AUD5_RXC
+285 MX35_PAD_HCKT__GPIO1_9
+286 MX35_PAD_HCKT__IPU_CSI_D_2
+287 MX35_PAD_HCKT__KPP_COL_3
+288 MX35_PAD_TX5_RX0__ESAI_TX5_RX0
+289 MX35_PAD_TX5_RX0__AUDMUX_AUD4_RXC
+290 MX35_PAD_TX5_RX0__CSPI2_SS2
+291 MX35_PAD_TX5_RX0__CAN2_TXCAN
+292 MX35_PAD_TX5_RX0__UART2_DTR
+293 MX35_PAD_TX5_RX0__GPIO1_10
+294 MX35_PAD_TX5_RX0__EMI_M3IF_CHOSEN_MASTER_0
+295 MX35_PAD_TX4_RX1__ESAI_TX4_RX1
+296 MX35_PAD_TX4_RX1__AUDMUX_AUD4_RXFS
+297 MX35_PAD_TX4_RX1__CSPI2_SS3
+298 MX35_PAD_TX4_RX1__CAN2_RXCAN
+299 MX35_PAD_TX4_RX1__UART2_DSR
+300 MX35_PAD_TX4_RX1__GPIO1_11
+301 MX35_PAD_TX4_RX1__IPU_CSI_D_3
+302 MX35_PAD_TX4_RX1__KPP_ROW_0
+303 MX35_PAD_TX3_RX2__ESAI_TX3_RX2
+304 MX35_PAD_TX3_RX2__I2C3_SCL
+305 MX35_PAD_TX3_RX2__EMI_NANDF_CE1
+306 MX35_PAD_TX3_RX2__GPIO1_12
+307 MX35_PAD_TX3_RX2__IPU_CSI_D_4
+308 MX35_PAD_TX3_RX2__KPP_ROW_1
+309 MX35_PAD_TX2_RX3__ESAI_TX2_RX3
+310 MX35_PAD_TX2_RX3__I2C3_SDA
+311 MX35_PAD_TX2_RX3__EMI_NANDF_CE2
+312 MX35_PAD_TX2_RX3__GPIO1_13
+313 MX35_PAD_TX2_RX3__IPU_CSI_D_5
+314 MX35_PAD_TX2_RX3__KPP_COL_0
+315 MX35_PAD_TX1__ESAI_TX1
+316 MX35_PAD_TX1__CCM_PMIC_RDY
+317 MX35_PAD_TX1__CSPI1_SS2
+318 MX35_PAD_TX1__EMI_NANDF_CE3
+319 MX35_PAD_TX1__UART2_RI
+320 MX35_PAD_TX1__GPIO1_14
+321 MX35_PAD_TX1__IPU_CSI_D_6
+322 MX35_PAD_TX1__KPP_COL_1
+323 MX35_PAD_TX0__ESAI_TX0
+324 MX35_PAD_TX0__SPDIF_SPDIF_EXTCLK
+325 MX35_PAD_TX0__CSPI1_SS3
+326 MX35_PAD_TX0__EMI_DTACK_B
+327 MX35_PAD_TX0__UART2_DCD
+328 MX35_PAD_TX0__GPIO1_15
+329 MX35_PAD_TX0__IPU_CSI_D_7
+330 MX35_PAD_TX0__KPP_COL_2
+331 MX35_PAD_CSPI1_MOSI__CSPI1_MOSI
+332 MX35_PAD_CSPI1_MOSI__GPIO1_16
+333 MX35_PAD_CSPI1_MOSI__ECT_CTI_TRIG_OUT1_2
+334 MX35_PAD_CSPI1_MISO__CSPI1_MISO
+335 MX35_PAD_CSPI1_MISO__GPIO1_17
+336 MX35_PAD_CSPI1_MISO__ECT_CTI_TRIG_OUT1_3
+337 MX35_PAD_CSPI1_SS0__CSPI1_SS0
+338 MX35_PAD_CSPI1_SS0__OWIRE_LINE
+339 MX35_PAD_CSPI1_SS0__CSPI2_SS3
+340 MX35_PAD_CSPI1_SS0__GPIO1_18
+341 MX35_PAD_CSPI1_SS0__ECT_CTI_TRIG_OUT1_4
+342 MX35_PAD_CSPI1_SS1__CSPI1_SS1
+343 MX35_PAD_CSPI1_SS1__PWM_PWMO
+344 MX35_PAD_CSPI1_SS1__CCM_CLK32K
+345 MX35_PAD_CSPI1_SS1__GPIO1_19
+346 MX35_PAD_CSPI1_SS1__IPU_DIAGB_29
+347 MX35_PAD_CSPI1_SS1__ECT_CTI_TRIG_OUT1_5
+348 MX35_PAD_CSPI1_SCLK__CSPI1_SCLK
+349 MX35_PAD_CSPI1_SCLK__GPIO3_4
+350 MX35_PAD_CSPI1_SCLK__IPU_DIAGB_30
+351 MX35_PAD_CSPI1_SCLK__EMI_M3IF_CHOSEN_MASTER_1
+352 MX35_PAD_CSPI1_SPI_RDY__CSPI1_RDY
+353 MX35_PAD_CSPI1_SPI_RDY__GPIO3_5
+354 MX35_PAD_CSPI1_SPI_RDY__IPU_DIAGB_31
+355 MX35_PAD_CSPI1_SPI_RDY__EMI_M3IF_CHOSEN_MASTER_2
+356 MX35_PAD_RXD1__UART1_RXD_MUX
+357 MX35_PAD_RXD1__CSPI2_MOSI
+358 MX35_PAD_RXD1__KPP_COL_4
+359 MX35_PAD_RXD1__GPIO3_6
+360 MX35_PAD_RXD1__ARM11P_TOP_EVNTBUS_16
+361 MX35_PAD_TXD1__UART1_TXD_MUX
+362 MX35_PAD_TXD1__CSPI2_MISO
+363 MX35_PAD_TXD1__KPP_COL_5
+364 MX35_PAD_TXD1__GPIO3_7
+365 MX35_PAD_TXD1__ARM11P_TOP_EVNTBUS_17
+366 MX35_PAD_RTS1__UART1_RTS
+367 MX35_PAD_RTS1__CSPI2_SCLK
+368 MX35_PAD_RTS1__I2C3_SCL
+369 MX35_PAD_RTS1__IPU_CSI_D_0
+370 MX35_PAD_RTS1__KPP_COL_6
+371 MX35_PAD_RTS1__GPIO3_8
+372 MX35_PAD_RTS1__EMI_NANDF_CE1
+373 MX35_PAD_RTS1__ARM11P_TOP_EVNTBUS_18
+374 MX35_PAD_CTS1__UART1_CTS
+375 MX35_PAD_CTS1__CSPI2_RDY
+376 MX35_PAD_CTS1__I2C3_SDA
+377 MX35_PAD_CTS1__IPU_CSI_D_1
+378 MX35_PAD_CTS1__KPP_COL_7
+379 MX35_PAD_CTS1__GPIO3_9
+380 MX35_PAD_CTS1__EMI_NANDF_CE2
+381 MX35_PAD_CTS1__ARM11P_TOP_EVNTBUS_19
+382 MX35_PAD_RXD2__UART2_RXD_MUX
+383 MX35_PAD_RXD2__KPP_ROW_4
+384 MX35_PAD_RXD2__GPIO3_10
+385 MX35_PAD_TXD2__UART2_TXD_MUX
+386 MX35_PAD_TXD2__SPDIF_SPDIF_EXTCLK
+387 MX35_PAD_TXD2__KPP_ROW_5
+388 MX35_PAD_TXD2__GPIO3_11
+389 MX35_PAD_RTS2__UART2_RTS
+390 MX35_PAD_RTS2__SPDIF_SPDIF_IN1
+391 MX35_PAD_RTS2__CAN2_RXCAN
+392 MX35_PAD_RTS2__IPU_CSI_D_2
+393 MX35_PAD_RTS2__KPP_ROW_6
+394 MX35_PAD_RTS2__GPIO3_12
+395 MX35_PAD_RTS2__AUDMUX_AUD5_RXC
+396 MX35_PAD_RTS2__UART3_RXD_MUX
+397 MX35_PAD_CTS2__UART2_CTS
+398 MX35_PAD_CTS2__SPDIF_SPDIF_OUT1
+399 MX35_PAD_CTS2__CAN2_TXCAN
+400 MX35_PAD_CTS2__IPU_CSI_D_3
+401 MX35_PAD_CTS2__KPP_ROW_7
+402 MX35_PAD_CTS2__GPIO3_13
+403 MX35_PAD_CTS2__AUDMUX_AUD5_RXFS
+404 MX35_PAD_CTS2__UART3_TXD_MUX
+405 MX35_PAD_RTCK__ARM11P_TOP_RTCK
+406 MX35_PAD_TCK__SJC_TCK
+407 MX35_PAD_TMS__SJC_TMS
+408 MX35_PAD_TDI__SJC_TDI
+409 MX35_PAD_TDO__SJC_TDO
+410 MX35_PAD_TRSTB__SJC_TRSTB
+411 MX35_PAD_DE_B__SJC_DE_B
+412 MX35_PAD_SJC_MOD__SJC_MOD
+413 MX35_PAD_USBOTG_PWR__USB_TOP_USBOTG_PWR
+414 MX35_PAD_USBOTG_PWR__USB_TOP_USBH2_PWR
+415 MX35_PAD_USBOTG_PWR__GPIO3_14
+416 MX35_PAD_USBOTG_OC__USB_TOP_USBOTG_OC
+417 MX35_PAD_USBOTG_OC__USB_TOP_USBH2_OC
+418 MX35_PAD_USBOTG_OC__GPIO3_15
+419 MX35_PAD_LD0__IPU_DISPB_DAT_0
+420 MX35_PAD_LD0__GPIO2_0
+421 MX35_PAD_LD0__SDMA_SDMA_DEBUG_PC_0
+422 MX35_PAD_LD1__IPU_DISPB_DAT_1
+423 MX35_PAD_LD1__GPIO2_1
+424 MX35_PAD_LD1__SDMA_SDMA_DEBUG_PC_1
+425 MX35_PAD_LD2__IPU_DISPB_DAT_2
+426 MX35_PAD_LD2__GPIO2_2
+427 MX35_PAD_LD2__SDMA_SDMA_DEBUG_PC_2
+428 MX35_PAD_LD3__IPU_DISPB_DAT_3
+429 MX35_PAD_LD3__GPIO2_3
+430 MX35_PAD_LD3__SDMA_SDMA_DEBUG_PC_3
+431 MX35_PAD_LD4__IPU_DISPB_DAT_4
+432 MX35_PAD_LD4__GPIO2_4
+433 MX35_PAD_LD4__SDMA_SDMA_DEBUG_PC_4
+434 MX35_PAD_LD5__IPU_DISPB_DAT_5
+435 MX35_PAD_LD5__GPIO2_5
+436 MX35_PAD_LD5__SDMA_SDMA_DEBUG_PC_5
+437 MX35_PAD_LD6__IPU_DISPB_DAT_6
+438 MX35_PAD_LD6__GPIO2_6
+439 MX35_PAD_LD6__SDMA_SDMA_DEBUG_PC_6
+440 MX35_PAD_LD7__IPU_DISPB_DAT_7
+441 MX35_PAD_LD7__GPIO2_7
+442 MX35_PAD_LD7__SDMA_SDMA_DEBUG_PC_7
+443 MX35_PAD_LD8__IPU_DISPB_DAT_8
+444 MX35_PAD_LD8__GPIO2_8
+445 MX35_PAD_LD8__SDMA_SDMA_DEBUG_PC_8
+446 MX35_PAD_LD9__IPU_DISPB_DAT_9
+447 MX35_PAD_LD9__GPIO2_9
+448 MX35_PAD_LD9__SDMA_SDMA_DEBUG_PC_9
+449 MX35_PAD_LD10__IPU_DISPB_DAT_10
+450 MX35_PAD_LD10__GPIO2_10
+451 MX35_PAD_LD10__SDMA_SDMA_DEBUG_PC_10
+452 MX35_PAD_LD11__IPU_DISPB_DAT_11
+453 MX35_PAD_LD11__GPIO2_11
+454 MX35_PAD_LD11__SDMA_SDMA_DEBUG_PC_11
+455 MX35_PAD_LD11__ARM11P_TOP_TRACE_4
+456 MX35_PAD_LD12__IPU_DISPB_DAT_12
+457 MX35_PAD_LD12__GPIO2_12
+458 MX35_PAD_LD12__SDMA_SDMA_DEBUG_PC_12
+459 MX35_PAD_LD12__ARM11P_TOP_TRACE_5
+460 MX35_PAD_LD13__IPU_DISPB_DAT_13
+461 MX35_PAD_LD13__GPIO2_13
+462 MX35_PAD_LD13__SDMA_SDMA_DEBUG_PC_13
+463 MX35_PAD_LD13__ARM11P_TOP_TRACE_6
+464 MX35_PAD_LD14__IPU_DISPB_DAT_14
+465 MX35_PAD_LD14__GPIO2_14
+466 MX35_PAD_LD14__SDMA_SDMA_DEBUG_EVENT_CHANNEL_0
+467 MX35_PAD_LD14__ARM11P_TOP_TRACE_7
+468 MX35_PAD_LD15__IPU_DISPB_DAT_15
+469 MX35_PAD_LD15__GPIO2_15
+470 MX35_PAD_LD15__SDMA_SDMA_DEBUG_EVENT_CHANNEL_1
+471 MX35_PAD_LD15__ARM11P_TOP_TRACE_8
+472 MX35_PAD_LD16__IPU_DISPB_DAT_16
+473 MX35_PAD_LD16__IPU_DISPB_D12_VSYNC
+474 MX35_PAD_LD16__GPIO2_16
+475 MX35_PAD_LD16__SDMA_SDMA_DEBUG_EVENT_CHANNEL_2
+476 MX35_PAD_LD16__ARM11P_TOP_TRACE_9
+477 MX35_PAD_LD17__IPU_DISPB_DAT_17
+478 MX35_PAD_LD17__IPU_DISPB_CS2
+479 MX35_PAD_LD17__GPIO2_17
+480 MX35_PAD_LD17__SDMA_SDMA_DEBUG_EVENT_CHANNEL_3
+481 MX35_PAD_LD17__ARM11P_TOP_TRACE_10
+482 MX35_PAD_LD18__IPU_DISPB_DAT_18
+483 MX35_PAD_LD18__IPU_DISPB_D0_VSYNC
+484 MX35_PAD_LD18__IPU_DISPB_D12_VSYNC
+485 MX35_PAD_LD18__ESDHC3_CMD
+486 MX35_PAD_LD18__USB_TOP_USBOTG_DATA_3
+487 MX35_PAD_LD18__GPIO3_24
+488 MX35_PAD_LD18__SDMA_SDMA_DEBUG_EVENT_CHANNEL_4
+489 MX35_PAD_LD18__ARM11P_TOP_TRACE_11
+490 MX35_PAD_LD19__IPU_DISPB_DAT_19
+491 MX35_PAD_LD19__IPU_DISPB_BCLK
+492 MX35_PAD_LD19__IPU_DISPB_CS1
+493 MX35_PAD_LD19__ESDHC3_CLK
+494 MX35_PAD_LD19__USB_TOP_USBOTG_DIR
+495 MX35_PAD_LD19__GPIO3_25
+496 MX35_PAD_LD19__SDMA_SDMA_DEBUG_EVENT_CHANNEL_5
+497 MX35_PAD_LD19__ARM11P_TOP_TRACE_12
+498 MX35_PAD_LD20__IPU_DISPB_DAT_20
+499 MX35_PAD_LD20__IPU_DISPB_CS0
+500 MX35_PAD_LD20__IPU_DISPB_SD_CLK
+501 MX35_PAD_LD20__ESDHC3_DAT0
+502 MX35_PAD_LD20__GPIO3_26
+503 MX35_PAD_LD20__SDMA_SDMA_DEBUG_CORE_STATUS_3
+504 MX35_PAD_LD20__ARM11P_TOP_TRACE_13
+505 MX35_PAD_LD21__IPU_DISPB_DAT_21
+506 MX35_PAD_LD21__IPU_DISPB_PAR_RS
+507 MX35_PAD_LD21__IPU_DISPB_SER_RS
+508 MX35_PAD_LD21__ESDHC3_DAT1
+509 MX35_PAD_LD21__USB_TOP_USBOTG_STP
+510 MX35_PAD_LD21__GPIO3_27
+511 MX35_PAD_LD21__SDMA_DEBUG_EVENT_CHANNEL_SEL
+512 MX35_PAD_LD21__ARM11P_TOP_TRACE_14
+513 MX35_PAD_LD22__IPU_DISPB_DAT_22
+514 MX35_PAD_LD22__IPU_DISPB_WR
+515 MX35_PAD_LD22__IPU_DISPB_SD_D_I
+516 MX35_PAD_LD22__ESDHC3_DAT2
+517 MX35_PAD_LD22__USB_TOP_USBOTG_NXT
+518 MX35_PAD_LD22__GPIO3_28
+519 MX35_PAD_LD22__SDMA_DEBUG_BUS_ERROR
+520 MX35_PAD_LD22__ARM11P_TOP_TRCTL
+521 MX35_PAD_LD23__IPU_DISPB_DAT_23
+522 MX35_PAD_LD23__IPU_DISPB_RD
+523 MX35_PAD_LD23__IPU_DISPB_SD_D_IO
+524 MX35_PAD_LD23__ESDHC3_DAT3
+525 MX35_PAD_LD23__USB_TOP_USBOTG_DATA_7
+526 MX35_PAD_LD23__GPIO3_29
+527 MX35_PAD_LD23__SDMA_DEBUG_MATCHED_DMBUS
+528 MX35_PAD_LD23__ARM11P_TOP_TRCLK
+529 MX35_PAD_D3_HSYNC__IPU_DISPB_D3_HSYNC
+530 MX35_PAD_D3_HSYNC__IPU_DISPB_SD_D_IO
+531 MX35_PAD_D3_HSYNC__GPIO3_30
+532 MX35_PAD_D3_HSYNC__SDMA_DEBUG_RTBUFFER_WRITE
+533 MX35_PAD_D3_HSYNC__ARM11P_TOP_TRACE_15
+534 MX35_PAD_D3_FPSHIFT__IPU_DISPB_D3_CLK
+535 MX35_PAD_D3_FPSHIFT__IPU_DISPB_SD_CLK
+536 MX35_PAD_D3_FPSHIFT__GPIO3_31
+537 MX35_PAD_D3_FPSHIFT__SDMA_SDMA_DEBUG_CORE_STATUS_0
+538 MX35_PAD_D3_FPSHIFT__ARM11P_TOP_TRACE_16
+539 MX35_PAD_D3_DRDY__IPU_DISPB_D3_DRDY
+540 MX35_PAD_D3_DRDY__IPU_DISPB_SD_D_O
+541 MX35_PAD_D3_DRDY__GPIO1_0
+542 MX35_PAD_D3_DRDY__SDMA_SDMA_DEBUG_CORE_STATUS_1
+543 MX35_PAD_D3_DRDY__ARM11P_TOP_TRACE_17
+544 MX35_PAD_CONTRAST__IPU_DISPB_CONTR
+545 MX35_PAD_CONTRAST__GPIO1_1
+546 MX35_PAD_CONTRAST__SDMA_SDMA_DEBUG_CORE_STATUS_2
+547 MX35_PAD_CONTRAST__ARM11P_TOP_TRACE_18
+548 MX35_PAD_D3_VSYNC__IPU_DISPB_D3_VSYNC
+549 MX35_PAD_D3_VSYNC__IPU_DISPB_CS1
+550 MX35_PAD_D3_VSYNC__GPIO1_2
+551 MX35_PAD_D3_VSYNC__SDMA_DEBUG_YIELD
+552 MX35_PAD_D3_VSYNC__ARM11P_TOP_TRACE_19
+553 MX35_PAD_D3_REV__IPU_DISPB_D3_REV
+554 MX35_PAD_D3_REV__IPU_DISPB_SER_RS
+555 MX35_PAD_D3_REV__GPIO1_3
+556 MX35_PAD_D3_REV__SDMA_DEBUG_BUS_RWB
+557 MX35_PAD_D3_REV__ARM11P_TOP_TRACE_20
+558 MX35_PAD_D3_CLS__IPU_DISPB_D3_CLS
+559 MX35_PAD_D3_CLS__IPU_DISPB_CS2
+560 MX35_PAD_D3_CLS__GPIO1_4
+561 MX35_PAD_D3_CLS__SDMA_DEBUG_BUS_DEVICE_0
+562 MX35_PAD_D3_CLS__ARM11P_TOP_TRACE_21
+563 MX35_PAD_D3_SPL__IPU_DISPB_D3_SPL
+564 MX35_PAD_D3_SPL__IPU_DISPB_D12_VSYNC
+565 MX35_PAD_D3_SPL__GPIO1_5
+566 MX35_PAD_D3_SPL__SDMA_DEBUG_BUS_DEVICE_1
+567 MX35_PAD_D3_SPL__ARM11P_TOP_TRACE_22
+568 MX35_PAD_SD1_CMD__ESDHC1_CMD
+569 MX35_PAD_SD1_CMD__MSHC_SCLK
+570 MX35_PAD_SD1_CMD__IPU_DISPB_D0_VSYNC
+571 MX35_PAD_SD1_CMD__USB_TOP_USBOTG_DATA_4
+572 MX35_PAD_SD1_CMD__GPIO1_6
+573 MX35_PAD_SD1_CMD__ARM11P_TOP_TRCTL
+574 MX35_PAD_SD1_CLK__ESDHC1_CLK
+575 MX35_PAD_SD1_CLK__MSHC_BS
+576 MX35_PAD_SD1_CLK__IPU_DISPB_BCLK
+577 MX35_PAD_SD1_CLK__USB_TOP_USBOTG_DATA_5
+578 MX35_PAD_SD1_CLK__GPIO1_7
+579 MX35_PAD_SD1_CLK__ARM11P_TOP_TRCLK
+580 MX35_PAD_SD1_DATA0__ESDHC1_DAT0
+581 MX35_PAD_SD1_DATA0__MSHC_DATA_0
+582 MX35_PAD_SD1_DATA0__IPU_DISPB_CS0
+583 MX35_PAD_SD1_DATA0__USB_TOP_USBOTG_DATA_6
+584 MX35_PAD_SD1_DATA0__GPIO1_8
+585 MX35_PAD_SD1_DATA0__ARM11P_TOP_TRACE_23
+586 MX35_PAD_SD1_DATA1__ESDHC1_DAT1
+587 MX35_PAD_SD1_DATA1__MSHC_DATA_1
+588 MX35_PAD_SD1_DATA1__IPU_DISPB_PAR_RS
+589 MX35_PAD_SD1_DATA1__USB_TOP_USBOTG_DATA_0
+590 MX35_PAD_SD1_DATA1__GPIO1_9
+591 MX35_PAD_SD1_DATA1__ARM11P_TOP_TRACE_24
+592 MX35_PAD_SD1_DATA2__ESDHC1_DAT2
+593 MX35_PAD_SD1_DATA2__MSHC_DATA_2
+594 MX35_PAD_SD1_DATA2__IPU_DISPB_WR
+595 MX35_PAD_SD1_DATA2__USB_TOP_USBOTG_DATA_1
+596 MX35_PAD_SD1_DATA2__GPIO1_10
+597 MX35_PAD_SD1_DATA2__ARM11P_TOP_TRACE_25
+598 MX35_PAD_SD1_DATA3__ESDHC1_DAT3
+599 MX35_PAD_SD1_DATA3__MSHC_DATA_3
+600 MX35_PAD_SD1_DATA3__IPU_DISPB_RD
+601 MX35_PAD_SD1_DATA3__USB_TOP_USBOTG_DATA_2
+602 MX35_PAD_SD1_DATA3__GPIO1_11
+603 MX35_PAD_SD1_DATA3__ARM11P_TOP_TRACE_26
+604 MX35_PAD_SD2_CMD__ESDHC2_CMD
+605 MX35_PAD_SD2_CMD__I2C3_SCL
+606 MX35_PAD_SD2_CMD__ESDHC1_DAT4
+607 MX35_PAD_SD2_CMD__IPU_CSI_D_2
+608 MX35_PAD_SD2_CMD__USB_TOP_USBH2_DATA_4
+609 MX35_PAD_SD2_CMD__GPIO2_0
+610 MX35_PAD_SD2_CMD__SPDIF_SPDIF_OUT1
+611 MX35_PAD_SD2_CMD__IPU_DISPB_D12_VSYNC
+612 MX35_PAD_SD2_CLK__ESDHC2_CLK
+613 MX35_PAD_SD2_CLK__I2C3_SDA
+614 MX35_PAD_SD2_CLK__ESDHC1_DAT5
+615 MX35_PAD_SD2_CLK__IPU_CSI_D_3
+616 MX35_PAD_SD2_CLK__USB_TOP_USBH2_DATA_5
+617 MX35_PAD_SD2_CLK__GPIO2_1
+618 MX35_PAD_SD2_CLK__SPDIF_SPDIF_IN1
+619 MX35_PAD_SD2_CLK__IPU_DISPB_CS2
+620 MX35_PAD_SD2_DATA0__ESDHC2_DAT0
+621 MX35_PAD_SD2_DATA0__UART3_RXD_MUX
+622 MX35_PAD_SD2_DATA0__ESDHC1_DAT6
+623 MX35_PAD_SD2_DATA0__IPU_CSI_D_4
+624 MX35_PAD_SD2_DATA0__USB_TOP_USBH2_DATA_6
+625 MX35_PAD_SD2_DATA0__GPIO2_2
+626 MX35_PAD_SD2_DATA0__SPDIF_SPDIF_EXTCLK
+627 MX35_PAD_SD2_DATA1__ESDHC2_DAT1
+628 MX35_PAD_SD2_DATA1__UART3_TXD_MUX
+629 MX35_PAD_SD2_DATA1__ESDHC1_DAT7
+630 MX35_PAD_SD2_DATA1__IPU_CSI_D_5
+631 MX35_PAD_SD2_DATA1__USB_TOP_USBH2_DATA_0
+632 MX35_PAD_SD2_DATA1__GPIO2_3
+633 MX35_PAD_SD2_DATA2__ESDHC2_DAT2
+634 MX35_PAD_SD2_DATA2__UART3_RTS
+635 MX35_PAD_SD2_DATA2__CAN1_RXCAN
+636 MX35_PAD_SD2_DATA2__IPU_CSI_D_6
+637 MX35_PAD_SD2_DATA2__USB_TOP_USBH2_DATA_1
+638 MX35_PAD_SD2_DATA2__GPIO2_4
+639 MX35_PAD_SD2_DATA3__ESDHC2_DAT3
+640 MX35_PAD_SD2_DATA3__UART3_CTS
+641 MX35_PAD_SD2_DATA3__CAN1_TXCAN
+642 MX35_PAD_SD2_DATA3__IPU_CSI_D_7
+643 MX35_PAD_SD2_DATA3__USB_TOP_USBH2_DATA_2
+644 MX35_PAD_SD2_DATA3__GPIO2_5
+645 MX35_PAD_ATA_CS0__ATA_CS0
+646 MX35_PAD_ATA_CS0__CSPI1_SS3
+647 MX35_PAD_ATA_CS0__IPU_DISPB_CS1
+648 MX35_PAD_ATA_CS0__GPIO2_6
+649 MX35_PAD_ATA_CS0__IPU_DIAGB_0
+650 MX35_PAD_ATA_CS0__ARM11P_TOP_MAX1_HMASTER_0
+651 MX35_PAD_ATA_CS1__ATA_CS1
+652 MX35_PAD_ATA_CS1__IPU_DISPB_CS2
+653 MX35_PAD_ATA_CS1__CSPI2_SS0
+654 MX35_PAD_ATA_CS1__GPIO2_7
+655 MX35_PAD_ATA_CS1__IPU_DIAGB_1
+656 MX35_PAD_ATA_CS1__ARM11P_TOP_MAX1_HMASTER_1
+657 MX35_PAD_ATA_DIOR__ATA_DIOR
+658 MX35_PAD_ATA_DIOR__ESDHC3_DAT0
+659 MX35_PAD_ATA_DIOR__USB_TOP_USBOTG_DIR
+660 MX35_PAD_ATA_DIOR__IPU_DISPB_BE0
+661 MX35_PAD_ATA_DIOR__CSPI2_SS1
+662 MX35_PAD_ATA_DIOR__GPIO2_8
+663 MX35_PAD_ATA_DIOR__IPU_DIAGB_2
+664 MX35_PAD_ATA_DIOR__ARM11P_TOP_MAX1_HMASTER_2
+665 MX35_PAD_ATA_DIOW__ATA_DIOW
+666 MX35_PAD_ATA_DIOW__ESDHC3_DAT1
+667 MX35_PAD_ATA_DIOW__USB_TOP_USBOTG_STP
+668 MX35_PAD_ATA_DIOW__IPU_DISPB_BE1
+669 MX35_PAD_ATA_DIOW__CSPI2_MOSI
+670 MX35_PAD_ATA_DIOW__GPIO2_9
+671 MX35_PAD_ATA_DIOW__IPU_DIAGB_3
+672 MX35_PAD_ATA_DIOW__ARM11P_TOP_MAX1_HMASTER_3
+673 MX35_PAD_ATA_DMACK__ATA_DMACK
+674 MX35_PAD_ATA_DMACK__ESDHC3_DAT2
+675 MX35_PAD_ATA_DMACK__USB_TOP_USBOTG_NXT
+676 MX35_PAD_ATA_DMACK__CSPI2_MISO
+677 MX35_PAD_ATA_DMACK__GPIO2_10
+678 MX35_PAD_ATA_DMACK__IPU_DIAGB_4
+679 MX35_PAD_ATA_DMACK__ARM11P_TOP_MAX0_HMASTER_0
+680 MX35_PAD_ATA_RESET_B__ATA_RESET_B
+681 MX35_PAD_ATA_RESET_B__ESDHC3_DAT3
+682 MX35_PAD_ATA_RESET_B__USB_TOP_USBOTG_DATA_0
+683 MX35_PAD_ATA_RESET_B__IPU_DISPB_SD_D_O
+684 MX35_PAD_ATA_RESET_B__CSPI2_RDY
+685 MX35_PAD_ATA_RESET_B__GPIO2_11
+686 MX35_PAD_ATA_RESET_B__IPU_DIAGB_5
+687 MX35_PAD_ATA_RESET_B__ARM11P_TOP_MAX0_HMASTER_1
+688 MX35_PAD_ATA_IORDY__ATA_IORDY
+689 MX35_PAD_ATA_IORDY__ESDHC3_DAT4
+690 MX35_PAD_ATA_IORDY__USB_TOP_USBOTG_DATA_1
+691 MX35_PAD_ATA_IORDY__IPU_DISPB_SD_D_IO
+692 MX35_PAD_ATA_IORDY__ESDHC2_DAT4
+693 MX35_PAD_ATA_IORDY__GPIO2_12
+694 MX35_PAD_ATA_IORDY__IPU_DIAGB_6
+695 MX35_PAD_ATA_IORDY__ARM11P_TOP_MAX0_HMASTER_2
+696 MX35_PAD_ATA_DATA0__ATA_DATA_0
+697 MX35_PAD_ATA_DATA0__ESDHC3_DAT5
+698 MX35_PAD_ATA_DATA0__USB_TOP_USBOTG_DATA_2
+699 MX35_PAD_ATA_DATA0__IPU_DISPB_D12_VSYNC
+700 MX35_PAD_ATA_DATA0__ESDHC2_DAT5
+701 MX35_PAD_ATA_DATA0__GPIO2_13
+702 MX35_PAD_ATA_DATA0__IPU_DIAGB_7
+703 MX35_PAD_ATA_DATA0__ARM11P_TOP_MAX0_HMASTER_3
+704 MX35_PAD_ATA_DATA1__ATA_DATA_1
+705 MX35_PAD_ATA_DATA1__ESDHC3_DAT6
+706 MX35_PAD_ATA_DATA1__USB_TOP_USBOTG_DATA_3
+707 MX35_PAD_ATA_DATA1__IPU_DISPB_SD_CLK
+708 MX35_PAD_ATA_DATA1__ESDHC2_DAT6
+709 MX35_PAD_ATA_DATA1__GPIO2_14
+710 MX35_PAD_ATA_DATA1__IPU_DIAGB_8
+711 MX35_PAD_ATA_DATA1__ARM11P_TOP_TRACE_27
+712 MX35_PAD_ATA_DATA2__ATA_DATA_2
+713 MX35_PAD_ATA_DATA2__ESDHC3_DAT7
+714 MX35_PAD_ATA_DATA2__USB_TOP_USBOTG_DATA_4
+715 MX35_PAD_ATA_DATA2__IPU_DISPB_SER_RS
+716 MX35_PAD_ATA_DATA2__ESDHC2_DAT7
+717 MX35_PAD_ATA_DATA2__GPIO2_15
+718 MX35_PAD_ATA_DATA2__IPU_DIAGB_9
+719 MX35_PAD_ATA_DATA2__ARM11P_TOP_TRACE_28
+720 MX35_PAD_ATA_DATA3__ATA_DATA_3
+721 MX35_PAD_ATA_DATA3__ESDHC3_CLK
+722 MX35_PAD_ATA_DATA3__USB_TOP_USBOTG_DATA_5
+723 MX35_PAD_ATA_DATA3__CSPI2_SCLK
+724 MX35_PAD_ATA_DATA3__GPIO2_16
+725 MX35_PAD_ATA_DATA3__IPU_DIAGB_10
+726 MX35_PAD_ATA_DATA3__ARM11P_TOP_TRACE_29
+727 MX35_PAD_ATA_DATA4__ATA_DATA_4
+728 MX35_PAD_ATA_DATA4__ESDHC3_CMD
+729 MX35_PAD_ATA_DATA4__USB_TOP_USBOTG_DATA_6
+730 MX35_PAD_ATA_DATA4__GPIO2_17
+731 MX35_PAD_ATA_DATA4__IPU_DIAGB_11
+732 MX35_PAD_ATA_DATA4__ARM11P_TOP_TRACE_30
+733 MX35_PAD_ATA_DATA5__ATA_DATA_5
+734 MX35_PAD_ATA_DATA5__USB_TOP_USBOTG_DATA_7
+735 MX35_PAD_ATA_DATA5__GPIO2_18
+736 MX35_PAD_ATA_DATA5__IPU_DIAGB_12
+737 MX35_PAD_ATA_DATA5__ARM11P_TOP_TRACE_31
+738 MX35_PAD_ATA_DATA6__ATA_DATA_6
+739 MX35_PAD_ATA_DATA6__CAN1_TXCAN
+740 MX35_PAD_ATA_DATA6__UART1_DTR
+741 MX35_PAD_ATA_DATA6__AUDMUX_AUD6_TXD
+742 MX35_PAD_ATA_DATA6__GPIO2_19
+743 MX35_PAD_ATA_DATA6__IPU_DIAGB_13
+744 MX35_PAD_ATA_DATA7__ATA_DATA_7
+745 MX35_PAD_ATA_DATA7__CAN1_RXCAN
+746 MX35_PAD_ATA_DATA7__UART1_DSR
+747 MX35_PAD_ATA_DATA7__AUDMUX_AUD6_RXD
+748 MX35_PAD_ATA_DATA7__GPIO2_20
+749 MX35_PAD_ATA_DATA7__IPU_DIAGB_14
+750 MX35_PAD_ATA_DATA8__ATA_DATA_8
+751 MX35_PAD_ATA_DATA8__UART3_RTS
+752 MX35_PAD_ATA_DATA8__UART1_RI
+753 MX35_PAD_ATA_DATA8__AUDMUX_AUD6_TXC
+754 MX35_PAD_ATA_DATA8__GPIO2_21
+755 MX35_PAD_ATA_DATA8__IPU_DIAGB_15
+756 MX35_PAD_ATA_DATA9__ATA_DATA_9
+757 MX35_PAD_ATA_DATA9__UART3_CTS
+758 MX35_PAD_ATA_DATA9__UART1_DCD
+759 MX35_PAD_ATA_DATA9__AUDMUX_AUD6_TXFS
+760 MX35_PAD_ATA_DATA9__GPIO2_22
+761 MX35_PAD_ATA_DATA9__IPU_DIAGB_16
+762 MX35_PAD_ATA_DATA10__ATA_DATA_10
+763 MX35_PAD_ATA_DATA10__UART3_RXD_MUX
+764 MX35_PAD_ATA_DATA10__AUDMUX_AUD6_RXC
+765 MX35_PAD_ATA_DATA10__GPIO2_23
+766 MX35_PAD_ATA_DATA10__IPU_DIAGB_17
+767 MX35_PAD_ATA_DATA11__ATA_DATA_11
+768 MX35_PAD_ATA_DATA11__UART3_TXD_MUX
+769 MX35_PAD_ATA_DATA11__AUDMUX_AUD6_RXFS
+770 MX35_PAD_ATA_DATA11__GPIO2_24
+771 MX35_PAD_ATA_DATA11__IPU_DIAGB_18
+772 MX35_PAD_ATA_DATA12__ATA_DATA_12
+773 MX35_PAD_ATA_DATA12__I2C3_SCL
+774 MX35_PAD_ATA_DATA12__GPIO2_25
+775 MX35_PAD_ATA_DATA12__IPU_DIAGB_19
+776 MX35_PAD_ATA_DATA13__ATA_DATA_13
+777 MX35_PAD_ATA_DATA13__I2C3_SDA
+778 MX35_PAD_ATA_DATA13__GPIO2_26
+779 MX35_PAD_ATA_DATA13__IPU_DIAGB_20
+780 MX35_PAD_ATA_DATA14__ATA_DATA_14
+781 MX35_PAD_ATA_DATA14__IPU_CSI_D_0
+782 MX35_PAD_ATA_DATA14__KPP_ROW_0
+783 MX35_PAD_ATA_DATA14__GPIO2_27
+784 MX35_PAD_ATA_DATA14__IPU_DIAGB_21
+785 MX35_PAD_ATA_DATA15__ATA_DATA_15
+786 MX35_PAD_ATA_DATA15__IPU_CSI_D_1
+787 MX35_PAD_ATA_DATA15__KPP_ROW_1
+788 MX35_PAD_ATA_DATA15__GPIO2_28
+789 MX35_PAD_ATA_DATA15__IPU_DIAGB_22
+790 MX35_PAD_ATA_INTRQ__ATA_INTRQ
+791 MX35_PAD_ATA_INTRQ__IPU_CSI_D_2
+792 MX35_PAD_ATA_INTRQ__KPP_ROW_2
+793 MX35_PAD_ATA_INTRQ__GPIO2_29
+794 MX35_PAD_ATA_INTRQ__IPU_DIAGB_23
+795 MX35_PAD_ATA_BUFF_EN__ATA_BUFFER_EN
+796 MX35_PAD_ATA_BUFF_EN__IPU_CSI_D_3
+797 MX35_PAD_ATA_BUFF_EN__KPP_ROW_3
+798 MX35_PAD_ATA_BUFF_EN__GPIO2_30
+799 MX35_PAD_ATA_BUFF_EN__IPU_DIAGB_24
+800 MX35_PAD_ATA_DMARQ__ATA_DMARQ
+801 MX35_PAD_ATA_DMARQ__IPU_CSI_D_4
+802 MX35_PAD_ATA_DMARQ__KPP_COL_0
+803 MX35_PAD_ATA_DMARQ__GPIO2_31
+804 MX35_PAD_ATA_DMARQ__IPU_DIAGB_25
+805 MX35_PAD_ATA_DMARQ__ECT_CTI_TRIG_IN1_4
+806 MX35_PAD_ATA_DA0__ATA_DA_0
+807 MX35_PAD_ATA_DA0__IPU_CSI_D_5
+808 MX35_PAD_ATA_DA0__KPP_COL_1
+809 MX35_PAD_ATA_DA0__GPIO3_0
+810 MX35_PAD_ATA_DA0__IPU_DIAGB_26
+811 MX35_PAD_ATA_DA0__ECT_CTI_TRIG_IN1_5
+812 MX35_PAD_ATA_DA1__ATA_DA_1
+813 MX35_PAD_ATA_DA1__IPU_CSI_D_6
+814 MX35_PAD_ATA_DA1__KPP_COL_2
+815 MX35_PAD_ATA_DA1__GPIO3_1
+816 MX35_PAD_ATA_DA1__IPU_DIAGB_27
+817 MX35_PAD_ATA_DA1__ECT_CTI_TRIG_IN1_6
+818 MX35_PAD_ATA_DA2__ATA_DA_2
+819 MX35_PAD_ATA_DA2__IPU_CSI_D_7
+820 MX35_PAD_ATA_DA2__KPP_COL_3
+821 MX35_PAD_ATA_DA2__GPIO3_2
+822 MX35_PAD_ATA_DA2__IPU_DIAGB_28
+823 MX35_PAD_ATA_DA2__ECT_CTI_TRIG_IN1_7
+824 MX35_PAD_MLB_CLK__MLB_MLBCLK
+825 MX35_PAD_MLB_CLK__GPIO3_3
+826 MX35_PAD_MLB_DAT__MLB_MLBDAT
+827 MX35_PAD_MLB_DAT__GPIO3_4
+828 MX35_PAD_MLB_SIG__MLB_MLBSIG
+829 MX35_PAD_MLB_SIG__GPIO3_5
+830 MX35_PAD_FEC_TX_CLK__FEC_TX_CLK
+831 MX35_PAD_FEC_TX_CLK__ESDHC1_DAT4
+832 MX35_PAD_FEC_TX_CLK__UART3_RXD_MUX
+833 MX35_PAD_FEC_TX_CLK__USB_TOP_USBH2_DIR
+834 MX35_PAD_FEC_TX_CLK__CSPI2_MOSI
+835 MX35_PAD_FEC_TX_CLK__GPIO3_6
+836 MX35_PAD_FEC_TX_CLK__IPU_DISPB_D12_VSYNC
+837 MX35_PAD_FEC_TX_CLK__ARM11P_TOP_EVNTBUS_0
+838 MX35_PAD_FEC_RX_CLK__FEC_RX_CLK
+839 MX35_PAD_FEC_RX_CLK__ESDHC1_DAT5
+840 MX35_PAD_FEC_RX_CLK__UART3_TXD_MUX
+841 MX35_PAD_FEC_RX_CLK__USB_TOP_USBH2_STP
+842 MX35_PAD_FEC_RX_CLK__CSPI2_MISO
+843 MX35_PAD_FEC_RX_CLK__GPIO3_7
+844 MX35_PAD_FEC_RX_CLK__IPU_DISPB_SD_D_I
+845 MX35_PAD_FEC_RX_CLK__ARM11P_TOP_EVNTBUS_1
+846 MX35_PAD_FEC_RX_DV__FEC_RX_DV
+847 MX35_PAD_FEC_RX_DV__ESDHC1_DAT6
+848 MX35_PAD_FEC_RX_DV__UART3_RTS
+849 MX35_PAD_FEC_RX_DV__USB_TOP_USBH2_NXT
+850 MX35_PAD_FEC_RX_DV__CSPI2_SCLK
+851 MX35_PAD_FEC_RX_DV__GPIO3_8
+852 MX35_PAD_FEC_RX_DV__IPU_DISPB_SD_CLK
+853 MX35_PAD_FEC_RX_DV__ARM11P_TOP_EVNTBUS_2
+854 MX35_PAD_FEC_COL__FEC_COL
+855 MX35_PAD_FEC_COL__ESDHC1_DAT7
+856 MX35_PAD_FEC_COL__UART3_CTS
+857 MX35_PAD_FEC_COL__USB_TOP_USBH2_DATA_0
+858 MX35_PAD_FEC_COL__CSPI2_RDY
+859 MX35_PAD_FEC_COL__GPIO3_9
+860 MX35_PAD_FEC_COL__IPU_DISPB_SER_RS
+861 MX35_PAD_FEC_COL__ARM11P_TOP_EVNTBUS_3
+862 MX35_PAD_FEC_RDATA0__FEC_RDATA_0
+863 MX35_PAD_FEC_RDATA0__PWM_PWMO
+864 MX35_PAD_FEC_RDATA0__UART3_DTR
+865 MX35_PAD_FEC_RDATA0__USB_TOP_USBH2_DATA_1
+866 MX35_PAD_FEC_RDATA0__CSPI2_SS0
+867 MX35_PAD_FEC_RDATA0__GPIO3_10
+868 MX35_PAD_FEC_RDATA0__IPU_DISPB_CS1
+869 MX35_PAD_FEC_RDATA0__ARM11P_TOP_EVNTBUS_4
+870 MX35_PAD_FEC_TDATA0__FEC_TDATA_0
+871 MX35_PAD_FEC_TDATA0__SPDIF_SPDIF_OUT1
+872 MX35_PAD_FEC_TDATA0__UART3_DSR
+873 MX35_PAD_FEC_TDATA0__USB_TOP_USBH2_DATA_2
+874 MX35_PAD_FEC_TDATA0__CSPI2_SS1
+875 MX35_PAD_FEC_TDATA0__GPIO3_11
+876 MX35_PAD_FEC_TDATA0__IPU_DISPB_CS0
+877 MX35_PAD_FEC_TDATA0__ARM11P_TOP_EVNTBUS_5
+878 MX35_PAD_FEC_TX_EN__FEC_TX_EN
+879 MX35_PAD_FEC_TX_EN__SPDIF_SPDIF_IN1
+880 MX35_PAD_FEC_TX_EN__UART3_RI
+881 MX35_PAD_FEC_TX_EN__USB_TOP_USBH2_DATA_3
+882 MX35_PAD_FEC_TX_EN__GPIO3_12
+883 MX35_PAD_FEC_TX_EN__IPU_DISPB_PAR_RS
+884 MX35_PAD_FEC_TX_EN__ARM11P_TOP_EVNTBUS_6
+885 MX35_PAD_FEC_MDC__FEC_MDC
+886 MX35_PAD_FEC_MDC__CAN2_TXCAN
+887 MX35_PAD_FEC_MDC__UART3_DCD
+888 MX35_PAD_FEC_MDC__USB_TOP_USBH2_DATA_4
+889 MX35_PAD_FEC_MDC__GPIO3_13
+890 MX35_PAD_FEC_MDC__IPU_DISPB_WR
+891 MX35_PAD_FEC_MDC__ARM11P_TOP_EVNTBUS_7
+892 MX35_PAD_FEC_MDIO__FEC_MDIO
+893 MX35_PAD_FEC_MDIO__CAN2_RXCAN
+894 MX35_PAD_FEC_MDIO__USB_TOP_USBH2_DATA_5
+895 MX35_PAD_FEC_MDIO__GPIO3_14
+896 MX35_PAD_FEC_MDIO__IPU_DISPB_RD
+897 MX35_PAD_FEC_MDIO__ARM11P_TOP_EVNTBUS_8
+898 MX35_PAD_FEC_TX_ERR__FEC_TX_ERR
+899 MX35_PAD_FEC_TX_ERR__OWIRE_LINE
+900 MX35_PAD_FEC_TX_ERR__SPDIF_SPDIF_EXTCLK
+901 MX35_PAD_FEC_TX_ERR__USB_TOP_USBH2_DATA_6
+902 MX35_PAD_FEC_TX_ERR__GPIO3_15
+903 MX35_PAD_FEC_TX_ERR__IPU_DISPB_D0_VSYNC
+904 MX35_PAD_FEC_TX_ERR__ARM11P_TOP_EVNTBUS_9
+905 MX35_PAD_FEC_RX_ERR__FEC_RX_ERR
+906 MX35_PAD_FEC_RX_ERR__IPU_CSI_D_0
+907 MX35_PAD_FEC_RX_ERR__USB_TOP_USBH2_DATA_7
+908 MX35_PAD_FEC_RX_ERR__KPP_COL_4
+909 MX35_PAD_FEC_RX_ERR__GPIO3_16
+910 MX35_PAD_FEC_RX_ERR__IPU_DISPB_SD_D_IO
+911 MX35_PAD_FEC_CRS__FEC_CRS
+912 MX35_PAD_FEC_CRS__IPU_CSI_D_1
+913 MX35_PAD_FEC_CRS__USB_TOP_USBH2_PWR
+914 MX35_PAD_FEC_CRS__KPP_COL_5
+915 MX35_PAD_FEC_CRS__GPIO3_17
+916 MX35_PAD_FEC_CRS__IPU_FLASH_STROBE
+917 MX35_PAD_FEC_RDATA1__FEC_RDATA_1
+918 MX35_PAD_FEC_RDATA1__IPU_CSI_D_2
+919 MX35_PAD_FEC_RDATA1__AUDMUX_AUD6_RXC
+920 MX35_PAD_FEC_RDATA1__USB_TOP_USBH2_OC
+921 MX35_PAD_FEC_RDATA1__KPP_COL_6
+922 MX35_PAD_FEC_RDATA1__GPIO3_18
+923 MX35_PAD_FEC_RDATA1__IPU_DISPB_BE0
+924 MX35_PAD_FEC_TDATA1__FEC_TDATA_1
+925 MX35_PAD_FEC_TDATA1__IPU_CSI_D_3
+926 MX35_PAD_FEC_TDATA1__AUDMUX_AUD6_RXFS
+927 MX35_PAD_FEC_TDATA1__KPP_COL_7
+928 MX35_PAD_FEC_TDATA1__GPIO3_19
+929 MX35_PAD_FEC_TDATA1__IPU_DISPB_BE1
+930 MX35_PAD_FEC_RDATA2__FEC_RDATA_2
+931 MX35_PAD_FEC_RDATA2__IPU_CSI_D_4
+932 MX35_PAD_FEC_RDATA2__AUDMUX_AUD6_TXD
+933 MX35_PAD_FEC_RDATA2__KPP_ROW_4
+934 MX35_PAD_FEC_RDATA2__GPIO3_20
+935 MX35_PAD_FEC_TDATA2__FEC_TDATA_2
+936 MX35_PAD_FEC_TDATA2__IPU_CSI_D_5
+937 MX35_PAD_FEC_TDATA2__AUDMUX_AUD6_RXD
+938 MX35_PAD_FEC_TDATA2__KPP_ROW_5
+939 MX35_PAD_FEC_TDATA2__GPIO3_21
+940 MX35_PAD_FEC_RDATA3__FEC_RDATA_3
+941 MX35_PAD_FEC_RDATA3__IPU_CSI_D_6
+942 MX35_PAD_FEC_RDATA3__AUDMUX_AUD6_TXC
+943 MX35_PAD_FEC_RDATA3__KPP_ROW_6
+944 MX35_PAD_FEC_RDATA3__GPIO3_22
+945 MX35_PAD_FEC_TDATA3__FEC_TDATA_3
+946 MX35_PAD_FEC_TDATA3__IPU_CSI_D_7
+947 MX35_PAD_FEC_TDATA3__AUDMUX_AUD6_TXFS
+948 MX35_PAD_FEC_TDATA3__KPP_ROW_7
+949 MX35_PAD_FEC_TDATA3__GPIO3_23
+950 MX35_PAD_EXT_ARMCLK__CCM_EXT_ARMCLK
+951 MX35_PAD_TEST_MODE__TCU_TEST_MODE
index 5187f0dd8b2864c7bc95282b1250688f36500e5a..2c81e45f1374ccd7b5b79c1bb2872639c4d04652 100644 (file)
@@ -14,10 +14,12 @@ Optional properties:
 - pinctrl-single,function-off : function off mode for disabled state if
   available and same for all registers; if not specified, disabling of
   pin functions is ignored
+- pinctrl-single,bit-per-mux : boolean to indicate that one register controls
+  more than one pin
 
-This driver assumes that there is only one register for each pin,
-and uses the common pinctrl bindings as specified in the pinctrl-bindings.txt
-document in this directory.
+This driver assumes that there is only one register for each pin (unless the
+pinctrl-single,bit-per-mux is set), and uses the common pinctrl bindings as
+specified in the pinctrl-bindings.txt document in this directory.
 
 The pin configuration nodes for pinctrl-single are specified as pinctrl
 register offset and value pairs using pinctrl-single,pins. Only the bits
@@ -31,6 +33,15 @@ device pinctrl register, and 0x118 contains the desired value of the
 pinctrl register. See the device example and static board pins example
 below for more information.
 
+In the case where one register changes more than one pin's mux,
+pinctrl-single,bits needs to be used; it takes three parameters:
+
+       pinctrl-single,bits = <0xdc 0x18 0xff>;
+
+Where 0xdc is the offset from the pinctrl register base address for the
+device pinctrl register, 0x18 is the desired value, and 0xff is the sub mask to
+be used when applying this change to the register.
+
 Example:
 
 /* SoC common file */
@@ -55,6 +66,15 @@ pmx_wkup: pinmux@4a31e040 {
        pinctrl-single,function-mask = <0xffff>;
 };
 
+control_devconf0: pinmux@48002274 {
+       compatible = "pinctrl-single";
+       reg = <0x48002274 4>;   /* Single register */
+       #address-cells = <1>;
+       #size-cells = <0>;
+       pinctrl-single,bit-per-mux;
+       pinctrl-single,register-width = <32>;
+       pinctrl-single,function-mask = <0x5F>;
+};
 
 /* board specific .dts file */
 
@@ -87,6 +107,21 @@ pmx_wkup: pinmux@4a31e040 {
        };
 };
 
+&control_devconf0 {
+       mcbsp1_pins: pinmux_mcbsp1_pins {
+               pinctrl-single,bits = <
+                       0x00 0x18 0x18 /* FSR/CLKR signal from FSX/CLKX pin */
+               >;
+       };
+
+       mcbsp2_clks_pins: pinmux_mcbsp2_clks_pins {
+               pinctrl-single,bits = <
+                       0x00 0x40 0x40 /* McBSP2 CLKS from McBSP_CLKS pin */
+               >;
+       };
+
+};
+
 &uart2 {
        pinctrl-names = "default";
        pinctrl-0 = <&uart2_pins>;
diff --git a/Documentation/devicetree/bindings/power/opp.txt b/Documentation/devicetree/bindings/power/opp.txt
new file mode 100644 (file)
index 0000000..74499e5
--- /dev/null
@@ -0,0 +1,25 @@
+* Generic OPP Interface
+
+SoCs have a standard set of tuples consisting of frequency and
+voltage pairs that the device will support per voltage domain. These
+are called Operating Performance Points or OPPs.
+
+Properties:
+- operating-points: An array of 2-tuples, each consisting of a
+  frequency and voltage pair like <freq-kHz vol-uV>.
+       freq: clock frequency in kHz
+       vol: voltage in microvolt
+
+Examples:
+
+cpu@0 {
+       compatible = "arm,cortex-a9";
+       reg = <0>;
+       next-level-cache = <&L2>;
+       operating-points = <
+               /* kHz    uV */
+               792000  1100000
+               396000  950000
+               198000  850000
+       >;
+};
index b16f4a57d1116756b750a9f5523e1b1c19352ba2..11963e4d6bc478a6e70e20aa1f06a32a759dee90 100644 (file)
@@ -11,7 +11,7 @@ Example:
 
 pwm: pwm@80064000 {
        compatible = "fsl,imx28-pwm", "fsl,imx23-pwm";
-       reg = <0x80064000 2000>;
+       reg = <0x80064000 0x2000>;
        #pwm-cells = <2>;
        fsl,pwm-number = <8>;
 };
diff --git a/Documentation/devicetree/bindings/serial/fsl-imx-uart.txt b/Documentation/devicetree/bindings/serial/fsl-imx-uart.txt
new file mode 100644 (file)
index 0000000..c58573b
--- /dev/null
@@ -0,0 +1,35 @@
+* Freescale i.MX UART controller
+
+Required properties:
+- compatible : should be "fsl,imx21-uart"
+- reg : Address and length of the register set for the device
+- interrupts : Should contain UART interrupt number
+
+Optional properties:
+- fsl,uart-has-rtscts: indicate that RTS/CTS signals are used
+
+Note: Each uart controller should have an alias correctly numbered
+in "aliases" node.
+
+Example:
+
+- From imx51.dtsi:
+aliases {
+       serial0 = &uart1;
+       serial1 = &uart2;
+       serial2 = &uart3;
+};
+
+uart1: serial@73fbc000 {
+       compatible = "fsl,imx51-uart", "fsl,imx21-uart";
+       reg = <0x73fbc000 0x4000>;
+       interrupts = <31>;
+       status = "disabled";
+};
+
+- From imx51-babbage.dts:
+uart1: serial@73fbc000 {
+       fsl,uart-has-rtscts;
+       status = "okay";
+};
+
diff --git a/Documentation/devicetree/bindings/spi/mxs-spi.txt b/Documentation/devicetree/bindings/spi/mxs-spi.txt
new file mode 100644 (file)
index 0000000..e2e1395
--- /dev/null
@@ -0,0 +1,22 @@
+* Freescale MX233/MX28 SSP/SPI
+
+Required properties:
+- compatible: Should be "fsl,<soc>-spi", where soc is "imx23" or "imx28"
+- reg: Offset and length of the register set for the device
+- interrupts: Should contain SSP interrupts (error irq first, dma irq second)
+- fsl,ssp-dma-channel: APBX DMA channel for the SSP
+
+Optional properties:
+- clock-frequency : Input clock frequency to the SPI block in Hz.
+                   Default is 160000000 Hz.
+
+Example:
+
+ssp0: ssp@80010000 {
+       #address-cells = <1>;
+       #size-cells = <0>;
+       compatible = "fsl,imx28-spi";
+       reg = <0x80010000 0x2000>;
+       interrupts = <96 82>;
+       fsl,ssp-dma-channel = <0>;
+};
index e782add2e457df2b9e22d75f799900a4d9256e7f..d2c33d0f533ec8702938b017c470faf073db9032 100644 (file)
@@ -21,6 +21,9 @@ assumption that board specific platform code will be used to manage
 chip selects.  Individual drivers can define additional properties to
 support describing the chip select layout.
 
+Optional property:
+- num-cs : total number of chipselects
+
 SPI slave nodes must be children of the SPI master node and can
 contain the following properties.
 - reg             - (required) chip select address of device.
diff --git a/Documentation/devicetree/bindings/spi/spi-gpio.txt b/Documentation/devicetree/bindings/spi/spi-gpio.txt
new file mode 100644 (file)
index 0000000..8a824be
--- /dev/null
@@ -0,0 +1,29 @@
+SPI-GPIO devicetree bindings
+
+Required properties:
+
+ - compatible: should be set to "spi-gpio"
+ - #address-cells: should be set to <0x1>
+ - ranges
+ - gpio-sck: GPIO spec for the SCK line to use
+ - gpio-miso: GPIO spec for the MISO line to use
+ - gpio-mosi: GPIO spec for the MOSI line to use
+ - cs-gpios: GPIOs to use for chipselect lines
+ - num-chipselects: number of chipselect lines
+
+Example:
+
+       spi {
+               compatible = "spi-gpio";
+               #address-cells = <0x1>;
+               ranges;
+
+               gpio-sck = <&gpio 95 0>;
+               gpio-miso = <&gpio 98 0>;
+               gpio-mosi = <&gpio 97 0>;
+               cs-gpios = <&gpio 125 0>;
+               num-chipselects = <1>;
+
+               /* clients */
+       };
+
diff --git a/Documentation/devicetree/bindings/spi/spi-sc18is602.txt b/Documentation/devicetree/bindings/spi/spi-sc18is602.txt
new file mode 100644 (file)
index 0000000..02f9033
--- /dev/null
@@ -0,0 +1,23 @@
+NXP SC18IS602/SCIS603
+
+Required properties:
+       - compatible : Should be one of
+               "nxp,sc18is602"
+               "nxp,sc18is602b"
+               "nxp,sc18is603"
+       - reg: I2C bus address
+
+Optional properties:
+       - clock-frequency : external oscillator clock frequency. If not
+         specified, the SC18IS602 default frequency (7372000) will be used.
+
+The clock-frequency property is relevant and needed only if the chip has an
+external oscillator (SC18IS603).
+
+Example:
+
+       sc18is603@28 {
+               compatible = "nxp,sc18is603";
+               reg = <0x28>;
+               clock-frequency = <14744000>;
+       }
index 306ec3ff3c0e3be27a200ae5fdf215f483fe5eab..f158fd31cfda71a3ab69984c54cbeab3fa22bc61 100644 (file)
@@ -6,7 +6,29 @@ Required properties:
 - interrupts : Should contain SPI controller interrupt
 
 Optional properties:
+- num-cs : total number of chipselects
 - cs-gpios : should specify GPIOs used for chipselects.
   The gpios will be referred to as reg = <index> in the SPI child nodes.
   If unspecified, a single SPI device without a chip select can be used.
+- pl022,autosuspend-delay : delay in ms following transfer completion before
+                           the runtime power management system suspends the
+                           device. A setting of 0 indicates no delay and the
+                            device will be suspended immediately
+- pl022,rt : indicates the controller should run the message pump with realtime
+             priority to minimise the transfer latency on the bus (boolean)
+
+
+SPI slave nodes must be children of the SPI master node and can
+contain the following properties.
+
+- pl022,interface : interface type:
+       0: SPI
+       1: Texas Instruments Synchronous Serial Frame Format
+       2: Microwire (Half Duplex)
+- pl022,com-mode : polling, interrupt or dma
+- pl022,rx-level-trig : Rx FIFO watermark level
+- pl022,tx-level-trig : Tx FIFO watermark level
+- pl022,ctrl-len : Microwire interface: Control length
+- pl022,wait-state : Microwire interface: Wait state
+- pl022,duplex : Microwire interface: Full/Half duplex
 
index 26ebde77e821cecf11062032dfec702513eecd66..f7433355394abf95aa32ea2590e4fe749b32435e 100644 (file)
@@ -3,6 +3,7 @@ IBM's Journaled File System (JFS) for Linux
 JFS Homepage:  http://jfs.sourceforge.net/
 
 The following mount options are supported:
+(*) == default
 
 iocharset=name Character set to use for converting from Unicode to
                ASCII.  The default is to do no conversion.  Use
@@ -21,12 +22,12 @@ nointegrity Do not write to the journal.  The primary use of this option
                from backup media.  The integrity of the volume is not
                guaranteed if the system abnormally abends.
 
-integrity      Default.  Commit metadata changes to the journal.  Use this
-               option to remount a volume where the nointegrity option was
+integrity(*)   Commit metadata changes to the journal.  Use this option to
+               remount a volume where the nointegrity option was
                previously specified in order to restore normal behavior.
 
 errors=continue                Keep going on a filesystem error.
-errors=remount-ro      Default. Remount the filesystem read-only on an error.
+errors=remount-ro(*)   Remount the filesystem read-only on an error.
 errors=panic           Panic and halt the machine if an error occurs.
 
 uid=value      Override on-disk uid with specified value
@@ -35,7 +36,17 @@ umask=value  Override on-disk umask with specified octal value.  For
                directories, the execute bit will be set if the corresponding
                read bit is set.
 
-Please send bugs, comments, cards and letters to shaggy@linux.vnet.ibm.com.
+discard=minlen This enables/disables the use of discard/TRIM commands.
+discard                The discard/TRIM commands are sent to the underlying
+nodiscard(*)   block device when blocks are freed. This is useful for SSD
+               devices and sparse/thinly-provisioned LUNs.  The FITRIM ioctl
+               command is also available together with the nodiscard option.
+               The value of minlen specifies the minimum block count for
+               which a TRIM command to the block device is considered useful.
+               When no value is given to the discard option, it defaults to
+               64 blocks, which means 256KiB in JFS.
+               The minlen value of discard overrides the minlen value given
+               on an FITRIM ioctl().
 
 The JFS mailing list can be subscribed to by using the link labeled
 "Mail list Subscribe" at our web page http://jfs.sourceforge.net/
index ffdd9d866ad76cb4bb21553e238a45c8fd3d3c7d..2d66ed688125f894bae223c7e50fa85327ade534 100644 (file)
@@ -78,7 +78,8 @@ nfsroot=[<server-ip>:]<root-dir>[,<nfs-options>]
                        flags           = hard, nointr, noposix, cto, ac
 
 
-ip=<client-ip>:<server-ip>:<gw-ip>:<netmask>:<hostname>:<device>:<autoconf>
+ip=<client-ip>:<server-ip>:<gw-ip>:<netmask>:<hostname>:<device>:<autoconf>:
+   <dns0-ip>:<dns1-ip>
 
   This parameter tells the kernel how to configure IP addresses of devices
   and also how to set up the IP routing table. It was originally called
@@ -158,6 +159,13 @@ ip=<client-ip>:<server-ip>:<gw-ip>:<netmask>:<hostname>:<device>:<autoconf>
 
                 Default: any
 
+  <dns0-ip>    IP address of first nameserver.
+               The value is exported via /proc/net/pnp, to which
+               /etc/resolv.conf is often symlinked on embedded systems.
+
+  <dns1-ip>    IP address of second nameserver.
+               Same as above.
+
 
 nfsrootdebug
 
index 64eeb55d0c09d05f6ec5788a150ddcf2119fdfd8..f2cfe265e836e082727a4c5004d1fed264d5d526 100644 (file)
@@ -24,6 +24,9 @@ Partitions and P_Keys
   The P_Key for any interface is given by the "pkey" file, and the
   main interface for a subinterface is in "parent."
 
+  Child interfaces can also be created and deleted using IPoIB's
+  rtnl_link_ops; children created either way behave the same.
+
 Datagram vs Connected modes
 
   The IPoIB driver supports two modes of operation: datagram and
index df43807bb5daf788eebe34a3600c5ca9a78d0bfd..f777fa96243db4130771cb2dbd85e1a952ef683f 100644 (file)
@@ -1051,6 +1051,14 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
        ihash_entries=  [KNL]
                        Set number of hash buckets for inode cache.
 
+       ima_appraise=   [IMA] appraise integrity measurements
+                       Format: { "off" | "enforce" | "fix" }
+                       default: "enforce"
+
+       ima_appraise_tcb [IMA]
+                       The builtin appraise policy appraises all files
+                       owned by uid=0.
+
        ima_audit=      [IMA]
                        Format: { "0" | "1" }
                        0 -- integrity auditing messages. (Default)
@@ -1350,6 +1358,9 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        * nohrst, nosrst, norst: suppress hard, soft
                           and both resets.
 
+                       * rstonce: only attempt one reset during
+                         hot-unplug link recovery
+
                        * dump_id: dump IDENTIFY data.
 
                        If there are multiple matching configurations changing
index 8f3ae4a6147e2c114849296e37809317671db057..a173d2a879f5cf7619b221148edbbac4789bbe8a 100644 (file)
@@ -75,9 +75,10 @@ folder:
 
 There is a special folder for debugging information:
 
-#  ls /sys/kernel/debug/batman_adv/bat0/
-# bla_claim_table    log                socket             transtable_local
-# gateways           originators        transtable_global  vis_data
+# ls /sys/kernel/debug/batman_adv/bat0/
+# bla_backbone_table  log                 transtable_global
+# bla_claim_table     originators         transtable_local
+# gateways            socket              vis_data
 
 Some of the files contain all sort of status information  regard-
 ing  the  mesh  network.  For  example, you can view the table of
index 6b1c7110534e31bb2e04acd1505a07f6dd4db481..10a015c384b844b946ae8d7b71b81abc7fd33c79 100644 (file)
@@ -752,12 +752,22 @@ xmit_hash_policy
                protocol information to generate the hash.
 
                Uses XOR of hardware MAC addresses and IP addresses to
-               generate the hash.  The formula is
+               generate the hash.  The IPv4 formula is
 
                (((source IP XOR dest IP) AND 0xffff) XOR
                        ( source MAC XOR destination MAC ))
                                modulo slave count
 
+               The IPv6 formula is
+
+               hash = (source ip quad 2 XOR dest IP quad 2) XOR
+                      (source ip quad 3 XOR dest IP quad 3) XOR
+                      (source ip quad 4 XOR dest IP quad 4)
+
+               (((hash >> 24) XOR (hash >> 16) XOR (hash >> 8) XOR hash)
+                       XOR (source MAC XOR destination MAC))
+                               modulo slave count
+
                This algorithm will place all traffic to a particular
                network peer on the same slave.  For non-IP traffic,
                the formula is the same as for the layer2 transmit
@@ -778,19 +788,29 @@ xmit_hash_policy
                slaves, although a single connection will not span
                multiple slaves.
 
-               The formula for unfragmented TCP and UDP packets is
+               The formula for unfragmented IPv4 TCP and UDP packets is
 
                ((source port XOR dest port) XOR
                         ((source IP XOR dest IP) AND 0xffff)
                                modulo slave count
 
-               For fragmented TCP or UDP packets and all other IP
-               protocol traffic, the source and destination port
+               The formula for unfragmented IPv6 TCP and UDP packets is
+
+               hash = (source port XOR dest port) XOR
+                      ((source ip quad 2 XOR dest IP quad 2) XOR
+                       (source ip quad 3 XOR dest IP quad 3) XOR
+                       (source ip quad 4 XOR dest IP quad 4))
+
+               ((hash >> 24) XOR (hash >> 16) XOR (hash >> 8) XOR hash)
+                       modulo slave count
+
+               For fragmented TCP or UDP packets and all other IPv4 and
+               IPv6 protocol traffic, the source and destination port
                information is omitted.  For non-IP traffic, the
                formula is the same as for the layer2 transmit hash
                policy.
 
-               This policy is intended to mimic the behavior of
+               The IPv4 policy is intended to mimic the behavior of
                certain switches, notably Cisco switches with PFC2 as
                well as some Foundry and IBM products.
 
index ca447b35b8333106cdd19649d943d80cdb12cc1e..c7fc10724948629c98100b19250f8ee07d9dcd73 100644 (file)
@@ -439,7 +439,9 @@ tcp_stdurg - BOOLEAN
 tcp_synack_retries - INTEGER
        Number of times SYNACKs for a passive TCP connection attempt will
        be retransmitted. Should not be higher than 255. Default value
-       is 5, which corresponds to ~180seconds.
+       is 5, which corresponds to 31 seconds till the last retransmission
+       with the current initial RTO of 1 second. With this the final timeout
+       for a passive TCP connection will happen after 63 seconds.
 
 tcp_syncookies - BOOLEAN
        Only valid when the kernel was compiled with CONFIG_SYNCOOKIES
@@ -465,20 +467,37 @@ tcp_syncookies - BOOLEAN
 tcp_fastopen - INTEGER
        Enable TCP Fast Open feature (draft-ietf-tcpm-fastopen) to send data
        in the opening SYN packet. To use this feature, the client application
-       must not use connect(). Instead, it should use sendmsg() or sendto()
-       with MSG_FASTOPEN flag which performs a TCP handshake automatically.
-
-       The values (bitmap) are:
-       1: Enables sending data in the opening SYN on the client
-       5: Enables sending data in the opening SYN on the client regardless
-          of cookie availability.
+       must use sendmsg() or sendto() with MSG_FASTOPEN flag rather than
+       connect() to perform a TCP handshake automatically.
+
+       The values (bitmap) are
+       1: Enables sending data in the opening SYN on the client.
+       2: Enables TCP Fast Open on the server side, i.e., allowing data in
+          a SYN packet to be accepted and passed to the application before
+          3-way handshake finishes.
+       4: Send data in the opening SYN regardless of cookie availability and
+          without a cookie option.
+       0x100: Accept SYN data w/o validating the cookie.
+       0x200: Accept data-in-SYN w/o any cookie option present.
+       0x400/0x800: Enable Fast Open on all listeners regardless of the
+          TCP_FASTOPEN socket option. The two different flags designate two
+          different ways of setting max_qlen without the TCP_FASTOPEN socket
+          option.
 
        Default: 0
 
+       Note that the client & server side Fast Open flags (1 and 2
+       respectively) must be also enabled before the rest of flags can take
+       effect.
+
+       See include/net/tcp.h and the code for more details.
+
 tcp_syn_retries - INTEGER
        Number of times initial SYNs for an active TCP connection attempt
        will be retransmitted. Should not be higher than 255. Default value
-       is 5, which corresponds to ~180seconds.
+       is 6, which corresponds to 63 seconds till the last retransmission
+       with the current initial RTO of 1 second. With this the final timeout
+       for an active TCP connection attempt will happen after 127 seconds.
 
 tcp_timestamps - BOOLEAN
        Enable timestamps as defined in RFC1323.
index c676b9cedbd0d90a802ce82c09e49a219560570c..ef9ee71b4d7fcc71a38dc94454828e74a606cded 100644 (file)
@@ -173,7 +173,6 @@ Where:
 For MDIO bus The we have:
 
  struct stmmac_mdio_bus_data {
-       int bus_id;
        int (*phy_reset)(void *priv);
        unsigned int phy_mask;
        int *irqs;
@@ -181,7 +180,6 @@ For MDIO bus The we have:
  };
 
 Where:
- o bus_id: bus identifier;
  o phy_reset: hook to reset the phy device attached to the bus.
  o phy_mask: phy mask passed when register the MDIO bus within the driver.
  o irqs: list of IRQs, one per PHY.
@@ -230,9 +228,6 @@ there are two MAC cores: one MAC is for MDIO Bus/PHY emulation
 with fixed_link support.
 
 static struct stmmac_mdio_bus_data stmmac1_mdio_bus = {
-       .bus_id = 1,
-               |
-               |-> phy device on the bus_id 1
        .phy_reset = phy_reset;
                |
                |-> function to provide the phy_reset on this board
diff --git a/Documentation/networking/vxlan.txt b/Documentation/networking/vxlan.txt
new file mode 100644 (file)
index 0000000..5b34b76
--- /dev/null
@@ -0,0 +1,47 @@
+Virtual eXtensible Local Area Networking documentation
+======================================================
+
+The VXLAN protocol is a tunnelling protocol that is designed to
+solve the problem of limited number of available VLAN's (4096).
+With VXLAN the identifier is expanded to 24 bits.
+
+It is a draft RFC standard, that is implemented by Cisco Nexus,
+Vmware and Brocade. The protocol runs over UDP using a single
+destination port (still not standardized by IANA).
+This document describes the Linux kernel tunnel device,
+there is also an implementation of VXLAN for Open vSwitch.
+
+Unlike most tunnels, a VXLAN is a 1 to N network, not just point
+to point. A VXLAN device can either dynamically learn the IP address
+of the other end, in a manner similar to a learning bridge, or the
+forwarding entries can be configured statically.
+
+The management of vxlan is done in a similar fashion to its
+two closest neighbors GRE and VLAN. Configuring VXLAN requires
+the version of iproute2 that matches the kernel release
+where VXLAN was first merged upstream.
+
+1. Create vxlan device
+  # ip li add vxlan0 type vxlan id 42 group 239.1.1.1 dev eth1
+
+This creates a new device (vxlan0). The device uses the
+multicast group 239.1.1.1 over eth1 to handle packets where
+no entry is in the forwarding table.
+
+2. Delete vxlan device
+  # ip link delete vxlan0
+
+3. Show vxlan info
+  # ip -d link show vxlan0
+
+It is possible to create, destroy and display the vxlan
+forwarding table using the new bridge command.
+
+1. Create forwarding table entry
+  # bridge fdb add to 00:17:42:8a:b4:05 dst 192.19.0.2 dev vxlan0
+
+2. Delete forwarding table entry
+  # bridge fdb delete 00:17:42:8a:b4:05
+
+3. Show forwarding table
+  # bridge fdb show dev vxlan0
index 1479aca2374441976c14668b7bc7e523fbb2211f..3b4ee5328868cea31b024ad9631a286886826c60 100644 (file)
@@ -289,6 +289,11 @@ Interaction with the GPIO subsystem
 The GPIO drivers may want to perform operations of various types on the same
 physical pins that are also registered as pin controller pins.
 
+First and foremost, the two subsystems can be used as completely orthogonal,
+see the section named "pin control requests from drivers" and
+"drivers needing both pin control and GPIOs" below for details. But in some
+situations a cross-subsystem mapping between pins and GPIOs is needed.
+
 Since the pin controller subsystem have its pinspace local to the pin
 controller we need a mapping so that the pin control subsystem can figure out
 which pin controller handles control of a certain GPIO pin. Since a single
@@ -359,6 +364,7 @@ will get an pin number into its handled number range. Further it is also passed
 the range ID value, so that the pin controller knows which range it should
 deal with.
 
+
 PINMUX interfaces
 =================
 
@@ -960,8 +966,8 @@ all get selected, and they all get enabled and disable simultaneously by the
 pinmux core.
 
 
-Pinmux requests from drivers
-============================
+Pin control requests from drivers
+=================================
 
 Generally it is discouraged to let individual drivers get and enable pin
 control. So if possible, handle the pin control in platform code or some other
@@ -969,6 +975,11 @@ place where you have access to all the affected struct device * pointers. In
 some cases where a driver needs to e.g. switch between different mux mappings
 at runtime this is not possible.
 
+A typical case is if a driver needs to switch bias of pins from normal
+operation and going to sleep, moving from the PINCTRL_STATE_DEFAULT to
+PINCTRL_STATE_SLEEP at runtime, re-biasing or even re-muxing pins to save
+current in sleep mode.
+
 A driver may request a certain control state to be activated, usually just the
 default state like this:
 
@@ -1058,6 +1069,51 @@ registered. Thus make sure that the error path in your driver gracefully
 cleans up and is ready to retry the probing later in the startup process.
 
 
+Drivers needing both pin control and GPIOs
+==========================================
+
+Again, it is discouraged to let drivers lookup and select pin control states
+themselves, but again sometimes this is unavoidable.
+
+So say that your driver is fetching its resources like this:
+
+#include <linux/pinctrl/consumer.h>
+#include <linux/gpio.h>
+
+struct pinctrl *pinctrl;
+int gpio;
+
+pinctrl = devm_pinctrl_get_select_default(&dev);
+gpio = devm_gpio_request(&dev, 14, "foo");
+
+Here we first request a certain pin state and then request GPIO 14 to be
+used. If you're using the subsystems orthogonally like this, you should
+nominally always get your pinctrl handle and select the desired pinctrl
+state BEFORE requesting the GPIO. This is a semantic convention to avoid
+situations that can be electrically unpleasant, you will certainly want to
+mux in and bias pins in a certain way before the GPIO subsystems starts to
+deal with them.
+
+The above can be hidden: using pinctrl hogs, the pin control driver may be
+setting up the config and muxing for the pins when it is probing,
+nevertheless orthogonal to the GPIO subsystem.
+
+But there are also situations where it makes sense for the GPIO subsystem
+to communicate directly with the pinctrl subsystem, using the latter
+as a back-end. This is when the GPIO driver may call out to the functions
+described in the section "Pin control interaction with the GPIO subsystem"
+above. This only involves per-pin multiplexing, and will be completely
+hidden behind the gpio_*() function namespace. In this case, the driver
+need not interact with the pin control subsystem at all.
+
+If a pin control driver and a GPIO driver is dealing with the same pins
+and the use cases involve multiplexing, you MUST implement the pin controller
+as a back-end for the GPIO driver like this, unless your hardware design
+is such that the GPIO controller can override the pin controller's
+multiplexing state through hardware without the need to interact with the
+pin control system.
+
+
 System pin control hogging
 ==========================
 
index 80441ab608e4f04f530119db621ec9e7941545ac..3a3079411a3dc953c59cb75b8391906b2fb74df3 100644 (file)
@@ -1,3 +1,13 @@
+Release Date    : Tue. Jun 17, 2012 17:00:00 PST 2012 -
+                       (emaild-id:megaraidlinux@lsi.com)
+                       Adam Radford/Kashyap Desai
+Current Version : 00.00.06.18-rc1
+Old Version     : 00.00.06.15-rc1
+    1. Fix Copyright dates.
+    2. Add throttlequeuedepth module parameter.
+    3. Add resetwaittime module parameter.
+    4. Move poll_aen_lock initializer.
+-------------------------------------------------------------------------------
 Release Date    : Mon. Mar 19, 2012 17:00:00 PST 2012 -
                        (emaild-id:megaraidlinux@lsi.com)
                        Adam Radford
index ce0fdf349a8159b8f68c0b110567578d9503514f..27a91cf43d6d66ce4137e9335988175b096423a4 100644 (file)
@@ -1,4 +1,4 @@
-Copyright (c) 2003-2011 QLogic Corporation
+Copyright (c) 2003-2012 QLogic Corporation
 QLogic Linux FC-FCoE Driver
 
 This program includes a device driver for Linux 3.x.
index ab899591ecb7a93dfe6c8b936042c3b8f400bd42..78c169f0d7c6b5ac0988195bb10d47446993ce4f 100644 (file)
@@ -1,4 +1,4 @@
-Copyright (c) 2003-2011 QLogic Corporation
+Copyright (c) 2003-2012 QLogic Corporation
 QLogic Linux iSCSI Driver
 
 This program includes a device driver for Linux 3.x.
index 685bf3582abe6104f367a76d48e03c5185bce911..f346abbdd6ff3a7aa16acb197e4dbe7b57d469ea 100644 (file)
@@ -112,10 +112,8 @@ attempted).
 
 MINOR NUMBERS
 
-The tape driver currently supports 128 drives by default. This number
-can be increased by editing st.h and recompiling the driver if
-necessary. The upper limit is 2^17 drives if 4 modes for each drive
-are used.
+The tape driver currently supports up to 2^17 drives if 4 modes for
+each drive are used.
 
 The minor numbers consist of the following bit fields:
 
index a416479b8a1c23d6755cd5354027159af832a4dc..8a177e4b6e21237e0e03f0410acb70d9282ce014 100644 (file)
@@ -28,12 +28,11 @@ Smack kernels use the CIPSO IP option. Some network
 configurations are intolerant of IP options and can impede
 access to systems that use them as Smack does.
 
-The current git repositories for Smack user space are:
+The current git repository for Smack user space is:
 
-       git@gitorious.org:meego-platform-security/smackutil.git
-       git@gitorious.org:meego-platform-security/libsmack.git
+       git://github.com/smack-team/smack.git
 
-These should make and install on most modern distributions.
+This should make and install on most modern distributions.
 There are three commands included in smackutil:
 
 smackload  - properly formats data for writing to /smack/load
@@ -194,6 +193,9 @@ onlycap
        these capabilities are effective at for processes with any
        label. The value is set by writing the desired label to the
        file or cleared by writing "-" to the file.
+revoke-subject
+       Writing a Smack label here sets the access to '-' for all access
+       rules with that subject label.
 
 You can add access rules in /etc/smack/accesses. They take the form:
 
diff --git a/Documentation/spi/spi-sc18is602 b/Documentation/spi/spi-sc18is602
new file mode 100644 (file)
index 0000000..a457028
--- /dev/null
@@ -0,0 +1,36 @@
+Kernel driver spi-sc18is602
+===========================
+
+Supported chips:
+  * NXP SI18IS602/602B/603
+    Datasheet: http://www.nxp.com/documents/data_sheet/SC18IS602_602B_603.pdf
+
+Author:
+        Guenter Roeck <linux@roeck-us.net>
+
+
+Description
+-----------
+
+This driver provides connects a NXP SC18IS602/603 I2C-bus to SPI bridge to the
+kernel's SPI core subsystem.
+
+The driver does not probe for supported chips, since the SI18IS602/603 does not
+support Chip ID registers. You will have to instantiate the devices explicitly.
+Please see Documentation/i2c/instantiating-devices for details.
+
+
+Usage Notes
+-----------
+
+This driver requires the I2C adapter driver to support raw I2C messages. I2C
+adapter drivers which can only handle the SMBus protocol are not supported.
+
+The maximum SPI message size supported by SC18IS602/603 is 200 bytes. Attempts
+to initiate longer transfers will fail with -EINVAL. EEPROM read operations and
+similar large accesses have to be split into multiple chunks of no more than
+200 bytes per SPI message (128 bytes of data per message is recommended). This
+means that programs such as "cp" or "od", which automatically use large block
+sizes to access a device, can not be used directly to read data from EEPROM.
+Programs such as dd, where the block size can be specified, should be used
+instead.
index 8c22b7f6f41ab3bef0007731e277adf1be252aa9..d919e3d4a0db027c86c31b039f4183acddc5352b 100644 (file)
@@ -1650,7 +1650,6 @@ F:        drivers/bcma/
 F:     include/linux/bcma/
 
 BROCADE BFA FC SCSI DRIVER
-M:     Jing Huang <huangj@brocade.com>
 M:     Krishna C Gudipati <kgudipat@brocade.com>
 L:     linux-scsi@vger.kernel.org
 S:     Supported
@@ -2632,6 +2631,18 @@ T:       git git://git.alsa-project.org/alsa-kernel.git
 S:     Maintained
 F:     sound/usb/misc/ua101.c
 
+EXTENSIBLE FIRMWARE INTERFACE (EFI)
+M:     Matt Fleming <matt.fleming@intel.com>
+L:     linux-efi@vger.kernel.org
+S:     Maintained
+F:     Documentation/x86/efi-stub.txt
+F:     arch/ia64/kernel/efi.c
+F:     arch/x86/boot/compressed/eboot.[ch]
+F:     arch/x86/include/asm/efi.h
+F:     arch/x86/platform/efi/*
+F:     drivers/firmware/efivars.c
+F:     include/linux/efi*.h
+
 EFIFB FRAMEBUFFER DRIVER
 L:     linux-fbdev@vger.kernel.org
 M:     Peter Jones <pjones@redhat.com>
@@ -3438,6 +3449,13 @@ L:       netdev@vger.kernel.org
 S:     Supported
 F:     drivers/net/ethernet/ibm/ibmveth.*
 
+IBM Power Virtual SCSI/FC Device Drivers
+M:     Robert Jennings <rcj@linux.vnet.ibm.com>
+L:     linux-scsi@vger.kernel.org
+S:     Supported
+F:     drivers/scsi/ibmvscsi/
+X:     drivers/scsi/ibmvscsi/ibmvstgt.c
+
 IBM ServeRAID RAID DRIVER
 P:     Jack Hammer
 M:     Dave Jeffery <ipslinux@adaptec.com>
@@ -4806,6 +4824,7 @@ M:        Lauro Ramos Venancio <lauro.venancio@openbossa.org>
 M:     Aloisio Almeida Jr <aloisio.almeida@openbossa.org>
 M:     Samuel Ortiz <sameo@linux.intel.com>
 L:     linux-wireless@vger.kernel.org
+L:     linux-nfc@lists.01.org (moderated for non-subscribers)
 S:     Maintained
 F:     net/nfc/
 F:     include/linux/nfc.h
@@ -5048,6 +5067,7 @@ S:        Maintained
 F:     Documentation/devicetree
 F:     drivers/of
 F:     include/linux/of*.h
+F:     scripts/dtc
 K:     of_get_property
 K:     of_match_table
 
index bc1acdda7a5ed8ab3945b72559c09a9e763ec030..63e77e3944ced280724c964b7690502341383695 100644 (file)
@@ -145,27 +145,24 @@ SYSCALL_DEFINE4(osf_getdirentries, unsigned int, fd,
                long __user *, basep)
 {
        int error;
-       struct file *file;
+       struct fd arg = fdget(fd);
        struct osf_dirent_callback buf;
 
-       error = -EBADF;
-       file = fget(fd);
-       if (!file)
-               goto out;
+       if (!arg.file)
+               return -EBADF;
 
        buf.dirent = dirent;
        buf.basep = basep;
        buf.count = count;
        buf.error = 0;
 
-       error = vfs_readdir(file, osf_filldir, &buf);
+       error = vfs_readdir(arg.file, osf_filldir, &buf);
        if (error >= 0)
                error = buf.error;
        if (count != buf.count)
                error = count - buf.count;
 
-       fput(file);
- out:
+       fdput(arg);
        return error;
 }
 
@@ -278,8 +275,8 @@ linux_to_osf_stat(struct kstat *lstat, struct osf_stat __user *osf_stat)
        tmp.st_dev      = lstat->dev;
        tmp.st_mode     = lstat->mode;
        tmp.st_nlink    = lstat->nlink;
-       tmp.st_uid      = lstat->uid;
-       tmp.st_gid      = lstat->gid;
+       tmp.st_uid      = from_kuid_munged(current_user_ns(), lstat->uid);
+       tmp.st_gid      = from_kgid_munged(current_user_ns(), lstat->gid);
        tmp.st_rdev     = lstat->rdev;
        tmp.st_ldev     = lstat->rdev;
        tmp.st_size     = lstat->size;
index 9d72ed67e43268ed1f679bcaafd14f669da79e79..7bab17ed29729b84c464be57d6aa9e76a9c7369d 100644 (file)
@@ -891,6 +891,7 @@ config ARCH_NOMADIK
        select COMMON_CLK
        select GENERIC_CLOCKEVENTS
        select PINCTRL
+       select PINCTRL_STN8815
        select MIGHT_HAVE_CACHE_L2X0
        select ARCH_REQUIRE_GPIOLIB
        help
index 9fecf1ae777bac14b2770c0cf09dd601e85b73b8..0c6fc34821f93f336fd705626c35da8b9cbba9f3 100644 (file)
                        compatible = "calxeda,hb-ahci";
                        reg = <0xffe08000 0x10000>;
                        interrupts = <0 83 4>;
+                       calxeda,port-phys = <&combophy5 0 &combophy0 0
+                                            &combophy0 1 &combophy0 2
+                                            &combophy0 3>;
+                       dma-coherent;
                };
 
                sdhci@ffe0e000 {
                        reg = <0xfff51000 0x1000>;
                        interrupts = <0 80 4  0 81 4  0 82 4>;
                };
+
+               combophy0: combo-phy@fff58000 {
+                       compatible = "calxeda,hb-combophy";
+                       #phy-cells = <1>;
+                       reg = <0xfff58000 0x1000>;
+                       phydev = <5>;
+               };
+
+               combophy5: combo-phy@fff5d000 {
+                       compatible = "calxeda,hb-combophy";
+                       #phy-cells = <1>;
+                       reg = <0xfff5d000 0x1000>;
+                       phydev = <31>;
+               };
        };
 };
index 5d0c66708960c7d4f2cf4d20dc31fc205f0c6c88..c88b57886e791906f8f21459df939718a95be0b6 100644 (file)
@@ -23,7 +23,6 @@ CONFIG_MODULE_UNLOAD=y
 # CONFIG_IOSCHED_DEADLINE is not set
 # CONFIG_IOSCHED_CFQ is not set
 CONFIG_ARCH_SHMOBILE=y
-CONFIG_KEYBOARD_GPIO_POLLED=y
 CONFIG_ARCH_SH73A0=y
 CONFIG_MACH_KZM9G=y
 CONFIG_MEMORY_START=0x41000000
@@ -71,6 +70,7 @@ CONFIG_INPUT_SPARSEKMAP=y
 # CONFIG_INPUT_MOUSEDEV is not set
 CONFIG_INPUT_EVDEV=y
 # CONFIG_KEYBOARD_ATKBD is not set
+CONFIG_KEYBOARD_GPIO=y
 # CONFIG_INPUT_MOUSE is not set
 CONFIG_INPUT_TOUCHSCREEN=y
 CONFIG_TOUCHSCREEN_ST1232=y
index ecf2531523a11bbcf34c8fd1b48ca3d7be5041dc..b4384af1bea66688b8464c171e3c2bc924a73a2c 100644 (file)
@@ -39,7 +39,7 @@ CONFIG_MTD_NAND=y
 CONFIG_MTD_NAND_ATMEL=y
 CONFIG_MTD_NAND_PLATFORM=y
 CONFIG_MTD_UBI=y
-CONFIG_MTD_UBI_BEB_RESERVE=3
+CONFIG_MTD_UBI_BEB_LIMIT=25
 CONFIG_MTD_UBI_GLUEBI=y
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
index 05112380dc5398dd47cbd9fb6adf564f96a4c94c..8dcd9c702d90c9c352d85595d0ea8f83a42f215e 100644 (file)
 #define rmb()          dsb()
 #define wmb()          mb()
 #else
-#include <asm/memory.h>
-#define mb()   do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
-#define rmb()  do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
-#define wmb()  do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
+#define mb()           barrier()
+#define rmb()          barrier()
+#define wmb()          barrier()
 #endif
 
 #ifndef CONFIG_SMP
index 5c44dcb0987bd31418298f7932baaa5a82320477..23004847bb057becd348b5d1e870235b633acedd 100644 (file)
@@ -13,6 +13,7 @@
 
 #define DMA_ERROR_CODE (~0)
 extern struct dma_map_ops arm_dma_ops;
+extern struct dma_map_ops arm_coherent_dma_ops;
 
 static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 {
index 5f6ddcc56452998f40b1c16d7e20a0ff1ec010bd..73cf03aa981e1b665d9a40f4f80847335ca49441 100644 (file)
@@ -275,14 +275,6 @@ static inline __deprecated void *bus_to_virt(unsigned long x)
 #define virt_to_page(kaddr)    pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
 #define virt_addr_valid(kaddr) ((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory)
 
-/*
- * Optional coherency support.  Currently used only by selected
- * Intel XSC3-based systems.
- */
-#ifndef arch_is_coherent
-#define arch_is_coherent()             0
-#endif
-
 #endif
 
 #include <asm-generic/memory_model.h>
index aa4ffe6e5ecfbd9a8feeedf1c002ca50861ae541..dea7a925c7e249375a86c9c943302a926c865433 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/percpu.h>
 #include <linux/clockchips.h>
 #include <linux/completion.h>
+#include <linux/cpufreq.h>
 
 #include <linux/atomic.h>
 #include <asm/smp.h>
@@ -650,3 +651,56 @@ int setup_profiling_timer(unsigned int multiplier)
 {
        return -EINVAL;
 }
+
+#ifdef CONFIG_CPU_FREQ
+
+static DEFINE_PER_CPU(unsigned long, l_p_j_ref);
+static DEFINE_PER_CPU(unsigned long, l_p_j_ref_freq);
+static unsigned long global_l_p_j_ref;
+static unsigned long global_l_p_j_ref_freq;
+
+static int cpufreq_callback(struct notifier_block *nb,
+                                       unsigned long val, void *data)
+{
+       struct cpufreq_freqs *freq = data;
+       int cpu = freq->cpu;
+
+       if (freq->flags & CPUFREQ_CONST_LOOPS)
+               return NOTIFY_OK;
+
+       if (!per_cpu(l_p_j_ref, cpu)) {
+               per_cpu(l_p_j_ref, cpu) =
+                       per_cpu(cpu_data, cpu).loops_per_jiffy;
+               per_cpu(l_p_j_ref_freq, cpu) = freq->old;
+               if (!global_l_p_j_ref) {
+                       global_l_p_j_ref = loops_per_jiffy;
+                       global_l_p_j_ref_freq = freq->old;
+               }
+       }
+
+       if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
+           (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
+           (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
+               loops_per_jiffy = cpufreq_scale(global_l_p_j_ref,
+                                               global_l_p_j_ref_freq,
+                                               freq->new);
+               per_cpu(cpu_data, cpu).loops_per_jiffy =
+                       cpufreq_scale(per_cpu(l_p_j_ref, cpu),
+                                       per_cpu(l_p_j_ref_freq, cpu),
+                                       freq->new);
+       }
+       return NOTIFY_OK;
+}
+
+static struct notifier_block cpufreq_notifier = {
+       .notifier_call  = cpufreq_callback,
+};
+
+static int __init register_cpufreq_notifier(void)
+{
+       return cpufreq_register_notifier(&cpufreq_notifier,
+                                               CPUFREQ_TRANSITION_NOTIFIER);
+}
+core_initcall(register_cpufreq_notifier);
+
+#endif
index af1da34ccf9d46e205e8c6e2dd8ff6fa61af8d1f..40e36a50304c9bdb8994fc0e9ac91b5c06a44578 100644 (file)
@@ -15,6 +15,7 @@
  */
 #include <linux/clk.h>
 #include <linux/clkdev.h>
+#include <linux/dma-mapping.h>
 #include <linux/io.h>
 #include <linux/irq.h>
 #include <linux/irqdomain.h>
@@ -23,6 +24,7 @@
 #include <linux/of_platform.h>
 #include <linux/of_address.h>
 #include <linux/smp.h>
+#include <linux/amba/bus.h>
 
 #include <asm/cacheflush.h>
 #include <asm/smp_plat.h>
@@ -149,11 +151,61 @@ static void highbank_power_off(void)
                cpu_do_idle();
 }
 
+static int highbank_platform_notifier(struct notifier_block *nb,
+                                 unsigned long event, void *__dev)
+{
+       struct resource *res;
+       int reg = -1;
+       struct device *dev = __dev;
+
+       if (event != BUS_NOTIFY_ADD_DEVICE)
+               return NOTIFY_DONE;
+
+       if (of_device_is_compatible(dev->of_node, "calxeda,hb-ahci"))
+               reg = 0xc;
+       else if (of_device_is_compatible(dev->of_node, "calxeda,hb-sdhci"))
+               reg = 0x18;
+       else if (of_device_is_compatible(dev->of_node, "arm,pl330"))
+               reg = 0x20;
+       else if (of_device_is_compatible(dev->of_node, "calxeda,hb-xgmac")) {
+               res = platform_get_resource(to_platform_device(dev),
+                                           IORESOURCE_MEM, 0);
+               if (res) {
+                       if (res->start == 0xfff50000)
+                               reg = 0;
+                       else if (res->start == 0xfff51000)
+                               reg = 4;
+               }
+       }
+
+       if (reg < 0)
+               return NOTIFY_DONE;
+
+       if (of_property_read_bool(dev->of_node, "dma-coherent")) {
+               writel(0xff31, sregs_base + reg);
+               set_dma_ops(dev, &arm_coherent_dma_ops);
+       } else
+               writel(0, sregs_base + reg);
+
+       return NOTIFY_OK;
+}
+
+static struct notifier_block highbank_amba_nb = {
+       .notifier_call = highbank_platform_notifier,
+};
+
+static struct notifier_block highbank_platform_nb = {
+       .notifier_call = highbank_platform_notifier,
+};
+
 static void __init highbank_init(void)
 {
        pm_power_off = highbank_power_off;
        highbank_pm_init();
 
+       bus_register_notifier(&platform_bus_type, &highbank_platform_nb);
+       bus_register_notifier(&amba_bustype, &highbank_amba_nb);
+
        of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
 }
 
index 92a00260d074142c2db33f43716d077d8f303511..bfa1eab91f4142cecb3bdd1ce4c4ec1b8e9a9698 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/mtd/partitions.h>
 #include <linux/i2c.h>
 #include <linux/io.h>
+#include <linux/pinctrl/machine.h>
 #include <asm/hardware/vic.h>
 #include <asm/sizes.h>
 #include <asm/mach-types.h>
@@ -33,6 +34,7 @@
 
 #include <plat/gpio-nomadik.h>
 #include <plat/mtu.h>
+#include <plat/pincfg.h>
 
 #include <linux/platform_data/mtd-nomadik-nand.h>
 #include <mach/fsmc.h>
@@ -290,8 +292,42 @@ static struct i2c_board_info __initdata nhk8815_i2c2_devices[] = {
        },
 };
 
+static unsigned long out_low[] = { PIN_OUTPUT_LOW };
+static unsigned long out_high[] = { PIN_OUTPUT_HIGH };
+static unsigned long in_nopull[] = { PIN_INPUT_NOPULL };
+static unsigned long in_pullup[] = { PIN_INPUT_PULLUP };
+
+static struct pinctrl_map __initdata nhk8815_pinmap[] = {
+       PIN_MAP_MUX_GROUP_DEFAULT("uart0", "pinctrl-stn8815", "u0_a_1", "u0"),
+       PIN_MAP_MUX_GROUP_DEFAULT("uart1", "pinctrl-stn8815", "u1_a_1", "u1"),
+       /* Hog in MMC/SD card mux */
+       PIN_MAP_MUX_GROUP_HOG_DEFAULT("pinctrl-stn8815", "mmcsd_a_1", "mmcsd"),
+       /* MCCLK */
+       PIN_MAP_CONFIGS_PIN_HOG_DEFAULT("pinctrl-stn8815", "GPIO8_B10", out_low),
+       /* MCCMD */
+       PIN_MAP_CONFIGS_PIN_HOG_DEFAULT("pinctrl-stn8815", "GPIO9_A10", in_pullup),
+       /* MCCMDDIR */
+       PIN_MAP_CONFIGS_PIN_HOG_DEFAULT("pinctrl-stn8815", "GPIO10_C11", out_high),
+       /* MCDAT3-0 */
+       PIN_MAP_CONFIGS_PIN_HOG_DEFAULT("pinctrl-stn8815", "GPIO11_B11", in_pullup),
+       PIN_MAP_CONFIGS_PIN_HOG_DEFAULT("pinctrl-stn8815", "GPIO12_A11", in_pullup),
+       PIN_MAP_CONFIGS_PIN_HOG_DEFAULT("pinctrl-stn8815", "GPIO13_C12", in_pullup),
+       PIN_MAP_CONFIGS_PIN_HOG_DEFAULT("pinctrl-stn8815", "GPIO14_B12", in_pullup),
+       /* MCDAT0DIR */
+       PIN_MAP_CONFIGS_PIN_HOG_DEFAULT("pinctrl-stn8815", "GPIO15_A12", out_high),
+       /* MCDAT31DIR */
+       PIN_MAP_CONFIGS_PIN_HOG_DEFAULT("pinctrl-stn8815", "GPIO16_C13", out_high),
+       /* MCMSFBCLK */
+       PIN_MAP_CONFIGS_PIN_HOG_DEFAULT("pinctrl-stn8815", "GPIO24_C15", in_pullup),
+       /* CD input GPIO */
+       PIN_MAP_CONFIGS_PIN_HOG_DEFAULT("pinctrl-stn8815", "GPIO111_H21", in_nopull),
+       /* CD bias drive */
+       PIN_MAP_CONFIGS_PIN_HOG_DEFAULT("pinctrl-stn8815", "GPIO112_J21", out_low),
+};
+
 static void __init nhk8815_platform_init(void)
 {
+       pinctrl_register_mappings(nhk8815_pinmap, ARRAY_SIZE(nhk8815_pinmap));
        cpu8815_platform_init();
        nhk8815_onenand_init();
        platform_add_devices(nhk8815_platform_devices,
index 6fd8e46567a4e602fc15bed22f97f8661e7dc1c5..b617eaed0ce58695ac709b460ac29b8ac2d4b1d6 100644 (file)
@@ -83,6 +83,18 @@ void cpu8815_add_gpios(resource_size_t *base, int num, int irq,
        }
 }
 
+static inline void
+cpu8815_add_pinctrl(struct device *parent, const char *name)
+{
+       struct platform_device_info pdevinfo = {
+               .parent = parent,
+               .name = name,
+               .id = -1,
+       };
+
+       platform_device_register_full(&pdevinfo);
+}
+
 static int __init cpu8815_init(void)
 {
        struct nmk_gpio_platform_data pdata = {
@@ -91,6 +103,7 @@ static int __init cpu8815_init(void)
 
        cpu8815_add_gpios(cpu8815_gpio_base, ARRAY_SIZE(cpu8815_gpio_base),
                          IRQ_GPIO0, &pdata);
+       cpu8815_add_pinctrl(NULL, "pinctrl-stn8815");
        amba_apb_device_add(NULL, "rng", NOMADIK_RNG_BASE, SZ_4K, 0, 0, NULL, 0);
        amba_apb_device_add(NULL, "rtc-pl031", NOMADIK_RTC_BASE, SZ_4K, IRQ_RTC_RTT, 0, NULL, 0);
        return 0;
index 9a154bad19843a2586cbd773dbef74144ebcdc02..5a406f7947989932736202d9c9d0f962d5124d4a 100644 (file)
@@ -579,8 +579,8 @@ static int sharpsl_ac_check(void)
 static int sharpsl_pm_suspend(struct platform_device *pdev, pm_message_t state)
 {
        sharpsl_pm.flags |= SHARPSL_SUSPENDED;
-       flush_delayed_work_sync(&toggle_charger);
-       flush_delayed_work_sync(&sharpsl_bat);
+       flush_delayed_work(&toggle_charger);
+       flush_delayed_work(&sharpsl_bat);
 
        if (sharpsl_pm.charge_mode == CHRG_ON)
                sharpsl_pm.flags |= SHARPSL_DO_OFFLINE_CHRG;
index 0df5ae6740c6e491895cc20adfda5096437beb74..fe2c97c179d1de40ae6805cce076b46594a22101 100644 (file)
@@ -3,7 +3,7 @@
 #
 
 # Common objects
-obj-y                          := timer.o console.o clock.o common.o
+obj-y                          := timer.o console.o clock.o
 
 # CPU objects
 obj-$(CONFIG_ARCH_SH7367)      += setup-sh7367.o clock-sh7367.o intc-sh7367.o
index bc3b5da59e256ffd050ee6bbfb9168fcc5f6cb22..790dc68c431250a8ae7021bf622102826471ba16 100644 (file)
@@ -1231,6 +1231,15 @@ static struct i2c_board_info i2c1_devices[] = {
 #define USCCR1         IOMEM(0xE6058144)
 static void __init ap4evb_init(void)
 {
+       struct pm_domain_device domain_devices[] = {
+               { "A4LC", &lcdc1_device, },
+               { "A4LC", &lcdc_device, },
+               { "A4MP", &fsi_device, },
+               { "A3SP", &sh_mmcif_device, },
+               { "A3SP", &sdhi0_device, },
+               { "A3SP", &sdhi1_device, },
+               { "A4R", &ceu_device, },
+       };
        u32 srcr4;
        struct clk *clk;
 
@@ -1463,14 +1472,8 @@ static void __init ap4evb_init(void)
 
        platform_add_devices(ap4evb_devices, ARRAY_SIZE(ap4evb_devices));
 
-       rmobile_add_device_to_domain(&sh7372_pd_a4lc, &lcdc1_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a4lc, &lcdc_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a4mp, &fsi_device);
-
-       rmobile_add_device_to_domain(&sh7372_pd_a3sp, &sh_mmcif_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a3sp, &sdhi0_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a3sp, &sdhi1_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a4r, &ceu_device);
+       rmobile_add_devices_to_domains(domain_devices,
+                                      ARRAY_SIZE(domain_devices));
 
        hdmi_init_pm_clock();
        fsi_init_pm_clock();
@@ -1485,6 +1488,6 @@ MACHINE_START(AP4EVB, "ap4evb")
        .init_irq       = sh7372_init_irq,
        .handle_irq     = shmobile_handle_irq_intc,
        .init_machine   = ap4evb_init,
-       .init_late      = shmobile_init_late,
+       .init_late      = sh7372_pm_init_late,
        .timer          = &shmobile_timer,
 MACHINE_END
index c6593d3942738a369e12645d71ebaf60f223f3a3..2912eab3b967bddedb8a42e39747e9612f04d237 100644 (file)
@@ -1209,10 +1209,10 @@ static void __init eva_init(void)
 
        eva_clock_init();
 
-       rmobile_add_device_to_domain(&r8a7740_pd_a4lc, &lcdc0_device);
-       rmobile_add_device_to_domain(&r8a7740_pd_a4lc, &hdmi_lcdc_device);
+       rmobile_add_device_to_domain("A4LC", &lcdc0_device);
+       rmobile_add_device_to_domain("A4LC", &hdmi_lcdc_device);
        if (usb)
-               rmobile_add_device_to_domain(&r8a7740_pd_a3sp, usb);
+               rmobile_add_device_to_domain("A3SP", usb);
 }
 
 static void __init eva_earlytimer_init(void)
index 773a2b95a4e09ef485fc66fb64519406dc23b331..0a43f3189c21c176f41f13ee82062f837a9ab6f8 100644 (file)
@@ -482,12 +482,10 @@ static struct gpio_keys_button gpio_buttons[] = {
 static struct gpio_keys_platform_data gpio_key_info = {
        .buttons        = gpio_buttons,
        .nbuttons       = ARRAY_SIZE(gpio_buttons),
-       .poll_interval  = 250, /* poling at this point */
 };
 
 static struct platform_device gpio_keys_device = {
-       /* gpio-pcf857x.c driver doesn't support gpio_to_irq() */
-       .name   = "gpio-keys-polled",
+       .name   = "gpio-keys",
        .dev    = {
                .platform_data  = &gpio_key_info,
        },
@@ -550,6 +548,7 @@ static struct platform_device fsi_ak4648_device = {
 /* I2C */
 static struct pcf857x_platform_data pcf8575_pdata = {
        .gpio_base      = GPIO_PCF8575_BASE,
+       .irq            = intcs_evt2irq(0x3260), /* IRQ19 */
 };
 
 static struct i2c_board_info i2c0_devices[] = {
index 62783b5d881389d405653ba9ea2550e2b6252e0a..0c27c810cf99d0906b655f298091ffab257644fc 100644 (file)
@@ -1412,6 +1412,22 @@ static struct i2c_board_info i2c1_devices[] = {
 #define USCCR1         IOMEM(0xE6058144)
 static void __init mackerel_init(void)
 {
+       struct pm_domain_device domain_devices[] = {
+               { "A4LC", &lcdc_device, },
+               { "A4LC", &hdmi_lcdc_device, },
+               { "A4LC", &meram_device, },
+               { "A4MP", &fsi_device, },
+               { "A3SP", &usbhs0_device, },
+               { "A3SP", &usbhs1_device, },
+               { "A3SP", &nand_flash_device, },
+               { "A3SP", &sh_mmcif_device, },
+               { "A3SP", &sdhi0_device, },
+#if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE)
+               { "A3SP", &sdhi1_device, },
+#endif
+               { "A3SP", &sdhi2_device, },
+               { "A4R", &ceu_device, },
+       };
        u32 srcr4;
        struct clk *clk;
 
@@ -1626,20 +1642,8 @@ static void __init mackerel_init(void)
 
        platform_add_devices(mackerel_devices, ARRAY_SIZE(mackerel_devices));
 
-       rmobile_add_device_to_domain(&sh7372_pd_a4lc, &lcdc_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a4lc, &hdmi_lcdc_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a4lc, &meram_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a4mp, &fsi_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a3sp, &usbhs0_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a3sp, &usbhs1_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a3sp, &nand_flash_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a3sp, &sh_mmcif_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a3sp, &sdhi0_device);
-#if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE)
-       rmobile_add_device_to_domain(&sh7372_pd_a3sp, &sdhi1_device);
-#endif
-       rmobile_add_device_to_domain(&sh7372_pd_a3sp, &sdhi2_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a4r, &ceu_device);
+       rmobile_add_devices_to_domains(domain_devices,
+                                      ARRAY_SIZE(domain_devices));
 
        hdmi_init_pm_clock();
        sh7372_pm_init();
@@ -1653,6 +1657,6 @@ MACHINE_START(MACKEREL, "mackerel")
        .init_irq       = sh7372_init_irq,
        .handle_irq     = shmobile_handle_irq_intc,
        .init_machine   = mackerel_init,
-       .init_late      = shmobile_init_late,
+       .init_late      = sh7372_pm_init_late,
        .timer          = &shmobile_timer,
 MACHINE_END
diff --git a/arch/arm/mach-shmobile/common.c b/arch/arm/mach-shmobile/common.c
deleted file mode 100644 (file)
index 608aba9..0000000
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
- */
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <mach/common.h>
-
-void __init shmobile_init_late(void)
-{
-       shmobile_suspend_init();
-       shmobile_cpuidle_init();
-}
index 7b541e911ab4aebe99262ef74c8ec0fb6084c01c..9e050268cde4d237ab850a532b38c22cdb37c694 100644 (file)
 #include <asm/cpuidle.h>
 #include <asm/io.h>
 
-static void shmobile_enter_wfi(void)
+int shmobile_enter_wfi(struct cpuidle_device *dev, struct cpuidle_driver *drv,
+                      int index)
 {
        cpu_do_idle();
-}
-
-void (*shmobile_cpuidle_modes[CPUIDLE_STATE_MAX])(void) = {
-       shmobile_enter_wfi, /* regular sleep mode */
-};
-
-static int shmobile_cpuidle_enter(struct cpuidle_device *dev,
-                                 struct cpuidle_driver *drv,
-                                 int index)
-{
-       shmobile_cpuidle_modes[index]();
-
-       return index;
+       return 0;
 }
 
 static struct cpuidle_device shmobile_cpuidle_dev;
-static struct cpuidle_driver shmobile_cpuidle_driver = {
+static struct cpuidle_driver shmobile_cpuidle_default_driver = {
        .name                   = "shmobile_cpuidle",
        .owner                  = THIS_MODULE,
        .en_core_tk_irqen       = 1,
        .states[0]              = ARM_CPUIDLE_WFI_STATE,
+       .states[0].enter        = shmobile_enter_wfi,
        .safe_state_index       = 0, /* C1 */
        .state_count            = 1,
 };
 
-void (*shmobile_cpuidle_setup)(struct cpuidle_driver *drv);
+static struct cpuidle_driver *cpuidle_drv = &shmobile_cpuidle_default_driver;
+
+void shmobile_cpuidle_set_driver(struct cpuidle_driver *drv)
+{
+       cpuidle_drv = drv;
+}
 
 int shmobile_cpuidle_init(void)
 {
        struct cpuidle_device *dev = &shmobile_cpuidle_dev;
-       struct cpuidle_driver *drv = &shmobile_cpuidle_driver;
-       int i;
-
-       for (i = 0; i < CPUIDLE_STATE_MAX; i++)
-               drv->states[i].enter = shmobile_cpuidle_enter;
-
-       if (shmobile_cpuidle_setup)
-               shmobile_cpuidle_setup(drv);
 
-       cpuidle_register_driver(drv);
+       cpuidle_register_driver(cpuidle_drv);
 
-       dev->state_count = drv->state_count;
+       dev->state_count = cpuidle_drv->state_count;
        cpuidle_register_device(dev);
 
        return 0;
index f80f9c549393e14e1c5c35673ab0707d7c641eed..ed77ab8c91437c5bfc1cae270351b0bd06a253d6 100644 (file)
@@ -13,8 +13,10 @@ extern int shmobile_clk_init(void);
 extern void shmobile_handle_irq_intc(struct pt_regs *);
 extern struct platform_suspend_ops shmobile_suspend_ops;
 struct cpuidle_driver;
-extern void (*shmobile_cpuidle_modes[])(void);
-extern void (*shmobile_cpuidle_setup)(struct cpuidle_driver *drv);
+struct cpuidle_device;
+extern int shmobile_enter_wfi(struct cpuidle_device *dev,
+                             struct cpuidle_driver *drv, int index);
+extern void shmobile_cpuidle_set_driver(struct cpuidle_driver *drv);
 
 extern void sh7367_init_irq(void);
 extern void sh7367_map_io(void);
@@ -75,8 +77,6 @@ extern void r8a7740_meram_workaround(void);
 
 extern void r8a7779_register_twd(void);
 
-extern void shmobile_init_late(void);
-
 #ifdef CONFIG_SUSPEND
 int shmobile_suspend_init(void);
 #else
@@ -100,4 +100,10 @@ static inline int shmobile_cpu_is_dead(unsigned int cpu) { return 1; }
 
 extern void shmobile_smp_init_cpus(unsigned int ncores);
 
+static inline void shmobile_init_late(void)
+{
+       shmobile_suspend_init();
+       shmobile_cpuidle_init();
+}
+
 #endif /* __ARCH_MACH_COMMON_H */
index 5a402840fe28afe2d630e8496d1edb6a69b0a9d8..690553a06887897a6c29ffb396ba60bafb29be8e 100644 (file)
@@ -12,6 +12,8 @@
 
 #include <linux/pm_domain.h>
 
+#define DEFAULT_DEV_LATENCY_NS 250000
+
 struct platform_device;
 
 struct rmobile_pm_domain {
@@ -29,16 +31,33 @@ struct rmobile_pm_domain *to_rmobile_pd(struct generic_pm_domain *d)
        return container_of(d, struct rmobile_pm_domain, genpd);
 }
 
+struct pm_domain_device {
+       const char *domain_name;
+       struct platform_device *pdev;
+};
+
 #ifdef CONFIG_PM
-extern void rmobile_init_pm_domain(struct rmobile_pm_domain *rmobile_pd);
-extern void rmobile_add_device_to_domain(struct rmobile_pm_domain *rmobile_pd,
-                                       struct platform_device *pdev);
-extern void rmobile_pm_add_subdomain(struct rmobile_pm_domain *rmobile_pd,
-                                   struct rmobile_pm_domain *rmobile_sd);
+extern void rmobile_init_domains(struct rmobile_pm_domain domains[], int num);
+extern void rmobile_add_device_to_domain_td(const char *domain_name,
+                                           struct platform_device *pdev,
+                                           struct gpd_timing_data *td);
+
+static inline void rmobile_add_device_to_domain(const char *domain_name,
+                                               struct platform_device *pdev)
+{
+       rmobile_add_device_to_domain_td(domain_name, pdev, NULL);
+}
+
+extern void rmobile_add_devices_to_domains(struct pm_domain_device data[],
+                                          int size);
 #else
-#define rmobile_init_pm_domain(pd) do { } while (0)
-#define rmobile_add_device_to_domain(pd, pdev) do { } while (0)
-#define rmobile_pm_add_subdomain(pd, sd) do { } while (0)
+
+#define rmobile_init_domains(domains, num) do { } while (0)
+#define rmobile_add_device_to_domain_td(name, pdev, td) do { } while (0)
+#define rmobile_add_device_to_domain(name, pdev) do { } while (0)
+
+static inline void rmobile_add_devices_to_domains(struct pm_domain_device d[],
+                                                 int size) {}
 #endif /* CONFIG_PM */
 
 #endif /* PM_RMOBILE_H */
index 7143147780df55f6e17a689e9cc67ed2a9cd17e9..59d252f4cf975b3ecd64ffde955f8e6f387f7651 100644 (file)
@@ -607,9 +607,9 @@ enum {
 };
 
 #ifdef CONFIG_PM
-extern struct rmobile_pm_domain r8a7740_pd_a4s;
-extern struct rmobile_pm_domain r8a7740_pd_a3sp;
-extern struct rmobile_pm_domain r8a7740_pd_a4lc;
+extern void __init r8a7740_init_pm_domains(void);
+#else
+static inline void r8a7740_init_pm_domains(void) {}
 #endif /* CONFIG_PM */
 
 #endif /* __ASM_R8A7740_H__ */
index f504c5e81b476a8647c2659a1847e30c9d31a9bf..499f52d2a4a193acbcf659af657aed58d7e27ffb 100644 (file)
@@ -347,17 +347,9 @@ extern int r8a7779_sysc_power_down(struct r8a7779_pm_ch *r8a7779_ch);
 extern int r8a7779_sysc_power_up(struct r8a7779_pm_ch *r8a7779_ch);
 
 #ifdef CONFIG_PM
-extern struct r8a7779_pm_domain r8a7779_sh4a;
-extern struct r8a7779_pm_domain r8a7779_sgx;
-extern struct r8a7779_pm_domain r8a7779_vdp1;
-extern struct r8a7779_pm_domain r8a7779_impx3;
-
-extern void r8a7779_init_pm_domain(struct r8a7779_pm_domain *r8a7779_pd);
-extern void r8a7779_add_device_to_domain(struct r8a7779_pm_domain *r8a7779_pd,
-                                       struct platform_device *pdev);
+extern void __init r8a7779_init_pm_domains(void);
 #else
-#define r8a7779_init_pm_domain(pd) do { } while (0)
-#define r8a7779_add_device_to_domain(pd, pdev) do { } while (0)
+static inline void r8a7779_init_pm_domains(void) {}
 #endif /* CONFIG_PM */
 
 extern struct smp_operations r8a7779_smp_ops;
index b59048e6d8fd7302717b9bb4f9359eae95ce10e5..eb98b45c508902c631153c093ed14aea877893d5 100644 (file)
@@ -478,21 +478,17 @@ extern struct clk sh7372_fsibck_clk;
 extern struct clk sh7372_fsidiva_clk;
 extern struct clk sh7372_fsidivb_clk;
 
-#ifdef CONFIG_PM
-extern struct rmobile_pm_domain sh7372_pd_a4lc;
-extern struct rmobile_pm_domain sh7372_pd_a4mp;
-extern struct rmobile_pm_domain sh7372_pd_d4;
-extern struct rmobile_pm_domain sh7372_pd_a4r;
-extern struct rmobile_pm_domain sh7372_pd_a3rv;
-extern struct rmobile_pm_domain sh7372_pd_a3ri;
-extern struct rmobile_pm_domain sh7372_pd_a4s;
-extern struct rmobile_pm_domain sh7372_pd_a3sp;
-extern struct rmobile_pm_domain sh7372_pd_a3sg;
-#endif /* CONFIG_PM */
-
 extern void sh7372_intcs_suspend(void);
 extern void sh7372_intcs_resume(void);
 extern void sh7372_intca_suspend(void);
 extern void sh7372_intca_resume(void);
 
+#ifdef CONFIG_PM
+extern void __init sh7372_init_pm_domains(void);
+#else
+static inline void sh7372_init_pm_domains(void) {}
+#endif
+
+extern void __init sh7372_pm_init_late(void);
+
 #endif /* __ASM_SH7372_H__ */
index 893504d012a6bb98b583b1660b156325496eabf0..21e5316d2d881aea42b507334b9fcedd139c760d 100644 (file)
@@ -21,14 +21,6 @@ static int r8a7740_pd_a4s_suspend(void)
        return -EBUSY;
 }
 
-struct rmobile_pm_domain r8a7740_pd_a4s = {
-       .genpd.name     = "A4S",
-       .bit_shift      = 10,
-       .gov            = &pm_domain_always_on_gov,
-       .no_debug       = true,
-       .suspend        = r8a7740_pd_a4s_suspend,
-};
-
 static int r8a7740_pd_a3sp_suspend(void)
 {
        /*
@@ -38,17 +30,31 @@ static int r8a7740_pd_a3sp_suspend(void)
        return console_suspend_enabled ? 0 : -EBUSY;
 }
 
-struct rmobile_pm_domain r8a7740_pd_a3sp = {
-       .genpd.name     = "A3SP",
-       .bit_shift      = 11,
-       .gov            = &pm_domain_always_on_gov,
-       .no_debug       = true,
-       .suspend        = r8a7740_pd_a3sp_suspend,
+static struct rmobile_pm_domain r8a7740_pm_domains[] = {
+       {
+               .genpd.name     = "A4S",
+               .bit_shift      = 10,
+               .gov            = &pm_domain_always_on_gov,
+               .no_debug       = true,
+               .suspend        = r8a7740_pd_a4s_suspend,
+       },
+       {
+               .genpd.name     = "A3SP",
+               .bit_shift      = 11,
+               .gov            = &pm_domain_always_on_gov,
+               .no_debug       = true,
+               .suspend        = r8a7740_pd_a3sp_suspend,
+       },
+       {
+               .genpd.name     = "A4LC",
+               .bit_shift      = 1,
+       },
 };
 
-struct rmobile_pm_domain r8a7740_pd_a4lc = {
-       .genpd.name     = "A4LC",
-       .bit_shift      = 1,
-};
+void __init r8a7740_init_pm_domains(void)
+{
+       rmobile_init_domains(r8a7740_pm_domains, ARRAY_SIZE(r8a7740_pm_domains));
+       pm_genpd_add_subdomain_names("A4S", "A3SP");
+}
 
 #endif /* CONFIG_PM */
index a18a4ae16d2bf7ff9072cda4bb70eae8392cf9f1..d50a8e9b94a4f9ac7030bdc49dc5450674e5af9a 100644 (file)
@@ -183,7 +183,7 @@ static bool pd_active_wakeup(struct device *dev)
        return true;
 }
 
-void r8a7779_init_pm_domain(struct r8a7779_pm_domain *r8a7779_pd)
+static void r8a7779_init_pm_domain(struct r8a7779_pm_domain *r8a7779_pd)
 {
        struct generic_pm_domain *genpd = &r8a7779_pd->genpd;
 
@@ -199,43 +199,44 @@ void r8a7779_init_pm_domain(struct r8a7779_pm_domain *r8a7779_pd)
                pd_power_up(&r8a7779_pd->genpd);
 }
 
-void r8a7779_add_device_to_domain(struct r8a7779_pm_domain *r8a7779_pd,
-                                struct platform_device *pdev)
-{
-       struct device *dev = &pdev->dev;
-
-       pm_genpd_add_device(&r8a7779_pd->genpd, dev);
-       if (pm_clk_no_clocks(dev))
-               pm_clk_add(dev, NULL);
-}
-
-struct r8a7779_pm_domain r8a7779_sh4a = {
-       .ch = {
-               .chan_offs = 0x80, /* PWRSR1 .. PWRER1 */
-               .isr_bit = 16, /* SH4A */
-       }
-};
-
-struct r8a7779_pm_domain r8a7779_sgx = {
-       .ch = {
-               .chan_offs = 0xc0, /* PWRSR2 .. PWRER2 */
-               .isr_bit = 20, /* SGX */
-       }
+static struct r8a7779_pm_domain r8a7779_pm_domains[] = {
+       {
+               .genpd.name = "SH4A",
+               .ch = {
+                       .chan_offs = 0x80, /* PWRSR1 .. PWRER1 */
+                       .isr_bit = 16, /* SH4A */
+               },
+       },
+       {
+               .genpd.name = "SGX",
+               .ch = {
+                       .chan_offs = 0xc0, /* PWRSR2 .. PWRER2 */
+                       .isr_bit = 20, /* SGX */
+               },
+       },
+       {
+               .genpd.name = "VDP1",
+               .ch = {
+                       .chan_offs = 0x100, /* PWRSR3 .. PWRER3 */
+                       .isr_bit = 21, /* VDP */
+               },
+       },
+       {
+               .genpd.name = "IMPX3",
+               .ch = {
+                       .chan_offs = 0x140, /* PWRSR4 .. PWRER4 */
+                       .isr_bit = 24, /* IMP */
+               },
+       },
 };
 
-struct r8a7779_pm_domain r8a7779_vdp1 = {
-       .ch = {
-               .chan_offs = 0x100, /* PWRSR3 .. PWRER3 */
-               .isr_bit = 21, /* VDP */
-       }
-};
+void __init r8a7779_init_pm_domains(void)
+{
+       int j;
 
-struct r8a7779_pm_domain r8a7779_impx3 = {
-       .ch = {
-               .chan_offs = 0x140, /* PWRSR4 .. PWRER4 */
-               .isr_bit = 24, /* IMP */
-       }
-};
+       for (j = 0; j < ARRAY_SIZE(r8a7779_pm_domains); j++)
+               r8a7779_init_pm_domain(&r8a7779_pm_domains[j]);
+}
 
 #endif /* CONFIG_PM */
 
index 32e177275e47dcff33f7b5a7850ce3dcc990426d..1fc05d9453d026df07a1fb383a81f97d6f8d460e 100644 (file)
@@ -134,7 +134,7 @@ static int rmobile_pd_start_dev(struct device *dev)
        return ret;
 }
 
-void rmobile_init_pm_domain(struct rmobile_pm_domain *rmobile_pd)
+static void rmobile_init_pm_domain(struct rmobile_pm_domain *rmobile_pd)
 {
        struct generic_pm_domain *genpd = &rmobile_pd->genpd;
        struct dev_power_governor *gov = rmobile_pd->gov;
@@ -149,19 +149,38 @@ void rmobile_init_pm_domain(struct rmobile_pm_domain *rmobile_pd)
        __rmobile_pd_power_up(rmobile_pd, false);
 }
 
-void rmobile_add_device_to_domain(struct rmobile_pm_domain *rmobile_pd,
-                                struct platform_device *pdev)
+void rmobile_init_domains(struct rmobile_pm_domain domains[], int num)
+{
+       int j;
+
+       for (j = 0; j < num; j++)
+               rmobile_init_pm_domain(&domains[j]);
+}
+
+void rmobile_add_device_to_domain_td(const char *domain_name,
+                                    struct platform_device *pdev,
+                                    struct gpd_timing_data *td)
 {
        struct device *dev = &pdev->dev;
 
-       pm_genpd_add_device(&rmobile_pd->genpd, dev);
+       __pm_genpd_name_add_device(domain_name, dev, td);
        if (pm_clk_no_clocks(dev))
                pm_clk_add(dev, NULL);
 }
 
-void rmobile_pm_add_subdomain(struct rmobile_pm_domain *rmobile_pd,
-                            struct rmobile_pm_domain *rmobile_sd)
+void rmobile_add_devices_to_domains(struct pm_domain_device data[],
+                                   int size)
 {
-       pm_genpd_add_subdomain(&rmobile_pd->genpd, &rmobile_sd->genpd);
+       struct gpd_timing_data latencies = {
+               .stop_latency_ns = DEFAULT_DEV_LATENCY_NS,
+               .start_latency_ns = DEFAULT_DEV_LATENCY_NS,
+               .save_state_latency_ns = DEFAULT_DEV_LATENCY_NS,
+               .restore_state_latency_ns = DEFAULT_DEV_LATENCY_NS,
+       };
+       int j;
+
+       for (j = 0; j < size; j++)
+               rmobile_add_device_to_domain_td(data[j].domain_name,
+                                               data[j].pdev, &latencies);
 }
 #endif /* CONFIG_PM */
index 162121842a2b0e97cb2d032ce8d0ba6c0958041a..a0826a48dd0885b921d5cc95d524848683353790 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/irq.h>
 #include <linux/bitrev.h>
 #include <linux/console.h>
+#include <asm/cpuidle.h>
 #include <asm/io.h>
 #include <asm/tlbflush.h>
 #include <asm/suspend.h>
 
 #ifdef CONFIG_PM
 
-struct rmobile_pm_domain sh7372_pd_a4lc = {
-       .genpd.name = "A4LC",
-       .bit_shift = 1,
-};
-
-struct rmobile_pm_domain sh7372_pd_a4mp = {
-       .genpd.name = "A4MP",
-       .bit_shift = 2,
-};
-
-struct rmobile_pm_domain sh7372_pd_d4 = {
-       .genpd.name = "D4",
-       .bit_shift = 3,
-};
+#define PM_DOMAIN_ON_OFF_LATENCY_NS    250000
 
 static int sh7372_a4r_pd_suspend(void)
 {
@@ -94,39 +82,25 @@ static int sh7372_a4r_pd_suspend(void)
        return 0;
 }
 
-struct rmobile_pm_domain sh7372_pd_a4r = {
-       .genpd.name = "A4R",
-       .bit_shift = 5,
-       .suspend = sh7372_a4r_pd_suspend,
-       .resume = sh7372_intcs_resume,
-};
+static bool a4s_suspend_ready;
 
-struct rmobile_pm_domain sh7372_pd_a3rv = {
-       .genpd.name = "A3RV",
-       .bit_shift = 6,
-};
-
-struct rmobile_pm_domain sh7372_pd_a3ri = {
-       .genpd.name = "A3RI",
-       .bit_shift = 8,
-};
-
-static int sh7372_pd_a4s_suspend(void)
+static int sh7372_a4s_pd_suspend(void)
 {
        /*
         * The A4S domain contains the CPU core and therefore it should
-        * only be turned off if the CPU is in use.
+        * only be turned off if the CPU is not in use.  This may happen
+        * during system suspend, when SYSC is going to be used for generating
+        * resume signals and a4s_suspend_ready is set to let
+        * sh7372_enter_suspend() know that it can turn A4S off.
         */
+       a4s_suspend_ready = true;
        return -EBUSY;
 }
 
-struct rmobile_pm_domain sh7372_pd_a4s = {
-       .genpd.name = "A4S",
-       .bit_shift = 10,
-       .gov = &pm_domain_always_on_gov,
-       .no_debug = true,
-       .suspend = sh7372_pd_a4s_suspend,
-};
+static void sh7372_a4s_pd_resume(void)
+{
+       a4s_suspend_ready = false;
+}
 
 static int sh7372_a3sp_pd_suspend(void)
 {
@@ -137,18 +111,80 @@ static int sh7372_a3sp_pd_suspend(void)
        return console_suspend_enabled ? 0 : -EBUSY;
 }
 
-struct rmobile_pm_domain sh7372_pd_a3sp = {
-       .genpd.name = "A3SP",
-       .bit_shift = 11,
-       .gov = &pm_domain_always_on_gov,
-       .no_debug = true,
-       .suspend = sh7372_a3sp_pd_suspend,
+static struct rmobile_pm_domain sh7372_pm_domains[] = {
+       {
+               .genpd.name = "A4LC",
+               .genpd.power_on_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+               .genpd.power_off_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+               .bit_shift = 1,
+       },
+       {
+               .genpd.name = "A4MP",
+               .genpd.power_on_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+               .genpd.power_off_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+               .bit_shift = 2,
+       },
+       {
+               .genpd.name = "D4",
+               .genpd.power_on_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+               .genpd.power_off_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+               .bit_shift = 3,
+       },
+       {
+               .genpd.name = "A4R",
+               .genpd.power_on_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+               .genpd.power_off_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+               .bit_shift = 5,
+               .suspend = sh7372_a4r_pd_suspend,
+               .resume = sh7372_intcs_resume,
+       },
+       {
+               .genpd.name = "A3RV",
+               .genpd.power_on_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+               .genpd.power_off_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+               .bit_shift = 6,
+       },
+       {
+               .genpd.name = "A3RI",
+               .genpd.power_on_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+               .genpd.power_off_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+               .bit_shift = 8,
+       },
+       {
+               .genpd.name = "A4S",
+               .genpd.power_on_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+               .genpd.power_off_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+               .bit_shift = 10,
+               .gov = &pm_domain_always_on_gov,
+               .no_debug = true,
+               .suspend = sh7372_a4s_pd_suspend,
+               .resume = sh7372_a4s_pd_resume,
+       },
+       {
+               .genpd.name = "A3SP",
+               .genpd.power_on_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+               .genpd.power_off_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+               .bit_shift = 11,
+               .gov = &pm_domain_always_on_gov,
+               .no_debug = true,
+               .suspend = sh7372_a3sp_pd_suspend,
+       },
+       {
+               .genpd.name = "A3SG",
+               .genpd.power_on_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+               .genpd.power_off_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+               .bit_shift = 13,
+       },
 };
 
-struct rmobile_pm_domain sh7372_pd_a3sg = {
-       .genpd.name = "A3SG",
-       .bit_shift = 13,
-};
+void __init sh7372_init_pm_domains(void)
+{
+       rmobile_init_domains(sh7372_pm_domains, ARRAY_SIZE(sh7372_pm_domains));
+       pm_genpd_add_subdomain_names("A4LC", "A3RV");
+       pm_genpd_add_subdomain_names("A4R", "A4LC");
+       pm_genpd_add_subdomain_names("A4S", "A3SG");
+       pm_genpd_add_subdomain_names("A4S", "A3SP");
+}
 
 #endif /* CONFIG_PM */
 
@@ -304,6 +340,21 @@ static void sh7372_enter_a3sm_common(int pllc0_on)
        sh7372_set_reset_vector(__pa(sh7372_resume_core_standby_sysc));
        sh7372_enter_sysc(pllc0_on, 1 << 12);
 }
+
+static void sh7372_enter_a4s_common(int pllc0_on)
+{
+       sh7372_intca_suspend();
+       sh7372_set_reset_vector(SMFRAM);
+       sh7372_enter_sysc(pllc0_on, 1 << 10);
+       sh7372_intca_resume();
+}
+
+static void sh7372_pm_setup_smfram(void)
+{
+       memcpy((void *)SMFRAM, sh7372_resume_core_standby_sysc, 0x100);
+}
+#else
+static inline void sh7372_pm_setup_smfram(void) {}
 #endif /* CONFIG_SUSPEND || CONFIG_CPU_IDLE */
 
 #ifdef CONFIG_CPU_IDLE
@@ -313,7 +364,8 @@ static int sh7372_do_idle_core_standby(unsigned long unused)
        return 0;
 }
 
-static void sh7372_enter_core_standby(void)
+static int sh7372_enter_core_standby(struct cpuidle_device *dev,
+                                    struct cpuidle_driver *drv, int index)
 {
        sh7372_set_reset_vector(__pa(sh7372_resume_core_standby_sysc));
 
@@ -324,83 +376,102 @@ static void sh7372_enter_core_standby(void)
 
         /* disable reset vector translation */
        __raw_writel(0, SBAR);
+
+       return 1;
 }
 
-static void sh7372_enter_a3sm_pll_on(void)
+static int sh7372_enter_a3sm_pll_on(struct cpuidle_device *dev,
+                                   struct cpuidle_driver *drv, int index)
 {
        sh7372_enter_a3sm_common(1);
+       return 2;
 }
 
-static void sh7372_enter_a3sm_pll_off(void)
+static int sh7372_enter_a3sm_pll_off(struct cpuidle_device *dev,
+                                    struct cpuidle_driver *drv, int index)
 {
        sh7372_enter_a3sm_common(0);
+       return 3;
 }
 
-static void sh7372_cpuidle_setup(struct cpuidle_driver *drv)
+static int sh7372_enter_a4s(struct cpuidle_device *dev,
+                           struct cpuidle_driver *drv, int index)
 {
-       struct cpuidle_state *state = &drv->states[drv->state_count];
-
-       snprintf(state->name, CPUIDLE_NAME_LEN, "C2");
-       strncpy(state->desc, "Core Standby Mode", CPUIDLE_DESC_LEN);
-       state->exit_latency = 10;
-       state->target_residency = 20 + 10;
-       state->flags = CPUIDLE_FLAG_TIME_VALID;
-       shmobile_cpuidle_modes[drv->state_count] = sh7372_enter_core_standby;
-       drv->state_count++;
-
-       state = &drv->states[drv->state_count];
-       snprintf(state->name, CPUIDLE_NAME_LEN, "C3");
-       strncpy(state->desc, "A3SM PLL ON", CPUIDLE_DESC_LEN);
-       state->exit_latency = 20;
-       state->target_residency = 30 + 20;
-       state->flags = CPUIDLE_FLAG_TIME_VALID;
-       shmobile_cpuidle_modes[drv->state_count] = sh7372_enter_a3sm_pll_on;
-       drv->state_count++;
-
-       state = &drv->states[drv->state_count];
-       snprintf(state->name, CPUIDLE_NAME_LEN, "C4");
-       strncpy(state->desc, "A3SM PLL OFF", CPUIDLE_DESC_LEN);
-       state->exit_latency = 120;
-       state->target_residency = 30 + 120;
-       state->flags = CPUIDLE_FLAG_TIME_VALID;
-       shmobile_cpuidle_modes[drv->state_count] = sh7372_enter_a3sm_pll_off;
-       drv->state_count++;
+       unsigned long msk, msk2;
+
+       if (!sh7372_sysc_valid(&msk, &msk2))
+               return sh7372_enter_a3sm_pll_off(dev, drv, index);
+
+       sh7372_setup_sysc(msk, msk2);
+       sh7372_enter_a4s_common(0);
+       return 4;
 }
 
+static struct cpuidle_driver sh7372_cpuidle_driver = {
+       .name                   = "sh7372_cpuidle",
+       .owner                  = THIS_MODULE,
+       .en_core_tk_irqen       = 1,
+       .state_count            = 5,
+       .safe_state_index       = 0, /* C1 */
+       .states[0] = ARM_CPUIDLE_WFI_STATE,
+       .states[0].enter = shmobile_enter_wfi,
+       .states[1] = {
+               .name = "C2",
+               .desc = "Core Standby Mode",
+               .exit_latency = 10,
+               .target_residency = 20 + 10,
+               .flags = CPUIDLE_FLAG_TIME_VALID,
+               .enter = sh7372_enter_core_standby,
+       },
+       .states[2] = {
+               .name = "C3",
+               .desc = "A3SM PLL ON",
+               .exit_latency = 20,
+               .target_residency = 30 + 20,
+               .flags = CPUIDLE_FLAG_TIME_VALID,
+               .enter = sh7372_enter_a3sm_pll_on,
+       },
+       .states[3] = {
+               .name = "C4",
+               .desc = "A3SM PLL OFF",
+               .exit_latency = 120,
+               .target_residency = 30 + 120,
+               .flags = CPUIDLE_FLAG_TIME_VALID,
+               .enter = sh7372_enter_a3sm_pll_off,
+       },
+       .states[4] = {
+               .name = "C5",
+               .desc = "A4S PLL OFF",
+               .exit_latency = 240,
+               .target_residency = 30 + 240,
+               .flags = CPUIDLE_FLAG_TIME_VALID,
+               .enter = sh7372_enter_a4s,
+               .disabled = true,
+       },
+};
+
 static void sh7372_cpuidle_init(void)
 {
-       shmobile_cpuidle_setup = sh7372_cpuidle_setup;
+       shmobile_cpuidle_set_driver(&sh7372_cpuidle_driver);
 }
 #else
 static void sh7372_cpuidle_init(void) {}
 #endif
 
 #ifdef CONFIG_SUSPEND
-static void sh7372_enter_a4s_common(int pllc0_on)
-{
-       sh7372_intca_suspend();
-       memcpy((void *)SMFRAM, sh7372_resume_core_standby_sysc, 0x100);
-       sh7372_set_reset_vector(SMFRAM);
-       sh7372_enter_sysc(pllc0_on, 1 << 10);
-       sh7372_intca_resume();
-}
-
 static int sh7372_enter_suspend(suspend_state_t suspend_state)
 {
        unsigned long msk, msk2;
 
        /* check active clocks to determine potential wakeup sources */
-       if (sh7372_sysc_valid(&msk, &msk2)) {
-               if (!console_suspend_enabled &&
-                   sh7372_pd_a4s.genpd.status == GPD_STATE_POWER_OFF) {
-                       /* convert INTC mask/sense to SYSC mask/sense */
-                       sh7372_setup_sysc(msk, msk2);
-
-                       /* enter A4S sleep with PLLC0 off */
-                       pr_debug("entering A4S\n");
-                       sh7372_enter_a4s_common(0);
-                       return 0;
-               }
+       if (sh7372_sysc_valid(&msk, &msk2) && a4s_suspend_ready) {
+               /* convert INTC mask/sense to SYSC mask/sense */
+               sh7372_setup_sysc(msk, msk2);
+
+               /* enter A4S sleep with PLLC0 off */
+               pr_debug("entering A4S\n");
+               sh7372_enter_a4s_common(0);
+               return 0;
        }
 
        /* default to enter A3SM sleep with PLLC0 off */
@@ -426,7 +497,7 @@ static int sh7372_pm_notifier_fn(struct notifier_block *notifier,
                 * executed during system suspend and resume, respectively, so
                 * that those functions don't crash while accessing the INTCS.
                 */
-               pm_genpd_poweron(&sh7372_pd_a4r.genpd);
+               pm_genpd_name_poweron("A4R");
                break;
        case PM_POST_SUSPEND:
                pm_genpd_poweroff_unused();
@@ -455,6 +526,14 @@ void __init sh7372_pm_init(void)
        /* do not convert A3SM, A3SP, A3SG, A4R power down into A4S */
        __raw_writel(0, PDNSEL);
 
+       sh7372_pm_setup_smfram();
+
        sh7372_suspend_init();
        sh7372_cpuidle_init();
 }
+
+void __init sh7372_pm_init_late(void)
+{
+       shmobile_init_late();
+       pm_genpd_name_attach_cpuidle("A4S", 4);
+}
index 78948a9dba0ec47547f5fca10b7aa71452600d7b..11bb1d9841975ef9be12c1591fae32c80903b55e 100644 (file)
@@ -673,12 +673,7 @@ void __init r8a7740_add_standard_devices(void)
        r8a7740_i2c_workaround(&i2c0_device);
        r8a7740_i2c_workaround(&i2c1_device);
 
-       /* PM domain */
-       rmobile_init_pm_domain(&r8a7740_pd_a4s);
-       rmobile_init_pm_domain(&r8a7740_pd_a3sp);
-       rmobile_init_pm_domain(&r8a7740_pd_a4lc);
-
-       rmobile_pm_add_subdomain(&r8a7740_pd_a4s, &r8a7740_pd_a3sp);
+       r8a7740_init_pm_domains();
 
        /* add devices */
        platform_add_devices(r8a7740_early_devices,
@@ -688,16 +683,16 @@ void __init r8a7740_add_standard_devices(void)
 
        /* add devices to PM domain  */
 
-       rmobile_add_device_to_domain(&r8a7740_pd_a3sp,  &scif0_device);
-       rmobile_add_device_to_domain(&r8a7740_pd_a3sp,  &scif1_device);
-       rmobile_add_device_to_domain(&r8a7740_pd_a3sp,  &scif2_device);
-       rmobile_add_device_to_domain(&r8a7740_pd_a3sp,  &scif3_device);
-       rmobile_add_device_to_domain(&r8a7740_pd_a3sp,  &scif4_device);
-       rmobile_add_device_to_domain(&r8a7740_pd_a3sp,  &scif5_device);
-       rmobile_add_device_to_domain(&r8a7740_pd_a3sp,  &scif6_device);
-       rmobile_add_device_to_domain(&r8a7740_pd_a3sp,  &scif7_device);
-       rmobile_add_device_to_domain(&r8a7740_pd_a3sp,  &scifb_device);
-       rmobile_add_device_to_domain(&r8a7740_pd_a3sp,  &i2c1_device);
+       rmobile_add_device_to_domain("A3SP",    &scif0_device);
+       rmobile_add_device_to_domain("A3SP",    &scif1_device);
+       rmobile_add_device_to_domain("A3SP",    &scif2_device);
+       rmobile_add_device_to_domain("A3SP",    &scif3_device);
+       rmobile_add_device_to_domain("A3SP",    &scif4_device);
+       rmobile_add_device_to_domain("A3SP",    &scif5_device);
+       rmobile_add_device_to_domain("A3SP",    &scif6_device);
+       rmobile_add_device_to_domain("A3SP",    &scif7_device);
+       rmobile_add_device_to_domain("A3SP",    &scifb_device);
+       rmobile_add_device_to_domain("A3SP",    &i2c1_device);
 }
 
 static void __init r8a7740_earlytimer_init(void)
index e98e46f6cf5508f8b1750c7abb2b2effd45a80db..2917668f0091c1042a70e512f50c12bacdb3ebb2 100644 (file)
@@ -251,10 +251,7 @@ void __init r8a7779_add_standard_devices(void)
 #endif
        r8a7779_pm_init();
 
-       r8a7779_init_pm_domain(&r8a7779_sh4a);
-       r8a7779_init_pm_domain(&r8a7779_sgx);
-       r8a7779_init_pm_domain(&r8a7779_vdp1);
-       r8a7779_init_pm_domain(&r8a7779_impx3);
+       r8a7779_init_pm_domains();
 
        platform_add_devices(r8a7779_early_devices,
                            ARRAY_SIZE(r8a7779_early_devices));
index 838a87be1d5c31cc46a932d58f27a73f00066ca9..a07954fbcd22b9017b588ef1f745bddf101bf419 100644 (file)
@@ -1001,21 +1001,34 @@ static struct platform_device *sh7372_late_devices[] __initdata = {
 
 void __init sh7372_add_standard_devices(void)
 {
-       rmobile_init_pm_domain(&sh7372_pd_a4lc);
-       rmobile_init_pm_domain(&sh7372_pd_a4mp);
-       rmobile_init_pm_domain(&sh7372_pd_d4);
-       rmobile_init_pm_domain(&sh7372_pd_a4r);
-       rmobile_init_pm_domain(&sh7372_pd_a3rv);
-       rmobile_init_pm_domain(&sh7372_pd_a3ri);
-       rmobile_init_pm_domain(&sh7372_pd_a4s);
-       rmobile_init_pm_domain(&sh7372_pd_a3sp);
-       rmobile_init_pm_domain(&sh7372_pd_a3sg);
-
-       rmobile_pm_add_subdomain(&sh7372_pd_a4lc, &sh7372_pd_a3rv);
-       rmobile_pm_add_subdomain(&sh7372_pd_a4r, &sh7372_pd_a4lc);
-
-       rmobile_pm_add_subdomain(&sh7372_pd_a4s, &sh7372_pd_a3sg);
-       rmobile_pm_add_subdomain(&sh7372_pd_a4s, &sh7372_pd_a3sp);
+       struct pm_domain_device domain_devices[] = {
+               { "A3RV", &vpu_device, },
+               { "A4MP", &spu0_device, },
+               { "A4MP", &spu1_device, },
+               { "A3SP", &scif0_device, },
+               { "A3SP", &scif1_device, },
+               { "A3SP", &scif2_device, },
+               { "A3SP", &scif3_device, },
+               { "A3SP", &scif4_device, },
+               { "A3SP", &scif5_device, },
+               { "A3SP", &scif6_device, },
+               { "A3SP", &iic1_device, },
+               { "A3SP", &dma0_device, },
+               { "A3SP", &dma1_device, },
+               { "A3SP", &dma2_device, },
+               { "A3SP", &usb_dma0_device, },
+               { "A3SP", &usb_dma1_device, },
+               { "A4R", &iic0_device, },
+               { "A4R", &veu0_device, },
+               { "A4R", &veu1_device, },
+               { "A4R", &veu2_device, },
+               { "A4R", &veu3_device, },
+               { "A4R", &jpu_device, },
+               { "A4R", &tmu00_device, },
+               { "A4R", &tmu01_device, },
+       };
+
+       sh7372_init_pm_domains();
 
        platform_add_devices(sh7372_early_devices,
                            ARRAY_SIZE(sh7372_early_devices));
@@ -1023,30 +1036,8 @@ void __init sh7372_add_standard_devices(void)
        platform_add_devices(sh7372_late_devices,
                            ARRAY_SIZE(sh7372_late_devices));
 
-       rmobile_add_device_to_domain(&sh7372_pd_a3rv, &vpu_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a4mp, &spu0_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a4mp, &spu1_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a3sp, &scif0_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a3sp, &scif1_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a3sp, &scif2_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a3sp, &scif3_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a3sp, &scif4_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a3sp, &scif5_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a3sp, &scif6_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a3sp, &iic1_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a3sp, &dma0_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a3sp, &dma1_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a3sp, &dma2_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a3sp, &usb_dma0_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a3sp, &usb_dma1_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a4r, &iic0_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a4r, &veu0_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a4r, &veu1_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a4r, &veu2_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a4r, &veu3_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a4r, &jpu_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a4r, &tmu00_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a4r, &tmu01_device);
+       rmobile_add_devices_to_domains(domain_devices,
+                                      ARRAY_SIZE(domain_devices));
 }
 
 static void __init sh7372_earlytimer_init(void)
diff --git a/arch/arm/mach-tegra/include/mach/kbc.h b/arch/arm/mach-tegra/include/mach/kbc.h
deleted file mode 100644 (file)
index a130256..0000000
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Platform definitions for tegra-kbc keyboard input driver
- *
- * Copyright (c) 2010-2011, NVIDIA Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
- */
-
-#ifndef ASMARM_ARCH_TEGRA_KBC_H
-#define ASMARM_ARCH_TEGRA_KBC_H
-
-#include <linux/types.h>
-#include <linux/input/matrix_keypad.h>
-
-#define KBC_MAX_GPIO   24
-#define KBC_MAX_KPENT  8
-
-#define KBC_MAX_ROW    16
-#define KBC_MAX_COL    8
-#define KBC_MAX_KEY    (KBC_MAX_ROW * KBC_MAX_COL)
-
-enum tegra_pin_type {
-       PIN_CFG_IGNORE,
-       PIN_CFG_COL,
-       PIN_CFG_ROW,
-};
-
-struct tegra_kbc_pin_cfg {
-       enum tegra_pin_type type;
-       unsigned char num;
-};
-
-struct tegra_kbc_wake_key {
-       u8 row:4;
-       u8 col:4;
-};
-
-struct tegra_kbc_platform_data {
-       unsigned int debounce_cnt;
-       unsigned int repeat_cnt;
-
-       struct tegra_kbc_pin_cfg pin_cfg[KBC_MAX_GPIO];
-       const struct matrix_keymap_data *keymap_data;
-
-       u32 wakeup_key;
-       bool wakeup;
-       bool use_fn_map;
-       bool use_ghost_filter;
-};
-#endif
index ef6f602b7e489b483eaa274ecdaec1342c258ebb..b8efac4daed89256a8e9dc0257c940af05033fa4 100644 (file)
@@ -1556,9 +1556,6 @@ static struct u300_mux_hog u300_mux_hogs[] = {
        {
                .dev = &uart0_device.dev,
        },
-       {
-               .dev = &pl022_device.dev,
-       },
        {
                .dev = &mmcsd_device.dev,
        },
index 32fd99204464a25da4dba6012b575ea844521aaf..a267c6d30e374f19c491244ad23fd93b07aa85d1 100644 (file)
@@ -30,16 +30,15 @@ static enum custom_pin_cfg_t pinsfor;
 #define BIAS(a,b) static unsigned long a[] = { b }
 
 BIAS(pd, PIN_PULL_DOWN);
-BIAS(slpm_gpio_nopull, PIN_SLPM_GPIO|PIN_SLPM_INPUT_NOPULL);
 BIAS(in_nopull, PIN_INPUT_NOPULL);
-BIAS(in_nopull_sleep_nowkup, PIN_INPUT_NOPULL|PIN_SLPM_WAKEUP_DISABLE);
+BIAS(in_nopull_slpm_nowkup, PIN_INPUT_NOPULL|PIN_SLPM_WAKEUP_DISABLE);
 BIAS(in_pu, PIN_INPUT_PULLUP);
 BIAS(in_pd, PIN_INPUT_PULLDOWN);
 BIAS(in_pd_slpm_in_pu, PIN_INPUT_PULLDOWN|PIN_SLPM_INPUT_PULLUP);
 BIAS(in_pu_slpm_out_lo, PIN_INPUT_PULLUP|PIN_SLPM_OUTPUT_LOW);
 BIAS(out_hi, PIN_OUTPUT_HIGH);
 BIAS(out_lo, PIN_OUTPUT_LOW);
-BIAS(out_lo_sleep_nowkup, PIN_OUTPUT_LOW|PIN_SLPM_WAKEUP_DISABLE);
+BIAS(out_lo_slpm_nowkup, PIN_OUTPUT_LOW|PIN_SLPM_WAKEUP_DISABLE);
 /* These also force them into GPIO mode */
 BIAS(gpio_in_pu, PIN_INPUT_PULLUP|PIN_GPIOMODE_ENABLED);
 BIAS(gpio_in_pd, PIN_INPUT_PULLDOWN|PIN_GPIOMODE_ENABLED);
@@ -48,23 +47,32 @@ BIAS(gpio_in_pd_slpm_gpio_nopull, PIN_INPUT_PULLDOWN|PIN_GPIOMODE_ENABLED|PIN_SL
 BIAS(gpio_out_hi, PIN_OUTPUT_HIGH|PIN_GPIOMODE_ENABLED);
 BIAS(gpio_out_lo, PIN_OUTPUT_LOW|PIN_GPIOMODE_ENABLED);
 /* Sleep modes */
-BIAS(sleep_in_wkup_pdis, PIN_SLPM_DIR_INPUT|PIN_SLPM_WAKEUP_ENABLE|PIN_SLPM_PDIS_DISABLED);
-BIAS(sleep_in_nopull_wkup, PIN_INPUT_NOPULL|PIN_SLPM_WAKEUP_ENABLE);
-BIAS(sleep_out_hi_wkup_pdis, PIN_SLPM_OUTPUT_HIGH|PIN_SLPM_WAKEUP_ENABLE|PIN_SLPM_PDIS_DISABLED);
-BIAS(sleep_out_lo_wkup, PIN_OUTPUT_LOW|PIN_SLPM_WAKEUP_ENABLE);
-BIAS(sleep_out_wkup_pdis, PIN_SLPM_DIR_OUTPUT|PIN_SLPM_WAKEUP_ENABLE|PIN_SLPM_PDIS_DISABLED);
+BIAS(slpm_in_wkup_pdis, PIN_SLEEPMODE_ENABLED|PIN_SLPM_DIR_INPUT|PIN_SLPM_WAKEUP_ENABLE|PIN_SLPM_PDIS_DISABLED);
+BIAS(slpm_in_nopull_wkup, PIN_SLEEPMODE_ENABLED|PIN_SLPM_DIR_INPUT|PIN_SLPM_PULL_NONE|PIN_SLPM_WAKEUP_ENABLE);
+BIAS(slpm_wkup_pdis, PIN_SLEEPMODE_ENABLED|PIN_SLPM_WAKEUP_ENABLE|PIN_SLPM_PDIS_DISABLED);
+BIAS(slpm_out_hi_wkup_pdis, PIN_SLEEPMODE_ENABLED|PIN_SLPM_OUTPUT_HIGH|PIN_SLPM_WAKEUP_ENABLE|PIN_SLPM_PDIS_DISABLED);
+BIAS(slpm_out_wkup_pdis, PIN_SLEEPMODE_ENABLED|PIN_SLPM_WAKEUP_ENABLE|PIN_SLPM_PDIS_DISABLED);
+BIAS(slpm_out_lo_wkup, PIN_SLEEPMODE_ENABLED|PIN_SLPM_OUTPUT_LOW|PIN_SLPM_WAKEUP_ENABLE);
+BIAS(slpm_out_lo_wkup_pdis, PIN_SLEEPMODE_ENABLED|PIN_SLPM_OUTPUT_LOW|PIN_SLPM_WAKEUP_ENABLE|PIN_SLPM_PDIS_DISABLED);
+BIAS(slpm_in_nopull_wkup_pdis, PIN_SLEEPMODE_ENABLED|PIN_SLPM_INPUT_NOPULL|PIN_SLPM_WAKEUP_ENABLE|PIN_SLPM_PDIS_DISABLED);
 
 /* We use these to define hog settings that are always done on boot */
 #define DB8500_MUX_HOG(group,func) \
        PIN_MAP_MUX_GROUP_HOG_DEFAULT("pinctrl-db8500", group, func)
 #define DB8500_PIN_HOG(pin,conf) \
        PIN_MAP_CONFIGS_PIN_HOG_DEFAULT("pinctrl-db8500", pin, conf)
+#define DB8500_PIN_SLEEP(pin, conf, dev) \
+       PIN_MAP_CONFIGS_PIN(dev, PINCTRL_STATE_SLEEP, "pinctrl-db8500", \
+                           pin, conf)
 
 /* These are default states associated with device and changed runtime */
 #define DB8500_MUX(group,func,dev) \
        PIN_MAP_MUX_GROUP_DEFAULT(dev, "pinctrl-db8500", group, func)
 #define DB8500_PIN(pin,conf,dev) \
        PIN_MAP_CONFIGS_PIN_DEFAULT(dev, "pinctrl-db8500", pin, conf)
+#define DB8500_PIN_SLEEP(pin, conf, dev) \
+       PIN_MAP_CONFIGS_PIN(dev, PINCTRL_STATE_SLEEP, "pinctrl-db8500", \
+                           pin, conf)
 
 #define DB8500_PIN_SLEEP(pin,conf,dev) \
        PIN_MAP_CONFIGS_PIN(dev, PINCTRL_STATE_SLEEP, "pinctrl-db8500", \
@@ -134,40 +142,47 @@ static struct pinctrl_map __initdata mop500_family_pinmap[] = {
        DB8500_PIN("GPIO2_AH4", in_pu, "uart0"), /* RXD */
        DB8500_PIN("GPIO3_AH3", out_hi, "uart0"), /* TXD */
        /* UART0 sleep state */
-       DB8500_PIN_SLEEP("GPIO0_AJ5", sleep_in_wkup_pdis, "uart0"),
-       DB8500_PIN_SLEEP("GPIO1_AJ3", sleep_out_hi_wkup_pdis, "uart0"),
-       DB8500_PIN_SLEEP("GPIO2_AH4", sleep_in_wkup_pdis, "uart0"),
-       DB8500_PIN_SLEEP("GPIO3_AH3", sleep_out_wkup_pdis, "uart0"),
+       DB8500_PIN_SLEEP("GPIO0_AJ5", slpm_in_wkup_pdis, "uart0"),
+       DB8500_PIN_SLEEP("GPIO1_AJ3", slpm_out_hi_wkup_pdis, "uart0"),
+       DB8500_PIN_SLEEP("GPIO2_AH4", slpm_in_wkup_pdis, "uart0"),
+       DB8500_PIN_SLEEP("GPIO3_AH3", slpm_out_wkup_pdis, "uart0"),
        /* MSP1 for ALSA codec */
        DB8500_MUX("msp1txrx_a_1", "msp1", "ux500-msp-i2s.1"),
        DB8500_MUX("msp1_a_1", "msp1", "ux500-msp-i2s.1"),
-       DB8500_PIN("GPIO33_AF2", out_lo_sleep_nowkup, "ux500-msp-i2s.1"),
-       DB8500_PIN("GPIO34_AE1", in_nopull_sleep_nowkup, "ux500-msp-i2s.1"),
-       DB8500_PIN("GPIO35_AE2", in_nopull_sleep_nowkup, "ux500-msp-i2s.1"),
-       DB8500_PIN("GPIO36_AG2", in_nopull_sleep_nowkup, "ux500-msp-i2s.1"),
+       DB8500_PIN("GPIO33_AF2", out_lo_slpm_nowkup, "ux500-msp-i2s.1"),
+       DB8500_PIN("GPIO34_AE1", in_nopull_slpm_nowkup, "ux500-msp-i2s.1"),
+       DB8500_PIN("GPIO35_AE2", in_nopull_slpm_nowkup, "ux500-msp-i2s.1"),
+       DB8500_PIN("GPIO36_AG2", in_nopull_slpm_nowkup, "ux500-msp-i2s.1"),
        /* MSP1 sleep state */
-       DB8500_PIN_SLEEP("GPIO33_AF2", sleep_out_lo_wkup, "ux500-msp-i2s.1"),
-       DB8500_PIN_SLEEP("GPIO34_AE1", sleep_in_nopull_wkup, "ux500-msp-i2s.1"),
-       DB8500_PIN_SLEEP("GPIO35_AE2", sleep_in_nopull_wkup, "ux500-msp-i2s.1"),
-       DB8500_PIN_SLEEP("GPIO36_AG2", sleep_in_nopull_wkup, "ux500-msp-i2s.1"),
+       DB8500_PIN_SLEEP("GPIO33_AF2", slpm_out_lo_wkup, "ux500-msp-i2s.1"),
+       DB8500_PIN_SLEEP("GPIO34_AE1", slpm_in_nopull_wkup, "ux500-msp-i2s.1"),
+       DB8500_PIN_SLEEP("GPIO35_AE2", slpm_in_nopull_wkup, "ux500-msp-i2s.1"),
+       DB8500_PIN_SLEEP("GPIO36_AG2", slpm_in_nopull_wkup, "ux500-msp-i2s.1"),
        /* Mux in LCD data lines 8 thru 11 and LCDA CLK for MCDE TVOUT */
        DB8500_MUX("lcd_d8_d11_a_1", "lcd", "mcde-tvout"),
        DB8500_MUX("lcdaclk_b_1", "lcda", "mcde-tvout"),
        /* Mux in LCD VSI1 and pull it up for MCDE HDMI output */
        DB8500_MUX("lcdvsi1_a_1", "lcd", "av8100-hdmi"),
-       /* Mux in I2C blocks, put pins into GPIO in sleepmode no pull-up */
+       /* Mux in i2c0 block, default state */
        DB8500_MUX("i2c0_a_1", "i2c0", "nmk-i2c.0"),
-       DB8500_PIN("GPIO147_C15", slpm_gpio_nopull, "nmk-i2c.0"),
-       DB8500_PIN("GPIO148_B16", slpm_gpio_nopull, "nmk-i2c.0"),
+       /* i2c0 sleep state */
+       DB8500_PIN_SLEEP("GPIO147_C15", slpm_in_nopull_wkup_pdis, "nmk-i2c.0"), /* SDA */
+       DB8500_PIN_SLEEP("GPIO148_B16", slpm_in_nopull_wkup_pdis, "nmk-i2c.0"), /* SCL */
+       /* Mux in i2c1 block, default state  */
        DB8500_MUX("i2c1_b_2", "i2c1", "nmk-i2c.1"),
-       DB8500_PIN("GPIO16_AD3", slpm_gpio_nopull, "nmk-i2c.1"),
-       DB8500_PIN("GPIO17_AD4", slpm_gpio_nopull, "nmk-i2c.1"),
+       /* i2c1 sleep state */
+       DB8500_PIN_SLEEP("GPIO16_AD3", slpm_in_nopull_wkup_pdis, "nmk-i2c.1"), /* SDA */
+       DB8500_PIN_SLEEP("GPIO17_AD4", slpm_in_nopull_wkup_pdis, "nmk-i2c.1"), /* SCL */
+       /* Mux in i2c2 block, default state  */
        DB8500_MUX("i2c2_b_2", "i2c2", "nmk-i2c.2"),
-       DB8500_PIN("GPIO10_AF5", slpm_gpio_nopull, "nmk-i2c.2"),
-       DB8500_PIN("GPIO11_AG4", slpm_gpio_nopull, "nmk-i2c.2"),
+       /* i2c2 sleep state */
+       DB8500_PIN_SLEEP("GPIO10_AF5", slpm_in_nopull_wkup_pdis, "nmk-i2c.2"), /* SDA */
+       DB8500_PIN_SLEEP("GPIO11_AG4", slpm_in_nopull_wkup_pdis, "nmk-i2c.2"), /* SCL */
+       /* Mux in i2c3 block, default state  */
        DB8500_MUX("i2c3_c_2", "i2c3", "nmk-i2c.3"),
-       DB8500_PIN("GPIO229_AG7", slpm_gpio_nopull, "nmk-i2c.3"),
-       DB8500_PIN("GPIO230_AF7", slpm_gpio_nopull, "nmk-i2c.3"),
+       /* i2c3 sleep state */
+       DB8500_PIN_SLEEP("GPIO229_AG7", slpm_in_nopull_wkup_pdis, "nmk-i2c.3"), /* SDA */
+       DB8500_PIN_SLEEP("GPIO230_AF7", slpm_in_nopull_wkup_pdis, "nmk-i2c.3"), /* SCL */
        /* Mux in SDI0 (here called MC0) used for removable MMC/SD/SDIO cards */
        DB8500_MUX("mc0_a_1", "mc0", "sdi0"),
        DB8500_PIN("GPIO18_AC2", out_hi, "sdi0"), /* CMDDIR */
@@ -219,11 +234,15 @@ static struct pinctrl_map __initdata mop500_family_pinmap[] = {
        DB8500_MUX("usb_a_1", "usb", "musb-ux500.0"),
        DB8500_PIN("GPIO257_AE29", out_hi, "musb-ux500.0"), /* STP */
        /* Mux in SPI2 pins on the "other C1" altfunction */
-       DB8500_MUX("spi2_oc1_1", "spi2", "spi2"),
+       DB8500_MUX("spi2_oc1_2", "spi2", "spi2"),
        DB8500_PIN("GPIO216_AG12", gpio_out_hi, "spi2"), /* FRM */
        DB8500_PIN("GPIO218_AH11", in_pd, "spi2"), /* RXD */
        DB8500_PIN("GPIO215_AH13", out_lo, "spi2"), /* TXD */
        DB8500_PIN("GPIO217_AH12", out_lo, "spi2"), /* CLK */
+       /* SPI2 sleep state */
+       DB8500_PIN_SLEEP("GPIO218_AH11", slpm_in_wkup_pdis, "spi2"), /* RXD */
+       DB8500_PIN_SLEEP("GPIO215_AH13", slpm_out_lo_wkup_pdis, "spi2"), /* TXD */
+       DB8500_PIN_SLEEP("GPIO217_AH12", slpm_wkup_pdis, "spi2"), /* CLK */
 };
 
 /*
@@ -410,7 +429,7 @@ static struct pinctrl_map __initdata u9500_pinmap[] = {
        DB8500_PIN_HOG("GPIO144_B13", gpio_in_pu),
        /* HSI */
        DB8500_MUX_HOG("hsir_a_1", "hsi"),
-       DB8500_MUX_HOG("hsit_a_1", "hsi"),
+       DB8500_MUX_HOG("hsit_a_2", "hsi"),
        DB8500_PIN_HOG("GPIO219_AG10", in_pd), /* RX FLA0 */
        DB8500_PIN_HOG("GPIO220_AH10", in_pd), /* RX DAT0 */
        DB8500_PIN_HOG("GPIO221_AJ11", out_lo), /* RX RDY0 */
@@ -418,7 +437,7 @@ static struct pinctrl_map __initdata u9500_pinmap[] = {
        DB8500_PIN_HOG("GPIO223_AH9", out_lo), /* TX DAT0 */
        DB8500_PIN_HOG("GPIO224_AG9", in_pd), /* TX RDY0 */
        DB8500_PIN_HOG("GPIO225_AG8", in_pd), /* CAWAKE0 */
-       DB8500_PIN_HOG("GPIO226_AF8", out_hi), /* ACWAKE0 */
+       DB8500_PIN_HOG("GPIO226_AF8", gpio_out_hi), /* ACWAKE0 */
 };
 
 static struct pinctrl_map __initdata u8500_pinmap[] = {
index 13f555d62491e59fbae0127f89e3f2b2395d3abf..477a2d23ddf17efb95af55cebbc25d35793a417d 100644 (file)
@@ -73,11 +73,18 @@ static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
             unsigned long offset, size_t size, enum dma_data_direction dir,
             struct dma_attrs *attrs)
 {
-       if (!arch_is_coherent() && !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+       if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
                __dma_page_cpu_to_dev(page, offset, size, dir);
        return pfn_to_dma(dev, page_to_pfn(page)) + offset;
 }
 
+static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page *page,
+            unsigned long offset, size_t size, enum dma_data_direction dir,
+            struct dma_attrs *attrs)
+{
+       return pfn_to_dma(dev, page_to_pfn(page)) + offset;
+}
+
 /**
  * arm_dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
@@ -96,7 +103,7 @@ static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir,
                struct dma_attrs *attrs)
 {
-       if (!arch_is_coherent() && !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+       if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
                __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
                                      handle & ~PAGE_MASK, size, dir);
 }
@@ -106,8 +113,7 @@ static void arm_dma_sync_single_for_cpu(struct device *dev,
 {
        unsigned int offset = handle & (PAGE_SIZE - 1);
        struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
-       if (!arch_is_coherent())
-               __dma_page_dev_to_cpu(page, offset, size, dir);
+       __dma_page_dev_to_cpu(page, offset, size, dir);
 }
 
 static void arm_dma_sync_single_for_device(struct device *dev,
@@ -115,8 +121,7 @@ static void arm_dma_sync_single_for_device(struct device *dev,
 {
        unsigned int offset = handle & (PAGE_SIZE - 1);
        struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
-       if (!arch_is_coherent())
-               __dma_page_cpu_to_dev(page, offset, size, dir);
+       __dma_page_cpu_to_dev(page, offset, size, dir);
 }
 
 static int arm_dma_set_mask(struct device *dev, u64 dma_mask);
@@ -138,6 +143,22 @@ struct dma_map_ops arm_dma_ops = {
 };
 EXPORT_SYMBOL(arm_dma_ops);
 
+static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
+       dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs);
+static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
+                                 dma_addr_t handle, struct dma_attrs *attrs);
+
+struct dma_map_ops arm_coherent_dma_ops = {
+       .alloc                  = arm_coherent_dma_alloc,
+       .free                   = arm_coherent_dma_free,
+       .mmap                   = arm_dma_mmap,
+       .get_sgtable            = arm_dma_get_sgtable,
+       .map_page               = arm_coherent_dma_map_page,
+       .map_sg                 = arm_dma_map_sg,
+       .set_dma_mask           = arm_dma_set_mask,
+};
+EXPORT_SYMBOL(arm_coherent_dma_ops);
+
 static u64 get_coherent_dma_mask(struct device *dev)
 {
        u64 mask = (u64)arm_dma_limit;
@@ -586,7 +607,7 @@ static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
 
 
 static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
-                        gfp_t gfp, pgprot_t prot, const void *caller)
+                        gfp_t gfp, pgprot_t prot, bool is_coherent, const void *caller)
 {
        u64 mask = get_coherent_dma_mask(dev);
        struct page *page;
@@ -619,7 +640,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
        *handle = DMA_ERROR_CODE;
        size = PAGE_ALIGN(size);
 
-       if (arch_is_coherent() || nommu())
+       if (is_coherent || nommu())
                addr = __alloc_simple_buffer(dev, size, gfp, &page);
        else if (gfp & GFP_ATOMIC)
                addr = __alloc_from_pool(size, &page);
@@ -647,7 +668,20 @@ void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
        if (dma_alloc_from_coherent(dev, size, handle, &memory))
                return memory;
 
-       return __dma_alloc(dev, size, handle, gfp, prot,
+       return __dma_alloc(dev, size, handle, gfp, prot, false,
+                          __builtin_return_address(0));
+}
+
+static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
+       dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
+{
+       pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
+       void *memory;
+
+       if (dma_alloc_from_coherent(dev, size, handle, &memory))
+               return memory;
+
+       return __dma_alloc(dev, size, handle, gfp, prot, true,
                           __builtin_return_address(0));
 }
 
@@ -684,8 +718,9 @@ int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 /*
  * Free a buffer as defined by the above mapping.
  */
-void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
-                 dma_addr_t handle, struct dma_attrs *attrs)
+static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
+                          dma_addr_t handle, struct dma_attrs *attrs,
+                          bool is_coherent)
 {
        struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
 
@@ -694,7 +729,7 @@ void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
 
        size = PAGE_ALIGN(size);
 
-       if (arch_is_coherent() || nommu()) {
+       if (is_coherent || nommu()) {
                __dma_free_buffer(page, size);
        } else if (__free_from_pool(cpu_addr, size)) {
                return;
@@ -710,6 +745,18 @@ void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
        }
 }
 
+void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
+                 dma_addr_t handle, struct dma_attrs *attrs)
+{
+       __arm_dma_free(dev, size, cpu_addr, handle, attrs, false);
+}
+
+static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
+                                 dma_addr_t handle, struct dma_attrs *attrs)
+{
+       __arm_dma_free(dev, size, cpu_addr, handle, attrs, true);
+}
+
 int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
                 void *cpu_addr, dma_addr_t handle, size_t size,
                 struct dma_attrs *attrs)
@@ -1012,11 +1059,12 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, gfp_t
                if (!pages[i])
                        goto error;
 
-               if (order)
+               if (order) {
                        split_page(pages[i], order);
-               j = 1 << order;
-               while (--j)
-                       pages[i + j] = pages[i] + j;
+                       j = 1 << order;
+                       while (--j)
+                               pages[i + j] = pages[i] + j;
+               }
 
                __dma_clear_buffer(pages[i], PAGE_SIZE << order);
                i += 1 << order;
@@ -1303,7 +1351,8 @@ static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
  */
 static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
                          size_t size, dma_addr_t *handle,
-                         enum dma_data_direction dir, struct dma_attrs *attrs)
+                         enum dma_data_direction dir, struct dma_attrs *attrs,
+                         bool is_coherent)
 {
        struct dma_iommu_mapping *mapping = dev->archdata.mapping;
        dma_addr_t iova, iova_base;
@@ -1322,8 +1371,8 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
                phys_addr_t phys = page_to_phys(sg_page(s));
                unsigned int len = PAGE_ALIGN(s->offset + s->length);
 
-               if (!arch_is_coherent() &&
-                   !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+               if (!is_coherent &&
+                       !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
                        __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
 
                ret = iommu_map(mapping->domain, iova, phys, len, 0);
@@ -1341,20 +1390,9 @@ fail:
        return ret;
 }
 
-/**
- * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
- * @dev: valid struct device pointer
- * @sg: list of buffers
- * @nents: number of buffers to map
- * @dir: DMA transfer direction
- *
- * Map a set of buffers described by scatterlist in streaming mode for DMA.
- * The scatter gather list elements are merged together (if possible) and
- * tagged with the appropriate dma address and length. They are obtained via
- * sg_dma_{address,length}.
- */
-int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-                    enum dma_data_direction dir, struct dma_attrs *attrs)
+static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+                    enum dma_data_direction dir, struct dma_attrs *attrs,
+                    bool is_coherent)
 {
        struct scatterlist *s = sg, *dma = sg, *start = sg;
        int i, count = 0;
@@ -1370,7 +1408,7 @@ int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 
                if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
                        if (__map_sg_chunk(dev, start, size, &dma->dma_address,
-                           dir, attrs) < 0)
+                           dir, attrs, is_coherent) < 0)
                                goto bad_mapping;
 
                        dma->dma_address += offset;
@@ -1383,7 +1421,8 @@ int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
                }
                size += s->length;
        }
-       if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs) < 0)
+       if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs,
+               is_coherent) < 0)
                goto bad_mapping;
 
        dma->dma_address += offset;
@@ -1398,17 +1437,44 @@ bad_mapping:
 }
 
 /**
- * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
+ * arm_coherent_iommu_map_sg - map a set of SG buffers for streaming mode DMA
  * @dev: valid struct device pointer
  * @sg: list of buffers
- * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
- * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ * @nents: number of buffers to map
+ * @dir: DMA transfer direction
  *
- * Unmap a set of streaming mode DMA translations.  Again, CPU access
- * rules concerning calls here are the same as for dma_unmap_single().
+ * Map a set of i/o coherent buffers described by scatterlist in streaming
+ * mode for DMA. The scatter gather list elements are merged together (if
+ * possible) and tagged with the appropriate dma address and length. They are
+ * obtained via sg_dma_{address,length}.
  */
-void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
-                       enum dma_data_direction dir, struct dma_attrs *attrs)
+int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg,
+               int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+       return __iommu_map_sg(dev, sg, nents, dir, attrs, true);
+}
+
+/**
+ * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to map
+ * @dir: DMA transfer direction
+ *
+ * Map a set of buffers described by scatterlist in streaming mode for DMA.
+ * The scatter gather list elements are merged together (if possible) and
+ * tagged with the appropriate dma address and length. They are obtained via
+ * sg_dma_{address,length}.
+ */
+int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
+               int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+       return __iommu_map_sg(dev, sg, nents, dir, attrs, false);
+}
+
+static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
+               int nents, enum dma_data_direction dir, struct dma_attrs *attrs,
+               bool is_coherent)
 {
        struct scatterlist *s;
        int i;
@@ -1417,13 +1483,45 @@ void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
                if (sg_dma_len(s))
                        __iommu_remove_mapping(dev, sg_dma_address(s),
                                               sg_dma_len(s));
-               if (!arch_is_coherent() &&
+               if (!is_coherent &&
                    !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
                        __dma_page_dev_to_cpu(sg_page(s), s->offset,
                                              s->length, dir);
        }
 }
 
+/**
+ * arm_coherent_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ *
+ * Unmap a set of streaming mode DMA translations.  Again, CPU access
+ * rules concerning calls here are the same as for dma_unmap_single().
+ */
+void arm_coherent_iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
+               int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+       __iommu_unmap_sg(dev, sg, nents, dir, attrs, true);
+}
+
+/**
+ * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ *
+ * Unmap a set of streaming mode DMA translations.  Again, CPU access
+ * rules concerning calls here are the same as for dma_unmap_single().
+ */
+void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
+                       enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+       __iommu_unmap_sg(dev, sg, nents, dir, attrs, false);
+}
+
 /**
  * arm_iommu_sync_sg_for_cpu
  * @dev: valid struct device pointer
@@ -1438,8 +1536,7 @@ void arm_iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
        int i;
 
        for_each_sg(sg, s, nents, i)
-               if (!arch_is_coherent())
-                       __dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
+               __dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
 
 }
 
@@ -1457,22 +1554,21 @@ void arm_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
        int i;
 
        for_each_sg(sg, s, nents, i)
-               if (!arch_is_coherent())
-                       __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
+               __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
 }
 
 
 /**
- * arm_iommu_map_page
+ * arm_coherent_iommu_map_page
  * @dev: valid struct device pointer
  * @page: page that buffer resides in
  * @offset: offset into page for start of buffer
  * @size: size of buffer to map
  * @dir: DMA transfer direction
  *
- * IOMMU aware version of arm_dma_map_page()
+ * Coherent IOMMU aware version of arm_dma_map_page()
  */
-static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
+static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *page,
             unsigned long offset, size_t size, enum dma_data_direction dir,
             struct dma_attrs *attrs)
 {
@@ -1480,9 +1576,6 @@ static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
        dma_addr_t dma_addr;
        int ret, len = PAGE_ALIGN(size + offset);
 
-       if (!arch_is_coherent() && !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
-               __dma_page_cpu_to_dev(page, offset, size, dir);
-
        dma_addr = __alloc_iova(mapping, len);
        if (dma_addr == DMA_ERROR_CODE)
                return dma_addr;
@@ -1497,6 +1590,51 @@ fail:
        return DMA_ERROR_CODE;
 }
 
+/**
+ * arm_iommu_map_page
+ * @dev: valid struct device pointer
+ * @page: page that buffer resides in
+ * @offset: offset into page for start of buffer
+ * @size: size of buffer to map
+ * @dir: DMA transfer direction
+ *
+ * IOMMU aware version of arm_dma_map_page()
+ */
+static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
+            unsigned long offset, size_t size, enum dma_data_direction dir,
+            struct dma_attrs *attrs)
+{
+       if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+               __dma_page_cpu_to_dev(page, offset, size, dir);
+
+       return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs);
+}
+
+/**
+ * arm_coherent_iommu_unmap_page
+ * @dev: valid struct device pointer
+ * @handle: DMA address of buffer
+ * @size: size of buffer (same as passed to dma_map_page)
+ * @dir: DMA transfer direction (same as passed to dma_map_page)
+ *
+ * Coherent IOMMU aware version of arm_dma_unmap_page()
+ */
+static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle,
+               size_t size, enum dma_data_direction dir,
+               struct dma_attrs *attrs)
+{
+       struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+       dma_addr_t iova = handle & PAGE_MASK;
+       int offset = handle & ~PAGE_MASK;
+       int len = PAGE_ALIGN(size + offset);
+
+       if (!iova)
+               return;
+
+       iommu_unmap(mapping->domain, iova, len);
+       __free_iova(mapping, iova, len);
+}
+
 /**
  * arm_iommu_unmap_page
  * @dev: valid struct device pointer
@@ -1519,7 +1657,7 @@ static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
        if (!iova)
                return;
 
-       if (!arch_is_coherent() && !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+       if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
                __dma_page_dev_to_cpu(page, offset, size, dir);
 
        iommu_unmap(mapping->domain, iova, len);
@@ -1537,8 +1675,7 @@ static void arm_iommu_sync_single_for_cpu(struct device *dev,
        if (!iova)
                return;
 
-       if (!arch_is_coherent())
-               __dma_page_dev_to_cpu(page, offset, size, dir);
+       __dma_page_dev_to_cpu(page, offset, size, dir);
 }
 
 static void arm_iommu_sync_single_for_device(struct device *dev,
@@ -1572,6 +1709,19 @@ struct dma_map_ops iommu_ops = {
        .sync_sg_for_device     = arm_iommu_sync_sg_for_device,
 };
 
+struct dma_map_ops iommu_coherent_ops = {
+       .alloc          = arm_iommu_alloc_attrs,
+       .free           = arm_iommu_free_attrs,
+       .mmap           = arm_iommu_mmap_attrs,
+       .get_sgtable    = arm_iommu_get_sgtable,
+
+       .map_page       = arm_coherent_iommu_map_page,
+       .unmap_page     = arm_coherent_iommu_unmap_page,
+
+       .map_sg         = arm_coherent_iommu_map_sg,
+       .unmap_sg       = arm_coherent_iommu_unmap_sg,
+};
+
 /**
  * arm_iommu_create_mapping
  * @bus: pointer to the bus holding the client device (for IOMMU calls)
@@ -1665,7 +1815,7 @@ int arm_iommu_attach_device(struct device *dev,
        dev->archdata.mapping = mapping;
        set_dma_ops(dev, &iommu_ops);
 
-       pr_info("Attached IOMMU controller to %s device.\n", dev_name(dev));
+       pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev));
        return 0;
 }
 
index 18144e6a3115ea0e333b37c13395ce229abc5384..941dfb9e9a78635680d85225169d0edd187aea8c 100644 (file)
@@ -422,17 +422,6 @@ static void __init build_mem_type_table(void)
        cp = &cache_policies[cachepolicy];
        vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
 
-       /*
-        * Enable CPU-specific coherency if supported.
-        * (Only available on XSC3 at the moment.)
-        */
-       if (arch_is_coherent() && cpu_is_xsc3()) {
-               mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
-               mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
-               mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
-               mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
-               mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
-       }
        /*
         * ARMv6 and above have extended page tables.
         */
index 5e13c3884aa441c099bd274ef51d27acf36e06ab..42377ef9ea3d3fb4c5231622b21e4bc6714294ce 100644 (file)
@@ -310,7 +310,7 @@ static void omap_mbox_fini(struct omap_mbox *mbox)
                omap_mbox_disable_irq(mbox, IRQ_RX);
                free_irq(mbox->irq, mbox);
                tasklet_kill(&mbox->txq->tasklet);
-               flush_work_sync(&mbox->rxq->work);
+               flush_work(&mbox->rxq->work);
                mbox_queue_free(mbox->txq);
                mbox_queue_free(mbox->rxq);
        }
index e92215428a37e315d0b5df9f962df8e4c8090dcc..72bd5ae50a890fd1fda0afe419553cf631bd2f2e 100644 (file)
@@ -138,11 +138,6 @@ config CRIS_MACH_ARTPEC3
 
 endchoice
 
-config ETRAX_VCS_SIM
-       bool "VCS Simulator"
-       help
-         Setup hardware to be run in the VCS simulator.
-
 config ETRAX_ARCH_V10
        bool
        default y if ETRAX100LX || ETRAX100LX_V2
index b34438e026be436d6c71632a250a23c187d9c6f7..1b6ad6247204c0b9d09f63f7e4731aaf70cb83cd 100644 (file)
@@ -329,7 +329,6 @@ static int __init init_axis_flash(void)
        }
 #endif
 
-#ifndef CONFIG_ETRAX_VCS_SIM
        main_mtd = flash_probe();
        if (main_mtd)
                printk(KERN_INFO "%s: 0x%08x bytes of NOR flash memory.\n",
@@ -603,34 +602,7 @@ static int __init init_axis_flash(void)
                                        "partition %d\n", part);
                }
        }
-#endif /* CONFIG_EXTRAX_VCS_SIM */
 
-#ifdef CONFIG_ETRAX_VCS_SIM
-       /* For simulator, always use a RAM partition.
-        * The rootfs will be found after the kernel in RAM,
-        * with romfs_start and romfs_end indicating location and size.
-        */
-       struct mtd_info *mtd_ram;
-
-       mtd_ram = kmalloc(sizeof(struct mtd_info), GFP_KERNEL);
-       if (!mtd_ram) {
-               panic("axisflashmap: Couldn't allocate memory for "
-                     "mtd_info!\n");
-       }
-
-       printk(KERN_INFO "axisflashmap: Adding RAM partition for romfs, "
-              "at %u, size %u\n",
-              (unsigned) romfs_start, (unsigned) romfs_length);
-
-       err = mtdram_init_device(mtd_ram, (void *)romfs_start,
-                                romfs_length, "romfs");
-       if (err) {
-               panic("axisflashmap: Could not initialize MTD RAM "
-                     "device!\n");
-       }
-#endif /* CONFIG_EXTRAX_VCS_SIM */
-
-#ifndef CONFIG_ETRAX_VCS_SIM
        if (aux_mtd) {
                aux_partition.size = aux_mtd->size;
                err = mtd_device_register(aux_mtd, &aux_partition, 1);
@@ -639,7 +611,6 @@ static int __init init_axis_flash(void)
                              "aux mtd device!\n");
 
        }
-#endif /* CONFIG_EXTRAX_VCS_SIM */
 
        return err;
 }
index 5b1ee82f63c5452750caf791fff3bd975c6d9c6a..e3dfc72d0cfd760507f42ea74355511aa134c16d 100644 (file)
@@ -97,28 +97,3 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
                pcibios_enable_irq(dev);
        return 0;
 }
-
-int pcibios_assign_resources(void)
-{
-       struct pci_dev *dev = NULL;
-       int idx;
-       struct resource *r;
-
-       while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
-               int class = dev->class >> 8;
-
-               /* Don't touch classless devices and host bridges */
-               if (!class || class == PCI_CLASS_BRIDGE_HOST)
-                       continue;
-
-               for(idx=0; idx<6; idx++) {
-                       r = &dev->resource[idx];
-
-                       if (!r->start && r->end)
-                               pci_assign_resource(dev, idx);
-               }
-       }
-       return 0;
-}
-
-EXPORT_SYMBOL(pcibios_assign_resources);
index 5d502b9ab56da253f2bdb1336535754da99f52cf..51e34165ece7feb7d0da60d5e42af1581aa8b260 100644 (file)
        .global nand_boot
        .global swapper_pg_dir
 
-       ;; Dummy section to make it bootable with current VCS simulator
-#ifdef CONFIG_ETRAX_VCS_SIM
-       .section ".boot", "ax"
-       ba tstart
-       nop
-#endif
-
        .text
 tstart:
        ;; This is the entry point of the kernel. The CPU is currently in
@@ -75,17 +68,10 @@ secondary_cpu_entry: /* Entry point for secondary CPUs */
                | REG_FIELD(mmu, rw_mm_kbase_hi, base_c, 4)     \
                | REG_FIELD(mmu, rw_mm_kbase_hi, base_d, 5)     \
                | REG_FIELD(mmu, rw_mm_kbase_hi, base_b, 0xb), $r0
-#elif !defined(CONFIG_ETRAX_VCS_SIM)
-       move.d  REG_FIELD(mmu, rw_mm_kbase_hi, base_e, 8)       \
-               | REG_FIELD(mmu, rw_mm_kbase_hi, base_c, 4)     \
-               | REG_FIELD(mmu, rw_mm_kbase_hi, base_b, 0xb), $r0
 #else
-       ;; Map the virtual DRAM to the RW eprom area at address 0.
-       ;; Also map 0xa for the hook calls,
        move.d  REG_FIELD(mmu, rw_mm_kbase_hi, base_e, 8)       \
                | REG_FIELD(mmu, rw_mm_kbase_hi, base_c, 4)     \
-               | REG_FIELD(mmu, rw_mm_kbase_hi, base_b, 0xb)   \
-               | REG_FIELD(mmu, rw_mm_kbase_hi, base_a, 0xa), $r0
+               | REG_FIELD(mmu, rw_mm_kbase_hi, base_b, 0xb), $r0
 #endif
 
        ;; Temporary map of 0x40 -> 0x40 and 0x00 -> 0x00.
@@ -126,27 +112,6 @@ secondary_cpu_entry: /* Entry point for secondary CPUs */
                | REG_STATE(mmu, rw_mm_cfg, seg_2, page)        \
                | REG_STATE(mmu, rw_mm_cfg, seg_1, page)        \
                | REG_STATE(mmu, rw_mm_cfg, seg_0, linear), $r2
-#elif !defined(CONFIG_ETRAX_VCS_SIM)
-       move.d  REG_STATE(mmu, rw_mm_cfg, we, on)               \
-               | REG_STATE(mmu, rw_mm_cfg, acc, on)            \
-               | REG_STATE(mmu, rw_mm_cfg, ex, on)             \
-               | REG_STATE(mmu, rw_mm_cfg, inv, on)            \
-               | REG_STATE(mmu, rw_mm_cfg, seg_f, linear)      \
-               | REG_STATE(mmu, rw_mm_cfg, seg_e, linear)      \
-               | REG_STATE(mmu, rw_mm_cfg, seg_d, page)        \
-               | REG_STATE(mmu, rw_mm_cfg, seg_c, linear)      \
-               | REG_STATE(mmu, rw_mm_cfg, seg_b, linear)      \
-               | REG_STATE(mmu, rw_mm_cfg, seg_a, page)        \
-               | REG_STATE(mmu, rw_mm_cfg, seg_9, page)        \
-               | REG_STATE(mmu, rw_mm_cfg, seg_8, page)        \
-               | REG_STATE(mmu, rw_mm_cfg, seg_7, page)        \
-               | REG_STATE(mmu, rw_mm_cfg, seg_6, page)        \
-               | REG_STATE(mmu, rw_mm_cfg, seg_5, page)        \
-               | REG_STATE(mmu, rw_mm_cfg, seg_4, linear)      \
-               | REG_STATE(mmu, rw_mm_cfg, seg_3, page)        \
-               | REG_STATE(mmu, rw_mm_cfg, seg_2, page)        \
-               | REG_STATE(mmu, rw_mm_cfg, seg_1, page)        \
-               | REG_STATE(mmu, rw_mm_cfg, seg_0, linear), $r2
 #else
        move.d  REG_STATE(mmu, rw_mm_cfg, we, on)               \
                | REG_STATE(mmu, rw_mm_cfg, acc, on)            \
@@ -157,7 +122,7 @@ secondary_cpu_entry: /* Entry point for secondary CPUs */
                | REG_STATE(mmu, rw_mm_cfg, seg_d, page)        \
                | REG_STATE(mmu, rw_mm_cfg, seg_c, linear)      \
                | REG_STATE(mmu, rw_mm_cfg, seg_b, linear)      \
-               | REG_STATE(mmu, rw_mm_cfg, seg_a, linear)      \
+               | REG_STATE(mmu, rw_mm_cfg, seg_a, page)        \
                | REG_STATE(mmu, rw_mm_cfg, seg_9, page)        \
                | REG_STATE(mmu, rw_mm_cfg, seg_8, page)        \
                | REG_STATE(mmu, rw_mm_cfg, seg_7, page)        \
@@ -226,7 +191,6 @@ master_cpu:
        move.d secondary_cpu_entry, $r1
        move.d $r1, [$r0]
 #endif
-#ifndef CONFIG_ETRAX_VCS_SIM
        ; Check if starting from DRAM (network->RAM boot or unpacked
        ; compressed kernel), or directly from flash.
        lapcq   ., $r0
@@ -234,7 +198,6 @@ master_cpu:
        cmp.d   0x10000, $r0    ; Arbitrary, something above this code.
        blo     _inflash0
        nop
-#endif
 
        jump    _inram          ; Jump to cached RAM.
        nop
@@ -326,7 +289,6 @@ move_cramfs:
        move.d  romfs_length, $r1
        move.d  $r0, [$r1]
 
-#ifndef CONFIG_ETRAX_VCS_SIM
        ;; The kernel could have been unpacked to DRAM by the loader, but
        ;; the cramfs image could still be in the flash immediately
        ;; following the compressed kernel image. The loader passes the address
@@ -335,10 +297,6 @@ move_cramfs:
        cmp.d   0x0ffffff8, $r9
        bhs     _no_romfs_in_flash ; R9 points outside the flash area.
        nop
-#else
-       ba _no_romfs_in_flash
-       nop
-#endif
        ;; cramfs rootfs might to be in flash. Check for it.
        move.d  [$r9], $r0      ; cramfs_super.magic
        cmp.d   CRAMFS_MAGIC, $r0
@@ -396,7 +354,6 @@ _no_romfs_in_flash:
        move.d  romfs_length, $r3
        move.d  $r2, [$r3]      ; store size at romfs_length
 
-#ifndef CONFIG_ETRAX_VCS_SIM
        add.d   $r2, $r0        ; copy from end and downwards
        add.d   $r2, $r1
 
@@ -410,7 +367,6 @@ _no_romfs_in_flash:
        subq    1, $r2
        bne     1b
        nop
-#endif
 
 4:
        ;; BSS move done.
@@ -455,7 +411,6 @@ no_command_line:
        move.d  etrax_irv, $r1  ; Set the exception base register and pointer.
        move.d  $r0, [$r1]
 
-#ifndef CONFIG_ETRAX_VCS_SIM
        ;; Clear the BSS region from _bss_start to _end.
        move.d  __bss_start, $r0
        move.d  _end, $r1
@@ -463,15 +418,6 @@ no_command_line:
        cmp.d   $r1, $r0
        blo 1b
        nop
-#endif
-
-#ifdef CONFIG_ETRAX_VCS_SIM
-       /* Set the watchdog timeout to something big. Will be removed when */
-       /* watchdog can be disabled with command line option */
-       move.d  0x7fffffff, $r10
-       jsr     CPU_WATCHDOG_TIMEOUT
-       nop
-#endif
 
        ; Initialize registers to increase determinism
        move.d __bss_start, $r0
index 8c1d35cdf00a3aa0c0a65305148a92d091ebeaef..b06813aeb120147b82502029fc3b21d7c9204fd5 100644 (file)
@@ -381,23 +381,9 @@ static int read_register(char regno, unsigned int *valptr);
 /* Serial port, reads one character. ETRAX 100 specific. from debugport.c */
 int getDebugChar(void);
 
-#ifdef CONFIG_ETRAX_VCS_SIM
-int getDebugChar(void)
-{
-  return socketread();
-}
-#endif
-
 /* Serial port, writes one character. ETRAX 100 specific. from debugport.c */
 void putDebugChar(int val);
 
-#ifdef CONFIG_ETRAX_VCS_SIM
-void putDebugChar(int val)
-{
-  socketwrite((char *)&val, 1);
-}
-#endif
-
 /* Returns the integer equivalent of a hexadecimal character. */
 static int hex(char ch);
 
index 41fa6a6893a9ce2ce94247f95f750d68e3bd6f1a..d366e0891988ad24cbc378c83c0bf8100414df05 100644 (file)
@@ -1,10 +1,8 @@
-# $Id: Makefile,v 1.3 2007/03/13 11:57:46 starvik Exp $
 #
 # Makefile for the linux kernel.
 #
 
 obj-y   := dma.o pinmux.o io.o arbiter.o
-obj-$(CONFIG_ETRAX_VCS_SIM) += vcs_hook.o
 obj-$(CONFIG_CPU_FREQ)   += cpufreq.o
 
 clean:
diff --git a/arch/cris/arch-v32/mach-a3/vcs_hook.c b/arch/cris/arch-v32/mach-a3/vcs_hook.c
deleted file mode 100644 (file)
index 58b1a54..0000000
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- * Simulator hook mechanism
- */
-
-#include "vcs_hook.h"
-#include <asm/io.h>
-#include <stdarg.h>
-
-#define HOOK_TRIG_ADDR      0xb7000000
-#define HOOK_MEM_BASE_ADDR  0xce000000
-
-static volatile unsigned *hook_base;
-
-#define HOOK_DATA(offset) hook_base[offset]
-#define VHOOK_DATA(offset) hook_base[offset]
-#define HOOK_TRIG(funcid) \
-       do { \
-               *((unsigned *) HOOK_TRIG_ADDR) = funcid; \
-       } while (0)
-#define HOOK_DATA_BYTE(offset) ((unsigned char *)hook_base)[offset]
-
-static void hook_init(void)
-{
-       static int first = 1;
-       if (first) {
-               first = 0;
-               hook_base = ioremap(HOOK_MEM_BASE_ADDR, 8192);
-       }
-}
-
-static unsigned hook_trig(unsigned id)
-{
-       unsigned ret;
-
-       /* preempt_disable(); */
-
-       /* Dummy read from mem to make sure data has propagated to memory
-        * before trigging */
-       ret = *hook_base;
-
-       /* trigger hook */
-       HOOK_TRIG(id);
-
-       /* wait for call to finish */
-       while (VHOOK_DATA(0) > 0) ;
-
-       /* extract return value */
-
-       ret = VHOOK_DATA(1);
-
-       return ret;
-}
-
-int hook_call(unsigned id, unsigned pcnt, ...)
-{
-       va_list ap;
-       int i;
-       unsigned ret;
-
-       hook_init();
-
-       HOOK_DATA(0) = id;
-
-       va_start(ap, pcnt);
-       for (i = 1; i <= pcnt; i++)
-               HOOK_DATA(i) = va_arg(ap, unsigned);
-       va_end(ap);
-
-       ret = hook_trig(id);
-
-       return ret;
-}
-
-int hook_call_str(unsigned id, unsigned size, const char *str)
-{
-       int i;
-       unsigned ret;
-
-       hook_init();
-
-       HOOK_DATA(0) = id;
-       HOOK_DATA(1) = size;
-
-       for (i = 0; i < size; i++)
-               HOOK_DATA_BYTE(8 + i) = str[i];
-       HOOK_DATA_BYTE(8 + i) = 0;
-
-       ret = hook_trig(id);
-
-       return ret;
-}
-
-void print_str(const char *str)
-{
-       int i;
-       /* find null at end of string */
-       for (i = 1; str[i]; i++) ;
-       hook_call(hook_print_str, i, str);
-}
-
-void CPU_WATCHDOG_TIMEOUT(unsigned t)
-{
-}
diff --git a/arch/cris/arch-v32/mach-a3/vcs_hook.h b/arch/cris/arch-v32/mach-a3/vcs_hook.h
deleted file mode 100644 (file)
index 8b73d0e..0000000
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Simulator hook call mechanism
- */
-
-#ifndef __hook_h__
-#define __hook_h__
-
-int hook_call(unsigned id, unsigned pcnt, ...);
-int hook_call_str(unsigned id, unsigned size, const char *str);
-
-enum hook_ids {
-  hook_debug_on = 1,
-  hook_debug_off,
-  hook_stop_sim_ok,
-  hook_stop_sim_fail,
-  hook_alloc_shared,
-  hook_ptr_shared,
-  hook_free_shared,
-  hook_file2shared,
-  hook_cmp_shared,
-  hook_print_params,
-  hook_sim_time,
-  hook_stop_sim,
-  hook_kick_dog,
-  hook_dog_timeout,
-  hook_rand,
-  hook_srand,
-  hook_rand_range,
-  hook_print_str,
-  hook_print_hex,
-  hook_cmp_offset_shared,
-  hook_fill_random_shared,
-  hook_alloc_random_data,
-  hook_calloc_random_data,
-  hook_print_int,
-  hook_print_uint,
-  hook_fputc,
-  hook_init_fd,
-  hook_sbrk,
-  hook_print_context_descr,
-  hook_print_data_descr,
-  hook_print_group_descr,
-  hook_fill_shared,
-  hook_sl_srand,
-  hook_sl_rand_irange,
-  hook_sl_rand_urange,
-  hook_sl_sh_malloc_aligned,
-  hook_sl_sh_calloc_aligned,
-  hook_sl_sh_alloc_random_data,
-  hook_sl_sh_file2mem,
-  hook_sl_vera_mbox_handle,
-  hook_sl_vera_mbox_put,
-  hook_sl_vera_mbox_get,
-  hook_sl_system,
-  hook_sl_sh_hexdump
-};
-
-#endif
index 41fa6a6893a9ce2ce94247f95f750d68e3bd6f1a..d366e0891988ad24cbc378c83c0bf8100414df05 100644 (file)
@@ -1,10 +1,8 @@
-# $Id: Makefile,v 1.3 2007/03/13 11:57:46 starvik Exp $
 #
 # Makefile for the linux kernel.
 #
 
 obj-y   := dma.o pinmux.o io.o arbiter.o
-obj-$(CONFIG_ETRAX_VCS_SIM) += vcs_hook.o
 obj-$(CONFIG_CPU_FREQ)   += cpufreq.o
 
 clean:
diff --git a/arch/cris/arch-v32/mach-fs/vcs_hook.c b/arch/cris/arch-v32/mach-fs/vcs_hook.c
deleted file mode 100644 (file)
index b11594a..0000000
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * Call simulator hook. This is the part running in the
- * simulated program.
- */
-
-#include "vcs_hook.h"
-#include <stdarg.h>
-#include <arch-v32/hwregs/reg_map.h>
-#include <arch-v32/hwregs/intr_vect_defs.h>
-
-#define HOOK_TRIG_ADDR     0xb7000000  /* hook cvlog model reg address */
-#define HOOK_MEM_BASE_ADDR 0xa0000000  /* csp4 (shared mem) base addr */
-
-#define HOOK_DATA(offset) ((unsigned *)HOOK_MEM_BASE_ADDR)[offset]
-#define VHOOK_DATA(offset) ((volatile unsigned *)HOOK_MEM_BASE_ADDR)[offset]
-#define HOOK_TRIG(funcid) \
-       do { \
-               *((unsigned *) HOOK_TRIG_ADDR) = funcid; \
-       } while (0)
-#define HOOK_DATA_BYTE(offset) ((unsigned char *)HOOK_MEM_BASE_ADDR)[offset]
-
-int hook_call(unsigned id, unsigned pcnt, ...)
-{
-       va_list ap;
-       unsigned i;
-       unsigned ret;
-#ifdef USING_SOS
-       PREEMPT_OFF_SAVE();
-#endif
-
-       /* pass parameters */
-       HOOK_DATA(0) = id;
-
-       /* Have to make hook_print_str a special case since we call with a
-        * parameter of byte type. Should perhaps be a separate
-        * hook_call. */
-
-       if (id == hook_print_str) {
-               int i;
-               char *str;
-
-               HOOK_DATA(1) = pcnt;
-
-               va_start(ap, pcnt);
-               str = (char *)va_arg(ap, unsigned);
-
-               for (i = 0; i != pcnt; i++)
-                       HOOK_DATA_BYTE(8 + i) = str[i];
-
-               HOOK_DATA_BYTE(8 + i) = 0;      /* null byte */
-       } else {
-               va_start(ap, pcnt);
-               for (i = 1; i <= pcnt; i++)
-                       HOOK_DATA(i) = va_arg(ap, unsigned);
-               va_end(ap);
-       }
-
-       /* read from mem to make sure data has propagated to memory before
-        * trigging */
-       ret = *((volatile unsigned *)HOOK_MEM_BASE_ADDR);
-
-       /* trigger hook */
-       HOOK_TRIG(id);
-
-       /* wait for call to finish */
-       while (VHOOK_DATA(0) > 0) ;
-
-       /* extract return value */
-
-       ret = VHOOK_DATA(1);
-
-#ifdef USING_SOS
-       PREEMPT_RESTORE();
-#endif
-       return ret;
-}
-
-unsigned hook_buf(unsigned i)
-{
-       return (HOOK_DATA(i));
-}
-
-void print_str(const char *str)
-{
-       int i;
-       /* find null at end of string */
-       for (i = 1; str[i]; i++) ;
-       hook_call(hook_print_str, i, str);
-}
-
-void CPU_KICK_DOG(void)
-{
-       (void)hook_call(hook_kick_dog, 0);
-}
-
-void CPU_WATCHDOG_TIMEOUT(unsigned t)
-{
-       (void)hook_call(hook_dog_timeout, 1, t);
-}
-
diff --git a/arch/cris/arch-v32/mach-fs/vcs_hook.h b/arch/cris/arch-v32/mach-fs/vcs_hook.h
deleted file mode 100644 (file)
index c000b9f..0000000
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Call simulator hook functions
- */
-
-#ifndef HOOK_H
-#define HOOK_H
-
-int hook_call(unsigned id, unsigned pcnt, ...);
-
-enum hook_ids {
-  hook_debug_on = 1,
-  hook_debug_off,
-  hook_stop_sim_ok,
-  hook_stop_sim_fail,
-  hook_alloc_shared,
-  hook_ptr_shared,
-  hook_free_shared,
-  hook_file2shared,
-  hook_cmp_shared,
-  hook_print_params,
-  hook_sim_time,
-  hook_stop_sim,
-  hook_kick_dog,
-  hook_dog_timeout,
-  hook_rand,
-  hook_srand,
-  hook_rand_range,
-  hook_print_str,
-  hook_print_hex,
-  hook_cmp_offset_shared,
-  hook_fill_random_shared,
-  hook_alloc_random_data,
-  hook_calloc_random_data,
-  hook_print_int,
-  hook_print_uint,
-  hook_fputc,
-  hook_init_fd,
-  hook_sbrk
-
-};
-
-#endif
index 0768bc409ca88dcb32c7650460e63a18fec4eca1..3deca5253d91be5e448253abec875994ba7b9f9b 100644 (file)
@@ -73,11 +73,7 @@ void __init cris_mmu_init(void)
 #endif
                       REG_STATE(mmu, rw_mm_cfg, seg_c, linear) |
                       REG_STATE(mmu, rw_mm_cfg, seg_b, linear) |
-#ifndef CONFIG_ETRAX_VCS_SIM
                        REG_STATE(mmu, rw_mm_cfg, seg_a, page)   |
-#else
-                      REG_STATE(mmu, rw_mm_cfg, seg_a, linear) |
-#endif
                       REG_STATE(mmu, rw_mm_cfg, seg_9, page)   |
                       REG_STATE(mmu, rw_mm_cfg, seg_8, page)   |
                       REG_STATE(mmu, rw_mm_cfg, seg_7, page)   |
@@ -100,11 +96,7 @@ void __init cris_mmu_init(void)
 #endif
                          REG_FIELD(mmu, rw_mm_kbase_hi, base_c, 0x4) |
                         REG_FIELD(mmu, rw_mm_kbase_hi, base_b, 0xb) |
-#ifndef CONFIG_ETRAX_VCS_SIM
                         REG_FIELD(mmu, rw_mm_kbase_hi, base_a, 0x0) |
-#else
-                         REG_FIELD(mmu, rw_mm_kbase_hi, base_a, 0xa) |
-#endif
                         REG_FIELD(mmu, rw_mm_kbase_hi, base_9, 0x0) |
                         REG_FIELD(mmu, rw_mm_kbase_hi, base_8, 0x0));
 
index 20f1b4806bfe4f6d4a35fd5e3fee626c53195be8..e5b5aab52de81ce29ef5b2994400185e2e617295 100644 (file)
  * selected bit it's possible to convert between KSEG_x and 0x40000000 where the
  * DRAM really resides. DRAM is virtually at 0xc.
  */
-#ifndef CONFIG_ETRAX_VCS_SIM
 #define __pa(x) ((unsigned long)(x) & 0x7fffffff)
 #define __va(x) ((void *)((unsigned long)(x) | 0x80000000))
-#else
-#define __pa(x) ((unsigned long)(x) & 0x3fffffff)
-#define __va(x) ((void *)((unsigned long)(x) | 0xc0000000))
-#endif
 
 #define VM_STACK_DEFAULT_FLAGS (VM_READ | VM_WRITE | \
                                 VM_MAYREAD | VM_MAYWRITE)
index 9603c907fbc4947f4d31077355224fb83638274d..a024b7d32fed9b9066cd66e11fe8ffcd222aba9b 100644 (file)
@@ -21,13 +21,9 @@ struct thread_struct {
 
 /*
  * User-space process size. This is hardcoded into a few places, so don't
- * changed it unless everything's clear!
+ * change it unless everything's clear!
  */
-#ifndef CONFIG_ETRAX_VCS_SIM
 #define TASK_SIZE      (0xB0000000UL)
-#else
-#define TASK_SIZE      (0xA0000000UL)
-#endif
 
 /* CCS I=1, enable interrupts. */
 #define INIT_THREAD { 0, 0, (1 << I_CCS_BITNR) }
index dd1abbdcbc7aff9133e73f98e46dac05546f38f2..96c3b0fb62c12dc8cba32a6bfd054cf4ca2993ee 100644 (file)
        move.d   REG_ADDR(bif_core, regi_bif_core, rw_grp4_cfg), $r0
        move.d   CONFIG_ETRAX_MEM_GRP4_CONFIG, $r1
        move.d   $r1, [$r0]
-#ifdef CONFIG_ETRAX_VCS_SIM
-       ;; Set up minimal flash waitstates
-       move.d 0, $r10
-       move.d REG_ADDR(bif_core, regi_bif_core, rw_grp1_cfg), $r11
-       move.d $r10, [$r11]
-#endif
        .endm
 
 #endif
index 9f1cd56da28cc88884035107faa9497f220b7bd2..146da904cdd84c83fa67d5c3e88ca74f25bd8648 100644 (file)
@@ -19,7 +19,6 @@ extern unsigned long pci_mem_start;
 
 void pcibios_config_init(void);
 struct pci_bus * pcibios_scan_root(int bus);
-int pcibios_assign_resources(void);
 
 void pcibios_set_master(struct pci_dev *dev);
 void pcibios_penalize_isa_irq(int irq);
index 09d5f7fd9db164f28f0bd85ba7348af4834a2c00..3d52a5bbd857702c62baece0a658714a8b3c1632 100644 (file)
 #define set_xen_guest_handle(hnd, val) do { (hnd).p = val; } while (0)
 
 #ifndef __ASSEMBLY__
+/* Explicitly size integers that represent pfns in the public interface
+ * with Xen so that we could have one ABI that works for 32 and 64 bit
+ * guests. */
+typedef unsigned long xen_pfn_t;
 /* Guest handles for primitive C types. */
 __DEFINE_GUEST_HANDLE(uchar, unsigned char);
 __DEFINE_GUEST_HANDLE(uint, unsigned int);
@@ -79,7 +83,6 @@ DEFINE_GUEST_HANDLE(void);
 DEFINE_GUEST_HANDLE(uint64_t);
 DEFINE_GUEST_HANDLE(uint32_t);
 
-typedef unsigned long xen_pfn_t;
 DEFINE_GUEST_HANDLE(xen_pfn_t);
 #define PRI_xen_pfn    "lx"
 #endif
@@ -265,6 +268,8 @@ typedef struct xen_callback xen_callback_t;
 
 #endif /* !__ASSEMBLY__ */
 
+#include <asm/pvclock-abi.h>
+
 /* Size of the shared_info area (this is not related to page size).  */
 #define XSI_SHIFT                      14
 #define XSI_SIZE                       (1 << XSI_SHIFT)
index 1c2e894067219a9c0d68cae7e00db6e5b6749a0a..9392e021c93be2cd371d487ba2c56b72af0432a1 100644 (file)
@@ -158,7 +158,8 @@ mca_handler_bh(unsigned long paddr, void *iip, unsigned long ipsr)
        ia64_mlogbuf_dump();
        printk(KERN_ERR "OS_MCA: process [cpu %d, pid: %d, uid: %d, "
                "iip: %p, psr: 0x%lx,paddr: 0x%lx](%s) encounters MCA.\n",
-              raw_smp_processor_id(), current->pid, current_uid(),
+              raw_smp_processor_id(), current->pid,
+               from_kuid(&init_user_ns, current_uid()),
                iip, ipsr, paddr, current->comm);
 
        spin_lock(&mca_bh_lock);
index 3fa4bc536953c9494eabc776b1c77a980cb2e148..f388b4e18a37f8b5868a4cbbd422f64d9a9c24b3 100644 (file)
@@ -2306,7 +2306,7 @@ pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t
         * partially initialize the vma for the sampling buffer
         */
        vma->vm_mm           = mm;
-       vma->vm_file         = filp;
+       vma->vm_file         = get_file(filp);
        vma->vm_flags        = VM_READ| VM_MAYREAD |VM_RESERVED;
        vma->vm_page_prot    = PAGE_READONLY; /* XXX may need to change */
 
@@ -2345,8 +2345,6 @@ pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t
                goto error;
        }
 
-       get_file(filp);
-
        /*
         * now insert the vma in the vm list for the process, must be
         * done with mmap lock held
@@ -2380,8 +2378,8 @@ static int
 pfm_bad_permissions(struct task_struct *task)
 {
        const struct cred *tcred;
-       uid_t uid = current_uid();
-       gid_t gid = current_gid();
+       kuid_t uid = current_uid();
+       kgid_t gid = current_gid();
        int ret;
 
        rcu_read_lock();
@@ -2389,20 +2387,20 @@ pfm_bad_permissions(struct task_struct *task)
 
        /* inspired by ptrace_attach() */
        DPRINT(("cur: uid=%d gid=%d task: euid=%d suid=%d uid=%d egid=%d sgid=%d\n",
-               uid,
-               gid,
-               tcred->euid,
-               tcred->suid,
-               tcred->uid,
-               tcred->egid,
-               tcred->sgid));
-
-       ret = ((uid != tcred->euid)
-              || (uid != tcred->suid)
-              || (uid != tcred->uid)
-              || (gid != tcred->egid)
-              || (gid != tcred->sgid)
-              || (gid != tcred->gid)) && !capable(CAP_SYS_PTRACE);
+               from_kuid(&init_user_ns, uid),
+               from_kgid(&init_user_ns, gid),
+               from_kuid(&init_user_ns, tcred->euid),
+               from_kuid(&init_user_ns, tcred->suid),
+               from_kuid(&init_user_ns, tcred->uid),
+               from_kgid(&init_user_ns, tcred->egid),
+               from_kgid(&init_user_ns, tcred->sgid)));
+
+       ret = ((!uid_eq(uid, tcred->euid))
+              || (!uid_eq(uid, tcred->suid))
+              || (!uid_eq(uid, tcred->uid))
+              || (!gid_eq(gid, tcred->egid))
+              || (!gid_eq(gid, tcred->sgid))
+              || (!gid_eq(gid, tcred->gid))) && !capable(CAP_SYS_PTRACE);
 
        rcu_read_unlock();
        return ret;
@@ -4782,7 +4780,7 @@ recheck:
 asmlinkage long
 sys_perfmonctl (int fd, int cmd, void __user *arg, int count)
 {
-       struct file *file = NULL;
+       struct fd f = {NULL, 0};
        pfm_context_t *ctx = NULL;
        unsigned long flags = 0UL;
        void *args_k = NULL;
@@ -4879,17 +4877,17 @@ restart_args:
 
        ret = -EBADF;
 
-       file = fget(fd);
-       if (unlikely(file == NULL)) {
+       f = fdget(fd);
+       if (unlikely(f.file == NULL)) {
                DPRINT(("invalid fd %d\n", fd));
                goto error_args;
        }
-       if (unlikely(PFM_IS_FILE(file) == 0)) {
+       if (unlikely(PFM_IS_FILE(f.file) == 0)) {
                DPRINT(("fd %d not related to perfmon\n", fd));
                goto error_args;
        }
 
-       ctx = file->private_data;
+       ctx = f.file->private_data;
        if (unlikely(ctx == NULL)) {
                DPRINT(("no context for fd %d\n", fd));
                goto error_args;
@@ -4919,8 +4917,8 @@ abort_locked:
        if (call_made && PFM_CMD_RW_ARG(cmd) && copy_to_user(arg, args_k, base_sz*count)) ret = -EFAULT;
 
 error_args:
-       if (file)
-               fput(file);
+       if (f.file)
+               fdput(f);
 
        kfree(args_k);
 
index a199be1fe619bc12d00a9e87198b6f919a9f2c74..37dd79511cbeb548c67b4f255ff0b1fcb2c69196 100644 (file)
@@ -220,7 +220,7 @@ ia64_rt_sigreturn (struct sigscratch *scr)
        si.si_errno = 0;
        si.si_code = SI_KERNEL;
        si.si_pid = task_pid_vnr(current);
-       si.si_uid = current_uid();
+       si.si_uid = from_kuid_munged(current_user_ns(), current_uid());
        si.si_addr = sc;
        force_sig_info(SIGSEGV, &si, current);
        return retval;
@@ -317,7 +317,7 @@ force_sigsegv_info (int sig, void __user *addr)
        si.si_errno = 0;
        si.si_code = SI_KERNEL;
        si.si_pid = task_pid_vnr(current);
-       si.si_uid = current_uid();
+       si.si_uid = from_kuid_munged(current_user_ns(), current_uid());
        si.si_addr = addr;
        force_sig_info(SIGSEGV, &si, current);
        return 0;
index e93fdae10b2313d1034647e422411f8f7eb12e40..90d3109c82f402df0356d43be23d9d965af68630 100644 (file)
@@ -67,7 +67,6 @@ CONFIG_NETFILTER_XT_TARGET_DSCP=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
-CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
 CONFIG_NETFILTER_XT_TARGET_TRACE=m
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
 CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
index 66b26c1e848c5cc7a714c8a989088455e2cf8a64..8f4f657fdbc67987daf0bcba1948390e43f6c163 100644 (file)
@@ -67,7 +67,6 @@ CONFIG_NETFILTER_XT_TARGET_DSCP=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
-CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
 CONFIG_NETFILTER_XT_TARGET_TRACE=m
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
 CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
index 1513325159802ee618a518975cb74b895ab324e9..4571d33903fed1c1a1cc75f165e0c250e598183b 100644 (file)
@@ -65,7 +65,6 @@ CONFIG_NETFILTER_XT_TARGET_DSCP=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
-CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
 CONFIG_NETFILTER_XT_TARGET_TRACE=m
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
 CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
index 67bb6fc117f4fbb01f839a9d0a3b4aa43dbe3d62..12f211733ba02e8f58aa121e777b4b99c7cd6631 100644 (file)
@@ -65,7 +65,6 @@ CONFIG_NETFILTER_XT_TARGET_DSCP=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
-CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
 CONFIG_NETFILTER_XT_TARGET_TRACE=m
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
 CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
index 3e35ce5fa467cc1cbc34222df58a12f65c132601..215389a5407fa215af1501ed2ea7d9a4d95fca5a 100644 (file)
@@ -66,7 +66,6 @@ CONFIG_NETFILTER_XT_TARGET_DSCP=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
-CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
 CONFIG_NETFILTER_XT_TARGET_TRACE=m
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
 CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
index ae81e2d190c325fcfaab20e49769d42ee9c86859..cb9dfb30b6747c1fb472c0292af103021f23f001 100644 (file)
@@ -61,7 +61,6 @@ CONFIG_NETFILTER_XT_TARGET_DSCP=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
-CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
 CONFIG_NETFILTER_XT_TARGET_TRACE=m
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
 CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
index 55d394edf63341a73ca712384b9863edf3fa5d7c..8d5def4a31e026e657ea78429a3fa36ac3d4fe42 100644 (file)
@@ -80,7 +80,6 @@ CONFIG_NETFILTER_XT_TARGET_DSCP=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
-CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
 CONFIG_NETFILTER_XT_TARGET_TRACE=m
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
 CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
index af773743ee11e0557174f2870dab1a456105c981..e2af46f530c1c589d94e429149a78bc934f7d9f3 100644 (file)
@@ -64,7 +64,6 @@ CONFIG_NETFILTER_XT_TARGET_DSCP=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
-CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
 CONFIG_NETFILTER_XT_TARGET_TRACE=m
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
 CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
index cdb70d66e53569a40d8a03554acdc57f1b887e1b..7c9402b2097fcb89b81202970bb1bc88a6ef90d4 100644 (file)
@@ -65,7 +65,6 @@ CONFIG_NETFILTER_XT_TARGET_DSCP=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
-CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
 CONFIG_NETFILTER_XT_TARGET_TRACE=m
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
 CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
index 46bed78d0656484bdd9e90916df875a4a6227f06..19d23db690a4789bcf3d9369850170bd6c9f40bd 100644 (file)
@@ -61,7 +61,6 @@ CONFIG_NETFILTER_XT_TARGET_DSCP=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
-CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
 CONFIG_NETFILTER_XT_TARGET_TRACE=m
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
 CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
index 86f7772bafbedd99bcf7e06d13db7e138579628e..ca6c0b4cab7754be95b0ed9bf8564cd243287abb 100644 (file)
@@ -62,7 +62,6 @@ CONFIG_NETFILTER_XT_TARGET_DSCP=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
-CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
 CONFIG_NETFILTER_XT_TARGET_TRACE=m
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
 CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
index 288261456e1fd5f2d1dfce5dc648f16100ff04a4..c80941c7759e2f7530c61b2b3a73545dea1892af 100644 (file)
@@ -62,7 +62,6 @@ CONFIG_NETFILTER_XT_TARGET_DSCP=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
-CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
 CONFIG_NETFILTER_XT_TARGET_TRACE=m
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
 CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
index 6cd5a519ce5c26338b67a3f2c88238fd53aff5c6..80e012fa409c8da0ee7a7d65b541aa35e424abc6 100644 (file)
@@ -56,7 +56,6 @@ CONFIG_NF_CONNTRACK_MARK=y
 CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_IRC=m
 CONFIG_NF_CONNTRACK_TFTP=m
-CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
 CONFIG_NETFILTER_XT_MATCH_LIMIT=m
 CONFIG_NETFILTER_XT_MATCH_MAC=m
index ad15fb10322b28f8ccf0fbc8d29427ef0606ab93..b6fde2bb51b635390c88db8a118430233710e4cf 100644 (file)
@@ -96,7 +96,6 @@ CONFIG_NETFILTER_XT_TARGET_DSCP=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
-CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
 CONFIG_NETFILTER_XT_TARGET_TRACE=m
 CONFIG_NETFILTER_XT_TARGET_SECMARK=m
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
index d1606569b0019d735e571f6652c52816ce34c6f7..936ec5a5ed8d66c4f9f67c522c5b07234276cfa9 100644 (file)
@@ -87,7 +87,6 @@ CONFIG_NETFILTER_XT_TARGET_DSCP=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
-CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
 CONFIG_NETFILTER_XT_TARGET_TPROXY=m
 CONFIG_NETFILTER_XT_TARGET_TRACE=m
 CONFIG_NETFILTER_XT_TARGET_SECMARK=m
index 92a60aecad5ceaac2450ca3692fa15419a26b0f0..0315ee37a20bc3e4c4c035185da68445f22dea94 100644 (file)
@@ -60,7 +60,6 @@ CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
-CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
 CONFIG_NETFILTER_XT_TARGET_SECMARK=m
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
 CONFIG_NETFILTER_XT_MATCH_COMMENT=m
index 5527abbb7dea56ea634a0ab1f32c628f373163e0..cd732e5b4fd5f856df3f46a0a2ee83d0d7491f66 100644 (file)
@@ -86,7 +86,6 @@ CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
-CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
 CONFIG_NETFILTER_XT_TARGET_TPROXY=m
 CONFIG_NETFILTER_XT_TARGET_TRACE=m
 CONFIG_NETFILTER_XT_TARGET_SECMARK=m
index 9c9a123016c056eb535193be96fff1d8eee76e56..636f82b89fd30e97ed54baa75c3bca623c46745f 100644 (file)
@@ -59,7 +59,6 @@ CONFIG_NETFILTER_XT_TARGET_DSCP=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
-CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
 CONFIG_NETFILTER_XT_TARGET_SECMARK=m
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
 CONFIG_NETFILTER_XT_MATCH_COMMENT=m
index 28c6b276c21624a4550c2411042065936464d19c..84624b17b76918f9e76b57bbe64ba6988ffd1adb 100644 (file)
@@ -108,7 +108,6 @@ CONFIG_NETFILTER_XT_TARGET_DSCP=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
-CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
 CONFIG_NETFILTER_XT_TARGET_TPROXY=m
 CONFIG_NETFILTER_XT_TARGET_TRACE=m
 CONFIG_NETFILTER_XT_TARGET_SECMARK=m
index 138f698d7c00048c4b172718891bac49ef778b61..44b473420d5198f86d5d77cdc99f7e50975fd990 100644 (file)
@@ -109,7 +109,6 @@ CONFIG_NETFILTER_XT_TARGET_DSCP=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
-CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
 CONFIG_NETFILTER_XT_TARGET_TPROXY=m
 CONFIG_NETFILTER_XT_TARGET_TRACE=m
 CONFIG_NETFILTER_XT_TARGET_SECMARK=m
index 2c0230e76d20562d8035d56c6f8749d062eef110..59d9d2fdcd48880a833d73027e5da1eb02f3830a 100644 (file)
@@ -68,7 +68,6 @@ CONFIG_NETFILTER_XT_TARGET_DSCP=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
-CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
 CONFIG_NETFILTER_XT_TARGET_SECMARK=m
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
 CONFIG_NETFILTER_XT_MATCH_COMMENT=m
index c71eb6c7989707d0fbbc609a8e27ec5261204b25..6785de7bd2a0298c58f025bda474b037e3ea07f9 100644 (file)
@@ -109,33 +109,32 @@ Efault:
 
 int hpux_getdents(unsigned int fd, struct hpux_dirent __user *dirent, unsigned int count)
 {
-       struct file * file;
+       struct fd arg;
        struct hpux_dirent __user * lastdirent;
        struct getdents_callback buf;
-       int error = -EBADF;
+       int error;
 
-       file = fget(fd);
-       if (!file)
-               goto out;
+       arg = fdget(fd);
+       if (!arg.file)
+               return -EBADF;
 
        buf.current_dir = dirent;
        buf.previous = NULL;
        buf.count = count;
        buf.error = 0;
 
-       error = vfs_readdir(file, filldir, &buf);
+       error = vfs_readdir(arg.file, filldir, &buf);
        if (error >= 0)
                error = buf.error;
        lastdirent = buf.previous;
        if (lastdirent) {
-               if (put_user(file->f_pos, &lastdirent->d_off))
+               if (put_user(arg.file->f_pos, &lastdirent->d_off))
                        error = -EFAULT;
                else
                        error = count - buf.count;
        }
 
-       fput(file);
-out:
+       fdput(arg);
        return error;
 }
 
index f8b394a76ac3bc4788724cc4c7078d8854a71c1e..29767a8dfea5173a5cf852f005d20fa60b7998bd 100644 (file)
@@ -55,7 +55,6 @@ CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
-CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
 CONFIG_NETFILTER_XT_TARGET_TRACE=m
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
 CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
index db27c82e0542e44ebab171cee08701c0424fc68d..06b56245d78c090b0007668fff2b3446cb52ac0e 100644 (file)
@@ -92,7 +92,6 @@ CONFIG_NETFILTER_XT_TARGET_DSCP=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
-CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
 CONFIG_NETFILTER_XT_TARGET_TPROXY=m
 CONFIG_NETFILTER_XT_TARGET_TRACE=m
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
index 7bd1763877babeb272b7e3b6a962330de7f6d91e..f55c27609fc6a8feca61ed7f3a7fe1648a169841 100644 (file)
@@ -66,7 +66,6 @@ CONFIG_NETFILTER_XT_TARGET_DSCP=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
-CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
 CONFIG_NETFILTER_XT_TARGET_TPROXY=m
 CONFIG_NETFILTER_XT_TARGET_TRACE=m
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
index c47f2becfbc303c63a5e76b208ad4263a34b7563..be1cb6ea3a36191e6e34e7b9d0bd046028e32060 100644 (file)
@@ -167,7 +167,6 @@ CONFIG_NETFILTER_XT_TARGET_DSCP=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
-CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
 CONFIG_NETFILTER_XT_TARGET_TPROXY=m
 CONFIG_NETFILTER_XT_TARGET_TRACE=m
 CONFIG_NETFILTER_XT_TARGET_SECMARK=m
index 559ae1ee67061ca08ccbd69c24aa60e1019dc1a1..840838769853e653f1ee950d9fdb4ed619f54ede 100644 (file)
@@ -189,7 +189,7 @@ SYSCALL_SPU(getcwd)
 SYSCALL_SPU(capget)
 SYSCALL_SPU(capset)
 COMPAT_SYS(sigaltstack)
-SYSX_SPU(sys_sendfile64,compat_sys_sendfile,sys_sendfile)
+SYSX_SPU(sys_sendfile,compat_sys_sendfile_wrapper,sys_sendfile)
 SYSCALL(ni_syscall)
 SYSCALL(ni_syscall)
 PPC_SYS(vfork)
@@ -229,7 +229,7 @@ COMPAT_SYS_SPU(sched_setaffinity)
 COMPAT_SYS_SPU(sched_getaffinity)
 SYSCALL(ni_syscall)
 SYSCALL(ni_syscall)
-SYS32ONLY(sendfile64)
+SYSX(sys_ni_syscall,compat_sys_sendfile64_wrapper,sys_sendfile64)
 COMPAT_SYS_SPU(io_setup)
 SYSCALL_SPU(io_destroy)
 COMPAT_SYS_SPU(io_getevents)
index bd377a368611913b55e2ef272da6772540f39215..c683fa350add7f5f34547ff88f58233f1724c338 100644 (file)
 #define __ARCH_WANT_COMPAT_SYS_TIME
 #define __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND
 #define __ARCH_WANT_SYS_NEWFSTATAT
+#define __ARCH_WANT_COMPAT_SYS_SENDFILE
 #endif
 
 /*
index 0794a3017b1b53e65e4d1aa325b79711a9fb04b1..e144498bcddda0471c96ca52e412f476aba7dd07 100644 (file)
@@ -1623,6 +1623,63 @@ static void __init prom_instantiate_rtas(void)
 }
 
 #ifdef CONFIG_PPC64
+/*
+ * Allocate room for and instantiate Stored Measurement Log (SML)
+ */
+static void __init prom_instantiate_sml(void)
+{
+       phandle ibmvtpm_node;
+       ihandle ibmvtpm_inst;
+       u32 entry = 0, size = 0;
+       u64 base;
+
+       prom_debug("prom_instantiate_sml: start...\n");
+
+       ibmvtpm_node = call_prom("finddevice", 1, 1, ADDR("/ibm,vtpm"));
+       prom_debug("ibmvtpm_node: %x\n", ibmvtpm_node);
+       if (!PHANDLE_VALID(ibmvtpm_node))
+               return;
+
+       ibmvtpm_inst = call_prom("open", 1, 1, ADDR("/ibm,vtpm"));
+       if (!IHANDLE_VALID(ibmvtpm_inst)) {
+               prom_printf("opening vtpm package failed (%x)\n", ibmvtpm_inst);
+               return;
+       }
+
+       if (call_prom_ret("call-method", 2, 2, &size,
+                         ADDR("sml-get-handover-size"),
+                         ibmvtpm_inst) != 0 || size == 0) {
+               prom_printf("SML get handover size failed\n");
+               return;
+       }
+
+       base = alloc_down(size, PAGE_SIZE, 0);
+       if (base == 0)
+               prom_panic("Could not allocate memory for sml\n");
+
+       prom_printf("instantiating sml at 0x%x...", base);
+
+       if (call_prom_ret("call-method", 4, 2, &entry,
+                         ADDR("sml-handover"),
+                         ibmvtpm_inst, size, base) != 0 || entry == 0) {
+               prom_printf("SML handover failed\n");
+               return;
+       }
+       prom_printf(" done\n");
+
+       reserve_mem(base, size);
+
+       prom_setprop(ibmvtpm_node, "/ibm,vtpm", "linux,sml-base",
+                    &base, sizeof(base));
+       prom_setprop(ibmvtpm_node, "/ibm,vtpm", "linux,sml-size",
+                    &size, sizeof(size));
+
+       prom_debug("sml base     = 0x%x\n", base);
+       prom_debug("sml size     = 0x%x\n", (long)size);
+
+       prom_debug("prom_instantiate_sml: end...\n");
+}
+
 /*
  * Allocate room for and initialize TCE tables
  */
@@ -2916,6 +2973,11 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
                prom_instantiate_opal();
 #endif
 
+#ifdef CONFIG_PPC64
+       /* instantiate sml */
+       prom_instantiate_sml();
+#endif
+
        /*
         * On non-powermacs, put all CPUs in spin-loops.
         *
index 81c570633ead9b95424a06410ee08337b076f65a..abd1112da54f40b1b24a08a6a0732373ad95ef0e 100644 (file)
@@ -143,48 +143,17 @@ long compat_sys_ipc(u32 call, u32 first, u32 second, u32 third, compat_uptr_t pt
  * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
  * and the register representation of a signed int (msr in 64-bit mode) is performed.
  */
-asmlinkage long compat_sys_sendfile(u32 out_fd, u32 in_fd, compat_off_t __user * offset, u32 count)
+asmlinkage long compat_sys_sendfile_wrapper(u32 out_fd, u32 in_fd,
+                                           compat_off_t __user *offset, u32 count)
 {
-       mm_segment_t old_fs = get_fs();
-       int ret;
-       off_t of;
-       off_t __user *up;
-
-       if (offset && get_user(of, offset))
-               return -EFAULT;
-
-       /* The __user pointer cast is valid because of the set_fs() */          
-       set_fs(KERNEL_DS);
-       up = offset ? (off_t __user *) &of : NULL;
-       ret = sys_sendfile((int)out_fd, (int)in_fd, up, count);
-       set_fs(old_fs);
-       
-       if (offset && put_user(of, offset))
-               return -EFAULT;
-               
-       return ret;
+       return compat_sys_sendfile((int)out_fd, (int)in_fd, offset, count);
 }
 
-asmlinkage int compat_sys_sendfile64(int out_fd, int in_fd, compat_loff_t __user *offset, s32 count)
+asmlinkage long compat_sys_sendfile64_wrapper(u32 out_fd, u32 in_fd,
+                                             compat_loff_t __user *offset, u32 count)
 {
-       mm_segment_t old_fs = get_fs();
-       int ret;
-       loff_t lof;
-       loff_t __user *up;
-       
-       if (offset && get_user(lof, offset))
-               return -EFAULT;
-               
-       /* The __user pointer cast is valid because of the set_fs() */          
-       set_fs(KERNEL_DS);
-       up = offset ? (loff_t __user *) &lof : NULL;
-       ret = sys_sendfile64(out_fd, in_fd, up, count);
-       set_fs(old_fs);
-       
-       if (offset && put_user(lof, offset))
-               return -EFAULT;
-               
-       return ret;
+       return sys_sendfile((int)out_fd, (int)in_fd,
+                           (off_t __user *)offset, count);
 }
 
 long compat_sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
index 08ffcf52a8564211d609bc2cf1fa8dfc3a6e8678..e5f028b5794e6a69498184f9b3b7b617b0701bbb 100644 (file)
@@ -470,7 +470,7 @@ bad_area_nosemaphore:
        if (is_exec && (error_code & DSISR_PROTFAULT))
                printk_ratelimited(KERN_CRIT "kernel tried to execute NX-protected"
                                   " page (%lx) - exploit attempt? (uid: %d)\n",
-                                  address, current_uid());
+                                  address, from_kuid(&init_user_ns, current_uid()));
 
        return SIGSEGV;
 
index 23bc9db4317e80a869c3a47074ef20b0b08f6c02..82607d621aca41880194996c578a2bfef9393edf 100644 (file)
@@ -76,7 +76,7 @@ static void spu_gov_work(struct work_struct *work)
 static void spu_gov_init_work(struct spu_gov_info_struct *info)
 {
        int delay = usecs_to_jiffies(info->poll_int);
-       INIT_DELAYED_WORK_DEFERRABLE(&info->work, spu_gov_work);
+       INIT_DEFERRABLE_WORK(&info->work, spu_gov_work);
        schedule_delayed_work_on(info->policy->cpu, &info->work, delay);
 }
 
index 714bbfc3162c9be85568b4f268e827311f91281f..db4e638cf4081cd813eeed56d7203548eda19134 100644 (file)
@@ -69,8 +69,6 @@ SYSCALL_DEFINE4(spu_create, const char __user *, name, unsigned int, flags,
        umode_t, mode, int, neighbor_fd)
 {
        long ret;
-       struct file *neighbor;
-       int fput_needed;
        struct spufs_calls *calls;
 
        calls = spufs_calls_get();
@@ -78,11 +76,11 @@ SYSCALL_DEFINE4(spu_create, const char __user *, name, unsigned int, flags,
                return -ENOSYS;
 
        if (flags & SPU_CREATE_AFFINITY_SPU) {
+               struct fd neighbor = fdget(neighbor_fd);
                ret = -EBADF;
-               neighbor = fget_light(neighbor_fd, &fput_needed);
-               if (neighbor) {
-                       ret = calls->create_thread(name, flags, mode, neighbor);
-                       fput_light(neighbor, fput_needed);
+               if (neighbor.file) {
+                       ret = calls->create_thread(name, flags, mode, neighbor.file);
+                       fdput(neighbor);
                }
        } else
                ret = calls->create_thread(name, flags, mode, NULL);
@@ -94,8 +92,7 @@ SYSCALL_DEFINE4(spu_create, const char __user *, name, unsigned int, flags,
 asmlinkage long sys_spu_run(int fd, __u32 __user *unpc, __u32 __user *ustatus)
 {
        long ret;
-       struct file *filp;
-       int fput_needed;
+       struct fd arg;
        struct spufs_calls *calls;
 
        calls = spufs_calls_get();
@@ -103,10 +100,10 @@ asmlinkage long sys_spu_run(int fd, __u32 __user *unpc, __u32 __user *ustatus)
                return -ENOSYS;
 
        ret = -EBADF;
-       filp = fget_light(fd, &fput_needed);
-       if (filp) {
-               ret = calls->spu_run(filp, unpc, ustatus);
-               fput_light(filp, fput_needed);
+       arg = fdget(fd);
+       if (arg.file) {
+               ret = calls->spu_run(arg.file, unpc, ustatus);
+               fdput(arg);
        }
 
        spufs_calls_put(calls);
index c2c5b078ba80457dab11d485297ba04c1b5090ef..657e3f233a6405d14527821d998ba969418806ab 100644 (file)
@@ -106,6 +106,17 @@ static int spufs_ctx_note_size(struct spu_context *ctx, int dfd)
        return total;
 }
 
+static int match_context(const void *v, struct file *file, unsigned fd)
+{
+       struct spu_context *ctx;
+       if (file->f_op != &spufs_context_fops)
+               return 0;
+       ctx = SPUFS_I(file->f_dentry->d_inode)->i_ctx;
+       if (ctx->flags & SPU_CREATE_NOSCHED)
+               return 0;
+       return fd + 1;
+}
+
 /*
  * The additional architecture-specific notes for Cell are various
  * context files in the spu context.
@@ -115,29 +126,18 @@ static int spufs_ctx_note_size(struct spu_context *ctx, int dfd)
  * internal functionality to dump them without needing to actually
  * open the files.
  */
+/*
+ * descriptor table is not shared, so files can't change or go away.
+ */
 static struct spu_context *coredump_next_context(int *fd)
 {
-       struct fdtable *fdt = files_fdtable(current->files);
        struct file *file;
-       struct spu_context *ctx = NULL;
-
-       for (; *fd < fdt->max_fds; (*fd)++) {
-               if (!fd_is_open(*fd, fdt))
-                       continue;
-
-               file = fcheck(*fd);
-
-               if (!file || file->f_op != &spufs_context_fops)
-                       continue;
-
-               ctx = SPUFS_I(file->f_dentry->d_inode)->i_ctx;
-               if (ctx->flags & SPU_CREATE_NOSCHED)
-                       continue;
-
-               break;
-       }
-
-       return ctx;
+       int n = iterate_fd(current->files, *fd, match_context, NULL);
+       if (!n)
+               return NULL;
+       *fd = n - 1;
+       file = fcheck(*fd);
+       return SPUFS_I(file->f_dentry->d_inode)->i_ctx;
 }
 
 int spufs_coredump_extra_notes_size(void)
index 6767b437a103aa2eb9d46fc19df6732f188f040b..06ea69bd387a00c7b31a4894d356383d9322426f 100644 (file)
@@ -31,8 +31,8 @@ static struct dentry *hypfs_create_update_file(struct super_block *sb,
                                               struct dentry *dir);
 
 struct hypfs_sb_info {
-       uid_t uid;                      /* uid used for files and dirs */
-       gid_t gid;                      /* gid used for files and dirs */
+       kuid_t uid;                     /* uid used for files and dirs */
+       kgid_t gid;                     /* gid used for files and dirs */
        struct dentry *update_file;     /* file to trigger update */
        time_t last_update;             /* last update time in secs since 1970 */
        struct mutex lock;              /* lock to protect update process */
@@ -72,8 +72,6 @@ static void hypfs_remove(struct dentry *dentry)
        struct dentry *parent;
 
        parent = dentry->d_parent;
-       if (!parent || !parent->d_inode)
-               return;
        mutex_lock(&parent->d_inode->i_mutex);
        if (hypfs_positive(dentry)) {
                if (S_ISDIR(dentry->d_inode->i_mode))
@@ -229,6 +227,8 @@ static int hypfs_parse_options(char *options, struct super_block *sb)
 {
        char *str;
        substring_t args[MAX_OPT_ARGS];
+       kuid_t uid;
+       kgid_t gid;
 
        if (!options)
                return 0;
@@ -243,12 +243,18 @@ static int hypfs_parse_options(char *options, struct super_block *sb)
                case opt_uid:
                        if (match_int(&args[0], &option))
                                return -EINVAL;
-                       hypfs_info->uid = option;
+                       uid = make_kuid(current_user_ns(), option);
+                       if (!uid_valid(uid))
+                               return -EINVAL;
+                       hypfs_info->uid = uid;
                        break;
                case opt_gid:
                        if (match_int(&args[0], &option))
                                return -EINVAL;
-                       hypfs_info->gid = option;
+                       gid = make_kgid(current_user_ns(), option);
+                       if (!gid_valid(gid))
+                               return -EINVAL;
+                       hypfs_info->gid = gid;
                        break;
                case opt_err:
                default:
@@ -263,8 +269,8 @@ static int hypfs_show_options(struct seq_file *s, struct dentry *root)
 {
        struct hypfs_sb_info *hypfs_info = root->d_sb->s_fs_info;
 
-       seq_printf(s, ",uid=%u", hypfs_info->uid);
-       seq_printf(s, ",gid=%u", hypfs_info->gid);
+       seq_printf(s, ",uid=%u", from_kuid_munged(&init_user_ns, hypfs_info->uid));
+       seq_printf(s, ",gid=%u", from_kgid_munged(&init_user_ns, hypfs_info->gid));
        return 0;
 }
 
index f606d935f4950dcbec6fca5f67b88ac70760dcda..189963c90c6eb09deb498947f057ae6ad3450f12 100644 (file)
@@ -131,13 +131,19 @@ asmlinkage long sys32_setresuid16(u16 ruid, u16 euid, u16 suid)
                low2highuid(suid));
 }
 
-asmlinkage long sys32_getresuid16(u16 __user *ruid, u16 __user *euid, u16 __user *suid)
+asmlinkage long sys32_getresuid16(u16 __user *ruidp, u16 __user *euidp, u16 __user *suidp)
 {
+       const struct cred *cred = current_cred();
        int retval;
+       u16 ruid, euid, suid;
 
-       if (!(retval = put_user(high2lowuid(current->cred->uid), ruid)) &&
-           !(retval = put_user(high2lowuid(current->cred->euid), euid)))
-               retval = put_user(high2lowuid(current->cred->suid), suid);
+       ruid = high2lowuid(from_kuid_munged(cred->user_ns, cred->uid));
+       euid = high2lowuid(from_kuid_munged(cred->user_ns, cred->euid));
+       suid = high2lowuid(from_kuid_munged(cred->user_ns, cred->suid));
+
+       if (!(retval   = put_user(ruid, ruidp)) &&
+           !(retval   = put_user(euid, euidp)))
+               retval = put_user(suid, suidp);
 
        return retval;
 }
@@ -148,13 +154,19 @@ asmlinkage long sys32_setresgid16(u16 rgid, u16 egid, u16 sgid)
                low2highgid(sgid));
 }
 
-asmlinkage long sys32_getresgid16(u16 __user *rgid, u16 __user *egid, u16 __user *sgid)
+asmlinkage long sys32_getresgid16(u16 __user *rgidp, u16 __user *egidp, u16 __user *sgidp)
 {
+       const struct cred *cred = current_cred();
        int retval;
+       u16 rgid, egid, sgid;
+
+       rgid = high2lowgid(from_kgid_munged(cred->user_ns, cred->gid));
+       egid = high2lowgid(from_kgid_munged(cred->user_ns, cred->egid));
+       sgid = high2lowgid(from_kgid_munged(cred->user_ns, cred->sgid));
 
-       if (!(retval = put_user(high2lowgid(current->cred->gid), rgid)) &&
-           !(retval = put_user(high2lowgid(current->cred->egid), egid)))
-               retval = put_user(high2lowgid(current->cred->sgid), sgid);
+       if (!(retval   = put_user(rgid, rgidp)) &&
+           !(retval   = put_user(egid, egidp)))
+               retval = put_user(sgid, sgidp);
 
        return retval;
 }
@@ -258,22 +270,22 @@ asmlinkage long sys32_setgroups16(int gidsetsize, u16 __user *grouplist)
 
 asmlinkage long sys32_getuid16(void)
 {
-       return high2lowuid(current->cred->uid);
+       return high2lowuid(from_kuid_munged(current_user_ns(), current_uid()));
 }
 
 asmlinkage long sys32_geteuid16(void)
 {
-       return high2lowuid(current->cred->euid);
+       return high2lowuid(from_kuid_munged(current_user_ns(), current_euid()));
 }
 
 asmlinkage long sys32_getgid16(void)
 {
-       return high2lowgid(current->cred->gid);
+       return high2lowgid(from_kgid_munged(current_user_ns(), current_gid()));
 }
 
 asmlinkage long sys32_getegid16(void)
 {
-       return high2lowgid(current->cred->egid);
+       return high2lowgid(from_kgid_munged(current_user_ns(), current_egid()));
 }
 
 /*
index 637b79b096576d556e42b5d20cd31d856dd3465f..5bfb341cc5c4ad71eefe29a8f5175f715d07c59b 100644 (file)
@@ -107,7 +107,7 @@ static int switch_drv_remove(struct platform_device *pdev)
                device_remove_file(&pdev->dev, &dev_attr_switch);
 
        platform_set_drvdata(pdev, NULL);
-       flush_work_sync(&psw->work);
+       flush_work(&psw->work);
        del_timer_sync(&psw->debounce);
        free_irq(irq, pdev);
 
index 5cd01161fd00efef142d5a6bf27e563bfa8440fb..675afa285ddb75d66d849f2d07188f330711d362 100644 (file)
@@ -6,3 +6,4 @@ obj-y += kernel/
 obj-y += mm/
 obj-y += math-emu/
 obj-y += net/
+obj-y += crypto/
diff --git a/arch/sparc/crypto/Makefile b/arch/sparc/crypto/Makefile
new file mode 100644 (file)
index 0000000..6ae1ad5
--- /dev/null
@@ -0,0 +1,25 @@
+#
+# Arch-specific CryptoAPI modules.
+#
+
+obj-$(CONFIG_CRYPTO_SHA1_SPARC64) += sha1-sparc64.o
+obj-$(CONFIG_CRYPTO_SHA256_SPARC64) += sha256-sparc64.o
+obj-$(CONFIG_CRYPTO_SHA512_SPARC64) += sha512-sparc64.o
+obj-$(CONFIG_CRYPTO_MD5_SPARC64) += md5-sparc64.o
+
+obj-$(CONFIG_CRYPTO_AES_SPARC64) += aes-sparc64.o
+obj-$(CONFIG_CRYPTO_DES_SPARC64) += des-sparc64.o
+obj-$(CONFIG_CRYPTO_DES_SPARC64) += camellia-sparc64.o
+
+obj-$(CONFIG_CRYPTO_CRC32C_SPARC64) += crc32c-sparc64.o
+
+sha1-sparc64-y := sha1_asm.o sha1_glue.o crop_devid.o
+sha256-sparc64-y := sha256_asm.o sha256_glue.o crop_devid.o
+sha512-sparc64-y := sha512_asm.o sha512_glue.o crop_devid.o
+md5-sparc64-y := md5_asm.o md5_glue.o crop_devid.o
+
+aes-sparc64-y := aes_asm.o aes_glue.o crop_devid.o
+des-sparc64-y := des_asm.o des_glue.o crop_devid.o
+camellia-sparc64-y := camellia_asm.o camellia_glue.o crop_devid.o
+
+crc32c-sparc64-y := crc32c_asm.o crc32c_glue.o crop_devid.o
diff --git a/arch/sparc/crypto/aes_asm.S b/arch/sparc/crypto/aes_asm.S
new file mode 100644 (file)
index 0000000..23f6cbb
--- /dev/null
@@ -0,0 +1,1535 @@
+#include <linux/linkage.h>
+#include <asm/visasm.h>
+
+#include "opcodes.h"
+
+#define ENCRYPT_TWO_ROUNDS(KEY_BASE, I0, I1, T0, T1) \
+       AES_EROUND01(KEY_BASE +  0, I0, I1, T0) \
+       AES_EROUND23(KEY_BASE +  2, I0, I1, T1) \
+       AES_EROUND01(KEY_BASE +  4, T0, T1, I0) \
+       AES_EROUND23(KEY_BASE +  6, T0, T1, I1)
+
+#define ENCRYPT_TWO_ROUNDS_2(KEY_BASE, I0, I1, I2, I3, T0, T1, T2, T3) \
+       AES_EROUND01(KEY_BASE +  0, I0, I1, T0) \
+       AES_EROUND23(KEY_BASE +  2, I0, I1, T1) \
+       AES_EROUND01(KEY_BASE +  0, I2, I3, T2) \
+       AES_EROUND23(KEY_BASE +  2, I2, I3, T3) \
+       AES_EROUND01(KEY_BASE +  4, T0, T1, I0) \
+       AES_EROUND23(KEY_BASE +  6, T0, T1, I1) \
+       AES_EROUND01(KEY_BASE +  4, T2, T3, I2) \
+       AES_EROUND23(KEY_BASE +  6, T2, T3, I3)
+
+#define ENCRYPT_TWO_ROUNDS_LAST(KEY_BASE, I0, I1, T0, T1) \
+       AES_EROUND01(KEY_BASE +  0, I0, I1, T0) \
+       AES_EROUND23(KEY_BASE +  2, I0, I1, T1) \
+       AES_EROUND01_L(KEY_BASE +  4, T0, T1, I0) \
+       AES_EROUND23_L(KEY_BASE +  6, T0, T1, I1)
+
+#define ENCRYPT_TWO_ROUNDS_LAST_2(KEY_BASE, I0, I1, I2, I3, T0, T1, T2, T3) \
+       AES_EROUND01(KEY_BASE +  0, I0, I1, T0) \
+       AES_EROUND23(KEY_BASE +  2, I0, I1, T1) \
+       AES_EROUND01(KEY_BASE +  0, I2, I3, T2) \
+       AES_EROUND23(KEY_BASE +  2, I2, I3, T3) \
+       AES_EROUND01_L(KEY_BASE +  4, T0, T1, I0) \
+       AES_EROUND23_L(KEY_BASE +  6, T0, T1, I1) \
+       AES_EROUND01_L(KEY_BASE +  4, T2, T3, I2) \
+       AES_EROUND23_L(KEY_BASE +  6, T2, T3, I3)
+
+       /* 10 rounds */
+#define ENCRYPT_128(KEY_BASE, I0, I1, T0, T1) \
+       ENCRYPT_TWO_ROUNDS(KEY_BASE +  0, I0, I1, T0, T1) \
+       ENCRYPT_TWO_ROUNDS(KEY_BASE +  8, I0, I1, T0, T1) \
+       ENCRYPT_TWO_ROUNDS(KEY_BASE + 16, I0, I1, T0, T1) \
+       ENCRYPT_TWO_ROUNDS(KEY_BASE + 24, I0, I1, T0, T1) \
+       ENCRYPT_TWO_ROUNDS_LAST(KEY_BASE + 32, I0, I1, T0, T1)
+
+#define ENCRYPT_128_2(KEY_BASE, I0, I1, I2, I3, T0, T1, T2, T3) \
+       ENCRYPT_TWO_ROUNDS_2(KEY_BASE +  0, I0, I1, I2, I3, T0, T1, T2, T3) \
+       ENCRYPT_TWO_ROUNDS_2(KEY_BASE +  8, I0, I1, I2, I3, T0, T1, T2, T3) \
+       ENCRYPT_TWO_ROUNDS_2(KEY_BASE + 16, I0, I1, I2, I3, T0, T1, T2, T3) \
+       ENCRYPT_TWO_ROUNDS_2(KEY_BASE + 24, I0, I1, I2, I3, T0, T1, T2, T3) \
+       ENCRYPT_TWO_ROUNDS_LAST_2(KEY_BASE + 32, I0, I1, I2, I3, T0, T1, T2, T3)
+
+       /* 12 rounds */
+#define ENCRYPT_192(KEY_BASE, I0, I1, T0, T1) \
+       ENCRYPT_TWO_ROUNDS(KEY_BASE +  0, I0, I1, T0, T1) \
+       ENCRYPT_TWO_ROUNDS(KEY_BASE +  8, I0, I1, T0, T1) \
+       ENCRYPT_TWO_ROUNDS(KEY_BASE + 16, I0, I1, T0, T1) \
+       ENCRYPT_TWO_ROUNDS(KEY_BASE + 24, I0, I1, T0, T1) \
+       ENCRYPT_TWO_ROUNDS(KEY_BASE + 32, I0, I1, T0, T1) \
+       ENCRYPT_TWO_ROUNDS_LAST(KEY_BASE + 40, I0, I1, T0, T1)
+
+#define ENCRYPT_192_2(KEY_BASE, I0, I1, I2, I3, T0, T1, T2, T3) \
+       ENCRYPT_TWO_ROUNDS_2(KEY_BASE +  0, I0, I1, I2, I3, T0, T1, T2, T3) \
+       ENCRYPT_TWO_ROUNDS_2(KEY_BASE +  8, I0, I1, I2, I3, T0, T1, T2, T3) \
+       ENCRYPT_TWO_ROUNDS_2(KEY_BASE + 16, I0, I1, I2, I3, T0, T1, T2, T3) \
+       ENCRYPT_TWO_ROUNDS_2(KEY_BASE + 24, I0, I1, I2, I3, T0, T1, T2, T3) \
+       ENCRYPT_TWO_ROUNDS_2(KEY_BASE + 32, I0, I1, I2, I3, T0, T1, T2, T3) \
+       ENCRYPT_TWO_ROUNDS_LAST_2(KEY_BASE + 40, I0, I1, I2, I3, T0, T1, T2, T3)
+
+       /* 14 rounds */
+#define ENCRYPT_256(KEY_BASE, I0, I1, T0, T1) \
+       ENCRYPT_TWO_ROUNDS(KEY_BASE +  0, I0, I1, T0, T1) \
+       ENCRYPT_TWO_ROUNDS(KEY_BASE +  8, I0, I1, T0, T1) \
+       ENCRYPT_TWO_ROUNDS(KEY_BASE + 16, I0, I1, T0, T1) \
+       ENCRYPT_TWO_ROUNDS(KEY_BASE + 24, I0, I1, T0, T1) \
+       ENCRYPT_TWO_ROUNDS(KEY_BASE + 32, I0, I1, T0, T1) \
+       ENCRYPT_TWO_ROUNDS(KEY_BASE + 40, I0, I1, T0, T1) \
+       ENCRYPT_TWO_ROUNDS_LAST(KEY_BASE + 48, I0, I1, T0, T1)
+
+#define ENCRYPT_256_TWO_ROUNDS_2(KEY_BASE, I0, I1, I2, I3, TMP_BASE) \
+       ENCRYPT_TWO_ROUNDS_2(KEY_BASE, I0, I1, I2, I3, \
+                            TMP_BASE + 0, TMP_BASE + 2, TMP_BASE + 4, TMP_BASE + 6)
+
+#define ENCRYPT_256_2(KEY_BASE, I0, I1, I2, I3) \
+       ENCRYPT_256_TWO_ROUNDS_2(KEY_BASE +  0, I0, I1, I2, I3, KEY_BASE + 48) \
+       ldd     [%o0 + 0xd0], %f56; \
+       ldd     [%o0 + 0xd8], %f58; \
+       ENCRYPT_256_TWO_ROUNDS_2(KEY_BASE +  8, I0, I1, I2, I3, KEY_BASE +  0) \
+       ldd     [%o0 + 0xe0], %f60; \
+       ldd     [%o0 + 0xe8], %f62; \
+       ENCRYPT_256_TWO_ROUNDS_2(KEY_BASE + 16, I0, I1, I2, I3, KEY_BASE +  0) \
+       ENCRYPT_256_TWO_ROUNDS_2(KEY_BASE + 24, I0, I1, I2, I3, KEY_BASE +  0) \
+       ENCRYPT_256_TWO_ROUNDS_2(KEY_BASE + 32, I0, I1, I2, I3, KEY_BASE +  0) \
+       ENCRYPT_256_TWO_ROUNDS_2(KEY_BASE + 40, I0, I1, I2, I3, KEY_BASE +  0) \
+       AES_EROUND01(KEY_BASE +  48, I0, I1, KEY_BASE + 0) \
+       AES_EROUND23(KEY_BASE +  50, I0, I1, KEY_BASE + 2) \
+       AES_EROUND01(KEY_BASE +  48, I2, I3, KEY_BASE + 4) \
+       AES_EROUND23(KEY_BASE +  50, I2, I3, KEY_BASE + 6) \
+       AES_EROUND01_L(KEY_BASE +  52, KEY_BASE + 0, KEY_BASE + 2, I0) \
+       AES_EROUND23_L(KEY_BASE +  54, KEY_BASE + 0, KEY_BASE + 2, I1) \
+       ldd     [%o0 + 0x10], %f8; \
+       ldd     [%o0 + 0x18], %f10; \
+       AES_EROUND01_L(KEY_BASE +  52, KEY_BASE + 4, KEY_BASE + 6, I2) \
+       AES_EROUND23_L(KEY_BASE +  54, KEY_BASE + 4, KEY_BASE + 6, I3) \
+       ldd     [%o0 + 0x20], %f12; \
+       ldd     [%o0 + 0x28], %f14;
+
+#define DECRYPT_TWO_ROUNDS(KEY_BASE, I0, I1, T0, T1) \
+       AES_DROUND23(KEY_BASE +  0, I0, I1, T1) \
+       AES_DROUND01(KEY_BASE +  2, I0, I1, T0) \
+       AES_DROUND23(KEY_BASE +  4, T0, T1, I1) \
+       AES_DROUND01(KEY_BASE +  6, T0, T1, I0)
+
+#define DECRYPT_TWO_ROUNDS_2(KEY_BASE, I0, I1, I2, I3, T0, T1, T2, T3) \
+       AES_DROUND23(KEY_BASE +  0, I0, I1, T1) \
+       AES_DROUND01(KEY_BASE +  2, I0, I1, T0) \
+       AES_DROUND23(KEY_BASE +  0, I2, I3, T3) \
+       AES_DROUND01(KEY_BASE +  2, I2, I3, T2) \
+       AES_DROUND23(KEY_BASE +  4, T0, T1, I1) \
+       AES_DROUND01(KEY_BASE +  6, T0, T1, I0) \
+       AES_DROUND23(KEY_BASE +  4, T2, T3, I3) \
+       AES_DROUND01(KEY_BASE +  6, T2, T3, I2)
+
+#define DECRYPT_TWO_ROUNDS_LAST(KEY_BASE, I0, I1, T0, T1) \
+       AES_DROUND23(KEY_BASE +  0, I0, I1, T1) \
+       AES_DROUND01(KEY_BASE +  2, I0, I1, T0) \
+       AES_DROUND23_L(KEY_BASE +  4, T0, T1, I1) \
+       AES_DROUND01_L(KEY_BASE +  6, T0, T1, I0)
+
+#define DECRYPT_TWO_ROUNDS_LAST_2(KEY_BASE, I0, I1, I2, I3, T0, T1, T2, T3) \
+       AES_DROUND23(KEY_BASE +  0, I0, I1, T1) \
+       AES_DROUND01(KEY_BASE +  2, I0, I1, T0) \
+       AES_DROUND23(KEY_BASE +  0, I2, I3, T3) \
+       AES_DROUND01(KEY_BASE +  2, I2, I3, T2) \
+       AES_DROUND23_L(KEY_BASE +  4, T0, T1, I1) \
+       AES_DROUND01_L(KEY_BASE +  6, T0, T1, I0) \
+       AES_DROUND23_L(KEY_BASE +  4, T2, T3, I3) \
+       AES_DROUND01_L(KEY_BASE +  6, T2, T3, I2)
+
+       /* 10 rounds */
+#define DECRYPT_128(KEY_BASE, I0, I1, T0, T1) \
+       DECRYPT_TWO_ROUNDS(KEY_BASE +  0, I0, I1, T0, T1) \
+       DECRYPT_TWO_ROUNDS(KEY_BASE +  8, I0, I1, T0, T1) \
+       DECRYPT_TWO_ROUNDS(KEY_BASE + 16, I0, I1, T0, T1) \
+       DECRYPT_TWO_ROUNDS(KEY_BASE + 24, I0, I1, T0, T1) \
+       DECRYPT_TWO_ROUNDS_LAST(KEY_BASE + 32, I0, I1, T0, T1)
+
+#define DECRYPT_128_2(KEY_BASE, I0, I1, I2, I3, T0, T1, T2, T3) \
+       DECRYPT_TWO_ROUNDS_2(KEY_BASE +  0, I0, I1, I2, I3, T0, T1, T2, T3) \
+       DECRYPT_TWO_ROUNDS_2(KEY_BASE +  8, I0, I1, I2, I3, T0, T1, T2, T3) \
+       DECRYPT_TWO_ROUNDS_2(KEY_BASE + 16, I0, I1, I2, I3, T0, T1, T2, T3) \
+       DECRYPT_TWO_ROUNDS_2(KEY_BASE + 24, I0, I1, I2, I3, T0, T1, T2, T3) \
+       DECRYPT_TWO_ROUNDS_LAST_2(KEY_BASE + 32, I0, I1, I2, I3, T0, T1, T2, T3)
+
+       /* 12 rounds */
+#define DECRYPT_192(KEY_BASE, I0, I1, T0, T1) \
+       DECRYPT_TWO_ROUNDS(KEY_BASE +  0, I0, I1, T0, T1) \
+       DECRYPT_TWO_ROUNDS(KEY_BASE +  8, I0, I1, T0, T1) \
+       DECRYPT_TWO_ROUNDS(KEY_BASE + 16, I0, I1, T0, T1) \
+       DECRYPT_TWO_ROUNDS(KEY_BASE + 24, I0, I1, T0, T1) \
+       DECRYPT_TWO_ROUNDS(KEY_BASE + 32, I0, I1, T0, T1) \
+       DECRYPT_TWO_ROUNDS_LAST(KEY_BASE + 40, I0, I1, T0, T1)
+
+#define DECRYPT_192_2(KEY_BASE, I0, I1, I2, I3, T0, T1, T2, T3) \
+       DECRYPT_TWO_ROUNDS_2(KEY_BASE +  0, I0, I1, I2, I3, T0, T1, T2, T3) \
+       DECRYPT_TWO_ROUNDS_2(KEY_BASE +  8, I0, I1, I2, I3, T0, T1, T2, T3) \
+       DECRYPT_TWO_ROUNDS_2(KEY_BASE + 16, I0, I1, I2, I3, T0, T1, T2, T3) \
+       DECRYPT_TWO_ROUNDS_2(KEY_BASE + 24, I0, I1, I2, I3, T0, T1, T2, T3) \
+       DECRYPT_TWO_ROUNDS_2(KEY_BASE + 32, I0, I1, I2, I3, T0, T1, T2, T3) \
+       DECRYPT_TWO_ROUNDS_LAST_2(KEY_BASE + 40, I0, I1, I2, I3, T0, T1, T2, T3)
+
+       /* 14 rounds */
+       /* AES-256 decryption of one block (I0/I1): six double-rounds at
+        * key offsets 0..40, final pair at offset 48. */
+#define DECRYPT_256(KEY_BASE, I0, I1, T0, T1) \
+       DECRYPT_TWO_ROUNDS(KEY_BASE +  0, I0, I1, T0, T1) \
+       DECRYPT_TWO_ROUNDS(KEY_BASE +  8, I0, I1, T0, T1) \
+       DECRYPT_TWO_ROUNDS(KEY_BASE + 16, I0, I1, T0, T1) \
+       DECRYPT_TWO_ROUNDS(KEY_BASE + 24, I0, I1, T0, T1) \
+       DECRYPT_TWO_ROUNDS(KEY_BASE + 32, I0, I1, T0, T1) \
+       DECRYPT_TWO_ROUNDS(KEY_BASE + 40, I0, I1, T0, T1) \
+       DECRYPT_TWO_ROUNDS_LAST(KEY_BASE + 48, I0, I1, T0, T1)
+
+       /* Helper: one parallel double-round with its four temporaries
+        * taken from four consecutive even regs starting at TMP_BASE. */
+#define DECRYPT_256_TWO_ROUNDS_2(KEY_BASE, I0, I1, I2, I3, TMP_BASE) \
+       DECRYPT_TWO_ROUNDS_2(KEY_BASE, I0, I1, I2, I3, \
+                            TMP_BASE + 0, TMP_BASE + 2, TMP_BASE + 4, TMP_BASE + 6)
+
+       /* Two-block AES-256 decryption.  Unlike the 128/192 variants this
+        * macro also issues ldd's from [%o0 + ...]: the 256-bit schedule
+        * does not fit in the float registers, so key material is
+        * re-fetched mid-stream (into %f56-%f62 and %f8-%f14).
+        * NOTE(review): this hard-codes %o0 as the key-schedule pointer —
+        * callers must guarantee that; confirm against the C glue.
+        */
+#define DECRYPT_256_2(KEY_BASE, I0, I1, I2, I3) \
+       DECRYPT_256_TWO_ROUNDS_2(KEY_BASE +  0, I0, I1, I2, I3, KEY_BASE + 48) \
+       ldd     [%o0 + 0x18], %f56; \
+       ldd     [%o0 + 0x10], %f58; \
+       DECRYPT_256_TWO_ROUNDS_2(KEY_BASE +  8, I0, I1, I2, I3, KEY_BASE +  0) \
+       ldd     [%o0 + 0x08], %f60; \
+       ldd     [%o0 + 0x00], %f62; \
+       DECRYPT_256_TWO_ROUNDS_2(KEY_BASE + 16, I0, I1, I2, I3, KEY_BASE +  0) \
+       DECRYPT_256_TWO_ROUNDS_2(KEY_BASE + 24, I0, I1, I2, I3, KEY_BASE +  0) \
+       DECRYPT_256_TWO_ROUNDS_2(KEY_BASE + 32, I0, I1, I2, I3, KEY_BASE +  0) \
+       DECRYPT_256_TWO_ROUNDS_2(KEY_BASE + 40, I0, I1, I2, I3, KEY_BASE +  0) \
+       AES_DROUND23(KEY_BASE +  48, I0, I1, KEY_BASE + 2) \
+       AES_DROUND01(KEY_BASE +  50, I0, I1, KEY_BASE + 0) \
+       AES_DROUND23(KEY_BASE +  48, I2, I3, KEY_BASE + 6) \
+       AES_DROUND01(KEY_BASE +  50, I2, I3, KEY_BASE + 4) \
+       AES_DROUND23_L(KEY_BASE +  52, KEY_BASE + 0, KEY_BASE + 2, I1) \
+       AES_DROUND01_L(KEY_BASE +  54, KEY_BASE + 0, KEY_BASE + 2, I0) \
+       ldd     [%o0 + 0xd8], %f8; \
+       ldd     [%o0 + 0xd0], %f10; \
+       AES_DROUND23_L(KEY_BASE +  52, KEY_BASE + 4, KEY_BASE + 6, I3) \
+       AES_DROUND01_L(KEY_BASE +  54, KEY_BASE + 4, KEY_BASE + 6, I2) \
+       ldd     [%o0 + 0xc8], %f12; \
+       ldd     [%o0 + 0xc0], %f14;
+
+       .align  32
+ENTRY(aes_sparc64_key_expand)
+       /* %o0=input_key, %o1=output_key, %o2=key_len */
+       /* Expand the raw AES key into the full round-key schedule using
+        * the AES_KEXPAND{0,1,2} opcodes.  key_len is compared against
+        * 24 — presumably bytes (24 == AES-192); TODO confirm caller.
+        * Dispatch: < 24 -> label 2 (128-bit), == 24 -> label 1
+        * (192-bit), > 24 -> fall through (256-bit). */
+       VISEntry
+       ld      [%o0 + 0x00], %f0
+       ld      [%o0 + 0x04], %f1
+       ld      [%o0 + 0x08], %f2
+       ld      [%o0 + 0x0c], %f3
+
+       /* First 16 key bytes are copied to the schedule verbatim. */
+       std     %f0, [%o1 + 0x00]
+       std     %f2, [%o1 + 0x08]
+       add     %o1, 0x10, %o1
+
+       cmp     %o2, 24
+       bl      2f
+        nop
+
+       be      1f
+        nop
+
+       /* 256-bit key expansion */
+       ld      [%o0 + 0x10], %f4
+       ld      [%o0 + 0x14], %f5
+       ld      [%o0 + 0x18], %f6
+       ld      [%o0 + 0x1c], %f7
+
+       std     %f4, [%o1 + 0x00]
+       std     %f6, [%o1 + 0x08]
+       add     %o1, 0x10, %o1
+
+       /* Generate the remaining round keys into %f8..%f58. */
+       AES_KEXPAND1(0, 6, 0x0, 8)
+       AES_KEXPAND2(2, 8, 10)
+       AES_KEXPAND0(4, 10, 12)
+       AES_KEXPAND2(6, 12, 14)
+       AES_KEXPAND1(8, 14, 0x1, 16)
+       AES_KEXPAND2(10, 16, 18)
+       AES_KEXPAND0(12, 18, 20)
+       AES_KEXPAND2(14, 20, 22)
+       AES_KEXPAND1(16, 22, 0x2, 24)
+       AES_KEXPAND2(18, 24, 26)
+       AES_KEXPAND0(20, 26, 28)
+       AES_KEXPAND2(22, 28, 30)
+       AES_KEXPAND1(24, 30, 0x3, 32)
+       AES_KEXPAND2(26, 32, 34)
+       AES_KEXPAND0(28, 34, 36)
+       AES_KEXPAND2(30, 36, 38)
+       AES_KEXPAND1(32, 38, 0x4, 40)
+       AES_KEXPAND2(34, 40, 42)
+       AES_KEXPAND0(36, 42, 44)
+       AES_KEXPAND2(38, 44, 46)
+       AES_KEXPAND1(40, 46, 0x5, 48)
+       AES_KEXPAND2(42, 48, 50)
+       AES_KEXPAND0(44, 50, 52)
+       AES_KEXPAND2(46, 52, 54)
+       AES_KEXPAND1(48, 54, 0x6, 56)
+       AES_KEXPAND2(50, 56, 58)
+
+       /* Spill generated round keys to the output schedule. */
+       std     %f8, [%o1 + 0x00]
+       std     %f10, [%o1 + 0x08]
+       std     %f12, [%o1 + 0x10]
+       std     %f14, [%o1 + 0x18]
+       std     %f16, [%o1 + 0x20]
+       std     %f18, [%o1 + 0x28]
+       std     %f20, [%o1 + 0x30]
+       std     %f22, [%o1 + 0x38]
+       std     %f24, [%o1 + 0x40]
+       std     %f26, [%o1 + 0x48]
+       std     %f28, [%o1 + 0x50]
+       std     %f30, [%o1 + 0x58]
+       std     %f32, [%o1 + 0x60]
+       std     %f34, [%o1 + 0x68]
+       std     %f36, [%o1 + 0x70]
+       std     %f38, [%o1 + 0x78]
+       std     %f40, [%o1 + 0x80]
+       std     %f42, [%o1 + 0x88]
+       std     %f44, [%o1 + 0x90]
+       std     %f46, [%o1 + 0x98]
+       std     %f48, [%o1 + 0xa0]
+       std     %f50, [%o1 + 0xa8]
+       std     %f52, [%o1 + 0xb0]
+       std     %f54, [%o1 + 0xb8]
+       std     %f56, [%o1 + 0xc0]
+       ba,pt   %xcc, 80f
+        std    %f58, [%o1 + 0xc8]
+
+1:     
+       /* 192-bit key expansion */
+       ld      [%o0 + 0x10], %f4
+       ld      [%o0 + 0x14], %f5
+
+       std     %f4, [%o1 + 0x00]
+       add     %o1, 0x08, %o1
+
+       AES_KEXPAND1(0, 4, 0x0, 6)
+       AES_KEXPAND2(2, 6, 8)
+       AES_KEXPAND2(4, 8, 10)
+       AES_KEXPAND1(6, 10, 0x1, 12)
+       AES_KEXPAND2(8, 12, 14)
+       AES_KEXPAND2(10, 14, 16)
+       AES_KEXPAND1(12, 16, 0x2, 18)
+       AES_KEXPAND2(14, 18, 20)
+       AES_KEXPAND2(16, 20, 22)
+       AES_KEXPAND1(18, 22, 0x3, 24)
+       AES_KEXPAND2(20, 24, 26)
+       AES_KEXPAND2(22, 26, 28)
+       AES_KEXPAND1(24, 28, 0x4, 30)
+       AES_KEXPAND2(26, 30, 32)
+       AES_KEXPAND2(28, 32, 34)
+       AES_KEXPAND1(30, 34, 0x5, 36)
+       AES_KEXPAND2(32, 36, 38)
+       AES_KEXPAND2(34, 38, 40)
+       AES_KEXPAND1(36, 40, 0x6, 42)
+       AES_KEXPAND2(38, 42, 44)
+       AES_KEXPAND2(40, 44, 46)
+       AES_KEXPAND1(42, 46, 0x7, 48)
+       AES_KEXPAND2(44, 48, 50)
+
+       std     %f6, [%o1 + 0x00]
+       std     %f8, [%o1 + 0x08]
+       std     %f10, [%o1 + 0x10]
+       std     %f12, [%o1 + 0x18]
+       std     %f14, [%o1 + 0x20]
+       std     %f16, [%o1 + 0x28]
+       std     %f18, [%o1 + 0x30]
+       std     %f20, [%o1 + 0x38]
+       std     %f22, [%o1 + 0x40]
+       std     %f24, [%o1 + 0x48]
+       std     %f26, [%o1 + 0x50]
+       std     %f28, [%o1 + 0x58]
+       std     %f30, [%o1 + 0x60]
+       std     %f32, [%o1 + 0x68]
+       std     %f34, [%o1 + 0x70]
+       std     %f36, [%o1 + 0x78]
+       std     %f38, [%o1 + 0x80]
+       std     %f40, [%o1 + 0x88]
+       std     %f42, [%o1 + 0x90]
+       std     %f44, [%o1 + 0x98]
+       std     %f46, [%o1 + 0xa0]
+       std     %f48, [%o1 + 0xa8]
+       ba,pt   %xcc, 80f
+        std    %f50, [%o1 + 0xb0]
+
+2:
+       /* 128-bit key expansion */
+       AES_KEXPAND1(0, 2, 0x0, 4)
+       AES_KEXPAND2(2, 4, 6)
+       AES_KEXPAND1(4, 6, 0x1, 8)
+       AES_KEXPAND2(6, 8, 10)
+       AES_KEXPAND1(8, 10, 0x2, 12)
+       AES_KEXPAND2(10, 12, 14)
+       AES_KEXPAND1(12, 14, 0x3, 16)
+       AES_KEXPAND2(14, 16, 18)
+       AES_KEXPAND1(16, 18, 0x4, 20)
+       AES_KEXPAND2(18, 20, 22)
+       AES_KEXPAND1(20, 22, 0x5, 24)
+       AES_KEXPAND2(22, 24, 26)
+       AES_KEXPAND1(24, 26, 0x6, 28)
+       AES_KEXPAND2(26, 28, 30)
+       AES_KEXPAND1(28, 30, 0x7, 32)
+       AES_KEXPAND2(30, 32, 34)
+       AES_KEXPAND1(32, 34, 0x8, 36)
+       AES_KEXPAND2(34, 36, 38)
+       AES_KEXPAND1(36, 38, 0x9, 40)
+       AES_KEXPAND2(38, 40, 42)
+
+       std     %f4, [%o1 + 0x00]
+       std     %f6, [%o1 + 0x08]
+       std     %f8, [%o1 + 0x10]
+       std     %f10, [%o1 + 0x18]
+       std     %f12, [%o1 + 0x20]
+       std     %f14, [%o1 + 0x28]
+       std     %f16, [%o1 + 0x30]
+       std     %f18, [%o1 + 0x38]
+       std     %f20, [%o1 + 0x40]
+       std     %f22, [%o1 + 0x48]
+       std     %f24, [%o1 + 0x50]
+       std     %f26, [%o1 + 0x58]
+       std     %f28, [%o1 + 0x60]
+       std     %f30, [%o1 + 0x68]
+       std     %f32, [%o1 + 0x70]
+       std     %f34, [%o1 + 0x78]
+       std     %f36, [%o1 + 0x80]
+       std     %f38, [%o1 + 0x88]
+       std     %f40, [%o1 + 0x90]
+       std     %f42, [%o1 + 0x98]
+80:    /* common exit for all three key sizes */
+       retl
+        VISExit
+ENDPROC(aes_sparc64_key_expand)
+
+       .align          32
+ENTRY(aes_sparc64_encrypt_128)
+       /* %o0=key, %o1=input, %o2=output */
+       /* Single-block AES-128 encrypt: load plaintext into %f4-%f7,
+        * whitening key (offsets 0x00/0x08) into %f8/%f10, the ten round
+        * keys (0x10..0xa8) into %f12-%f50, xor-whiten, then run the
+        * ENCRYPT_128 round macro starting at reg 12. */
+       VISEntry
+       ld              [%o1 + 0x00], %f4
+       ld              [%o1 + 0x04], %f5
+       ld              [%o1 + 0x08], %f6
+       ld              [%o1 + 0x0c], %f7
+       ldd             [%o0 + 0x00], %f8
+       ldd             [%o0 + 0x08], %f10
+       ldd             [%o0 + 0x10], %f12
+       ldd             [%o0 + 0x18], %f14
+       ldd             [%o0 + 0x20], %f16
+       ldd             [%o0 + 0x28], %f18
+       ldd             [%o0 + 0x30], %f20
+       ldd             [%o0 + 0x38], %f22
+       ldd             [%o0 + 0x40], %f24
+       ldd             [%o0 + 0x48], %f26
+       ldd             [%o0 + 0x50], %f28
+       ldd             [%o0 + 0x58], %f30
+       ldd             [%o0 + 0x60], %f32
+       ldd             [%o0 + 0x68], %f34
+       ldd             [%o0 + 0x70], %f36
+       ldd             [%o0 + 0x78], %f38
+       ldd             [%o0 + 0x80], %f40
+       ldd             [%o0 + 0x88], %f42
+       ldd             [%o0 + 0x90], %f44
+       ldd             [%o0 + 0x98], %f46
+       ldd             [%o0 + 0xa0], %f48
+       ldd             [%o0 + 0xa8], %f50
+       /* Initial AddRoundKey (whitening). */
+       fxor            %f8, %f4, %f4
+       fxor            %f10, %f6, %f6
+       ENCRYPT_128(12, 4, 6, 0, 2)
+       st              %f4, [%o2 + 0x00]
+       st              %f5, [%o2 + 0x04]
+       st              %f6, [%o2 + 0x08]
+       st              %f7, [%o2 + 0x0c]
+       retl
+        VISExit
+ENDPROC(aes_sparc64_encrypt_128)
+
+       .align          32
+ENTRY(aes_sparc64_encrypt_192)
+       /* %o0=key, %o1=input, %o2=output */
+       /* Single-block AES-192 encrypt: whiten, run two extra rounds
+        * while advancing %o0 by 0x20, then finish with the same
+        * ten-round ENCRYPT_128 tail as the 128-bit path. */
+       VISEntry
+       ld              [%o1 + 0x00], %f4
+       ld              [%o1 + 0x04], %f5
+       ld              [%o1 + 0x08], %f6
+       ld              [%o1 + 0x0c], %f7
+
+       ldd             [%o0 + 0x00], %f8
+       ldd             [%o0 + 0x08], %f10
+
+       /* Initial AddRoundKey (whitening). */
+       fxor            %f8, %f4, %f4
+       fxor            %f10, %f6, %f6
+
+       ldd             [%o0 + 0x10], %f8
+       ldd             [%o0 + 0x18], %f10
+       ldd             [%o0 + 0x20], %f12
+       ldd             [%o0 + 0x28], %f14
+       add             %o0, 0x20, %o0
+
+       /* The two extra rounds that distinguish AES-192 from AES-128. */
+       ENCRYPT_TWO_ROUNDS(8, 4, 6, 0, 2)
+
+       /* Remaining ten round keys into %f12-%f50 (relative to the
+        * advanced %o0). */
+       ldd             [%o0 + 0x10], %f12
+       ldd             [%o0 + 0x18], %f14
+       ldd             [%o0 + 0x20], %f16
+       ldd             [%o0 + 0x28], %f18
+       ldd             [%o0 + 0x30], %f20
+       ldd             [%o0 + 0x38], %f22
+       ldd             [%o0 + 0x40], %f24
+       ldd             [%o0 + 0x48], %f26
+       ldd             [%o0 + 0x50], %f28
+       ldd             [%o0 + 0x58], %f30
+       ldd             [%o0 + 0x60], %f32
+       ldd             [%o0 + 0x68], %f34
+       ldd             [%o0 + 0x70], %f36
+       ldd             [%o0 + 0x78], %f38
+       ldd             [%o0 + 0x80], %f40
+       ldd             [%o0 + 0x88], %f42
+       ldd             [%o0 + 0x90], %f44
+       ldd             [%o0 + 0x98], %f46
+       ldd             [%o0 + 0xa0], %f48
+       ldd             [%o0 + 0xa8], %f50
+
+
+       ENCRYPT_128(12, 4, 6, 0, 2)
+
+       st              %f4, [%o2 + 0x00]
+       st              %f5, [%o2 + 0x04]
+       st              %f6, [%o2 + 0x08]
+       st              %f7, [%o2 + 0x0c]
+
+       retl
+        VISExit
+ENDPROC(aes_sparc64_encrypt_192)
+
+       .align          32
+ENTRY(aes_sparc64_encrypt_256)
+       /* %o0=key, %o1=input, %o2=output */
+       /* Single-block AES-256 encrypt: whiten, run two ENCRYPT_TWO_ROUNDS
+        * passes (the four extra rounds vs AES-128), advancing %o0 by
+        * 0x20 each time, then finish with the ten-round ENCRYPT_128
+        * tail — same structure as the 192-bit path with one more pass. */
+       VISEntry
+       ld              [%o1 + 0x00], %f4
+       ld              [%o1 + 0x04], %f5
+       ld              [%o1 + 0x08], %f6
+       ld              [%o1 + 0x0c], %f7
+
+       ldd             [%o0 + 0x00], %f8
+       ldd             [%o0 + 0x08], %f10
+
+       /* Initial AddRoundKey (whitening). */
+       fxor            %f8, %f4, %f4
+       fxor            %f10, %f6, %f6
+
+       ldd             [%o0 + 0x10], %f8
+
+       ldd             [%o0 + 0x18], %f10
+       ldd             [%o0 + 0x20], %f12
+       ldd             [%o0 + 0x28], %f14
+       add             %o0, 0x20, %o0
+
+       ENCRYPT_TWO_ROUNDS(8, 4, 6, 0, 2)
+
+       ldd             [%o0 + 0x10], %f8
+
+       ldd             [%o0 + 0x18], %f10
+       ldd             [%o0 + 0x20], %f12
+       ldd             [%o0 + 0x28], %f14
+       add             %o0, 0x20, %o0
+
+       ENCRYPT_TWO_ROUNDS(8, 4, 6, 0, 2)
+
+       /* Remaining ten round keys into %f12-%f50 (relative to the
+        * twice-advanced %o0). */
+       ldd             [%o0 + 0x10], %f12
+       ldd             [%o0 + 0x18], %f14
+       ldd             [%o0 + 0x20], %f16
+       ldd             [%o0 + 0x28], %f18
+       ldd             [%o0 + 0x30], %f20
+       ldd             [%o0 + 0x38], %f22
+       ldd             [%o0 + 0x40], %f24
+       ldd             [%o0 + 0x48], %f26
+       ldd             [%o0 + 0x50], %f28
+       ldd             [%o0 + 0x58], %f30
+       ldd             [%o0 + 0x60], %f32
+       ldd             [%o0 + 0x68], %f34
+       ldd             [%o0 + 0x70], %f36
+       ldd             [%o0 + 0x78], %f38
+       ldd             [%o0 + 0x80], %f40
+       ldd             [%o0 + 0x88], %f42
+       ldd             [%o0 + 0x90], %f44
+       ldd             [%o0 + 0x98], %f46
+       ldd             [%o0 + 0xa0], %f48
+       ldd             [%o0 + 0xa8], %f50
+
+       ENCRYPT_128(12, 4, 6, 0, 2)
+
+       st              %f4, [%o2 + 0x00]
+       st              %f5, [%o2 + 0x04]
+       st              %f6, [%o2 + 0x08]
+       st              %f7, [%o2 + 0x0c]
+
+       retl
+        VISExit
+ENDPROC(aes_sparc64_encrypt_256)
+
+       .align          32
+ENTRY(aes_sparc64_decrypt_128)
+       /* %o0=key, %o1=input, %o2=output */
+       /* Single-block AES-128 decrypt.  Round keys are loaded in
+        * REVERSE schedule order (0xa0 down to 0x00) so the decrypt
+        * rounds consume them back-to-front; %f8/%f10 (last schedule
+        * entries) act as the initial whitening for decryption. */
+       VISEntry
+       ld              [%o1 + 0x00], %f4
+       ld              [%o1 + 0x04], %f5
+       ld              [%o1 + 0x08], %f6
+       ld              [%o1 + 0x0c], %f7
+       ldd             [%o0 + 0xa0], %f8
+       ldd             [%o0 + 0xa8], %f10
+       ldd             [%o0 + 0x98], %f12
+       ldd             [%o0 + 0x90], %f14
+       ldd             [%o0 + 0x88], %f16
+       ldd             [%o0 + 0x80], %f18
+       ldd             [%o0 + 0x78], %f20
+       ldd             [%o0 + 0x70], %f22
+       ldd             [%o0 + 0x68], %f24
+       ldd             [%o0 + 0x60], %f26
+       ldd             [%o0 + 0x58], %f28
+       ldd             [%o0 + 0x50], %f30
+       ldd             [%o0 + 0x48], %f32
+       ldd             [%o0 + 0x40], %f34
+       ldd             [%o0 + 0x38], %f36
+       ldd             [%o0 + 0x30], %f38
+       ldd             [%o0 + 0x28], %f40
+       ldd             [%o0 + 0x20], %f42
+       ldd             [%o0 + 0x18], %f44
+       ldd             [%o0 + 0x10], %f46
+       ldd             [%o0 + 0x08], %f48
+       ldd             [%o0 + 0x00], %f50
+       fxor            %f8, %f4, %f4
+       fxor            %f10, %f6, %f6
+       DECRYPT_128(12, 4, 6, 0, 2)
+       st              %f4, [%o2 + 0x00]
+       st              %f5, [%o2 + 0x04]
+       st              %f6, [%o2 + 0x08]
+       st              %f7, [%o2 + 0x0c]
+       retl
+        VISExit
+ENDPROC(aes_sparc64_decrypt_128)
+
+       .align          32
+ENTRY(aes_sparc64_decrypt_192)
+       /* %o0=key, %o1=input, %o2=output */
+       /* Single-block AES-192 decrypt: whiten with the schedule tail
+        * (0xc0/0xc8), run the two extra rounds via DECRYPT_TWO_ROUNDS,
+        * then the ten-round DECRYPT_128 tail starting at reg 20.  Key
+        * loads are interleaved with the round macros to hide latency. */
+       VISEntry
+       ld              [%o1 + 0x00], %f4
+       ld              [%o1 + 0x04], %f5
+       ld              [%o1 + 0x08], %f6
+       ld              [%o1 + 0x0c], %f7
+       ldd             [%o0 + 0xc0], %f8
+       ldd             [%o0 + 0xc8], %f10
+       ldd             [%o0 + 0xb8], %f12
+       ldd             [%o0 + 0xb0], %f14
+       ldd             [%o0 + 0xa8], %f16
+       ldd             [%o0 + 0xa0], %f18
+       fxor            %f8, %f4, %f4
+       fxor            %f10, %f6, %f6
+       ldd             [%o0 + 0x98], %f20
+       ldd             [%o0 + 0x90], %f22
+       ldd             [%o0 + 0x88], %f24
+       ldd             [%o0 + 0x80], %f26
+       DECRYPT_TWO_ROUNDS(12, 4, 6, 0, 2)
+       ldd             [%o0 + 0x78], %f28
+       ldd             [%o0 + 0x70], %f30
+       ldd             [%o0 + 0x68], %f32
+       ldd             [%o0 + 0x60], %f34
+       ldd             [%o0 + 0x58], %f36
+       ldd             [%o0 + 0x50], %f38
+       ldd             [%o0 + 0x48], %f40
+       ldd             [%o0 + 0x40], %f42
+       ldd             [%o0 + 0x38], %f44
+       ldd             [%o0 + 0x30], %f46
+       ldd             [%o0 + 0x28], %f48
+       ldd             [%o0 + 0x20], %f50
+       ldd             [%o0 + 0x18], %f52
+       ldd             [%o0 + 0x10], %f54
+       ldd             [%o0 + 0x08], %f56
+       ldd             [%o0 + 0x00], %f58
+       DECRYPT_128(20, 4, 6, 0, 2)
+       st              %f4, [%o2 + 0x00]
+       st              %f5, [%o2 + 0x04]
+       st              %f6, [%o2 + 0x08]
+       st              %f7, [%o2 + 0x0c]
+       retl
+        VISExit
+ENDPROC(aes_sparc64_decrypt_192)
+
+       .align          32
+ENTRY(aes_sparc64_decrypt_256)
+       /* %o0=key, %o1=input, %o2=output */
+       /* Single-block AES-256 decrypt, fully unrolled.  The 15-entry
+        * schedule does not fit in registers, so loads are interleaved
+        * with the AES_DROUND opcodes and %f12-%f18 are RELOADED with
+        * lower-offset keys (0x98-0x80) mid-stream after their first
+        * contents (0xd8-0xc0) have been consumed. */
+       VISEntry
+       ld              [%o1 + 0x00], %f4
+       ld              [%o1 + 0x04], %f5
+       ld              [%o1 + 0x08], %f6
+       ld              [%o1 + 0x0c], %f7
+       ldd             [%o0 + 0xe0], %f8
+       ldd             [%o0 + 0xe8], %f10
+       ldd             [%o0 + 0xd8], %f12
+       ldd             [%o0 + 0xd0], %f14
+       ldd             [%o0 + 0xc8], %f16
+       /* Whitening with the final schedule entries (0xe0/0xe8). */
+       fxor            %f8, %f4, %f4
+       ldd             [%o0 + 0xc0], %f18
+       fxor            %f10, %f6, %f6
+       ldd             [%o0 + 0xb8], %f20
+       AES_DROUND23(12, 4, 6, 2)
+       ldd             [%o0 + 0xb0], %f22
+       AES_DROUND01(14, 4, 6, 0)
+       ldd             [%o0 + 0xa8], %f24
+       AES_DROUND23(16, 0, 2, 6)
+       ldd             [%o0 + 0xa0], %f26
+       AES_DROUND01(18, 0, 2, 4)
+       /* %f12-%f18 refilled with keys 0x98-0x80 for later rounds. */
+       ldd             [%o0 + 0x98], %f12
+       AES_DROUND23(20, 4, 6, 2)
+       ldd             [%o0 + 0x90], %f14
+       AES_DROUND01(22, 4, 6, 0)
+       ldd             [%o0 + 0x88], %f16
+       AES_DROUND23(24, 0, 2, 6)
+       ldd             [%o0 + 0x80], %f18
+       AES_DROUND01(26, 0, 2, 4)
+       ldd             [%o0 + 0x78], %f20
+       AES_DROUND23(12, 4, 6, 2)
+       ldd             [%o0 + 0x70], %f22
+       AES_DROUND01(14, 4, 6, 0)
+       ldd             [%o0 + 0x68], %f24
+       AES_DROUND23(16, 0, 2, 6)
+       ldd             [%o0 + 0x60], %f26
+       AES_DROUND01(18, 0, 2, 4)
+       ldd             [%o0 + 0x58], %f28
+       AES_DROUND23(20, 4, 6, 2)
+       ldd             [%o0 + 0x50], %f30
+       AES_DROUND01(22, 4, 6, 0)
+       ldd             [%o0 + 0x48], %f32
+       AES_DROUND23(24, 0, 2, 6)
+       ldd             [%o0 + 0x40], %f34
+       AES_DROUND01(26, 0, 2, 4)
+       ldd             [%o0 + 0x38], %f36
+       AES_DROUND23(28, 4, 6, 2)
+       ldd             [%o0 + 0x30], %f38
+       AES_DROUND01(30, 4, 6, 0)
+       ldd             [%o0 + 0x28], %f40
+       AES_DROUND23(32, 0, 2, 6)
+       ldd             [%o0 + 0x20], %f42
+       AES_DROUND01(34, 0, 2, 4)
+       ldd             [%o0 + 0x18], %f44
+       AES_DROUND23(36, 4, 6, 2)
+       ldd             [%o0 + 0x10], %f46
+       AES_DROUND01(38, 4, 6, 0)
+       ldd             [%o0 + 0x08], %f48
+       AES_DROUND23(40, 0, 2, 6)
+       ldd             [%o0 + 0x00], %f50
+       AES_DROUND01(42, 0, 2, 4)
+       AES_DROUND23(44, 4, 6, 2)
+       AES_DROUND01(46, 4, 6, 0)
+       /* _L variants: final round using keys 0x08/0x00. */
+       AES_DROUND23_L(48, 0, 2, 6)
+       AES_DROUND01_L(50, 0, 2, 4)
+       st              %f4, [%o2 + 0x00]
+       st              %f5, [%o2 + 0x04]
+       st              %f6, [%o2 + 0x08]
+       st              %f7, [%o2 + 0x0c]
+       retl
+        VISExit
+ENDPROC(aes_sparc64_decrypt_256)
+
+       .align          32
+ENTRY(aes_sparc64_load_encrypt_keys_128)
+       /* %o0=key */
+       /* Preload the ten AES-128 round keys (schedule offsets
+        * 0x10-0xa8) into %f8-%f46 for the bulk (ECB/CBC/CTR) routines.
+        * NOTE(review): does VISEntry but returns WITHOUT VISExit — the
+        * FP state is presumably released by a matching routine after
+        * the bulk operation; confirm pairing in the C glue code. */
+       VISEntry
+       ldd             [%o0 + 0x10], %f8
+       ldd             [%o0 + 0x18], %f10
+       ldd             [%o0 + 0x20], %f12
+       ldd             [%o0 + 0x28], %f14
+       ldd             [%o0 + 0x30], %f16
+       ldd             [%o0 + 0x38], %f18
+       ldd             [%o0 + 0x40], %f20
+       ldd             [%o0 + 0x48], %f22
+       ldd             [%o0 + 0x50], %f24
+       ldd             [%o0 + 0x58], %f26
+       ldd             [%o0 + 0x60], %f28
+       ldd             [%o0 + 0x68], %f30
+       ldd             [%o0 + 0x70], %f32
+       ldd             [%o0 + 0x78], %f34
+       ldd             [%o0 + 0x80], %f36
+       ldd             [%o0 + 0x88], %f38
+       ldd             [%o0 + 0x90], %f40
+       ldd             [%o0 + 0x98], %f42
+       ldd             [%o0 + 0xa0], %f44
+       retl
+        ldd            [%o0 + 0xa8], %f46
+ENDPROC(aes_sparc64_load_encrypt_keys_128)
+
+       .align          32
+ENTRY(aes_sparc64_load_encrypt_keys_192)
+       /* %o0=key */
+       /* Preload the twelve AES-192 round keys (0x10-0xc8) into
+        * %f8-%f54.  Like the 128-bit loader, VISEntry with no VISExit —
+        * assumes a later routine restores FP state. */
+       VISEntry
+       ldd             [%o0 + 0x10], %f8
+       ldd             [%o0 + 0x18], %f10
+       ldd             [%o0 + 0x20], %f12
+       ldd             [%o0 + 0x28], %f14
+       ldd             [%o0 + 0x30], %f16
+       ldd             [%o0 + 0x38], %f18
+       ldd             [%o0 + 0x40], %f20
+       ldd             [%o0 + 0x48], %f22
+       ldd             [%o0 + 0x50], %f24
+       ldd             [%o0 + 0x58], %f26
+       ldd             [%o0 + 0x60], %f28
+       ldd             [%o0 + 0x68], %f30
+       ldd             [%o0 + 0x70], %f32
+       ldd             [%o0 + 0x78], %f34
+       ldd             [%o0 + 0x80], %f36
+       ldd             [%o0 + 0x88], %f38
+       ldd             [%o0 + 0x90], %f40
+       ldd             [%o0 + 0x98], %f42
+       ldd             [%o0 + 0xa0], %f44
+       ldd             [%o0 + 0xa8], %f46
+       ldd             [%o0 + 0xb0], %f48
+       ldd             [%o0 + 0xb8], %f50
+       ldd             [%o0 + 0xc0], %f52
+       retl
+        ldd            [%o0 + 0xc8], %f54
+ENDPROC(aes_sparc64_load_encrypt_keys_192)
+
+       .align          32
+ENTRY(aes_sparc64_load_encrypt_keys_256)
+       /* %o0=key */
+       /* Preload the fourteen AES-256 round keys (0x10-0xe8) into
+        * %f8-%f62 — this uses the entire upper FP file.  VISEntry with
+        * no VISExit, same convention as the other loaders. */
+       VISEntry
+       ldd             [%o0 + 0x10], %f8
+       ldd             [%o0 + 0x18], %f10
+       ldd             [%o0 + 0x20], %f12
+       ldd             [%o0 + 0x28], %f14
+       ldd             [%o0 + 0x30], %f16
+       ldd             [%o0 + 0x38], %f18
+       ldd             [%o0 + 0x40], %f20
+       ldd             [%o0 + 0x48], %f22
+       ldd             [%o0 + 0x50], %f24
+       ldd             [%o0 + 0x58], %f26
+       ldd             [%o0 + 0x60], %f28
+       ldd             [%o0 + 0x68], %f30
+       ldd             [%o0 + 0x70], %f32
+       ldd             [%o0 + 0x78], %f34
+       ldd             [%o0 + 0x80], %f36
+       ldd             [%o0 + 0x88], %f38
+       ldd             [%o0 + 0x90], %f40
+       ldd             [%o0 + 0x98], %f42
+       ldd             [%o0 + 0xa0], %f44
+       ldd             [%o0 + 0xa8], %f46
+       ldd             [%o0 + 0xb0], %f48
+       ldd             [%o0 + 0xb8], %f50
+       ldd             [%o0 + 0xc0], %f52
+       ldd             [%o0 + 0xc8], %f54
+       ldd             [%o0 + 0xd0], %f56
+       ldd             [%o0 + 0xd8], %f58
+       ldd             [%o0 + 0xe0], %f60
+       retl
+        ldd            [%o0 + 0xe8], %f62
+ENDPROC(aes_sparc64_load_encrypt_keys_256)
+
+       .align          32
+ENTRY(aes_sparc64_load_decrypt_keys_128)
+       /* %o0=key */
+       /* Preload AES-128 round keys for decryption in REVERSE schedule
+        * order (0x98 down to 0x00) into %f8-%f46.  VISEntry with no
+        * VISExit — see the encrypt loaders for the convention. */
+       VISEntry
+       ldd             [%o0 + 0x98], %f8
+       ldd             [%o0 + 0x90], %f10
+       ldd             [%o0 + 0x88], %f12
+       ldd             [%o0 + 0x80], %f14
+       ldd             [%o0 + 0x78], %f16
+       ldd             [%o0 + 0x70], %f18
+       ldd             [%o0 + 0x68], %f20
+       ldd             [%o0 + 0x60], %f22
+       ldd             [%o0 + 0x58], %f24
+       ldd             [%o0 + 0x50], %f26
+       ldd             [%o0 + 0x48], %f28
+       ldd             [%o0 + 0x40], %f30
+       ldd             [%o0 + 0x38], %f32
+       ldd             [%o0 + 0x30], %f34
+       ldd             [%o0 + 0x28], %f36
+       ldd             [%o0 + 0x20], %f38
+       ldd             [%o0 + 0x18], %f40
+       ldd             [%o0 + 0x10], %f42
+       ldd             [%o0 + 0x08], %f44
+       retl
+        ldd            [%o0 + 0x00], %f46
+ENDPROC(aes_sparc64_load_decrypt_keys_128)
+
+       .align          32
+ENTRY(aes_sparc64_load_decrypt_keys_192)
+       /* %o0=key */
+       /* Preload AES-192 round keys in reverse schedule order (0xb8
+        * down to 0x00) into %f8-%f54.  VISEntry, no VISExit. */
+       VISEntry
+       ldd             [%o0 + 0xb8], %f8
+       ldd             [%o0 + 0xb0], %f10
+       ldd             [%o0 + 0xa8], %f12
+       ldd             [%o0 + 0xa0], %f14
+       ldd             [%o0 + 0x98], %f16
+       ldd             [%o0 + 0x90], %f18
+       ldd             [%o0 + 0x88], %f20
+       ldd             [%o0 + 0x80], %f22
+       ldd             [%o0 + 0x78], %f24
+       ldd             [%o0 + 0x70], %f26
+       ldd             [%o0 + 0x68], %f28
+       ldd             [%o0 + 0x60], %f30
+       ldd             [%o0 + 0x58], %f32
+       ldd             [%o0 + 0x50], %f34
+       ldd             [%o0 + 0x48], %f36
+       ldd             [%o0 + 0x40], %f38
+       ldd             [%o0 + 0x38], %f40
+       ldd             [%o0 + 0x30], %f42
+       ldd             [%o0 + 0x28], %f44
+       ldd             [%o0 + 0x20], %f46
+       ldd             [%o0 + 0x18], %f48
+       ldd             [%o0 + 0x10], %f50
+       ldd             [%o0 + 0x08], %f52
+       retl
+        ldd            [%o0 + 0x00], %f54
+ENDPROC(aes_sparc64_load_decrypt_keys_192)
+
+       .align          32
+ENTRY(aes_sparc64_load_decrypt_keys_256)
+       /* %o0=key */
+       /* Preload AES-256 round keys in reverse schedule order (0xd8
+        * down to 0x00) into %f8-%f62.  VISEntry, no VISExit. */
+       VISEntry
+       ldd             [%o0 + 0xd8], %f8
+       ldd             [%o0 + 0xd0], %f10
+       ldd             [%o0 + 0xc8], %f12
+       ldd             [%o0 + 0xc0], %f14
+       ldd             [%o0 + 0xb8], %f16
+       ldd             [%o0 + 0xb0], %f18
+       ldd             [%o0 + 0xa8], %f20
+       ldd             [%o0 + 0xa0], %f22
+       ldd             [%o0 + 0x98], %f24
+       ldd             [%o0 + 0x90], %f26
+       ldd             [%o0 + 0x88], %f28
+       ldd             [%o0 + 0x80], %f30
+       ldd             [%o0 + 0x78], %f32
+       ldd             [%o0 + 0x70], %f34
+       ldd             [%o0 + 0x68], %f36
+       ldd             [%o0 + 0x60], %f38
+       ldd             [%o0 + 0x58], %f40
+       ldd             [%o0 + 0x50], %f42
+       ldd             [%o0 + 0x48], %f44
+       ldd             [%o0 + 0x40], %f46
+       ldd             [%o0 + 0x38], %f48
+       ldd             [%o0 + 0x30], %f50
+       ldd             [%o0 + 0x28], %f52
+       ldd             [%o0 + 0x20], %f54
+       ldd             [%o0 + 0x18], %f56
+       ldd             [%o0 + 0x10], %f58
+       ldd             [%o0 + 0x08], %f60
+       retl
+        ldd            [%o0 + 0x00], %f62
+ENDPROC(aes_sparc64_load_decrypt_keys_256)
+
+       .align          32
+ENTRY(aes_sparc64_ecb_encrypt_128)
+       /* %o0=key, %o1=input, %o2=output, %o3=len */
+       /* ECB bulk encrypt.  Round keys must already be in the float
+        * registers (see aes_sparc64_load_encrypt_keys_128); only the
+        * whitening key is fetched here (into %g1/%g2).  len is assumed
+        * to be a nonzero multiple of 16 — TODO confirm in C glue.
+        * Main loop handles two blocks (0x20) per iteration via
+        * ENCRYPT_128_2; a single trailing block falls to label 10. */
+       ldx             [%o0 + 0x00], %g1
+       subcc           %o3, 0x10, %o3
+       be              10f             /* exactly one block total */
+        ldx            [%o0 + 0x08], %g2
+1:     ldx             [%o1 + 0x00], %g3
+       ldx             [%o1 + 0x08], %g7
+       ldx             [%o1 + 0x10], %o4
+       ldx             [%o1 + 0x18], %o5
+       /* Whiten both blocks in integer regs, then move to FP. */
+       xor             %g1, %g3, %g3
+       xor             %g2, %g7, %g7
+       MOVXTOD_G3_F4
+       MOVXTOD_G7_F6
+       xor             %g1, %o4, %g3
+       xor             %g2, %o5, %g7
+       MOVXTOD_G3_F60
+       MOVXTOD_G7_F62
+       ENCRYPT_128_2(8, 4, 6, 60, 62, 0, 2, 56, 58)
+       std             %f4, [%o2 + 0x00]
+       std             %f6, [%o2 + 0x08]
+       std             %f60, [%o2 + 0x10]
+       std             %f62, [%o2 + 0x18]
+       sub             %o3, 0x20, %o3
+       add             %o1, 0x20, %o1
+       brgz            %o3, 1b
+        add            %o2, 0x20, %o2
+       brlz,pt         %o3, 11f        /* no odd trailing block */
+        nop
+10:    ldx             [%o1 + 0x00], %g3
+       ldx             [%o1 + 0x08], %g7
+       xor             %g1, %g3, %g3
+       xor             %g2, %g7, %g7
+       MOVXTOD_G3_F4
+       MOVXTOD_G7_F6
+       ENCRYPT_128(8, 4, 6, 0, 2)
+       std             %f4, [%o2 + 0x00]
+       std             %f6, [%o2 + 0x08]
+11:    retl
+        nop
+ENDPROC(aes_sparc64_ecb_encrypt_128)
+
+       .align          32
+ENTRY(aes_sparc64_ecb_encrypt_192)
+       /* %o0=key, %o1=input, %o2=output, %o3=len */
+       /* ECB bulk encrypt, AES-192.  Same two-blocks-per-iteration
+        * structure as the 128-bit variant; round keys preloaded by
+        * aes_sparc64_load_encrypt_keys_192.  len assumed a nonzero
+        * multiple of 16 — TODO confirm in C glue. */
+       ldx             [%o0 + 0x00], %g1
+       subcc           %o3, 0x10, %o3
+       be              10f             /* exactly one block total */
+        ldx            [%o0 + 0x08], %g2
+1:     ldx             [%o1 + 0x00], %g3
+       ldx             [%o1 + 0x08], %g7
+       ldx             [%o1 + 0x10], %o4
+       ldx             [%o1 + 0x18], %o5
+       xor             %g1, %g3, %g3
+       xor             %g2, %g7, %g7
+       MOVXTOD_G3_F4
+       MOVXTOD_G7_F6
+       xor             %g1, %o4, %g3
+       xor             %g2, %o5, %g7
+       MOVXTOD_G3_F60
+       MOVXTOD_G7_F62
+       ENCRYPT_192_2(8, 4, 6, 60, 62, 0, 2, 56, 58)
+       std             %f4, [%o2 + 0x00]
+       std             %f6, [%o2 + 0x08]
+       std             %f60, [%o2 + 0x10]
+       std             %f62, [%o2 + 0x18]
+       sub             %o3, 0x20, %o3
+       add             %o1, 0x20, %o1
+       brgz            %o3, 1b
+        add            %o2, 0x20, %o2
+       brlz,pt         %o3, 11f        /* no odd trailing block */
+        nop
+10:    ldx             [%o1 + 0x00], %g3
+       ldx             [%o1 + 0x08], %g7
+       xor             %g1, %g3, %g3
+       xor             %g2, %g7, %g7
+       MOVXTOD_G3_F4
+       MOVXTOD_G7_F6
+       ENCRYPT_192(8, 4, 6, 0, 2)
+       std             %f4, [%o2 + 0x00]
+       std             %f6, [%o2 + 0x08]
+11:    retl
+        nop
+ENDPROC(aes_sparc64_ecb_encrypt_192)
+
+       .align          32
+ENTRY(aes_sparc64_ecb_encrypt_256)
+       /* %o0=key, %o1=input, %o2=output, %o3=len */
+       /* ECB bulk encrypt, AES-256.  Note the second block goes into
+        * %f0/%f2 (not %f60/%f62 as in the 128/192 variants) because
+        * the 256-bit key schedule occupies the upper FP registers.
+        * len assumed a nonzero multiple of 16 — TODO confirm. */
+       ldx             [%o0 + 0x00], %g1
+       subcc           %o3, 0x10, %o3
+       be              10f             /* exactly one block total */
+        ldx            [%o0 + 0x08], %g2
+1:     ldx             [%o1 + 0x00], %g3
+       ldx             [%o1 + 0x08], %g7
+       ldx             [%o1 + 0x10], %o4
+       ldx             [%o1 + 0x18], %o5
+       xor             %g1, %g3, %g3
+       xor             %g2, %g7, %g7
+       MOVXTOD_G3_F4
+       MOVXTOD_G7_F6
+       xor             %g1, %o4, %g3
+       xor             %g2, %o5, %g7
+       MOVXTOD_G3_F0
+       MOVXTOD_G7_F2
+       ENCRYPT_256_2(8, 4, 6, 0, 2)
+       std             %f4, [%o2 + 0x00]
+       std             %f6, [%o2 + 0x08]
+       std             %f0, [%o2 + 0x10]
+       std             %f2, [%o2 + 0x18]
+       sub             %o3, 0x20, %o3
+       add             %o1, 0x20, %o1
+       brgz            %o3, 1b
+        add            %o2, 0x20, %o2
+       brlz,pt         %o3, 11f        /* no odd trailing block */
+        nop
+10:    ldx             [%o1 + 0x00], %g3
+       ldx             [%o1 + 0x08], %g7
+       xor             %g1, %g3, %g3
+       xor             %g2, %g7, %g7
+       MOVXTOD_G3_F4
+       MOVXTOD_G7_F6
+       ENCRYPT_256(8, 4, 6, 0, 2)
+       std             %f4, [%o2 + 0x00]
+       std             %f6, [%o2 + 0x08]
+11:    retl
+        nop
+ENDPROC(aes_sparc64_ecb_encrypt_256)
+
+       .align          32
+ENTRY(aes_sparc64_ecb_decrypt_128)
+       /* %o0=&key[key_len], %o1=input, %o2=output, %o3=len */
+       /* ECB bulk decrypt.  %o0 points PAST the schedule, so the
+        * decryption whitening key (the last schedule entries) is
+        * fetched at negative offsets -0x10/-0x08.  Round keys must be
+        * preloaded by aes_sparc64_load_decrypt_keys_128.  Same
+        * two-blocks-per-iteration loop as the encrypt side. */
+       ldx             [%o0 - 0x10], %g1
+       subcc           %o3, 0x10, %o3
+       be              10f             /* exactly one block total */
+        ldx            [%o0 - 0x08], %g2
+1:     ldx             [%o1 + 0x00], %g3
+       ldx             [%o1 + 0x08], %g7
+       ldx             [%o1 + 0x10], %o4
+       ldx             [%o1 + 0x18], %o5
+       xor             %g1, %g3, %g3
+       xor             %g2, %g7, %g7
+       MOVXTOD_G3_F4
+       MOVXTOD_G7_F6
+       xor             %g1, %o4, %g3
+       xor             %g2, %o5, %g7
+       MOVXTOD_G3_F60
+       MOVXTOD_G7_F62
+       DECRYPT_128_2(8, 4, 6, 60, 62, 0, 2, 56, 58)
+       std             %f4, [%o2 + 0x00]
+       std             %f6, [%o2 + 0x08]
+       std             %f60, [%o2 + 0x10]
+       std             %f62, [%o2 + 0x18]
+       sub             %o3, 0x20, %o3
+       add             %o1, 0x20, %o1
+       brgz,pt         %o3, 1b
+        add            %o2, 0x20, %o2
+       brlz,pt         %o3, 11f        /* no odd trailing block */
+        nop
+10:    ldx             [%o1 + 0x00], %g3
+       ldx             [%o1 + 0x08], %g7
+       xor             %g1, %g3, %g3
+       xor             %g2, %g7, %g7
+       MOVXTOD_G3_F4
+       MOVXTOD_G7_F6
+       DECRYPT_128(8, 4, 6, 0, 2)
+       std             %f4, [%o2 + 0x00]
+       std             %f6, [%o2 + 0x08]
+11:    retl
+        nop
+ENDPROC(aes_sparc64_ecb_decrypt_128)
+
+       .align          32
+ENTRY(aes_sparc64_ecb_decrypt_192)
+       /* %o0=&key[key_len], %o1=input, %o2=output, %o3=len */
+       /* ECB decrypt, AES-192.  Identical structure to
+        * aes_sparc64_ecb_decrypt_128 above (two blocks per loop
+        * iteration, single-block tail at 10:), using the 192-bit
+        * DECRYPT macros.  len must be a nonzero multiple of 16.
+        */
+       ldx             [%o0 - 0x10], %g1
+       subcc           %o3, 0x10, %o3
+       be              10f
+        ldx            [%o0 - 0x08], %g2       /* delay slot: second key word */
+1:     ldx             [%o1 + 0x00], %g3
+       ldx             [%o1 + 0x08], %g7
+       ldx             [%o1 + 0x10], %o4
+       ldx             [%o1 + 0x18], %o5
+       xor             %g1, %g3, %g3
+       xor             %g2, %g7, %g7
+       MOVXTOD_G3_F4
+       MOVXTOD_G7_F6
+       xor             %g1, %o4, %g3
+       xor             %g2, %o5, %g7
+       MOVXTOD_G3_F60
+       MOVXTOD_G7_F62
+       DECRYPT_192_2(8, 4, 6, 60, 62, 0, 2, 56, 58)
+       std             %f4, [%o2 + 0x00]
+       std             %f6, [%o2 + 0x08]
+       std             %f60, [%o2 + 0x10]
+       std             %f62, [%o2 + 0x18]
+       sub             %o3, 0x20, %o3
+       add             %o1, 0x20, %o1
+       brgz,pt         %o3, 1b
+        add            %o2, 0x20, %o2
+       brlz,pt         %o3, 11f
+        nop
+10:    ldx             [%o1 + 0x00], %g3       /* final single block */
+       ldx             [%o1 + 0x08], %g7
+       xor             %g1, %g3, %g3
+       xor             %g2, %g7, %g7
+       MOVXTOD_G3_F4
+       MOVXTOD_G7_F6
+       DECRYPT_192(8, 4, 6, 0, 2)
+       std             %f4, [%o2 + 0x00]
+       std             %f6, [%o2 + 0x08]
+11:    retl
+        nop
+ENDPROC(aes_sparc64_ecb_decrypt_192)
+
+       .align          32
+ENTRY(aes_sparc64_ecb_decrypt_256)
+       /* %o0=&key[key_len], %o1=input, %o2=output, %o3=len */
+       /* ECB decrypt, AES-256.  Same two-blocks-per-iteration shape
+        * as the 128/192-bit variants above.  len must be a nonzero
+        * multiple of 16.
+        */
+       ldx             [%o0 - 0x10], %g1
+       subcc           %o3, 0x10, %o3
+       be              10f
+        ldx            [%o0 - 0x08], %g2
+       /* Rewind %o0 to the base of the 256-bit key schedule.
+        * NOTE(review): the len == 16 path branches to 10: above and
+        * never executes this rewind -- presumably DECRYPT_256 does not
+        * read keys through %o0; verify against the macro definition.
+        */
+       sub             %o0, 0xf0, %o0
+1:     ldx             [%o1 + 0x00], %g3
+       ldx             [%o1 + 0x08], %g7
+       ldx             [%o1 + 0x10], %o4
+       ldx             [%o1 + 0x18], %o5
+       xor             %g1, %g3, %g3
+       xor             %g2, %g7, %g7
+       MOVXTOD_G3_F4
+       MOVXTOD_G7_F6
+       xor             %g1, %o4, %g3
+       xor             %g2, %o5, %g7
+       MOVXTOD_G3_F0
+       MOVXTOD_G7_F2
+       DECRYPT_256_2(8, 4, 6, 0, 2)
+       std             %f4, [%o2 + 0x00]
+       std             %f6, [%o2 + 0x08]
+       std             %f0, [%o2 + 0x10]
+       std             %f2, [%o2 + 0x18]
+       sub             %o3, 0x20, %o3
+       add             %o1, 0x20, %o1
+       brgz,pt         %o3, 1b
+        add            %o2, 0x20, %o2
+       brlz,pt         %o3, 11f
+        nop
+10:    ldx             [%o1 + 0x00], %g3       /* final single block */
+       ldx             [%o1 + 0x08], %g7
+       xor             %g1, %g3, %g3
+       xor             %g2, %g7, %g7
+       MOVXTOD_G3_F4
+       MOVXTOD_G7_F6
+       DECRYPT_256(8, 4, 6, 0, 2)
+       std             %f4, [%o2 + 0x00]
+       std             %f6, [%o2 + 0x08]
+11:    retl
+        nop
+ENDPROC(aes_sparc64_ecb_decrypt_256)
+
+       .align          32
+ENTRY(aes_sparc64_cbc_encrypt_128)
+       /* %o0=key, %o1=input, %o2=output, %o3=len, %o4=IV */
+       /* CBC encrypt, AES-128.  The chaining value lives in %f4/%f6
+        * across iterations: each plaintext block is XORed with the
+        * first round-key pair (%g1/%g2) and then with the previous
+        * ciphertext before encryption.  One block per iteration;
+        * the updated IV is stored back through %o4 on exit.
+        */
+       ldd             [%o4 + 0x00], %f4
+       ldd             [%o4 + 0x08], %f6
+       ldx             [%o0 + 0x00], %g1
+       ldx             [%o0 + 0x08], %g2
+1:     ldx             [%o1 + 0x00], %g3
+       ldx             [%o1 + 0x08], %g7
+       add             %o1, 0x10, %o1
+       xor             %g1, %g3, %g3
+       xor             %g2, %g7, %g7
+       MOVXTOD_G3_F0
+       MOVXTOD_G7_F2
+       fxor            %f4, %f0, %f4
+       fxor            %f6, %f2, %f6
+       ENCRYPT_128(8, 4, 6, 0, 2)
+       std             %f4, [%o2 + 0x00]
+       std             %f6, [%o2 + 0x08]
+       subcc           %o3, 0x10, %o3
+       bne,pt          %xcc, 1b
+        add            %o2, 0x10, %o2
+       std             %f4, [%o4 + 0x00]       /* write back final IV */
+       std             %f6, [%o4 + 0x08]
+       retl
+        nop
+ENDPROC(aes_sparc64_cbc_encrypt_128)
+
+       .align          32
+ENTRY(aes_sparc64_cbc_encrypt_192)
+       /* %o0=key, %o1=input, %o2=output, %o3=len, %o4=IV */
+       /* CBC encrypt, AES-192.  Identical structure to
+        * aes_sparc64_cbc_encrypt_128 above, using ENCRYPT_192.
+        */
+       ldd             [%o4 + 0x00], %f4
+       ldd             [%o4 + 0x08], %f6
+       ldx             [%o0 + 0x00], %g1
+       ldx             [%o0 + 0x08], %g2
+1:     ldx             [%o1 + 0x00], %g3
+       ldx             [%o1 + 0x08], %g7
+       add             %o1, 0x10, %o1
+       xor             %g1, %g3, %g3
+       xor             %g2, %g7, %g7
+       MOVXTOD_G3_F0
+       MOVXTOD_G7_F2
+       fxor            %f4, %f0, %f4
+       fxor            %f6, %f2, %f6
+       ENCRYPT_192(8, 4, 6, 0, 2)
+       std             %f4, [%o2 + 0x00]
+       std             %f6, [%o2 + 0x08]
+       subcc           %o3, 0x10, %o3
+       bne,pt          %xcc, 1b
+        add            %o2, 0x10, %o2
+       std             %f4, [%o4 + 0x00]       /* write back final IV */
+       std             %f6, [%o4 + 0x08]
+       retl
+        nop
+ENDPROC(aes_sparc64_cbc_encrypt_192)
+
+       .align          32
+ENTRY(aes_sparc64_cbc_encrypt_256)
+       /* %o0=key, %o1=input, %o2=output, %o3=len, %o4=IV */
+       /* CBC encrypt, AES-256.  Identical structure to
+        * aes_sparc64_cbc_encrypt_128 above, using ENCRYPT_256.
+        */
+       ldd             [%o4 + 0x00], %f4
+       ldd             [%o4 + 0x08], %f6
+       ldx             [%o0 + 0x00], %g1
+       ldx             [%o0 + 0x08], %g2
+1:     ldx             [%o1 + 0x00], %g3
+       ldx             [%o1 + 0x08], %g7
+       add             %o1, 0x10, %o1
+       xor             %g1, %g3, %g3
+       xor             %g2, %g7, %g7
+       MOVXTOD_G3_F0
+       MOVXTOD_G7_F2
+       fxor            %f4, %f0, %f4
+       fxor            %f6, %f2, %f6
+       ENCRYPT_256(8, 4, 6, 0, 2)
+       std             %f4, [%o2 + 0x00]
+       std             %f6, [%o2 + 0x08]
+       subcc           %o3, 0x10, %o3
+       bne,pt          %xcc, 1b
+        add            %o2, 0x10, %o2
+       std             %f4, [%o4 + 0x00]       /* write back final IV */
+       std             %f6, [%o4 + 0x08]
+       retl
+        nop
+ENDPROC(aes_sparc64_cbc_encrypt_256)
+
+       .align          32
+ENTRY(aes_sparc64_cbc_decrypt_128)
+       /* %o0=&key[key_len], %o1=input, %o2=output, %o3=len, %o4=iv */
+       /* CBC decrypt, AES-128.  After the last-round key pair is read
+        * into %g1/%g2, %o0 and %o5 are REUSED to carry the IV /
+        * previous ciphertext across iterations (the key pointer is no
+        * longer needed).  Each decrypted block is XORed with the prior
+        * ciphertext; the raw ciphertext (%g3/%g7 re-XORed with the key
+        * pair) becomes the next chaining value, stored back on exit.
+        */
+       ldx             [%o0 - 0x10], %g1
+       ldx             [%o0 - 0x08], %g2
+       ldx             [%o4 + 0x00], %o0       /* %o0 now holds IV high word */
+       ldx             [%o4 + 0x08], %o5
+1:     ldx             [%o1 + 0x00], %g3
+       ldx             [%o1 + 0x08], %g7
+       add             %o1, 0x10, %o1
+       xor             %g1, %g3, %g3
+       xor             %g2, %g7, %g7
+       MOVXTOD_G3_F4
+       MOVXTOD_G7_F6
+       DECRYPT_128(8, 4, 6, 0, 2)
+       MOVXTOD_O0_F0
+       MOVXTOD_O5_F2
+       xor             %g1, %g3, %o0           /* recover ciphertext as next IV */
+       xor             %g2, %g7, %o5
+       fxor            %f4, %f0, %f4
+       fxor            %f6, %f2, %f6
+       std             %f4, [%o2 + 0x00]
+       std             %f6, [%o2 + 0x08]
+       subcc           %o3, 0x10, %o3
+       bne,pt          %xcc, 1b
+        add            %o2, 0x10, %o2
+       stx             %o0, [%o4 + 0x00]       /* write back final IV */
+       stx             %o5, [%o4 + 0x08]
+       retl
+        nop
+ENDPROC(aes_sparc64_cbc_decrypt_128)
+
+       .align          32
+ENTRY(aes_sparc64_cbc_decrypt_192)
+       /* %o0=&key[key_len], %o1=input, %o2=output, %o3=len, %o4=iv */
+       /* CBC decrypt, AES-192.  Identical structure to
+        * aes_sparc64_cbc_decrypt_128 above (including the %o0/%o5
+        * reuse for the chaining value), using DECRYPT_192.
+        */
+       ldx             [%o0 - 0x10], %g1
+       ldx             [%o0 - 0x08], %g2
+       ldx             [%o4 + 0x00], %o0       /* %o0 now holds IV high word */
+       ldx             [%o4 + 0x08], %o5
+1:     ldx             [%o1 + 0x00], %g3
+       ldx             [%o1 + 0x08], %g7
+       add             %o1, 0x10, %o1
+       xor             %g1, %g3, %g3
+       xor             %g2, %g7, %g7
+       MOVXTOD_G3_F4
+       MOVXTOD_G7_F6
+       DECRYPT_192(8, 4, 6, 0, 2)
+       MOVXTOD_O0_F0
+       MOVXTOD_O5_F2
+       xor             %g1, %g3, %o0           /* recover ciphertext as next IV */
+       xor             %g2, %g7, %o5
+       fxor            %f4, %f0, %f4
+       fxor            %f6, %f2, %f6
+       std             %f4, [%o2 + 0x00]
+       std             %f6, [%o2 + 0x08]
+       subcc           %o3, 0x10, %o3
+       bne,pt          %xcc, 1b
+        add            %o2, 0x10, %o2
+       stx             %o0, [%o4 + 0x00]       /* write back final IV */
+       stx             %o5, [%o4 + 0x08]
+       retl
+        nop
+ENDPROC(aes_sparc64_cbc_decrypt_192)
+
+       .align          32
+ENTRY(aes_sparc64_cbc_decrypt_256)
+       /* %o0=&key[key_len], %o1=input, %o2=output, %o3=len, %o4=iv */
+       /* CBC decrypt, AES-256.  Identical structure to
+        * aes_sparc64_cbc_decrypt_128 above (including the %o0/%o5
+        * reuse for the chaining value), using DECRYPT_256.
+        */
+       ldx             [%o0 - 0x10], %g1
+       ldx             [%o0 - 0x08], %g2
+       ldx             [%o4 + 0x00], %o0       /* %o0 now holds IV high word */
+       ldx             [%o4 + 0x08], %o5
+1:     ldx             [%o1 + 0x00], %g3
+       ldx             [%o1 + 0x08], %g7
+       add             %o1, 0x10, %o1
+       xor             %g1, %g3, %g3
+       xor             %g2, %g7, %g7
+       MOVXTOD_G3_F4
+       MOVXTOD_G7_F6
+       DECRYPT_256(8, 4, 6, 0, 2)
+       MOVXTOD_O0_F0
+       MOVXTOD_O5_F2
+       xor             %g1, %g3, %o0           /* recover ciphertext as next IV */
+       xor             %g2, %g7, %o5
+       fxor            %f4, %f0, %f4
+       fxor            %f6, %f2, %f6
+       std             %f4, [%o2 + 0x00]
+       std             %f6, [%o2 + 0x08]
+       subcc           %o3, 0x10, %o3
+       bne,pt          %xcc, 1b
+        add            %o2, 0x10, %o2
+       stx             %o0, [%o4 + 0x00]       /* write back final IV */
+       stx             %o5, [%o4 + 0x08]
+       retl
+        nop
+ENDPROC(aes_sparc64_cbc_decrypt_256)
+
+       .align          32
+ENTRY(aes_sparc64_ctr_crypt_128)
+       /* %o0=key, %o1=input, %o2=output, %o3=len, %o4=IV */
+       /* CTR mode, AES-128.  The 128-bit counter is kept in %g3 (high
+        * 64 bits) / %g7 (low 64 bits).  Increment idiom: bump %g7, and
+        * via movrz replace %g3 with the precomputed %g3+1 only when
+        * %g7 wrapped to zero (carry propagation).  Two counter blocks
+        * are encrypted per loop iteration and XORed with the input;
+        * a single-block tail is handled at 10:.  The final counter is
+        * stored back through %o4.
+        */
+       ldx             [%o4 + 0x00], %g3
+       ldx             [%o4 + 0x08], %g7
+       subcc           %o3, 0x10, %o3
+       ldx             [%o0 + 0x00], %g1
+       be              10f
+        ldx            [%o0 + 0x08], %g2
+1:     xor             %g1, %g3, %o5
+       MOVXTOD_O5_F0
+       xor             %g2, %g7, %o5
+       MOVXTOD_O5_F2
+       add             %g7, 1, %g7
+       add             %g3, 1, %o5
+       movrz           %g7, %o5, %g3           /* carry into high word on wrap */
+       xor             %g1, %g3, %o5
+       MOVXTOD_O5_F4
+       xor             %g2, %g7, %o5
+       MOVXTOD_O5_F6
+       add             %g7, 1, %g7
+       add             %g3, 1, %o5
+       movrz           %g7, %o5, %g3
+       ENCRYPT_128_2(8, 0, 2, 4, 6, 56, 58, 60, 62)
+       ldd             [%o1 + 0x00], %f56
+       ldd             [%o1 + 0x08], %f58
+       ldd             [%o1 + 0x10], %f60
+       ldd             [%o1 + 0x18], %f62
+       fxor            %f56, %f0, %f56         /* XOR keystream with input */
+       fxor            %f58, %f2, %f58
+       fxor            %f60, %f4, %f60
+       fxor            %f62, %f6, %f62
+       std             %f56, [%o2 + 0x00]
+       std             %f58, [%o2 + 0x08]
+       std             %f60, [%o2 + 0x10]
+       std             %f62, [%o2 + 0x18]
+       subcc           %o3, 0x20, %o3
+       add             %o1, 0x20, %o1
+       brgz            %o3, 1b
+        add            %o2, 0x20, %o2
+       brlz,pt         %o3, 11f
+        nop
+10:    xor             %g1, %g3, %o5           /* final single block */
+       MOVXTOD_O5_F0
+       xor             %g2, %g7, %o5
+       MOVXTOD_O5_F2
+       add             %g7, 1, %g7
+       add             %g3, 1, %o5
+       movrz           %g7, %o5, %g3
+       ENCRYPT_128(8, 0, 2, 4, 6)
+       ldd             [%o1 + 0x00], %f4
+       ldd             [%o1 + 0x08], %f6
+       fxor            %f4, %f0, %f4
+       fxor            %f6, %f2, %f6
+       std             %f4, [%o2 + 0x00]
+       std             %f6, [%o2 + 0x08]
+11:    stx             %g3, [%o4 + 0x00]       /* write back final counter */
+       retl
+        stx            %g7, [%o4 + 0x08]
+ENDPROC(aes_sparc64_ctr_crypt_128)
+
+       .align          32
+ENTRY(aes_sparc64_ctr_crypt_192)
+       /* %o0=key, %o1=input, %o2=output, %o3=len, %o4=IV */
+       /* CTR mode, AES-192.  Identical structure to
+        * aes_sparc64_ctr_crypt_128 above (counter in %g3/%g7, movrz
+        * carry idiom, two blocks per iteration, tail at 10:), using
+        * the 192-bit ENCRYPT macros.
+        */
+       ldx             [%o4 + 0x00], %g3
+       ldx             [%o4 + 0x08], %g7
+       subcc           %o3, 0x10, %o3
+       ldx             [%o0 + 0x00], %g1
+       be              10f
+        ldx            [%o0 + 0x08], %g2
+1:     xor             %g1, %g3, %o5
+       MOVXTOD_O5_F0
+       xor             %g2, %g7, %o5
+       MOVXTOD_O5_F2
+       add             %g7, 1, %g7
+       add             %g3, 1, %o5
+       movrz           %g7, %o5, %g3           /* carry into high word on wrap */
+       xor             %g1, %g3, %o5
+       MOVXTOD_O5_F4
+       xor             %g2, %g7, %o5
+       MOVXTOD_O5_F6
+       add             %g7, 1, %g7
+       add             %g3, 1, %o5
+       movrz           %g7, %o5, %g3
+       ENCRYPT_192_2(8, 0, 2, 4, 6, 56, 58, 60, 62)
+       ldd             [%o1 + 0x00], %f56
+       ldd             [%o1 + 0x08], %f58
+       ldd             [%o1 + 0x10], %f60
+       ldd             [%o1 + 0x18], %f62
+       fxor            %f56, %f0, %f56         /* XOR keystream with input */
+       fxor            %f58, %f2, %f58
+       fxor            %f60, %f4, %f60
+       fxor            %f62, %f6, %f62
+       std             %f56, [%o2 + 0x00]
+       std             %f58, [%o2 + 0x08]
+       std             %f60, [%o2 + 0x10]
+       std             %f62, [%o2 + 0x18]
+       subcc           %o3, 0x20, %o3
+       add             %o1, 0x20, %o1
+       brgz            %o3, 1b
+        add            %o2, 0x20, %o2
+       brlz,pt         %o3, 11f
+        nop
+10:    xor             %g1, %g3, %o5           /* final single block */
+       MOVXTOD_O5_F0
+       xor             %g2, %g7, %o5
+       MOVXTOD_O5_F2
+       add             %g7, 1, %g7
+       add             %g3, 1, %o5
+       movrz           %g7, %o5, %g3
+       ENCRYPT_192(8, 0, 2, 4, 6)
+       ldd             [%o1 + 0x00], %f4
+       ldd             [%o1 + 0x08], %f6
+       fxor            %f4, %f0, %f4
+       fxor            %f6, %f2, %f6
+       std             %f4, [%o2 + 0x00]
+       std             %f6, [%o2 + 0x08]
+11:    stx             %g3, [%o4 + 0x00]       /* write back final counter */
+       retl
+        stx            %g7, [%o4 + 0x08]
+ENDPROC(aes_sparc64_ctr_crypt_192)
+
+       .align          32
+ENTRY(aes_sparc64_ctr_crypt_256)
+       /* %o0=key, %o1=input, %o2=output, %o3=len, %o4=IV */
+       /* CTR mode, AES-256.  Same shape as aes_sparc64_ctr_crypt_128
+        * above, but ENCRYPT_256_2 takes no scratch-register args,
+        * which is why the tail path reloads %f56-%f62 below.
+        */
+       ldx             [%o4 + 0x00], %g3
+       ldx             [%o4 + 0x08], %g7
+       subcc           %o3, 0x10, %o3
+       ldx             [%o0 + 0x00], %g1
+       be              10f
+        ldx            [%o0 + 0x08], %g2
+1:     xor             %g1, %g3, %o5
+       MOVXTOD_O5_F0
+       xor             %g2, %g7, %o5
+       MOVXTOD_O5_F2
+       add             %g7, 1, %g7
+       add             %g3, 1, %o5
+       movrz           %g7, %o5, %g3           /* carry into high word on wrap */
+       xor             %g1, %g3, %o5
+       MOVXTOD_O5_F4
+       xor             %g2, %g7, %o5
+       MOVXTOD_O5_F6
+       add             %g7, 1, %g7
+       add             %g3, 1, %o5
+       movrz           %g7, %o5, %g3
+       ENCRYPT_256_2(8, 0, 2, 4, 6)
+       ldd             [%o1 + 0x00], %f56
+       ldd             [%o1 + 0x08], %f58
+       ldd             [%o1 + 0x10], %f60
+       ldd             [%o1 + 0x18], %f62
+       fxor            %f56, %f0, %f56         /* XOR keystream with input */
+       fxor            %f58, %f2, %f58
+       fxor            %f60, %f4, %f60
+       fxor            %f62, %f6, %f62
+       std             %f56, [%o2 + 0x00]
+       std             %f58, [%o2 + 0x08]
+       std             %f60, [%o2 + 0x10]
+       std             %f62, [%o2 + 0x18]
+       subcc           %o3, 0x20, %o3
+       add             %o1, 0x20, %o1
+       brgz            %o3, 1b
+        add            %o2, 0x20, %o2
+       brlz,pt         %o3, 11f
+        nop
+       /* Restore the last round-key doubles before the single-block
+        * tail: NOTE(review) ENCRYPT_256_2 apparently clobbers
+        * %f56-%f62 (it was given no scratch registers), and this
+        * reload sits on the loop fall-through path only -- the direct
+        * branch to 10: (len == 16) never ran the loop, so the keys
+        * are still intact on that path.  Verify against the macro.
+        */
+       ldd             [%o0 + 0xd0], %f56
+       ldd             [%o0 + 0xd8], %f58
+       ldd             [%o0 + 0xe0], %f60
+       ldd             [%o0 + 0xe8], %f62
+10:    xor             %g1, %g3, %o5           /* final single block */
+       MOVXTOD_O5_F0
+       xor             %g2, %g7, %o5
+       MOVXTOD_O5_F2
+       add             %g7, 1, %g7
+       add             %g3, 1, %o5
+       movrz           %g7, %o5, %g3
+       ENCRYPT_256(8, 0, 2, 4, 6)
+       ldd             [%o1 + 0x00], %f4
+       ldd             [%o1 + 0x08], %f6
+       fxor            %f4, %f0, %f4
+       fxor            %f6, %f2, %f6
+       std             %f4, [%o2 + 0x00]
+       std             %f6, [%o2 + 0x08]
+11:    stx             %g3, [%o4 + 0x00]       /* write back final counter */
+       retl
+        stx            %g7, [%o4 + 0x08]
+ENDPROC(aes_sparc64_ctr_crypt_256)
diff --git a/arch/sparc/crypto/aes_glue.c b/arch/sparc/crypto/aes_glue.c
new file mode 100644 (file)
index 0000000..8f1c998
--- /dev/null
@@ -0,0 +1,477 @@
+/* Glue code for AES encryption optimized for sparc64 crypto opcodes.
+ *
+ * This is based largely upon arch/x86/crypto/aesni-intel_glue.c
+ *
+ * Copyright (C) 2008, Intel Corp.
+ *    Author: Huang Ying <ying.huang@intel.com>
+ *
+ * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
+ * interface for 64-bit kernels.
+ *    Authors: Adrian Hoban <adrian.hoban@intel.com>
+ *             Gabriele Paoloni <gabriele.paoloni@intel.com>
+ *             Tadeusz Struk (tadeusz.struk@intel.com)
+ *             Aidan O'Mahony (aidan.o.mahony@intel.com)
+ *    Copyright (c) 2010, Intel Corporation.
+ */
+
+#define pr_fmt(fmt)    KBUILD_MODNAME ": " fmt
+
+#include <linux/crypto.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/types.h>
+#include <crypto/algapi.h>
+#include <crypto/aes.h>
+
+#include <asm/fpumacro.h>
+#include <asm/pstate.h>
+#include <asm/elf.h>
+
+#include "opcodes.h"
+
+/* Per-key-size dispatch table of sparc64 assembler entry points.
+ * One statically-initialized instance exists for each supported AES
+ * key size (aes128_ops/aes192_ops/aes256_ops below); aes_set_key()
+ * installs the matching one in the tfm context.  "key" is the
+ * expanded key schedule; the ECB/CBC/CTR helpers process "len" bytes
+ * of whole blocks, and CBC/CTR update *iv in place.
+ */
+struct aes_ops {
+       void (*encrypt)(const u64 *key, const u32 *input, u32 *output);
+       void (*decrypt)(const u64 *key, const u32 *input, u32 *output);
+       void (*load_encrypt_keys)(const u64 *key);
+       void (*load_decrypt_keys)(const u64 *key);
+       void (*ecb_encrypt)(const u64 *key, const u64 *input, u64 *output,
+                           unsigned int len);
+       void (*ecb_decrypt)(const u64 *key, const u64 *input, u64 *output,
+                           unsigned int len);
+       void (*cbc_encrypt)(const u64 *key, const u64 *input, u64 *output,
+                           unsigned int len, u64 *iv);
+       void (*cbc_decrypt)(const u64 *key, const u64 *input, u64 *output,
+                           unsigned int len, u64 *iv);
+       void (*ctr_crypt)(const u64 *key, const u64 *input, u64 *output,
+                         unsigned int len, u64 *iv);
+};
+
+/* Per-tfm context, populated by aes_set_key(). */
+struct crypto_sparc64_aes_ctx {
+       struct aes_ops *ops;    /* asm dispatch table for this key size */
+       u64 key[AES_MAX_KEYLENGTH / sizeof(u64)];       /* expanded schedule */
+       u32 key_length;         /* original key size in bytes */
+       u32 expanded_key_length;        /* schedule size: 0xb0/0xd0/0xf0 bytes */
+};
+
+extern void aes_sparc64_encrypt_128(const u64 *key, const u32 *input,
+                                   u32 *output);
+extern void aes_sparc64_encrypt_192(const u64 *key, const u32 *input,
+                                   u32 *output);
+extern void aes_sparc64_encrypt_256(const u64 *key, const u32 *input,
+                                   u32 *output);
+
+extern void aes_sparc64_decrypt_128(const u64 *key, const u32 *input,
+                                   u32 *output);
+extern void aes_sparc64_decrypt_192(const u64 *key, const u32 *input,
+                                   u32 *output);
+extern void aes_sparc64_decrypt_256(const u64 *key, const u32 *input,
+                                   u32 *output);
+
+extern void aes_sparc64_load_encrypt_keys_128(const u64 *key);
+extern void aes_sparc64_load_encrypt_keys_192(const u64 *key);
+extern void aes_sparc64_load_encrypt_keys_256(const u64 *key);
+
+extern void aes_sparc64_load_decrypt_keys_128(const u64 *key);
+extern void aes_sparc64_load_decrypt_keys_192(const u64 *key);
+extern void aes_sparc64_load_decrypt_keys_256(const u64 *key);
+
+extern void aes_sparc64_ecb_encrypt_128(const u64 *key, const u64 *input,
+                                       u64 *output, unsigned int len);
+extern void aes_sparc64_ecb_encrypt_192(const u64 *key, const u64 *input,
+                                       u64 *output, unsigned int len);
+extern void aes_sparc64_ecb_encrypt_256(const u64 *key, const u64 *input,
+                                       u64 *output, unsigned int len);
+
+extern void aes_sparc64_ecb_decrypt_128(const u64 *key, const u64 *input,
+                                       u64 *output, unsigned int len);
+extern void aes_sparc64_ecb_decrypt_192(const u64 *key, const u64 *input,
+                                       u64 *output, unsigned int len);
+extern void aes_sparc64_ecb_decrypt_256(const u64 *key, const u64 *input,
+                                       u64 *output, unsigned int len);
+
+extern void aes_sparc64_cbc_encrypt_128(const u64 *key, const u64 *input,
+                                       u64 *output, unsigned int len,
+                                       u64 *iv);
+
+extern void aes_sparc64_cbc_encrypt_192(const u64 *key, const u64 *input,
+                                       u64 *output, unsigned int len,
+                                       u64 *iv);
+
+extern void aes_sparc64_cbc_encrypt_256(const u64 *key, const u64 *input,
+                                       u64 *output, unsigned int len,
+                                       u64 *iv);
+
+extern void aes_sparc64_cbc_decrypt_128(const u64 *key, const u64 *input,
+                                       u64 *output, unsigned int len,
+                                       u64 *iv);
+
+extern void aes_sparc64_cbc_decrypt_192(const u64 *key, const u64 *input,
+                                       u64 *output, unsigned int len,
+                                       u64 *iv);
+
+extern void aes_sparc64_cbc_decrypt_256(const u64 *key, const u64 *input,
+                                       u64 *output, unsigned int len,
+                                       u64 *iv);
+
+extern void aes_sparc64_ctr_crypt_128(const u64 *key, const u64 *input,
+                                     u64 *output, unsigned int len,
+                                     u64 *iv);
+extern void aes_sparc64_ctr_crypt_192(const u64 *key, const u64 *input,
+                                     u64 *output, unsigned int len,
+                                     u64 *iv);
+extern void aes_sparc64_ctr_crypt_256(const u64 *key, const u64 *input,
+                                     u64 *output, unsigned int len,
+                                     u64 *iv);
+
+/* Dispatch tables wiring each key size to its asm implementation;
+ * aes_set_key() selects one of these per tfm.
+ */
+struct aes_ops aes128_ops = {
+       .encrypt                = aes_sparc64_encrypt_128,
+       .decrypt                = aes_sparc64_decrypt_128,
+       .load_encrypt_keys      = aes_sparc64_load_encrypt_keys_128,
+       .load_decrypt_keys      = aes_sparc64_load_decrypt_keys_128,
+       .ecb_encrypt            = aes_sparc64_ecb_encrypt_128,
+       .ecb_decrypt            = aes_sparc64_ecb_decrypt_128,
+       .cbc_encrypt            = aes_sparc64_cbc_encrypt_128,
+       .cbc_decrypt            = aes_sparc64_cbc_decrypt_128,
+       .ctr_crypt              = aes_sparc64_ctr_crypt_128,
+};
+
+struct aes_ops aes192_ops = {
+       .encrypt                = aes_sparc64_encrypt_192,
+       .decrypt                = aes_sparc64_decrypt_192,
+       .load_encrypt_keys      = aes_sparc64_load_encrypt_keys_192,
+       .load_decrypt_keys      = aes_sparc64_load_decrypt_keys_192,
+       .ecb_encrypt            = aes_sparc64_ecb_encrypt_192,
+       .ecb_decrypt            = aes_sparc64_ecb_decrypt_192,
+       .cbc_encrypt            = aes_sparc64_cbc_encrypt_192,
+       .cbc_decrypt            = aes_sparc64_cbc_decrypt_192,
+       .ctr_crypt              = aes_sparc64_ctr_crypt_192,
+};
+
+struct aes_ops aes256_ops = {
+       .encrypt                = aes_sparc64_encrypt_256,
+       .decrypt                = aes_sparc64_decrypt_256,
+       .load_encrypt_keys      = aes_sparc64_load_encrypt_keys_256,
+       .load_decrypt_keys      = aes_sparc64_load_decrypt_keys_256,
+       .ecb_encrypt            = aes_sparc64_ecb_encrypt_256,
+       .ecb_decrypt            = aes_sparc64_ecb_decrypt_256,
+       .cbc_encrypt            = aes_sparc64_cbc_encrypt_256,
+       .cbc_decrypt            = aes_sparc64_cbc_decrypt_256,
+       .ctr_crypt              = aes_sparc64_ctr_crypt_256,
+};
+
+extern void aes_sparc64_key_expand(const u32 *in_key, u64 *output_key,
+                                  unsigned int key_len);
+
+/* ->cia_setkey: select the key-size-specific asm dispatch table,
+ * record the expanded-schedule size (used later to locate the end of
+ * the schedule for the decrypt paths), and expand the key with the
+ * aes_sparc64_key_expand asm helper.  Returns -EINVAL and sets the
+ * CRYPTO_TFM_RES_BAD_KEY_LEN flag for unsupported key sizes.
+ */
+static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+                      unsigned int key_len)
+{
+       struct crypto_sparc64_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+       u32 *flags = &tfm->crt_flags;
+
+       switch (key_len) {
+       case AES_KEYSIZE_128:
+               ctx->expanded_key_length = 0xb0;
+               ctx->ops = &aes128_ops;
+               break;
+
+       case AES_KEYSIZE_192:
+               ctx->expanded_key_length = 0xd0;
+               ctx->ops = &aes192_ops;
+               break;
+
+       case AES_KEYSIZE_256:
+               ctx->expanded_key_length = 0xf0;
+               ctx->ops = &aes256_ops;
+               break;
+
+       default:
+               *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+               return -EINVAL;
+       }
+
+       aes_sparc64_key_expand((const u32 *)in_key, &ctx->key[0], key_len);
+       ctx->key_length = key_len;
+
+       return 0;
+}
+
+/* ->cia_encrypt: single-block encrypt via the key-size-specific asm. */
+static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+{
+       struct crypto_sparc64_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+
+       ctx->ops->encrypt(&ctx->key[0], (const u32 *) src, (u32 *) dst);
+}
+
+/* ->cia_decrypt: single-block decrypt via the key-size-specific asm. */
+static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+{
+       struct crypto_sparc64_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+
+       ctx->ops->decrypt(&ctx->key[0], (const u32 *) src, (u32 *) dst);
+}
+
+#define AES_BLOCK_MASK (~(AES_BLOCK_SIZE-1))
+
+/* blkcipher ->encrypt for ecb(aes): load the round keys once (the asm
+ * presumably keeps them in FP registers -- see load_encrypt_keys),
+ * then hand each contiguous run of whole blocks from the scatterlist
+ * walk to the asm ECB routine.  fprs_write(0) drops the FP register
+ * state when done.
+ */
+static int ecb_encrypt(struct blkcipher_desc *desc,
+                      struct scatterlist *dst, struct scatterlist *src,
+                      unsigned int nbytes)
+{
+       struct crypto_sparc64_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+       struct blkcipher_walk walk;
+       int err;
+
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       err = blkcipher_walk_virt(desc, &walk);
+
+       ctx->ops->load_encrypt_keys(&ctx->key[0]);
+       while ((nbytes = walk.nbytes)) {
+               /* Only whole AES blocks are processed per chunk. */
+               unsigned int block_len = nbytes & AES_BLOCK_MASK;
+
+               if (likely(block_len)) {
+                       ctx->ops->ecb_encrypt(&ctx->key[0],
+                                             (const u64 *)walk.src.virt.addr,
+                                             (u64 *) walk.dst.virt.addr,
+                                             block_len);
+               }
+               nbytes &= AES_BLOCK_SIZE - 1;
+               err = blkcipher_walk_done(desc, &walk, nbytes);
+       }
+       fprs_write(0);
+       return err;
+}
+
+/* blkcipher ->decrypt for ecb(aes).  The asm decrypt routines expect a
+ * pointer just PAST the expanded schedule (they index it at negative
+ * offsets), hence key_end below.
+ */
+static int ecb_decrypt(struct blkcipher_desc *desc,
+                      struct scatterlist *dst, struct scatterlist *src,
+                      unsigned int nbytes)
+{
+       struct crypto_sparc64_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+       struct blkcipher_walk walk;
+       u64 *key_end;
+       int err;
+
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       err = blkcipher_walk_virt(desc, &walk);
+
+       ctx->ops->load_decrypt_keys(&ctx->key[0]);
+       key_end = &ctx->key[ctx->expanded_key_length / sizeof(u64)];
+       while ((nbytes = walk.nbytes)) {
+               /* Only whole AES blocks are processed per chunk. */
+               unsigned int block_len = nbytes & AES_BLOCK_MASK;
+
+               if (likely(block_len)) {
+                       ctx->ops->ecb_decrypt(key_end,
+                                             (const u64 *) walk.src.virt.addr,
+                                             (u64 *) walk.dst.virt.addr, block_len);
+               }
+               nbytes &= AES_BLOCK_SIZE - 1;
+               err = blkcipher_walk_done(desc, &walk, nbytes);
+       }
+       fprs_write(0);
+
+       return err;
+}
+
+/* blkcipher ->encrypt for cbc(aes): same walk structure as
+ * ecb_encrypt, but the asm routine also threads walk.iv through the
+ * chain and updates it in place.
+ */
+static int cbc_encrypt(struct blkcipher_desc *desc,
+                      struct scatterlist *dst, struct scatterlist *src,
+                      unsigned int nbytes)
+{
+       struct crypto_sparc64_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+       struct blkcipher_walk walk;
+       int err;
+
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       err = blkcipher_walk_virt(desc, &walk);
+
+       ctx->ops->load_encrypt_keys(&ctx->key[0]);
+       while ((nbytes = walk.nbytes)) {
+               /* Only whole AES blocks are processed per chunk. */
+               unsigned int block_len = nbytes & AES_BLOCK_MASK;
+
+               if (likely(block_len)) {
+                       ctx->ops->cbc_encrypt(&ctx->key[0],
+                                             (const u64 *)walk.src.virt.addr,
+                                             (u64 *) walk.dst.virt.addr,
+                                             block_len, (u64 *) walk.iv);
+               }
+               nbytes &= AES_BLOCK_SIZE - 1;
+               err = blkcipher_walk_done(desc, &walk, nbytes);
+       }
+       fprs_write(0);
+       return err;
+}
+
+/* blkcipher ->decrypt for cbc(aes).  As in ecb_decrypt, the asm wants
+ * a pointer one past the expanded schedule (key_end); walk.iv is
+ * updated in place by the asm routine.
+ */
+static int cbc_decrypt(struct blkcipher_desc *desc,
+                      struct scatterlist *dst, struct scatterlist *src,
+                      unsigned int nbytes)
+{
+       struct crypto_sparc64_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+       struct blkcipher_walk walk;
+       u64 *key_end;
+       int err;
+
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       err = blkcipher_walk_virt(desc, &walk);
+
+       ctx->ops->load_decrypt_keys(&ctx->key[0]);
+       key_end = &ctx->key[ctx->expanded_key_length / sizeof(u64)];
+       while ((nbytes = walk.nbytes)) {
+               /* Only whole AES blocks are processed per chunk. */
+               unsigned int block_len = nbytes & AES_BLOCK_MASK;
+
+               if (likely(block_len)) {
+                       ctx->ops->cbc_decrypt(key_end,
+                                             (const u64 *) walk.src.virt.addr,
+                                             (u64 *) walk.dst.virt.addr,
+                                             block_len, (u64 *) walk.iv);
+               }
+               nbytes &= AES_BLOCK_SIZE - 1;
+               err = blkcipher_walk_done(desc, &walk, nbytes);
+       }
+       fprs_write(0);
+
+       return err;
+}
+
+/* blkcipher ->encrypt/->decrypt for ctr(aes) (same operation both
+ * ways).  The asm routine advances the counter in walk.iv in place.
+ * NOTE(review): only whole blocks are processed (block_len mask); a
+ * trailing partial block would be skipped here -- confirm the alg
+ * registration constrains requests to full AES blocks.
+ */
+static int ctr_crypt(struct blkcipher_desc *desc,
+                    struct scatterlist *dst, struct scatterlist *src,
+                    unsigned int nbytes)
+{
+       struct crypto_sparc64_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+       struct blkcipher_walk walk;
+       int err;
+
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       err = blkcipher_walk_virt(desc, &walk);
+
+       ctx->ops->load_encrypt_keys(&ctx->key[0]);
+       while ((nbytes = walk.nbytes)) {
+               unsigned int block_len = nbytes & AES_BLOCK_MASK;
+
+               if (likely(block_len)) {
+                       ctx->ops->ctr_crypt(&ctx->key[0],
+                                           (const u64 *)walk.src.virt.addr,
+                                           (u64 *) walk.dst.virt.addr,
+                                           block_len, (u64 *) walk.iv);
+               }
+               nbytes &= AES_BLOCK_SIZE - 1;
+               err = blkcipher_walk_done(desc, &walk, nbytes);
+       }
+       fprs_write(0);
+       return err;
+}
+
+/* Algorithm registrations: the bare "aes" single-block cipher plus
+ * ecb/cbc/ctr blkcipher modes, all backed by the sparc64 AES opcodes.
+ */
+static struct crypto_alg algs[] = { {
+       .cra_name               = "aes",
+       .cra_driver_name        = "aes-sparc64",
+       .cra_priority           = SPARC_CR_OPCODE_PRIORITY,
+       .cra_flags              = CRYPTO_ALG_TYPE_CIPHER,
+       .cra_blocksize          = AES_BLOCK_SIZE,
+       .cra_ctxsize            = sizeof(struct crypto_sparc64_aes_ctx),
+       .cra_alignmask          = 3,
+       .cra_module             = THIS_MODULE,
+       .cra_u  = {
+               .cipher = {
+                       .cia_min_keysize        = AES_MIN_KEY_SIZE,
+                       .cia_max_keysize        = AES_MAX_KEY_SIZE,
+                       .cia_setkey             = aes_set_key,
+                       .cia_encrypt            = aes_encrypt,
+                       .cia_decrypt            = aes_decrypt
+               }
+       }
+}, {
+       .cra_name               = "ecb(aes)",
+       .cra_driver_name        = "ecb-aes-sparc64",
+       .cra_priority           = SPARC_CR_OPCODE_PRIORITY,
+       .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER,
+       .cra_blocksize          = AES_BLOCK_SIZE,
+       .cra_ctxsize            = sizeof(struct crypto_sparc64_aes_ctx),
+       .cra_alignmask          = 7,
+       .cra_type               = &crypto_blkcipher_type,
+       .cra_module             = THIS_MODULE,
+       .cra_u = {
+               .blkcipher = {
+                       .min_keysize    = AES_MIN_KEY_SIZE,
+                       .max_keysize    = AES_MAX_KEY_SIZE,
+                       .setkey         = aes_set_key,
+                       .encrypt        = ecb_encrypt,
+                       .decrypt        = ecb_decrypt,
+               },
+       },
+}, {
+       .cra_name               = "cbc(aes)",
+       .cra_driver_name        = "cbc-aes-sparc64",
+       .cra_priority           = SPARC_CR_OPCODE_PRIORITY,
+       .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER,
+       .cra_blocksize          = AES_BLOCK_SIZE,
+       .cra_ctxsize            = sizeof(struct crypto_sparc64_aes_ctx),
+       .cra_alignmask          = 7,
+       .cra_type               = &crypto_blkcipher_type,
+       .cra_module             = THIS_MODULE,
+       .cra_u = {
+               .blkcipher = {
+                       .min_keysize    = AES_MIN_KEY_SIZE,
+                       .max_keysize    = AES_MAX_KEY_SIZE,
+                       .setkey         = aes_set_key,
+                       .encrypt        = cbc_encrypt,
+                       .decrypt        = cbc_decrypt,
+               },
+       },
+}, {
+       /* NOTE(review): ctr(aes) is registered with a 16-byte
+        * cra_blocksize, so only multiple-of-16 request lengths ever
+        * reach ctr_crypt(); CTR is conventionally registered with
+        * blocksize 1 — confirm a partial-final-block path exists in
+        * ctr_crypt() before reducing this.
+        */
+       .cra_name               = "ctr(aes)",
+       .cra_driver_name        = "ctr-aes-sparc64",
+       .cra_priority           = SPARC_CR_OPCODE_PRIORITY,
+       .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER,
+       .cra_blocksize          = AES_BLOCK_SIZE,
+       .cra_ctxsize            = sizeof(struct crypto_sparc64_aes_ctx),
+       .cra_alignmask          = 7,
+       .cra_type               = &crypto_blkcipher_type,
+       .cra_module             = THIS_MODULE,
+       .cra_u = {
+               .blkcipher = {
+                       .min_keysize    = AES_MIN_KEY_SIZE,
+                       .max_keysize    = AES_MAX_KEY_SIZE,
+                       .setkey         = aes_set_key,
+                       /* CTR uses the same transform both ways. */
+                       .encrypt        = ctr_crypt,
+                       .decrypt        = ctr_crypt,
+               },
+       },
+} };
+
+/* True when the CPU advertises the crypto opcodes via the ELF hwcaps
+ * and the Configuration Feature Register (%asr26) reports AES support.
+ */
+static bool __init sparc64_has_aes_opcode(void)
+{
+       unsigned long cfr;
+
+       if (!(sparc64_elf_hwcap & HWCAP_SPARC_CRYPTO))
+               return false;
+
+       __asm__ __volatile__("rd %%asr26, %0" : "=r" (cfr));
+       return (cfr & CFR_AES) != 0;
+}
+
+/* Module load: register the AES algorithms when the sparc64 AES
+ * opcodes are available, otherwise decline with -ENODEV so a generic
+ * implementation can be used instead.
+ */
+static int __init aes_sparc64_mod_init(void)
+{
+       int i;
+
+       /* Pre-initialize each alg's list head before registration. */
+       for (i = 0; i < ARRAY_SIZE(algs); i++)
+               INIT_LIST_HEAD(&algs[i].cra_list);
+
+       if (!sparc64_has_aes_opcode()) {
+               pr_info("sparc64 aes opcodes not available.\n");
+               return -ENODEV;
+       }
+
+       pr_info("Using sparc64 aes opcodes optimized AES implementation\n");
+       return crypto_register_algs(algs, ARRAY_SIZE(algs));
+}
+
+/* Module unload: drop all algorithm registrations made at init. */
+static void __exit aes_sparc64_mod_fini(void)
+{
+       crypto_unregister_algs(algs, ARRAY_SIZE(algs));
+}
+
+module_init(aes_sparc64_mod_init);
+module_exit(aes_sparc64_mod_fini);
+
+MODULE_LICENSE("GPL");
+/* AES is a block cipher, not a hash; the previous text ("AES Secure
+ * Hash Algorithm") was copied from the sparc64 SHA glue modules.
+ */
+MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, sparc64 aes opcode accelerated");
+
+MODULE_ALIAS("aes");
diff --git a/arch/sparc/crypto/camellia_asm.S b/arch/sparc/crypto/camellia_asm.S
new file mode 100644 (file)
index 0000000..cc39553
--- /dev/null
@@ -0,0 +1,563 @@
+#include <linux/linkage.h>
+#include <asm/visasm.h>
+
+#include "opcodes.h"
+
+/* One Camellia "grand round": six F-function rounds consuming twelve
+ * consecutive 64-bit subkeys held in float registers starting at
+ * KEY_BASE.  I0/I1 name the two 64-bit halves of the cipher state.
+ */
+#define CAMELLIA_6ROUNDS(KEY_BASE, I0, I1) \
+       CAMELLIA_F(KEY_BASE +  0, I1, I0, I1) \
+       CAMELLIA_F(KEY_BASE +  2, I0, I1, I0) \
+       CAMELLIA_F(KEY_BASE +  4, I1, I0, I1) \
+       CAMELLIA_F(KEY_BASE +  6, I0, I1, I0) \
+       CAMELLIA_F(KEY_BASE +  8, I1, I0, I1) \
+       CAMELLIA_F(KEY_BASE + 10, I0, I1, I0)
+
+/* A grand round followed by the FL/FL^-1 layer that separates grand
+ * rounds, consuming two further subkeys.
+ */
+#define CAMELLIA_6ROUNDS_FL_FLI(KEY_BASE, I0, I1) \
+       CAMELLIA_6ROUNDS(KEY_BASE, I0, I1) \
+       CAMELLIA_FL(KEY_BASE + 12, I0, I0) \
+       CAMELLIA_FLI(KEY_BASE + 14, I1, I1)
+
+       .data
+
+       /* Camellia key-schedule Sigma constants. */
+       .align  8
+SIGMA: .xword  0xA09E667F3BCC908B
+       .xword  0xB67AE8584CAA73B2
+       .xword  0xC6EF372FE94F82BE
+       .xword  0x54FF53A5F1D36F1C
+       .xword  0x10E527FADE682D1D
+       .xword  0xB05688C2B3E6C1FD
+
+       .text
+
+       .align  32
+/* Expand the raw key at %o0 into the full encryption subkey schedule
+ * at %o1 and the round-reversed decryption schedule at %o3.  %o2 is
+ * the key length in bytes (16, 24 or 32).
+ */
+ENTRY(camellia_sparc64_key_expand)
+       /* %o0=in_key, %o1=encrypt_key, %o2=key_len, %o3=decrypt_key */
+       VISEntry
+       ld      [%o0 + 0x00], %f0       ! i0, k[0]
+       ld      [%o0 + 0x04], %f1       ! i1, k[1]
+       ld      [%o0 + 0x08], %f2       ! i2, k[2]
+       ld      [%o0 + 0x0c], %f3       ! i3, k[3]
+       std     %f0, [%o1 + 0x00]       ! k[0, 1]
+       fsrc2   %f0, %f28
+       std     %f2, [%o1 + 0x08]       ! k[2, 3]
+       cmp     %o2, 16
+       be      10f
+        fsrc2  %f2, %f30
+
+       /* 192/256-bit keys: read the remaining key words; for 192-bit
+        * keys the missing half is the complement of the previous one.
+        */
+       ld      [%o0 + 0x10], %f0
+       ld      [%o0 + 0x14], %f1
+       std     %f0, [%o1 + 0x20]       ! k[8, 9]
+       cmp     %o2, 24
+       fone    %f10
+       be,a    1f
+        fxor   %f10, %f0, %f2
+       ld      [%o0 + 0x18], %f2
+       ld      [%o0 + 0x1c], %f3
+1:
+       std     %f2, [%o1 + 0x28]       ! k[10, 11]
+       fxor    %f28, %f0, %f0
+       fxor    %f30, %f2, %f2
+
+10:
+       /* Derive KA (and KB) from the key using the Sigma constants. */
+       sethi   %hi(SIGMA), %g3
+       or      %g3, %lo(SIGMA), %g3
+       ldd     [%g3 + 0x00], %f16
+       ldd     [%g3 + 0x08], %f18
+       ldd     [%g3 + 0x10], %f20
+       ldd     [%g3 + 0x18], %f22
+       ldd     [%g3 + 0x20], %f24
+       ldd     [%g3 + 0x28], %f26
+       CAMELLIA_F(16, 2, 0, 2)
+       CAMELLIA_F(18, 0, 2, 0)
+       fxor    %f28, %f0, %f0
+       fxor    %f30, %f2, %f2
+       CAMELLIA_F(20, 2, 0, 2)
+       CAMELLIA_F(22, 0, 2, 0)
+
+/* Rotate the 128-bit value held in the S01:S23 register pair left by
+ * N bits (N < 64); TMP1/TMP2 are scratch.  Rotations below are
+ * cumulative on the live pair.
+ */
+#define ROTL128(S01, S23, TMP1, TMP2, N)       \
+       srlx    S01, (64 - N), TMP1;            \
+       sllx    S01, N, S01;                    \
+       srlx    S23, (64 - N), TMP2;            \
+       sllx    S23, N, S23;                    \
+       or      S01, TMP2, S01;                 \
+       or      S23, TMP1, S23
+
+       cmp     %o2, 16
+       bne     1f
+        nop
+       /* 128-bit key */
+       std     %f0, [%o1 + 0x10]       ! k[ 4,  5]
+       std     %f2, [%o1 + 0x18]       ! k[ 6,  7]
+       MOVDTOX_F0_O4
+       MOVDTOX_F2_O5
+       ROTL128(%o4, %o5, %g2, %g3, 15)
+       stx     %o4, [%o1 + 0x30]       ! k[12, 13]
+       stx     %o5, [%o1 + 0x38]       ! k[14, 15]
+       ROTL128(%o4, %o5, %g2, %g3, 15)
+       stx     %o4, [%o1 + 0x40]       ! k[16, 17]
+       stx     %o5, [%o1 + 0x48]       ! k[18, 19]
+       ROTL128(%o4, %o5, %g2, %g3, 15)
+       stx     %o4, [%o1 + 0x60]       ! k[24, 25]
+       ROTL128(%o4, %o5, %g2, %g3, 15)
+       stx     %o4, [%o1 + 0x70]       ! k[28, 29]
+       stx     %o5, [%o1 + 0x78]       ! k[30, 31]
+       ROTL128(%o4, %o5, %g2, %g3, 34)
+       stx     %o4, [%o1 + 0xa0]       ! k[40, 41]
+       stx     %o5, [%o1 + 0xa8]       ! k[42, 43]
+       ROTL128(%o4, %o5, %g2, %g3, 17)
+       stx     %o4, [%o1 + 0xc0]       ! k[48, 49]
+       stx     %o5, [%o1 + 0xc8]       ! k[50, 51]
+
+       ldx     [%o1 + 0x00], %o4       ! k[ 0,  1]
+       ldx     [%o1 + 0x08], %o5       ! k[ 2,  3]
+       ROTL128(%o4, %o5, %g2, %g3, 15)
+       stx     %o4, [%o1 + 0x20]       ! k[ 8,  9]
+       stx     %o5, [%o1 + 0x28]       ! k[10, 11]
+       ROTL128(%o4, %o5, %g2, %g3, 30)
+       stx     %o4, [%o1 + 0x50]       ! k[20, 21]
+       stx     %o5, [%o1 + 0x58]       ! k[22, 23]
+       ROTL128(%o4, %o5, %g2, %g3, 15)
+       stx     %o5, [%o1 + 0x68]       ! k[26, 27]
+       ROTL128(%o4, %o5, %g2, %g3, 17)
+       stx     %o4, [%o1 + 0x80]       ! k[32, 33]
+       stx     %o5, [%o1 + 0x88]       ! k[34, 35]
+       ROTL128(%o4, %o5, %g2, %g3, 17)
+       stx     %o4, [%o1 + 0x90]       ! k[36, 37]
+       stx     %o5, [%o1 + 0x98]       ! k[38, 39]
+       ROTL128(%o4, %o5, %g2, %g3, 17)
+       stx     %o4, [%o1 + 0xb0]       ! k[44, 45]
+       stx     %o5, [%o1 + 0xb8]       ! k[46, 47]
+
+       ba,pt   %xcc, 2f
+        mov    (3 * 16 * 4), %o0
+
+1:
+       /* 192-bit or 256-bit key */
+       std     %f0, [%o1 + 0x30]       ! k[12, 13]
+       std     %f2, [%o1 + 0x38]       ! k[14, 15]
+       ldd     [%o1 + 0x20], %f4       ! k[ 8,  9]
+       ldd     [%o1 + 0x28], %f6       ! k[10, 11]
+       fxor    %f0, %f4, %f0
+       fxor    %f2, %f6, %f2
+       CAMELLIA_F(24, 2, 0, 2)
+       CAMELLIA_F(26, 0, 2, 0)
+       std     %f0, [%o1 + 0x10]       ! k[ 4,  5]
+       std     %f2, [%o1 + 0x18]       ! k[ 6,  7]
+       MOVDTOX_F0_O4
+       MOVDTOX_F2_O5
+       ROTL128(%o4, %o5, %g2, %g3, 30)
+       stx     %o4, [%o1 + 0x50]       ! k[20, 21]
+       stx     %o5, [%o1 + 0x58]       ! k[22, 23]
+       ROTL128(%o4, %o5, %g2, %g3, 30)
+       stx     %o4, [%o1 + 0xa0]       ! k[40, 41]
+       stx     %o5, [%o1 + 0xa8]       ! k[42, 43]
+       ROTL128(%o4, %o5, %g2, %g3, 51)
+       stx     %o4, [%o1 + 0x100]      ! k[64, 65]
+       stx     %o5, [%o1 + 0x108]      ! k[66, 67]
+       ldx     [%o1 + 0x20], %o4       ! k[ 8,  9]
+       ldx     [%o1 + 0x28], %o5       ! k[10, 11]
+       ROTL128(%o4, %o5, %g2, %g3, 15)
+       stx     %o4, [%o1 + 0x20]       ! k[ 8,  9]
+       stx     %o5, [%o1 + 0x28]       ! k[10, 11]
+       ROTL128(%o4, %o5, %g2, %g3, 15)
+       stx     %o4, [%o1 + 0x40]       ! k[16, 17]
+       stx     %o5, [%o1 + 0x48]       ! k[18, 19]
+       ROTL128(%o4, %o5, %g2, %g3, 30)
+       stx     %o4, [%o1 + 0x90]       ! k[36, 37]
+       stx     %o5, [%o1 + 0x98]       ! k[38, 39]
+       ROTL128(%o4, %o5, %g2, %g3, 34)
+       stx     %o4, [%o1 + 0xd0]       ! k[52, 53]
+       stx     %o5, [%o1 + 0xd8]       ! k[54, 55]
+       ldx     [%o1 + 0x30], %o4       ! k[12, 13]
+       ldx     [%o1 + 0x38], %o5       ! k[14, 15]
+       ROTL128(%o4, %o5, %g2, %g3, 15)
+       stx     %o4, [%o1 + 0x30]       ! k[12, 13]
+       stx     %o5, [%o1 + 0x38]       ! k[14, 15]
+       ROTL128(%o4, %o5, %g2, %g3, 30)
+       stx     %o4, [%o1 + 0x70]       ! k[28, 29]
+       stx     %o5, [%o1 + 0x78]       ! k[30, 31]
+       srlx    %o4, 32, %g2
+       srlx    %o5, 32, %g3
+       stw     %o4, [%o1 + 0xc0]       ! k[48]
+       stw     %g3, [%o1 + 0xc4]       ! k[49]
+       stw     %o5, [%o1 + 0xc8]       ! k[50]
+       stw     %g2, [%o1 + 0xcc]       ! k[51]
+       ROTL128(%o4, %o5, %g2, %g3, 49)
+       stx     %o4, [%o1 + 0xe0]       ! k[56, 57]
+       stx     %o5, [%o1 + 0xe8]       ! k[58, 59]
+       ldx     [%o1 + 0x00], %o4       ! k[ 0,  1]
+       ldx     [%o1 + 0x08], %o5       ! k[ 2,  3]
+       ROTL128(%o4, %o5, %g2, %g3, 45)
+       stx     %o4, [%o1 + 0x60]       ! k[24, 25]
+       stx     %o5, [%o1 + 0x68]       ! k[26, 27]
+       ROTL128(%o4, %o5, %g2, %g3, 15)
+       stx     %o4, [%o1 + 0x80]       ! k[32, 33]
+       stx     %o5, [%o1 + 0x88]       ! k[34, 35]
+       ROTL128(%o4, %o5, %g2, %g3, 17)
+       stx     %o4, [%o1 + 0xb0]       ! k[44, 45]
+       stx     %o5, [%o1 + 0xb8]       ! k[46, 47]
+       ROTL128(%o4, %o5, %g2, %g3, 34)
+       stx     %o4, [%o1 + 0xf0]       ! k[60, 61]
+       stx     %o5, [%o1 + 0xf8]       ! k[62, 63]
+       mov     (4 * 16 * 4), %o0
+2:
+       /* Build the decrypt schedule at %o3 by copying the encrypt
+        * schedule (%o0 = its byte length) back-to-front, one 16*4
+        * byte group at a time.
+        */
+       add     %o1, %o0, %o1
+       ldd     [%o1 + 0x00], %f0
+       ldd     [%o1 + 0x08], %f2
+       std     %f0, [%o3 + 0x00]
+       std     %f2, [%o3 + 0x08]
+       add     %o3, 0x10, %o3
+1:
+       sub     %o1, (16 * 4), %o1
+       ldd     [%o1 + 0x38], %f0
+       ldd     [%o1 + 0x30], %f2
+       ldd     [%o1 + 0x28], %f4
+       ldd     [%o1 + 0x20], %f6
+       ldd     [%o1 + 0x18], %f8
+       ldd     [%o1 + 0x10], %f10
+       std     %f0, [%o3 + 0x00]
+       std     %f2, [%o3 + 0x08]
+       std     %f4, [%o3 + 0x10]
+       std     %f6, [%o3 + 0x18]
+       std     %f8, [%o3 + 0x20]
+       std     %f10, [%o3 + 0x28]
+
+       ldd     [%o1 + 0x08], %f0
+       ldd     [%o1 + 0x00], %f2
+       std     %f0, [%o3 + 0x30]
+       std     %f2, [%o3 + 0x38]
+       subcc   %o0, (16 * 4), %o0
+       bne,pt  %icc, 1b
+        add    %o3, (16 * 4), %o3
+
+       std     %f2, [%o3 - 0x10]
+       std     %f0, [%o3 - 0x08]
+
+       retl
+        VISExit
+ENDPROC(camellia_sparc64_key_expand)
+
+       .align  32
+/* Transform one 16-byte block with a pre-expanded subkey schedule.
+ * Direction is chosen by which schedule the caller passes in %o0
+ * (encrypt_key or decrypt_key).
+ */
+ENTRY(camellia_sparc64_crypt)
+       /* %o0=key, %o1=input, %o2=output, %o3=key_len */
+       VISEntry
+
+       ld      [%o1 + 0x00], %f0
+       ld      [%o1 + 0x04], %f1
+       ld      [%o1 + 0x08], %f2
+       ld      [%o1 + 0x0c], %f3
+
+       /* Pre-whitening with the first two subkeys. */
+       ldd     [%o0 + 0x00], %f4
+       ldd     [%o0 + 0x08], %f6
+
+       cmp     %o3, 16
+       fxor    %f4, %f0, %f0
+       be      1f
+        fxor   %f6, %f2, %f2
+
+       /* Keys longer than 128 bits: run one extra grand round first
+        * and advance the key pointer past its subkeys.
+        */
+       ldd     [%o0 + 0x10], %f8
+       ldd     [%o0 + 0x18], %f10
+       ldd     [%o0 + 0x20], %f12
+       ldd     [%o0 + 0x28], %f14
+       ldd     [%o0 + 0x30], %f16
+       ldd     [%o0 + 0x38], %f18
+       ldd     [%o0 + 0x40], %f20
+       ldd     [%o0 + 0x48], %f22
+       add     %o0, 0x40, %o0
+
+       CAMELLIA_6ROUNDS_FL_FLI( 8, 0, 2)
+
+1:
+       ldd     [%o0 + 0x10], %f8
+       ldd     [%o0 + 0x18], %f10
+       ldd     [%o0 + 0x20], %f12
+       ldd     [%o0 + 0x28], %f14
+       ldd     [%o0 + 0x30], %f16
+       ldd     [%o0 + 0x38], %f18
+       ldd     [%o0 + 0x40], %f20
+       ldd     [%o0 + 0x48], %f22
+       ldd     [%o0 + 0x50], %f24
+       ldd     [%o0 + 0x58], %f26
+       ldd     [%o0 + 0x60], %f28
+       ldd     [%o0 + 0x68], %f30
+       ldd     [%o0 + 0x70], %f32
+       ldd     [%o0 + 0x78], %f34
+       ldd     [%o0 + 0x80], %f36
+       ldd     [%o0 + 0x88], %f38
+       ldd     [%o0 + 0x90], %f40
+       ldd     [%o0 + 0x98], %f42
+       ldd     [%o0 + 0xa0], %f44
+       ldd     [%o0 + 0xa8], %f46
+       ldd     [%o0 + 0xb0], %f48
+       ldd     [%o0 + 0xb8], %f50
+       ldd     [%o0 + 0xc0], %f52
+       ldd     [%o0 + 0xc8], %f54
+
+       /* Three grand rounds, then post-whitening with the final two
+        * subkeys; note the output halves are swapped.
+        */
+       CAMELLIA_6ROUNDS_FL_FLI( 8, 0, 2)
+       CAMELLIA_6ROUNDS_FL_FLI(24, 0, 2)
+       CAMELLIA_6ROUNDS(40, 0, 2)
+       fxor    %f52, %f2, %f2
+       fxor    %f54, %f0, %f0
+
+       st      %f2, [%o2 + 0x00]
+       st      %f3, [%o2 + 0x04]
+       st      %f0, [%o2 + 0x08]
+       st      %f1, [%o2 + 0x0c]
+
+       retl
+        VISExit
+ENDPROC(camellia_sparc64_crypt)
+
+       .align  32
+/* Preload the subkey schedule into %f4-%f54 for the bulk ecb/cbc
+ * routines.  Note: VISEntry without a matching VISExit — the FPU
+ * state is deliberately left live for the following bulk call; the
+ * glue code clears FPRS via fprs_write(0) when finished.
+ */
+ENTRY(camellia_sparc64_load_keys)
+       /* %o0=key, %o1=key_len */
+       VISEntry
+       ldd     [%o0 + 0x00], %f4
+       ldd     [%o0 + 0x08], %f6
+       ldd     [%o0 + 0x10], %f8
+       ldd     [%o0 + 0x18], %f10
+       ldd     [%o0 + 0x20], %f12
+       ldd     [%o0 + 0x28], %f14
+       ldd     [%o0 + 0x30], %f16
+       ldd     [%o0 + 0x38], %f18
+       ldd     [%o0 + 0x40], %f20
+       ldd     [%o0 + 0x48], %f22
+       ldd     [%o0 + 0x50], %f24
+       ldd     [%o0 + 0x58], %f26
+       ldd     [%o0 + 0x60], %f28
+       ldd     [%o0 + 0x68], %f30
+       ldd     [%o0 + 0x70], %f32
+       ldd     [%o0 + 0x78], %f34
+       ldd     [%o0 + 0x80], %f36
+       ldd     [%o0 + 0x88], %f38
+       ldd     [%o0 + 0x90], %f40
+       ldd     [%o0 + 0x98], %f42
+       ldd     [%o0 + 0xa0], %f44
+       ldd     [%o0 + 0xa8], %f46
+       ldd     [%o0 + 0xb0], %f48
+       ldd     [%o0 + 0xb8], %f50
+       ldd     [%o0 + 0xc0], %f52
+       retl
+        ldd    [%o0 + 0xc8], %f54
+ENDPROC(camellia_sparc64_load_keys)
+
+       .align  32
+/* Bulk ECB transform for 128-bit keys (3 grand rounds).  Requires the
+ * subkeys already loaded by camellia_sparc64_load_keys, and len to be
+ * a nonzero multiple of 16.
+ */
+ENTRY(camellia_sparc64_ecb_crypt_3_grand_rounds)
+       /* %o0=input, %o1=output, %o2=len, %o3=key */
+1:     ldd     [%o0 + 0x00], %f0
+       ldd     [%o0 + 0x08], %f2
+       add     %o0, 0x10, %o0
+       fxor    %f4, %f0, %f0
+       fxor    %f6, %f2, %f2
+       CAMELLIA_6ROUNDS_FL_FLI( 8, 0, 2)
+       CAMELLIA_6ROUNDS_FL_FLI(24, 0, 2)
+       CAMELLIA_6ROUNDS(40, 0, 2)
+       fxor    %f52, %f2, %f2
+       fxor    %f54, %f0, %f0
+       std     %f2, [%o1 + 0x00]
+       std     %f0, [%o1 + 0x08]
+       subcc   %o2, 0x10, %o2
+       bne,pt  %icc, 1b
+        add    %o1, 0x10, %o1
+       retl
+        nop
+ENDPROC(camellia_sparc64_ecb_crypt_3_grand_rounds)
+
+       .align  32
+/* Bulk ECB transform for 192/256-bit keys (4 grand rounds).  The
+ * schedule does not fit in the preloaded registers, so the tail
+ * subkeys are reloaded from %o3 mid-block and %f8-%f22 are restored
+ * for the next iteration.  len must be a nonzero multiple of 16.
+ */
+ENTRY(camellia_sparc64_ecb_crypt_4_grand_rounds)
+       /* %o0=input, %o1=output, %o2=len, %o3=key */
+1:     ldd     [%o0 + 0x00], %f0
+       ldd     [%o0 + 0x08], %f2
+       add     %o0, 0x10, %o0
+       fxor    %f4, %f0, %f0
+       fxor    %f6, %f2, %f2
+       CAMELLIA_6ROUNDS_FL_FLI( 8, 0, 2)
+       ldd     [%o3 + 0xd0], %f8
+       ldd     [%o3 + 0xd8], %f10
+       ldd     [%o3 + 0xe0], %f12
+       ldd     [%o3 + 0xe8], %f14
+       ldd     [%o3 + 0xf0], %f16
+       ldd     [%o3 + 0xf8], %f18
+       ldd     [%o3 + 0x100], %f20
+       ldd     [%o3 + 0x108], %f22
+       CAMELLIA_6ROUNDS_FL_FLI(24, 0, 2)
+       CAMELLIA_6ROUNDS_FL_FLI(40, 0, 2)
+       CAMELLIA_F(8, 2, 0, 2)
+       CAMELLIA_F(10, 0, 2, 0)
+       ldd     [%o3 + 0x10], %f8
+       ldd     [%o3 + 0x18], %f10
+       CAMELLIA_F(12, 2, 0, 2)
+       CAMELLIA_F(14, 0, 2, 0)
+       ldd     [%o3 + 0x20], %f12
+       ldd     [%o3 + 0x28], %f14
+       CAMELLIA_F(16, 2, 0, 2)
+       CAMELLIA_F(18, 0, 2, 0)
+       ldd     [%o3 + 0x30], %f16
+       ldd     [%o3 + 0x38], %f18
+       fxor    %f20, %f2, %f2
+       fxor    %f22, %f0, %f0
+       ldd     [%o3 + 0x40], %f20
+       ldd     [%o3 + 0x48], %f22
+       std     %f2, [%o1 + 0x00]
+       std     %f0, [%o1 + 0x08]
+       subcc   %o2, 0x10, %o2
+       bne,pt  %icc, 1b
+        add    %o1, 0x10, %o1
+       retl
+        nop
+ENDPROC(camellia_sparc64_ecb_crypt_4_grand_rounds)
+
+       .align  32
+/* Bulk CBC encryption, 128-bit keys.  The running chaining value is
+ * kept in %f60/%f62 and written back to the IV buffer on exit so the
+ * caller can continue with the next request.  Requires preloaded
+ * subkeys; len must be a nonzero multiple of 16.
+ */
+ENTRY(camellia_sparc64_cbc_encrypt_3_grand_rounds)
+       /* %o0=input, %o1=output, %o2=len, %o3=key, %o4=IV */
+       ldd     [%o4 + 0x00], %f60
+       ldd     [%o4 + 0x08], %f62
+1:     ldd     [%o0 + 0x00], %f0
+       ldd     [%o0 + 0x08], %f2
+       add     %o0, 0x10, %o0
+       fxor    %f60, %f0, %f0
+       fxor    %f62, %f2, %f2
+       fxor    %f4, %f0, %f0
+       fxor    %f6, %f2, %f2
+       CAMELLIA_6ROUNDS_FL_FLI( 8, 0, 2)
+       CAMELLIA_6ROUNDS_FL_FLI(24, 0, 2)
+       CAMELLIA_6ROUNDS(40, 0, 2)
+       fxor    %f52, %f2, %f60
+       fxor    %f54, %f0, %f62
+       std     %f60, [%o1 + 0x00]
+       std     %f62, [%o1 + 0x08]
+       subcc   %o2, 0x10, %o2
+       bne,pt  %icc, 1b
+        add    %o1, 0x10, %o1
+       std     %f60, [%o4 + 0x00]
+       retl
+        std    %f62, [%o4 + 0x08]
+ENDPROC(camellia_sparc64_cbc_encrypt_3_grand_rounds)
+
+       .align  32
+/* Bulk CBC encryption, 192/256-bit keys (4 grand rounds).  Chaining
+ * value lives in %f60/%f62 and is stored back to the IV on exit; tail
+ * subkeys are reloaded from %o3 per block as in the ECB variant.
+ * len must be a nonzero multiple of 16.
+ */
+ENTRY(camellia_sparc64_cbc_encrypt_4_grand_rounds)
+       /* %o0=input, %o1=output, %o2=len, %o3=key, %o4=IV */
+       ldd     [%o4 + 0x00], %f60
+       ldd     [%o4 + 0x08], %f62
+1:     ldd     [%o0 + 0x00], %f0
+       ldd     [%o0 + 0x08], %f2
+       add     %o0, 0x10, %o0
+       fxor    %f60, %f0, %f0
+       fxor    %f62, %f2, %f2
+       fxor    %f4, %f0, %f0
+       fxor    %f6, %f2, %f2
+       CAMELLIA_6ROUNDS_FL_FLI( 8, 0, 2)
+       ldd     [%o3 + 0xd0], %f8
+       ldd     [%o3 + 0xd8], %f10
+       ldd     [%o3 + 0xe0], %f12
+       ldd     [%o3 + 0xe8], %f14
+       ldd     [%o3 + 0xf0], %f16
+       ldd     [%o3 + 0xf8], %f18
+       ldd     [%o3 + 0x100], %f20
+       ldd     [%o3 + 0x108], %f22
+       CAMELLIA_6ROUNDS_FL_FLI(24, 0, 2)
+       CAMELLIA_6ROUNDS_FL_FLI(40, 0, 2)
+       CAMELLIA_F(8, 2, 0, 2)
+       CAMELLIA_F(10, 0, 2, 0)
+       ldd     [%o3 + 0x10], %f8
+       ldd     [%o3 + 0x18], %f10
+       CAMELLIA_F(12, 2, 0, 2)
+       CAMELLIA_F(14, 0, 2, 0)
+       ldd     [%o3 + 0x20], %f12
+       ldd     [%o3 + 0x28], %f14
+       CAMELLIA_F(16, 2, 0, 2)
+       CAMELLIA_F(18, 0, 2, 0)
+       ldd     [%o3 + 0x30], %f16
+       ldd     [%o3 + 0x38], %f18
+       fxor    %f20, %f2, %f60
+       fxor    %f22, %f0, %f62
+       ldd     [%o3 + 0x40], %f20
+       ldd     [%o3 + 0x48], %f22
+       std     %f60, [%o1 + 0x00]
+       std     %f62, [%o1 + 0x08]
+       subcc   %o2, 0x10, %o2
+       bne,pt  %icc, 1b
+        add    %o1, 0x10, %o1
+       std     %f60, [%o4 + 0x00]
+       retl
+        std    %f62, [%o4 + 0x08]
+ENDPROC(camellia_sparc64_cbc_encrypt_4_grand_rounds)
+
+       .align  32
+/* Bulk CBC decryption, 128-bit keys.  The input ciphertext block is
+ * preserved in %f56/%f58 so it can become the next chaining value
+ * (%f60/%f62) after the XOR; the final chaining value is stored back
+ * to the IV.  Requires preloaded subkeys; len a nonzero multiple of 16.
+ */
+ENTRY(camellia_sparc64_cbc_decrypt_3_grand_rounds)
+       /* %o0=input, %o1=output, %o2=len, %o3=key, %o4=IV */
+       ldd     [%o4 + 0x00], %f60
+       ldd     [%o4 + 0x08], %f62
+1:     ldd     [%o0 + 0x00], %f56
+       ldd     [%o0 + 0x08], %f58
+       add     %o0, 0x10, %o0
+       fxor    %f4, %f56, %f0
+       fxor    %f6, %f58, %f2
+       CAMELLIA_6ROUNDS_FL_FLI( 8, 0, 2)
+       CAMELLIA_6ROUNDS_FL_FLI(24, 0, 2)
+       CAMELLIA_6ROUNDS(40, 0, 2)
+       fxor    %f52, %f2, %f2
+       fxor    %f54, %f0, %f0
+       fxor    %f60, %f2, %f2
+       fxor    %f62, %f0, %f0
+       fsrc2   %f56, %f60
+       fsrc2   %f58, %f62
+       std     %f2, [%o1 + 0x00]
+       std     %f0, [%o1 + 0x08]
+       subcc   %o2, 0x10, %o2
+       bne,pt  %icc, 1b
+        add    %o1, 0x10, %o1
+       std     %f60, [%o4 + 0x00]
+       retl
+        std    %f62, [%o4 + 0x08]
+ENDPROC(camellia_sparc64_cbc_decrypt_3_grand_rounds)
+
+       .align  32
+/* Bulk CBC decryption, 192/256-bit keys (4 grand rounds).  Combines
+ * the tail-subkey reload of the 4-round ECB path with the CBC
+ * chaining in %f56-%f62; final chaining value is written back to the
+ * IV.  len must be a nonzero multiple of 16.
+ */
+ENTRY(camellia_sparc64_cbc_decrypt_4_grand_rounds)
+       /* %o0=input, %o1=output, %o2=len, %o3=key, %o4=IV */
+       ldd     [%o4 + 0x00], %f60
+       ldd     [%o4 + 0x08], %f62
+1:     ldd     [%o0 + 0x00], %f56
+       ldd     [%o0 + 0x08], %f58
+       add     %o0, 0x10, %o0
+       fxor    %f4, %f56, %f0
+       fxor    %f6, %f58, %f2
+       CAMELLIA_6ROUNDS_FL_FLI( 8, 0, 2)
+       ldd     [%o3 + 0xd0], %f8
+       ldd     [%o3 + 0xd8], %f10
+       ldd     [%o3 + 0xe0], %f12
+       ldd     [%o3 + 0xe8], %f14
+       ldd     [%o3 + 0xf0], %f16
+       ldd     [%o3 + 0xf8], %f18
+       ldd     [%o3 + 0x100], %f20
+       ldd     [%o3 + 0x108], %f22
+       CAMELLIA_6ROUNDS_FL_FLI(24, 0, 2)
+       CAMELLIA_6ROUNDS_FL_FLI(40, 0, 2)
+       CAMELLIA_F(8, 2, 0, 2)
+       CAMELLIA_F(10, 0, 2, 0)
+       ldd     [%o3 + 0x10], %f8
+       ldd     [%o3 + 0x18], %f10
+       CAMELLIA_F(12, 2, 0, 2)
+       CAMELLIA_F(14, 0, 2, 0)
+       ldd     [%o3 + 0x20], %f12
+       ldd     [%o3 + 0x28], %f14
+       CAMELLIA_F(16, 2, 0, 2)
+       CAMELLIA_F(18, 0, 2, 0)
+       ldd     [%o3 + 0x30], %f16
+       ldd     [%o3 + 0x38], %f18
+       fxor    %f20, %f2, %f2
+       fxor    %f22, %f0, %f0
+       ldd     [%o3 + 0x40], %f20
+       ldd     [%o3 + 0x48], %f22
+       fxor    %f60, %f2, %f2
+       fxor    %f62, %f0, %f0
+       fsrc2   %f56, %f60
+       fsrc2   %f58, %f62
+       std     %f2, [%o1 + 0x00]
+       std     %f0, [%o1 + 0x08]
+       subcc   %o2, 0x10, %o2
+       bne,pt  %icc, 1b
+        add    %o1, 0x10, %o1
+       std     %f60, [%o4 + 0x00]
+       retl
+        std    %f62, [%o4 + 0x08]
+ENDPROC(camellia_sparc64_cbc_decrypt_4_grand_rounds)
diff --git a/arch/sparc/crypto/camellia_glue.c b/arch/sparc/crypto/camellia_glue.c
new file mode 100644 (file)
index 0000000..42905c0
--- /dev/null
@@ -0,0 +1,322 @@
+/* Glue code for CAMELLIA encryption optimized for sparc64 crypto opcodes.
+ *
+ * Copyright (C) 2012 David S. Miller <davem@davemloft.net>
+ */
+
+#define pr_fmt(fmt)    KBUILD_MODNAME ": " fmt
+
+#include <linux/crypto.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/types.h>
+#include <crypto/algapi.h>
+
+#include <asm/fpumacro.h>
+#include <asm/pstate.h>
+#include <asm/elf.h>
+
+#include "opcodes.h"
+
+#define CAMELLIA_MIN_KEY_SIZE        16
+#define CAMELLIA_MAX_KEY_SIZE        32
+#define CAMELLIA_BLOCK_SIZE          16
+#define CAMELLIA_TABLE_BYTE_LEN     272
+
+/* Per-tfm context: fully expanded encryption and decryption subkey
+ * schedules (filled in by camellia_sparc64_key_expand) plus the
+ * original key length in bytes (16, 24 or 32).
+ */
+struct camellia_sparc64_ctx {
+       u64 encrypt_key[CAMELLIA_TABLE_BYTE_LEN / sizeof(u64)];
+       u64 decrypt_key[CAMELLIA_TABLE_BYTE_LEN / sizeof(u64)];
+       int key_len;
+};
+
+extern void camellia_sparc64_key_expand(const u32 *in_key, u64 *encrypt_key,
+                                       unsigned int key_len, u64 *decrypt_key);
+
+/* .cia_setkey handler.
+ *
+ * Accepts 16-, 24- or 32-byte keys; any other length sets the
+ * CRYPTO_TFM_RES_BAD_KEY_LEN flag and fails with -EINVAL.  On success
+ * records the key length and expands both the encryption and the
+ * decryption subkey schedules via the sparc64 assembler helper.
+ */
+static int camellia_set_key(struct crypto_tfm *tfm, const u8 *_in_key,
+                           unsigned int key_len)
+{
+       struct camellia_sparc64_ctx *ctx = crypto_tfm_ctx(tfm);
+       const u32 *in_key = (const u32 *) _in_key;
+
+       switch (key_len) {
+       case 16:
+       case 24:
+       case 32:
+               break;
+       default:
+               tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+               return -EINVAL;
+       }
+
+       ctx->key_len = key_len;
+       camellia_sparc64_key_expand(in_key, &ctx->encrypt_key[0],
+                                   key_len, &ctx->decrypt_key[0]);
+       return 0;
+}
+
+extern void camellia_sparc64_crypt(const u64 *key, const u32 *input,
+                                  u32 *output, unsigned int key_len);
+
+/* Single-block encrypt: runs the assembler core with the encryption
+ * subkey schedule.
+ */
+static void camellia_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+{
+       struct camellia_sparc64_ctx *ctx = crypto_tfm_ctx(tfm);
+
+       camellia_sparc64_crypt(&ctx->encrypt_key[0],
+                              (const u32 *) src,
+                              (u32 *) dst, ctx->key_len);
+}
+
+/* Single-block decrypt: same assembler core, but with the reversed
+ * (decryption) subkey schedule.
+ */
+static void camellia_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+{
+       struct camellia_sparc64_ctx *ctx = crypto_tfm_ctx(tfm);
+
+       camellia_sparc64_crypt(&ctx->decrypt_key[0],
+                              (const u32 *) src,
+                              (u32 *) dst, ctx->key_len);
+}
+
+extern void camellia_sparc64_load_keys(const u64 *key, unsigned int key_len);
+
+typedef void ecb_crypt_op(const u64 *input, u64 *output, unsigned int len,
+                         const u64 *key);
+
+extern ecb_crypt_op camellia_sparc64_ecb_crypt_3_grand_rounds;
+extern ecb_crypt_op camellia_sparc64_ecb_crypt_4_grand_rounds;
+
+#define CAMELLIA_BLOCK_MASK    (~(CAMELLIA_BLOCK_SIZE - 1))
+
+/* Common ECB worker for both directions.
+ *
+ * Selects the 3- or 4-grand-round assembler routine by key length,
+ * preloads the chosen subkey schedule into the FPU registers, then
+ * processes each contiguous chunk produced by the blkcipher walk a
+ * whole number of blocks at a time.  fprs_write(0) afterwards discards
+ * the FPU state that held key material.
+ */
+static int __ecb_crypt(struct blkcipher_desc *desc,
+                      struct scatterlist *dst, struct scatterlist *src,
+                      unsigned int nbytes, bool encrypt)
+{
+       struct camellia_sparc64_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+       struct blkcipher_walk walk;
+       ecb_crypt_op *op;
+       const u64 *key;
+       int err;
+
+       op = camellia_sparc64_ecb_crypt_3_grand_rounds;
+       if (ctx->key_len != 16)
+               op = camellia_sparc64_ecb_crypt_4_grand_rounds;
+
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       err = blkcipher_walk_virt(desc, &walk);
+
+       if (encrypt)
+               key = &ctx->encrypt_key[0];
+       else
+               key = &ctx->decrypt_key[0];
+       camellia_sparc64_load_keys(key, ctx->key_len);
+       while ((nbytes = walk.nbytes)) {
+               /* Round the chunk down to whole blocks. */
+               unsigned int block_len = nbytes & CAMELLIA_BLOCK_MASK;
+
+               if (likely(block_len)) {
+                       const u64 *src64;
+                       u64 *dst64;
+
+                       src64 = (const u64 *)walk.src.virt.addr;
+                       dst64 = (u64 *) walk.dst.virt.addr;
+                       op(src64, dst64, block_len, key);
+               }
+               /* Hand any sub-block remainder back to the walker. */
+               nbytes &= CAMELLIA_BLOCK_SIZE - 1;
+               err = blkcipher_walk_done(desc, &walk, nbytes);
+       }
+       fprs_write(0);
+       return err;
+}
+
+/* ECB encryption entry point: thin wrapper around __ecb_crypt(). */
+static int ecb_encrypt(struct blkcipher_desc *desc,
+                      struct scatterlist *dst, struct scatterlist *src,
+                      unsigned int nbytes)
+{
+       return __ecb_crypt(desc, dst, src, nbytes, true);
+}
+
+/* ECB decryption entry point: thin wrapper around __ecb_crypt(). */
+static int ecb_decrypt(struct blkcipher_desc *desc,
+                      struct scatterlist *dst, struct scatterlist *src,
+                      unsigned int nbytes)
+{
+       return __ecb_crypt(desc, dst, src, nbytes, false);
+}
+
+typedef void cbc_crypt_op(const u64 *input, u64 *output, unsigned int len,
+                         const u64 *key, u64 *iv);
+
+extern cbc_crypt_op camellia_sparc64_cbc_encrypt_3_grand_rounds;
+extern cbc_crypt_op camellia_sparc64_cbc_encrypt_4_grand_rounds;
+extern cbc_crypt_op camellia_sparc64_cbc_decrypt_3_grand_rounds;
+extern cbc_crypt_op camellia_sparc64_cbc_decrypt_4_grand_rounds;
+
+/* CBC encryption: like __ecb_crypt() but passes walk.iv so the
+ * assembler routine can chain blocks and update the IV in place.
+ */
+static int cbc_encrypt(struct blkcipher_desc *desc,
+                      struct scatterlist *dst, struct scatterlist *src,
+                      unsigned int nbytes)
+{
+       struct camellia_sparc64_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+       struct blkcipher_walk walk;
+       cbc_crypt_op *op;
+       const u64 *key;
+       int err;
+
+       op = camellia_sparc64_cbc_encrypt_3_grand_rounds;
+       if (ctx->key_len != 16)
+               op = camellia_sparc64_cbc_encrypt_4_grand_rounds;
+
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       err = blkcipher_walk_virt(desc, &walk);
+
+       key = &ctx->encrypt_key[0];
+       camellia_sparc64_load_keys(key, ctx->key_len);
+       while ((nbytes = walk.nbytes)) {
+               unsigned int block_len = nbytes & CAMELLIA_BLOCK_MASK;
+
+               if (likely(block_len)) {
+                       const u64 *src64;
+                       u64 *dst64;
+
+                       src64 = (const u64 *)walk.src.virt.addr;
+                       dst64 = (u64 *) walk.dst.virt.addr;
+                       op(src64, dst64, block_len, key,
+                          (u64 *) walk.iv);
+               }
+               nbytes &= CAMELLIA_BLOCK_SIZE - 1;
+               err = blkcipher_walk_done(desc, &walk, nbytes);
+       }
+       /* Discard FPU state holding key material. */
+       fprs_write(0);
+       return err;
+}
+
+/* CBC decryption: mirror of cbc_encrypt() using the decryption subkey
+ * schedule and the cbc_decrypt assembler routines.
+ */
+static int cbc_decrypt(struct blkcipher_desc *desc,
+                      struct scatterlist *dst, struct scatterlist *src,
+                      unsigned int nbytes)
+{
+       struct camellia_sparc64_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+       struct blkcipher_walk walk;
+       cbc_crypt_op *op;
+       const u64 *key;
+       int err;
+
+       op = camellia_sparc64_cbc_decrypt_3_grand_rounds;
+       if (ctx->key_len != 16)
+               op = camellia_sparc64_cbc_decrypt_4_grand_rounds;
+
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       err = blkcipher_walk_virt(desc, &walk);
+
+       key = &ctx->decrypt_key[0];
+       camellia_sparc64_load_keys(key, ctx->key_len);
+       while ((nbytes = walk.nbytes)) {
+               unsigned int block_len = nbytes & CAMELLIA_BLOCK_MASK;
+
+               if (likely(block_len)) {
+                       const u64 *src64;
+                       u64 *dst64;
+
+                       src64 = (const u64 *)walk.src.virt.addr;
+                       dst64 = (u64 *) walk.dst.virt.addr;
+                       op(src64, dst64, block_len, key,
+                          (u64 *) walk.iv);
+               }
+               nbytes &= CAMELLIA_BLOCK_SIZE - 1;
+               err = blkcipher_walk_done(desc, &walk, nbytes);
+       }
+       /* Discard FPU state holding key material. */
+       fprs_write(0);
+       return err;
+}
+
+/* Algorithm registrations: the bare "camellia" single-block cipher
+ * plus ecb/cbc blkcipher modes, all backed by the sparc64 Camellia
+ * opcodes.
+ */
+static struct crypto_alg algs[] = { {
+       .cra_name               = "camellia",
+       .cra_driver_name        = "camellia-sparc64",
+       .cra_priority           = SPARC_CR_OPCODE_PRIORITY,
+       .cra_flags              = CRYPTO_ALG_TYPE_CIPHER,
+       .cra_blocksize          = CAMELLIA_BLOCK_SIZE,
+       .cra_ctxsize            = sizeof(struct camellia_sparc64_ctx),
+       .cra_alignmask          = 3,
+       .cra_module             = THIS_MODULE,
+       .cra_u  = {
+               .cipher = {
+                       .cia_min_keysize        = CAMELLIA_MIN_KEY_SIZE,
+                       .cia_max_keysize        = CAMELLIA_MAX_KEY_SIZE,
+                       .cia_setkey             = camellia_set_key,
+                       .cia_encrypt            = camellia_encrypt,
+                       .cia_decrypt            = camellia_decrypt
+               }
+       }
+}, {
+       .cra_name               = "ecb(camellia)",
+       .cra_driver_name        = "ecb-camellia-sparc64",
+       .cra_priority           = SPARC_CR_OPCODE_PRIORITY,
+       .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER,
+       .cra_blocksize          = CAMELLIA_BLOCK_SIZE,
+       .cra_ctxsize            = sizeof(struct camellia_sparc64_ctx),
+       .cra_alignmask          = 7,
+       .cra_type               = &crypto_blkcipher_type,
+       .cra_module             = THIS_MODULE,
+       .cra_u = {
+               .blkcipher = {
+                       .min_keysize    = CAMELLIA_MIN_KEY_SIZE,
+                       .max_keysize    = CAMELLIA_MAX_KEY_SIZE,
+                       .setkey         = camellia_set_key,
+                       .encrypt        = ecb_encrypt,
+                       .decrypt        = ecb_decrypt,
+               },
+       },
+}, {
+       .cra_name               = "cbc(camellia)",
+       .cra_driver_name        = "cbc-camellia-sparc64",
+       .cra_priority           = SPARC_CR_OPCODE_PRIORITY,
+       .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER,
+       .cra_blocksize          = CAMELLIA_BLOCK_SIZE,
+       .cra_ctxsize            = sizeof(struct camellia_sparc64_ctx),
+       .cra_alignmask          = 7,
+       .cra_type               = &crypto_blkcipher_type,
+       .cra_module             = THIS_MODULE,
+       .cra_u = {
+               .blkcipher = {
+                       .min_keysize    = CAMELLIA_MIN_KEY_SIZE,
+                       .max_keysize    = CAMELLIA_MAX_KEY_SIZE,
+                       .setkey         = camellia_set_key,
+                       .encrypt        = cbc_encrypt,
+                       .decrypt        = cbc_decrypt,
+               },
+       },
+}
+};
+
+/* True when the CPU advertises the crypto opcodes via the ELF hwcaps
+ * and the Configuration Feature Register (%asr26) reports Camellia
+ * support.
+ */
+static bool __init sparc64_has_camellia_opcode(void)
+{
+       unsigned long cfr;
+
+       if (!(sparc64_elf_hwcap & HWCAP_SPARC_CRYPTO))
+               return false;
+
+       __asm__ __volatile__("rd %%asr26, %0" : "=r" (cfr));
+       return (cfr & CFR_CAMELLIA) != 0;
+}
+
+/* Module load: register the Camellia algorithms when the sparc64
+ * opcodes are available, otherwise decline with -ENODEV so a generic
+ * implementation can be used instead.
+ */
+static int __init camellia_sparc64_mod_init(void)
+{
+       int i;
+
+       /* Pre-initialize each alg's list head before registration. */
+       for (i = 0; i < ARRAY_SIZE(algs); i++)
+               INIT_LIST_HEAD(&algs[i].cra_list);
+
+       if (sparc64_has_camellia_opcode()) {
+               pr_info("Using sparc64 camellia opcodes optimized CAMELLIA implementation\n");
+               return crypto_register_algs(algs, ARRAY_SIZE(algs));
+       }
+       pr_info("sparc64 camellia opcodes not available.\n");
+       return -ENODEV;
+}
+
+/* Module unload: drop all algorithm registrations made at init. */
+static void __exit camellia_sparc64_mod_fini(void)
+{
+       crypto_unregister_algs(algs, ARRAY_SIZE(algs));
+}
+
+module_init(camellia_sparc64_mod_init);
+module_exit(camellia_sparc64_mod_fini);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Camellia Cipher Algorithm, sparc64 camellia opcode accelerated");
+
+/* The alias must name this cipher — the old MODULE_ALIAS("aes") was a
+ * copy-paste from aes_glue.c and would break autoloading for
+ * "camellia" requests (and wrongly claim to provide "aes").
+ */
+MODULE_ALIAS("camellia");
diff --git a/arch/sparc/crypto/crc32c_asm.S b/arch/sparc/crypto/crc32c_asm.S
new file mode 100644 (file)
index 0000000..2b1976e
--- /dev/null
@@ -0,0 +1,20 @@
+#include <linux/linkage.h>
+#include <asm/visasm.h>
+#include <asm/asi.h>
+
+#include "opcodes.h"
+
+ENTRY(crc32c_sparc64)
+       /* %o0=crc32p, %o1=data_ptr, %o2=len */
+       VISEntryHalf
+       lda     [%o0] ASI_PL, %f1
+1:     ldd     [%o1], %f2
+       CRC32C(0,2,0)
+       subcc   %o2, 8, %o2
+       bne,pt  %icc, 1b
+        add    %o1, 0x8, %o1
+       sta     %f1, [%o0] ASI_PL
+       VISExitHalf
+2:     retl
+        nop
+ENDPROC(crc32c_sparc64)
diff --git a/arch/sparc/crypto/crc32c_glue.c b/arch/sparc/crypto/crc32c_glue.c
new file mode 100644 (file)
index 0000000..0bd89ce
--- /dev/null
@@ -0,0 +1,179 @@
+/* Glue code for CRC32C optimized for sparc64 crypto opcodes.
+ *
+ * This is based largely upon arch/x86/crypto/crc32c-intel.c
+ *
+ * Copyright (C) 2008 Intel Corporation
+ * Authors: Austin Zhang <austin_zhang@linux.intel.com>
+ *          Kent Liu <kent.liu@intel.com>
+ */
+
+#define pr_fmt(fmt)    KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/crc32.h>
+
+#include <crypto/internal/hash.h>
+
+#include <asm/pstate.h>
+#include <asm/elf.h>
+
+#include "opcodes.h"
+
+/*
+ * Setting the seed allows arbitrary accumulators and flexible XOR policy
+ * If your algorithm starts with ~0, then XOR with ~0 before you set
+ * the seed.
+ */
+static int crc32c_sparc64_setkey(struct crypto_shash *hash, const u8 *key,
+                                unsigned int keylen)
+{
+       u32 *mctx = crypto_shash_ctx(hash);
+
+       if (keylen != sizeof(u32)) {
+               crypto_shash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN);
+               return -EINVAL;
+       }
+       *(__le32 *)mctx = le32_to_cpup((__le32 *)key);
+       return 0;
+}
+
+static int crc32c_sparc64_init(struct shash_desc *desc)
+{
+       u32 *mctx = crypto_shash_ctx(desc->tfm);
+       u32 *crcp = shash_desc_ctx(desc);
+
+       *crcp = *mctx;
+
+       return 0;
+}
+
+extern void crc32c_sparc64(u32 *crcp, const u64 *data, unsigned int len);
+
+static void crc32c_compute(u32 *crcp, const u64 *data, unsigned int len)
+{
+       unsigned int asm_len;
+
+       asm_len = len & ~7U;
+       if (asm_len) {
+               crc32c_sparc64(crcp, data, asm_len);
+               data += asm_len / 8;
+               len -= asm_len;
+       }
+       if (len)
+               *crcp = __crc32c_le(*crcp, (const unsigned char *) data, len);
+}
+
+static int crc32c_sparc64_update(struct shash_desc *desc, const u8 *data,
+                                unsigned int len)
+{
+       u32 *crcp = shash_desc_ctx(desc);
+
+       crc32c_compute(crcp, (const u64 *) data, len);
+
+       return 0;
+}
+
+static int __crc32c_sparc64_finup(u32 *crcp, const u8 *data, unsigned int len,
+                                 u8 *out)
+{
+       u32 tmp = *crcp;
+
+       crc32c_compute(&tmp, (const u64 *) data, len);
+
+       *(__le32 *) out = ~cpu_to_le32(tmp);
+       return 0;
+}
+
+static int crc32c_sparc64_finup(struct shash_desc *desc, const u8 *data,
+                               unsigned int len, u8 *out)
+{
+       return __crc32c_sparc64_finup(shash_desc_ctx(desc), data, len, out);
+}
+
+static int crc32c_sparc64_final(struct shash_desc *desc, u8 *out)
+{
+       u32 *crcp = shash_desc_ctx(desc);
+
+       *(__le32 *) out = ~cpu_to_le32p(crcp);
+       return 0;
+}
+
+static int crc32c_sparc64_digest(struct shash_desc *desc, const u8 *data,
+                                unsigned int len, u8 *out)
+{
+       return __crc32c_sparc64_finup(crypto_shash_ctx(desc->tfm), data, len,
+                                     out);
+}
+
+static int crc32c_sparc64_cra_init(struct crypto_tfm *tfm)
+{
+       u32 *key = crypto_tfm_ctx(tfm);
+
+       *key = ~0;
+
+       return 0;
+}
+
+#define CHKSUM_BLOCK_SIZE      1
+#define CHKSUM_DIGEST_SIZE     4
+
+static struct shash_alg alg = {
+       .setkey                 =       crc32c_sparc64_setkey,
+       .init                   =       crc32c_sparc64_init,
+       .update                 =       crc32c_sparc64_update,
+       .final                  =       crc32c_sparc64_final,
+       .finup                  =       crc32c_sparc64_finup,
+       .digest                 =       crc32c_sparc64_digest,
+       .descsize               =       sizeof(u32),
+       .digestsize             =       CHKSUM_DIGEST_SIZE,
+       .base                   =       {
+               .cra_name               =       "crc32c",
+               .cra_driver_name        =       "crc32c-sparc64",
+               .cra_priority           =       SPARC_CR_OPCODE_PRIORITY,
+               .cra_blocksize          =       CHKSUM_BLOCK_SIZE,
+               .cra_ctxsize            =       sizeof(u32),
+               .cra_alignmask          =       7,
+               .cra_module             =       THIS_MODULE,
+               .cra_init               =       crc32c_sparc64_cra_init,
+       }
+};
+
+static bool __init sparc64_has_crc32c_opcode(void)
+{
+       unsigned long cfr;
+
+       if (!(sparc64_elf_hwcap & HWCAP_SPARC_CRYPTO))
+               return false;
+
+       __asm__ __volatile__("rd %%asr26, %0" : "=r" (cfr));
+       if (!(cfr & CFR_CRC32C))
+               return false;
+
+       return true;
+}
+
+static int __init crc32c_sparc64_mod_init(void)
+{
+       if (sparc64_has_crc32c_opcode()) {
+               pr_info("Using sparc64 crc32c opcode optimized CRC32C implementation\n");
+               return crypto_register_shash(&alg);
+       }
+       pr_info("sparc64 crc32c opcode not available.\n");
+       return -ENODEV;
+}
+
+static void __exit crc32c_sparc64_mod_fini(void)
+{
+       crypto_unregister_shash(&alg);
+}
+
+module_init(crc32c_sparc64_mod_init);
+module_exit(crc32c_sparc64_mod_fini);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("CRC32c (Castagnoli), sparc64 crc32c opcode accelerated");
+
+MODULE_ALIAS("crc32c");
diff --git a/arch/sparc/crypto/crop_devid.c b/arch/sparc/crypto/crop_devid.c
new file mode 100644 (file)
index 0000000..5f5724a
--- /dev/null
@@ -0,0 +1,14 @@
+#include <linux/module.h>
+#include <linux/of_device.h>
+
+/* This is a dummy device table linked into all of the crypto
+ * opcode drivers.  It serves to trigger the module autoloading
+ * mechanisms in userspace which scan the OF device tree and
+ * load any modules which have device table entries that
+ * match OF device nodes.
+ */
+static const struct of_device_id crypto_opcode_match[] = {
+       { .name = "cpu", .compatible = "sun4v", },
+       {},
+};
+MODULE_DEVICE_TABLE(of, crypto_opcode_match);
diff --git a/arch/sparc/crypto/des_asm.S b/arch/sparc/crypto/des_asm.S
new file mode 100644 (file)
index 0000000..30b6e90
--- /dev/null
@@ -0,0 +1,418 @@
+#include <linux/linkage.h>
+#include <asm/visasm.h>
+
+#include "opcodes.h"
+
+       .align  32
+ENTRY(des_sparc64_key_expand)
+       /* %o0=input_key, %o1=output_key */
+       VISEntryHalf
+       ld      [%o0 + 0x00], %f0
+       ld      [%o0 + 0x04], %f1
+       DES_KEXPAND(0, 0, 0)
+       DES_KEXPAND(0, 1, 2)
+       DES_KEXPAND(2, 3, 6)
+       DES_KEXPAND(2, 2, 4)
+       DES_KEXPAND(6, 3, 10)
+       DES_KEXPAND(6, 2, 8)
+       DES_KEXPAND(10, 3, 14)
+       DES_KEXPAND(10, 2, 12)
+       DES_KEXPAND(14, 1, 16)
+       DES_KEXPAND(16, 3, 20)
+       DES_KEXPAND(16, 2, 18)
+       DES_KEXPAND(20, 3, 24)
+       DES_KEXPAND(20, 2, 22)
+       DES_KEXPAND(24, 3, 28)
+       DES_KEXPAND(24, 2, 26)
+       DES_KEXPAND(28, 1, 30)
+       std     %f0, [%o1 + 0x00]
+       std     %f2, [%o1 + 0x08]
+       std     %f4, [%o1 + 0x10]
+       std     %f6, [%o1 + 0x18]
+       std     %f8, [%o1 + 0x20]
+       std     %f10, [%o1 + 0x28]
+       std     %f12, [%o1 + 0x30]
+       std     %f14, [%o1 + 0x38]
+       std     %f16, [%o1 + 0x40]
+       std     %f18, [%o1 + 0x48]
+       std     %f20, [%o1 + 0x50]
+       std     %f22, [%o1 + 0x58]
+       std     %f24, [%o1 + 0x60]
+       std     %f26, [%o1 + 0x68]
+       std     %f28, [%o1 + 0x70]
+       std     %f30, [%o1 + 0x78]
+       retl
+        VISExitHalf
+ENDPROC(des_sparc64_key_expand)
+
+       .align  32
+ENTRY(des_sparc64_crypt)
+       /* %o0=key, %o1=input, %o2=output */
+       VISEntry
+       ldd     [%o1 + 0x00], %f32
+       ldd     [%o0 + 0x00], %f0
+       ldd     [%o0 + 0x08], %f2
+       ldd     [%o0 + 0x10], %f4
+       ldd     [%o0 + 0x18], %f6
+       ldd     [%o0 + 0x20], %f8
+       ldd     [%o0 + 0x28], %f10
+       ldd     [%o0 + 0x30], %f12
+       ldd     [%o0 + 0x38], %f14
+       ldd     [%o0 + 0x40], %f16
+       ldd     [%o0 + 0x48], %f18
+       ldd     [%o0 + 0x50], %f20
+       ldd     [%o0 + 0x58], %f22
+       ldd     [%o0 + 0x60], %f24
+       ldd     [%o0 + 0x68], %f26
+       ldd     [%o0 + 0x70], %f28
+       ldd     [%o0 + 0x78], %f30
+       DES_IP(32, 32)
+       DES_ROUND(0, 2, 32, 32)
+       DES_ROUND(4, 6, 32, 32)
+       DES_ROUND(8, 10, 32, 32)
+       DES_ROUND(12, 14, 32, 32)
+       DES_ROUND(16, 18, 32, 32)
+       DES_ROUND(20, 22, 32, 32)
+       DES_ROUND(24, 26, 32, 32)
+       DES_ROUND(28, 30, 32, 32)
+       DES_IIP(32, 32)
+       std     %f32, [%o2 + 0x00]
+       retl
+        VISExit
+ENDPROC(des_sparc64_crypt)
+
+       .align  32
+ENTRY(des_sparc64_load_keys)
+       /* %o0=key */
+       VISEntry
+       ldd     [%o0 + 0x00], %f0
+       ldd     [%o0 + 0x08], %f2
+       ldd     [%o0 + 0x10], %f4
+       ldd     [%o0 + 0x18], %f6
+       ldd     [%o0 + 0x20], %f8
+       ldd     [%o0 + 0x28], %f10
+       ldd     [%o0 + 0x30], %f12
+       ldd     [%o0 + 0x38], %f14
+       ldd     [%o0 + 0x40], %f16
+       ldd     [%o0 + 0x48], %f18
+       ldd     [%o0 + 0x50], %f20
+       ldd     [%o0 + 0x58], %f22
+       ldd     [%o0 + 0x60], %f24
+       ldd     [%o0 + 0x68], %f26
+       ldd     [%o0 + 0x70], %f28
+       retl
+        ldd    [%o0 + 0x78], %f30
+ENDPROC(des_sparc64_load_keys)
+
+       .align  32
+ENTRY(des_sparc64_ecb_crypt)
+       /* %o0=input, %o1=output, %o2=len */
+1:     ldd     [%o0 + 0x00], %f32
+       add     %o0, 0x08, %o0
+       DES_IP(32, 32)
+       DES_ROUND(0, 2, 32, 32)
+       DES_ROUND(4, 6, 32, 32)
+       DES_ROUND(8, 10, 32, 32)
+       DES_ROUND(12, 14, 32, 32)
+       DES_ROUND(16, 18, 32, 32)
+       DES_ROUND(20, 22, 32, 32)
+       DES_ROUND(24, 26, 32, 32)
+       DES_ROUND(28, 30, 32, 32)
+       DES_IIP(32, 32)
+       std     %f32, [%o1 + 0x00]
+       subcc   %o2, 0x08, %o2
+       bne,pt  %icc, 1b
+        add    %o1, 0x08, %o1
+       retl
+        nop
+ENDPROC(des_sparc64_ecb_crypt)
+
+       .align  32
+ENTRY(des_sparc64_cbc_encrypt)
+       /* %o0=input, %o1=output, %o2=len, %o3=IV */
+       ldd     [%o3 + 0x00], %f32
+1:     ldd     [%o0 + 0x00], %f34
+       fxor    %f32, %f34, %f32
+       DES_IP(32, 32)
+       DES_ROUND(0, 2, 32, 32)
+       DES_ROUND(4, 6, 32, 32)
+       DES_ROUND(8, 10, 32, 32)
+       DES_ROUND(12, 14, 32, 32)
+       DES_ROUND(16, 18, 32, 32)
+       DES_ROUND(20, 22, 32, 32)
+       DES_ROUND(24, 26, 32, 32)
+       DES_ROUND(28, 30, 32, 32)
+       DES_IIP(32, 32)
+       std     %f32, [%o1 + 0x00]
+       add     %o0, 0x08, %o0
+       subcc   %o2, 0x08, %o2
+       bne,pt  %icc, 1b
+        add    %o1, 0x08, %o1
+       retl
+        std    %f32, [%o3 + 0x00]
+ENDPROC(des_sparc64_cbc_encrypt)
+
+       .align  32
+ENTRY(des_sparc64_cbc_decrypt)
+       /* %o0=input, %o1=output, %o2=len, %o3=IV */
+       ldd     [%o3 + 0x00], %f34
+1:     ldd     [%o0 + 0x00], %f36
+       DES_IP(36, 32)
+       DES_ROUND(0, 2, 32, 32)
+       DES_ROUND(4, 6, 32, 32)
+       DES_ROUND(8, 10, 32, 32)
+       DES_ROUND(12, 14, 32, 32)
+       DES_ROUND(16, 18, 32, 32)
+       DES_ROUND(20, 22, 32, 32)
+       DES_ROUND(24, 26, 32, 32)
+       DES_ROUND(28, 30, 32, 32)
+       DES_IIP(32, 32)
+       fxor    %f32, %f34, %f32
+       fsrc2   %f36, %f34
+       std     %f32, [%o1 + 0x00]
+       add     %o0, 0x08, %o0
+       subcc   %o2, 0x08, %o2
+       bne,pt  %icc, 1b
+        add    %o1, 0x08, %o1
+       retl
+        std    %f36, [%o3 + 0x00]
+ENDPROC(des_sparc64_cbc_decrypt)
+
+       .align  32
+ENTRY(des3_ede_sparc64_crypt)
+       /* %o0=key, %o1=input, %o2=output */
+       VISEntry
+       ldd     [%o1 + 0x00], %f32
+       ldd     [%o0 + 0x00], %f0
+       ldd     [%o0 + 0x08], %f2
+       ldd     [%o0 + 0x10], %f4
+       ldd     [%o0 + 0x18], %f6
+       ldd     [%o0 + 0x20], %f8
+       ldd     [%o0 + 0x28], %f10
+       ldd     [%o0 + 0x30], %f12
+       ldd     [%o0 + 0x38], %f14
+       ldd     [%o0 + 0x40], %f16
+       ldd     [%o0 + 0x48], %f18
+       ldd     [%o0 + 0x50], %f20
+       ldd     [%o0 + 0x58], %f22
+       ldd     [%o0 + 0x60], %f24
+       ldd     [%o0 + 0x68], %f26
+       ldd     [%o0 + 0x70], %f28
+       ldd     [%o0 + 0x78], %f30
+       DES_IP(32, 32)
+       DES_ROUND(0, 2, 32, 32)
+       ldd     [%o0 + 0x80], %f0
+       ldd     [%o0 + 0x88], %f2
+       DES_ROUND(4, 6, 32, 32)
+       ldd     [%o0 + 0x90], %f4
+       ldd     [%o0 + 0x98], %f6
+       DES_ROUND(8, 10, 32, 32)
+       ldd     [%o0 + 0xa0], %f8
+       ldd     [%o0 + 0xa8], %f10
+       DES_ROUND(12, 14, 32, 32)
+       ldd     [%o0 + 0xb0], %f12
+       ldd     [%o0 + 0xb8], %f14
+       DES_ROUND(16, 18, 32, 32)
+       ldd     [%o0 + 0xc0], %f16
+       ldd     [%o0 + 0xc8], %f18
+       DES_ROUND(20, 22, 32, 32)
+       ldd     [%o0 + 0xd0], %f20
+       ldd     [%o0 + 0xd8], %f22
+       DES_ROUND(24, 26, 32, 32)
+       ldd     [%o0 + 0xe0], %f24
+       ldd     [%o0 + 0xe8], %f26
+       DES_ROUND(28, 30, 32, 32)
+       ldd     [%o0 + 0xf0], %f28
+       ldd     [%o0 + 0xf8], %f30
+       DES_IIP(32, 32)
+       DES_IP(32, 32)
+       DES_ROUND(0, 2, 32, 32)
+       ldd     [%o0 + 0x100], %f0
+       ldd     [%o0 + 0x108], %f2
+       DES_ROUND(4, 6, 32, 32)
+       ldd     [%o0 + 0x110], %f4
+       ldd     [%o0 + 0x118], %f6
+       DES_ROUND(8, 10, 32, 32)
+       ldd     [%o0 + 0x120], %f8
+       ldd     [%o0 + 0x128], %f10
+       DES_ROUND(12, 14, 32, 32)
+       ldd     [%o0 + 0x130], %f12
+       ldd     [%o0 + 0x138], %f14
+       DES_ROUND(16, 18, 32, 32)
+       ldd     [%o0 + 0x140], %f16
+       ldd     [%o0 + 0x148], %f18
+       DES_ROUND(20, 22, 32, 32)
+       ldd     [%o0 + 0x150], %f20
+       ldd     [%o0 + 0x158], %f22
+       DES_ROUND(24, 26, 32, 32)
+       ldd     [%o0 + 0x160], %f24
+       ldd     [%o0 + 0x168], %f26
+       DES_ROUND(28, 30, 32, 32)
+       ldd     [%o0 + 0x170], %f28
+       ldd     [%o0 + 0x178], %f30
+       DES_IIP(32, 32)
+       DES_IP(32, 32)
+       DES_ROUND(0, 2, 32, 32)
+       DES_ROUND(4, 6, 32, 32)
+       DES_ROUND(8, 10, 32, 32)
+       DES_ROUND(12, 14, 32, 32)
+       DES_ROUND(16, 18, 32, 32)
+       DES_ROUND(20, 22, 32, 32)
+       DES_ROUND(24, 26, 32, 32)
+       DES_ROUND(28, 30, 32, 32)
+       DES_IIP(32, 32)
+
+       std     %f32, [%o2 + 0x00]
+       retl
+        VISExit
+ENDPROC(des3_ede_sparc64_crypt)
+
+       .align  32
+ENTRY(des3_ede_sparc64_load_keys)
+       /* %o0=key */
+       VISEntry
+       ldd     [%o0 + 0x00], %f0
+       ldd     [%o0 + 0x08], %f2
+       ldd     [%o0 + 0x10], %f4
+       ldd     [%o0 + 0x18], %f6
+       ldd     [%o0 + 0x20], %f8
+       ldd     [%o0 + 0x28], %f10
+       ldd     [%o0 + 0x30], %f12
+       ldd     [%o0 + 0x38], %f14
+       ldd     [%o0 + 0x40], %f16
+       ldd     [%o0 + 0x48], %f18
+       ldd     [%o0 + 0x50], %f20
+       ldd     [%o0 + 0x58], %f22
+       ldd     [%o0 + 0x60], %f24
+       ldd     [%o0 + 0x68], %f26
+       ldd     [%o0 + 0x70], %f28
+       ldd     [%o0 + 0x78], %f30
+       ldd     [%o0 + 0x80], %f32
+       ldd     [%o0 + 0x88], %f34
+       ldd     [%o0 + 0x90], %f36
+       ldd     [%o0 + 0x98], %f38
+       ldd     [%o0 + 0xa0], %f40
+       ldd     [%o0 + 0xa8], %f42
+       ldd     [%o0 + 0xb0], %f44
+       ldd     [%o0 + 0xb8], %f46
+       ldd     [%o0 + 0xc0], %f48
+       ldd     [%o0 + 0xc8], %f50
+       ldd     [%o0 + 0xd0], %f52
+       ldd     [%o0 + 0xd8], %f54
+       ldd     [%o0 + 0xe0], %f56
+       retl
+        ldd    [%o0 + 0xe8], %f58
+ENDPROC(des3_ede_sparc64_load_keys)
+
+#define DES3_LOOP_BODY(X) \
+       DES_IP(X, X) \
+       DES_ROUND(0, 2, X, X) \
+       DES_ROUND(4, 6, X, X) \
+       DES_ROUND(8, 10, X, X) \
+       DES_ROUND(12, 14, X, X) \
+       DES_ROUND(16, 18, X, X) \
+       ldd     [%o0 + 0xf0], %f16; \
+       ldd     [%o0 + 0xf8], %f18; \
+       DES_ROUND(20, 22, X, X) \
+       ldd     [%o0 + 0x100], %f20; \
+       ldd     [%o0 + 0x108], %f22; \
+       DES_ROUND(24, 26, X, X) \
+       ldd     [%o0 + 0x110], %f24; \
+       ldd     [%o0 + 0x118], %f26; \
+       DES_ROUND(28, 30, X, X) \
+       ldd     [%o0 + 0x120], %f28; \
+       ldd     [%o0 + 0x128], %f30; \
+       DES_IIP(X, X) \
+       DES_IP(X, X) \
+       DES_ROUND(32, 34, X, X) \
+       ldd     [%o0 + 0x130], %f0; \
+       ldd     [%o0 + 0x138], %f2; \
+       DES_ROUND(36, 38, X, X) \
+       ldd     [%o0 + 0x140], %f4; \
+       ldd     [%o0 + 0x148], %f6; \
+       DES_ROUND(40, 42, X, X) \
+       ldd     [%o0 + 0x150], %f8; \
+       ldd     [%o0 + 0x158], %f10; \
+       DES_ROUND(44, 46, X, X) \
+       ldd     [%o0 + 0x160], %f12; \
+       ldd     [%o0 + 0x168], %f14; \
+       DES_ROUND(48, 50, X, X) \
+       DES_ROUND(52, 54, X, X) \
+       DES_ROUND(56, 58, X, X) \
+       DES_ROUND(16, 18, X, X) \
+       ldd     [%o0 + 0x170], %f16; \
+       ldd     [%o0 + 0x178], %f18; \
+       DES_IIP(X, X) \
+       DES_IP(X, X) \
+       DES_ROUND(20, 22, X, X) \
+       ldd     [%o0 + 0x50], %f20; \
+       ldd     [%o0 + 0x58], %f22; \
+       DES_ROUND(24, 26, X, X) \
+       ldd     [%o0 + 0x60], %f24; \
+       ldd     [%o0 + 0x68], %f26; \
+       DES_ROUND(28, 30, X, X) \
+       ldd     [%o0 + 0x70], %f28; \
+       ldd     [%o0 + 0x78], %f30; \
+       DES_ROUND(0, 2, X, X) \
+       ldd     [%o0 + 0x00], %f0; \
+       ldd     [%o0 + 0x08], %f2; \
+       DES_ROUND(4, 6, X, X) \
+       ldd     [%o0 + 0x10], %f4; \
+       ldd     [%o0 + 0x18], %f6; \
+       DES_ROUND(8, 10, X, X) \
+       ldd     [%o0 + 0x20], %f8; \
+       ldd     [%o0 + 0x28], %f10; \
+       DES_ROUND(12, 14, X, X) \
+       ldd     [%o0 + 0x30], %f12; \
+       ldd     [%o0 + 0x38], %f14; \
+       DES_ROUND(16, 18, X, X) \
+       ldd     [%o0 + 0x40], %f16; \
+       ldd     [%o0 + 0x48], %f18; \
+       DES_IIP(X, X)
+
+       .align  32
+ENTRY(des3_ede_sparc64_ecb_crypt)
+       /* %o0=key, %o1=input, %o2=output, %o3=len */
+1:     ldd     [%o1 + 0x00], %f60
+       DES3_LOOP_BODY(60)
+       std     %f60, [%o2 + 0x00]
+       subcc   %o3, 0x08, %o3
+       bne,pt  %icc, 1b
+        add    %o2, 0x08, %o2
+       retl
+        nop
+ENDPROC(des3_ede_sparc64_ecb_crypt)
+
+       .align  32
+ENTRY(des3_ede_sparc64_cbc_encrypt)
+       /* %o0=key, %o1=input, %o2=output, %o3=len, %o4=IV */
+       ldd     [%o4 + 0x00], %f60
+1:     ldd     [%o1 + 0x00], %f62
+       fxor    %f60, %f62, %f60
+       DES3_LOOP_BODY(60)
+       std     %f60, [%o2 + 0x00]
+       add     %o1, 0x08, %o1
+       subcc   %o3, 0x08, %o3
+       bne,pt  %icc, 1b
+        add    %o2, 0x08, %o2
+       retl
+        std    %f60, [%o4 + 0x00]
+ENDPROC(des3_ede_sparc64_cbc_encrypt)
+
+       .align  32
+ENTRY(des3_ede_sparc64_cbc_decrypt)
+       /* %o0=key, %o1=input, %o2=output, %o3=len, %o4=IV */
+       ldd     [%o4 + 0x00], %f62
+1:     ldx     [%o1 + 0x00], %g1
+       MOVXTOD_G1_F60
+       DES3_LOOP_BODY(60)
+       fxor    %f62, %f60, %f60
+       MOVXTOD_G1_F62
+       std     %f60, [%o2 + 0x00]
+       add     %o1, 0x08, %o1
+       subcc   %o3, 0x08, %o3
+       bne,pt  %icc, 1b
+        add    %o2, 0x08, %o2
+       retl
+        stx    %g1, [%o4 + 0x00]
+ENDPROC(des3_ede_sparc64_cbc_decrypt)
diff --git a/arch/sparc/crypto/des_glue.c b/arch/sparc/crypto/des_glue.c
new file mode 100644 (file)
index 0000000..c4940c2
--- /dev/null
@@ -0,0 +1,529 @@
+/* Glue code for DES encryption optimized for sparc64 crypto opcodes.
+ *
+ * Copyright (C) 2012 David S. Miller <davem@davemloft.net>
+ */
+
+#define pr_fmt(fmt)    KBUILD_MODNAME ": " fmt
+
+#include <linux/crypto.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/types.h>
+#include <crypto/algapi.h>
+#include <crypto/des.h>
+
+#include <asm/fpumacro.h>
+#include <asm/pstate.h>
+#include <asm/elf.h>
+
+#include "opcodes.h"
+
+struct des_sparc64_ctx {
+       u64 encrypt_expkey[DES_EXPKEY_WORDS / 2];
+       u64 decrypt_expkey[DES_EXPKEY_WORDS / 2];
+};
+
+struct des3_ede_sparc64_ctx {
+       u64 encrypt_expkey[DES3_EDE_EXPKEY_WORDS / 2];
+       u64 decrypt_expkey[DES3_EDE_EXPKEY_WORDS / 2];
+};
+
+static void encrypt_to_decrypt(u64 *d, const u64 *e)
+{
+       const u64 *s = e + (DES_EXPKEY_WORDS / 2) - 1;
+       int i;
+
+       for (i = 0; i < DES_EXPKEY_WORDS / 2; i++)
+               *d++ = *s--;
+}
+
+extern void des_sparc64_key_expand(const u32 *input_key, u64 *key);
+
+static int des_set_key(struct crypto_tfm *tfm, const u8 *key,
+                      unsigned int keylen)
+{
+       struct des_sparc64_ctx *dctx = crypto_tfm_ctx(tfm);
+       u32 *flags = &tfm->crt_flags;
+       u32 tmp[DES_EXPKEY_WORDS];
+       int ret;
+
+       /* Even though we have special instructions for key expansion,
+        * we call des_ekey() so that we don't have to write our own
+        * weak key detection code.
+        */
+       ret = des_ekey(tmp, key);
+       if (unlikely(ret == 0) && (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+               *flags |= CRYPTO_TFM_RES_WEAK_KEY;
+               return -EINVAL;
+       }
+
+       des_sparc64_key_expand((const u32 *) key, &dctx->encrypt_expkey[0]);
+       encrypt_to_decrypt(&dctx->decrypt_expkey[0], &dctx->encrypt_expkey[0]);
+
+       return 0;
+}
+
+extern void des_sparc64_crypt(const u64 *key, const u64 *input,
+                             u64 *output);
+
+static void des_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+{
+       struct des_sparc64_ctx *ctx = crypto_tfm_ctx(tfm);
+       const u64 *K = ctx->encrypt_expkey;
+
+       des_sparc64_crypt(K, (const u64 *) src, (u64 *) dst);
+}
+
+static void des_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+{
+       struct des_sparc64_ctx *ctx = crypto_tfm_ctx(tfm);
+       const u64 *K = ctx->decrypt_expkey;
+
+       des_sparc64_crypt(K, (const u64 *) src, (u64 *) dst);
+}
+
+extern void des_sparc64_load_keys(const u64 *key);
+
+extern void des_sparc64_ecb_crypt(const u64 *input, u64 *output,
+                                 unsigned int len);
+
+#define DES_BLOCK_MASK (~(DES_BLOCK_SIZE - 1))
+
+static int __ecb_crypt(struct blkcipher_desc *desc,
+                      struct scatterlist *dst, struct scatterlist *src,
+                      unsigned int nbytes, bool encrypt)
+{
+       struct des_sparc64_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+       struct blkcipher_walk walk;
+       int err;
+
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       err = blkcipher_walk_virt(desc, &walk);
+
+       if (encrypt)
+               des_sparc64_load_keys(&ctx->encrypt_expkey[0]);
+       else
+               des_sparc64_load_keys(&ctx->decrypt_expkey[0]);
+       while ((nbytes = walk.nbytes)) {
+               unsigned int block_len = nbytes & DES_BLOCK_MASK;
+
+               if (likely(block_len)) {
+                       des_sparc64_ecb_crypt((const u64 *)walk.src.virt.addr,
+                                             (u64 *) walk.dst.virt.addr,
+                                             block_len);
+               }
+               nbytes &= DES_BLOCK_SIZE - 1;
+               err = blkcipher_walk_done(desc, &walk, nbytes);
+       }
+       fprs_write(0);
+       return err;
+}
+
+static int ecb_encrypt(struct blkcipher_desc *desc,
+                      struct scatterlist *dst, struct scatterlist *src,
+                      unsigned int nbytes)
+{
+       return __ecb_crypt(desc, dst, src, nbytes, true);
+}
+
+static int ecb_decrypt(struct blkcipher_desc *desc,
+                      struct scatterlist *dst, struct scatterlist *src,
+                      unsigned int nbytes)
+{
+       return __ecb_crypt(desc, dst, src, nbytes, false);
+}
+
+extern void des_sparc64_cbc_encrypt(const u64 *input, u64 *output,
+                                   unsigned int len, u64 *iv);
+
+static int cbc_encrypt(struct blkcipher_desc *desc,
+                      struct scatterlist *dst, struct scatterlist *src,
+                      unsigned int nbytes)
+{
+       struct des_sparc64_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+       struct blkcipher_walk walk;
+       int err;
+
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       err = blkcipher_walk_virt(desc, &walk);
+
+       des_sparc64_load_keys(&ctx->encrypt_expkey[0]);
+       while ((nbytes = walk.nbytes)) {
+               unsigned int block_len = nbytes & DES_BLOCK_MASK;
+
+               if (likely(block_len)) {
+                       des_sparc64_cbc_encrypt((const u64 *)walk.src.virt.addr,
+                                               (u64 *) walk.dst.virt.addr,
+                                               block_len, (u64 *) walk.iv);
+               }
+               nbytes &= DES_BLOCK_SIZE - 1;
+               err = blkcipher_walk_done(desc, &walk, nbytes);
+       }
+       fprs_write(0);
+       return err;
+}
+
+extern void des_sparc64_cbc_decrypt(const u64 *input, u64 *output,
+                                   unsigned int len, u64 *iv);
+
+static int cbc_decrypt(struct blkcipher_desc *desc,
+                      struct scatterlist *dst, struct scatterlist *src,
+                      unsigned int nbytes)
+{
+       struct des_sparc64_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+       struct blkcipher_walk walk;
+       int err;
+
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       err = blkcipher_walk_virt(desc, &walk);
+
+       des_sparc64_load_keys(&ctx->decrypt_expkey[0]);
+       while ((nbytes = walk.nbytes)) {
+               unsigned int block_len = nbytes & DES_BLOCK_MASK;
+
+               if (likely(block_len)) {
+                       des_sparc64_cbc_decrypt((const u64 *)walk.src.virt.addr,
+                                               (u64 *) walk.dst.virt.addr,
+                                               block_len, (u64 *) walk.iv);
+               }
+               nbytes &= DES_BLOCK_SIZE - 1;
+               err = blkcipher_walk_done(desc, &walk, nbytes);
+       }
+       fprs_write(0);
+       return err;
+}
+
+static int des3_ede_set_key(struct crypto_tfm *tfm, const u8 *key,
+                           unsigned int keylen)
+{
+       struct des3_ede_sparc64_ctx *dctx = crypto_tfm_ctx(tfm);
+       const u32 *K = (const u32 *)key;
+       u32 *flags = &tfm->crt_flags;
+       u64 k1[DES_EXPKEY_WORDS / 2];
+       u64 k2[DES_EXPKEY_WORDS / 2];
+       u64 k3[DES_EXPKEY_WORDS / 2];
+
+       if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
+                    !((K[2] ^ K[4]) | (K[3] ^ K[5]))) &&
+                    (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+               *flags |= CRYPTO_TFM_RES_WEAK_KEY;
+               return -EINVAL;
+       }
+
+       des_sparc64_key_expand((const u32 *)key, k1);
+       key += DES_KEY_SIZE;
+       des_sparc64_key_expand((const u32 *)key, k2);
+       key += DES_KEY_SIZE;
+       des_sparc64_key_expand((const u32 *)key, k3);
+
+       memcpy(&dctx->encrypt_expkey[0], &k1[0], sizeof(k1));
+       encrypt_to_decrypt(&dctx->encrypt_expkey[DES_EXPKEY_WORDS / 2], &k2[0]);
+       memcpy(&dctx->encrypt_expkey[(DES_EXPKEY_WORDS / 2) * 2],
+              &k3[0], sizeof(k3));
+
+       encrypt_to_decrypt(&dctx->decrypt_expkey[0], &k3[0]);
+       memcpy(&dctx->decrypt_expkey[DES_EXPKEY_WORDS / 2],
+              &k2[0], sizeof(k2));
+       encrypt_to_decrypt(&dctx->decrypt_expkey[(DES_EXPKEY_WORDS / 2) * 2],
+                          &k1[0]);
+
+       return 0;
+}
+
+extern void des3_ede_sparc64_crypt(const u64 *key, const u64 *input,
+                                  u64 *output);
+
+static void des3_ede_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+{
+       struct des3_ede_sparc64_ctx *ctx = crypto_tfm_ctx(tfm);
+       const u64 *K = ctx->encrypt_expkey;
+
+       des3_ede_sparc64_crypt(K, (const u64 *) src, (u64 *) dst);
+}
+
+static void des3_ede_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+{
+       struct des3_ede_sparc64_ctx *ctx = crypto_tfm_ctx(tfm);
+       const u64 *K = ctx->decrypt_expkey;
+
+       des3_ede_sparc64_crypt(K, (const u64 *) src, (u64 *) dst);
+}
+
+extern void des3_ede_sparc64_load_keys(const u64 *key);
+
+extern void des3_ede_sparc64_ecb_crypt(const u64 *expkey, const u64 *input,
+                                      u64 *output, unsigned int len);
+
+static int __ecb3_crypt(struct blkcipher_desc *desc,
+                       struct scatterlist *dst, struct scatterlist *src,
+                       unsigned int nbytes, bool encrypt)
+{
+       struct des3_ede_sparc64_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+       struct blkcipher_walk walk;
+       const u64 *K;
+       int err;
+
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       err = blkcipher_walk_virt(desc, &walk);
+
+       if (encrypt)
+               K = &ctx->encrypt_expkey[0];
+       else
+               K = &ctx->decrypt_expkey[0];
+       des3_ede_sparc64_load_keys(K);
+       while ((nbytes = walk.nbytes)) {
+               unsigned int block_len = nbytes & DES_BLOCK_MASK;
+
+               if (likely(block_len)) {
+                       const u64 *src64 = (const u64 *)walk.src.virt.addr;
+                       des3_ede_sparc64_ecb_crypt(K, src64,
+                                                  (u64 *) walk.dst.virt.addr,
+                                                  block_len);
+               }
+               nbytes &= DES_BLOCK_SIZE - 1;
+               err = blkcipher_walk_done(desc, &walk, nbytes);
+       }
+       fprs_write(0);
+       return err;
+}
+
+static int ecb3_encrypt(struct blkcipher_desc *desc,
+                      struct scatterlist *dst, struct scatterlist *src,
+                      unsigned int nbytes)
+{
+       return __ecb3_crypt(desc, dst, src, nbytes, true);
+}
+
+static int ecb3_decrypt(struct blkcipher_desc *desc,
+                      struct scatterlist *dst, struct scatterlist *src,
+                      unsigned int nbytes)
+{
+       return __ecb3_crypt(desc, dst, src, nbytes, false);
+}
+
+extern void des3_ede_sparc64_cbc_encrypt(const u64 *expkey, const u64 *input,
+                                        u64 *output, unsigned int len,
+                                        u64 *iv);
+
+static int cbc3_encrypt(struct blkcipher_desc *desc,
+                       struct scatterlist *dst, struct scatterlist *src,
+                       unsigned int nbytes)
+{
+       struct des3_ede_sparc64_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+       struct blkcipher_walk walk;
+       const u64 *K;
+       int err;
+
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       err = blkcipher_walk_virt(desc, &walk);
+
+       K = &ctx->encrypt_expkey[0];
+       des3_ede_sparc64_load_keys(K);
+       while ((nbytes = walk.nbytes)) {
+               unsigned int block_len = nbytes & DES_BLOCK_MASK;
+
+               if (likely(block_len)) {
+                       const u64 *src64 = (const u64 *)walk.src.virt.addr;
+                       des3_ede_sparc64_cbc_encrypt(K, src64,
+                                                    (u64 *) walk.dst.virt.addr,
+                                                    block_len,
+                                                    (u64 *) walk.iv);
+               }
+               nbytes &= DES_BLOCK_SIZE - 1;
+               err = blkcipher_walk_done(desc, &walk, nbytes);
+       }
+       fprs_write(0);
+       return err;
+}
+
+extern void des3_ede_sparc64_cbc_decrypt(const u64 *expkey, const u64 *input,
+                                        u64 *output, unsigned int len,
+                                        u64 *iv);
+
+static int cbc3_decrypt(struct blkcipher_desc *desc,
+                       struct scatterlist *dst, struct scatterlist *src,
+                       unsigned int nbytes)
+{
+       struct des3_ede_sparc64_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+       struct blkcipher_walk walk;
+       const u64 *K;
+       int err;
+
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       err = blkcipher_walk_virt(desc, &walk);
+
+       K = &ctx->decrypt_expkey[0];
+       des3_ede_sparc64_load_keys(K);
+       while ((nbytes = walk.nbytes)) {
+               unsigned int block_len = nbytes & DES_BLOCK_MASK;
+
+               if (likely(block_len)) {
+                       const u64 *src64 = (const u64 *)walk.src.virt.addr;
+                       des3_ede_sparc64_cbc_decrypt(K, src64,
+                                                    (u64 *) walk.dst.virt.addr,
+                                                    block_len,
+                                                    (u64 *) walk.iv);
+               }
+               nbytes &= DES_BLOCK_SIZE - 1;
+               err = blkcipher_walk_done(desc, &walk, nbytes);
+       }
+       fprs_write(0);
+       return err;
+}
+
+static struct crypto_alg algs[] = { {
+       .cra_name               = "des",
+       .cra_driver_name        = "des-sparc64",
+       .cra_priority           = SPARC_CR_OPCODE_PRIORITY,
+       .cra_flags              = CRYPTO_ALG_TYPE_CIPHER,
+       .cra_blocksize          = DES_BLOCK_SIZE,
+       .cra_ctxsize            = sizeof(struct des_sparc64_ctx),
+       .cra_alignmask          = 7,
+       .cra_module             = THIS_MODULE,
+       .cra_u  = {
+               .cipher = {
+                       .cia_min_keysize        = DES_KEY_SIZE,
+                       .cia_max_keysize        = DES_KEY_SIZE,
+                       .cia_setkey             = des_set_key,
+                       .cia_encrypt            = des_encrypt,
+                       .cia_decrypt            = des_decrypt
+               }
+       }
+}, {
+       .cra_name               = "ecb(des)",
+       .cra_driver_name        = "ecb-des-sparc64",
+       .cra_priority           = SPARC_CR_OPCODE_PRIORITY,
+       .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER,
+       .cra_blocksize          = DES_BLOCK_SIZE,
+       .cra_ctxsize            = sizeof(struct des_sparc64_ctx),
+       .cra_alignmask          = 7,
+       .cra_type               = &crypto_blkcipher_type,
+       .cra_module             = THIS_MODULE,
+       .cra_u = {
+               .blkcipher = {
+                       .min_keysize    = DES_KEY_SIZE,
+                       .max_keysize    = DES_KEY_SIZE,
+                       .setkey         = des_set_key,
+                       .encrypt        = ecb_encrypt,
+                       .decrypt        = ecb_decrypt,
+               },
+       },
+}, {
+       .cra_name               = "cbc(des)",
+       .cra_driver_name        = "cbc-des-sparc64",
+       .cra_priority           = SPARC_CR_OPCODE_PRIORITY,
+       .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER,
+       .cra_blocksize          = DES_BLOCK_SIZE,
+       .cra_ctxsize            = sizeof(struct des_sparc64_ctx),
+       .cra_alignmask          = 7,
+       .cra_type               = &crypto_blkcipher_type,
+       .cra_module             = THIS_MODULE,
+       .cra_u = {
+               .blkcipher = {
+                       .min_keysize    = DES_KEY_SIZE,
+                       .max_keysize    = DES_KEY_SIZE,
+                       .setkey         = des_set_key,
+                       .encrypt        = cbc_encrypt,
+                       .decrypt        = cbc_decrypt,
+               },
+       },
+}, {
+       .cra_name               = "des3_ede",
+       .cra_driver_name        = "des3_ede-sparc64",
+       .cra_priority           = SPARC_CR_OPCODE_PRIORITY,
+       .cra_flags              = CRYPTO_ALG_TYPE_CIPHER,
+       .cra_blocksize          = DES3_EDE_BLOCK_SIZE,
+       .cra_ctxsize            = sizeof(struct des3_ede_sparc64_ctx),
+       .cra_alignmask          = 7,
+       .cra_module             = THIS_MODULE,
+       .cra_u  = {
+               .cipher = {
+                       .cia_min_keysize        = DES3_EDE_KEY_SIZE,
+                       .cia_max_keysize        = DES3_EDE_KEY_SIZE,
+                       .cia_setkey             = des3_ede_set_key,
+                       .cia_encrypt            = des3_ede_encrypt,
+                       .cia_decrypt            = des3_ede_decrypt
+               }
+       }
+}, {
+       .cra_name               = "ecb(des3_ede)",
+       .cra_driver_name        = "ecb-des3_ede-sparc64",
+       .cra_priority           = SPARC_CR_OPCODE_PRIORITY,
+       .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER,
+       .cra_blocksize          = DES3_EDE_BLOCK_SIZE,
+       .cra_ctxsize            = sizeof(struct des3_ede_sparc64_ctx),
+       .cra_alignmask          = 7,
+       .cra_type               = &crypto_blkcipher_type,
+       .cra_module             = THIS_MODULE,
+       .cra_u = {
+               .blkcipher = {
+                       .min_keysize    = DES3_EDE_KEY_SIZE,
+                       .max_keysize    = DES3_EDE_KEY_SIZE,
+                       .setkey         = des3_ede_set_key,
+                       .encrypt        = ecb3_encrypt,
+                       .decrypt        = ecb3_decrypt,
+               },
+       },
+}, {
+       .cra_name               = "cbc(des3_ede)",
+       .cra_driver_name        = "cbc-des3_ede-sparc64",
+       .cra_priority           = SPARC_CR_OPCODE_PRIORITY,
+       .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER,
+       .cra_blocksize          = DES3_EDE_BLOCK_SIZE,
+       .cra_ctxsize            = sizeof(struct des3_ede_sparc64_ctx),
+       .cra_alignmask          = 7,
+       .cra_type               = &crypto_blkcipher_type,
+       .cra_module             = THIS_MODULE,
+       .cra_u = {
+               .blkcipher = {
+                       .min_keysize    = DES3_EDE_KEY_SIZE,
+                       .max_keysize    = DES3_EDE_KEY_SIZE,
+                       .setkey         = des3_ede_set_key,
+                       .encrypt        = cbc3_encrypt,
+                       .decrypt        = cbc3_decrypt,
+               },
+       },
+} };
+
+static bool __init sparc64_has_des_opcode(void)
+{
+       unsigned long cfr;
+
+       if (!(sparc64_elf_hwcap & HWCAP_SPARC_CRYPTO))
+               return false;
+
+       __asm__ __volatile__("rd %%asr26, %0" : "=r" (cfr));
+       if (!(cfr & CFR_DES))
+               return false;
+
+       return true;
+}
+
+static int __init des_sparc64_mod_init(void)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(algs); i++)
+               INIT_LIST_HEAD(&algs[i].cra_list);
+
+       if (sparc64_has_des_opcode()) {
+               pr_info("Using sparc64 des opcodes optimized DES implementation\n");
+               return crypto_register_algs(algs, ARRAY_SIZE(algs));
+       }
+       pr_info("sparc64 des opcodes not available.\n");
+       return -ENODEV;
+}
+
+static void __exit des_sparc64_mod_fini(void)
+{
+       crypto_unregister_algs(algs, ARRAY_SIZE(algs));
+}
+
+module_init(des_sparc64_mod_init);
+module_exit(des_sparc64_mod_fini);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("DES & Triple DES EDE Cipher Algorithms, sparc64 des opcode accelerated");
+
+MODULE_ALIAS("des");
diff --git a/arch/sparc/crypto/md5_asm.S b/arch/sparc/crypto/md5_asm.S
new file mode 100644 (file)
index 0000000..3150404
--- /dev/null
@@ -0,0 +1,70 @@
+#include <linux/linkage.h>
+#include <asm/visasm.h>
+
+#include "opcodes.h"
+
+ENTRY(md5_sparc64_transform)
+       /* %o0 = digest, %o1 = data, %o2 = rounds */
+       VISEntryHalf
+       ld      [%o0 + 0x00], %f0
+       ld      [%o0 + 0x04], %f1
+       andcc   %o1, 0x7, %g0
+       ld      [%o0 + 0x08], %f2
+       bne,pn  %xcc, 10f
+        ld     [%o0 + 0x0c], %f3
+
+1:
+       ldd     [%o1 + 0x00], %f8
+       ldd     [%o1 + 0x08], %f10
+       ldd     [%o1 + 0x10], %f12
+       ldd     [%o1 + 0x18], %f14
+       ldd     [%o1 + 0x20], %f16
+       ldd     [%o1 + 0x28], %f18
+       ldd     [%o1 + 0x30], %f20
+       ldd     [%o1 + 0x38], %f22
+
+       MD5
+
+       subcc   %o2, 1, %o2
+       bne,pt  %xcc, 1b
+        add    %o1, 0x40, %o1
+
+5:
+       st      %f0, [%o0 + 0x00]
+       st      %f1, [%o0 + 0x04]
+       st      %f2, [%o0 + 0x08]
+       st      %f3, [%o0 + 0x0c]
+       retl
+        VISExitHalf
+10:
+       alignaddr %o1, %g0, %o1
+
+       ldd     [%o1 + 0x00], %f10
+1:
+       ldd     [%o1 + 0x08], %f12
+       ldd     [%o1 + 0x10], %f14
+       ldd     [%o1 + 0x18], %f16
+       ldd     [%o1 + 0x20], %f18
+       ldd     [%o1 + 0x28], %f20
+       ldd     [%o1 + 0x30], %f22
+       ldd     [%o1 + 0x38], %f24
+       ldd     [%o1 + 0x40], %f26
+
+       faligndata %f10, %f12, %f8
+       faligndata %f12, %f14, %f10
+       faligndata %f14, %f16, %f12
+       faligndata %f16, %f18, %f14
+       faligndata %f18, %f20, %f16
+       faligndata %f20, %f22, %f18
+       faligndata %f22, %f24, %f20
+       faligndata %f24, %f26, %f22
+
+       MD5
+
+       subcc   %o2, 1, %o2
+       fsrc2   %f26, %f10
+       bne,pt  %xcc, 1b
+        add    %o1, 0x40, %o1
+
+       ba,a,pt %xcc, 5b
+ENDPROC(md5_sparc64_transform)
diff --git a/arch/sparc/crypto/md5_glue.c b/arch/sparc/crypto/md5_glue.c
new file mode 100644 (file)
index 0000000..603d723
--- /dev/null
@@ -0,0 +1,188 @@
+/* Glue code for MD5 hashing optimized for sparc64 crypto opcodes.
+ *
+ * This is based largely upon arch/x86/crypto/sha1_ssse3_glue.c
+ * and crypto/md5.c which are:
+ *
+ * Copyright (c) Alan Smithee.
+ * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
+ * Copyright (c) Jean-Francois Dive <jef@linuxbe.org>
+ * Copyright (c) Mathias Krause <minipli@googlemail.com>
+ * Copyright (c) Cryptoapi developers.
+ * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
+ */
+
+#define pr_fmt(fmt)    KBUILD_MODNAME ": " fmt
+
+#include <crypto/internal/hash.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/cryptohash.h>
+#include <linux/types.h>
+#include <crypto/md5.h>
+
+#include <asm/pstate.h>
+#include <asm/elf.h>
+
+#include "opcodes.h"
+
+asmlinkage void md5_sparc64_transform(u32 *digest, const char *data,
+                                     unsigned int rounds);
+
+static int md5_sparc64_init(struct shash_desc *desc)
+{
+       struct md5_state *mctx = shash_desc_ctx(desc);
+
+       mctx->hash[0] = cpu_to_le32(0x67452301);
+       mctx->hash[1] = cpu_to_le32(0xefcdab89);
+       mctx->hash[2] = cpu_to_le32(0x98badcfe);
+       mctx->hash[3] = cpu_to_le32(0x10325476);
+       mctx->byte_count = 0;
+
+       return 0;
+}
+
+static void __md5_sparc64_update(struct md5_state *sctx, const u8 *data,
+                                unsigned int len, unsigned int partial)
+{
+       unsigned int done = 0;
+
+       sctx->byte_count += len;
+       if (partial) {
+               done = MD5_HMAC_BLOCK_SIZE - partial;
+               memcpy((u8 *)sctx->block + partial, data, done);
+               md5_sparc64_transform(sctx->hash, (u8 *)sctx->block, 1);
+       }
+       if (len - done >= MD5_HMAC_BLOCK_SIZE) {
+               const unsigned int rounds = (len - done) / MD5_HMAC_BLOCK_SIZE;
+
+               md5_sparc64_transform(sctx->hash, data + done, rounds);
+               done += rounds * MD5_HMAC_BLOCK_SIZE;
+       }
+
+       memcpy(sctx->block, data + done, len - done);
+}
+
+static int md5_sparc64_update(struct shash_desc *desc, const u8 *data,
+                             unsigned int len)
+{
+       struct md5_state *sctx = shash_desc_ctx(desc);
+       unsigned int partial = sctx->byte_count % MD5_HMAC_BLOCK_SIZE;
+
+       /* Handle the fast case right here */
+       if (partial + len < MD5_HMAC_BLOCK_SIZE) {
+               sctx->byte_count += len;
+               memcpy((u8 *)sctx->block + partial, data, len);
+       } else
+               __md5_sparc64_update(sctx, data, len, partial);
+
+       return 0;
+}
+
+/* Add padding and return the message digest. */
+static int md5_sparc64_final(struct shash_desc *desc, u8 *out)
+{
+       struct md5_state *sctx = shash_desc_ctx(desc);
+       unsigned int i, index, padlen;
+       u32 *dst = (u32 *)out;
+       __le64 bits;
+       static const u8 padding[MD5_HMAC_BLOCK_SIZE] = { 0x80, };
+
+       bits = cpu_to_le64(sctx->byte_count << 3);
+
+       /* Pad out to 56 mod 64 and append length */
+       index = sctx->byte_count % MD5_HMAC_BLOCK_SIZE;
+       padlen = (index < 56) ? (56 - index) : ((MD5_HMAC_BLOCK_SIZE+56) - index);
+
+       /* We need to fill a whole block for __md5_sparc64_update() */
+       if (padlen <= 56) {
+               sctx->byte_count += padlen;
+               memcpy((u8 *)sctx->block + index, padding, padlen);
+       } else {
+               __md5_sparc64_update(sctx, padding, padlen, index);
+       }
+       __md5_sparc64_update(sctx, (const u8 *)&bits, sizeof(bits), 56);
+
+       /* Store state in digest */
+       for (i = 0; i < MD5_HASH_WORDS; i++)
+               dst[i] = sctx->hash[i];
+
+       /* Wipe context */
+       memset(sctx, 0, sizeof(*sctx));
+
+       return 0;
+}
+
+static int md5_sparc64_export(struct shash_desc *desc, void *out)
+{
+       struct md5_state *sctx = shash_desc_ctx(desc);
+
+       memcpy(out, sctx, sizeof(*sctx));
+
+       return 0;
+}
+
+static int md5_sparc64_import(struct shash_desc *desc, const void *in)
+{
+       struct md5_state *sctx = shash_desc_ctx(desc);
+
+       memcpy(sctx, in, sizeof(*sctx));
+
+       return 0;
+}
+
+static struct shash_alg alg = {
+       .digestsize     =       MD5_DIGEST_SIZE,
+       .init           =       md5_sparc64_init,
+       .update         =       md5_sparc64_update,
+       .final          =       md5_sparc64_final,
+       .export         =       md5_sparc64_export,
+       .import         =       md5_sparc64_import,
+       .descsize       =       sizeof(struct md5_state),
+       .statesize      =       sizeof(struct md5_state),
+       .base           =       {
+               .cra_name       =       "md5",
+               .cra_driver_name=       "md5-sparc64",
+               .cra_priority   =       SPARC_CR_OPCODE_PRIORITY,
+               .cra_flags      =       CRYPTO_ALG_TYPE_SHASH,
+               .cra_blocksize  =       MD5_HMAC_BLOCK_SIZE,
+               .cra_module     =       THIS_MODULE,
+       }
+};
+
+static bool __init sparc64_has_md5_opcode(void)
+{
+       unsigned long cfr;
+
+       if (!(sparc64_elf_hwcap & HWCAP_SPARC_CRYPTO))
+               return false;
+
+       __asm__ __volatile__("rd %%asr26, %0" : "=r" (cfr));
+       if (!(cfr & CFR_MD5))
+               return false;
+
+       return true;
+}
+
+static int __init md5_sparc64_mod_init(void)
+{
+       if (sparc64_has_md5_opcode()) {
+               pr_info("Using sparc64 md5 opcode optimized MD5 implementation\n");
+               return crypto_register_shash(&alg);
+       }
+       pr_info("sparc64 md5 opcode not available.\n");
+       return -ENODEV;
+}
+
+static void __exit md5_sparc64_mod_fini(void)
+{
+       crypto_unregister_shash(&alg);
+}
+
+module_init(md5_sparc64_mod_init);
+module_exit(md5_sparc64_mod_fini);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("MD5 Secure Hash Algorithm, sparc64 md5 opcode accelerated");
+
+MODULE_ALIAS("md5");
diff --git a/arch/sparc/crypto/opcodes.h b/arch/sparc/crypto/opcodes.h
new file mode 100644 (file)
index 0000000..19cbaea
--- /dev/null
@@ -0,0 +1,99 @@
+#ifndef _OPCODES_H
+#define _OPCODES_H
+
+#define SPARC_CR_OPCODE_PRIORITY       300
+
+#define F3F(x,y,z)     (((x)<<30)|((y)<<19)|((z)<<5))
+
+#define FPD_ENCODE(x)  (((x) >> 5) | ((x) & ~(0x20)))
+
+#define RS1(x)         (FPD_ENCODE(x) << 14)
+#define RS2(x)         (FPD_ENCODE(x) <<  0)
+#define RS3(x)         (FPD_ENCODE(x) <<  9)
+#define RD(x)          (FPD_ENCODE(x) << 25)
+#define IMM5_0(x)      ((x)           <<  0)
+#define IMM5_9(x)      ((x)           <<  9)
+
+#define CRC32C(a,b,c)  \
+       .word           (F3F(2,0x36,0x147)|RS1(a)|RS2(b)|RD(c));
+
+#define MD5            \
+       .word   0x81b02800;
+#define SHA1           \
+       .word   0x81b02820;
+#define SHA256         \
+       .word   0x81b02840;
+#define SHA512         \
+       .word   0x81b02860;
+
+#define AES_EROUND01(a,b,c,d)  \
+       .word   (F3F(2, 0x19, 0)|RS1(a)|RS2(b)|RS3(c)|RD(d));
+#define AES_EROUND23(a,b,c,d)  \
+       .word   (F3F(2, 0x19, 1)|RS1(a)|RS2(b)|RS3(c)|RD(d));
+#define AES_DROUND01(a,b,c,d)  \
+       .word   (F3F(2, 0x19, 2)|RS1(a)|RS2(b)|RS3(c)|RD(d));
+#define AES_DROUND23(a,b,c,d)  \
+       .word   (F3F(2, 0x19, 3)|RS1(a)|RS2(b)|RS3(c)|RD(d));
+#define AES_EROUND01_L(a,b,c,d)        \
+       .word   (F3F(2, 0x19, 4)|RS1(a)|RS2(b)|RS3(c)|RD(d));
+#define AES_EROUND23_L(a,b,c,d)        \
+       .word   (F3F(2, 0x19, 5)|RS1(a)|RS2(b)|RS3(c)|RD(d));
+#define AES_DROUND01_L(a,b,c,d)        \
+       .word   (F3F(2, 0x19, 6)|RS1(a)|RS2(b)|RS3(c)|RD(d));
+#define AES_DROUND23_L(a,b,c,d)        \
+       .word   (F3F(2, 0x19, 7)|RS1(a)|RS2(b)|RS3(c)|RD(d));
+#define AES_KEXPAND1(a,b,c,d)  \
+       .word   (F3F(2, 0x19, 8)|RS1(a)|RS2(b)|IMM5_9(c)|RD(d));
+#define AES_KEXPAND0(a,b,c)    \
+       .word   (F3F(2, 0x36, 0x130)|RS1(a)|RS2(b)|RD(c));
+#define AES_KEXPAND2(a,b,c)    \
+       .word   (F3F(2, 0x36, 0x131)|RS1(a)|RS2(b)|RD(c));
+
+#define DES_IP(a,b)            \
+       .word           (F3F(2, 0x36, 0x134)|RS1(a)|RD(b));
+#define DES_IIP(a,b)           \
+       .word           (F3F(2, 0x36, 0x135)|RS1(a)|RD(b));
+#define DES_KEXPAND(a,b,c)     \
+       .word           (F3F(2, 0x36, 0x136)|RS1(a)|IMM5_0(b)|RD(c));
+#define DES_ROUND(a,b,c,d)     \
+       .word           (F3F(2, 0x19, 0x009)|RS1(a)|RS2(b)|RS3(c)|RD(d));
+
+#define CAMELLIA_F(a,b,c,d)            \
+       .word           (F3F(2, 0x19, 0x00c)|RS1(a)|RS2(b)|RS3(c)|RD(d));
+#define CAMELLIA_FL(a,b,c)             \
+       .word           (F3F(2, 0x36, 0x13c)|RS1(a)|RS2(b)|RD(c));
+#define CAMELLIA_FLI(a,b,c)            \
+       .word           (F3F(2, 0x36, 0x13d)|RS1(a)|RS2(b)|RD(c));
+
+#define MOVDTOX_F0_O4          \
+       .word   0x99b02200
+#define MOVDTOX_F2_O5          \
+       .word   0x9bb02202
+#define MOVXTOD_G1_F60                 \
+       .word   0xbbb02301
+#define MOVXTOD_G1_F62                 \
+       .word   0xbfb02301
+#define MOVXTOD_G3_F4          \
+       .word   0x89b02303;
+#define MOVXTOD_G7_F6          \
+       .word   0x8db02307;
+#define MOVXTOD_G3_F0          \
+       .word   0x81b02303;
+#define MOVXTOD_G7_F2          \
+       .word   0x85b02307;
+#define MOVXTOD_O0_F0          \
+       .word   0x81b02308;
+#define MOVXTOD_O5_F0          \
+       .word   0x81b0230d;
+#define MOVXTOD_O5_F2          \
+       .word   0x85b0230d;
+#define MOVXTOD_O5_F4          \
+       .word   0x89b0230d;
+#define MOVXTOD_O5_F6          \
+       .word   0x8db0230d;
+#define MOVXTOD_G3_F60         \
+       .word   0xbbb02303;
+#define MOVXTOD_G7_F62         \
+       .word   0xbfb02307;
+
+#endif /* _OPCODES_H */
diff --git a/arch/sparc/crypto/sha1_asm.S b/arch/sparc/crypto/sha1_asm.S
new file mode 100644 (file)
index 0000000..219d10c
--- /dev/null
@@ -0,0 +1,72 @@
+#include <linux/linkage.h>
+#include <asm/visasm.h>
+
+#include "opcodes.h"
+
+ENTRY(sha1_sparc64_transform)
+       /* %o0 = digest, %o1 = data, %o2 = rounds */
+       VISEntryHalf
+       ld      [%o0 + 0x00], %f0
+       ld      [%o0 + 0x04], %f1
+       ld      [%o0 + 0x08], %f2
+       andcc   %o1, 0x7, %g0
+       ld      [%o0 + 0x0c], %f3
+       bne,pn  %xcc, 10f
+        ld     [%o0 + 0x10], %f4
+
+1:
+       ldd     [%o1 + 0x00], %f8
+       ldd     [%o1 + 0x08], %f10
+       ldd     [%o1 + 0x10], %f12
+       ldd     [%o1 + 0x18], %f14
+       ldd     [%o1 + 0x20], %f16
+       ldd     [%o1 + 0x28], %f18
+       ldd     [%o1 + 0x30], %f20
+       ldd     [%o1 + 0x38], %f22
+
+       SHA1
+
+       subcc   %o2, 1, %o2
+       bne,pt  %xcc, 1b
+        add    %o1, 0x40, %o1
+
+5:
+       st      %f0, [%o0 + 0x00]
+       st      %f1, [%o0 + 0x04]
+       st      %f2, [%o0 + 0x08]
+       st      %f3, [%o0 + 0x0c]
+       st      %f4, [%o0 + 0x10]
+       retl
+        VISExitHalf
+10:
+       alignaddr %o1, %g0, %o1
+
+       ldd     [%o1 + 0x00], %f10
+1:
+       ldd     [%o1 + 0x08], %f12
+       ldd     [%o1 + 0x10], %f14
+       ldd     [%o1 + 0x18], %f16
+       ldd     [%o1 + 0x20], %f18
+       ldd     [%o1 + 0x28], %f20
+       ldd     [%o1 + 0x30], %f22
+       ldd     [%o1 + 0x38], %f24
+       ldd     [%o1 + 0x40], %f26
+
+       faligndata %f10, %f12, %f8
+       faligndata %f12, %f14, %f10
+       faligndata %f14, %f16, %f12
+       faligndata %f16, %f18, %f14
+       faligndata %f18, %f20, %f16
+       faligndata %f20, %f22, %f18
+       faligndata %f22, %f24, %f20
+       faligndata %f24, %f26, %f22
+
+       SHA1
+
+       subcc   %o2, 1, %o2
+       fsrc2   %f26, %f10
+       bne,pt  %xcc, 1b
+        add    %o1, 0x40, %o1
+
+       ba,a,pt %xcc, 5b
+ENDPROC(sha1_sparc64_transform)
diff --git a/arch/sparc/crypto/sha1_glue.c b/arch/sparc/crypto/sha1_glue.c
new file mode 100644 (file)
index 0000000..2bbb20b
--- /dev/null
@@ -0,0 +1,183 @@
+/* Glue code for SHA1 hashing optimized for sparc64 crypto opcodes.
+ *
+ * This is based largely upon arch/x86/crypto/sha1_ssse3_glue.c
+ *
+ * Copyright (c) Alan Smithee.
+ * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
+ * Copyright (c) Jean-Francois Dive <jef@linuxbe.org>
+ * Copyright (c) Mathias Krause <minipli@googlemail.com>
+ */
+
+#define pr_fmt(fmt)    KBUILD_MODNAME ": " fmt
+
+#include <crypto/internal/hash.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/cryptohash.h>
+#include <linux/types.h>
+#include <crypto/sha.h>
+
+#include <asm/pstate.h>
+#include <asm/elf.h>
+
+#include "opcodes.h"
+
+asmlinkage void sha1_sparc64_transform(u32 *digest, const char *data,
+                                      unsigned int rounds);
+
+static int sha1_sparc64_init(struct shash_desc *desc)
+{
+       struct sha1_state *sctx = shash_desc_ctx(desc);
+
+       *sctx = (struct sha1_state){
+               .state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
+       };
+
+       return 0;
+}
+
+static void __sha1_sparc64_update(struct sha1_state *sctx, const u8 *data,
+                                 unsigned int len, unsigned int partial)
+{
+       unsigned int done = 0;
+
+       sctx->count += len;
+       if (partial) {
+               done = SHA1_BLOCK_SIZE - partial;
+               memcpy(sctx->buffer + partial, data, done);
+               sha1_sparc64_transform(sctx->state, sctx->buffer, 1);
+       }
+       if (len - done >= SHA1_BLOCK_SIZE) {
+               const unsigned int rounds = (len - done) / SHA1_BLOCK_SIZE;
+
+               sha1_sparc64_transform(sctx->state, data + done, rounds);
+               done += rounds * SHA1_BLOCK_SIZE;
+       }
+
+       memcpy(sctx->buffer, data + done, len - done);
+}
+
+static int sha1_sparc64_update(struct shash_desc *desc, const u8 *data,
+                              unsigned int len)
+{
+       struct sha1_state *sctx = shash_desc_ctx(desc);
+       unsigned int partial = sctx->count % SHA1_BLOCK_SIZE;
+
+       /* Handle the fast case right here */
+       if (partial + len < SHA1_BLOCK_SIZE) {
+               sctx->count += len;
+               memcpy(sctx->buffer + partial, data, len);
+       } else
+               __sha1_sparc64_update(sctx, data, len, partial);
+
+       return 0;
+}
+
+/* Add padding and return the message digest. */
+static int sha1_sparc64_final(struct shash_desc *desc, u8 *out)
+{
+       struct sha1_state *sctx = shash_desc_ctx(desc);
+       unsigned int i, index, padlen;
+       __be32 *dst = (__be32 *)out;
+       __be64 bits;
+       static const u8 padding[SHA1_BLOCK_SIZE] = { 0x80, };
+
+       bits = cpu_to_be64(sctx->count << 3);
+
+       /* Pad out to 56 mod 64 and append length */
+       index = sctx->count % SHA1_BLOCK_SIZE;
+       padlen = (index < 56) ? (56 - index) : ((SHA1_BLOCK_SIZE+56) - index);
+
+       /* We need to fill a whole block for __sha1_sparc64_update() */
+       if (padlen <= 56) {
+               sctx->count += padlen;
+               memcpy(sctx->buffer + index, padding, padlen);
+       } else {
+               __sha1_sparc64_update(sctx, padding, padlen, index);
+       }
+       __sha1_sparc64_update(sctx, (const u8 *)&bits, sizeof(bits), 56);
+
+       /* Store state in digest */
+       for (i = 0; i < 5; i++)
+               dst[i] = cpu_to_be32(sctx->state[i]);
+
+       /* Wipe context */
+       memset(sctx, 0, sizeof(*sctx));
+
+       return 0;
+}
+
+static int sha1_sparc64_export(struct shash_desc *desc, void *out)
+{
+       struct sha1_state *sctx = shash_desc_ctx(desc);
+
+       memcpy(out, sctx, sizeof(*sctx));
+
+       return 0;
+}
+
+static int sha1_sparc64_import(struct shash_desc *desc, const void *in)
+{
+       struct sha1_state *sctx = shash_desc_ctx(desc);
+
+       memcpy(sctx, in, sizeof(*sctx));
+
+       return 0;
+}
+
+static struct shash_alg alg = {
+       .digestsize     =       SHA1_DIGEST_SIZE,
+       .init           =       sha1_sparc64_init,
+       .update         =       sha1_sparc64_update,
+       .final          =       sha1_sparc64_final,
+       .export         =       sha1_sparc64_export,
+       .import         =       sha1_sparc64_import,
+       .descsize       =       sizeof(struct sha1_state),
+       .statesize      =       sizeof(struct sha1_state),
+       .base           =       {
+               .cra_name       =       "sha1",
+               .cra_driver_name=       "sha1-sparc64",
+               .cra_priority   =       SPARC_CR_OPCODE_PRIORITY,
+               .cra_flags      =       CRYPTO_ALG_TYPE_SHASH,
+               .cra_blocksize  =       SHA1_BLOCK_SIZE,
+               .cra_module     =       THIS_MODULE,
+       }
+};
+
+static bool __init sparc64_has_sha1_opcode(void)
+{
+       unsigned long cfr;
+
+       if (!(sparc64_elf_hwcap & HWCAP_SPARC_CRYPTO))
+               return false;
+
+       __asm__ __volatile__("rd %%asr26, %0" : "=r" (cfr));
+       if (!(cfr & CFR_SHA1))
+               return false;
+
+       return true;
+}
+
+static int __init sha1_sparc64_mod_init(void)
+{
+       if (sparc64_has_sha1_opcode()) {
+               pr_info("Using sparc64 sha1 opcode optimized SHA-1 implementation\n");
+               return crypto_register_shash(&alg);
+       }
+       pr_info("sparc64 sha1 opcode not available.\n");
+       return -ENODEV;
+}
+
+static void __exit sha1_sparc64_mod_fini(void)
+{
+       crypto_unregister_shash(&alg);
+}
+
+module_init(sha1_sparc64_mod_init);
+module_exit(sha1_sparc64_mod_fini);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, sparc64 sha1 opcode accelerated");
+
+MODULE_ALIAS("sha1");
diff --git a/arch/sparc/crypto/sha256_asm.S b/arch/sparc/crypto/sha256_asm.S
new file mode 100644 (file)
index 0000000..b5f3d58
--- /dev/null
@@ -0,0 +1,78 @@
+#include <linux/linkage.h>
+#include <asm/visasm.h>
+
+#include "opcodes.h"
+
+ENTRY(sha256_sparc64_transform)
+       /* %o0 = digest, %o1 = data, %o2 = rounds */
+       VISEntryHalf
+       ld      [%o0 + 0x00], %f0
+       ld      [%o0 + 0x04], %f1
+       ld      [%o0 + 0x08], %f2
+       ld      [%o0 + 0x0c], %f3
+       ld      [%o0 + 0x10], %f4
+       ld      [%o0 + 0x14], %f5
+       andcc   %o1, 0x7, %g0
+       ld      [%o0 + 0x18], %f6
+       bne,pn  %xcc, 10f
+        ld     [%o0 + 0x1c], %f7
+
+1:
+       ldd     [%o1 + 0x00], %f8
+       ldd     [%o1 + 0x08], %f10
+       ldd     [%o1 + 0x10], %f12
+       ldd     [%o1 + 0x18], %f14
+       ldd     [%o1 + 0x20], %f16
+       ldd     [%o1 + 0x28], %f18
+       ldd     [%o1 + 0x30], %f20
+       ldd     [%o1 + 0x38], %f22
+
+       SHA256
+
+       subcc   %o2, 1, %o2
+       bne,pt  %xcc, 1b
+        add    %o1, 0x40, %o1
+
+5:
+       st      %f0, [%o0 + 0x00]
+       st      %f1, [%o0 + 0x04]
+       st      %f2, [%o0 + 0x08]
+       st      %f3, [%o0 + 0x0c]
+       st      %f4, [%o0 + 0x10]
+       st      %f5, [%o0 + 0x14]
+       st      %f6, [%o0 + 0x18]
+       st      %f7, [%o0 + 0x1c]
+       retl
+        VISExitHalf
+10:
+       alignaddr %o1, %g0, %o1
+
+       ldd     [%o1 + 0x00], %f10
+1:
+       ldd     [%o1 + 0x08], %f12
+       ldd     [%o1 + 0x10], %f14
+       ldd     [%o1 + 0x18], %f16
+       ldd     [%o1 + 0x20], %f18
+       ldd     [%o1 + 0x28], %f20
+       ldd     [%o1 + 0x30], %f22
+       ldd     [%o1 + 0x38], %f24
+       ldd     [%o1 + 0x40], %f26
+
+       faligndata %f10, %f12, %f8
+       faligndata %f12, %f14, %f10
+       faligndata %f14, %f16, %f12
+       faligndata %f16, %f18, %f14
+       faligndata %f18, %f20, %f16
+       faligndata %f20, %f22, %f18
+       faligndata %f22, %f24, %f20
+       faligndata %f24, %f26, %f22
+
+       SHA256
+
+       subcc   %o2, 1, %o2
+       fsrc2   %f26, %f10
+       bne,pt  %xcc, 1b
+        add    %o1, 0x40, %o1
+
+       ba,a,pt %xcc, 5b
+ENDPROC(sha256_sparc64_transform)
diff --git a/arch/sparc/crypto/sha256_glue.c b/arch/sparc/crypto/sha256_glue.c
new file mode 100644 (file)
index 0000000..591e656
--- /dev/null
@@ -0,0 +1,241 @@
+/* Glue code for SHA256 hashing optimized for sparc64 crypto opcodes.
+ *
+ * This is based largely upon crypto/sha256_generic.c
+ *
+ * Copyright (c) Jean-Luc Cooke <jlcooke@certainkey.com>
+ * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
+ * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
+ * SHA224 Support Copyright 2007 Intel Corporation <jonathan.lynch@intel.com>
+ */
+
+#define pr_fmt(fmt)    KBUILD_MODNAME ": " fmt
+
+#include <crypto/internal/hash.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/cryptohash.h>
+#include <linux/types.h>
+#include <crypto/sha.h>
+
+#include <asm/pstate.h>
+#include <asm/elf.h>
+
+#include "opcodes.h"
+
+asmlinkage void sha256_sparc64_transform(u32 *digest, const char *data,
+                                        unsigned int rounds);
+
+static int sha224_sparc64_init(struct shash_desc *desc)
+{
+       struct sha256_state *sctx = shash_desc_ctx(desc);
+       sctx->state[0] = SHA224_H0;
+       sctx->state[1] = SHA224_H1;
+       sctx->state[2] = SHA224_H2;
+       sctx->state[3] = SHA224_H3;
+       sctx->state[4] = SHA224_H4;
+       sctx->state[5] = SHA224_H5;
+       sctx->state[6] = SHA224_H6;
+       sctx->state[7] = SHA224_H7;
+       sctx->count = 0;
+
+       return 0;
+}
+
+static int sha256_sparc64_init(struct shash_desc *desc)
+{
+       struct sha256_state *sctx = shash_desc_ctx(desc);
+       sctx->state[0] = SHA256_H0;
+       sctx->state[1] = SHA256_H1;
+       sctx->state[2] = SHA256_H2;
+       sctx->state[3] = SHA256_H3;
+       sctx->state[4] = SHA256_H4;
+       sctx->state[5] = SHA256_H5;
+       sctx->state[6] = SHA256_H6;
+       sctx->state[7] = SHA256_H7;
+       sctx->count = 0;
+
+       return 0;
+}
+
+static void __sha256_sparc64_update(struct sha256_state *sctx, const u8 *data,
+                                   unsigned int len, unsigned int partial)
+{
+       unsigned int done = 0;
+
+       sctx->count += len;
+       if (partial) {
+               done = SHA256_BLOCK_SIZE - partial;
+               memcpy(sctx->buf + partial, data, done);
+               sha256_sparc64_transform(sctx->state, sctx->buf, 1);
+       }
+       if (len - done >= SHA256_BLOCK_SIZE) {
+               const unsigned int rounds = (len - done) / SHA256_BLOCK_SIZE;
+
+               sha256_sparc64_transform(sctx->state, data + done, rounds);
+               done += rounds * SHA256_BLOCK_SIZE;
+       }
+
+       memcpy(sctx->buf, data + done, len - done);
+}
+
+static int sha256_sparc64_update(struct shash_desc *desc, const u8 *data,
+                                unsigned int len)
+{
+       struct sha256_state *sctx = shash_desc_ctx(desc);
+       unsigned int partial = sctx->count % SHA256_BLOCK_SIZE;
+
+       /* Handle the fast case right here */
+       if (partial + len < SHA256_BLOCK_SIZE) {
+               sctx->count += len;
+               memcpy(sctx->buf + partial, data, len);
+       } else
+               __sha256_sparc64_update(sctx, data, len, partial);
+
+       return 0;
+}
+
+static int sha256_sparc64_final(struct shash_desc *desc, u8 *out)
+{
+       struct sha256_state *sctx = shash_desc_ctx(desc);
+       unsigned int i, index, padlen;
+       __be32 *dst = (__be32 *)out;
+       __be64 bits;
+       static const u8 padding[SHA256_BLOCK_SIZE] = { 0x80, };
+
+       bits = cpu_to_be64(sctx->count << 3);
+
+       /* Pad out to 56 mod 64 and append length */
+       index = sctx->count % SHA256_BLOCK_SIZE;
+       padlen = (index < 56) ? (56 - index) : ((SHA256_BLOCK_SIZE+56) - index);
+
+       /* We need to fill a whole block for __sha256_sparc64_update() */
+       if (padlen <= 56) {
+               sctx->count += padlen;
+               memcpy(sctx->buf + index, padding, padlen);
+       } else {
+               __sha256_sparc64_update(sctx, padding, padlen, index);
+       }
+       __sha256_sparc64_update(sctx, (const u8 *)&bits, sizeof(bits), 56);
+
+       /* Store state in digest */
+       for (i = 0; i < 8; i++)
+               dst[i] = cpu_to_be32(sctx->state[i]);
+
+       /* Wipe context */
+       memset(sctx, 0, sizeof(*sctx));
+
+       return 0;
+}
+
+static int sha224_sparc64_final(struct shash_desc *desc, u8 *hash)
+{
+       u8 D[SHA256_DIGEST_SIZE];
+
+       sha256_sparc64_final(desc, D);
+
+       memcpy(hash, D, SHA224_DIGEST_SIZE);
+       memset(D, 0, SHA256_DIGEST_SIZE);
+
+       return 0;
+}
+
+static int sha256_sparc64_export(struct shash_desc *desc, void *out)
+{
+       struct sha256_state *sctx = shash_desc_ctx(desc);
+
+       memcpy(out, sctx, sizeof(*sctx));
+       return 0;
+}
+
+static int sha256_sparc64_import(struct shash_desc *desc, const void *in)
+{
+       struct sha256_state *sctx = shash_desc_ctx(desc);
+
+       memcpy(sctx, in, sizeof(*sctx));
+       return 0;
+}
+
+static struct shash_alg sha256 = {
+       .digestsize     =       SHA256_DIGEST_SIZE,
+       .init           =       sha256_sparc64_init,
+       .update         =       sha256_sparc64_update,
+       .final          =       sha256_sparc64_final,
+       .export         =       sha256_sparc64_export,
+       .import         =       sha256_sparc64_import,
+       .descsize       =       sizeof(struct sha256_state),
+       .statesize      =       sizeof(struct sha256_state),
+       .base           =       {
+               .cra_name       =       "sha256",
+               .cra_driver_name=       "sha256-sparc64",
+               .cra_priority   =       SPARC_CR_OPCODE_PRIORITY,
+               .cra_flags      =       CRYPTO_ALG_TYPE_SHASH,
+               .cra_blocksize  =       SHA256_BLOCK_SIZE,
+               .cra_module     =       THIS_MODULE,
+       }
+};
+
+static struct shash_alg sha224 = {
+       .digestsize     =       SHA224_DIGEST_SIZE,
+       .init           =       sha224_sparc64_init,
+       .update         =       sha256_sparc64_update,
+       .final          =       sha224_sparc64_final,
+       .descsize       =       sizeof(struct sha256_state),
+       .base           =       {
+               .cra_name       =       "sha224",
+               .cra_driver_name=       "sha224-sparc64",
+               .cra_priority   =       SPARC_CR_OPCODE_PRIORITY,
+               .cra_flags      =       CRYPTO_ALG_TYPE_SHASH,
+               .cra_blocksize  =       SHA224_BLOCK_SIZE,
+               .cra_module     =       THIS_MODULE,
+       }
+};
+
+static bool __init sparc64_has_sha256_opcode(void)
+{
+       unsigned long cfr;
+
+       if (!(sparc64_elf_hwcap & HWCAP_SPARC_CRYPTO))
+               return false;
+
+       __asm__ __volatile__("rd %%asr26, %0" : "=r" (cfr));
+       if (!(cfr & CFR_SHA256))
+               return false;
+
+       return true;
+}
+
+static int __init sha256_sparc64_mod_init(void)
+{
+       if (sparc64_has_sha256_opcode()) {
+               int ret = crypto_register_shash(&sha224);
+               if (ret < 0)
+                       return ret;
+
+               ret = crypto_register_shash(&sha256);
+               if (ret < 0) {
+                       crypto_unregister_shash(&sha224);
+                       return ret;
+               }
+
+               pr_info("Using sparc64 sha256 opcode optimized SHA-256/SHA-224 implementation\n");
+               return 0;
+       }
+       pr_info("sparc64 sha256 opcode not available.\n");
+       return -ENODEV;
+}
+
+static void __exit sha256_sparc64_mod_fini(void)
+{
+       crypto_unregister_shash(&sha224);
+       crypto_unregister_shash(&sha256);
+}
+
+module_init(sha256_sparc64_mod_init);
+module_exit(sha256_sparc64_mod_fini);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SHA-224 and SHA-256 Secure Hash Algorithm, sparc64 sha256 opcode accelerated");
+
+MODULE_ALIAS("sha224");
+MODULE_ALIAS("sha256");
diff --git a/arch/sparc/crypto/sha512_asm.S b/arch/sparc/crypto/sha512_asm.S
new file mode 100644 (file)
index 0000000..54bfba7
--- /dev/null
@@ -0,0 +1,102 @@
+#include <linux/linkage.h>
+#include <asm/visasm.h>
+
+#include "opcodes.h"
+
+ENTRY(sha512_sparc64_transform)
+       /* %o0 = digest, %o1 = data, %o2 = rounds */
+       VISEntry
+       ldd     [%o0 + 0x00], %f0
+       ldd     [%o0 + 0x08], %f2
+       ldd     [%o0 + 0x10], %f4
+       ldd     [%o0 + 0x18], %f6
+       ldd     [%o0 + 0x20], %f8
+       ldd     [%o0 + 0x28], %f10
+       andcc   %o1, 0x7, %g0
+       ldd     [%o0 + 0x30], %f12
+       bne,pn  %xcc, 10f
+        ldd    [%o0 + 0x38], %f14
+
+1:
+       ldd     [%o1 + 0x00], %f16
+       ldd     [%o1 + 0x08], %f18
+       ldd     [%o1 + 0x10], %f20
+       ldd     [%o1 + 0x18], %f22
+       ldd     [%o1 + 0x20], %f24
+       ldd     [%o1 + 0x28], %f26
+       ldd     [%o1 + 0x30], %f28
+       ldd     [%o1 + 0x38], %f30
+       ldd     [%o1 + 0x40], %f32
+       ldd     [%o1 + 0x48], %f34
+       ldd     [%o1 + 0x50], %f36
+       ldd     [%o1 + 0x58], %f38
+       ldd     [%o1 + 0x60], %f40
+       ldd     [%o1 + 0x68], %f42
+       ldd     [%o1 + 0x70], %f44
+       ldd     [%o1 + 0x78], %f46
+
+       SHA512
+
+       subcc   %o2, 1, %o2
+       bne,pt  %xcc, 1b
+        add    %o1, 0x80, %o1
+
+5:
+       std     %f0, [%o0 + 0x00]
+       std     %f2, [%o0 + 0x08]
+       std     %f4, [%o0 + 0x10]
+       std     %f6, [%o0 + 0x18]
+       std     %f8, [%o0 + 0x20]
+       std     %f10, [%o0 + 0x28]
+       std     %f12, [%o0 + 0x30]
+       std     %f14, [%o0 + 0x38]
+       retl
+        VISExit
+10:
+       alignaddr %o1, %g0, %o1
+
+       ldd     [%o1 + 0x00], %f18
+1:
+       ldd     [%o1 + 0x08], %f20
+       ldd     [%o1 + 0x10], %f22
+       ldd     [%o1 + 0x18], %f24
+       ldd     [%o1 + 0x20], %f26
+       ldd     [%o1 + 0x28], %f28
+       ldd     [%o1 + 0x30], %f30
+       ldd     [%o1 + 0x38], %f32
+       ldd     [%o1 + 0x40], %f34
+       ldd     [%o1 + 0x48], %f36
+       ldd     [%o1 + 0x50], %f38
+       ldd     [%o1 + 0x58], %f40
+       ldd     [%o1 + 0x60], %f42
+       ldd     [%o1 + 0x68], %f44
+       ldd     [%o1 + 0x70], %f46
+       ldd     [%o1 + 0x78], %f48
+       ldd     [%o1 + 0x80], %f50
+
+       faligndata %f18, %f20, %f16
+       faligndata %f20, %f22, %f18
+       faligndata %f22, %f24, %f20
+       faligndata %f24, %f26, %f22
+       faligndata %f26, %f28, %f24
+       faligndata %f28, %f30, %f26
+       faligndata %f30, %f32, %f28
+       faligndata %f32, %f34, %f30
+       faligndata %f34, %f36, %f32
+       faligndata %f36, %f38, %f34
+       faligndata %f38, %f40, %f36
+       faligndata %f40, %f42, %f38
+       faligndata %f42, %f44, %f40
+       faligndata %f44, %f46, %f42
+       faligndata %f46, %f48, %f44
+       faligndata %f48, %f50, %f46
+
+       SHA512
+
+       subcc   %o2, 1, %o2
+       fsrc2   %f50, %f18
+       bne,pt  %xcc, 1b
+        add    %o1, 0x80, %o1
+
+       ba,a,pt %xcc, 5b
+ENDPROC(sha512_sparc64_transform)
diff --git a/arch/sparc/crypto/sha512_glue.c b/arch/sparc/crypto/sha512_glue.c
new file mode 100644 (file)
index 0000000..486f0a2
--- /dev/null
@@ -0,0 +1,226 @@
+/* Glue code for SHA512 hashing optimized for sparc64 crypto opcodes.
+ *
+ * This is based largely upon crypto/sha512_generic.c
+ *
+ * Copyright (c) Jean-Luc Cooke <jlcooke@certainkey.com>
+ * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
+ * Copyright (c) 2003 Kyle McMartin <kyle@debian.org>
+ */
+
+#define pr_fmt(fmt)    KBUILD_MODNAME ": " fmt
+
+#include <crypto/internal/hash.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/cryptohash.h>
+#include <linux/types.h>
+#include <crypto/sha.h>
+
+#include <asm/pstate.h>
+#include <asm/elf.h>
+
+#include "opcodes.h"
+
+asmlinkage void sha512_sparc64_transform(u64 *digest, const char *data,
+                                        unsigned int rounds);
+
+static int sha512_sparc64_init(struct shash_desc *desc)
+{
+       struct sha512_state *sctx = shash_desc_ctx(desc);
+       sctx->state[0] = SHA512_H0;
+       sctx->state[1] = SHA512_H1;
+       sctx->state[2] = SHA512_H2;
+       sctx->state[3] = SHA512_H3;
+       sctx->state[4] = SHA512_H4;
+       sctx->state[5] = SHA512_H5;
+       sctx->state[6] = SHA512_H6;
+       sctx->state[7] = SHA512_H7;
+       sctx->count[0] = sctx->count[1] = 0;
+
+       return 0;
+}
+
+static int sha384_sparc64_init(struct shash_desc *desc)
+{
+       struct sha512_state *sctx = shash_desc_ctx(desc);
+       sctx->state[0] = SHA384_H0;
+       sctx->state[1] = SHA384_H1;
+       sctx->state[2] = SHA384_H2;
+       sctx->state[3] = SHA384_H3;
+       sctx->state[4] = SHA384_H4;
+       sctx->state[5] = SHA384_H5;
+       sctx->state[6] = SHA384_H6;
+       sctx->state[7] = SHA384_H7;
+       sctx->count[0] = sctx->count[1] = 0;
+
+       return 0;
+}
+
+static void __sha512_sparc64_update(struct sha512_state *sctx, const u8 *data,
+                                   unsigned int len, unsigned int partial)
+{
+       unsigned int done = 0;
+
+       if ((sctx->count[0] += len) < len)
+               sctx->count[1]++;
+       if (partial) {
+               done = SHA512_BLOCK_SIZE - partial;
+               memcpy(sctx->buf + partial, data, done);
+               sha512_sparc64_transform(sctx->state, sctx->buf, 1);
+       }
+       if (len - done >= SHA512_BLOCK_SIZE) {
+               const unsigned int rounds = (len - done) / SHA512_BLOCK_SIZE;
+
+               sha512_sparc64_transform(sctx->state, data + done, rounds);
+               done += rounds * SHA512_BLOCK_SIZE;
+       }
+
+       memcpy(sctx->buf, data + done, len - done);
+}
+
+static int sha512_sparc64_update(struct shash_desc *desc, const u8 *data,
+                                unsigned int len)
+{
+       struct sha512_state *sctx = shash_desc_ctx(desc);
+       unsigned int partial = sctx->count[0] % SHA512_BLOCK_SIZE;
+
+       /* Handle the fast case right here */
+       if (partial + len < SHA512_BLOCK_SIZE) {
+               if ((sctx->count[0] += len) < len)
+                       sctx->count[1]++;
+               memcpy(sctx->buf + partial, data, len);
+       } else
+               __sha512_sparc64_update(sctx, data, len, partial);
+
+       return 0;
+}
+
+static int sha512_sparc64_final(struct shash_desc *desc, u8 *out)
+{
+       struct sha512_state *sctx = shash_desc_ctx(desc);
+       unsigned int i, index, padlen;
+       __be64 *dst = (__be64 *)out;
+       __be64 bits[2];
+       static const u8 padding[SHA512_BLOCK_SIZE] = { 0x80, };
+
+       /* Save number of bits */
+       bits[1] = cpu_to_be64(sctx->count[0] << 3);
+       bits[0] = cpu_to_be64(sctx->count[1] << 3 | sctx->count[0] >> 61);
+
+       /* Pad out to 112 mod 128 and append length */
+       index = sctx->count[0] % SHA512_BLOCK_SIZE;
+       padlen = (index < 112) ? (112 - index) : ((SHA512_BLOCK_SIZE+112) - index);
+
+       /* We need to fill a whole block for __sha512_sparc64_update() */
+       if (padlen <= 112) {
+               if ((sctx->count[0] += padlen) < padlen)
+                       sctx->count[1]++;
+               memcpy(sctx->buf + index, padding, padlen);
+       } else {
+               __sha512_sparc64_update(sctx, padding, padlen, index);
+       }
+       __sha512_sparc64_update(sctx, (const u8 *)&bits, sizeof(bits), 112);
+
+       /* Store state in digest */
+       for (i = 0; i < 8; i++)
+               dst[i] = cpu_to_be64(sctx->state[i]);
+
+       /* Wipe context */
+       memset(sctx, 0, sizeof(*sctx));
+
+       return 0;
+}
+
+static int sha384_sparc64_final(struct shash_desc *desc, u8 *hash)
+{
+       u8 D[64];
+
+       sha512_sparc64_final(desc, D);
+
+       memcpy(hash, D, 48);
+       memset(D, 0, 64);
+
+       return 0;
+}
+
+static struct shash_alg sha512 = {
+       .digestsize     =       SHA512_DIGEST_SIZE,
+       .init           =       sha512_sparc64_init,
+       .update         =       sha512_sparc64_update,
+       .final          =       sha512_sparc64_final,
+       .descsize       =       sizeof(struct sha512_state),
+       .base           =       {
+               .cra_name       =       "sha512",
+               .cra_driver_name=       "sha512-sparc64",
+               .cra_priority   =       SPARC_CR_OPCODE_PRIORITY,
+               .cra_flags      =       CRYPTO_ALG_TYPE_SHASH,
+               .cra_blocksize  =       SHA512_BLOCK_SIZE,
+               .cra_module     =       THIS_MODULE,
+       }
+};
+
+static struct shash_alg sha384 = {
+       .digestsize     =       SHA384_DIGEST_SIZE,
+       .init           =       sha384_sparc64_init,
+       .update         =       sha512_sparc64_update,
+       .final          =       sha384_sparc64_final,
+       .descsize       =       sizeof(struct sha512_state),
+       .base           =       {
+               .cra_name       =       "sha384",
+               .cra_driver_name=       "sha384-sparc64",
+               .cra_priority   =       SPARC_CR_OPCODE_PRIORITY,
+               .cra_flags      =       CRYPTO_ALG_TYPE_SHASH,
+               .cra_blocksize  =       SHA384_BLOCK_SIZE,
+               .cra_module     =       THIS_MODULE,
+       }
+};
+
+static bool __init sparc64_has_sha512_opcode(void)
+{
+       unsigned long cfr;
+
+       if (!(sparc64_elf_hwcap & HWCAP_SPARC_CRYPTO))
+               return false;
+
+       __asm__ __volatile__("rd %%asr26, %0" : "=r" (cfr));
+       if (!(cfr & CFR_SHA512))
+               return false;
+
+       return true;
+}
+
+static int __init sha512_sparc64_mod_init(void)
+{
+       if (sparc64_has_sha512_opcode()) {
+               int ret = crypto_register_shash(&sha384);
+               if (ret < 0)
+                       return ret;
+
+               ret = crypto_register_shash(&sha512);
+               if (ret < 0) {
+                       crypto_unregister_shash(&sha384);
+                       return ret;
+               }
+
+               pr_info("Using sparc64 sha512 opcode optimized SHA-512/SHA-384 implementation\n");
+               return 0;
+       }
+       pr_info("sparc64 sha512 opcode not available.\n");
+       return -ENODEV;
+}
+
+static void __exit sha512_sparc64_mod_fini(void)
+{
+       crypto_unregister_shash(&sha384);
+       crypto_unregister_shash(&sha512);
+}
+
+module_init(sha512_sparc64_mod_init);
+module_exit(sha512_sparc64_mod_fini);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SHA-384 and SHA-512 Secure Hash Algorithm, sparc64 sha512 opcode accelerated");
+
+MODULE_ALIAS("sha384");
+MODULE_ALIAS("sha512");
index 61ebe7411ceb0af141b4bc9ceef1c1bcf36521fc..cc0006dc5d4a9c32183d8aeade580db2733b232a 100644 (file)
 /* SpitFire and later extended ASIs.  The "(III)" marker designates
  * UltraSparc-III and later specific ASIs.  The "(CMT)" marker designates
  * Chip Multi Threading specific ASIs.  "(NG)" designates Niagara specific
- * ASIs, "(4V)" designates SUN4V specific ASIs.
+ * ASIs, "(4V)" designates SUN4V specific ASIs.  "(NG4)" designates SPARC-T4
+ * and later ASIs.
  */
 #define ASI_PHYS_USE_EC                0x14 /* PADDR, E-cachable               */
 #define ASI_PHYS_BYPASS_EC_E   0x15 /* PADDR, E-bit                    */
 #define ASI_UDBL_CONTROL_R     0x7f /* External UDB control regs rd low*/
 #define ASI_INTR_R             0x7f /* IRQ vector dispatch read        */
 #define ASI_INTR_DATAN_R       0x7f /* (III) In irq vector data reg N  */
+#define ASI_PIC                        0xb0 /* (NG4) PIC registers             */
 #define ASI_PST8_P             0xc0 /* Primary, 8 8-bit, partial       */
 #define ASI_PST8_S             0xc1 /* Secondary, 8 8-bit, partial     */
 #define ASI_PST16_P            0xc2 /* Primary, 4 16-bit, partial      */
index 7df8b7f544d47a5520065c7e452e03552b12f6b8..370ca1e71ffbc8dc2fb4cd9567b44f95cf61eee6 100644 (file)
 #define AV_SPARC_IMA           0x00400000 /* integer multiply-add */
 #define AV_SPARC_ASI_CACHE_SPARING \
                                0x00800000 /* cache sparing ASIs available */
+#define AV_SPARC_PAUSE         0x01000000 /* PAUSE available */
+#define AV_SPARC_CBCOND                0x02000000 /* CBCOND insns available */
+
+/* Solaris decided to enumerate every single crypto instruction type
+ * in the AT_HWCAP bits.  This is wasteful, since if crypto is present,
+ * you still need to look in the CFR register to see if the opcode is
+ * really available.  So we simply advertise only "crypto" support.
+ */
+#define HWCAP_SPARC_CRYPTO     0x04000000 /* CRYPTO insns available */
 
 #define CORE_DUMP_USE_REGSET
 
index 015a761eaa322c1a9d11d2f20281c42184daa4bc..ca121f0fa3ec73314de54f3fc8c3e73988e594bb 100644 (file)
@@ -2934,6 +2934,16 @@ extern unsigned long sun4v_reboot_data_set(unsigned long ra,
                                           unsigned long len);
 #endif
 
+#define HV_FAST_VT_GET_PERFREG         0x184
+#define HV_FAST_VT_SET_PERFREG         0x185
+
+#ifndef __ASSEMBLY__
+extern unsigned long sun4v_vt_get_perfreg(unsigned long reg_num,
+                                         unsigned long *reg_val);
+extern unsigned long sun4v_vt_set_perfreg(unsigned long reg_num,
+                                         unsigned long reg_val);
+#endif
+
 /* Function numbers for HV_CORE_TRAP.  */
 #define HV_CORE_SET_VER                        0x00
 #define HV_CORE_PUTCHAR                        0x01
@@ -2964,6 +2974,7 @@ extern unsigned long sun4v_reboot_data_set(unsigned long ra,
 #define HV_GRP_NIU                     0x0204
 #define HV_GRP_VF_CPU                  0x0205
 #define HV_GRP_KT_CPU                  0x0209
+#define HV_GRP_VT_CPU                  0x020c
 #define HV_GRP_DIAG                    0x0300
 
 #ifndef __ASSEMBLY__
index 9faa046713fbaf3f6491a4dc3afca964849d9912..139097f3a67bb9dfaa77b01aed9bd177c27a846a 100644 (file)
@@ -73,6 +73,7 @@ extern void mdesc_register_notifier(struct mdesc_notifier_client *client);
 
 extern void mdesc_fill_in_cpu_data(cpumask_t *mask);
 extern void mdesc_populate_present_mask(cpumask_t *mask);
+extern void mdesc_get_page_sizes(cpumask_t *mask, unsigned long *pgsz_mask);
 
 extern void sun4v_mdesc_init(void);
 
index 27517879a6c2318176a0458078a3b81ead17df7b..c72f3045820ccf07866c7d4fe3a8fcd7bd4648f7 100644 (file)
@@ -94,7 +94,7 @@ extern int prom_getprev(void);
 extern void prom_console_write_buf(const char *buf, int len);
 
 /* Prom's internal routines, don't use in kernel/boot code. */
-extern void prom_printf(const char *fmt, ...);
+extern __printf(1, 2) void prom_printf(const char *fmt, ...);
 extern void prom_write(const char *buf, unsigned int len);
 
 /* Multiprocessor operations... */
index 97a90475c31478be1f20d870daf62469488934e6..a12dbe3b7762daf86e969401ad760a4b5305df60 100644 (file)
@@ -98,7 +98,7 @@ extern unsigned char prom_get_idprom(char *idp_buffer, int idpbuf_size);
 extern void prom_console_write_buf(const char *buf, int len);
 
 /* Prom's internal routines, don't use in kernel/boot code. */
-extern void prom_printf(const char *fmt, ...);
+extern __printf(1, 2) void prom_printf(const char *fmt, ...);
 extern void prom_write(const char *buf, unsigned int len);
 
 /* Multiprocessor operations... */
index 288d7beba051e8d976e91b759dae623a1a1124da..942bb17f60cd6a7d4f223a4b5106d15cf3468933 100644 (file)
@@ -2,8 +2,13 @@
 #define __PCR_H
 
 struct pcr_ops {
-       u64 (*read)(void);
-       void (*write)(u64);
+       u64 (*read_pcr)(unsigned long);
+       void (*write_pcr)(unsigned long, u64);
+       u64 (*read_pic)(unsigned long);
+       void (*write_pic)(unsigned long, u64);
+       u64 (*nmi_picl_value)(unsigned int nmi_hz);
+       u64 pcr_nmi_enable;
+       u64 pcr_nmi_disable;
 };
 extern const struct pcr_ops *pcr_ops;
 
@@ -27,21 +32,18 @@ extern void schedule_deferred_pcr_work(void);
 #define PCR_N2_SL1_SHIFT       27
 #define PCR_N2_OV1             0x80000000
 
-extern unsigned int picl_shift;
-
-/* In order to commonize as much of the implementation as
- * possible, we use PICH as our counter.  Mostly this is
- * to accommodate Niagara-1 which can only count insn cycles
- * in PICH.
- */
-static inline u64 picl_value(unsigned int nmi_hz)
-{
-       u32 delta = local_cpu_data().clock_tick / (nmi_hz << picl_shift);
-
-       return ((u64)((0 - delta) & 0xffffffff)) << 32;
-}
-
-extern u64 pcr_enable;
+#define PCR_N4_OV              0x00000001 /* PIC overflow             */
+#define PCR_N4_TOE             0x00000002 /* Trap On Event            */
+#define PCR_N4_UTRACE          0x00000004 /* Trace user events        */
+#define PCR_N4_STRACE          0x00000008 /* Trace supervisor events  */
+#define PCR_N4_HTRACE          0x00000010 /* Trace hypervisor events  */
+#define PCR_N4_MASK            0x000007e0 /* Event mask               */
+#define PCR_N4_MASK_SHIFT      5
+#define PCR_N4_SL              0x0000f800 /* Event Select             */
+#define PCR_N4_SL_SHIFT                11
+#define PCR_N4_PICNPT          0x00010000 /* PIC non-privileged trap  */
+#define PCR_N4_PICNHT          0x00020000 /* PIC non-hypervisor trap  */
+#define PCR_N4_NTC             0x00040000 /* Next-To-Commit wrap      */
 
 extern int pcr_arch_init(void);
 
index 3332d2cba6c1d15fa39a1f6070cbc4055279e63d..214feefa577c815970605cea9e0b11e84e4d4815 100644 (file)
@@ -54,11 +54,6 @@ enum perfctr_opcode {
        PERFCTR_GETPCR
 };
 
-/* I don't want the kernel's namespace to be polluted with this
- * stuff when this file is included.  --DaveM
- */
-#ifndef __KERNEL__
-
 #define  PRIV 0x00000001
 #define  SYS  0x00000002
 #define  USR  0x00000004
@@ -168,29 +163,4 @@ struct vcounter_struct {
   unsigned long long vcnt1;
 };
 
-#else /* !(__KERNEL__) */
-
-#ifndef CONFIG_SPARC32
-
-/* Performance counter register access. */
-#define read_pcr(__p)  __asm__ __volatile__("rd        %%pcr, %0" : "=r" (__p))
-#define write_pcr(__p) __asm__ __volatile__("wr        %0, 0x0, %%pcr" : : "r" (__p))
-#define read_pic(__p)  __asm__ __volatile__("rd %%pic, %0" : "=r" (__p))
-
-/* Blackbird errata workaround.  See commentary in
- * arch/sparc64/kernel/smp.c:smp_percpu_timer_interrupt()
- * for more information.
- */
-#define write_pic(__p)                                         \
-       __asm__ __volatile__("ba,pt     %%xcc, 99f\n\t"         \
-                            " nop\n\t"                         \
-                            ".align    64\n"                   \
-                         "99:wr        %0, 0x0, %%pic\n\t"     \
-                            "rd        %%pic, %%g0" : : "r" (__p))
-#define reset_pic()    write_pic(0)
-
-#endif /* !CONFIG_SPARC32 */
-
-#endif /* !(__KERNEL__) */
-
 #endif /* !(PERF_COUNTER_API) */
index a26a53777bb06b51fccd5d8add777039be1dfcf5..4b6b998afd9982040f79ace9935eea99af7062d3 100644 (file)
 #define VERS_MAXTL     _AC(0x000000000000ff00,UL) /* Max Trap Level.   */
 #define VERS_MAXWIN    _AC(0x000000000000001f,UL) /* Max RegWindow Idx.*/
 
+/* Compatibility Feature Register (%asr26), SPARC-T4 and later  */
+#define CFR_AES                _AC(0x0000000000000001,UL) /* Supports AES opcodes     */
+#define CFR_DES                _AC(0x0000000000000002,UL) /* Supports DES opcodes     */
+#define CFR_KASUMI     _AC(0x0000000000000004,UL) /* Supports KASUMI opcodes  */
+#define CFR_CAMELLIA   _AC(0x0000000000000008,UL) /* Supports CAMELLIA opcodes*/
+#define CFR_MD5                _AC(0x0000000000000010,UL) /* Supports MD5 opcodes     */
+#define CFR_SHA1       _AC(0x0000000000000020,UL) /* Supports SHA1 opcodes    */
+#define CFR_SHA256     _AC(0x0000000000000040,UL) /* Supports SHA256 opcodes  */
+#define CFR_SHA512     _AC(0x0000000000000080,UL) /* Supports SHA512 opcodes  */
+#define CFR_MPMUL      _AC(0x0000000000000100,UL) /* Supports MPMUL opcodes   */
+#define CFR_MONTMUL    _AC(0x0000000000000200,UL) /* Supports MONTMUL opcodes */
+#define CFR_MONTSQR    _AC(0x0000000000000400,UL) /* Supports MONTSQR opcodes */
+#define CFR_CRC32C     _AC(0x0000000000000800,UL) /* Supports CRC32C opcodes  */
+
 #endif /* !(_SPARC64_PSTATE_H) */
index fb2693464807dd59020bd737cd115e6f5f45a410..d9a677c51926815865d3db306ac4e6cd1c5b90d5 100644 (file)
 #else
 #define __ARCH_WANT_COMPAT_SYS_TIME
 #define __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND
+#define __ARCH_WANT_COMPAT_SYS_SENDFILE
 #endif
 
 /*
index b42ddbf9651eb3d2e485d8d6934a7d6052e281dc..ee5dcced2499066732e97408b74d5459a6e452db 100644 (file)
@@ -559,10 +559,10 @@ niagara_tlb_fixup:
        be,pt   %xcc, niagara2_patch
         nop
        cmp     %g1, SUN4V_CHIP_NIAGARA4
-       be,pt   %xcc, niagara2_patch
+       be,pt   %xcc, niagara4_patch
         nop
        cmp     %g1, SUN4V_CHIP_NIAGARA5
-       be,pt   %xcc, niagara2_patch
+       be,pt   %xcc, niagara4_patch
         nop
 
        call    generic_patch_copyops
@@ -573,6 +573,16 @@ niagara_tlb_fixup:
         nop
 
        ba,a,pt %xcc, 80f
+niagara4_patch:
+       call    niagara4_patch_copyops
+        nop
+       call    niagara_patch_bzero
+        nop
+       call    niagara4_patch_pageops
+        nop
+
+       ba,a,pt %xcc, 80f
+
 niagara2_patch:
        call    niagara2_patch_copyops
         nop
index 8593672838fde52d133e2597e25753c19f12394c..c0a2de0fd62424914754e19885f3ec1abff1c44e 100644 (file)
@@ -45,6 +45,7 @@ static struct api_info api_table[] = {
        { .group = HV_GRP_NIU,                                  },
        { .group = HV_GRP_VF_CPU,                               },
        { .group = HV_GRP_KT_CPU,                               },
+       { .group = HV_GRP_VT_CPU,                               },
        { .group = HV_GRP_DIAG,         .flags = FLAG_PRE_API   },
 };
 
@@ -193,7 +194,7 @@ void __init sun4v_hvapi_init(void)
 
 bad:
        prom_printf("HVAPI: Cannot register API group "
-                   "%lx with major(%u) minor(%u)\n",
+                   "%lx with major(%lu) minor(%lu)\n",
                    group, major, minor);
        prom_halt();
 }
index 58d60de4d65b496d1287de3468d58e2cb08e6075..f3ab509b76a8daf32123ec47bbbf0ebb5b360bde 100644 (file)
@@ -805,3 +805,19 @@ ENTRY(sun4v_reboot_data_set)
        retl
         nop
 ENDPROC(sun4v_reboot_data_set)
+
+ENTRY(sun4v_vt_get_perfreg)
+       mov     %o1, %o4
+       mov     HV_FAST_VT_GET_PERFREG, %o5
+       ta      HV_FAST_TRAP
+       stx     %o1, [%o4]
+       retl
+        nop
+ENDPROC(sun4v_vt_get_perfreg)
+
+ENTRY(sun4v_vt_set_perfreg)
+       mov     HV_FAST_VT_SET_PERFREG, %o5
+       ta      HV_FAST_TRAP
+       retl
+        nop
+ENDPROC(sun4v_vt_set_perfreg)
index 79f310364849af8f191e18414d4d9147a9ec8a1e..0746e5e32b372ab3a81eb64c2aae79d957b6e88a 100644 (file)
@@ -188,31 +188,26 @@ valid_addr_bitmap_patch:
        be,pn           %xcc, kvmap_dtlb_longpath
 
 2:      sethi          %hi(kpte_linear_bitmap), %g2
-       or              %g2, %lo(kpte_linear_bitmap), %g2
 
        /* Get the 256MB physical address index. */
        sllx            %g4, 21, %g5
-       mov             1, %g7
+       or              %g2, %lo(kpte_linear_bitmap), %g2
        srlx            %g5, 21 + 28, %g5
+       and             %g5, (32 - 1), %g7
 
-       /* Don't try this at home kids... this depends upon srlx
-        * only taking the low 6 bits of the shift count in %g5.
-        */
-       sllx            %g7, %g5, %g7
-
-       /* Divide by 64 to get the offset into the bitmask.  */
-       srlx            %g5, 6, %g5
+       /* Divide by 32 to get the offset into the bitmask.  */
+       srlx            %g5, 5, %g5
+       add             %g7, %g7, %g7
        sllx            %g5, 3, %g5
 
-       /* kern_linear_pte_xor[((mask & bit) ? 1 : 0)] */
+       /* kern_linear_pte_xor[(mask >> shift) & 3)] */
        ldx             [%g2 + %g5], %g2
-       andcc           %g2, %g7, %g0
+       srlx            %g2, %g7, %g7
        sethi           %hi(kern_linear_pte_xor), %g5
+       and             %g7, 3, %g7
        or              %g5, %lo(kern_linear_pte_xor), %g5
-       bne,a,pt        %xcc, 1f
-        add            %g5, 8, %g5
-
-1:     ldx             [%g5], %g2
+       sllx            %g7, 3, %g7
+       ldx             [%g5 + %g7], %g2
 
        .globl          kvmap_linear_patch
 kvmap_linear_patch:
index 6dc796280589221f12526111647e20a9eb0911fc..831c001604e8e9bf431b2993ec577b40b6e3f776 100644 (file)
@@ -817,6 +817,30 @@ void __cpuinit mdesc_populate_present_mask(cpumask_t *mask)
        mdesc_iterate_over_cpus(record_one_cpu, NULL, mask);
 }
 
+static void * __init check_one_pgsz(struct mdesc_handle *hp, u64 mp, int cpuid, void *arg)
+{
+       const u64 *pgsz_prop = mdesc_get_property(hp, mp, "mmu-page-size-list", NULL);
+       unsigned long *pgsz_mask = arg;
+       u64 val;
+
+       val = (HV_PGSZ_MASK_8K | HV_PGSZ_MASK_64K |
+              HV_PGSZ_MASK_512K | HV_PGSZ_MASK_4MB);
+       if (pgsz_prop)
+               val = *pgsz_prop;
+
+       if (!*pgsz_mask)
+               *pgsz_mask = val;
+       else
+               *pgsz_mask &= val;
+       return NULL;
+}
+
+void __init mdesc_get_page_sizes(cpumask_t *mask, unsigned long *pgsz_mask)
+{
+       *pgsz_mask = 0;
+       mdesc_iterate_over_cpus(check_one_pgsz, pgsz_mask, mask);
+}
+
 static void * __cpuinit fill_in_one_cpu(struct mdesc_handle *hp, u64 mp, int cpuid, void *arg)
 {
        const u64 *cfreq = mdesc_get_property(hp, mp, "clock-frequency", NULL);
index eb1c1f010a4797e629e370968e3980f887c92837..6479256fd5a4b650a2f5c3ca3cf5c501033dc9fb 100644 (file)
@@ -22,7 +22,6 @@
 #include <asm/perf_event.h>
 #include <asm/ptrace.h>
 #include <asm/pcr.h>
-#include <asm/perfctr.h>
 
 #include "kstack.h"
 
@@ -109,7 +108,7 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
                       pt_regs_trap_type(regs), SIGINT) == NOTIFY_STOP)
                touched = 1;
        else
-               pcr_ops->write(PCR_PIC_PRIV);
+               pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_disable);
 
        sum = local_cpu_data().irq0_irqs;
        if (__get_cpu_var(nmi_touch)) {
@@ -126,8 +125,8 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
                __this_cpu_write(alert_counter, 0);
        }
        if (__get_cpu_var(wd_enabled)) {
-               write_pic(picl_value(nmi_hz));
-               pcr_ops->write(pcr_enable);
+               pcr_ops->write_pic(0, pcr_ops->nmi_picl_value(nmi_hz));
+               pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_enable);
        }
 
        restore_hardirq_stack(orig_sp);
@@ -166,7 +165,7 @@ static void report_broken_nmi(int cpu, int *prev_nmi_count)
 
 void stop_nmi_watchdog(void *unused)
 {
-       pcr_ops->write(PCR_PIC_PRIV);
+       pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_disable);
        __get_cpu_var(wd_enabled) = 0;
        atomic_dec(&nmi_active);
 }
@@ -223,10 +222,10 @@ void start_nmi_watchdog(void *unused)
        __get_cpu_var(wd_enabled) = 1;
        atomic_inc(&nmi_active);
 
-       pcr_ops->write(PCR_PIC_PRIV);
-       write_pic(picl_value(nmi_hz));
+       pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_disable);
+       pcr_ops->write_pic(0, pcr_ops->nmi_picl_value(nmi_hz));
 
-       pcr_ops->write(pcr_enable);
+       pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_enable);
 }
 
 static void nmi_adjust_hz_one(void *unused)
@@ -234,10 +233,10 @@ static void nmi_adjust_hz_one(void *unused)
        if (!__get_cpu_var(wd_enabled))
                return;
 
-       pcr_ops->write(PCR_PIC_PRIV);
-       write_pic(picl_value(nmi_hz));
+       pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_disable);
+       pcr_ops->write_pic(0, pcr_ops->nmi_picl_value(nmi_hz));
 
-       pcr_ops->write(pcr_enable);
+       pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_enable);
 }
 
 void nmi_adjust_hz(unsigned int new_hz)
index 7661e84a05a06c23e34ef49fb5c63bfb69934761..051b69caeffd382b9252a3986154bb8250b3f87d 100644 (file)
@@ -594,7 +594,7 @@ static int __devinit pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
                printk(KERN_ERR PFX "Strange virtual-dma[%08x:%08x].\n",
                       vdma[0], vdma[1]);
                return -EINVAL;
-       };
+       }
 
        dma_mask = (roundup_pow_of_two(vdma[1]) - 1UL);
        num_tsb_entries = vdma[1] / IO_PAGE_SIZE;
index 0ce0dd2332aac60802a0cce15366e2b0158c2d36..269af58497aa8b0844ea6e26d083e1c60eb60bca 100644 (file)
 #include <asm/pil.h>
 #include <asm/pcr.h>
 #include <asm/nmi.h>
+#include <asm/asi.h>
 #include <asm/spitfire.h>
-#include <asm/perfctr.h>
 
 /* This code is shared between various users of the performance
  * counters.  Users will be oprofile, pseudo-NMI watchdog, and the
  * perf_event support layer.
  */
 
-#define PCR_SUN4U_ENABLE       (PCR_PIC_PRIV | PCR_STRACE | PCR_UTRACE)
-#define PCR_N2_ENABLE          (PCR_PIC_PRIV | PCR_STRACE | PCR_UTRACE | \
-                                PCR_N2_TOE_OV1 | \
-                                (2 << PCR_N2_SL1_SHIFT) | \
-                                (0xff << PCR_N2_MASK1_SHIFT))
-
-u64 pcr_enable;
-unsigned int picl_shift;
-
 /* Performance counter interrupts run unmasked at PIL level 15.
  * Therefore we can't do things like wakeups and other work
  * that expects IRQ disabling to be adhered to in locking etc.
@@ -60,39 +51,144 @@ void arch_irq_work_raise(void)
 const struct pcr_ops *pcr_ops;
 EXPORT_SYMBOL_GPL(pcr_ops);
 
-static u64 direct_pcr_read(void)
+static u64 direct_pcr_read(unsigned long reg_num)
 {
        u64 val;
 
-       read_pcr(val);
+       WARN_ON_ONCE(reg_num != 0);
+       __asm__ __volatile__("rd %%pcr, %0" : "=r" (val));
        return val;
 }
 
-static void direct_pcr_write(u64 val)
+static void direct_pcr_write(unsigned long reg_num, u64 val)
+{
+       WARN_ON_ONCE(reg_num != 0);
+       __asm__ __volatile__("wr %0, 0x0, %%pcr" : : "r" (val));
+}
+
+static u64 direct_pic_read(unsigned long reg_num)
 {
-       write_pcr(val);
+       u64 val;
+
+       WARN_ON_ONCE(reg_num != 0);
+       __asm__ __volatile__("rd %%pic, %0" : "=r" (val));
+       return val;
+}
+
+static void direct_pic_write(unsigned long reg_num, u64 val)
+{
+       WARN_ON_ONCE(reg_num != 0);
+
+       /* Blackbird errata workaround.  See commentary in
+        * arch/sparc64/kernel/smp.c:smp_percpu_timer_interrupt()
+        * for more information.
+        */
+       __asm__ __volatile__("ba,pt     %%xcc, 99f\n\t"
+                            " nop\n\t"
+                            ".align    64\n"
+                         "99:wr        %0, 0x0, %%pic\n\t"
+                            "rd        %%pic, %%g0" : : "r" (val));
+}
+
+static u64 direct_picl_value(unsigned int nmi_hz)
+{
+       u32 delta = local_cpu_data().clock_tick / nmi_hz;
+
+       return ((u64)((0 - delta) & 0xffffffff)) << 32;
 }
 
 static const struct pcr_ops direct_pcr_ops = {
-       .read   = direct_pcr_read,
-       .write  = direct_pcr_write,
+       .read_pcr               = direct_pcr_read,
+       .write_pcr              = direct_pcr_write,
+       .read_pic               = direct_pic_read,
+       .write_pic              = direct_pic_write,
+       .nmi_picl_value         = direct_picl_value,
+       .pcr_nmi_enable         = (PCR_PIC_PRIV | PCR_STRACE | PCR_UTRACE),
+       .pcr_nmi_disable        = PCR_PIC_PRIV,
 };
 
-static void n2_pcr_write(u64 val)
+static void n2_pcr_write(unsigned long reg_num, u64 val)
 {
        unsigned long ret;
 
+       WARN_ON_ONCE(reg_num != 0);
        if (val & PCR_N2_HTRACE) {
                ret = sun4v_niagara2_setperf(HV_N2_PERF_SPARC_CTL, val);
                if (ret != HV_EOK)
-                       write_pcr(val);
+                       direct_pcr_write(reg_num, val);
        } else
-               write_pcr(val);
+               direct_pcr_write(reg_num, val);
+}
+
+static u64 n2_picl_value(unsigned int nmi_hz)
+{
+       u32 delta = local_cpu_data().clock_tick / (nmi_hz << 2);
+
+       return ((u64)((0 - delta) & 0xffffffff)) << 32;
 }
 
 static const struct pcr_ops n2_pcr_ops = {
-       .read   = direct_pcr_read,
-       .write  = n2_pcr_write,
+       .read_pcr               = direct_pcr_read,
+       .write_pcr              = n2_pcr_write,
+       .read_pic               = direct_pic_read,
+       .write_pic              = direct_pic_write,
+       .nmi_picl_value         = n2_picl_value,
+       .pcr_nmi_enable         = (PCR_PIC_PRIV | PCR_STRACE | PCR_UTRACE |
+                                  PCR_N2_TOE_OV1 |
+                                  (2 << PCR_N2_SL1_SHIFT) |
+                                  (0xff << PCR_N2_MASK1_SHIFT)),
+       .pcr_nmi_disable        = PCR_PIC_PRIV,
+};
+
+static u64 n4_pcr_read(unsigned long reg_num)
+{
+       unsigned long val;
+
+       (void) sun4v_vt_get_perfreg(reg_num, &val);
+
+       return val;
+}
+
+static void n4_pcr_write(unsigned long reg_num, u64 val)
+{
+       (void) sun4v_vt_set_perfreg(reg_num, val);
+}
+
+static u64 n4_pic_read(unsigned long reg_num)
+{
+       unsigned long val;
+
+       __asm__ __volatile__("ldxa [%1] %2, %0"
+                            : "=r" (val)
+                            : "r" (reg_num * 0x8UL), "i" (ASI_PIC));
+
+       return val;
+}
+
+static void n4_pic_write(unsigned long reg_num, u64 val)
+{
+       __asm__ __volatile__("stxa %0, [%1] %2"
+                            : /* no outputs */
+                            : "r" (val), "r" (reg_num * 0x8UL), "i" (ASI_PIC));
+}
+
+static u64 n4_picl_value(unsigned int nmi_hz)
+{
+       u32 delta = local_cpu_data().clock_tick / (nmi_hz << 2);
+
+       return ((u64)((0 - delta) & 0xffffffff));
+}
+
+static const struct pcr_ops n4_pcr_ops = {
+       .read_pcr               = n4_pcr_read,
+       .write_pcr              = n4_pcr_write,
+       .read_pic               = n4_pic_read,
+       .write_pic              = n4_pic_write,
+       .nmi_picl_value         = n4_picl_value,
+       .pcr_nmi_enable         = (PCR_N4_PICNPT | PCR_N4_STRACE |
+                                  PCR_N4_UTRACE | PCR_N4_TOE |
+                                  (26 << PCR_N4_SL_SHIFT)),
+       .pcr_nmi_disable        = PCR_N4_PICNPT,
 };
 
 static unsigned long perf_hsvc_group;
@@ -115,6 +211,10 @@ static int __init register_perf_hsvc(void)
                        perf_hsvc_group = HV_GRP_KT_CPU;
                        break;
 
+               case SUN4V_CHIP_NIAGARA4:
+                       perf_hsvc_group = HV_GRP_VT_CPU;
+                       break;
+
                default:
                        return -ENODEV;
                }
@@ -139,6 +239,29 @@ static void __init unregister_perf_hsvc(void)
        sun4v_hvapi_unregister(perf_hsvc_group);
 }
 
+static int __init setup_sun4v_pcr_ops(void)
+{
+       int ret = 0;
+
+       switch (sun4v_chip_type) {
+       case SUN4V_CHIP_NIAGARA1:
+       case SUN4V_CHIP_NIAGARA2:
+       case SUN4V_CHIP_NIAGARA3:
+               pcr_ops = &n2_pcr_ops;
+               break;
+
+       case SUN4V_CHIP_NIAGARA4:
+               pcr_ops = &n4_pcr_ops;
+               break;
+
+       default:
+               ret = -ENODEV;
+               break;
+       }
+
+       return ret;
+}
+
 int __init pcr_arch_init(void)
 {
        int err = register_perf_hsvc();
@@ -148,15 +271,14 @@ int __init pcr_arch_init(void)
 
        switch (tlb_type) {
        case hypervisor:
-               pcr_ops = &n2_pcr_ops;
-               pcr_enable = PCR_N2_ENABLE;
-               picl_shift = 2;
+               err = setup_sun4v_pcr_ops();
+               if (err)
+                       goto out_unregister;
                break;
 
        case cheetah:
        case cheetah_plus:
                pcr_ops = &direct_pcr_ops;
-               pcr_enable = PCR_SUN4U_ENABLE;
                break;
 
        case spitfire:
index 5713957dcb8a38bb3baeb168e2ea33656f8a9a56..e48651dace1bdca7812d285dd198f05e5068ab47 100644 (file)
 #include <linux/atomic.h>
 #include <asm/nmi.h>
 #include <asm/pcr.h>
-#include <asm/perfctr.h>
 #include <asm/cacheflush.h>
 
 #include "kernel.h"
 #include "kstack.h"
 
-/* Sparc64 chips have two performance counters, 32-bits each, with
- * overflow interrupts generated on transition from 0xffffffff to 0.
- * The counters are accessed in one go using a 64-bit register.
+/* Two classes of sparc64 chips currently exist.  All of which have
+ * 32-bit counters which can generate overflow interrupts on the
+ * transition from 0xffffffff to 0.
  *
- * Both counters are controlled using a single control register.  The
- * only way to stop all sampling is to clear all of the context (user,
- * supervisor, hypervisor) sampling enable bits.  But these bits apply
- * to both counters, thus the two counters can't be enabled/disabled
- * individually.
+ * All chips upto and including SPARC-T3 have two performance
+ * counters.  The two 32-bit counters are accessed in one go using a
+ * single 64-bit register.
  *
- * The control register has two event fields, one for each of the two
- * counters.  It's thus nearly impossible to have one counter going
- * while keeping the other one stopped.  Therefore it is possible to
- * get overflow interrupts for counters not currently "in use" and
- * that condition must be checked in the overflow interrupt handler.
+ * On these older chips both counters are controlled using a single
+ * control register.  The only way to stop all sampling is to clear
+ * all of the context (user, supervisor, hypervisor) sampling enable
+ * bits.  But these bits apply to both counters, thus the two counters
+ * can't be enabled/disabled individually.
+ *
+ * Furthermore, the control register on these older chips have two
+ * event fields, one for each of the two counters.  It's thus nearly
+ * impossible to have one counter going while keeping the other one
+ * stopped.  Therefore it is possible to get overflow interrupts for
+ * counters not currently "in use" and that condition must be checked
+ * in the overflow interrupt handler.
  *
  * So we use a hack, in that we program inactive counters with the
  * "sw_count0" and "sw_count1" events.  These count how many times
  * the instruction "sethi %hi(0xfc000), %g0" is executed.  It's an
  * unusual way to encode a NOP and therefore will not trigger in
  * normal code.
+ *
+ * Starting with SPARC-T4 we have one control register per counter.
+ * And the counters are stored in individual registers.  The registers
+ * for the counters are 64-bit but only a 32-bit counter is
+ * implemented.  The event selections on SPARC-T4 lack any
+ * restrictions, therefore we can elide all of the complicated
+ * conflict resolution code we have for SPARC-T3 and earlier chips.
  */
 
-#define MAX_HWEVENTS                   2
+#define MAX_HWEVENTS                   4
+#define MAX_PCRS                       4
 #define MAX_PERIOD                     ((1UL << 32) - 1)
 
 #define PIC_UPPER_INDEX                        0
@@ -90,8 +102,8 @@ struct cpu_hw_events {
         */
        int                     current_idx[MAX_HWEVENTS];
 
-       /* Software copy of %pcr register on this cpu.  */
-       u64                     pcr;
+       /* Software copy of %pcr register(s) on this cpu.  */
+       u64                     pcr[MAX_HWEVENTS];
 
        /* Enabled/disable state.  */
        int                     enabled;
@@ -103,6 +115,8 @@ DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, };
 /* An event map describes the characteristics of a performance
  * counter event.  In particular it gives the encoding as well as
  * a mask telling which counters the event can be measured on.
+ *
+ * The mask is unused on SPARC-T4 and later.
  */
 struct perf_event_map {
        u16     encoding;
@@ -142,15 +156,53 @@ struct sparc_pmu {
        const struct perf_event_map     *(*event_map)(int);
        const cache_map_t               *cache_map;
        int                             max_events;
+       u32                             (*read_pmc)(int);
+       void                            (*write_pmc)(int, u64);
        int                             upper_shift;
        int                             lower_shift;
        int                             event_mask;
+       int                             user_bit;
+       int                             priv_bit;
        int                             hv_bit;
        int                             irq_bit;
        int                             upper_nop;
        int                             lower_nop;
+       unsigned int                    flags;
+#define SPARC_PMU_ALL_EXCLUDES_SAME    0x00000001
+#define SPARC_PMU_HAS_CONFLICTS                0x00000002
+       int                             max_hw_events;
+       int                             num_pcrs;
+       int                             num_pic_regs;
 };
 
+static u32 sparc_default_read_pmc(int idx)
+{
+       u64 val;
+
+       val = pcr_ops->read_pic(0);
+       if (idx == PIC_UPPER_INDEX)
+               val >>= 32;
+
+       return val & 0xffffffff;
+}
+
+static void sparc_default_write_pmc(int idx, u64 val)
+{
+       u64 shift, mask, pic;
+
+       shift = 0;
+       if (idx == PIC_UPPER_INDEX)
+               shift = 32;
+
+       mask = ((u64) 0xffffffff) << shift;
+       val <<= shift;
+
+       pic = pcr_ops->read_pic(0);
+       pic &= ~mask;
+       pic |= val;
+       pcr_ops->write_pic(0, pic);
+}
+
 static const struct perf_event_map ultra3_perfmon_event_map[] = {
        [PERF_COUNT_HW_CPU_CYCLES] = { 0x0000, PIC_UPPER | PIC_LOWER },
        [PERF_COUNT_HW_INSTRUCTIONS] = { 0x0001, PIC_UPPER | PIC_LOWER },
@@ -268,11 +320,20 @@ static const struct sparc_pmu ultra3_pmu = {
        .event_map      = ultra3_event_map,
        .cache_map      = &ultra3_cache_map,
        .max_events     = ARRAY_SIZE(ultra3_perfmon_event_map),
+       .read_pmc       = sparc_default_read_pmc,
+       .write_pmc      = sparc_default_write_pmc,
        .upper_shift    = 11,
        .lower_shift    = 4,
        .event_mask     = 0x3f,
+       .user_bit       = PCR_UTRACE,
+       .priv_bit       = PCR_STRACE,
        .upper_nop      = 0x1c,
        .lower_nop      = 0x14,
+       .flags          = (SPARC_PMU_ALL_EXCLUDES_SAME |
+                          SPARC_PMU_HAS_CONFLICTS),
+       .max_hw_events  = 2,
+       .num_pcrs       = 1,
+       .num_pic_regs   = 1,
 };
 
 /* Niagara1 is very limited.  The upper PIC is hard-locked to count
@@ -397,11 +458,20 @@ static const struct sparc_pmu niagara1_pmu = {
        .event_map      = niagara1_event_map,
        .cache_map      = &niagara1_cache_map,
        .max_events     = ARRAY_SIZE(niagara1_perfmon_event_map),
+       .read_pmc       = sparc_default_read_pmc,
+       .write_pmc      = sparc_default_write_pmc,
        .upper_shift    = 0,
        .lower_shift    = 4,
        .event_mask     = 0x7,
+       .user_bit       = PCR_UTRACE,
+       .priv_bit       = PCR_STRACE,
        .upper_nop      = 0x0,
        .lower_nop      = 0x0,
+       .flags          = (SPARC_PMU_ALL_EXCLUDES_SAME |
+                          SPARC_PMU_HAS_CONFLICTS),
+       .max_hw_events  = 2,
+       .num_pcrs       = 1,
+       .num_pic_regs   = 1,
 };
 
 static const struct perf_event_map niagara2_perfmon_event_map[] = {
@@ -523,13 +593,203 @@ static const struct sparc_pmu niagara2_pmu = {
        .event_map      = niagara2_event_map,
        .cache_map      = &niagara2_cache_map,
        .max_events     = ARRAY_SIZE(niagara2_perfmon_event_map),
+       .read_pmc       = sparc_default_read_pmc,
+       .write_pmc      = sparc_default_write_pmc,
        .upper_shift    = 19,
        .lower_shift    = 6,
        .event_mask     = 0xfff,
-       .hv_bit         = 0x8,
+       .user_bit       = PCR_UTRACE,
+       .priv_bit       = PCR_STRACE,
+       .hv_bit         = PCR_N2_HTRACE,
        .irq_bit        = 0x30,
        .upper_nop      = 0x220,
        .lower_nop      = 0x220,
+       .flags          = (SPARC_PMU_ALL_EXCLUDES_SAME |
+                          SPARC_PMU_HAS_CONFLICTS),
+       .max_hw_events  = 2,
+       .num_pcrs       = 1,
+       .num_pic_regs   = 1,
+};
+
+static const struct perf_event_map niagara4_perfmon_event_map[] = {
+       [PERF_COUNT_HW_CPU_CYCLES] = { (26 << 6) },
+       [PERF_COUNT_HW_INSTRUCTIONS] = { (3 << 6) | 0x3f },
+       [PERF_COUNT_HW_CACHE_REFERENCES] = { (3 << 6) | 0x04 },
+       [PERF_COUNT_HW_CACHE_MISSES] = { (16 << 6) | 0x07 },
+       [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { (4 << 6) | 0x01 },
+       [PERF_COUNT_HW_BRANCH_MISSES] = { (25 << 6) | 0x0f },
+};
+
+static const struct perf_event_map *niagara4_event_map(int event_id)
+{
+       return &niagara4_perfmon_event_map[event_id];
+}
+
+static const cache_map_t niagara4_cache_map = {
+[C(L1D)] = {
+       [C(OP_READ)] = {
+               [C(RESULT_ACCESS)] = { (3 << 6) | 0x04 },
+               [C(RESULT_MISS)] = { (16 << 6) | 0x07 },
+       },
+       [C(OP_WRITE)] = {
+               [C(RESULT_ACCESS)] = { (3 << 6) | 0x08 },
+               [C(RESULT_MISS)] = { (16 << 6) | 0x07 },
+       },
+       [C(OP_PREFETCH)] = {
+               [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+               [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
+       },
+},
+[C(L1I)] = {
+       [C(OP_READ)] = {
+               [C(RESULT_ACCESS)] = { (3 << 6) | 0x3f },
+               [C(RESULT_MISS)] = { (11 << 6) | 0x03 },
+       },
+       [ C(OP_WRITE) ] = {
+               [ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE },
+               [ C(RESULT_MISS)   ] = { CACHE_OP_NONSENSE },
+       },
+       [ C(OP_PREFETCH) ] = {
+               [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+               [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
+       },
+},
+[C(LL)] = {
+       [C(OP_READ)] = {
+               [C(RESULT_ACCESS)] = { (3 << 6) | 0x04 },
+               [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
+       },
+       [C(OP_WRITE)] = {
+               [C(RESULT_ACCESS)] = { (3 << 6) | 0x08 },
+               [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
+       },
+       [C(OP_PREFETCH)] = {
+               [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+               [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
+       },
+},
+[C(DTLB)] = {
+       [C(OP_READ)] = {
+               [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+               [C(RESULT_MISS)] = { (17 << 6) | 0x3f },
+       },
+       [ C(OP_WRITE) ] = {
+               [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+               [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
+       },
+       [ C(OP_PREFETCH) ] = {
+               [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+               [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
+       },
+},
+[C(ITLB)] = {
+       [C(OP_READ)] = {
+               [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+               [C(RESULT_MISS)] = { (6 << 6) | 0x3f },
+       },
+       [ C(OP_WRITE) ] = {
+               [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+               [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
+       },
+       [ C(OP_PREFETCH) ] = {
+               [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+               [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
+       },
+},
+[C(BPU)] = {
+       [C(OP_READ)] = {
+               [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+               [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
+       },
+       [ C(OP_WRITE) ] = {
+               [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+               [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
+       },
+       [ C(OP_PREFETCH) ] = {
+               [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+               [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
+       },
+},
+[C(NODE)] = {
+       [C(OP_READ)] = {
+               [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+               [C(RESULT_MISS)  ] = { CACHE_OP_UNSUPPORTED },
+       },
+       [ C(OP_WRITE) ] = {
+               [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+               [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
+       },
+       [ C(OP_PREFETCH) ] = {
+               [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+               [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
+       },
+},
+};
+
+static u32 sparc_vt_read_pmc(int idx)
+{
+       u64 val = pcr_ops->read_pic(idx);
+
+       return val & 0xffffffff;
+}
+
+static void sparc_vt_write_pmc(int idx, u64 val)
+{
+       u64 pcr;
+
+       /* There seems to be an internal latch on the overflow event
+        * on SPARC-T4 that prevents it from triggering unless you
+        * update the PIC exactly as we do here.  The requirement
+        * seems to be that you have to turn off event counting in the
+        * PCR around the PIC update.
+        *
+        * For example, after the following sequence:
+        *
+        * 1) set PIC to -1
+        * 2) enable event counting and overflow reporting in PCR
+        * 3) overflow triggers, softint 15 handler invoked
+        * 4) clear OV bit in PCR
+        * 5) write PIC to -1
+        *
+        * a subsequent overflow event will not trigger.  This
+        * sequence works on SPARC-T3 and previous chips.
+        */
+       pcr = pcr_ops->read_pcr(idx);
+       pcr_ops->write_pcr(idx, PCR_N4_PICNPT);
+
+       pcr_ops->write_pic(idx, val & 0xffffffff);
+
+       pcr_ops->write_pcr(idx, pcr);
+}
+
+static const struct sparc_pmu niagara4_pmu = {
+       .event_map      = niagara4_event_map,
+       .cache_map      = &niagara4_cache_map,
+       .max_events     = ARRAY_SIZE(niagara4_perfmon_event_map),
+       .read_pmc       = sparc_vt_read_pmc,
+       .write_pmc      = sparc_vt_write_pmc,
+       .upper_shift    = 5,
+       .lower_shift    = 5,
+       .event_mask     = 0x7ff,
+       .user_bit       = PCR_N4_UTRACE,
+       .priv_bit       = PCR_N4_STRACE,
+
+       /* We explicitly don't support hypervisor tracing.  The T4
+        * generates the overflow event for precise events via a trap
+        * which will not be generated (ie. it's completely lost) if
+        * we happen to be in the hypervisor when the event triggers.
+        * Essentially, the overflow event reporting is completely
+        * unusable when you have hypervisor mode tracing enabled.
+        */
+       .hv_bit         = 0,
+
+       .irq_bit        = PCR_N4_TOE,
+       .upper_nop      = 0,
+       .lower_nop      = 0,
+       .flags          = 0,
+       .max_hw_events  = 4,
+       .num_pcrs       = 4,
+       .num_pic_regs   = 4,
 };
 
 static const struct sparc_pmu *sparc_pmu __read_mostly;
@@ -558,55 +818,35 @@ static u64 nop_for_index(int idx)
 static inline void sparc_pmu_enable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx)
 {
        u64 val, mask = mask_for_index(idx);
+       int pcr_index = 0;
 
-       val = cpuc->pcr;
+       if (sparc_pmu->num_pcrs > 1)
+               pcr_index = idx;
+
+       val = cpuc->pcr[pcr_index];
        val &= ~mask;
        val |= hwc->config;
-       cpuc->pcr = val;
+       cpuc->pcr[pcr_index] = val;
 
-       pcr_ops->write(cpuc->pcr);
+       pcr_ops->write_pcr(pcr_index, cpuc->pcr[pcr_index]);
 }
 
 static inline void sparc_pmu_disable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx)
 {
        u64 mask = mask_for_index(idx);
        u64 nop = nop_for_index(idx);
+       int pcr_index = 0;
        u64 val;
 
-       val = cpuc->pcr;
+       if (sparc_pmu->num_pcrs > 1)
+               pcr_index = idx;
+
+       val = cpuc->pcr[pcr_index];
        val &= ~mask;
        val |= nop;
-       cpuc->pcr = val;
+       cpuc->pcr[pcr_index] = val;
 
-       pcr_ops->write(cpuc->pcr);
-}
-
-static u32 read_pmc(int idx)
-{
-       u64 val;
-
-       read_pic(val);
-       if (idx == PIC_UPPER_INDEX)
-               val >>= 32;
-
-       return val & 0xffffffff;
-}
-
-static void write_pmc(int idx, u64 val)
-{
-       u64 shift, mask, pic;
-
-       shift = 0;
-       if (idx == PIC_UPPER_INDEX)
-               shift = 32;
-
-       mask = ((u64) 0xffffffff) << shift;
-       val <<= shift;
-
-       read_pic(pic);
-       pic &= ~mask;
-       pic |= val;
-       write_pic(pic);
+       pcr_ops->write_pcr(pcr_index, cpuc->pcr[pcr_index]);
 }
 
 static u64 sparc_perf_event_update(struct perf_event *event,
@@ -618,7 +858,7 @@ static u64 sparc_perf_event_update(struct perf_event *event,
 
 again:
        prev_raw_count = local64_read(&hwc->prev_count);
-       new_raw_count = read_pmc(idx);
+       new_raw_count = sparc_pmu->read_pmc(idx);
 
        if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
                             new_raw_count) != prev_raw_count)
@@ -658,25 +898,17 @@ static int sparc_perf_event_set_period(struct perf_event *event,
 
        local64_set(&hwc->prev_count, (u64)-left);
 
-       write_pmc(idx, (u64)(-left) & 0xffffffff);
+       sparc_pmu->write_pmc(idx, (u64)(-left) & 0xffffffff);
 
        perf_event_update_userpage(event);
 
        return ret;
 }
 
-/* If performance event entries have been added, move existing
- * events around (if necessary) and then assign new entries to
- * counters.
- */
-static u64 maybe_change_configuration(struct cpu_hw_events *cpuc, u64 pcr)
+static void read_in_all_counters(struct cpu_hw_events *cpuc)
 {
        int i;
 
-       if (!cpuc->n_added)
-               goto out;
-
-       /* Read in the counters which are moving.  */
        for (i = 0; i < cpuc->n_events; i++) {
                struct perf_event *cp = cpuc->event[i];
 
@@ -687,6 +919,20 @@ static u64 maybe_change_configuration(struct cpu_hw_events *cpuc, u64 pcr)
                        cpuc->current_idx[i] = PIC_NO_INDEX;
                }
        }
+}
+
+/* On this PMU all PICs are programmed using a single PCR.  Calculate
+ * the combined control register value.
+ *
+ * For such chips we require that all of the events have the same
+ * configuration, so just fetch the settings from the first entry.
+ */
+static void calculate_single_pcr(struct cpu_hw_events *cpuc)
+{
+       int i;
+
+       if (!cpuc->n_added)
+               goto out;
 
        /* Assign to counters all unassigned events.  */
        for (i = 0; i < cpuc->n_events; i++) {
@@ -702,20 +948,71 @@ static u64 maybe_change_configuration(struct cpu_hw_events *cpuc, u64 pcr)
                cpuc->current_idx[i] = idx;
 
                enc = perf_event_get_enc(cpuc->events[i]);
-               pcr &= ~mask_for_index(idx);
+               cpuc->pcr[0] &= ~mask_for_index(idx);
                if (hwc->state & PERF_HES_STOPPED)
-                       pcr |= nop_for_index(idx);
+                       cpuc->pcr[0] |= nop_for_index(idx);
                else
-                       pcr |= event_encoding(enc, idx);
+                       cpuc->pcr[0] |= event_encoding(enc, idx);
        }
 out:
-       return pcr;
+       cpuc->pcr[0] |= cpuc->event[0]->hw.config_base;
+}
+
+/* On this PMU each PIC has it's own PCR control register.  */
+static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc)
+{
+       int i;
+
+       if (!cpuc->n_added)
+               goto out;
+
+       for (i = 0; i < cpuc->n_events; i++) {
+               struct perf_event *cp = cpuc->event[i];
+               struct hw_perf_event *hwc = &cp->hw;
+               int idx = hwc->idx;
+               u64 enc;
+
+               if (cpuc->current_idx[i] != PIC_NO_INDEX)
+                       continue;
+
+               sparc_perf_event_set_period(cp, hwc, idx);
+               cpuc->current_idx[i] = idx;
+
+               enc = perf_event_get_enc(cpuc->events[i]);
+               cpuc->pcr[idx] &= ~mask_for_index(idx);
+               if (hwc->state & PERF_HES_STOPPED)
+                       cpuc->pcr[idx] |= nop_for_index(idx);
+               else
+                       cpuc->pcr[idx] |= event_encoding(enc, idx);
+       }
+out:
+       for (i = 0; i < cpuc->n_events; i++) {
+               struct perf_event *cp = cpuc->event[i];
+               int idx = cp->hw.idx;
+
+               cpuc->pcr[idx] |= cp->hw.config_base;
+       }
+}
+
+/* If performance event entries have been added, move existing events
+ * around (if necessary) and then assign new entries to counters.
+ */
+static void update_pcrs_for_enable(struct cpu_hw_events *cpuc)
+{
+       if (cpuc->n_added)
+               read_in_all_counters(cpuc);
+
+       if (sparc_pmu->num_pcrs == 1) {
+               calculate_single_pcr(cpuc);
+       } else {
+               calculate_multiple_pcrs(cpuc);
+       }
 }
 
 static void sparc_pmu_enable(struct pmu *pmu)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-       u64 pcr;
+       int i;
 
        if (cpuc->enabled)
                return;
@@ -723,26 +1020,17 @@ static void sparc_pmu_enable(struct pmu *pmu)
        cpuc->enabled = 1;
        barrier();
 
-       pcr = cpuc->pcr;
-       if (!cpuc->n_events) {
-               pcr = 0;
-       } else {
-               pcr = maybe_change_configuration(cpuc, pcr);
-
-               /* We require that all of the events have the same
-                * configuration, so just fetch the settings from the
-                * first entry.
-                */
-               cpuc->pcr = pcr | cpuc->event[0]->hw.config_base;
-       }
+       if (cpuc->n_events)
+               update_pcrs_for_enable(cpuc);
 
-       pcr_ops->write(cpuc->pcr);
+       for (i = 0; i < sparc_pmu->num_pcrs; i++)
+               pcr_ops->write_pcr(i, cpuc->pcr[i]);
 }
 
 static void sparc_pmu_disable(struct pmu *pmu)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-       u64 val;
+       int i;
 
        if (!cpuc->enabled)
                return;
@@ -750,12 +1038,14 @@ static void sparc_pmu_disable(struct pmu *pmu)
        cpuc->enabled = 0;
        cpuc->n_added = 0;
 
-       val = cpuc->pcr;
-       val &= ~(PCR_UTRACE | PCR_STRACE |
-                sparc_pmu->hv_bit | sparc_pmu->irq_bit);
-       cpuc->pcr = val;
+       for (i = 0; i < sparc_pmu->num_pcrs; i++) {
+               u64 val = cpuc->pcr[i];
 
-       pcr_ops->write(cpuc->pcr);
+               val &= ~(sparc_pmu->user_bit | sparc_pmu->priv_bit |
+                        sparc_pmu->hv_bit | sparc_pmu->irq_bit);
+               cpuc->pcr[i] = val;
+               pcr_ops->write_pcr(i, cpuc->pcr[i]);
+       }
 }
 
 static int active_event_index(struct cpu_hw_events *cpuc,
@@ -854,9 +1144,11 @@ static DEFINE_MUTEX(pmc_grab_mutex);
 static void perf_stop_nmi_watchdog(void *unused)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+       int i;
 
        stop_nmi_watchdog(NULL);
-       cpuc->pcr = pcr_ops->read();
+       for (i = 0; i < sparc_pmu->num_pcrs; i++)
+               cpuc->pcr[i] = pcr_ops->read_pcr(i);
 }
 
 void perf_event_grab_pmc(void)
@@ -942,9 +1234,17 @@ static int sparc_check_constraints(struct perf_event **evts,
        if (!n_ev)
                return 0;
 
-       if (n_ev > MAX_HWEVENTS)
+       if (n_ev > sparc_pmu->max_hw_events)
                return -1;
 
+       if (!(sparc_pmu->flags & SPARC_PMU_HAS_CONFLICTS)) {
+               int i;
+
+               for (i = 0; i < n_ev; i++)
+                       evts[i]->hw.idx = i;
+               return 0;
+       }
+
        msk0 = perf_event_get_msk(events[0]);
        if (n_ev == 1) {
                if (msk0 & PIC_LOWER)
@@ -1000,6 +1300,9 @@ static int check_excludes(struct perf_event **evts, int n_prev, int n_new)
        struct perf_event *event;
        int i, n, first;
 
+       if (!(sparc_pmu->flags & SPARC_PMU_ALL_EXCLUDES_SAME))
+               return 0;
+
        n = n_prev + n_new;
        if (n <= 1)
                return 0;
@@ -1059,7 +1362,7 @@ static int sparc_pmu_add(struct perf_event *event, int ef_flags)
        perf_pmu_disable(event->pmu);
 
        n0 = cpuc->n_events;
-       if (n0 >= MAX_HWEVENTS)
+       if (n0 >= sparc_pmu->max_hw_events)
                goto out;
 
        cpuc->event[n0] = event;
@@ -1146,16 +1449,16 @@ static int sparc_pmu_event_init(struct perf_event *event)
        /* We save the enable bits in the config_base.  */
        hwc->config_base = sparc_pmu->irq_bit;
        if (!attr->exclude_user)
-               hwc->config_base |= PCR_UTRACE;
+               hwc->config_base |= sparc_pmu->user_bit;
        if (!attr->exclude_kernel)
-               hwc->config_base |= PCR_STRACE;
+               hwc->config_base |= sparc_pmu->priv_bit;
        if (!attr->exclude_hv)
                hwc->config_base |= sparc_pmu->hv_bit;
 
        n = 0;
        if (event->group_leader != event) {
                n = collect_events(event->group_leader,
-                                  MAX_HWEVENTS - 1,
+                                  sparc_pmu->max_hw_events - 1,
                                   evts, events, current_idx_dmy);
                if (n < 0)
                        return -EINVAL;
@@ -1254,8 +1557,7 @@ static struct pmu pmu = {
 void perf_event_print_debug(void)
 {
        unsigned long flags;
-       u64 pcr, pic;
-       int cpu;
+       int cpu, i;
 
        if (!sparc_pmu)
                return;
@@ -1264,12 +1566,13 @@ void perf_event_print_debug(void)
 
        cpu = smp_processor_id();
 
-       pcr = pcr_ops->read();
-       read_pic(pic);
-
        pr_info("\n");
-       pr_info("CPU#%d: PCR[%016llx] PIC[%016llx]\n",
-               cpu, pcr, pic);
+       for (i = 0; i < sparc_pmu->num_pcrs; i++)
+               pr_info("CPU#%d: PCR%d[%016llx]\n",
+                       cpu, i, pcr_ops->read_pcr(i));
+       for (i = 0; i < sparc_pmu->num_pic_regs; i++)
+               pr_info("CPU#%d: PIC%d[%016llx]\n",
+                       cpu, i, pcr_ops->read_pic(i));
 
        local_irq_restore(flags);
 }
@@ -1305,8 +1608,9 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
         * Do this before we peek at the counters to determine
         * overflow so we don't lose any events.
         */
-       if (sparc_pmu->irq_bit)
-               pcr_ops->write(cpuc->pcr);
+       if (sparc_pmu->irq_bit &&
+           sparc_pmu->num_pcrs == 1)
+               pcr_ops->write_pcr(0, cpuc->pcr[0]);
 
        for (i = 0; i < cpuc->n_events; i++) {
                struct perf_event *event = cpuc->event[i];
@@ -1314,6 +1618,10 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
                struct hw_perf_event *hwc;
                u64 val;
 
+               if (sparc_pmu->irq_bit &&
+                   sparc_pmu->num_pcrs > 1)
+                       pcr_ops->write_pcr(idx, cpuc->pcr[idx]);
+
                hwc = &event->hw;
                val = sparc_perf_event_update(event, hwc, idx);
                if (val & (1ULL << 31))
@@ -1352,6 +1660,10 @@ static bool __init supported_pmu(void)
                sparc_pmu = &niagara2_pmu;
                return true;
        }
+       if (!strcmp(sparc_pmu_type, "niagara4")) {
+               sparc_pmu = &niagara4_pmu;
+               return true;
+       }
        return false;
 }
 
index 340c5b976d280ba9948dd5b4774759d4baf401be..d397d7fc5c2830e288ddc60abc0c6465368db28f 100644 (file)
@@ -37,7 +37,7 @@ void * __init prom_early_alloc(unsigned long size)
        void *ret;
 
        if (!paddr) {
-               prom_printf("prom_early_alloc(%lu) failed\n");
+               prom_printf("prom_early_alloc(%lu) failed\n", size);
                prom_halt();
        }
 
index 1414d16712b272fd966bb03d94fa810f53425f19..0800e71d8a880242083688b385e88d564e4a681a 100644 (file)
@@ -340,7 +340,12 @@ static const char *hwcaps[] = {
         */
        "mul32", "div32", "fsmuld", "v8plus", "popc", "vis", "vis2",
        "ASIBlkInit", "fmaf", "vis3", "hpc", "random", "trans", "fjfmau",
-       "ima", "cspare",
+       "ima", "cspare", "pause", "cbcond",
+};
+
+static const char *crypto_hwcaps[] = {
+       "aes", "des", "kasumi", "camellia", "md5", "sha1", "sha256",
+       "sha512", "mpmul", "montmul", "montsqr", "crc32c",
 };
 
 void cpucap_info(struct seq_file *m)
@@ -357,27 +362,61 @@ void cpucap_info(struct seq_file *m)
                        printed++;
                }
        }
+       if (caps & HWCAP_SPARC_CRYPTO) {
+               unsigned long cfr;
+
+               __asm__ __volatile__("rd %%asr26, %0" : "=r" (cfr));
+               for (i = 0; i < ARRAY_SIZE(crypto_hwcaps); i++) {
+                       unsigned long bit = 1UL << i;
+                       if (cfr & bit) {
+                               seq_printf(m, "%s%s",
+                                          printed ? "," : "", crypto_hwcaps[i]);
+                               printed++;
+                       }
+               }
+       }
        seq_putc(m, '\n');
 }
 
+static void __init report_one_hwcap(int *printed, const char *name)
+{
+       if ((*printed) == 0)
+               printk(KERN_INFO "CPU CAPS: [");
+       printk(KERN_CONT "%s%s",
+              (*printed) ? "," : "", name);
+       if (++(*printed) == 8) {
+               printk(KERN_CONT "]\n");
+               *printed = 0;
+       }
+}
+
+static void __init report_crypto_hwcaps(int *printed)
+{
+       unsigned long cfr;
+       int i;
+
+       __asm__ __volatile__("rd %%asr26, %0" : "=r" (cfr));
+
+       for (i = 0; i < ARRAY_SIZE(crypto_hwcaps); i++) {
+               unsigned long bit = 1UL << i;
+               if (cfr & bit)
+                       report_one_hwcap(printed, crypto_hwcaps[i]);
+       }
+}
+
 static void __init report_hwcaps(unsigned long caps)
 {
        int i, printed = 0;
 
-       printk(KERN_INFO "CPU CAPS: [");
        for (i = 0; i < ARRAY_SIZE(hwcaps); i++) {
                unsigned long bit = 1UL << i;
-               if (caps & bit) {
-                       printk(KERN_CONT "%s%s",
-                              printed ? "," : "", hwcaps[i]);
-                       if (++printed == 8) {
-                               printk(KERN_CONT "]\n");
-                               printk(KERN_INFO "CPU CAPS: [");
-                               printed = 0;
-                       }
-               }
+               if (caps & bit)
+                       report_one_hwcap(&printed, hwcaps[i]);
        }
-       printk(KERN_CONT "]\n");
+       if (caps & HWCAP_SPARC_CRYPTO)
+               report_crypto_hwcaps(&printed);
+       if (printed != 0)
+               printk(KERN_CONT "]\n");
 }
 
 static unsigned long __init mdesc_cpu_hwcap_list(void)
@@ -411,6 +450,10 @@ static unsigned long __init mdesc_cpu_hwcap_list(void)
                                break;
                        }
                }
+               for (i = 0; i < ARRAY_SIZE(crypto_hwcaps); i++) {
+                       if (!strcmp(prop, crypto_hwcaps[i]))
+                               caps |= HWCAP_SPARC_CRYPTO;
+               }
 
                plen = strlen(prop) + 1;
                prop += plen;
index d97f3eb72e064d70cf1a6dbb9cbd8d8cac4c2d40..44025f4ba41f883d61647f180575651ee5113645 100644 (file)
@@ -90,7 +90,7 @@ SIGN1(sys32_mkdir, sys_mkdir, %o1)
 SIGN3(sys32_futex, compat_sys_futex, %o1, %o2, %o5)
 SIGN1(sys32_sysfs, compat_sys_sysfs, %o0)
 SIGN2(sys32_sendfile, compat_sys_sendfile, %o0, %o1)
-SIGN2(sys32_sendfile64, compat_sys_sendfile64, %o0, %o1)
+SIGN2(sys32_sendfile64, sys_sendfile, %o0, %o1)
 SIGN1(sys32_prctl, sys_prctl, %o0)
 SIGN1(sys32_sched_rr_get_interval, compat_sys_sched_rr_get_interval, %o0)
 SIGN2(sys32_waitpid, sys_waitpid, %o0, %o2)
index f7392336961f84a956d3d693c72df3a8674b5d97..d862499eb01ccfe298b7101190dfc29f3ebcf44d 100644 (file)
@@ -506,52 +506,6 @@ long compat_sys_fadvise64_64(int fd,
                                advice);
 }
 
-asmlinkage long compat_sys_sendfile(int out_fd, int in_fd,
-                                   compat_off_t __user *offset,
-                                   compat_size_t count)
-{
-       mm_segment_t old_fs = get_fs();
-       int ret;
-       off_t of;
-       
-       if (offset && get_user(of, offset))
-               return -EFAULT;
-               
-       set_fs(KERNEL_DS);
-       ret = sys_sendfile(out_fd, in_fd,
-                          offset ? (off_t __user *) &of : NULL,
-                          count);
-       set_fs(old_fs);
-       
-       if (offset && put_user(of, offset))
-               return -EFAULT;
-               
-       return ret;
-}
-
-asmlinkage long compat_sys_sendfile64(int out_fd, int in_fd,
-                                     compat_loff_t __user *offset,
-                                     compat_size_t count)
-{
-       mm_segment_t old_fs = get_fs();
-       int ret;
-       loff_t lof;
-       
-       if (offset && get_user(lof, offset))
-               return -EFAULT;
-               
-       set_fs(KERNEL_DS);
-       ret = sys_sendfile64(out_fd, in_fd,
-                            offset ? (loff_t __user *) &lof : NULL,
-                            count);
-       set_fs(old_fs);
-       
-       if (offset && put_user(lof, offset))
-               return -EFAULT;
-               
-       return ret;
-}
-
 /* This is just a version for 32-bit applications which does
  * not force O_LARGEFILE on.
  */
index 3b05e6697710da1718be093e1f56dccaddc04e94..fa1f1d375ffc263571ab3b75e4dd0477284d583b 100644 (file)
@@ -850,7 +850,7 @@ void __init cheetah_ecache_flush_init(void)
        ecache_flush_physbase = find_ecache_flush_span(ecache_flush_size);
 
        if (ecache_flush_physbase == ~0UL) {
-               prom_printf("cheetah_ecache_flush_init: Cannot find %d byte "
+               prom_printf("cheetah_ecache_flush_init: Cannot find %ld byte "
                            "contiguous physical memory.\n",
                            ecache_flush_size);
                prom_halt();
index dff4096f3dec045472e037a62d644d1e8cbb910e..30f6ab51c551593e2763584cee49e7e71147876b 100644 (file)
@@ -32,6 +32,9 @@ lib-$(CONFIG_SPARC64) += NGpatch.o NGpage.o NGbzero.o
 lib-$(CONFIG_SPARC64) += NG2memcpy.o NG2copy_from_user.o NG2copy_to_user.o
 lib-$(CONFIG_SPARC64) +=  NG2patch.o
 
+lib-$(CONFIG_SPARC64) += NG4memcpy.o NG4copy_from_user.o NG4copy_to_user.o
+lib-$(CONFIG_SPARC64) +=  NG4patch.o NG4copy_page.o
+
 lib-$(CONFIG_SPARC64) += GENmemcpy.o GENcopy_from_user.o GENcopy_to_user.o
 lib-$(CONFIG_SPARC64) += GENpatch.o GENpage.o GENbzero.o
 
index 03eadf66b0d37ba57bb4516b7ec5e97dc219aa32..2c20ad63ddbf2bbf8a4da5e751e49650d8be7060 100644 (file)
@@ -14,7 +14,7 @@
 #define FPRS_FEF  0x04
 #ifdef MEMCPY_DEBUG
 #define VISEntryHalf rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs; \
-                    clr %g1; clr %g2; clr %g3; subcc %g0, %g0, %g0;
+                    clr %g1; clr %g2; clr %g3; clr %g5; subcc %g0, %g0, %g0;
 #define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
 #else
 #define VISEntryHalf rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs
@@ -182,13 +182,13 @@ FUNC_NAME:        /* %o0=dst, %o1=src, %o2=len */
        cmp             %g2, 0
        tne             %xcc, 5
        PREAMBLE
-       mov             %o0, GLOBAL_SPARE
+       mov             %o0, %o3
        cmp             %o2, 0
        be,pn           %XCC, 85f
-        or             %o0, %o1, %o3
+        or             %o0, %o1, GLOBAL_SPARE
        cmp             %o2, 16
        blu,a,pn        %XCC, 80f
-        or             %o3, %o2, %o3
+        or             GLOBAL_SPARE, %o2, GLOBAL_SPARE
 
        /* 2 blocks (128 bytes) is the minimum we can do the block
         * copy with.  We need to ensure that we'll iterate at least
@@ -202,7 +202,7 @@ FUNC_NAME:  /* %o0=dst, %o1=src, %o2=len */
         */
        cmp             %o2, (4 * 64)
        blu,pt          %XCC, 75f
-        andcc          %o3, 0x7, %g0
+        andcc          GLOBAL_SPARE, 0x7, %g0
 
        /* %o0: dst
         * %o1: src
@@ -404,13 +404,13 @@ FUNC_NAME:        /* %o0=dst, %o1=src, %o2=len */
         * over. If anything is left, we copy it one byte at a time.
         */
        brz,pt          %o2, 85f
-        sub            %o0, %o1, %o3
+        sub            %o0, %o1, GLOBAL_SPARE
        ba,a,pt         %XCC, 90f
 
        .align          64
 75: /* 16 < len <= 64 */
        bne,pn          %XCC, 75f
-        sub            %o0, %o1, %o3
+        sub            %o0, %o1, GLOBAL_SPARE
 
 72:
        andn            %o2, 0xf, %o4
@@ -420,9 +420,9 @@ FUNC_NAME:  /* %o0=dst, %o1=src, %o2=len */
        add             %o1, 0x08, %o1
        EX_LD(LOAD(ldx, %o1, %g1))
        sub             %o1, 0x08, %o1
-       EX_ST(STORE(stx, %o5, %o1 + %o3))
+       EX_ST(STORE(stx, %o5, %o1 + GLOBAL_SPARE))
        add             %o1, 0x8, %o1
-       EX_ST(STORE(stx, %g1, %o1 + %o3))
+       EX_ST(STORE(stx, %g1, %o1 + GLOBAL_SPARE))
        bgu,pt          %XCC, 1b
         add            %o1, 0x8, %o1
 73:    andcc           %o2, 0x8, %g0
@@ -430,14 +430,14 @@ FUNC_NAME:        /* %o0=dst, %o1=src, %o2=len */
         nop
        sub             %o2, 0x8, %o2
        EX_LD(LOAD(ldx, %o1, %o5))
-       EX_ST(STORE(stx, %o5, %o1 + %o3))
+       EX_ST(STORE(stx, %o5, %o1 + GLOBAL_SPARE))
        add             %o1, 0x8, %o1
 1:     andcc           %o2, 0x4, %g0
        be,pt           %XCC, 1f
         nop
        sub             %o2, 0x4, %o2
        EX_LD(LOAD(lduw, %o1, %o5))
-       EX_ST(STORE(stw, %o5, %o1 + %o3))
+       EX_ST(STORE(stw, %o5, %o1 + GLOBAL_SPARE))
        add             %o1, 0x4, %o1
 1:     cmp             %o2, 0
        be,pt           %XCC, 85f
@@ -454,11 +454,11 @@ FUNC_NAME:        /* %o0=dst, %o1=src, %o2=len */
 
 1:     subcc           %g1, 1, %g1
        EX_LD(LOAD(ldub, %o1, %o5))
-       EX_ST(STORE(stb, %o5, %o1 + %o3))
+       EX_ST(STORE(stb, %o5, %o1 + GLOBAL_SPARE))
        bgu,pt          %icc, 1b
         add            %o1, 1, %o1
 
-2:     add             %o1, %o3, %o0
+2:     add             %o1, GLOBAL_SPARE, %o0
        andcc           %o1, 0x7, %g1
        bne,pt          %icc, 8f
         sll            %g1, 3, %g1
@@ -468,16 +468,16 @@ FUNC_NAME:        /* %o0=dst, %o1=src, %o2=len */
         nop
        ba,a,pt         %xcc, 73b
 
-8:     mov             64, %o3
+8:     mov             64, GLOBAL_SPARE
        andn            %o1, 0x7, %o1
        EX_LD(LOAD(ldx, %o1, %g2))
-       sub             %o3, %g1, %o3
+       sub             GLOBAL_SPARE, %g1, GLOBAL_SPARE
        andn            %o2, 0x7, %o4
        sllx            %g2, %g1, %g2
 1:     add             %o1, 0x8, %o1
        EX_LD(LOAD(ldx, %o1, %g3))
        subcc           %o4, 0x8, %o4
-       srlx            %g3, %o3, %o5
+       srlx            %g3, GLOBAL_SPARE, %o5
        or              %o5, %g2, %o5
        EX_ST(STORE(stx, %o5, %o0))
        add             %o0, 0x8, %o0
@@ -489,32 +489,32 @@ FUNC_NAME:        /* %o0=dst, %o1=src, %o2=len */
        be,pn           %icc, 85f
         add            %o1, %g1, %o1
        ba,pt           %xcc, 90f
-        sub            %o0, %o1, %o3
+        sub            %o0, %o1, GLOBAL_SPARE
 
        .align          64
 80: /* 0 < len <= 16 */
-       andcc           %o3, 0x3, %g0
+       andcc           GLOBAL_SPARE, 0x3, %g0
        bne,pn          %XCC, 90f
-        sub            %o0, %o1, %o3
+        sub            %o0, %o1, GLOBAL_SPARE
 
 1:
        subcc           %o2, 4, %o2
        EX_LD(LOAD(lduw, %o1, %g1))
-       EX_ST(STORE(stw, %g1, %o1 + %o3))
+       EX_ST(STORE(stw, %g1, %o1 + GLOBAL_SPARE))
        bgu,pt          %XCC, 1b
         add            %o1, 4, %o1
 
 85:    retl
-        mov            EX_RETVAL(GLOBAL_SPARE), %o0
+        mov            EX_RETVAL(%o3), %o0
 
        .align          32
 90:
        subcc           %o2, 1, %o2
        EX_LD(LOAD(ldub, %o1, %g1))
-       EX_ST(STORE(stb, %g1, %o1 + %o3))
+       EX_ST(STORE(stb, %g1, %o1 + GLOBAL_SPARE))
        bgu,pt          %XCC, 90b
         add            %o1, 1, %o1
        retl
-        mov            EX_RETVAL(GLOBAL_SPARE), %o0
+        mov            EX_RETVAL(%o3), %o0
 
        .size           FUNC_NAME, .-FUNC_NAME
diff --git a/arch/sparc/lib/NG4copy_from_user.S b/arch/sparc/lib/NG4copy_from_user.S
new file mode 100644 (file)
index 0000000..fd9f903
--- /dev/null
@@ -0,0 +1,30 @@
+/* NG4copy_from_user.S: Niagara-4 optimized copy from userspace.
+ *
+ * Copyright (C) 2012 David S. Miller (davem@davemloft.net)
+ */
+
+#define EX_LD(x)               \
+98:    x;                      \
+       .section __ex_table,"a";\
+       .align 4;               \
+       .word 98b, __retl_one_asi;\
+       .text;                  \
+       .align 4;
+
+#ifndef ASI_AIUS
+#define ASI_AIUS       0x11
+#endif
+
+#define FUNC_NAME              NG4copy_from_user
+#define LOAD(type,addr,dest)   type##a [addr] %asi, dest
+#define EX_RETVAL(x)           0
+
+#ifdef __KERNEL__
+#define PREAMBLE                                       \
+       rd              %asi, %g1;                      \
+       cmp             %g1, ASI_AIUS;                  \
+       bne,pn          %icc, ___copy_in_user;          \
+        nop
+#endif
+
+#include "NG4memcpy.S"
diff --git a/arch/sparc/lib/NG4copy_page.S b/arch/sparc/lib/NG4copy_page.S
new file mode 100644 (file)
index 0000000..f30ec10
--- /dev/null
@@ -0,0 +1,57 @@
+/* NG4copy_page.S: Niagara-4 optimized copy page.
+ *
+ * Copyright (C) 2012 (davem@davemloft.net)
+ */
+
+#include <asm/asi.h>
+#include <asm/page.h>
+
+       .text
+       .align          32
+
+       .register       %g2, #scratch
+       .register       %g3, #scratch
+
+       .globl          NG4copy_user_page
+NG4copy_user_page:     /* %o0=dest, %o1=src, %o2=vaddr */
+       prefetch        [%o1 + 0x000], #n_reads_strong
+       prefetch        [%o1 + 0x040], #n_reads_strong
+       prefetch        [%o1 + 0x080], #n_reads_strong
+       prefetch        [%o1 + 0x0c0], #n_reads_strong
+       set             PAGE_SIZE, %g7
+       prefetch        [%o1 + 0x100], #n_reads_strong
+       prefetch        [%o1 + 0x140], #n_reads_strong
+       prefetch        [%o1 + 0x180], #n_reads_strong
+       prefetch        [%o1 + 0x1c0], #n_reads_strong
+1:
+       ldx             [%o1 + 0x00], %o2
+       subcc           %g7, 0x40, %g7
+       ldx             [%o1 + 0x08], %o3
+       ldx             [%o1 + 0x10], %o4
+       ldx             [%o1 + 0x18], %o5
+       ldx             [%o1 + 0x20], %g1
+       stxa            %o2, [%o0] ASI_BLK_INIT_QUAD_LDD_P
+       add             %o0, 0x08, %o0
+       ldx             [%o1 + 0x28], %g2
+       stxa            %o3, [%o0] ASI_BLK_INIT_QUAD_LDD_P
+       add             %o0, 0x08, %o0
+       ldx             [%o1 + 0x30], %g3
+       stxa            %o4, [%o0] ASI_BLK_INIT_QUAD_LDD_P
+       add             %o0, 0x08, %o0
+       ldx             [%o1 + 0x38], %o2
+       add             %o1, 0x40, %o1
+       stxa            %o5, [%o0] ASI_BLK_INIT_QUAD_LDD_P
+       add             %o0, 0x08, %o0
+       stxa            %g1, [%o0] ASI_BLK_INIT_QUAD_LDD_P
+       add             %o0, 0x08, %o0
+       stxa            %g2, [%o0] ASI_BLK_INIT_QUAD_LDD_P
+       add             %o0, 0x08, %o0
+       stxa            %g3, [%o0] ASI_BLK_INIT_QUAD_LDD_P
+       add             %o0, 0x08, %o0
+       stxa            %o2, [%o0] ASI_BLK_INIT_QUAD_LDD_P
+       add             %o0, 0x08, %o0
+       bne,pt          %icc, 1b
+        prefetch       [%o1 + 0x200], #n_reads_strong
+       retl
+        membar         #StoreLoad | #StoreStore
+       .size           NG4copy_user_page,.-NG4copy_user_page
diff --git a/arch/sparc/lib/NG4copy_to_user.S b/arch/sparc/lib/NG4copy_to_user.S
new file mode 100644 (file)
index 0000000..9744c45
--- /dev/null
@@ -0,0 +1,39 @@
+/* NG4copy_to_user.S: Niagara-4 optimized copy to userspace.
+ *
+ * Copyright (C) 2012 David S. Miller (davem@davemloft.net)
+ */
+
+#define EX_ST(x)               \
+98:    x;                      \
+       .section __ex_table,"a";\
+       .align 4;               \
+       .word 98b, __retl_one_asi;\
+       .text;                  \
+       .align 4;
+
+#ifndef ASI_AIUS
+#define ASI_AIUS       0x11
+#endif
+
+#ifndef ASI_BLK_INIT_QUAD_LDD_AIUS
+#define ASI_BLK_INIT_QUAD_LDD_AIUS 0x23
+#endif
+
+#define FUNC_NAME              NG4copy_to_user
+#define STORE(type,src,addr)   type##a src, [addr] %asi
+#define STORE_ASI              ASI_BLK_INIT_QUAD_LDD_AIUS
+#define EX_RETVAL(x)           0
+
+#ifdef __KERNEL__
+       /* Writing to %asi is _expensive_ so we hardcode it.
+        * Reading %asi to check for KERNEL_DS is comparatively
+        * cheap.
+        */
+#define PREAMBLE                                       \
+       rd              %asi, %g1;                      \
+       cmp             %g1, ASI_AIUS;                  \
+       bne,pn          %icc, ___copy_in_user;          \
+        nop
+#endif
+
+#include "NG4memcpy.S"
diff --git a/arch/sparc/lib/NG4memcpy.S b/arch/sparc/lib/NG4memcpy.S
new file mode 100644 (file)
index 0000000..9cf2ee0
--- /dev/null
@@ -0,0 +1,360 @@
+/* NG4memcpy.S: Niagara-4 optimized memcpy.
+ *
+ * Copyright (C) 2012 David S. Miller (davem@davemloft.net)
+ */
+
+#ifdef __KERNEL__
+#include <asm/visasm.h>
+#include <asm/asi.h>
+#define GLOBAL_SPARE   %g7
+#else
+#define ASI_BLK_INIT_QUAD_LDD_P 0xe2
+#define FPRS_FEF  0x04
+
+/* On T4 it is very expensive to access ASRs like %fprs and
+ * %asi, avoiding a read or a write can save ~50 cycles.
+ */
+#define FPU_ENTER                      \
+       rd      %fprs, %o5;             \
+       andcc   %o5, FPRS_FEF, %g0;     \
+       be,a,pn %icc, 999f;             \
+        wr     %g0, FPRS_FEF, %fprs;   \
+       999:
+
+#ifdef MEMCPY_DEBUG
+#define VISEntryHalf FPU_ENTER; \
+                    clr %g1; clr %g2; clr %g3; clr %g5; subcc %g0, %g0, %g0;
+#define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
+#else
+#define VISEntryHalf FPU_ENTER
+#define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
+#endif
+
+#define GLOBAL_SPARE   %g5
+#endif
+
+#ifndef STORE_ASI
+#ifndef SIMULATE_NIAGARA_ON_NON_NIAGARA
+#define STORE_ASI      ASI_BLK_INIT_QUAD_LDD_P
+#else
+#define STORE_ASI      0x80            /* ASI_P */
+#endif
+#endif
+
+#ifndef EX_LD
+#define EX_LD(x)       x
+#endif
+
+#ifndef EX_ST
+#define EX_ST(x)       x
+#endif
+
+#ifndef EX_RETVAL
+#define EX_RETVAL(x)   x
+#endif
+
+#ifndef LOAD
+#define LOAD(type,addr,dest)   type [addr], dest
+#endif
+
+#ifndef STORE
+#ifndef MEMCPY_DEBUG
+#define STORE(type,src,addr)   type src, [addr]
+#else
+#define STORE(type,src,addr)   type##a src, [addr] %asi
+#endif
+#endif
+
+#ifndef STORE_INIT
+#define STORE_INIT(src,addr)   stxa src, [addr] STORE_ASI
+#endif
+
+#ifndef FUNC_NAME
+#define FUNC_NAME      NG4memcpy
+#endif
+#ifndef PREAMBLE
+#define PREAMBLE
+#endif
+
+#ifndef XCC
+#define XCC xcc
+#endif
+
+       .register       %g2,#scratch
+       .register       %g3,#scratch
+
+       .text
+       .align          64
+
+       .globl  FUNC_NAME
+       .type   FUNC_NAME,#function
+FUNC_NAME:     /* %o0=dst, %o1=src, %o2=len */
+#ifdef MEMCPY_DEBUG
+       wr              %g0, 0x80, %asi
+#endif
+       srlx            %o2, 31, %g2
+       cmp             %g2, 0
+       tne             %XCC, 5
+       PREAMBLE
+       mov             %o0, %o3
+       brz,pn          %o2, .Lexit
+        cmp            %o2, 3
+       ble,pn          %icc, .Ltiny
+        cmp            %o2, 19
+       ble,pn          %icc, .Lsmall
+        or             %o0, %o1, %g2
+       cmp             %o2, 128
+       bl,pn           %icc, .Lmedium
+        nop
+
+.Llarge:/* len >= 0x80 */
+       /* First get dest 8 byte aligned.  */
+       sub             %g0, %o0, %g1
+       and             %g1, 0x7, %g1
+       brz,pt          %g1, 51f
+        sub            %o2, %g1, %o2
+
+1:     EX_LD(LOAD(ldub, %o1 + 0x00, %g2))
+       add             %o1, 1, %o1
+       subcc           %g1, 1, %g1
+       add             %o0, 1, %o0
+       bne,pt          %icc, 1b
+        EX_ST(STORE(stb, %g2, %o0 - 0x01))
+
+51:    LOAD(prefetch, %o1 + 0x040, #n_reads_strong)
+       LOAD(prefetch, %o1 + 0x080, #n_reads_strong)
+       LOAD(prefetch, %o1 + 0x0c0, #n_reads_strong)
+       LOAD(prefetch, %o1 + 0x100, #n_reads_strong)
+       LOAD(prefetch, %o1 + 0x140, #n_reads_strong)
+       LOAD(prefetch, %o1 + 0x180, #n_reads_strong)
+       LOAD(prefetch, %o1 + 0x1c0, #n_reads_strong)
+       LOAD(prefetch, %o1 + 0x200, #n_reads_strong)
+
+       /* Check if we can use the straight fully aligned
+        * loop, or we require the alignaddr/faligndata variant.
+        */
+       andcc           %o1, 0x7, %o5
+       bne,pn          %icc, .Llarge_src_unaligned
+        sub            %g0, %o0, %g1
+
+       /* Legitimize the use of initializing stores by getting dest
+        * to be 64-byte aligned.
+        */
+       and             %g1, 0x3f, %g1
+       brz,pt          %g1, .Llarge_aligned
+        sub            %o2, %g1, %o2
+
+1:     EX_LD(LOAD(ldx, %o1 + 0x00, %g2))
+       add             %o1, 8, %o1
+       subcc           %g1, 8, %g1
+       add             %o0, 8, %o0
+       bne,pt          %icc, 1b
+        EX_ST(STORE(stx, %g2, %o0 - 0x08))
+
+.Llarge_aligned:
+       /* len >= 0x80 && src 8-byte aligned && dest 8-byte aligned */
+       andn            %o2, 0x3f, %o4
+       sub             %o2, %o4, %o2
+
+1:     EX_LD(LOAD(ldx, %o1 + 0x00, %g1))
+       add             %o1, 0x40, %o1
+       EX_LD(LOAD(ldx, %o1 - 0x38, %g2))
+       subcc           %o4, 0x40, %o4
+       EX_LD(LOAD(ldx, %o1 - 0x30, %g3))
+       EX_LD(LOAD(ldx, %o1 - 0x28, GLOBAL_SPARE))
+       EX_LD(LOAD(ldx, %o1 - 0x20, %o5))
+       EX_ST(STORE_INIT(%g1, %o0))
+       add             %o0, 0x08, %o0
+       EX_ST(STORE_INIT(%g2, %o0))
+       add             %o0, 0x08, %o0
+       EX_LD(LOAD(ldx, %o1 - 0x18, %g2))
+       EX_ST(STORE_INIT(%g3, %o0))
+       add             %o0, 0x08, %o0
+       EX_LD(LOAD(ldx, %o1 - 0x10, %g3))
+       EX_ST(STORE_INIT(GLOBAL_SPARE, %o0))
+       add             %o0, 0x08, %o0
+       EX_LD(LOAD(ldx, %o1 - 0x08, GLOBAL_SPARE))
+       EX_ST(STORE_INIT(%o5, %o0))
+       add             %o0, 0x08, %o0
+       EX_ST(STORE_INIT(%g2, %o0))
+       add             %o0, 0x08, %o0
+       EX_ST(STORE_INIT(%g3, %o0))
+       add             %o0, 0x08, %o0
+       EX_ST(STORE_INIT(GLOBAL_SPARE, %o0))
+       add             %o0, 0x08, %o0
+       bne,pt          %icc, 1b
+        LOAD(prefetch, %o1 + 0x200, #n_reads_strong)
+
+       membar          #StoreLoad | #StoreStore
+
+       brz,pn          %o2, .Lexit
+        cmp            %o2, 19
+       ble,pn          %icc, .Lsmall_unaligned
+        nop
+       ba,a,pt         %icc, .Lmedium_noprefetch
+
+.Lexit:        retl
+        mov            EX_RETVAL(%o3), %o0
+
+.Llarge_src_unaligned:
+       andn            %o2, 0x3f, %o4
+       sub             %o2, %o4, %o2
+       VISEntryHalf
+       alignaddr       %o1, %g0, %g1
+       add             %o1, %o4, %o1
+       EX_LD(LOAD(ldd, %g1 + 0x00, %f0))
+1:     EX_LD(LOAD(ldd, %g1 + 0x08, %f2))
+       subcc           %o4, 0x40, %o4
+       EX_LD(LOAD(ldd, %g1 + 0x10, %f4))
+       EX_LD(LOAD(ldd, %g1 + 0x18, %f6))
+       EX_LD(LOAD(ldd, %g1 + 0x20, %f8))
+       EX_LD(LOAD(ldd, %g1 + 0x28, %f10))
+       EX_LD(LOAD(ldd, %g1 + 0x30, %f12))
+       EX_LD(LOAD(ldd, %g1 + 0x38, %f14))
+       faligndata      %f0, %f2, %f16
+       EX_LD(LOAD(ldd, %g1 + 0x40, %f0))
+       faligndata      %f2, %f4, %f18
+       add             %g1, 0x40, %g1
+       faligndata      %f4, %f6, %f20
+       faligndata      %f6, %f8, %f22
+       faligndata      %f8, %f10, %f24
+       faligndata      %f10, %f12, %f26
+       faligndata      %f12, %f14, %f28
+       faligndata      %f14, %f0, %f30
+       EX_ST(STORE(std, %f16, %o0 + 0x00))
+       EX_ST(STORE(std, %f18, %o0 + 0x08))
+       EX_ST(STORE(std, %f20, %o0 + 0x10))
+       EX_ST(STORE(std, %f22, %o0 + 0x18))
+       EX_ST(STORE(std, %f24, %o0 + 0x20))
+       EX_ST(STORE(std, %f26, %o0 + 0x28))
+       EX_ST(STORE(std, %f28, %o0 + 0x30))
+       EX_ST(STORE(std, %f30, %o0 + 0x38))
+       add             %o0, 0x40, %o0
+       bne,pt          %icc, 1b
+        LOAD(prefetch, %g1 + 0x200, #n_reads_strong)
+       VISExitHalf
+
+       brz,pn          %o2, .Lexit
+        cmp            %o2, 19
+       ble,pn          %icc, .Lsmall_unaligned
+        nop
+       ba,a,pt         %icc, .Lmedium_unaligned
+
+.Lmedium:
+       LOAD(prefetch, %o1 + 0x40, #n_reads_strong)
+       andcc           %g2, 0x7, %g0
+       bne,pn          %icc, .Lmedium_unaligned
+        nop
+.Lmedium_noprefetch:
+       andncc          %o2, 0x20 - 1, %o5
+       be,pn           %icc, 2f
+        sub            %o2, %o5, %o2
+1:     EX_LD(LOAD(ldx, %o1 + 0x00, %g1))
+       EX_LD(LOAD(ldx, %o1 + 0x08, %g2))
+       EX_LD(LOAD(ldx, %o1 + 0x10, GLOBAL_SPARE))
+       EX_LD(LOAD(ldx, %o1 + 0x18, %o4))
+       add             %o1, 0x20, %o1
+       subcc           %o5, 0x20, %o5
+       EX_ST(STORE(stx, %g1, %o0 + 0x00))
+       EX_ST(STORE(stx, %g2, %o0 + 0x08))
+       EX_ST(STORE(stx, GLOBAL_SPARE, %o0 + 0x10))
+       EX_ST(STORE(stx, %o4, %o0 + 0x18))
+       bne,pt          %icc, 1b
+        add            %o0, 0x20, %o0
+2:     andcc           %o2, 0x18, %o5
+       be,pt           %icc, 3f
+        sub            %o2, %o5, %o2
+1:     EX_LD(LOAD(ldx, %o1 + 0x00, %g1))
+       add             %o1, 0x08, %o1
+       add             %o0, 0x08, %o0
+       subcc           %o5, 0x08, %o5
+       bne,pt          %icc, 1b
+        EX_ST(STORE(stx, %g1, %o0 - 0x08))
+3:     brz,pt          %o2, .Lexit
+        cmp            %o2, 0x04
+       bl,pn           %icc, .Ltiny
+        nop
+       EX_LD(LOAD(lduw, %o1 + 0x00, %g1))
+       add             %o1, 0x04, %o1
+       add             %o0, 0x04, %o0
+       subcc           %o2, 0x04, %o2
+       bne,pn          %icc, .Ltiny
+        EX_ST(STORE(stw, %g1, %o0 - 0x04))
+       ba,a,pt         %icc, .Lexit
+.Lmedium_unaligned:
+       /* First get dest 8 byte aligned.  */
+       sub             %g0, %o0, %g1
+       and             %g1, 0x7, %g1
+       brz,pt          %g1, 2f
+        sub            %o2, %g1, %o2
+
+1:     EX_LD(LOAD(ldub, %o1 + 0x00, %g2))
+       add             %o1, 1, %o1
+       subcc           %g1, 1, %g1
+       add             %o0, 1, %o0
+       bne,pt          %icc, 1b
+        EX_ST(STORE(stb, %g2, %o0 - 0x01))
+2:
+       and             %o1, 0x7, %g1
+       brz,pn          %g1, .Lmedium_noprefetch
+        sll            %g1, 3, %g1
+       mov             64, %g2
+       sub             %g2, %g1, %g2
+       andn            %o1, 0x7, %o1
+       EX_LD(LOAD(ldx, %o1 + 0x00, %o4))
+       sllx            %o4, %g1, %o4
+       andn            %o2, 0x08 - 1, %o5
+       sub             %o2, %o5, %o2
+1:     EX_LD(LOAD(ldx, %o1 + 0x08, %g3))
+       add             %o1, 0x08, %o1
+       subcc           %o5, 0x08, %o5
+       srlx            %g3, %g2, GLOBAL_SPARE
+       or              GLOBAL_SPARE, %o4, GLOBAL_SPARE
+       EX_ST(STORE(stx, GLOBAL_SPARE, %o0 + 0x00))
+       add             %o0, 0x08, %o0
+       bne,pt          %icc, 1b
+        sllx           %g3, %g1, %o4
+       srl             %g1, 3, %g1
+       add             %o1, %g1, %o1
+       brz,pn          %o2, .Lexit
+        nop
+       ba,pt           %icc, .Lsmall_unaligned
+
+.Ltiny:
+       EX_LD(LOAD(ldub, %o1 + 0x00, %g1))
+       subcc           %o2, 1, %o2
+       be,pn           %icc, .Lexit
+        EX_ST(STORE(stb, %g1, %o0 + 0x00))
+       EX_LD(LOAD(ldub, %o1 + 0x01, %g1))
+       subcc           %o2, 1, %o2
+       be,pn           %icc, .Lexit
+        EX_ST(STORE(stb, %g1, %o0 + 0x01))
+       EX_LD(LOAD(ldub, %o1 + 0x02, %g1))
+       ba,pt           %icc, .Lexit
+        EX_ST(STORE(stb, %g1, %o0 + 0x02))
+
+.Lsmall:
+       andcc           %g2, 0x3, %g0
+       bne,pn          %icc, .Lsmall_unaligned
+        andn           %o2, 0x4 - 1, %o5
+       sub             %o2, %o5, %o2
+1:
+       EX_LD(LOAD(lduw, %o1 + 0x00, %g1))
+       add             %o1, 0x04, %o1
+       subcc           %o5, 0x04, %o5
+       add             %o0, 0x04, %o0
+       bne,pt          %icc, 1b
+        EX_ST(STORE(stw, %g1, %o0 - 0x04))
+       brz,pt          %o2, .Lexit
+        nop
+       ba,a,pt         %icc, .Ltiny
+
+.Lsmall_unaligned:
+1:     EX_LD(LOAD(ldub, %o1 + 0x00, %g1))
+       add             %o1, 1, %o1
+       add             %o0, 1, %o0
+       subcc           %o2, 1, %o2
+       bne,pt          %icc, 1b
+        EX_ST(STORE(stb, %g1, %o0 - 0x01))
+       ba,a,pt         %icc, .Lexit
+       .size           FUNC_NAME, .-FUNC_NAME
diff --git a/arch/sparc/lib/NG4patch.S b/arch/sparc/lib/NG4patch.S
new file mode 100644 (file)
index 0000000..c21c34c
--- /dev/null
@@ -0,0 +1,43 @@
+/* NG4patch.S: Patch Ultra-I routines with Niagara-4 variant.
+ *
+ * Copyright (C) 2012 David S. Miller <davem@davemloft.net>
+ */
+
+#define BRANCH_ALWAYS  0x10680000
+#define NOP            0x01000000
+#define NG_DO_PATCH(OLD, NEW)  \
+       sethi   %hi(NEW), %g1; \
+       or      %g1, %lo(NEW), %g1; \
+       sethi   %hi(OLD), %g2; \
+       or      %g2, %lo(OLD), %g2; \
+       sub     %g1, %g2, %g1; \
+       sethi   %hi(BRANCH_ALWAYS), %g3; \
+       sll     %g1, 11, %g1; \
+       srl     %g1, 11 + 2, %g1; \
+       or      %g3, %lo(BRANCH_ALWAYS), %g3; \
+       or      %g3, %g1, %g3; \
+       stw     %g3, [%g2]; \
+       sethi   %hi(NOP), %g3; \
+       or      %g3, %lo(NOP), %g3; \
+       stw     %g3, [%g2 + 0x4]; \
+       flush   %g2;
+
+       .globl  niagara4_patch_copyops
+       .type   niagara4_patch_copyops,#function
+niagara4_patch_copyops:
+       NG_DO_PATCH(memcpy, NG4memcpy)
+       NG_DO_PATCH(___copy_from_user, NG4copy_from_user)
+       NG_DO_PATCH(___copy_to_user, NG4copy_to_user)
+       retl
+        nop
+       .size   niagara4_patch_copyops,.-niagara4_patch_copyops
+
+       .globl  niagara4_patch_pageops
+       .type   niagara4_patch_pageops,#function
+niagara4_patch_pageops:
+       NG_DO_PATCH(copy_user_page, NG4copy_user_page)
+       NG_DO_PATCH(_clear_page, NGclear_page)
+       NG_DO_PATCH(clear_user_page, NGclear_user_page)
+       retl
+        nop
+       .size   niagara4_patch_pageops,.-niagara4_patch_pageops
index b9e790b9c6b8c41356883593f3e70e81ea2733c7..423d46e2258be1d97d08b6ed0fd5709ea156eaba 100644 (file)
@@ -59,6 +59,8 @@ NGcopy_user_page:     /* %o0=dest, %o1=src, %o2=vaddr */
         restore
 
        .align          32
+       .globl          NGclear_page
+       .globl          NGclear_user_page
 NGclear_page:          /* %o0=dest */
 NGclear_user_page:     /* %o0=dest, %o1=vaddr */
        rd              %asi, %g3
index 3b31218cafc6c515d0f24ee809004e15b3e4ab94..ee31b884c61b8390fa6a97e5d619ac4d1febe8cd 100644 (file)
@@ -134,6 +134,10 @@ EXPORT_SYMBOL(copy_user_page);
 void VISenter(void);
 EXPORT_SYMBOL(VISenter);
 
+/* CRYPTO code needs this */
+void VISenterhalf(void);
+EXPORT_SYMBOL(VISenterhalf);
+
 extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *);
 extern void xor_vis_3(unsigned long, unsigned long *, unsigned long *,
                unsigned long *);
index d58edf5fefdb6a4a3fdf484f46588cc411727c38..7a9b788c6ced6d3b1e8685ee2b22e829496c0224 100644 (file)
 
 #include "init_64.h"
 
-unsigned long kern_linear_pte_xor[2] __read_mostly;
+unsigned long kern_linear_pte_xor[4] __read_mostly;
 
-/* A bitmap, one bit for every 256MB of physical memory.  If the bit
- * is clear, we should use a 4MB page (via kern_linear_pte_xor[0]) else
- * if set we should use a 256MB page (via kern_linear_pte_xor[1]).
+/* A bitmap, two bits for every 256MB of physical memory.  These two
+ * bits determine what page size we use for kernel linear
+ * translations.  They form an index into kern_linear_pte_xor[].  The
+ * value in the indexed slot is XOR'd with the TLB miss virtual
+ * address to form the resulting TTE.  The mapping is:
+ *
+ *     0       ==>     4MB
+ *     1       ==>     256MB
+ *     2       ==>     2GB
+ *     3       ==>     16GB
+ *
+ * All sun4v chips support 256MB pages.  Only SPARC-T4 and later
+ * support 2GB pages, and hopefully future cpus will support the 16GB
+ * pages as well.  For slots 2 and 3, we encode a 256MB TTE xor there
+ * if these larger page sizes are not supported by the cpu.
+ *
+ * It would be nice to determine this from the machine description
+ * 'cpu' properties, but we need to have this table setup before the
+ * MDESC is initialized.
  */
 unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)];
 
 #ifndef CONFIG_DEBUG_PAGEALLOC
-/* A special kernel TSB for 4MB and 256MB linear mappings.
- * Space is allocated for this right after the trap table
- * in arch/sparc64/kernel/head.S
+/* A special kernel TSB for 4MB, 256MB, 2GB and 16GB linear mappings.
+ * Space is allocated for this right after the trap table in
+ * arch/sparc64/kernel/head.S
  */
 extern struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES];
 #endif
 
+static unsigned long cpu_pgsz_mask;
+
 #define MAX_BANKS      32
 
 static struct linux_prom64_registers pavail[MAX_BANKS] __devinitdata;
@@ -101,7 +119,8 @@ static void __init read_obp_memory(const char *property,
 
        ret = prom_getproperty(node, property, (char *) regs, prop_size);
        if (ret == -1) {
-               prom_printf("Couldn't get %s property from /memory.\n");
+               prom_printf("Couldn't get %s property from /memory.\n",
+                               property);
                prom_halt();
        }
 
@@ -403,6 +422,12 @@ EXPORT_SYMBOL(flush_icache_range);
 
 void mmu_info(struct seq_file *m)
 {
+       static const char *pgsz_strings[] = {
+               "8K", "64K", "512K", "4MB", "32MB",
+               "256MB", "2GB", "16GB",
+       };
+       int i, printed;
+
        if (tlb_type == cheetah)
                seq_printf(m, "MMU Type\t: Cheetah\n");
        else if (tlb_type == cheetah_plus)
@@ -414,6 +439,17 @@ void mmu_info(struct seq_file *m)
        else
                seq_printf(m, "MMU Type\t: ???\n");
 
+       seq_printf(m, "MMU PGSZs\t: ");
+       printed = 0;
+       for (i = 0; i < ARRAY_SIZE(pgsz_strings); i++) {
+               if (cpu_pgsz_mask & (1UL << i)) {
+                       seq_printf(m, "%s%s",
+                                  printed ? "," : "", pgsz_strings[i]);
+                       printed++;
+               }
+       }
+       seq_putc(m, '\n');
+
 #ifdef CONFIG_DEBUG_DCFLUSH
        seq_printf(m, "DCPageFlushes\t: %d\n",
                   atomic_read(&dcpage_flushes));
@@ -462,7 +498,7 @@ static void __init read_obp_translations(void)
                prom_halt();
        }
        if (unlikely(n > sizeof(prom_trans))) {
-               prom_printf("prom_mappings: Size %Zd is too big.\n", n);
+               prom_printf("prom_mappings: Size %d is too big.\n", n);
                prom_halt();
        }
 
@@ -524,7 +560,7 @@ static void __init hypervisor_tlb_lock(unsigned long vaddr,
        unsigned long ret = sun4v_mmu_map_perm_addr(vaddr, 0, pte, mmu);
 
        if (ret != 0) {
-               prom_printf("hypervisor_tlb_lock[%lx:%lx:%lx:%lx]: "
+               prom_printf("hypervisor_tlb_lock[%lx:%x:%lx:%lx]: "
                            "errors with %lx\n", vaddr, 0, pte, mmu, ret);
                prom_halt();
        }
@@ -1358,32 +1394,75 @@ static unsigned long __ref kernel_map_range(unsigned long pstart,
 extern unsigned int kvmap_linear_patch[1];
 #endif /* CONFIG_DEBUG_PAGEALLOC */
 
-static void __init mark_kpte_bitmap(unsigned long start, unsigned long end)
+static void __init kpte_set_val(unsigned long index, unsigned long val)
 {
-       const unsigned long shift_256MB = 28;
-       const unsigned long mask_256MB = ((1UL << shift_256MB) - 1UL);
-       const unsigned long size_256MB = (1UL << shift_256MB);
+       unsigned long *ptr = kpte_linear_bitmap;
 
-       while (start < end) {
-               long remains;
+       val <<= ((index % (BITS_PER_LONG / 2)) * 2);
+       ptr += (index / (BITS_PER_LONG / 2));
 
-               remains = end - start;
-               if (remains < size_256MB)
-                       break;
+       *ptr |= val;
+}
 
-               if (start & mask_256MB) {
-                       start = (start + size_256MB) & ~mask_256MB;
-                       continue;
-               }
+static const unsigned long kpte_shift_min = 28; /* 256MB */
+static const unsigned long kpte_shift_max = 34; /* 16GB */
+static const unsigned long kpte_shift_incr = 3;
+
+static unsigned long kpte_mark_using_shift(unsigned long start, unsigned long end,
+                                          unsigned long shift)
+{
+       unsigned long size = (1UL << shift);
+       unsigned long mask = (size - 1UL);
+       unsigned long remains = end - start;
+       unsigned long val;
+
+       if (remains < size || (start & mask))
+               return start;
+
+       /* VAL maps:
+        *
+        *      shift 28 --> kern_linear_pte_xor index 1
+        *      shift 31 --> kern_linear_pte_xor index 2
+        *      shift 34 --> kern_linear_pte_xor index 3
+        */
+       val = ((shift - kpte_shift_min) / kpte_shift_incr) + 1;
+
+       remains &= ~mask;
+       if (shift != kpte_shift_max)
+               remains = size;
 
-               while (remains >= size_256MB) {
-                       unsigned long index = start >> shift_256MB;
+       while (remains) {
+               unsigned long index = start >> kpte_shift_min;
 
-                       __set_bit(index, kpte_linear_bitmap);
+               kpte_set_val(index, val);
 
-                       start += size_256MB;
-                       remains -= size_256MB;
+               start += 1UL << kpte_shift_min;
+               remains -= 1UL << kpte_shift_min;
+       }
+
+       return start;
+}
+
+static void __init mark_kpte_bitmap(unsigned long start, unsigned long end)
+{
+       unsigned long smallest_size, smallest_mask;
+       unsigned long s;
+
+       smallest_size = (1UL << kpte_shift_min);
+       smallest_mask = (smallest_size - 1UL);
+
+       while (start < end) {
+               unsigned long orig_start = start;
+
+               for (s = kpte_shift_max; s >= kpte_shift_min; s -= kpte_shift_incr) {
+                       start = kpte_mark_using_shift(start, end, s);
+
+                       if (start != orig_start)
+                               break;
                }
+
+               if (start == orig_start)
+                       start = (start + smallest_size) & ~smallest_mask;
        }
 }
 
@@ -1577,13 +1656,16 @@ static void __init sun4v_ktsb_init(void)
        ktsb_descr[0].resv = 0;
 
 #ifndef CONFIG_DEBUG_PAGEALLOC
-       /* Second KTSB for 4MB/256MB mappings.  */
+       /* Second KTSB for 4MB/256MB/2GB/16GB mappings.  */
        ktsb_pa = (kern_base +
                   ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));
 
        ktsb_descr[1].pgsz_idx = HV_PGSZ_IDX_4MB;
-       ktsb_descr[1].pgsz_mask = (HV_PGSZ_MASK_4MB |
-                                  HV_PGSZ_MASK_256MB);
+       ktsb_descr[1].pgsz_mask = ((HV_PGSZ_MASK_4MB |
+                                   HV_PGSZ_MASK_256MB |
+                                   HV_PGSZ_MASK_2GB |
+                                   HV_PGSZ_MASK_16GB) &
+                                  cpu_pgsz_mask);
        ktsb_descr[1].assoc = 1;
        ktsb_descr[1].num_ttes = KERNEL_TSB4M_NENTRIES;
        ktsb_descr[1].ctx_idx = 0;
@@ -1606,6 +1688,47 @@ void __cpuinit sun4v_ktsb_register(void)
        }
 }
 
+static void __init sun4u_linear_pte_xor_finalize(void)
+{
+#ifndef CONFIG_DEBUG_PAGEALLOC
+       /* This is where we would add Panther support for
+        * 32MB and 256MB pages.
+        */
+#endif
+}
+
+static void __init sun4v_linear_pte_xor_finalize(void)
+{
+#ifndef CONFIG_DEBUG_PAGEALLOC
+       if (cpu_pgsz_mask & HV_PGSZ_MASK_256MB) {
+               kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^
+                       0xfffff80000000000UL;
+               kern_linear_pte_xor[1] |= (_PAGE_CP_4V | _PAGE_CV_4V |
+                                          _PAGE_P_4V | _PAGE_W_4V);
+       } else {
+               kern_linear_pte_xor[1] = kern_linear_pte_xor[0];
+       }
+
+       if (cpu_pgsz_mask & HV_PGSZ_MASK_2GB) {
+               kern_linear_pte_xor[2] = (_PAGE_VALID | _PAGE_SZ2GB_4V) ^
+                       0xfffff80000000000UL;
+               kern_linear_pte_xor[2] |= (_PAGE_CP_4V | _PAGE_CV_4V |
+                                          _PAGE_P_4V | _PAGE_W_4V);
+       } else {
+               kern_linear_pte_xor[2] = kern_linear_pte_xor[1];
+       }
+
+       if (cpu_pgsz_mask & HV_PGSZ_MASK_16GB) {
+               kern_linear_pte_xor[3] = (_PAGE_VALID | _PAGE_SZ16GB_4V) ^
+                       0xfffff80000000000UL;
+               kern_linear_pte_xor[3] |= (_PAGE_CP_4V | _PAGE_CV_4V |
+                                          _PAGE_P_4V | _PAGE_W_4V);
+       } else {
+               kern_linear_pte_xor[3] = kern_linear_pte_xor[2];
+       }
+#endif
+}
+
 /* paging_init() sets up the page tables */
 
 static unsigned long last_valid_pfn;
@@ -1665,10 +1788,8 @@ void __init paging_init(void)
                ktsb_phys_patch();
        }
 
-       if (tlb_type == hypervisor) {
+       if (tlb_type == hypervisor)
                sun4v_patch_tlb_handlers();
-               sun4v_ktsb_init();
-       }
 
        /* Find available physical memory...
         *
@@ -1727,9 +1848,6 @@ void __init paging_init(void)
 
        __flush_tlb_all();
 
-       if (tlb_type == hypervisor)
-               sun4v_ktsb_register();
-
        prom_build_devicetree();
        of_populate_present_mask();
 #ifndef CONFIG_SMP
@@ -1742,8 +1860,36 @@ void __init paging_init(void)
 #ifndef CONFIG_SMP
                mdesc_fill_in_cpu_data(cpu_all_mask);
 #endif
+               mdesc_get_page_sizes(cpu_all_mask, &cpu_pgsz_mask);
+
+               sun4v_linear_pte_xor_finalize();
+
+               sun4v_ktsb_init();
+               sun4v_ktsb_register();
+       } else {
+               unsigned long impl, ver;
+
+               cpu_pgsz_mask = (HV_PGSZ_MASK_8K | HV_PGSZ_MASK_64K |
+                                HV_PGSZ_MASK_512K | HV_PGSZ_MASK_4MB);
+
+               __asm__ __volatile__("rdpr %%ver, %0" : "=r" (ver));
+               impl = ((ver >> 32) & 0xffff);
+               if (impl == PANTHER_IMPL)
+                       cpu_pgsz_mask |= (HV_PGSZ_MASK_32MB |
+                                         HV_PGSZ_MASK_256MB);
+
+               sun4u_linear_pte_xor_finalize();
        }
 
+       /* Flush the TLBs and the 4M TSB so that the updated linear
+        * pte XOR settings are realized for all mappings.
+        */
+       __flush_tlb_all();
+#ifndef CONFIG_DEBUG_PAGEALLOC
+       memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));
+#endif
+       __flush_tlb_all();
+
        /* Setup bootmem... */
        last_valid_pfn = end_pfn = bootmem_init(phys_base);
 
@@ -2110,6 +2256,7 @@ static void __init sun4u_pgprot_init(void)
 {
        unsigned long page_none, page_shared, page_copy, page_readonly;
        unsigned long page_exec_bit;
+       int i;
 
        PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
                                _PAGE_CACHE_4U | _PAGE_P_4U |
@@ -2137,8 +2284,8 @@ static void __init sun4u_pgprot_init(void)
        kern_linear_pte_xor[0] |= (_PAGE_CP_4U | _PAGE_CV_4U |
                                   _PAGE_P_4U | _PAGE_W_4U);
 
-       /* XXX Should use 256MB on Panther. XXX */
-       kern_linear_pte_xor[1] = kern_linear_pte_xor[0];
+       for (i = 1; i < 4; i++)
+               kern_linear_pte_xor[i] = kern_linear_pte_xor[0];
 
        _PAGE_SZBITS = _PAGE_SZBITS_4U;
        _PAGE_ALL_SZ_BITS =  (_PAGE_SZ4MB_4U | _PAGE_SZ512K_4U |
@@ -2164,6 +2311,7 @@ static void __init sun4v_pgprot_init(void)
 {
        unsigned long page_none, page_shared, page_copy, page_readonly;
        unsigned long page_exec_bit;
+       int i;
 
        PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4V | _PAGE_VALID |
                                _PAGE_CACHE_4V | _PAGE_P_4V |
@@ -2185,15 +2333,8 @@ static void __init sun4v_pgprot_init(void)
        kern_linear_pte_xor[0] |= (_PAGE_CP_4V | _PAGE_CV_4V |
                                   _PAGE_P_4V | _PAGE_W_4V);
 
-#ifdef CONFIG_DEBUG_PAGEALLOC
-       kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZBITS_4V) ^
-               0xfffff80000000000UL;
-#else
-       kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^
-               0xfffff80000000000UL;
-#endif
-       kern_linear_pte_xor[1] |= (_PAGE_CP_4V | _PAGE_CV_4V |
-                                  _PAGE_P_4V | _PAGE_W_4V);
+       for (i = 1; i < 4; i++)
+               kern_linear_pte_xor[i] = kern_linear_pte_xor[0];
 
        pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4V | __DIRTY_BITS_4V |
                     __ACCESS_BITS_4V | _PAGE_E_4V);
index 3e1ac8b96cae092017d57b5f6c1eb2023dc72af1..0661aa606decec7480e4042aed430f8729af5bd5 100644 (file)
@@ -8,12 +8,12 @@
 #define MAX_PHYS_ADDRESS       (1UL << 41UL)
 #define KPTE_BITMAP_CHUNK_SZ           (256UL * 1024UL * 1024UL)
 #define KPTE_BITMAP_BYTES      \
-       ((MAX_PHYS_ADDRESS / KPTE_BITMAP_CHUNK_SZ) / 8)
+       ((MAX_PHYS_ADDRESS / KPTE_BITMAP_CHUNK_SZ) / 4)
 #define VALID_ADDR_BITMAP_CHUNK_SZ     (4UL * 1024UL * 1024UL)
 #define VALID_ADDR_BITMAP_BYTES        \
        ((MAX_PHYS_ADDRESS / VALID_ADDR_BITMAP_CHUNK_SZ) / 8)
 
-extern unsigned long kern_linear_pte_xor[2];
+extern unsigned long kern_linear_pte_xor[4];
 extern unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)];
 extern unsigned int sparc64_highest_unlocked_tlb_ent;
 extern unsigned long sparc64_kern_pri_context;
index a8a58cad9d2b1eaab321e3bc87347fab0e5eb24c..0f4f7191fbbad93d6600ac409f6baae24c911d5e 100644 (file)
@@ -90,8 +90,8 @@ static void __init sbus_iommu_init(struct platform_device *op)
            it to us. */
         tmp = __get_free_pages(GFP_KERNEL, IOMMU_ORDER);
        if (!tmp) {
-               prom_printf("Unable to allocate iommu table [0x%08x]\n",
-                           IOMMU_NPTES*sizeof(iopte_t));
+               prom_printf("Unable to allocate iommu table [0x%lx]\n",
+                           IOMMU_NPTES * sizeof(iopte_t));
                prom_halt();
        }
        iommu->page_table = (iopte_t *)tmp;
index e9073e9501b37792e5e6941253b6c13f601102c3..28368701ef796ff5c4268398264cdfb504a0be84 100644 (file)
@@ -464,8 +464,12 @@ void bpf_jit_compile(struct sk_filter *fp)
                                emit_alu_K(OR, K);
                                break;
                        case BPF_S_ANC_ALU_XOR_X: /* A ^= X; */
+                       case BPF_S_ALU_XOR_X:
                                emit_alu_X(XOR);
                                break;
+                       case BPF_S_ALU_XOR_K:   /* A ^= K */
+                               emit_alu_K(XOR, K);
+                               break;
                        case BPF_S_ALU_LSH_X:   /* A <<= X */
                                emit_alu_X(SLL);
                                break;
index 0270620a16926956b6b6a6e95422a439c8b09cb2..8c5eff6d6df5577ea987d01cbf79cde62cab3506 100644 (file)
@@ -134,7 +134,6 @@ CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
-CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
 CONFIG_NETFILTER_XT_TARGET_TEE=m
 CONFIG_NETFILTER_XT_TARGET_TPROXY=m
 CONFIG_NETFILTER_XT_TARGET_TRACE=m
index c11de27a9bcb232061be13744514589282ef68ed..e7a3dfcbcda7094ef4c7fa818c650716d2103397 100644 (file)
@@ -132,7 +132,6 @@ CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
-CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
 CONFIG_NETFILTER_XT_TARGET_TEE=m
 CONFIG_NETFILTER_XT_TARGET_TPROXY=m
 CONFIG_NETFILTER_XT_TARGET_TRACE=m
index c17de0db6736e1bcef0fca9ab1a3d624cd271580..9efeb6da48bc6c959fd1c87d92fba1057baa07a1 100644 (file)
@@ -21,6 +21,9 @@
 #include <linux/un.h>
 #include <linux/workqueue.h>
 #include <linux/mutex.h>
+#include <linux/fs.h>
+#include <linux/mount.h>
+#include <linux/file.h>
 #include <asm/uaccess.h>
 #include <asm/switch_to.h>
 
@@ -118,90 +121,38 @@ void mconsole_log(struct mc_request *req)
        mconsole_reply(req, "", 0, 0);
 }
 
-/* This is a more convoluted version of mconsole_proc, which has some stability
- * problems; however, we need it fixed, because it is expected that UML users
- * mount HPPFS instead of procfs on /proc. And we want mconsole_proc to still
- * show the real procfs content, not the ones from hppfs.*/
-#if 0
 void mconsole_proc(struct mc_request *req)
 {
        struct vfsmount *mnt = current->nsproxy->pid_ns->proc_mnt;
-       struct file *file;
-       int n;
-       char *ptr = req->request.data, *buf;
-       mm_segment_t old_fs = get_fs();
-
-       ptr += strlen("proc");
-       ptr = skip_spaces(ptr);
-
-       file = file_open_root(mnt->mnt_root, mnt, ptr, O_RDONLY);
-       if (IS_ERR(file)) {
-               mconsole_reply(req, "Failed to open file", 1, 0);
-               goto out;
-       }
-
-       buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
-       if (buf == NULL) {
-               mconsole_reply(req, "Failed to allocate buffer", 1, 0);
-               goto out_fput;
-       }
-
-       if (file->f_op->read) {
-               do {
-                       loff_t pos;
-                       set_fs(KERNEL_DS);
-                       n = vfs_read(file, buf, PAGE_SIZE - 1, &pos);
-                       file_pos_write(file, pos);
-                       set_fs(old_fs);
-                       if (n >= 0) {
-                               buf[n] = '\0';
-                               mconsole_reply(req, buf, 0, (n > 0));
-                       }
-                       else {
-                               mconsole_reply(req, "Read of file failed",
-                                              1, 0);
-                               goto out_free;
-                       }
-               } while (n > 0);
-       }
-       else mconsole_reply(req, "", 0, 0);
-
- out_free:
-       kfree(buf);
- out_fput:
-       fput(file);
- out: ;
-}
-#endif
-
-void mconsole_proc(struct mc_request *req)
-{
-       char path[64];
        char *buf;
        int len;
-       int fd;
+       struct file *file;
        int first_chunk = 1;
        char *ptr = req->request.data;
 
        ptr += strlen("proc");
        ptr = skip_spaces(ptr);
-       snprintf(path, sizeof(path), "/proc/%s", ptr);
 
-       fd = sys_open(path, 0, 0);
-       if (fd < 0) {
+       file = file_open_root(mnt->mnt_root, mnt, ptr, O_RDONLY);
+       if (IS_ERR(file)) {
                mconsole_reply(req, "Failed to open file", 1, 0);
-               printk(KERN_ERR "open %s: %d\n",path,fd);
+               printk(KERN_ERR "open /proc/%s: %ld\n", ptr, PTR_ERR(file));
                goto out;
        }
 
        buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
        if (buf == NULL) {
                mconsole_reply(req, "Failed to allocate buffer", 1, 0);
-               goto out_close;
+               goto out_fput;
        }
 
-       for (;;) {
-               len = sys_read(fd, buf, PAGE_SIZE-1);
+       do {
+               loff_t pos;
+               mm_segment_t old_fs = get_fs();
+               set_fs(KERNEL_DS);
+               len = vfs_read(file, buf, PAGE_SIZE - 1, &pos);
+               set_fs(old_fs);
+               file->f_pos = pos;
                if (len < 0) {
                        mconsole_reply(req, "Read of file failed", 1, 0);
                        goto out_free;
@@ -211,22 +162,14 @@ void mconsole_proc(struct mc_request *req)
                        mconsole_reply(req, "\n", 0, 1);
                        first_chunk = 0;
                }
-               if (len == PAGE_SIZE-1) {
-                       buf[len] = '\0';
-                       mconsole_reply(req, buf, 0, 1);
-               } else {
-                       buf[len] = '\0';
-                       mconsole_reply(req, buf, 0, 0);
-                       break;
-               }
-       }
-
+               buf[len] = '\0';
+               mconsole_reply(req, buf, 0, (len > 0));
+       } while (len > 0);
  out_free:
        kfree(buf);
- out_close:
-       sys_close(fd);
- out:
-       /* nothing */;
+ out_fput:
+       fput(file);
+ out: ;
 }
 
 #define UML_MCONSOLE_HELPTEXT \
index 957ec87385afe0c5c53fbac8afac109e75c374d5..fbee9714d9abafd8f4a95c6586acdefaf1e1bd66 100644 (file)
 
 #define MSR_IA32_PERF_STATUS           0x00000198
 #define MSR_IA32_PERF_CTL              0x00000199
+#define MSR_AMD_PSTATE_DEF_BASE                0xc0010064
+#define MSR_AMD_PERF_STATUS            0xc0010063
+#define MSR_AMD_PERF_CTL               0xc0010062
 
 #define MSR_IA32_MPERF                 0x000000e7
 #define MSR_IA32_APERF                 0x000000e8
index 80502a2bb7898b1075d108b5a2b2a1bde4d3b1cc..1707cfa928fbff64b74a85a2de28c345308353fc 100644 (file)
 #endif
 
 #ifndef __ASSEMBLY__
+/* Explicitly size integers that represent pfns in the public interface
+ * with Xen so that on ARM we can have one ABI that works for 32 and 64
+ * bit guests. */
+typedef unsigned long xen_pfn_t;
 /* Guest handles for primitive C types. */
 __DEFINE_GUEST_HANDLE(uchar, unsigned char);
 __DEFINE_GUEST_HANDLE(uint,  unsigned int);
@@ -57,6 +61,7 @@ DEFINE_GUEST_HANDLE(long);
 DEFINE_GUEST_HANDLE(void);
 DEFINE_GUEST_HANDLE(uint64_t);
 DEFINE_GUEST_HANDLE(uint32_t);
+DEFINE_GUEST_HANDLE(xen_pfn_t);
 #endif
 
 #ifndef HYPERVISOR_VIRT_START
@@ -121,6 +126,8 @@ struct arch_shared_info {
 #include <asm/xen/interface_64.h>
 #endif
 
+#include <asm/pvclock-abi.h>
+
 #ifndef __ASSEMBLY__
 /*
  * The following is all CPU context. Note that the fpu_ctxt block is filled
index 1be1ab7d6a4120d945aa22abf98a523f617196d8..ee52fcac6f72211ec3ac06e0e295193bc0e44817 100644 (file)
@@ -5,10 +5,12 @@
 extern int xen_swiotlb;
 extern int __init pci_xen_swiotlb_detect(void);
 extern void __init pci_xen_swiotlb_init(void);
+extern int pci_xen_swiotlb_init_late(void);
 #else
 #define xen_swiotlb (0)
 static inline int __init pci_xen_swiotlb_detect(void) { return 0; }
 static inline void __init pci_xen_swiotlb_init(void) { }
+static inline int pci_xen_swiotlb_init_late(void) { return -ENXIO; }
 #endif
 
 #endif /* _ASM_X86_SWIOTLB_XEN_H */
index 33643a8bcbbb0f1887dc2f815fe469aaf0e0507a..520d2bd0b9c583de16ce246ffbf938c5c9f48b33 100644 (file)
@@ -280,6 +280,31 @@ void bpf_jit_compile(struct sk_filter *fp)
                                }
                                EMIT4(0x31, 0xd2, 0xf7, 0xf3); /* xor %edx,%edx; div %ebx */
                                break;
+                       case BPF_S_ALU_MOD_X: /* A %= X; */
+                               seen |= SEEN_XREG;
+                               EMIT2(0x85, 0xdb);      /* test %ebx,%ebx */
+                               if (pc_ret0 > 0) {
+                                       /* addrs[pc_ret0 - 1] is start address of target
+                                        * (addrs[i] - 6) is the address following this jmp
+                                        * ("xor %edx,%edx; div %ebx;mov %edx,%eax" being 6 bytes long)
+                                        */
+                                       EMIT_COND_JMP(X86_JE, addrs[pc_ret0 - 1] -
+                                                               (addrs[i] - 6));
+                               } else {
+                                       EMIT_COND_JMP(X86_JNE, 2 + 5);
+                                       CLEAR_A();
+                                       EMIT1_off32(0xe9, cleanup_addr - (addrs[i] - 6)); /* jmp .+off32 */
+                               }
+                               EMIT2(0x31, 0xd2);      /* xor %edx,%edx */
+                               EMIT2(0xf7, 0xf3);      /* div %ebx */
+                               EMIT2(0x89, 0xd0);      /* mov %edx,%eax */
+                               break;
+                       case BPF_S_ALU_MOD_K: /* A %= K; */
+                               EMIT2(0x31, 0xd2);      /* xor %edx,%edx */
+                               EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */
+                               EMIT2(0xf7, 0xf1);      /* div %ecx */
+                               EMIT2(0x89, 0xd0);      /* mov %edx,%eax */
+                               break;
                        case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */
                                EMIT3(0x48, 0x69, 0xc0); /* imul imm32,%rax,%rax */
                                EMIT(K, 4);
@@ -310,9 +335,18 @@ void bpf_jit_compile(struct sk_filter *fp)
                                        EMIT1_off32(0x0d, K);   /* or imm32,%eax */
                                break;
                        case BPF_S_ANC_ALU_XOR_X: /* A ^= X; */
+                       case BPF_S_ALU_XOR_X:
                                seen |= SEEN_XREG;
                                EMIT2(0x31, 0xd8);              /* xor %ebx,%eax */
                                break;
+                       case BPF_S_ALU_XOR_K: /* A ^= K; */
+                               if (K == 0)
+                                       break;
+                               if (is_imm8(K))
+                                       EMIT3(0x83, 0xf0, K);   /* xor imm8,%eax */
+                               else
+                                       EMIT1_off32(0x35, K);   /* xor imm32,%eax */
+                               break;
                        case BPF_S_ALU_LSH_X: /* A <<= X; */
                                seen |= SEEN_XREG;
                                EMIT4(0x89, 0xd9, 0xd3, 0xe0);  /* mov %ebx,%ecx; shl %cl,%eax */
index ec57bd3818a4cdeb0c45d3c3b13cc7a79db85f9b..7005ced5d1ad7ab93a3862f269855da4cf9a9171 100644 (file)
@@ -6,8 +6,9 @@
 
 #include <xen/xen.h>
 #include <xen/interface/physdev.h>
+#include "xen-ops.h"
 
-unsigned int xen_io_apic_read(unsigned apic, unsigned reg)
+static unsigned int xen_io_apic_read(unsigned apic, unsigned reg)
 {
        struct physdev_apic apic_op;
        int ret;
index 1fbe75a95f15953d3d0ed6cd0dd7c911a88eac0b..2d932c351f91fec22daa30d1a1aabe20a772a2bb 100644 (file)
@@ -80,6 +80,8 @@
 #include "smp.h"
 #include "multicalls.h"
 
+#include <xen/events.h>
+
 EXPORT_SYMBOL_GPL(hypercall_page);
 
 DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
@@ -1288,7 +1290,6 @@ asmlinkage void __init xen_start_kernel(void)
 {
        struct physdev_set_iopl set_iopl;
        int rc;
-       pgd_t *pgd;
 
        if (!xen_start_info)
                return;
@@ -1380,8 +1381,6 @@ asmlinkage void __init xen_start_kernel(void)
        acpi_numa = -1;
 #endif
 
-       pgd = (pgd_t *)xen_start_info->pt_base;
-
        /* Don't do the full vcpu_info placement stuff until we have a
           possible map and a non-dummy shared_info. */
        per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0];
@@ -1390,7 +1389,7 @@ asmlinkage void __init xen_start_kernel(void)
        early_boot_irqs_disabled = true;
 
        xen_raw_console_write("mapping kernel into physical memory\n");
-       pgd = xen_setup_kernel_pagetable(pgd, xen_start_info->nr_pages);
+       xen_setup_kernel_pagetable((pgd_t *)xen_start_info->pt_base, xen_start_info->nr_pages);
 
        /* Allocate and initialize top and mid mfn levels for p2m structure */
        xen_build_mfn_list_list();
@@ -1441,11 +1440,19 @@ asmlinkage void __init xen_start_kernel(void)
                const struct dom0_vga_console_info *info =
                        (void *)((char *)xen_start_info +
                                 xen_start_info->console.dom0.info_off);
+               struct xen_platform_op op = {
+                       .cmd = XENPF_firmware_info,
+                       .interface_version = XENPF_INTERFACE_VERSION,
+                       .u.firmware_info.type = XEN_FW_KBD_SHIFT_FLAGS,
+               };
 
                xen_init_vga(info, xen_start_info->console.dom0.info_size);
                xen_start_info->console.domU.mfn = 0;
                xen_start_info->console.domU.evtchn = 0;
 
+               if (HYPERVISOR_dom0_op(&op) == 0)
+                       boot_params.kbd_status = op.u.firmware_info.u.kbd_shift_flags;
+
                xen_init_apic();
 
                /* Make sure ACS will be enabled */
index 7a769b7526cb2d5209380d65836ba5b2449aecb2..5a16824cc2b3ca88919e484d79473af3f2e951a4 100644 (file)
@@ -84,6 +84,7 @@
  */
 DEFINE_SPINLOCK(xen_reservation_lock);
 
+#ifdef CONFIG_X86_32
 /*
  * Identity map, in addition to plain kernel map.  This needs to be
  * large enough to allocate page table pages to allocate the rest.
@@ -91,7 +92,7 @@ DEFINE_SPINLOCK(xen_reservation_lock);
  */
 #define LEVEL1_IDENT_ENTRIES   (PTRS_PER_PTE * 4)
 static RESERVE_BRK_ARRAY(pte_t, level1_ident_pgt, LEVEL1_IDENT_ENTRIES);
-
+#endif
 #ifdef CONFIG_X86_64
 /* l3 pud for userspace vsyscall mapping */
 static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
@@ -1176,13 +1177,6 @@ static void xen_exit_mmap(struct mm_struct *mm)
 
 static void xen_post_allocator_init(void);
 
-static void __init xen_pagetable_init(void)
-{
-       paging_init();
-       xen_setup_shared_info();
-       xen_post_allocator_init();
-}
-
 static __init void xen_mapping_pagetable_reserve(u64 start, u64 end)
 {
        /* reserve the range used */
@@ -1197,6 +1191,87 @@ static __init void xen_mapping_pagetable_reserve(u64 start, u64 end)
        }
 }
 
+#ifdef CONFIG_X86_64
+static void __init xen_cleanhighmap(unsigned long vaddr,
+                                   unsigned long vaddr_end)
+{
+       unsigned long kernel_end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
+       pmd_t *pmd = level2_kernel_pgt + pmd_index(vaddr);
+
+       /* NOTE: The loop is more greedy than the cleanup_highmap variant.
+        * We include the PMD passed in on _both_ boundaries. */
+       for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PAGE_SIZE));
+                       pmd++, vaddr += PMD_SIZE) {
+               if (pmd_none(*pmd))
+                       continue;
+               if (vaddr < (unsigned long) _text || vaddr > kernel_end)
+                       set_pmd(pmd, __pmd(0));
+       }
+       /* In case we did something silly, we should crash in this function
+        * instead of somewhere later and be confusing. */
+       xen_mc_flush();
+}
+#endif
+static void __init xen_pagetable_init(void)
+{
+#ifdef CONFIG_X86_64
+       unsigned long size;
+       unsigned long addr;
+#endif
+       paging_init();
+       xen_setup_shared_info();
+#ifdef CONFIG_X86_64
+       if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+               unsigned long new_mfn_list;
+
+               size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
+
+               /* On 32-bit, we get zero so this never gets executed. */
+               new_mfn_list = xen_revector_p2m_tree();
+               if (new_mfn_list && new_mfn_list != xen_start_info->mfn_list) {
+                       /* using __ka address and sticking INVALID_P2M_ENTRY! */
+                       memset((void *)xen_start_info->mfn_list, 0xff, size);
+
+                       /* We should be in __ka space. */
+                       BUG_ON(xen_start_info->mfn_list < __START_KERNEL_map);
+                       addr = xen_start_info->mfn_list;
+                       /* We roundup to the PMD, which means that if anybody at this stage is
+                        * using the __ka address of xen_start_info or xen_start_info->shared_info
+                        * they are in going to crash. Fortunatly we have already revectored
+                        * in xen_setup_kernel_pagetable and in xen_setup_shared_info. */
+                       size = roundup(size, PMD_SIZE);
+                       xen_cleanhighmap(addr, addr + size);
+
+                       size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
+                       memblock_free(__pa(xen_start_info->mfn_list), size);
+                       /* And revector! Bye bye old array */
+                       xen_start_info->mfn_list = new_mfn_list;
+               } else
+                       goto skip;
+       }
+       /* At this stage, cleanup_highmap has already cleaned __ka space
+        * from _brk_limit way up to the max_pfn_mapped (which is the end of
+        * the ramdisk). We continue on, erasing PMD entries that point to page
+        * tables - do note that they are accessible at this stage via __va.
+        * For good measure we also round up to the PMD - which means that if
+        * anybody is using a __ka address to the initial boot-stack - and tries
+        * to use it - they are going to crash. The xen_start_info has been
+        * taken care of already in xen_setup_kernel_pagetable. */
+       addr = xen_start_info->pt_base;
+       size = roundup(xen_start_info->nr_pt_frames * PAGE_SIZE, PMD_SIZE);
+
+       xen_cleanhighmap(addr, addr + size);
+       xen_start_info->pt_base = (unsigned long)__va(__pa(xen_start_info->pt_base));
+#ifdef DEBUG
+       /* This is superfluous and not necessary, but you know what,
+        * let's do it. The MODULES_VADDR -> MODULES_END should be clear of
+        * anything at this stage. */
+       xen_cleanhighmap(MODULES_VADDR, roundup(MODULES_VADDR, PUD_SIZE) - 1);
+#endif
+skip:
+#endif
+       xen_post_allocator_init();
+}
 static void xen_write_cr2(unsigned long cr2)
 {
        this_cpu_read(xen_vcpu)->arch.cr2 = cr2;
@@ -1652,7 +1727,7 @@ static void set_page_prot(void *addr, pgprot_t prot)
        if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, 0))
                BUG();
 }
-
+#ifdef CONFIG_X86_32
 static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
 {
        unsigned pmdidx, pteidx;
@@ -1703,7 +1778,7 @@ static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
 
        set_page_prot(pmd, PAGE_KERNEL_RO);
 }
-
+#endif
 void __init xen_setup_machphys_mapping(void)
 {
        struct xen_machphys_mapping mapping;
@@ -1731,7 +1806,20 @@ static void convert_pfn_mfn(void *v)
        for (i = 0; i < PTRS_PER_PTE; i++)
                pte[i] = xen_make_pte(pte[i].pte);
 }
-
+static void __init check_pt_base(unsigned long *pt_base, unsigned long *pt_end,
+                                unsigned long addr)
+{
+       if (*pt_base == PFN_DOWN(__pa(addr))) {
+               set_page_prot((void *)addr, PAGE_KERNEL);
+               clear_page((void *)addr);
+               (*pt_base)++;
+       }
+       if (*pt_end == PFN_DOWN(__pa(addr))) {
+               set_page_prot((void *)addr, PAGE_KERNEL);
+               clear_page((void *)addr);
+               (*pt_end)--;
+       }
+}
 /*
  * Set up the initial kernel pagetable.
  *
@@ -1743,11 +1831,13 @@ static void convert_pfn_mfn(void *v)
  * of the physical mapping once some sort of allocator has been set
  * up.
  */
-pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
-                                        unsigned long max_pfn)
+void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
 {
        pud_t *l3;
        pmd_t *l2;
+       unsigned long addr[3];
+       unsigned long pt_base, pt_end;
+       unsigned i;
 
        /* max_pfn_mapped is the last pfn mapped in the initial memory
         * mappings. Considering that on Xen after the kernel mappings we
@@ -1755,32 +1845,53 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
         * set max_pfn_mapped to the last real pfn mapped. */
        max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list));
 
+       pt_base = PFN_DOWN(__pa(xen_start_info->pt_base));
+       pt_end = pt_base + xen_start_info->nr_pt_frames;
+
        /* Zap identity mapping */
        init_level4_pgt[0] = __pgd(0);
 
        /* Pre-constructed entries are in pfn, so convert to mfn */
+       /* L4[272] -> level3_ident_pgt
+        * L4[511] -> level3_kernel_pgt */
        convert_pfn_mfn(init_level4_pgt);
+
+       /* L3_i[0] -> level2_ident_pgt */
        convert_pfn_mfn(level3_ident_pgt);
+       /* L3_k[510] -> level2_kernel_pgt
+        * L3_i[511] -> level2_fixmap_pgt */
        convert_pfn_mfn(level3_kernel_pgt);
 
+       /* We get [511][511] and have Xen's version of level2_kernel_pgt */
        l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
        l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
 
-       memcpy(level2_ident_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
-       memcpy(level2_kernel_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
-
+       addr[0] = (unsigned long)pgd;
+       addr[1] = (unsigned long)l3;
+       addr[2] = (unsigned long)l2;
+       /* Graft it onto L4[272][0]. Note that we are creating an aliasing problem:
+        * Both L4[272][0] and L4[511][511] have entries that point to the same
+        * L2 (PMD) tables. Meaning that if you modify it in __va space
+        * it will be also modified in the __ka space! (But if you just
+        * modify the PMD table to point to other PTE's or none, then you
+        * are OK - which is what cleanup_highmap does) */
+       copy_page(level2_ident_pgt, l2);
+       /* Graft it onto L4[511][511] */
+       copy_page(level2_kernel_pgt, l2);
+
+       /* Get [511][510] and graft that in level2_fixmap_pgt */
        l3 = m2v(pgd[pgd_index(__START_KERNEL_map + PMD_SIZE)].pgd);
        l2 = m2v(l3[pud_index(__START_KERNEL_map + PMD_SIZE)].pud);
-       memcpy(level2_fixmap_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
-
-       /* Set up identity map */
-       xen_map_identity_early(level2_ident_pgt, max_pfn);
+       copy_page(level2_fixmap_pgt, l2);
+       /* Note that we don't do anything with level1_fixmap_pgt which
+        * we don't need. */
 
        /* Make pagetable pieces RO */
        set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
        set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
        set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
        set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
+       set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
        set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
        set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
 
@@ -1791,22 +1902,28 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
        /* Unpin Xen-provided one */
        pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
 
-       /* Switch over */
-       pgd = init_level4_pgt;
-
        /*
         * At this stage there can be no user pgd, and no page
         * structure to attach it to, so make sure we just set kernel
         * pgd.
         */
        xen_mc_batch();
-       __xen_write_cr3(true, __pa(pgd));
+       __xen_write_cr3(true, __pa(init_level4_pgt));
        xen_mc_issue(PARAVIRT_LAZY_CPU);
 
-       memblock_reserve(__pa(xen_start_info->pt_base),
-                        xen_start_info->nr_pt_frames * PAGE_SIZE);
+       /* We can't that easily rip out L3 and L2, as the Xen pagetables are
+        * set out this way: [L4], [L1], [L2], [L3], [L1], [L1] ...  for
+        * the initial domain. For guests using the toolstack, they are in:
+        * [L4], [L3], [L2], [L1], [L1], order .. So for dom0 we can only
+        * rip out the [L4] (pgd), but for guests we shave off three pages.
+        */
+       for (i = 0; i < ARRAY_SIZE(addr); i++)
+               check_pt_base(&pt_base, &pt_end, addr[i]);
 
-       return pgd;
+       /* Our (by three pages) smaller Xen pagetable that we are using */
+       memblock_reserve(PFN_PHYS(pt_base), (pt_end - pt_base) * PAGE_SIZE);
+       /* Revector the xen_start_info */
+       xen_start_info = (struct start_info *)__va(__pa(xen_start_info));
 }
 #else  /* !CONFIG_X86_64 */
 static RESERVE_BRK_ARRAY(pmd_t, initial_kernel_pmd, PTRS_PER_PMD);
@@ -1831,8 +1948,7 @@ static void __init xen_write_cr3_init(unsigned long cr3)
         */
        swapper_kernel_pmd =
                extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
-       memcpy(swapper_kernel_pmd, initial_kernel_pmd,
-              sizeof(pmd_t) * PTRS_PER_PMD);
+       copy_page(swapper_kernel_pmd, initial_kernel_pmd);
        swapper_pg_dir[KERNEL_PGD_BOUNDARY] =
                __pgd(__pa(swapper_kernel_pmd) | _PAGE_PRESENT);
        set_page_prot(swapper_kernel_pmd, PAGE_KERNEL_RO);
@@ -1849,8 +1965,7 @@ static void __init xen_write_cr3_init(unsigned long cr3)
        pv_mmu_ops.write_cr3 = &xen_write_cr3;
 }
 
-pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
-                                        unsigned long max_pfn)
+void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
 {
        pmd_t *kernel_pmd;
 
@@ -1862,11 +1977,11 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
                                  512*1024);
 
        kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
-       memcpy(initial_kernel_pmd, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD);
+       copy_page(initial_kernel_pmd, kernel_pmd);
 
        xen_map_identity_early(initial_kernel_pmd, max_pfn);
 
-       memcpy(initial_page_table, pgd, sizeof(pgd_t) * PTRS_PER_PGD);
+       copy_page(initial_page_table, pgd);
        initial_page_table[KERNEL_PGD_BOUNDARY] =
                __pgd(__pa(initial_kernel_pmd) | _PAGE_PRESENT);
 
@@ -1882,8 +1997,6 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
 
        memblock_reserve(__pa(xen_start_info->pt_base),
                         xen_start_info->nr_pt_frames * PAGE_SIZE);
-
-       return initial_page_table;
 }
 #endif /* CONFIG_X86_64 */
 
@@ -2333,6 +2446,9 @@ int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
        unsigned long range;
        int err = 0;
 
+       if (xen_feature(XENFEAT_auto_translated_physmap))
+               return -EINVAL;
+
        prot = __pgprot(pgprot_val(prot) | _PAGE_IOMAP);
 
        BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_RESERVED | VM_IO)) ==
@@ -2351,8 +2467,8 @@ int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
                if (err)
                        goto out;
 
-               err = -EFAULT;
-               if (HYPERVISOR_mmu_update(mmu_update, batch, NULL, domid) < 0)
+               err = HYPERVISOR_mmu_update(mmu_update, batch, NULL, domid);
+               if (err < 0)
                        goto out;
 
                nr -= batch;
index 72213da605f50c07f3684ca07d351a8bbd7435ef..95fb2aa5927efc4678eccd8a51d91044b4dd9adb 100644 (file)
@@ -22,7 +22,7 @@
  *
  * P2M_PER_PAGE depends on the architecture, as a mfn is always
  * unsigned long (8 bytes on 64-bit, 4 bytes on 32), leading to
- * 512 and 1024 entries respectively. 
+ * 512 and 1024 entries respectively.
  *
  * In short, these structures contain the Machine Frame Number (MFN) of the PFN.
  *
  *      /    | ~0, ~0, ....  |
  *     |     \---------------/
  *     |
- *     p2m_missing             p2m_missing
- * /------------------\     /------------\
- * | [p2m_mid_missing]+---->| ~0, ~0, ~0 |
- * | [p2m_mid_missing]+---->| ..., ~0    |
- * \------------------/     \------------/
+ *   p2m_mid_missing           p2m_missing
+ * /-----------------\     /------------\
+ * | [p2m_missing]   +---->| ~0, ~0, ~0 |
+ * | [p2m_missing]   +---->| ..., ~0    |
+ * \-----------------/     \------------/
  *
  * where ~0 is INVALID_P2M_ENTRY. IDENTITY is (PFN | IDENTITY_BIT)
  */
@@ -396,7 +396,85 @@ void __init xen_build_dynamic_phys_to_machine(void)
 
        m2p_override_init();
 }
+#ifdef CONFIG_X86_64
+#include <linux/bootmem.h>
+unsigned long __init xen_revector_p2m_tree(void)
+{
+       unsigned long va_start;
+       unsigned long va_end;
+       unsigned long pfn;
+       unsigned long pfn_free = 0;
+       unsigned long *mfn_list = NULL;
+       unsigned long size;
+
+       va_start = xen_start_info->mfn_list;
+       /* We copy in increments of P2M_PER_PAGE * sizeof(unsigned long),
+        * so make sure it is rounded up to that */
+       size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
+       va_end = va_start + size;
+
+       /* If we were revectored already, don't do it again. */
+       if (va_start <= __START_KERNEL_map && va_start >= __PAGE_OFFSET)
+               return 0;
+
+       mfn_list = alloc_bootmem_align(size, PAGE_SIZE);
+       if (!mfn_list) {
+               pr_warn("Could not allocate space for a new P2M tree!\n");
+               return xen_start_info->mfn_list;
+       }
+       /* Fill it out with INVALID_P2M_ENTRY value */
+       memset(mfn_list, 0xFF, size);
+
+       for (pfn = 0; pfn < ALIGN(MAX_DOMAIN_PAGES, P2M_PER_PAGE); pfn += P2M_PER_PAGE) {
+               unsigned topidx = p2m_top_index(pfn);
+               unsigned mididx;
+               unsigned long *mid_p;
+
+               if (!p2m_top[topidx])
+                       continue;
+
+               if (p2m_top[topidx] == p2m_mid_missing)
+                       continue;
+
+               mididx = p2m_mid_index(pfn);
+               mid_p = p2m_top[topidx][mididx];
+               if (!mid_p)
+                       continue;
+               if ((mid_p == p2m_missing) || (mid_p == p2m_identity))
+                       continue;
+
+               if ((unsigned long)mid_p == INVALID_P2M_ENTRY)
+                       continue;
+
+               /* The old va. Rebase it on mfn_list */
+               if (mid_p >= (unsigned long *)va_start && mid_p <= (unsigned long *)va_end) {
+                       unsigned long *new;
+
+                       if (pfn_free  > (size / sizeof(unsigned long))) {
+                               WARN(1, "Only allocated for %ld pages, but we want %ld!\n",
+                                    size / sizeof(unsigned long), pfn_free);
+                               return 0;
+                       }
+                       new = &mfn_list[pfn_free];
+
+                       copy_page(new, mid_p);
+                       p2m_top[topidx][mididx] = &mfn_list[pfn_free];
+                       p2m_top_mfn_p[topidx][mididx] = virt_to_mfn(&mfn_list[pfn_free]);
+
+                       pfn_free += P2M_PER_PAGE;
 
+               }
+               /* This should be the leaves allocated for identity from _brk. */
+       }
+       return (unsigned long)mfn_list;
+
+}
+#else
+unsigned long __init xen_revector_p2m_tree(void)
+{
+       return 0;
+}
+#endif
 unsigned long get_phys_to_machine(unsigned long pfn)
 {
        unsigned topidx, mididx, idx;
@@ -430,7 +508,7 @@ static void free_p2m_page(void *p)
        free_page((unsigned long)p);
 }
 
-/* 
+/*
  * Fully allocate the p2m structure for a given pfn.  We need to check
  * that both the top and mid levels are allocated, and make sure the
  * parallel mfn tree is kept in sync.  We may race with other cpus, so
index 967633ad98c48b262cf7739c7cdcdb7b8d79641a..969570491c3964d0023dc82a231ff683ee88f735 100644 (file)
@@ -8,6 +8,14 @@
 #include <xen/xen.h>
 #include <asm/iommu_table.h>
 
+
+#include <asm/xen/swiotlb-xen.h>
+#ifdef CONFIG_X86_64
+#include <asm/iommu.h>
+#include <asm/dma.h>
+#endif
+#include <linux/export.h>
+
 int xen_swiotlb __read_mostly;
 
 static struct dma_map_ops xen_swiotlb_dma_ops = {
@@ -34,34 +42,64 @@ static struct dma_map_ops xen_swiotlb_dma_ops = {
 int __init pci_xen_swiotlb_detect(void)
 {
 
+       if (!xen_pv_domain())
+               return 0;
+
        /* If running as PV guest, either iommu=soft, or swiotlb=force will
         * activate this IOMMU. If running as PV privileged, activate it
         * irregardless.
         */
-       if ((xen_initial_domain() || swiotlb || swiotlb_force) &&
-           (xen_pv_domain()))
+       if ((xen_initial_domain() || swiotlb || swiotlb_force))
                xen_swiotlb = 1;
 
        /* If we are running under Xen, we MUST disable the native SWIOTLB.
         * Don't worry about swiotlb_force flag activating the native, as
         * the 'swiotlb' flag is the only one turning it on. */
-       if (xen_pv_domain())
-               swiotlb = 0;
+       swiotlb = 0;
 
+#ifdef CONFIG_X86_64
+       /* pci_swiotlb_detect_4gb turns on native SWIOTLB if no_iommu == 0
+        * (so no iommu=X command line over-writes).
+        * Considering that PV guests do not want the *native SWIOTLB* but
+        * only Xen SWIOTLB it is not useful to us so set no_iommu=1 here.
+        */
+       if (max_pfn > MAX_DMA32_PFN)
+               no_iommu = 1;
+#endif
        return xen_swiotlb;
 }
 
 void __init pci_xen_swiotlb_init(void)
 {
        if (xen_swiotlb) {
-               xen_swiotlb_init(1);
+               xen_swiotlb_init(1, true /* early */);
                dma_ops = &xen_swiotlb_dma_ops;
 
                /* Make sure ACS will be enabled */
                pci_request_acs();
        }
 }
+
+int pci_xen_swiotlb_init_late(void)
+{
+       int rc;
+
+       if (xen_swiotlb)
+               return 0;
+
+       rc = xen_swiotlb_init(1, false /* late */);
+       if (rc)
+               return rc;
+
+       dma_ops = &xen_swiotlb_dma_ops;
+       /* Make sure ACS will be enabled */
+       pci_request_acs();
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(pci_xen_swiotlb_init_late);
+
 IOMMU_INIT_FINISH(pci_xen_swiotlb_detect,
-                 0,
+                 NULL,
                  pci_xen_swiotlb_init,
-                 0);
+                 NULL);
index ffcf2615640b0bd1f8e3596dbaf4d07c49d5dc51..0a7852483ffef27583fb6c4b5b7c3684c5b3847c 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/module.h>
 
 #include <xen/platform_pci.h>
+#include "xen-ops.h"
 
 #define XEN_PLATFORM_ERR_MAGIC -1
 #define XEN_PLATFORM_ERR_PROTOCOL -2
index e2d62d697b5dffc60ad6ea53e2ad3f1ce33282a3..8971a26d21abb943d5c9e23b5c6630a207f43536 100644 (file)
@@ -432,6 +432,24 @@ char * __init xen_memory_setup(void)
         *  - mfn_list
         *  - xen_start_info
         * See comment above "struct start_info" in <xen/interface/xen.h>
+        * We tried to make the memblock_reserve more selective so
+        * that it would be clear what region is reserved. Sadly we ran
+        * into the problem wherein on a 64-bit hypervisor with a 32-bit
+        * initial domain, the pt_base has the cr3 value which is not
+        * necessarily where the pagetable starts! As Jan put it: "
+        * Actually, the adjustment turns out to be correct: The page
+        * tables for a 32-on-64 dom0 get allocated in the order "first L1",
+        * "first L2", "first L3", so the offset to the page table base is
+        * indeed 2. When reading xen/include/public/xen.h's comment
+        * very strictly, this is not a violation (since there nothing is said
+        * that the first thing in the page table space is pointed to by
+        * pt_base; I admit that this seems to be implied though, namely
+        * do I think that it is implied that the page table space is the
+        * range [pt_base, pt_base + nt_pt_frames), whereas that
+        * range here indeed is [pt_base - 2, pt_base - 2 + nt_pt_frames),
+        * which - without a priori knowledge - the kernel would have
+        * difficulty to figure out)." - so lets just fall back to the
+        * easy way and reserve the whole region.
         */
        memblock_reserve(__pa(xen_start_info->mfn_list),
                         xen_start_info->pt_base - xen_start_info->mfn_list);
index 1cd7f4d11e298bdf7b55e087602e209df45827ae..6722e3733f02433e75bf2d91f185191e3ada65c0 100644 (file)
@@ -35,6 +35,7 @@ void __init xen_init_vga(const struct dom0_vga_console_info *info, size_t size)
                        info->u.text_mode_3.font_height;
                break;
 
+       case XEN_VGATYPE_EFI_LFB:
        case XEN_VGATYPE_VESA_LFB:
                if (size < offsetof(struct dom0_vga_console_info,
                                    u.vesa_lfb.gbl_caps))
@@ -54,6 +55,12 @@ void __init xen_init_vga(const struct dom0_vga_console_info *info, size_t size)
                screen_info->blue_pos = info->u.vesa_lfb.blue_pos;
                screen_info->rsvd_size = info->u.vesa_lfb.rsvd_size;
                screen_info->rsvd_pos = info->u.vesa_lfb.rsvd_pos;
+
+               if (info->video_type == XEN_VGATYPE_EFI_LFB) {
+                       screen_info->orig_video_isVGA = VIDEO_TYPE_EFI;
+                       break;
+               }
+
                if (size >= offsetof(struct dom0_vga_console_info,
                                     u.vesa_lfb.gbl_caps)
                    + sizeof(info->u.vesa_lfb.gbl_caps))
index aaa7291c9259f55c0fc5ac0aab5f09ff788511a0..7faed5869e5bf215cafe4c0fdbe24ba38fcd66d0 100644 (file)
@@ -28,9 +28,61 @@ ENTRY(startup_xen)
        __FINIT
 
 .pushsection .text
-       .align PAGE_SIZE
+       .balign PAGE_SIZE
 ENTRY(hypercall_page)
-       .skip PAGE_SIZE
+#define NEXT_HYPERCALL(x) \
+       ENTRY(xen_hypercall_##x) \
+       .skip 32
+
+NEXT_HYPERCALL(set_trap_table)
+NEXT_HYPERCALL(mmu_update)
+NEXT_HYPERCALL(set_gdt)
+NEXT_HYPERCALL(stack_switch)
+NEXT_HYPERCALL(set_callbacks)
+NEXT_HYPERCALL(fpu_taskswitch)
+NEXT_HYPERCALL(sched_op_compat)
+NEXT_HYPERCALL(platform_op)
+NEXT_HYPERCALL(set_debugreg)
+NEXT_HYPERCALL(get_debugreg)
+NEXT_HYPERCALL(update_descriptor)
+NEXT_HYPERCALL(ni)
+NEXT_HYPERCALL(memory_op)
+NEXT_HYPERCALL(multicall)
+NEXT_HYPERCALL(update_va_mapping)
+NEXT_HYPERCALL(set_timer_op)
+NEXT_HYPERCALL(event_channel_op_compat)
+NEXT_HYPERCALL(xen_version)
+NEXT_HYPERCALL(console_io)
+NEXT_HYPERCALL(physdev_op_compat)
+NEXT_HYPERCALL(grant_table_op)
+NEXT_HYPERCALL(vm_assist)
+NEXT_HYPERCALL(update_va_mapping_otherdomain)
+NEXT_HYPERCALL(iret)
+NEXT_HYPERCALL(vcpu_op)
+NEXT_HYPERCALL(set_segment_base)
+NEXT_HYPERCALL(mmuext_op)
+NEXT_HYPERCALL(xsm_op)
+NEXT_HYPERCALL(nmi_op)
+NEXT_HYPERCALL(sched_op)
+NEXT_HYPERCALL(callback_op)
+NEXT_HYPERCALL(xenoprof_op)
+NEXT_HYPERCALL(event_channel_op)
+NEXT_HYPERCALL(physdev_op)
+NEXT_HYPERCALL(hvm_op)
+NEXT_HYPERCALL(sysctl)
+NEXT_HYPERCALL(domctl)
+NEXT_HYPERCALL(kexec_op)
+NEXT_HYPERCALL(tmem_op) /* 38 */
+ENTRY(xen_hypercall_rsvr)
+       .skip 320
+NEXT_HYPERCALL(mca) /* 48 */
+NEXT_HYPERCALL(arch_1)
+NEXT_HYPERCALL(arch_2)
+NEXT_HYPERCALL(arch_3)
+NEXT_HYPERCALL(arch_4)
+NEXT_HYPERCALL(arch_5)
+NEXT_HYPERCALL(arch_6)
+       .balign PAGE_SIZE
 .popsection
 
        ELFNOTE(Xen, XEN_ELFNOTE_GUEST_OS,       .asciz "linux")
index 202d4c150154fb31ddb03da8f8144a45f21c02a7..bb5a8105ea8604e6fdb0b2f720b5e0403ceaf100 100644 (file)
@@ -27,7 +27,7 @@ void xen_setup_mfn_list_list(void);
 void xen_setup_shared_info(void);
 void xen_build_mfn_list_list(void);
 void xen_setup_machphys_mapping(void);
-pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn);
+void xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn);
 void xen_reserve_top(void);
 extern unsigned long xen_max_p2m_pfn;
 
@@ -45,6 +45,7 @@ void xen_hvm_init_shared_info(void);
 void xen_unplug_emulated_devices(void);
 
 void __init xen_build_dynamic_phys_to_machine(void);
+unsigned long __init xen_revector_p2m_tree(void);
 
 void xen_init_irq_ops(void);
 void xen_setup_timer(int cpu);
index f3b44a65fc7ad5f127bee8bcbadf5b486a7e5c71..cafcd743118969daec377f52f09e41594d188347 100644 (file)
@@ -737,6 +737,14 @@ struct cgroup_subsys blkio_subsys = {
        .subsys_id = blkio_subsys_id,
        .base_cftypes = blkcg_files,
        .module = THIS_MODULE,
+
+       /*
+        * blkio subsystem is utterly broken in terms of hierarchy support.
+        * It treats all cgroups equally regardless of where they're
+        * located in the hierarchy - all cgroups are treated as if they're
+        * right below the root.  Fix it and remove the following.
+        */
+       .broken_hierarchy = true,
 };
 EXPORT_SYMBOL_GPL(blkio_subsys);
 
index ee3cb3a5e2782292bba4807c0d9cc887de48a31e..d2da64170513caae07726bca207f71ba13065b46 100644 (file)
@@ -262,7 +262,7 @@ EXPORT_SYMBOL(blk_start_queue);
  **/
 void blk_stop_queue(struct request_queue *q)
 {
-       __cancel_delayed_work(&q->delay_work);
+       cancel_delayed_work(&q->delay_work);
        queue_flag_set(QUEUE_FLAG_STOPPED, q);
 }
 EXPORT_SYMBOL(blk_stop_queue);
@@ -319,10 +319,8 @@ EXPORT_SYMBOL(__blk_run_queue);
  */
 void blk_run_queue_async(struct request_queue *q)
 {
-       if (likely(!blk_queue_stopped(q))) {
-               __cancel_delayed_work(&q->delay_work);
-               queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
-       }
+       if (likely(!blk_queue_stopped(q)))
+               mod_delayed_work(kblockd_workqueue, &q->delay_work, 0);
 }
 EXPORT_SYMBOL(blk_run_queue_async);
 
index e287c19908c8a31d3c4d29b1586921066032afa6..a9664fa0b6097ace6a48d48e0992a6d83ed32a9d 100644 (file)
@@ -180,7 +180,7 @@ static inline unsigned int total_nr_queued(struct throtl_data *td)
 
 /*
  * Worker for allocating per cpu stat for tgs. This is scheduled on the
- * system_nrt_wq once there are some groups on the alloc_list waiting for
+ * system_wq once there are some groups on the alloc_list waiting for
  * allocation.
  */
 static void tg_stats_alloc_fn(struct work_struct *work)
@@ -194,8 +194,7 @@ alloc_stats:
                stats_cpu = alloc_percpu(struct tg_stats_cpu);
                if (!stats_cpu) {
                        /* allocation failed, try again after some time */
-                       queue_delayed_work(system_nrt_wq, dwork,
-                                          msecs_to_jiffies(10));
+                       schedule_delayed_work(dwork, msecs_to_jiffies(10));
                        return;
                }
        }
@@ -238,7 +237,7 @@ static void throtl_pd_init(struct blkcg_gq *blkg)
         */
        spin_lock_irqsave(&tg_stats_alloc_lock, flags);
        list_add(&tg->stats_alloc_node, &tg_stats_alloc_list);
-       queue_delayed_work(system_nrt_wq, &tg_stats_alloc_work, 0);
+       schedule_delayed_work(&tg_stats_alloc_work, 0);
        spin_unlock_irqrestore(&tg_stats_alloc_lock, flags);
 }
 
@@ -930,12 +929,7 @@ throtl_schedule_delayed_work(struct throtl_data *td, unsigned long delay)
 
        /* schedule work if limits changed even if no bio is queued */
        if (total_nr_queued(td) || td->limits_changed) {
-               /*
-                * We might have a work scheduled to be executed in future.
-                * Cancel that and schedule a new one.
-                */
-               __cancel_delayed_work(dwork);
-               queue_delayed_work(kthrotld_workqueue, dwork, delay);
+               mod_delayed_work(kthrotld_workqueue, dwork, delay);
                throtl_log(td, "schedule work. delay=%lu jiffies=%lu",
                                delay, jiffies);
        }
index d839723303c856ae221bdf54ec1de93544698d95..6cace663a80e4a89492f8b656f580a944b134f9d 100644 (file)
@@ -1490,9 +1490,9 @@ static void __disk_unblock_events(struct gendisk *disk, bool check_now)
        intv = disk_events_poll_jiffies(disk);
        set_timer_slack(&ev->dwork.timer, intv / 4);
        if (check_now)
-               queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0);
+               queue_delayed_work(system_freezable_wq, &ev->dwork, 0);
        else if (intv)
-               queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, intv);
+               queue_delayed_work(system_freezable_wq, &ev->dwork, intv);
 out_unlock:
        spin_unlock_irqrestore(&ev->lock, flags);
 }
@@ -1534,10 +1534,8 @@ void disk_flush_events(struct gendisk *disk, unsigned int mask)
 
        spin_lock_irq(&ev->lock);
        ev->clearing |= mask;
-       if (!ev->block) {
-               cancel_delayed_work(&ev->dwork);
-               queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0);
-       }
+       if (!ev->block)
+               mod_delayed_work(system_freezable_wq, &ev->dwork, 0);
        spin_unlock_irq(&ev->lock);
 }
 
@@ -1573,7 +1571,7 @@ unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask)
 
        /* uncondtionally schedule event check and wait for it to finish */
        disk_block_events(disk);
-       queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0);
+       queue_delayed_work(system_freezable_wq, &ev->dwork, 0);
        flush_delayed_work(&ev->dwork);
        __disk_unblock_events(disk, false);
 
@@ -1610,7 +1608,7 @@ static void disk_events_workfn(struct work_struct *work)
 
        intv = disk_events_poll_jiffies(disk);
        if (!ev->block && intv)
-               queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, intv);
+               queue_delayed_work(system_freezable_wq, &ev->dwork, intv);
 
        spin_unlock_irq(&ev->lock);
 
index a3238051b03e73b2ab3f79e267b1e47a8716dc74..957cc56ce4b90296a2ef83531487e72d755db554 100644 (file)
@@ -336,6 +336,15 @@ config CRYPTO_CRC32C_INTEL
          gain performance compared with software implementation.
          Module will be crc32c-intel.
 
+config CRYPTO_CRC32C_SPARC64
+       tristate "CRC32c CRC algorithm (SPARC64)"
+       depends on SPARC64
+       select CRYPTO_HASH
+       select CRC32
+       help
+         CRC32c CRC algorithm implemented using sparc64 crypto instructions,
+         when available.
+
 config CRYPTO_GHASH
        tristate "GHASH digest algorithm"
        select CRYPTO_GF128MUL
@@ -354,6 +363,15 @@ config CRYPTO_MD5
        help
          MD5 message digest algorithm (RFC1321).
 
+config CRYPTO_MD5_SPARC64
+       tristate "MD5 digest algorithm (SPARC64)"
+       depends on SPARC64
+       select CRYPTO_MD5
+       select CRYPTO_HASH
+       help
+         MD5 message digest algorithm (RFC1321) implemented
+         using sparc64 crypto instructions, when available.
+
 config CRYPTO_MICHAEL_MIC
        tristate "Michael MIC keyed digest algorithm"
        select CRYPTO_HASH
@@ -433,6 +451,15 @@ config CRYPTO_SHA1_SSSE3
          using Supplemental SSE3 (SSSE3) instructions or Advanced Vector
          Extensions (AVX), when available.
 
+config CRYPTO_SHA1_SPARC64
+       tristate "SHA1 digest algorithm (SPARC64)"
+       depends on SPARC64
+       select CRYPTO_SHA1
+       select CRYPTO_HASH
+       help
+         SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2) implemented
+         using sparc64 crypto instructions, when available.
+
 config CRYPTO_SHA256
        tristate "SHA224 and SHA256 digest algorithm"
        select CRYPTO_HASH
@@ -445,6 +472,15 @@ config CRYPTO_SHA256
          This code also includes SHA-224, a 224 bit hash with 112 bits
          of security against collision attacks.
 
+config CRYPTO_SHA256_SPARC64
+       tristate "SHA224 and SHA256 digest algorithm (SPARC64)"
+       depends on SPARC64
+       select CRYPTO_SHA256
+       select CRYPTO_HASH
+       help
+         SHA-256 secure hash standard (DFIPS 180-2) implemented
+         using sparc64 crypto instructions, when available.
+
 config CRYPTO_SHA512
        tristate "SHA384 and SHA512 digest algorithms"
        select CRYPTO_HASH
@@ -457,6 +493,15 @@ config CRYPTO_SHA512
          This code also includes SHA-384, a 384 bit hash with 192 bits
          of security against collision attacks.
 
+config CRYPTO_SHA512_SPARC64
+       tristate "SHA384 and SHA512 digest algorithm (SPARC64)"
+       depends on SPARC64
+       select CRYPTO_SHA512
+       select CRYPTO_HASH
+       help
+         SHA-512 secure hash standard (DFIPS 180-2) implemented
+         using sparc64 crypto instructions, when available.
+
 config CRYPTO_TGR192
        tristate "Tiger digest algorithms"
        select CRYPTO_HASH
@@ -588,6 +633,34 @@ config CRYPTO_AES_NI_INTEL
          ECB, CBC, LRW, PCBC, XTS. The 64 bit version has additional
          acceleration for CTR.
 
+config CRYPTO_AES_SPARC64
+       tristate "AES cipher algorithms (SPARC64)"
+       depends on SPARC64
+       select CRYPTO_CRYPTD
+       select CRYPTO_ALGAPI
+       help
+         Use SPARC64 crypto opcodes for AES algorithm.
+
+         AES cipher algorithms (FIPS-197). AES uses the Rijndael
+         algorithm.
+
+         Rijndael appears to be consistently a very good performer in
+         both hardware and software across a wide range of computing
+         environments regardless of its use in feedback or non-feedback
+         modes. Its key setup time is excellent, and its key agility is
+         good. Rijndael's very low memory requirements make it very well
+         suited for restricted-space environments, in which it also
+         demonstrates excellent performance. Rijndael's operations are
+         among the easiest to defend against power and timing attacks.
+
+         The AES specifies three key sizes: 128, 192 and 256 bits
+
+         See <http://csrc.nist.gov/encryption/aes/> for more information.
+
+         In addition to AES cipher algorithm support, the acceleration
+         for some popular block cipher mode is supported too, including
+         ECB and CBC.
+
 config CRYPTO_ANUBIS
        tristate "Anubis cipher algorithm"
        select CRYPTO_ALGAPI
@@ -685,6 +758,22 @@ config CRYPTO_CAMELLIA_X86_64
          See also:
          <https://info.isl.ntt.co.jp/crypt/eng/camellia/index_s.html>
 
+config CRYPTO_CAMELLIA_SPARC64
+       tristate "Camellia cipher algorithm (SPARC64)"
+       depends on SPARC64
+       depends on CRYPTO
+       select CRYPTO_ALGAPI
+       help
+         Camellia cipher algorithm module (SPARC64).
+
+         Camellia is a symmetric key block cipher developed jointly
+         at NTT and Mitsubishi Electric Corporation.
+
+         The Camellia specifies three key sizes: 128, 192 and 256 bits.
+
+         See also:
+         <https://info.isl.ntt.co.jp/crypt/eng/camellia/index_s.html>
+
 config CRYPTO_CAST5
        tristate "CAST5 (CAST-128) cipher algorithm"
        select CRYPTO_ALGAPI
@@ -705,6 +794,15 @@ config CRYPTO_DES
        help
          DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3).
 
+config CRYPTO_DES_SPARC64
+       tristate "DES and Triple DES EDE cipher algorithms (SPARC64)"
+       depends on SPARC64
+       select CRYPTO_ALGAPI
+       select CRYPTO_DES
+       help
+         DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3),
+         optimized using SPARC64 crypto opcodes.
+
 config CRYPTO_FCRYPT
        tristate "FCrypt cipher algorithm"
        select CRYPTO_ALGAPI
index ba2c611154af5e0df373a04b22eed782da0daac5..6bba414d0c619d1fba31c1136aea411a09a930c0 100644 (file)
@@ -166,7 +166,7 @@ static int crypto_report_alg(struct crypto_alg *alg,
        struct crypto_user_alg *ualg;
        int err = 0;
 
-       nlh = nlmsg_put(skb, NETLINK_CB(in_skb).pid, info->nlmsg_seq,
+       nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, info->nlmsg_seq,
                        CRYPTO_MSG_GETALG, sizeof(*ualg), info->nlmsg_flags);
        if (!nlh) {
                err = -EMSGSIZE;
@@ -216,7 +216,7 @@ static int crypto_report(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
        if (err)
                return err;
 
-       return nlmsg_unicast(crypto_nlsk, skb, NETLINK_CB(in_skb).pid);
+       return nlmsg_unicast(crypto_nlsk, skb, NETLINK_CB(in_skb).portid);
 }
 
 static int crypto_dump_report(struct sk_buff *skb, struct netlink_callback *cb)
@@ -500,8 +500,7 @@ static int __init crypto_user_init(void)
                .input  = crypto_netlink_rcv,
        };
 
-       crypto_nlsk = netlink_kernel_create(&init_net, NETLINK_CRYPTO,
-                                           THIS_MODULE, &cfg);
+       crypto_nlsk = netlink_kernel_create(&init_net, NETLINK_CRYPTO, &cfg);
        if (!crypto_nlsk)
                return -ENOMEM;
 
index acb48fa4531cd9e707643717b88bd37104d60a81..03da5b663aef8ea6c2991484b98cb315a16b87db 100644 (file)
@@ -123,7 +123,6 @@ obj-$(CONFIG_VHOST_NET)             += vhost/
 obj-$(CONFIG_VLYNQ)            += vlynq/
 obj-$(CONFIG_STAGING)          += staging/
 obj-y                          += platform/
-obj-y                          += ieee802154/
 #common clk code
 obj-y                          += clk/
 
index bfc31cb0dd3eac96292af65535c2ee4554cdbf10..e78c2a52ea46665fe3ea31a7e79ba7f5cc5c38b2 100644 (file)
@@ -475,7 +475,7 @@ static __ref int acpi_processor_start(struct acpi_processor *pr)
        acpi_processor_get_limit_info(pr);
 
        if (!cpuidle_get_driver() || cpuidle_get_driver() == &acpi_idle_driver)
-               acpi_processor_power_init(pr, device);
+               acpi_processor_power_init(pr);
 
        pr->cdev = thermal_cooling_device_register("Processor", device,
                                                   &processor_cooling_ops);
@@ -509,7 +509,7 @@ err_remove_sysfs_thermal:
 err_thermal_unregister:
        thermal_cooling_device_unregister(pr->cdev);
 err_power_exit:
-       acpi_processor_power_exit(pr, device);
+       acpi_processor_power_exit(pr);
 
        return result;
 }
@@ -620,7 +620,7 @@ static int acpi_processor_remove(struct acpi_device *device, int type)
                        return -EINVAL;
        }
 
-       acpi_processor_power_exit(pr, device);
+       acpi_processor_power_exit(pr);
 
        sysfs_remove_link(&device->dev.kobj, "sysdev");
 
@@ -905,8 +905,6 @@ static int __init acpi_processor_init(void)
        if (acpi_disabled)
                return 0;
 
-       memset(&errata, 0, sizeof(errata));
-
        result = acpi_bus_register_driver(&acpi_processor_driver);
        if (result < 0)
                return result;
index ad3730b4038bdfb7b2dc24fb3b396abfdf0a5703..3655ab923812c10501e86cde8b166cb18be70065 100644 (file)
@@ -79,6 +79,8 @@ module_param(bm_check_disable, uint, 0000);
 static unsigned int latency_factor __read_mostly = 2;
 module_param(latency_factor, uint, 0644);
 
+static DEFINE_PER_CPU(struct cpuidle_device *, acpi_cpuidle_device);
+
 static int disabled_by_idle_boot_param(void)
 {
        return boot_option_idle_override == IDLE_POLL ||
@@ -483,8 +485,6 @@ static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
                if (obj->type != ACPI_TYPE_INTEGER)
                        continue;
 
-               cx.power = obj->integer.value;
-
                current_count++;
                memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx));
 
@@ -1000,7 +1000,7 @@ static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr)
        int i, count = CPUIDLE_DRIVER_STATE_START;
        struct acpi_processor_cx *cx;
        struct cpuidle_state_usage *state_usage;
-       struct cpuidle_device *dev = &pr->power.dev;
+       struct cpuidle_device *dev = per_cpu(acpi_cpuidle_device, pr->id);
 
        if (!pr->flags.power_setup_done)
                return -EINVAL;
@@ -1132,6 +1132,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
 int acpi_processor_hotplug(struct acpi_processor *pr)
 {
        int ret = 0;
+       struct cpuidle_device *dev = per_cpu(acpi_cpuidle_device, pr->id);
 
        if (disabled_by_idle_boot_param())
                return 0;
@@ -1147,11 +1148,11 @@ int acpi_processor_hotplug(struct acpi_processor *pr)
                return -ENODEV;
 
        cpuidle_pause_and_lock();
-       cpuidle_disable_device(&pr->power.dev);
+       cpuidle_disable_device(dev);
        acpi_processor_get_power_info(pr);
        if (pr->flags.power) {
                acpi_processor_setup_cpuidle_cx(pr);
-               ret = cpuidle_enable_device(&pr->power.dev);
+               ret = cpuidle_enable_device(dev);
        }
        cpuidle_resume_and_unlock();
 
@@ -1162,6 +1163,7 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
 {
        int cpu;
        struct acpi_processor *_pr;
+       struct cpuidle_device *dev;
 
        if (disabled_by_idle_boot_param())
                return 0;
@@ -1192,7 +1194,8 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
                        _pr = per_cpu(processors, cpu);
                        if (!_pr || !_pr->flags.power_setup_done)
                                continue;
-                       cpuidle_disable_device(&_pr->power.dev);
+                       dev = per_cpu(acpi_cpuidle_device, cpu);
+                       cpuidle_disable_device(dev);
                }
 
                /* Populate Updated C-state information */
@@ -1206,7 +1209,8 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
                        acpi_processor_get_power_info(_pr);
                        if (_pr->flags.power) {
                                acpi_processor_setup_cpuidle_cx(_pr);
-                               cpuidle_enable_device(&_pr->power.dev);
+                               dev = per_cpu(acpi_cpuidle_device, cpu);
+                               cpuidle_enable_device(dev);
                        }
                }
                put_online_cpus();
@@ -1218,11 +1222,11 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
 
 static int acpi_processor_registered;
 
-int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
-                             struct acpi_device *device)
+int __cpuinit acpi_processor_power_init(struct acpi_processor *pr)
 {
        acpi_status status = 0;
        int retval;
+       struct cpuidle_device *dev;
        static int first_run;
 
        if (disabled_by_idle_boot_param())
@@ -1268,11 +1272,18 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
                        printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n",
                                        acpi_idle_driver.name);
                }
+
+               dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+               if (!dev)
+                       return -ENOMEM;
+               per_cpu(acpi_cpuidle_device, pr->id) = dev;
+
+               acpi_processor_setup_cpuidle_cx(pr);
+
                /* Register per-cpu cpuidle_device. Cpuidle driver
                 * must already be registered before registering device
                 */
-               acpi_processor_setup_cpuidle_cx(pr);
-               retval = cpuidle_register_device(&pr->power.dev);
+               retval = cpuidle_register_device(dev);
                if (retval) {
                        if (acpi_processor_registered == 0)
                                cpuidle_unregister_driver(&acpi_idle_driver);
@@ -1283,14 +1294,15 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
        return 0;
 }
 
-int acpi_processor_power_exit(struct acpi_processor *pr,
-                             struct acpi_device *device)
+int acpi_processor_power_exit(struct acpi_processor *pr)
 {
+       struct cpuidle_device *dev = per_cpu(acpi_cpuidle_device, pr->id);
+
        if (disabled_by_idle_boot_param())
                return 0;
 
        if (pr->flags.power) {
-               cpuidle_unregister_device(&pr->power.dev);
+               cpuidle_unregister_device(dev);
                acpi_processor_registered--;
                if (acpi_processor_registered == 0)
                        cpuidle_unregister_driver(&acpi_idle_driver);
index a093dc163a42a8b677d9ccb7b27a61d0427c3f49..836bfe0690422855c80ecfe41895a4fe667bfd14 100644 (file)
@@ -324,6 +324,34 @@ static int acpi_processor_get_performance_control(struct acpi_processor *pr)
        return result;
 }
 
+#ifdef CONFIG_X86
+/*
+ * Some AMDs have 50MHz frequency multiples, but only provide 100MHz rounding
+ * in their ACPI data. Calculate the real values and fix up the _PSS data.
+ */
+static void amd_fixup_frequency(struct acpi_processor_px *px, int i)
+{
+       u32 hi, lo, fid, did;
+       int index = px->control & 0x00000007;
+
+       if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
+               return;
+
+       if ((boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model < 10)
+           || boot_cpu_data.x86 == 0x11) {
+               rdmsr(MSR_AMD_PSTATE_DEF_BASE + index, lo, hi);
+               fid = lo & 0x3f;
+               did = (lo >> 6) & 7;
+               if (boot_cpu_data.x86 == 0x10)
+                       px->core_frequency = (100 * (fid + 0x10)) >> did;
+               else
+                       px->core_frequency = (100 * (fid + 8)) >> did;
+       }
+}
+#else
+static void amd_fixup_frequency(struct acpi_processor_px *px, int i) {};
+#endif
+
 static int acpi_processor_get_performance_states(struct acpi_processor *pr)
 {
        int result = 0;
@@ -379,6 +407,8 @@ static int acpi_processor_get_performance_states(struct acpi_processor *pr)
                        goto end;
                }
 
+               amd_fixup_frequency(px, i);
+
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "State [%d]: core_frequency[%d] power[%d] transition_latency[%d] bus_master_latency[%d] control[0x%x] status[0x%x]\n",
                                  i,
index 27cecd313e7588386960244548b391bfddac2a55..e08d322d01d7a483e61829ce369d4d095551d5b1 100644 (file)
@@ -214,6 +214,14 @@ config SATA_DWC_VDEBUG
        help
          This option enables the taskfile dumping and NCQ debugging.
 
+config SATA_HIGHBANK
+       tristate "Calxeda Highbank SATA support"
+       help
+         This option enables support for the Calxeda Highbank SoC's
+         onboard SATA.
+
+         If unsure, say N.
+
 config SATA_MV
        tristate "Marvell SATA support"
        help
index a454a139b1d20531f899897ab6fcc7b689e44556..9329dafba91b0c92dcab192e36c1f5d337d19a2a 100644 (file)
@@ -9,6 +9,7 @@ obj-$(CONFIG_SATA_FSL)          += sata_fsl.o
 obj-$(CONFIG_SATA_INIC162X)    += sata_inic162x.o
 obj-$(CONFIG_SATA_SIL24)       += sata_sil24.o
 obj-$(CONFIG_SATA_DWC)         += sata_dwc_460ex.o
+obj-$(CONFIG_SATA_HIGHBANK)    += sata_highbank.o libahci.o
 
 # SFF w/ custom DMA
 obj-$(CONFIG_PDC_ADMA)         += pdc_adma.o
index 57eb1c212a4ce8ee267d36b3f920d3011f96b832..9be471200a07657f1f655ea31d805e0d6223c24b 100644 (file)
@@ -35,6 +35,7 @@
 #ifndef _AHCI_H
 #define _AHCI_H
 
+#include <linux/clk.h>
 #include <linux/libata.h>
 
 /* Enclosure Management Control */
@@ -115,6 +116,9 @@ enum {
        HOST_CAP2_BOH           = (1 << 0),  /* BIOS/OS handoff supported */
        HOST_CAP2_NVMHCI        = (1 << 1),  /* NVMHCI supported */
        HOST_CAP2_APST          = (1 << 2),  /* Automatic partial to slumber */
+       HOST_CAP2_SDS           = (1 << 3),  /* Support device sleep */
+       HOST_CAP2_SADM          = (1 << 4),  /* Support aggressive DevSlp */
+       HOST_CAP2_DESO          = (1 << 5),  /* DevSlp from slumber only */
 
        /* registers for each SATA port */
        PORT_LST_ADDR           = 0x00, /* command list DMA addr */
@@ -133,6 +137,7 @@ enum {
        PORT_SCR_ACT            = 0x34, /* SATA phy register: SActive */
        PORT_SCR_NTF            = 0x3c, /* SATA phy register: SNotification */
        PORT_FBS                = 0x40, /* FIS-based Switching */
+       PORT_DEVSLP             = 0x44, /* device sleep */
 
        /* PORT_IRQ_{STAT,MASK} bits */
        PORT_IRQ_COLD_PRES      = (1 << 31), /* cold presence detect */
@@ -186,6 +191,7 @@ enum {
        PORT_CMD_ICC_PARTIAL    = (0x2 << 28), /* Put i/f in partial state */
        PORT_CMD_ICC_SLUMBER    = (0x6 << 28), /* Put i/f in slumber state */
 
+       /* PORT_FBS bits */
        PORT_FBS_DWE_OFFSET     = 16, /* FBS device with error offset */
        PORT_FBS_ADO_OFFSET     = 12, /* FBS active dev optimization offset */
        PORT_FBS_DEV_OFFSET     = 8,  /* FBS device to issue offset */
@@ -194,6 +200,15 @@ enum {
        PORT_FBS_DEC            = (1 << 1), /* FBS device error clear */
        PORT_FBS_EN             = (1 << 0), /* Enable FBS */
 
+       /* PORT_DEVSLP bits */
+       PORT_DEVSLP_DM_OFFSET   = 25,             /* DITO multiplier offset */
+       PORT_DEVSLP_DM_MASK     = (0xf << 25),    /* DITO multiplier mask */
+       PORT_DEVSLP_DITO_OFFSET = 15,             /* DITO offset */
+       PORT_DEVSLP_MDAT_OFFSET = 10,             /* Minimum assertion time */
+       PORT_DEVSLP_DETO_OFFSET = 2,              /* DevSlp exit timeout */
+       PORT_DEVSLP_DSP         = (1 << 1),       /* DevSlp present */
+       PORT_DEVSLP_ADSE        = (1 << 0),       /* Aggressive DevSlp enable */
+
        /* hpriv->flags bits */
 
 #define AHCI_HFLAGS(flags)             .private_data   = (void *)(flags)
@@ -302,6 +317,7 @@ struct ahci_host_priv {
        u32                     em_loc; /* enclosure management location */
        u32                     em_buf_sz;      /* EM buffer size in byte */
        u32                     em_msg_type;    /* EM message type */
+       struct clk              *clk;           /* Only for platforms supporting clk */
 };
 
 extern int ahci_ignore_sss;
index 09728e09cb3138de9c9df006f0f98b9bb1b24896..b1ae48054dc5773eab42917d5e9d10f9764602ae 100644 (file)
@@ -12,6 +12,7 @@
  * any later version.
  */
 
+#include <linux/clk.h>
 #include <linux/kernel.h>
 #include <linux/gfp.h>
 #include <linux/module.h>
@@ -118,6 +119,17 @@ static int __init ahci_probe(struct platform_device *pdev)
                return -ENOMEM;
        }
 
+       hpriv->clk = clk_get(dev, NULL);
+       if (IS_ERR(hpriv->clk)) {
+               dev_err(dev, "can't get clock\n");
+       } else {
+               rc = clk_prepare_enable(hpriv->clk);
+               if (rc) {
+                       dev_err(dev, "clock prepare enable failed");
+                       goto free_clk;
+               }
+       }
+
        /*
         * Some platforms might need to prepare for mmio region access,
         * which could be done in the following init call. So, the mmio
@@ -127,7 +139,7 @@ static int __init ahci_probe(struct platform_device *pdev)
        if (pdata && pdata->init) {
                rc = pdata->init(dev, hpriv->mmio);
                if (rc)
-                       return rc;
+                       goto disable_unprepare_clk;
        }
 
        ahci_save_initial_config(dev, hpriv,
@@ -153,7 +165,7 @@ static int __init ahci_probe(struct platform_device *pdev)
        host = ata_host_alloc_pinfo(dev, ppi, n_ports);
        if (!host) {
                rc = -ENOMEM;
-               goto err0;
+               goto pdata_exit;
        }
 
        host->private_data = hpriv;
@@ -183,7 +195,7 @@ static int __init ahci_probe(struct platform_device *pdev)
 
        rc = ahci_reset_controller(host);
        if (rc)
-               goto err0;
+               goto pdata_exit;
 
        ahci_init_controller(host);
        ahci_print_info(host, "platform");
@@ -191,12 +203,18 @@ static int __init ahci_probe(struct platform_device *pdev)
        rc = ata_host_activate(host, irq, ahci_interrupt, IRQF_SHARED,
                               &ahci_platform_sht);
        if (rc)
-               goto err0;
+               goto pdata_exit;
 
        return 0;
-err0:
+pdata_exit:
        if (pdata && pdata->exit)
                pdata->exit(dev);
+disable_unprepare_clk:
+       if (!IS_ERR(hpriv->clk))
+               clk_disable_unprepare(hpriv->clk);
+free_clk:
+       if (!IS_ERR(hpriv->clk))
+               clk_put(hpriv->clk);
        return rc;
 }
 
@@ -205,12 +223,18 @@ static int __devexit ahci_remove(struct platform_device *pdev)
        struct device *dev = &pdev->dev;
        struct ahci_platform_data *pdata = dev_get_platdata(dev);
        struct ata_host *host = dev_get_drvdata(dev);
+       struct ahci_host_priv *hpriv = host->private_data;
 
        ata_host_detach(host);
 
        if (pdata && pdata->exit)
                pdata->exit(dev);
 
+       if (!IS_ERR(hpriv->clk)) {
+               clk_disable_unprepare(hpriv->clk);
+               clk_put(hpriv->clk);
+       }
+
        return 0;
 }
 
@@ -245,6 +269,10 @@ static int ahci_suspend(struct device *dev)
 
        if (pdata && pdata->suspend)
                return pdata->suspend(dev);
+
+       if (!IS_ERR(hpriv->clk))
+               clk_disable_unprepare(hpriv->clk);
+
        return 0;
 }
 
@@ -252,18 +280,27 @@ static int ahci_resume(struct device *dev)
 {
        struct ahci_platform_data *pdata = dev_get_platdata(dev);
        struct ata_host *host = dev_get_drvdata(dev);
+       struct ahci_host_priv *hpriv = host->private_data;
        int rc;
 
+       if (!IS_ERR(hpriv->clk)) {
+               rc = clk_prepare_enable(hpriv->clk);
+               if (rc) {
+                       dev_err(dev, "clock prepare enable failed");
+                       return rc;
+               }
+       }
+
        if (pdata && pdata->resume) {
                rc = pdata->resume(dev);
                if (rc)
-                       return rc;
+                       goto disable_unprepare_clk;
        }
 
        if (dev->power.power_state.event == PM_EVENT_SUSPEND) {
                rc = ahci_reset_controller(host);
                if (rc)
-                       return rc;
+                       goto disable_unprepare_clk;
 
                ahci_init_controller(host);
        }
@@ -271,13 +308,18 @@ static int ahci_resume(struct device *dev)
        ata_host_resume(host);
 
        return 0;
+
+disable_unprepare_clk:
+       if (!IS_ERR(hpriv->clk))
+               clk_disable_unprepare(hpriv->clk);
+
+       return rc;
 }
 #endif
 
 SIMPLE_DEV_PM_OPS(ahci_pm_ops, ahci_suspend, ahci_resume);
 
 static const struct of_device_id ahci_of_match[] = {
-       { .compatible = "calxeda,hb-ahci", },
        { .compatible = "snps,spear-ahci", },
        {},
 };
index 555c07afa05bc6f82d6cc0357684f1f8e8b9f8da..4201e535a8c897ce0acf5912f569f6c74448e7ec 100644 (file)
@@ -45,6 +45,7 @@
 #include <scsi/scsi_cmnd.h>
 #include <linux/libata.h>
 #include "ahci.h"
+#include "libata.h"
 
 static int ahci_skip_host_reset;
 int ahci_ignore_sss;
@@ -76,6 +77,7 @@ static void ahci_qc_prep(struct ata_queued_cmd *qc);
 static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc);
 static void ahci_freeze(struct ata_port *ap);
 static void ahci_thaw(struct ata_port *ap);
+static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep);
 static void ahci_enable_fbs(struct ata_port *ap);
 static void ahci_disable_fbs(struct ata_port *ap);
 static void ahci_pmp_attach(struct ata_port *ap);
@@ -193,6 +195,10 @@ module_param(ahci_em_messages, int, 0444);
 MODULE_PARM_DESC(ahci_em_messages,
        "AHCI Enclosure Management Message control (0 = off, 1 = on)");
 
+int devslp_idle_timeout = 1000;        /* device sleep idle timeout in ms */
+module_param(devslp_idle_timeout, int, 0644);
+MODULE_PARM_DESC(devslp_idle_timeout, "device sleep idle timeout");
+
 static void ahci_enable_ahci(void __iomem *mmio)
 {
        int i;
@@ -702,6 +708,16 @@ static int ahci_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
                }
        }
 
+       /* set aggressive device sleep */
+       if ((hpriv->cap2 & HOST_CAP2_SDS) &&
+           (hpriv->cap2 & HOST_CAP2_SADM) &&
+           (link->device->flags & ATA_DFLAG_DEVSLP)) {
+               if (policy == ATA_LPM_MIN_POWER)
+                       ahci_set_aggressive_devslp(ap, true);
+               else
+                       ahci_set_aggressive_devslp(ap, false);
+       }
+
        if (policy == ATA_LPM_MAX_POWER) {
                sata_link_scr_lpm(link, policy, false);
 
@@ -1890,6 +1906,81 @@ static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
                ahci_kick_engine(ap);
 }
 
+static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep)
+{
+       void __iomem *port_mmio = ahci_port_base(ap);
+       struct ata_device *dev = ap->link.device;
+       u32 devslp, dm, dito, mdat, deto;
+       int rc;
+       unsigned int err_mask;
+
+       devslp = readl(port_mmio + PORT_DEVSLP);
+       if (!(devslp & PORT_DEVSLP_DSP)) {
+               dev_err(ap->host->dev, "port does not support device sleep\n");
+               return;
+       }
+
+       /* disable device sleep */
+       if (!sleep) {
+               if (devslp & PORT_DEVSLP_ADSE) {
+                       writel(devslp & ~PORT_DEVSLP_ADSE,
+                              port_mmio + PORT_DEVSLP);
+                       err_mask = ata_dev_set_feature(dev,
+                                                      SETFEATURES_SATA_DISABLE,
+                                                      SATA_DEVSLP);
+                       if (err_mask && err_mask != AC_ERR_DEV)
+                               ata_dev_warn(dev, "failed to disable DEVSLP\n");
+               }
+               return;
+       }
+
+       /* device sleep was already enabled */
+       if (devslp & PORT_DEVSLP_ADSE)
+               return;
+
+       /* set DITO, MDAT, DETO and enable DevSlp, need to stop engine first */
+       rc = ahci_stop_engine(ap);
+       if (rc)
+               return;
+
+       dm = (devslp & PORT_DEVSLP_DM_MASK) >> PORT_DEVSLP_DM_OFFSET;
+       dito = devslp_idle_timeout / (dm + 1);
+       if (dito > 0x3ff)
+               dito = 0x3ff;
+
+       /* Use the nominal value 10 ms if the read MDAT is zero,
+        * the nominal value of DETO is 20 ms.
+        */
+       if (dev->sata_settings[ATA_LOG_DEVSLP_VALID] &
+           ATA_LOG_DEVSLP_VALID_MASK) {
+               mdat = dev->sata_settings[ATA_LOG_DEVSLP_MDAT] &
+                      ATA_LOG_DEVSLP_MDAT_MASK;
+               if (!mdat)
+                       mdat = 10;
+               deto = dev->sata_settings[ATA_LOG_DEVSLP_DETO];
+               if (!deto)
+                       deto = 20;
+       } else {
+               mdat = 10;
+               deto = 20;
+       }
+
+       devslp |= ((dito << PORT_DEVSLP_DITO_OFFSET) |
+                  (mdat << PORT_DEVSLP_MDAT_OFFSET) |
+                  (deto << PORT_DEVSLP_DETO_OFFSET) |
+                  PORT_DEVSLP_ADSE);
+       writel(devslp, port_mmio + PORT_DEVSLP);
+
+       ahci_start_engine(ap);
+
+       /* enable device sleep feature for the drive */
+       err_mask = ata_dev_set_feature(dev,
+                                      SETFEATURES_SATA_ENABLE,
+                                      SATA_DEVSLP);
+       if (err_mask && err_mask != AC_ERR_DEV)
+               ata_dev_warn(dev, "failed to enable DEVSLP\n");
+}
+
 static void ahci_enable_fbs(struct ata_port *ap)
 {
        struct ahci_port_priv *pp = ap->private_data;
@@ -2164,7 +2255,8 @@ void ahci_print_info(struct ata_host *host, const char *scc_s)
                "flags: "
                "%s%s%s%s%s%s%s"
                "%s%s%s%s%s%s%s"
-               "%s%s%s%s%s%s\n"
+               "%s%s%s%s%s%s%s"
+               "%s%s\n"
                ,
 
                cap & HOST_CAP_64 ? "64bit " : "",
@@ -2184,6 +2276,9 @@ void ahci_print_info(struct ata_host *host, const char *scc_s)
                cap & HOST_CAP_CCC ? "ccc " : "",
                cap & HOST_CAP_EMS ? "ems " : "",
                cap & HOST_CAP_SXS ? "sxs " : "",
+               cap2 & HOST_CAP2_DESO ? "deso " : "",
+               cap2 & HOST_CAP2_SADM ? "sadm " : "",
+               cap2 & HOST_CAP2_SDS ? "sds " : "",
                cap2 & HOST_CAP2_APST ? "apst " : "",
                cap2 & HOST_CAP2_NVMHCI ? "nvmp " : "",
                cap2 & HOST_CAP2_BOH ? "boh " : ""
index 8e1039c8e15975aced4e6a359dc4a0451a58bc4b..3cc7096cfda758e9b3ffd514aaf6e70928ec7f8d 100644 (file)
@@ -774,7 +774,7 @@ int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
                tf->lbam = (block >> 8) & 0xff;
                tf->lbal = block & 0xff;
 
-               tf->device = 1 << 6;
+               tf->device = ATA_LBA;
                if (tf->flags & ATA_TFLAG_FUA)
                        tf->device |= 1 << 7;
        } else if (dev->flags & ATA_DFLAG_LBA) {
@@ -2155,6 +2155,7 @@ int ata_dev_configure(struct ata_device *dev)
        int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
        const u16 *id = dev->id;
        unsigned long xfer_mask;
+       unsigned int err_mask;
        char revbuf[7];         /* XYZ-99\0 */
        char fwrevbuf[ATA_ID_FW_REV_LEN+1];
        char modelbuf[ATA_ID_PROD_LEN+1];
@@ -2323,6 +2324,26 @@ int ata_dev_configure(struct ata_device *dev)
                        }
                }
 
+               /* check and mark DevSlp capability */
+               if (ata_id_has_devslp(dev->id))
+                       dev->flags |= ATA_DFLAG_DEVSLP;
+
+               /* Obtain SATA Settings page from Identify Device Data Log,
+                * which contains DevSlp timing variables etc.
+                * Exclude old devices with ata_id_has_ncq()
+                */
+               if (ata_id_has_ncq(dev->id)) {
+                       err_mask = ata_read_log_page(dev,
+                                                    ATA_LOG_SATA_ID_DEV_DATA,
+                                                    ATA_LOG_SATA_SETTINGS,
+                                                    dev->sata_settings,
+                                                    1);
+                       if (err_mask)
+                               ata_dev_dbg(dev,
+                                           "failed to get Identify Device Data, Emask 0x%x\n",
+                                           err_mask);
+               }
+
                dev->cdb_len = 16;
        }
 
@@ -2351,8 +2372,6 @@ int ata_dev_configure(struct ata_device *dev)
                    (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
                    (!sata_pmp_attached(ap) ||
                     sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
-                       unsigned int err_mask;
-
                        /* issue SET feature command to turn this on */
                        err_mask = ata_dev_set_feature(dev,
                                        SETFEATURES_SATA_ENABLE, SATA_AN);
@@ -3598,7 +3617,7 @@ int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,
        switch (policy) {
        case ATA_LPM_MAX_POWER:
                /* disable all LPM transitions */
-               scontrol |= (0x3 << 8);
+               scontrol |= (0x7 << 8);
                /* initiate transition to active state */
                if (spm_wakeup) {
                        scontrol |= (0x4 << 12);
@@ -3608,12 +3627,12 @@ int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,
        case ATA_LPM_MED_POWER:
                /* allow LPM to PARTIAL */
                scontrol &= ~(0x1 << 8);
-               scontrol |= (0x2 << 8);
+               scontrol |= (0x6 << 8);
                break;
        case ATA_LPM_MIN_POWER:
                if (ata_link_nr_enabled(link) > 0)
                        /* no restrictions on LPM transitions */
-                       scontrol &= ~(0x3 << 8);
+                       scontrol &= ~(0x7 << 8);
                else {
                        /* empty port, power off */
                        scontrol &= ~0xf;
@@ -4472,6 +4491,7 @@ unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, u8 feature)
        DPRINTK("EXIT, err_mask=%x\n", err_mask);
        return err_mask;
 }
+EXPORT_SYMBOL_GPL(ata_dev_set_feature);
 
 /**
  *     ata_dev_init_params - Issue INIT DEV PARAMS command
@@ -5253,16 +5273,20 @@ bool ata_link_offline(struct ata_link *link)
 #ifdef CONFIG_PM
 static int ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
                               unsigned int action, unsigned int ehi_flags,
-                              int wait)
+                              int *async)
 {
        struct ata_link *link;
        unsigned long flags;
-       int rc;
+       int rc = 0;
 
        /* Previous resume operation might still be in
         * progress.  Wait for PM_PENDING to clear.
         */
        if (ap->pflags & ATA_PFLAG_PM_PENDING) {
+               if (async) {
+                       *async = -EAGAIN;
+                       return 0;
+               }
                ata_port_wait_eh(ap);
                WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
        }
@@ -5271,10 +5295,10 @@ static int ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
        spin_lock_irqsave(ap->lock, flags);
 
        ap->pm_mesg = mesg;
-       if (wait) {
-               rc = 0;
+       if (async)
+               ap->pm_result = async;
+       else
                ap->pm_result = &rc;
-       }
 
        ap->pflags |= ATA_PFLAG_PM_PENDING;
        ata_for_each_link(link, ap, HOST_FIRST) {
@@ -5287,7 +5311,7 @@ static int ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
        spin_unlock_irqrestore(ap->lock, flags);
 
        /* wait and check result */
-       if (wait) {
+       if (!async) {
                ata_port_wait_eh(ap);
                WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
        }
@@ -5295,9 +5319,8 @@ static int ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
        return rc;
 }
 
-static int ata_port_suspend_common(struct device *dev, pm_message_t mesg)
+static int __ata_port_suspend_common(struct ata_port *ap, pm_message_t mesg, int *async)
 {
-       struct ata_port *ap = to_ata_port(dev);
        unsigned int ehi_flags = ATA_EHI_QUIET;
        int rc;
 
@@ -5312,10 +5335,17 @@ static int ata_port_suspend_common(struct device *dev, pm_message_t mesg)
        if (mesg.event == PM_EVENT_SUSPEND)
                ehi_flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_NO_RECOVERY;
 
-       rc = ata_port_request_pm(ap, mesg, 0, ehi_flags, 1);
+       rc = ata_port_request_pm(ap, mesg, 0, ehi_flags, async);
        return rc;
 }
 
+static int ata_port_suspend_common(struct device *dev, pm_message_t mesg)
+{
+       struct ata_port *ap = to_ata_port(dev);
+
+       return __ata_port_suspend_common(ap, mesg, NULL);
+}
+
 static int ata_port_suspend(struct device *dev)
 {
        if (pm_runtime_suspended(dev))
@@ -5340,16 +5370,22 @@ static int ata_port_poweroff(struct device *dev)
        return ata_port_suspend_common(dev, PMSG_HIBERNATE);
 }
 
-static int ata_port_resume_common(struct device *dev)
+static int __ata_port_resume_common(struct ata_port *ap, int *async)
 {
-       struct ata_port *ap = to_ata_port(dev);
        int rc;
 
        rc = ata_port_request_pm(ap, PMSG_ON, ATA_EH_RESET,
-               ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 1);
+               ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, async);
        return rc;
 }
 
+static int ata_port_resume_common(struct device *dev)
+{
+       struct ata_port *ap = to_ata_port(dev);
+
+       return __ata_port_resume_common(ap, NULL);
+}
+
 static int ata_port_resume(struct device *dev)
 {
        int rc;
@@ -5382,6 +5418,24 @@ static const struct dev_pm_ops ata_port_pm_ops = {
        .runtime_idle = ata_port_runtime_idle,
 };
 
+/* sas ports don't participate in pm runtime management of ata_ports,
+ * and need to resume ata devices at the domain level, not the per-port
+ * level. sas suspend/resume is async to allow parallel port recovery
+ * since sas has multiple ata_port instances per Scsi_Host.
+ */
+int ata_sas_port_async_suspend(struct ata_port *ap, int *async)
+{
+       return __ata_port_suspend_common(ap, PMSG_SUSPEND, async);
+}
+EXPORT_SYMBOL_GPL(ata_sas_port_async_suspend);
+
+int ata_sas_port_async_resume(struct ata_port *ap, int *async)
+{
+       return __ata_port_resume_common(ap, async);
+}
+EXPORT_SYMBOL_GPL(ata_sas_port_async_resume);
+
+
 /**
  *     ata_host_suspend - suspend host
  *     @host: host to suspend
@@ -5927,24 +5981,18 @@ int ata_host_start(struct ata_host *host)
 }
 
 /**
- *     ata_sas_host_init - Initialize a host struct
+ *     ata_sas_host_init - Initialize a host struct for sas (ipr, libsas)
  *     @host:  host to initialize
  *     @dev:   device host is attached to
- *     @flags: host flags
  *     @ops:   port_ops
  *
- *     LOCKING:
- *     PCI/etc. bus probe sem.
- *
  */
-/* KILLME - the only user left is ipr */
 void ata_host_init(struct ata_host *host, struct device *dev,
-                  unsigned long flags, struct ata_port_operations *ops)
+                  struct ata_port_operations *ops)
 {
        spin_lock_init(&host->lock);
        mutex_init(&host->eh_mutex);
        host->dev = dev;
-       host->flags = flags;
        host->ops = ops;
 }
 
@@ -6388,6 +6436,7 @@ static int __init ata_parse_force_one(char **cur,
                { "nohrst",     .lflags         = ATA_LFLAG_NO_HRST },
                { "nosrst",     .lflags         = ATA_LFLAG_NO_SRST },
                { "norst",      .lflags         = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
+               { "rstonce",    .lflags         = ATA_LFLAG_RST_ONCE },
        };
        char *start = *cur, *p = *cur;
        char *id, *val, *endp;
index 7d4535e989bf5580d76909c2402460898ff86062..e60437cd0d192efeb68ffdb93c7e0fd0a0f62423 100644 (file)
@@ -1487,6 +1487,7 @@ static const char *ata_err_string(unsigned int err_mask)
 /**
  *     ata_read_log_page - read a specific log page
  *     @dev: target device
+ *     @log: log to read
  *     @page: page to read
  *     @buf: buffer to store read page
  *     @sectors: number of sectors to read
@@ -1499,17 +1500,18 @@ static const char *ata_err_string(unsigned int err_mask)
  *     RETURNS:
  *     0 on success, AC_ERR_* mask otherwise.
  */
-static unsigned int ata_read_log_page(struct ata_device *dev,
-                                     u8 page, void *buf, unsigned int sectors)
+unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
+                              u8 page, void *buf, unsigned int sectors)
 {
        struct ata_taskfile tf;
        unsigned int err_mask;
 
-       DPRINTK("read log page - page %d\n", page);
+       DPRINTK("read log page - log 0x%x, page 0x%x\n", log, page);
 
        ata_tf_init(dev, &tf);
        tf.command = ATA_CMD_READ_LOG_EXT;
-       tf.lbal = page;
+       tf.lbal = log;
+       tf.lbam = page;
        tf.nsect = sectors;
        tf.hob_nsect = sectors >> 8;
        tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;
@@ -1545,7 +1547,7 @@ static int ata_eh_read_log_10h(struct ata_device *dev,
        u8 csum;
        int i;
 
-       err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, buf, 1);
+       err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, 0, buf, 1);
        if (err_mask)
                return -EIO;
 
@@ -2623,6 +2625,8 @@ int ata_eh_reset(struct ata_link *link, int classify,
         */
        while (ata_eh_reset_timeouts[max_tries] != ULONG_MAX)
                max_tries++;
+       if (link->flags & ATA_LFLAG_RST_ONCE)
+               max_tries = 1;
        if (link->flags & ATA_LFLAG_NO_HRST)
                hardreset = NULL;
        if (link->flags & ATA_LFLAG_NO_SRST)
index 8ec81ca8f659076dfa0f123ed178200549c4e253..e3bda074fa12f59381f9e8911338c49a88611f36 100644 (file)
@@ -1655,7 +1655,7 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
                if (unlikely(scmd->cmd_len < 10))
                        goto invalid_fld;
                scsi_10_lba_len(cdb, &block, &n_block);
-               if (unlikely(cdb[1] & (1 << 3)))
+               if (cdb[1] & (1 << 3))
                        tf_flags |= ATA_TFLAG_FUA;
                break;
        case READ_6:
@@ -1675,7 +1675,7 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
                if (unlikely(scmd->cmd_len < 16))
                        goto invalid_fld;
                scsi_16_lba_len(cdb, &block, &n_block);
-               if (unlikely(cdb[1] & (1 << 3)))
+               if (cdb[1] & (1 << 3))
                        tf_flags |= ATA_TFLAG_FUA;
                break;
        default:
@@ -2204,10 +2204,34 @@ static unsigned int ata_scsiop_noop(struct ata_scsi_args *args, u8 *rbuf)
        return 0;
 }
 
+/**
+ *     modecpy - Prepare response for MODE SENSE
+ *     @dest: output buffer
+ *     @src: data being copied
+ *     @n: length of mode page
+ *     @changeable: whether changeable parameters are requested
+ *
+ *     Generate a generic MODE SENSE page for either current or changeable
+ *     parameters.
+ *
+ *     LOCKING:
+ *     None.
+ */
+static void modecpy(u8 *dest, const u8 *src, int n, bool changeable)
+{
+       if (changeable) {
+               memcpy(dest, src, 2);
+               memset(dest + 2, 0, n - 2);
+       } else {
+               memcpy(dest, src, n);
+       }
+}
+
 /**
  *     ata_msense_caching - Simulate MODE SENSE caching info page
  *     @id: device IDENTIFY data
  *     @buf: output buffer
+ *     @changeable: whether changeable parameters are requested
  *
  *     Generate a caching info page, which conditionally indicates
  *     write caching to the SCSI layer, depending on device
@@ -2216,12 +2240,12 @@ static unsigned int ata_scsiop_noop(struct ata_scsi_args *args, u8 *rbuf)
  *     LOCKING:
  *     None.
  */
-static unsigned int ata_msense_caching(u16 *id, u8 *buf)
+static unsigned int ata_msense_caching(u16 *id, u8 *buf, bool changeable)
 {
-       memcpy(buf, def_cache_mpage, sizeof(def_cache_mpage));
-       if (ata_id_wcache_enabled(id))
+       modecpy(buf, def_cache_mpage, sizeof(def_cache_mpage), changeable);
+       if (changeable || ata_id_wcache_enabled(id))
                buf[2] |= (1 << 2);     /* write cache enable */
-       if (!ata_id_rahead_enabled(id))
+       if (!changeable && !ata_id_rahead_enabled(id))
                buf[12] |= (1 << 5);    /* disable read ahead */
        return sizeof(def_cache_mpage);
 }
@@ -2229,30 +2253,33 @@ static unsigned int ata_msense_caching(u16 *id, u8 *buf)
 /**
  *     ata_msense_ctl_mode - Simulate MODE SENSE control mode page
  *     @buf: output buffer
+ *     @changeable: whether changeable parameters are requested
  *
  *     Generate a generic MODE SENSE control mode page.
  *
  *     LOCKING:
  *     None.
  */
-static unsigned int ata_msense_ctl_mode(u8 *buf)
+static unsigned int ata_msense_ctl_mode(u8 *buf, bool changeable)
 {
-       memcpy(buf, def_control_mpage, sizeof(def_control_mpage));
+       modecpy(buf, def_control_mpage, sizeof(def_control_mpage), changeable);
        return sizeof(def_control_mpage);
 }
 
 /**
  *     ata_msense_rw_recovery - Simulate MODE SENSE r/w error recovery page
  *     @buf: output buffer
+ *     @changeable: whether changeable parameters are requested
  *
  *     Generate a generic MODE SENSE r/w error recovery page.
  *
  *     LOCKING:
  *     None.
  */
-static unsigned int ata_msense_rw_recovery(u8 *buf)
+static unsigned int ata_msense_rw_recovery(u8 *buf, bool changeable)
 {
-       memcpy(buf, def_rw_recovery_mpage, sizeof(def_rw_recovery_mpage));
+       modecpy(buf, def_rw_recovery_mpage, sizeof(def_rw_recovery_mpage),
+               changeable);
        return sizeof(def_rw_recovery_mpage);
 }
 
@@ -2316,11 +2343,11 @@ static unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf)
        page_control = scsicmd[2] >> 6;
        switch (page_control) {
        case 0: /* current */
+       case 1: /* changeable */
+       case 2: /* defaults */
                break;  /* supported */
        case 3: /* saved */
                goto saving_not_supp;
-       case 1: /* changeable */
-       case 2: /* defaults */
        default:
                goto invalid_fld;
        }
@@ -2341,21 +2368,21 @@ static unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf)
 
        switch(pg) {
        case RW_RECOVERY_MPAGE:
-               p += ata_msense_rw_recovery(p);
+               p += ata_msense_rw_recovery(p, page_control == 1);
                break;
 
        case CACHE_MPAGE:
-               p += ata_msense_caching(args->id, p);
+               p += ata_msense_caching(args->id, p, page_control == 1);
                break;
 
        case CONTROL_MPAGE:
-               p += ata_msense_ctl_mode(p);
+               p += ata_msense_ctl_mode(p, page_control == 1);
                break;
 
        case ALL_MPAGES:
-               p += ata_msense_rw_recovery(p);
-               p += ata_msense_caching(args->id, p);
-               p += ata_msense_ctl_mode(p);
+               p += ata_msense_rw_recovery(p, page_control == 1);
+               p += ata_msense_caching(args->id, p, page_control == 1);
+               p += ata_msense_ctl_mode(p, page_control == 1);
                break;
 
        default:                /* invalid page code */
@@ -3079,6 +3106,188 @@ static unsigned int ata_scsi_write_same_xlat(struct ata_queued_cmd *qc)
        return 1;
 }
 
+/**
+ *     ata_mselect_caching - Simulate MODE SELECT for caching info page
+ *     @qc: Storage for translated ATA taskfile
+ *     @buf: input buffer
+ *     @len: number of valid bytes in the input buffer
+ *
+ *     Prepare a taskfile to modify caching information for the device.
+ *
+ *     LOCKING:
+ *     None.
+ */
+static int ata_mselect_caching(struct ata_queued_cmd *qc,
+                              const u8 *buf, int len)
+{
+       struct ata_taskfile *tf = &qc->tf;
+       struct ata_device *dev = qc->dev;
+       char mpage[CACHE_MPAGE_LEN];
+       u8 wce;
+
+       /*
+        * The first two bytes of def_cache_mpage are a header, so offsets
+        * in mpage are off by 2 compared to buf.  Same for len.
+        */
+
+       if (len != CACHE_MPAGE_LEN - 2)
+               return -EINVAL;
+
+       wce = buf[0] & (1 << 2);
+
+       /*
+        * Check that read-only bits are not modified.
+        */
+       ata_msense_caching(dev->id, mpage, false);
+       mpage[2] &= ~(1 << 2);
+       mpage[2] |= wce;
+       if (memcmp(mpage + 2, buf, CACHE_MPAGE_LEN - 2) != 0)
+               return -EINVAL;
+
+       tf->flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
+       tf->protocol = ATA_PROT_NODATA;
+       tf->nsect = 0;
+       tf->command = ATA_CMD_SET_FEATURES;
+       tf->feature = wce ? SETFEATURES_WC_ON : SETFEATURES_WC_OFF;
+       return 0;
+}
+
+/**
+ *     ata_scsiop_mode_select - Simulate MODE SELECT 6, 10 commands
+ *     @qc: Storage for translated ATA taskfile
+ *
+ *     Converts a MODE SELECT command to an ATA SET FEATURES taskfile.
+ *     Assume this is invoked for direct access devices (e.g. disks) only.
+ *     There should be no block descriptor for other device types.
+ *
+ *     LOCKING:
+ *     spin_lock_irqsave(host lock)
+ */
+static unsigned int ata_scsi_mode_select_xlat(struct ata_queued_cmd *qc)
+{
+       struct scsi_cmnd *scmd = qc->scsicmd;
+       const u8 *cdb = scmd->cmnd;
+       const u8 *p;
+       u8 pg, spg;
+       unsigned six_byte, pg_len, hdr_len, bd_len;
+       int len;
+
+       VPRINTK("ENTER\n");
+
+       six_byte = (cdb[0] == MODE_SELECT);
+       if (six_byte) {
+               if (scmd->cmd_len < 5)
+                       goto invalid_fld;
+
+               len = cdb[4];
+               hdr_len = 4;
+       } else {
+               if (scmd->cmd_len < 9)
+                       goto invalid_fld;
+
+               len = (cdb[7] << 8) + cdb[8];
+               hdr_len = 8;
+       }
+
+       /* We only support PF=1, SP=0.  */
+       if ((cdb[1] & 0x11) != 0x10)
+               goto invalid_fld;
+
+       /* Test early for possible overrun.  */
+       if (!scsi_sg_count(scmd) || scsi_sglist(scmd)->length < len)
+               goto invalid_param_len;
+
+       p = page_address(sg_page(scsi_sglist(scmd)));
+
+       /* Move past header and block descriptors.  */
+       if (len < hdr_len)
+               goto invalid_param_len;
+
+       if (six_byte)
+               bd_len = p[3];
+       else
+               bd_len = (p[6] << 8) + p[7];
+
+       len -= hdr_len;
+       p += hdr_len;
+       if (len < bd_len)
+               goto invalid_param_len;
+       if (bd_len != 0 && bd_len != 8)
+               goto invalid_param;
+
+       len -= bd_len;
+       p += bd_len;
+       if (len == 0)
+               goto skip;
+
+       /* Parse both possible formats for the mode page headers.  */
+       pg = p[0] & 0x3f;
+       if (p[0] & 0x40) {
+               if (len < 4)
+                       goto invalid_param_len;
+
+               spg = p[1];
+               pg_len = (p[2] << 8) | p[3];
+               p += 4;
+               len -= 4;
+       } else {
+               if (len < 2)
+                       goto invalid_param_len;
+
+               spg = 0;
+               pg_len = p[1];
+               p += 2;
+               len -= 2;
+       }
+
+       /*
+        * No mode subpages supported (yet) but asking for _all_
+        * subpages may be valid
+        */
+       if (spg && (spg != ALL_SUB_MPAGES))
+               goto invalid_param;
+       if (pg_len > len)
+               goto invalid_param_len;
+
+       switch (pg) {
+       case CACHE_MPAGE:
+               if (ata_mselect_caching(qc, p, pg_len) < 0)
+                       goto invalid_param;
+               break;
+
+       default:                /* invalid page code */
+               goto invalid_param;
+       }
+
+       /*
+        * Only one page has changeable data, so we only support setting one
+        * page at a time.
+        */
+       if (len > pg_len)
+               goto invalid_param;
+
+       return 0;
+
+ invalid_fld:
+       /* "Invalid field in CDB" */
+       ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x24, 0x0);
+       return 1;
+
+ invalid_param:
+       /* "Invalid field in parameter list" */
+       ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x26, 0x0);
+       return 1;
+
+ invalid_param_len:
+       /* "Parameter list length error" */
+       ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x1a, 0x0);
+       return 1;
+
+ skip:
+       scmd->result = SAM_STAT_GOOD;
+       return 1;
+}
+
 /**
  *     ata_get_xlat_func - check if SCSI to ATA translation is possible
  *     @dev: ATA device
@@ -3119,6 +3328,11 @@ static inline ata_xlat_func_t ata_get_xlat_func(struct ata_device *dev, u8 cmd)
        case ATA_16:
                return ata_scsi_pass_thru;
 
+       case MODE_SELECT:
+       case MODE_SELECT_10:
+               return ata_scsi_mode_select_xlat;
+               break;
+
        case START_STOP:
                return ata_scsi_start_stop_xlat;
        }
@@ -3311,11 +3525,6 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd)
                ata_scsi_rbuf_fill(&args, ata_scsiop_mode_sense);
                break;
 
-       case MODE_SELECT:       /* unconditionally return */
-       case MODE_SELECT_10:    /* bad-field-in-cdb */
-               ata_scsi_invalid_field(cmd);
-               break;
-
        case READ_CAPACITY:
                ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap);
                break;
index 50e4dff0604ed449862f0f2595bf428038d49b1d..7148a58020b91d4cd1adb7a7f74e3224ac12d6e1 100644 (file)
@@ -165,6 +165,8 @@ extern void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev,
                               unsigned int action);
 extern void ata_eh_done(struct ata_link *link, struct ata_device *dev,
                        unsigned int action);
+extern unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
+                                     u8 page, void *buf, unsigned int sectors);
 extern void ata_eh_autopsy(struct ata_port *ap);
 const char *ata_get_cmd_descript(u8 command);
 extern void ata_eh_report(struct ata_port *ap);
index bfaa5cb1629ae8651f111c46d0e78b694072aac1..26201ebef3ca652a16c12c6719eae2127ed212ad 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/kernel.h>
 #include <linux/libata.h>
 #include <linux/module.h>
+#include <linux/of.h>
 #include <linux/pata_arasan_cf_data.h>
 #include <linux/platform_device.h>
 #include <linux/pm.h>
@@ -310,7 +311,7 @@ static int cf_init(struct arasan_cf_dev *acdev)
        unsigned long flags;
        int ret = 0;
 
-       ret = clk_enable(acdev->clk);
+       ret = clk_prepare_enable(acdev->clk);
        if (ret) {
                dev_dbg(acdev->host->dev, "clock enable failed");
                return ret;
@@ -340,7 +341,7 @@ static void cf_exit(struct arasan_cf_dev *acdev)
        writel(readl(acdev->vbase + OP_MODE) & ~CFHOST_ENB,
                        acdev->vbase + OP_MODE);
        spin_unlock_irqrestore(&acdev->host->lock, flags);
-       clk_disable(acdev->clk);
+       clk_disable_unprepare(acdev->clk);
 }
 
 static void dma_callback(void *dev)
@@ -935,6 +936,14 @@ static int arasan_cf_resume(struct device *dev)
 
 static SIMPLE_DEV_PM_OPS(arasan_cf_pm_ops, arasan_cf_suspend, arasan_cf_resume);
 
+#ifdef CONFIG_OF
+static const struct of_device_id arasan_cf_id_table[] = {
+       { .compatible = "arasan,cf-spear1340" },
+       {}
+};
+MODULE_DEVICE_TABLE(of, arasan_cf_id_table);
+#endif
+
 static struct platform_driver arasan_cf_driver = {
        .probe          = arasan_cf_probe,
        .remove         = __devexit_p(arasan_cf_remove),
@@ -942,6 +951,7 @@ static struct platform_driver arasan_cf_driver = {
                .name   = DRIVER_NAME,
                .owner  = THIS_MODULE,
                .pm     = &arasan_cf_pm_ops,
+               .of_match_table = of_match_ptr(arasan_cf_id_table),
        },
 };
 
index d6577b93bee3735a8eebf47a38f64dcc72aac182..124b2c1d9c0b020255870dec161d41626562fcca 100644 (file)
@@ -123,6 +123,7 @@ enum {
        ONLINE = (1 << 31),
        GOING_OFFLINE = (1 << 30),
        BIST_ERR = (1 << 29),
+       CLEAR_ERROR = (1 << 27),
 
        FATAL_ERR_HC_MASTER_ERR = (1 << 18),
        FATAL_ERR_PARITY_ERR_TX = (1 << 17),
@@ -143,6 +144,7 @@ enum {
            FATAL_ERR_CRC_ERR_RX |
            FATAL_ERR_FIFO_OVRFL_TX | FATAL_ERR_FIFO_OVRFL_RX,
 
+       INT_ON_DATA_LENGTH_MISMATCH = (1 << 12),
        INT_ON_FATAL_ERR = (1 << 5),
        INT_ON_PHYRDY_CHG = (1 << 4),
 
@@ -1181,25 +1183,54 @@ static void sata_fsl_host_intr(struct ata_port *ap)
        u32 hstatus, done_mask = 0;
        struct ata_queued_cmd *qc;
        u32 SError;
+       u32 tag;
+       u32 status_mask = INT_ON_ERROR;
 
        hstatus = ioread32(hcr_base + HSTATUS);
 
        sata_fsl_scr_read(&ap->link, SCR_ERROR, &SError);
 
+       /* Read command completed register */
+       done_mask = ioread32(hcr_base + CC);
+
+       /* Workaround for data length mismatch errata */
+       if (unlikely(hstatus & INT_ON_DATA_LENGTH_MISMATCH)) {
+               for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
+                       qc = ata_qc_from_tag(ap, tag);
+                       if (qc && ata_is_atapi(qc->tf.protocol)) {
+                               u32 hcontrol;
+                               /* Set HControl[27] to clear error registers */
+                               hcontrol = ioread32(hcr_base + HCONTROL);
+                               iowrite32(hcontrol | CLEAR_ERROR,
+                                               hcr_base + HCONTROL);
+
+                               /* Clear HControl[27] */
+                               iowrite32(hcontrol & ~CLEAR_ERROR,
+                                               hcr_base + HCONTROL);
+
+                               /* Clear SError[E] bit */
+                               sata_fsl_scr_write(&ap->link, SCR_ERROR,
+                                               SError);
+
+                               /* Ignore fatal error and device error */
+                               status_mask &= ~(INT_ON_SINGL_DEVICE_ERR
+                                               | INT_ON_FATAL_ERR);
+                               break;
+                       }
+               }
+       }
+
        if (unlikely(SError & 0xFFFF0000)) {
                DPRINTK("serror @host_intr : 0x%x\n", SError);
                sata_fsl_error_intr(ap);
        }
 
-       if (unlikely(hstatus & INT_ON_ERROR)) {
+       if (unlikely(hstatus & status_mask)) {
                DPRINTK("error interrupt!!\n");
                sata_fsl_error_intr(ap);
                return;
        }
 
-       /* Read command completed register */
-       done_mask = ioread32(hcr_base + CC);
-
        VPRINTK("Status of all queues :\n");
        VPRINTK("done_mask/CC = 0x%x, CA = 0x%x, CE=0x%x,CQ=0x%x,apqa=0x%x\n",
                done_mask,
diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c
new file mode 100644 (file)
index 0000000..0d7c4c2
--- /dev/null
@@ -0,0 +1,450 @@
+/*
+ * Calxeda Highbank AHCI SATA platform driver
+ * Copyright 2012 Calxeda, Inc.
+ *
+ * based on the AHCI SATA platform driver by Jeff Garzik and Anton Vorontsov
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/kernel.h>
+#include <linux/gfp.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/libata.h>
+#include <linux/ahci_platform.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/export.h>
+#include "ahci.h"
+
+#define CPHY_MAP(dev, addr) ((((dev) & 0x1f) << 7) | (((addr) >> 9) & 0x7f))
+#define CPHY_ADDR(addr) (((addr) & 0x1ff) << 2)
+#define SERDES_CR_CTL                  0x80a0
+#define SERDES_CR_ADDR                 0x80a1
+#define SERDES_CR_DATA                 0x80a2
+#define CR_BUSY                                0x0001
+#define CR_START                       0x0001
+#define CR_WR_RDN                      0x0002
+#define CPHY_RX_INPUT_STS              0x2002
+#define CPHY_SATA_OVERRIDE             0x4000
+#define CPHY_OVERRIDE                  0x2005
+#define SPHY_LANE                      0x100
+#define SPHY_HALF_RATE                 0x0001
+#define CPHY_SATA_DPLL_MODE            0x0700
+#define CPHY_SATA_DPLL_SHIFT           8
+#define CPHY_SATA_DPLL_RESET           (1 << 11)
+#define CPHY_PHY_COUNT                 6
+#define CPHY_LANE_COUNT                        4
+#define CPHY_PORT_COUNT                        (CPHY_PHY_COUNT * CPHY_LANE_COUNT)
+
+static DEFINE_SPINLOCK(cphy_lock);
+/* Each of the 6 phys can have up to 4 sata ports attached to i. Map 0-based
+ * sata ports to their phys and then to their lanes within the phys
+ */
+struct phy_lane_info {
+       void __iomem *phy_base;
+       u8 lane_mapping;
+       u8 phy_devs;
+};
+static struct phy_lane_info port_data[CPHY_PORT_COUNT];
+
+static u32 __combo_phy_reg_read(u8 sata_port, u32 addr)
+{
+       u32 data;
+       u8 dev = port_data[sata_port].phy_devs;
+       spin_lock(&cphy_lock);
+       writel(CPHY_MAP(dev, addr), port_data[sata_port].phy_base + 0x800);
+       data = readl(port_data[sata_port].phy_base + CPHY_ADDR(addr));
+       spin_unlock(&cphy_lock);
+       return data;
+}
+
+static void __combo_phy_reg_write(u8 sata_port, u32 addr, u32 data)
+{
+       u8 dev = port_data[sata_port].phy_devs;
+       spin_lock(&cphy_lock);
+       writel(CPHY_MAP(dev, addr), port_data[sata_port].phy_base + 0x800);
+       writel(data, port_data[sata_port].phy_base + CPHY_ADDR(addr));
+       spin_unlock(&cphy_lock);
+}
+
+static void combo_phy_wait_for_ready(u8 sata_port)
+{
+       while (__combo_phy_reg_read(sata_port, SERDES_CR_CTL) & CR_BUSY)
+               udelay(5);
+}
+
/*
 * Indirect serdes register read: wait until idle, latch the address, issue
 * a read strobe (CR_START without CR_WR_RDN), wait for completion, then
 * fetch the returned data word.
 */
static u32 combo_phy_read(u8 sata_port, u32 addr)
{
	combo_phy_wait_for_ready(sata_port);
	__combo_phy_reg_write(sata_port, SERDES_CR_ADDR, addr);
	__combo_phy_reg_write(sata_port, SERDES_CR_CTL, CR_START);
	combo_phy_wait_for_ready(sata_port);
	return __combo_phy_reg_read(sata_port, SERDES_CR_DATA);
}
+
/*
 * Indirect serdes register write: wait until idle, latch address and data,
 * then strobe CR_WR_RDN | CR_START to commit.  The write completes
 * asynchronously; the next combo_phy_read()/write() waits for it.
 */
static void combo_phy_write(u8 sata_port, u32 addr, u32 data)
{
	combo_phy_wait_for_ready(sata_port);
	__combo_phy_reg_write(sata_port, SERDES_CR_ADDR, addr);
	__combo_phy_reg_write(sata_port, SERDES_CR_DATA, data);
	__combo_phy_reg_write(sata_port, SERDES_CR_CTL, CR_WR_RDN | CR_START);
}
+
/*
 * Drop the SATA rate override for this port's lane so the following
 * COMRESET negotiates at hardware defaults.
 */
static void highbank_cphy_disable_overrides(u8 sata_port)
{
	u8 lane = port_data[sata_port].lane_mapping;
	u32 tmp;
	/* Port never got a PHY mapping (init bailed out) — nothing to do. */
	if (unlikely(port_data[sata_port].phy_base == NULL))
		return;
	/* NOTE(review): the value is read from CPHY_RX_INPUT_STS but written
	 * back to CPHY_OVERRIDE (same pattern as cphy_override_rx_mode()) —
	 * presumably the override register mirrors the status layout; confirm
	 * against the combo PHY datasheet. */
	tmp = combo_phy_read(sata_port, CPHY_RX_INPUT_STS + lane * SPHY_LANE);
	tmp &= ~CPHY_SATA_OVERRIDE;
	combo_phy_write(sata_port, CPHY_OVERRIDE + lane * SPHY_LANE, tmp);
}
+
/*
 * Force the lane's RX DPLL mode to @val via the override register, then
 * pulse the DPLL reset bit to make the new mode take effect.  The write
 * sequence (clear override, set override, program mode, reset pulse) is
 * order-sensitive.
 */
static void cphy_override_rx_mode(u8 sata_port, u32 val)
{
	u8 lane = port_data[sata_port].lane_mapping;
	u32 tmp;
	/* Seed from current RX status, with override momentarily dropped. */
	tmp = combo_phy_read(sata_port, CPHY_RX_INPUT_STS + lane * SPHY_LANE);
	tmp &= ~CPHY_SATA_OVERRIDE;
	combo_phy_write(sata_port, CPHY_OVERRIDE + lane * SPHY_LANE, tmp);

	/* Re-arm the override before changing the DPLL mode. */
	tmp |= CPHY_SATA_OVERRIDE;
	combo_phy_write(sata_port, CPHY_OVERRIDE + lane * SPHY_LANE, tmp);

	/* Program the requested DPLL mode. */
	tmp &= ~CPHY_SATA_DPLL_MODE;
	tmp |= val << CPHY_SATA_DPLL_SHIFT;
	combo_phy_write(sata_port, CPHY_OVERRIDE + lane * SPHY_LANE, tmp);

	/* Pulse DPLL reset: assert... */
	tmp |= CPHY_SATA_DPLL_RESET;
	combo_phy_write(sata_port, CPHY_OVERRIDE + lane * SPHY_LANE, tmp);

	/* ...and release. */
	tmp &= ~CPHY_SATA_DPLL_RESET;
	combo_phy_write(sata_port, CPHY_OVERRIDE + lane * SPHY_LANE, tmp);

	/* Allow the DPLL to relock before the caller proceeds. */
	msleep(15);
}
+
/*
 * Re-apply the rate override after a hardreset: wait (bounded to ~1000
 * polls) for the lane to leave half-rate, then force DPLL mode 3.
 */
static void highbank_cphy_override_lane(u8 sata_port)
{
	u8 lane = port_data[sata_port].lane_mapping;
	u32 tmp, k = 0;

	/* Port never got a PHY mapping (init bailed out) — nothing to do. */
	if (unlikely(port_data[sata_port].phy_base == NULL))
		return;
	do {
		tmp = combo_phy_read(sata_port, CPHY_RX_INPUT_STS +
						lane * SPHY_LANE);
	} while ((tmp & SPHY_HALF_RATE) && (k++ < 1000));
	cphy_override_rx_mode(sata_port, 3);
}
+
+static int highbank_initialize_phys(struct device *dev, void __iomem *addr)
+{
+       struct device_node *sata_node = dev->of_node;
+       int phy_count = 0, phy, port = 0;
+       void __iomem *cphy_base[CPHY_PHY_COUNT];
+       struct device_node *phy_nodes[CPHY_PHY_COUNT];
+       memset(port_data, 0, sizeof(struct phy_lane_info) * CPHY_PORT_COUNT);
+       memset(phy_nodes, 0, sizeof(struct device_node*) * CPHY_PHY_COUNT);
+
+       do {
+               u32 tmp;
+               struct of_phandle_args phy_data;
+               if (of_parse_phandle_with_args(sata_node,
+                               "calxeda,port-phys", "#phy-cells",
+                               port, &phy_data))
+                       break;
+               for (phy = 0; phy < phy_count; phy++) {
+                       if (phy_nodes[phy] == phy_data.np)
+                               break;
+               }
+               if (phy_nodes[phy] == NULL) {
+                       phy_nodes[phy] = phy_data.np;
+                       cphy_base[phy] = of_iomap(phy_nodes[phy], 0);
+                       if (cphy_base[phy] == NULL) {
+                               return 0;
+                       }
+                       phy_count += 1;
+               }
+               port_data[port].lane_mapping = phy_data.args[0];
+               of_property_read_u32(phy_nodes[phy], "phydev", &tmp);
+               port_data[port].phy_devs = tmp;
+               port_data[port].phy_base = cphy_base[phy];
+               of_node_put(phy_data.np);
+               port += 1;
+       } while (port < CPHY_PORT_COUNT);
+       return 0;
+}
+
/*
 * Hardreset tailored to the Highbank combo PHY: the lane rate override is
 * dropped while COMRESET runs and re-applied afterwards.  If SStatus shows
 * a device present (DET != 0) but the link did not come up, the reset is
 * retried up to 10 times.
 */
static int ahci_highbank_hardreset(struct ata_link *link, unsigned int *class,
				unsigned long deadline)
{
	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
	struct ata_port *ap = link->ap;
	struct ahci_port_priv *pp = ap->private_data;
	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
	struct ata_taskfile tf;
	bool online;
	u32 sstatus;
	int rc;
	int retry = 10;

	ahci_stop_engine(ap);

	/* clear D2H reception area to properly wait for D2H FIS */
	ata_tf_init(link->device, &tf);
	tf.command = 0x80;	/* BSY set: marks "no D2H FIS received yet" */
	ata_tf_to_fis(&tf, 0, 0, d2h_fis);

	do {
		highbank_cphy_disable_overrides(link->ap->port_no);
		rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
		highbank_cphy_override_lane(link->ap->port_no);

		/* If the status is 1, we are connected, but the link did not
		 * come up. So retry resetting the link again.
		 */
		if (sata_scr_read(link, SCR_STATUS, &sstatus))
			break;
		if (!(sstatus & 0x3))
			break;
	} while (!online && retry--);

	ahci_start_engine(ap);

	if (online)
		*class = ahci_dev_classify(ap);

	/* rc comes from the last sata_link_hardreset() attempt. */
	return rc;
}
+
/* Standard AHCI port ops; only hardreset is overridden for the combo PHY. */
static struct ata_port_operations ahci_highbank_ops = {
	.inherits		= &ahci_ops,
	.hardreset		= ahci_highbank_hardreset,
};

/* Template port info; probe copies it and ORs in capability-derived flags. */
static const struct ata_port_info ahci_highbank_port_info = {
	.flags          = AHCI_FLAG_COMMON,
	.pio_mask       = ATA_PIO4,
	.udma_mask      = ATA_UDMA6,
	.port_ops       = &ahci_highbank_ops,
};

/* SCSI host template built from the common AHCI macro. */
static struct scsi_host_template ahci_highbank_platform_sht = {
	AHCI_SHT("highbank-ahci"),
};
+
/* Device-tree match table; bound against the node's "compatible" string. */
static const struct of_device_id ahci_of_match[] = {
	{ .compatible = "calxeda,hb-ahci" },
	{},	/* sentinel */
};
MODULE_DEVICE_TABLE(of, ahci_of_match);
+
+static int __init ahci_highbank_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct ahci_host_priv *hpriv;
+       struct ata_host *host;
+       struct resource *mem;
+       int irq;
+       int n_ports;
+       int i;
+       int rc;
+       struct ata_port_info pi = ahci_highbank_port_info;
+       const struct ata_port_info *ppi[] = { &pi, NULL };
+
+       mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!mem) {
+               dev_err(dev, "no mmio space\n");
+               return -EINVAL;
+       }
+
+       irq = platform_get_irq(pdev, 0);
+       if (irq <= 0) {
+               dev_err(dev, "no irq\n");
+               return -EINVAL;
+       }
+
+       hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
+       if (!hpriv) {
+               dev_err(dev, "can't alloc ahci_host_priv\n");
+               return -ENOMEM;
+       }
+
+       hpriv->flags |= (unsigned long)pi.private_data;
+
+       hpriv->mmio = devm_ioremap(dev, mem->start, resource_size(mem));
+       if (!hpriv->mmio) {
+               dev_err(dev, "can't map %pR\n", mem);
+               return -ENOMEM;
+       }
+
+       rc = highbank_initialize_phys(dev, hpriv->mmio);
+       if (rc)
+               return rc;
+
+
+       ahci_save_initial_config(dev, hpriv, 0, 0);
+
+       /* prepare host */
+       if (hpriv->cap & HOST_CAP_NCQ)
+               pi.flags |= ATA_FLAG_NCQ;
+
+       if (hpriv->cap & HOST_CAP_PMP)
+               pi.flags |= ATA_FLAG_PMP;
+
+       ahci_set_em_messages(hpriv, &pi);
+
+       /* CAP.NP sometimes indicate the index of the last enabled
+        * port, at other times, that of the last possible port, so
+        * determining the maximum port number requires looking at
+        * both CAP.NP and port_map.
+        */
+       n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));
+
+       host = ata_host_alloc_pinfo(dev, ppi, n_ports);
+       if (!host) {
+               rc = -ENOMEM;
+               goto err0;
+       }
+
+       host->private_data = hpriv;
+
+       if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
+               host->flags |= ATA_HOST_PARALLEL_SCAN;
+
+       if (pi.flags & ATA_FLAG_EM)
+               ahci_reset_em(host);
+
+       for (i = 0; i < host->n_ports; i++) {
+               struct ata_port *ap = host->ports[i];
+
+               ata_port_desc(ap, "mmio %pR", mem);
+               ata_port_desc(ap, "port 0x%x", 0x100 + ap->port_no * 0x80);
+
+               /* set enclosure management message type */
+               if (ap->flags & ATA_FLAG_EM)
+                       ap->em_message_type = hpriv->em_msg_type;
+
+               /* disabled/not-implemented port */
+               if (!(hpriv->port_map & (1 << i)))
+                       ap->ops = &ata_dummy_port_ops;
+       }
+
+       rc = ahci_reset_controller(host);
+       if (rc)
+               goto err0;
+
+       ahci_init_controller(host);
+       ahci_print_info(host, "platform");
+
+       rc = ata_host_activate(host, irq, ahci_interrupt, 0,
+                                       &ahci_highbank_platform_sht);
+       if (rc)
+               goto err0;
+
+       return 0;
+err0:
+       return rc;
+}
+
+static int __devexit ahci_highbank_remove(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct ata_host *host = dev_get_drvdata(dev);
+
+       ata_host_detach(host);
+
+       return 0;
+}
+
+#ifdef CONFIG_PM
/*
 * System suspend: refuse when firmware can't support it, mask the HBA
 * interrupt as the AHCI spec requires before D3 entry, then suspend the
 * ATA host.
 */
static int ahci_highbank_suspend(struct device *dev)
{
	struct ata_host *host = dev_get_drvdata(dev);
	struct ahci_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->mmio;
	u32 ctl;
	int rc;

	if (hpriv->flags & AHCI_HFLAG_NO_SUSPEND) {
		dev_err(dev, "firmware update required for suspend/resume\n");
		return -EIO;
	}

	/*
	 * AHCI spec rev1.1 section 8.3.3:
	 * Software must disable interrupts prior to requesting a
	 * transition of the HBA to D3 state.
	 */
	ctl = readl(mmio + HOST_CTL);
	ctl &= ~HOST_IRQ_EN;
	writel(ctl, mmio + HOST_CTL);
	readl(mmio + HOST_CTL); /* flush */

	rc = ata_host_suspend(host, PMSG_SUSPEND);
	if (rc)
		return rc;

	return 0;
}
+
+static int ahci_highbank_resume(struct device *dev)
+{
+       struct ata_host *host = dev_get_drvdata(dev);
+       int rc;
+
+       if (dev->power.power_state.event == PM_EVENT_SUSPEND) {
+               rc = ahci_reset_controller(host);
+               if (rc)
+                       return rc;
+
+               ahci_init_controller(host);
+       }
+
+       ata_host_resume(host);
+
+       return 0;
+}
+#endif
+
/*
 * static: the pm_ops table is referenced only by ahci_highbank_driver in
 * this file; without static the symbol leaks into the global namespace.
 */
static SIMPLE_DEV_PM_OPS(ahci_highbank_pm_ops,
			 ahci_highbank_suspend, ahci_highbank_resume);
+
+static struct platform_driver ahci_highbank_driver = {
+        .remove = __devexit_p(ahci_highbank_remove),
+        .driver = {
+                .name = "highbank-ahci",
+                .owner = THIS_MODULE,
+                .of_match_table = ahci_of_match,
+                .pm = &ahci_highbank_pm_ops,
+        },
+       .probe = ahci_highbank_probe,
+};
+
+module_platform_driver(ahci_highbank_driver);
+
+MODULE_DESCRIPTION("Calxeda Highbank AHCI SATA platform driver");
+MODULE_AUTHOR("Mark Langsdorf <mark.langsdorf@calxeda.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("sata:highbank");
index 311be18d3f03beff563901943bc0b08a49ecc80a..68f4fb54d627f43ba24a758e5ed216668e65eb1e 100644 (file)
@@ -79,8 +79,8 @@
  * module options
  */
 
-static int msi;
 #ifdef CONFIG_PCI
+static int msi;
 module_param(msi, int, S_IRUGO);
 MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
 #endif
@@ -652,12 +652,13 @@ static u8 mv_sff_check_status(struct ata_port *ap);
  * because we have to allow room for worst case splitting of
  * PRDs for 64K boundaries in mv_fill_sg().
  */
+#ifdef CONFIG_PCI
 static struct scsi_host_template mv5_sht = {
        ATA_BASE_SHT(DRV_NAME),
        .sg_tablesize           = MV_MAX_SG_CT / 2,
        .dma_boundary           = MV_DMA_BOUNDARY,
 };
-
+#endif
 static struct scsi_host_template mv6_sht = {
        ATA_NCQ_SHT(DRV_NAME),
        .can_queue              = MV_MAX_Q_DEPTH - 1,
@@ -1252,7 +1253,7 @@ static void mv_dump_mem(void __iomem *start, unsigned bytes)
        }
 }
 #endif
-
+#if defined(ATA_DEBUG) || defined(CONFIG_PCI)
 static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
 {
 #ifdef ATA_DEBUG
@@ -1269,6 +1270,7 @@ static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
        }
 #endif
 }
+#endif
 static void mv_dump_all_regs(void __iomem *mmio_base, int port,
                             struct pci_dev *pdev)
 {
index deb4a456cf8365ac3a45480135786c160473964f..147d1a4dd2694eb05a5366cf0a47317eec729479 100644 (file)
@@ -309,8 +309,8 @@ static int handle_remove(const char *nodename, struct device *dev)
                         * before unlinking this node, reset permissions
                         * of possible references like hardlinks
                         */
-                       newattrs.ia_uid = 0;
-                       newattrs.ia_gid = 0;
+                       newattrs.ia_uid = GLOBAL_ROOT_UID;
+                       newattrs.ia_gid = GLOBAL_ROOT_GID;
                        newattrs.ia_mode = stat.mode & ~0777;
                        newattrs.ia_valid =
                                ATTR_UID|ATTR_GID|ATTR_MODE;
index c30f3e1d0efcc7bcb443051cab59930b06d72b74..460e22dee36dbb2389524f7287eb103b936e2c58 100644 (file)
@@ -460,8 +460,7 @@ int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
        if (vma->vm_file)
                fput(vma->vm_file);
 
-       vma->vm_file = dmabuf->file;
-       get_file(vma->vm_file);
+       vma->vm_file = get_file(dmabuf->file);
 
        vma->vm_pgoff = pgoff;
 
index 34d94c762a1e79415fc00b0fc59caf034a3e186a..9a1469474f55addaea1ae5be1f4499187297db06 100644 (file)
@@ -315,6 +315,7 @@ struct page *dma_alloc_from_contiguous(struct device *dev, int count,
 {
        unsigned long mask, pfn, pageno, start = 0;
        struct cma *cma = dev_get_cma_area(dev);
+       struct page *page = NULL;
        int ret;
 
        if (!cma || !cma->count)
@@ -336,18 +337,17 @@ struct page *dma_alloc_from_contiguous(struct device *dev, int count,
        for (;;) {
                pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
                                                    start, count, mask);
-               if (pageno >= cma->count) {
-                       ret = -ENOMEM;
-                       goto error;
-               }
+               if (pageno >= cma->count)
+                       break;
 
                pfn = cma->base_pfn + pageno;
                ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
                if (ret == 0) {
                        bitmap_set(cma->bitmap, pageno, count);
+                       page = pfn_to_page(pfn);
                        break;
                } else if (ret != -EBUSY) {
-                       goto error;
+                       break;
                }
                pr_debug("%s(): memory range at %p is busy, retrying\n",
                         __func__, pfn_to_page(pfn));
@@ -356,12 +356,8 @@ struct page *dma_alloc_from_contiguous(struct device *dev, int count,
        }
 
        mutex_unlock(&cma_mutex);
-
-       pr_debug("%s(): returned %p\n", __func__, pfn_to_page(pfn));
-       return pfn_to_page(pfn);
-error:
-       mutex_unlock(&cma_mutex);
-       return NULL;
+       pr_debug("%s(): returned %p\n", __func__, page);
+       return page;
 }
 
 /**
index ddeca142293ccbe378f5b5c207465f4e1f5e3056..8727e9c5eea47dd78170e091635f31d29fc7cb52 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/idr.h>
 
 #include "base.h"
+#include "power/power.h"
 
 /* For automatically allocated device IDs */
 static DEFINE_IDA(platform_devid_ida);
@@ -983,6 +984,7 @@ void __init early_platform_add_devices(struct platform_device **devs, int num)
                dev = &devs[i]->dev;
 
                if (!dev->devres_head.next) {
+                       pm_runtime_early_init(dev);
                        INIT_LIST_HEAD(&dev->devres_head);
                        list_add_tail(&dev->devres_head,
                                      &early_platform_device_list);
index ba3487c9835b67fa64daad5f6e23016f23a7f569..c22b869245d9498e7052fdc42ceacf3aaa4e332e 100644 (file)
 static LIST_HEAD(gpd_list);
 static DEFINE_MUTEX(gpd_list_lock);
 
+static struct generic_pm_domain *pm_genpd_lookup_name(const char *domain_name)
+{
+       struct generic_pm_domain *genpd = NULL, *gpd;
+
+       if (IS_ERR_OR_NULL(domain_name))
+               return NULL;
+
+       mutex_lock(&gpd_list_lock);
+       list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
+               if (!strcmp(gpd->name, domain_name)) {
+                       genpd = gpd;
+                       break;
+               }
+       }
+       mutex_unlock(&gpd_list_lock);
+       return genpd;
+}
+
 #ifdef CONFIG_PM
 
 struct generic_pm_domain *dev_to_genpd(struct device *dev)
@@ -256,10 +274,28 @@ int pm_genpd_poweron(struct generic_pm_domain *genpd)
        return ret;
 }
 
+/**
+ * pm_genpd_name_poweron - Restore power to a given PM domain and its masters.
+ * @domain_name: Name of the PM domain to power up.
+ */
+int pm_genpd_name_poweron(const char *domain_name)
+{
+       struct generic_pm_domain *genpd;
+
+       genpd = pm_genpd_lookup_name(domain_name);
+       return genpd ? pm_genpd_poweron(genpd) : -EINVAL;
+}
+
 #endif /* CONFIG_PM */
 
 #ifdef CONFIG_PM_RUNTIME
 
/*
 * Invoke the domain's ->start() callback for @dev without going through the
 * timed-callback wrapper — presumably skipping the latency bookkeeping the
 * GENPD_DEV_TIMED_CALLBACK variants perform (TODO: confirm against
 * genpd_start_dev()).  Used on the irq-safe runtime-resume fast path.
 */
static int genpd_start_dev_no_timing(struct generic_pm_domain *genpd,
				     struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, start, dev);
}
+
 static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
 {
        return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev,
@@ -436,7 +472,7 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
        not_suspended = 0;
        list_for_each_entry(pdd, &genpd->dev_list, list_node)
                if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev)
-                   || pdd->dev->power.irq_safe || to_gpd_data(pdd)->always_on))
+                   || pdd->dev->power.irq_safe))
                        not_suspended++;
 
        if (not_suspended > genpd->in_progress)
@@ -578,9 +614,6 @@ static int pm_genpd_runtime_suspend(struct device *dev)
 
        might_sleep_if(!genpd->dev_irq_safe);
 
-       if (dev_gpd_data(dev)->always_on)
-               return -EBUSY;
-
        stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
        if (stop_ok && !stop_ok(dev))
                return -EBUSY;
@@ -629,7 +662,7 @@ static int pm_genpd_runtime_resume(struct device *dev)
 
        /* If power.irq_safe, the PM domain is never powered off. */
        if (dev->power.irq_safe)
-               return genpd_start_dev(genpd, dev);
+               return genpd_start_dev_no_timing(genpd, dev);
 
        mutex_lock(&genpd->lock);
        ret = __pm_genpd_poweron(genpd);
@@ -697,6 +730,24 @@ static inline void genpd_power_off_work_fn(struct work_struct *work) {}
 
 #ifdef CONFIG_PM_SLEEP
 
/**
 * pm_genpd_present - Check if the given PM domain has been initialized.
 * @genpd: PM domain to check.
 *
 * Returns true iff @genpd is a live entry on gpd_list.
 *
 * NOTE(review): gpd_list is walked without gpd_list_lock here; this appears
 * to rely on the caller running in a context where the list cannot change
 * (e.g. syscore suspend/resume) — confirm before reusing elsewhere.
 */
static bool pm_genpd_present(struct generic_pm_domain *genpd)
{
	struct generic_pm_domain *gpd;

	if (IS_ERR_OR_NULL(genpd))
		return false;

	list_for_each_entry(gpd, &gpd_list, gpd_list_node)
		if (gpd == genpd)
			return true;

	return false;
}
+
 static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
                                    struct device *dev)
 {
@@ -750,9 +801,10 @@ static int genpd_thaw_dev(struct generic_pm_domain *genpd, struct device *dev)
  * Check if the given PM domain can be powered off (during system suspend or
  * hibernation) and do that if so.  Also, in that case propagate to its masters.
  *
- * This function is only called in "noirq" stages of system power transitions,
- * so it need not acquire locks (all of the "noirq" callbacks are executed
- * sequentially, so it is guaranteed that it will never run twice in parallel).
+ * This function is only called in "noirq" and "syscore" stages of system power
+ * transitions, so it need not acquire locks (all of the "noirq" callbacks are
+ * executed sequentially, so it is guaranteed that it will never run twice in
+ * parallel).
  */
 static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
 {
@@ -776,6 +828,33 @@ static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
        }
 }
 
/**
 * pm_genpd_sync_poweron - Synchronously power on a PM domain and its masters.
 * @genpd: PM domain to power on.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions, so it need not acquire locks (all of the "noirq" callbacks are
 * executed sequentially, so it is guaranteed that it will never run twice in
 * parallel).
 */
static void pm_genpd_sync_poweron(struct generic_pm_domain *genpd)
{
	struct gpd_link *link;

	/* Already on (or in transition): nothing to do. */
	if (genpd->status != GPD_STATE_POWER_OFF)
		return;

	/* Power on all masters first (recursively) and take a subdomain
	 * reference on each so they stay up while this domain is on. */
	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		pm_genpd_sync_poweron(link->master);
		genpd_sd_counter_inc(link->master);
	}

	if (genpd->power_on)
		genpd->power_on(genpd);

	genpd->status = GPD_STATE_ACTIVE;
}
+
 /**
  * resume_needed - Check whether to resume a device before system suspend.
  * @dev: Device to check.
@@ -937,7 +1016,7 @@ static int pm_genpd_suspend_noirq(struct device *dev)
        if (IS_ERR(genpd))
                return -EINVAL;
 
-       if (genpd->suspend_power_off || dev_gpd_data(dev)->always_on
+       if (genpd->suspend_power_off
            || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
                return 0;
 
@@ -970,7 +1049,7 @@ static int pm_genpd_resume_noirq(struct device *dev)
        if (IS_ERR(genpd))
                return -EINVAL;
 
-       if (genpd->suspend_power_off || dev_gpd_data(dev)->always_on
+       if (genpd->suspend_power_off
            || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
                return 0;
 
@@ -979,7 +1058,7 @@ static int pm_genpd_resume_noirq(struct device *dev)
         * guaranteed that this function will never run twice in parallel for
         * the same PM domain, so it is not necessary to use locking here.
         */
-       pm_genpd_poweron(genpd);
+       pm_genpd_sync_poweron(genpd);
        genpd->suspended_count--;
 
        return genpd_start_dev(genpd, dev);
@@ -1090,8 +1169,7 @@ static int pm_genpd_freeze_noirq(struct device *dev)
        if (IS_ERR(genpd))
                return -EINVAL;
 
-       return genpd->suspend_power_off || dev_gpd_data(dev)->always_on ?
-               0 : genpd_stop_dev(genpd, dev);
+       return genpd->suspend_power_off ? 0 : genpd_stop_dev(genpd, dev);
 }
 
 /**
@@ -1111,8 +1189,7 @@ static int pm_genpd_thaw_noirq(struct device *dev)
        if (IS_ERR(genpd))
                return -EINVAL;
 
-       return genpd->suspend_power_off || dev_gpd_data(dev)->always_on ?
-               0 : genpd_start_dev(genpd, dev);
+       return genpd->suspend_power_off ? 0 : genpd_start_dev(genpd, dev);
 }
 
 /**
@@ -1186,8 +1263,8 @@ static int pm_genpd_restore_noirq(struct device *dev)
        if (genpd->suspended_count++ == 0) {
                /*
                 * The boot kernel might put the domain into arbitrary state,
-                * so make it appear as powered off to pm_genpd_poweron(), so
-                * that it tries to power it on in case it was really off.
+                * so make it appear as powered off to pm_genpd_sync_poweron(),
+                * so that it tries to power it on in case it was really off.
                 */
                genpd->status = GPD_STATE_POWER_OFF;
                if (genpd->suspend_power_off) {
@@ -1205,9 +1282,9 @@ static int pm_genpd_restore_noirq(struct device *dev)
        if (genpd->suspend_power_off)
                return 0;
 
-       pm_genpd_poweron(genpd);
+       pm_genpd_sync_poweron(genpd);
 
-       return dev_gpd_data(dev)->always_on ? 0 : genpd_start_dev(genpd, dev);
+       return genpd_start_dev(genpd, dev);
 }
 
 /**
@@ -1246,6 +1323,31 @@ static void pm_genpd_complete(struct device *dev)
        }
 }
 
/**
 * pm_genpd_syscore_switch - Switch power during system core suspend or resume.
 * @dev: Device that normally is marked as "always on" to switch power for.
 * @suspend: If true, power the device's domain down; otherwise power it up.
 *
 * This routine may only be called during the system core (syscore) suspend or
 * resume phase for devices whose "always on" flags are set.
 */
void pm_genpd_syscore_switch(struct device *dev, bool suspend)
{
	struct generic_pm_domain *genpd;

	/* Silently ignore devices not attached to a registered domain. */
	genpd = dev_to_genpd(dev);
	if (!pm_genpd_present(genpd))
		return;

	if (suspend) {
		/* Bump suspended_count before poweroff; resume reverses the
		 * order (poweron, then decrement). */
		genpd->suspended_count++;
		pm_genpd_sync_poweroff(genpd);
	} else {
		pm_genpd_sync_poweron(genpd);
		genpd->suspended_count--;
	}
}
EXPORT_SYMBOL_GPL(pm_genpd_syscore_switch);
+
 #else
 
 #define pm_genpd_prepare               NULL
@@ -1393,6 +1495,19 @@ int __pm_genpd_of_add_device(struct device_node *genpd_node, struct device *dev,
        return __pm_genpd_add_device(genpd, dev, td);
 }
 
+
/**
 * __pm_genpd_name_add_device - Find I/O PM domain and add a device to it.
 * @domain_name: Name of the PM domain to add the device to.
 * @dev: Device to be added.
 * @td: Set of PM QoS timing parameters to attach to the device.
 */
int __pm_genpd_name_add_device(const char *domain_name, struct device *dev,
			       struct gpd_timing_data *td)
{
	struct generic_pm_domain *genpd;

	/* A failed lookup yields NULL; __pm_genpd_add_device() rejects it. */
	genpd = pm_genpd_lookup_name(domain_name);
	return __pm_genpd_add_device(genpd, dev, td);
}
+
 /**
  * pm_genpd_remove_device - Remove a device from an I/O PM domain.
  * @genpd: PM domain to remove the device from.
@@ -1454,26 +1569,6 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
        return ret;
 }
 
-/**
- * pm_genpd_dev_always_on - Set/unset the "always on" flag for a given device.
- * @dev: Device to set/unset the flag for.
- * @val: The new value of the device's "always on" flag.
- */
-void pm_genpd_dev_always_on(struct device *dev, bool val)
-{
-       struct pm_subsys_data *psd;
-       unsigned long flags;
-
-       spin_lock_irqsave(&dev->power.lock, flags);
-
-       psd = dev_to_psd(dev);
-       if (psd && psd->domain_data)
-               to_gpd_data(psd->domain_data)->always_on = val;
-
-       spin_unlock_irqrestore(&dev->power.lock, flags);
-}
-EXPORT_SYMBOL_GPL(pm_genpd_dev_always_on);
-
 /**
  * pm_genpd_dev_need_restore - Set/unset the device's "need restore" flag.
  * @dev: Device to set/unset the flag for.
@@ -1505,7 +1600,8 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
        struct gpd_link *link;
        int ret = 0;
 
-       if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
+       if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
+           || genpd == subdomain)
                return -EINVAL;
 
  start:
@@ -1551,6 +1647,35 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
        return ret;
 }
 
+/**
+ * pm_genpd_add_subdomain_names - Add a subdomain to an I/O PM domain.
+ * @master_name: Name of the master PM domain to add the subdomain to.
+ * @subdomain_name: Name of the subdomain to be added.
+ */
+int pm_genpd_add_subdomain_names(const char *master_name,
+                                const char *subdomain_name)
+{
+       struct generic_pm_domain *master = NULL, *subdomain = NULL, *gpd;
+
+       if (IS_ERR_OR_NULL(master_name) || IS_ERR_OR_NULL(subdomain_name))
+               return -EINVAL;
+
+       mutex_lock(&gpd_list_lock);
+       list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
+               if (!master && !strcmp(gpd->name, master_name))
+                       master = gpd;
+
+               if (!subdomain && !strcmp(gpd->name, subdomain_name))
+                       subdomain = gpd;
+
+               if (master && subdomain)
+                       break;
+       }
+       mutex_unlock(&gpd_list_lock);
+
+       return pm_genpd_add_subdomain(master, subdomain);
+}
+
 /**
  * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
  * @genpd: Master PM domain to remove the subdomain from.
@@ -1704,7 +1829,16 @@ int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
 }
 EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks);
 
-int genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
+/**
+ * pm_genpd_attach_cpuidle - Connect the given PM domain with cpuidle.
+ * @genpd: PM domain to be connected with cpuidle.
+ * @state: cpuidle state this domain can disable/enable.
+ *
+ * Make a PM domain behave as though it contained a CPU core, that is, instead
+ * of calling its power down routine it will enable the given cpuidle state so
+ * that the cpuidle subsystem can power it down (if possible and desirable).
+ */
+int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
 {
        struct cpuidle_driver *cpuidle_drv;
        struct gpd_cpu_data *cpu_data;
@@ -1753,7 +1887,24 @@ int genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
        goto out;
 }
 
-int genpd_detach_cpuidle(struct generic_pm_domain *genpd)
+/**
+ * pm_genpd_name_attach_cpuidle - Find PM domain and connect cpuidle to it.
+ * @name: Name of the domain to connect to cpuidle.
+ * @state: cpuidle state this domain can manipulate.
+ */
+int pm_genpd_name_attach_cpuidle(const char *name, int state)
+{
+       return pm_genpd_attach_cpuidle(pm_genpd_lookup_name(name), state);
+}
+
+/**
+ * pm_genpd_detach_cpuidle - Remove the cpuidle connection from a PM domain.
+ * @genpd: PM domain to remove the cpuidle connection from.
+ *
+ * Remove the cpuidle connection set up by pm_genpd_attach_cpuidle() from the
+ * given PM domain.
+ */
+int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
 {
        struct gpd_cpu_data *cpu_data;
        struct cpuidle_state *idle_state;
@@ -1784,6 +1935,15 @@ int genpd_detach_cpuidle(struct generic_pm_domain *genpd)
        return ret;
 }
 
+/**
+ * pm_genpd_name_detach_cpuidle - Find PM domain and disconnect cpuidle from it.
+ * @name: Name of the domain to disconnect cpuidle from.
+ */
+int pm_genpd_name_detach_cpuidle(const char *name)
+{
+       return pm_genpd_detach_cpuidle(pm_genpd_lookup_name(name));
+}
+
 /* Default device callbacks for generic PM domains. */
 
 /**
index b0b072a88f5fdaf8447ae6fb9133a613be8fac06..a3c1404c79338c3b7e82b7b71835971dc42ec358 100644 (file)
@@ -57,20 +57,17 @@ static pm_message_t pm_transition;
 static int async_error;
 
 /**
- * device_pm_init - Initialize the PM-related part of a device object.
+ * device_pm_sleep_init - Initialize system suspend-related device fields.
  * @dev: Device object being initialized.
  */
-void device_pm_init(struct device *dev)
+void device_pm_sleep_init(struct device *dev)
 {
        dev->power.is_prepared = false;
        dev->power.is_suspended = false;
        init_completion(&dev->power.completion);
        complete_all(&dev->power.completion);
        dev->power.wakeup = NULL;
-       spin_lock_init(&dev->power.lock);
-       pm_runtime_init(dev);
        INIT_LIST_HEAD(&dev->power.entry);
-       dev->power.power_state = PMSG_INVALID;
 }
 
 /**
@@ -408,6 +405,9 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
        TRACE_DEVICE(dev);
        TRACE_RESUME(0);
 
+       if (dev->power.syscore)
+               goto Out;
+
        if (dev->pm_domain) {
                info = "noirq power domain ";
                callback = pm_noirq_op(&dev->pm_domain->ops, state);
@@ -429,6 +429,7 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
 
        error = dpm_run_callback(callback, dev, state, info);
 
+ Out:
        TRACE_RESUME(error);
        return error;
 }
@@ -486,6 +487,9 @@ static int device_resume_early(struct device *dev, pm_message_t state)
        TRACE_DEVICE(dev);
        TRACE_RESUME(0);
 
+       if (dev->power.syscore)
+               goto Out;
+
        if (dev->pm_domain) {
                info = "early power domain ";
                callback = pm_late_early_op(&dev->pm_domain->ops, state);
@@ -507,6 +511,7 @@ static int device_resume_early(struct device *dev, pm_message_t state)
 
        error = dpm_run_callback(callback, dev, state, info);
 
+ Out:
        TRACE_RESUME(error);
        return error;
 }
@@ -565,11 +570,13 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;
-       bool put = false;
 
        TRACE_DEVICE(dev);
        TRACE_RESUME(0);
 
+       if (dev->power.syscore)
+               goto Complete;
+
        dpm_wait(dev->parent, async);
        device_lock(dev);
 
@@ -583,7 +590,6 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
                goto Unlock;
 
        pm_runtime_enable(dev);
-       put = true;
 
        if (dev->pm_domain) {
                info = "power domain ";
@@ -632,13 +638,12 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
 
  Unlock:
        device_unlock(dev);
+
+ Complete:
        complete_all(&dev->power.completion);
 
        TRACE_RESUME(error);
 
-       if (put)
-               pm_runtime_put_sync(dev);
-
        return error;
 }
 
@@ -722,6 +727,9 @@ static void device_complete(struct device *dev, pm_message_t state)
        void (*callback)(struct device *) = NULL;
        char *info = NULL;
 
+       if (dev->power.syscore)
+               return;
+
        device_lock(dev);
 
        if (dev->pm_domain) {
@@ -749,6 +757,8 @@ static void device_complete(struct device *dev, pm_message_t state)
        }
 
        device_unlock(dev);
+
+       pm_runtime_put_sync(dev);
 }
 
 /**
@@ -834,6 +844,9 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
        pm_callback_t callback = NULL;
        char *info = NULL;
 
+       if (dev->power.syscore)
+               return 0;
+
        if (dev->pm_domain) {
                info = "noirq power domain ";
                callback = pm_noirq_op(&dev->pm_domain->ops, state);
@@ -917,6 +930,9 @@ static int device_suspend_late(struct device *dev, pm_message_t state)
        pm_callback_t callback = NULL;
        char *info = NULL;
 
+       if (dev->power.syscore)
+               return 0;
+
        if (dev->pm_domain) {
                info = "late power domain ";
                callback = pm_late_early_op(&dev->pm_domain->ops, state);
@@ -996,7 +1012,7 @@ int dpm_suspend_end(pm_message_t state)
 
        error = dpm_suspend_noirq(state);
        if (error) {
-               dpm_resume_early(state);
+               dpm_resume_early(resume_event(state));
                return error;
        }
 
@@ -1043,16 +1059,23 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
        if (async_error)
                goto Complete;
 
-       pm_runtime_get_noresume(dev);
+       /*
+        * If a device configured to wake up the system from sleep states
+        * has been suspended at run time and there's a resume request pending
+        * for it, this is equivalent to the device signaling wakeup, so the
+        * system suspend operation should be aborted.
+        */
        if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
                pm_wakeup_event(dev, 0);
 
        if (pm_wakeup_pending()) {
-               pm_runtime_put_sync(dev);
                async_error = -EBUSY;
                goto Complete;
        }
 
+       if (dev->power.syscore)
+               goto Complete;
+
        device_lock(dev);
 
        if (dev->pm_domain) {
@@ -1111,12 +1134,10 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
  Complete:
        complete_all(&dev->power.completion);
 
-       if (error) {
-               pm_runtime_put_sync(dev);
+       if (error)
                async_error = error;
-       } else if (dev->power.is_suspended) {
+       else if (dev->power.is_suspended)
                __pm_runtime_disable(dev, false);
-       }
 
        return error;
 }
@@ -1209,6 +1230,17 @@ static int device_prepare(struct device *dev, pm_message_t state)
        char *info = NULL;
        int error = 0;
 
+       if (dev->power.syscore)
+               return 0;
+
+       /*
+        * If a device's parent goes into runtime suspend at the wrong time,
+        * it won't be possible to resume the device.  To prevent this we
+        * block runtime suspend here, during the prepare phase, and allow
+        * it again during the complete phase.
+        */
+       pm_runtime_get_noresume(dev);
+
        device_lock(dev);
 
        dev->power.wakeup_path = device_may_wakeup(dev);
index ac993eafec82ecd707cfc6e2654b5a8f30703fc6..d9468642fc414c22f8578c82e57e2cac9ba224e5 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/rculist.h>
 #include <linux/rcupdate.h>
 #include <linux/opp.h>
+#include <linux/of.h>
 
 /*
  * Internal data structure organization with the OPP layer library is as
@@ -674,3 +675,49 @@ struct srcu_notifier_head *opp_get_notifier(struct device *dev)
 
        return &dev_opp->head;
 }
+
+#ifdef CONFIG_OF
+/**
+ * of_init_opp_table() - Initialize opp table from device tree
+ * @dev:       device pointer used to lookup device OPPs.
+ *
+ * Register the initial OPP table with the OPP library for given device.
+ */
+int of_init_opp_table(struct device *dev)
+{
+       const struct property *prop;
+       const __be32 *val;
+       int nr;
+
+       prop = of_find_property(dev->of_node, "operating-points", NULL);
+       if (!prop)
+               return -ENODEV;
+       if (!prop->value)
+               return -ENODATA;
+
+       /*
+        * Each OPP is a set of tuples consisting of frequency and
+        * voltage like <freq-kHz vol-uV>.
+        */
+       nr = prop->length / sizeof(u32);
+       if (nr % 2) {
+               dev_err(dev, "%s: Invalid OPP list\n", __func__);
+               return -EINVAL;
+       }
+
+       val = prop->value;
+       while (nr) {
+               unsigned long freq = be32_to_cpup(val++) * 1000;
+               unsigned long volt = be32_to_cpup(val++);
+
+               if (opp_add(dev, freq, volt)) {
+                       dev_warn(dev, "%s: Failed to add OPP %ld\n",
+                                __func__, freq);
+                       continue;
+               }
+               nr -= 2;
+       }
+
+       return 0;
+}
+#endif
index eeb4bff9505cd6dccd5d2cb30671fd69105966ef..0dbfdf4419af8914136060f0b9c88838e4f2db4a 100644 (file)
@@ -1,12 +1,32 @@
 #include <linux/pm_qos.h>
 
+static inline void device_pm_init_common(struct device *dev)
+{
+       if (!dev->power.early_init) {
+               spin_lock_init(&dev->power.lock);
+               dev->power.power_state = PMSG_INVALID;
+               dev->power.early_init = true;
+       }
+}
+
 #ifdef CONFIG_PM_RUNTIME
 
+static inline void pm_runtime_early_init(struct device *dev)
+{
+       dev->power.disable_depth = 1;
+       device_pm_init_common(dev);
+}
+
 extern void pm_runtime_init(struct device *dev);
 extern void pm_runtime_remove(struct device *dev);
 
 #else /* !CONFIG_PM_RUNTIME */
 
+static inline void pm_runtime_early_init(struct device *dev)
+{
+       device_pm_init_common(dev);
+}
+
 static inline void pm_runtime_init(struct device *dev) {}
 static inline void pm_runtime_remove(struct device *dev) {}
 
@@ -25,7 +45,7 @@ static inline struct device *to_device(struct list_head *entry)
        return container_of(entry, struct device, power.entry);
 }
 
-extern void device_pm_init(struct device *dev);
+extern void device_pm_sleep_init(struct device *dev);
 extern void device_pm_add(struct device *);
 extern void device_pm_remove(struct device *);
 extern void device_pm_move_before(struct device *, struct device *);
@@ -34,12 +54,7 @@ extern void device_pm_move_last(struct device *);
 
 #else /* !CONFIG_PM_SLEEP */
 
-static inline void device_pm_init(struct device *dev)
-{
-       spin_lock_init(&dev->power.lock);
-       dev->power.power_state = PMSG_INVALID;
-       pm_runtime_init(dev);
-}
+static inline void device_pm_sleep_init(struct device *dev) {}
 
 static inline void device_pm_add(struct device *dev)
 {
@@ -60,6 +75,13 @@ static inline void device_pm_move_last(struct device *dev) {}
 
 #endif /* !CONFIG_PM_SLEEP */
 
+static inline void device_pm_init(struct device *dev)
+{
+       device_pm_init_common(dev);
+       device_pm_sleep_init(dev);
+       pm_runtime_init(dev);
+}
+
 #ifdef CONFIG_PM
 
 /*
index 7d9c1cb1c39a7760081bae4d518efd8acf835902..3148b10dc2e59b92dd5913e2b34c446d54ac69b7 100644 (file)
@@ -509,6 +509,9 @@ static int rpm_resume(struct device *dev, int rpmflags)
  repeat:
        if (dev->power.runtime_error)
                retval = -EINVAL;
+       else if (dev->power.disable_depth == 1 && dev->power.is_suspended
+           && dev->power.runtime_status == RPM_ACTIVE)
+               retval = 1;
        else if (dev->power.disable_depth > 0)
                retval = -EACCES;
        if (retval)
index cbb463b3a750e30bc70d7d8de40ff7555e7670ee..e6ee5e80e546a1c7895194e529e2e70f5e53ce7f 100644 (file)
@@ -127,6 +127,8 @@ EXPORT_SYMBOL_GPL(wakeup_source_destroy);
  */
 void wakeup_source_add(struct wakeup_source *ws)
 {
+       unsigned long flags;
+
        if (WARN_ON(!ws))
                return;
 
@@ -135,9 +137,9 @@ void wakeup_source_add(struct wakeup_source *ws)
        ws->active = false;
        ws->last_time = ktime_get();
 
-       spin_lock_irq(&events_lock);
+       spin_lock_irqsave(&events_lock, flags);
        list_add_rcu(&ws->entry, &wakeup_sources);
-       spin_unlock_irq(&events_lock);
+       spin_unlock_irqrestore(&events_lock, flags);
 }
 EXPORT_SYMBOL_GPL(wakeup_source_add);
 
@@ -147,12 +149,14 @@ EXPORT_SYMBOL_GPL(wakeup_source_add);
  */
 void wakeup_source_remove(struct wakeup_source *ws)
 {
+       unsigned long flags;
+
        if (WARN_ON(!ws))
                return;
 
-       spin_lock_irq(&events_lock);
+       spin_lock_irqsave(&events_lock, flags);
        list_del_rcu(&ws->entry);
-       spin_unlock_irq(&events_lock);
+       spin_unlock_irqrestore(&events_lock, flags);
        synchronize_rcu();
 }
 EXPORT_SYMBOL_GPL(wakeup_source_remove);
@@ -649,6 +653,31 @@ void pm_wakeup_event(struct device *dev, unsigned int msec)
 }
 EXPORT_SYMBOL_GPL(pm_wakeup_event);
 
+static void print_active_wakeup_sources(void)
+{
+       struct wakeup_source *ws;
+       int active = 0;
+       struct wakeup_source *last_activity_ws = NULL;
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
+               if (ws->active) {
+                       pr_info("active wakeup source: %s\n", ws->name);
+                       active = 1;
+               } else if (!active &&
+                          (!last_activity_ws ||
+                           ktime_to_ns(ws->last_time) >
+                           ktime_to_ns(last_activity_ws->last_time))) {
+                       last_activity_ws = ws;
+               }
+       }
+
+       if (!active && last_activity_ws)
+               pr_info("last active wakeup source: %s\n",
+                       last_activity_ws->name);
+       rcu_read_unlock();
+}
+
 /**
  * pm_wakeup_pending - Check if power transition in progress should be aborted.
  *
@@ -671,6 +700,10 @@ bool pm_wakeup_pending(void)
                events_check_enabled = !ret;
        }
        spin_unlock_irqrestore(&events_lock, flags);
+
+       if (ret)
+               print_active_wakeup_sources();
+
        return ret;
 }
 
@@ -723,15 +756,16 @@ bool pm_get_wakeup_count(unsigned int *count, bool block)
 bool pm_save_wakeup_count(unsigned int count)
 {
        unsigned int cnt, inpr;
+       unsigned long flags;
 
        events_check_enabled = false;
-       spin_lock_irq(&events_lock);
+       spin_lock_irqsave(&events_lock, flags);
        split_counters(&cnt, &inpr);
        if (cnt == count && inpr == 0) {
                saved_count = count;
                events_check_enabled = true;
        }
-       spin_unlock_irq(&events_lock);
+       spin_unlock_irqrestore(&events_lock, flags);
        return events_check_enabled;
 }
 
index 06b3207adebdcec34d75a9bcc352343cad73fd8d..a533af218368ec7723754a3cf28d8d4cb5d4711a 100644 (file)
@@ -48,12 +48,12 @@ config BCMA_DRIVER_MIPS
 
 config BCMA_SFLASH
        bool
-       depends on BCMA_DRIVER_MIPS && BROKEN
+       depends on BCMA_DRIVER_MIPS
        default y
 
 config BCMA_NFLASH
        bool
-       depends on BCMA_DRIVER_MIPS && BROKEN
+       depends on BCMA_DRIVER_MIPS
        default y
 
 config BCMA_DRIVER_GMAC_CMN
index 3cf9cc923cd27fa5765e4c22f9b78ff813a8e355..169fc58427d3cefc343c2e1ac229a5c2c280655f 100644 (file)
@@ -54,6 +54,7 @@ u32 bcma_pmu_get_clockcpu(struct bcma_drv_cc *cc);
 #ifdef CONFIG_BCMA_SFLASH
 /* driver_chipcommon_sflash.c */
 int bcma_sflash_init(struct bcma_drv_cc *cc);
+extern struct platform_device bcma_sflash_dev;
 #else
 static inline int bcma_sflash_init(struct bcma_drv_cc *cc)
 {
@@ -65,6 +66,7 @@ static inline int bcma_sflash_init(struct bcma_drv_cc *cc)
 #ifdef CONFIG_BCMA_NFLASH
 /* driver_chipcommon_nflash.c */
 int bcma_nflash_init(struct bcma_drv_cc *cc);
+extern struct platform_device bcma_nflash_dev;
 #else
 static inline int bcma_nflash_init(struct bcma_drv_cc *cc)
 {
index 63c8b470536f7278c164f762c1033c0bd27ca9d9..03bbe104338ff70db5abb6fbace10b1595f438a0 100644 (file)
@@ -65,7 +65,7 @@ void bcma_core_set_clockmode(struct bcma_device *core,
        switch (clkmode) {
        case BCMA_CLKMODE_FAST:
                bcma_set32(core, BCMA_CLKCTLST, BCMA_CLKCTLST_FORCEHT);
-               udelay(64);
+               usleep_range(64, 300);
                for (i = 0; i < 1500; i++) {
                        if (bcma_read32(core, BCMA_CLKCTLST) &
                            BCMA_CLKCTLST_HAVEHT) {
index 574d62435bc2f01718d8bdb5113a3e95ec8e66ab..9042781edec340e932e39d2b75c7dba495764e42 100644 (file)
@@ -5,15 +5,37 @@
  * Licensed under the GNU/GPL. See COPYING for details.
  */
 
+#include <linux/platform_device.h>
 #include <linux/bcma/bcma.h>
-#include <linux/bcma/bcma_driver_chipcommon.h>
-#include <linux/delay.h>
 
 #include "bcma_private.h"
 
+struct platform_device bcma_nflash_dev = {
+       .name           = "bcma_nflash",
+       .num_resources  = 0,
+};
+
 /* Initialize NAND flash access */
 int bcma_nflash_init(struct bcma_drv_cc *cc)
 {
-       bcma_err(cc->core->bus, "NAND flash support is broken\n");
+       struct bcma_bus *bus = cc->core->bus;
+
+       if (bus->chipinfo.id != BCMA_CHIP_ID_BCM4706 &&
+           cc->core->id.rev != 0x38) {
+               bcma_err(bus, "NAND flash on unsupported board!\n");
+               return -ENOTSUPP;
+       }
+
+       if (!(cc->capabilities & BCMA_CC_CAP_NFLASH)) {
+               bcma_err(bus, "NAND flash not present according to ChipCommon\n");
+               return -ENODEV;
+       }
+
+       cc->nflash.present = true;
+
+       /* Prepare platform device, but don't register it yet. It's too early,
+        * malloc (required by device_private_init) is not available yet. */
+       bcma_nflash_dev.dev.platform_data = &cc->nflash;
+
        return 0;
 }
index c9a4f46c5143e28309ca55b788471f1241c2afff..201faf106b3f4e342337e2219213a72c670ef447 100644 (file)
@@ -76,7 +76,10 @@ static void bcma_pmu_resources_init(struct bcma_drv_cc *cc)
        if (max_msk)
                bcma_cc_write32(cc, BCMA_CC_PMU_MAXRES_MSK, max_msk);
 
-       /* Add some delay; allow resources to come up and settle. */
+       /*
+        * Add some delay; allow resources to come up and settle.
+        * Delay is required for SoC (early init).
+        */
        mdelay(2);
 }
 
@@ -101,7 +104,7 @@ void bcma_chipco_bcm4331_ext_pa_lines_ctl(struct bcma_drv_cc *cc, bool enable)
        bcma_cc_write32(cc, BCMA_CC_CHIPCTL, val);
 }
 
-void bcma_pmu_workarounds(struct bcma_drv_cc *cc)
+static void bcma_pmu_workarounds(struct bcma_drv_cc *cc)
 {
        struct bcma_bus *bus = cc->core->bus;
 
@@ -257,7 +260,7 @@ static u32 bcma_pmu_clock_bcm4706(struct bcma_drv_cc *cc, u32 pll0, u32 m)
 }
 
 /* query bus clock frequency for PMU-enabled chipcommon */
-u32 bcma_pmu_get_clockcontrol(struct bcma_drv_cc *cc)
+static u32 bcma_pmu_get_clockcontrol(struct bcma_drv_cc *cc)
 {
        struct bcma_bus *bus = cc->core->bus;
 
index 6e157a58a1d7f9dc68ea10251c4157a2c2f2c012..2c4eec2ca5a0784bd50d392a84edc74911a58916 100644 (file)
  * Licensed under the GNU/GPL. See COPYING for details.
  */
 
+#include <linux/platform_device.h>
 #include <linux/bcma/bcma.h>
-#include <linux/bcma/bcma_driver_chipcommon.h>
-#include <linux/delay.h>
 
 #include "bcma_private.h"
 
+static struct resource bcma_sflash_resource = {
+       .name   = "bcma_sflash",
+       .start  = BCMA_SFLASH,
+       .end    = 0,
+       .flags  = IORESOURCE_MEM | IORESOURCE_READONLY,
+};
+
+struct platform_device bcma_sflash_dev = {
+       .name           = "bcma_sflash",
+       .resource       = &bcma_sflash_resource,
+       .num_resources  = 1,
+};
+
+struct bcma_sflash_tbl_e {
+       char *name;
+       u32 id;
+       u32 blocksize;
+       u16 numblocks;
+};
+
+static struct bcma_sflash_tbl_e bcma_sflash_st_tbl[] = {
+       { "", 0x14, 0x10000, 32, },
+       { 0 },
+};
+
+static struct bcma_sflash_tbl_e bcma_sflash_sst_tbl[] = {
+       { 0 },
+};
+
+static struct bcma_sflash_tbl_e bcma_sflash_at_tbl[] = {
+       { 0 },
+};
+
+static void bcma_sflash_cmd(struct bcma_drv_cc *cc, u32 opcode)
+{
+       int i;
+       bcma_cc_write32(cc, BCMA_CC_FLASHCTL,
+                       BCMA_CC_FLASHCTL_START | opcode);
+       for (i = 0; i < 1000; i++) {
+               if (!(bcma_cc_read32(cc, BCMA_CC_FLASHCTL) &
+                     BCMA_CC_FLASHCTL_BUSY))
+                       return;
+               cpu_relax();
+       }
+       bcma_err(cc->core->bus, "SFLASH control command failed (timeout)!\n");
+}
+
 /* Initialize serial flash access */
 int bcma_sflash_init(struct bcma_drv_cc *cc)
 {
-       bcma_err(cc->core->bus, "Serial flash support is broken\n");
+       struct bcma_bus *bus = cc->core->bus;
+       struct bcma_sflash *sflash = &cc->sflash;
+       struct bcma_sflash_tbl_e *e;
+       u32 id, id2;
+
+       switch (cc->capabilities & BCMA_CC_CAP_FLASHT) {
+       case BCMA_CC_FLASHT_STSER:
+               bcma_sflash_cmd(cc, BCMA_CC_FLASHCTL_ST_DP);
+
+               bcma_cc_write32(cc, BCMA_CC_FLASHADDR, 0);
+               bcma_sflash_cmd(cc, BCMA_CC_FLASHCTL_ST_RES);
+               id = bcma_cc_read32(cc, BCMA_CC_FLASHDATA);
+
+               bcma_cc_write32(cc, BCMA_CC_FLASHADDR, 1);
+               bcma_sflash_cmd(cc, BCMA_CC_FLASHCTL_ST_RES);
+               id2 = bcma_cc_read32(cc, BCMA_CC_FLASHDATA);
+
+               switch (id) {
+               case 0xbf:
+                       for (e = bcma_sflash_sst_tbl; e->name; e++) {
+                               if (e->id == id2)
+                                       break;
+                       }
+                       break;
+               default:
+                       for (e = bcma_sflash_st_tbl; e->name; e++) {
+                               if (e->id == id)
+                                       break;
+                       }
+                       break;
+               }
+               if (!e->name) {
+                       bcma_err(bus, "Unsupported ST serial flash (id: 0x%X, id2: 0x%X)\n", id, id2);
+                       return -ENOTSUPP;
+               }
+
+               break;
+       case BCMA_CC_FLASHT_ATSER:
+               bcma_sflash_cmd(cc, BCMA_CC_FLASHCTL_AT_STATUS);
+               id = bcma_cc_read32(cc, BCMA_CC_FLASHDATA) & 0x3c;
+
+               for (e = bcma_sflash_at_tbl; e->name; e++) {
+                       if (e->id == id)
+                               break;
+               }
+               if (!e->name) {
+                       bcma_err(bus, "Unsupported Atmel serial flash (id: 0x%X)\n", id);
+                       return -ENOTSUPP;
+               }
+
+               break;
+       default:
+               bcma_err(bus, "Unsupported flash type\n");
+               return -ENOTSUPP;
+       }
+
+       sflash->window = BCMA_SFLASH;
+       sflash->blocksize = e->blocksize;
+       sflash->numblocks = e->numblocks;
+       sflash->size = sflash->blocksize * sflash->numblocks;
+       sflash->present = true;
+
+       bcma_info(bus, "Found %s serial flash (size: %dKiB, blocksize: 0x%X, blocks: %d)\n",
+                 e->name, sflash->size / 1024, sflash->blocksize,
+                 sflash->numblocks);
+
+       /* Prepare platform device, but don't register it yet. It's too early,
+        * malloc (required by device_private_init) is not available yet. */
+       bcma_sflash_dev.resource[0].end = bcma_sflash_dev.resource[0].start +
+                                         sflash->size;
+       bcma_sflash_dev.dev.platform_data = sflash;
+
        return 0;
 }
index c32ebd537abe3a3e5f8f5e777f119c8768c9ea38..c39ee6d458506d6bb9c44eae651742e692255207 100644 (file)
@@ -51,7 +51,7 @@ static void bcma_pcie_mdio_set_phy(struct bcma_drv_pci *pc, u8 phy)
                v = pcicore_read32(pc, BCMA_CORE_PCI_MDIO_CONTROL);
                if (v & BCMA_CORE_PCI_MDIOCTL_ACCESS_DONE)
                        break;
-               msleep(1);
+               usleep_range(1000, 2000);
        }
 }
 
@@ -92,7 +92,7 @@ static u16 bcma_pcie_mdio_read(struct bcma_drv_pci *pc, u8 device, u8 address)
                        ret = pcicore_read32(pc, BCMA_CORE_PCI_MDIO_DATA);
                        break;
                }
-               msleep(1);
+               usleep_range(1000, 2000);
        }
        pcicore_write32(pc, BCMA_CORE_PCI_MDIO_CONTROL, 0);
        return ret;
@@ -132,7 +132,7 @@ static void bcma_pcie_mdio_write(struct bcma_drv_pci *pc, u8 device,
                v = pcicore_read32(pc, BCMA_CORE_PCI_MDIO_CONTROL);
                if (v & BCMA_CORE_PCI_MDIOCTL_ACCESS_DONE)
                        break;
-               msleep(1);
+               usleep_range(1000, 2000);
        }
        pcicore_write32(pc, BCMA_CORE_PCI_MDIO_CONTROL, 0);
 }
index cbae2c2313366c8792d3a08e5a963aa05a35655a..9baf886e82df39f710b897a0b864824fbedfebad 100644 (file)
@@ -425,9 +425,9 @@ void __devinit bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
        pc_host->io_resource.flags = IORESOURCE_IO | IORESOURCE_PCI_FIXED;
 
        /* Reset RC */
-       udelay(3000);
+       usleep_range(3000, 5000);
        pcicore_write32(pc, BCMA_CORE_PCI_CTL, BCMA_CORE_PCI_CTL_RST_OE);
-       udelay(1000);
+       usleep_range(1000, 2000);
        pcicore_write32(pc, BCMA_CORE_PCI_CTL, BCMA_CORE_PCI_CTL_RST |
                        BCMA_CORE_PCI_CTL_RST_OE);
 
@@ -481,7 +481,7 @@ void __devinit bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
         * before issuing configuration requests to PCI Express
         * devices.
         */
-       udelay(100000);
+       msleep(100);
 
        bcma_core_pci_enable_crs(pc);
 
@@ -501,7 +501,7 @@ void __devinit bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
        set_io_port_base(pc_host->pci_controller.io_map_base);
        /* Give some time to the PCI controller to configure itself with the new
         * values. Not waiting at this point causes crashes of the machine. */
-       mdelay(10);
+       usleep_range(10000, 15000);
        register_pci_controller(&pc_host->pci_controller);
        return;
 }
index a6e5672c67e77f473a8685884ff4626791e99ab2..b6b4b5ebd4c2560255b3a53bbceae0a814723e96 100644 (file)
@@ -77,8 +77,8 @@ static void bcma_host_pci_write32(struct bcma_device *core, u16 offset,
 }
 
 #ifdef CONFIG_BCMA_BLOCKIO
-void bcma_host_pci_block_read(struct bcma_device *core, void *buffer,
-                             size_t count, u16 offset, u8 reg_width)
+static void bcma_host_pci_block_read(struct bcma_device *core, void *buffer,
+                                    size_t count, u16 offset, u8 reg_width)
 {
        void __iomem *addr = core->bus->mmio + offset;
        if (core->bus->mapped_core != core)
@@ -100,8 +100,9 @@ void bcma_host_pci_block_read(struct bcma_device *core, void *buffer,
        }
 }
 
-void bcma_host_pci_block_write(struct bcma_device *core, const void *buffer,
-                              size_t count, u16 offset, u8 reg_width)
+static void bcma_host_pci_block_write(struct bcma_device *core,
+                                     const void *buffer, size_t count,
+                                     u16 offset, u8 reg_width)
 {
        void __iomem *addr = core->bus->mmio + offset;
        if (core->bus->mapped_core != core)
@@ -139,7 +140,7 @@ static void bcma_host_pci_awrite32(struct bcma_device *core, u16 offset,
        iowrite32(value, core->bus->mmio + (1 * BCMA_CORE_SIZE) + offset);
 }
 
-const struct bcma_host_ops bcma_host_pci_ops = {
+static const struct bcma_host_ops bcma_host_pci_ops = {
        .read8          = bcma_host_pci_read8,
        .read16         = bcma_host_pci_read16,
        .read32         = bcma_host_pci_read32,
@@ -272,6 +273,7 @@ static DEFINE_PCI_DEVICE_TABLE(bcma_pci_bridge_tbl) = {
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4331) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4353) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4357) },
+       { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4358) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4359) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4727) },
        { 0, },
index 3c381fb8f9c4797c2a413246ed90ac26305582cd..3475e600011a5c5ce0ec6ffe2b41f4f68b04c0e7 100644 (file)
@@ -143,7 +143,7 @@ static void bcma_host_soc_awrite32(struct bcma_device *core, u16 offset,
        writel(value, core->io_wrap + offset);
 }
 
-const struct bcma_host_ops bcma_host_soc_ops = {
+static const struct bcma_host_ops bcma_host_soc_ops = {
        .read8          = bcma_host_soc_read8,
        .read16         = bcma_host_soc_read16,
        .read32         = bcma_host_soc_read32,
index 758af9ccdef0fa3b71b891bb1d75c3bbc90f6f05..432aeeedfd5e6992c28ea081e4185bf88ffeed8e 100644 (file)
@@ -7,6 +7,7 @@
 
 #include "bcma_private.h"
 #include <linux/module.h>
+#include <linux/platform_device.h>
 #include <linux/bcma/bcma.h>
 #include <linux/slab.h>
 
@@ -136,6 +137,22 @@ static int bcma_register_cores(struct bcma_bus *bus)
                dev_id++;
        }
 
+#ifdef CONFIG_BCMA_SFLASH
+       if (bus->drv_cc.sflash.present) {
+               err = platform_device_register(&bcma_sflash_dev);
+               if (err)
+                       bcma_err(bus, "Error registering serial flash\n");
+       }
+#endif
+
+#ifdef CONFIG_BCMA_NFLASH
+       if (bus->drv_cc.nflash.present) {
+               err = platform_device_register(&bcma_nflash_dev);
+               if (err)
+                       bcma_err(bus, "Error registering NAND flash\n");
+       }
+#endif
+
        return 0;
 }
 
@@ -210,7 +227,17 @@ int __devinit bcma_bus_register(struct bcma_bus *bus)
 
 void bcma_bus_unregister(struct bcma_bus *bus)
 {
+       struct bcma_device *cores[3];
+
+       cores[0] = bcma_find_core(bus, BCMA_CORE_MIPS_74K);
+       cores[1] = bcma_find_core(bus, BCMA_CORE_PCIE);
+       cores[2] = bcma_find_core(bus, BCMA_CORE_4706_MAC_GBIT_COMMON);
+
        bcma_unregister_cores(bus);
+
+       kfree(cores[2]);
+       kfree(cores[1]);
+       kfree(cores[0]);
 }
 
 int __init bcma_bus_early_register(struct bcma_bus *bus,
index 9ea4627dc0c233a808f322816c2560fd9c145d3d..0d546b64be341239a5ee405970f166be47f83081 100644 (file)
@@ -507,7 +507,9 @@ static bool bcma_sprom_onchip_available(struct bcma_bus *bus)
                /* for these chips OTP is always available */
                present = true;
                break;
+       case BCMA_CHIP_ID_BCM43227:
        case BCMA_CHIP_ID_BCM43228:
+       case BCMA_CHIP_ID_BCM43428:
                present = chip_status & BCMA_CC_CHIPST_43228_OTP_PRESENT;
                break;
        default:
index a7d6347aaa7913b2a029014a95a2558d8360597e..17c675c522954cc39a210e431603aac2a3a2d946 100644 (file)
@@ -672,7 +672,6 @@ static void __reschedule_timeout(int drive, const char *message)
 
        if (drive == current_reqD)
                drive = current_drive;
-       __cancel_delayed_work(&fd_timeout);
 
        if (drive < 0 || drive >= N_DRIVE) {
                delay = 20UL * HZ;
@@ -680,7 +679,7 @@ static void __reschedule_timeout(int drive, const char *message)
        } else
                delay = UDP->timeout;
 
-       queue_delayed_work(floppy_wq, &fd_timeout, delay);
+       mod_delayed_work(floppy_wq, &fd_timeout, delay);
        if (UDP->flags & FD_DEBUG)
                DPRINT("reschedule timeout %s\n", message);
        timeout_message = message;
@@ -891,7 +890,7 @@ static void unlock_fdc(void)
 
        raw_cmd = NULL;
        command_status = FD_COMMAND_NONE;
-       __cancel_delayed_work(&fd_timeout);
+       cancel_delayed_work(&fd_timeout);
        do_floppy = NULL;
        cont = NULL;
        clear_bit(0, &fdc_busy);
index 3bba65510d23afdf39070f7d23552e8e15e27af2..e9d594fd12cbee408251c4ead03d1b71183ff7ae 100644 (file)
@@ -1038,10 +1038,10 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
 {
        int err;
        struct loop_func_table *xfer;
-       uid_t uid = current_uid();
+       kuid_t uid = current_uid();
 
        if (lo->lo_encrypt_key_size &&
-           lo->lo_key_owner != uid &&
+           !uid_eq(lo->lo_key_owner, uid) &&
            !capable(CAP_SYS_ADMIN))
                return -EPERM;
        if (lo->lo_state != Lo_bound)
index 2c2d2e5c15974c30755fd61715a189029dff7cd6..007db8986e84d3df91eb0a00707f14d4c02d0ed3 100644 (file)
@@ -670,7 +670,7 @@ static void xlvbd_release_gendisk(struct blkfront_info *info)
        spin_unlock_irqrestore(&info->io_lock, flags);
 
        /* Flush gnttab callback work. Must be done with no locks held. */
-       flush_work_sync(&info->work);
+       flush_work(&info->work);
 
        del_gendisk(info->gd);
 
@@ -719,7 +719,7 @@ static void blkif_free(struct blkfront_info *info, int suspend)
        spin_unlock_irq(&info->io_lock);
 
        /* Flush gnttab callback work. Must be done with no locks held. */
-       flush_work_sync(&info->work);
+       flush_work(&info->work);
 
        /* Free resources associated with old device channel. */
        if (info->ring_ref != GRANT_INVALID_REF) {
index 37ae175162f346a3316b4864b2f1eebefd9e9d71..364f82b34d036bca12cbf0d48ef16af9b8ad68d2 100644 (file)
@@ -177,7 +177,7 @@ static int bcm203x_probe(struct usb_interface *intf, const struct usb_device_id
        if (intf->cur_altsetting->desc.bInterfaceNumber != 0)
                return -ENODEV;
 
-       data = kzalloc(sizeof(*data), GFP_KERNEL);
+       data = devm_kzalloc(&intf->dev, sizeof(*data), GFP_KERNEL);
        if (!data) {
                BT_ERR("Can't allocate memory for data structure");
                return -ENOMEM;
@@ -189,14 +189,12 @@ static int bcm203x_probe(struct usb_interface *intf, const struct usb_device_id
        data->urb = usb_alloc_urb(0, GFP_KERNEL);
        if (!data->urb) {
                BT_ERR("Can't allocate URB");
-               kfree(data);
                return -ENOMEM;
        }
 
        if (request_firmware(&firmware, "BCM2033-MD.hex", &udev->dev) < 0) {
                BT_ERR("Mini driver request failed");
                usb_free_urb(data->urb);
-               kfree(data);
                return -EIO;
        }
 
@@ -209,7 +207,6 @@ static int bcm203x_probe(struct usb_interface *intf, const struct usb_device_id
                BT_ERR("Can't allocate memory for mini driver");
                release_firmware(firmware);
                usb_free_urb(data->urb);
-               kfree(data);
                return -ENOMEM;
        }
 
@@ -224,7 +221,6 @@ static int bcm203x_probe(struct usb_interface *intf, const struct usb_device_id
                BT_ERR("Firmware request failed");
                usb_free_urb(data->urb);
                kfree(data->buffer);
-               kfree(data);
                return -EIO;
        }
 
@@ -236,7 +232,6 @@ static int bcm203x_probe(struct usb_interface *intf, const struct usb_device_id
                release_firmware(firmware);
                usb_free_urb(data->urb);
                kfree(data->buffer);
-               kfree(data);
                return -ENOMEM;
        }
 
@@ -271,7 +266,6 @@ static void bcm203x_disconnect(struct usb_interface *intf)
        usb_free_urb(data->urb);
        kfree(data->fw_data);
        kfree(data->buffer);
-       kfree(data);
 }
 
 static struct usb_driver bcm203x_driver = {
index 32e825144fe9835bb30ef4693bf902d6a13fc87e..995aee9cba22a8d871289004a4cef5ec60cdeaa2 100644 (file)
@@ -653,7 +653,7 @@ static int bfusb_probe(struct usb_interface *intf, const struct usb_device_id *i
        }
 
        /* Initialize control structure and load firmware */
-       data = kzalloc(sizeof(struct bfusb_data), GFP_KERNEL);
+       data = devm_kzalloc(&intf->dev, sizeof(struct bfusb_data), GFP_KERNEL);
        if (!data) {
                BT_ERR("Can't allocate memory for control structure");
                goto done;
@@ -674,7 +674,7 @@ static int bfusb_probe(struct usb_interface *intf, const struct usb_device_id *i
 
        if (request_firmware(&firmware, "bfubase.frm", &udev->dev) < 0) {
                BT_ERR("Firmware request failed");
-               goto error;
+               goto done;
        }
 
        BT_DBG("firmware data %p size %zu", firmware->data, firmware->size);
@@ -690,7 +690,7 @@ static int bfusb_probe(struct usb_interface *intf, const struct usb_device_id *i
        hdev = hci_alloc_dev();
        if (!hdev) {
                BT_ERR("Can't allocate HCI device");
-               goto error;
+               goto done;
        }
 
        data->hdev = hdev;
@@ -708,7 +708,7 @@ static int bfusb_probe(struct usb_interface *intf, const struct usb_device_id *i
        if (hci_register_dev(hdev) < 0) {
                BT_ERR("Can't register HCI device");
                hci_free_dev(hdev);
-               goto error;
+               goto done;
        }
 
        usb_set_intfdata(intf, data);
@@ -718,9 +718,6 @@ static int bfusb_probe(struct usb_interface *intf, const struct usb_device_id *i
 release:
        release_firmware(firmware);
 
-error:
-       kfree(data);
-
 done:
        return -EIO;
 }
@@ -741,7 +738,6 @@ static void bfusb_disconnect(struct usb_interface *intf)
 
        hci_unregister_dev(hdev);
        hci_free_dev(hdev);
-       kfree(data);
 }
 
 static struct usb_driver bfusb_driver = {
index 66c3a6770c417a5dbb714a9c10ae029fc2675109..0d26851d6e495a8624e611394709a1bf1246ef90 100644 (file)
@@ -681,7 +681,7 @@ static int bluecard_hci_send_frame(struct sk_buff *skb)
        case HCI_SCODATA_PKT:
                hdev->stat.sco_tx++;
                break;
-       };
+       }
 
        /* Prepend skb with frame type */
        memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
@@ -849,7 +849,7 @@ static int bluecard_probe(struct pcmcia_device *link)
        bluecard_info_t *info;
 
        /* Create new info device */
-       info = kzalloc(sizeof(*info), GFP_KERNEL);
+       info = devm_kzalloc(&link->dev, sizeof(*info), GFP_KERNEL);
        if (!info)
                return -ENOMEM;
 
@@ -864,10 +864,7 @@ static int bluecard_probe(struct pcmcia_device *link)
 
 static void bluecard_detach(struct pcmcia_device *link)
 {
-       bluecard_info_t *info = link->priv;
-
        bluecard_release(link);
-       kfree(info);
 }
 
 
index 29caaed2d715bd6de4f8b355bc8bc11b9580a7b6..2fe4a8031348f0c8b05074eb9889a1d2e02a7a91 100644 (file)
@@ -443,7 +443,7 @@ static int bpa10x_probe(struct usb_interface *intf, const struct usb_device_id *
        if (intf->cur_altsetting->desc.bInterfaceNumber != 0)
                return -ENODEV;
 
-       data = kzalloc(sizeof(*data), GFP_KERNEL);
+       data = devm_kzalloc(&intf->dev, sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;
 
@@ -453,10 +453,8 @@ static int bpa10x_probe(struct usb_interface *intf, const struct usb_device_id *
        init_usb_anchor(&data->rx_anchor);
 
        hdev = hci_alloc_dev();
-       if (!hdev) {
-               kfree(data);
+       if (!hdev)
                return -ENOMEM;
-       }
 
        hdev->bus = HCI_USB;
        hci_set_drvdata(hdev, data);
@@ -475,7 +473,6 @@ static int bpa10x_probe(struct usb_interface *intf, const struct usb_device_id *
        err = hci_register_dev(hdev);
        if (err < 0) {
                hci_free_dev(hdev);
-               kfree(data);
                return err;
        }
 
@@ -500,7 +497,6 @@ static void bpa10x_disconnect(struct usb_interface *intf)
        hci_free_dev(data->hdev);
        kfree_skb(data->rx_skb[0]);
        kfree_skb(data->rx_skb[1]);
-       kfree(data);
 }
 
 static struct usb_driver bpa10x_driver = {
index 8925b6d672a6ef7c14dc89741e7662fc3e057ef4..7ffd3f407144dc05c2bb2a9d9b29848efeeba347 100644 (file)
@@ -638,7 +638,7 @@ static int bt3c_probe(struct pcmcia_device *link)
        bt3c_info_t *info;
 
        /* Create new info device */
-       info = kzalloc(sizeof(*info), GFP_KERNEL);
+       info = devm_kzalloc(&link->dev, sizeof(*info), GFP_KERNEL);
        if (!info)
                return -ENOMEM;
 
@@ -654,10 +654,7 @@ static int bt3c_probe(struct pcmcia_device *link)
 
 static void bt3c_detach(struct pcmcia_device *link)
 {
-       bt3c_info_t *info = link->priv;
-
        bt3c_release(link);
-       kfree(info);
 }
 
 static int bt3c_check_config(struct pcmcia_device *p_dev, void *priv_data)
index 6a9e9717d3ab8327da49160823ef136ada65e053..3f4bfc814dc7d5a0382635dbe16a41af413e59fe 100644 (file)
@@ -600,8 +600,7 @@ static int btmrvl_sdio_card_to_host(struct btmrvl_private *priv)
 exit:
        if (ret) {
                hdev->stat.err_rx++;
-               if (skb)
-                       kfree_skb(skb);
+               kfree_skb(skb);
        }
 
        return ret;
@@ -956,11 +955,9 @@ static int btmrvl_sdio_probe(struct sdio_func *func,
        BT_INFO("vendor=0x%x, device=0x%x, class=%d, fn=%d",
                        id->vendor, id->device, id->class, func->num);
 
-       card = kzalloc(sizeof(*card), GFP_KERNEL);
-       if (!card) {
-               ret = -ENOMEM;
-               goto done;
-       }
+       card = devm_kzalloc(&func->dev, sizeof(*card), GFP_KERNEL);
+       if (!card)
+               return -ENOMEM;
 
        card->func = func;
 
@@ -974,8 +971,7 @@ static int btmrvl_sdio_probe(struct sdio_func *func,
 
        if (btmrvl_sdio_register_dev(card) < 0) {
                BT_ERR("Failed to register BT device!");
-               ret = -ENODEV;
-               goto free_card;
+               return -ENODEV;
        }
 
        /* Disable the interrupts on the card */
@@ -1023,9 +1019,6 @@ disable_host_int:
        btmrvl_sdio_disable_host_int(card);
 unreg_dev:
        btmrvl_sdio_unregister_dev(card);
-free_card:
-       kfree(card);
-done:
        return ret;
 }
 
@@ -1047,7 +1040,6 @@ static void btmrvl_sdio_remove(struct sdio_func *func)
                        BT_DBG("unregester dev");
                        btmrvl_sdio_unregister_dev(card);
                        btmrvl_remove_card(card->priv);
-                       kfree(card);
                }
        }
 }
index e10ea03470510f876bd3b09ef572630c4ef83e29..4a9909713874dd03eb52240b148ed83b9a22b9d8 100644 (file)
@@ -304,7 +304,7 @@ static int btsdio_probe(struct sdio_func *func,
                tuple = tuple->next;
        }
 
-       data = kzalloc(sizeof(*data), GFP_KERNEL);
+       data = devm_kzalloc(&func->dev, sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;
 
@@ -315,10 +315,8 @@ static int btsdio_probe(struct sdio_func *func,
        skb_queue_head_init(&data->txq);
 
        hdev = hci_alloc_dev();
-       if (!hdev) {
-               kfree(data);
+       if (!hdev)
                return -ENOMEM;
-       }
 
        hdev->bus = HCI_SDIO;
        hci_set_drvdata(hdev, data);
@@ -340,7 +338,6 @@ static int btsdio_probe(struct sdio_func *func,
        err = hci_register_dev(hdev);
        if (err < 0) {
                hci_free_dev(hdev);
-               kfree(data);
                return err;
        }
 
@@ -366,7 +363,6 @@ static void btsdio_remove(struct sdio_func *func)
        hci_unregister_dev(hdev);
 
        hci_free_dev(hdev);
-       kfree(data);
 }
 
 static struct sdio_driver btsdio_driver = {
index 21e803a6a281690af1d6598ba73d800fbf70c5dc..35a553a90616d85f07bf5c03d5d24e5373494ac6 100644 (file)
@@ -446,7 +446,7 @@ static int btuart_hci_send_frame(struct sk_buff *skb)
        case HCI_SCODATA_PKT:
                hdev->stat.sco_tx++;
                break;
-       };
+       }
 
        /* Prepend skb with frame type */
        memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
@@ -567,7 +567,7 @@ static int btuart_probe(struct pcmcia_device *link)
        btuart_info_t *info;
 
        /* Create new info device */
-       info = kzalloc(sizeof(*info), GFP_KERNEL);
+       info = devm_kzalloc(&link->dev, sizeof(*info), GFP_KERNEL);
        if (!info)
                return -ENOMEM;
 
@@ -583,10 +583,7 @@ static int btuart_probe(struct pcmcia_device *link)
 
 static void btuart_detach(struct pcmcia_device *link)
 {
-       btuart_info_t *info = link->priv;
-
        btuart_release(link);
-       kfree(info);
 }
 
 static int btuart_check_config(struct pcmcia_device *p_dev, void *priv_data)
index 654e248763efb98024bd81b57118cd2ad1d77cdf..debda27df9b0452e3d7f3e5d2b06855c1b7af1f4 100644 (file)
@@ -96,11 +96,12 @@ static struct usb_device_id btusb_table[] = {
        { USB_DEVICE(0x0c10, 0x0000) },
 
        /* Broadcom BCM20702A0 */
+       { USB_DEVICE(0x04ca, 0x2003) },
        { USB_DEVICE(0x0489, 0xe042) },
        { USB_DEVICE(0x413c, 0x8197) },
 
        /* Foxconn - Hon Hai */
-       { USB_DEVICE(0x0489, 0xe033) },
+       { USB_VENDOR_AND_INTERFACE_INFO(0x0489, 0xff, 0x01, 0x01) },
 
        /*Broadcom devices with vendor specific id */
        { USB_VENDOR_AND_INTERFACE_INFO(0x0a5c, 0xff, 0x01, 0x01) },
@@ -956,7 +957,7 @@ static int btusb_probe(struct usb_interface *intf,
                        return -ENODEV;
        }
 
-       data = kzalloc(sizeof(*data), GFP_KERNEL);
+       data = devm_kzalloc(&intf->dev, sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;
 
@@ -979,10 +980,8 @@ static int btusb_probe(struct usb_interface *intf,
                }
        }
 
-       if (!data->intr_ep || !data->bulk_tx_ep || !data->bulk_rx_ep) {
-               kfree(data);
+       if (!data->intr_ep || !data->bulk_tx_ep || !data->bulk_rx_ep)
                return -ENODEV;
-       }
 
        data->cmdreq_type = USB_TYPE_CLASS;
 
@@ -1002,10 +1001,8 @@ static int btusb_probe(struct usb_interface *intf,
        init_usb_anchor(&data->deferred);
 
        hdev = hci_alloc_dev();
-       if (!hdev) {
-               kfree(data);
+       if (!hdev)
                return -ENOMEM;
-       }
 
        hdev->bus = HCI_USB;
        hci_set_drvdata(hdev, data);
@@ -1073,7 +1070,6 @@ static int btusb_probe(struct usb_interface *intf,
                                                        data->isoc, data);
                if (err < 0) {
                        hci_free_dev(hdev);
-                       kfree(data);
                        return err;
                }
        }
@@ -1081,7 +1077,6 @@ static int btusb_probe(struct usb_interface *intf,
        err = hci_register_dev(hdev);
        if (err < 0) {
                hci_free_dev(hdev);
-               kfree(data);
                return err;
        }
 
@@ -1114,7 +1109,6 @@ static void btusb_disconnect(struct usb_interface *intf)
                usb_driver_release_interface(&btusb_driver, data->isoc);
 
        hci_free_dev(hdev);
-       kfree(data);
 }
 
 #ifdef CONFIG_PM
index 88694697f34f68386cdb4273289ccea7baaeaa5b..60abf596f60ea21f9354ae1b3d3608bb04e4142e 100644 (file)
@@ -297,16 +297,14 @@ static int bt_ti_probe(struct platform_device *pdev)
        struct hci_dev *hdev;
        int err;
 
-       hst = kzalloc(sizeof(struct ti_st), GFP_KERNEL);
+       hst = devm_kzalloc(&pdev->dev, sizeof(struct ti_st), GFP_KERNEL);
        if (!hst)
                return -ENOMEM;
 
        /* Expose "hciX" device to user space */
        hdev = hci_alloc_dev();
-       if (!hdev) {
-               kfree(hst);
+       if (!hdev)
                return -ENOMEM;
-       }
 
        BT_DBG("hdev %p", hdev);
 
@@ -321,7 +319,6 @@ static int bt_ti_probe(struct platform_device *pdev)
        err = hci_register_dev(hdev);
        if (err < 0) {
                BT_ERR("Can't register HCI device error %d", err);
-               kfree(hst);
                hci_free_dev(hdev);
                return err;
        }
@@ -347,7 +344,6 @@ static int bt_ti_remove(struct platform_device *pdev)
        hci_unregister_dev(hdev);
 
        hci_free_dev(hdev);
-       kfree(hst);
 
        dev_set_drvdata(&pdev->dev, NULL);
        return 0;
@@ -362,21 +358,7 @@ static struct platform_driver btwilink_driver = {
        },
 };
 
-/* ------- Module Init/Exit interfaces ------ */
-static int __init btwilink_init(void)
-{
-       BT_INFO("Bluetooth Driver for TI WiLink - Version %s", VERSION);
-
-       return platform_driver_register(&btwilink_driver);
-}
-
-static void __exit btwilink_exit(void)
-{
-       platform_driver_unregister(&btwilink_driver);
-}
-
-module_init(btwilink_init);
-module_exit(btwilink_exit);
+module_platform_driver(btwilink_driver);
 
 /* ------ Module Info ------ */
 
index 97a7784db4a2d4b6431aa2e15d8ff122b40ccdf4..036cb366fe6e77d7c8e202cf2aee0f0d3fde2880 100644 (file)
@@ -550,7 +550,7 @@ static int dtl1_probe(struct pcmcia_device *link)
        dtl1_info_t *info;
 
        /* Create new info device */
-       info = kzalloc(sizeof(*info), GFP_KERNEL);
+       info = devm_kzalloc(&link->dev, sizeof(*info), GFP_KERNEL);
        if (!info)
                return -ENOMEM;
 
@@ -569,7 +569,6 @@ static void dtl1_detach(struct pcmcia_device *link)
 
        dtl1_close(info);
        pcmcia_disable_device(link);
-       kfree(info);
 }
 
 static int dtl1_confcheck(struct pcmcia_device *p_dev, void *priv_data)
index 74e0966b3ead0bbcf3678ebea52ea6886a65baee..c8abce3d2d9c0618f2092c94398e3197d0396b27 100644 (file)
@@ -531,7 +531,7 @@ static int hci_uart_tty_ioctl(struct tty_struct *tty, struct file * file,
        default:
                err = n_tty_ioctl_helper(tty, file, cmd, arg);
                break;
-       };
+       }
 
        return err;
 }
index ff6d589c34a5e900eff0eb498782f5597a6a8bee..cfc7679385890b6e0feaadee1462a1e85330cc59 100644 (file)
@@ -481,7 +481,7 @@ static int ll_recv(struct hci_uart *hu, void *data, int count)
                        hu->hdev->stat.err_rx++;
                        ptr++; count--;
                        continue;
-               };
+               }
 
                ptr++; count--;
 
index 3f72595a60178a7f23ca45ac6418c6e5cb53faa6..d8b7aed6e4a96f6d6997ab9fac2a1ac84fbc4700 100644 (file)
@@ -156,7 +156,7 @@ static inline ssize_t vhci_put_user(struct vhci_data *data,
        case HCI_SCODATA_PKT:
                data->hdev->stat.sco_tx++;
                break;
-       };
+       }
 
        return total;
 }
index 3ceaf006e7f080fabc94848e9fc3b53f3dd59f0a..75d485afe56c7bf685e4f83e76473ca3516a6f54 100644 (file)
@@ -840,7 +840,7 @@ probe_fail_no_mem:
 
 static int __devexit remove_gdrom(struct platform_device *devptr)
 {
-       flush_work_sync(&work);
+       flush_work(&work);
        blk_cleanup_queue(gd.gdrom_rq);
        free_irq(HW_EVENT_GDROM_CMD, &gd);
        free_irq(HW_EVENT_GDROM_DMA, &gd);
index 7c0d391996b5b4e82432b9b3418d427f7a42894a..fbd9b2b850ef1de0a84a57182ff79bdd38064896 100644 (file)
@@ -289,3 +289,16 @@ config HW_RANDOM_EXYNOS
          module will be called exynos-rng.
 
          If unsure, say Y.
+
+config HW_RANDOM_TPM
+       tristate "TPM HW Random Number Generator support"
+       depends on HW_RANDOM && TCG_TPM
+       default HW_RANDOM
+       ---help---
+         This driver provides kernel-side support for the Random Number
+         Generator in the Trusted Platform Module
+
+         To compile this driver as a module, choose M here: the
+         module will be called tpm-rng.
+
+         If unsure, say Y.
index 39a757ca15b65c59b188d0489b12488e4b4c6ac3..1fd7eec9fbf6421ace05971f95671095917bf4fa 100644 (file)
@@ -25,3 +25,4 @@ obj-$(CONFIG_HW_RANDOM_PICOXCELL) += picoxcell-rng.o
 obj-$(CONFIG_HW_RANDOM_PPC4XX) += ppc4xx-rng.o
 obj-$(CONFIG_HW_RANDOM_PSERIES) += pseries-rng.o
 obj-$(CONFIG_HW_RANDOM_EXYNOS) += exynos-rng.o
+obj-$(CONFIG_HW_RANDOM_TPM) += tpm-rng.o
diff --git a/drivers/char/hw_random/tpm-rng.c b/drivers/char/hw_random/tpm-rng.c
new file mode 100644 (file)
index 0000000..d6d4482
--- /dev/null
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2012 Kent Yoder IBM Corporation
+ *
+ * HWRNG interfaces to pull RNG data from a TPM
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ */
+
+#include <linux/module.h>
+#include <linux/hw_random.h>
+#include <linux/tpm.h>
+
+#define MODULE_NAME "tpm-rng"
+
+static int tpm_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+       return tpm_get_random(TPM_ANY_NUM, data, max);
+}
+
+static struct hwrng tpm_rng = {
+       .name = MODULE_NAME,
+       .read = tpm_rng_read,
+};
+
+static int __init rng_init(void)
+{
+       return hwrng_register(&tpm_rng);
+}
+module_init(rng_init);
+
+static void __exit rng_exit(void)
+{
+       hwrng_unregister(&tpm_rng);
+}
+module_exit(rng_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Kent Yoder <key@linux.vnet.ibm.com>");
+MODULE_DESCRIPTION("RNG driver for TPM devices");
index f87780502b4199712a933103723128054eb5a5ab..320debbe32faf74032f5117cfaf461cbd65a63a3 100644 (file)
@@ -1433,7 +1433,7 @@ static int __devexit sonypi_remove(struct platform_device *dev)
        sonypi_disable();
 
        synchronize_irq(sonypi_device.irq);
-       flush_work_sync(&sonypi_device.input_work);
+       flush_work(&sonypi_device.input_work);
 
        if (useinput) {
                input_unregister_device(sonypi_device.input_key_dev);
index a048199ce866555f19b98fcd93ff10f3890c1eb3..915875e431d2151f48d9f793da67d0b2817d5e45 100644 (file)
@@ -33,6 +33,17 @@ config TCG_TIS
          from within Linux.  To compile this driver as a module, choose
          M here; the module will be called tpm_tis.
 
+config TCG_TIS_I2C_INFINEON
+       tristate "TPM Interface Specification 1.2 Interface (I2C - Infineon)"
+       depends on I2C
+       ---help---
+         If you have a TPM security chip that is compliant with the
+         TCG TIS 1.2 TPM specification and Infineon's I2C Protocol Stack
+         Specification 0.20 say Yes and it will be accessible from within
+         Linux.
+         To compile this driver as a module, choose M here; the module
+         will be called tpm_tis_i2c_infineon.
+
 config TCG_NSC
        tristate "National Semiconductor TPM Interface"
        depends on X86
@@ -62,4 +73,12 @@ config TCG_INFINEON
          Further information on this driver and the supported hardware
          can be found at http://www.trust.rub.de/projects/linux-device-driver-infineon-tpm/ 
 
+config TCG_IBMVTPM
+       tristate "IBM VTPM Interface"
+       depends on PPC64
+       ---help---
+         If you have IBM virtual TPM (VTPM) support say Yes and it
+         will be accessible from within Linux.  To compile this driver
+         as a module, choose M here; the module will be called tpm_ibmvtpm.
+
 endif # TCG_TPM
index ea3a1e02a824de2b1a80feaa5ebc0e0125162c1c..5b3fc8bc6c132fa76f1e6ec8723c9bf096670271 100644 (file)
@@ -4,8 +4,16 @@
 obj-$(CONFIG_TCG_TPM) += tpm.o
 ifdef CONFIG_ACPI
        obj-$(CONFIG_TCG_TPM) += tpm_bios.o
+       tpm_bios-objs += tpm_eventlog.o tpm_acpi.o tpm_ppi.o
+else
+ifdef CONFIG_TCG_IBMVTPM
+       obj-$(CONFIG_TCG_TPM) += tpm_bios.o
+       tpm_bios-objs += tpm_eventlog.o tpm_of.o
+endif
 endif
 obj-$(CONFIG_TCG_TIS) += tpm_tis.o
+obj-$(CONFIG_TCG_TIS_I2C_INFINEON) += tpm_i2c_infineon.o
 obj-$(CONFIG_TCG_NSC) += tpm_nsc.o
 obj-$(CONFIG_TCG_ATMEL) += tpm_atmel.o
 obj-$(CONFIG_TCG_INFINEON) += tpm_infineon.o
+obj-$(CONFIG_TCG_IBMVTPM) += tpm_ibmvtpm.o
index 817f0ee202b6861e87e146e2cc8864bc2de33ede..f26afdb1a7026aed8a0173ff7a17ea87524661dd 100644 (file)
 #include <linux/freezer.h>
 
 #include "tpm.h"
-
-enum tpm_const {
-       TPM_MINOR = 224,        /* officially assigned */
-       TPM_BUFSIZE = 4096,
-       TPM_NUM_DEVICES = 256,
-};
+#include "tpm_eventlog.h"
 
 enum tpm_duration {
        TPM_SHORT = 0,
@@ -482,6 +477,7 @@ static ssize_t transmit_cmd(struct tpm_chip *chip, struct tpm_cmd_t *cmd,
 #define TPM_INTERNAL_RESULT_SIZE 200
 #define TPM_TAG_RQU_COMMAND cpu_to_be16(193)
 #define TPM_ORD_GET_CAP cpu_to_be32(101)
+#define TPM_ORD_GET_RANDOM cpu_to_be32(70)
 
 static const struct tpm_input_header tpm_getcap_header = {
        .tag = TPM_TAG_RQU_COMMAND,
@@ -919,7 +915,7 @@ EXPORT_SYMBOL_GPL(tpm_show_pcrs);
 
 #define  READ_PUBEK_RESULT_SIZE 314
 #define TPM_ORD_READPUBEK cpu_to_be32(124)
-struct tpm_input_header tpm_readpubek_header = {
+static struct tpm_input_header tpm_readpubek_header = {
        .tag = TPM_TAG_RQU_COMMAND,
        .length = cpu_to_be32(30),
        .ordinal = TPM_ORD_READPUBEK
@@ -1172,10 +1168,10 @@ int tpm_release(struct inode *inode, struct file *file)
        struct tpm_chip *chip = file->private_data;
 
        del_singleshot_timer_sync(&chip->user_read_timer);
-       flush_work_sync(&chip->work);
+       flush_work(&chip->work);
        file->private_data = NULL;
        atomic_set(&chip->data_pending, 0);
-       kfree(chip->data_buffer);
+       kzfree(chip->data_buffer);
        clear_bit(0, &chip->is_open);
        put_device(chip->dev);
        return 0;
@@ -1225,9 +1221,8 @@ ssize_t tpm_read(struct file *file, char __user *buf,
        int rc;
 
        del_singleshot_timer_sync(&chip->user_read_timer);
-       flush_work_sync(&chip->work);
+       flush_work(&chip->work);
        ret_size = atomic_read(&chip->data_pending);
-       atomic_set(&chip->data_pending, 0);
        if (ret_size > 0) {     /* relay data */
                ssize_t orig_ret_size = ret_size;
                if (size < ret_size)
@@ -1242,6 +1237,8 @@ ssize_t tpm_read(struct file *file, char __user *buf,
                mutex_unlock(&chip->buffer_mutex);
        }
 
+       atomic_set(&chip->data_pending, 0);
+
        return ret_size;
 }
 EXPORT_SYMBOL_GPL(tpm_read);
@@ -1326,6 +1323,58 @@ int tpm_pm_resume(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(tpm_pm_resume);
 
+#define TPM_GETRANDOM_RESULT_SIZE      18
+static struct tpm_input_header tpm_getrandom_header = {
+       .tag = TPM_TAG_RQU_COMMAND,
+       .length = cpu_to_be32(14),
+       .ordinal = TPM_ORD_GET_RANDOM
+};
+
+/**
+ * tpm_get_random() - Get random bytes from the tpm's RNG
+ * @chip_num: A specific chip number for the request or TPM_ANY_NUM
+ * @out: destination buffer for the random bytes
+ * @max: the max number of bytes to write to @out
+ *
+ * Returns < 0 on error and the number of bytes read on success
+ */
+int tpm_get_random(u32 chip_num, u8 *out, size_t max)
+{
+       struct tpm_chip *chip;
+       struct tpm_cmd_t tpm_cmd;
+       u32 recd, num_bytes = min_t(u32, max, TPM_MAX_RNG_DATA);
+       int err, total = 0, retries = 5;
+       u8 *dest = out;
+
+       chip = tpm_chip_find_get(chip_num);
+       if (chip == NULL)
+               return -ENODEV;
+
+       if (!out || !num_bytes || max > TPM_MAX_RNG_DATA)
+               return -EINVAL;
+
+       do {
+               tpm_cmd.header.in = tpm_getrandom_header;
+               tpm_cmd.params.getrandom_in.num_bytes = cpu_to_be32(num_bytes);
+
+               err = transmit_cmd(chip, &tpm_cmd,
+                                  TPM_GETRANDOM_RESULT_SIZE + num_bytes,
+                                  "attempting get random");
+               if (err)
+                       break;
+
+               recd = be32_to_cpu(tpm_cmd.params.getrandom_out.rng_data_len);
+               memcpy(dest, tpm_cmd.params.getrandom_out.rng_data, recd);
+
+               dest += recd;
+               total += recd;
+               num_bytes -= recd;
+       } while (retries-- && total < max);
+
+       return total ? total : -EIO;
+}
+EXPORT_SYMBOL_GPL(tpm_get_random);
+
 /* In case vendor provided release function, call it too.*/
 
 void tpm_dev_vendor_release(struct tpm_chip *chip)
@@ -1346,7 +1395,7 @@ EXPORT_SYMBOL_GPL(tpm_dev_vendor_release);
  * Once all references to platform device are down to 0,
  * release all allocated structures.
  */
-void tpm_dev_release(struct device *dev)
+static void tpm_dev_release(struct device *dev)
 {
        struct tpm_chip *chip = dev_get_drvdata(dev);
 
@@ -1427,6 +1476,11 @@ struct tpm_chip *tpm_register_hardware(struct device *dev,
                goto put_device;
        }
 
+       if (sys_add_ppi(&dev->kobj)) {
+               misc_deregister(&chip->vendor.miscdev);
+               goto put_device;
+       }
+
        chip->bios_dir = tpm_bios_log_setup(devname);
 
        /* Make chip available */
index 917f727e674059b7e7daa2fc36a9920a306deef6..02c266aa2bf712657d3507071c1b266a4215e3c2 100644 (file)
 #include <linux/io.h>
 #include <linux/tpm.h>
 
+enum tpm_const {
+       TPM_MINOR = 224,        /* officially assigned */
+       TPM_BUFSIZE = 4096,
+       TPM_NUM_DEVICES = 256,
+};
+
 enum tpm_timeout {
        TPM_TIMEOUT = 5,        /* msecs */
 };
@@ -94,6 +100,7 @@ struct tpm_vendor_specific {
        bool timeout_adjusted;
        unsigned long duration[3]; /* jiffies */
        bool duration_adjusted;
+       void *data;
 
        wait_queue_head_t read_queue;
        wait_queue_head_t int_queue;
@@ -269,6 +276,21 @@ struct tpm_pcrextend_in {
        u8      hash[TPM_DIGEST_SIZE];
 }__attribute__((packed));
 
+/* 128 bytes is an arbitrary cap. This could be as large as TPM_BUFSIZE - 18
+ * bytes, but 128 is still a relatively large number of random bytes and
+ * anything much bigger causes users of struct tpm_cmd_t to start getting
+ * compiler warnings about stack frame size. */
+#define TPM_MAX_RNG_DATA       128
+
+struct tpm_getrandom_out {
+       __be32 rng_data_len;
+       u8     rng_data[TPM_MAX_RNG_DATA];
+}__attribute__((packed));
+
+struct tpm_getrandom_in {
+       __be32 num_bytes;
+}__attribute__((packed));
+
 typedef union {
        struct  tpm_getcap_params_out getcap_out;
        struct  tpm_readpubek_params_out readpubek_out;
@@ -277,6 +299,8 @@ typedef union {
        struct  tpm_pcrread_in  pcrread_in;
        struct  tpm_pcrread_out pcrread_out;
        struct  tpm_pcrextend_in pcrextend_in;
+       struct  tpm_getrandom_in getrandom_in;
+       struct  tpm_getrandom_out getrandom_out;
 } tpm_cmd_params;
 
 struct tpm_cmd_t {
@@ -303,15 +327,12 @@ extern int tpm_pm_suspend(struct device *);
 extern int tpm_pm_resume(struct device *);
 extern int wait_for_tpm_stat(struct tpm_chip *, u8, unsigned long,
                             wait_queue_head_t *);
+
 #ifdef CONFIG_ACPI
-extern struct dentry ** tpm_bios_log_setup(char *);
-extern void tpm_bios_log_teardown(struct dentry **);
+extern ssize_t sys_add_ppi(struct kobject *parent);
 #else
-static inline struct dentry ** tpm_bios_log_setup(char *name)
-{
-       return NULL;
-}
-static inline void tpm_bios_log_teardown(struct dentry **dir)
+static inline ssize_t sys_add_ppi(struct kobject *parent)
 {
+       return 0;
 }
 #endif
diff --git a/drivers/char/tpm/tpm_acpi.c b/drivers/char/tpm/tpm_acpi.c
new file mode 100644 (file)
index 0000000..56051d0
--- /dev/null
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2005 IBM Corporation
+ *
+ * Authors:
+ *     Seiji Munetoh <munetoh@jp.ibm.com>
+ *     Stefan Berger <stefanb@us.ibm.com>
+ *     Reiner Sailer <sailer@watson.ibm.com>
+ *     Kylene Hall <kjhall@us.ibm.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * Access to the eventlog extended by the TCG BIOS of PC platform
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/seq_file.h>
+#include <linux/fs.h>
+#include <linux/security.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <acpi/acpi.h>
+
+#include "tpm.h"
+#include "tpm_eventlog.h"
+
+struct acpi_tcpa {
+       struct acpi_table_header hdr;
+       u16 platform_class;
+       union {
+               struct client_hdr {
+                       u32 log_max_len __attribute__ ((packed));
+                       u64 log_start_addr __attribute__ ((packed));
+               } client;
+               struct server_hdr {
+                       u16 reserved;
+                       u64 log_max_len __attribute__ ((packed));
+                       u64 log_start_addr __attribute__ ((packed));
+               } server;
+       };
+};
+
+/* read binary bios log */
+int read_log(struct tpm_bios_log *log)
+{
+       struct acpi_tcpa *buff;
+       acpi_status status;
+       void __iomem *virt;
+       u64 len, start;
+
+       if (log->bios_event_log != NULL) {
+               printk(KERN_ERR
+                      "%s: ERROR - Eventlog already initialized\n",
+                      __func__);
+               return -EFAULT;
+       }
+
+       /* Find TCPA entry in RSDT (ACPI_LOGICAL_ADDRESSING) */
+       status = acpi_get_table(ACPI_SIG_TCPA, 1,
+                               (struct acpi_table_header **)&buff);
+
+       if (ACPI_FAILURE(status)) {
+               printk(KERN_ERR "%s: ERROR - Could not get TCPA table\n",
+                      __func__);
+               return -EIO;
+       }
+
+       switch(buff->platform_class) {
+       case BIOS_SERVER:
+               len = buff->server.log_max_len;
+               start = buff->server.log_start_addr;
+               break;
+       case BIOS_CLIENT:
+       default:
+               len = buff->client.log_max_len;
+               start = buff->client.log_start_addr;
+               break;
+       }
+       if (!len) {
+               printk(KERN_ERR "%s: ERROR - TCPA log area empty\n", __func__);
+               return -EIO;
+       }
+
+       /* malloc EventLog space */
+       log->bios_event_log = kmalloc(len, GFP_KERNEL);
+       if (!log->bios_event_log) {
+               printk("%s: ERROR - Not enough  Memory for BIOS measurements\n",
+                       __func__);
+               return -ENOMEM;
+       }
+
+       log->bios_event_log_end = log->bios_event_log + len;
+
+       virt = acpi_os_map_memory(start, len);
+       if (!virt) {
+               kfree(log->bios_event_log);
+               printk("%s: ERROR - Unable to map memory\n", __func__);
+               return -EIO;
+       }
+
+       memcpy_fromio(log->bios_event_log, virt, len);
+
+       acpi_os_unmap_memory(virt, len);
+       return 0;
+}
diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
deleted file mode 100644 (file)
index 0636520..0000000
+++ /dev/null
@@ -1,556 +0,0 @@
-/*
- * Copyright (C) 2005 IBM Corporation
- *
- * Authors:
- *     Seiji Munetoh <munetoh@jp.ibm.com>
- *     Stefan Berger <stefanb@us.ibm.com>
- *     Reiner Sailer <sailer@watson.ibm.com>
- *     Kylene Hall <kjhall@us.ibm.com>
- *
- * Maintained by: <tpmdd-devel@lists.sourceforge.net>
- *
- * Access to the eventlog extended by the TCG BIOS of PC platform
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- */
-
-#include <linux/seq_file.h>
-#include <linux/fs.h>
-#include <linux/security.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <acpi/acpi.h>
-#include "tpm.h"
-
-#define TCG_EVENT_NAME_LEN_MAX 255
-#define MAX_TEXT_EVENT         1000    /* Max event string length */
-#define ACPI_TCPA_SIG          "TCPA"  /* 0x41504354 /'TCPA' */
-
-enum bios_platform_class {
-       BIOS_CLIENT = 0x00,
-       BIOS_SERVER = 0x01,
-};
-
-struct tpm_bios_log {
-       void *bios_event_log;
-       void *bios_event_log_end;
-};
-
-struct acpi_tcpa {
-       struct acpi_table_header hdr;
-       u16 platform_class;
-       union {
-               struct client_hdr {
-                       u32 log_max_len __attribute__ ((packed));
-                       u64 log_start_addr __attribute__ ((packed));
-               } client;
-               struct server_hdr {
-                       u16 reserved;
-                       u64 log_max_len __attribute__ ((packed));
-                       u64 log_start_addr __attribute__ ((packed));
-               } server;
-       };
-};
-
-struct tcpa_event {
-       u32 pcr_index;
-       u32 event_type;
-       u8 pcr_value[20];       /* SHA1 */
-       u32 event_size;
-       u8 event_data[0];
-};
-
-enum tcpa_event_types {
-       PREBOOT = 0,
-       POST_CODE,
-       UNUSED,
-       NO_ACTION,
-       SEPARATOR,
-       ACTION,
-       EVENT_TAG,
-       SCRTM_CONTENTS,
-       SCRTM_VERSION,
-       CPU_MICROCODE,
-       PLATFORM_CONFIG_FLAGS,
-       TABLE_OF_DEVICES,
-       COMPACT_HASH,
-       IPL,
-       IPL_PARTITION_DATA,
-       NONHOST_CODE,
-       NONHOST_CONFIG,
-       NONHOST_INFO,
-};
-
-static const char* tcpa_event_type_strings[] = {
-       "PREBOOT",
-       "POST CODE",
-       "",
-       "NO ACTION",
-       "SEPARATOR",
-       "ACTION",
-       "EVENT TAG",
-       "S-CRTM Contents",
-       "S-CRTM Version",
-       "CPU Microcode",
-       "Platform Config Flags",
-       "Table of Devices",
-       "Compact Hash",
-       "IPL",
-       "IPL Partition Data",
-       "Non-Host Code",
-       "Non-Host Config",
-       "Non-Host Info"
-};
-
-struct tcpa_pc_event {
-       u32 event_id;
-       u32 event_size;
-       u8 event_data[0];
-};
-
-enum tcpa_pc_event_ids {
-       SMBIOS = 1,
-       BIS_CERT,
-       POST_BIOS_ROM,
-       ESCD,
-       CMOS,
-       NVRAM,
-       OPTION_ROM_EXEC,
-       OPTION_ROM_CONFIG,
-       OPTION_ROM_MICROCODE = 10,
-       S_CRTM_VERSION,
-       S_CRTM_CONTENTS,
-       POST_CONTENTS,
-       HOST_TABLE_OF_DEVICES,
-};
-
-static const char* tcpa_pc_event_id_strings[] = {
-       "",
-       "SMBIOS",
-       "BIS Certificate",
-       "POST BIOS ",
-       "ESCD ",
-       "CMOS",
-       "NVRAM",
-       "Option ROM",
-       "Option ROM config",
-       "",
-       "Option ROM microcode ",
-       "S-CRTM Version",
-       "S-CRTM Contents ",
-       "POST Contents ",
-       "Table of Devices",
-};
-
-/* returns pointer to start of pos. entry of tcg log */
-static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
-{
-       loff_t i;
-       struct tpm_bios_log *log = m->private;
-       void *addr = log->bios_event_log;
-       void *limit = log->bios_event_log_end;
-       struct tcpa_event *event;
-
-       /* read over *pos measurements */
-       for (i = 0; i < *pos; i++) {
-               event = addr;
-
-               if ((addr + sizeof(struct tcpa_event)) < limit) {
-                       if (event->event_type == 0 && event->event_size == 0)
-                               return NULL;
-                       addr += sizeof(struct tcpa_event) + event->event_size;
-               }
-       }
-
-       /* now check if current entry is valid */
-       if ((addr + sizeof(struct tcpa_event)) >= limit)
-               return NULL;
-
-       event = addr;
-
-       if ((event->event_type == 0 && event->event_size == 0) ||
-           ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
-               return NULL;
-
-       return addr;
-}
-
-static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
-                                       loff_t *pos)
-{
-       struct tcpa_event *event = v;
-       struct tpm_bios_log *log = m->private;
-       void *limit = log->bios_event_log_end;
-
-       v += sizeof(struct tcpa_event) + event->event_size;
-
-       /* now check if current entry is valid */
-       if ((v + sizeof(struct tcpa_event)) >= limit)
-               return NULL;
-
-       event = v;
-
-       if (event->event_type == 0 && event->event_size == 0)
-               return NULL;
-
-       if ((event->event_type == 0 && event->event_size == 0) ||
-           ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
-               return NULL;
-
-       (*pos)++;
-       return v;
-}
-
-static void tpm_bios_measurements_stop(struct seq_file *m, void *v)
-{
-}
-
-static int get_event_name(char *dest, struct tcpa_event *event,
-                       unsigned char * event_entry)
-{
-       const char *name = "";
-       /* 41 so there is room for 40 data and 1 nul */
-       char data[41] = "";
-       int i, n_len = 0, d_len = 0;
-       struct tcpa_pc_event *pc_event;
-
-       switch(event->event_type) {
-       case PREBOOT:
-       case POST_CODE:
-       case UNUSED:
-       case NO_ACTION:
-       case SCRTM_CONTENTS:
-       case SCRTM_VERSION:
-       case CPU_MICROCODE:
-       case PLATFORM_CONFIG_FLAGS:
-       case TABLE_OF_DEVICES:
-       case COMPACT_HASH:
-       case IPL:
-       case IPL_PARTITION_DATA:
-       case NONHOST_CODE:
-       case NONHOST_CONFIG:
-       case NONHOST_INFO:
-               name = tcpa_event_type_strings[event->event_type];
-               n_len = strlen(name);
-               break;
-       case SEPARATOR:
-       case ACTION:
-               if (MAX_TEXT_EVENT > event->event_size) {
-                       name = event_entry;
-                       n_len = event->event_size;
-               }
-               break;
-       case EVENT_TAG:
-               pc_event = (struct tcpa_pc_event *)event_entry;
-
-               /* ToDo Row data -> Base64 */
-
-               switch (pc_event->event_id) {
-               case SMBIOS:
-               case BIS_CERT:
-               case CMOS:
-               case NVRAM:
-               case OPTION_ROM_EXEC:
-               case OPTION_ROM_CONFIG:
-               case S_CRTM_VERSION:
-                       name = tcpa_pc_event_id_strings[pc_event->event_id];
-                       n_len = strlen(name);
-                       break;
-               /* hash data */
-               case POST_BIOS_ROM:
-               case ESCD:
-               case OPTION_ROM_MICROCODE:
-               case S_CRTM_CONTENTS:
-               case POST_CONTENTS:
-                       name = tcpa_pc_event_id_strings[pc_event->event_id];
-                       n_len = strlen(name);
-                       for (i = 0; i < 20; i++)
-                               d_len += sprintf(&data[2*i], "%02x",
-                                               pc_event->event_data[i]);
-                       break;
-               default:
-                       break;
-               }
-       default:
-               break;
-       }
-
-       return snprintf(dest, MAX_TEXT_EVENT, "[%.*s%.*s]",
-                       n_len, name, d_len, data);
-
-}
-
-static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
-{
-       struct tcpa_event *event = v;
-       char *data = v;
-       int i;
-
-       for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
-               seq_putc(m, data[i]);
-
-       return 0;
-}
-
-static int tpm_bios_measurements_release(struct inode *inode,
-                                        struct file *file)
-{
-       struct seq_file *seq = file->private_data;
-       struct tpm_bios_log *log = seq->private;
-
-       if (log) {
-               kfree(log->bios_event_log);
-               kfree(log);
-       }
-
-       return seq_release(inode, file);
-}
-
-static int tpm_ascii_bios_measurements_show(struct seq_file *m, void *v)
-{
-       int len = 0;
-       int i;
-       char *eventname;
-       struct tcpa_event *event = v;
-       unsigned char *event_entry =
-           (unsigned char *) (v + sizeof(struct tcpa_event));
-
-       eventname = kmalloc(MAX_TEXT_EVENT, GFP_KERNEL);
-       if (!eventname) {
-               printk(KERN_ERR "%s: ERROR - No Memory for event name\n ",
-                      __func__);
-               return -EFAULT;
-       }
-
-       seq_printf(m, "%2d ", event->pcr_index);
-
-       /* 2nd: SHA1 */
-       for (i = 0; i < 20; i++)
-               seq_printf(m, "%02x", event->pcr_value[i]);
-
-       /* 3rd: event type identifier */
-       seq_printf(m, " %02x", event->event_type);
-
-       len += get_event_name(eventname, event, event_entry);
-
-       /* 4th: eventname <= max + \'0' delimiter */
-       seq_printf(m, " %s\n", eventname);
-
-       kfree(eventname);
-       return 0;
-}
-
-static const struct seq_operations tpm_ascii_b_measurments_seqops = {
-       .start = tpm_bios_measurements_start,
-       .next = tpm_bios_measurements_next,
-       .stop = tpm_bios_measurements_stop,
-       .show = tpm_ascii_bios_measurements_show,
-};
-
-static const struct seq_operations tpm_binary_b_measurments_seqops = {
-       .start = tpm_bios_measurements_start,
-       .next = tpm_bios_measurements_next,
-       .stop = tpm_bios_measurements_stop,
-       .show = tpm_binary_bios_measurements_show,
-};
-
-/* read binary bios log */
-static int read_log(struct tpm_bios_log *log)
-{
-       struct acpi_tcpa *buff;
-       acpi_status status;
-       struct acpi_table_header *virt;
-       u64 len, start;
-
-       if (log->bios_event_log != NULL) {
-               printk(KERN_ERR
-                      "%s: ERROR - Eventlog already initialized\n",
-                      __func__);
-               return -EFAULT;
-       }
-
-       /* Find TCPA entry in RSDT (ACPI_LOGICAL_ADDRESSING) */
-       status = acpi_get_table(ACPI_SIG_TCPA, 1,
-                               (struct acpi_table_header **)&buff);
-
-       if (ACPI_FAILURE(status)) {
-               printk(KERN_ERR "%s: ERROR - Could not get TCPA table\n",
-                      __func__);
-               return -EIO;
-       }
-
-       switch(buff->platform_class) {
-       case BIOS_SERVER:
-               len = buff->server.log_max_len;
-               start = buff->server.log_start_addr;
-               break;
-       case BIOS_CLIENT:
-       default:
-               len = buff->client.log_max_len;
-               start = buff->client.log_start_addr;
-               break;
-       }
-       if (!len) {
-               printk(KERN_ERR "%s: ERROR - TCPA log area empty\n", __func__);
-               return -EIO;
-       }
-
-       /* malloc EventLog space */
-       log->bios_event_log = kmalloc(len, GFP_KERNEL);
-       if (!log->bios_event_log) {
-               printk("%s: ERROR - Not enough  Memory for BIOS measurements\n",
-                       __func__);
-               return -ENOMEM;
-       }
-
-       log->bios_event_log_end = log->bios_event_log + len;
-
-       virt = acpi_os_map_memory(start, len);
-
-       memcpy(log->bios_event_log, virt, len);
-
-       acpi_os_unmap_memory(virt, len);
-       return 0;
-}
-
-static int tpm_ascii_bios_measurements_open(struct inode *inode,
-                                           struct file *file)
-{
-       int err;
-       struct tpm_bios_log *log;
-       struct seq_file *seq;
-
-       log = kzalloc(sizeof(struct tpm_bios_log), GFP_KERNEL);
-       if (!log)
-               return -ENOMEM;
-
-       if ((err = read_log(log)))
-               goto out_free;
-
-       /* now register seq file */
-       err = seq_open(file, &tpm_ascii_b_measurments_seqops);
-       if (!err) {
-               seq = file->private_data;
-               seq->private = log;
-       } else {
-               goto out_free;
-       }
-
-out:
-       return err;
-out_free:
-       kfree(log->bios_event_log);
-       kfree(log);
-       goto out;
-}
-
-static const struct file_operations tpm_ascii_bios_measurements_ops = {
-       .open = tpm_ascii_bios_measurements_open,
-       .read = seq_read,
-       .llseek = seq_lseek,
-       .release = tpm_bios_measurements_release,
-};
-
-static int tpm_binary_bios_measurements_open(struct inode *inode,
-                                            struct file *file)
-{
-       int err;
-       struct tpm_bios_log *log;
-       struct seq_file *seq;
-
-       log = kzalloc(sizeof(struct tpm_bios_log), GFP_KERNEL);
-       if (!log)
-               return -ENOMEM;
-
-       if ((err = read_log(log)))
-               goto out_free;
-
-       /* now register seq file */
-       err = seq_open(file, &tpm_binary_b_measurments_seqops);
-       if (!err) {
-               seq = file->private_data;
-               seq->private = log;
-       } else {
-               goto out_free;
-       }
-
-out:
-       return err;
-out_free:
-       kfree(log->bios_event_log);
-       kfree(log);
-       goto out;
-}
-
-static const struct file_operations tpm_binary_bios_measurements_ops = {
-       .open = tpm_binary_bios_measurements_open,
-       .read = seq_read,
-       .llseek = seq_lseek,
-       .release = tpm_bios_measurements_release,
-};
-
-static int is_bad(void *p)
-{
-       if (!p)
-               return 1;
-       if (IS_ERR(p) && (PTR_ERR(p) != -ENODEV))
-               return 1;
-       return 0;
-}
-
-struct dentry **tpm_bios_log_setup(char *name)
-{
-       struct dentry **ret = NULL, *tpm_dir, *bin_file, *ascii_file;
-
-       tpm_dir = securityfs_create_dir(name, NULL);
-       if (is_bad(tpm_dir))
-               goto out;
-
-       bin_file =
-           securityfs_create_file("binary_bios_measurements",
-                                  S_IRUSR | S_IRGRP, tpm_dir, NULL,
-                                  &tpm_binary_bios_measurements_ops);
-       if (is_bad(bin_file))
-               goto out_tpm;
-
-       ascii_file =
-           securityfs_create_file("ascii_bios_measurements",
-                                  S_IRUSR | S_IRGRP, tpm_dir, NULL,
-                                  &tpm_ascii_bios_measurements_ops);
-       if (is_bad(ascii_file))
-               goto out_bin;
-
-       ret = kmalloc(3 * sizeof(struct dentry *), GFP_KERNEL);
-       if (!ret)
-               goto out_ascii;
-
-       ret[0] = ascii_file;
-       ret[1] = bin_file;
-       ret[2] = tpm_dir;
-
-       return ret;
-
-out_ascii:
-       securityfs_remove(ascii_file);
-out_bin:
-       securityfs_remove(bin_file);
-out_tpm:
-       securityfs_remove(tpm_dir);
-out:
-       return NULL;
-}
-EXPORT_SYMBOL_GPL(tpm_bios_log_setup);
-
-void tpm_bios_log_teardown(struct dentry **lst)
-{
-       int i;
-
-       for (i = 0; i < 3; i++)
-               securityfs_remove(lst[i]);
-}
-EXPORT_SYMBOL_GPL(tpm_bios_log_teardown);
-MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c
new file mode 100644 (file)
index 0000000..84ddc55
--- /dev/null
@@ -0,0 +1,419 @@
+/*
+ * Copyright (C) 2005, 2012 IBM Corporation
+ *
+ * Authors:
+ *     Kent Yoder <key@linux.vnet.ibm.com>
+ *     Seiji Munetoh <munetoh@jp.ibm.com>
+ *     Stefan Berger <stefanb@us.ibm.com>
+ *     Reiner Sailer <sailer@watson.ibm.com>
+ *     Kylene Hall <kjhall@us.ibm.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * Access to the eventlog created by a system's firmware / BIOS
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/seq_file.h>
+#include <linux/fs.h>
+#include <linux/security.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include "tpm.h"
+#include "tpm_eventlog.h"
+
+
+static const char* tcpa_event_type_strings[] = {
+       "PREBOOT",
+       "POST CODE",
+       "",
+       "NO ACTION",
+       "SEPARATOR",
+       "ACTION",
+       "EVENT TAG",
+       "S-CRTM Contents",
+       "S-CRTM Version",
+       "CPU Microcode",
+       "Platform Config Flags",
+       "Table of Devices",
+       "Compact Hash",
+       "IPL",
+       "IPL Partition Data",
+       "Non-Host Code",
+       "Non-Host Config",
+       "Non-Host Info"
+};
+
+static const char* tcpa_pc_event_id_strings[] = {
+       "",
+       "SMBIOS",
+       "BIS Certificate",
+       "POST BIOS ",
+       "ESCD ",
+       "CMOS",
+       "NVRAM",
+       "Option ROM",
+       "Option ROM config",
+       "",
+       "Option ROM microcode ",
+       "S-CRTM Version",
+       "S-CRTM Contents ",
+       "POST Contents ",
+       "Table of Devices",
+};
+
+/* returns pointer to start of pos. entry of tcg log */
+static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
+{
+       loff_t i;
+       struct tpm_bios_log *log = m->private;
+       void *addr = log->bios_event_log;
+       void *limit = log->bios_event_log_end;
+       struct tcpa_event *event;
+
+       /* read over *pos measurements */
+       for (i = 0; i < *pos; i++) {
+               event = addr;
+
+               if ((addr + sizeof(struct tcpa_event)) < limit) {
+                       if (event->event_type == 0 && event->event_size == 0)
+                               return NULL;
+                       addr += sizeof(struct tcpa_event) + event->event_size;
+               }
+       }
+
+       /* now check if current entry is valid */
+       if ((addr + sizeof(struct tcpa_event)) >= limit)
+               return NULL;
+
+       event = addr;
+
+       if ((event->event_type == 0 && event->event_size == 0) ||
+           ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
+               return NULL;
+
+       return addr;
+}
+
+static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
+                                       loff_t *pos)
+{
+       struct tcpa_event *event = v;
+       struct tpm_bios_log *log = m->private;
+       void *limit = log->bios_event_log_end;
+
+       v += sizeof(struct tcpa_event) + event->event_size;
+
+       /* now check if current entry is valid */
+       if ((v + sizeof(struct tcpa_event)) >= limit)
+               return NULL;
+
+       event = v;
+
+       if (event->event_type == 0 && event->event_size == 0)
+               return NULL;
+
+       if ((event->event_type == 0 && event->event_size == 0) ||
+           ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
+               return NULL;
+
+       (*pos)++;
+       return v;
+}
+
+static void tpm_bios_measurements_stop(struct seq_file *m, void *v)
+{
+}
+
+static int get_event_name(char *dest, struct tcpa_event *event,
+                       unsigned char * event_entry)
+{
+       const char *name = "";
+       /* 41 so there is room for 40 data and 1 nul */
+       char data[41] = "";
+       int i, n_len = 0, d_len = 0;
+       struct tcpa_pc_event *pc_event;
+
+       switch(event->event_type) {
+       case PREBOOT:
+       case POST_CODE:
+       case UNUSED:
+       case NO_ACTION:
+       case SCRTM_CONTENTS:
+       case SCRTM_VERSION:
+       case CPU_MICROCODE:
+       case PLATFORM_CONFIG_FLAGS:
+       case TABLE_OF_DEVICES:
+       case COMPACT_HASH:
+       case IPL:
+       case IPL_PARTITION_DATA:
+       case NONHOST_CODE:
+       case NONHOST_CONFIG:
+       case NONHOST_INFO:
+               name = tcpa_event_type_strings[event->event_type];
+               n_len = strlen(name);
+               break;
+       case SEPARATOR:
+       case ACTION:
+               if (MAX_TEXT_EVENT > event->event_size) {
+                       name = event_entry;
+                       n_len = event->event_size;
+               }
+               break;
+       case EVENT_TAG:
+               pc_event = (struct tcpa_pc_event *)event_entry;
+
+               /* ToDo Row data -> Base64 */
+
+               switch (pc_event->event_id) {
+               case SMBIOS:
+               case BIS_CERT:
+               case CMOS:
+               case NVRAM:
+               case OPTION_ROM_EXEC:
+               case OPTION_ROM_CONFIG:
+               case S_CRTM_VERSION:
+                       name = tcpa_pc_event_id_strings[pc_event->event_id];
+                       n_len = strlen(name);
+                       break;
+               /* hash data */
+               case POST_BIOS_ROM:
+               case ESCD:
+               case OPTION_ROM_MICROCODE:
+               case S_CRTM_CONTENTS:
+               case POST_CONTENTS:
+                       name = tcpa_pc_event_id_strings[pc_event->event_id];
+                       n_len = strlen(name);
+                       for (i = 0; i < 20; i++)
+                               d_len += sprintf(&data[2*i], "%02x",
+                                               pc_event->event_data[i]);
+                       break;
+               default:
+                       break;
+               }
+       default:
+               break;
+       }
+
+       return snprintf(dest, MAX_TEXT_EVENT, "[%.*s%.*s]",
+                       n_len, name, d_len, data);
+
+}
+
+static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
+{
+       struct tcpa_event *event = v;
+       char *data = v;
+       int i;
+
+       for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
+               seq_putc(m, data[i]);
+
+       return 0;
+}
+
+static int tpm_bios_measurements_release(struct inode *inode,
+                                        struct file *file)
+{
+       struct seq_file *seq = file->private_data;
+       struct tpm_bios_log *log = seq->private;
+
+       if (log) {
+               kfree(log->bios_event_log);
+               kfree(log);
+       }
+
+       return seq_release(inode, file);
+}
+
+static int tpm_ascii_bios_measurements_show(struct seq_file *m, void *v)
+{
+       int len = 0;
+       int i;
+       char *eventname;
+       struct tcpa_event *event = v;
+       unsigned char *event_entry =
+           (unsigned char *) (v + sizeof(struct tcpa_event));
+
+       eventname = kmalloc(MAX_TEXT_EVENT, GFP_KERNEL);
+       if (!eventname) {
+               printk(KERN_ERR "%s: ERROR - No Memory for event name\n ",
+                      __func__);
+               return -EFAULT;
+       }
+
+       seq_printf(m, "%2d ", event->pcr_index);
+
+       /* 2nd: SHA1 */
+       for (i = 0; i < 20; i++)
+               seq_printf(m, "%02x", event->pcr_value[i]);
+
+       /* 3rd: event type identifier */
+       seq_printf(m, " %02x", event->event_type);
+
+       len += get_event_name(eventname, event, event_entry);
+
+       /* 4th: eventname <= max + \'0' delimiter */
+       seq_printf(m, " %s\n", eventname);
+
+       kfree(eventname);
+       return 0;
+}
+
+static const struct seq_operations tpm_ascii_b_measurments_seqops = {
+       .start = tpm_bios_measurements_start,
+       .next = tpm_bios_measurements_next,
+       .stop = tpm_bios_measurements_stop,
+       .show = tpm_ascii_bios_measurements_show,
+};
+
+static const struct seq_operations tpm_binary_b_measurments_seqops = {
+       .start = tpm_bios_measurements_start,
+       .next = tpm_bios_measurements_next,
+       .stop = tpm_bios_measurements_stop,
+       .show = tpm_binary_bios_measurements_show,
+};
+
+static int tpm_ascii_bios_measurements_open(struct inode *inode,
+                                           struct file *file)
+{
+       int err;
+       struct tpm_bios_log *log;
+       struct seq_file *seq;
+
+       log = kzalloc(sizeof(struct tpm_bios_log), GFP_KERNEL);
+       if (!log)
+               return -ENOMEM;
+
+       if ((err = read_log(log)))
+               goto out_free;
+
+       /* now register seq file */
+       err = seq_open(file, &tpm_ascii_b_measurments_seqops);
+       if (!err) {
+               seq = file->private_data;
+               seq->private = log;
+       } else {
+               goto out_free;
+       }
+
+out:
+       return err;
+out_free:
+       kfree(log->bios_event_log);
+       kfree(log);
+       goto out;
+}
+
+static const struct file_operations tpm_ascii_bios_measurements_ops = {
+       .open = tpm_ascii_bios_measurements_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = tpm_bios_measurements_release,
+};
+
+static int tpm_binary_bios_measurements_open(struct inode *inode,
+                                            struct file *file)
+{
+       int err;
+       struct tpm_bios_log *log;
+       struct seq_file *seq;
+
+       log = kzalloc(sizeof(struct tpm_bios_log), GFP_KERNEL);
+       if (!log)
+               return -ENOMEM;
+
+       if ((err = read_log(log)))
+               goto out_free;
+
+       /* now register seq file */
+       err = seq_open(file, &tpm_binary_b_measurments_seqops);
+       if (!err) {
+               seq = file->private_data;
+               seq->private = log;
+       } else {
+               goto out_free;
+       }
+
+out:
+       return err;
+out_free:
+       kfree(log->bios_event_log);
+       kfree(log);
+       goto out;
+}
+
+static const struct file_operations tpm_binary_bios_measurements_ops = {
+       .open = tpm_binary_bios_measurements_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = tpm_bios_measurements_release,
+};
+
+static int is_bad(void *p)
+{
+       if (!p)
+               return 1;
+       if (IS_ERR(p) && (PTR_ERR(p) != -ENODEV))
+               return 1;
+       return 0;
+}
+
+struct dentry **tpm_bios_log_setup(char *name)
+{
+       struct dentry **ret = NULL, *tpm_dir, *bin_file, *ascii_file;
+
+       tpm_dir = securityfs_create_dir(name, NULL);
+       if (is_bad(tpm_dir))
+               goto out;
+
+       bin_file =
+           securityfs_create_file("binary_bios_measurements",
+                                  S_IRUSR | S_IRGRP, tpm_dir, NULL,
+                                  &tpm_binary_bios_measurements_ops);
+       if (is_bad(bin_file))
+               goto out_tpm;
+
+       ascii_file =
+           securityfs_create_file("ascii_bios_measurements",
+                                  S_IRUSR | S_IRGRP, tpm_dir, NULL,
+                                  &tpm_ascii_bios_measurements_ops);
+       if (is_bad(ascii_file))
+               goto out_bin;
+
+       ret = kmalloc(3 * sizeof(struct dentry *), GFP_KERNEL);
+       if (!ret)
+               goto out_ascii;
+
+       ret[0] = ascii_file;
+       ret[1] = bin_file;
+       ret[2] = tpm_dir;
+
+       return ret;
+
+out_ascii:
+       securityfs_remove(ascii_file);
+out_bin:
+       securityfs_remove(bin_file);
+out_tpm:
+       securityfs_remove(tpm_dir);
+out:
+       return NULL;
+}
+EXPORT_SYMBOL_GPL(tpm_bios_log_setup);
+
+void tpm_bios_log_teardown(struct dentry **lst)
+{
+       int i;
+
+       for (i = 0; i < 3; i++)
+               securityfs_remove(lst[i]);
+}
+EXPORT_SYMBOL_GPL(tpm_bios_log_teardown);
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm_eventlog.h b/drivers/char/tpm/tpm_eventlog.h
new file mode 100644 (file)
index 0000000..e7da086
--- /dev/null
@@ -0,0 +1,86 @@
+
+#ifndef __TPM_EVENTLOG_H__
+#define __TPM_EVENTLOG_H__
+
+#define TCG_EVENT_NAME_LEN_MAX 255
+#define MAX_TEXT_EVENT         1000    /* Max event string length */
+#define ACPI_TCPA_SIG          "TCPA"  /* 0x41504354 /'TCPA' */
+
+enum bios_platform_class {
+       BIOS_CLIENT = 0x00,
+       BIOS_SERVER = 0x01,
+};
+
+struct tpm_bios_log {
+       void *bios_event_log;
+       void *bios_event_log_end;
+};
+
+struct tcpa_event {
+       u32 pcr_index;
+       u32 event_type;
+       u8 pcr_value[20];       /* SHA1 */
+       u32 event_size;
+       u8 event_data[0];
+};
+
+enum tcpa_event_types {
+       PREBOOT = 0,
+       POST_CODE,
+       UNUSED,
+       NO_ACTION,
+       SEPARATOR,
+       ACTION,
+       EVENT_TAG,
+       SCRTM_CONTENTS,
+       SCRTM_VERSION,
+       CPU_MICROCODE,
+       PLATFORM_CONFIG_FLAGS,
+       TABLE_OF_DEVICES,
+       COMPACT_HASH,
+       IPL,
+       IPL_PARTITION_DATA,
+       NONHOST_CODE,
+       NONHOST_CONFIG,
+       NONHOST_INFO,
+};
+
+struct tcpa_pc_event {
+       u32 event_id;
+       u32 event_size;
+       u8 event_data[0];
+};
+
+enum tcpa_pc_event_ids {
+       SMBIOS = 1,
+       BIS_CERT,
+       POST_BIOS_ROM,
+       ESCD,
+       CMOS,
+       NVRAM,
+       OPTION_ROM_EXEC,
+       OPTION_ROM_CONFIG,
+       OPTION_ROM_MICROCODE = 10,
+       S_CRTM_VERSION,
+       S_CRTM_CONTENTS,
+       POST_CONTENTS,
+       HOST_TABLE_OF_DEVICES,
+};
+
+int read_log(struct tpm_bios_log *log);
+
+#if defined(CONFIG_TCG_IBMVTPM) || defined(CONFIG_TCG_IBMVTPM_MODULE) || \
+       defined(CONFIG_ACPI)
+extern struct dentry **tpm_bios_log_setup(char *);
+extern void tpm_bios_log_teardown(struct dentry **);
+#else
+static inline struct dentry **tpm_bios_log_setup(char *name)
+{
+       return NULL;
+}
+static inline void tpm_bios_log_teardown(struct dentry **dir)
+{
+}
+#endif
+
+#endif
diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c
new file mode 100644 (file)
index 0000000..5a831ae
--- /dev/null
@@ -0,0 +1,695 @@
+/*
+ * Copyright (C) 2012 Infineon Technologies
+ *
+ * Authors:
+ * Peter Huewe <peter.huewe@infineon.com>
+ *
+ * Device driver for TCG/TCPA TPM (trusted platform module).
+ * Specifications at www.trustedcomputinggroup.org
+ *
+ * This device driver implements the TPM interface as defined in
+ * the TCG TPM Interface Spec version 1.2, revision 1.0 and the
+ * Infineon I2C Protocol Stack Specification v0.20.
+ *
+ * It is based on the original tpm_tis device driver from Leendert van
+ * Dorn and Kyleen Hall.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ *
+ */
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/wait.h>
+#include "tpm.h"
+
+/* max. buffer size supported by our TPM */
+#define TPM_BUFSIZE 1260
+
+/* max. number of iterations after I2C NAK */
+#define MAX_COUNT 3
+
+#define SLEEP_DURATION_LOW 55
+#define SLEEP_DURATION_HI 65
+
+/* max. number of iterations after I2C NAK for 'long' commands
+ * we need this especially for sending TPM_READY, since the cleanup after the
+ * transition to the ready state may take some time, but it is unpredictable
+ * how long it will take.
+ */
+#define MAX_COUNT_LONG 50
+
+#define SLEEP_DURATION_LONG_LOW 200
+#define SLEEP_DURATION_LONG_HI 220
+
+/* After sending TPM_READY to 'reset' the TPM we have to sleep even longer */
+#define SLEEP_DURATION_RESET_LOW 2400
+#define SLEEP_DURATION_RESET_HI 2600
+
+/* we want to use usleep_range instead of msleep for the 5ms TPM_TIMEOUT */
+#define TPM_TIMEOUT_US_LOW (TPM_TIMEOUT * 1000)
+#define TPM_TIMEOUT_US_HI  (TPM_TIMEOUT_US_LOW + 2000)
+
+/* expected value for DIDVID register */
+#define TPM_TIS_I2C_DID_VID 0x000b15d1L
+
+/* Structure to store I2C TPM specific stuff */
+struct tpm_inf_dev {
+       struct i2c_client *client;
+       u8 buf[TPM_BUFSIZE + sizeof(u8)]; /* max. buffer size + addr */
+       struct tpm_chip *chip;
+};
+
+static struct tpm_inf_dev tpm_dev;
+static struct i2c_driver tpm_tis_i2c_driver;
+
+/*
+ * iic_tpm_read() - read from TPM register
+ * @addr: register address to read from
+ * @buffer: provided by caller
+ * @len: number of bytes to read
+ *
+ * Read len bytes from TPM register and put them into
+ * buffer (little-endian format, i.e. first byte is put into buffer[0]).
+ *
+ * NOTE: TPM is big-endian for multi-byte values. Multi-byte
+ * values have to be swapped.
+ *
+ * NOTE: Unfortunately, we cannot use the combined read/write functions
+ * provided by the i2c core, as the TPM currently does not support the
+ * repeated start condition and has other special requirements.
+ * The i2c_smbus* functions do not work for this chip.
+ *
+ * Return -EIO on error, 0 on success.
+ */
+static int iic_tpm_read(u8 addr, u8 *buffer, size_t len)
+{
+
+       struct i2c_msg msg1 = { tpm_dev.client->addr, 0, 1, &addr };
+       struct i2c_msg msg2 = { tpm_dev.client->addr, I2C_M_RD, len, buffer };
+
+       int rc;
+       int count;
+
+       /* Lock the adapter for the duration of the whole sequence. */
+       if (!tpm_dev.client->adapter->algo->master_xfer)
+               return -EOPNOTSUPP;
+       i2c_lock_adapter(tpm_dev.client->adapter);
+
+       for (count = 0; count < MAX_COUNT; count++) {
+               rc = __i2c_transfer(tpm_dev.client->adapter, &msg1, 1);
+               if (rc > 0)
+                       break;  /* break here to skip sleep */
+
+               usleep_range(SLEEP_DURATION_LOW, SLEEP_DURATION_HI);
+       }
+
+       if (rc <= 0)
+               goto out;
+
+       /* After the TPM has successfully received the register address it needs
+        * some time, thus we're sleeping here again, before retrieving the data
+        */
+       for (count = 0; count < MAX_COUNT; count++) {
+               usleep_range(SLEEP_DURATION_LOW, SLEEP_DURATION_HI);
+               rc = __i2c_transfer(tpm_dev.client->adapter, &msg2, 1);
+               if (rc > 0)
+                       break;
+
+       }
+
+out:
+       i2c_unlock_adapter(tpm_dev.client->adapter);
+       if (rc <= 0)
+               return -EIO;
+
+       return 0;
+}
+
+static int iic_tpm_write_generic(u8 addr, u8 *buffer, size_t len,
+                                unsigned int sleep_low,
+                                unsigned int sleep_hi, u8 max_count)
+{
+       int rc = -EIO;
+       int count;
+
+       struct i2c_msg msg1 = { tpm_dev.client->addr, 0, len + 1, tpm_dev.buf };
+
+       if (len > TPM_BUFSIZE)
+               return -EINVAL;
+
+       if (!tpm_dev.client->adapter->algo->master_xfer)
+               return -EOPNOTSUPP;
+       i2c_lock_adapter(tpm_dev.client->adapter);
+
+       /* prepend the 'register address' to the buffer */
+       tpm_dev.buf[0] = addr;
+       memcpy(&(tpm_dev.buf[1]), buffer, len);
+
+       /*
+        * NOTE: We have to use these special mechanisms here and unfortunately
+        * cannot rely on the standard behavior of i2c_transfer.
+        */
+       for (count = 0; count < max_count; count++) {
+               rc = __i2c_transfer(tpm_dev.client->adapter, &msg1, 1);
+               if (rc > 0)
+                       break;
+
+               usleep_range(sleep_low, sleep_hi);
+       }
+
+       i2c_unlock_adapter(tpm_dev.client->adapter);
+       if (rc <= 0)
+               return -EIO;
+
+       return 0;
+}
+
+/*
+ * iic_tpm_write() - write to TPM register
+ * @addr: register address to write to
+ * @buffer: containing data to be written
+ * @len: number of bytes to write
+ *
+ * Write len bytes from provided buffer to TPM register (little
+ * endian format, i.e. buffer[0] is written as first byte).
+ *
+ * NOTE: TPM is big-endian for multi-byte values. Multi-byte
+ * values have to be swapped.
+ *
+ * NOTE: use this function instead of the iic_tpm_write_generic function.
+ *
+ * Return -EIO on error, 0 on success
+ */
+static int iic_tpm_write(u8 addr, u8 *buffer, size_t len)
+{
+       return iic_tpm_write_generic(addr, buffer, len, SLEEP_DURATION_LOW,
+                                    SLEEP_DURATION_HI, MAX_COUNT);
+}
+
+/*
+ * This function is needed especially for the cleanup situation after
+ * sending TPM_READY
+ * */
+static int iic_tpm_write_long(u8 addr, u8 *buffer, size_t len)
+{
+       return iic_tpm_write_generic(addr, buffer, len, SLEEP_DURATION_LONG_LOW,
+                                    SLEEP_DURATION_LONG_HI, MAX_COUNT_LONG);
+}
+
+enum tis_access {
+       TPM_ACCESS_VALID = 0x80,
+       TPM_ACCESS_ACTIVE_LOCALITY = 0x20,
+       TPM_ACCESS_REQUEST_PENDING = 0x04,
+       TPM_ACCESS_REQUEST_USE = 0x02,
+};
+
+enum tis_status {
+       TPM_STS_VALID = 0x80,
+       TPM_STS_COMMAND_READY = 0x40,
+       TPM_STS_GO = 0x20,
+       TPM_STS_DATA_AVAIL = 0x10,
+       TPM_STS_DATA_EXPECT = 0x08,
+};
+
+enum tis_defaults {
+       TIS_SHORT_TIMEOUT = 750,        /* ms */
+       TIS_LONG_TIMEOUT = 2000,        /* 2 sec */
+};
+
+#define        TPM_ACCESS(l)                   (0x0000 | ((l) << 4))
+#define        TPM_STS(l)                      (0x0001 | ((l) << 4))
+#define        TPM_DATA_FIFO(l)                (0x0005 | ((l) << 4))
+#define        TPM_DID_VID(l)                  (0x0006 | ((l) << 4))
+
+static int check_locality(struct tpm_chip *chip, int loc)
+{
+       u8 buf;
+       int rc;
+
+       rc = iic_tpm_read(TPM_ACCESS(loc), &buf, 1);
+       if (rc < 0)
+               return rc;
+
+       if ((buf & (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) ==
+           (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) {
+               chip->vendor.locality = loc;
+               return loc;
+       }
+
+       return -EIO;
+}
+
+/* implementation similar to tpm_tis */
+static void release_locality(struct tpm_chip *chip, int loc, int force)
+{
+       u8 buf;
+       if (iic_tpm_read(TPM_ACCESS(loc), &buf, 1) < 0)
+               return;
+
+       if (force || (buf & (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID)) ==
+           (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID)) {
+               buf = TPM_ACCESS_ACTIVE_LOCALITY;
+               iic_tpm_write(TPM_ACCESS(loc), &buf, 1);
+       }
+}
+
+static int request_locality(struct tpm_chip *chip, int loc)
+{
+       unsigned long stop;
+       u8 buf = TPM_ACCESS_REQUEST_USE;
+
+       if (check_locality(chip, loc) >= 0)
+               return loc;
+
+       iic_tpm_write(TPM_ACCESS(loc), &buf, 1);
+
+       /* wait for burstcount */
+       stop = jiffies + chip->vendor.timeout_a;
+       do {
+               if (check_locality(chip, loc) >= 0)
+                       return loc;
+               usleep_range(TPM_TIMEOUT_US_LOW, TPM_TIMEOUT_US_HI);
+       } while (time_before(jiffies, stop));
+
+       return -ETIME;
+}
+
+static u8 tpm_tis_i2c_status(struct tpm_chip *chip)
+{
+       /* NOTE: since I2C read may fail, return 0 in this case --> time-out */
+       u8 buf;
+       if (iic_tpm_read(TPM_STS(chip->vendor.locality), &buf, 1) < 0)
+               return 0;
+       else
+               return buf;
+}
+
+static void tpm_tis_i2c_ready(struct tpm_chip *chip)
+{
+       /* this causes the current command to be aborted */
+       u8 buf = TPM_STS_COMMAND_READY;
+       iic_tpm_write_long(TPM_STS(chip->vendor.locality), &buf, 1);
+}
+
+static ssize_t get_burstcount(struct tpm_chip *chip)
+{
+       unsigned long stop;
+       ssize_t burstcnt;
+       u8 buf[3];
+
+       /* wait for burstcount */
+       /* which timeout value, spec has 2 answers (c & d) */
+       stop = jiffies + chip->vendor.timeout_d;
+       do {
+               /* Note: STS is little endian */
+               if (iic_tpm_read(TPM_STS(chip->vendor.locality)+1, buf, 3) < 0)
+                       burstcnt = 0;
+               else
+                       burstcnt = (buf[2] << 16) + (buf[1] << 8) + buf[0];
+
+               if (burstcnt)
+                       return burstcnt;
+
+               usleep_range(TPM_TIMEOUT_US_LOW, TPM_TIMEOUT_US_HI);
+       } while (time_before(jiffies, stop));
+       return -EBUSY;
+}
+
+static int wait_for_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout,
+                        int *status)
+{
+       unsigned long stop;
+
+       /* check current status */
+       *status = tpm_tis_i2c_status(chip);
+       if ((*status & mask) == mask)
+               return 0;
+
+       stop = jiffies + timeout;
+       do {
+               /* since we just checked the status, give the TPM some time */
+               usleep_range(TPM_TIMEOUT_US_LOW, TPM_TIMEOUT_US_HI);
+               *status = tpm_tis_i2c_status(chip);
+               if ((*status & mask) == mask)
+                       return 0;
+
+       } while (time_before(jiffies, stop));
+
+       return -ETIME;
+}
+
+static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+       size_t size = 0;
+       ssize_t burstcnt;
+       u8 retries = 0;
+       int rc;
+
+       while (size < count) {
+               burstcnt = get_burstcount(chip);
+
+               /* burstcnt < 0 = TPM is busy */
+               if (burstcnt < 0)
+                       return burstcnt;
+
+               /* limit received data to max. left */
+               if (burstcnt > (count - size))
+                       burstcnt = count - size;
+
+               rc = iic_tpm_read(TPM_DATA_FIFO(chip->vendor.locality),
+                                 &(buf[size]), burstcnt);
+               if (rc == 0)
+                       size += burstcnt;
+               else if (rc < 0)
+                       retries++;
+
+               /* avoid endless loop in case of broken HW */
+               if (retries > MAX_COUNT_LONG)
+                       return -EIO;
+
+       }
+       return size;
+}
+
+static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+       int size = 0;
+       int expected, status;
+
+       if (count < TPM_HEADER_SIZE) {
+               size = -EIO;
+               goto out;
+       }
+
+       /* read first 10 bytes, including tag, paramsize, and result */
+       size = recv_data(chip, buf, TPM_HEADER_SIZE);
+       if (size < TPM_HEADER_SIZE) {
+               dev_err(chip->dev, "Unable to read header\n");
+               goto out;
+       }
+
+       expected = be32_to_cpu(*(__be32 *)(buf + 2));
+       if ((size_t) expected > count) {
+               size = -EIO;
+               goto out;
+       }
+
+       size += recv_data(chip, &buf[TPM_HEADER_SIZE],
+                         expected - TPM_HEADER_SIZE);
+       if (size < expected) {
+               dev_err(chip->dev, "Unable to read remainder of result\n");
+               size = -ETIME;
+               goto out;
+       }
+
+       wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c, &status);
+       if (status & TPM_STS_DATA_AVAIL) {      /* retry? */
+               dev_err(chip->dev, "Error left over data\n");
+               size = -EIO;
+               goto out;
+       }
+
+out:
+       tpm_tis_i2c_ready(chip);
+       /* The TPM needs some time to clean up here,
+        * so we sleep rather than keeping the bus busy
+        */
+       usleep_range(SLEEP_DURATION_RESET_LOW, SLEEP_DURATION_RESET_HI);
+       release_locality(chip, chip->vendor.locality, 0);
+       return size;
+}
+
+static int tpm_tis_i2c_send(struct tpm_chip *chip, u8 *buf, size_t len)
+{
+       int rc, status;
+       ssize_t burstcnt;
+       size_t count = 0;
+       u8 retries = 0;
+       u8 sts = TPM_STS_GO;
+
+       if (len > TPM_BUFSIZE)
+               return -E2BIG;  /* command is too long for our tpm, sorry */
+
+       if (request_locality(chip, 0) < 0)
+               return -EBUSY;
+
+       status = tpm_tis_i2c_status(chip);
+       if ((status & TPM_STS_COMMAND_READY) == 0) {
+               tpm_tis_i2c_ready(chip);
+               if (wait_for_stat
+                   (chip, TPM_STS_COMMAND_READY,
+                    chip->vendor.timeout_b, &status) < 0) {
+                       rc = -ETIME;
+                       goto out_err;
+               }
+       }
+
+       while (count < len - 1) {
+               burstcnt = get_burstcount(chip);
+
+               /* burstcnt < 0 = TPM is busy */
+               if (burstcnt < 0)
+                       return burstcnt;
+
+               if (burstcnt > (len - 1 - count))
+                       burstcnt = len - 1 - count;
+
+               rc = iic_tpm_write(TPM_DATA_FIFO(chip->vendor.locality),
+                                  &(buf[count]), burstcnt);
+               if (rc == 0)
+                       count += burstcnt;
+               else if (rc < 0)
+                       retries++;
+
+               /* avoid endless loop in case of broken HW */
+               if (retries > MAX_COUNT_LONG) {
+                       rc = -EIO;
+                       goto out_err;
+               }
+
+               wait_for_stat(chip, TPM_STS_VALID,
+                             chip->vendor.timeout_c, &status);
+
+               if ((status & TPM_STS_DATA_EXPECT) == 0) {
+                       rc = -EIO;
+                       goto out_err;
+               }
+
+       }
+
+       /* write last byte */
+       iic_tpm_write(TPM_DATA_FIFO(chip->vendor.locality), &(buf[count]), 1);
+       wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c, &status);
+       if ((status & TPM_STS_DATA_EXPECT) != 0) {
+               rc = -EIO;
+               goto out_err;
+       }
+
+       /* go and do it */
+       iic_tpm_write(TPM_STS(chip->vendor.locality), &sts, 1);
+
+       return len;
+out_err:
+       tpm_tis_i2c_ready(chip);
+       /* The TPM needs some time to clean up here,
+        * so we sleep rather than keeping the bus busy
+        */
+       usleep_range(SLEEP_DURATION_RESET_LOW, SLEEP_DURATION_RESET_HI);
+       release_locality(chip, chip->vendor.locality, 0);
+       return rc;
+}
+
+static const struct file_operations tis_ops = {
+       .owner = THIS_MODULE,
+       .llseek = no_llseek,
+       .open = tpm_open,
+       .read = tpm_read,
+       .write = tpm_write,
+       .release = tpm_release,
+};
+
+static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
+static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
+static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL);
+static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL);
+static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL);
+static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated, NULL);
+static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL);
+static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
+static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL);
+static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL);
+
+static struct attribute *tis_attrs[] = {
+       &dev_attr_pubek.attr,
+       &dev_attr_pcrs.attr,
+       &dev_attr_enabled.attr,
+       &dev_attr_active.attr,
+       &dev_attr_owned.attr,
+       &dev_attr_temp_deactivated.attr,
+       &dev_attr_caps.attr,
+       &dev_attr_cancel.attr,
+       &dev_attr_durations.attr,
+       &dev_attr_timeouts.attr,
+       NULL,
+};
+
+static struct attribute_group tis_attr_grp = {
+       .attrs = tis_attrs
+};
+
+static struct tpm_vendor_specific tpm_tis_i2c = {
+       .status = tpm_tis_i2c_status,
+       .recv = tpm_tis_i2c_recv,
+       .send = tpm_tis_i2c_send,
+       .cancel = tpm_tis_i2c_ready,
+       .req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+       .req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+       .req_canceled = TPM_STS_COMMAND_READY,
+       .attr_group = &tis_attr_grp,
+       .miscdev.fops = &tis_ops,
+};
+
+static int __devinit tpm_tis_i2c_init(struct device *dev)
+{
+       u32 vendor;
+       int rc = 0;
+       struct tpm_chip *chip;
+
+       chip = tpm_register_hardware(dev, &tpm_tis_i2c);
+       if (!chip) {
+               rc = -ENODEV;
+               goto out_err;
+       }
+
+       /* Disable interrupts */
+       chip->vendor.irq = 0;
+
+       /* Default timeouts */
+       chip->vendor.timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
+       chip->vendor.timeout_b = msecs_to_jiffies(TIS_LONG_TIMEOUT);
+       chip->vendor.timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
+       chip->vendor.timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
+
+       if (request_locality(chip, 0) != 0) {
+               rc = -ENODEV;
+               goto out_vendor;
+       }
+
+       /* read four bytes from DID_VID register */
+       if (iic_tpm_read(TPM_DID_VID(0), (u8 *)&vendor, 4) < 0) {
+               rc = -EIO;
+               goto out_release;
+       }
+
+       /* create DID_VID register value, after swapping to little-endian */
+       vendor = be32_to_cpu((__be32) vendor);
+
+       if (vendor != TPM_TIS_I2C_DID_VID) {
+               rc = -ENODEV;
+               goto out_release;
+       }
+
+       dev_info(dev, "1.2 TPM (device-id 0x%X)\n", vendor >> 16);
+
+       INIT_LIST_HEAD(&chip->vendor.list);
+       tpm_dev.chip = chip;
+
+       tpm_get_timeouts(chip);
+       tpm_do_selftest(chip);
+
+       return 0;
+
+out_release:
+       release_locality(chip, chip->vendor.locality, 1);
+
+out_vendor:
+       /* close file handles */
+       tpm_dev_vendor_release(chip);
+
+       /* remove hardware */
+       tpm_remove_hardware(chip->dev);
+
+       /* reset these pointers, otherwise we oops */
+       chip->dev->release = NULL;
+       chip->release = NULL;
+       tpm_dev.client = NULL;
+       dev_set_drvdata(chip->dev, chip);
+out_err:
+       return rc;
+}
+
+static const struct i2c_device_id tpm_tis_i2c_table[] = {
+       {"tpm_i2c_infineon", 0},
+       {},
+};
+
+MODULE_DEVICE_TABLE(i2c, tpm_tis_i2c_table);
+static SIMPLE_DEV_PM_OPS(tpm_tis_i2c_ops, tpm_pm_suspend, tpm_pm_resume);
+
+static int __devinit tpm_tis_i2c_probe(struct i2c_client *client,
+                            const struct i2c_device_id *id)
+{
+       int rc;
+       if (tpm_dev.client != NULL)
+               return -EBUSY;  /* We only support one client */
+
+       if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+               dev_err(&client->dev,
+                       "no algorithms associated to the i2c bus\n");
+               return -ENODEV;
+       }
+
+       client->driver = &tpm_tis_i2c_driver;
+       tpm_dev.client = client;
+       rc = tpm_tis_i2c_init(&client->dev);
+       if (rc != 0) {
+               client->driver = NULL;
+               tpm_dev.client = NULL;
+               rc = -ENODEV;
+       }
+       return rc;
+}
+
+static int __devexit tpm_tis_i2c_remove(struct i2c_client *client)
+{
+       struct tpm_chip *chip = tpm_dev.chip;
+       release_locality(chip, chip->vendor.locality, 1);
+
+       /* close file handles */
+       tpm_dev_vendor_release(chip);
+
+       /* remove hardware */
+       tpm_remove_hardware(chip->dev);
+
+       /* reset these pointers, otherwise we oops */
+       chip->dev->release = NULL;
+       chip->release = NULL;
+       tpm_dev.client = NULL;
+       dev_set_drvdata(chip->dev, chip);
+
+       return 0;
+}
+
+static struct i2c_driver tpm_tis_i2c_driver = {
+
+       .id_table = tpm_tis_i2c_table,
+       .probe = tpm_tis_i2c_probe,
+       .remove = tpm_tis_i2c_remove,
+       .driver = {
+                  .name = "tpm_i2c_infineon",
+                  .owner = THIS_MODULE,
+                  .pm = &tpm_tis_i2c_ops,
+                  },
+};
+
+module_i2c_driver(tpm_tis_i2c_driver);
+MODULE_AUTHOR("Peter Huewe <peter.huewe@infineon.com>");
+MODULE_DESCRIPTION("TPM TIS I2C Infineon Driver");
+MODULE_VERSION("2.1.5");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
new file mode 100644 (file)
index 0000000..efc4ab3
--- /dev/null
@@ -0,0 +1,749 @@
+/*
+ * Copyright (C) 2012 IBM Corporation
+ *
+ * Author: Ashley Lai <adlai@us.ibm.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * Device driver for TCG/TCPA TPM (trusted platform module).
+ * Specifications at www.trustedcomputinggroup.org
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/slab.h>
+#include <asm/vio.h>
+#include <asm/irq.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/wait.h>
+#include <asm/prom.h>
+
+#include "tpm.h"
+#include "tpm_ibmvtpm.h"
+
+static const char tpm_ibmvtpm_driver_name[] = "tpm_ibmvtpm";
+
+static struct vio_device_id tpm_ibmvtpm_device_table[] __devinitdata = {
+       { "IBM,vtpm", "IBM,vtpm"},
+       { "", "" }
+};
+MODULE_DEVICE_TABLE(vio, tpm_ibmvtpm_device_table);
+
+DECLARE_WAIT_QUEUE_HEAD(wq);
+
+/**
+ * ibmvtpm_send_crq - Send a CRQ request
+ * @vdev:      vio device struct
+ * @w1:                first word
+ * @w2:                second word
+ *
+ * Return value:
+ *     0 - Success
+ *     Non-zero - Failure
+ */
+static int ibmvtpm_send_crq(struct vio_dev *vdev, u64 w1, u64 w2)
+{
+       return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, w1, w2);
+}
+
+/**
+ * ibmvtpm_get_data - Retrieve ibm vtpm data
+ * @dev:       device struct
+ *
+ * Return value:
+ *     vtpm device struct
+ */
+static struct ibmvtpm_dev *ibmvtpm_get_data(const struct device *dev)
+{
+       struct tpm_chip *chip = dev_get_drvdata(dev);
+       if (chip)
+               return (struct ibmvtpm_dev *)chip->vendor.data;
+       return NULL;
+}
+
+/**
+ * tpm_ibmvtpm_recv - Receive data after send
+ * @chip:      tpm chip struct
+ * @buf:       buffer to read
+ * @count:     size of buffer
+ *
+ * Return value:
+ *     Number of bytes read
+ */
+static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+       struct ibmvtpm_dev *ibmvtpm;
+       u16 len;
+
+       ibmvtpm = (struct ibmvtpm_dev *)chip->vendor.data;
+
+       if (!ibmvtpm->rtce_buf) {
+               dev_err(ibmvtpm->dev, "ibmvtpm device is not ready\n");
+               return 0;
+       }
+
+       wait_event_interruptible(wq, ibmvtpm->crq_res.len != 0);
+
+       if (count < ibmvtpm->crq_res.len) {
+               dev_err(ibmvtpm->dev,
+                       "Invalid size in recv: count=%ld, crq_size=%d\n",
+                       count, ibmvtpm->crq_res.len);
+               return -EIO;
+       }
+
+       spin_lock(&ibmvtpm->rtce_lock);
+       memcpy((void *)buf, (void *)ibmvtpm->rtce_buf, ibmvtpm->crq_res.len);
+       memset(ibmvtpm->rtce_buf, 0, ibmvtpm->crq_res.len);
+       ibmvtpm->crq_res.valid = 0;
+       ibmvtpm->crq_res.msg = 0;
+       len = ibmvtpm->crq_res.len;
+       ibmvtpm->crq_res.len = 0;
+       spin_unlock(&ibmvtpm->rtce_lock);
+       return len;
+}
+
+/**
+ * tpm_ibmvtpm_send - Send tpm request
+ * @chip:      tpm chip struct
+ * @buf:       buffer contains data to send
+ * @count:     size of buffer
+ *
+ * Return value:
+ *     Number of bytes sent
+ */
+static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+       struct ibmvtpm_dev *ibmvtpm;
+       struct ibmvtpm_crq crq;
+       u64 *word = (u64 *) &crq;
+       int rc;
+
+       ibmvtpm = (struct ibmvtpm_dev *)chip->vendor.data;
+
+       if (!ibmvtpm->rtce_buf) {
+               dev_err(ibmvtpm->dev, "ibmvtpm device is not ready\n");
+               return 0;
+       }
+
+       if (count > ibmvtpm->rtce_size) {
+               dev_err(ibmvtpm->dev,
+                       "Invalid size in send: count=%ld, rtce_size=%d\n",
+                       count, ibmvtpm->rtce_size);
+               return -EIO;
+       }
+
+       spin_lock(&ibmvtpm->rtce_lock);
+       memcpy((void *)ibmvtpm->rtce_buf, (void *)buf, count);
+       crq.valid = (u8)IBMVTPM_VALID_CMD;
+       crq.msg = (u8)VTPM_TPM_COMMAND;
+       crq.len = (u16)count;
+       crq.data = ibmvtpm->rtce_dma_handle;
+
+       rc = ibmvtpm_send_crq(ibmvtpm->vdev, word[0], word[1]);
+       if (rc != H_SUCCESS) {
+               dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc);
+               rc = 0;
+       } else
+               rc = count;
+
+       spin_unlock(&ibmvtpm->rtce_lock);
+       return rc;
+}
+
+static void tpm_ibmvtpm_cancel(struct tpm_chip *chip)
+{
+       return;
+}
+
+static u8 tpm_ibmvtpm_status(struct tpm_chip *chip)
+{
+       return 0;
+}
+
+/**
+ * ibmvtpm_crq_get_rtce_size - Send a CRQ request to get rtce size
+ * @ibmvtpm:   vtpm device struct
+ *
+ * Return value:
+ *     0 - Success
+ *     Non-zero - Failure
+ */
+static int ibmvtpm_crq_get_rtce_size(struct ibmvtpm_dev *ibmvtpm)
+{
+       struct ibmvtpm_crq crq;
+       u64 *buf = (u64 *) &crq;
+       int rc;
+
+       crq.valid = (u8)IBMVTPM_VALID_CMD;
+       crq.msg = (u8)VTPM_GET_RTCE_BUFFER_SIZE;
+
+       rc = ibmvtpm_send_crq(ibmvtpm->vdev, buf[0], buf[1]);
+       if (rc != H_SUCCESS)
+               dev_err(ibmvtpm->dev,
+                       "ibmvtpm_crq_get_rtce_size failed rc=%d\n", rc);
+
+       return rc;
+}
+
+/**
+ * ibmvtpm_crq_get_version - Send a CRQ request to get vtpm version
+ *                        - Note that this is vtpm version and not tpm version
+ * @ibmvtpm:   vtpm device struct
+ *
+ * Return value:
+ *     0 - Success
+ *     Non-zero - Failure
+ */
+static int ibmvtpm_crq_get_version(struct ibmvtpm_dev *ibmvtpm)
+{
+       struct ibmvtpm_crq crq;
+       u64 *buf = (u64 *) &crq;
+       int rc;
+
+       crq.valid = (u8)IBMVTPM_VALID_CMD;
+       crq.msg = (u8)VTPM_GET_VERSION;
+
+       rc = ibmvtpm_send_crq(ibmvtpm->vdev, buf[0], buf[1]);
+       if (rc != H_SUCCESS)
+               dev_err(ibmvtpm->dev,
+                       "ibmvtpm_crq_get_version failed rc=%d\n", rc);
+
+       return rc;
+}
+
+/**
+ * ibmvtpm_crq_send_init_complete - Send a CRQ initialize complete message
+ * @ibmvtpm:   vtpm device struct
+ *
+ * Return value:
+ *     0 - Success
+ *     Non-zero - Failure
+ */
+static int ibmvtpm_crq_send_init_complete(struct ibmvtpm_dev *ibmvtpm)
+{
+       int rc;
+
+       rc = ibmvtpm_send_crq(ibmvtpm->vdev, INIT_CRQ_COMP_CMD, 0);
+       if (rc != H_SUCCESS)
+               dev_err(ibmvtpm->dev,
+                       "ibmvtpm_crq_send_init_complete failed rc=%d\n", rc);
+
+       return rc;
+}
+
+/**
+ * ibmvtpm_crq_send_init - Send a CRQ initialize message
+ * @ibmvtpm:   vtpm device struct
+ *
+ * Return value:
+ *     0 - Success
+ *     Non-zero - Failure
+ */
+static int ibmvtpm_crq_send_init(struct ibmvtpm_dev *ibmvtpm)
+{
+       int rc;
+
+       rc = ibmvtpm_send_crq(ibmvtpm->vdev, INIT_CRQ_CMD, 0);
+       if (rc != H_SUCCESS)
+               dev_err(ibmvtpm->dev,
+                       "ibmvtpm_crq_send_init failed rc=%d\n", rc);
+
+       return rc;
+}
+
+/**
+ * tpm_ibmvtpm_remove - ibm vtpm remove entry point
+ * @vdev:      vio device struct
+ *
+ * Return value:
+ *     0
+ */
+static int __devexit tpm_ibmvtpm_remove(struct vio_dev *vdev)
+{
+       struct ibmvtpm_dev *ibmvtpm = ibmvtpm_get_data(&vdev->dev);
+       int rc = 0;
+
+       /* Stop new interrupts and any in-flight tasklet before tearing the
+        * CRQ down. */
+       free_irq(vdev->irq, ibmvtpm);
+       tasklet_kill(&ibmvtpm->tasklet);
+
+       /* H_FREE_CRQ can be transiently busy; back off 100ms and retry. */
+       do {
+               if (rc)
+                       msleep(100);
+               rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
+       } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
+
+       /* Release resources in reverse order of probe: CRQ DMA mapping and
+        * page, then the RTCE buffer (only present once the vTPM reported
+        * its size), then the chip registration and the device struct. */
+       dma_unmap_single(ibmvtpm->dev, ibmvtpm->crq_dma_handle,
+                        CRQ_RES_BUF_SIZE, DMA_BIDIRECTIONAL);
+       free_page((unsigned long)ibmvtpm->crq_queue.crq_addr);
+
+       if (ibmvtpm->rtce_buf) {
+               dma_unmap_single(ibmvtpm->dev, ibmvtpm->rtce_dma_handle,
+                                ibmvtpm->rtce_size, DMA_BIDIRECTIONAL);
+               kfree(ibmvtpm->rtce_buf);
+       }
+
+       tpm_remove_hardware(ibmvtpm->dev);
+
+       kfree(ibmvtpm);
+
+       return 0;
+}
+
+/**
+ * tpm_ibmvtpm_get_desired_dma - Get DMA size needed by this driver
+ * @vdev:      vio device struct
+ *
+ * Return value:
+ *     Number of bytes the driver needs to DMA map
+ */
+static unsigned long tpm_ibmvtpm_get_desired_dma(struct vio_dev *vdev)
+{
+       struct ibmvtpm_dev *ibmvtpm = ibmvtpm_get_data(&vdev->dev);
+       /* CRQ page plus the request/response (RTCE) buffer are the only two
+        * mappings this driver creates. */
+       return CRQ_RES_BUF_SIZE + ibmvtpm->rtce_size;
+}
+
+/**
+ * tpm_ibmvtpm_suspend - Suspend
+ * @dev:       device struct
+ *
+ * Return value:
+ *     0 on success, non-zero hcall status otherwise
+ */
+static int tpm_ibmvtpm_suspend(struct device *dev)
+{
+       struct ibmvtpm_dev *ibmvtpm = ibmvtpm_get_data(dev);
+       struct ibmvtpm_crq crq;
+       u64 *buf = (u64 *) &crq;
+       int rc = 0;
+
+       /* Both 64-bit words of the CRQ message are handed to the hypervisor
+        * below, so clear the whole struct first: previously only valid/msg
+        * were set and the len/data/reserved fields carried uninitialized
+        * stack bytes out in buf[0]/buf[1]. */
+       memset(&crq, 0, sizeof(crq));
+       crq.valid = (u8)IBMVTPM_VALID_CMD;
+       crq.msg = (u8)VTPM_PREPARE_TO_SUSPEND;
+
+       rc = ibmvtpm_send_crq(ibmvtpm->vdev, buf[0], buf[1]);
+       if (rc != H_SUCCESS)
+               dev_err(ibmvtpm->dev,
+                       "tpm_ibmvtpm_suspend failed rc=%d\n", rc);
+
+       return rc;
+}
+
+/**
+ * ibmvtpm_reset_crq - Reset CRQ
+ * @ibmvtpm:   ibm vtpm struct
+ *
+ * Return value:
+ *     0 - Success
+ *     Non-zero - Failure
+ */
+static int ibmvtpm_reset_crq(struct ibmvtpm_dev *ibmvtpm)
+{
+       int rc = 0;
+
+       /* Free the existing CRQ, retrying with a 100ms back-off while the
+        * hypervisor reports busy. */
+       do {
+               if (rc)
+                       msleep(100);
+               rc = plpar_hcall_norets(H_FREE_CRQ,
+                                       ibmvtpm->vdev->unit_address);
+       } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
+
+       /* Start from a clean queue and re-register the same DMA page. */
+       memset(ibmvtpm->crq_queue.crq_addr, 0, CRQ_RES_BUF_SIZE);
+       ibmvtpm->crq_queue.index = 0;
+
+       return plpar_hcall_norets(H_REG_CRQ, ibmvtpm->vdev->unit_address,
+                                 ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE);
+}
+
+/**
+ * tpm_ibmvtpm_resume - Resume from suspend
+ * @dev:       device struct
+ *
+ * Return value:
+ *     0 on success, non-zero hcall/send failure code otherwise
+ */
+static int tpm_ibmvtpm_resume(struct device *dev)
+{
+       struct ibmvtpm_dev *ibmvtpm = ibmvtpm_get_data(dev);
+       unsigned long flags;
+       int rc = 0;
+
+       /* Re-enable the CRQ, retrying with a 100ms back-off while the
+        * hypervisor reports busy or in-progress. */
+       do {
+               if (rc)
+                       msleep(100);
+               rc = plpar_hcall_norets(H_ENABLE_CRQ,
+                                       ibmvtpm->vdev->unit_address);
+       } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
+
+       if (rc) {
+               dev_err(dev, "Error enabling ibmvtpm rc=%d\n", rc);
+               return rc;
+       }
+
+       /* Kick the tasklet (with interrupts masked, as the IRQ handler
+        * does) to drain any responses queued while we were suspended. */
+       spin_lock_irqsave(&ibmvtpm->lock, flags);
+       vio_disable_interrupts(ibmvtpm->vdev);
+       tasklet_schedule(&ibmvtpm->tasklet);
+       spin_unlock_irqrestore(&ibmvtpm->lock, flags);
+
+       /* Restart the CRQ init handshake with the vTPM. */
+       rc = ibmvtpm_crq_send_init(ibmvtpm);
+       if (rc)
+               dev_err(dev, "Error send_init rc=%d\n", rc);
+
+       return rc;
+}
+
+/* Character-device entry points; all work is delegated to the generic
+ * TPM core (tpm_open/read/write/release). */
+static const struct file_operations ibmvtpm_ops = {
+       .owner = THIS_MODULE,
+       .llseek = no_llseek,
+       .open = tpm_open,
+       .read = tpm_read,
+       .write = tpm_write,
+       .release = tpm_release,
+};
+
+/* Standard TPM sysfs attributes, backed by the generic tpm_show_*/
+/* tpm_store_* helpers from the TPM core. */
+static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
+static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
+static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL);
+static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL);
+static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL);
+static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated,
+                  NULL);
+static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL);
+static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
+static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL);
+static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL);
+
+static struct attribute *ibmvtpm_attrs[] = {
+       &dev_attr_pubek.attr,
+       &dev_attr_pcrs.attr,
+       &dev_attr_enabled.attr,
+       &dev_attr_active.attr,
+       &dev_attr_owned.attr,
+       &dev_attr_temp_deactivated.attr,
+       &dev_attr_caps.attr,
+       &dev_attr_cancel.attr,
+       &dev_attr_durations.attr,
+       &dev_attr_timeouts.attr, NULL,
+};
+
+static struct attribute_group ibmvtpm_attr_grp = { .attrs = ibmvtpm_attrs };
+
+/* Vendor hooks registered with the TPM core via tpm_register_hardware(). */
+static const struct tpm_vendor_specific tpm_ibmvtpm = {
+       .recv = tpm_ibmvtpm_recv,
+       .send = tpm_ibmvtpm_send,
+       .cancel = tpm_ibmvtpm_cancel,
+       .status = tpm_ibmvtpm_status,
+       .req_complete_mask = 0,
+       .req_complete_val = 0,
+       .req_canceled = 0,
+       .attr_group = &ibmvtpm_attr_grp,
+       .miscdev = { .fops = &ibmvtpm_ops, },
+};
+
+static const struct dev_pm_ops tpm_ibmvtpm_pm_ops = {
+       .suspend = tpm_ibmvtpm_suspend,
+       .resume = tpm_ibmvtpm_resume,
+};
+
+/**
+ * ibmvtpm_crq_get_next - Get next responded crq
+ * @ibmvtpm    vtpm device struct
+ *
+ * Return value:
+ *     vtpm crq pointer, or NULL if the slot at the current index holds
+ *     no response yet
+ */
+static struct ibmvtpm_crq *ibmvtpm_crq_get_next(struct ibmvtpm_dev *ibmvtpm)
+{
+       struct ibmvtpm_crq_queue *crq_q = &ibmvtpm->crq_queue;
+       struct ibmvtpm_crq *crq = &crq_q->crq_addr[crq_q->index];
+
+       /* A response entry has the VTPM_MSG_RES bit set in its valid byte;
+        * consume it by advancing (and wrapping) the ring index. */
+       if (crq->valid & VTPM_MSG_RES) {
+               if (++crq_q->index == crq_q->num_entry)
+                       crq_q->index = 0;
+               /* NOTE(review): read barrier sits after the index update but
+                * before the caller reads the payload; confirm this ordering
+                * against other pseries CRQ drivers. */
+               rmb();
+       } else
+               crq = NULL;
+       return crq;
+}
+
+/**
+ * ibmvtpm_crq_process - Process responded crq
+ * @crq                crq to be processed
+ * @ibmvtpm    vtpm device struct
+ *
+ * Dispatches on the CRQ valid byte: init-handshake responses vs. vTPM
+ * command responses. Runs from the tasklet with ibmvtpm->lock held.
+ *
+ * Return value:
+ *     Nothing
+ */
+static void ibmvtpm_crq_process(struct ibmvtpm_crq *crq,
+                               struct ibmvtpm_dev *ibmvtpm)
+{
+       int rc = 0;
+
+       switch (crq->valid) {
+       case VALID_INIT_CRQ:
+               switch (crq->msg) {
+               case INIT_CRQ_RES:
+                       dev_info(ibmvtpm->dev, "CRQ initialized\n");
+                       rc = ibmvtpm_crq_send_init_complete(ibmvtpm);
+                       if (rc)
+                               dev_err(ibmvtpm->dev, "Unable to send CRQ init complete rc=%d\n", rc);
+                       return;
+               case INIT_CRQ_COMP_RES:
+                       dev_info(ibmvtpm->dev,
+                                "CRQ initialization completed\n");
+                       return;
+               default:
+                       dev_err(ibmvtpm->dev, "Unknown crq message type: %d\n", crq->msg);
+                       return;
+               }
+               return;
+       case IBMVTPM_VALID_CMD:
+               switch (crq->msg) {
+               case VTPM_GET_RTCE_BUFFER_SIZE_RES:
+                       /* crq->len is u16: only zero is invalid. The old
+                        * "<= 0" form was an always-false-for-negatives
+                        * unsigned comparison. */
+                       if (crq->len == 0) {
+                               dev_err(ibmvtpm->dev, "Invalid rtce size\n");
+                               return;
+                       }
+                       /* Allocate and DMA-map the request/response buffer
+                        * at the size the vTPM just reported. */
+                       ibmvtpm->rtce_size = crq->len;
+                       ibmvtpm->rtce_buf = kmalloc(ibmvtpm->rtce_size,
+                                                   GFP_KERNEL);
+                       if (!ibmvtpm->rtce_buf) {
+                               dev_err(ibmvtpm->dev, "Failed to allocate memory for rtce buffer\n");
+                               return;
+                       }
+
+                       ibmvtpm->rtce_dma_handle = dma_map_single(ibmvtpm->dev,
+                               ibmvtpm->rtce_buf, ibmvtpm->rtce_size,
+                               DMA_BIDIRECTIONAL);
+
+                       if (dma_mapping_error(ibmvtpm->dev,
+                                             ibmvtpm->rtce_dma_handle)) {
+                               kfree(ibmvtpm->rtce_buf);
+                               ibmvtpm->rtce_buf = NULL;
+                               dev_err(ibmvtpm->dev, "Failed to dma map rtce buffer\n");
+                       }
+
+                       return;
+               case VTPM_GET_VERSION_RES:
+                       ibmvtpm->vtpm_version = crq->data;
+                       return;
+               case VTPM_TPM_COMMAND_RES:
+                       /* Copy the response header and wake the waiter in
+                        * the recv path. */
+                       ibmvtpm->crq_res.valid = crq->valid;
+                       ibmvtpm->crq_res.msg = crq->msg;
+                       ibmvtpm->crq_res.len = crq->len;
+                       ibmvtpm->crq_res.data = crq->data;
+                       wake_up_interruptible(&wq);
+                       return;
+               default:
+                       return;
+               }
+       }
+       return;
+}
+
+/**
+ * ibmvtpm_interrupt - Interrupt handler
+ * @irq:               irq number to handle
+ * @vtpm_instance:     vtpm that received interrupt
+ *
+ * Masks further VIO interrupts and defers CRQ draining to the tasklet.
+ *
+ * Returns:
+ *     IRQ_HANDLED
+ **/
+static irqreturn_t ibmvtpm_interrupt(int irq, void *vtpm_instance)
+{
+       struct ibmvtpm_dev *ibmvtpm = (struct ibmvtpm_dev *) vtpm_instance;
+       unsigned long flags;
+
+       /* Disable-then-schedule under the lock; the tasklet re-enables
+        * interrupts once the queue is drained. */
+       spin_lock_irqsave(&ibmvtpm->lock, flags);
+       vio_disable_interrupts(ibmvtpm->vdev);
+       tasklet_schedule(&ibmvtpm->tasklet);
+       spin_unlock_irqrestore(&ibmvtpm->lock, flags);
+
+       return IRQ_HANDLED;
+}
+
+/**
+ * ibmvtpm_tasklet - Interrupt handler tasklet
+ * @data:      ibm vtpm device struct
+ *
+ * Drains every pending response from the CRQ ring, then re-enables VIO
+ * interrupts (they were disabled by ibmvtpm_interrupt).
+ *
+ * Returns:
+ *     Nothing
+ **/
+static void ibmvtpm_tasklet(void *data)
+{
+       struct ibmvtpm_dev *ibmvtpm = data;
+       struct ibmvtpm_crq *crq;
+       unsigned long flags;
+
+       spin_lock_irqsave(&ibmvtpm->lock, flags);
+       while ((crq = ibmvtpm_crq_get_next(ibmvtpm)) != NULL) {
+               ibmvtpm_crq_process(crq, ibmvtpm);
+               /* Mark the slot free for the hypervisor before moving on. */
+               crq->valid = 0;
+               wmb();
+       }
+
+       vio_enable_interrupts(ibmvtpm->vdev);
+       spin_unlock_irqrestore(&ibmvtpm->lock, flags);
+}
+
+/**
+ * tpm_ibmvtpm_probe - ibm vtpm initialize entry point
+ * @vio_dev:   vio device struct
+ * @id:                vio device id struct
+ *
+ * Registers the TPM chip, allocates and maps the CRQ page, registers the
+ * CRQ with the hypervisor, wires up the interrupt/tasklet, and starts the
+ * init/version/rtce-size handshake. On failure unwinds in reverse order
+ * via the labels at the bottom.
+ *
+ * Return value:
+ *     0 - Success
+ *     Non-zero - Failure
+ */
+static int __devinit tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
+                                  const struct vio_device_id *id)
+{
+       struct ibmvtpm_dev *ibmvtpm;
+       struct device *dev = &vio_dev->dev;
+       struct ibmvtpm_crq_queue *crq_q;
+       struct tpm_chip *chip;
+       int rc = -ENOMEM, rc1;
+
+       chip = tpm_register_hardware(dev, &tpm_ibmvtpm);
+       if (!chip) {
+               dev_err(dev, "tpm_register_hardware failed\n");
+               return -ENODEV;
+       }
+
+       ibmvtpm = kzalloc(sizeof(struct ibmvtpm_dev), GFP_KERNEL);
+       if (!ibmvtpm) {
+               dev_err(dev, "kzalloc for ibmvtpm failed\n");
+               goto cleanup;
+       }
+
+       /* One zeroed page backs the CRQ ring shared with the hypervisor. */
+       crq_q = &ibmvtpm->crq_queue;
+       crq_q->crq_addr = (struct ibmvtpm_crq *)get_zeroed_page(GFP_KERNEL);
+       if (!crq_q->crq_addr) {
+               dev_err(dev, "Unable to allocate memory for crq_addr\n");
+               goto cleanup;
+       }
+
+       crq_q->num_entry = CRQ_RES_BUF_SIZE / sizeof(*crq_q->crq_addr);
+       ibmvtpm->crq_dma_handle = dma_map_single(dev, crq_q->crq_addr,
+                                                CRQ_RES_BUF_SIZE,
+                                                DMA_BIDIRECTIONAL);
+
+       if (dma_mapping_error(dev, ibmvtpm->crq_dma_handle)) {
+               dev_err(dev, "dma mapping failed\n");
+               goto cleanup;
+       }
+
+       /* H_RESOURCE means a CRQ is already registered (e.g. after kexec);
+        * free and re-register it via the reset path. */
+       rc = plpar_hcall_norets(H_REG_CRQ, vio_dev->unit_address,
+                               ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE);
+       if (rc == H_RESOURCE)
+               rc = ibmvtpm_reset_crq(ibmvtpm);
+
+       if (rc) {
+               dev_err(dev, "Unable to register CRQ rc=%d\n", rc);
+               goto reg_crq_cleanup;
+       }
+
+       /* NOTE(review): ibmvtpm_tasklet takes void * but tasklet callbacks
+        * are declared as taking unsigned long, hence the cast — confirm
+        * this matches the file's other callers. */
+       tasklet_init(&ibmvtpm->tasklet, (void *)ibmvtpm_tasklet,
+                    (unsigned long)ibmvtpm);
+
+       rc = request_irq(vio_dev->irq, ibmvtpm_interrupt, 0,
+                        tpm_ibmvtpm_driver_name, ibmvtpm);
+       if (rc) {
+               dev_err(dev, "Error %d register irq 0x%x\n", rc, vio_dev->irq);
+               goto init_irq_cleanup;
+       }
+
+       rc = vio_enable_interrupts(vio_dev);
+       if (rc) {
+               dev_err(dev, "Error %d enabling interrupts\n", rc);
+               goto init_irq_cleanup;
+       }
+
+       crq_q->index = 0;
+
+       ibmvtpm->dev = dev;
+       ibmvtpm->vdev = vio_dev;
+       chip->vendor.data = (void *)ibmvtpm;
+
+       spin_lock_init(&ibmvtpm->lock);
+       spin_lock_init(&ibmvtpm->rtce_lock);
+
+       /* Kick off the async handshake; responses arrive via the tasklet. */
+       rc = ibmvtpm_crq_send_init(ibmvtpm);
+       if (rc)
+               goto init_irq_cleanup;
+
+       rc = ibmvtpm_crq_get_version(ibmvtpm);
+       if (rc)
+               goto init_irq_cleanup;
+
+       rc = ibmvtpm_crq_get_rtce_size(ibmvtpm);
+       if (rc)
+               goto init_irq_cleanup;
+
+       return rc;
+init_irq_cleanup:
+       tasklet_kill(&ibmvtpm->tasklet);
+       do {
+               rc1 = plpar_hcall_norets(H_FREE_CRQ, vio_dev->unit_address);
+       } while (rc1 == H_BUSY || H_IS_LONG_BUSY(rc1));
+reg_crq_cleanup:
+       dma_unmap_single(dev, ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE,
+                        DMA_BIDIRECTIONAL);
+cleanup:
+       /* crq_q is only valid when ibmvtpm was allocated, hence the guard. */
+       if (ibmvtpm) {
+               if (crq_q->crq_addr)
+                       free_page((unsigned long)crq_q->crq_addr);
+               kfree(ibmvtpm);
+       }
+
+       tpm_remove_hardware(dev);
+
+       return rc;
+}
+
+/* VIO bus glue: matches tpm_ibmvtpm_device_table and wires the probe/
+ * remove/PM entry points defined above. */
+static struct vio_driver ibmvtpm_driver = {
+       .id_table        = tpm_ibmvtpm_device_table,
+       .probe           = tpm_ibmvtpm_probe,
+       .remove          = tpm_ibmvtpm_remove,
+       .get_desired_dma = tpm_ibmvtpm_get_desired_dma,
+       .name            = tpm_ibmvtpm_driver_name,
+       .pm              = &tpm_ibmvtpm_pm_ops,
+};
+
+/**
+ * ibmvtpm_module_init - Initialize ibm vtpm module
+ *
+ * Registers the VIO driver; probe runs when a matching device is found.
+ *
+ * Return value:
+ *     0 -Success
+ *     Non-zero - Failure
+ */
+static int __init ibmvtpm_module_init(void)
+{
+       return vio_register_driver(&ibmvtpm_driver);
+}
+
+/**
+ * ibmvtpm_module_exit - Teardown ibm vtpm module
+ *
+ * Return value:
+ *     Nothing
+ */
+static void __exit ibmvtpm_module_exit(void)
+{
+       vio_unregister_driver(&ibmvtpm_driver);
+}
+
+module_init(ibmvtpm_module_init);
+module_exit(ibmvtpm_module_exit);
+
+/* Module metadata. */
+MODULE_AUTHOR("adlai@us.ibm.com");
+MODULE_DESCRIPTION("IBM vTPM Driver");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm_ibmvtpm.h b/drivers/char/tpm/tpm_ibmvtpm.h
new file mode 100644 (file)
index 0000000..4296eb4
--- /dev/null
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2012 IBM Corporation
+ *
+ * Author: Ashley Lai <adlai@us.ibm.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * Device driver for TCG/TCPA TPM (trusted platform module).
+ * Specifications at www.trustedcomputinggroup.org
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ */
+
+#ifndef __TPM_IBMVTPM_H__
+#define __TPM_IBMVTPM_H__
+
+/* vTPM Message Format 1 */
+struct ibmvtpm_crq {
+       u8 valid;       /* message class (VALID_INIT_CRQ/IBMVTPM_VALID_CMD) */
+       u8 msg;         /* message type; responses have VTPM_MSG_RES set */
+       u16 len;        /* payload length */
+       u32 data;       /* payload word (e.g. version, buffer address) */
+       u64 reserved;
+} __attribute__((packed, aligned(8)));
+
+/* Ring of CRQ entries shared with the hypervisor. */
+struct ibmvtpm_crq_queue {
+       struct ibmvtpm_crq *crq_addr;   /* one DMA-mapped page of entries */
+       u32 index;                      /* next slot to consume */
+       u32 num_entry;
+};
+
+/* Per-device state for one vTPM VIO device. */
+struct ibmvtpm_dev {
+       struct device *dev;
+       struct vio_dev *vdev;
+       struct ibmvtpm_crq_queue crq_queue;
+       dma_addr_t crq_dma_handle;
+       spinlock_t lock;                /* protects CRQ ring + tasklet */
+       struct tasklet_struct tasklet;
+       u32 rtce_size;
+       /* Request/response buffer: plain kmalloc'd kernel memory that is
+        * dma_map_single'd and kfree'd, so it must NOT carry __iomem. */
+       void *rtce_buf;
+       dma_addr_t rtce_dma_handle;
+       spinlock_t rtce_lock;           /* serializes rtce_buf users */
+       struct ibmvtpm_crq crq_res;     /* last command response header */
+       u32 vtpm_version;
+};
+
+#define CRQ_RES_BUF_SIZE       PAGE_SIZE
+
+/* Initialize CRQ */
+#define INIT_CRQ_CMD           0xC001000000000000LL /* Init cmd */
+#define INIT_CRQ_COMP_CMD      0xC002000000000000LL /* Init complete cmd */
+#define INIT_CRQ_RES           0x01    /* Init response */
+#define INIT_CRQ_COMP_RES      0x02    /* Init complete response */
+#define VALID_INIT_CRQ         0xC0    /* Valid command for init crq */
+
+/* vTPM CRQ response is the message type | 0x80 */
+#define VTPM_MSG_RES           0x80
+#define IBMVTPM_VALID_CMD      0x80
+
+/* vTPM CRQ message types (request, and request | VTPM_MSG_RES response) */
+#define VTPM_GET_VERSION                       0x01
+#define VTPM_GET_VERSION_RES                   (0x01 | VTPM_MSG_RES)
+
+#define VTPM_TPM_COMMAND                       0x02
+#define VTPM_TPM_COMMAND_RES                   (0x02 | VTPM_MSG_RES)
+
+#define VTPM_GET_RTCE_BUFFER_SIZE              0x03
+#define VTPM_GET_RTCE_BUFFER_SIZE_RES          (0x03 | VTPM_MSG_RES)
+
+#define VTPM_PREPARE_TO_SUSPEND                        0x04
+#define VTPM_PREPARE_TO_SUSPEND_RES            (0x04 | VTPM_MSG_RES)
+
+#endif
diff --git a/drivers/char/tpm/tpm_of.c b/drivers/char/tpm/tpm_of.c
new file mode 100644 (file)
index 0000000..98ba2bd
--- /dev/null
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2012 IBM Corporation
+ *
+ * Author: Ashley Lai <adlai@us.ibm.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * Read the event log created by the firmware on PPC64
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/of.h>
+
+#include "tpm.h"
+#include "tpm_eventlog.h"
+
+/*
+ * read_log - copy the firmware event log ("SML") out of the device tree
+ * @log: destination; bios_event_log must be NULL on entry
+ *
+ * Returns 0 on success; -EFAULT if already initialized, -ENODEV if no
+ * ibm,vtpm node, -EIO on missing/empty properties, -ENOMEM on allocation
+ * failure. On success the caller owns the kmalloc'd bios_event_log.
+ */
+int read_log(struct tpm_bios_log *log)
+{
+       struct device_node *np;
+       const u32 *sizep;
+       const __be64 *basep;
+
+       if (log->bios_event_log != NULL) {
+               pr_err("%s: ERROR - Eventlog already initialized\n", __func__);
+               return -EFAULT;
+       }
+
+       np = of_find_node_by_name(NULL, "ibm,vtpm");
+       if (!np) {
+               pr_err("%s: ERROR - IBMVTPM not supported\n", __func__);
+               return -ENODEV;
+       }
+
+       sizep = of_get_property(np, "linux,sml-size", NULL);
+       if (sizep == NULL) {
+               pr_err("%s: ERROR - SML size not found\n", __func__);
+               goto cleanup_eio;
+       }
+       if (*sizep == 0) {
+               pr_err("%s: ERROR - event log area empty\n", __func__);
+               goto cleanup_eio;
+       }
+
+       basep = of_get_property(np, "linux,sml-base", NULL);
+       if (basep == NULL) {
+               /* pr_err() supplies KERN_ERR itself; passing it again would
+                * embed the loglevel prefix in the message text. */
+               pr_err("%s: ERROR - SML not found\n", __func__);
+               goto cleanup_eio;
+       }
+
+       log->bios_event_log = kmalloc(*sizep, GFP_KERNEL);
+       if (!log->bios_event_log) {
+               pr_err("%s: ERROR - Not enough memory for BIOS measurements\n",
+                      __func__);
+               of_node_put(np);
+               return -ENOMEM;
+       }
+
+       log->bios_event_log_end = log->bios_event_log + *sizep;
+
+       memcpy(log->bios_event_log, __va(be64_to_cpup(basep)), *sizep);
+       /* sizep/basep point into the node's property data, so drop the node
+        * reference only after the last use of them. */
+       of_node_put(np);
+
+       return 0;
+
+cleanup_eio:
+       of_node_put(np);
+       return -EIO;
+}
diff --git a/drivers/char/tpm/tpm_ppi.c b/drivers/char/tpm/tpm_ppi.c
new file mode 100644 (file)
index 0000000..f27b58c
--- /dev/null
@@ -0,0 +1,461 @@
+#include <linux/acpi.h>
+#include <acpi/acpi_drivers.h>
+#include "tpm.h"
+
+/* PPI _DSM UUID 3DDDFAA6-361B-4EB4-A424-8D10089D1653 in the mixed-endian
+ * byte layout ACPI expects for a raw UUID buffer. */
+static const u8 tpm_ppi_uuid[] = {
+       0xA6, 0xFA, 0xDD, 0x3D,
+       0x1B, 0x36,
+       0xB4, 0x4E,
+       0xA4, 0x24,
+       0x8D, 0x10, 0x08, 0x9D, 0x16, 0x53
+};
+/* Substring matched against ACPI pathnames to locate the TPM device. */
+static char *tpm_device_name = "TPM";
+
+/* PPI _DSM revision and function indices. */
+#define TPM_PPI_REVISION_ID    1
+#define TPM_PPI_FN_VERSION     1
+#define TPM_PPI_FN_SUBREQ      2
+#define TPM_PPI_FN_GETREQ      3
+#define TPM_PPI_FN_GETACT      4
+#define TPM_PPI_FN_GETRSP      5
+#define TPM_PPI_FN_SUBREQ2     7
+#define TPM_PPI_FN_GETOPR      8
+#define PPI_TPM_REQ_MAX                22
+#define PPI_VS_REQ_START       128
+#define PPI_VS_REQ_END         255
+#define PPI_VERSION_LEN                3
+
+/*
+ * ppi_callback - acpi_walk_namespace callback locating the TPM device
+ * @context: substring to match against the node's full ACPI pathname
+ *
+ * Stores the matching handle in *return_value and terminates the walk.
+ */
+static acpi_status ppi_callback(acpi_handle handle, u32 level, void *context,
+                               void **return_value)
+{
+       acpi_status status;
+       struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+
+       /* If the name lookup fails, buffer.pointer is not valid; skip this
+        * node instead of handing NULL to strstr(). */
+       status = acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
+       if (ACPI_FAILURE(status))
+               return AE_OK;
+
+       if (strstr(buffer.pointer, context) != NULL) {
+               *return_value = handle;
+               kfree(buffer.pointer);
+               return AE_CTRL_TERMINATE;
+       }
+       /* No match: free the pathname too — the old code leaked it for
+        * every non-matching node visited by the walk. */
+       kfree(buffer.pointer);
+       return AE_OK;
+}
+
+/* Fill the four standard _DSM arguments: UUID buffer, revision, function
+ * index, and an (initially empty) package. Callers may overwrite
+ * params[2]/params[3] afterwards for specific PPI calls. */
+static inline void ppi_assign_params(union acpi_object params[4],
+                                    u64 function_num)
+{
+       params[0].type = ACPI_TYPE_BUFFER;
+       params[0].buffer.length = sizeof(tpm_ppi_uuid);
+       params[0].buffer.pointer = (char *)tpm_ppi_uuid;
+       params[1].type = ACPI_TYPE_INTEGER;
+       params[1].integer.value = TPM_PPI_REVISION_ID;
+       params[2].type = ACPI_TYPE_INTEGER;
+       params[2].integer.value = function_num;
+       params[3].type = ACPI_TYPE_PACKAGE;
+       params[3].package.count = 0;
+       params[3].package.elements = NULL;
+}
+
+/* sysfs show: PPI version string returned by _DSM function 1. */
+static ssize_t tpm_show_ppi_version(struct device *dev,
+                                   struct device_attribute *attr, char *buf)
+{
+       acpi_handle handle;
+       acpi_status status;
+       struct acpi_object_list input;
+       struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+       union acpi_object params[4];
+       union acpi_object *obj;
+
+       input.count = 4;
+       ppi_assign_params(params, TPM_PPI_FN_VERSION);
+       input.pointer = params;
+       /* Locate the TPM device by pathname substring match. */
+       status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
+                                    ACPI_UINT32_MAX, ppi_callback, NULL,
+                                    tpm_device_name, &handle);
+       if (ACPI_FAILURE(status))
+               return -ENXIO;
+
+       status = acpi_evaluate_object_typed(handle, "_DSM", &input, &output,
+                                        ACPI_TYPE_STRING);
+       if (ACPI_FAILURE(status))
+               return -ENOMEM;
+       obj = (union acpi_object *)output.pointer;
+       /* NOTE(review): status (acpi_status, u32) is reused here for the
+        * scnprintf byte count returned as ssize_t — works for small
+        * positive counts, but a dedicated ssize_t would be cleaner. */
+       status = scnprintf(buf, PAGE_SIZE, "%s\n", obj->string.pointer);
+       kfree(output.pointer);
+       return status;
+}
+
+/* sysfs show: pending PPI request (_DSM function 3). */
+static ssize_t tpm_show_ppi_request(struct device *dev,
+                                   struct device_attribute *attr, char *buf)
+{
+       acpi_handle handle;
+       acpi_status status;
+       struct acpi_object_list input;
+       struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+       union acpi_object params[4];
+       union acpi_object *ret_obj;
+
+       input.count = 4;
+       ppi_assign_params(params, TPM_PPI_FN_GETREQ);
+       input.pointer = params;
+       status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
+                                    ACPI_UINT32_MAX, ppi_callback, NULL,
+                                    tpm_device_name, &handle);
+       if (ACPI_FAILURE(status))
+               return -ENXIO;
+
+       status = acpi_evaluate_object_typed(handle, "_DSM", &input, &output,
+                                           ACPI_TYPE_PACKAGE);
+       if (ACPI_FAILURE(status))
+               return -ENOMEM;
+       /*
+        * output.pointer should be of package type, including two integers.
+        * The first is function return code, 0 means success and 1 means
+        * error. The second is pending TPM operation requested by the OS, 0
+        * means none and >0 means operation value.
+        */
+       ret_obj = ((union acpi_object *)output.pointer)->package.elements;
+       if (ret_obj->type == ACPI_TYPE_INTEGER) {
+               /* NOTE(review): negative errnos are stored in status, which
+                * is acpi_status (u32), before being returned as ssize_t —
+                * verify the callers see the intended sign. */
+               if (ret_obj->integer.value) {
+                       status = -EFAULT;
+                       goto cleanup;
+               }
+               ret_obj++;
+               if (ret_obj->type == ACPI_TYPE_INTEGER)
+                       status = scnprintf(buf, PAGE_SIZE, "%llu\n",
+                                          ret_obj->integer.value);
+               else
+                       status = -EINVAL;
+       } else {
+               status = -EINVAL;
+       }
+cleanup:
+       kfree(output.pointer);
+       return status;
+}
+
+/*
+ * sysfs store: submit a PPI operation request to the pre-OS environment.
+ * Queries the PPI version first to pick function index and argument type.
+ */
+static ssize_t tpm_store_ppi_request(struct device *dev,
+                                    struct device_attribute *attr,
+                                    const char *buf, size_t count)
+{
+       char version[PPI_VERSION_LEN + 1];
+       acpi_handle handle;
+       acpi_status status;
+       struct acpi_object_list input;
+       struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+       union acpi_object params[4];
+       union acpi_object obj;
+       u32 req;
+       u64 ret;
+
+       input.count = 4;
+       ppi_assign_params(params, TPM_PPI_FN_VERSION);
+       input.pointer = params;
+       status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
+                                    ACPI_UINT32_MAX, ppi_callback, NULL,
+                                    tpm_device_name, &handle);
+       if (ACPI_FAILURE(status))
+               return -ENXIO;
+
+       status = acpi_evaluate_object_typed(handle, "_DSM", &input, &output,
+                                           ACPI_TYPE_STRING);
+       if (ACPI_FAILURE(status))
+               return -ENOMEM;
+       /* strncpy of exactly PPI_VERSION_LEN bytes does not NUL-terminate
+        * when the source is that long or longer, so terminate manually
+        * before the strcmp()s below. */
+       strncpy(version,
+               ((union acpi_object *)output.pointer)->string.pointer,
+               PPI_VERSION_LEN);
+       version[PPI_VERSION_LEN] = '\0';
+       kfree(output.pointer);
+       output.length = ACPI_ALLOCATE_BUFFER;
+       output.pointer = NULL;
+       /*
+        * the function to submit TPM operation request to pre-os environment
+        * is updated with function index from SUBREQ to SUBREQ2 since PPI
+        * version 1.1. strcmp() only guarantees a negative value for "less
+        * than", not exactly -1, so compare with < 0.
+        */
+       if (strcmp(version, "1.1") < 0)
+               params[2].integer.value = TPM_PPI_FN_SUBREQ;
+       else
+               params[2].integer.value = TPM_PPI_FN_SUBREQ2;
+       /*
+        * PPI spec defines params[3].type as ACPI_TYPE_PACKAGE. Some BIOS
+        * accept buffer/string/integer type, but some BIOS accept buffer/
+        * string/package type. For PPI version 1.0 and 1.1, use buffer type
+        * for compatibility, and use package type since 1.2 according to spec.
+        */
+       if (strcmp(version, "1.2") < 0) {
+               params[3].type = ACPI_TYPE_BUFFER;
+               params[3].buffer.length = sizeof(req);
+               sscanf(buf, "%d", &req);
+               params[3].buffer.pointer = (char *)&req;
+       } else {
+               params[3].package.count = 1;
+               obj.type = ACPI_TYPE_INTEGER;
+               sscanf(buf, "%llu", &obj.integer.value);
+               params[3].package.elements = &obj;
+       }
+
+       status = acpi_evaluate_object_typed(handle, "_DSM", &input, &output,
+                                           ACPI_TYPE_INTEGER);
+       if (ACPI_FAILURE(status))
+               return -ENOMEM;
+       ret = ((union acpi_object *)output.pointer)->integer.value;
+       if (ret == 0)
+               status = (acpi_status)count;
+       else if (ret == 1)
+               status = -EPERM;
+       else
+               status = -EFAULT;
+       kfree(output.pointer);
+       return status;
+}
+
+/*
+ * sysfs show: platform action required to transition the pending PPI
+ * request (_DSM function 4), rendered as "<code>: <description>".
+ */
+static ssize_t tpm_show_ppi_transition_action(struct device *dev,
+                                             struct device_attribute *attr,
+                                             char *buf)
+{
+       char version[PPI_VERSION_LEN + 1];
+       acpi_handle handle;
+       acpi_status status;
+       struct acpi_object_list input;
+       struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+       union acpi_object params[4];
+       u32 ret;
+       char *info[] = {
+               "None",
+               "Shutdown",
+               "Reboot",
+               "OS Vendor-specific",
+               "Error",
+       };
+       input.count = 4;
+       ppi_assign_params(params, TPM_PPI_FN_VERSION);
+       input.pointer = params;
+       status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
+                                    ACPI_UINT32_MAX, ppi_callback, NULL,
+                                    tpm_device_name, &handle);
+       if (ACPI_FAILURE(status))
+               return -ENXIO;
+
+       status = acpi_evaluate_object_typed(handle, "_DSM", &input, &output,
+                                           ACPI_TYPE_STRING);
+       if (ACPI_FAILURE(status))
+               return -ENOMEM;
+       /* strncpy of exactly PPI_VERSION_LEN bytes does not NUL-terminate;
+        * do it manually before the strcmp() below. */
+       strncpy(version,
+               ((union acpi_object *)output.pointer)->string.pointer,
+               PPI_VERSION_LEN);
+       version[PPI_VERSION_LEN] = '\0';
+       /*
+        * PPI spec defines params[3].type as empty package, but some platforms
+        * (e.g. Capella with PPI 1.0) need integer/string/buffer type, so for
+        * compatibility, define params[3].type as buffer, if PPI version < 1.2.
+        * strcmp() only guarantees a negative value, not exactly -1.
+        */
+       if (strcmp(version, "1.2") < 0) {
+               params[3].type = ACPI_TYPE_BUFFER;
+               params[3].buffer.length =  0;
+               params[3].buffer.pointer = NULL;
+       }
+       params[2].integer.value = TPM_PPI_FN_GETACT;
+       kfree(output.pointer);
+       output.length = ACPI_ALLOCATE_BUFFER;
+       output.pointer = NULL;
+       status = acpi_evaluate_object_typed(handle, "_DSM", &input, &output,
+                                           ACPI_TYPE_INTEGER);
+       if (ACPI_FAILURE(status))
+               return -ENOMEM;
+       ret = ((union acpi_object *)output.pointer)->integer.value;
+       if (ret < ARRAY_SIZE(info) - 1)
+               status = scnprintf(buf, PAGE_SIZE, "%d: %s\n", ret, info[ret]);
+       else
+               status = scnprintf(buf, PAGE_SIZE, "%d: %s\n", ret,
+                                  info[ARRAY_SIZE(info)-1]);
+       kfree(output.pointer);
+       return status;
+}
+
+/* sysfs show: response to the most recent PPI request (_DSM function 5). */
+static ssize_t tpm_show_ppi_response(struct device *dev,
+                                    struct device_attribute *attr,
+                                    char *buf)
+{
+       acpi_handle handle;
+       acpi_status status;
+       struct acpi_object_list input;
+       struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+       union acpi_object params[4];
+       union acpi_object *ret_obj;
+       u64 req;
+
+       input.count = 4;
+       ppi_assign_params(params, TPM_PPI_FN_GETRSP);
+       input.pointer = params;
+       status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
+                                    ACPI_UINT32_MAX, ppi_callback, NULL,
+                                    tpm_device_name, &handle);
+       if (ACPI_FAILURE(status))
+               return -ENXIO;
+
+       status = acpi_evaluate_object_typed(handle, "_DSM", &input, &output,
+                                           ACPI_TYPE_PACKAGE);
+       if (ACPI_FAILURE(status))
+               return -ENOMEM;
+       /*
+        * parameter output.pointer should be of package type, including
+        * 3 integers. The first means function return code, the second means
+        * most recent TPM operation request, and the last means response to
+        * the most recent TPM operation request. Only if the first is 0, and
+        * the second integer is not 0, the response makes sense.
+        */
+       ret_obj = ((union acpi_object *)output.pointer)->package.elements;
+       if (ret_obj->type != ACPI_TYPE_INTEGER) {
+               status = -EINVAL;
+               goto cleanup;
+       }
+       if (ret_obj->integer.value) {
+               status = -EFAULT;
+               goto cleanup;
+       }
+       ret_obj++;
+       if (ret_obj->type != ACPI_TYPE_INTEGER) {
+               status = -EINVAL;
+               goto cleanup;
+       }
+       if (ret_obj->integer.value) {
+               req = ret_obj->integer.value;
+               ret_obj++;
+               if (ret_obj->type != ACPI_TYPE_INTEGER) {
+                       status = -EINVAL;
+                       goto cleanup;
+               }
+               /* Map the well-known PPI response codes to readable text. */
+               if (ret_obj->integer.value == 0)
+                       status = scnprintf(buf, PAGE_SIZE, "%llu %s\n", req,
+                                          "0: Success");
+               else if (ret_obj->integer.value == 0xFFFFFFF0)
+                       status = scnprintf(buf, PAGE_SIZE, "%llu %s\n", req,
+                                          "0xFFFFFFF0: User Abort");
+               else if (ret_obj->integer.value == 0xFFFFFFF1)
+                       status = scnprintf(buf, PAGE_SIZE, "%llu %s\n", req,
+                                          "0xFFFFFFF1: BIOS Failure");
+               else if (ret_obj->integer.value >= 1 &&
+                        ret_obj->integer.value <= 0x00000FFF)
+                       status = scnprintf(buf, PAGE_SIZE, "%llu %llu: %s\n",
+                                          req, ret_obj->integer.value,
+                                          "Corresponding TPM error");
+               else
+                       status = scnprintf(buf, PAGE_SIZE, "%llu %llu: %s\n",
+                                          req, ret_obj->integer.value,
+                                          "Error");
+       } else {
+               status = scnprintf(buf, PAGE_SIZE, "%llu: %s\n",
+                                  ret_obj->integer.value, "No Recent Request");
+       }
+cleanup:
+       kfree(output.pointer);
+       return status;
+}
+
+/*
+ * Build a human-readable list of how the BIOS treats the PPI operation
+ * requests in [start, end] into @buf (one "<op> <status>: <text>" line per
+ * operation whose status maps to a known string).  The PPI version is
+ * queried first and anything older than "1.2" is rejected.
+ *
+ * Returns the number of bytes written to @buf, or a negative errno.
+ */
+static ssize_t show_ppi_operations(char *buf, u32 start, u32 end)
+{
+       char *str = buf;
+       char version[PPI_VERSION_LEN + 1];
+       acpi_handle handle;
+       acpi_status status;
+       struct acpi_object_list input;
+       struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+       union acpi_object params[4];
+       union acpi_object obj;
+       int i;
+       u32 ret;
+       static const char * const info[] = {
+               "Not implemented",
+               "BIOS only",
+               "Blocked for OS by BIOS",
+               "User required",
+               "User not required",
+       };
+       input.count = 4;
+       ppi_assign_params(params, TPM_PPI_FN_VERSION);
+       input.pointer = params;
+       status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
+                                    ACPI_UINT32_MAX, ppi_callback, NULL,
+                                    tpm_device_name, &handle);
+       if (ACPI_FAILURE(status))
+               return -ENXIO;
+
+       status = acpi_evaluate_object_typed(handle, "_DSM", &input, &output,
+                                        ACPI_TYPE_STRING);
+       if (ACPI_FAILURE(status))
+               return -ENOMEM;
+
+       /*
+        * strncpy() does not guarantee NUL termination when the source is
+        * at least PPI_VERSION_LEN long, so terminate explicitly.
+        */
+       strncpy(version,
+               ((union acpi_object *)output.pointer)->string.pointer,
+               PPI_VERSION_LEN);
+       version[PPI_VERSION_LEN] = '\0';
+       kfree(output.pointer);
+       output.length = ACPI_ALLOCATE_BUFFER;
+       output.pointer = NULL;
+       /*
+        * strcmp() only guarantees the sign of its result; comparing the
+        * return value against -1 exactly is non-portable.
+        */
+       if (strcmp(version, "1.2") < 0)
+               return -EPERM;
+
+       params[2].integer.value = TPM_PPI_FN_GETOPR;
+       params[3].package.count = 1;
+       obj.type = ACPI_TYPE_INTEGER;
+       params[3].package.elements = &obj;
+       for (i = start; i <= end; i++) {
+               obj.integer.value = i;
+               status = acpi_evaluate_object_typed(handle, "_DSM",
+                        &input, &output, ACPI_TYPE_INTEGER);
+               if (ACPI_FAILURE(status))
+                       return -ENOMEM;
+
+               ret = ((union acpi_object *)output.pointer)->integer.value;
+               if (ret > 0 && ret < ARRAY_SIZE(info)) {
+                       /*
+                        * Account for bytes already written so repeated
+                        * lines cannot overrun the PAGE_SIZE sysfs buffer.
+                        */
+                       str += scnprintf(str, PAGE_SIZE - (str - buf),
+                                        "%d %d: %s\n", i, ret, info[ret]);
+               }
+               kfree(output.pointer);
+               output.length = ACPI_ALLOCATE_BUFFER;
+               output.pointer = NULL;
+       }
+       return str - buf;
+}
+
+/*
+ * sysfs show handler: list the BIOS treatment of the standard TCG-defined
+ * PPI operations (0 .. PPI_TPM_REQ_MAX).
+ */
+static ssize_t tpm_show_ppi_tcg_operations(struct device *dev,
+                                          struct device_attribute *attr,
+                                          char *buf)
+{
+       return show_ppi_operations(buf, 0, PPI_TPM_REQ_MAX);
+}
+
+/*
+ * sysfs show handler: list the BIOS treatment of the vendor-specific PPI
+ * operations (PPI_VS_REQ_START .. PPI_VS_REQ_END).
+ */
+static ssize_t tpm_show_ppi_vs_operations(struct device *dev,
+                                         struct device_attribute *attr,
+                                         char *buf)
+{
+       return show_ppi_operations(buf, PPI_VS_REQ_START, PPI_VS_REQ_END);
+}
+
+/* sysfs attributes exposed under the "ppi" kobject; only "request" is
+ * writable (root / group write). */
+static DEVICE_ATTR(version, S_IRUGO, tpm_show_ppi_version, NULL);
+static DEVICE_ATTR(request, S_IRUGO | S_IWUSR | S_IWGRP,
+                  tpm_show_ppi_request, tpm_store_ppi_request);
+static DEVICE_ATTR(transition_action, S_IRUGO,
+                  tpm_show_ppi_transition_action, NULL);
+static DEVICE_ATTR(response, S_IRUGO, tpm_show_ppi_response, NULL);
+static DEVICE_ATTR(tcg_operations, S_IRUGO, tpm_show_ppi_tcg_operations, NULL);
+static DEVICE_ATTR(vs_operations, S_IRUGO, tpm_show_ppi_vs_operations, NULL);
+
+/* NULL-terminated list backing the attribute group below. */
+static struct attribute *ppi_attrs[] = {
+       &dev_attr_version.attr,
+       &dev_attr_request.attr,
+       &dev_attr_transition_action.attr,
+       &dev_attr_response.attr,
+       &dev_attr_tcg_operations.attr,
+       &dev_attr_vs_operations.attr, NULL,
+};
+static struct attribute_group ppi_attr_grp = {
+       .attrs = ppi_attrs
+};
+
+/*
+ * Create the "ppi" sysfs directory under @parent and populate it with the
+ * PPI attribute group.  Returns 0 on success or a negative errno.
+ */
+ssize_t sys_add_ppi(struct kobject *parent)
+{
+       struct kobject *ppi;
+
+       ppi = kobject_create_and_add("ppi", parent);
+       /* kobject_create_and_add() returns NULL on allocation failure;
+        * the original passed that NULL straight to sysfs_create_group(). */
+       if (!ppi)
+               return -ENOMEM;
+       if (sysfs_create_group(ppi, &ppi_attr_grp))
+               return -EFAULT;
+       return 0;
+}
+EXPORT_SYMBOL_GPL(sys_add_ppi);
+
+MODULE_LICENSE("GPL");
index c4be3519a587c4fe33c3a14be17ba72e6067d9a0..6bdf2671254f405e03bfcca83cebba4cbae2e50c 100644 (file)
@@ -705,6 +705,7 @@ out_err:
        return rc;
 }
 
+#if defined(CONFIG_PNP) || defined(CONFIG_PM_SLEEP)
 static void tpm_tis_reenable_interrupts(struct tpm_chip *chip)
 {
        u32 intmask;
@@ -725,7 +726,7 @@ static void tpm_tis_reenable_interrupts(struct tpm_chip *chip)
        iowrite32(intmask,
                  chip->vendor.iobase + TPM_INT_ENABLE(chip->vendor.locality));
 }
-
+#endif
 
 #ifdef CONFIG_PNP
 static int __devinit tpm_tis_pnp_init(struct pnp_dev *pnp_dev,
index 7bedeec08524bd54f0d6a085c7c97dd4ee8c3536..a6a22237e860e9de980b1aeed3d86baa0e32cd24 100644 (file)
@@ -2,7 +2,7 @@
 # Makefile for mxs specific clk
 #
 
-obj-y += clk.o clk-pll.o clk-ref.o clk-div.o clk-frac.o
+obj-y += clk.o clk-pll.o clk-ref.o clk-div.o clk-frac.o clk-ssp.o
 
 obj-$(CONFIG_SOC_IMX23) += clk-imx23.o
 obj-$(CONFIG_SOC_IMX28) += clk-imx28.o
diff --git a/drivers/clk/mxs/clk-ssp.c b/drivers/clk/mxs/clk-ssp.c
new file mode 100644 (file)
index 0000000..af7bdbf
--- /dev/null
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2012 DENX Software Engineering, GmbH
+ *
+ * Pulled from code:
+ * Portions copyright (C) 2003 Russell King, PXA MMCI Driver
+ * Portions copyright (C) 2004-2005 Pierre Ossman, W83L51xD SD/MMC driver
+ *
+ * Copyright 2008 Embedded Alley Solutions, Inc.
+ * Copyright 2009-2011 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/spi/mxs-spi.h>
+
+/*
+ * Program the SSP TIMING register so the SSP bit clock approximates the
+ * requested @rate, derived from the SSP input clock.
+ *
+ * The search walks the even CLOCK_DIVIDE values 2..254 and, for each,
+ * computes the corresponding CLOCK_RATE value; the first combination
+ * whose rate value fits in 8 bits (<= 255) is used.  If none fits, an
+ * error is logged and the register is left untouched.
+ *
+ * The actual resulting clock (ssp_clk / divide / (1 + rate)) is cached
+ * in ssp->clk_rate.
+ */
+void mxs_ssp_set_clk_rate(struct mxs_ssp *ssp, unsigned int rate)
+{
+       unsigned int ssp_clk, ssp_sck;
+       u32 clock_divide, clock_rate;
+       u32 val;
+
+       ssp_clk = clk_get_rate(ssp->clk);
+
+       for (clock_divide = 2; clock_divide <= 254; clock_divide += 2) {
+               clock_rate = DIV_ROUND_UP(ssp_clk, rate * clock_divide);
+               clock_rate = (clock_rate > 0) ? clock_rate - 1 : 0;
+               if (clock_rate <= 255)
+                       break;
+       }
+
+       if (clock_divide > 254) {
+               dev_err(ssp->dev,
+                       "%s: cannot set clock to %d\n", __func__, rate);
+               return;
+       }
+
+       ssp_sck = ssp_clk / clock_divide / (1 + clock_rate);
+
+       /* Read-modify-write: replace only the DIVIDE and RATE fields. */
+       val = readl(ssp->base + HW_SSP_TIMING(ssp));
+       val &= ~(BM_SSP_TIMING_CLOCK_DIVIDE | BM_SSP_TIMING_CLOCK_RATE);
+       val |= BF_SSP(clock_divide, TIMING_CLOCK_DIVIDE);
+       val |= BF_SSP(clock_rate, TIMING_CLOCK_RATE);
+       writel(val, ssp->base + HW_SSP_TIMING(ssp));
+
+       ssp->clk_rate = ssp_sck;
+
+       dev_dbg(ssp->dev,
+               "%s: clock_divide %d, clock_rate %d, ssp_clk %d, rate_actual %d, rate_requested %d\n",
+               __func__, clock_divide, clock_rate, ssp_clk, ssp_sck, rate);
+}
+EXPORT_SYMBOL_GPL(mxs_ssp_set_clk_rate);
index 98b06baafcc64dd95c2a16965ea261210080c324..a5f7829f27993b8fefea105357229413b78e5309 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
 
 struct sh_cmt_priv {
        void __iomem *mapbase;
@@ -52,6 +53,7 @@ struct sh_cmt_priv {
        struct clock_event_device ced;
        struct clocksource cs;
        unsigned long total_cycles;
+       bool cs_enabled;
 };
 
 static DEFINE_RAW_SPINLOCK(sh_cmt_lock);
@@ -155,6 +157,9 @@ static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate)
 {
        int k, ret;
 
+       pm_runtime_get_sync(&p->pdev->dev);
+       dev_pm_syscore_device(&p->pdev->dev, true);
+
        /* enable clock */
        ret = clk_enable(p->clk);
        if (ret) {
@@ -221,6 +226,9 @@ static void sh_cmt_disable(struct sh_cmt_priv *p)
 
        /* stop clock */
        clk_disable(p->clk);
+
+       dev_pm_syscore_device(&p->pdev->dev, false);
+       pm_runtime_put(&p->pdev->dev);
 }
 
 /* private flags */
@@ -451,22 +459,42 @@ static int sh_cmt_clocksource_enable(struct clocksource *cs)
        int ret;
        struct sh_cmt_priv *p = cs_to_sh_cmt(cs);
 
+       WARN_ON(p->cs_enabled);
+
        p->total_cycles = 0;
 
        ret = sh_cmt_start(p, FLAG_CLOCKSOURCE);
-       if (!ret)
+       if (!ret) {
                __clocksource_updatefreq_hz(cs, p->rate);
+               p->cs_enabled = true;
+       }
        return ret;
 }
 
 static void sh_cmt_clocksource_disable(struct clocksource *cs)
 {
-       sh_cmt_stop(cs_to_sh_cmt(cs), FLAG_CLOCKSOURCE);
+       struct sh_cmt_priv *p = cs_to_sh_cmt(cs);
+
+       WARN_ON(!p->cs_enabled);
+
+       sh_cmt_stop(p, FLAG_CLOCKSOURCE);
+       p->cs_enabled = false;
+}
+
+/* Clocksource suspend: stop the CMT channel, then let the generic PM
+ * domain power the device off via the syscore helper. */
+static void sh_cmt_clocksource_suspend(struct clocksource *cs)
+{
+       struct sh_cmt_priv *p = cs_to_sh_cmt(cs);
+
+       sh_cmt_stop(p, FLAG_CLOCKSOURCE);
+       pm_genpd_syscore_poweroff(&p->pdev->dev);
 }
 
 static void sh_cmt_clocksource_resume(struct clocksource *cs)
 {
-       sh_cmt_start(cs_to_sh_cmt(cs), FLAG_CLOCKSOURCE);
+       struct sh_cmt_priv *p = cs_to_sh_cmt(cs);
+
+       pm_genpd_syscore_poweron(&p->pdev->dev);
+       sh_cmt_start(p, FLAG_CLOCKSOURCE);
 }
 
 static int sh_cmt_register_clocksource(struct sh_cmt_priv *p,
@@ -479,7 +507,7 @@ static int sh_cmt_register_clocksource(struct sh_cmt_priv *p,
        cs->read = sh_cmt_clocksource_read;
        cs->enable = sh_cmt_clocksource_enable;
        cs->disable = sh_cmt_clocksource_disable;
-       cs->suspend = sh_cmt_clocksource_disable;
+       cs->suspend = sh_cmt_clocksource_suspend;
        cs->resume = sh_cmt_clocksource_resume;
        cs->mask = CLOCKSOURCE_MASK(sizeof(unsigned long) * 8);
        cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
@@ -562,6 +590,16 @@ static int sh_cmt_clock_event_next(unsigned long delta,
        return 0;
 }
 
+/* Clock-event PM callbacks: power the CMT device off/on through the
+ * generic PM domain syscore helpers. */
+static void sh_cmt_clock_event_suspend(struct clock_event_device *ced)
+{
+       pm_genpd_syscore_poweroff(&ced_to_sh_cmt(ced)->pdev->dev);
+}
+
+static void sh_cmt_clock_event_resume(struct clock_event_device *ced)
+{
+       pm_genpd_syscore_poweron(&ced_to_sh_cmt(ced)->pdev->dev);
+}
+
 static void sh_cmt_register_clockevent(struct sh_cmt_priv *p,
                                       char *name, unsigned long rating)
 {
@@ -576,6 +614,8 @@ static void sh_cmt_register_clockevent(struct sh_cmt_priv *p,
        ced->cpumask = cpumask_of(0);
        ced->set_next_event = sh_cmt_clock_event_next;
        ced->set_mode = sh_cmt_clock_event_mode;
+       ced->suspend = sh_cmt_clock_event_suspend;
+       ced->resume = sh_cmt_clock_event_resume;
 
        dev_info(&p->pdev->dev, "used for clock events\n");
        clockevents_register_device(ced);
@@ -670,6 +710,7 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
                dev_err(&p->pdev->dev, "registration failed\n");
                goto err1;
        }
+       p->cs_enabled = false;
 
        ret = setup_irq(irq, &p->irqaction);
        if (ret) {
@@ -688,14 +729,17 @@ err0:
 static int __devinit sh_cmt_probe(struct platform_device *pdev)
 {
        struct sh_cmt_priv *p = platform_get_drvdata(pdev);
+       struct sh_timer_config *cfg = pdev->dev.platform_data;
        int ret;
 
-       if (!is_early_platform_device(pdev))
-               pm_genpd_dev_always_on(&pdev->dev, true);
+       if (!is_early_platform_device(pdev)) {
+               pm_runtime_set_active(&pdev->dev);
+               pm_runtime_enable(&pdev->dev);
+       }
 
        if (p) {
                dev_info(&pdev->dev, "kept as earlytimer\n");
-               return 0;
+               goto out;
        }
 
        p = kmalloc(sizeof(*p), GFP_KERNEL);
@@ -708,8 +752,19 @@ static int __devinit sh_cmt_probe(struct platform_device *pdev)
        if (ret) {
                kfree(p);
                platform_set_drvdata(pdev, NULL);
+               pm_runtime_idle(&pdev->dev);
+               return ret;
        }
-       return ret;
+       if (is_early_platform_device(pdev))
+               return 0;
+
+ out:
+       if (cfg->clockevent_rating || cfg->clocksource_rating)
+               pm_runtime_irq_safe(&pdev->dev);
+       else
+               pm_runtime_idle(&pdev->dev);
+
+       return 0;
 }
 
 static int __devexit sh_cmt_remove(struct platform_device *pdev)
index d9b76ca64a611327c0ea79d979482f638e3c14d3..c5eea858054aa4e67195edf69c1b1951fa8cf11c 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
 
 struct sh_mtu2_priv {
        void __iomem *mapbase;
@@ -123,6 +124,9 @@ static int sh_mtu2_enable(struct sh_mtu2_priv *p)
 {
        int ret;
 
+       pm_runtime_get_sync(&p->pdev->dev);
+       dev_pm_syscore_device(&p->pdev->dev, true);
+
        /* enable clock */
        ret = clk_enable(p->clk);
        if (ret) {
@@ -157,6 +161,9 @@ static void sh_mtu2_disable(struct sh_mtu2_priv *p)
 
        /* stop clock */
        clk_disable(p->clk);
+
+       dev_pm_syscore_device(&p->pdev->dev, false);
+       pm_runtime_put(&p->pdev->dev);
 }
 
 static irqreturn_t sh_mtu2_interrupt(int irq, void *dev_id)
@@ -208,6 +215,16 @@ static void sh_mtu2_clock_event_mode(enum clock_event_mode mode,
        }
 }
 
+/* Clock-event PM callbacks: power the MTU2 device off/on through the
+ * generic PM domain syscore helpers. */
+static void sh_mtu2_clock_event_suspend(struct clock_event_device *ced)
+{
+       pm_genpd_syscore_poweroff(&ced_to_sh_mtu2(ced)->pdev->dev);
+}
+
+static void sh_mtu2_clock_event_resume(struct clock_event_device *ced)
+{
+       pm_genpd_syscore_poweron(&ced_to_sh_mtu2(ced)->pdev->dev);
+}
+
 static void sh_mtu2_register_clockevent(struct sh_mtu2_priv *p,
                                       char *name, unsigned long rating)
 {
@@ -221,6 +238,8 @@ static void sh_mtu2_register_clockevent(struct sh_mtu2_priv *p,
        ced->rating = rating;
        ced->cpumask = cpumask_of(0);
        ced->set_mode = sh_mtu2_clock_event_mode;
+       ced->suspend = sh_mtu2_clock_event_suspend;
+       ced->resume = sh_mtu2_clock_event_resume;
 
        dev_info(&p->pdev->dev, "used for clock events\n");
        clockevents_register_device(ced);
@@ -305,14 +324,17 @@ static int sh_mtu2_setup(struct sh_mtu2_priv *p, struct platform_device *pdev)
 static int __devinit sh_mtu2_probe(struct platform_device *pdev)
 {
        struct sh_mtu2_priv *p = platform_get_drvdata(pdev);
+       struct sh_timer_config *cfg = pdev->dev.platform_data;
        int ret;
 
-       if (!is_early_platform_device(pdev))
-               pm_genpd_dev_always_on(&pdev->dev, true);
+       if (!is_early_platform_device(pdev)) {
+               pm_runtime_set_active(&pdev->dev);
+               pm_runtime_enable(&pdev->dev);
+       }
 
        if (p) {
                dev_info(&pdev->dev, "kept as earlytimer\n");
-               return 0;
+               goto out;
        }
 
        p = kmalloc(sizeof(*p), GFP_KERNEL);
@@ -325,8 +347,19 @@ static int __devinit sh_mtu2_probe(struct platform_device *pdev)
        if (ret) {
                kfree(p);
                platform_set_drvdata(pdev, NULL);
+               pm_runtime_idle(&pdev->dev);
+               return ret;
        }
-       return ret;
+       if (is_early_platform_device(pdev))
+               return 0;
+
+ out:
+       if (cfg->clockevent_rating)
+               pm_runtime_irq_safe(&pdev->dev);
+       else
+               pm_runtime_idle(&pdev->dev);
+
+       return 0;
 }
 
 static int __devexit sh_mtu2_remove(struct platform_device *pdev)
index c1b51d49d106e90d8f4927cf174983878ad58b2a..0cc4add882795ed048e5b98a9e215a758b8ad147 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
 
 struct sh_tmu_priv {
        void __iomem *mapbase;
@@ -43,6 +44,8 @@ struct sh_tmu_priv {
        unsigned long periodic;
        struct clock_event_device ced;
        struct clocksource cs;
+       bool cs_enabled;
+       unsigned int enable_count;
 };
 
 static DEFINE_RAW_SPINLOCK(sh_tmu_lock);
@@ -107,7 +110,7 @@ static void sh_tmu_start_stop_ch(struct sh_tmu_priv *p, int start)
        raw_spin_unlock_irqrestore(&sh_tmu_lock, flags);
 }
 
-static int sh_tmu_enable(struct sh_tmu_priv *p)
+static int __sh_tmu_enable(struct sh_tmu_priv *p)
 {
        int ret;
 
@@ -135,7 +138,18 @@ static int sh_tmu_enable(struct sh_tmu_priv *p)
        return 0;
 }
 
-static void sh_tmu_disable(struct sh_tmu_priv *p)
+/*
+ * Reference-counted enable: only the first user powers the device up
+ * (runtime-PM get + mark as syscore device) and enables the hardware.
+ */
+static int sh_tmu_enable(struct sh_tmu_priv *p)
+{
+       if (p->enable_count++ > 0)
+               return 0;
+
+       pm_runtime_get_sync(&p->pdev->dev);
+       dev_pm_syscore_device(&p->pdev->dev, true);
+
+       return __sh_tmu_enable(p);
+}
+
+static void __sh_tmu_disable(struct sh_tmu_priv *p)
 {
        /* disable channel */
        sh_tmu_start_stop_ch(p, 0);
@@ -147,6 +161,20 @@ static void sh_tmu_disable(struct sh_tmu_priv *p)
        clk_disable(p->clk);
 }
 
+/*
+ * Reference-counted disable: WARN and bail out on underflow; only the
+ * last user disables the hardware and drops the runtime-PM reference.
+ */
+static void sh_tmu_disable(struct sh_tmu_priv *p)
+{
+       if (WARN_ON(p->enable_count == 0))
+               return;
+
+       if (--p->enable_count > 0)
+               return;
+
+       __sh_tmu_disable(p);
+
+       dev_pm_syscore_device(&p->pdev->dev, false);
+       pm_runtime_put(&p->pdev->dev);
+}
+
 static void sh_tmu_set_next(struct sh_tmu_priv *p, unsigned long delta,
                            int periodic)
 {
@@ -203,15 +231,53 @@ static int sh_tmu_clocksource_enable(struct clocksource *cs)
        struct sh_tmu_priv *p = cs_to_sh_tmu(cs);
        int ret;
 
+       if (WARN_ON(p->cs_enabled))
+               return 0;
+
        ret = sh_tmu_enable(p);
-       if (!ret)
+       if (!ret) {
                __clocksource_updatefreq_hz(cs, p->rate);
+               p->cs_enabled = true;
+       }
+
        return ret;
 }
 
 static void sh_tmu_clocksource_disable(struct clocksource *cs)
 {
-       sh_tmu_disable(cs_to_sh_tmu(cs));
+       struct sh_tmu_priv *p = cs_to_sh_tmu(cs);
+
+       if (WARN_ON(!p->cs_enabled))
+               return;
+
+       sh_tmu_disable(p);
+       p->cs_enabled = false;
+}
+
+/*
+ * Clocksource suspend: if the clocksource is active, drop its enable
+ * reference; when that was the last one, disable the hardware directly
+ * (no runtime-PM put here) and power off via the genpd syscore helper.
+ */
+static void sh_tmu_clocksource_suspend(struct clocksource *cs)
+{
+       struct sh_tmu_priv *p = cs_to_sh_tmu(cs);
+
+       if (!p->cs_enabled)
+               return;
+
+       if (--p->enable_count == 0) {
+               __sh_tmu_disable(p);
+               pm_genpd_syscore_poweroff(&p->pdev->dev);
+       }
+}
+
+/*
+ * Clocksource resume: mirror of suspend — re-take the enable reference
+ * and, when it is the first one, power the device back on via the genpd
+ * syscore helper and re-enable the hardware.
+ */
+static void sh_tmu_clocksource_resume(struct clocksource *cs)
+{
+       struct sh_tmu_priv *p = cs_to_sh_tmu(cs);
+
+       if (!p->cs_enabled)
+               return;
+
+       if (p->enable_count++ == 0) {
+               pm_genpd_syscore_poweron(&p->pdev->dev);
+               __sh_tmu_enable(p);
+       }
 }
 
 static int sh_tmu_register_clocksource(struct sh_tmu_priv *p,
@@ -224,6 +290,8 @@ static int sh_tmu_register_clocksource(struct sh_tmu_priv *p,
        cs->read = sh_tmu_clocksource_read;
        cs->enable = sh_tmu_clocksource_enable;
        cs->disable = sh_tmu_clocksource_disable;
+       cs->suspend = sh_tmu_clocksource_suspend;
+       cs->resume = sh_tmu_clocksource_resume;
        cs->mask = CLOCKSOURCE_MASK(32);
        cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
 
@@ -301,6 +369,16 @@ static int sh_tmu_clock_event_next(unsigned long delta,
        return 0;
 }
 
+/* Clock-event PM callbacks: power the TMU device off/on through the
+ * generic PM domain syscore helpers. */
+static void sh_tmu_clock_event_suspend(struct clock_event_device *ced)
+{
+       pm_genpd_syscore_poweroff(&ced_to_sh_tmu(ced)->pdev->dev);
+}
+
+static void sh_tmu_clock_event_resume(struct clock_event_device *ced)
+{
+       pm_genpd_syscore_poweron(&ced_to_sh_tmu(ced)->pdev->dev);
+}
+
 static void sh_tmu_register_clockevent(struct sh_tmu_priv *p,
                                       char *name, unsigned long rating)
 {
@@ -316,6 +394,8 @@ static void sh_tmu_register_clockevent(struct sh_tmu_priv *p,
        ced->cpumask = cpumask_of(0);
        ced->set_next_event = sh_tmu_clock_event_next;
        ced->set_mode = sh_tmu_clock_event_mode;
+       ced->suspend = sh_tmu_clock_event_suspend;
+       ced->resume = sh_tmu_clock_event_resume;
 
        dev_info(&p->pdev->dev, "used for clock events\n");
 
@@ -392,6 +472,8 @@ static int sh_tmu_setup(struct sh_tmu_priv *p, struct platform_device *pdev)
                ret = PTR_ERR(p->clk);
                goto err1;
        }
+       p->cs_enabled = false;
+       p->enable_count = 0;
 
        return sh_tmu_register(p, (char *)dev_name(&p->pdev->dev),
                               cfg->clockevent_rating,
@@ -405,14 +487,17 @@ static int sh_tmu_setup(struct sh_tmu_priv *p, struct platform_device *pdev)
 static int __devinit sh_tmu_probe(struct platform_device *pdev)
 {
        struct sh_tmu_priv *p = platform_get_drvdata(pdev);
+       struct sh_timer_config *cfg = pdev->dev.platform_data;
        int ret;
 
-       if (!is_early_platform_device(pdev))
-               pm_genpd_dev_always_on(&pdev->dev, true);
+       if (!is_early_platform_device(pdev)) {
+               pm_runtime_set_active(&pdev->dev);
+               pm_runtime_enable(&pdev->dev);
+       }
 
        if (p) {
                dev_info(&pdev->dev, "kept as earlytimer\n");
-               return 0;
+               goto out;
        }
 
        p = kmalloc(sizeof(*p), GFP_KERNEL);
@@ -425,8 +510,19 @@ static int __devinit sh_tmu_probe(struct platform_device *pdev)
        if (ret) {
                kfree(p);
                platform_set_drvdata(pdev, NULL);
+               pm_runtime_idle(&pdev->dev);
+               return ret;
        }
-       return ret;
+       if (is_early_platform_device(pdev))
+               return 0;
+
+ out:
+       if (cfg->clockevent_rating || cfg->clocksource_rating)
+               pm_runtime_irq_safe(&pdev->dev);
+       else
+               pm_runtime_idle(&pdev->dev);
+
+       return 0;
 }
 
 static int __devexit sh_tmu_remove(struct platform_device *pdev)
index 3e92b7d3fcd23623c51ac554aa018a626ac71162..fce2000eec31d658efb52251b7f6df60822d1798 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/gfp.h>
 #include <linux/ptrace.h>
 #include <linux/atomic.h>
+#include <linux/pid_namespace.h>
 
 #include <asm/unaligned.h>
 
@@ -127,11 +128,11 @@ void proc_id_connector(struct task_struct *task, int which_id)
        rcu_read_lock();
        cred = __task_cred(task);
        if (which_id == PROC_EVENT_UID) {
-               ev->event_data.id.r.ruid = cred->uid;
-               ev->event_data.id.e.euid = cred->euid;
+               ev->event_data.id.r.ruid = from_kuid_munged(&init_user_ns, cred->uid);
+               ev->event_data.id.e.euid = from_kuid_munged(&init_user_ns, cred->euid);
        } else if (which_id == PROC_EVENT_GID) {
-               ev->event_data.id.r.rgid = cred->gid;
-               ev->event_data.id.e.egid = cred->egid;
+               ev->event_data.id.r.rgid = from_kgid_munged(&init_user_ns, cred->gid);
+               ev->event_data.id.e.egid = from_kgid_munged(&init_user_ns, cred->egid);
        } else {
                rcu_read_unlock();
                return;
@@ -303,6 +304,15 @@ static void cn_proc_mcast_ctl(struct cn_msg *msg,
        if (msg->len != sizeof(*mc_op))
                return;
 
+       /* 
+        * Events are reported with respect to the initial pid
+        * and user namespaces so ignore requestors from
+        * other namespaces.
+        */
+       if ((current_user_ns() != &init_user_ns) ||
+           (task_active_pid_ns(current) != &init_pid_ns))
+               return;
+
        mc_op = (enum proc_cn_mcast_op *)msg->data;
        switch (*mc_op) {
        case PROC_CN_MCAST_LISTEN:
index 82fa4f0f91d6ebdb13534997807b5da125e126fa..965b7811e04f37100151dc441b7713cdee065bbf 100644 (file)
@@ -264,8 +264,7 @@ static int __devinit cn_init(void)
                .input  = dev->input,
        };
 
-       dev->nls = netlink_kernel_create(&init_net, NETLINK_CONNECTOR,
-                                        THIS_MODULE, &cfg);
+       dev->nls = netlink_kernel_create(&init_net, NETLINK_CONNECTOR, &cfg);
        if (!dev->nls)
                return -EIO;
 
index e24a2a1b666661aab161c508bc6696bd3fe7979f..ea512f47b789b49c842a78388f15957fb65d62be 100644 (file)
@@ -179,6 +179,17 @@ config CPU_FREQ_GOV_CONSERVATIVE
 
          If in doubt, say N.
 
+config GENERIC_CPUFREQ_CPU0
+       bool "Generic CPU0 cpufreq driver"
+       depends on HAVE_CLK && REGULATOR && PM_OPP && OF
+       select CPU_FREQ_TABLE
+       help
+         This adds a generic cpufreq driver for CPU0 frequency management.
+         It supports both uniprocessor (UP) and symmetric multiprocessor (SMP)
+         systems which share clock and voltage across all CPUs.
+
+         If in doubt, say N.
+
 menu "x86 CPU frequency scaling drivers"
 depends on X86
 source "drivers/cpufreq/Kconfig.x86"
index 78ff7ee48951b6d6d488f0da4d40e47929608f24..934854ae5eb4a436205d644c76c8011a5c41de3e 100644 (file)
@@ -23,7 +23,8 @@ config X86_ACPI_CPUFREQ
        help
          This driver adds a CPUFreq driver which utilizes the ACPI
          Processor Performance States.
-         This driver also supports Intel Enhanced Speedstep.
+         This driver also supports Intel Enhanced Speedstep and newer
+         AMD CPUs.
 
          To compile this driver as a module, choose M here: the
          module will be called acpi-cpufreq.
@@ -32,6 +33,18 @@ config X86_ACPI_CPUFREQ
 
          If in doubt, say N.
 
+config X86_ACPI_CPUFREQ_CPB
+       default y
+       bool "Legacy cpb sysfs knob support for AMD CPUs"
+       depends on X86_ACPI_CPUFREQ && CPU_SUP_AMD
+       help
+         The powernow-k8 driver used to provide a sysfs knob called "cpb"
+         to disable the Core Performance Boosting feature of AMD CPUs. This
+         file has now been superseded by the more generic "boost" entry.
+
+         By enabling this option the acpi_cpufreq driver provides the old
+         entry in addition to the new boost ones, for compatibility reasons.
+
 config ELAN_CPUFREQ
        tristate "AMD Elan SC400 and SC410"
        select CPU_FREQ_TABLE
@@ -95,7 +108,8 @@ config X86_POWERNOW_K8
        select CPU_FREQ_TABLE
        depends on ACPI && ACPI_PROCESSOR
        help
-         This adds the CPUFreq driver for K8/K10 Opteron/Athlon64 processors.
+         This adds the CPUFreq driver for K8/early Opteron/Athlon64 processors.
+         Support for K10 and newer processors is now in acpi-cpufreq.
 
          To compile this driver as a module, choose M here: the
          module will be called powernow-k8.
index 9531fc2eda22d15591f8ce867950d84a6cadbca9..1bc90e1306d88e0582b76c83abf6bfe98983ea35 100644 (file)
@@ -13,13 +13,15 @@ obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE)     += cpufreq_conservative.o
 # CPUfreq cross-arch helpers
 obj-$(CONFIG_CPU_FREQ_TABLE)           += freq_table.o
 
+obj-$(CONFIG_GENERIC_CPUFREQ_CPU0)     += cpufreq-cpu0.o
+
 ##################################################################################
 # x86 drivers.
 # Link order matters. K8 is preferred to ACPI because of firmware bugs in early
 # K8 systems. ACPI is preferred to all other hardware-specific drivers.
 # speedstep-* is preferred over p4-clockmod.
 
-obj-$(CONFIG_X86_POWERNOW_K8)          += powernow-k8.o mperf.o
+obj-$(CONFIG_X86_POWERNOW_K8)          += powernow-k8.o
 obj-$(CONFIG_X86_ACPI_CPUFREQ)         += acpi-cpufreq.o mperf.o
 obj-$(CONFIG_X86_PCC_CPUFREQ)          += pcc-cpufreq.o
 obj-$(CONFIG_X86_POWERNOW_K6)          += powernow-k6.o
index 56c6c6b4eb4d61043bfdd9b61a95e71ee34f424a..0d048f6a2b23a3bf9476a658cac846ccd321bd3c 100644 (file)
@@ -51,13 +51,19 @@ MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski");
 MODULE_DESCRIPTION("ACPI Processor P-States Driver");
 MODULE_LICENSE("GPL");
 
+#define PFX "acpi-cpufreq: "
+
 enum {
        UNDEFINED_CAPABLE = 0,
        SYSTEM_INTEL_MSR_CAPABLE,
+       SYSTEM_AMD_MSR_CAPABLE,
        SYSTEM_IO_CAPABLE,
 };
 
 #define INTEL_MSR_RANGE                (0xffff)
+#define AMD_MSR_RANGE          (0x7)
+
+#define MSR_K7_HWCR_CPB_DIS    (1ULL << 25)
 
 struct acpi_cpufreq_data {
        struct acpi_processor_performance *acpi_data;
@@ -74,6 +80,116 @@ static struct acpi_processor_performance __percpu *acpi_perf_data;
 static struct cpufreq_driver acpi_cpufreq_driver;
 
 static unsigned int acpi_pstate_strict;
+static bool boost_enabled, boost_supported;
+static struct msr __percpu *msrs;
+
+/*
+ * Report whether frequency boosting is currently enabled on @cpu by
+ * reading the vendor-specific "boost disable" MSR bit: the Turbo disable
+ * bit in MSR_IA32_MISC_ENABLE on Intel, the CPB disable bit in
+ * MSR_K7_HWCR on AMD.  Unknown vendors report false.
+ */
+static bool boost_state(unsigned int cpu)
+{
+       u32 lo, hi;
+       u64 msr;
+
+       switch (boot_cpu_data.x86_vendor) {
+       case X86_VENDOR_INTEL:
+               rdmsr_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &lo, &hi);
+               msr = lo | ((u64)hi << 32);
+               return !(msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
+       case X86_VENDOR_AMD:
+               rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
+               msr = lo | ((u64)hi << 32);
+               return !(msr & MSR_K7_HWCR_CPB_DIS);
+       }
+       return false;
+}
+
+/*
+ * Set (@enable == false) or clear (@enable == true) the per-vendor boost
+ * disable bit on every CPU in @cpumask.  The MSRs are read in bulk into
+ * the per-cpu 'msrs' buffer, modified in place, and written back in
+ * bulk.  Vendors other than Intel/AMD are silently ignored.
+ */
+static void boost_set_msrs(bool enable, const struct cpumask *cpumask)
+{
+       u32 cpu;
+       u32 msr_addr;
+       u64 msr_mask;
+
+       switch (boot_cpu_data.x86_vendor) {
+       case X86_VENDOR_INTEL:
+               msr_addr = MSR_IA32_MISC_ENABLE;
+               msr_mask = MSR_IA32_MISC_ENABLE_TURBO_DISABLE;
+               break;
+       case X86_VENDOR_AMD:
+               msr_addr = MSR_K7_HWCR;
+               msr_mask = MSR_K7_HWCR_CPB_DIS;
+               break;
+       default:
+               return;
+       }
+
+       rdmsr_on_cpus(cpumask, msr_addr, msrs);
+
+       for_each_cpu(cpu, cpumask) {
+               struct msr *reg = per_cpu_ptr(msrs, cpu);
+               /* The MSR bit is a *disable* flag, hence the inversion. */
+               if (enable)
+                       reg->q &= ~msr_mask;
+               else
+                       reg->q |= msr_mask;
+       }
+
+       wrmsr_on_cpus(cpumask, msr_addr, msrs);
+}
+
+/*
+ * Common store handler for the global "boost" and legacy "cpb" sysfs
+ * knobs.  Accepts "0" or "1"; anything else is -EINVAL.  A write that
+ * does not change the current state is a no-op.  CPU hotplug is blocked
+ * while the MSRs of all online CPUs are updated.
+ */
+static ssize_t _store_boost(const char *buf, size_t count)
+{
+       int ret;
+       unsigned long val = 0;
+
+       if (!boost_supported)
+               return -EINVAL;
+
+       ret = kstrtoul(buf, 10, &val);
+       if (ret || (val > 1))
+               return -EINVAL;
+
+       if ((val && boost_enabled) || (!val && !boost_enabled))
+               return count;
+
+       /* Keep the online CPU set stable while rewriting their MSRs. */
+       get_online_cpus();
+
+       boost_set_msrs(val, cpu_online_mask);
+
+       put_online_cpus();
+
+       boost_enabled = val;
+       pr_debug("Core Boosting %sabled.\n", val ? "en" : "dis");
+
+       return count;
+}
+
+/* sysfs store for the global "boost" attribute. */
+static ssize_t store_global_boost(struct kobject *kobj, struct attribute *attr,
+                                 const char *buf, size_t count)
+{
+       return _store_boost(buf, count);
+}
+
+/* sysfs show: current global boost state ("0" or "1"). */
+static ssize_t show_global_boost(struct kobject *kobj,
+                                struct attribute *attr, char *buf)
+{
+       return sprintf(buf, "%u\n", boost_enabled);
+}
+
+/* Global (not per-policy) "boost" attribute, mode 0644. */
+static struct global_attr global_boost = __ATTR(boost, 0644,
+                                               show_global_boost,
+                                               store_global_boost);
+
+#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
+/*
+ * Legacy per-policy "cpb" knob kept for compatibility with the old
+ * powernow-k8 interface (see the X86_ACPI_CPUFREQ_CPB Kconfig help); it
+ * is backed by the same global boost state as the "boost" attribute.
+ */
+static ssize_t store_cpb(struct cpufreq_policy *policy, const char *buf,
+                        size_t count)
+{
+       return _store_boost(buf, count);
+}
+
+static ssize_t show_cpb(struct cpufreq_policy *policy, char *buf)
+{
+       return sprintf(buf, "%u\n", boost_enabled);
+}
+
+static struct freq_attr cpb = __ATTR(cpb, 0644, show_cpb, store_cpb);
+#endif
 
 static int check_est_cpu(unsigned int cpuid)
 {
@@ -82,6 +198,13 @@ static int check_est_cpu(unsigned int cpuid)
        return cpu_has(cpu, X86_FEATURE_EST);
 }
 
+static int check_amd_hwpstate_cpu(unsigned int cpuid)
+{
+       struct cpuinfo_x86 *cpu = &cpu_data(cpuid);
+
+       return cpu_has(cpu, X86_FEATURE_HW_PSTATE);
+}
+
 static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data)
 {
        struct acpi_processor_performance *perf;
@@ -101,7 +224,11 @@ static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data)
        int i;
        struct acpi_processor_performance *perf;
 
-       msr &= INTEL_MSR_RANGE;
+       if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+               msr &= AMD_MSR_RANGE;
+       else
+               msr &= INTEL_MSR_RANGE;
+
        perf = data->acpi_data;
 
        for (i = 0; data->freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
@@ -115,6 +242,7 @@ static unsigned extract_freq(u32 val, struct acpi_cpufreq_data *data)
 {
        switch (data->cpu_feature) {
        case SYSTEM_INTEL_MSR_CAPABLE:
+       case SYSTEM_AMD_MSR_CAPABLE:
                return extract_msr(val, data);
        case SYSTEM_IO_CAPABLE:
                return extract_io(val, data);
@@ -150,6 +278,7 @@ static void do_drv_read(void *_cmd)
 
        switch (cmd->type) {
        case SYSTEM_INTEL_MSR_CAPABLE:
+       case SYSTEM_AMD_MSR_CAPABLE:
                rdmsr(cmd->addr.msr.reg, cmd->val, h);
                break;
        case SYSTEM_IO_CAPABLE:
@@ -174,6 +303,9 @@ static void do_drv_write(void *_cmd)
                lo = (lo & ~INTEL_MSR_RANGE) | (cmd->val & INTEL_MSR_RANGE);
                wrmsr(cmd->addr.msr.reg, lo, hi);
                break;
+       case SYSTEM_AMD_MSR_CAPABLE:
+               wrmsr(cmd->addr.msr.reg, cmd->val, 0);
+               break;
        case SYSTEM_IO_CAPABLE:
                acpi_os_write_port((acpi_io_address)cmd->addr.io.port,
                                cmd->val,
@@ -217,6 +349,10 @@ static u32 get_cur_val(const struct cpumask *mask)
                cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
                cmd.addr.msr.reg = MSR_IA32_PERF_STATUS;
                break;
+       case SYSTEM_AMD_MSR_CAPABLE:
+               cmd.type = SYSTEM_AMD_MSR_CAPABLE;
+               cmd.addr.msr.reg = MSR_AMD_PERF_STATUS;
+               break;
        case SYSTEM_IO_CAPABLE:
                cmd.type = SYSTEM_IO_CAPABLE;
                perf = per_cpu(acfreq_data, cpumask_first(mask))->acpi_data;
@@ -326,6 +462,11 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
                cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
                cmd.val = (u32) perf->states[next_perf_state].control;
                break;
+       case SYSTEM_AMD_MSR_CAPABLE:
+               cmd.type = SYSTEM_AMD_MSR_CAPABLE;
+               cmd.addr.msr.reg = MSR_AMD_PERF_CTL;
+               cmd.val = (u32) perf->states[next_perf_state].control;
+               break;
        case SYSTEM_IO_CAPABLE:
                cmd.type = SYSTEM_IO_CAPABLE;
                cmd.addr.io.port = perf->control_register.address;
@@ -419,6 +560,44 @@ static void free_acpi_perf_data(void)
        free_percpu(acpi_perf_data);
 }
 
+static int boost_notify(struct notifier_block *nb, unsigned long action,
+                     void *hcpu)
+{
+       unsigned cpu = (long)hcpu;
+       const struct cpumask *cpumask;
+
+       cpumask = get_cpu_mask(cpu);
+
+       /*
+        * Clear the boost-disable bit on the CPU_DOWN path so that
+        * this cpu cannot block the remaining ones from boosting. On
+        * the CPU_UP path we simply keep the boost-disable flag in
+        * sync with the current global state.
+        */
+
+       switch (action) {
+       case CPU_UP_PREPARE:
+       case CPU_UP_PREPARE_FROZEN:
+               boost_set_msrs(boost_enabled, cpumask);
+               break;
+
+       case CPU_DOWN_PREPARE:
+       case CPU_DOWN_PREPARE_FROZEN:
+               boost_set_msrs(1, cpumask);
+               break;
+
+       default:
+               break;
+       }
+
+       return NOTIFY_OK;
+}
+
+
+static struct notifier_block boost_nb = {
+       .notifier_call          = boost_notify,
+};
+
 /*
  * acpi_cpufreq_early_init - initialize ACPI P-States library
  *
@@ -559,6 +738,14 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
                policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
                cpumask_copy(policy->cpus, cpu_core_mask(cpu));
        }
+
+       if (check_amd_hwpstate_cpu(cpu) && !acpi_pstate_strict) {
+               cpumask_clear(policy->cpus);
+               cpumask_set_cpu(cpu, policy->cpus);
+               cpumask_copy(policy->related_cpus, cpu_sibling_mask(cpu));
+               policy->shared_type = CPUFREQ_SHARED_TYPE_HW;
+               pr_info_once(PFX "overriding BIOS provided _PSD data\n");
+       }
 #endif
 
        /* capability check */
@@ -580,12 +767,16 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
                break;
        case ACPI_ADR_SPACE_FIXED_HARDWARE:
                pr_debug("HARDWARE addr space\n");
-               if (!check_est_cpu(cpu)) {
-                       result = -ENODEV;
-                       goto err_unreg;
+               if (check_est_cpu(cpu)) {
+                       data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE;
+                       break;
                }
-               data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE;
-               break;
+               if (check_amd_hwpstate_cpu(cpu)) {
+                       data->cpu_feature = SYSTEM_AMD_MSR_CAPABLE;
+                       break;
+               }
+               result = -ENODEV;
+               goto err_unreg;
        default:
                pr_debug("Unknown addr space %d\n",
                        (u32) (perf->control_register.space_id));
@@ -718,6 +909,7 @@ static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
 
 static struct freq_attr *acpi_cpufreq_attr[] = {
        &cpufreq_freq_attr_scaling_available_freqs,
+       NULL,   /* this is a placeholder for cpb, do not remove */
        NULL,
 };
 
@@ -733,6 +925,49 @@ static struct cpufreq_driver acpi_cpufreq_driver = {
        .attr           = acpi_cpufreq_attr,
 };
 
+static void __init acpi_cpufreq_boost_init(void)
+{
+       if (boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA)) {
+               msrs = msrs_alloc();
+
+               if (!msrs)
+                       return;
+
+               boost_supported = true;
+               boost_enabled = boost_state(0);
+
+               get_online_cpus();
+
+               /* Force all MSRs to the same value */
+               boost_set_msrs(boost_enabled, cpu_online_mask);
+
+               register_cpu_notifier(&boost_nb);
+
+               put_online_cpus();
+       } else
+               global_boost.attr.mode = 0444;
+
+       /* We create the boost file in any case, though for systems without
+        * hardware support it will be read-only and hardwired to return 0.
+        */
+       if (sysfs_create_file(cpufreq_global_kobject, &(global_boost.attr)))
+               pr_warn(PFX "could not register global boost sysfs file\n");
+       else
+               pr_debug("registered global boost sysfs file\n");
+}
+
+static void __exit acpi_cpufreq_boost_exit(void)
+{
+       sysfs_remove_file(cpufreq_global_kobject, &(global_boost.attr));
+
+       if (msrs) {
+               unregister_cpu_notifier(&boost_nb);
+
+               msrs_free(msrs);
+               msrs = NULL;
+       }
+}
+
 static int __init acpi_cpufreq_init(void)
 {
        int ret;
@@ -746,9 +981,32 @@ static int __init acpi_cpufreq_init(void)
        if (ret)
                return ret;
 
+#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
+       /* this is a sysfs file with a strange name and an even stranger
+        * semantic - per CPU instantiation, but system global effect.
+        * Lets enable it only on AMD CPUs for compatibility reasons and
+        * only if configured. This is considered legacy code, which
+        * will probably be removed at some point in the future.
+        */
+       if (check_amd_hwpstate_cpu(0)) {
+               struct freq_attr **iter;
+
+               pr_debug("adding sysfs entry for cpb\n");
+
+               for (iter = acpi_cpufreq_attr; *iter != NULL; iter++)
+                       ;
+
+               /* make sure there is a terminator behind it */
+               if (iter[1] == NULL)
+                       *iter = &cpb;
+       }
+#endif
+
        ret = cpufreq_register_driver(&acpi_cpufreq_driver);
        if (ret)
                free_acpi_perf_data();
+       else
+               acpi_cpufreq_boost_init();
 
        return ret;
 }
@@ -757,6 +1015,8 @@ static void __exit acpi_cpufreq_exit(void)
 {
        pr_debug("acpi_cpufreq_exit\n");
 
+       acpi_cpufreq_boost_exit();
+
        cpufreq_unregister_driver(&acpi_cpufreq_driver);
 
        free_acpi_perf_data();
diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c
new file mode 100644 (file)
index 0000000..e915827
--- /dev/null
@@ -0,0 +1,269 @@
+/*
+ * Copyright (C) 2012 Freescale Semiconductor, Inc.
+ *
+ * The OPP code in function cpu0_set_target() is reused from
+ * drivers/cpufreq/omap-cpufreq.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt)    KBUILD_MODNAME ": " fmt
+
+#include <linux/clk.h>
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/opp.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+
+static unsigned int transition_latency;
+static unsigned int voltage_tolerance; /* in percentage */
+
+static struct device *cpu_dev;
+static struct clk *cpu_clk;
+static struct regulator *cpu_reg;
+static struct cpufreq_frequency_table *freq_table;
+
+static int cpu0_verify_speed(struct cpufreq_policy *policy)
+{
+       return cpufreq_frequency_table_verify(policy, freq_table);
+}
+
+static unsigned int cpu0_get_speed(unsigned int cpu)
+{
+       return clk_get_rate(cpu_clk) / 1000;
+}
+
+static int cpu0_set_target(struct cpufreq_policy *policy,
+                          unsigned int target_freq, unsigned int relation)
+{
+       struct cpufreq_freqs freqs;
+       struct opp *opp;
+       unsigned long freq_Hz, volt = 0, volt_old = 0, tol = 0;
+       unsigned int index, cpu;
+       int ret;
+
+       ret = cpufreq_frequency_table_target(policy, freq_table, target_freq,
+                                            relation, &index);
+       if (ret) {
+               pr_err("failed to match target freqency %d: %d\n",
+                      target_freq, ret);
+               return ret;
+       }
+
+       freq_Hz = clk_round_rate(cpu_clk, freq_table[index].frequency * 1000);
+       if (freq_Hz < 0)
+               freq_Hz = freq_table[index].frequency * 1000;
+       freqs.new = freq_Hz / 1000;
+       freqs.old = clk_get_rate(cpu_clk) / 1000;
+
+       if (freqs.old == freqs.new)
+               return 0;
+
+       for_each_online_cpu(cpu) {
+               freqs.cpu = cpu;
+               cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+       }
+
+       if (cpu_reg) {
+               opp = opp_find_freq_ceil(cpu_dev, &freq_Hz);
+               if (IS_ERR(opp)) {
+                       pr_err("failed to find OPP for %ld\n", freq_Hz);
+                       return PTR_ERR(opp);
+               }
+               volt = opp_get_voltage(opp);
+               tol = volt * voltage_tolerance / 100;
+               volt_old = regulator_get_voltage(cpu_reg);
+       }
+
+       pr_debug("%u MHz, %ld mV --> %u MHz, %ld mV\n",
+                freqs.old / 1000, volt_old ? volt_old / 1000 : -1,
+                freqs.new / 1000, volt ? volt / 1000 : -1);
+
+       /* scaling up?  scale voltage before frequency */
+       if (cpu_reg && freqs.new > freqs.old) {
+               ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
+               if (ret) {
+                       pr_err("failed to scale voltage up: %d\n", ret);
+                       freqs.new = freqs.old;
+                       return ret;
+               }
+       }
+
+       ret = clk_set_rate(cpu_clk, freqs.new * 1000);
+       if (ret) {
+               pr_err("failed to set clock rate: %d\n", ret);
+               if (cpu_reg)
+                       regulator_set_voltage_tol(cpu_reg, volt_old, tol);
+               return ret;
+       }
+
+       /* scaling down?  scale voltage after frequency */
+       if (cpu_reg && freqs.new < freqs.old) {
+               ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
+               if (ret) {
+                       pr_err("failed to scale voltage down: %d\n", ret);
+                       clk_set_rate(cpu_clk, freqs.old * 1000);
+                       freqs.new = freqs.old;
+                       return ret;
+               }
+       }
+
+       for_each_online_cpu(cpu) {
+               freqs.cpu = cpu;
+               cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+       }
+
+       return 0;
+}
+
+static int cpu0_cpufreq_init(struct cpufreq_policy *policy)
+{
+       int ret;
+
+       if (policy->cpu != 0)
+               return -EINVAL;
+
+       ret = cpufreq_frequency_table_cpuinfo(policy, freq_table);
+       if (ret) {
+               pr_err("invalid frequency table: %d\n", ret);
+               return ret;
+       }
+
+       policy->cpuinfo.transition_latency = transition_latency;
+       policy->cur = clk_get_rate(cpu_clk) / 1000;
+
+       /*
+        * The driver only supports the SMP configuartion where all processors
+        * share the clock and voltage and clock.  Use cpufreq affected_cpus
+        * interface to have all CPUs scaled together.
+        */
+       policy->shared_type = CPUFREQ_SHARED_TYPE_ANY;
+       cpumask_setall(policy->cpus);
+
+       cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
+
+       return 0;
+}
+
+static int cpu0_cpufreq_exit(struct cpufreq_policy *policy)
+{
+       cpufreq_frequency_table_put_attr(policy->cpu);
+
+       return 0;
+}
+
+static struct freq_attr *cpu0_cpufreq_attr[] = {
+       &cpufreq_freq_attr_scaling_available_freqs,
+       NULL,
+};
+
+static struct cpufreq_driver cpu0_cpufreq_driver = {
+       .flags = CPUFREQ_STICKY,
+       .verify = cpu0_verify_speed,
+       .target = cpu0_set_target,
+       .get = cpu0_get_speed,
+       .init = cpu0_cpufreq_init,
+       .exit = cpu0_cpufreq_exit,
+       .name = "generic_cpu0",
+       .attr = cpu0_cpufreq_attr,
+};
+
+static int __devinit cpu0_cpufreq_driver_init(void)
+{
+       struct device_node *np;
+       int ret;
+
+       np = of_find_node_by_path("/cpus/cpu@0");
+       if (!np) {
+               pr_err("failed to find cpu0 node\n");
+               return -ENOENT;
+       }
+
+       cpu_dev = get_cpu_device(0);
+       if (!cpu_dev) {
+               pr_err("failed to get cpu0 device\n");
+               ret = -ENODEV;
+               goto out_put_node;
+       }
+
+       cpu_dev->of_node = np;
+
+       cpu_clk = clk_get(cpu_dev, NULL);
+       if (IS_ERR(cpu_clk)) {
+               ret = PTR_ERR(cpu_clk);
+               pr_err("failed to get cpu0 clock: %d\n", ret);
+               goto out_put_node;
+       }
+
+       cpu_reg = regulator_get(cpu_dev, "cpu0");
+       if (IS_ERR(cpu_reg)) {
+               pr_warn("failed to get cpu0 regulator\n");
+               cpu_reg = NULL;
+       }
+
+       ret = of_init_opp_table(cpu_dev);
+       if (ret) {
+               pr_err("failed to init OPP table: %d\n", ret);
+               goto out_put_node;
+       }
+
+       ret = opp_init_cpufreq_table(cpu_dev, &freq_table);
+       if (ret) {
+               pr_err("failed to init cpufreq table: %d\n", ret);
+               goto out_put_node;
+       }
+
+       of_property_read_u32(np, "voltage-tolerance", &voltage_tolerance);
+
+       if (of_property_read_u32(np, "clock-latency", &transition_latency))
+               transition_latency = CPUFREQ_ETERNAL;
+
+       if (cpu_reg) {
+               struct opp *opp;
+               unsigned long min_uV, max_uV;
+               int i;
+
+               /*
+                * OPP is maintained in order of increasing frequency, and
+                * freq_table initialised from OPP is therefore sorted in the
+                * same order.
+                */
+               for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++)
+                       ;
+               opp = opp_find_freq_exact(cpu_dev,
+                               freq_table[0].frequency * 1000, true);
+               min_uV = opp_get_voltage(opp);
+               opp = opp_find_freq_exact(cpu_dev,
+                               freq_table[i-1].frequency * 1000, true);
+               max_uV = opp_get_voltage(opp);
+               ret = regulator_set_voltage_time(cpu_reg, min_uV, max_uV);
+               if (ret > 0)
+                       transition_latency += ret * 1000;
+       }
+
+       ret = cpufreq_register_driver(&cpu0_cpufreq_driver);
+       if (ret) {
+               pr_err("failed register driver: %d\n", ret);
+               goto out_free_table;
+       }
+
+       of_node_put(np);
+       return 0;
+
+out_free_table:
+       opp_free_cpufreq_table(cpu_dev, &freq_table);
+out_put_node:
+       of_node_put(np);
+       return ret;
+}
+late_initcall(cpu0_cpufreq_driver_init);
+
+MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>");
+MODULE_DESCRIPTION("Generic CPU0 cpufreq driver");
+MODULE_LICENSE("GPL");
index 235a340e81f20bcb63805f9c911f4fd99eafeab8..a152af7e1991eff7db4dcd97c3d0d777b8812f37 100644 (file)
@@ -466,7 +466,7 @@ static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
        delay -= jiffies % delay;
 
        dbs_info->enable = 1;
-       INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
+       INIT_DEFERRABLE_WORK(&dbs_info->work, do_dbs_timer);
        schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay);
 }
 
@@ -504,6 +504,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                                j_dbs_info->prev_cpu_nice =
                                                kcpustat_cpu(j).cpustat[CPUTIME_NICE];
                }
+               this_dbs_info->cpu = cpu;
                this_dbs_info->down_skip = 0;
                this_dbs_info->requested_freq = policy->cur;
 
@@ -583,6 +584,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                        __cpufreq_driver_target(
                                        this_dbs_info->cur_policy,
                                        policy->min, CPUFREQ_RELATION_L);
+               dbs_check_cpu(this_dbs_info);
                mutex_unlock(&this_dbs_info->timer_mutex);
 
                break;
index 836e9b062e5ec4a2e08c935d4c49f17a79ef124b..396322f2a83ffc22fd6249a3133c588b195cf25b 100644 (file)
@@ -644,7 +644,7 @@ static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
                delay -= jiffies % delay;
 
        dbs_info->sample_type = DBS_NORMAL_SAMPLE;
-       INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
+       INIT_DEFERRABLE_WORK(&dbs_info->work, do_dbs_timer);
        schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay);
 }
 
@@ -761,6 +761,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                else if (policy->min > this_dbs_info->cur_policy->cur)
                        __cpufreq_driver_target(this_dbs_info->cur_policy,
                                policy->min, CPUFREQ_RELATION_L);
+               dbs_check_cpu(this_dbs_info);
                mutex_unlock(&this_dbs_info->timer_mutex);
                break;
        }
index cbf48fbca881f534f3eec4eba81d97f788115eac..e2dc436099d1006ed9d879cc802eedda7eae9deb 100644 (file)
@@ -56,7 +56,7 @@ union msr_longhaul {
 /*
  * VIA C3 Samuel 1  & Samuel 2 (stepping 0)
  */
-static const int __cpuinitdata samuel1_mults[16] = {
+static const int __cpuinitconst samuel1_mults[16] = {
        -1, /* 0000 -> RESERVED */
        30, /* 0001 ->  3.0x */
        40, /* 0010 ->  4.0x */
@@ -75,7 +75,7 @@ static const int __cpuinitdata samuel1_mults[16] = {
        -1, /* 1111 -> RESERVED */
 };
 
-static const int __cpuinitdata samuel1_eblcr[16] = {
+static const int __cpuinitconst samuel1_eblcr[16] = {
        50, /* 0000 -> RESERVED */
        30, /* 0001 ->  3.0x */
        40, /* 0010 ->  4.0x */
@@ -97,7 +97,7 @@ static const int __cpuinitdata samuel1_eblcr[16] = {
 /*
  * VIA C3 Samuel2 Stepping 1->15
  */
-static const int __cpuinitdata samuel2_eblcr[16] = {
+static const int __cpuinitconst samuel2_eblcr[16] = {
        50,  /* 0000 ->  5.0x */
        30,  /* 0001 ->  3.0x */
        40,  /* 0010 ->  4.0x */
@@ -119,7 +119,7 @@ static const int __cpuinitdata samuel2_eblcr[16] = {
 /*
  * VIA C3 Ezra
  */
-static const int __cpuinitdata ezra_mults[16] = {
+static const int __cpuinitconst ezra_mults[16] = {
        100, /* 0000 -> 10.0x */
        30,  /* 0001 ->  3.0x */
        40,  /* 0010 ->  4.0x */
@@ -138,7 +138,7 @@ static const int __cpuinitdata ezra_mults[16] = {
        120, /* 1111 -> 12.0x */
 };
 
-static const int __cpuinitdata ezra_eblcr[16] = {
+static const int __cpuinitconst ezra_eblcr[16] = {
        50,  /* 0000 ->  5.0x */
        30,  /* 0001 ->  3.0x */
        40,  /* 0010 ->  4.0x */
@@ -160,7 +160,7 @@ static const int __cpuinitdata ezra_eblcr[16] = {
 /*
  * VIA C3 (Ezra-T) [C5M].
  */
-static const int __cpuinitdata ezrat_mults[32] = {
+static const int __cpuinitconst ezrat_mults[32] = {
        100, /* 0000 -> 10.0x */
        30,  /* 0001 ->  3.0x */
        40,  /* 0010 ->  4.0x */
@@ -196,7 +196,7 @@ static const int __cpuinitdata ezrat_mults[32] = {
        -1,  /* 1111 -> RESERVED (12.0x) */
 };
 
-static const int __cpuinitdata ezrat_eblcr[32] = {
+static const int __cpuinitconst ezrat_eblcr[32] = {
        50,  /* 0000 ->  5.0x */
        30,  /* 0001 ->  3.0x */
        40,  /* 0010 ->  4.0x */
@@ -235,7 +235,7 @@ static const int __cpuinitdata ezrat_eblcr[32] = {
 /*
  * VIA C3 Nehemiah */
 
-static const int __cpuinitdata nehemiah_mults[32] = {
+static const int __cpuinitconst nehemiah_mults[32] = {
        100, /* 0000 -> 10.0x */
        -1, /* 0001 -> 16.0x */
        40,  /* 0010 ->  4.0x */
@@ -270,7 +270,7 @@ static const int __cpuinitdata nehemiah_mults[32] = {
        -1, /* 1111 -> 12.0x */
 };
 
-static const int __cpuinitdata nehemiah_eblcr[32] = {
+static const int __cpuinitconst nehemiah_eblcr[32] = {
        50,  /* 0000 ->  5.0x */
        160, /* 0001 -> 16.0x */
        40,  /* 0010 ->  4.0x */
@@ -315,7 +315,7 @@ struct mV_pos {
        unsigned short pos;
 };
 
-static const struct mV_pos __cpuinitdata vrm85_mV[32] = {
+static const struct mV_pos __cpuinitconst vrm85_mV[32] = {
        {1250, 8},      {1200, 6},      {1150, 4},      {1100, 2},
        {1050, 0},      {1800, 30},     {1750, 28},     {1700, 26},
        {1650, 24},     {1600, 22},     {1550, 20},     {1500, 18},
@@ -326,14 +326,14 @@ static const struct mV_pos __cpuinitdata vrm85_mV[32] = {
        {1475, 17},     {1425, 15},     {1375, 13},     {1325, 11}
 };
 
-static const unsigned char __cpuinitdata mV_vrm85[32] = {
+static const unsigned char __cpuinitconst mV_vrm85[32] = {
        0x04,   0x14,   0x03,   0x13,   0x02,   0x12,   0x01,   0x11,
        0x00,   0x10,   0x0f,   0x1f,   0x0e,   0x1e,   0x0d,   0x1d,
        0x0c,   0x1c,   0x0b,   0x1b,   0x0a,   0x1a,   0x09,   0x19,
        0x08,   0x18,   0x07,   0x17,   0x06,   0x16,   0x05,   0x15
 };
 
-static const struct mV_pos __cpuinitdata mobilevrm_mV[32] = {
+static const struct mV_pos __cpuinitconst mobilevrm_mV[32] = {
        {1750, 31},     {1700, 30},     {1650, 29},     {1600, 28},
        {1550, 27},     {1500, 26},     {1450, 25},     {1400, 24},
        {1350, 23},     {1300, 22},     {1250, 21},     {1200, 20},
@@ -344,7 +344,7 @@ static const struct mV_pos __cpuinitdata mobilevrm_mV[32] = {
        {675, 3},       {650, 2},       {625, 1},       {600, 0}
 };
 
-static const unsigned char __cpuinitdata mV_mobilevrm[32] = {
+static const unsigned char __cpuinitconst mV_mobilevrm[32] = {
        0x1f,   0x1e,   0x1d,   0x1c,   0x1b,   0x1a,   0x19,   0x18,
        0x17,   0x16,   0x15,   0x14,   0x13,   0x12,   0x11,   0x10,
        0x0f,   0x0e,   0x0d,   0x0c,   0x0b,   0x0a,   0x09,   0x08,
index b47034e650a579b9d0263f8d0a03af2ecc8b3aa7..65f8e9a54975e8274c9de6403718f5ff39ee2e85 100644 (file)
 /* OPP tolerance in percentage */
 #define        OPP_TOLERANCE   4
 
-#ifdef CONFIG_SMP
-struct lpj_info {
-       unsigned long   ref;
-       unsigned int    freq;
-};
-
-static DEFINE_PER_CPU(struct lpj_info, lpj_ref);
-static struct lpj_info global_lpj_ref;
-#endif
-
 static struct cpufreq_frequency_table *freq_table;
 static atomic_t freq_table_users = ATOMIC_INIT(0);
 static struct clk *mpu_clk;
@@ -161,31 +151,6 @@ static int omap_target(struct cpufreq_policy *policy,
        }
 
        freqs.new = omap_getspeed(policy->cpu);
-#ifdef CONFIG_SMP
-       /*
-        * Note that loops_per_jiffy is not updated on SMP systems in
-        * cpufreq driver. So, update the per-CPU loops_per_jiffy value
-        * on frequency transition. We need to update all dependent CPUs.
-        */
-       for_each_cpu(i, policy->cpus) {
-               struct lpj_info *lpj = &per_cpu(lpj_ref, i);
-               if (!lpj->freq) {
-                       lpj->ref = per_cpu(cpu_data, i).loops_per_jiffy;
-                       lpj->freq = freqs.old;
-               }
-
-               per_cpu(cpu_data, i).loops_per_jiffy =
-                       cpufreq_scale(lpj->ref, lpj->freq, freqs.new);
-       }
-
-       /* And don't forget to adjust the global one */
-       if (!global_lpj_ref.freq) {
-               global_lpj_ref.ref = loops_per_jiffy;
-               global_lpj_ref.freq = freqs.old;
-       }
-       loops_per_jiffy = cpufreq_scale(global_lpj_ref.ref, global_lpj_ref.freq,
-                                       freqs.new);
-#endif
 
 done:
        /* notifiers */
@@ -301,9 +266,9 @@ static int __init omap_cpufreq_init(void)
        }
 
        mpu_dev = omap_device_get_by_hwmod_name("mpu");
-       if (!mpu_dev) {
+       if (IS_ERR(mpu_dev)) {
                pr_warning("%s: unable to get the mpu device\n", __func__);
-               return -EINVAL;
+               return PTR_ERR(mpu_dev);
        }
 
        mpu_reg = regulator_get(mpu_dev, "vcc");
index 1a40935c85fdcc1f21e976ad8dbd5298a87d6060..129e80bfff22e1d399fbe5320012f5721de1bcaf 100644 (file)
 #define PFX "powernow-k8: "
 #define VERSION "version 2.20.00"
 #include "powernow-k8.h"
-#include "mperf.h"
 
 /* serialize freq changes  */
 static DEFINE_MUTEX(fidvid_mutex);
 
 static DEFINE_PER_CPU(struct powernow_k8_data *, powernow_data);
 
-static int cpu_family = CPU_OPTERON;
-
-/* array to map SW pstate number to acpi state */
-static u32 ps_to_as[8];
-
-/* core performance boost */
-static bool cpb_capable, cpb_enabled;
-static struct msr __percpu *msrs;
-
 static struct cpufreq_driver cpufreq_amd64_driver;
 
 #ifndef CONFIG_SMP
@@ -85,12 +75,6 @@ static u32 find_khz_freq_from_fid(u32 fid)
        return 1000 * find_freq_from_fid(fid);
 }
 
-static u32 find_khz_freq_from_pstate(struct cpufreq_frequency_table *data,
-                                    u32 pstate)
-{
-       return data[ps_to_as[pstate]].frequency;
-}
-
 /* Return the vco fid for an input fid
  *
  * Each "low" fid has corresponding "high" fid, and you can get to "low" fids
@@ -113,9 +97,6 @@ static int pending_bit_stuck(void)
 {
        u32 lo, hi;
 
-       if (cpu_family == CPU_HW_PSTATE)
-               return 0;
-
        rdmsr(MSR_FIDVID_STATUS, lo, hi);
        return lo & MSR_S_LO_CHANGE_PENDING ? 1 : 0;
 }
@@ -129,20 +110,6 @@ static int query_current_values_with_pending_wait(struct powernow_k8_data *data)
        u32 lo, hi;
        u32 i = 0;
 
-       if (cpu_family == CPU_HW_PSTATE) {
-               rdmsr(MSR_PSTATE_STATUS, lo, hi);
-               i = lo & HW_PSTATE_MASK;
-               data->currpstate = i;
-
-               /*
-                * a workaround for family 11h erratum 311 might cause
-                * an "out-of-range Pstate if the core is in Pstate-0
-                */
-               if ((boot_cpu_data.x86 == 0x11) && (i >= data->numps))
-                       data->currpstate = HW_PSTATE_0;
-
-               return 0;
-       }
        do {
                if (i++ > 10000) {
                        pr_debug("detected change pending stuck\n");
@@ -299,14 +266,6 @@ static int decrease_vid_code_by_step(struct powernow_k8_data *data,
        return 0;
 }
 
-/* Change hardware pstate by single MSR write */
-static int transition_pstate(struct powernow_k8_data *data, u32 pstate)
-{
-       wrmsr(MSR_PSTATE_CTRL, pstate, 0);
-       data->currpstate = pstate;
-       return 0;
-}
-
 /* Change Opteron/Athlon64 fid and vid, by the 3 phases. */
 static int transition_fid_vid(struct powernow_k8_data *data,
                u32 reqfid, u32 reqvid)
@@ -523,8 +482,6 @@ static int core_voltage_post_transition(struct powernow_k8_data *data,
 static const struct x86_cpu_id powernow_k8_ids[] = {
        /* IO based frequency switching */
        { X86_VENDOR_AMD, 0xf },
-       /* MSR based frequency switching supported */
-       X86_FEATURE_MATCH(X86_FEATURE_HW_PSTATE),
        {}
 };
 MODULE_DEVICE_TABLE(x86cpu, powernow_k8_ids);
@@ -560,15 +517,8 @@ static void check_supported_cpu(void *_rc)
                                "Power state transitions not supported\n");
                        return;
                }
-       } else { /* must be a HW Pstate capable processor */
-               cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx);
-               if ((edx & USE_HW_PSTATE) == USE_HW_PSTATE)
-                       cpu_family = CPU_HW_PSTATE;
-               else
-                       return;
+               *rc = 0;
        }
-
-       *rc = 0;
 }
 
 static int check_pst_table(struct powernow_k8_data *data, struct pst_s *pst,
@@ -632,18 +582,11 @@ static void print_basics(struct powernow_k8_data *data)
        for (j = 0; j < data->numps; j++) {
                if (data->powernow_table[j].frequency !=
                                CPUFREQ_ENTRY_INVALID) {
-                       if (cpu_family == CPU_HW_PSTATE) {
-                               printk(KERN_INFO PFX
-                                       "   %d : pstate %d (%d MHz)\n", j,
-                                       data->powernow_table[j].index,
-                                       data->powernow_table[j].frequency/1000);
-                       } else {
                                printk(KERN_INFO PFX
                                        "fid 0x%x (%d MHz), vid 0x%x\n",
                                        data->powernow_table[j].index & 0xff,
                                        data->powernow_table[j].frequency/1000,
                                        data->powernow_table[j].index >> 8);
-                       }
                }
        }
        if (data->batps)
@@ -651,20 +594,6 @@ static void print_basics(struct powernow_k8_data *data)
                                data->batps);
 }
 
-static u32 freq_from_fid_did(u32 fid, u32 did)
-{
-       u32 mhz = 0;
-
-       if (boot_cpu_data.x86 == 0x10)
-               mhz = (100 * (fid + 0x10)) >> did;
-       else if (boot_cpu_data.x86 == 0x11)
-               mhz = (100 * (fid + 8)) >> did;
-       else
-               BUG();
-
-       return mhz * 1000;
-}
-
 static int fill_powernow_table(struct powernow_k8_data *data,
                struct pst_s *pst, u8 maxvid)
 {
@@ -824,7 +753,7 @@ static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data,
 {
        u64 control;
 
-       if (!data->acpi_data.state_count || (cpu_family == CPU_HW_PSTATE))
+       if (!data->acpi_data.state_count)
                return;
 
        control = data->acpi_data.states[index].control;
@@ -875,10 +804,7 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
        data->numps = data->acpi_data.state_count;
        powernow_k8_acpi_pst_values(data, 0);
 
-       if (cpu_family == CPU_HW_PSTATE)
-               ret_val = fill_powernow_table_pstate(data, powernow_table);
-       else
-               ret_val = fill_powernow_table_fidvid(data, powernow_table);
+       ret_val = fill_powernow_table_fidvid(data, powernow_table);
        if (ret_val)
                goto err_out_mem;
 
@@ -915,51 +841,6 @@ err_out:
        return ret_val;
 }
 
-static int fill_powernow_table_pstate(struct powernow_k8_data *data,
-               struct cpufreq_frequency_table *powernow_table)
-{
-       int i;
-       u32 hi = 0, lo = 0;
-       rdmsr(MSR_PSTATE_CUR_LIMIT, lo, hi);
-       data->max_hw_pstate = (lo & HW_PSTATE_MAX_MASK) >> HW_PSTATE_MAX_SHIFT;
-
-       for (i = 0; i < data->acpi_data.state_count; i++) {
-               u32 index;
-
-               index = data->acpi_data.states[i].control & HW_PSTATE_MASK;
-               if (index > data->max_hw_pstate) {
-                       printk(KERN_ERR PFX "invalid pstate %d - "
-                                       "bad value %d.\n", i, index);
-                       printk(KERN_ERR PFX "Please report to BIOS "
-                                       "manufacturer\n");
-                       invalidate_entry(powernow_table, i);
-                       continue;
-               }
-
-               ps_to_as[index] = i;
-
-               /* Frequency may be rounded for these */
-               if ((boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model < 10)
-                                || boot_cpu_data.x86 == 0x11) {
-
-                       rdmsr(MSR_PSTATE_DEF_BASE + index, lo, hi);
-                       if (!(hi & HW_PSTATE_VALID_MASK)) {
-                               pr_debug("invalid pstate %d, ignoring\n", index);
-                               invalidate_entry(powernow_table, i);
-                               continue;
-                       }
-
-                       powernow_table[i].frequency =
-                               freq_from_fid_did(lo & 0x3f, (lo >> 6) & 7);
-               } else
-                       powernow_table[i].frequency =
-                               data->acpi_data.states[i].core_frequency * 1000;
-
-               powernow_table[i].index = index;
-       }
-       return 0;
-}
-
 static int fill_powernow_table_fidvid(struct powernow_k8_data *data,
                struct cpufreq_frequency_table *powernow_table)
 {
@@ -1036,15 +917,7 @@ static int get_transition_latency(struct powernow_k8_data *data)
                        max_latency = cur_latency;
        }
        if (max_latency == 0) {
-               /*
-                * Fam 11h and later may return 0 as transition latency. This
-                * is intended and means "very fast". While cpufreq core and
-                * governors currently can handle that gracefully, better set it
-                * to 1 to avoid problems in the future.
-                */
-               if (boot_cpu_data.x86 < 0x11)
-                       printk(KERN_ERR FW_WARN PFX "Invalid zero transition "
-                               "latency\n");
+               pr_err(FW_WARN PFX "Invalid zero transition latency\n");
                max_latency = 1;
        }
        /* value in usecs, needs to be in nanoseconds */
@@ -1104,40 +977,6 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data,
        return res;
 }
 
-/* Take a frequency, and issue the hardware pstate transition command */
-static int transition_frequency_pstate(struct powernow_k8_data *data,
-               unsigned int index)
-{
-       u32 pstate = 0;
-       int res, i;
-       struct cpufreq_freqs freqs;
-
-       pr_debug("cpu %d transition to index %u\n", smp_processor_id(), index);
-
-       /* get MSR index for hardware pstate transition */
-       pstate = index & HW_PSTATE_MASK;
-       if (pstate > data->max_hw_pstate)
-               return -EINVAL;
-
-       freqs.old = find_khz_freq_from_pstate(data->powernow_table,
-                       data->currpstate);
-       freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate);
-
-       for_each_cpu(i, data->available_cores) {
-               freqs.cpu = i;
-               cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
-       }
-
-       res = transition_pstate(data, pstate);
-       freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate);
-
-       for_each_cpu(i, data->available_cores) {
-               freqs.cpu = i;
-               cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
-       }
-       return res;
-}
-
 struct powernowk8_target_arg {
        struct cpufreq_policy           *pol;
        unsigned                        targfreq;
@@ -1173,18 +1012,15 @@ static long powernowk8_target_fn(void *arg)
        if (query_current_values_with_pending_wait(data))
                return -EIO;
 
-       if (cpu_family != CPU_HW_PSTATE) {
-               pr_debug("targ: curr fid 0x%x, vid 0x%x\n",
-               data->currfid, data->currvid);
+       pr_debug("targ: curr fid 0x%x, vid 0x%x\n",
+                data->currfid, data->currvid);
 
-               if ((checkvid != data->currvid) ||
-                   (checkfid != data->currfid)) {
-                       printk(KERN_INFO PFX
-                               "error - out of sync, fix 0x%x 0x%x, "
-                               "vid 0x%x 0x%x\n",
-                               checkfid, data->currfid,
-                               checkvid, data->currvid);
-               }
+       if ((checkvid != data->currvid) ||
+           (checkfid != data->currfid)) {
+               pr_info(PFX
+                      "error - out of sync, fix 0x%x 0x%x, vid 0x%x 0x%x\n",
+                      checkfid, data->currfid,
+                      checkvid, data->currvid);
        }
 
        if (cpufreq_frequency_table_target(pol, data->powernow_table,
@@ -1195,11 +1031,8 @@ static long powernowk8_target_fn(void *arg)
 
        powernow_k8_acpi_pst_values(data, newstate);
 
-       if (cpu_family == CPU_HW_PSTATE)
-               ret = transition_frequency_pstate(data,
-                       data->powernow_table[newstate].index);
-       else
-               ret = transition_frequency_fidvid(data, newstate);
+       ret = transition_frequency_fidvid(data, newstate);
+
        if (ret) {
                printk(KERN_ERR PFX "transition frequency failed\n");
                mutex_unlock(&fidvid_mutex);
@@ -1207,11 +1040,7 @@ static long powernowk8_target_fn(void *arg)
        }
        mutex_unlock(&fidvid_mutex);
 
-       if (cpu_family == CPU_HW_PSTATE)
-               pol->cur = find_khz_freq_from_pstate(data->powernow_table,
-                               data->powernow_table[newstate].index);
-       else
-               pol->cur = find_khz_freq_from_fid(data->currfid);
+       pol->cur = find_khz_freq_from_fid(data->currfid);
 
        return 0;
 }
@@ -1264,22 +1093,23 @@ static void __cpuinit powernowk8_cpu_init_on_cpu(void *_init_on_cpu)
                return;
        }
 
-       if (cpu_family == CPU_OPTERON)
-               fidvid_msr_init();
+       fidvid_msr_init();
 
        init_on_cpu->rc = 0;
 }
 
+static const char missing_pss_msg[] =
+       KERN_ERR
+       FW_BUG PFX "No compatible ACPI _PSS objects found.\n"
+       FW_BUG PFX "First, make sure Cool'N'Quiet is enabled in the BIOS.\n"
+       FW_BUG PFX "If that doesn't help, try upgrading your BIOS.\n";
+
 /* per CPU init entry point to the driver */
 static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 {
-       static const char ACPI_PSS_BIOS_BUG_MSG[] =
-               KERN_ERR FW_BUG PFX "No compatible ACPI _PSS objects found.\n"
-               FW_BUG PFX "Try again with latest BIOS.\n";
        struct powernow_k8_data *data;
        struct init_on_cpu init_on_cpu;
        int rc;
-       struct cpuinfo_x86 *c = &cpu_data(pol->cpu);
 
        if (!cpu_online(pol->cpu))
                return -ENODEV;
@@ -1295,7 +1125,6 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
        }
 
        data->cpu = pol->cpu;
-       data->currpstate = HW_PSTATE_INVALID;
 
        if (powernow_k8_cpu_init_acpi(data)) {
                /*
@@ -1303,7 +1132,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
                 * an UP version, and is deprecated by AMD.
                 */
                if (num_online_cpus() != 1) {
-                       printk_once(ACPI_PSS_BIOS_BUG_MSG);
+                       printk_once(missing_pss_msg);
                        goto err_out;
                }
                if (pol->cpu != 0) {
@@ -1332,17 +1161,10 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
        if (rc != 0)
                goto err_out_exit_acpi;
 
-       if (cpu_family == CPU_HW_PSTATE)
-               cpumask_copy(pol->cpus, cpumask_of(pol->cpu));
-       else
-               cpumask_copy(pol->cpus, cpu_core_mask(pol->cpu));
+       cpumask_copy(pol->cpus, cpu_core_mask(pol->cpu));
        data->available_cores = pol->cpus;
 
-       if (cpu_family == CPU_HW_PSTATE)
-               pol->cur = find_khz_freq_from_pstate(data->powernow_table,
-                               data->currpstate);
-       else
-               pol->cur = find_khz_freq_from_fid(data->currfid);
+       pol->cur = find_khz_freq_from_fid(data->currfid);
        pr_debug("policy current frequency %d kHz\n", pol->cur);
 
        /* min/max the cpu is capable of */
@@ -1354,18 +1176,10 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
                return -EINVAL;
        }
 
-       /* Check for APERF/MPERF support in hardware */
-       if (cpu_has(c, X86_FEATURE_APERFMPERF))
-               cpufreq_amd64_driver.getavg = cpufreq_get_measured_perf;
-
        cpufreq_frequency_table_get_attr(data->powernow_table, pol->cpu);
 
-       if (cpu_family == CPU_HW_PSTATE)
-               pr_debug("cpu_init done, current pstate 0x%x\n",
-                               data->currpstate);
-       else
-               pr_debug("cpu_init done, current fid 0x%x, vid 0x%x\n",
-                       data->currfid, data->currvid);
+       pr_debug("cpu_init done, current fid 0x%x, vid 0x%x\n",
+                data->currfid, data->currvid);
 
        per_cpu(powernow_data, pol->cpu) = data;
 
@@ -1418,88 +1232,15 @@ static unsigned int powernowk8_get(unsigned int cpu)
        if (err)
                goto out;
 
-       if (cpu_family == CPU_HW_PSTATE)
-               khz = find_khz_freq_from_pstate(data->powernow_table,
-                                               data->currpstate);
-       else
-               khz = find_khz_freq_from_fid(data->currfid);
+       khz = find_khz_freq_from_fid(data->currfid);
 
 
 out:
        return khz;
 }
 
-static void _cpb_toggle_msrs(bool t)
-{
-       int cpu;
-
-       get_online_cpus();
-
-       rdmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs);
-
-       for_each_cpu(cpu, cpu_online_mask) {
-               struct msr *reg = per_cpu_ptr(msrs, cpu);
-               if (t)
-                       reg->l &= ~BIT(25);
-               else
-                       reg->l |= BIT(25);
-       }
-       wrmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs);
-
-       put_online_cpus();
-}
-
-/*
- * Switch on/off core performance boosting.
- *
- * 0=disable
- * 1=enable.
- */
-static void cpb_toggle(bool t)
-{
-       if (!cpb_capable)
-               return;
-
-       if (t && !cpb_enabled) {
-               cpb_enabled = true;
-               _cpb_toggle_msrs(t);
-               printk(KERN_INFO PFX "Core Boosting enabled.\n");
-       } else if (!t && cpb_enabled) {
-               cpb_enabled = false;
-               _cpb_toggle_msrs(t);
-               printk(KERN_INFO PFX "Core Boosting disabled.\n");
-       }
-}
-
-static ssize_t store_cpb(struct cpufreq_policy *policy, const char *buf,
-                                size_t count)
-{
-       int ret = -EINVAL;
-       unsigned long val = 0;
-
-       ret = strict_strtoul(buf, 10, &val);
-       if (!ret && (val == 0 || val == 1) && cpb_capable)
-               cpb_toggle(val);
-       else
-               return -EINVAL;
-
-       return count;
-}
-
-static ssize_t show_cpb(struct cpufreq_policy *policy, char *buf)
-{
-       return sprintf(buf, "%u\n", cpb_enabled);
-}
-
-#define define_one_rw(_name) \
-static struct freq_attr _name = \
-__ATTR(_name, 0644, show_##_name, store_##_name)
-
-define_one_rw(cpb);
-
 static struct freq_attr *powernow_k8_attr[] = {
        &cpufreq_freq_attr_scaling_available_freqs,
-       &cpb,
        NULL,
 };
 
@@ -1515,53 +1256,18 @@ static struct cpufreq_driver cpufreq_amd64_driver = {
        .attr           = powernow_k8_attr,
 };
 
-/*
- * Clear the boost-disable flag on the CPU_DOWN path so that this cpu
- * cannot block the remaining ones from boosting. On the CPU_UP path we
- * simply keep the boost-disable flag in sync with the current global
- * state.
- */
-static int cpb_notify(struct notifier_block *nb, unsigned long action,
-                     void *hcpu)
-{
-       unsigned cpu = (long)hcpu;
-       u32 lo, hi;
-
-       switch (action) {
-       case CPU_UP_PREPARE:
-       case CPU_UP_PREPARE_FROZEN:
-
-               if (!cpb_enabled) {
-                       rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
-                       lo |= BIT(25);
-                       wrmsr_on_cpu(cpu, MSR_K7_HWCR, lo, hi);
-               }
-               break;
-
-       case CPU_DOWN_PREPARE:
-       case CPU_DOWN_PREPARE_FROZEN:
-               rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
-               lo &= ~BIT(25);
-               wrmsr_on_cpu(cpu, MSR_K7_HWCR, lo, hi);
-               break;
-
-       default:
-               break;
-       }
-
-       return NOTIFY_OK;
-}
-
-static struct notifier_block cpb_nb = {
-       .notifier_call          = cpb_notify,
-};
-
 /* driver entry point for init */
 static int __cpuinit powernowk8_init(void)
 {
-       unsigned int i, supported_cpus = 0, cpu;
+       unsigned int i, supported_cpus = 0;
        int rv;
 
+       if (static_cpu_has(X86_FEATURE_HW_PSTATE)) {
+               pr_warn(PFX "this CPU is not supported anymore, using acpi-cpufreq instead.\n");
+               request_module("acpi-cpufreq");
+               return -ENODEV;
+       }
+
        if (!x86_match_cpu(powernow_k8_ids))
                return -ENODEV;
 
@@ -1575,38 +1281,13 @@ static int __cpuinit powernowk8_init(void)
        if (supported_cpus != num_online_cpus())
                return -ENODEV;
 
-       printk(KERN_INFO PFX "Found %d %s (%d cpu cores) (" VERSION ")\n",
-               num_online_nodes(), boot_cpu_data.x86_model_id, supported_cpus);
-
-       if (boot_cpu_has(X86_FEATURE_CPB)) {
-
-               cpb_capable = true;
-
-               msrs = msrs_alloc();
-               if (!msrs) {
-                       printk(KERN_ERR "%s: Error allocating msrs!\n", __func__);
-                       return -ENOMEM;
-               }
-
-               register_cpu_notifier(&cpb_nb);
-
-               rdmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs);
+       rv = cpufreq_register_driver(&cpufreq_amd64_driver);
 
-               for_each_cpu(cpu, cpu_online_mask) {
-                       struct msr *reg = per_cpu_ptr(msrs, cpu);
-                       cpb_enabled |= !(!!(reg->l & BIT(25)));
-               }
+       if (!rv)
+               pr_info(PFX "Found %d %s (%d cpu cores) (" VERSION ")\n",
+                       num_online_nodes(), boot_cpu_data.x86_model_id,
+                       supported_cpus);
 
-               printk(KERN_INFO PFX "Core Performance Boosting: %s.\n",
-                       (cpb_enabled ? "on" : "off"));
-       }
-
-       rv = cpufreq_register_driver(&cpufreq_amd64_driver);
-       if (rv < 0 && boot_cpu_has(X86_FEATURE_CPB)) {
-               unregister_cpu_notifier(&cpb_nb);
-               msrs_free(msrs);
-               msrs = NULL;
-       }
        return rv;
 }
 
@@ -1615,13 +1296,6 @@ static void __exit powernowk8_exit(void)
 {
        pr_debug("exit\n");
 
-       if (boot_cpu_has(X86_FEATURE_CPB)) {
-               msrs_free(msrs);
-               msrs = NULL;
-
-               unregister_cpu_notifier(&cpb_nb);
-       }
-
        cpufreq_unregister_driver(&cpufreq_amd64_driver);
 }
 
index 3744d26cdc2b31524fe6686e33a28f2ada18f30b..79329d4d5abea9d9d7ff537bf636de9296cddcfe 100644 (file)
@@ -5,24 +5,11 @@
  *  http://www.gnu.org/licenses/gpl.html
  */
 
-enum pstate {
-       HW_PSTATE_INVALID = 0xff,
-       HW_PSTATE_0 = 0,
-       HW_PSTATE_1 = 1,
-       HW_PSTATE_2 = 2,
-       HW_PSTATE_3 = 3,
-       HW_PSTATE_4 = 4,
-       HW_PSTATE_5 = 5,
-       HW_PSTATE_6 = 6,
-       HW_PSTATE_7 = 7,
-};
-
 struct powernow_k8_data {
        unsigned int cpu;
 
        u32 numps;  /* number of p-states */
        u32 batps;  /* number of p-states supported on battery */
-       u32 max_hw_pstate; /* maximum legal hardware pstate */
 
        /* these values are constant when the PSB is used to determine
         * vid/fid pairings, but are modified during the ->target() call
@@ -37,7 +24,6 @@ struct powernow_k8_data {
        /* keep track of the current fid / vid or pstate */
        u32 currvid;
        u32 currfid;
-       enum pstate currpstate;
 
        /* the powernow_table includes all frequency and vid/fid pairings:
         * fid are the lower 8 bits of the index, vid are the upper 8 bits.
@@ -97,23 +83,6 @@ struct powernow_k8_data {
 #define MSR_S_HI_CURRENT_VID      0x0000003f
 #define MSR_C_HI_STP_GNT_BENIGN          0x00000001
 
-
-/* Hardware Pstate _PSS and MSR definitions */
-#define USE_HW_PSTATE          0x00000080
-#define HW_PSTATE_MASK                 0x00000007
-#define HW_PSTATE_VALID_MASK   0x80000000
-#define HW_PSTATE_MAX_MASK     0x000000f0
-#define HW_PSTATE_MAX_SHIFT    4
-#define MSR_PSTATE_DEF_BASE    0xc0010064 /* base of Pstate MSRs */
-#define MSR_PSTATE_STATUS      0xc0010063 /* Pstate Status MSR */
-#define MSR_PSTATE_CTRL        0xc0010062 /* Pstate control MSR */
-#define MSR_PSTATE_CUR_LIMIT   0xc0010061 /* pstate current limit MSR */
-
-/* define the two driver architectures */
-#define CPU_OPTERON 0
-#define CPU_HW_PSTATE 1
-
-
 /*
  * There are restrictions frequencies have to follow:
  * - only 1 entry in the low fid table ( <=1.4GHz )
@@ -218,5 +187,4 @@ static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid);
 
 static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index);
 
-static int fill_powernow_table_pstate(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table);
 static int fill_powernow_table_fidvid(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table);
index 58bf3b1ac9c46035c2a3fae79271954f6d436442..87db3877fead7280d93eaf53baabcc6e4fe0f043 100644 (file)
@@ -18,9 +18,10 @@ static struct cpuidle_driver *cpuidle_curr_driver;
 DEFINE_SPINLOCK(cpuidle_driver_lock);
 int cpuidle_driver_refcount;
 
-static void __cpuidle_register_driver(struct cpuidle_driver *drv)
+static void set_power_states(struct cpuidle_driver *drv)
 {
        int i;
+
        /*
         * cpuidle driver should set the drv->power_specified bit
         * before registering if the driver provides
@@ -35,13 +36,10 @@ static void __cpuidle_register_driver(struct cpuidle_driver *drv)
         * an power value of -1.  So we use -2, -3, etc, for other
         * c-states.
         */
-       if (!drv->power_specified) {
-               for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++)
-                       drv->states[i].power_usage = -1 - i;
-       }
+       for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++)
+               drv->states[i].power_usage = -1 - i;
 }
 
-
 /**
  * cpuidle_register_driver - registers a driver
  * @drv: the driver
@@ -59,13 +57,16 @@ int cpuidle_register_driver(struct cpuidle_driver *drv)
                spin_unlock(&cpuidle_driver_lock);
                return -EBUSY;
        }
-       __cpuidle_register_driver(drv);
+
+       if (!drv->power_specified)
+               set_power_states(drv);
+
        cpuidle_curr_driver = drv;
+
        spin_unlock(&cpuidle_driver_lock);
 
        return 0;
 }
-
 EXPORT_SYMBOL_GPL(cpuidle_register_driver);
 
 /**
@@ -96,7 +97,6 @@ void cpuidle_unregister_driver(struct cpuidle_driver *drv)
 
        spin_unlock(&cpuidle_driver_lock);
 }
-
 EXPORT_SYMBOL_GPL(cpuidle_unregister_driver);
 
 struct cpuidle_driver *cpuidle_driver_ref(void)
index b6a09ea859b135c31e6dcffbad0e7a3d2ad66eee..9b784051ec12b47564b3d07429096556b2fc2035 100644 (file)
@@ -88,6 +88,8 @@ static int ladder_select_state(struct cpuidle_driver *drv,
 
        /* consider promotion */
        if (last_idx < drv->state_count - 1 &&
+           !drv->states[last_idx + 1].disabled &&
+           !dev->states_usage[last_idx + 1].disable &&
            last_residency > last_state->threshold.promotion_time &&
            drv->states[last_idx + 1].exit_latency <= latency_req) {
                last_state->stats.promotion_count++;
@@ -100,7 +102,9 @@ static int ladder_select_state(struct cpuidle_driver *drv,
 
        /* consider demotion */
        if (last_idx > CPUIDLE_DRIVER_STATE_START &&
-           drv->states[last_idx].exit_latency > latency_req) {
+           (drv->states[last_idx].disabled ||
+           dev->states_usage[last_idx].disable ||
+           drv->states[last_idx].exit_latency > latency_req)) {
                int i;
 
                for (i = last_idx - 1; i > CPUIDLE_DRIVER_STATE_START; i--) {
index a8bd0310f8fec7fe5f8f7bceaf1987f7f38bd8dc..aab257403b4a54ac4d75d4db3340c9b48855478b 100644 (file)
@@ -42,7 +42,7 @@ MODULE_DESCRIPTION("Niagara2 Crypto driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_MODULE_VERSION);
 
-#define N2_CRA_PRIORITY                300
+#define N2_CRA_PRIORITY                200
 
 static DEFINE_MUTEX(spu_lock);
 
index 70c31d43fff3f1f7c682187c3a3629fce99c1e96..b146d76f04cfaf9361f3e9ebd767d5c99e8e45d6 100644 (file)
@@ -607,7 +607,7 @@ static int __init devfreq_start_polling(void)
        mutex_lock(&devfreq_list_lock);
        polling = false;
        devfreq_wq = create_freezable_workqueue("devfreq_wq");
-       INIT_DELAYED_WORK_DEFERRABLE(&devfreq_work, devfreq_monitor);
+       INIT_DEFERRABLE_WORK(&devfreq_work, devfreq_monitor);
        mutex_unlock(&devfreq_list_lock);
 
        devfreq_monitor(&devfreq_work.work);
index d5dc9da7f99f95fc9dd3a6edb5d4bc302e69c2c3..90f0b730e9bb1ee4a23a2941ea60021a12798d1f 100644 (file)
@@ -559,7 +559,7 @@ static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec)
                return;
 
        INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function);
-       queue_delayed_work(edac_workqueue, &mci->work, msecs_to_jiffies(msec));
+       mod_delayed_work(edac_workqueue, &mci->work, msecs_to_jiffies(msec));
 }
 
 /*
@@ -599,21 +599,6 @@ void edac_mc_reset_delay_period(int value)
 
        mutex_lock(&mem_ctls_mutex);
 
-       /* scan the list and turn off all workq timers, doing so under lock
-        */
-       list_for_each(item, &mc_devices) {
-               mci = list_entry(item, struct mem_ctl_info, link);
-
-               if (mci->op_state == OP_RUNNING_POLL)
-                       cancel_delayed_work(&mci->work);
-       }
-
-       mutex_unlock(&mem_ctls_mutex);
-
-
-       /* re-walk the list, and reset the poll delay */
-       mutex_lock(&mem_ctls_mutex);
-
        list_for_each(item, &mc_devices) {
                mci = list_entry(item, struct mem_ctl_info, link);
 
index 60ac3fbb4cde7ef1163105ef7de2335012ced5ea..725eb5aa8d8cb670d0e104d48557fb866947a20a 100644 (file)
@@ -143,7 +143,7 @@ static int __devinit adc_jack_probe(struct platform_device *pdev)
 
        data->handling_delay = msecs_to_jiffies(pdata->handling_delay_ms);
 
-       INIT_DELAYED_WORK_DEFERRABLE(&data->handler, adc_jack_handler);
+       INIT_DEFERRABLE_WORK(&data->handler, adc_jack_handler);
 
        platform_set_drvdata(pdev, data);
 
index a00b828b1643ddfbcd90f219525a93fe15efc5af..8382dc832929a4044bbb61ec3c4423a738a5a8df 100644 (file)
@@ -82,7 +82,7 @@ config GPIO_GENERIC
 
 config GPIO_DA9052
        tristate "Dialog DA9052 GPIO"
-       depends on PMIC_DA9052 && BROKEN
+       depends on PMIC_DA9052
        help
          Say yes here to enable the GPIO driver for the DA9052 chip.
 
@@ -330,6 +330,7 @@ config GPIO_PCA953X_IRQ
 config GPIO_PCF857X
        tristate "PCF857x, PCA{85,96}7x, and MAX732[89] I2C GPIO expanders"
        depends on I2C
+       select IRQ_DOMAIN
        help
          Say yes here to provide access to most "quasi-bidirectional" I2C
          GPIO expanders used for additional digital outputs or inputs.
@@ -450,6 +451,17 @@ config GPIO_ADP5588_IRQ
          Say yes here to enable the adp5588 to be used as an interrupt
          controller. It requires the driver to be built in the kernel.
 
+config GPIO_ADNP
+       tristate "Avionic Design N-bit GPIO expander"
+       depends on I2C && OF
+       help
+         This option enables support for N GPIOs found on Avionic Design
+         I2C GPIO expanders. The register space will be extended by powers
+         of two, so the controller will need to accomodate for that. For
+         example: if a controller provides 48 pins, 6 registers will be
+         enough to represent all pins, but the driver will assume a
+         register layout for 64 pins (8 registers).
+
 comment "PCI GPIO expanders:"
 
 config GPIO_CS5535
index a288142ad99879fd1da7484bcca4f4fd5586e567..0ffaa8423e87c07d9d83d9408edc0da804939c40 100644 (file)
@@ -10,6 +10,7 @@ obj-$(CONFIG_GPIO_GENERIC)    += gpio-generic.o
 
 obj-$(CONFIG_GPIO_74X164)      += gpio-74x164.o
 obj-$(CONFIG_GPIO_AB8500)      += gpio-ab8500.o
+obj-$(CONFIG_GPIO_ADNP)                += gpio-adnp.o
 obj-$(CONFIG_GPIO_ADP5520)     += gpio-adp5520.o
 obj-$(CONFIG_GPIO_ADP5588)     += gpio-adp5588.o
 obj-$(CONFIG_GPIO_AMD8111)     += gpio-amd8111.o
index a31ad6f5d9106f370167f518f719f4cf2d26323a..ed3e55161bdc5bacbd0409a8a728f161872671c9 100644 (file)
 #include <linux/spi/spi.h>
 #include <linux/spi/74x164.h>
 #include <linux/gpio.h>
+#include <linux/of_gpio.h>
 #include <linux/slab.h>
 #include <linux/module.h>
 
+#define GEN_74X164_NUMBER_GPIOS        8
+
 struct gen_74x164_chip {
        struct spi_device       *spi;
+       u8                      *buffer;
        struct gpio_chip        gpio_chip;
        struct mutex            lock;
-       u8                      port_config;
+       u32                     registers;
 };
 
 static struct gen_74x164_chip *gpio_to_74x164_chip(struct gpio_chip *gc)
@@ -31,17 +35,47 @@ static struct gen_74x164_chip *gpio_to_74x164_chip(struct gpio_chip *gc)
 
 static int __gen_74x164_write_config(struct gen_74x164_chip *chip)
 {
-       return spi_write(chip->spi,
-                        &chip->port_config, sizeof(chip->port_config));
+       struct spi_message message;
+       struct spi_transfer *msg_buf;
+       int i, ret = 0;
+
+       msg_buf = kzalloc(chip->registers * sizeof(struct spi_transfer),
+                       GFP_KERNEL);
+       if (!msg_buf)
+               return -ENOMEM;
+
+       spi_message_init(&message);
+
+       /*
+        * Since the registers are chained, every byte sent will make
+        * the previous byte shift to the next register in the
+        * chain. Thus, the first byte send will end up in the last
+        * register at the end of the transfer. So, to have a logical
+        * numbering, send the bytes in reverse order so that the last
+        * byte of the buffer will end up in the last register.
+        */
+       for (i = chip->registers - 1; i >= 0; i--) {
+               msg_buf[i].tx_buf = chip->buffer +i;
+               msg_buf[i].len = sizeof(u8);
+               spi_message_add_tail(msg_buf + i, &message);
+       }
+
+       ret = spi_sync(chip->spi, &message);
+
+       kfree(msg_buf);
+
+       return ret;
 }
 
 static int gen_74x164_get_value(struct gpio_chip *gc, unsigned offset)
 {
        struct gen_74x164_chip *chip = gpio_to_74x164_chip(gc);
+       u8 bank = offset / 8;
+       u8 pin = offset % 8;
        int ret;
 
        mutex_lock(&chip->lock);
-       ret = (chip->port_config >> offset) & 0x1;
+       ret = (chip->buffer[bank] >> pin) & 0x1;
        mutex_unlock(&chip->lock);
 
        return ret;
@@ -51,12 +85,14 @@ static void gen_74x164_set_value(struct gpio_chip *gc,
                unsigned offset, int val)
 {
        struct gen_74x164_chip *chip = gpio_to_74x164_chip(gc);
+       u8 bank = offset / 8;
+       u8 pin = offset % 8;
 
        mutex_lock(&chip->lock);
        if (val)
-               chip->port_config |= (1 << offset);
+               chip->buffer[bank] |= (1 << pin);
        else
-               chip->port_config &= ~(1 << offset);
+               chip->buffer[bank] &= ~(1 << pin);
 
        __gen_74x164_write_config(chip);
        mutex_unlock(&chip->lock);
@@ -75,9 +111,8 @@ static int __devinit gen_74x164_probe(struct spi_device *spi)
        struct gen_74x164_chip_platform_data *pdata;
        int ret;
 
-       pdata = spi->dev.platform_data;
-       if (!pdata || !pdata->base) {
-               dev_dbg(&spi->dev, "incorrect or missing platform data\n");
+       if (!spi->dev.of_node) {
+               dev_err(&spi->dev, "No device tree data available.\n");
                return -EINVAL;
        }
 
@@ -90,10 +125,16 @@ static int __devinit gen_74x164_probe(struct spi_device *spi)
        if (ret < 0)
                return ret;
 
-       chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+       chip = devm_kzalloc(&spi->dev, sizeof(*chip), GFP_KERNEL);
        if (!chip)
                return -ENOMEM;
 
+       pdata = spi->dev.platform_data;
+       if (pdata && pdata->base)
+               chip->gpio_chip.base = pdata->base;
+       else
+               chip->gpio_chip.base = -1;
+
        mutex_init(&chip->lock);
 
        dev_set_drvdata(&spi->dev, chip);
@@ -104,8 +145,20 @@ static int __devinit gen_74x164_probe(struct spi_device *spi)
        chip->gpio_chip.direction_output = gen_74x164_direction_output;
        chip->gpio_chip.get = gen_74x164_get_value;
        chip->gpio_chip.set = gen_74x164_set_value;
-       chip->gpio_chip.base = pdata->base;
-       chip->gpio_chip.ngpio = 8;
+
+       if (of_property_read_u32(spi->dev.of_node, "registers-number", &chip->registers)) {
+               dev_err(&spi->dev, "Missing registers-number property in the DT.\n");
+               ret = -EINVAL;
+               goto exit_destroy;
+       }
+
+       chip->gpio_chip.ngpio = GEN_74X164_NUMBER_GPIOS * chip->registers;
+       chip->buffer = devm_kzalloc(&spi->dev, chip->gpio_chip.ngpio, GFP_KERNEL);
+       if (!chip->buffer) {
+               ret = -ENOMEM;
+               goto exit_destroy;
+       }
+
        chip->gpio_chip.can_sleep = 1;
        chip->gpio_chip.dev = &spi->dev;
        chip->gpio_chip.owner = THIS_MODULE;
@@ -125,7 +178,6 @@ static int __devinit gen_74x164_probe(struct spi_device *spi)
 exit_destroy:
        dev_set_drvdata(&spi->dev, NULL);
        mutex_destroy(&chip->lock);
-       kfree(chip);
        return ret;
 }
 
@@ -141,36 +193,31 @@ static int __devexit gen_74x164_remove(struct spi_device *spi)
        dev_set_drvdata(&spi->dev, NULL);
 
        ret = gpiochip_remove(&chip->gpio_chip);
-       if (!ret) {
+       if (!ret)
                mutex_destroy(&chip->lock);
-               kfree(chip);
-       } else
+       else
                dev_err(&spi->dev, "Failed to remove the GPIO controller: %d\n",
                                ret);
 
        return ret;
 }
 
+static const struct of_device_id gen_74x164_dt_ids[] = {
+       { .compatible = "fairchild,74hc595" },
+       {},
+};
+MODULE_DEVICE_TABLE(of, gen_74x164_dt_ids);
+
 static struct spi_driver gen_74x164_driver = {
        .driver = {
                .name           = "74x164",
                .owner          = THIS_MODULE,
+               .of_match_table = of_match_ptr(gen_74x164_dt_ids),
        },
        .probe          = gen_74x164_probe,
        .remove         = __devexit_p(gen_74x164_remove),
 };
-
-static int __init gen_74x164_init(void)
-{
-       return spi_register_driver(&gen_74x164_driver);
-}
-subsys_initcall(gen_74x164_init);
-
-static void __exit gen_74x164_exit(void)
-{
-       spi_unregister_driver(&gen_74x164_driver);
-}
-module_exit(gen_74x164_exit);
+module_spi_driver(gen_74x164_driver);
 
 MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org>");
 MODULE_AUTHOR("Miguel Gaio <miguel.gaio@efixo.com>");
diff --git a/drivers/gpio/gpio-adnp.c b/drivers/gpio/gpio-adnp.c
new file mode 100644 (file)
index 0000000..3df8833
--- /dev/null
@@ -0,0 +1,611 @@
+/*
+ * Copyright (C) 2011-2012 Avionic Design GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+
+/*
+ * Register offsets. Each register spans one byte per bank of 8 GPIOs, so
+ * the base offset of every register is scaled by the per-chip reg_shift
+ * (number of banks = 1 << reg_shift).
+ */
+#define GPIO_DDR(gpio) (0x00 << (gpio)->reg_shift)
+#define GPIO_PLR(gpio) (0x01 << (gpio)->reg_shift)
+#define GPIO_IER(gpio) (0x02 << (gpio)->reg_shift)
+#define GPIO_ISR(gpio) (0x03 << (gpio)->reg_shift)
+#define GPIO_PTR(gpio) (0x04 << (gpio)->reg_shift)
+
+/*
+ * struct adnp - per-device state of the Avionic Design N-bit GPIO expander
+ * @client: backing I2C slave device
+ * @gpio: gpio_chip registered with gpiolib
+ * @reg_shift: log2 of the number of 8-bit register banks
+ * @i2c_lock: serializes read/modify/write register sequences over I2C
+ * @domain: linear IRQ domain mapping GPIO offsets to virtual IRQs
+ * @irq_lock: held between irq_bus_lock/irq_bus_sync_unlock while the
+ *            cached IRQ configuration below is being updated
+ *
+ * The six u8 pointers below all index into a single allocation made in
+ * adnp_irq_setup(); each one is a num_regs-sized cache segment (enable
+ * mask, last seen level, and the four trigger-type masks).
+ */
+struct adnp {
+       struct i2c_client *client;
+       struct gpio_chip gpio;
+       unsigned int reg_shift;
+
+       struct mutex i2c_lock;
+
+       struct irq_domain *domain;
+       struct mutex irq_lock;
+
+       u8 *irq_enable;
+       u8 *irq_level;
+       u8 *irq_rise;
+       u8 *irq_fall;
+       u8 *irq_high;
+       u8 *irq_low;
+};
+
+/* Map a gpio_chip embedded in struct adnp back to its container. */
+static inline struct adnp *to_adnp(struct gpio_chip *chip)
+{
+       return container_of(chip, struct adnp, gpio);
+}
+
+/*
+ * Read one expander register over SMBus. On success the byte is stored
+ * in *value and 0 is returned; on failure a negative errno is returned
+ * and *value is left untouched.
+ */
+static int adnp_read(struct adnp *adnp, unsigned offset, uint8_t *value)
+{
+       int err;
+
+       err = i2c_smbus_read_byte_data(adnp->client, offset);
+       if (err < 0) {
+               dev_err(adnp->gpio.dev, "%s failed: %d\n",
+                       "i2c_smbus_read_byte_data()", err);
+               return err;
+       }
+
+       /* i2c_smbus_read_byte_data() returns the byte read when >= 0 */
+       *value = err;
+       return 0;
+}
+
+/*
+ * Write one expander register over SMBus. Returns 0 on success or a
+ * negative errno on failure.
+ */
+static int adnp_write(struct adnp *adnp, unsigned offset, uint8_t value)
+{
+       int err;
+
+       err = i2c_smbus_write_byte_data(adnp->client, offset, value);
+       if (err < 0) {
+               dev_err(adnp->gpio.dev, "%s failed: %d\n",
+                       "i2c_smbus_write_byte_data()", err);
+               return err;
+       }
+
+       return 0;
+}
+
+/*
+ * Read the current level of one GPIO from the pin-level register (PLR).
+ * Returns 1/0 for the pin state or a negative errno on I2C failure.
+ * NOTE(review): reads PLR without taking i2c_lock — a single register
+ * read, so presumably safe, but verify against concurrent RMW users.
+ */
+static int adnp_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+       struct adnp *adnp = to_adnp(chip);
+       unsigned int reg = offset >> adnp->reg_shift;
+       unsigned int pos = offset & 7;
+       u8 value;
+       int err;
+
+       err = adnp_read(adnp, GPIO_PLR(adnp) + reg, &value);
+       if (err < 0)
+               return err;
+
+       return (value & BIT(pos)) ? 1 : 0;
+}
+
+/*
+ * Read-modify-write one bit in the pin-level register. Caller must hold
+ * adnp->i2c_lock. I2C errors are silently dropped — gpiolib's set()
+ * callback returns void, so there is nowhere to report them.
+ */
+static void __adnp_gpio_set(struct adnp *adnp, unsigned offset, int value)
+{
+       unsigned int reg = offset >> adnp->reg_shift;
+       unsigned int pos = offset & 7;
+       int err;
+       u8 val;
+
+       err = adnp_read(adnp, GPIO_PLR(adnp) + reg, &val);
+       if (err < 0)
+               return;
+
+       if (value)
+               val |= BIT(pos);
+       else
+               val &= ~BIT(pos);
+
+       adnp_write(adnp, GPIO_PLR(adnp) + reg, val);
+}
+
+/* gpiolib set() callback: locked wrapper around __adnp_gpio_set(). */
+static void adnp_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+       struct adnp *adnp = to_adnp(chip);
+
+       mutex_lock(&adnp->i2c_lock);
+       __adnp_gpio_set(adnp, offset, value);
+       mutex_unlock(&adnp->i2c_lock);
+}
+
+/*
+ * Configure a GPIO as an input: clear the pin's DDR bit, then read DDR
+ * back to verify the hardware accepted the change (some pins may be
+ * hardwired as outputs). Returns 0 on success, -EACCES if the pin is
+ * still an output after the write, or a negative I2C error code.
+ */
+static int adnp_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+       struct adnp *adnp = to_adnp(chip);
+       unsigned int reg = offset >> adnp->reg_shift;
+       unsigned int pos = offset & 7;
+       u8 value;
+       int err;
+
+       mutex_lock(&adnp->i2c_lock);
+
+       err = adnp_read(adnp, GPIO_DDR(adnp) + reg, &value);
+       if (err < 0)
+               goto out;
+
+       value &= ~BIT(pos);
+
+       err = adnp_write(adnp, GPIO_DDR(adnp) + reg, value);
+       if (err < 0)
+               goto out;
+
+       err = adnp_read(adnp, GPIO_DDR(adnp) + reg, &value);
+       if (err < 0)
+               goto out;
+
+       /*
+        * Test the read-back register value, not 'err' (which is 0 after
+        * a successful adnp_read()), and bail out so the error code is
+        * not clobbered below. Mirrors the check in direction_output.
+        */
+       if (value & BIT(pos)) {
+               err = -EACCES;
+               goto out;
+       }
+
+       err = 0;
+
+out:
+       mutex_unlock(&adnp->i2c_lock);
+       return err;
+}
+
+/*
+ * Configure a GPIO as an output and drive an initial level. Sets the
+ * pin's DDR bit, reads it back to confirm the pin really became an
+ * output, then writes the requested level via __adnp_gpio_set() while
+ * still holding i2c_lock. Returns 0, -EPERM if the hardware rejected
+ * the direction change, or a negative I2C error code.
+ */
+static int adnp_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
+                                     int value)
+{
+       struct adnp *adnp = to_adnp(chip);
+       unsigned int reg = offset >> adnp->reg_shift;
+       unsigned int pos = offset & 7;
+       int err;
+       u8 val;
+
+       mutex_lock(&adnp->i2c_lock);
+
+       err = adnp_read(adnp, GPIO_DDR(adnp) + reg, &val);
+       if (err < 0)
+               goto out;
+
+       val |= BIT(pos);
+
+       err = adnp_write(adnp, GPIO_DDR(adnp) + reg, val);
+       if (err < 0)
+               goto out;
+
+       err = adnp_read(adnp, GPIO_DDR(adnp) + reg, &val);
+       if (err < 0)
+               goto out;
+
+       /* read-back check: pin must now report as an output */
+       if (!(val & BIT(pos))) {
+               err = -EPERM;
+               goto out;
+       }
+
+       __adnp_gpio_set(adnp, offset, value);
+       err = 0;
+
+out:
+       mutex_unlock(&adnp->i2c_lock);
+       return err;
+}
+
+/*
+ * debugfs dump: print direction, level, IRQ enable and pending state for
+ * every pin. The four registers of each bank are snapshotted under
+ * i2c_lock (taken and released once per bank), then formatted outside
+ * the lock. Aborts silently on the first I2C error.
+ */
+static void adnp_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
+{
+       struct adnp *adnp = to_adnp(chip);
+       unsigned int num_regs = 1 << adnp->reg_shift, i, j;
+       int err;
+
+       for (i = 0; i < num_regs; i++) {
+               u8 ddr, plr, ier, isr;
+
+               mutex_lock(&adnp->i2c_lock);
+
+               err = adnp_read(adnp, GPIO_DDR(adnp) + i, &ddr);
+               if (err < 0) {
+                       mutex_unlock(&adnp->i2c_lock);
+                       return;
+               }
+
+               err = adnp_read(adnp, GPIO_PLR(adnp) + i, &plr);
+               if (err < 0) {
+                       mutex_unlock(&adnp->i2c_lock);
+                       return;
+               }
+
+               err = adnp_read(adnp, GPIO_IER(adnp) + i, &ier);
+               if (err < 0) {
+                       mutex_unlock(&adnp->i2c_lock);
+                       return;
+               }
+
+               err = adnp_read(adnp, GPIO_ISR(adnp) + i, &isr);
+               if (err < 0) {
+                       mutex_unlock(&adnp->i2c_lock);
+                       return;
+               }
+
+               mutex_unlock(&adnp->i2c_lock);
+
+               for (j = 0; j < 8; j++) {
+                       unsigned int bit = (i << adnp->reg_shift) + j;
+                       const char *direction = "input ";
+                       const char *level = "low ";
+                       const char *interrupt = "disabled";
+                       const char *pending = "";
+
+                       if (ddr & BIT(j))
+                               direction = "output";
+
+                       if (plr & BIT(j))
+                               level = "high";
+
+                       if (ier & BIT(j))
+                               interrupt = "enabled ";
+
+                       if (isr & BIT(j))
+                               pending = "pending";
+
+                       seq_printf(s, "%2u: %s %s IRQ %s %s\n", bit,
+                                  direction, level, interrupt, pending);
+               }
+       }
+}
+
+/*
+ * Fill in the gpio_chip callbacks and properties. can_sleep is set
+ * because every accessor goes through blocking I2C transfers.
+ *
+ * NOTE(review): reg_shift = get_count_order(num_gpios) - 3 assumes
+ * num_gpios >= 8 and effectively rounds up to a power-of-two number of
+ * 8-bit banks; num_gpios comes straight from DT and is not validated
+ * here — TODO confirm acceptable values with the binding.
+ */
+static int adnp_gpio_setup(struct adnp *adnp, unsigned int num_gpios)
+{
+       struct gpio_chip *chip = &adnp->gpio;
+
+       adnp->reg_shift = get_count_order(num_gpios) - 3;
+
+       chip->direction_input = adnp_gpio_direction_input;
+       chip->direction_output = adnp_gpio_direction_output;
+       chip->get = adnp_gpio_get;
+       chip->set = adnp_gpio_set;
+       chip->can_sleep = 1;
+
+       if (IS_ENABLED(CONFIG_DEBUG_FS))
+               chip->dbg_show = adnp_gpio_dbg_show;
+
+       chip->base = -1;
+       chip->ngpio = num_gpios;
+       chip->label = adnp->client->name;
+       chip->dev = &adnp->client->dev;
+       chip->of_node = chip->dev->of_node;
+       chip->owner = THIS_MODULE;
+
+       return 0;
+}
+
+/*
+ * Threaded IRQ handler. For each register bank, snapshot PLR/ISR/IER
+ * under i2c_lock, then emulate edge-triggered interrupts by comparing
+ * the current level against the cached one (irq_level) and combine with
+ * the level-triggered masks. Dispatches one nested virq per pending bit.
+ * Banks whose registers fail to read are skipped for this pass.
+ */
+static irqreturn_t adnp_irq(int irq, void *data)
+{
+       struct adnp *adnp = data;
+       unsigned int num_regs, i;
+
+       num_regs = 1 << adnp->reg_shift;
+
+       for (i = 0; i < num_regs; i++) {
+               unsigned int base = i << adnp->reg_shift, bit;
+               u8 changed, level, isr, ier;
+               unsigned long pending;
+               int err;
+
+               mutex_lock(&adnp->i2c_lock);
+
+               err = adnp_read(adnp, GPIO_PLR(adnp) + i, &level);
+               if (err < 0) {
+                       mutex_unlock(&adnp->i2c_lock);
+                       continue;
+               }
+
+               err = adnp_read(adnp, GPIO_ISR(adnp) + i, &isr);
+               if (err < 0) {
+                       mutex_unlock(&adnp->i2c_lock);
+                       continue;
+               }
+
+               err = adnp_read(adnp, GPIO_IER(adnp) + i, &ier);
+               if (err < 0) {
+                       mutex_unlock(&adnp->i2c_lock);
+                       continue;
+               }
+
+               mutex_unlock(&adnp->i2c_lock);
+
+               /* determine pins that changed levels */
+               changed = level ^ adnp->irq_level[i];
+
+               /* compute edge-triggered interrupts */
+               pending = changed & ((adnp->irq_fall[i] & ~level) |
+                                    (adnp->irq_rise[i] & level));
+
+               /* add in level-triggered interrupts */
+               pending |= (adnp->irq_high[i] & level) |
+                          (adnp->irq_low[i] & ~level);
+
+               /* mask out non-pending and disabled interrupts */
+               pending &= isr & ier;
+
+               for_each_set_bit(bit, &pending, 8) {
+                       unsigned int virq;
+                       virq = irq_find_mapping(adnp->domain, base + bit);
+                       handle_nested_irq(virq);
+               }
+       }
+
+       return IRQ_HANDLED;
+}
+
+/* gpiolib to_irq() callback: lazily create the domain mapping. */
+static int adnp_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+       struct adnp *adnp = to_adnp(chip);
+       return irq_create_mapping(adnp->domain, offset);
+}
+
+/*
+ * Clear the cached enable bit; the hardware IER is written later in
+ * adnp_irq_bus_unlock(), after the irqchip bus lock is released.
+ */
+static void adnp_irq_mask(struct irq_data *data)
+{
+       struct adnp *adnp = irq_data_get_irq_chip_data(data);
+       unsigned int reg = data->hwirq >> adnp->reg_shift;
+       unsigned int pos = data->hwirq & 7;
+
+       adnp->irq_enable[reg] &= ~BIT(pos);
+}
+
+/* Set the cached enable bit; flushed in adnp_irq_bus_unlock(). */
+static void adnp_irq_unmask(struct irq_data *data)
+{
+       struct adnp *adnp = irq_data_get_irq_chip_data(data);
+       unsigned int reg = data->hwirq >> adnp->reg_shift;
+       unsigned int pos = data->hwirq & 7;
+
+       adnp->irq_enable[reg] |= BIT(pos);
+}
+
+/*
+ * Record the requested trigger type in the per-bank caches consumed by
+ * adnp_irq(). Edge triggers are emulated in software, so nothing is
+ * written to the hardware here. Unknown type bits simply clear all four
+ * masks; the function cannot fail.
+ */
+static int adnp_irq_set_type(struct irq_data *data, unsigned int type)
+{
+       struct adnp *adnp = irq_data_get_irq_chip_data(data);
+       unsigned int reg = data->hwirq >> adnp->reg_shift;
+       unsigned int pos = data->hwirq & 7;
+
+       if (type & IRQ_TYPE_EDGE_RISING)
+               adnp->irq_rise[reg] |= BIT(pos);
+       else
+               adnp->irq_rise[reg] &= ~BIT(pos);
+
+       if (type & IRQ_TYPE_EDGE_FALLING)
+               adnp->irq_fall[reg] |= BIT(pos);
+       else
+               adnp->irq_fall[reg] &= ~BIT(pos);
+
+       if (type & IRQ_TYPE_LEVEL_HIGH)
+               adnp->irq_high[reg] |= BIT(pos);
+       else
+               adnp->irq_high[reg] &= ~BIT(pos);
+
+       if (type & IRQ_TYPE_LEVEL_LOW)
+               adnp->irq_low[reg] |= BIT(pos);
+       else
+               adnp->irq_low[reg] &= ~BIT(pos);
+
+       return 0;
+}
+
+/*
+ * Slow-bus irqchip protocol: take irq_lock while the core updates the
+ * cached masks (mask/unmask/set_type run between lock and unlock).
+ */
+static void adnp_irq_bus_lock(struct irq_data *data)
+{
+       struct adnp *adnp = irq_data_get_irq_chip_data(data);
+
+       mutex_lock(&adnp->irq_lock);
+}
+
+/*
+ * Flush the cached IER bits to the hardware over I2C, then release the
+ * bus lock. This is where the deferred mask/unmask changes take effect.
+ */
+static void adnp_irq_bus_unlock(struct irq_data *data)
+{
+       struct adnp *adnp = irq_data_get_irq_chip_data(data);
+       unsigned int num_regs = 1 << adnp->reg_shift, i;
+
+       mutex_lock(&adnp->i2c_lock);
+
+       for (i = 0; i < num_regs; i++)
+               adnp_write(adnp, GPIO_IER(adnp) + i, adnp->irq_enable[i]);
+
+       mutex_unlock(&adnp->i2c_lock);
+       mutex_unlock(&adnp->irq_lock);
+}
+
+/* irqchip for the expander's GPIO interrupts (bus-lock style, sleeping) */
+static struct irq_chip adnp_irq_chip = {
+       .name = "gpio-adnp",
+       .irq_mask = adnp_irq_mask,
+       .irq_unmask = adnp_irq_unmask,
+       .irq_set_type = adnp_irq_set_type,
+       .irq_bus_lock = adnp_irq_bus_lock,
+       .irq_bus_sync_unlock = adnp_irq_bus_unlock,
+};
+
+/*
+ * irq_domain map callback: wire each new virq to adnp_irq_chip and mark
+ * it as handled by a nested (threaded) handler.
+ */
+static int adnp_irq_map(struct irq_domain *domain, unsigned int irq,
+                       irq_hw_number_t hwirq)
+{
+       irq_set_chip_data(irq, domain->host_data);
+       irq_set_chip(irq, &adnp_irq_chip);
+       irq_set_nested_thread(irq, true);
+
+       /* ARM needs IRQF_VALID for request_irq() to accept the descriptor */
+#ifdef CONFIG_ARM
+       set_irq_flags(irq, IRQF_VALID);
+#else
+       irq_set_noprobe(irq);
+#endif
+
+       return 0;
+}
+
+static const struct irq_domain_ops adnp_irq_domain_ops = {
+       .map = adnp_irq_map,
+       .xlate = irq_domain_xlate_twocell,
+};
+
+/*
+ * Set up interrupt-controller support: allocate the IRQ state caches,
+ * snapshot initial pin levels (for edge emulation), disable all hardware
+ * interrupts, create the IRQ domain and request the upstream IRQ as a
+ * threaded (oneshot) handler. On failure everything acquired here is
+ * released again, so the caller must NOT call adnp_irq_teardown() for a
+ * failed setup. Returns 0 or a negative errno.
+ */
+static int adnp_irq_setup(struct adnp *adnp)
+{
+       unsigned int num_regs = 1 << adnp->reg_shift, i;
+       struct gpio_chip *chip = &adnp->gpio;
+       int err;
+
+       mutex_init(&adnp->irq_lock);
+
+       /*
+        * Allocate memory to keep track of the current level and trigger
+        * modes of the interrupts. To avoid multiple allocations, a single
+        * large buffer is allocated and pointers are setup to point at the
+        * corresponding offsets. For consistency, the layout of the buffer
+        * is chosen to match the register layout of the hardware in that
+        * each segment contains the corresponding bits for all interrupts.
+        */
+       adnp->irq_enable = devm_kzalloc(chip->dev, num_regs * 6, GFP_KERNEL);
+       if (!adnp->irq_enable)
+               return -ENOMEM;
+
+       adnp->irq_level = adnp->irq_enable + (num_regs * 1);
+       adnp->irq_rise = adnp->irq_enable + (num_regs * 2);
+       adnp->irq_fall = adnp->irq_enable + (num_regs * 3);
+       adnp->irq_high = adnp->irq_enable + (num_regs * 4);
+       adnp->irq_low = adnp->irq_enable + (num_regs * 5);
+
+       for (i = 0; i < num_regs; i++) {
+               /*
+                * Read the initial level of all pins to allow the emulation
+                * of edge triggered interrupts.
+                */
+               err = adnp_read(adnp, GPIO_PLR(adnp) + i, &adnp->irq_level[i]);
+               if (err < 0)
+                       return err;
+
+               /* disable all interrupts */
+               err = adnp_write(adnp, GPIO_IER(adnp) + i, 0);
+               if (err < 0)
+                       return err;
+
+               adnp->irq_enable[i] = 0x00;
+       }
+
+       adnp->domain = irq_domain_add_linear(chip->of_node, chip->ngpio,
+                                            &adnp_irq_domain_ops, adnp);
+       /* irq_domain_add_linear() returns NULL on failure - check it */
+       if (!adnp->domain)
+               return -ENOMEM;
+
+       err = request_threaded_irq(adnp->client->irq, NULL, adnp_irq,
+                                  IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+                                  dev_name(chip->dev), adnp);
+       if (err != 0) {
+               dev_err(chip->dev, "can't request IRQ#%d: %d\n",
+                       adnp->client->irq, err);
+               goto error;
+       }
+
+       chip->to_irq = adnp_gpio_to_irq;
+       return 0;
+
+error:
+       irq_domain_remove(adnp->domain);
+       return err;
+}
+
+/*
+ * Undo a *successful* adnp_irq_setup(): free the upstream IRQ, dispose
+ * all existing virq mappings, then remove the domain. Must not be called
+ * after a failed setup (the IRQ was never requested and the domain has
+ * already been removed there).
+ */
+static void adnp_irq_teardown(struct adnp *adnp)
+{
+       unsigned int irq, i;
+
+       free_irq(adnp->client->irq, adnp);
+
+       for (i = 0; i < adnp->gpio.ngpio; i++) {
+               irq = irq_find_mapping(adnp->domain, i);
+               if (irq > 0)
+                       irq_dispose_mapping(irq);
+       }
+
+       irq_domain_remove(adnp->domain);
+}
+
+/*
+ * I2C probe: read the GPIO count from DT, map the upstream interrupt,
+ * allocate device state, set up the gpio_chip and (when the node is an
+ * interrupt controller) the IRQ machinery, then register with gpiolib.
+ */
+static __devinit int adnp_i2c_probe(struct i2c_client *client,
+                                   const struct i2c_device_id *id)
+{
+       struct device_node *np = client->dev.of_node;
+       struct adnp *adnp;
+       u32 num_gpios;
+       int err;
+
+       err = of_property_read_u32(np, "nr-gpios", &num_gpios);
+       if (err < 0)
+               return err;
+
+       client->irq = irq_of_parse_and_map(np, 0);
+       if (!client->irq)
+               return -EPROBE_DEFER;
+
+       adnp = devm_kzalloc(&client->dev, sizeof(*adnp), GFP_KERNEL);
+       if (!adnp)
+               return -ENOMEM;
+
+       mutex_init(&adnp->i2c_lock);
+       adnp->client = client;
+
+       err = adnp_gpio_setup(adnp, num_gpios);
+       if (err < 0)
+               return err;
+
+       if (of_find_property(np, "interrupt-controller", NULL)) {
+               err = adnp_irq_setup(adnp);
+               /*
+                * adnp_irq_setup() cleans up after itself on failure, so
+                * return directly instead of jumping to the teardown path:
+                * calling adnp_irq_teardown() here would free_irq() an IRQ
+                * that was never requested and remove the domain a second
+                * time.
+                */
+               if (err < 0)
+                       return err;
+       }
+
+       err = gpiochip_add(&adnp->gpio);
+       if (err < 0)
+               goto teardown;
+
+       i2c_set_clientdata(client, adnp);
+       return 0;
+
+teardown:
+       /* only reached after a successful adnp_irq_setup() */
+       if (of_find_property(np, "interrupt-controller", NULL))
+               adnp_irq_teardown(adnp);
+
+       return err;
+}
+
+/*
+ * I2C remove: unregister the gpio_chip (which can fail if GPIOs are
+ * still requested — in that case bail out and leave the IRQ machinery
+ * alive), then tear down IRQ support when it was set up in probe.
+ * Memory is devm-managed and freed by the driver core.
+ */
+static __devexit int adnp_i2c_remove(struct i2c_client *client)
+{
+       struct adnp *adnp = i2c_get_clientdata(client);
+       struct device_node *np = client->dev.of_node;
+       int err;
+
+       err = gpiochip_remove(&adnp->gpio);
+       if (err < 0) {
+               dev_err(&client->dev, "%s failed: %d\n", "gpiochip_remove()",
+                       err);
+               return err;
+       }
+
+       if (of_find_property(np, "interrupt-controller", NULL))
+               adnp_irq_teardown(adnp);
+
+       return 0;
+}
+
+/*
+ * Device-ID tables. These must not be __devinitconst: MODULE_DEVICE_TABLE()
+ * emits a non-init alias that references the table, so marking the table
+ * init-discarded causes a modpost section mismatch / build breakage when
+ * the driver is built as a module.
+ */
+static const struct i2c_device_id adnp_i2c_id[] = {
+       { "gpio-adnp" },
+       { },
+};
+MODULE_DEVICE_TABLE(i2c, adnp_i2c_id);
+
+static const struct of_device_id adnp_of_match[] = {
+       { .compatible = "ad,gpio-adnp", },
+       { },
+};
+MODULE_DEVICE_TABLE(of, adnp_of_match);
+
+static struct i2c_driver adnp_i2c_driver = {
+       .driver = {
+               .name = "gpio-adnp",
+               .owner = THIS_MODULE,
+               .of_match_table = of_match_ptr(adnp_of_match),
+       },
+       .probe = adnp_i2c_probe,
+       .remove = __devexit_p(adnp_i2c_remove),
+       .id_table = adnp_i2c_id,
+};
+
+MODULE_DESCRIPTION("Avionic Design N-bit GPIO expander");
+MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
+MODULE_LICENSE("GPL");
index ae5d7f12ce661f49900f9be1b7aec0d7374bdf90..eeedad42913e3074703082ac434588036f9954ea 100644 (file)
@@ -483,19 +483,7 @@ static struct i2c_driver adp5588_gpio_driver = {
        .id_table = adp5588_gpio_id,
 };
 
-static int __init adp5588_gpio_init(void)
-{
-       return i2c_add_driver(&adp5588_gpio_driver);
-}
-
-module_init(adp5588_gpio_init);
-
-static void __exit adp5588_gpio_exit(void)
-{
-       i2c_del_driver(&adp5588_gpio_driver);
-}
-
-module_exit(adp5588_gpio_exit);
+module_i2c_driver(adp5588_gpio_driver);
 
 MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
 MODULE_DESCRIPTION("GPIO ADP5588 Driver");
index e4cc7eb69bb2488e36c3db104feab9858fa6a764..aba97abda77cff7856a41aef420aa5735aacb4e3 100644 (file)
@@ -310,7 +310,7 @@ static int bt8xxgpio_resume(struct pci_dev *pdev)
 #define bt8xxgpio_resume NULL
 #endif /* CONFIG_PM */
 
-static struct pci_device_id bt8xxgpio_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(bt8xxgpio_pci_tbl) = {
        { PCI_DEVICE(PCI_VENDOR_ID_BROOKTREE, PCI_DEVICE_ID_BT848) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROOKTREE, PCI_DEVICE_ID_BT849) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROOKTREE, PCI_DEVICE_ID_BT878) },
index 56dd047d58443d911ebe4ac4ef85e44a18f149f8..24b8c2974047967b3fe4e4caff32e5b7d9dda17e 100644 (file)
@@ -207,7 +207,7 @@ static int __devinit da9052_gpio_probe(struct platform_device *pdev)
        struct da9052_pdata *pdata;
        int ret;
 
-       gpio = kzalloc(sizeof(*gpio), GFP_KERNEL);
+       gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL);
        if (gpio == NULL)
                return -ENOMEM;
 
@@ -221,28 +221,19 @@ static int __devinit da9052_gpio_probe(struct platform_device *pdev)
        ret = gpiochip_add(&gpio->gp);
        if (ret < 0) {
                dev_err(&pdev->dev, "Could not register gpiochip, %d\n", ret);
-               goto err_mem;
+               return ret;
        }
 
        platform_set_drvdata(pdev, gpio);
 
        return 0;
-
-err_mem:
-       kfree(gpio);
-       return ret;
 }
 
 static int __devexit da9052_gpio_remove(struct platform_device *pdev)
 {
        struct da9052_gpio *gpio = platform_get_drvdata(pdev);
-       int ret;
-
-       ret = gpiochip_remove(&gpio->gp);
-       if (ret == 0)
-               kfree(gpio);
 
-       return ret;
+       return gpiochip_remove(&gpio->gp);
 }
 
 static struct platform_driver da9052_gpio_driver = {
index 3d000169285d69cb1993882ed3f573cbda0d2b58..17df6db5dca7df56729af877b561d99f8892a15e 100644 (file)
@@ -366,7 +366,7 @@ static int __init davinci_gpio_irq_setup(void)
                       PTR_ERR(clk));
                return PTR_ERR(clk);
        }
-       clk_enable(clk);
+       clk_prepare_enable(clk);
 
        /* Arrange gpio_to_irq() support, handling either direct IRQs or
         * banked IRQs.  Having GPIOs in the first GPIO bank use direct
index ec48ed5126284d4f41303a5ac4e654f856ca4b7a..efb4c2d0d132912a05b63372ee479a71db4e7377 100644 (file)
@@ -85,22 +85,16 @@ static inline void em_gio_write(struct em_gio_priv *p, int offs,
                iowrite32(value, p->base1 + (offs - GIO_IDT0));
 }
 
-static inline struct em_gio_priv *irq_to_priv(struct irq_data *d)
-{
-       struct irq_chip *chip = irq_data_get_irq_chip(d);
-       return container_of(chip, struct em_gio_priv, irq_chip);
-}
-
 static void em_gio_irq_disable(struct irq_data *d)
 {
-       struct em_gio_priv *p = irq_to_priv(d);
+       struct em_gio_priv *p = irq_data_get_irq_chip_data(d);
 
        em_gio_write(p, GIO_IDS, BIT(irqd_to_hwirq(d)));
 }
 
 static void em_gio_irq_enable(struct irq_data *d)
 {
-       struct em_gio_priv *p = irq_to_priv(d);
+       struct em_gio_priv *p = irq_data_get_irq_chip_data(d);
 
        em_gio_write(p, GIO_IEN, BIT(irqd_to_hwirq(d)));
 }
@@ -118,7 +112,7 @@ static unsigned char em_gio_sense_table[IRQ_TYPE_SENSE_MASK + 1] = {
 static int em_gio_irq_set_type(struct irq_data *d, unsigned int type)
 {
        unsigned char value = em_gio_sense_table[type & IRQ_TYPE_SENSE_MASK];
-       struct em_gio_priv *p = irq_to_priv(d);
+       struct em_gio_priv *p = irq_data_get_irq_chip_data(d);
        unsigned int reg, offset, shift;
        unsigned long flags;
        unsigned long tmp;
index ed94b4ea72e9324cdc70a1ad4551f0e85fca89d0..3644e0dcb3dd14fc26ee1f40735007eafd8c6d2a 100644 (file)
@@ -113,7 +113,8 @@ static const char *gpi_p3_names[LPC32XX_GPI_P3_MAX] = {
         NULL,    NULL,    NULL,   "gpi15",
        "gpi16", "gpi17", "gpi18", "gpi19",
        "gpi20", "gpi21", "gpi22", "gpi23",
-       "gpi24", "gpi25", "gpi26", "gpi27"
+       "gpi24", "gpi25", "gpi26", "gpi27",
+       "gpi28"
 };
 
 static const char *gpo_p3_names[LPC32XX_GPO_P3_MAX] = {
index 2738cc44d636b06b55836176992430ad006906a9..0ab700046a23eec7f5e6446ea05dc6382ff28e86 100644 (file)
@@ -91,10 +91,9 @@ static int mc9s08dz60_direction_output(struct gpio_chip *gc,
 static int mc9s08dz60_probe(struct i2c_client *client,
                            const struct i2c_device_id *id)
 {
-       int ret = 0;
        struct mc9s08dz60 *mc9s;
 
-       mc9s = kzalloc(sizeof(*mc9s), GFP_KERNEL);
+       mc9s = devm_kzalloc(&client->dev, sizeof(*mc9s), GFP_KERNEL);
        if (!mc9s)
                return -ENOMEM;
 
@@ -110,30 +109,16 @@ static int mc9s08dz60_probe(struct i2c_client *client,
        mc9s->client = client;
        i2c_set_clientdata(client, mc9s);
 
-       ret = gpiochip_add(&mc9s->chip);
-       if (ret)
-               goto error;
-
-       return 0;
-
- error:
-       kfree(mc9s);
-       return ret;
+       return gpiochip_add(&mc9s->chip);
 }
 
 static int mc9s08dz60_remove(struct i2c_client *client)
 {
        struct mc9s08dz60 *mc9s;
-       int ret;
 
        mc9s = i2c_get_clientdata(client);
 
-       ret = gpiochip_remove(&mc9s->chip);
-       if (!ret)
-               kfree(mc9s);
-
-       return ret;
-
+       return gpiochip_remove(&mc9s->chip);
 }
 
 static const struct i2c_device_id mc9s08dz60_id[] = {
index db01f151d41c428f220f5a0b9fad9a4d6f427f18..6a29ee1847be92ad0038d0c0f828c1e0ac92fcb0 100644 (file)
@@ -87,8 +87,7 @@ struct ioh_gpio_reg_data {
  * @gpio_use_sel:              Save GPIO_USE_SEL1~4 register for PM
  * @ch:                                Indicate GPIO channel
  * @irq_base:          Save base of IRQ number for interrupt
- * @spinlock:          Used for register access protection in
- *                             interrupt context ioh_irq_type and PM;
+ * @spinlock:          Used for register access protection
  */
 struct ioh_gpio {
        void __iomem *base;
@@ -97,7 +96,6 @@ struct ioh_gpio {
        struct gpio_chip gpio;
        struct ioh_gpio_reg_data ioh_gpio_reg;
        u32 gpio_use_sel;
-       struct mutex lock;
        int ch;
        int irq_base;
        spinlock_t spinlock;
@@ -109,8 +107,9 @@ static void ioh_gpio_set(struct gpio_chip *gpio, unsigned nr, int val)
 {
        u32 reg_val;
        struct ioh_gpio *chip = container_of(gpio, struct ioh_gpio, gpio);
+       unsigned long flags;
 
-       mutex_lock(&chip->lock);
+       spin_lock_irqsave(&chip->spinlock, flags);
        reg_val = ioread32(&chip->reg->regs[chip->ch].po);
        if (val)
                reg_val |= (1 << nr);
@@ -118,7 +117,7 @@ static void ioh_gpio_set(struct gpio_chip *gpio, unsigned nr, int val)
                reg_val &= ~(1 << nr);
 
        iowrite32(reg_val, &chip->reg->regs[chip->ch].po);
-       mutex_unlock(&chip->lock);
+       spin_unlock_irqrestore(&chip->spinlock, flags);
 }
 
 static int ioh_gpio_get(struct gpio_chip *gpio, unsigned nr)
@@ -134,8 +133,9 @@ static int ioh_gpio_direction_output(struct gpio_chip *gpio, unsigned nr,
        struct ioh_gpio *chip = container_of(gpio, struct ioh_gpio, gpio);
        u32 pm;
        u32 reg_val;
+       unsigned long flags;
 
-       mutex_lock(&chip->lock);
+       spin_lock_irqsave(&chip->spinlock, flags);
        pm = ioread32(&chip->reg->regs[chip->ch].pm) &
                                        ((1 << num_ports[chip->ch]) - 1);
        pm |= (1 << nr);
@@ -148,7 +148,7 @@ static int ioh_gpio_direction_output(struct gpio_chip *gpio, unsigned nr,
                reg_val &= ~(1 << nr);
        iowrite32(reg_val, &chip->reg->regs[chip->ch].po);
 
-       mutex_unlock(&chip->lock);
+       spin_unlock_irqrestore(&chip->spinlock, flags);
 
        return 0;
 }
@@ -157,13 +157,14 @@ static int ioh_gpio_direction_input(struct gpio_chip *gpio, unsigned nr)
 {
        struct ioh_gpio *chip = container_of(gpio, struct ioh_gpio, gpio);
        u32 pm;
+       unsigned long flags;
 
-       mutex_lock(&chip->lock);
+       spin_lock_irqsave(&chip->spinlock, flags);
        pm = ioread32(&chip->reg->regs[chip->ch].pm) &
                                ((1 << num_ports[chip->ch]) - 1);
        pm &= ~(1 << nr);
        iowrite32(pm, &chip->reg->regs[chip->ch].pm);
-       mutex_unlock(&chip->lock);
+       spin_unlock_irqrestore(&chip->spinlock, flags);
 
        return 0;
 }
@@ -447,7 +448,6 @@ static int __devinit ioh_gpio_probe(struct pci_dev *pdev,
                chip->base = base;
                chip->reg = chip->base;
                chip->ch = i;
-               mutex_init(&chip->lock);
                spin_lock_init(&chip->spinlock);
                ioh_gpio_setup(chip, num_ports[i]);
                ret = gpiochip_add(&chip->gpio);
index 5cb1227d69cfe596d53f285ea3ddd57214eae7a0..38305beb437572d9de5cbb6389c37524bb3423a2 100644 (file)
@@ -317,9 +317,7 @@ static void msm_summary_irq_handler(unsigned int irq, struct irq_desc *desc)
 
        chained_irq_enter(chip, desc);
 
-       for (i = find_first_bit(msm_gpio.enabled_irqs, NR_GPIO_IRQS);
-            i < NR_GPIO_IRQS;
-            i = find_next_bit(msm_gpio.enabled_irqs, NR_GPIO_IRQS, i + 1)) {
+       for_each_set_bit(i, msm_gpio.enabled_irqs, NR_GPIO_IRQS) {
                if (readl(GPIO_INTR_STATUS(i)) & BIT(INTR_STATUS))
                        generic_handle_irq(msm_gpio_to_irq(&msm_gpio.gpio_chip,
                                                           i));
index 076e236d0da74b71734a3159bf287b4e11e04b0a..16af35cd2b10e1fad8668c6b5fee92d23ddb68ed 100644 (file)
 #include <linux/gpio.h>
 #include <linux/i2c.h>
 #include <linux/i2c/pcf857x.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
 #include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
 
 
 static const struct i2c_device_id pcf857x_id[] = {
@@ -60,7 +65,12 @@ struct pcf857x {
        struct gpio_chip        chip;
        struct i2c_client       *client;
        struct mutex            lock;           /* protect 'out' */
+       struct work_struct      work;           /* irq demux work */
+       struct irq_domain       *irq_domain;    /* for irq demux  */
+       spinlock_t              slock;          /* protect irq demux */
        unsigned                out;            /* software latch */
+       unsigned                status;         /* current status */
+       int                     irq;            /* real irq number */
 
        int (*write)(struct i2c_client *client, unsigned data);
        int (*read)(struct i2c_client *client);
@@ -150,6 +160,100 @@ static void pcf857x_set(struct gpio_chip *chip, unsigned offset, int value)
 
 /*-------------------------------------------------------------------------*/
 
+static int pcf857x_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+       struct pcf857x *gpio = container_of(chip, struct pcf857x, chip);
+
+       return irq_create_mapping(gpio->irq_domain, offset);
+}
+
+static void pcf857x_irq_demux_work(struct work_struct *work)
+{
+       struct pcf857x *gpio = container_of(work,
+                                              struct pcf857x,
+                                              work);
+       unsigned long change, i, status, flags;
+
+       status = gpio->read(gpio->client);
+
+       spin_lock_irqsave(&gpio->slock, flags);
+
+       change = gpio->status ^ status;
+       for_each_set_bit(i, &change, gpio->chip.ngpio)
+               generic_handle_irq(irq_find_mapping(gpio->irq_domain, i));
+       gpio->status = status;
+
+       spin_unlock_irqrestore(&gpio->slock, flags);
+}
+
+static irqreturn_t pcf857x_irq_demux(int irq, void *data)
+{
+       struct pcf857x  *gpio = data;
+
+       /*
+        * pcf857x can't read/write data here,
+        * since i2c data access might go to sleep.
+        */
+       schedule_work(&gpio->work);
+
+       return IRQ_HANDLED;
+}
+
+static int pcf857x_irq_domain_map(struct irq_domain *domain, unsigned int virq,
+                                irq_hw_number_t hw)
+{
+       irq_set_chip_and_handler(virq,
+                                &dummy_irq_chip,
+                                handle_level_irq);
+       return 0;
+}
+
+static struct irq_domain_ops pcf857x_irq_domain_ops = {
+       .map    = pcf857x_irq_domain_map,
+};
+
+static void pcf857x_irq_domain_cleanup(struct pcf857x *gpio)
+{
+       if (gpio->irq_domain)
+               irq_domain_remove(gpio->irq_domain);
+
+       if (gpio->irq)
+               free_irq(gpio->irq, gpio);
+}
+
+static int pcf857x_irq_domain_init(struct pcf857x *gpio,
+                                  struct pcf857x_platform_data *pdata,
+                                  struct device *dev)
+{
+       int status;
+
+       gpio->irq_domain = irq_domain_add_linear(dev->of_node,
+                                                gpio->chip.ngpio,
+                                                &pcf857x_irq_domain_ops,
+                                                NULL);
+       if (!gpio->irq_domain)
+               goto fail;
+
+       /* enable real irq */
+       status = request_irq(pdata->irq, pcf857x_irq_demux, 0,
+                            dev_name(dev), gpio);
+       if (status)
+               goto fail;
+
+       /* enable gpio_to_irq() */
+       INIT_WORK(&gpio->work, pcf857x_irq_demux_work);
+       gpio->chip.to_irq       = pcf857x_to_irq;
+       gpio->irq               = pdata->irq;
+
+       return 0;
+
+fail:
+       pcf857x_irq_domain_cleanup(gpio);
+       return -EINVAL;
+}
+
+/*-------------------------------------------------------------------------*/
+
 static int pcf857x_probe(struct i2c_client *client,
                         const struct i2c_device_id *id)
 {
@@ -168,6 +272,7 @@ static int pcf857x_probe(struct i2c_client *client,
                return -ENOMEM;
 
        mutex_init(&gpio->lock);
+       spin_lock_init(&gpio->slock);
 
        gpio->chip.base                 = pdata ? pdata->gpio_base : -1;
        gpio->chip.can_sleep            = 1;
@@ -179,6 +284,15 @@ static int pcf857x_probe(struct i2c_client *client,
        gpio->chip.direction_output     = pcf857x_output;
        gpio->chip.ngpio                = id->driver_data;
 
+       /* enable gpio_to_irq() if platform has settings */
+       if (pdata && pdata->irq) {
+               status = pcf857x_irq_domain_init(gpio, pdata, &client->dev);
+               if (status < 0) {
+                       dev_err(&client->dev, "irq_domain init failed\n");
+                       goto fail;
+               }
+       }
+
        /* NOTE:  the OnSemi jlc1562b is also largely compatible with
         * these parts, notably for output.  It has a low-resolution
         * DAC instead of pin change IRQs; and its inputs can be the
@@ -248,6 +362,7 @@ static int pcf857x_probe(struct i2c_client *client,
         * all-ones reset state.  Otherwise it flags pins to be driven low.
         */
        gpio->out = pdata ? ~pdata->n_latch : ~0;
+       gpio->status = gpio->out;
 
        status = gpiochip_add(&gpio->chip);
        if (status < 0)
@@ -278,6 +393,10 @@ static int pcf857x_probe(struct i2c_client *client,
 fail:
        dev_dbg(&client->dev, "probe error %d for '%s'\n",
                        status, client->name);
+
+       if (pdata && pdata->irq)
+               pcf857x_irq_domain_cleanup(gpio);
+
        kfree(gpio);
        return status;
 }
@@ -299,6 +418,9 @@ static int pcf857x_remove(struct i2c_client *client)
                }
        }
 
+       if (pdata && pdata->irq)
+               pcf857x_irq_domain_cleanup(gpio);
+
        status = gpiochip_remove(&gpio->chip);
        if (status == 0)
                kfree(gpio);
index 139ad3e200113f29a679796324b756846065045e..4ad0c4f9171c18f4c7baa2cf1f253b1ea08af495 100644 (file)
@@ -92,9 +92,7 @@ struct pch_gpio_reg_data {
  * @lock:                      Used for register access protection
  * @irq_base:          Save base of IRQ number for interrupt
  * @ioh:               IOH ID
- * @spinlock:          Used for register access protection in
- *                             interrupt context pch_irq_mask,
- *                             pch_irq_unmask and pch_irq_type;
+ * @spinlock:          Used for register access protection
  */
 struct pch_gpio {
        void __iomem *base;
@@ -102,7 +100,6 @@ struct pch_gpio {
        struct device *dev;
        struct gpio_chip gpio;
        struct pch_gpio_reg_data pch_gpio_reg;
-       struct mutex lock;
        int irq_base;
        enum pch_type_t ioh;
        spinlock_t spinlock;
@@ -112,8 +109,9 @@ static void pch_gpio_set(struct gpio_chip *gpio, unsigned nr, int val)
 {
        u32 reg_val;
        struct pch_gpio *chip = container_of(gpio, struct pch_gpio, gpio);
+       unsigned long flags;
 
-       mutex_lock(&chip->lock);
+       spin_lock_irqsave(&chip->spinlock, flags);
        reg_val = ioread32(&chip->reg->po);
        if (val)
                reg_val |= (1 << nr);
@@ -121,7 +119,7 @@ static void pch_gpio_set(struct gpio_chip *gpio, unsigned nr, int val)
                reg_val &= ~(1 << nr);
 
        iowrite32(reg_val, &chip->reg->po);
-       mutex_unlock(&chip->lock);
+       spin_unlock_irqrestore(&chip->spinlock, flags);
 }
 
 static int pch_gpio_get(struct gpio_chip *gpio, unsigned nr)
@@ -137,8 +135,9 @@ static int pch_gpio_direction_output(struct gpio_chip *gpio, unsigned nr,
        struct pch_gpio *chip = container_of(gpio, struct pch_gpio, gpio);
        u32 pm;
        u32 reg_val;
+       unsigned long flags;
 
-       mutex_lock(&chip->lock);
+       spin_lock_irqsave(&chip->spinlock, flags);
        pm = ioread32(&chip->reg->pm) & ((1 << gpio_pins[chip->ioh]) - 1);
        pm |= (1 << nr);
        iowrite32(pm, &chip->reg->pm);
@@ -149,8 +148,7 @@ static int pch_gpio_direction_output(struct gpio_chip *gpio, unsigned nr,
        else
                reg_val &= ~(1 << nr);
        iowrite32(reg_val, &chip->reg->po);
-
-       mutex_unlock(&chip->lock);
+       spin_unlock_irqrestore(&chip->spinlock, flags);
 
        return 0;
 }
@@ -159,12 +157,13 @@ static int pch_gpio_direction_input(struct gpio_chip *gpio, unsigned nr)
 {
        struct pch_gpio *chip = container_of(gpio, struct pch_gpio, gpio);
        u32 pm;
+       unsigned long flags;
 
-       mutex_lock(&chip->lock);
+       spin_lock_irqsave(&chip->spinlock, flags);
        pm = ioread32(&chip->reg->pm) & ((1 << gpio_pins[chip->ioh]) - 1);
        pm &= ~(1 << nr);
        iowrite32(pm, &chip->reg->pm);
-       mutex_unlock(&chip->lock);
+       spin_unlock_irqrestore(&chip->spinlock, flags);
 
        return 0;
 }
@@ -387,7 +386,6 @@ static int __devinit pch_gpio_probe(struct pci_dev *pdev,
 
        chip->reg = chip->base;
        pci_set_drvdata(pdev, chip);
-       mutex_init(&chip->lock);
        spin_lock_init(&chip->spinlock);
        pch_gpio_setup(chip);
        ret = gpiochip_add(&chip->gpio);
index 9528779ca463154334fa54db9a22b6f852cddd0f..98d52cb3fd1a4c423daa47476a1082f954a9c1ec 100644 (file)
@@ -370,12 +370,10 @@ static void pxa_gpio_demux_handler(unsigned int irq, struct irq_desc *desc)
                        gedr = gedr & c->irq_mask;
                        writel_relaxed(gedr, c->regbase + GEDR_OFFSET);
 
-                       n = find_first_bit(&gedr, BITS_PER_LONG);
-                       while (n < BITS_PER_LONG) {
+                       for_each_set_bit(n, &gedr, BITS_PER_LONG) {
                                loop = 1;
 
                                generic_handle_irq(gpio_to_irq(gpio_base + n));
-                               n = find_next_bit(&gedr, BITS_PER_LONG, n + 1);
                        }
                }
        } while (loop);
@@ -589,19 +587,12 @@ static int __devinit pxa_gpio_probe(struct platform_device *pdev)
                iounmap(gpio_reg_base);
                return PTR_ERR(clk);
        }
-       ret = clk_prepare(clk);
+       ret = clk_prepare_enable(clk);
        if (ret) {
                clk_put(clk);
                iounmap(gpio_reg_base);
                return ret;
        }
-       ret = clk_enable(clk);
-       if (ret) {
-               clk_unprepare(clk);
-               clk_put(clk);
-               iounmap(gpio_reg_base);
-               return ret;
-       }
 
        /* Initialize GPIO chips */
        info = dev_get_platdata(&pdev->dev);
index 9d9891f7a607f44c69d5b2598461ba96efb3a616..e25f73130b40b6676be057929945a0d3a7900489 100644 (file)
@@ -270,7 +270,7 @@ static void sdv_gpio_remove(struct pci_dev *pdev)
        kfree(sd);
 }
 
-static struct pci_device_id sdv_gpio_pci_ids[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(sdv_gpio_pci_ids) = {
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_SDV_GPIO) },
        { 0, },
 };
index a4f73534394e4f37e13b6998e50c64dde43b7f3e..eb3e215d23963f0c91b1612f652664aaa515408c 100644 (file)
@@ -311,11 +311,9 @@ static int sx150x_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
 
 static void sx150x_irq_mask(struct irq_data *d)
 {
-       struct irq_chip *ic = irq_data_get_irq_chip(d);
-       struct sx150x_chip *chip;
+       struct sx150x_chip *chip = irq_data_get_irq_chip_data(d);
        unsigned n;
 
-       chip = container_of(ic, struct sx150x_chip, irq_chip);
        n = d->irq - chip->irq_base;
        chip->irq_masked |= (1 << n);
        chip->irq_update = n;
@@ -323,27 +321,22 @@ static void sx150x_irq_mask(struct irq_data *d)
 
 static void sx150x_irq_unmask(struct irq_data *d)
 {
-       struct irq_chip *ic = irq_data_get_irq_chip(d);
-       struct sx150x_chip *chip;
+       struct sx150x_chip *chip = irq_data_get_irq_chip_data(d);
        unsigned n;
 
-       chip = container_of(ic, struct sx150x_chip, irq_chip);
        n = d->irq - chip->irq_base;
-
        chip->irq_masked &= ~(1 << n);
        chip->irq_update = n;
 }
 
 static int sx150x_irq_set_type(struct irq_data *d, unsigned int flow_type)
 {
-       struct irq_chip *ic = irq_data_get_irq_chip(d);
-       struct sx150x_chip *chip;
+       struct sx150x_chip *chip = irq_data_get_irq_chip_data(d);
        unsigned n, val = 0;
 
        if (flow_type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW))
                return -EINVAL;
 
-       chip = container_of(ic, struct sx150x_chip, irq_chip);
        n = d->irq - chip->irq_base;
 
        if (flow_type & IRQ_TYPE_EDGE_RISING)
@@ -391,22 +384,16 @@ static irqreturn_t sx150x_irq_thread_fn(int irq, void *dev_id)
 
 static void sx150x_irq_bus_lock(struct irq_data *d)
 {
-       struct irq_chip *ic = irq_data_get_irq_chip(d);
-       struct sx150x_chip *chip;
-
-       chip = container_of(ic, struct sx150x_chip, irq_chip);
+       struct sx150x_chip *chip = irq_data_get_irq_chip_data(d);
 
        mutex_lock(&chip->lock);
 }
 
 static void sx150x_irq_bus_sync_unlock(struct irq_data *d)
 {
-       struct irq_chip *ic = irq_data_get_irq_chip(d);
-       struct sx150x_chip *chip;
+       struct sx150x_chip *chip = irq_data_get_irq_chip_data(d);
        unsigned n;
 
-       chip = container_of(ic, struct sx150x_chip, irq_chip);
-
        if (chip->irq_update == NO_UPDATE_PENDING)
                goto out;
 
@@ -551,6 +538,7 @@ static int sx150x_install_irq_chip(struct sx150x_chip *chip,
 
        for (n = 0; n < chip->dev_cfg->ngpios; ++n) {
                irq = irq_base + n;
+               irq_set_chip_data(irq, chip);
                irq_set_chip_and_handler(irq, &chip->irq_chip, handle_edge_irq);
                irq_set_nested_thread(irq, 1);
 #ifdef CONFIG_ARM
index 2a82e8999a4299553a4ebeec273a69b55663df61..1e48317e70fb045838b8053cc135055e7765c4ac 100644 (file)
@@ -11,7 +11,9 @@
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/gpio.h>
+#include <linux/of.h>
 #include <linux/irq.h>
+#include <linux/irqdomain.h>
 #include <linux/interrupt.h>
 #include <linux/mfd/tc3589x.h>
 
@@ -29,6 +31,7 @@ struct tc3589x_gpio {
        struct tc3589x *tc3589x;
        struct device *dev;
        struct mutex irq_lock;
+       struct irq_domain *domain;
 
        int irq_base;
 
@@ -92,11 +95,28 @@ static int tc3589x_gpio_direction_input(struct gpio_chip *chip,
        return tc3589x_set_bits(tc3589x, reg, 1 << pos, 0);
 }
 
+/**
+ * tc3589x_gpio_irq_get_virq(): Map an interrupt on a chip to a virtual IRQ
+ *
+ * @tc3589x_gpio: tc3589x_gpio_irq controller to operate on.
+ * @irq: index of the interrupt requested in the chip IRQs
+ *
+ * Useful for drivers to request their own IRQs.
+ */
+static int tc3589x_gpio_irq_get_virq(struct tc3589x_gpio *tc3589x_gpio,
+                                    int irq)
+{
+       if (!tc3589x_gpio)
+               return -EINVAL;
+
+       return irq_create_mapping(tc3589x_gpio->domain, irq);
+}
+
 static int tc3589x_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
 {
        struct tc3589x_gpio *tc3589x_gpio = to_tc3589x_gpio(chip);
 
-       return tc3589x_gpio->irq_base + offset;
+       return tc3589x_gpio_irq_get_virq(tc3589x_gpio, offset);
 }
 
 static struct gpio_chip template_chip = {
@@ -113,7 +133,7 @@ static struct gpio_chip template_chip = {
 static int tc3589x_gpio_irq_set_type(struct irq_data *d, unsigned int type)
 {
        struct tc3589x_gpio *tc3589x_gpio = irq_data_get_irq_chip_data(d);
-       int offset = d->irq - tc3589x_gpio->irq_base;
+       int offset = d->hwirq;
        int regoffset = offset / 8;
        int mask = 1 << (offset % 8);
 
@@ -175,7 +195,7 @@ static void tc3589x_gpio_irq_sync_unlock(struct irq_data *d)
 static void tc3589x_gpio_irq_mask(struct irq_data *d)
 {
        struct tc3589x_gpio *tc3589x_gpio = irq_data_get_irq_chip_data(d);
-       int offset = d->irq - tc3589x_gpio->irq_base;
+       int offset = d->hwirq;
        int regoffset = offset / 8;
        int mask = 1 << (offset % 8);
 
@@ -185,7 +205,7 @@ static void tc3589x_gpio_irq_mask(struct irq_data *d)
 static void tc3589x_gpio_irq_unmask(struct irq_data *d)
 {
        struct tc3589x_gpio *tc3589x_gpio = irq_data_get_irq_chip_data(d);
-       int offset = d->irq - tc3589x_gpio->irq_base;
+       int offset = d->hwirq;
        int regoffset = offset / 8;
        int mask = 1 << (offset % 8);
 
@@ -222,8 +242,9 @@ static irqreturn_t tc3589x_gpio_irq(int irq, void *dev)
                while (stat) {
                        int bit = __ffs(stat);
                        int line = i * 8 + bit;
+                       int virq = tc3589x_gpio_irq_get_virq(tc3589x_gpio, line);
 
-                       handle_nested_irq(tc3589x_gpio->irq_base + line);
+                       handle_nested_irq(virq);
                        stat &= ~(1 << bit);
                }
 
@@ -233,51 +254,78 @@ static irqreturn_t tc3589x_gpio_irq(int irq, void *dev)
        return IRQ_HANDLED;
 }
 
-static int tc3589x_gpio_irq_init(struct tc3589x_gpio *tc3589x_gpio)
+static int tc3589x_gpio_irq_map(struct irq_domain *d, unsigned int virq,
+                               irq_hw_number_t hwirq)
 {
-       int base = tc3589x_gpio->irq_base;
-       int irq;
+       struct tc3589x *tc3589x_gpio = d->host_data;
 
-       for (irq = base; irq < base + tc3589x_gpio->chip.ngpio; irq++) {
-               irq_set_chip_data(irq, tc3589x_gpio);
-               irq_set_chip_and_handler(irq, &tc3589x_gpio_irq_chip,
-                                        handle_simple_irq);
-               irq_set_nested_thread(irq, 1);
+       irq_set_chip_data(virq, tc3589x_gpio);
+       irq_set_chip_and_handler(virq, &tc3589x_gpio_irq_chip,
+                               handle_simple_irq);
+       irq_set_nested_thread(virq, 1);
 #ifdef CONFIG_ARM
-               set_irq_flags(irq, IRQF_VALID);
+       set_irq_flags(virq, IRQF_VALID);
 #else
-               irq_set_noprobe(irq);
+       irq_set_noprobe(virq);
 #endif
-       }
 
        return 0;
 }
 
-static void tc3589x_gpio_irq_remove(struct tc3589x_gpio *tc3589x_gpio)
+static void tc3589x_gpio_irq_unmap(struct irq_domain *d, unsigned int virq)
 {
-       int base = tc3589x_gpio->irq_base;
-       int irq;
-
-       for (irq = base; irq < base + tc3589x_gpio->chip.ngpio; irq++) {
 #ifdef CONFIG_ARM
-               set_irq_flags(irq, 0);
+       set_irq_flags(virq, 0);
 #endif
-               irq_set_chip_and_handler(irq, NULL, NULL);
-               irq_set_chip_data(irq, NULL);
+       irq_set_chip_and_handler(virq, NULL, NULL);
+       irq_set_chip_data(virq, NULL);
+}
+
+static struct irq_domain_ops tc3589x_irq_ops = {
+        .map    = tc3589x_gpio_irq_map,
+        .unmap  = tc3589x_gpio_irq_unmap,
+        .xlate  = irq_domain_xlate_twocell,
+};
+
+static int tc3589x_gpio_irq_init(struct tc3589x_gpio *tc3589x_gpio,
+                               struct device_node *np)
+{
+       int base = tc3589x_gpio->irq_base;
+
+       if (base) {
+               tc3589x_gpio->domain = irq_domain_add_legacy(
+                       NULL, tc3589x_gpio->chip.ngpio, base,
+                       0, &tc3589x_irq_ops, tc3589x_gpio);
+       }
+       else {
+               tc3589x_gpio->domain = irq_domain_add_linear(
+                       np, tc3589x_gpio->chip.ngpio,
+                       &tc3589x_irq_ops, tc3589x_gpio);
+       }
+
+       if (!tc3589x_gpio->domain) {
+               dev_err(tc3589x_gpio->dev, "Failed to create irqdomain\n");
+               return -ENOSYS;
        }
+
+       return 0;
 }
 
 static int __devinit tc3589x_gpio_probe(struct platform_device *pdev)
 {
        struct tc3589x *tc3589x = dev_get_drvdata(pdev->dev.parent);
        struct tc3589x_gpio_platform_data *pdata;
+       struct device_node *np = pdev->dev.of_node;
        struct tc3589x_gpio *tc3589x_gpio;
        int ret;
        int irq;
 
        pdata = tc3589x->pdata->gpio;
-       if (!pdata)
-               return -ENODEV;
+
+       if (!(pdata || np)) {
+               dev_err(&pdev->dev, "No platform data or Device Tree found\n");
+               return -EINVAL;
+       }
 
        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
@@ -295,9 +343,14 @@ static int __devinit tc3589x_gpio_probe(struct platform_device *pdev)
        tc3589x_gpio->chip = template_chip;
        tc3589x_gpio->chip.ngpio = tc3589x->num_gpio;
        tc3589x_gpio->chip.dev = &pdev->dev;
-       tc3589x_gpio->chip.base = pdata->gpio_base;
+       tc3589x_gpio->chip.base = (pdata) ? pdata->gpio_base : -1;
 
-       tc3589x_gpio->irq_base = tc3589x->irq_base + TC3589x_INT_GPIO(0);
+#ifdef CONFIG_OF_GPIO
+        tc3589x_gpio->chip.of_node = np;
+#endif
+
+       tc3589x_gpio->irq_base = tc3589x->irq_base ?
+               tc3589x->irq_base + TC3589x_INT_GPIO(0) : 0;
 
        /* Bring the GPIO module out of reset */
        ret = tc3589x_set_bits(tc3589x, TC3589x_RSTCTRL,
@@ -305,7 +358,7 @@ static int __devinit tc3589x_gpio_probe(struct platform_device *pdev)
        if (ret < 0)
                goto out_free;
 
-       ret = tc3589x_gpio_irq_init(tc3589x_gpio);
+       ret = tc3589x_gpio_irq_init(tc3589x_gpio, np);
        if (ret)
                goto out_free;
 
@@ -313,7 +366,7 @@ static int __devinit tc3589x_gpio_probe(struct platform_device *pdev)
                                   "tc3589x-gpio", tc3589x_gpio);
        if (ret) {
                dev_err(&pdev->dev, "unable to get irq: %d\n", ret);
-               goto out_removeirq;
+               goto out_free;
        }
 
        ret = gpiochip_add(&tc3589x_gpio->chip);
@@ -322,7 +375,7 @@ static int __devinit tc3589x_gpio_probe(struct platform_device *pdev)
                goto out_freeirq;
        }
 
-       if (pdata->setup)
+       if (pdata && pdata->setup)
                pdata->setup(tc3589x, tc3589x_gpio->chip.base);
 
        platform_set_drvdata(pdev, tc3589x_gpio);
@@ -331,8 +384,6 @@ static int __devinit tc3589x_gpio_probe(struct platform_device *pdev)
 
 out_freeirq:
        free_irq(irq, tc3589x_gpio);
-out_removeirq:
-       tc3589x_gpio_irq_remove(tc3589x_gpio);
 out_free:
        kfree(tc3589x_gpio);
        return ret;
@@ -346,7 +397,7 @@ static int __devexit tc3589x_gpio_remove(struct platform_device *pdev)
        int irq = platform_get_irq(pdev, 0);
        int ret;
 
-       if (pdata->remove)
+       if (pdata && pdata->remove)
                pdata->remove(tc3589x, tc3589x_gpio->chip.base);
 
        ret = gpiochip_remove(&tc3589x_gpio->chip);
@@ -357,7 +408,6 @@ static int __devexit tc3589x_gpio_remove(struct platform_device *pdev)
        }
 
        free_irq(irq, tc3589x_gpio);
-       tc3589x_gpio_irq_remove(tc3589x_gpio);
 
        platform_set_drvdata(pdev, NULL);
        kfree(tc3589x_gpio);
index 79e66c0023503e245c4128d38fde7816c0b10438..99106d1e2e551592db75a9352d4c23fb9c0b0d3b 100644 (file)
@@ -70,7 +70,6 @@ static int tps65912_gpio_input(struct gpio_chip *gc, unsigned offset)
 
        return tps65912_clear_bits(tps65912, TPS65912_GPIO1 + offset,
                                                                GPIO_CFG_MASK);
-
 }
 
 static struct gpio_chip template_chip = {
@@ -92,7 +91,8 @@ static int __devinit tps65912_gpio_probe(struct platform_device *pdev)
        struct tps65912_gpio_data *tps65912_gpio;
        int ret;
 
-       tps65912_gpio = kzalloc(sizeof(*tps65912_gpio), GFP_KERNEL);
+       tps65912_gpio = devm_kzalloc(&pdev->dev, sizeof(*tps65912_gpio),
+                                    GFP_KERNEL);
        if (tps65912_gpio == NULL)
                return -ENOMEM;
 
@@ -105,28 +105,19 @@ static int __devinit tps65912_gpio_probe(struct platform_device *pdev)
        ret = gpiochip_add(&tps65912_gpio->gpio_chip);
        if (ret < 0) {
                dev_err(&pdev->dev, "Failed to register gpiochip, %d\n", ret);
-               goto err;
+               return ret;
        }
 
        platform_set_drvdata(pdev, tps65912_gpio);
 
        return ret;
-
-err:
-       kfree(tps65912_gpio);
-       return ret;
 }
 
 static int __devexit tps65912_gpio_remove(struct platform_device *pdev)
 {
        struct tps65912_gpio_data  *tps65912_gpio = platform_get_drvdata(pdev);
-       int ret;
-
-       ret = gpiochip_remove(&tps65912_gpio->gpio_chip);
-       if (ret == 0)
-               kfree(tps65912_gpio);
 
-       return ret;
+       return gpiochip_remove(&tps65912_gpio->gpio_chip);
 }
 
 static struct platform_driver tps65912_gpio_driver = {
index e56a2165641c845137b0b37afd4c30d93f570a84..b6eda35089d5d9883515eb88584b37430fede672 100644 (file)
@@ -250,7 +250,8 @@ static int __devinit wm831x_gpio_probe(struct platform_device *pdev)
        struct wm831x_gpio *wm831x_gpio;
        int ret;
 
-       wm831x_gpio = kzalloc(sizeof(*wm831x_gpio), GFP_KERNEL);
+       wm831x_gpio = devm_kzalloc(&pdev->dev, sizeof(*wm831x_gpio),
+                                  GFP_KERNEL);
        if (wm831x_gpio == NULL)
                return -ENOMEM;
 
@@ -265,30 +266,20 @@ static int __devinit wm831x_gpio_probe(struct platform_device *pdev)
 
        ret = gpiochip_add(&wm831x_gpio->gpio_chip);
        if (ret < 0) {
-               dev_err(&pdev->dev, "Could not register gpiochip, %d\n",
-                       ret);
-               goto err;
+               dev_err(&pdev->dev, "Could not register gpiochip, %d\n", ret);
+               return ret;
        }
 
        platform_set_drvdata(pdev, wm831x_gpio);
 
        return ret;
-
-err:
-       kfree(wm831x_gpio);
-       return ret;
 }
 
 static int __devexit wm831x_gpio_remove(struct platform_device *pdev)
 {
        struct wm831x_gpio *wm831x_gpio = platform_get_drvdata(pdev);
-       int ret;
-
-       ret = gpiochip_remove(&wm831x_gpio->gpio_chip);
-       if (ret == 0)
-               kfree(wm831x_gpio);
 
-       return ret;
+       return  gpiochip_remove(&wm831x_gpio->gpio_chip);
 }
 
 static struct platform_driver wm831x_gpio_driver = {
index a06af51548384623edce5516eddb43dd2643953d..fb42938893923d13a771b29d940c8e5a31118ceb 100644 (file)
@@ -116,7 +116,8 @@ static int __devinit wm8350_gpio_probe(struct platform_device *pdev)
        struct wm8350_gpio_data *wm8350_gpio;
        int ret;
 
-       wm8350_gpio = kzalloc(sizeof(*wm8350_gpio), GFP_KERNEL);
+       wm8350_gpio = devm_kzalloc(&pdev->dev, sizeof(*wm8350_gpio),
+                                  GFP_KERNEL);
        if (wm8350_gpio == NULL)
                return -ENOMEM;
 
@@ -131,30 +132,20 @@ static int __devinit wm8350_gpio_probe(struct platform_device *pdev)
 
        ret = gpiochip_add(&wm8350_gpio->gpio_chip);
        if (ret < 0) {
-               dev_err(&pdev->dev, "Could not register gpiochip, %d\n",
-                       ret);
-               goto err;
+               dev_err(&pdev->dev, "Could not register gpiochip, %d\n", ret);
+               return ret;
        }
 
        platform_set_drvdata(pdev, wm8350_gpio);
 
        return ret;
-
-err:
-       kfree(wm8350_gpio);
-       return ret;
 }
 
 static int __devexit wm8350_gpio_remove(struct platform_device *pdev)
 {
        struct wm8350_gpio_data *wm8350_gpio = platform_get_drvdata(pdev);
-       int ret;
-
-       ret = gpiochip_remove(&wm8350_gpio->gpio_chip);
-       if (ret == 0)
-               kfree(wm8350_gpio);
 
-       return ret;
+       return gpiochip_remove(&wm8350_gpio->gpio_chip);
 }
 
 static struct platform_driver wm8350_gpio_driver = {
index de0213c9d11ce12e5496d18b49c16ef4ef030d85..5d6c71edc73911c7765a7530e65352fda2478845 100644 (file)
@@ -1773,56 +1773,102 @@ static void gpiolib_dbg_show(struct seq_file *s, struct gpio_chip *chip)
        }
 }
 
-static int gpiolib_show(struct seq_file *s, void *unused)
+static void *gpiolib_seq_start(struct seq_file *s, loff_t *pos)
 {
-       struct gpio_chip        *chip = NULL;
-       unsigned                gpio;
-       int                     started = 0;
+       struct gpio_chip *chip = NULL;
+       unsigned int gpio;
+       void *ret = NULL;
+       loff_t index = 0;
 
        /* REVISIT this isn't locked against gpio_chip removal ... */
 
        for (gpio = 0; gpio_is_valid(gpio); gpio++) {
-               struct device *dev;
-
-               if (chip == gpio_desc[gpio].chip)
+               if (gpio_desc[gpio].chip == chip)
                        continue;
+
                chip = gpio_desc[gpio].chip;
                if (!chip)
                        continue;
 
-               seq_printf(s, "%sGPIOs %d-%d",
-                               started ? "\n" : "",
-                               chip->base, chip->base + chip->ngpio - 1);
-               dev = chip->dev;
-               if (dev)
-                       seq_printf(s, ", %s/%s",
-                               dev->bus ? dev->bus->name : "no-bus",
-                               dev_name(dev));
-               if (chip->label)
-                       seq_printf(s, ", %s", chip->label);
-               if (chip->can_sleep)
-                       seq_printf(s, ", can sleep");
-               seq_printf(s, ":\n");
-
-               started = 1;
-               if (chip->dbg_show)
-                       chip->dbg_show(s, chip);
-               else
-                       gpiolib_dbg_show(s, chip);
+               if (index++ >= *pos) {
+                       ret = chip;
+                       break;
+               }
        }
+
+       s->private = "";
+
+       return ret;
+}
+
+static void *gpiolib_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+       struct gpio_chip *chip = v;
+       unsigned int gpio;
+       void *ret = NULL;
+
+       /* skip GPIOs provided by the current chip */
+       for (gpio = chip->base + chip->ngpio; gpio_is_valid(gpio); gpio++) {
+               chip = gpio_desc[gpio].chip;
+               if (chip) {
+                       ret = chip;
+                       break;
+               }
+       }
+
+       s->private = "\n";
+       ++*pos;
+
+       return ret;
+}
+
+static void gpiolib_seq_stop(struct seq_file *s, void *v)
+{
+}
+
+static int gpiolib_seq_show(struct seq_file *s, void *v)
+{
+       struct gpio_chip *chip = v;
+       struct device *dev;
+
+       seq_printf(s, "%sGPIOs %d-%d", (char *)s->private,
+                       chip->base, chip->base + chip->ngpio - 1);
+       dev = chip->dev;
+       if (dev)
+               seq_printf(s, ", %s/%s", dev->bus ? dev->bus->name : "no-bus",
+                       dev_name(dev));
+       if (chip->label)
+               seq_printf(s, ", %s", chip->label);
+       if (chip->can_sleep)
+               seq_printf(s, ", can sleep");
+       seq_printf(s, ":\n");
+
+       if (chip->dbg_show)
+               chip->dbg_show(s, chip);
+       else
+               gpiolib_dbg_show(s, chip);
+
        return 0;
 }
 
+static const struct seq_operations gpiolib_seq_ops = {
+       .start = gpiolib_seq_start,
+       .next = gpiolib_seq_next,
+       .stop = gpiolib_seq_stop,
+       .show = gpiolib_seq_show,
+};
+
 static int gpiolib_open(struct inode *inode, struct file *file)
 {
-       return single_open(file, gpiolib_show, NULL);
+       return seq_open(file, &gpiolib_seq_ops);
 }
 
 static const struct file_operations gpiolib_operations = {
+       .owner          = THIS_MODULE,
        .open           = gpiolib_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
-       .release        = single_release,
+       .release        = seq_release,
 };
 
 static int __init gpiolib_debugfs_init(void)
index 9c936ad3d510028e9a3ca07d99ca5d67e0d1cb1f..1227adf74dbcbc270e04ccd45d334d1532977b89 100644 (file)
@@ -968,7 +968,7 @@ static void output_poll_execute(struct work_struct *work)
        }
 
        if (repoll)
-               queue_delayed_work(system_nrt_wq, delayed_work, DRM_OUTPUT_POLL_PERIOD);
+               schedule_delayed_work(delayed_work, DRM_OUTPUT_POLL_PERIOD);
 }
 
 void drm_kms_helper_poll_disable(struct drm_device *dev)
@@ -993,7 +993,7 @@ void drm_kms_helper_poll_enable(struct drm_device *dev)
        }
 
        if (poll)
-               queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD);
+               schedule_delayed_work(&dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD);
 }
 EXPORT_SYMBOL(drm_kms_helper_poll_enable);
 
@@ -1020,6 +1020,6 @@ void drm_helper_hpd_irq_event(struct drm_device *dev)
        /* kill timer and schedule immediate execution, this doesn't block */
        cancel_delayed_work(&dev->mode_config.output_poll_work);
        if (drm_kms_helper_poll)
-               queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, 0);
+               schedule_delayed_work(&dev->mode_config.output_poll_work, 0);
 }
 EXPORT_SYMBOL(drm_helper_hpd_irq_event);
index 828ad9082d17486b08d49f98a2b86e0eb442e25e..7ef1b673e1be9ec2f82006ca3d8ea2519e13b231 100644 (file)
@@ -251,7 +251,7 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
        filp->private_data = priv;
        priv->filp = filp;
        priv->uid = current_euid();
-       priv->pid = task_pid_nr(current);
+       priv->pid = get_pid(task_pid(current));
        priv->minor = idr_find(&drm_minors_idr, minor_id);
        priv->ioctl_count = 0;
        /* for compatibility root is always authenticated */
@@ -524,6 +524,7 @@ int drm_release(struct inode *inode, struct file *filp)
        if (drm_core_check_feature(dev, DRIVER_PRIME))
                drm_prime_destroy_file_private(&file_priv->prime);
 
+       put_pid(file_priv->pid);
        kfree(file_priv);
 
        /* ========================================================
index 3a8b604b743849294bf6197576d399e588a330ad..cdf8b1e7602db2fdd46891b88d7555f6cae7be3e 100644 (file)
@@ -191,8 +191,9 @@ int drm_clients_info(struct seq_file *m, void *data)
                seq_printf(m, "%c %3d %5d %5d %10u %10lu\n",
                           priv->authenticated ? 'y' : 'n',
                           priv->minor->index,
-                          priv->pid,
-                          priv->uid, priv->magic, priv->ioctl_count);
+                          pid_vnr(priv->pid),
+                          from_kuid_munged(seq_user_ns(m), priv->uid),
+                          priv->magic, priv->ioctl_count);
        }
        mutex_unlock(&dev->struct_mutex);
        return 0;
index aa70c0b2bfd2b4e3f895578fff179ef4e7a52659..23dd97506f28f0f58fe57a2d0b0b132c4aebe074 100644 (file)
@@ -215,8 +215,8 @@ int drm_getclient(struct drm_device *dev, void *data,
        list_for_each_entry(pt, &dev->filelist, lhead) {
                if (i++ >= idx) {
                        client->auth = pt->authenticated;
-                       client->pid = pt->pid;
-                       client->uid = pt->uid;
+                       client->pid = pid_vnr(pt->pid);
+                       client->uid = from_kuid_munged(current_user_ns(), pt->uid);
                        client->magic = pt->magic;
                        client->iocs = pt->ioctl_count;
                        mutex_unlock(&dev->struct_mutex);
index 1f4f240580dc85a89644daf1540039ac06a92156..bc2a2e9be8ebc59852be751332e873e827633c1a 100644 (file)
@@ -878,7 +878,7 @@ static int g2d_suspend(struct device *dev)
                /* FIXME: good range? */
                usleep_range(500, 1000);
 
-       flush_work_sync(&g2d->runqueue_work);
+       flush_work(&g2d->runqueue_work);
 
        return 0;
 }
index 8881182a47237af43b151267b337f0bee1ddd6d3..ded74e555e5f67b83a84fa67e86e0de4cf1e39d5 100644 (file)
@@ -302,7 +302,7 @@ nouveau_gpio_isr_del(struct drm_device *dev, int idx, u8 tag, u8 line,
                spin_unlock_irqrestore(&pgpio->lock, flags);
 
                list_for_each_entry_safe(isr, tmp, &tofree, head) {
-                       flush_work_sync(&isr->work);
+                       flush_work(&isr->work);
                        kfree(isr);
                }
        }
index d6f27a89a9fe03c5334e95b65ddefa28ed1c7038..9201992cee12c0030ca6c7858bb9a2d16a8f5e2c 100644 (file)
@@ -277,7 +277,7 @@ void radeon_irq_kms_fini(struct radeon_device *rdev)
                if (rdev->msi_enabled)
                        pci_disable_msi(rdev->pdev);
        }
-       flush_work_sync(&rdev->hotplug_work);
+       flush_work(&rdev->hotplug_work);
 }
 
 /**
index 568dc0ef1035e01e27f9509b3b451c43308661d9..ed5ce2a41bbf02f34a8fabc8307985fe9763ed94 100644 (file)
@@ -594,7 +594,7 @@ int vmw_fb_off(struct vmw_private *vmw_priv)
        par->dirty.active = false;
        spin_unlock_irqrestore(&par->dirty.lock, flags);
 
-       flush_delayed_work_sync(&info->deferred_work);
+       flush_delayed_work(&info->deferred_work);
 
        par->bo_ptr = NULL;
        ttm_bo_kunmap(&par->map);
index 0008a512211d4f4d068fe49d9b3b8d41f81e1cf0..eb003574b634790b3f5c97cf03c7cba300e3f9fb 100644 (file)
@@ -608,7 +608,7 @@ void picolcd_exit_framebuffer(struct picolcd_data *data)
        /* make sure there is no running update - thus that fbdata->picolcd
         * once obtained under lock is guaranteed not to get free() under
         * the feet of the deferred work */
-       flush_delayed_work_sync(&info->deferred_work);
+       flush_delayed_work(&info->deferred_work);
 
        data->fb_info = NULL;
        unregister_framebuffer(info);
index bc85bf29062ea50c735a674b761ce688a5df0412..38ae87772e96d18144a5b2e256192fd67b4bde55 100644 (file)
@@ -229,7 +229,7 @@ static void wiiext_worker(struct work_struct *work)
 /* schedule work only once, otherwise mark for reschedule */
 static void wiiext_schedule(struct wiimote_ext *ext)
 {
-       queue_work(system_nrt_wq, &ext->worker);
+       schedule_work(&ext->worker);
 }
 
 /*
diff --git a/drivers/ieee802154/Kconfig b/drivers/ieee802154/Kconfig
deleted file mode 100644 (file)
index 1fc4eef..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-menuconfig IEEE802154_DRIVERS
-       tristate "IEEE 802.15.4 drivers"
-       depends on NETDEVICES && IEEE802154
-       default y
-       ---help---
-         Say Y here to get to see options for IEEE 802.15.4 Low-Rate
-         Wireless Personal Area Network device drivers. This option alone
-         does not add any kernel code.
-
-         If you say N, all options in this submenu will be skipped and
-         disabled.
-
-config IEEE802154_FAKEHARD
-       tristate "Fake LR-WPAN driver with several interconnected devices"
-       depends on  IEEE802154_DRIVERS
-       ---help---
-         Say Y here to enable the fake driver that serves as an example
-          of HardMAC device driver.
-
-          This driver can also be built as a module. To do so say M here.
-         The module will be called 'fakehard'.
-
-config IEEE802154_FAKELB
-       depends on IEEE802154_DRIVERS && MAC802154
-       tristate "IEEE 802.15.4 loopback driver"
-       ---help---
-         Say Y here to enable the fake driver that can emulate a net
-         of several interconnected radio devices.
-
-         This driver can also be built as a module. To do so say M here.
-         The module will be called 'fakelb'.
-
-config IEEE802154_AT86RF230
-        depends on IEEE802154_DRIVERS && MAC802154
-        tristate "AT86RF230/231 transceiver driver"
-        depends on SPI
diff --git a/drivers/ieee802154/Makefile b/drivers/ieee802154/Makefile
deleted file mode 100644 (file)
index 4f4371d..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
-obj-$(CONFIG_IEEE802154_FAKEHARD) += fakehard.o
-obj-$(CONFIG_IEEE802154_FAKELB) += fakelb.o
-obj-$(CONFIG_IEEE802154_AT86RF230) += at86rf230.o
diff --git a/drivers/ieee802154/at86rf230.c b/drivers/ieee802154/at86rf230.c
deleted file mode 100644 (file)
index 5d30940..0000000
+++ /dev/null
@@ -1,968 +0,0 @@
-/*
- * AT86RF230/RF231 driver
- *
- * Copyright (C) 2009-2012 Siemens AG
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Written by:
- * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
- * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
- */
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/gpio.h>
-#include <linux/delay.h>
-#include <linux/mutex.h>
-#include <linux/workqueue.h>
-#include <linux/spinlock.h>
-#include <linux/spi/spi.h>
-#include <linux/spi/at86rf230.h>
-#include <linux/skbuff.h>
-
-#include <net/mac802154.h>
-#include <net/wpan-phy.h>
-
-struct at86rf230_local {
-       struct spi_device *spi;
-       int rstn, slp_tr, dig2;
-
-       u8 part;
-       u8 vers;
-
-       u8 buf[2];
-       struct mutex bmux;
-
-       struct work_struct irqwork;
-       struct completion tx_complete;
-
-       struct ieee802154_dev *dev;
-
-       spinlock_t lock;
-       bool irq_disabled;
-       bool is_tx;
-};
-
-#define        RG_TRX_STATUS   (0x01)
-#define        SR_TRX_STATUS           0x01, 0x1f, 0
-#define        SR_RESERVED_01_3        0x01, 0x20, 5
-#define        SR_CCA_STATUS           0x01, 0x40, 6
-#define        SR_CCA_DONE             0x01, 0x80, 7
-#define        RG_TRX_STATE    (0x02)
-#define        SR_TRX_CMD              0x02, 0x1f, 0
-#define        SR_TRAC_STATUS          0x02, 0xe0, 5
-#define        RG_TRX_CTRL_0   (0x03)
-#define        SR_CLKM_CTRL            0x03, 0x07, 0
-#define        SR_CLKM_SHA_SEL         0x03, 0x08, 3
-#define        SR_PAD_IO_CLKM          0x03, 0x30, 4
-#define        SR_PAD_IO               0x03, 0xc0, 6
-#define        RG_TRX_CTRL_1   (0x04)
-#define        SR_IRQ_POLARITY         0x04, 0x01, 0
-#define        SR_IRQ_MASK_MODE        0x04, 0x02, 1
-#define        SR_SPI_CMD_MODE         0x04, 0x0c, 2
-#define        SR_RX_BL_CTRL           0x04, 0x10, 4
-#define        SR_TX_AUTO_CRC_ON       0x04, 0x20, 5
-#define        SR_IRQ_2_EXT_EN         0x04, 0x40, 6
-#define        SR_PA_EXT_EN            0x04, 0x80, 7
-#define        RG_PHY_TX_PWR   (0x05)
-#define        SR_TX_PWR               0x05, 0x0f, 0
-#define        SR_PA_LT                0x05, 0x30, 4
-#define        SR_PA_BUF_LT            0x05, 0xc0, 6
-#define        RG_PHY_RSSI     (0x06)
-#define        SR_RSSI                 0x06, 0x1f, 0
-#define        SR_RND_VALUE            0x06, 0x60, 5
-#define        SR_RX_CRC_VALID         0x06, 0x80, 7
-#define        RG_PHY_ED_LEVEL (0x07)
-#define        SR_ED_LEVEL             0x07, 0xff, 0
-#define        RG_PHY_CC_CCA   (0x08)
-#define        SR_CHANNEL              0x08, 0x1f, 0
-#define        SR_CCA_MODE             0x08, 0x60, 5
-#define        SR_CCA_REQUEST          0x08, 0x80, 7
-#define        RG_CCA_THRES    (0x09)
-#define        SR_CCA_ED_THRES         0x09, 0x0f, 0
-#define        SR_RESERVED_09_1        0x09, 0xf0, 4
-#define        RG_RX_CTRL      (0x0a)
-#define        SR_PDT_THRES            0x0a, 0x0f, 0
-#define        SR_RESERVED_0a_1        0x0a, 0xf0, 4
-#define        RG_SFD_VALUE    (0x0b)
-#define        SR_SFD_VALUE            0x0b, 0xff, 0
-#define        RG_TRX_CTRL_2   (0x0c)
-#define        SR_OQPSK_DATA_RATE      0x0c, 0x03, 0
-#define        SR_RESERVED_0c_2        0x0c, 0x7c, 2
-#define        SR_RX_SAFE_MODE         0x0c, 0x80, 7
-#define        RG_ANT_DIV      (0x0d)
-#define        SR_ANT_CTRL             0x0d, 0x03, 0
-#define        SR_ANT_EXT_SW_EN        0x0d, 0x04, 2
-#define        SR_ANT_DIV_EN           0x0d, 0x08, 3
-#define        SR_RESERVED_0d_2        0x0d, 0x70, 4
-#define        SR_ANT_SEL              0x0d, 0x80, 7
-#define        RG_IRQ_MASK     (0x0e)
-#define        SR_IRQ_MASK             0x0e, 0xff, 0
-#define        RG_IRQ_STATUS   (0x0f)
-#define        SR_IRQ_0_PLL_LOCK       0x0f, 0x01, 0
-#define        SR_IRQ_1_PLL_UNLOCK     0x0f, 0x02, 1
-#define        SR_IRQ_2_RX_START       0x0f, 0x04, 2
-#define        SR_IRQ_3_TRX_END        0x0f, 0x08, 3
-#define        SR_IRQ_4_CCA_ED_DONE    0x0f, 0x10, 4
-#define        SR_IRQ_5_AMI            0x0f, 0x20, 5
-#define        SR_IRQ_6_TRX_UR         0x0f, 0x40, 6
-#define        SR_IRQ_7_BAT_LOW        0x0f, 0x80, 7
-#define        RG_VREG_CTRL    (0x10)
-#define        SR_RESERVED_10_6        0x10, 0x03, 0
-#define        SR_DVDD_OK              0x10, 0x04, 2
-#define        SR_DVREG_EXT            0x10, 0x08, 3
-#define        SR_RESERVED_10_3        0x10, 0x30, 4
-#define        SR_AVDD_OK              0x10, 0x40, 6
-#define        SR_AVREG_EXT            0x10, 0x80, 7
-#define        RG_BATMON       (0x11)
-#define        SR_BATMON_VTH           0x11, 0x0f, 0
-#define        SR_BATMON_HR            0x11, 0x10, 4
-#define        SR_BATMON_OK            0x11, 0x20, 5
-#define        SR_RESERVED_11_1        0x11, 0xc0, 6
-#define        RG_XOSC_CTRL    (0x12)
-#define        SR_XTAL_TRIM            0x12, 0x0f, 0
-#define        SR_XTAL_MODE            0x12, 0xf0, 4
-#define        RG_RX_SYN       (0x15)
-#define        SR_RX_PDT_LEVEL         0x15, 0x0f, 0
-#define        SR_RESERVED_15_2        0x15, 0x70, 4
-#define        SR_RX_PDT_DIS           0x15, 0x80, 7
-#define        RG_XAH_CTRL_1   (0x17)
-#define        SR_RESERVED_17_8        0x17, 0x01, 0
-#define        SR_AACK_PROM_MODE       0x17, 0x02, 1
-#define        SR_AACK_ACK_TIME        0x17, 0x04, 2
-#define        SR_RESERVED_17_5        0x17, 0x08, 3
-#define        SR_AACK_UPLD_RES_FT     0x17, 0x10, 4
-#define        SR_AACK_FLTR_RES_FT     0x17, 0x20, 5
-#define        SR_RESERVED_17_2        0x17, 0x40, 6
-#define        SR_RESERVED_17_1        0x17, 0x80, 7
-#define        RG_FTN_CTRL     (0x18)
-#define        SR_RESERVED_18_2        0x18, 0x7f, 0
-#define        SR_FTN_START            0x18, 0x80, 7
-#define        RG_PLL_CF       (0x1a)
-#define        SR_RESERVED_1a_2        0x1a, 0x7f, 0
-#define        SR_PLL_CF_START         0x1a, 0x80, 7
-#define        RG_PLL_DCU      (0x1b)
-#define        SR_RESERVED_1b_3        0x1b, 0x3f, 0
-#define        SR_RESERVED_1b_2        0x1b, 0x40, 6
-#define        SR_PLL_DCU_START        0x1b, 0x80, 7
-#define        RG_PART_NUM     (0x1c)
-#define        SR_PART_NUM             0x1c, 0xff, 0
-#define        RG_VERSION_NUM  (0x1d)
-#define        SR_VERSION_NUM          0x1d, 0xff, 0
-#define        RG_MAN_ID_0     (0x1e)
-#define        SR_MAN_ID_0             0x1e, 0xff, 0
-#define        RG_MAN_ID_1     (0x1f)
-#define        SR_MAN_ID_1             0x1f, 0xff, 0
-#define        RG_SHORT_ADDR_0 (0x20)
-#define        SR_SHORT_ADDR_0         0x20, 0xff, 0
-#define        RG_SHORT_ADDR_1 (0x21)
-#define        SR_SHORT_ADDR_1         0x21, 0xff, 0
-#define        RG_PAN_ID_0     (0x22)
-#define        SR_PAN_ID_0             0x22, 0xff, 0
-#define        RG_PAN_ID_1     (0x23)
-#define        SR_PAN_ID_1             0x23, 0xff, 0
-#define        RG_IEEE_ADDR_0  (0x24)
-#define        SR_IEEE_ADDR_0          0x24, 0xff, 0
-#define        RG_IEEE_ADDR_1  (0x25)
-#define        SR_IEEE_ADDR_1          0x25, 0xff, 0
-#define        RG_IEEE_ADDR_2  (0x26)
-#define        SR_IEEE_ADDR_2          0x26, 0xff, 0
-#define        RG_IEEE_ADDR_3  (0x27)
-#define        SR_IEEE_ADDR_3          0x27, 0xff, 0
-#define        RG_IEEE_ADDR_4  (0x28)
-#define        SR_IEEE_ADDR_4          0x28, 0xff, 0
-#define        RG_IEEE_ADDR_5  (0x29)
-#define        SR_IEEE_ADDR_5          0x29, 0xff, 0
-#define        RG_IEEE_ADDR_6  (0x2a)
-#define        SR_IEEE_ADDR_6          0x2a, 0xff, 0
-#define        RG_IEEE_ADDR_7  (0x2b)
-#define        SR_IEEE_ADDR_7          0x2b, 0xff, 0
-#define        RG_XAH_CTRL_0   (0x2c)
-#define        SR_SLOTTED_OPERATION    0x2c, 0x01, 0
-#define        SR_MAX_CSMA_RETRIES     0x2c, 0x0e, 1
-#define        SR_MAX_FRAME_RETRIES    0x2c, 0xf0, 4
-#define        RG_CSMA_SEED_0  (0x2d)
-#define        SR_CSMA_SEED_0          0x2d, 0xff, 0
-#define        RG_CSMA_SEED_1  (0x2e)
-#define        SR_CSMA_SEED_1          0x2e, 0x07, 0
-#define        SR_AACK_I_AM_COORD      0x2e, 0x08, 3
-#define        SR_AACK_DIS_ACK         0x2e, 0x10, 4
-#define        SR_AACK_SET_PD          0x2e, 0x20, 5
-#define        SR_AACK_FVN_MODE        0x2e, 0xc0, 6
-#define        RG_CSMA_BE      (0x2f)
-#define        SR_MIN_BE               0x2f, 0x0f, 0
-#define        SR_MAX_BE               0x2f, 0xf0, 4
-
-#define CMD_REG                0x80
-#define CMD_REG_MASK   0x3f
-#define CMD_WRITE      0x40
-#define CMD_FB         0x20
-
-#define IRQ_BAT_LOW    (1 << 7)
-#define IRQ_TRX_UR     (1 << 6)
-#define IRQ_AMI                (1 << 5)
-#define IRQ_CCA_ED     (1 << 4)
-#define IRQ_TRX_END    (1 << 3)
-#define IRQ_RX_START   (1 << 2)
-#define IRQ_PLL_UNL    (1 << 1)
-#define IRQ_PLL_LOCK   (1 << 0)
-
-#define STATE_P_ON             0x00    /* BUSY */
-#define STATE_BUSY_RX          0x01
-#define STATE_BUSY_TX          0x02
-#define STATE_FORCE_TRX_OFF    0x03
-#define STATE_FORCE_TX_ON      0x04    /* IDLE */
-/* 0x05 */                             /* INVALID_PARAMETER */
-#define STATE_RX_ON            0x06
-/* 0x07 */                             /* SUCCESS */
-#define STATE_TRX_OFF          0x08
-#define STATE_TX_ON            0x09
-/* 0x0a - 0x0e */                      /* 0x0a - UNSUPPORTED_ATTRIBUTE */
-#define STATE_SLEEP            0x0F
-#define STATE_BUSY_RX_AACK     0x11
-#define STATE_BUSY_TX_ARET     0x12
-#define STATE_BUSY_RX_AACK_ON  0x16
-#define STATE_BUSY_TX_ARET_ON  0x19
-#define STATE_RX_ON_NOCLK      0x1C
-#define STATE_RX_AACK_ON_NOCLK 0x1D
-#define STATE_BUSY_RX_AACK_NOCLK 0x1E
-#define STATE_TRANSITION_IN_PROGRESS 0x1F
-
-static int
-__at86rf230_write(struct at86rf230_local *lp, u8 addr, u8 data)
-{
-       u8 *buf = lp->buf;
-       int status;
-       struct spi_message msg;
-       struct spi_transfer xfer = {
-               .len    = 2,
-               .tx_buf = buf,
-       };
-
-       buf[0] = (addr & CMD_REG_MASK) | CMD_REG | CMD_WRITE;
-       buf[1] = data;
-       dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]);
-       dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]);
-       spi_message_init(&msg);
-       spi_message_add_tail(&xfer, &msg);
-
-       status = spi_sync(lp->spi, &msg);
-       dev_vdbg(&lp->spi->dev, "status = %d\n", status);
-       if (msg.status)
-               status = msg.status;
-
-       dev_vdbg(&lp->spi->dev, "status = %d\n", status);
-       dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]);
-       dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]);
-
-       return status;
-}
-
-static int
-__at86rf230_read_subreg(struct at86rf230_local *lp,
-                       u8 addr, u8 mask, int shift, u8 *data)
-{
-       u8 *buf = lp->buf;
-       int status;
-       struct spi_message msg;
-       struct spi_transfer xfer = {
-               .len    = 2,
-               .tx_buf = buf,
-               .rx_buf = buf,
-       };
-
-       buf[0] = (addr & CMD_REG_MASK) | CMD_REG;
-       buf[1] = 0xff;
-       dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]);
-       spi_message_init(&msg);
-       spi_message_add_tail(&xfer, &msg);
-
-       status = spi_sync(lp->spi, &msg);
-       dev_vdbg(&lp->spi->dev, "status = %d\n", status);
-       if (msg.status)
-               status = msg.status;
-
-       dev_vdbg(&lp->spi->dev, "status = %d\n", status);
-       dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]);
-       dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]);
-
-       if (status == 0)
-               *data = buf[1];
-
-       return status;
-}
-
-static int
-at86rf230_read_subreg(struct at86rf230_local *lp,
-                     u8 addr, u8 mask, int shift, u8 *data)
-{
-       int status;
-
-       mutex_lock(&lp->bmux);
-       status = __at86rf230_read_subreg(lp, addr, mask, shift, data);
-       mutex_unlock(&lp->bmux);
-
-       return status;
-}
-
-static int
-at86rf230_write_subreg(struct at86rf230_local *lp,
-                      u8 addr, u8 mask, int shift, u8 data)
-{
-       int status;
-       u8 val;
-
-       mutex_lock(&lp->bmux);
-       status = __at86rf230_read_subreg(lp, addr, 0xff, 0, &val);
-       if (status)
-               goto out;
-
-       val &= ~mask;
-       val |= (data << shift) & mask;
-
-       status = __at86rf230_write(lp, addr, val);
-out:
-       mutex_unlock(&lp->bmux);
-
-       return status;
-}
-
-static int
-at86rf230_write_fbuf(struct at86rf230_local *lp, u8 *data, u8 len)
-{
-       u8 *buf = lp->buf;
-       int status;
-       struct spi_message msg;
-       struct spi_transfer xfer_head = {
-               .len            = 2,
-               .tx_buf         = buf,
-
-       };
-       struct spi_transfer xfer_buf = {
-               .len            = len,
-               .tx_buf         = data,
-       };
-
-       mutex_lock(&lp->bmux);
-       buf[0] = CMD_WRITE | CMD_FB;
-       buf[1] = len + 2; /* 2 bytes for CRC that isn't written */
-
-       dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]);
-       dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]);
-
-       spi_message_init(&msg);
-       spi_message_add_tail(&xfer_head, &msg);
-       spi_message_add_tail(&xfer_buf, &msg);
-
-       status = spi_sync(lp->spi, &msg);
-       dev_vdbg(&lp->spi->dev, "status = %d\n", status);
-       if (msg.status)
-               status = msg.status;
-
-       dev_vdbg(&lp->spi->dev, "status = %d\n", status);
-       dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]);
-       dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]);
-
-       mutex_unlock(&lp->bmux);
-       return status;
-}
-
-static int
-at86rf230_read_fbuf(struct at86rf230_local *lp, u8 *data, u8 *len, u8 *lqi)
-{
-       u8 *buf = lp->buf;
-       int status;
-       struct spi_message msg;
-       struct spi_transfer xfer_head = {
-               .len            = 2,
-               .tx_buf         = buf,
-               .rx_buf         = buf,
-       };
-       struct spi_transfer xfer_head1 = {
-               .len            = 2,
-               .tx_buf         = buf,
-               .rx_buf         = buf,
-       };
-       struct spi_transfer xfer_buf = {
-               .len            = 0,
-               .rx_buf         = data,
-       };
-
-       mutex_lock(&lp->bmux);
-
-       buf[0] = CMD_FB;
-       buf[1] = 0x00;
-
-       spi_message_init(&msg);
-       spi_message_add_tail(&xfer_head, &msg);
-
-       status = spi_sync(lp->spi, &msg);
-       dev_vdbg(&lp->spi->dev, "status = %d\n", status);
-
-       xfer_buf.len = *(buf + 1) + 1;
-       *len = buf[1];
-
-       buf[0] = CMD_FB;
-       buf[1] = 0x00;
-
-       spi_message_init(&msg);
-       spi_message_add_tail(&xfer_head1, &msg);
-       spi_message_add_tail(&xfer_buf, &msg);
-
-       status = spi_sync(lp->spi, &msg);
-
-       if (msg.status)
-               status = msg.status;
-
-       dev_vdbg(&lp->spi->dev, "status = %d\n", status);
-       dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]);
-       dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]);
-
-       if (status) {
-               if (lqi && (*len > lp->buf[1]))
-                       *lqi = data[lp->buf[1]];
-       }
-       mutex_unlock(&lp->bmux);
-
-       return status;
-}
-
-static int
-at86rf230_ed(struct ieee802154_dev *dev, u8 *level)
-{
-       might_sleep();
-       BUG_ON(!level);
-       *level = 0xbe;
-       return 0;
-}
-
-static int
-at86rf230_state(struct ieee802154_dev *dev, int state)
-{
-       struct at86rf230_local *lp = dev->priv;
-       int rc;
-       u8 val;
-       u8 desired_status;
-
-       might_sleep();
-
-       if (state == STATE_FORCE_TX_ON)
-               desired_status = STATE_TX_ON;
-       else if (state == STATE_FORCE_TRX_OFF)
-               desired_status = STATE_TRX_OFF;
-       else
-               desired_status = state;
-
-       do {
-               rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &val);
-               if (rc)
-                       goto err;
-       } while (val == STATE_TRANSITION_IN_PROGRESS);
-
-       if (val == desired_status)
-               return 0;
-
-       /* state is equal to phy states */
-       rc = at86rf230_write_subreg(lp, SR_TRX_CMD, state);
-       if (rc)
-               goto err;
-
-       do {
-               rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &val);
-               if (rc)
-                       goto err;
-       } while (val == STATE_TRANSITION_IN_PROGRESS);
-
-
-       if (val == desired_status)
-               return 0;
-
-       pr_err("unexpected state change: %d, asked for %d\n", val, state);
-       return -EBUSY;
-
-err:
-       pr_err("error: %d\n", rc);
-       return rc;
-}
-
-static int
-at86rf230_start(struct ieee802154_dev *dev)
-{
-       struct at86rf230_local *lp = dev->priv;
-       u8 rc;
-
-       rc = at86rf230_write_subreg(lp, SR_RX_SAFE_MODE, 1);
-       if (rc)
-               return rc;
-
-       return at86rf230_state(dev, STATE_RX_ON);
-}
-
-static void
-at86rf230_stop(struct ieee802154_dev *dev)
-{
-       at86rf230_state(dev, STATE_FORCE_TRX_OFF);
-}
-
-static int
-at86rf230_channel(struct ieee802154_dev *dev, int page, int channel)
-{
-       struct at86rf230_local *lp = dev->priv;
-       int rc;
-
-       might_sleep();
-
-       if (page != 0 || channel < 11 || channel > 26) {
-               WARN_ON(1);
-               return -EINVAL;
-       }
-
-       rc = at86rf230_write_subreg(lp, SR_CHANNEL, channel);
-       msleep(1); /* Wait for PLL */
-       dev->phy->current_channel = channel;
-
-       return 0;
-}
-
-static int
-at86rf230_xmit(struct ieee802154_dev *dev, struct sk_buff *skb)
-{
-       struct at86rf230_local *lp = dev->priv;
-       int rc;
-       unsigned long flags;
-
-       spin_lock(&lp->lock);
-       if  (lp->irq_disabled) {
-               spin_unlock(&lp->lock);
-               return -EBUSY;
-       }
-       spin_unlock(&lp->lock);
-
-       might_sleep();
-
-       rc = at86rf230_state(dev, STATE_FORCE_TX_ON);
-       if (rc)
-               goto err;
-
-       spin_lock_irqsave(&lp->lock, flags);
-       lp->is_tx = 1;
-       INIT_COMPLETION(lp->tx_complete);
-       spin_unlock_irqrestore(&lp->lock, flags);
-
-       rc = at86rf230_write_fbuf(lp, skb->data, skb->len);
-       if (rc)
-               goto err_rx;
-
-       rc = at86rf230_write_subreg(lp, SR_TRX_CMD, STATE_BUSY_TX);
-       if (rc)
-               goto err_rx;
-
-       rc = wait_for_completion_interruptible(&lp->tx_complete);
-       if (rc < 0)
-               goto err_rx;
-
-       rc = at86rf230_start(dev);
-
-       return rc;
-
-err_rx:
-       at86rf230_start(dev);
-err:
-       pr_err("error: %d\n", rc);
-
-       spin_lock_irqsave(&lp->lock, flags);
-       lp->is_tx = 0;
-       spin_unlock_irqrestore(&lp->lock, flags);
-
-       return rc;
-}
-
-static int at86rf230_rx(struct at86rf230_local *lp)
-{
-       u8 len = 128, lqi = 0;
-       struct sk_buff *skb;
-
-       skb = alloc_skb(len, GFP_KERNEL);
-
-       if (!skb)
-               return -ENOMEM;
-
-       if (at86rf230_read_fbuf(lp, skb_put(skb, len), &len, &lqi))
-               goto err;
-
-       if (len < 2)
-               goto err;
-
-       skb_trim(skb, len - 2); /* We do not put CRC into the frame */
-
-       ieee802154_rx_irqsafe(lp->dev, skb, lqi);
-
-       dev_dbg(&lp->spi->dev, "READ_FBUF: %d %x\n", len, lqi);
-
-       return 0;
-err:
-       pr_debug("received frame is too small\n");
-
-       kfree_skb(skb);
-       return -EINVAL;
-}
-
-static struct ieee802154_ops at86rf230_ops = {
-       .owner = THIS_MODULE,
-       .xmit = at86rf230_xmit,
-       .ed = at86rf230_ed,
-       .set_channel = at86rf230_channel,
-       .start = at86rf230_start,
-       .stop = at86rf230_stop,
-};
-
-static void at86rf230_irqwork(struct work_struct *work)
-{
-       struct at86rf230_local *lp =
-               container_of(work, struct at86rf230_local, irqwork);
-       u8 status = 0, val;
-       int rc;
-       unsigned long flags;
-
-       rc = at86rf230_read_subreg(lp, RG_IRQ_STATUS, 0xff, 0, &val);
-       status |= val;
-
-       status &= ~IRQ_PLL_LOCK; /* ignore */
-       status &= ~IRQ_RX_START; /* ignore */
-       status &= ~IRQ_AMI; /* ignore */
-       status &= ~IRQ_TRX_UR; /* FIXME: possibly handle ???*/
-
-       if (status & IRQ_TRX_END) {
-               spin_lock_irqsave(&lp->lock, flags);
-               status &= ~IRQ_TRX_END;
-               if (lp->is_tx) {
-                       lp->is_tx = 0;
-                       spin_unlock_irqrestore(&lp->lock, flags);
-                       complete(&lp->tx_complete);
-               } else {
-                       spin_unlock_irqrestore(&lp->lock, flags);
-                       at86rf230_rx(lp);
-               }
-       }
-
-       spin_lock_irqsave(&lp->lock, flags);
-       lp->irq_disabled = 0;
-       spin_unlock_irqrestore(&lp->lock, flags);
-
-       enable_irq(lp->spi->irq);
-}
-
-static irqreturn_t at86rf230_isr(int irq, void *data)
-{
-       struct at86rf230_local *lp = data;
-
-       disable_irq_nosync(irq);
-
-       spin_lock(&lp->lock);
-       lp->irq_disabled = 1;
-       spin_unlock(&lp->lock);
-
-       schedule_work(&lp->irqwork);
-
-       return IRQ_HANDLED;
-}
-
-
-static int at86rf230_hw_init(struct at86rf230_local *lp)
-{
-       u8 status;
-       int rc;
-
-       rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &status);
-       if (rc)
-               return rc;
-
-       dev_info(&lp->spi->dev, "Status: %02x\n", status);
-       if (status == STATE_P_ON) {
-               rc = at86rf230_write_subreg(lp, SR_TRX_CMD, STATE_TRX_OFF);
-               if (rc)
-                       return rc;
-               msleep(1);
-               rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &status);
-               if (rc)
-                       return rc;
-               dev_info(&lp->spi->dev, "Status: %02x\n", status);
-       }
-
-       rc = at86rf230_write_subreg(lp, SR_IRQ_MASK, 0xff); /* IRQ_TRX_UR |
-                                                            * IRQ_CCA_ED |
-                                                            * IRQ_TRX_END |
-                                                            * IRQ_PLL_UNL |
-                                                            * IRQ_PLL_LOCK
-                                                            */
-       if (rc)
-               return rc;
-
-       /* CLKM changes are applied immediately */
-       rc = at86rf230_write_subreg(lp, SR_CLKM_SHA_SEL, 0x00);
-       if (rc)
-               return rc;
-
-       /* Turn CLKM Off */
-       rc = at86rf230_write_subreg(lp, SR_CLKM_CTRL, 0x00);
-       if (rc)
-               return rc;
-       /* Wait the next SLEEP cycle */
-       msleep(100);
-
-       rc = at86rf230_write_subreg(lp, SR_TRX_CMD, STATE_TX_ON);
-       if (rc)
-               return rc;
-       msleep(1);
-
-       rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &status);
-       if (rc)
-               return rc;
-       dev_info(&lp->spi->dev, "Status: %02x\n", status);
-
-       rc = at86rf230_read_subreg(lp, SR_DVDD_OK, &status);
-       if (rc)
-               return rc;
-       if (!status) {
-               dev_err(&lp->spi->dev, "DVDD error\n");
-               return -EINVAL;
-       }
-
-       rc = at86rf230_read_subreg(lp, SR_AVDD_OK, &status);
-       if (rc)
-               return rc;
-       if (!status) {
-               dev_err(&lp->spi->dev, "AVDD error\n");
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static int at86rf230_suspend(struct spi_device *spi, pm_message_t message)
-{
-       return 0;
-}
-
-static int at86rf230_resume(struct spi_device *spi)
-{
-       return 0;
-}
-
-static int at86rf230_fill_data(struct spi_device *spi)
-{
-       struct at86rf230_local *lp = spi_get_drvdata(spi);
-       struct at86rf230_platform_data *pdata = spi->dev.platform_data;
-
-       if (!pdata) {
-               dev_err(&spi->dev, "no platform_data\n");
-               return -EINVAL;
-       }
-
-       lp->rstn = pdata->rstn;
-       lp->slp_tr = pdata->slp_tr;
-       lp->dig2 = pdata->dig2;
-
-       return 0;
-}
-
-static int __devinit at86rf230_probe(struct spi_device *spi)
-{
-       struct ieee802154_dev *dev;
-       struct at86rf230_local *lp;
-       u8 man_id_0, man_id_1;
-       int rc;
-       const char *chip;
-       int supported = 0;
-
-       if (!spi->irq) {
-               dev_err(&spi->dev, "no IRQ specified\n");
-               return -EINVAL;
-       }
-
-       dev = ieee802154_alloc_device(sizeof(*lp), &at86rf230_ops);
-       if (!dev)
-               return -ENOMEM;
-
-       lp = dev->priv;
-       lp->dev = dev;
-
-       lp->spi = spi;
-
-       dev->priv = lp;
-       dev->parent = &spi->dev;
-       dev->extra_tx_headroom = 0;
-       /* We do support only 2.4 Ghz */
-       dev->phy->channels_supported[0] = 0x7FFF800;
-       dev->flags = IEEE802154_HW_OMIT_CKSUM;
-
-       mutex_init(&lp->bmux);
-       INIT_WORK(&lp->irqwork, at86rf230_irqwork);
-       spin_lock_init(&lp->lock);
-       init_completion(&lp->tx_complete);
-
-       spi_set_drvdata(spi, lp);
-
-       rc = at86rf230_fill_data(spi);
-       if (rc)
-               goto err_fill;
-
-       rc = gpio_request(lp->rstn, "rstn");
-       if (rc)
-               goto err_rstn;
-
-       if (gpio_is_valid(lp->slp_tr)) {
-               rc = gpio_request(lp->slp_tr, "slp_tr");
-               if (rc)
-                       goto err_slp_tr;
-       }
-
-       rc = gpio_direction_output(lp->rstn, 1);
-       if (rc)
-               goto err_gpio_dir;
-
-       if (gpio_is_valid(lp->slp_tr)) {
-               rc = gpio_direction_output(lp->slp_tr, 0);
-               if (rc)
-                       goto err_gpio_dir;
-       }
-
-       /* Reset */
-       msleep(1);
-       gpio_set_value(lp->rstn, 0);
-       msleep(1);
-       gpio_set_value(lp->rstn, 1);
-       msleep(1);
-
-       rc = at86rf230_read_subreg(lp, SR_MAN_ID_0, &man_id_0);
-       if (rc)
-               goto err_gpio_dir;
-       rc = at86rf230_read_subreg(lp, SR_MAN_ID_1, &man_id_1);
-       if (rc)
-               goto err_gpio_dir;
-
-       if (man_id_1 != 0x00 || man_id_0 != 0x1f) {
-               dev_err(&spi->dev, "Non-Atmel dev found (MAN_ID %02x %02x)\n",
-                       man_id_1, man_id_0);
-               rc = -EINVAL;
-               goto err_gpio_dir;
-       }
-
-       rc = at86rf230_read_subreg(lp, SR_PART_NUM, &lp->part);
-       if (rc)
-               goto err_gpio_dir;
-
-       rc = at86rf230_read_subreg(lp, SR_VERSION_NUM, &lp->vers);
-       if (rc)
-               goto err_gpio_dir;
-
-       switch (lp->part) {
-       case 2:
-               chip = "at86rf230";
-               /* supported = 1;  FIXME: should be easy to support; */
-               break;
-       case 3:
-               chip = "at86rf231";
-               supported = 1;
-               break;
-       default:
-               chip = "UNKNOWN";
-               break;
-       }
-
-       dev_info(&spi->dev, "Detected %s chip version %d\n", chip, lp->vers);
-       if (!supported) {
-               rc = -ENOTSUPP;
-               goto err_gpio_dir;
-       }
-
-       rc = at86rf230_hw_init(lp);
-       if (rc)
-               goto err_gpio_dir;
-
-       rc = request_irq(spi->irq, at86rf230_isr, IRQF_SHARED,
-                        dev_name(&spi->dev), lp);
-       if (rc)
-               goto err_gpio_dir;
-
-       rc = ieee802154_register_device(lp->dev);
-       if (rc)
-               goto err_irq;
-
-       return rc;
-
-       ieee802154_unregister_device(lp->dev);
-err_irq:
-       free_irq(spi->irq, lp);
-       flush_work(&lp->irqwork);
-err_gpio_dir:
-       if (gpio_is_valid(lp->slp_tr))
-               gpio_free(lp->slp_tr);
-err_slp_tr:
-       gpio_free(lp->rstn);
-err_rstn:
-err_fill:
-       spi_set_drvdata(spi, NULL);
-       mutex_destroy(&lp->bmux);
-       ieee802154_free_device(lp->dev);
-       return rc;
-}
-
-static int __devexit at86rf230_remove(struct spi_device *spi)
-{
-       struct at86rf230_local *lp = spi_get_drvdata(spi);
-
-       ieee802154_unregister_device(lp->dev);
-
-       free_irq(spi->irq, lp);
-       flush_work(&lp->irqwork);
-
-       if (gpio_is_valid(lp->slp_tr))
-               gpio_free(lp->slp_tr);
-       gpio_free(lp->rstn);
-
-       spi_set_drvdata(spi, NULL);
-       mutex_destroy(&lp->bmux);
-       ieee802154_free_device(lp->dev);
-
-       dev_dbg(&spi->dev, "unregistered at86rf230\n");
-       return 0;
-}
-
-static struct spi_driver at86rf230_driver = {
-       .driver = {
-               .name   = "at86rf230",
-               .owner  = THIS_MODULE,
-       },
-       .probe      = at86rf230_probe,
-       .remove     = __devexit_p(at86rf230_remove),
-       .suspend    = at86rf230_suspend,
-       .resume     = at86rf230_resume,
-};
-
-static int __init at86rf230_init(void)
-{
-       return spi_register_driver(&at86rf230_driver);
-}
-module_init(at86rf230_init);
-
-static void __exit at86rf230_exit(void)
-{
-       spi_unregister_driver(&at86rf230_driver);
-}
-module_exit(at86rf230_exit);
-
-MODULE_DESCRIPTION("AT86RF230 Transceiver Driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/ieee802154/fakehard.c b/drivers/ieee802154/fakehard.c
deleted file mode 100644 (file)
index 73d4531..0000000
+++ /dev/null
@@ -1,449 +0,0 @@
-/*
- * Sample driver for HardMAC IEEE 802.15.4 devices
- *
- * Copyright (C) 2009 Siemens AG
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Written by:
- * Dmitry Eremin-Solenikov <dmitry.baryshkov@siemens.com>
- */
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/netdevice.h>
-#include <linux/skbuff.h>
-#include <linux/if_arp.h>
-
-#include <net/af_ieee802154.h>
-#include <net/ieee802154_netdev.h>
-#include <net/ieee802154.h>
-#include <net/nl802154.h>
-#include <net/wpan-phy.h>
-
-struct fakehard_priv {
-       struct wpan_phy *phy;
-};
-
-static struct wpan_phy *fake_to_phy(const struct net_device *dev)
-{
-       struct fakehard_priv *priv = netdev_priv(dev);
-       return priv->phy;
-}
-
-/**
- * fake_get_phy - Return a phy corresponding to this device.
- * @dev: The network device for which to return the wan-phy object
- *
- * This function returns a wpan-phy object corresponding to the passed
- * network device. Reference counter for wpan-phy object is incremented,
- * so when the wpan-phy isn't necessary, you should drop the reference
- * via @wpan_phy_put() call.
- */
-static struct wpan_phy *fake_get_phy(const struct net_device *dev)
-{
-       struct wpan_phy *phy = fake_to_phy(dev);
-       return to_phy(get_device(&phy->dev));
-}
-
-/**
- * fake_get_pan_id - Retrieve the PAN ID of the device.
- * @dev: The network device to retrieve the PAN of.
- *
- * Return the ID of the PAN from the PIB.
- */
-static u16 fake_get_pan_id(const struct net_device *dev)
-{
-       BUG_ON(dev->type != ARPHRD_IEEE802154);
-
-       return 0xeba1;
-}
-
-/**
- * fake_get_short_addr - Retrieve the short address of the device.
- * @dev: The network device to retrieve the short address of.
- *
- * Returns the IEEE 802.15.4 short-form address cached for this
- * device. If the device has not yet had a short address assigned
- * then this should return 0xFFFF to indicate a lack of association.
- */
-static u16 fake_get_short_addr(const struct net_device *dev)
-{
-       BUG_ON(dev->type != ARPHRD_IEEE802154);
-
-       return 0x1;
-}
-
-/**
- * fake_get_dsn - Retrieve the DSN of the device.
- * @dev: The network device to retrieve the DSN for.
- *
- * Returns the IEEE 802.15.4 DSN for the network device.
- * The DSN is the sequence number which will be added to each
- * packet or MAC command frame by the MAC during transmission.
- *
- * DSN means 'Data Sequence Number'.
- *
- * Note: This is in section 7.2.1.2 of the IEEE 802.15.4-2006
- *       document.
- */
-static u8 fake_get_dsn(const struct net_device *dev)
-{
-       BUG_ON(dev->type != ARPHRD_IEEE802154);
-
-       return 0x00; /* DSN are implemented in HW, so return just 0 */
-}
-
-/**
- * fake_get_bsn - Retrieve the BSN of the device.
- * @dev: The network device to retrieve the BSN for.
- *
- * Returns the IEEE 802.15.4 BSN for the network device.
- * The BSN is the sequence number which will be added to each
- * beacon frame sent by the MAC.
- *
- * BSN means 'Beacon Sequence Number'.
- *
- * Note: This is in section 7.2.1.2 of the IEEE 802.15.4-2006
- *       document.
- */
-static u8 fake_get_bsn(const struct net_device *dev)
-{
-       BUG_ON(dev->type != ARPHRD_IEEE802154);
-
-       return 0x00; /* BSN are implemented in HW, so return just 0 */
-}
-
-/**
- * fake_assoc_req - Make an association request to the HW.
- * @dev: The network device which we are associating to a network.
- * @addr: The coordinator with which we wish to associate.
- * @channel: The channel on which to associate.
- * @cap: The capability information field to use in the association.
- *
- * Start an association with a coordinator. The coordinator's address
- * and PAN ID can be found in @addr.
- *
- * Note: This is in section 7.3.1 and 7.5.3.1 of the IEEE
- *       802.15.4-2006 document.
- */
-static int fake_assoc_req(struct net_device *dev,
-               struct ieee802154_addr *addr, u8 channel, u8 page, u8 cap)
-{
-       struct wpan_phy *phy = fake_to_phy(dev);
-
-       mutex_lock(&phy->pib_lock);
-       phy->current_channel = channel;
-       phy->current_page = page;
-       mutex_unlock(&phy->pib_lock);
-
-       /* We simply emulate it here */
-       return ieee802154_nl_assoc_confirm(dev, fake_get_short_addr(dev),
-                       IEEE802154_SUCCESS);
-}
-
-/**
- * fake_assoc_resp - Send an association response to a device.
- * @dev: The network device on which to send the response.
- * @addr: The address of the device to respond to.
- * @short_addr: The assigned short address for the device (if any).
- * @status: The result of the association request.
- *
- * Queue the association response of the coordinator to another
- * device's attempt to associate with the network which we
- * coordinate. This is then added to the indirect-send queue to be
- * transmitted to the end device when it polls for data.
- *
- * Note: This is in section 7.3.2 and 7.5.3.1 of the IEEE
- *       802.15.4-2006 document.
- */
-static int fake_assoc_resp(struct net_device *dev,
-               struct ieee802154_addr *addr, u16 short_addr, u8 status)
-{
-       return 0;
-}
-
-/**
- * fake_disassoc_req - Disassociate a device from a network.
- * @dev: The network device on which we're disassociating a device.
- * @addr: The device to disassociate from the network.
- * @reason: The reason to give to the device for being disassociated.
- *
- * This sends a disassociation notification to the device being
- * disassociated from the network.
- *
- * Note: This is in section 7.5.3.2 of the IEEE 802.15.4-2006
- *       document, with the reason described in 7.3.3.2.
- */
-static int fake_disassoc_req(struct net_device *dev,
-               struct ieee802154_addr *addr, u8 reason)
-{
-       return ieee802154_nl_disassoc_confirm(dev, IEEE802154_SUCCESS);
-}
-
-/**
- * fake_start_req - Start an IEEE 802.15.4 PAN.
- * @dev: The network device on which to start the PAN.
- * @addr: The coordinator address to use when starting the PAN.
- * @channel: The channel on which to start the PAN.
- * @bcn_ord: Beacon order.
- * @sf_ord: Superframe order.
- * @pan_coord: Whether or not we are the PAN coordinator or just
- *             requesting a realignment perhaps?
- * @blx: Battery Life Extension feature bitfield.
- * @coord_realign: Something to realign something else.
- *
- * If pan_coord is non-zero then this starts a network with the
- * provided parameters, otherwise it attempts a coordinator
- * realignment of the stated network instead.
- *
- * Note: This is in section 7.5.2.3 of the IEEE 802.15.4-2006
- * document, with 7.3.8 describing coordinator realignment.
- */
-static int fake_start_req(struct net_device *dev, struct ieee802154_addr *addr,
-                               u8 channel, u8 page,
-                               u8 bcn_ord, u8 sf_ord, u8 pan_coord, u8 blx,
-                               u8 coord_realign)
-{
-       struct wpan_phy *phy = fake_to_phy(dev);
-
-       mutex_lock(&phy->pib_lock);
-       phy->current_channel = channel;
-       phy->current_page = page;
-       mutex_unlock(&phy->pib_lock);
-
-       /* We don't emulate beacons here at all, so START should fail */
-       ieee802154_nl_start_confirm(dev, IEEE802154_INVALID_PARAMETER);
-       return 0;
-}
-
-/**
- * fake_scan_req - Start a channel scan.
- * @dev: The network device on which to perform a channel scan.
- * @type: The type of scan to perform.
- * @channels: The channel bitmask to scan.
- * @duration: How long to spend on each channel.
- *
- * This starts either a passive (energy) scan or an active (PAN) scan
- * on the channels indicated in the @channels bitmask. The duration of
- * the scan is measured in terms of superframe duration. Specifically,
- * the scan will spend aBaseSuperFrameDuration * ((2^n) + 1) on each
- * channel.
- *
- * Note: This is in section 7.5.2.1 of the IEEE 802.15.4-2006 document.
- */
-static int fake_scan_req(struct net_device *dev, u8 type, u32 channels,
-               u8 page, u8 duration)
-{
-       u8 edl[27] = {};
-       return ieee802154_nl_scan_confirm(dev, IEEE802154_SUCCESS, type,
-                       channels, page,
-                       type == IEEE802154_MAC_SCAN_ED ? edl : NULL);
-}
-
-static struct ieee802154_mlme_ops fake_mlme = {
-       .assoc_req = fake_assoc_req,
-       .assoc_resp = fake_assoc_resp,
-       .disassoc_req = fake_disassoc_req,
-       .start_req = fake_start_req,
-       .scan_req = fake_scan_req,
-
-       .get_phy = fake_get_phy,
-
-       .get_pan_id = fake_get_pan_id,
-       .get_short_addr = fake_get_short_addr,
-       .get_dsn = fake_get_dsn,
-       .get_bsn = fake_get_bsn,
-};
-
-static int ieee802154_fake_open(struct net_device *dev)
-{
-       netif_start_queue(dev);
-       return 0;
-}
-
-static int ieee802154_fake_close(struct net_device *dev)
-{
-       netif_stop_queue(dev);
-       return 0;
-}
-
-static netdev_tx_t ieee802154_fake_xmit(struct sk_buff *skb,
-                                             struct net_device *dev)
-{
-       dev->stats.tx_packets++;
-       dev->stats.tx_bytes += skb->len;
-
-       /* FIXME: do hardware work here ... */
-
-       dev_kfree_skb(skb);
-       return NETDEV_TX_OK;
-}
-
-
-static int ieee802154_fake_ioctl(struct net_device *dev, struct ifreq *ifr,
-               int cmd)
-{
-       struct sockaddr_ieee802154 *sa =
-               (struct sockaddr_ieee802154 *)&ifr->ifr_addr;
-       u16 pan_id, short_addr;
-
-       switch (cmd) {
-       case SIOCGIFADDR:
-               /* FIXME: fixed here, get from device IRL */
-               pan_id = fake_get_pan_id(dev);
-               short_addr = fake_get_short_addr(dev);
-               if (pan_id == IEEE802154_PANID_BROADCAST ||
-                   short_addr == IEEE802154_ADDR_BROADCAST)
-                       return -EADDRNOTAVAIL;
-
-               sa->family = AF_IEEE802154;
-               sa->addr.addr_type = IEEE802154_ADDR_SHORT;
-               sa->addr.pan_id = pan_id;
-               sa->addr.short_addr = short_addr;
-               return 0;
-       }
-       return -ENOIOCTLCMD;
-}
-
-static int ieee802154_fake_mac_addr(struct net_device *dev, void *p)
-{
-       return -EBUSY; /* HW address is built into the device */
-}
-
-static const struct net_device_ops fake_ops = {
-       .ndo_open               = ieee802154_fake_open,
-       .ndo_stop               = ieee802154_fake_close,
-       .ndo_start_xmit         = ieee802154_fake_xmit,
-       .ndo_do_ioctl           = ieee802154_fake_ioctl,
-       .ndo_set_mac_address    = ieee802154_fake_mac_addr,
-};
-
-static void ieee802154_fake_destruct(struct net_device *dev)
-{
-       struct wpan_phy *phy = fake_to_phy(dev);
-
-       wpan_phy_unregister(phy);
-       free_netdev(dev);
-       wpan_phy_free(phy);
-}
-
-static void ieee802154_fake_setup(struct net_device *dev)
-{
-       dev->addr_len           = IEEE802154_ADDR_LEN;
-       memset(dev->broadcast, 0xff, IEEE802154_ADDR_LEN);
-       dev->features           = NETIF_F_HW_CSUM;
-       dev->needed_tailroom    = 2; /* FCS */
-       dev->mtu                = 127;
-       dev->tx_queue_len       = 10;
-       dev->type               = ARPHRD_IEEE802154;
-       dev->flags              = IFF_NOARP | IFF_BROADCAST;
-       dev->watchdog_timeo     = 0;
-       dev->destructor         = ieee802154_fake_destruct;
-}
-
-
-static int __devinit ieee802154fake_probe(struct platform_device *pdev)
-{
-       struct net_device *dev;
-       struct fakehard_priv *priv;
-       struct wpan_phy *phy = wpan_phy_alloc(0);
-       int err;
-
-       if (!phy)
-               return -ENOMEM;
-
-       dev = alloc_netdev(sizeof(struct fakehard_priv), "hardwpan%d", ieee802154_fake_setup);
-       if (!dev) {
-               wpan_phy_free(phy);
-               return -ENOMEM;
-       }
-
-       memcpy(dev->dev_addr, "\xba\xbe\xca\xfe\xde\xad\xbe\xef",
-                       dev->addr_len);
-       memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
-
-       /*
-        * For now we'd like to emulate 2.4 GHz-only device,
-        * both O-QPSK and CSS
-        */
-       /* 2.4 GHz O-QPSK 802.15.4-2003 */
-       phy->channels_supported[0] |= 0x7FFF800;
-       /* 2.4 GHz CSS 802.15.4a-2007 */
-       phy->channels_supported[3] |= 0x3fff;
-
-       phy->transmit_power = 0xbf;
-
-       dev->netdev_ops = &fake_ops;
-       dev->ml_priv = &fake_mlme;
-
-       priv = netdev_priv(dev);
-       priv->phy = phy;
-
-       wpan_phy_set_dev(phy, &pdev->dev);
-       SET_NETDEV_DEV(dev, &phy->dev);
-
-       platform_set_drvdata(pdev, dev);
-
-       err = wpan_phy_register(phy);
-       if (err)
-               goto out;
-
-       err = register_netdev(dev);
-       if (err < 0)
-               goto out;
-
-       dev_info(&pdev->dev, "Added ieee802154 HardMAC hardware\n");
-       return 0;
-
-out:
-       unregister_netdev(dev);
-       return err;
-}
-
-static int __devexit ieee802154fake_remove(struct platform_device *pdev)
-{
-       struct net_device *dev = platform_get_drvdata(pdev);
-       unregister_netdev(dev);
-       return 0;
-}
-
-static struct platform_device *ieee802154fake_dev;
-
-static struct platform_driver ieee802154fake_driver = {
-       .probe = ieee802154fake_probe,
-       .remove = __devexit_p(ieee802154fake_remove),
-       .driver = {
-                       .name = "ieee802154hardmac",
-                       .owner = THIS_MODULE,
-       },
-};
-
-static __init int fake_init(void)
-{
-       ieee802154fake_dev = platform_device_register_simple(
-                       "ieee802154hardmac", -1, NULL, 0);
-       return platform_driver_register(&ieee802154fake_driver);
-}
-
-static __exit void fake_exit(void)
-{
-       platform_driver_unregister(&ieee802154fake_driver);
-       platform_device_unregister(ieee802154fake_dev);
-}
-
-module_init(fake_init);
-module_exit(fake_exit);
-MODULE_LICENSE("GPL");
-
diff --git a/drivers/ieee802154/fakelb.c b/drivers/ieee802154/fakelb.c
deleted file mode 100644 (file)
index e7456fc..0000000
+++ /dev/null
@@ -1,294 +0,0 @@
-/*
- * Loopback IEEE 802.15.4 interface
- *
- * Copyright 2007-2012 Siemens AG
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Written by:
- * Sergey Lapin <slapin@ossfans.org>
- * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
- * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
- */
-
-#include <linux/module.h>
-#include <linux/timer.h>
-#include <linux/platform_device.h>
-#include <linux/netdevice.h>
-#include <linux/spinlock.h>
-#include <net/mac802154.h>
-#include <net/wpan-phy.h>
-
-static int numlbs = 1;
-
-struct fakelb_dev_priv {
-       struct ieee802154_dev *dev;
-
-       struct list_head list;
-       struct fakelb_priv *fake;
-
-       spinlock_t lock;
-       bool working;
-};
-
-struct fakelb_priv {
-       struct list_head list;
-       rwlock_t lock;
-};
-
-static int
-fakelb_hw_ed(struct ieee802154_dev *dev, u8 *level)
-{
-       might_sleep();
-       BUG_ON(!level);
-       *level = 0xbe;
-
-       return 0;
-}
-
-static int
-fakelb_hw_channel(struct ieee802154_dev *dev, int page, int channel)
-{
-       pr_debug("set channel to %d\n", channel);
-
-       might_sleep();
-       dev->phy->current_page = page;
-       dev->phy->current_channel = channel;
-
-       return 0;
-}
-
-static void
-fakelb_hw_deliver(struct fakelb_dev_priv *priv, struct sk_buff *skb)
-{
-       struct sk_buff *newskb;
-
-       spin_lock(&priv->lock);
-       if (priv->working) {
-               newskb = pskb_copy(skb, GFP_ATOMIC);
-               ieee802154_rx_irqsafe(priv->dev, newskb, 0xcc);
-       }
-       spin_unlock(&priv->lock);
-}
-
-static int
-fakelb_hw_xmit(struct ieee802154_dev *dev, struct sk_buff *skb)
-{
-       struct fakelb_dev_priv *priv = dev->priv;
-       struct fakelb_priv *fake = priv->fake;
-
-       might_sleep();
-
-       read_lock_bh(&fake->lock);
-       if (priv->list.next == priv->list.prev) {
-               /* we are the only one device */
-               fakelb_hw_deliver(priv, skb);
-       } else {
-               struct fakelb_dev_priv *dp;
-               list_for_each_entry(dp, &priv->fake->list, list) {
-                       if (dp != priv &&
-                           (dp->dev->phy->current_channel ==
-                            priv->dev->phy->current_channel))
-                               fakelb_hw_deliver(dp, skb);
-               }
-       }
-       read_unlock_bh(&fake->lock);
-
-       return 0;
-}
-
-static int
-fakelb_hw_start(struct ieee802154_dev *dev) {
-       struct fakelb_dev_priv *priv = dev->priv;
-       int ret = 0;
-
-       spin_lock(&priv->lock);
-       if (priv->working)
-               ret = -EBUSY;
-       else
-               priv->working = 1;
-       spin_unlock(&priv->lock);
-
-       return ret;
-}
-
-static void
-fakelb_hw_stop(struct ieee802154_dev *dev) {
-       struct fakelb_dev_priv *priv = dev->priv;
-
-       spin_lock(&priv->lock);
-       priv->working = 0;
-       spin_unlock(&priv->lock);
-}
-
-static struct ieee802154_ops fakelb_ops = {
-       .owner = THIS_MODULE,
-       .xmit = fakelb_hw_xmit,
-       .ed = fakelb_hw_ed,
-       .set_channel = fakelb_hw_channel,
-       .start = fakelb_hw_start,
-       .stop = fakelb_hw_stop,
-};
-
-/* Number of dummy devices to be set up by this module. */
-module_param(numlbs, int, 0);
-MODULE_PARM_DESC(numlbs, " number of pseudo devices");
-
-static int fakelb_add_one(struct device *dev, struct fakelb_priv *fake)
-{
-       struct fakelb_dev_priv *priv;
-       int err;
-       struct ieee802154_dev *ieee;
-
-       ieee = ieee802154_alloc_device(sizeof(*priv), &fakelb_ops);
-       if (!ieee)
-               return -ENOMEM;
-
-       priv = ieee->priv;
-       priv->dev = ieee;
-
-       /* 868 MHz BPSK 802.15.4-2003 */
-       ieee->phy->channels_supported[0] |= 1;
-       /* 915 MHz BPSK 802.15.4-2003 */
-       ieee->phy->channels_supported[0] |= 0x7fe;
-       /* 2.4 GHz O-QPSK 802.15.4-2003 */
-       ieee->phy->channels_supported[0] |= 0x7FFF800;
-       /* 868 MHz ASK 802.15.4-2006 */
-       ieee->phy->channels_supported[1] |= 1;
-       /* 915 MHz ASK 802.15.4-2006 */
-       ieee->phy->channels_supported[1] |= 0x7fe;
-       /* 868 MHz O-QPSK 802.15.4-2006 */
-       ieee->phy->channels_supported[2] |= 1;
-       /* 915 MHz O-QPSK 802.15.4-2006 */
-       ieee->phy->channels_supported[2] |= 0x7fe;
-       /* 2.4 GHz CSS 802.15.4a-2007 */
-       ieee->phy->channels_supported[3] |= 0x3fff;
-       /* UWB Sub-gigahertz 802.15.4a-2007 */
-       ieee->phy->channels_supported[4] |= 1;
-       /* UWB Low band 802.15.4a-2007 */
-       ieee->phy->channels_supported[4] |= 0x1e;
-       /* UWB High band 802.15.4a-2007 */
-       ieee->phy->channels_supported[4] |= 0xffe0;
-       /* 750 MHz O-QPSK 802.15.4c-2009 */
-       ieee->phy->channels_supported[5] |= 0xf;
-       /* 750 MHz MPSK 802.15.4c-2009 */
-       ieee->phy->channels_supported[5] |= 0xf0;
-       /* 950 MHz BPSK 802.15.4d-2009 */
-       ieee->phy->channels_supported[6] |= 0x3ff;
-       /* 950 MHz GFSK 802.15.4d-2009 */
-       ieee->phy->channels_supported[6] |= 0x3ffc00;
-
-       INIT_LIST_HEAD(&priv->list);
-       priv->fake = fake;
-
-       spin_lock_init(&priv->lock);
-
-       ieee->parent = dev;
-
-       err = ieee802154_register_device(ieee);
-       if (err)
-               goto err_reg;
-
-       write_lock_bh(&fake->lock);
-       list_add_tail(&priv->list, &fake->list);
-       write_unlock_bh(&fake->lock);
-
-       return 0;
-
-err_reg:
-       ieee802154_free_device(priv->dev);
-       return err;
-}
-
-static void fakelb_del(struct fakelb_dev_priv *priv)
-{
-       write_lock_bh(&priv->fake->lock);
-       list_del(&priv->list);
-       write_unlock_bh(&priv->fake->lock);
-
-       ieee802154_unregister_device(priv->dev);
-       ieee802154_free_device(priv->dev);
-}
-
-static int __devinit fakelb_probe(struct platform_device *pdev)
-{
-       struct fakelb_priv *priv;
-       struct fakelb_dev_priv *dp;
-       int err = -ENOMEM;
-       int i;
-
-       priv = kzalloc(sizeof(struct fakelb_priv), GFP_KERNEL);
-       if (!priv)
-               goto err_alloc;
-
-       INIT_LIST_HEAD(&priv->list);
-       rwlock_init(&priv->lock);
-
-       for (i = 0; i < numlbs; i++) {
-               err = fakelb_add_one(&pdev->dev, priv);
-               if (err < 0)
-                       goto err_slave;
-       }
-
-       platform_set_drvdata(pdev, priv);
-       dev_info(&pdev->dev, "added ieee802154 hardware\n");
-       return 0;
-
-err_slave:
-       list_for_each_entry(dp, &priv->list, list)
-               fakelb_del(dp);
-       kfree(priv);
-err_alloc:
-       return err;
-}
-
-static int __devexit fakelb_remove(struct platform_device *pdev)
-{
-       struct fakelb_priv *priv = platform_get_drvdata(pdev);
-       struct fakelb_dev_priv *dp, *temp;
-
-       list_for_each_entry_safe(dp, temp, &priv->list, list)
-               fakelb_del(dp);
-       kfree(priv);
-
-       return 0;
-}
-
-static struct platform_device *ieee802154fake_dev;
-
-static struct platform_driver ieee802154fake_driver = {
-       .probe = fakelb_probe,
-       .remove = __devexit_p(fakelb_remove),
-       .driver = {
-                       .name = "ieee802154fakelb",
-                       .owner = THIS_MODULE,
-       },
-};
-
-static __init int fakelb_init_module(void)
-{
-       ieee802154fake_dev = platform_device_register_simple(
-                            "ieee802154fakelb", -1, NULL, 0);
-       return platform_driver_register(&ieee802154fake_driver);
-}
-
-static __exit void fake_remove_module(void)
-{
-       platform_driver_unregister(&ieee802154fake_driver);
-       platform_device_unregister(ieee802154fake_dev);
-}
-
-module_init(fakelb_init_module);
-module_exit(fake_remove_module);
-MODULE_LICENSE("GPL");
index 28058ae33d38f0da431133e854302f6cbec0e2c6..eaec8d7a3b7372094cdabcb98c2277fd46bb93cf 100644 (file)
@@ -152,13 +152,11 @@ static void set_timeout(unsigned long time)
 {
        unsigned long delay;
 
-       cancel_delayed_work(&work);
-
        delay = time - jiffies;
        if ((long)delay <= 0)
                delay = 1;
 
-       queue_delayed_work(addr_wq, &work, delay);
+       mod_delayed_work(addr_wq, &work, delay);
 }
 
 static void queue_req(struct addr_req *req)
index 9353992f9eeadbb336a8b634798079ce57f34ac9..80f6cf2449fb9b852533d254ab8e6cbabc706156 100644 (file)
@@ -167,6 +167,7 @@ int ib_find_cached_pkey(struct ib_device *device,
        unsigned long flags;
        int i;
        int ret = -ENOENT;
+       int partial_ix = -1;
 
        if (port_num < start_port(device) || port_num > end_port(device))
                return -EINVAL;
@@ -179,6 +180,46 @@ int ib_find_cached_pkey(struct ib_device *device,
 
        for (i = 0; i < cache->table_len; ++i)
                if ((cache->table[i] & 0x7fff) == (pkey & 0x7fff)) {
+                       if (cache->table[i] & 0x8000) {
+                               *index = i;
+                               ret = 0;
+                               break;
+                       } else
+                               partial_ix = i;
+               }
+
+       if (ret && partial_ix >= 0) {
+               *index = partial_ix;
+               ret = 0;
+       }
+
+       read_unlock_irqrestore(&device->cache.lock, flags);
+
+       return ret;
+}
+EXPORT_SYMBOL(ib_find_cached_pkey);
+
+int ib_find_exact_cached_pkey(struct ib_device *device,
+                             u8                port_num,
+                             u16               pkey,
+                             u16              *index)
+{
+       struct ib_pkey_cache *cache;
+       unsigned long flags;
+       int i;
+       int ret = -ENOENT;
+
+       if (port_num < start_port(device) || port_num > end_port(device))
+               return -EINVAL;
+
+       read_lock_irqsave(&device->cache.lock, flags);
+
+       cache = device->cache.pkey_cache[port_num - start_port(device)];
+
+       *index = -1;
+
+       for (i = 0; i < cache->table_len; ++i)
+               if (cache->table[i] == pkey) {
                        *index = i;
                        ret = 0;
                        break;
@@ -188,7 +229,7 @@ int ib_find_cached_pkey(struct ib_device *device,
 
        return ret;
 }
-EXPORT_SYMBOL(ib_find_cached_pkey);
+EXPORT_SYMBOL(ib_find_exact_cached_pkey);
 
 int ib_get_cached_lmc(struct ib_device *device,
                      u8                port_num,
index 7172559ce0c1486042a0d5396d8898a10cd31663..26b37603dcf11b63a7ae396d93533fed8a2a09cd 100644 (file)
@@ -3058,7 +3058,10 @@ static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
 
        if (id_priv->id.ps == RDMA_PS_IPOIB)
                comp_mask |= IB_SA_MCMEMBER_REC_RATE |
-                            IB_SA_MCMEMBER_REC_RATE_SELECTOR;
+                            IB_SA_MCMEMBER_REC_RATE_SELECTOR |
+                            IB_SA_MCMEMBER_REC_MTU_SELECTOR |
+                            IB_SA_MCMEMBER_REC_MTU |
+                            IB_SA_MCMEMBER_REC_HOP_LIMIT;
 
        mc->multicast.ib = ib_sa_join_multicast(&sa_client, id_priv->id.device,
                                                id_priv->id.port_num, &rec,
index e711de400a01aaa27216318b82770bdb3647161e..18c1ece765f2c55b8317fba4daa5716df0ab814f 100644 (file)
@@ -707,18 +707,28 @@ int ib_find_pkey(struct ib_device *device,
 {
        int ret, i;
        u16 tmp_pkey;
+       int partial_ix = -1;
 
        for (i = 0; i < device->pkey_tbl_len[port_num - start_port(device)]; ++i) {
                ret = ib_query_pkey(device, port_num, i, &tmp_pkey);
                if (ret)
                        return ret;
-
                if ((pkey & 0x7fff) == (tmp_pkey & 0x7fff)) {
-                       *index = i;
-                       return 0;
+                       /* if there is full-member pkey take it.*/
+                       if (tmp_pkey & 0x8000) {
+                               *index = i;
+                               return 0;
+                       }
+                       if (partial_ix < 0)
+                               partial_ix = i;
                }
        }
 
+       /*no full-member, if exists take the limited*/
+       if (partial_ix >= 0) {
+               *index = partial_ix;
+               return 0;
+       }
        return -ENOENT;
 }
 EXPORT_SYMBOL(ib_find_pkey);
index b0d0bc8a6fb6ca58c61206ff11dab9f2f5cd5e8e..dc3fd1e8af07f0f8f4c6da455c38e315074a8e1e 100644 (file)
@@ -2004,7 +2004,7 @@ static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
        unsigned long delay;
 
        if (list_empty(&mad_agent_priv->wait_list)) {
-               __cancel_delayed_work(&mad_agent_priv->timed_work);
+               cancel_delayed_work(&mad_agent_priv->timed_work);
        } else {
                mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
                                         struct ib_mad_send_wr_private,
@@ -2013,13 +2013,11 @@ static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
                if (time_after(mad_agent_priv->timeout,
                               mad_send_wr->timeout)) {
                        mad_agent_priv->timeout = mad_send_wr->timeout;
-                       __cancel_delayed_work(&mad_agent_priv->timed_work);
                        delay = mad_send_wr->timeout - jiffies;
                        if ((long)delay <= 0)
                                delay = 1;
-                       queue_delayed_work(mad_agent_priv->qp_info->
-                                          port_priv->wq,
-                                          &mad_agent_priv->timed_work, delay);
+                       mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
+                                        &mad_agent_priv->timed_work, delay);
                }
        }
 }
@@ -2052,11 +2050,9 @@ static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
        list_add(&mad_send_wr->agent_list, list_item);
 
        /* Reschedule a work item if we have a shorter timeout */
-       if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list) {
-               __cancel_delayed_work(&mad_agent_priv->timed_work);
-               queue_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
-                                  &mad_agent_priv->timed_work, delay);
-       }
+       if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list)
+               mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
+                                &mad_agent_priv->timed_work, delay);
 }
 
 void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
index 3ae2bfd310158d58dd6051961358010947070fdd..fe10a949aef9b6bd0533da3c4886933ee4a2091f 100644 (file)
@@ -177,7 +177,7 @@ int __init ibnl_init(void)
                .input  = ibnl_rcv,
        };
 
-       nls = netlink_kernel_create(&init_net, NETLINK_RDMA, THIS_MODULE, &cfg);
+       nls = netlink_kernel_create(&init_net, NETLINK_RDMA, &cfg);
        if (!nls) {
                pr_warn("Failed to create netlink socket\n");
                return -ENOMEM;
index 06f08713f487c8b3aec1f4bfe85f9dc744b1f2c7..49b15ac1987e0ba1537afbe478fd1b189f96fd66 100644 (file)
@@ -397,7 +397,6 @@ static ssize_t ib_ucm_event(struct ib_ucm_file *file,
        struct ib_ucm_event_get cmd;
        struct ib_ucm_event *uevent;
        int result = 0;
-       DEFINE_WAIT(wait);
 
        if (out_len < sizeof(struct ib_ucm_event_resp))
                return -ENOSPC;
index 055ed59838dca128eef4986a9b3186a1f0588b84..2709ff581392f1dcd928c15356df3a85d58ec36b 100644 (file)
@@ -310,7 +310,6 @@ static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf,
        struct rdma_ucm_get_event cmd;
        struct ucma_event *uevent;
        int ret = 0;
-       DEFINE_WAIT(wait);
 
        if (out_len < sizeof uevent->resp)
                return -ENOSPC;
@@ -1184,7 +1183,7 @@ static ssize_t ucma_migrate_id(struct ucma_file *new_file,
        struct rdma_ucm_migrate_id cmd;
        struct rdma_ucm_migrate_resp resp;
        struct ucma_context *ctx;
-       struct file *filp;
+       struct fd f;
        struct ucma_file *cur_file;
        int ret = 0;
 
@@ -1192,12 +1191,12 @@ static ssize_t ucma_migrate_id(struct ucma_file *new_file,
                return -EFAULT;
 
        /* Get current fd to protect against it being closed */
-       filp = fget(cmd.fd);
-       if (!filp)
+       f = fdget(cmd.fd);
+       if (!f.file)
                return -ENOENT;
 
        /* Validate current fd and prevent destruction of id. */
-       ctx = ucma_get_ctx(filp->private_data, cmd.id);
+       ctx = ucma_get_ctx(f.file->private_data, cmd.id);
        if (IS_ERR(ctx)) {
                ret = PTR_ERR(ctx);
                goto file_put;
@@ -1231,7 +1230,7 @@ response:
 
        ucma_put_ctx(ctx);
 file_put:
-       fput(filp);
+       fdput(f);
        return ret;
 }
 
index f9d0d7c413a25b9e15775acf12ae20335140a2f7..0cb0007724a2d70611b7b1c7e3119dbf90d5d765 100644 (file)
@@ -705,7 +705,7 @@ ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
        struct ib_udata                 udata;
        struct ib_uxrcd_object         *obj;
        struct ib_xrcd                 *xrcd = NULL;
-       struct file                    *f = NULL;
+       struct fd                       f = {NULL, 0};
        struct inode                   *inode = NULL;
        int                             ret = 0;
        int                             new_xrcd = 0;
@@ -724,18 +724,13 @@ ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
 
        if (cmd.fd != -1) {
                /* search for file descriptor */
-               f = fget(cmd.fd);
-               if (!f) {
-                       ret = -EBADF;
-                       goto err_tree_mutex_unlock;
-               }
-
-               inode = f->f_dentry->d_inode;
-               if (!inode) {
+               f = fdget(cmd.fd);
+               if (!f.file) {
                        ret = -EBADF;
                        goto err_tree_mutex_unlock;
                }
 
+               inode = f.file->f_path.dentry->d_inode;
                xrcd = find_xrcd(file->device, inode);
                if (!xrcd && !(cmd.oflags & O_CREAT)) {
                        /* no file descriptor. Need CREATE flag */
@@ -800,8 +795,8 @@ ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
                goto err_copy;
        }
 
-       if (f)
-               fput(f);
+       if (f.file)
+               fdput(f);
 
        mutex_lock(&file->mutex);
        list_add_tail(&obj->uobject.list, &file->ucontext->xrcd_list);
@@ -830,8 +825,8 @@ err:
        put_uobj_write(&obj->uobject);
 
 err_tree_mutex_unlock:
-       if (f)
-               fput(f);
+       if (f.file)
+               fdput(f);
 
        mutex_unlock(&file->device->xrcd_tree_mutex);
 
index 604556d73d250e59cde7559a9e5a870089419fe6..6f2ce6fa98f8b0446d4ed45fabe7b3a67a49fcdb 100644 (file)
@@ -541,16 +541,15 @@ struct file *ib_uverbs_alloc_event_file(struct ib_uverbs_file *uverbs_file,
 struct ib_uverbs_event_file *ib_uverbs_lookup_comp_file(int fd)
 {
        struct ib_uverbs_event_file *ev_file = NULL;
-       struct file *filp;
+       struct fd f = fdget(fd);
 
-       filp = fget(fd);
-       if (!filp)
+       if (!f.file)
                return NULL;
 
-       if (filp->f_op != &uverbs_event_fops)
+       if (f.file->f_op != &uverbs_event_fops)
                goto out;
 
-       ev_file = filp->private_data;
+       ev_file = f.file->private_data;
        if (ev_file->is_async) {
                ev_file = NULL;
                goto out;
@@ -559,7 +558,7 @@ struct ib_uverbs_event_file *ib_uverbs_lookup_comp_file(int fd)
        kref_get(&ev_file->ref);
 
 out:
-       fput(filp);
+       fdput(f);
        return ev_file;
 }
 
index 45aedf1d9338a2d9e274778fa025d496bb6f09fc..05bfe53bff647b8e47ed1d6ef361734ce69b4e8e 100644 (file)
@@ -137,19 +137,25 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
                return -ENOMEM;
 
        wq->rq.qid = c4iw_get_qpid(rdev, uctx);
-       if (!wq->rq.qid)
-               goto err1;
+       if (!wq->rq.qid) {
+               ret = -ENOMEM;
+               goto free_sq_qid;
+       }
 
        if (!user) {
                wq->sq.sw_sq = kzalloc(wq->sq.size * sizeof *wq->sq.sw_sq,
                                 GFP_KERNEL);
-               if (!wq->sq.sw_sq)
-                       goto err2;
+               if (!wq->sq.sw_sq) {
+                       ret = -ENOMEM;
+                       goto free_rq_qid;
+               }
 
                wq->rq.sw_rq = kzalloc(wq->rq.size * sizeof *wq->rq.sw_rq,
                                 GFP_KERNEL);
-               if (!wq->rq.sw_rq)
-                       goto err3;
+               if (!wq->rq.sw_rq) {
+                       ret = -ENOMEM;
+                       goto free_sw_sq;
+               }
        }
 
        /*
@@ -157,15 +163,23 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
         */
        wq->rq.rqt_size = roundup_pow_of_two(wq->rq.size);
        wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size);
-       if (!wq->rq.rqt_hwaddr)
-               goto err4;
+       if (!wq->rq.rqt_hwaddr) {
+               ret = -ENOMEM;
+               goto free_sw_rq;
+       }
 
        if (user) {
-               if (alloc_oc_sq(rdev, &wq->sq) && alloc_host_sq(rdev, &wq->sq))
-                       goto err5;
+               ret = alloc_oc_sq(rdev, &wq->sq);
+               if (ret)
+                       goto free_hwaddr;
+
+               ret = alloc_host_sq(rdev, &wq->sq);
+               if (ret)
+                       goto free_sq;
        } else
-               if (alloc_host_sq(rdev, &wq->sq))
-                       goto err5;
+               ret = alloc_host_sq(rdev, &wq->sq);
+               if (ret)
+                       goto free_hwaddr;
        memset(wq->sq.queue, 0, wq->sq.memsize);
        dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);
 
@@ -173,7 +187,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
                                          wq->rq.memsize, &(wq->rq.dma_addr),
                                          GFP_KERNEL);
        if (!wq->rq.queue)
-               goto err6;
+               goto free_sq;
        PDBG("%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
                __func__, wq->sq.queue,
                (unsigned long long)virt_to_phys(wq->sq.queue),
@@ -201,7 +215,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
        skb = alloc_skb(wr_len, GFP_KERNEL);
        if (!skb) {
                ret = -ENOMEM;
-               goto err7;
+               goto free_dma;
        }
        set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
 
@@ -266,33 +280,33 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
 
        ret = c4iw_ofld_send(rdev, skb);
        if (ret)
-               goto err7;
+               goto free_dma;
        ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, wq->sq.qid, __func__);
        if (ret)
-               goto err7;
+               goto free_dma;
 
        PDBG("%s sqid 0x%x rqid 0x%x kdb 0x%p squdb 0x%llx rqudb 0x%llx\n",
             __func__, wq->sq.qid, wq->rq.qid, wq->db,
             (unsigned long long)wq->sq.udb, (unsigned long long)wq->rq.udb);
 
        return 0;
-err7:
+free_dma:
        dma_free_coherent(&(rdev->lldi.pdev->dev),
                          wq->rq.memsize, wq->rq.queue,
                          dma_unmap_addr(&wq->rq, mapping));
-err6:
+free_sq:
        dealloc_sq(rdev, &wq->sq);
-err5:
+free_hwaddr:
        c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
-err4:
+free_sw_rq:
        kfree(wq->rq.sw_rq);
-err3:
+free_sw_sq:
        kfree(wq->sq.sw_sq);
-err2:
+free_rq_qid:
        c4iw_put_qpid(rdev, wq->rq.qid, uctx);
-err1:
+free_sq_qid:
        c4iw_put_qpid(rdev, wq->sq.qid, uctx);
-       return -ENOMEM;
+       return ret;
 }
 
 static int build_immd(struct t4_sq *sq, struct fw_ri_immd *immdp,
@@ -1155,7 +1169,7 @@ static int ring_kernel_db(struct c4iw_qp *qhp, u32 qid, u16 inc)
                 */
                if (cxgb4_dbfifo_count(qhp->rhp->rdev.lldi.ports[0], 1) <
                    (qhp->rhp->rdev.lldi.dbfifo_int_thresh << 5)) {
-                       writel(V_QID(qid) | V_PIDX(inc), qhp->wq.db);
+                       writel(QID(qid) | PIDX(inc), qhp->wq.db);
                        break;
                }
                set_current_state(TASK_UNINTERRUPTIBLE);
index 70f09c7826da42492255f3ec9b4bbcfaaafab6af..f4213b3a8fe1144dcc79371d189660ec89a9ac20 100644 (file)
@@ -1,3 +1,3 @@
 obj-$(CONFIG_MLX4_INFINIBAND)  += mlx4_ib.o
 
-mlx4_ib-y :=   ah.o cq.o doorbell.o mad.o main.o mr.o qp.o srq.o
+mlx4_ib-y :=   ah.o cq.o doorbell.o mad.o main.o mr.o qp.o srq.o mcg.o cm.o alias_GUID.o sysfs.o
diff --git a/drivers/infiniband/hw/mlx4/alias_GUID.c b/drivers/infiniband/hw/mlx4/alias_GUID.c
new file mode 100644 (file)
index 0000000..d2fb38d
--- /dev/null
@@ -0,0 +1,688 @@
+/*
+ * Copyright (c) 2012 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+ /***********************************************************/
+/*This file supports the handling of the Alias GUID feature. */
+/***********************************************************/
+#include <rdma/ib_mad.h>
+#include <rdma/ib_smi.h>
+#include <rdma/ib_cache.h>
+#include <rdma/ib_sa.h>
+#include <rdma/ib_pack.h>
+#include <linux/mlx4/cmd.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <rdma/ib_user_verbs.h>
+#include <linux/delay.h>
+#include "mlx4_ib.h"
+
+/*
+The driver keeps the current state of all guids, as they are in the HW.
+Whenever we receive an smp mad GUIDInfo record, the data will be cached.
+*/
+
+struct mlx4_alias_guid_work_context {
+       u8 port;
+       struct mlx4_ib_dev     *dev ;
+       struct ib_sa_query     *sa_query;
+       struct completion       done;
+       int                     query_id;
+       struct list_head        list;
+       int                     block_num;
+};
+
+struct mlx4_next_alias_guid_work {
+       u8 port;
+       u8 block_num;
+       struct mlx4_sriov_alias_guid_info_rec_det rec_det;
+};
+
+
+void mlx4_ib_update_cache_on_guid_change(struct mlx4_ib_dev *dev, int block_num,
+                                        u8 port_num, u8 *p_data)
+{
+       int i;
+       u64 guid_indexes;
+       int slave_id;
+       int port_index = port_num - 1;
+
+       if (!mlx4_is_master(dev->dev))
+               return;
+
+       guid_indexes = be64_to_cpu((__force __be64) dev->sriov.alias_guid.
+                                  ports_guid[port_num - 1].
+                                  all_rec_per_port[block_num].guid_indexes);
+       pr_debug("port: %d, guid_indexes: 0x%llx\n", port_num, guid_indexes);
+
+       for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) {
+               /* The location of the specific index starts from bit number 4
+                * until bit num 11 */
+               if (test_bit(i + 4, (unsigned long *)&guid_indexes)) {
+                       slave_id = (block_num * NUM_ALIAS_GUID_IN_REC) + i ;
+                       if (slave_id >= dev->dev->num_slaves) {
+                               pr_debug("The last slave: %d\n", slave_id);
+                               return;
+                       }
+
+                       /* cache the guid: */
+                       memcpy(&dev->sriov.demux[port_index].guid_cache[slave_id],
+                              &p_data[i * GUID_REC_SIZE],
+                              GUID_REC_SIZE);
+               } else
+                       pr_debug("Guid number: %d in block: %d"
+                                " was not updated\n", i, block_num);
+       }
+}
+
+static __be64 get_cached_alias_guid(struct mlx4_ib_dev *dev, int port, int index)
+{
+       if (index >= NUM_ALIAS_GUID_PER_PORT) {
+               pr_err("%s: ERROR: asked for index:%d\n", __func__, index);
+               return  (__force __be64) ((u64) 0xFFFFFFFFFFFFFFFFUL);
+       }
+       return *(__be64 *)&dev->sriov.demux[port - 1].guid_cache[index];
+}
+
+
+ib_sa_comp_mask mlx4_ib_get_aguid_comp_mask_from_ix(int index)
+{
+       return IB_SA_COMP_MASK(4 + index);
+}
+
+/*
+ * Whenever new GUID is set/unset (guid table change) create event and
+ * notify the relevant slave (master also should be notified).
+ * If the GUID value is not as we have in the cache the slave will not be
+ * updated; in this case it waits for the smp_snoop or the port management
+ * event to call the function and to update the slave.
+ * block_number - the index of the block (16 blocks available)
+ * port_number - 1 or 2
+ */
+void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
+                                         int block_num, u8 port_num,
+                                         u8 *p_data)
+{
+       int i;
+       u64 guid_indexes;
+       int slave_id;
+       enum slave_port_state new_state;
+       enum slave_port_state prev_state;
+       __be64 tmp_cur_ag, form_cache_ag;
+       enum slave_port_gen_event gen_event;
+
+       if (!mlx4_is_master(dev->dev))
+               return;
+
+       guid_indexes = be64_to_cpu((__force __be64) dev->sriov.alias_guid.
+                                  ports_guid[port_num - 1].
+                                  all_rec_per_port[block_num].guid_indexes);
+       pr_debug("port: %d, guid_indexes: 0x%llx\n", port_num, guid_indexes);
+
+       /*calculate the slaves and notify them*/
+       for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) {
+               /* the location of the specific index runs from bits 4..11 */
+               if (!(test_bit(i + 4, (unsigned long *)&guid_indexes)))
+                       continue;
+
+               slave_id = (block_num * NUM_ALIAS_GUID_IN_REC) + i ;
+               if (slave_id >= dev->dev->num_slaves)
+                       return;
+               tmp_cur_ag = *(__be64 *)&p_data[i * GUID_REC_SIZE];
+               form_cache_ag = get_cached_alias_guid(dev, port_num,
+                                       (NUM_ALIAS_GUID_IN_REC * block_num) + i);
+               /*
+                * Check if guid is not the same as in the cache,
+                * If it is different, wait for the snoop_smp or the port mgmt
+                * change event to update the slave on its port state change
+                */
+               if (tmp_cur_ag != form_cache_ag)
+                       continue;
+               mlx4_gen_guid_change_eqe(dev->dev, slave_id, port_num);
+
+               /*2 cases: Valid GUID, and Invalid Guid*/
+
+               if (tmp_cur_ag != MLX4_NOT_SET_GUID) { /*valid GUID*/
+                       prev_state = mlx4_get_slave_port_state(dev->dev, slave_id, port_num);
+                       new_state = set_and_calc_slave_port_state(dev->dev, slave_id, port_num,
+                                                                 MLX4_PORT_STATE_IB_PORT_STATE_EVENT_GID_VALID,
+                                                                 &gen_event);
+                       pr_debug("slave: %d, port: %d prev_port_state: %d,"
+                                " new_port_state: %d, gen_event: %d\n",
+                                slave_id, port_num, prev_state, new_state, gen_event);
+                       if (gen_event == SLAVE_PORT_GEN_EVENT_UP) {
+                               pr_debug("sending PORT_UP event to slave: %d, port: %d\n",
+                                        slave_id, port_num);
+                               mlx4_gen_port_state_change_eqe(dev->dev, slave_id,
+                                                              port_num, MLX4_PORT_CHANGE_SUBTYPE_ACTIVE);
+                       }
+               } else { /* request to invalidate GUID */
+                       set_and_calc_slave_port_state(dev->dev, slave_id, port_num,
+                                                     MLX4_PORT_STATE_IB_EVENT_GID_INVALID,
+                                                     &gen_event);
+                       pr_debug("sending PORT DOWN event to slave: %d, port: %d\n",
+                                slave_id, port_num);
+                       mlx4_gen_port_state_change_eqe(dev->dev, slave_id, port_num,
+                                                      MLX4_PORT_CHANGE_SUBTYPE_DOWN);
+               }
+       }
+}
+
+static void aliasguid_query_handler(int status,
+                                   struct ib_sa_guidinfo_rec *guid_rec,
+                                   void *context)
+{
+       struct mlx4_ib_dev *dev;
+       struct mlx4_alias_guid_work_context *cb_ctx = context;
+       u8 port_index ;
+       int i;
+       struct mlx4_sriov_alias_guid_info_rec_det *rec;
+       unsigned long flags, flags1;
+
+       if (!context)
+               return;
+
+       dev = cb_ctx->dev;
+       port_index = cb_ctx->port - 1;
+       rec = &dev->sriov.alias_guid.ports_guid[port_index].
+               all_rec_per_port[cb_ctx->block_num];
+
+       if (status) {
+               rec->status = MLX4_GUID_INFO_STATUS_IDLE;
+               pr_debug("(port: %d) failed: status = %d\n",
+                        cb_ctx->port, status);
+               goto out;
+       }
+
+       if (guid_rec->block_num != cb_ctx->block_num) {
+               pr_err("block num mismatch: %d != %d\n",
+                      cb_ctx->block_num, guid_rec->block_num);
+               goto out;
+       }
+
+       pr_debug("lid/port: %d/%d, block_num: %d\n",
+                be16_to_cpu(guid_rec->lid), cb_ctx->port,
+                guid_rec->block_num);
+
+       rec = &dev->sriov.alias_guid.ports_guid[port_index].
+               all_rec_per_port[guid_rec->block_num];
+
+       rec->status = MLX4_GUID_INFO_STATUS_SET;
+       rec->method = MLX4_GUID_INFO_RECORD_SET;
+
+       for (i = 0 ; i < NUM_ALIAS_GUID_IN_REC; i++) {
+               __be64 tmp_cur_ag;
+               tmp_cur_ag = *(__be64 *)&guid_rec->guid_info_list[i * GUID_REC_SIZE];
+               /* Check whether the SM assigned this record.
+                * If it didn't, and it was not a sysadmin request,
+                * ask the SM to assign a new GUID (instead of the driver's request).
+                */
+               if (tmp_cur_ag == MLX4_NOT_SET_GUID) {
+                       mlx4_ib_warn(&dev->ib_dev, "%s:Record num %d in "
+                                    "block_num: %d was declined by SM, "
+                                    "ownership by %d (0 = driver, 1=sysAdmin,"
+                                    " 2=None)\n", __func__, i,
+                                    guid_rec->block_num, rec->ownership);
+                       if (rec->ownership == MLX4_GUID_DRIVER_ASSIGN) {
+                               /* if it is driver assign, asks for new GUID from SM*/
+                               *(__be64 *)&rec->all_recs[i * GUID_REC_SIZE] =
+                                       MLX4_NOT_SET_GUID;
+
+                               /* Mark the record as not assigned, and let it
+                                * be sent again in the next scheduled work.*/
+                               rec->status = MLX4_GUID_INFO_STATUS_IDLE;
+                               rec->guid_indexes |= mlx4_ib_get_aguid_comp_mask_from_ix(i);
+                       }
+               } else {
+                      /* properly assigned record. */
+                      /* We save the GUID we just got from the SM in the
+                       * admin_guid in order to be persistent, and in the
+                       * request from the sm the process will ask for the same GUID */
+                       if (rec->ownership == MLX4_GUID_SYSADMIN_ASSIGN &&
+                           tmp_cur_ag != *(__be64 *)&rec->all_recs[i * GUID_REC_SIZE]) {
+                               /* the sysadmin assignment failed.*/
+                               mlx4_ib_warn(&dev->ib_dev, "%s: Failed to set"
+                                            " admin guid after SysAdmin "
+                                            "configuration. "
+                                            "Record num %d in block_num:%d "
+                                            "was declined by SM, "
+                                            "new val(0x%llx) was kept\n",
+                                             __func__, i,
+                                            guid_rec->block_num,
+                                            be64_to_cpu(*(__be64 *) &
+                                                        rec->all_recs[i * GUID_REC_SIZE]));
+                       } else {
+                               memcpy(&rec->all_recs[i * GUID_REC_SIZE],
+                                      &guid_rec->guid_info_list[i * GUID_REC_SIZE],
+                                      GUID_REC_SIZE);
+                       }
+               }
+       }
+       /*
+       The function is called here to handle the cases in which the
+       SM doesn't send an SMP, so in the SA response the driver
+       notifies the slave.
+       */
+       mlx4_ib_notify_slaves_on_guid_change(dev, guid_rec->block_num,
+                                            cb_ctx->port,
+                                            guid_rec->guid_info_list);
+out:
+       spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
+       spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
+       if (!dev->sriov.is_going_down)
+               queue_delayed_work(dev->sriov.alias_guid.ports_guid[port_index].wq,
+                                  &dev->sriov.alias_guid.ports_guid[port_index].
+                                  alias_guid_work, 0);
+       if (cb_ctx->sa_query) {
+               list_del(&cb_ctx->list);
+               kfree(cb_ctx);
+       } else
+               complete(&cb_ctx->done);
+       spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
+       spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
+}
+
+static void invalidate_guid_record(struct mlx4_ib_dev *dev, u8 port, int index)
+{
+       int i;
+       u64 cur_admin_val;
+       ib_sa_comp_mask comp_mask = 0;
+
+       dev->sriov.alias_guid.ports_guid[port - 1].all_rec_per_port[index].status
+               = MLX4_GUID_INFO_STATUS_IDLE;
+       dev->sriov.alias_guid.ports_guid[port - 1].all_rec_per_port[index].method
+               = MLX4_GUID_INFO_RECORD_SET;
+
+       /* calculate the comp_mask for that record.*/
+       for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) {
+               cur_admin_val =
+                       *(u64 *)&dev->sriov.alias_guid.ports_guid[port - 1].
+                       all_rec_per_port[index].all_recs[GUID_REC_SIZE * i];
+               /*
+               Check the admin value: if it's for delete (~00LL) or
+               it is the first guid of the first record (hw guid) or
+               the record is not owned by the sysadmin and the SM doesn't
+               need to assign GUIDs, then don't put it up for assignment.
+               */
+               if (MLX4_GUID_FOR_DELETE_VAL == cur_admin_val ||
+                   (!index && !i) ||
+                   MLX4_GUID_NONE_ASSIGN == dev->sriov.alias_guid.
+                   ports_guid[port - 1].all_rec_per_port[index].ownership)
+                       continue;
+               comp_mask |= mlx4_ib_get_aguid_comp_mask_from_ix(i);
+       }
+       dev->sriov.alias_guid.ports_guid[port - 1].
+               all_rec_per_port[index].guid_indexes = comp_mask;
+}
+
+static int set_guid_rec(struct ib_device *ibdev,
+                       u8 port, int index,
+                       struct mlx4_sriov_alias_guid_info_rec_det *rec_det)
+{
+       int err;
+       struct mlx4_ib_dev *dev = to_mdev(ibdev);
+       struct ib_sa_guidinfo_rec guid_info_rec;
+       ib_sa_comp_mask comp_mask;
+       struct ib_port_attr attr;
+       struct mlx4_alias_guid_work_context *callback_context;
+       unsigned long resched_delay, flags, flags1;
+       struct list_head *head =
+               &dev->sriov.alias_guid.ports_guid[port - 1].cb_list;
+
+       err = __mlx4_ib_query_port(ibdev, port, &attr, 1);
+       if (err) {
+               pr_debug("mlx4_ib_query_port failed (err: %d), port: %d\n",
+                        err, port);
+               return err;
+       }
+       /*check the port was configured by the sm, otherwise no need to send */
+       if (attr.state != IB_PORT_ACTIVE) {
+               pr_debug("port %d not active...rescheduling\n", port);
+               resched_delay = 5 * HZ;
+               err = -EAGAIN;
+               goto new_schedule;
+       }
+
+       callback_context = kmalloc(sizeof *callback_context, GFP_KERNEL);
+       if (!callback_context) {
+               err = -ENOMEM;
+               resched_delay = HZ * 5;
+               goto new_schedule;
+       }
+       callback_context->port = port;
+       callback_context->dev = dev;
+       callback_context->block_num = index;
+
+       memset(&guid_info_rec, 0, sizeof (struct ib_sa_guidinfo_rec));
+
+       guid_info_rec.lid = cpu_to_be16(attr.lid);
+       guid_info_rec.block_num = index;
+
+       memcpy(guid_info_rec.guid_info_list, rec_det->all_recs,
+              GUID_REC_SIZE * NUM_ALIAS_GUID_IN_REC);
+       comp_mask = IB_SA_GUIDINFO_REC_LID | IB_SA_GUIDINFO_REC_BLOCK_NUM |
+               rec_det->guid_indexes;
+
+       init_completion(&callback_context->done);
+       spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
+       list_add_tail(&callback_context->list, head);
+       spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
+
+       callback_context->query_id =
+               ib_sa_guid_info_rec_query(dev->sriov.alias_guid.sa_client,
+                                         ibdev, port, &guid_info_rec,
+                                         comp_mask, rec_det->method, 1000,
+                                         GFP_KERNEL, aliasguid_query_handler,
+                                         callback_context,
+                                         &callback_context->sa_query);
+       if (callback_context->query_id < 0) {
+               pr_debug("ib_sa_guid_info_rec_query failed, query_id: "
+                        "%d. will reschedule to the next 1 sec.\n",
+                        callback_context->query_id);
+               spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
+               list_del(&callback_context->list);
+               kfree(callback_context);
+               spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
+               resched_delay = 1 * HZ;
+               err = -EAGAIN;
+               goto new_schedule;
+       }
+       err = 0;
+       goto out;
+
+new_schedule:
+       spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
+       spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
+       invalidate_guid_record(dev, port, index);
+       if (!dev->sriov.is_going_down) {
+               queue_delayed_work(dev->sriov.alias_guid.ports_guid[port - 1].wq,
+                                  &dev->sriov.alias_guid.ports_guid[port - 1].alias_guid_work,
+                                  resched_delay);
+       }
+       spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
+       spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
+
+out:
+       return err;
+}
+
+/* Invalidate every alias-GUID record of @port; on the master (when not
+ * tearing down) also (re)queue the per-port work so the records are
+ * re-acquired from the SM. */
+void mlx4_ib_invalidate_all_guid_record(struct mlx4_ib_dev *dev, int port)
+{
+       int i;
+       unsigned long flags, flags1;
+
+       pr_debug("port %d\n", port);
+
+       spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
+       spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
+       for (i = 0; i < NUM_ALIAS_GUID_REC_IN_PORT; i++)
+               invalidate_guid_record(dev, port, i);
+
+       if (mlx4_is_master(dev->dev) && !dev->sriov.is_going_down) {
+               /*
+               make sure no work waits in the queue, if the work is already
+               queued(not on the timer) the cancel will fail. That is not a problem
+               because we just want the work started.
+               */
+               cancel_delayed_work(&dev->sriov.alias_guid.
+                                     ports_guid[port - 1].alias_guid_work);
+               queue_delayed_work(dev->sriov.alias_guid.ports_guid[port - 1].wq,
+                                  &dev->sriov.alias_guid.ports_guid[port - 1].alias_guid_work,
+                                  0);
+       }
+       spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
+       spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
+}
+
+/* The function returns the next record that was
+ * not configured (or failed to be configured) */
+static int get_next_record_to_update(struct mlx4_ib_dev *dev, u8 port,
+                                    struct mlx4_next_alias_guid_work *rec)
+{
+       int j;
+       unsigned long flags;
+
+       for (j = 0; j < NUM_ALIAS_GUID_REC_IN_PORT; j++) {
+               spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
+               if (dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[j].status ==
+                   MLX4_GUID_INFO_STATUS_IDLE) {
+                       /* Snapshot the IDLE record into @rec and flip it to
+                        * PENDING under the lock, so no other worker can pick
+                        * the same block. */
+                       memcpy(&rec->rec_det,
+                              &dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[j],
+                              sizeof (struct mlx4_sriov_alias_guid_info_rec_det));
+                       rec->port = port;
+                       rec->block_num = j;
+                       dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[j].status =
+                               MLX4_GUID_INFO_STATUS_PENDING;
+                       spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);
+                       return 0;
+               }
+               spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);
+       }
+       /* No IDLE record found for this port. */
+       return -ENOENT;
+}
+
+/* Copy @rec_det (index mask, GUID list and status) into the cached record
+ * @rec_index of @port. Caller is responsible for any required locking. */
+static void set_administratively_guid_record(struct mlx4_ib_dev *dev, int port,
+                                            int rec_index,
+                                            struct mlx4_sriov_alias_guid_info_rec_det *rec_det)
+{
+       dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[rec_index].guid_indexes =
+               rec_det->guid_indexes;
+       memcpy(dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[rec_index].all_recs,
+              rec_det->all_recs, NUM_ALIAS_GUID_IN_REC * GUID_REC_SIZE);
+       dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[rec_index].status =
+               rec_det->status;
+}
+
+/* Initialize every record of @port with zeroed GUIDs and an IDLE status.
+ * For block 0, GID index 0 (the physical port GUID) is excluded from the
+ * index mask; all other indexes are marked for update. */
+static void set_all_slaves_guids(struct mlx4_ib_dev *dev, int port)
+{
+       int j;
+       struct mlx4_sriov_alias_guid_info_rec_det rec_det ;
+
+       for (j = 0 ; j < NUM_ALIAS_GUID_REC_IN_PORT ; j++) {
+               memset(rec_det.all_recs, 0, NUM_ALIAS_GUID_IN_REC * GUID_REC_SIZE);
+               rec_det.guid_indexes = (!j ? 0 : IB_SA_GUIDINFO_REC_GID0) |
+                       IB_SA_GUIDINFO_REC_GID1 | IB_SA_GUIDINFO_REC_GID2 |
+                       IB_SA_GUIDINFO_REC_GID3 | IB_SA_GUIDINFO_REC_GID4 |
+                       IB_SA_GUIDINFO_REC_GID5 | IB_SA_GUIDINFO_REC_GID6 |
+                       IB_SA_GUIDINFO_REC_GID7;
+               rec_det.status = MLX4_GUID_INFO_STATUS_IDLE;
+               set_administratively_guid_record(dev, port, j, &rec_det);
+       }
+}
+
+/* Per-port delayed-work handler: pick the next not-yet-configured alias
+ * GUID record and issue the SA query for it via set_guid_rec(). */
+static void alias_guid_work(struct work_struct *work)
+{
+       struct delayed_work *delay = to_delayed_work(work);
+       int ret = 0;
+       struct mlx4_next_alias_guid_work *rec;
+       /* Walk back up the container chain: delayed_work -> port record ->
+        * alias_guid -> sriov -> mlx4_ib_dev. */
+       struct mlx4_sriov_alias_guid_port_rec_det *sriov_alias_port =
+               container_of(delay, struct mlx4_sriov_alias_guid_port_rec_det,
+                            alias_guid_work);
+       struct mlx4_sriov_alias_guid *sriov_alias_guid = sriov_alias_port->parent;
+       struct mlx4_ib_sriov *ib_sriov = container_of(sriov_alias_guid,
+                                               struct mlx4_ib_sriov,
+                                               alias_guid);
+       struct mlx4_ib_dev *dev = container_of(ib_sriov, struct mlx4_ib_dev, sriov);
+
+       rec = kzalloc(sizeof *rec, GFP_KERNEL);
+       if (!rec) {
+               pr_err("alias_guid_work: No Memory\n");
+               return;
+       }
+
+       pr_debug("starting [port: %d]...\n", sriov_alias_port->port + 1);
+       ret = get_next_record_to_update(dev, sriov_alias_port->port, rec);
+       if (ret) {
+               pr_debug("No more records to update.\n");
+               goto out;
+       }
+
+       /* rec->port is 0-based; set_guid_rec() takes a 1-based port number. */
+       set_guid_rec(&dev->ib_dev, rec->port + 1, rec->block_num,
+                    &rec->rec_det);
+
+out:
+       kfree(rec);
+}
+
+
+/* Kick off the alias-GUID work for @port (0-based) immediately.
+ * No-op on slaves or while the device is going down. */
+void mlx4_ib_init_alias_guid_work(struct mlx4_ib_dev *dev, int port)
+{
+       unsigned long flags, flags1;
+
+       if (!mlx4_is_master(dev->dev))
+               return;
+       spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
+       spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
+       if (!dev->sriov.is_going_down) {
+               queue_delayed_work(dev->sriov.alias_guid.ports_guid[port].wq,
+                          &dev->sriov.alias_guid.ports_guid[port].alias_guid_work, 0);
+       }
+       spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
+       spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
+}
+
+/* Tear down the alias-GUID service: cancel per-port work, cancel every
+ * in-flight SA query (waiting for its completion handler), destroy the
+ * per-port workqueues and unregister/free the SA client. */
+void mlx4_ib_destroy_alias_guid_service(struct mlx4_ib_dev *dev)
+{
+       int i;
+       struct mlx4_ib_sriov *sriov = &dev->sriov;
+       struct mlx4_alias_guid_work_context *cb_ctx;
+       struct mlx4_sriov_alias_guid_port_rec_det *det;
+       struct ib_sa_query *sa_query;
+       unsigned long flags;
+
+       for (i = 0 ; i < dev->num_ports; i++) {
+               cancel_delayed_work(&dev->sriov.alias_guid.ports_guid[i].alias_guid_work);
+               det = &sriov->alias_guid.ports_guid[i];
+               spin_lock_irqsave(&sriov->alias_guid.ag_work_lock, flags);
+               while (!list_empty(&det->cb_list)) {
+                       cb_ctx = list_entry(det->cb_list.next,
+                                           struct mlx4_alias_guid_work_context,
+                                           list);
+                       /* Clear sa_query under the lock so the completion
+                        * handler sees the cancellation; drop the lock before
+                        * sleeping in ib_sa_cancel_query/wait_for_completion. */
+                       sa_query = cb_ctx->sa_query;
+                       cb_ctx->sa_query = NULL;
+                       list_del(&cb_ctx->list);
+                       spin_unlock_irqrestore(&sriov->alias_guid.ag_work_lock, flags);
+                       ib_sa_cancel_query(cb_ctx->query_id, sa_query);
+                       wait_for_completion(&cb_ctx->done);
+                       kfree(cb_ctx);
+                       spin_lock_irqsave(&sriov->alias_guid.ag_work_lock, flags);
+               }
+               spin_unlock_irqrestore(&sriov->alias_guid.ag_work_lock, flags);
+       }
+       for (i = 0 ; i < dev->num_ports; i++) {
+               flush_workqueue(dev->sriov.alias_guid.ports_guid[i].wq);
+               destroy_workqueue(dev->sriov.alias_guid.ports_guid[i].wq);
+       }
+       ib_sa_unregister_client(dev->sriov.alias_guid.sa_client);
+       kfree(dev->sriov.alias_guid.sa_client);
+}
+
+/* Set up the alias-GUID service on the master: register an SA client,
+ * initialize each port's record cache and create one single-threaded
+ * workqueue per port running alias_guid_work().
+ *
+ * Returns 0 on success (or on a slave, where this is a no-op) and a
+ * negative errno on failure, undoing all partial initialization. */
+int mlx4_ib_init_alias_guid_service(struct mlx4_ib_dev *dev)
+{
+       char alias_wq_name[15];
+       int ret = 0;
+       int i, j, k;
+       union ib_gid gid;
+
+       if (!mlx4_is_master(dev->dev))
+               return 0;
+       dev->sriov.alias_guid.sa_client =
+               kzalloc(sizeof *dev->sriov.alias_guid.sa_client, GFP_KERNEL);
+       if (!dev->sriov.alias_guid.sa_client)
+               return -ENOMEM;
+
+       ib_sa_register_client(dev->sriov.alias_guid.sa_client);
+
+       spin_lock_init(&dev->sriov.alias_guid.ag_work_lock);
+
+       /* Sanity check: every port must be able to report GID index 0. */
+       for (i = 1; i <= dev->num_ports; ++i) {
+               if (dev->ib_dev.query_gid(&dev->ib_dev , i, 0, &gid)) {
+                       ret = -EFAULT;
+                       goto err_unregister;
+               }
+       }
+
+       for (i = 0 ; i < dev->num_ports; i++) {
+               memset(&dev->sriov.alias_guid.ports_guid[i], 0,
+                      sizeof (struct mlx4_sriov_alias_guid_port_rec_det));
+               /*Check if the SM doesn't need to assign the GUIDs*/
+               for (j = 0; j < NUM_ALIAS_GUID_REC_IN_PORT; j++) {
+                       if (mlx4_ib_sm_guid_assign) {
+                               dev->sriov.alias_guid.ports_guid[i].
+                                       all_rec_per_port[j].
+                                       ownership = MLX4_GUID_DRIVER_ASSIGN;
+                               continue;
+                       }
+                       dev->sriov.alias_guid.ports_guid[i].all_rec_per_port[j].
+                                       ownership = MLX4_GUID_NONE_ASSIGN;
+                       /*mark each val as it was deleted,
+                         till the sysAdmin will give it valid val*/
+                       for (k = 0; k < NUM_ALIAS_GUID_IN_REC; k++) {
+                               *(__be64 *)&dev->sriov.alias_guid.ports_guid[i].
+                                       all_rec_per_port[j].all_recs[GUID_REC_SIZE * k] =
+                                               cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL);
+                       }
+               }
+               INIT_LIST_HEAD(&dev->sriov.alias_guid.ports_guid[i].cb_list);
+               /*prepare the records, set them to be allocated by sm*/
+               for (j = 0 ; j < NUM_ALIAS_GUID_REC_IN_PORT; j++)
+                       invalidate_guid_record(dev, i + 1, j);
+
+               dev->sriov.alias_guid.ports_guid[i].parent = &dev->sriov.alias_guid;
+               dev->sriov.alias_guid.ports_guid[i].port  = i;
+               if (mlx4_ib_sm_guid_assign)
+                       set_all_slaves_guids(dev, i);
+
+               snprintf(alias_wq_name, sizeof alias_wq_name, "alias_guid%d", i);
+               dev->sriov.alias_guid.ports_guid[i].wq =
+                       create_singlethread_workqueue(alias_wq_name);
+               if (!dev->sriov.alias_guid.ports_guid[i].wq) {
+                       ret = -ENOMEM;
+                       goto err_thread;
+               }
+               INIT_DELAYED_WORK(&dev->sriov.alias_guid.ports_guid[i].alias_guid_work,
+                         alias_guid_work);
+       }
+       return 0;
+
+err_thread:
+       /* Destroy the workqueues created for ports before the failing one. */
+       for (--i; i >= 0; i--) {
+               destroy_workqueue(dev->sriov.alias_guid.ports_guid[i].wq);
+               dev->sriov.alias_guid.ports_guid[i].wq = NULL;
+       }
+
+err_unregister:
+       ib_sa_unregister_client(dev->sriov.alias_guid.sa_client);
+       kfree(dev->sriov.alias_guid.sa_client);
+       dev->sriov.alias_guid.sa_client = NULL;
+       pr_err("init_alias_guid_service: Failed. (ret:%d)\n", ret);
+       return ret;
+}
diff --git a/drivers/infiniband/hw/mlx4/cm.c b/drivers/infiniband/hw/mlx4/cm.c
new file mode 100644 (file)
index 0000000..e25e4da
--- /dev/null
@@ -0,0 +1,437 @@
+/*
+ * Copyright (c) 2012 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <rdma/ib_mad.h>
+
+#include <linux/mlx4/cmd.h>
+#include <linux/rbtree.h>
+#include <linux/idr.h>
+#include <rdma/ib_cm.h>
+
+#include "mlx4_ib.h"
+
+#define CM_CLEANUP_CACHE_TIMEOUT  (5 * HZ)
+
+/* Mapping between a slave-local CM id (slave_id, sl_cm_id) and the
+ * paravirtualized id (pv_cm_id) handed to the wire. Lives both in the
+ * rb-tree keyed by (sl_cm_id, slave_id) and in the per-device cm_list;
+ * timeout work removes stale entries. */
+struct id_map_entry {
+       struct rb_node node;
+
+       u32 sl_cm_id;
+       u32 pv_cm_id;
+       int slave_id;
+       int scheduled_delete;
+       struct mlx4_ib_dev *dev;
+
+       struct list_head list;
+       struct delayed_work timeout;
+};
+
+/* Minimal view of a CM MAD: the comm ids immediately follow the header. */
+struct cm_generic_msg {
+       struct ib_mad_hdr hdr;
+
+       __be32 local_comm_id;
+       __be32 remote_comm_id;
+};
+
+/* View of a CM REQ MAD exposing only the primary path SGID at offset 0x60. */
+struct cm_req_msg {
+       unsigned char unused[0x60];
+       union ib_gid primary_path_sgid;
+};
+
+
+/* Overwrite the local comm id of @mad with @cm_id (stored big-endian). */
+static void set_local_comm_id(struct ib_mad *mad, u32 cm_id)
+{
+       struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
+       msg->local_comm_id = cpu_to_be32(cm_id);
+}
+
+/* Read the local comm id of @mad in host byte order. */
+static u32 get_local_comm_id(struct ib_mad *mad)
+{
+       struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
+
+       return be32_to_cpu(msg->local_comm_id);
+}
+
+/* Overwrite the remote comm id of @mad with @cm_id (stored big-endian). */
+static void set_remote_comm_id(struct ib_mad *mad, u32 cm_id)
+{
+       struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
+       msg->remote_comm_id = cpu_to_be32(cm_id);
+}
+
+/* Read the remote comm id of @mad in host byte order. */
+static u32 get_remote_comm_id(struct ib_mad *mad)
+{
+       struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
+
+       return be32_to_cpu(msg->remote_comm_id);
+}
+
+/* Extract the primary path SGID from a CM REQ MAD (@ibdev is unused). */
+static union ib_gid gid_from_req_msg(struct ib_device *ibdev, struct ib_mad *mad)
+{
+       struct cm_req_msg *msg = (struct cm_req_msg *)mad;
+
+       return msg->primary_path_sgid;
+}
+
+/* Lock should be taken before called */
+/* Binary search of the sl_id_map rb-tree, keyed primarily by sl_cm_id and
+ * secondarily by slave_id. Returns the matching entry or NULL. */
+static struct id_map_entry *
+id_map_find_by_sl_id(struct ib_device *ibdev, u32 slave_id, u32 sl_cm_id)
+{
+       struct rb_root *sl_id_map = &to_mdev(ibdev)->sriov.sl_id_map;
+       struct rb_node *node = sl_id_map->rb_node;
+
+       while (node) {
+               struct id_map_entry *id_map_entry =
+                       rb_entry(node, struct id_map_entry, node);
+
+               if (id_map_entry->sl_cm_id > sl_cm_id)
+                       node = node->rb_left;
+               else if (id_map_entry->sl_cm_id < sl_cm_id)
+                       node = node->rb_right;
+               else if (id_map_entry->slave_id > slave_id)
+                       node = node->rb_left;
+               else if (id_map_entry->slave_id < slave_id)
+                       node = node->rb_right;
+               else
+                       return id_map_entry;
+       }
+       return NULL;
+}
+
+/* Delayed-work handler that expires a cached id mapping: removes it from
+ * the idr, the rb-tree (if it is still the one indexed there) and the
+ * cm_list, then frees it. */
+static void id_map_ent_timeout(struct work_struct *work)
+{
+       struct delayed_work *delay = to_delayed_work(work);
+       struct id_map_entry *ent = container_of(delay, struct id_map_entry, timeout);
+       struct id_map_entry *db_ent, *found_ent;
+       struct mlx4_ib_dev *dev = ent->dev;
+       struct mlx4_ib_sriov *sriov = &dev->sriov;
+       struct rb_root *sl_id_map = &sriov->sl_id_map;
+       int pv_id = (int) ent->pv_cm_id;
+
+       spin_lock(&sriov->id_map_lock);
+       db_ent = (struct id_map_entry *)idr_find(&sriov->pv_id_table, pv_id);
+       if (!db_ent)
+               goto out;
+       found_ent = id_map_find_by_sl_id(&dev->ib_dev, ent->slave_id, ent->sl_cm_id);
+       /* Only erase from the tree if the tree still points at this entry;
+        * sl_id_map_add() may have replaced it with a newer one. */
+       if (found_ent && found_ent == ent)
+               rb_erase(&found_ent->node, sl_id_map);
+       idr_remove(&sriov->pv_id_table, pv_id);
+
+out:
+       list_del(&ent->list);
+       spin_unlock(&sriov->id_map_lock);
+       kfree(ent);
+}
+
+/* Remove the mapping for @pv_cm_id from the idr and (if it is still the
+ * indexed one) from the rb-tree. The entry itself is not freed here;
+ * its timeout work handles that. */
+static void id_map_find_del(struct ib_device *ibdev, int pv_cm_id)
+{
+       struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
+       struct rb_root *sl_id_map = &sriov->sl_id_map;
+       struct id_map_entry *ent, *found_ent;
+
+       spin_lock(&sriov->id_map_lock);
+       ent = (struct id_map_entry *)idr_find(&sriov->pv_id_table, pv_cm_id);
+       if (!ent)
+               goto out;
+       found_ent = id_map_find_by_sl_id(ibdev, ent->slave_id, ent->sl_cm_id);
+       if (found_ent && found_ent == ent)
+               rb_erase(&found_ent->node, sl_id_map);
+       idr_remove(&sriov->pv_id_table, pv_cm_id);
+out:
+       spin_unlock(&sriov->id_map_lock);
+}
+
+/* Insert @new into the sl_id_map rb-tree, keyed by (sl_cm_id, slave_id).
+ * An existing entry with the same key is replaced in place. Caller holds
+ * the id_map lock. */
+static void sl_id_map_add(struct ib_device *ibdev, struct id_map_entry *new)
+{
+       struct rb_root *sl_id_map = &to_mdev(ibdev)->sriov.sl_id_map;
+       struct rb_node **link = &sl_id_map->rb_node, *parent = NULL;
+       struct id_map_entry *ent;
+       int slave_id = new->slave_id;
+       int sl_cm_id = new->sl_cm_id;
+
+       ent = id_map_find_by_sl_id(ibdev, slave_id, sl_cm_id);
+       if (ent) {
+               pr_debug("overriding existing sl_id_map entry (cm_id = %x)\n",
+                        sl_cm_id);
+
+               rb_replace_node(&ent->node, &new->node, sl_id_map);
+               return;
+       }
+
+       /* Go to the bottom of the tree */
+       while (*link) {
+               parent = *link;
+               ent = rb_entry(parent, struct id_map_entry, node);
+
+               if (ent->sl_cm_id > sl_cm_id || (ent->sl_cm_id == sl_cm_id && ent->slave_id > slave_id))
+                       link = &(*link)->rb_left;
+               else
+                       link = &(*link)->rb_right;
+       }
+
+       rb_link_node(&new->node, parent, link);
+       rb_insert_color(&new->node, sl_id_map);
+}
+
+/* Allocate a new id mapping for (@slave_id, @sl_cm_id): reserve a fresh
+ * pv_cm_id from the idr, index the entry in the rb-tree and append it to
+ * cm_list. Returns the entry or an ERR_PTR on allocation failure. */
+static struct id_map_entry *
+id_map_alloc(struct ib_device *ibdev, int slave_id, u32 sl_cm_id)
+{
+       int ret, id;
+       static int next_id;     /* hint for the next idr id; wraps at MAX_ID_MASK */
+       struct id_map_entry *ent;
+       struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
+
+       ent = kmalloc(sizeof (struct id_map_entry), GFP_KERNEL);
+       if (!ent) {
+               mlx4_ib_warn(ibdev, "Couldn't allocate id cache entry - out of memory\n");
+               return ERR_PTR(-ENOMEM);
+       }
+
+       ent->sl_cm_id = sl_cm_id;
+       ent->slave_id = slave_id;
+       ent->scheduled_delete = 0;
+       ent->dev = to_mdev(ibdev);
+       INIT_DELAYED_WORK(&ent->timeout, id_map_ent_timeout);
+
+       /* Old-style idr two-step: retry idr_get_new_above() after refilling
+        * the idr cache with idr_pre_get() outside the spinlock. */
+       do {
+               spin_lock(&to_mdev(ibdev)->sriov.id_map_lock);
+               ret = idr_get_new_above(&sriov->pv_id_table, ent,
+                                       next_id, &id);
+               if (!ret) {
+                       next_id = ((unsigned) id + 1) & MAX_ID_MASK;
+                       ent->pv_cm_id = (u32)id;
+                       sl_id_map_add(ibdev, ent);
+               }
+
+               spin_unlock(&sriov->id_map_lock);
+       } while (ret == -EAGAIN && idr_pre_get(&sriov->pv_id_table, GFP_KERNEL));
+       /*the function idr_get_new_above can return -ENOSPC, so don't insert in that case.*/
+       if (!ret) {
+               spin_lock(&sriov->id_map_lock);
+               list_add_tail(&ent->list, &sriov->cm_list);
+               spin_unlock(&sriov->id_map_lock);
+               return ent;
+       }
+       /*error flow*/
+       kfree(ent);
+       mlx4_ib_warn(ibdev, "No more space in the idr (err:0x%x)\n", ret);
+       return ERR_PTR(-ENOMEM);
+}
+
+/* Look up a mapping: by (sl_cm_id, slave_id) when *pv_cm_id == -1 (and
+ * fill in *pv_cm_id on success), otherwise directly by *pv_cm_id. */
+static struct id_map_entry *
+id_map_get(struct ib_device *ibdev, int *pv_cm_id, int sl_cm_id, int slave_id)
+{
+       struct id_map_entry *ent;
+       struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
+
+       spin_lock(&sriov->id_map_lock);
+       if (*pv_cm_id == -1) {
+               ent = id_map_find_by_sl_id(ibdev, sl_cm_id, slave_id);
+               if (ent)
+                       *pv_cm_id = (int) ent->pv_cm_id;
+       } else
+               ent = (struct id_map_entry *)idr_find(&sriov->pv_id_table, *pv_cm_id);
+       spin_unlock(&sriov->id_map_lock);
+
+       return ent;
+}
+
+/* Arm the cache-cleanup timeout for @id (unless the device is going down),
+ * so the entry is removed after CM_CLEANUP_CACHE_TIMEOUT. */
+static void schedule_delayed(struct ib_device *ibdev, struct id_map_entry *id)
+{
+       struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
+       unsigned long flags;
+
+       spin_lock_irqsave(&sriov->going_down_lock, flags);
+       spin_lock(&sriov->id_map_lock);
+       /*make sure that there is no schedule inside the scheduled work.*/
+       if (!sriov->is_going_down) {
+               id->scheduled_delete = 1;
+               schedule_delayed_work(&id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
+       }
+       spin_unlock(&sriov->id_map_lock);
+       spin_unlock_irqrestore(&sriov->going_down_lock, flags);
+}
+
+/* Rewrite an outgoing (slave -> wire) CM MAD: map the slave-local comm id
+ * to a device-unique pv_cm_id. REQ/REP allocate a new mapping, REJ passes
+ * through untouched, everything else looks up an existing one.
+ * Returns 0 on success or a negative errno. */
+int mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id,
+               struct ib_mad *mad)
+{
+       struct id_map_entry *id;
+       u32 sl_cm_id;
+       int pv_cm_id = -1;
+
+       sl_cm_id = get_local_comm_id(mad);
+
+       if (mad->mad_hdr.attr_id == CM_REQ_ATTR_ID ||
+                       mad->mad_hdr.attr_id == CM_REP_ATTR_ID) {
+               id = id_map_alloc(ibdev, slave_id, sl_cm_id);
+               if (IS_ERR(id)) {
+                       mlx4_ib_warn(ibdev, "%s: id{slave: %d, sl_cm_id: 0x%x} Failed to id_map_alloc\n",
+                               __func__, slave_id, sl_cm_id);
+                       return PTR_ERR(id);
+               }
+       } else if (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID) {
+               return 0;
+       } else {
+               id = id_map_get(ibdev, &pv_cm_id, slave_id, sl_cm_id);
+       }
+
+       if (!id) {
+               pr_debug("id{slave: %d, sl_cm_id: 0x%x} is NULL!\n",
+                        slave_id, sl_cm_id);
+               return -EINVAL;
+       }
+
+       set_local_comm_id(mad, id->pv_cm_id);
+
+       /* DREQ starts the cleanup timer; DREP removes the mapping now. */
+       if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID)
+               schedule_delayed(ibdev, id);
+       else if (mad->mad_hdr.attr_id == CM_DREP_ATTR_ID)
+               id_map_find_del(ibdev, pv_cm_id);
+
+       return 0;
+}
+
+/* Demultiplex an incoming (wire -> slave) CM MAD: determine the target
+ * slave (*slave) and restore the slave-local comm id. For REQ the slave
+ * is found from the primary path SGID; otherwise the pv_cm_id carried in
+ * the MAD is mapped back. Returns 0 on success or a negative errno. */
+int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave,
+                                                            struct ib_mad *mad)
+{
+       u32 pv_cm_id;
+       struct id_map_entry *id;
+
+       if (mad->mad_hdr.attr_id == CM_REQ_ATTR_ID) {
+               union ib_gid gid;
+
+               gid = gid_from_req_msg(ibdev, mad);
+               *slave = mlx4_ib_find_real_gid(ibdev, port, gid.global.interface_id);
+               if (*slave < 0) {
+                       mlx4_ib_warn(ibdev, "failed matching slave_id by gid (0x%llx)\n",
+                                       gid.global.interface_id);
+                       return -ENOENT;
+               }
+               return 0;
+       }
+
+       pv_cm_id = get_remote_comm_id(mad);
+       id = id_map_get(ibdev, (int *)&pv_cm_id, -1, -1);
+
+       if (!id) {
+               pr_debug("Couldn't find an entry for pv_cm_id 0x%x\n", pv_cm_id);
+               return -ENOENT;
+       }
+
+       *slave = id->slave_id;
+       set_remote_comm_id(mad, id->sl_cm_id);
+
+       /* DREQ arms the cleanup timer; REJ/DREP drop the mapping now. */
+       if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID)
+               schedule_delayed(ibdev, id);
+       else if (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID ||
+                       mad->mad_hdr.attr_id == CM_DREP_ATTR_ID) {
+               id_map_find_del(ibdev, (int) pv_cm_id);
+       }
+
+       return 0;
+}
+
+/* Initialize the CM paravirtualization state: lock, entry list, rb-tree
+ * and the pv_cm_id idr (with one pre-populated idr layer). */
+void mlx4_ib_cm_paravirt_init(struct mlx4_ib_dev *dev)
+{
+       spin_lock_init(&dev->sriov.id_map_lock);
+       INIT_LIST_HEAD(&dev->sriov.cm_list);
+       dev->sriov.sl_id_map = RB_ROOT;
+       idr_init(&dev->sriov.pv_id_table);
+       idr_pre_get(&dev->sriov.pv_id_table, GFP_KERNEL);
+}
+
+/* slave = -1 ==> all slaves */
+/* TBD -- call paravirt clean for single slave.  Need for slave RESET event */
+/* Remove all cached CM id mappings belonging to @slave (or to every slave
+ * when @slave < 0): cancel pending timeout work, flush any that already
+ * ran, strip entries from the rb-tree/idr/cm_list, and free them. */
+void mlx4_ib_cm_paravirt_clean(struct mlx4_ib_dev *dev, int slave)
+{
+       struct mlx4_ib_sriov *sriov = &dev->sriov;
+       struct rb_root *sl_id_map = &sriov->sl_id_map;
+       struct list_head lh;
+       struct rb_node *nd;
+       int need_flush = 1;
+       struct id_map_entry *map, *tmp_map;
+       /* cancel all delayed work queue entries */
+       INIT_LIST_HEAD(&lh);
+       spin_lock(&sriov->id_map_lock);
+       list_for_each_entry_safe(map, tmp_map, &dev->sriov.cm_list, list) {
+               if (slave < 0 || slave == map->slave_id) {
+                       if (map->scheduled_delete)
+                               /* cancel failing means the work already runs;
+                                * remember that we must flush it below. */
+                               need_flush &= !!cancel_delayed_work(&map->timeout);
+               }
+       }
+
+       spin_unlock(&sriov->id_map_lock);
+
+       if (!need_flush)
+               flush_scheduled_work(); /* make sure all timers were flushed */
+
+       /* now, remove all leftover entries from databases*/
+       spin_lock(&sriov->id_map_lock);
+       if (slave < 0) {
+               while (rb_first(sl_id_map)) {
+                       struct id_map_entry *ent =
+                               rb_entry(rb_first(sl_id_map),
+                                        struct id_map_entry, node);
+
+                       rb_erase(&ent->node, sl_id_map);
+                       idr_remove(&sriov->pv_id_table, (int) ent->pv_cm_id);
+               }
+               list_splice_init(&dev->sriov.cm_list, &lh);
+       } else {
+               /* first, move nodes belonging to slave to db remove list */
+               nd = rb_first(sl_id_map);
+               while (nd) {
+                       struct id_map_entry *ent =
+                               rb_entry(nd, struct id_map_entry, node);
+                       nd = rb_next(nd);
+                       if (ent->slave_id == slave)
+                               list_move_tail(&ent->list, &lh);
+               }
+               /* remove those nodes from databases */
+               list_for_each_entry_safe(map, tmp_map, &lh, list) {
+                       rb_erase(&map->node, sl_id_map);
+                       idr_remove(&sriov->pv_id_table, (int) map->pv_cm_id);
+               }
+
+               /* add remaining nodes from cm_list */
+               list_for_each_entry_safe(map, tmp_map, &dev->sriov.cm_list, list) {
+                       if (slave == map->slave_id)
+                               list_move_tail(&map->list, &lh);
+               }
+       }
+
+       spin_unlock(&sriov->id_map_lock);
+
+       /* free any map entries left behind due to cancel_delayed_work above */
+       list_for_each_entry_safe(map, tmp_map, &lh, list) {
+               list_del(&map->list);
+               kfree(map);
+       }
+}
index 6d4ef71cbcdf64c8823910d900ebf51b2b6c2c77..c9eb6a6815ce2f14b62b215d226cda91f98ae3cf 100644 (file)
@@ -547,6 +547,26 @@ static int mlx4_ib_ipoib_csum_ok(__be16 status, __be16 checksum)
                checksum == cpu_to_be16(0xffff);
 }
 
+/* Fill @wc from the tunnel header stashed in the proxy QP's receive buffer
+ * at @tail, instead of from the CQE, for tunneled (paravirtualized) MADs.
+ * Always returns 0. */
+static int use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct ib_wc *wc,
+                          unsigned tail, struct mlx4_cqe *cqe)
+{
+       struct mlx4_ib_proxy_sqp_hdr *hdr;
+
+       /* Make the DMA'd header visible to the CPU before reading it. */
+       ib_dma_sync_single_for_cpu(qp->ibqp.device,
+                                  qp->sqp_proxy_rcv[tail].map,
+                                  sizeof (struct mlx4_ib_proxy_sqp_hdr),
+                                  DMA_FROM_DEVICE);
+       hdr = (struct mlx4_ib_proxy_sqp_hdr *) (qp->sqp_proxy_rcv[tail].addr);
+       wc->pkey_index  = be16_to_cpu(hdr->tun.pkey_index);
+       wc->slid        = be16_to_cpu(hdr->tun.slid_mac_47_32);
+       wc->sl          = (u8) (be16_to_cpu(hdr->tun.sl_vid) >> 12);
+       wc->src_qp      = be32_to_cpu(hdr->tun.flags_src_qp) & 0xFFFFFF;
+       wc->wc_flags   |= (hdr->tun.g_ml_path & 0x80) ? (IB_WC_GRH) : 0;
+       wc->dlid_path_bits = 0;
+
+       return 0;
+}
+
 static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
                            struct mlx4_ib_qp **cur_qp,
                            struct ib_wc *wc)
@@ -559,6 +579,7 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
        int is_error;
        u32 g_mlpath_rqpn;
        u16 wqe_ctr;
+       unsigned tail = 0;
 
 repoll:
        cqe = next_cqe_sw(cq);
@@ -634,7 +655,8 @@ repoll:
                mlx4_ib_free_srq_wqe(srq, wqe_ctr);
        } else {
                wq        = &(*cur_qp)->rq;
-               wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
+               tail      = wq->tail & (wq->wqe_cnt - 1);
+               wc->wr_id = wq->wrid[tail];
                ++wq->tail;
        }
 
@@ -717,6 +739,13 @@ repoll:
                        break;
                }
 
+               if (mlx4_is_mfunc(to_mdev(cq->ibcq.device)->dev)) {
+                       if ((*cur_qp)->mlx4_ib_qp_type &
+                           (MLX4_IB_QPT_PROXY_SMI_OWNER |
+                            MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI))
+                               return use_tunnel_data(*cur_qp, cq, wc, tail, cqe);
+               }
+
                wc->slid           = be16_to_cpu(cqe->rlid);
                g_mlpath_rqpn      = be32_to_cpu(cqe->g_mlpath_rqpn);
                wc->src_qp         = g_mlpath_rqpn & 0xffffff;
index 9c2ae7efd00f4c03db74d67a4b5e8d84b0a87e28..21a794152d15acea2a2e450258f4aaa915bce03a 100644 (file)
 
 #include <rdma/ib_mad.h>
 #include <rdma/ib_smi.h>
+#include <rdma/ib_sa.h>
+#include <rdma/ib_cache.h>
 
+#include <linux/random.h>
 #include <linux/mlx4/cmd.h>
 #include <linux/gfp.h>
 #include <rdma/ib_pma.h>
@@ -44,7 +47,62 @@ enum {
        MLX4_IB_VENDOR_CLASS2 = 0xa
 };
 
-int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int ignore_mkey, int ignore_bkey,
+#define MLX4_TUN_SEND_WRID_SHIFT 34
+#define MLX4_TUN_QPN_SHIFT 32
+#define MLX4_TUN_WRID_RECV (((u64) 1) << MLX4_TUN_SEND_WRID_SHIFT)
+#define MLX4_TUN_SET_WRID_QPN(a) (((u64) ((a) & 0x3)) << MLX4_TUN_QPN_SHIFT)
+
+#define MLX4_TUN_IS_RECV(a)  (((a) >>  MLX4_TUN_SEND_WRID_SHIFT) & 0x1)
+#define MLX4_TUN_WRID_QPN(a) (((a) >> MLX4_TUN_QPN_SHIFT) & 0x3)
+
+ /* Port mgmt change event handling */
+
+#define GET_BLK_PTR_FROM_EQE(eqe) be32_to_cpu(eqe->event.port_mgmt_change.params.tbl_change_info.block_ptr)
+#define GET_MASK_FROM_EQE(eqe) be32_to_cpu(eqe->event.port_mgmt_change.params.tbl_change_info.tbl_entries_mask)
+#define NUM_IDX_IN_PKEY_TBL_BLK 32
+#define GUID_TBL_ENTRY_SIZE 8     /* size in bytes */
+#define GUID_TBL_BLK_NUM_ENTRIES 8
+#define GUID_TBL_BLK_SIZE (GUID_TBL_ENTRY_SIZE * GUID_TBL_BLK_NUM_ENTRIES)
+
+struct mlx4_mad_rcv_buf {
+       struct ib_grh grh;
+       u8 payload[256];
+} __packed;
+
+struct mlx4_mad_snd_buf {
+       u8 payload[256];
+} __packed;
+
+struct mlx4_tunnel_mad {
+       struct ib_grh grh;
+       struct mlx4_ib_tunnel_header hdr;
+       struct ib_mad mad;
+} __packed;
+
+struct mlx4_rcv_tunnel_mad {
+       struct mlx4_rcv_tunnel_hdr hdr;
+       struct ib_grh grh;
+       struct ib_mad mad;
+} __packed;
+
+static void handle_client_rereg_event(struct mlx4_ib_dev *dev, u8 port_num);
+static void handle_lid_change_event(struct mlx4_ib_dev *dev, u8 port_num);
+static void __propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num,
+                               int block, u32 change_bitmap);
+
+/* Generate a pseudo-random node GUID with the OpenIB OUI in the top bits. */
+__be64 mlx4_ib_gen_node_guid(void)
+{
+#define NODE_GUID_HI   ((u64) (((u64)IB_OPENIB_OUI) << 40))
+       return cpu_to_be64(NODE_GUID_HI | random32());
+}
+
+/* Produce a fresh demux transaction id: an atomically incremented counter
+ * with the top byte forced to 0xff to mark driver-generated TIDs. */
+__be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx)
+{
+       return cpu_to_be64(atomic_inc_return(&ctx->tid)) |
+               cpu_to_be64(0xff00000000000000LL);
+}
+
+int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int mad_ifc_flags,
                 int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
                 void *in_mad, void *response_mad)
 {
@@ -71,10 +129,13 @@ int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int ignore_mkey, int ignore_bkey,
         * Key check traps can't be generated unless we have in_wc to
         * tell us where to send the trap.
         */
-       if (ignore_mkey || !in_wc)
+       if ((mad_ifc_flags & MLX4_MAD_IFC_IGNORE_MKEY) || !in_wc)
                op_modifier |= 0x1;
-       if (ignore_bkey || !in_wc)
+       if ((mad_ifc_flags & MLX4_MAD_IFC_IGNORE_BKEY) || !in_wc)
                op_modifier |= 0x2;
+       if (mlx4_is_mfunc(dev->dev) &&
+           (mad_ifc_flags & MLX4_MAD_IFC_NET_VIEW || in_wc))
+               op_modifier |= 0x8;
 
        if (in_wc) {
                struct {
@@ -107,10 +168,10 @@ int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int ignore_mkey, int ignore_bkey,
                in_modifier |= in_wc->slid << 16;
        }
 
-       err = mlx4_cmd_box(dev->dev, inmailbox->dma, outmailbox->dma,
-                          in_modifier, op_modifier,
+       err = mlx4_cmd_box(dev->dev, inmailbox->dma, outmailbox->dma, in_modifier,
+                          mlx4_is_master(dev->dev) ? (op_modifier & ~0x8) : op_modifier,
                           MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
-                          MLX4_CMD_NATIVE);
+                          (op_modifier & 0x8) ? MLX4_CMD_NATIVE : MLX4_CMD_WRAPPED);
 
        if (!err)
                memcpy(response_mad, outmailbox->buf, 256);
@@ -156,6 +217,10 @@ static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad,
 {
        struct ib_port_info *pinfo;
        u16 lid;
+       __be16 *base;
+       u32 bn, pkey_change_bitmap;
+       int i;
+
 
        struct mlx4_ib_dev *dev = to_mdev(ibdev);
        if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
@@ -171,17 +236,46 @@ static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad,
                                     pinfo->neighbormtu_mastersmsl & 0xf);
 
                        if (pinfo->clientrereg_resv_subnetto & 0x80)
-                               mlx4_ib_dispatch_event(dev, port_num,
-                                                      IB_EVENT_CLIENT_REREGISTER);
+                               handle_client_rereg_event(dev, port_num);
 
                        if (prev_lid != lid)
-                               mlx4_ib_dispatch_event(dev, port_num,
-                                                      IB_EVENT_LID_CHANGE);
+                               handle_lid_change_event(dev, port_num);
                        break;
 
                case IB_SMP_ATTR_PKEY_TABLE:
-                       mlx4_ib_dispatch_event(dev, port_num,
-                                              IB_EVENT_PKEY_CHANGE);
+                       if (!mlx4_is_mfunc(dev->dev)) {
+                               mlx4_ib_dispatch_event(dev, port_num,
+                                                      IB_EVENT_PKEY_CHANGE);
+                               break;
+                       }
+
+                       /* at this point, we are running in the master.
+                        * Slaves do not receive SMPs.
+                        */
+                       bn  = be32_to_cpu(((struct ib_smp *)mad)->attr_mod) & 0xFFFF;
+                       base = (__be16 *) &(((struct ib_smp *)mad)->data[0]);
+                       pkey_change_bitmap = 0;
+                       for (i = 0; i < 32; i++) {
+                               pr_debug("PKEY[%d] = x%x\n",
+                                        i + bn*32, be16_to_cpu(base[i]));
+                               if (be16_to_cpu(base[i]) !=
+                                   dev->pkeys.phys_pkey_cache[port_num - 1][i + bn*32]) {
+                                       pkey_change_bitmap |= (1 << i);
+                                       dev->pkeys.phys_pkey_cache[port_num - 1][i + bn*32] =
+                                               be16_to_cpu(base[i]);
+                               }
+                       }
+                       pr_debug("PKEY Change event: port=%d, "
+                                "block=0x%x, change_bitmap=0x%x\n",
+                                port_num, bn, pkey_change_bitmap);
+
+                       if (pkey_change_bitmap) {
+                               mlx4_ib_dispatch_event(dev, port_num,
+                                                      IB_EVENT_PKEY_CHANGE);
+                               if (!dev->sriov.is_going_down)
+                                       __propagate_pkey_ev(dev, port_num, bn,
+                                                           pkey_change_bitmap);
+                       }
                        break;
 
                case IB_SMP_ATTR_GUID_INFO:
@@ -189,12 +283,56 @@ static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad,
                        if (!mlx4_is_master(dev->dev))
                                mlx4_ib_dispatch_event(dev, port_num,
                                                       IB_EVENT_GID_CHANGE);
+                       /*if master, notify relevant slaves*/
+                       if (mlx4_is_master(dev->dev) &&
+                           !dev->sriov.is_going_down) {
+                               bn = be32_to_cpu(((struct ib_smp *)mad)->attr_mod);
+                               mlx4_ib_update_cache_on_guid_change(dev, bn, port_num,
+                                                                   (u8 *)(&((struct ib_smp *)mad)->data));
+                               mlx4_ib_notify_slaves_on_guid_change(dev, bn, port_num,
+                                                                    (u8 *)(&((struct ib_smp *)mad)->data));
+                       }
                        break;
+
                default:
                        break;
                }
 }
 
+static void __propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num,
+                               int block, u32 change_bitmap)
+{
+       int i, ix, slave, err;
+       int have_event = 0;
+
+       for (slave = 0; slave < dev->dev->caps.sqp_demux; slave++) {
+               if (slave == mlx4_master_func_num(dev->dev))
+                       continue;
+               if (!mlx4_is_slave_active(dev->dev, slave))
+                       continue;
+
+               have_event = 0;
+               for (i = 0; i < 32; i++) {
+                       if (!(change_bitmap & (1 << i)))
+                               continue;
+                       for (ix = 0;
+                            ix < dev->dev->caps.pkey_table_len[port_num]; ix++) {
+                               if (dev->pkeys.virt2phys_pkey[slave][port_num - 1]
+                                   [ix] == i + 32 * block) {
+                                       err = mlx4_gen_pkey_eqe(dev->dev, slave, port_num);
+                                       pr_debug("propagate_pkey_ev: slave %d,"
+                                                " port %d, ix %d (%d)\n",
+                                                slave, port_num, ix, err);
+                                       have_event = 1;
+                                       break;
+                               }
+                       }
+                       if (have_event)
+                               break;
+               }
+       }
+}
+
 static void node_desc_override(struct ib_device *dev,
                               struct ib_mad *mad)
 {
@@ -242,6 +380,268 @@ static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, struct ib_mad *ma
        }
 }
 
+static int mlx4_ib_demux_sa_handler(struct ib_device *ibdev, int port, int slave,
+                                                            struct ib_sa_mad *sa_mad)
+{
+       int ret = 0;
+
+       /* dispatch to different sa handlers */
+       switch (be16_to_cpu(sa_mad->mad_hdr.attr_id)) {
+       case IB_SA_ATTR_MC_MEMBER_REC:
+               ret = mlx4_ib_mcg_demux_handler(ibdev, port, slave, sa_mad);
+               break;
+       default:
+               break;
+       }
+       return ret;
+}
+
+int mlx4_ib_find_real_gid(struct ib_device *ibdev, u8 port, __be64 guid)
+{
+       struct mlx4_ib_dev *dev = to_mdev(ibdev);
+       int i;
+
+       for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
+               if (dev->sriov.demux[port - 1].guid_cache[i] == guid)
+                       return i;
+       }
+       return -1;
+}
+
+
+static int get_pkey_phys_indices(struct mlx4_ib_dev *ibdev, u8 port, u8 ph_pkey_ix,
+                                u8 *full_pk_ix, u8 *partial_pk_ix,
+                                int *is_full_member)
+{
+       u16 search_pkey;
+       int fm;
+       int err = 0;
+       u16 pk;
+
+       err = ib_get_cached_pkey(&ibdev->ib_dev, port, ph_pkey_ix, &search_pkey);
+       if (err)
+               return err;
+
+       fm = (search_pkey & 0x8000) ? 1 : 0;
+       if (fm) {
+               *full_pk_ix = ph_pkey_ix;
+               search_pkey &= 0x7FFF;
+       } else {
+               *partial_pk_ix = ph_pkey_ix;
+               search_pkey |= 0x8000;
+       }
+
+       if (ib_find_exact_cached_pkey(&ibdev->ib_dev, port, search_pkey, &pk))
+               pk = 0xFFFF;
+
+       if (fm)
+               *partial_pk_ix = (pk & 0xFF);
+       else
+               *full_pk_ix = (pk & 0xFF);
+
+       *is_full_member = fm;
+       return err;
+}
+
+int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
+                         enum ib_qp_type dest_qpt, struct ib_wc *wc,
+                         struct ib_grh *grh, struct ib_mad *mad)
+{
+       struct ib_sge list;
+       struct ib_send_wr wr, *bad_wr;
+       struct mlx4_ib_demux_pv_ctx *tun_ctx;
+       struct mlx4_ib_demux_pv_qp *tun_qp;
+       struct mlx4_rcv_tunnel_mad *tun_mad;
+       struct ib_ah_attr attr;
+       struct ib_ah *ah;
+       struct ib_qp *src_qp = NULL;
+       unsigned tun_tx_ix = 0;
+       int dqpn;
+       int ret = 0;
+       int i;
+       int is_full_member = 0;
+       u16 tun_pkey_ix;
+       u8 ph_pkey_ix, full_pk_ix = 0, partial_pk_ix = 0;
+
+       if (dest_qpt > IB_QPT_GSI)
+               return -EINVAL;
+
+       tun_ctx = dev->sriov.demux[port-1].tun[slave];
+
+       /* check if proxy qp created */
+       if (!tun_ctx || tun_ctx->state != DEMUX_PV_STATE_ACTIVE)
+               return -EAGAIN;
+
+       /* QP0 forwarding only for Dom0 */
+       if (!dest_qpt && (mlx4_master_func_num(dev->dev) != slave))
+               return -EINVAL;
+
+       if (!dest_qpt)
+               tun_qp = &tun_ctx->qp[0];
+       else
+               tun_qp = &tun_ctx->qp[1];
+
+       /* compute pkey index for slave */
+       /* get physical pkey -- virtualized Dom0 pkey to phys*/
+       if (dest_qpt) {
+               ph_pkey_ix =
+                       dev->pkeys.virt2phys_pkey[mlx4_master_func_num(dev->dev)][port - 1][wc->pkey_index];
+
+               /* now, translate this to the slave pkey index */
+               ret = get_pkey_phys_indices(dev, port, ph_pkey_ix, &full_pk_ix,
+                                           &partial_pk_ix, &is_full_member);
+               if (ret)
+                       return -EINVAL;
+
+               for (i = 0; i < dev->dev->caps.pkey_table_len[port]; i++) {
+                       if ((dev->pkeys.virt2phys_pkey[slave][port - 1][i] == full_pk_ix) ||
+                           (is_full_member &&
+                            (dev->pkeys.virt2phys_pkey[slave][port - 1][i] == partial_pk_ix)))
+                               break;
+               }
+               if (i == dev->dev->caps.pkey_table_len[port])
+                       return -EINVAL;
+               tun_pkey_ix = i;
+       } else
+               tun_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][0];
+
+       dqpn = dev->dev->phys_caps.base_proxy_sqpn + 8 * slave + port + (dest_qpt * 2) - 1;
+
+       /* get tunnel tx data buf for slave */
+       src_qp = tun_qp->qp;
+
+       /* create ah. Just need an empty one with the port num for the post send.
+        * The driver will set the force loopback bit in post_send */
+       memset(&attr, 0, sizeof attr);
+       attr.port_num = port;
+       ah = ib_create_ah(tun_ctx->pd, &attr);
+       if (IS_ERR(ah))
+               return -ENOMEM;
+
+       /* reserve a tunnel tx buffer slot (done only after the early-failure checks above) */
+       spin_lock(&tun_qp->tx_lock);
+       if (tun_qp->tx_ix_head - tun_qp->tx_ix_tail >=
+           (MLX4_NUM_TUNNEL_BUFS - 1))
+               ret = -EAGAIN;
+       else
+               tun_tx_ix = (++tun_qp->tx_ix_head) & (MLX4_NUM_TUNNEL_BUFS - 1);
+       spin_unlock(&tun_qp->tx_lock);
+       if (ret)
+               goto out;
+
+       tun_mad = (struct mlx4_rcv_tunnel_mad *) (tun_qp->tx_ring[tun_tx_ix].buf.addr);
+       if (tun_qp->tx_ring[tun_tx_ix].ah)
+               ib_destroy_ah(tun_qp->tx_ring[tun_tx_ix].ah);
+       tun_qp->tx_ring[tun_tx_ix].ah = ah;
+       ib_dma_sync_single_for_cpu(&dev->ib_dev,
+                                  tun_qp->tx_ring[tun_tx_ix].buf.map,
+                                  sizeof (struct mlx4_rcv_tunnel_mad),
+                                  DMA_TO_DEVICE);
+
+       /* copy over to tunnel buffer */
+       if (grh)
+               memcpy(&tun_mad->grh, grh, sizeof *grh);
+       memcpy(&tun_mad->mad, mad, sizeof *mad);
+
+       /* adjust tunnel data */
+       tun_mad->hdr.pkey_index = cpu_to_be16(tun_pkey_ix);
+       tun_mad->hdr.sl_vid = cpu_to_be16(((u16)(wc->sl)) << 12);
+       tun_mad->hdr.slid_mac_47_32 = cpu_to_be16(wc->slid);
+       tun_mad->hdr.flags_src_qp = cpu_to_be32(wc->src_qp & 0xFFFFFF);
+       tun_mad->hdr.g_ml_path = (grh && (wc->wc_flags & IB_WC_GRH)) ? 0x80 : 0;
+
+       ib_dma_sync_single_for_device(&dev->ib_dev,
+                                     tun_qp->tx_ring[tun_tx_ix].buf.map,
+                                     sizeof (struct mlx4_rcv_tunnel_mad),
+                                     DMA_TO_DEVICE);
+
+       list.addr = tun_qp->tx_ring[tun_tx_ix].buf.map;
+       list.length = sizeof (struct mlx4_rcv_tunnel_mad);
+       list.lkey = tun_ctx->mr->lkey;
+
+       wr.wr.ud.ah = ah;
+       wr.wr.ud.port_num = port;
+       wr.wr.ud.remote_qkey = IB_QP_SET_QKEY;
+       wr.wr.ud.remote_qpn = dqpn;
+       wr.next = NULL;
+       wr.wr_id = ((u64) tun_tx_ix) | MLX4_TUN_SET_WRID_QPN(dest_qpt);
+       wr.sg_list = &list;
+       wr.num_sge = 1;
+       wr.opcode = IB_WR_SEND;
+       wr.send_flags = IB_SEND_SIGNALED;
+
+       ret = ib_post_send(src_qp, &wr, &bad_wr);
+out:
+       if (ret)
+               ib_destroy_ah(ah);
+       return ret;
+}
+
+static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port,
+                       struct ib_wc *wc, struct ib_grh *grh,
+                       struct ib_mad *mad)
+{
+       struct mlx4_ib_dev *dev = to_mdev(ibdev);
+       int err;
+       int slave;
+       u8 *slave_id;
+
+       /* Initially assume that this mad is for us */
+       slave = mlx4_master_func_num(dev->dev);
+
+       /* See if the slave id is encoded in a response mad */
+       if (mad->mad_hdr.method & 0x80) {
+               slave_id = (u8 *) &mad->mad_hdr.tid;
+               slave = *slave_id;
+               if (slave != 255) /* 255 indicates dom0 */
+                       *slave_id = 0; /* remap tid */
+       }
+
+       /* If a grh is present, we demux according to it */
+       if (wc->wc_flags & IB_WC_GRH) {
+               slave = mlx4_ib_find_real_gid(ibdev, port, grh->dgid.global.interface_id);
+               if (slave < 0) {
+                       mlx4_ib_warn(ibdev, "failed matching grh\n");
+                       return -ENOENT;
+               }
+       }
+       /* Class-specific handling */
+       switch (mad->mad_hdr.mgmt_class) {
+       case IB_MGMT_CLASS_SUBN_ADM:
+               if (mlx4_ib_demux_sa_handler(ibdev, port, slave,
+                                            (struct ib_sa_mad *) mad))
+                       return 0;
+               break;
+       case IB_MGMT_CLASS_CM:
+               if (mlx4_ib_demux_cm_handler(ibdev, port, &slave, mad))
+                       return 0;
+               break;
+       case IB_MGMT_CLASS_DEVICE_MGMT:
+               if (mad->mad_hdr.method != IB_MGMT_METHOD_GET_RESP)
+                       return 0;
+               break;
+       default:
+               /* Drop unsupported classes for slaves in tunnel mode */
+               if (slave != mlx4_master_func_num(dev->dev)) {
+                       pr_debug("dropping unsupported ingress mad from class:%d "
+                                "for slave:%d\n", mad->mad_hdr.mgmt_class, slave);
+                       return 0;
+               }
+       }
+       /* make sure that no unhandled slave id (e.g. 255 for dom0) is out of range */
+       if (slave >= dev->dev->caps.sqp_demux) {
+               mlx4_ib_warn(ibdev, "slave id: %d is bigger than allowed:%d\n",
+                            slave, dev->dev->caps.sqp_demux);
+               return -ENOENT;
+       }
+
+       err = mlx4_ib_send_to_slave(dev, slave, port, wc->qp->qp_type, wc, grh, mad);
+       if (err)
+               pr_debug("failed sending to slave %d via tunnel qp (%d)\n",
+                        slave, err);
+       return 0;
+}
+
 static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
                        struct ib_wc *in_wc, struct ib_grh *in_grh,
                        struct ib_mad *in_mad, struct ib_mad *out_mad)
@@ -306,8 +706,9 @@ static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
                prev_lid = pattr.lid;
 
        err = mlx4_MAD_IFC(to_mdev(ibdev),
-                          mad_flags & IB_MAD_IGNORE_MKEY,
-                          mad_flags & IB_MAD_IGNORE_BKEY,
+                          (mad_flags & IB_MAD_IGNORE_MKEY ? MLX4_MAD_IFC_IGNORE_MKEY : 0) |
+                          (mad_flags & IB_MAD_IGNORE_BKEY ? MLX4_MAD_IFC_IGNORE_BKEY : 0) |
+                          MLX4_MAD_IFC_NET_VIEW,
                           port_num, in_wc, in_grh, in_mad, out_mad);
        if (err)
                return IB_MAD_RESULT_FAILURE;
@@ -315,7 +716,9 @@ static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
        if (!out_mad->mad_hdr.status) {
                if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV))
                        smp_snoop(ibdev, port_num, in_mad, prev_lid);
-               node_desc_override(ibdev, out_mad);
+               /* slaves get node desc from FW */
+               if (!mlx4_is_slave(to_mdev(ibdev)->dev))
+                       node_desc_override(ibdev, out_mad);
        }
 
        /* set return bit in status of directed route responses */
@@ -398,6 +801,8 @@ int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
 static void send_handler(struct ib_mad_agent *agent,
                         struct ib_mad_send_wc *mad_send_wc)
 {
+       if (mad_send_wc->send_buf->context[0])
+               ib_destroy_ah(mad_send_wc->send_buf->context[0]);
        ib_free_send_mad(mad_send_wc->send_buf);
 }
 
@@ -456,6 +861,90 @@ void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev)
        }
 }
 
+static void handle_lid_change_event(struct mlx4_ib_dev *dev, u8 port_num)
+{
+       mlx4_ib_dispatch_event(dev, port_num, IB_EVENT_LID_CHANGE);
+
+       if (mlx4_is_master(dev->dev) && !dev->sriov.is_going_down)
+               mlx4_gen_slaves_port_mgt_ev(dev->dev, port_num,
+                                           MLX4_EQ_PORT_INFO_LID_CHANGE_MASK);
+}
+
+static void handle_client_rereg_event(struct mlx4_ib_dev *dev, u8 port_num)
+{
+       /* re-configure the alias-guid and mcg's */
+       if (mlx4_is_master(dev->dev)) {
+               mlx4_ib_invalidate_all_guid_record(dev, port_num);
+
+               if (!dev->sriov.is_going_down) {
+                       mlx4_ib_mcg_port_cleanup(&dev->sriov.demux[port_num - 1], 0);
+                       mlx4_gen_slaves_port_mgt_ev(dev->dev, port_num,
+                                                   MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK);
+               }
+       }
+       mlx4_ib_dispatch_event(dev, port_num, IB_EVENT_CLIENT_REREGISTER);
+}
+
+static void propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num,
+                             struct mlx4_eqe *eqe)
+{
+       __propagate_pkey_ev(dev, port_num, GET_BLK_PTR_FROM_EQE(eqe),
+                           GET_MASK_FROM_EQE(eqe));
+}
+
+static void handle_slaves_guid_change(struct mlx4_ib_dev *dev, u8 port_num,
+                                     u32 guid_tbl_blk_num, u32 change_bitmap)
+{
+       struct ib_smp *in_mad  = NULL;
+       struct ib_smp *out_mad  = NULL;
+       u16 i;
+
+       if (!mlx4_is_mfunc(dev->dev) || !mlx4_is_master(dev->dev))
+               return;
+
+       in_mad  = kmalloc(sizeof *in_mad, GFP_KERNEL);
+       out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
+       if (!in_mad || !out_mad) {
+               mlx4_ib_warn(&dev->ib_dev, "failed to allocate memory for guid info mads\n");
+               goto out;
+       }
+
+       guid_tbl_blk_num  *= 4;
+
+       for (i = 0; i < 4; i++) {
+               if (change_bitmap && (!((change_bitmap >> (8 * i)) & 0xff)))
+                       continue;
+               memset(in_mad, 0, sizeof *in_mad);
+               memset(out_mad, 0, sizeof *out_mad);
+
+               in_mad->base_version  = 1;
+               in_mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
+               in_mad->class_version = 1;
+               in_mad->method        = IB_MGMT_METHOD_GET;
+               in_mad->attr_id       = IB_SMP_ATTR_GUID_INFO;
+               in_mad->attr_mod      = cpu_to_be32(guid_tbl_blk_num + i);
+
+               if (mlx4_MAD_IFC(dev,
+                                MLX4_MAD_IFC_IGNORE_KEYS | MLX4_MAD_IFC_NET_VIEW,
+                                port_num, NULL, NULL, in_mad, out_mad)) {
+                       mlx4_ib_warn(&dev->ib_dev, "Failed in get GUID INFO MAD_IFC\n");
+                       goto out;
+               }
+
+               mlx4_ib_update_cache_on_guid_change(dev, guid_tbl_blk_num + i,
+                                                   port_num,
+                                                   (u8 *)(&((struct ib_smp *)out_mad)->data));
+               mlx4_ib_notify_slaves_on_guid_change(dev, guid_tbl_blk_num + i,
+                                                    port_num,
+                                                    (u8 *)(&((struct ib_smp *)out_mad)->data));
+       }
+
+out:
+       kfree(in_mad);
+       kfree(out_mad);
+       return;
+}
+
 void handle_port_mgmt_change_event(struct work_struct *work)
 {
        struct ib_event_work *ew = container_of(work, struct ib_event_work, work);
@@ -463,6 +952,8 @@ void handle_port_mgmt_change_event(struct work_struct *work)
        struct mlx4_eqe *eqe = &(ew->ib_eqe);
        u8 port = eqe->event.port_mgmt_change.port;
        u32 changed_attr;
+       u32 tbl_block;
+       u32 change_bitmap;
 
        switch (eqe->subtype) {
        case MLX4_DEV_PMC_SUBTYPE_PORT_INFO:
@@ -478,24 +969,36 @@ void handle_port_mgmt_change_event(struct work_struct *work)
 
                /* Check if it is a lid change event */
                if (changed_attr & MLX4_EQ_PORT_INFO_LID_CHANGE_MASK)
-                       mlx4_ib_dispatch_event(dev, port, IB_EVENT_LID_CHANGE);
+                       handle_lid_change_event(dev, port);
 
                /* Generate GUID changed event */
-               if (changed_attr & MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK)
+               if (changed_attr & MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK) {
                        mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE);
+                       /*if master, notify all slaves*/
+                       if (mlx4_is_master(dev->dev))
+                               mlx4_gen_slaves_port_mgt_ev(dev->dev, port,
+                                                           MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK);
+               }
 
                if (changed_attr & MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK)
-                       mlx4_ib_dispatch_event(dev, port,
-                                              IB_EVENT_CLIENT_REREGISTER);
+                       handle_client_rereg_event(dev, port);
                break;
 
        case MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE:
                mlx4_ib_dispatch_event(dev, port, IB_EVENT_PKEY_CHANGE);
+               if (mlx4_is_master(dev->dev) && !dev->sriov.is_going_down)
+                       propagate_pkey_ev(dev, port, eqe);
                break;
        case MLX4_DEV_PMC_SUBTYPE_GUID_INFO:
                /* paravirtualized master's guid is guid 0 -- does not change */
                if (!mlx4_is_master(dev->dev))
                        mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE);
+               /*if master, notify relevant slaves*/
+               else if (!dev->sriov.is_going_down) {
+                       tbl_block = GET_BLK_PTR_FROM_EQE(eqe);
+                       change_bitmap = GET_MASK_FROM_EQE(eqe);
+                       handle_slaves_guid_change(dev, port, tbl_block, change_bitmap);
+               }
                break;
        default:
                pr_warn("Unsupported subtype 0x%x for "
@@ -516,3 +1019,1035 @@ void mlx4_ib_dispatch_event(struct mlx4_ib_dev *dev, u8 port_num,
 
        ib_dispatch_event(&event);
 }
+
+static void mlx4_ib_tunnel_comp_handler(struct ib_cq *cq, void *arg)
+{
+       unsigned long flags;
+       struct mlx4_ib_demux_pv_ctx *ctx = cq->cq_context;
+       struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);
+       spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
+       if (!dev->sriov.is_going_down && ctx->state == DEMUX_PV_STATE_ACTIVE)
+               queue_work(ctx->wq, &ctx->work);
+       spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
+}
+
+static int mlx4_ib_post_pv_qp_buf(struct mlx4_ib_demux_pv_ctx *ctx,
+                                 struct mlx4_ib_demux_pv_qp *tun_qp,
+                                 int index)
+{
+       struct ib_sge sg_list;
+       struct ib_recv_wr recv_wr, *bad_recv_wr;
+       int size;
+
+       size = (tun_qp->qp->qp_type == IB_QPT_UD) ?
+               sizeof (struct mlx4_tunnel_mad) : sizeof (struct mlx4_mad_rcv_buf);
+
+       sg_list.addr = tun_qp->ring[index].map;
+       sg_list.length = size;
+       sg_list.lkey = ctx->mr->lkey;
+
+       recv_wr.next = NULL;
+       recv_wr.sg_list = &sg_list;
+       recv_wr.num_sge = 1;
+       recv_wr.wr_id = (u64) index | MLX4_TUN_WRID_RECV |
+               MLX4_TUN_SET_WRID_QPN(tun_qp->proxy_qpt);
+       ib_dma_sync_single_for_device(ctx->ib_dev, tun_qp->ring[index].map,
+                                     size, DMA_FROM_DEVICE);
+       return ib_post_recv(tun_qp->qp, &recv_wr, &bad_recv_wr);
+}
+
+static int mlx4_ib_multiplex_sa_handler(struct ib_device *ibdev, int port,
+               int slave, struct ib_sa_mad *sa_mad)
+{
+       int ret = 0;
+
+       /* dispatch to different sa handlers */
+       switch (be16_to_cpu(sa_mad->mad_hdr.attr_id)) {
+       case IB_SA_ATTR_MC_MEMBER_REC:
+               ret = mlx4_ib_mcg_multiplex_handler(ibdev, port, slave, sa_mad);
+               break;
+       default:
+               break;
+       }
+       return ret;
+}
+
+static int is_proxy_qp0(struct mlx4_ib_dev *dev, int qpn, int slave)
+{
+       int proxy_start = dev->dev->phys_caps.base_proxy_sqpn + 8 * slave;
+
+       return (qpn >= proxy_start && qpn <= proxy_start + 1);
+}
+
+
+int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
+                        enum ib_qp_type dest_qpt, u16 pkey_index, u32 remote_qpn,
+                        u32 qkey, struct ib_ah_attr *attr, struct ib_mad *mad)
+{
+       struct ib_sge list;
+       struct ib_send_wr wr, *bad_wr;
+       struct mlx4_ib_demux_pv_ctx *sqp_ctx;
+       struct mlx4_ib_demux_pv_qp *sqp;
+       struct mlx4_mad_snd_buf *sqp_mad;
+       struct ib_ah *ah;
+       struct ib_qp *send_qp = NULL;
+       unsigned wire_tx_ix = 0;
+       int ret = 0;
+       u16 wire_pkey_ix;
+       int src_qpnum;
+       u8 sgid_index;
+
+
+       sqp_ctx = dev->sriov.sqps[port-1];
+
+       /* check if proxy qp created */
+       if (!sqp_ctx || sqp_ctx->state != DEMUX_PV_STATE_ACTIVE)
+               return -EAGAIN;
+
+       /* QP0 forwarding only for Dom0 */
+       if (dest_qpt == IB_QPT_SMI && (mlx4_master_func_num(dev->dev) != slave))
+               return -EINVAL;
+
+       if (dest_qpt == IB_QPT_SMI) {
+               src_qpnum = 0;
+               sqp = &sqp_ctx->qp[0];
+               wire_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][0];
+       } else {
+               src_qpnum = 1;
+               sqp = &sqp_ctx->qp[1];
+               wire_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][pkey_index];
+       }
+
+       send_qp = sqp->qp;
+
+       /* create ah */
+       sgid_index = attr->grh.sgid_index;
+       attr->grh.sgid_index = 0;
+       ah = ib_create_ah(sqp_ctx->pd, attr);
+       if (IS_ERR(ah))
+               return -ENOMEM;
+       attr->grh.sgid_index = sgid_index;
+       to_mah(ah)->av.ib.gid_index = sgid_index;
+       /* get rid of force-loopback bit */
+       to_mah(ah)->av.ib.port_pd &= cpu_to_be32(0x7FFFFFFF);
+       spin_lock(&sqp->tx_lock);
+       if (sqp->tx_ix_head - sqp->tx_ix_tail >=
+           (MLX4_NUM_TUNNEL_BUFS - 1))
+               ret = -EAGAIN;
+       else
+               wire_tx_ix = (++sqp->tx_ix_head) & (MLX4_NUM_TUNNEL_BUFS - 1);
+       spin_unlock(&sqp->tx_lock);
+       if (ret)
+               goto out;
+
+       sqp_mad = (struct mlx4_mad_snd_buf *) (sqp->tx_ring[wire_tx_ix].buf.addr);
+       if (sqp->tx_ring[wire_tx_ix].ah)
+               ib_destroy_ah(sqp->tx_ring[wire_tx_ix].ah);
+       sqp->tx_ring[wire_tx_ix].ah = ah;
+       ib_dma_sync_single_for_cpu(&dev->ib_dev,
+                                  sqp->tx_ring[wire_tx_ix].buf.map,
+                                  sizeof (struct mlx4_mad_snd_buf),
+                                  DMA_TO_DEVICE);
+
+       memcpy(&sqp_mad->payload, mad, sizeof *mad);
+
+       ib_dma_sync_single_for_device(&dev->ib_dev,
+                                     sqp->tx_ring[wire_tx_ix].buf.map,
+                                     sizeof (struct mlx4_mad_snd_buf),
+                                     DMA_TO_DEVICE);
+
+       list.addr = sqp->tx_ring[wire_tx_ix].buf.map;
+       list.length = sizeof (struct mlx4_mad_snd_buf);
+       list.lkey = sqp_ctx->mr->lkey;
+
+       wr.wr.ud.ah = ah;
+       wr.wr.ud.port_num = port;
+       wr.wr.ud.pkey_index = wire_pkey_ix;
+       wr.wr.ud.remote_qkey = qkey;
+       wr.wr.ud.remote_qpn = remote_qpn;
+       wr.next = NULL;
+       wr.wr_id = ((u64) wire_tx_ix) | MLX4_TUN_SET_WRID_QPN(src_qpnum);
+       wr.sg_list = &list;
+       wr.num_sge = 1;
+       wr.opcode = IB_WR_SEND;
+       wr.send_flags = IB_SEND_SIGNALED;
+
+       ret = ib_post_send(send_qp, &wr, &bad_wr);
+out:
+       if (ret)
+               ib_destroy_ah(ah);
+       return ret;
+}
+
+/*
+ * Demultiplex a MAD that arrived on a tunnel QP from a paravirtualized
+ * slave and re-send it onto the wire on that slave's behalf.
+ *
+ * Validates that the tunnel source QP really belongs to the sending slave,
+ * stamps the slave number into the transaction ID for request methods so
+ * that the response can later be routed back, applies class-specific
+ * filtering, then rebuilds a standard address handle from the tunnelled
+ * mlx4 AV and forwards the MAD with mlx4_ib_send_to_wire().  Invalid or
+ * unauthorized packets are dropped with a warning.
+ */
+static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc *wc)
+{
+       struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);
+       struct mlx4_ib_demux_pv_qp *tun_qp = &ctx->qp[MLX4_TUN_WRID_QPN(wc->wr_id)];
+       int wr_ix = wc->wr_id & (MLX4_NUM_TUNNEL_BUFS - 1);
+       struct mlx4_tunnel_mad *tunnel = tun_qp->ring[wr_ix].addr;
+       struct mlx4_ib_ah ah;
+       struct ib_ah_attr ah_attr;
+       u8 *slave_id;
+       int slave;
+
+       /* Get slave that sent this packet */
+       /* Proxy SQPs start at base_proxy_sqpn, 8 per slave; the decoding
+        * below implies bit 0 encodes the port, bit 1 QP0-vs-QP1 proxy and
+        * bit 2 an invalid range -- NOTE(review): bit layout inferred from
+        * this arithmetic, confirm against the QP allocation code. */
+       if (wc->src_qp < dev->dev->phys_caps.base_proxy_sqpn ||
+           wc->src_qp >= dev->dev->phys_caps.base_proxy_sqpn + 8 * MLX4_MFUNC_MAX ||
+           (wc->src_qp & 0x1) != ctx->port - 1 ||
+           wc->src_qp & 0x4) {
+               mlx4_ib_warn(ctx->ib_dev, "can't multiplex bad sqp:%d\n", wc->src_qp);
+               return;
+       }
+       slave = ((wc->src_qp & ~0x7) - dev->dev->phys_caps.base_proxy_sqpn) / 8;
+       if (slave != ctx->slave) {
+               mlx4_ib_warn(ctx->ib_dev, "can't multiplex bad sqp:%d: "
+                            "belongs to another slave\n", wc->src_qp);
+               return;
+       }
+       /* Only the master may originate QP0 (SMI) traffic */
+       if (slave != mlx4_master_func_num(dev->dev) && !(wc->src_qp & 0x2)) {
+               mlx4_ib_warn(ctx->ib_dev, "can't multiplex bad sqp:%d: "
+                            "non-master trying to send QP0 packets\n", wc->src_qp);
+               return;
+       }
+
+       /* Map transaction ID */
+       ib_dma_sync_single_for_cpu(ctx->ib_dev, tun_qp->ring[wr_ix].map,
+                                  sizeof (struct mlx4_tunnel_mad),
+                                  DMA_FROM_DEVICE);
+       /* For request methods, stamp the slave number into the TID's most
+        * significant byte so the eventual response can be demuxed back to
+        * this slave.  A non-zero MSB from the slave would collide with
+        * that scheme, so such MADs are dropped. */
+       switch (tunnel->mad.mad_hdr.method) {
+       case IB_MGMT_METHOD_SET:
+       case IB_MGMT_METHOD_GET:
+       case IB_MGMT_METHOD_REPORT:
+       case IB_SA_METHOD_GET_TABLE:
+       case IB_SA_METHOD_DELETE:
+       case IB_SA_METHOD_GET_MULTI:
+       case IB_SA_METHOD_GET_TRACE_TBL:
+               slave_id = (u8 *) &tunnel->mad.mad_hdr.tid;
+               if (*slave_id) {
+                       mlx4_ib_warn(ctx->ib_dev, "egress mad has non-null tid msb:%d "
+                                    "class:%d slave:%d\n", *slave_id,
+                                    tunnel->mad.mad_hdr.mgmt_class, slave);
+                       return;
+               } else
+                       *slave_id = slave;
+               /* falls through to the empty default */
+       default:
+               /* nothing */;
+       }
+
+       /* Class-specific handling */
+       switch (tunnel->mad.mad_hdr.mgmt_class) {
+       case IB_MGMT_CLASS_SUBN_ADM:
+               /* SA handler may consume the MAD (non-zero return) */
+               if (mlx4_ib_multiplex_sa_handler(ctx->ib_dev, ctx->port, slave,
+                             (struct ib_sa_mad *) &tunnel->mad))
+                       return;
+               break;
+       case IB_MGMT_CLASS_CM:
+               if (mlx4_ib_multiplex_cm_handler(ctx->ib_dev, ctx->port, slave,
+                             (struct ib_mad *) &tunnel->mad))
+                       return;
+               break;
+       case IB_MGMT_CLASS_DEVICE_MGMT:
+               /* Slaves may only issue Get/Set for device management */
+               if (tunnel->mad.mad_hdr.method != IB_MGMT_METHOD_GET &&
+                   tunnel->mad.mad_hdr.method != IB_MGMT_METHOD_SET)
+                       return;
+               break;
+       default:
+               /* Drop unsupported classes for slaves in tunnel mode */
+               if (slave != mlx4_master_func_num(dev->dev)) {
+                       mlx4_ib_warn(ctx->ib_dev, "dropping unsupported egress mad from class:%d "
+                                    "for slave:%d\n", tunnel->mad.mad_hdr.mgmt_class, slave);
+                       return;
+               }
+       }
+
+       /* We are using standard ib_core services to send the mad, so generate a
+        * standard address handle by decoding the tunnelled mlx4_ah fields */
+       memcpy(&ah.av, &tunnel->hdr.av, sizeof (struct mlx4_av));
+       ah.ibah.device = ctx->ib_dev;
+       mlx4_ib_query_ah(&ah.ibah, &ah_attr);
+       /* A slave must only use the GID at its own index -- reject spoofing */
+       if ((ah_attr.ah_flags & IB_AH_GRH) &&
+           (ah_attr.grh.sgid_index != slave)) {
+               mlx4_ib_warn(ctx->ib_dev, "slave:%d accessed invalid sgid_index:%d\n",
+                            slave, ah_attr.grh.sgid_index);
+               return;
+       }
+
+       mlx4_ib_send_to_wire(dev, slave, ctx->port,
+                            is_proxy_qp0(dev, wc->src_qp, slave) ?
+                            IB_QPT_SMI : IB_QPT_GSI,
+                            be16_to_cpu(tunnel->hdr.pkey_index),
+                            be32_to_cpu(tunnel->hdr.remote_qpn),
+                            be32_to_cpu(tunnel->hdr.qkey),
+                            &ah_attr, &tunnel->mad);
+}
+
+/*
+ * Allocate and DMA-map the receive and send buffer rings for one
+ * paravirt QP (SMI or GSI) of @ctx.  @is_tun selects tunnel-QP buffer
+ * sizes versus real-SQP buffer sizes.
+ *
+ * Returns 0 on success or -errno; on failure everything allocated so
+ * far is unmapped and freed.
+ *
+ * NOTE(review): the ib_dma_map_single() results are never checked with
+ * ib_dma_mapping_error(), so a failed DMA mapping would go unnoticed --
+ * TODO confirm/fix.
+ */
+static int mlx4_ib_alloc_pv_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
+                                enum ib_qp_type qp_type, int is_tun)
+{
+       int i;
+       struct mlx4_ib_demux_pv_qp *tun_qp;
+       int rx_buf_size, tx_buf_size;
+
+       /* only SMI (QP0) and GSI (QP1) proxy QPs exist */
+       if (qp_type > IB_QPT_GSI)
+               return -EINVAL;
+
+       tun_qp = &ctx->qp[qp_type];
+
+       tun_qp->ring = kzalloc(sizeof (struct mlx4_ib_buf) * MLX4_NUM_TUNNEL_BUFS,
+                              GFP_KERNEL);
+       if (!tun_qp->ring)
+               return -ENOMEM;
+
+       tun_qp->tx_ring = kcalloc(MLX4_NUM_TUNNEL_BUFS,
+                                 sizeof (struct mlx4_ib_tun_tx_buf),
+                                 GFP_KERNEL);
+       if (!tun_qp->tx_ring) {
+               kfree(tun_qp->ring);
+               tun_qp->ring = NULL;
+               return -ENOMEM;
+       }
+
+       if (is_tun) {
+               rx_buf_size = sizeof (struct mlx4_tunnel_mad);
+               tx_buf_size = sizeof (struct mlx4_rcv_tunnel_mad);
+       } else {
+               rx_buf_size = sizeof (struct mlx4_mad_rcv_buf);
+               tx_buf_size = sizeof (struct mlx4_mad_snd_buf);
+       }
+
+       /* receive ring: buffers the HCA writes incoming MADs into */
+       for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
+               tun_qp->ring[i].addr = kmalloc(rx_buf_size, GFP_KERNEL);
+               if (!tun_qp->ring[i].addr)
+                       goto err;
+               tun_qp->ring[i].map = ib_dma_map_single(ctx->ib_dev,
+                                                       tun_qp->ring[i].addr,
+                                                       rx_buf_size,
+                                                       DMA_FROM_DEVICE);
+       }
+
+       /* send ring: buffers for MADs we post, each with an optional AH */
+       for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
+               tun_qp->tx_ring[i].buf.addr =
+                       kmalloc(tx_buf_size, GFP_KERNEL);
+               if (!tun_qp->tx_ring[i].buf.addr)
+                       goto tx_err;
+               tun_qp->tx_ring[i].buf.map =
+                       ib_dma_map_single(ctx->ib_dev,
+                                         tun_qp->tx_ring[i].buf.addr,
+                                         tx_buf_size,
+                                         DMA_TO_DEVICE);
+               tun_qp->tx_ring[i].ah = NULL;
+       }
+       spin_lock_init(&tun_qp->tx_lock);
+       tun_qp->tx_ix_head = 0;
+       tun_qp->tx_ix_tail = 0;
+       tun_qp->proxy_qpt = qp_type;
+
+       return 0;
+
+tx_err:
+       /* unwind the partially-built tx ring ... */
+       while (i > 0) {
+               --i;
+               ib_dma_unmap_single(ctx->ib_dev, tun_qp->tx_ring[i].buf.map,
+                                   tx_buf_size, DMA_TO_DEVICE);
+               kfree(tun_qp->tx_ring[i].buf.addr);
+       }
+       kfree(tun_qp->tx_ring);
+       tun_qp->tx_ring = NULL;
+       /* ... then fall through to release the entire (complete) rx ring */
+       i = MLX4_NUM_TUNNEL_BUFS;
+err:
+       while (i > 0) {
+               --i;
+               ib_dma_unmap_single(ctx->ib_dev, tun_qp->ring[i].map,
+                                   rx_buf_size, DMA_FROM_DEVICE);
+               kfree(tun_qp->ring[i].addr);
+       }
+       kfree(tun_qp->ring);
+       tun_qp->ring = NULL;
+       return -ENOMEM;
+}
+
+/*
+ * Tear down the buffer rings created by mlx4_ib_alloc_pv_bufs():
+ * DMA-unmap and free every rx and tx buffer, destroy any address
+ * handle still attached to a tx slot, then free the ring arrays.
+ * @is_tun must match the value used at allocation so the unmap
+ * sizes agree.
+ */
+static void mlx4_ib_free_pv_qp_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
+                                    enum ib_qp_type qp_type, int is_tun)
+{
+       int i;
+       struct mlx4_ib_demux_pv_qp *tun_qp;
+       int rx_buf_size, tx_buf_size;
+
+       if (qp_type > IB_QPT_GSI)
+               return;
+
+       tun_qp = &ctx->qp[qp_type];
+       if (is_tun) {
+               rx_buf_size = sizeof (struct mlx4_tunnel_mad);
+               tx_buf_size = sizeof (struct mlx4_rcv_tunnel_mad);
+       } else {
+               rx_buf_size = sizeof (struct mlx4_mad_rcv_buf);
+               tx_buf_size = sizeof (struct mlx4_mad_snd_buf);
+       }
+
+
+       for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
+               ib_dma_unmap_single(ctx->ib_dev, tun_qp->ring[i].map,
+                                   rx_buf_size, DMA_FROM_DEVICE);
+               kfree(tun_qp->ring[i].addr);
+       }
+
+       for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
+               ib_dma_unmap_single(ctx->ib_dev, tun_qp->tx_ring[i].buf.map,
+                                   tx_buf_size, DMA_TO_DEVICE);
+               kfree(tun_qp->tx_ring[i].buf.addr);
+               /* AH is left set on slots whose send never completed */
+               if (tun_qp->tx_ring[i].ah)
+                       ib_destroy_ah(tun_qp->tx_ring[i].ah);
+       }
+       kfree(tun_qp->tx_ring);
+       kfree(tun_qp->ring);
+}
+
+/*
+ * Workqueue handler for tunnel-QP completions.  Re-arms the CQ for the
+ * next event, then drains all available completions: a successful RECV
+ * is demultiplexed to the wire and its buffer reposted; a completed
+ * SEND releases the slot's address handle and advances the tx tail so
+ * the slot can be reused.  Failed send completions are reclaimed the
+ * same way so the ring does not leak slots.
+ */
+static void mlx4_ib_tunnel_comp_worker(struct work_struct *work)
+{
+       struct mlx4_ib_demux_pv_ctx *ctx;
+       struct mlx4_ib_demux_pv_qp *tun_qp;
+       struct ib_wc wc;
+       int ret;
+       ctx = container_of(work, struct mlx4_ib_demux_pv_ctx, work);
+       /* arm first so a completion arriving mid-poll re-queues us */
+       ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP);
+
+       while (ib_poll_cq(ctx->cq, 1, &wc) == 1) {
+               tun_qp = &ctx->qp[MLX4_TUN_WRID_QPN(wc.wr_id)];
+               if (wc.status == IB_WC_SUCCESS) {
+                       switch (wc.opcode) {
+                       case IB_WC_RECV:
+                               mlx4_ib_multiplex_mad(ctx, &wc);
+                               ret = mlx4_ib_post_pv_qp_buf(ctx, tun_qp,
+                                                            wc.wr_id &
+                                                            (MLX4_NUM_TUNNEL_BUFS - 1));
+                               if (ret)
+                                       pr_err("Failed reposting tunnel "
+                                              "buf:%lld\n", wc.wr_id);
+                               break;
+                       case IB_WC_SEND:
+                               pr_debug("received tunnel send completion:"
+                                        "wrid=0x%llx, status=0x%x\n",
+                                        wc.wr_id, wc.status);
+                               ib_destroy_ah(tun_qp->tx_ring[wc.wr_id &
+                                             (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
+                               tun_qp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
+                                       = NULL;
+                               spin_lock(&tun_qp->tx_lock);
+                               tun_qp->tx_ix_tail++;
+                               spin_unlock(&tun_qp->tx_lock);
+
+                               break;
+                       default:
+                               break;
+                       }
+               } else  {
+                       pr_debug("mlx4_ib: completion error in tunnel: %d."
+                                " status = %d, wrid = 0x%llx\n",
+                                ctx->slave, wc.status, wc.wr_id);
+                       /* reclaim the tx slot for flushed/failed sends too */
+                       if (!MLX4_TUN_IS_RECV(wc.wr_id)) {
+                               ib_destroy_ah(tun_qp->tx_ring[wc.wr_id &
+                                             (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
+                               tun_qp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
+                                       = NULL;
+                               spin_lock(&tun_qp->tx_lock);
+                               tun_qp->tx_ix_tail++;
+                               spin_unlock(&tun_qp->tx_lock);
+                       }
+               }
+       }
+}
+
+/*
+ * Async event handler for the paravirt MAD QPs.  Fatal QP events
+ * cannot be recovered here, so just log them loudly.
+ */
+static void pv_qp_event_handler(struct ib_event *event, void *qp_context)
+{
+       struct mlx4_ib_demux_pv_ctx *sqp = qp_context;
+
+       /* It's worse than that! He's dead, Jim! */
+       pr_err("Fatal error (%d) on a MAD QP on port %d\n",
+              event->event, sqp->port);
+}
+
+/*
+ * Create one paravirt QP (SMI or GSI) for @ctx and bring it to RTS.
+ *
+ * With @create_tun set, a UD tunnel QP is created (flagged
+ * MLX4_IB_SRIOV_TUNNEL_QP, carrying the slave/port in the extended init
+ * attr); otherwise a real proxy special QP of @qp_type is created.
+ * After the INIT->RTR->RTS transitions, the whole receive ring is
+ * posted.  Returns 0 or -errno; on failure the QP is destroyed.
+ */
+static int create_pv_sqp(struct mlx4_ib_demux_pv_ctx *ctx,
+                           enum ib_qp_type qp_type, int create_tun)
+{
+       int i, ret;
+       struct mlx4_ib_demux_pv_qp *tun_qp;
+       struct mlx4_ib_qp_tunnel_init_attr qp_init_attr;
+       struct ib_qp_attr attr;
+       int qp_attr_mask_INIT;
+
+       if (qp_type > IB_QPT_GSI)
+               return -EINVAL;
+
+       tun_qp = &ctx->qp[qp_type];
+
+       memset(&qp_init_attr, 0, sizeof qp_init_attr);
+       qp_init_attr.init_attr.send_cq = ctx->cq;
+       qp_init_attr.init_attr.recv_cq = ctx->cq;
+       qp_init_attr.init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
+       qp_init_attr.init_attr.cap.max_send_wr = MLX4_NUM_TUNNEL_BUFS;
+       qp_init_attr.init_attr.cap.max_recv_wr = MLX4_NUM_TUNNEL_BUFS;
+       qp_init_attr.init_attr.cap.max_send_sge = 1;
+       qp_init_attr.init_attr.cap.max_recv_sge = 1;
+       if (create_tun) {
+               /* tunnel QPs additionally need IB_QP_PORT at INIT since
+                * they are plain UD QPs from the core's point of view */
+               qp_init_attr.init_attr.qp_type = IB_QPT_UD;
+               qp_init_attr.init_attr.create_flags = MLX4_IB_SRIOV_TUNNEL_QP;
+               qp_init_attr.port = ctx->port;
+               qp_init_attr.slave = ctx->slave;
+               qp_init_attr.proxy_qp_type = qp_type;
+               qp_attr_mask_INIT = IB_QP_STATE | IB_QP_PKEY_INDEX |
+                          IB_QP_QKEY | IB_QP_PORT;
+       } else {
+               qp_init_attr.init_attr.qp_type = qp_type;
+               qp_init_attr.init_attr.create_flags = MLX4_IB_SRIOV_SQP;
+               qp_attr_mask_INIT = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_QKEY;
+       }
+       qp_init_attr.init_attr.port_num = ctx->port;
+       qp_init_attr.init_attr.qp_context = ctx;
+       qp_init_attr.init_attr.event_handler = pv_qp_event_handler;
+       tun_qp->qp = ib_create_qp(ctx->pd, &qp_init_attr.init_attr);
+       if (IS_ERR(tun_qp->qp)) {
+               ret = PTR_ERR(tun_qp->qp);
+               tun_qp->qp = NULL;
+               pr_err("Couldn't create %s QP (%d)\n",
+                      create_tun ? "tunnel" : "special", ret);
+               return ret;
+       }
+
+       memset(&attr, 0, sizeof attr);
+       attr.qp_state = IB_QPS_INIT;
+       /* use this slave's virtualized view of pkey index 0 */
+       attr.pkey_index =
+               to_mdev(ctx->ib_dev)->pkeys.virt2phys_pkey[ctx->slave][ctx->port - 1][0];
+       attr.qkey = IB_QP1_QKEY;
+       attr.port_num = ctx->port;
+       ret = ib_modify_qp(tun_qp->qp, &attr, qp_attr_mask_INIT);
+       if (ret) {
+               pr_err("Couldn't change %s qp state to INIT (%d)\n",
+                      create_tun ? "tunnel" : "special", ret);
+               goto err_qp;
+       }
+       attr.qp_state = IB_QPS_RTR;
+       ret = ib_modify_qp(tun_qp->qp, &attr, IB_QP_STATE);
+       if (ret) {
+               pr_err("Couldn't change %s qp state to RTR (%d)\n",
+                      create_tun ? "tunnel" : "special", ret);
+               goto err_qp;
+       }
+       attr.qp_state = IB_QPS_RTS;
+       attr.sq_psn = 0;
+       ret = ib_modify_qp(tun_qp->qp, &attr, IB_QP_STATE | IB_QP_SQ_PSN);
+       if (ret) {
+               pr_err("Couldn't change %s qp state to RTS (%d)\n",
+                      create_tun ? "tunnel" : "special", ret);
+               goto err_qp;
+       }
+
+       /* fill the receive ring so MADs can arrive immediately */
+       for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
+               ret = mlx4_ib_post_pv_qp_buf(ctx, tun_qp, i);
+               if (ret) {
+                       pr_err(" mlx4_ib_post_pv_buf error"
+                              " (err = %d, i = %d)\n", ret, i);
+                       goto err_qp;
+               }
+       }
+       return 0;
+
+err_qp:
+       ib_destroy_qp(tun_qp->qp);
+       tun_qp->qp = NULL;
+       return ret;
+}
+
+/*
+ * IB MAD completion callback for real SQPs
+ *
+ * Workqueue handler draining the CQ of the master's real special QPs.
+ * Completed sends release their AH and free the tx slot; received MADs
+ * are handed to mlx4_ib_demux_mad() for routing to the owning slave,
+ * then the receive buffer is reposted.
+ */
+static void mlx4_ib_sqp_comp_worker(struct work_struct *work)
+{
+       struct mlx4_ib_demux_pv_ctx *ctx;
+       struct mlx4_ib_demux_pv_qp *sqp;
+       struct ib_wc wc;
+       struct ib_grh *grh;
+       struct ib_mad *mad;
+
+       ctx = container_of(work, struct mlx4_ib_demux_pv_ctx, work);
+       /* arm first so a completion arriving mid-poll re-queues us */
+       ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP);
+
+       while (mlx4_ib_poll_cq(ctx->cq, 1, &wc) == 1) {
+               sqp = &ctx->qp[MLX4_TUN_WRID_QPN(wc.wr_id)];
+               if (wc.status == IB_WC_SUCCESS) {
+                       switch (wc.opcode) {
+                       case IB_WC_SEND:
+                               ib_destroy_ah(sqp->tx_ring[wc.wr_id &
+                                             (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
+                               sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
+                                       = NULL;
+                               spin_lock(&sqp->tx_lock);
+                               sqp->tx_ix_tail++;
+                               spin_unlock(&sqp->tx_lock);
+                               break;
+                       case IB_WC_RECV:
+                               /* locate GRH and MAD payload inside the rx buffer */
+                               mad = (struct ib_mad *) &(((struct mlx4_mad_rcv_buf *)
+                                               (sqp->ring[wc.wr_id &
+                                               (MLX4_NUM_TUNNEL_BUFS - 1)].addr))->payload);
+                               grh = &(((struct mlx4_mad_rcv_buf *)
+                                               (sqp->ring[wc.wr_id &
+                                               (MLX4_NUM_TUNNEL_BUFS - 1)].addr))->grh);
+                               mlx4_ib_demux_mad(ctx->ib_dev, ctx->port, &wc, grh, mad);
+                               if (mlx4_ib_post_pv_qp_buf(ctx, sqp, wc.wr_id &
+                                                          (MLX4_NUM_TUNNEL_BUFS - 1)))
+                                       pr_err("Failed reposting SQP "
+                                              "buf:%lld\n", wc.wr_id);
+                               break;
+                       default:
+                               /* SQPs only post SEND and RECV work requests */
+                               BUG_ON(1);
+                               break;
+                       }
+               } else  {
+                       pr_debug("mlx4_ib: completion error in tunnel: %d."
+                                " status = %d, wrid = 0x%llx\n",
+                                ctx->slave, wc.status, wc.wr_id);
+                       /* reclaim the tx slot for flushed/failed sends too */
+                       if (!MLX4_TUN_IS_RECV(wc.wr_id)) {
+                               ib_destroy_ah(sqp->tx_ring[wc.wr_id &
+                                             (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
+                               sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
+                                       = NULL;
+                               spin_lock(&sqp->tx_lock);
+                               sqp->tx_ix_tail++;
+                               spin_unlock(&sqp->tx_lock);
+                       }
+               }
+       }
+}
+
+/*
+ * Allocate and minimally initialize a demux paravirt context for one
+ * (slave, port) pair.  On success *@ret_ctx points at the new context;
+ * on failure *@ret_ctx is NULL and -ENOMEM is returned.  The caller
+ * owns the allocation (freed via free_pv_object() or kfree()).
+ */
+static int alloc_pv_object(struct mlx4_ib_dev *dev, int slave, int port,
+                              struct mlx4_ib_demux_pv_ctx **ret_ctx)
+{
+       struct mlx4_ib_demux_pv_ctx *ctx;
+
+       *ret_ctx = NULL;
+       ctx = kzalloc(sizeof (struct mlx4_ib_demux_pv_ctx), GFP_KERNEL);
+       if (!ctx) {
+               pr_err("failed allocating pv resource context "
+                      "for port %d, slave %d\n", port, slave);
+               return -ENOMEM;
+       }
+
+       ctx->ib_dev = &dev->ib_dev;
+       ctx->port = port;
+       ctx->slave = slave;
+       *ret_ctx = ctx;
+       return 0;
+}
+
+/*
+ * Release the paravirt context for (slave, port) and clear its slot.
+ * kfree(NULL) is a no-op, so no NULL test is needed before freeing.
+ */
+static void free_pv_object(struct mlx4_ib_dev *dev, int slave, int port)
+{
+       kfree(dev->sriov.demux[port - 1].tun[slave]);
+       dev->sriov.demux[port - 1].tun[slave] = NULL;
+}
+
+/*
+ * Bring up the full paravirt resource set for one demux context:
+ * buffer rings, CQ, PD, DMA MR and the SMI/GSI QPs, then select the
+ * completion worker (tunnel vs. real SQP) and arm the CQ.
+ *
+ * QP0 (SMI) resources are created only for the port owner (master) and
+ * only when the port's link layer is InfiniBand.  Moves ctx->state
+ * DOWN -> STARTING -> ACTIVE; returns -EEXIST if not DOWN, otherwise 0
+ * or -errno with everything unwound and state reset to DOWN.
+ */
+static int create_pv_resources(struct ib_device *ibdev, int slave, int port,
+                              int create_tun, struct mlx4_ib_demux_pv_ctx *ctx)
+{
+       int ret, cq_size;
+
+       if (ctx->state != DEMUX_PV_STATE_DOWN)
+               return -EEXIST;
+
+       ctx->state = DEMUX_PV_STATE_STARTING;
+       /* have QP0 only on port owner, and only if link layer is IB */
+       if (ctx->slave == mlx4_master_func_num(to_mdev(ctx->ib_dev)->dev) &&
+           rdma_port_get_link_layer(ibdev, ctx->port) == IB_LINK_LAYER_INFINIBAND)
+               ctx->has_smi = 1;
+
+       if (ctx->has_smi) {
+               ret = mlx4_ib_alloc_pv_bufs(ctx, IB_QPT_SMI, create_tun);
+               if (ret) {
+                       pr_err("Failed allocating qp0 tunnel bufs (%d)\n", ret);
+                       goto err_out;
+               }
+       }
+
+       ret = mlx4_ib_alloc_pv_bufs(ctx, IB_QPT_GSI, create_tun);
+       if (ret) {
+               pr_err("Failed allocating qp1 tunnel bufs (%d)\n", ret);
+               goto err_out_qp0;
+       }
+
+       /* room for send + recv completions per QP; double if QP0 exists */
+       cq_size = 2 * MLX4_NUM_TUNNEL_BUFS;
+       if (ctx->has_smi)
+               cq_size *= 2;
+
+       ctx->cq = ib_create_cq(ctx->ib_dev, mlx4_ib_tunnel_comp_handler,
+                              NULL, ctx, cq_size, 0);
+       if (IS_ERR(ctx->cq)) {
+               ret = PTR_ERR(ctx->cq);
+               pr_err("Couldn't create tunnel CQ (%d)\n", ret);
+               goto err_buf;
+       }
+
+       ctx->pd = ib_alloc_pd(ctx->ib_dev);
+       if (IS_ERR(ctx->pd)) {
+               ret = PTR_ERR(ctx->pd);
+               pr_err("Couldn't create tunnel PD (%d)\n", ret);
+               goto err_cq;
+       }
+
+       ctx->mr = ib_get_dma_mr(ctx->pd, IB_ACCESS_LOCAL_WRITE);
+       if (IS_ERR(ctx->mr)) {
+               ret = PTR_ERR(ctx->mr);
+               pr_err("Couldn't get tunnel DMA MR (%d)\n", ret);
+               goto err_pd;
+       }
+
+       if (ctx->has_smi) {
+               ret = create_pv_sqp(ctx, IB_QPT_SMI, create_tun);
+               if (ret) {
+                       pr_err("Couldn't create %s QP0 (%d)\n",
+                              create_tun ? "tunnel for" : "",  ret);
+                       goto err_mr;
+               }
+       }
+
+       ret = create_pv_sqp(ctx, IB_QPT_GSI, create_tun);
+       if (ret) {
+               pr_err("Couldn't create %s QP1 (%d)\n",
+                      create_tun ? "tunnel for" : "",  ret);
+               goto err_qp0;
+       }
+
+       if (create_tun)
+               INIT_WORK(&ctx->work, mlx4_ib_tunnel_comp_worker);
+       else
+               INIT_WORK(&ctx->work, mlx4_ib_sqp_comp_worker);
+
+       ctx->wq = to_mdev(ibdev)->sriov.demux[port - 1].wq;
+
+       ret = ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP);
+       if (ret) {
+               pr_err("Couldn't arm tunnel cq (%d)\n", ret);
+               goto err_wq;
+       }
+       ctx->state = DEMUX_PV_STATE_ACTIVE;
+       return 0;
+
+/* unwind in reverse order of creation; each label falls into the next */
+err_wq:
+       ctx->wq = NULL;
+       ib_destroy_qp(ctx->qp[1].qp);
+       ctx->qp[1].qp = NULL;
+
+
+err_qp0:
+       if (ctx->has_smi)
+               ib_destroy_qp(ctx->qp[0].qp);
+       ctx->qp[0].qp = NULL;
+
+err_mr:
+       ib_dereg_mr(ctx->mr);
+       ctx->mr = NULL;
+
+err_pd:
+       ib_dealloc_pd(ctx->pd);
+       ctx->pd = NULL;
+
+err_cq:
+       ib_destroy_cq(ctx->cq);
+       ctx->cq = NULL;
+
+err_buf:
+       mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_GSI, create_tun);
+
+err_out_qp0:
+       if (ctx->has_smi)
+               mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_SMI, create_tun);
+err_out:
+       ctx->state = DEMUX_PV_STATE_DOWN;
+       return ret;
+}
+
+/*
+ * Tear down everything create_pv_resources() set up for @ctx, in
+ * reverse order (QPs and buffers, then MR, PD, CQ).  No-op if @ctx is
+ * NULL or already DOWN.  With @flush set, the context's workqueue is
+ * flushed first so in-flight completion work finishes; the DOWNING
+ * state is set beforehand so that work stops requeueing itself.
+ */
+static void destroy_pv_resources(struct mlx4_ib_dev *dev, int slave, int port,
+                                struct mlx4_ib_demux_pv_ctx *ctx, int flush)
+{
+       if (!ctx)
+               return;
+       if (ctx->state > DEMUX_PV_STATE_DOWN) {
+               ctx->state = DEMUX_PV_STATE_DOWNING;
+               if (flush)
+                       flush_workqueue(ctx->wq);
+               if (ctx->has_smi) {
+                       ib_destroy_qp(ctx->qp[0].qp);
+                       ctx->qp[0].qp = NULL;
+                       mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_SMI, 1);
+               }
+               ib_destroy_qp(ctx->qp[1].qp);
+               ctx->qp[1].qp = NULL;
+               mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_GSI, 1);
+               ib_dereg_mr(ctx->mr);
+               ctx->mr = NULL;
+               ib_dealloc_pd(ctx->pd);
+               ctx->pd = NULL;
+               ib_destroy_cq(ctx->cq);
+               ctx->cq = NULL;
+               ctx->state = DEMUX_PV_STATE_DOWN;
+       }
+}
+
+/*
+ * Create (@do_init != 0) or destroy (@do_init == 0) the tunnel-QP
+ * resources for one (slave, port), plus the real SQP resources when
+ * the slave is the master function.  Teardown also cleans the slave's
+ * multicast registrations first.  Returns 0 or -errno from creation.
+ */
+static int mlx4_ib_tunnels_update(struct mlx4_ib_dev *dev, int slave,
+                                 int port, int do_init)
+{
+       int ret = 0;
+
+       if (!do_init) {
+               clean_vf_mcast(&dev->sriov.demux[port - 1], slave);
+               /* for master, destroy real sqp resources */
+               if (slave == mlx4_master_func_num(dev->dev))
+                       destroy_pv_resources(dev, slave, port,
+                                            dev->sriov.sqps[port - 1], 1);
+               /* destroy the tunnel qp resources */
+               destroy_pv_resources(dev, slave, port,
+                                    dev->sriov.demux[port - 1].tun[slave], 1);
+               return 0;
+       }
+
+       /* create the tunnel qp resources */
+       ret = create_pv_resources(&dev->ib_dev, slave, port, 1,
+                                 dev->sriov.demux[port - 1].tun[slave]);
+
+       /* for master, create the real sqp resources */
+       if (!ret && slave == mlx4_master_func_num(dev->dev))
+               ret = create_pv_resources(&dev->ib_dev, slave, port, 0,
+                                         dev->sriov.sqps[port - 1]);
+       return ret;
+}
+
+/*
+ * Deferred-work wrapper around mlx4_ib_tunnels_update(): unpacks the
+ * queued mlx4_ib_demux_work item, applies the update, and frees the
+ * item (the work struct is embedded in it, so it must not be touched
+ * after kfree).
+ */
+void mlx4_ib_tunnels_update_work(struct work_struct *work)
+{
+       struct mlx4_ib_demux_work *dmxw;
+
+       dmxw = container_of(work, struct mlx4_ib_demux_work, work);
+       mlx4_ib_tunnels_update(dmxw->dev, dmxw->slave, (int) dmxw->port,
+                              dmxw->do_init);
+       kfree(dmxw);
+}
+
+/*
+ * Set up the per-port demux context: an array of per-slave paravirt
+ * contexts (sized by caps.sqp_demux), the multicast paravirt state,
+ * and two single-threaded workqueues (tunnelling and port up/down).
+ * Returns 0 or -errno; on failure everything allocated so far is
+ * released.
+ */
+static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
+                                      struct mlx4_ib_demux_ctx *ctx,
+                                      int port)
+{
+       char name[12];
+       int ret = 0;
+       int i;
+
+       ctx->tun = kcalloc(dev->dev->caps.sqp_demux,
+                          sizeof (struct mlx4_ib_demux_pv_ctx *), GFP_KERNEL);
+       if (!ctx->tun)
+               return -ENOMEM;
+
+       ctx->dev = dev;
+       ctx->port = port;
+       ctx->ib_dev = &dev->ib_dev;
+
+       for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
+               ret = alloc_pv_object(dev, i, port, &ctx->tun[i]);
+               if (ret) {
+                       ret = -ENOMEM;
+                       goto err_mcg;
+               }
+       }
+
+       ret = mlx4_ib_mcg_port_init(ctx);
+       if (ret) {
+               pr_err("Failed initializing mcg para-virt (%d)\n", ret);
+               goto err_mcg;
+       }
+
+       snprintf(name, sizeof name, "mlx4_ibt%d", port);
+       ctx->wq = create_singlethread_workqueue(name);
+       if (!ctx->wq) {
+               pr_err("Failed to create tunnelling WQ for port %d\n", port);
+               ret = -ENOMEM;
+               goto err_wq;
+       }
+
+       snprintf(name, sizeof name, "mlx4_ibud%d", port);
+       ctx->ud_wq = create_singlethread_workqueue(name);
+       if (!ctx->ud_wq) {
+               pr_err("Failed to create up/down WQ for port %d\n", port);
+               ret = -ENOMEM;
+               goto err_udwq;
+       }
+
+       return 0;
+
+err_udwq:
+       destroy_workqueue(ctx->wq);
+       ctx->wq = NULL;
+
+err_wq:
+       mlx4_ib_mcg_port_cleanup(ctx, 1);
+err_mcg:
+       /* free_pv_object() tolerates NULL slots from a partial loop */
+       for (i = 0; i < dev->dev->caps.sqp_demux; i++)
+               free_pv_object(dev, i, port);
+       kfree(ctx->tun);
+       ctx->tun = NULL;
+       return ret;
+}
+
+/*
+ * Tear down a real-SQP paravirt context (the is_tun=0 counterpart of
+ * destroy_pv_resources()): mark it DOWNING, flush pending completion
+ * work, then destroy QPs, buffers, MR, PD and CQ in reverse order of
+ * creation.  No-op if the context is already DOWN.
+ */
+static void mlx4_ib_free_sqp_ctx(struct mlx4_ib_demux_pv_ctx *sqp_ctx)
+{
+       if (sqp_ctx->state > DEMUX_PV_STATE_DOWN) {
+               sqp_ctx->state = DEMUX_PV_STATE_DOWNING;
+               flush_workqueue(sqp_ctx->wq);
+               if (sqp_ctx->has_smi) {
+                       ib_destroy_qp(sqp_ctx->qp[0].qp);
+                       sqp_ctx->qp[0].qp = NULL;
+                       mlx4_ib_free_pv_qp_bufs(sqp_ctx, IB_QPT_SMI, 0);
+               }
+               ib_destroy_qp(sqp_ctx->qp[1].qp);
+               sqp_ctx->qp[1].qp = NULL;
+               mlx4_ib_free_pv_qp_bufs(sqp_ctx, IB_QPT_GSI, 0);
+               ib_dereg_mr(sqp_ctx->mr);
+               sqp_ctx->mr = NULL;
+               ib_dealloc_pd(sqp_ctx->pd);
+               sqp_ctx->pd = NULL;
+               ib_destroy_cq(sqp_ctx->cq);
+               sqp_ctx->cq = NULL;
+               sqp_ctx->state = DEMUX_PV_STATE_DOWN;
+       }
+}
+
+/*
+ * Release a per-port demux context: clean mcg paravirt state, flag
+ * every active per-slave context as DOWNING, flush in-flight work,
+ * then destroy each slave's paravirt resources and finally the
+ * workqueues themselves.  Safe to call with a NULL @ctx.
+ */
+static void mlx4_ib_free_demux_ctx(struct mlx4_ib_demux_ctx *ctx)
+{
+       int i;
+       if (ctx) {
+               struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);
+               mlx4_ib_mcg_port_cleanup(ctx, 1);
+               /* stop all contexts before flushing so work won't requeue */
+               for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
+                       if (!ctx->tun[i])
+                               continue;
+                       if (ctx->tun[i]->state > DEMUX_PV_STATE_DOWN)
+                               ctx->tun[i]->state = DEMUX_PV_STATE_DOWNING;
+               }
+               flush_workqueue(ctx->wq);
+               for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
+                       /* flush=0: the shared wq was already flushed above */
+                       destroy_pv_resources(dev, i, ctx->port, ctx->tun[i], 0);
+                       free_pv_object(dev, i, ctx->port);
+               }
+               kfree(ctx->tun);
+               destroy_workqueue(ctx->ud_wq);
+               destroy_workqueue(ctx->wq);
+       }
+}
+
+/*
+ * Create (@do_init != 0) or tear down (@do_init == 0) the master
+ * function's tunnel QP resources on every port.  No-op unless this
+ * device is the SR-IOV master.
+ */
+static void mlx4_ib_master_tunnels(struct mlx4_ib_dev *dev, int do_init)
+{
+       int i;
+
+       if (!mlx4_is_master(dev->dev))
+               return;
+       /* initialize or tear down tunnel QPs for the master */
+       for (i = 0; i < dev->dev->caps.num_ports; i++)
+               mlx4_ib_tunnels_update(dev, mlx4_master_func_num(dev->dev), i + 1, do_init);
+}
+
+/*
+ * Initialize SR-IOV paravirtualization support for the device.
+ *
+ * No-op (returns 0) on single-function devices.  On a slave only the
+ * CM paravirt state is initialized.  On the master: assign node GUIDs
+ * to every function, start the alias-GUID service and sysfs entries,
+ * then per port cache the port GUID and allocate the SQP and demux
+ * contexts before creating the master's tunnel QPs.
+ * Returns 0 or -errno with prior steps unwound.
+ */
+int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev)
+{
+       int i = 0;
+       int err;
+
+       if (!mlx4_is_mfunc(dev->dev))
+               return 0;
+
+       dev->sriov.is_going_down = 0;
+       spin_lock_init(&dev->sriov.going_down_lock);
+       mlx4_ib_cm_paravirt_init(dev);
+
+       mlx4_ib_warn(&dev->ib_dev, "multi-function enabled\n");
+
+       if (mlx4_is_slave(dev->dev)) {
+               mlx4_ib_warn(&dev->ib_dev, "operating in qp1 tunnel mode\n");
+               return 0;
+       }
+
+       /* master keeps its own node GUID; slaves get generated ones */
+       for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
+               if (i == mlx4_master_func_num(dev->dev))
+                       mlx4_put_slave_node_guid(dev->dev, i, dev->ib_dev.node_guid);
+               else
+                       mlx4_put_slave_node_guid(dev->dev, i, mlx4_ib_gen_node_guid());
+       }
+
+       err = mlx4_ib_init_alias_guid_service(dev);
+       if (err) {
+               mlx4_ib_warn(&dev->ib_dev, "Failed init alias guid process.\n");
+               goto paravirt_err;
+       }
+       err = mlx4_ib_device_register_sysfs(dev);
+       if (err) {
+               mlx4_ib_warn(&dev->ib_dev, "Failed to register sysfs\n");
+               goto sysfs_err;
+       }
+
+       mlx4_ib_warn(&dev->ib_dev, "initializing demux service for %d qp1 clients\n",
+                    dev->dev->caps.sqp_demux);
+       for (i = 0; i < dev->num_ports; i++) {
+               union ib_gid gid;
+               /* netw_view=1: query the real (network) view of the GID */
+               err = __mlx4_ib_query_gid(&dev->ib_dev, i + 1, 0, &gid, 1);
+               if (err)
+                       goto demux_err;
+               dev->sriov.demux[i].guid_cache[0] = gid.global.interface_id;
+               err = alloc_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1,
+                                     &dev->sriov.sqps[i]);
+               if (err)
+                       goto demux_err;
+               err = mlx4_ib_alloc_demux_ctx(dev, &dev->sriov.demux[i], i + 1);
+               if (err)
+                       goto demux_err;
+       }
+       mlx4_ib_master_tunnels(dev, 1);
+       return 0;
+
+demux_err:
+       /* NOTE(review): this unwind stops at i == 1, so the resources for
+        * index 0 (port 1) appear to be leaked on failure at a later
+        * iteration -- verify whether the loop should run while (i >= 0). */
+       while (i > 0) {
+               free_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1);
+               mlx4_ib_free_demux_ctx(&dev->sriov.demux[i]);
+               --i;
+       }
+       mlx4_ib_device_unregister_sysfs(dev);
+
+sysfs_err:
+       mlx4_ib_destroy_alias_guid_service(dev);
+
+paravirt_err:
+       mlx4_ib_cm_paravirt_clean(dev, -1);
+
+       return err;
+}
+
+/*
+ * Shut down SR-IOV paravirtualization: set is_going_down under the
+ * lock so new up/down work is refused, then on the master flush each
+ * port's up/down queue and release the SQP and demux contexts, the CM
+ * paravirt state, the alias-GUID service and the sysfs entries.
+ */
+void mlx4_ib_close_sriov(struct mlx4_ib_dev *dev)
+{
+       int i;
+       unsigned long flags;
+
+       if (!mlx4_is_mfunc(dev->dev))
+               return;
+
+       spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
+       dev->sriov.is_going_down = 1;
+       spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
+       if (mlx4_is_master(dev->dev)) {
+               for (i = 0; i < dev->num_ports; i++) {
+                       flush_workqueue(dev->sriov.demux[i].ud_wq);
+                       mlx4_ib_free_sqp_ctx(dev->sriov.sqps[i]);
+                       kfree(dev->sriov.sqps[i]);
+                       dev->sriov.sqps[i] = NULL;
+                       mlx4_ib_free_demux_ctx(&dev->sriov.demux[i]);
+               }
+
+               mlx4_ib_cm_paravirt_clean(dev, -1);
+               mlx4_ib_destroy_alias_guid_service(dev);
+               mlx4_ib_device_unregister_sysfs(dev);
+       }
+}
index cc05579ebce7fd6b5f5fc9c86b2a1452db0101a7..718ec6b2bad24e5cbee875834739604d2dd0308d 100644 (file)
@@ -59,6 +59,10 @@ MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_VERSION(DRV_VERSION);
 
+int mlx4_ib_sm_guid_assign = 1;
+module_param_named(sm_guid_assign, mlx4_ib_sm_guid_assign, int, 0444);
+MODULE_PARM_DESC(sm_guid_assign, "Enable SM alias_GUID assignment if sm_guid_assign > 0 (Default: 1)");
+
 static const char mlx4_ib_version[] =
        DRV_NAME ": Mellanox ConnectX InfiniBand driver v"
        DRV_VERSION " (" DRV_RELDATE ")\n";
@@ -70,6 +74,8 @@ struct update_gid_work {
        int                     port;
 };
 
+static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init);
+
 static struct workqueue_struct *wq;
 
 static void init_query_mad(struct ib_smp *mad)
@@ -98,7 +104,8 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
        init_query_mad(in_mad);
        in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
 
-       err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, 1, NULL, NULL, in_mad, out_mad);
+       err = mlx4_MAD_IFC(to_mdev(ibdev), MLX4_MAD_IFC_IGNORE_KEYS,
+                          1, NULL, NULL, in_mad, out_mad);
        if (err)
                goto out;
 
@@ -133,7 +140,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
 
        props->vendor_id           = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
                0xffffff;
-       props->vendor_part_id      = be16_to_cpup((__be16 *) (out_mad->data + 30));
+       props->vendor_part_id      = dev->dev->pdev->device;
        props->hw_ver              = be32_to_cpup((__be32 *) (out_mad->data + 32));
        memcpy(&props->sys_image_guid, out_mad->data +  4, 8);
 
@@ -182,11 +189,12 @@ mlx4_ib_port_link_layer(struct ib_device *device, u8 port_num)
 }
 
 static int ib_link_query_port(struct ib_device *ibdev, u8 port,
-                             struct ib_port_attr *props)
+                             struct ib_port_attr *props, int netw_view)
 {
        struct ib_smp *in_mad  = NULL;
        struct ib_smp *out_mad = NULL;
        int ext_active_speed;
+       int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
        int err = -ENOMEM;
 
        in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
@@ -198,7 +206,10 @@ static int ib_link_query_port(struct ib_device *ibdev, u8 port,
        in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
        in_mad->attr_mod = cpu_to_be32(port);
 
-       err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL,
+       if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
+               mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
+
+       err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
                                in_mad, out_mad);
        if (err)
                goto out;
@@ -211,7 +222,10 @@ static int ib_link_query_port(struct ib_device *ibdev, u8 port,
        props->state            = out_mad->data[32] & 0xf;
        props->phys_state       = out_mad->data[33] >> 4;
        props->port_cap_flags   = be32_to_cpup((__be32 *) (out_mad->data + 20));
-       props->gid_tbl_len      = to_mdev(ibdev)->dev->caps.gid_table_len[port];
+       if (netw_view)
+               props->gid_tbl_len = out_mad->data[50];
+       else
+               props->gid_tbl_len = to_mdev(ibdev)->dev->caps.gid_table_len[port];
        props->max_msg_sz       = to_mdev(ibdev)->dev->caps.max_msg_sz;
        props->pkey_tbl_len     = to_mdev(ibdev)->dev->caps.pkey_table_len[port];
        props->bad_pkey_cntr    = be16_to_cpup((__be16 *) (out_mad->data + 46));
@@ -244,7 +258,7 @@ static int ib_link_query_port(struct ib_device *ibdev, u8 port,
                in_mad->attr_id = MLX4_ATTR_EXTENDED_PORT_INFO;
                in_mad->attr_mod = cpu_to_be32(port);
 
-               err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port,
+               err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port,
                                   NULL, NULL, in_mad, out_mad);
                if (err)
                        goto out;
@@ -270,7 +284,7 @@ static u8 state_to_phys_state(enum ib_port_state state)
 }
 
 static int eth_link_query_port(struct ib_device *ibdev, u8 port,
-                              struct ib_port_attr *props)
+                              struct ib_port_attr *props, int netw_view)
 {
 
        struct mlx4_ib_dev *mdev = to_mdev(ibdev);
@@ -320,26 +334,36 @@ out:
        return err;
 }
 
-static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
-                             struct ib_port_attr *props)
+int __mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
+                        struct ib_port_attr *props, int netw_view)
 {
        int err;
 
        memset(props, 0, sizeof *props);
 
        err = mlx4_ib_port_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND ?
-               ib_link_query_port(ibdev, port, props) :
-                               eth_link_query_port(ibdev, port, props);
+               ib_link_query_port(ibdev, port, props, netw_view) :
+                               eth_link_query_port(ibdev, port, props, netw_view);
 
        return err;
 }
 
-static int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
-                              union ib_gid *gid)
+static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
+                             struct ib_port_attr *props)
+{
+       /* returns host view */
+       return __mlx4_ib_query_port(ibdev, port, props, 0);
+}
+
+int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
+                       union ib_gid *gid, int netw_view)
 {
        struct ib_smp *in_mad  = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;
+       struct mlx4_ib_dev *dev = to_mdev(ibdev);
+       int clear = 0;
+       int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
 
        in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
@@ -350,23 +374,38 @@ static int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
        in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
        in_mad->attr_mod = cpu_to_be32(port);
 
-       err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
+       if (mlx4_is_mfunc(dev->dev) && netw_view)
+               mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
+
+       err = mlx4_MAD_IFC(dev, mad_ifc_flags, port, NULL, NULL, in_mad, out_mad);
        if (err)
                goto out;
 
        memcpy(gid->raw, out_mad->data + 8, 8);
 
+       if (mlx4_is_mfunc(dev->dev) && !netw_view) {
+               if (index) {
+                       /* For any index > 0, return the null guid */
+                       err = 0;
+                       clear = 1;
+                       goto out;
+               }
+       }
+
        init_query_mad(in_mad);
        in_mad->attr_id  = IB_SMP_ATTR_GUID_INFO;
        in_mad->attr_mod = cpu_to_be32(index / 8);
 
-       err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
+       err = mlx4_MAD_IFC(dev, mad_ifc_flags, port,
+                          NULL, NULL, in_mad, out_mad);
        if (err)
                goto out;
 
        memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);
 
 out:
+       if (clear)
+               memset(gid->raw + 8, 0, 8);
        kfree(in_mad);
        kfree(out_mad);
        return err;
@@ -386,16 +425,17 @@ static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
                             union ib_gid *gid)
 {
        if (rdma_port_get_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND)
-               return __mlx4_ib_query_gid(ibdev, port, index, gid);
+               return __mlx4_ib_query_gid(ibdev, port, index, gid, 0);
        else
                return iboe_query_gid(ibdev, port, index, gid);
 }
 
-static int mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
-                             u16 *pkey)
+int __mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
+                        u16 *pkey, int netw_view)
 {
        struct ib_smp *in_mad  = NULL;
        struct ib_smp *out_mad = NULL;
+       int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
        int err = -ENOMEM;
 
        in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
@@ -407,7 +447,11 @@ static int mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
        in_mad->attr_id  = IB_SMP_ATTR_PKEY_TABLE;
        in_mad->attr_mod = cpu_to_be32(index / 32);
 
-       err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
+       if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
+               mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
+
+       err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
+                          in_mad, out_mad);
        if (err)
                goto out;
 
@@ -419,6 +463,11 @@ out:
        return err;
 }
 
+static int mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
+{
+       return __mlx4_ib_query_pkey(ibdev, port, index, pkey, 0);
+}
+
 static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
                                 struct ib_device_modify *props)
 {
@@ -431,6 +480,9 @@ static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
        if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
                return 0;
 
+       if (mlx4_is_slave(to_mdev(ibdev)->dev))
+               return -EOPNOTSUPP;
+
        spin_lock_irqsave(&to_mdev(ibdev)->sm_lock, flags);
        memcpy(ibdev->node_desc, props->node_desc, 64);
        spin_unlock_irqrestore(&to_mdev(ibdev)->sm_lock, flags);
@@ -446,7 +498,7 @@ static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
        memset(mailbox->buf, 0, 256);
        memcpy(mailbox->buf, props->node_desc, 64);
        mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0,
-                MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+                MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
 
        mlx4_free_cmd_mailbox(to_mdev(ibdev)->dev, mailbox);
 
@@ -849,6 +901,7 @@ static int init_node_data(struct mlx4_ib_dev *dev)
 {
        struct ib_smp *in_mad  = NULL;
        struct ib_smp *out_mad = NULL;
+       int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
        int err = -ENOMEM;
 
        in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
@@ -858,8 +911,10 @@ static int init_node_data(struct mlx4_ib_dev *dev)
 
        init_query_mad(in_mad);
        in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
+       if (mlx4_is_master(dev->dev))
+               mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
 
-       err = mlx4_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
+       err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
        if (err)
                goto out;
 
@@ -867,10 +922,11 @@ static int init_node_data(struct mlx4_ib_dev *dev)
 
        in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
 
-       err = mlx4_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
+       err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
        if (err)
                goto out;
 
+       dev->dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
        memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);
 
 out:
@@ -959,7 +1015,7 @@ static void update_gids_task(struct work_struct *work)
 
        err = mlx4_cmd(dev, mailbox->dma, MLX4_SET_PORT_GID_TABLE << 8 | gw->port,
                       1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
-                      MLX4_CMD_NATIVE);
+                      MLX4_CMD_WRAPPED);
        if (err)
                pr_warn("set port command failed\n");
        else {
@@ -1121,6 +1177,38 @@ static int mlx4_ib_netdev_event(struct notifier_block *this, unsigned long event
        return NOTIFY_DONE;
 }
 
+/*
+ * Build the per-slave virtual-to-physical pkey-index mapping and the
+ * physical pkey cache.  Master only (no-op otherwise).
+ *
+ * The master function (and index 0 for every slave) gets the identity
+ * mapping; all other indices of non-master slaves are pointed at the last
+ * entry of the physical pkey table.  Each mapping is pushed to the device
+ * via mlx4_sync_pkey_table().  The cache is seeded with 0xFFFF (the
+ * default full-membership pkey) at index 0 and 0 elsewhere.
+ */
+static void init_pkeys(struct mlx4_ib_dev *ibdev)
+{
+       int port;
+       int slave;
+       int i;
+
+       if (mlx4_is_master(ibdev->dev)) {
+               for (slave = 0; slave <= ibdev->dev->num_vfs; ++slave) {
+                       for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
+                               for (i = 0;
+                                    i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
+                                    ++i) {
+                                       ibdev->pkeys.virt2phys_pkey[slave][port - 1][i] =
+                                       /* master has the identity virt2phys pkey mapping */
+                                               (slave == mlx4_master_func_num(ibdev->dev) || !i) ? i :
+                                                       ibdev->dev->phys_caps.pkey_phys_table_len[port] - 1;
+                                       mlx4_sync_pkey_table(ibdev->dev, slave, port, i,
+                                                            ibdev->pkeys.virt2phys_pkey[slave][port - 1][i]);
+                               }
+                       }
+               }
+               /* initialize pkey cache */
+               for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
+                       for (i = 0;
+                            i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
+                            ++i)
+                               ibdev->pkeys.phys_pkey_cache[port-1][i] =
+                                       (i) ? 0 : 0xFFFF;
+               }
+       }
+}
+
 static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
 {
        char name[32];
@@ -1207,11 +1295,15 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 
        pr_info_once("%s", mlx4_ib_version);
 
-       if (mlx4_is_mfunc(dev)) {
-               pr_warn("IB not yet supported in SRIOV\n");
+       mlx4_foreach_non_ib_transport_port(i, dev)
+               num_ports++;
+
+       if (mlx4_is_mfunc(dev) && num_ports) {
+               dev_err(&dev->pdev->dev, "RoCE is not supported over SRIOV as yet\n");
                return NULL;
        }
 
+       num_ports = 0;
        mlx4_foreach_ib_transport_port(i, dev)
                num_ports++;
 
@@ -1318,10 +1410,12 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
        ibdev->ib_dev.detach_mcast      = mlx4_ib_mcg_detach;
        ibdev->ib_dev.process_mad       = mlx4_ib_process_mad;
 
-       ibdev->ib_dev.alloc_fmr         = mlx4_ib_fmr_alloc;
-       ibdev->ib_dev.map_phys_fmr      = mlx4_ib_map_phys_fmr;
-       ibdev->ib_dev.unmap_fmr         = mlx4_ib_unmap_fmr;
-       ibdev->ib_dev.dealloc_fmr       = mlx4_ib_fmr_dealloc;
+       if (!mlx4_is_slave(ibdev->dev)) {
+               ibdev->ib_dev.alloc_fmr         = mlx4_ib_fmr_alloc;
+               ibdev->ib_dev.map_phys_fmr      = mlx4_ib_map_phys_fmr;
+               ibdev->ib_dev.unmap_fmr         = mlx4_ib_unmap_fmr;
+               ibdev->ib_dev.dealloc_fmr       = mlx4_ib_fmr_dealloc;
+       }
 
        if (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) {
                ibdev->ib_dev.alloc_xrcd = mlx4_ib_alloc_xrcd;
@@ -1357,11 +1451,14 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
        if (mlx4_ib_mad_init(ibdev))
                goto err_reg;
 
+       if (mlx4_ib_init_sriov(ibdev))
+               goto err_mad;
+
        if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE && !iboe->nb.notifier_call) {
                iboe->nb.notifier_call = mlx4_ib_netdev_event;
                err = register_netdevice_notifier(&iboe->nb);
                if (err)
-                       goto err_reg;
+                       goto err_sriov;
        }
 
        for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) {
@@ -1372,6 +1469,18 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 
        ibdev->ib_active = true;
 
+       if (mlx4_is_mfunc(ibdev->dev))
+               init_pkeys(ibdev);
+
+       /* create paravirt contexts for any VFs which are active */
+       if (mlx4_is_master(ibdev->dev)) {
+               for (j = 0; j < MLX4_MFUNC_MAX; j++) {
+                       if (j == mlx4_master_func_num(ibdev->dev))
+                               continue;
+                       if (mlx4_is_slave_active(ibdev->dev, j))
+                               do_slave_init(ibdev, j, 1);
+               }
+       }
        return ibdev;
 
 err_notif:
@@ -1379,6 +1488,12 @@ err_notif:
                pr_warn("failure unregistering notifier\n");
        flush_workqueue(wq);
 
+err_sriov:
+       mlx4_ib_close_sriov(ibdev);
+
+err_mad:
+       mlx4_ib_mad_cleanup(ibdev);
+
 err_reg:
        ib_unregister_device(&ibdev->ib_dev);
 
@@ -1407,6 +1522,7 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
        struct mlx4_ib_dev *ibdev = ibdev_ptr;
        int p;
 
+       mlx4_ib_close_sriov(ibdev);
        mlx4_ib_mad_cleanup(ibdev);
        ib_unregister_device(&ibdev->ib_dev);
        if (ibdev->iboe.nb.notifier_call) {
@@ -1428,6 +1544,51 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
        ib_dealloc_device(&ibdev->ib_dev);
 }
 
+/*
+ * Queue per-port work to set up (do_init=1) or tear down (do_init=0) the
+ * tunnel QPs for one slave.  Master only; runs in atomic context (called
+ * from the mlx4 event path), hence GFP_ATOMIC allocations.
+ *
+ * Each port gets its own mlx4_ib_demux_work item, queued on that port's
+ * demux workqueue unless the device is going down; the work handler
+ * (mlx4_ib_tunnels_update_work) owns and frees the queued items.
+ *
+ * NOTE(review): the `if (dm[i])` guard before kfree() is redundant —
+ * kfree(NULL) is a no-op — as is the `if (dm)` guard at out:.
+ * NOTE(review): if is_going_down is set, the already-allocated dm[i]
+ * items are neither queued nor freed here — looks like a leak; confirm.
+ */
+static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init)
+{
+       struct mlx4_ib_demux_work **dm = NULL;
+       struct mlx4_dev *dev = ibdev->dev;
+       int i;
+       unsigned long flags;
+
+       if (!mlx4_is_master(dev))
+               return;
+
+       dm = kcalloc(dev->caps.num_ports, sizeof *dm, GFP_ATOMIC);
+       if (!dm) {
+               pr_err("failed to allocate memory for tunneling qp update\n");
+               goto out;
+       }
+
+       for (i = 0; i < dev->caps.num_ports; i++) {
+               dm[i] = kmalloc(sizeof (struct mlx4_ib_demux_work), GFP_ATOMIC);
+               if (!dm[i]) {
+                       pr_err("failed to allocate memory for tunneling qp update work struct\n");
+                       /* unwind: free whatever was allocated so far */
+                       for (i = 0; i < dev->caps.num_ports; i++) {
+                               if (dm[i])
+                                       kfree(dm[i]);
+                       }
+                       goto out;
+               }
+       }
+       /* initialize or tear down tunnel QPs for the slave */
+       for (i = 0; i < dev->caps.num_ports; i++) {
+               INIT_WORK(&dm[i]->work, mlx4_ib_tunnels_update_work);
+               dm[i]->port = i + 1;
+               dm[i]->slave = slave;
+               dm[i]->do_init = do_init;
+               dm[i]->dev = ibdev;
+               /* don't queue new work once close_sriov has started */
+               spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags);
+               if (!ibdev->sriov.is_going_down)
+                       queue_work(ibdev->sriov.demux[i].ud_wq, &dm[i]->work);
+               spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
+       }
+out:
+       if (dm)
+               kfree(dm);
+       return;
+}
+
 static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
                          enum mlx4_dev_event event, unsigned long param)
 {
@@ -1435,22 +1596,28 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
        struct mlx4_ib_dev *ibdev = to_mdev((struct ib_device *) ibdev_ptr);
        struct mlx4_eqe *eqe = NULL;
        struct ib_event_work *ew;
-       int port = 0;
+       int p = 0;
 
        if (event == MLX4_DEV_EVENT_PORT_MGMT_CHANGE)
                eqe = (struct mlx4_eqe *)param;
        else
-               port = (u8)param;
-
-       if (port > ibdev->num_ports)
-               return;
+               p = (int) param;
 
        switch (event) {
        case MLX4_DEV_EVENT_PORT_UP:
+               if (p > ibdev->num_ports)
+                       return;
+               if (mlx4_is_master(dev) &&
+                   rdma_port_get_link_layer(&ibdev->ib_dev, p) ==
+                       IB_LINK_LAYER_INFINIBAND) {
+                       mlx4_ib_invalidate_all_guid_record(ibdev, p);
+               }
                ibev.event = IB_EVENT_PORT_ACTIVE;
                break;
 
        case MLX4_DEV_EVENT_PORT_DOWN:
+               if (p > ibdev->num_ports)
+                       return;
                ibev.event = IB_EVENT_PORT_ERR;
                break;
 
@@ -1469,7 +1636,21 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
                INIT_WORK(&ew->work, handle_port_mgmt_change_event);
                memcpy(&ew->ib_eqe, eqe, sizeof *eqe);
                ew->ib_dev = ibdev;
-               handle_port_mgmt_change_event(&ew->work);
+               /* need to queue only for port owner, which uses GEN_EQE */
+               if (mlx4_is_master(dev))
+                       queue_work(wq, &ew->work);
+               else
+                       handle_port_mgmt_change_event(&ew->work);
+               return;
+
+       case MLX4_DEV_EVENT_SLAVE_INIT:
+               /* here, p is the slave id */
+               do_slave_init(ibdev, p, 1);
+               return;
+
+       case MLX4_DEV_EVENT_SLAVE_SHUTDOWN:
+               /* here, p is the slave id */
+               do_slave_init(ibdev, p, 0);
                return;
 
        default:
@@ -1477,7 +1658,7 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
        }
 
        ibev.device           = ibdev_ptr;
-       ibev.element.port_num = port;
+       ibev.element.port_num = (u8) p;
 
        ib_dispatch_event(&ibev);
 }
@@ -1497,18 +1678,28 @@ static int __init mlx4_ib_init(void)
        if (!wq)
                return -ENOMEM;
 
+       err = mlx4_ib_mcg_init();
+       if (err)
+               goto clean_wq;
+
        err = mlx4_register_interface(&mlx4_ib_interface);
-       if (err) {
-               destroy_workqueue(wq);
-               return err;
-       }
+       if (err)
+               goto clean_mcg;
 
        return 0;
+
+clean_mcg:
+       mlx4_ib_mcg_destroy();
+
+clean_wq:
+       destroy_workqueue(wq);
+       return err;
 }
 
 static void __exit mlx4_ib_cleanup(void)
 {
        mlx4_unregister_interface(&mlx4_ib_interface);
+       mlx4_ib_mcg_destroy();
        destroy_workqueue(wq);
 }
 
diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
new file mode 100644 (file)
index 0000000..3c3b54c
--- /dev/null
@@ -0,0 +1,1254 @@
+/*
+ * Copyright (c) 2012 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <rdma/ib_mad.h>
+#include <rdma/ib_smi.h>
+#include <rdma/ib_cache.h>
+#include <rdma/ib_sa.h>
+
+#include <linux/mlx4/cmd.h>
+#include <linux/rbtree.h>
+#include <linux/delay.h>
+
+#include "mlx4_ib.h"
+
+/* limits and timeouts for the multicast-group (MCG) paravirt demuxer */
+#define MAX_VFS                80
+#define MAX_PEND_REQS_PER_FUNC 4
+#define MAD_TIMEOUT_MS 2000
+
+/* logging helpers; the *_group variants tag messages with the MGID string */
+#define mcg_warn(fmt, arg...)  pr_warn("MCG WARNING: " fmt, ##arg)
+#define mcg_error(fmt, arg...) pr_err(fmt, ##arg)
+#define mcg_warn_group(group, format, arg...) \
+       pr_warn("%s-%d: %16s (port %d): WARNING: " format, __func__, __LINE__,\
+       (group)->name, group->demux->port, ## arg)
+
+#define mcg_error_group(group, format, arg...) \
+       pr_err("  %16s: " format, (group)->name, ## arg)
+
+
+/* all-zero GID, used as a sentinel for "MGID not yet assigned" */
+static union ib_gid mgid0;
+
+static struct workqueue_struct *clean_wq;
+
+/* per-function (slave) membership state in a multicast group */
+enum mcast_state {
+       MCAST_NOT_MEMBER = 0,
+       MCAST_MEMBER,
+};
+
+/* group-level protocol state: idle, or a join/leave MAD is in flight */
+enum mcast_group_state {
+       MCAST_IDLE,
+       MCAST_JOIN_SENT,
+       MCAST_LEAVE_SENT,
+       MCAST_RESP_READY
+};
+
+/* per-slave membership record within a mcast_group */
+struct mcast_member {
+       enum mcast_state state;
+       uint8_t                 join_state;      /* requested SA join_state bits */
+       int                     num_pend_reqs;   /* outstanding requests from this func */
+       struct list_head        pending;         /* this func's queued mcast_reqs */
+};
+
+/*
+ * Wire layout of an SA MCMemberRecord; overlaid on the data payload of an
+ * ib_sa_mad (cast from &mad.data elsewhere in this file).  Multi-byte
+ * fields are big-endian as on the wire.
+ */
+struct ib_sa_mcmember_data {
+       union ib_gid    mgid;
+       union ib_gid    port_gid;
+       __be32          qkey;
+       __be16          mlid;
+       u8              mtusel_mtu;
+       u8              tclass;
+       __be16          pkey;
+       u8              ratesel_rate;
+       u8              lifetmsel_lifetm;
+       __be32          sl_flowlabel_hoplimit;
+       u8              scope_join_state;
+       u8              proxy_join;
+       u8              reserved[2];
+};
+
+/*
+ * One paravirtualized multicast group, keyed by MGID in the demux
+ * context's rb-tree (see mcast_find()/mcast_insert()).
+ */
+struct mcast_group {
+       struct ib_sa_mcmember_data rec;         /* last known SA record for the group */
+       struct rb_node          node;           /* linkage in demux->mcg_table */
+       struct list_head        mgid0_list;     /* groups created before MGID is known */
+       struct mlx4_ib_demux_ctx *demux;        /* owning per-port demux context */
+       struct mcast_member     func[MAX_VFS];  /* per-slave membership state */
+       struct mutex            lock;
+       struct work_struct      work;
+       struct list_head        pending_list;   /* requests not yet sent to the SA */
+       int                     members[3];     /* member counts; presumably per join-state class — TODO confirm */
+       enum mcast_group_state  state;
+       enum mcast_group_state  prev_state;
+       struct ib_sa_mad        response_sa_mad;
+       __be64                  last_req_tid;   /* TID of the request awaiting an SA response */
+
+       char                    name[33]; /* MGID string */
+       struct device_attribute dentry;
+
+       /* refcount is the reference count for the following:
+          1. Each queued request
+          2. Each invocation of the worker thread
+          3. Membership of the port at the SA
+       */
+       atomic_t                refcount;
+
+       /* delayed work to clean pending SM request */
+       struct delayed_work     timeout_work;
+       struct list_head        cleanup_list;
+};
+
+/* one queued join/leave request from a function (slave) */
+struct mcast_req {
+       int                     func;           /* requesting slave id */
+       struct ib_sa_mad        sa_mad;         /* the original SA MAD from the VF */
+       struct list_head        group_list;     /* linkage in group->pending_list */
+       struct list_head        func_list;      /* linkage in member->pending */
+       struct mcast_group      *group;
+       int                     clean;          /* request generated for cleanup, not by a VF */
+};
+
+
+/* decrement a group refcount that must never hit zero here; warn if it does */
+#define safe_atomic_dec(ref) \
+       do {\
+               if (atomic_dec_and_test(ref)) \
+                       mcg_warn_group(group, "did not expect to reach zero\n"); \
+       } while (0)
+
+/* Map a mcast_group_state value to its name, for diagnostics. */
+static const char *get_state_string(enum mcast_group_state state)
+{
+       switch (state) {
+       case MCAST_IDLE:
+               return "MCAST_IDLE";
+       case MCAST_JOIN_SENT:
+               return "MCAST_JOIN_SENT";
+       case MCAST_LEAVE_SENT:
+               return "MCAST_LEAVE_SENT";
+       case MCAST_RESP_READY:
+               return "MCAST_RESP_READY";
+       }
+       return "Invalid State";
+}
+
+/*
+ * Look up a group by MGID in the demux context's rb-tree.
+ * Ordering is raw memcmp() over the 16-byte GID.  Returns the group, or
+ * NULL if not present.  Caller must hold the appropriate lock.
+ */
+static struct mcast_group *mcast_find(struct mlx4_ib_demux_ctx *ctx,
+                                     union ib_gid *mgid)
+{
+       struct rb_node *node = ctx->mcg_table.rb_node;
+       struct mcast_group *group;
+       int ret;
+
+       while (node) {
+               group = rb_entry(node, struct mcast_group, node);
+               ret = memcmp(mgid->raw, group->rec.mgid.raw, sizeof *mgid);
+               if (!ret)
+                       return group;
+
+               if (ret < 0)
+                       node = node->rb_left;
+               else
+                       node = node->rb_right;
+       }
+       return NULL;
+}
+
+/*
+ * Insert a group into the rb-tree, keyed by MGID (same memcmp() ordering
+ * as mcast_find()).  Returns NULL on success, or the already-present
+ * group with the same MGID without inserting.  Caller must hold the
+ * appropriate lock.
+ */
+static struct mcast_group *mcast_insert(struct mlx4_ib_demux_ctx *ctx,
+                                       struct mcast_group *group)
+{
+       struct rb_node **link = &ctx->mcg_table.rb_node;
+       struct rb_node *parent = NULL;
+       struct mcast_group *cur_group;
+       int ret;
+
+       while (*link) {
+               parent = *link;
+               cur_group = rb_entry(parent, struct mcast_group, node);
+
+               ret = memcmp(group->rec.mgid.raw, cur_group->rec.mgid.raw,
+                            sizeof group->rec.mgid);
+               if (ret < 0)
+                       link = &(*link)->rb_left;
+               else if (ret > 0)
+                       link = &(*link)->rb_right;
+               else
+                       return cur_group;
+       }
+       rb_link_node(&group->node, parent, link);
+       rb_insert_color(&group->node, &ctx->mcg_table);
+       return NULL;
+}
+
+/*
+ * Send a MAD to the SA on the wire, as the master function, over QP1.
+ * Snapshots the SM address-handle attributes under sm_lock; returns
+ * -EAGAIN if the port has no sm_ah yet (port not Active).
+ */
+static int send_mad_to_wire(struct mlx4_ib_demux_ctx *ctx, struct ib_mad *mad)
+{
+       struct mlx4_ib_dev *dev = ctx->dev;
+       struct ib_ah_attr       ah_attr;
+
+       spin_lock(&dev->sm_lock);
+       if (!dev->sm_ah[ctx->port - 1]) {
+               /* port is not yet Active, sm_ah not ready */
+               spin_unlock(&dev->sm_lock);
+               return -EAGAIN;
+       }
+       mlx4_ib_query_ah(dev->sm_ah[ctx->port - 1], &ah_attr);
+       spin_unlock(&dev->sm_lock);
+       return mlx4_ib_send_to_wire(dev, mlx4_master_func_num(dev->dev), ctx->port,
+                                   IB_QPT_GSI, 0, 1, IB_QP1_QKEY, &ah_attr, mad);
+}
+
+/*
+ * Deliver a MAD to one slave through the tunnel QP, synthesizing the work
+ * completion a real receive would have produced (slid = the SM's lid from
+ * the cached sm_ah, src_qp = 1).  Returns -EAGAIN if the GSI send agent
+ * for the port is not registered yet.
+ *
+ * NOTE(review): `wc` is stack-allocated and only partially initialized;
+ * fields not set here are garbage — confirm the consumer only reads the
+ * fields assigned below.
+ */
+static int send_mad_to_slave(int slave, struct mlx4_ib_demux_ctx *ctx,
+                            struct ib_mad *mad)
+{
+       struct mlx4_ib_dev *dev = ctx->dev;
+       struct ib_mad_agent *agent = dev->send_agent[ctx->port - 1][1];
+       struct ib_wc wc;
+       struct ib_ah_attr ah_attr;
+
+       /* Our agent might not yet be registered when mads start to arrive */
+       if (!agent)
+               return -EAGAIN;
+
+       ib_query_ah(dev->sm_ah[ctx->port - 1], &ah_attr);
+
+       wc.pkey_index = 0;
+       wc.sl = 0;
+       wc.dlid_path_bits = 0;
+       wc.port_num = ctx->port;
+       wc.slid = ah_attr.dlid;  /* opensm lid */
+       wc.src_qp = 1;
+       return mlx4_ib_send_to_slave(dev, slave, ctx->port, IB_QPT_GSI, &wc, NULL, mad);
+}
+
+/*
+ * Forward a VF's join request to the SA on behalf of the whole function:
+ * the request MAD is reused, with the port GID rewritten to the physical
+ * port's GID (slave 0, from the guid_cache) and a fresh demux-owned TID
+ * assigned so the response can be matched via last_req_tid.  On a
+ * successful send, arms the MAD_TIMEOUT_MS timeout work.
+ */
+static int send_join_to_wire(struct mcast_group *group, struct ib_sa_mad *sa_mad)
+{
+       struct ib_sa_mad mad;
+       struct ib_sa_mcmember_data *sa_mad_data = (struct ib_sa_mcmember_data *)&mad.data;
+       int ret;
+
+       /* we rely on a mad request as arrived from a VF */
+       memcpy(&mad, sa_mad, sizeof mad);
+
+       /* fix port GID to be the real one (slave 0) */
+       sa_mad_data->port_gid.global.interface_id = group->demux->guid_cache[0];
+
+       /* assign our own TID */
+       mad.mad_hdr.tid = mlx4_ib_get_new_demux_tid(group->demux);
+       group->last_req_tid = mad.mad_hdr.tid; /* keep it for later validation */
+
+       ret = send_mad_to_wire(group->demux, (struct ib_mad *)&mad);
+       /* set timeout handler */
+       if (!ret) {
+               /* calls mlx4_ib_mcg_timeout_handler */
+               queue_delayed_work(group->demux->mcg_wq, &group->timeout_work,
+                               msecs_to_jiffies(MAD_TIMEOUT_MS));
+       }
+
+       return ret;
+}
+
+/*
+ * Send an SA DELETE (leave) for the group, built from scratch from the
+ * cached group record with the caller's join_state substituted in.  Uses a
+ * fresh demux TID (saved in last_req_tid for response matching).  On send
+ * failure the group falls back to MCAST_IDLE; on success the
+ * MAD_TIMEOUT_MS timeout work is armed.
+ */
+static int send_leave_to_wire(struct mcast_group *group, u8 join_state)
+{
+       struct ib_sa_mad mad;
+       struct ib_sa_mcmember_data *sa_data = (struct ib_sa_mcmember_data *)&mad.data;
+       int ret;
+
+       memset(&mad, 0, sizeof mad);
+       mad.mad_hdr.base_version = 1;
+       mad.mad_hdr.mgmt_class = IB_MGMT_CLASS_SUBN_ADM;
+       mad.mad_hdr.class_version = 2;
+       mad.mad_hdr.method = IB_SA_METHOD_DELETE;
+       mad.mad_hdr.status = cpu_to_be16(0);
+       mad.mad_hdr.class_specific = cpu_to_be16(0);
+       mad.mad_hdr.tid = mlx4_ib_get_new_demux_tid(group->demux);
+       group->last_req_tid = mad.mad_hdr.tid; /* keep it for later validation */
+       mad.mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);
+       mad.mad_hdr.attr_mod = cpu_to_be32(0);
+       mad.sa_hdr.sm_key = 0x0;
+       /* attr_offset is in units of 8 bytes; 7 => 56-byte MCMemberRecord */
+       mad.sa_hdr.attr_offset = cpu_to_be16(7);
+       mad.sa_hdr.comp_mask = IB_SA_MCMEMBER_REC_MGID |
+               IB_SA_MCMEMBER_REC_PORT_GID | IB_SA_MCMEMBER_REC_JOIN_STATE;
+
+       *sa_data = group->rec;
+       sa_data->scope_join_state = join_state;
+
+       ret = send_mad_to_wire(group->demux, (struct ib_mad *)&mad);
+       if (ret)
+               group->state = MCAST_IDLE;
+
+       /* set timeout handler */
+       if (!ret) {
+               /* calls mlx4_ib_mcg_timeout_handler */
+               queue_delayed_work(group->demux->mcg_wq, &group->timeout_work,
+                               msecs_to_jiffies(MAD_TIMEOUT_MS));
+       }
+
+       return ret;
+}
+
+/* Answer a slave's join/leave with a GET_RESP SA MAD carrying the
+ * group record and the given MAD status. The slave's own view is
+ * reconstructed: its requested join_state bits and its original
+ * port GID replace the group-wide values, and the first TID byte is
+ * zeroed (the demux path uses that byte to carry the slave id — see
+ * mlx4_ib_mcg_demux_handler). Returns send_mad_to_slave()'s result. */
+static int send_reply_to_slave(int slave, struct mcast_group *group,
+               struct ib_sa_mad *req_sa_mad, u16 status)
+{
+       struct ib_sa_mad mad;
+       struct ib_sa_mcmember_data *sa_data = (struct ib_sa_mcmember_data *)&mad.data;
+       struct ib_sa_mcmember_data *req_sa_data = (struct ib_sa_mcmember_data *)&req_sa_mad->data;
+       int ret;
+
+       memset(&mad, 0, sizeof mad);
+       mad.mad_hdr.base_version = 1;
+       mad.mad_hdr.mgmt_class = IB_MGMT_CLASS_SUBN_ADM;
+       mad.mad_hdr.class_version = 2;
+       mad.mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
+       mad.mad_hdr.status = cpu_to_be16(status);
+       mad.mad_hdr.class_specific = cpu_to_be16(0);
+       mad.mad_hdr.tid = req_sa_mad->mad_hdr.tid;
+       *(u8 *)&mad.mad_hdr.tid = 0; /* resetting tid to 0 */
+       mad.mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);
+       mad.mad_hdr.attr_mod = cpu_to_be32(0);
+       mad.sa_hdr.sm_key = req_sa_mad->sa_hdr.sm_key;
+       mad.sa_hdr.attr_offset = cpu_to_be16(7);
+       mad.sa_hdr.comp_mask = 0; /* ignored on responses, see IBTA spec */
+
+       *sa_data = group->rec;
+
+       /* reconstruct VF's requested join_state and port_gid */
+       sa_data->scope_join_state &= 0xf0;
+       sa_data->scope_join_state |= (group->func[slave].join_state & 0x0f);
+       memcpy(&sa_data->port_gid, &req_sa_data->port_gid, sizeof req_sa_data->port_gid);
+
+       ret = send_mad_to_slave(slave, group->demux, (struct ib_mad *)&mad);
+       return ret;
+}
+
+/* Validate one selector-qualified field of a request record against
+ * the group record. The top two bits of dst_value encode the selector
+ * (GT/LT/EQ), the low six bits of both values carry the field itself.
+ * Returns non-zero when the requested relation is violated; returns 0
+ * when it holds, when the selector is unknown, or when either the
+ * selector or the value is absent from the component mask. */
+static int check_selector(ib_sa_comp_mask comp_mask,
+                         ib_sa_comp_mask selector_mask,
+                         ib_sa_comp_mask value_mask,
+                         u8 src_value, u8 dst_value)
+{
+       u8 selector = dst_value >> 6;
+       u8 src = src_value & 0x3f;
+       u8 dst = dst_value & 0x3f;
+
+       /* Nothing to check unless both selector and value were given. */
+       if (!(comp_mask & selector_mask) || !(comp_mask & value_mask))
+               return 0;
+
+       switch (selector) {
+       case IB_SA_GT:
+               return src <= dst;
+       case IB_SA_LT:
+               return src >= dst;
+       case IB_SA_EQ:
+               return src != dst;
+       default:
+               return 0;
+       }
+}
+
+/* Validate a request record (dst) against the group record (src),
+ * honoring the request's component mask; selector-qualified fields go
+ * through check_selector(). MGID is assumed to match already and the
+ * Port_GID is always rewritten to ours, so neither is checked here.
+ * Returns 0 when compatible, MAD_STATUS_REQ_INVALID otherwise. */
+static u16 cmp_rec(struct ib_sa_mcmember_data *src,
+                  struct ib_sa_mcmember_data *dst, ib_sa_comp_mask comp_mask)
+{
+       /* src is group record, dst is request record */
+       /* MGID must already match */
+       /* Port_GID we always replace to our Port_GID, so it is a match */
+
+#define MAD_STATUS_REQ_INVALID 0x0200
+       if (comp_mask & IB_SA_MCMEMBER_REC_QKEY && src->qkey != dst->qkey)
+               return MAD_STATUS_REQ_INVALID;
+       if (comp_mask & IB_SA_MCMEMBER_REC_MLID && src->mlid != dst->mlid)
+               return MAD_STATUS_REQ_INVALID;
+       if (check_selector(comp_mask, IB_SA_MCMEMBER_REC_MTU_SELECTOR,
+                                IB_SA_MCMEMBER_REC_MTU,
+                                src->mtusel_mtu, dst->mtusel_mtu))
+               return MAD_STATUS_REQ_INVALID;
+       if (comp_mask & IB_SA_MCMEMBER_REC_TRAFFIC_CLASS &&
+           src->tclass != dst->tclass)
+               return MAD_STATUS_REQ_INVALID;
+       if (comp_mask & IB_SA_MCMEMBER_REC_PKEY && src->pkey != dst->pkey)
+               return MAD_STATUS_REQ_INVALID;
+       if (check_selector(comp_mask, IB_SA_MCMEMBER_REC_RATE_SELECTOR,
+                                IB_SA_MCMEMBER_REC_RATE,
+                                src->ratesel_rate, dst->ratesel_rate))
+               return MAD_STATUS_REQ_INVALID;
+       if (check_selector(comp_mask,
+                                IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME_SELECTOR,
+                                IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME,
+                                src->lifetmsel_lifetm, dst->lifetmsel_lifetm))
+               return MAD_STATUS_REQ_INVALID;
+       /* SL, FlowLabel and HopLimit share one be32 field; mask out
+        * each sub-field before comparing. */
+       if (comp_mask & IB_SA_MCMEMBER_REC_SL &&
+                       (be32_to_cpu(src->sl_flowlabel_hoplimit) & 0xf0000000) !=
+                       (be32_to_cpu(dst->sl_flowlabel_hoplimit) & 0xf0000000))
+               return MAD_STATUS_REQ_INVALID;
+       if (comp_mask & IB_SA_MCMEMBER_REC_FLOW_LABEL &&
+                       (be32_to_cpu(src->sl_flowlabel_hoplimit) & 0x0fffff00) !=
+                       (be32_to_cpu(dst->sl_flowlabel_hoplimit) & 0x0fffff00))
+               return MAD_STATUS_REQ_INVALID;
+       if (comp_mask & IB_SA_MCMEMBER_REC_HOP_LIMIT &&
+                       (be32_to_cpu(src->sl_flowlabel_hoplimit) & 0x000000ff) !=
+                       (be32_to_cpu(dst->sl_flowlabel_hoplimit) & 0x000000ff))
+               return MAD_STATUS_REQ_INVALID;
+       if (comp_mask & IB_SA_MCMEMBER_REC_SCOPE &&
+                       (src->scope_join_state & 0xf0) !=
+                       (dst->scope_join_state & 0xf0))
+               return MAD_STATUS_REQ_INVALID;
+
+       /* join_state checked separately, proxy_join ignored */
+
+       return 0;
+}
+
+/* Drop one reference on @group; return 1 if this was the last release
+ * and the group was destroyed, 0 otherwise.
+ * When the last reference is dropped outside the timeout handler and
+ * a timeout is still armed, the pending timeout work owns a release
+ * of its own: if it cannot be canceled here, the reference is re-taken
+ * and destruction is left to the timeout path. Groups still on the
+ * MGID0 list (never inserted into the rb-tree / sysfs) skip the
+ * tree/sysfs teardown. */
+static int release_group(struct mcast_group *group, int from_timeout_handler)
+{
+       struct mlx4_ib_demux_ctx *ctx = group->demux;
+       int nzgroup;
+
+       mutex_lock(&ctx->mcg_table_lock);
+       mutex_lock(&group->lock);
+       if (atomic_dec_and_test(&group->refcount)) {
+               if (!from_timeout_handler) {
+                       if (group->state != MCAST_IDLE &&
+                           !cancel_delayed_work(&group->timeout_work)) {
+                               /* timeout work is running; let it release */
+                               atomic_inc(&group->refcount);
+                               mutex_unlock(&group->lock);
+                               mutex_unlock(&ctx->mcg_table_lock);
+                               return 0;
+                       }
+               }
+
+               /* nzgroup: group has a real (non-zero) MGID, i.e. it is
+                * linked in the rb-tree and exposed via sysfs */
+               nzgroup = memcmp(&group->rec.mgid, &mgid0, sizeof mgid0);
+               if (nzgroup)
+                       del_sysfs_port_mcg_attr(ctx->dev, ctx->port, &group->dentry.attr);
+               if (!list_empty(&group->pending_list))
+                       mcg_warn_group(group, "releasing a group with non empty pending list\n");
+               if (nzgroup)
+                       rb_erase(&group->node, &ctx->mcg_table);
+               list_del_init(&group->mgid0_list);
+               mutex_unlock(&group->lock);
+               mutex_unlock(&ctx->mcg_table_lock);
+               kfree(group);
+               return 1;
+       } else {
+               mutex_unlock(&group->lock);
+               mutex_unlock(&ctx->mcg_table_lock);
+       }
+       return 0;
+}
+
+/* Add @inc (may be negative) to each per-type membership counter
+ * whose bit is set in the low three bits of @join_state. */
+static void adjust_membership(struct mcast_group *group, u8 join_state, int inc)
+{
+       int bit;
+
+       for (bit = 0; bit < 3; bit++)
+               if ((join_state >> bit) & 0x1)
+                       group->members[bit] += inc;
+}
+
+/* Compute which join-state bits should be left on the wire: the bits
+ * whose membership counter dropped to zero, restricted to the bits
+ * currently set in the group record. */
+static u8 get_leave_state(struct mcast_group *group)
+{
+       u8 empty_mask = 0;
+       int bit;
+
+       for (bit = 0; bit < 3; bit++)
+               if (group->members[bit] == 0)
+                       empty_mask |= 1 << bit;
+
+       return empty_mask & (group->rec.scope_join_state & 7);
+}
+
+/* Record a slave's join in the group's bookkeeping. Only bits the
+ * slave does not already hold are counted. Returns 1 if the slave
+ * newly became a member, 0 otherwise. */
+static int join_group(struct mcast_group *group, int slave, u8 join_mask)
+{
+       u8 new_bits;
+       int became_member = 0;
+
+       /* ignore bits the slave is already a member of */
+       new_bits = join_mask & ~group->func[slave].join_state;
+       adjust_membership(group, new_bits, 1);
+       group->func[slave].join_state |= new_bits;
+       if (new_bits && group->func[slave].state != MCAST_MEMBER) {
+               group->func[slave].state = MCAST_MEMBER;
+               became_member = 1;
+       }
+       return became_member;
+}
+
+/* Record a slave's leave in the group's bookkeeping. Returns 1 if the
+ * slave no longer holds any join bits (left the group completely),
+ * 0 otherwise. */
+static int leave_group(struct mcast_group *group, int slave, u8 leave_state)
+{
+       int left_completely = 0;
+
+       adjust_membership(group, leave_state, -1);
+       group->func[slave].join_state &= ~leave_state;
+       if (group->func[slave].join_state == 0) {
+               group->func[slave].state = MCAST_NOT_MEMBER;
+               left_completely = 1;
+       }
+       return left_completely;
+}
+
+/* Sanity-check a slave's leave request. Returns 0 when acceptable or
+ * MAD_STATUS_REQ_INVALID when the slave is not a member, the mask is
+ * empty, or it tries to clear join bits it does not hold. */
+static int check_leave(struct mcast_group *group, int slave, u8 leave_mask)
+{
+       if (group->func[slave].state != MCAST_MEMBER)
+               return MAD_STATUS_REQ_INVALID;
+
+       if (!leave_mask)
+               return MAD_STATUS_REQ_INVALID;
+
+       /* make sure we're not deleting unset bits */
+       if (leave_mask & ~group->func[slave].join_state)
+               return MAD_STATUS_REQ_INVALID;
+
+       return 0;
+}
+
+/* Delayed-work handler: the SM did not answer our join/leave within
+ * MAD_TIMEOUT_MS. Drops the timed-out pending join request (or, for a
+ * leave, clears the record's join bits), returns the group to
+ * MCAST_IDLE and reschedules the work handler so remaining queued
+ * requests can make progress. May free the group (via release_group()
+ * or directly for a never-inserted MGID0 group) and return early. */
+static void mlx4_ib_mcg_timeout_handler(struct work_struct *work)
+{
+       struct delayed_work *delay = to_delayed_work(work);
+       struct mcast_group *group;
+       struct mcast_req *req = NULL;
+
+       group = container_of(delay, typeof(*group), timeout_work);
+
+       mutex_lock(&group->lock);
+       if (group->state == MCAST_JOIN_SENT) {
+               if (!list_empty(&group->pending_list)) {
+                       /* drop the join request that timed out */
+                       req = list_first_entry(&group->pending_list, struct mcast_req, group_list);
+                       list_del(&req->group_list);
+                       list_del(&req->func_list);
+                       --group->func[req->func].num_pend_reqs;
+                       mutex_unlock(&group->lock);
+                       kfree(req);
+                       if (memcmp(&group->rec.mgid, &mgid0, sizeof mgid0)) {
+                               if (release_group(group, 1))
+                                       return;
+                       } else {
+                               /* MGID0 group never made it into the
+                                * rb-tree; free it directly */
+                               kfree(group);
+                               return;
+                       }
+                       mutex_lock(&group->lock);
+               } else
+                       mcg_warn_group(group, "DRIVER BUG\n");
+       } else if (group->state == MCAST_LEAVE_SENT) {
+               if (group->rec.scope_join_state & 7)
+                       group->rec.scope_join_state &= 0xf8;
+               group->state = MCAST_IDLE;
+               mutex_unlock(&group->lock);
+               if (release_group(group, 1))
+                       return;
+               mutex_lock(&group->lock);
+       } else
+               mcg_warn_group(group, "invalid state %s\n", get_state_string(group->state));
+       group->state = MCAST_IDLE;
+       /* re-run the work handler for whatever is still pending;
+        * take a reference for it, dropped if already queued */
+       atomic_inc(&group->refcount);
+       if (!queue_work(group->demux->mcg_wq, &group->work))
+               safe_atomic_dec(&group->refcount);
+
+       mutex_unlock(&group->lock);
+}
+
+/* Process one dequeued leave request. A "clean" request (req->clean
+ * set; no reply is sent for it) removes the slave's entire join
+ * state. The request is answered and freed here; the actual leave
+ * toward the SM, if needed, is sent later by the work handler.
+ * Returns the number of group references the caller must drop
+ * (always 1, for the consumed request). Called with group->lock held. */
+static int handle_leave_req(struct mcast_group *group, u8 leave_mask,
+                           struct mcast_req *req)
+{
+       u16 status;
+
+       if (req->clean)
+               leave_mask = group->func[req->func].join_state;
+
+       status = check_leave(group, req->func, leave_mask);
+       if (!status)
+               leave_group(group, req->func, leave_mask);
+
+       if (!req->clean)
+               send_reply_to_slave(req->func, group, &req->sa_mad, status);
+       --group->func[req->func].num_pend_reqs;
+       list_del(&req->group_list);
+       list_del(&req->func_list);
+       kfree(req);
+       return 1;
+}
+
+/* Process one dequeued join request. If the requested join bits are
+ * already covered by the group's state, validate the record with
+ * cmp_rec() and answer the slave locally. Otherwise forward a join to
+ * the SM; the request then stays queued until the response or the
+ * timeout, unless sending fails, in which case it is dropped.
+ * Returns the number of group references the caller must drop (1 when
+ * the request was consumed, 0 while it remains pending).
+ * Called with group->lock held. */
+static int handle_join_req(struct mcast_group *group, u8 join_mask,
+                          struct mcast_req *req)
+{
+       u8 group_join_state = group->rec.scope_join_state & 7;
+       int ref = 0;
+       u16 status;
+       struct ib_sa_mcmember_data *sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data;
+
+       if (join_mask == (group_join_state & join_mask)) {
+               /* port's membership need not change */
+               status = cmp_rec(&group->rec, sa_data, req->sa_mad.sa_hdr.comp_mask);
+               if (!status)
+                       join_group(group, req->func, join_mask);
+
+               --group->func[req->func].num_pend_reqs;
+               send_reply_to_slave(req->func, group, &req->sa_mad, status);
+               list_del(&req->group_list);
+               list_del(&req->func_list);
+               kfree(req);
+               ++ref;
+       } else {
+               /* port's membership needs to be updated */
+               group->prev_state = group->state;
+               if (send_join_to_wire(group, &req->sa_mad)) {
+                       /* send failed: drop the request, restore state */
+                       --group->func[req->func].num_pend_reqs;
+                       list_del(&req->group_list);
+                       list_del(&req->func_list);
+                       kfree(req);
+                       ref = 1;
+                       group->state = group->prev_state;
+               } else
+                       group->state = MCAST_JOIN_SENT;
+       }
+
+       return ref;
+}
+
+/* The group state machine. Runs on the context's single-threaded
+ * mcg_wq (see mlx4_ib_mcg_port_init()), so at most one instance runs
+ * per port. First consumes a pending SM response (if any), then
+ * drains queued join/leave requests while the group is idle, then
+ * sends a leave to the SM if all members of some join type are gone.
+ * rc accumulates the number of group references to drop at the end
+ * (starting at 1 for the scheduled work itself). */
+static void mlx4_ib_mcg_work_handler(struct work_struct *work)
+{
+       struct mcast_group *group;
+       struct mcast_req *req = NULL;
+       struct ib_sa_mcmember_data *sa_data;
+       u8 req_join_state;
+       int rc = 1; /* release_count - this is for the scheduled work */
+       u16 status;
+       u8 method;
+
+       group = container_of(work, typeof(*group), work);
+
+       mutex_lock(&group->lock);
+
+       /* First, let's see if a response from SM is waiting regarding this group.
+        * If so, we need to update the group's REC. If this is a bad response, we
+        * may need to send a bad response to a VF waiting for it. If VF is waiting
+        * and this is a good response, the VF will be answered later in this func. */
+       if (group->state == MCAST_RESP_READY) {
+               /* cancels mlx4_ib_mcg_timeout_handler */
+               cancel_delayed_work(&group->timeout_work);
+               status = be16_to_cpu(group->response_sa_mad.mad_hdr.status);
+               method = group->response_sa_mad.mad_hdr.method;
+               if (group->last_req_tid != group->response_sa_mad.mad_hdr.tid) {
+                       mcg_warn_group(group, "Got MAD response to existing MGID but wrong TID, dropping. Resp TID=%llx, group TID=%llx\n",
+                               be64_to_cpu(group->response_sa_mad.mad_hdr.tid),
+                               be64_to_cpu(group->last_req_tid));
+                       group->state = group->prev_state;
+                       goto process_requests;
+               }
+               if (status) {
+                       /* SM rejected our request: fail the waiting VF's
+                        * join with the same status */
+                       if (!list_empty(&group->pending_list))
+                               req = list_first_entry(&group->pending_list,
+                                               struct mcast_req, group_list);
+                       if ((method == IB_MGMT_METHOD_GET_RESP)) {
+                                       if (req) {
+                                               send_reply_to_slave(req->func, group, &req->sa_mad, status);
+                                               --group->func[req->func].num_pend_reqs;
+                                               list_del(&req->group_list);
+                                               list_del(&req->func_list);
+                                               kfree(req);
+                                               ++rc;
+                                       } else
+                                               mcg_warn_group(group, "no request for failed join\n");
+                       } else if (method == IB_SA_METHOD_DELETE_RESP && group->demux->flushing)
+                               ++rc;
+               } else {
+                       u8 resp_join_state;
+                       u8 cur_join_state;
+
+                       resp_join_state = ((struct ib_sa_mcmember_data *)
+                                               group->response_sa_mad.data)->scope_join_state & 7;
+                       cur_join_state = group->rec.scope_join_state & 7;
+
+                       if (method == IB_MGMT_METHOD_GET_RESP) {
+                               /* successful join */
+                               if (!cur_join_state && resp_join_state)
+                                       --rc;
+                       } else if (!resp_join_state)
+                                       ++rc;
+                       /* adopt the SM's view of the group record */
+                       memcpy(&group->rec, group->response_sa_mad.data, sizeof group->rec);
+               }
+               group->state = MCAST_IDLE;
+       }
+
+process_requests:
+       /* We should now go over pending join/leave requests, as long as we are idle. */
+       while (!list_empty(&group->pending_list) && group->state == MCAST_IDLE) {
+               req = list_first_entry(&group->pending_list, struct mcast_req,
+                                      group_list);
+               sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data;
+               req_join_state = sa_data->scope_join_state & 0x7;
+
+               /* For a leave request, we will immediately answer the VF, and
+                * update our internal counters. The actual leave will be sent
+                * to SM later, if at all needed. We dequeue the request now. */
+               if (req->sa_mad.mad_hdr.method == IB_SA_METHOD_DELETE)
+                       rc += handle_leave_req(group, req_join_state, req);
+               else
+                       rc += handle_join_req(group, req_join_state, req);
+       }
+
+       /* Handle leaves */
+       if (group->state == MCAST_IDLE) {
+               req_join_state = get_leave_state(group);
+               if (req_join_state) {
+                       group->rec.scope_join_state &= ~req_join_state;
+                       group->prev_state = group->state;
+                       if (send_leave_to_wire(group, req_join_state)) {
+                               group->state = group->prev_state;
+                               ++rc;
+                       } else
+                               group->state = MCAST_LEAVE_SENT;
+               }
+       }
+
+       if (!list_empty(&group->pending_list) && group->state == MCAST_IDLE)
+               goto process_requests;
+       mutex_unlock(&group->lock);
+
+       /* drop all references accumulated above, outside group->lock */
+       while (rc--)
+               release_group(group, 0);
+}
+
+/* Look up a pending MGID0 group by the TID of the join we sent on its
+ * behalf. If the SM assigned a real MGID, adopt it: rename the group,
+ * move it off the mgid0 list into the rb-tree and sysfs, and return
+ * it with an extra reference taken. If the response MGID is still
+ * zero, the join failed and the group plus its pending requests are
+ * discarded. Returns the relocated group, or NULL when no group
+ * matches the TID or the group was cleaned up. */
+static struct mcast_group *search_relocate_mgid0_group(struct mlx4_ib_demux_ctx *ctx,
+                                                      __be64 tid,
+                                                      union ib_gid *new_mgid)
+{
+       struct mcast_group *group = NULL, *cur_group;
+       struct mcast_req *req;
+       struct list_head *pos;
+       struct list_head *n;
+
+       mutex_lock(&ctx->mcg_table_lock);
+       list_for_each_safe(pos, n, &ctx->mcg_mgid0_list) {
+               group = list_entry(pos, struct mcast_group, mgid0_list);
+               mutex_lock(&group->lock);
+               if (group->last_req_tid == tid) {
+                       if (memcmp(new_mgid, &mgid0, sizeof mgid0)) {
+                               /* SM gave us a real MGID: adopt it */
+                               group->rec.mgid = *new_mgid;
+                               sprintf(group->name, "%016llx%016llx",
+                                               be64_to_cpu(group->rec.mgid.global.subnet_prefix),
+                                               be64_to_cpu(group->rec.mgid.global.interface_id));
+                               list_del_init(&group->mgid0_list);
+                               cur_group = mcast_insert(ctx, group);
+                               if (cur_group) {
+                                       /* A race between our code and SM. Silently cleaning the new one */
+                                       req = list_first_entry(&group->pending_list,
+                                                              struct mcast_req, group_list);
+                                       --group->func[req->func].num_pend_reqs;
+                                       list_del(&req->group_list);
+                                       list_del(&req->func_list);
+                                       kfree(req);
+                                       mutex_unlock(&group->lock);
+                                       mutex_unlock(&ctx->mcg_table_lock);
+                                       release_group(group, 0);
+                                       return NULL;
+                               }
+
+                               atomic_inc(&group->refcount);
+                               add_sysfs_port_mcg_attr(ctx->dev, ctx->port, &group->dentry.attr);
+                               mutex_unlock(&group->lock);
+                               mutex_unlock(&ctx->mcg_table_lock);
+                               return group;
+                       } else {
+                               /* MGID is still zero: the join failed,
+                                * discard the group and its requests */
+                               struct mcast_req *tmp1, *tmp2;
+
+                               list_del(&group->mgid0_list);
+                               if (!list_empty(&group->pending_list) && group->state != MCAST_IDLE)
+                                       cancel_delayed_work_sync(&group->timeout_work);
+
+                               list_for_each_entry_safe(tmp1, tmp2, &group->pending_list, group_list) {
+                                       list_del(&tmp1->group_list);
+                                       kfree(tmp1);
+                               }
+                               mutex_unlock(&group->lock);
+                               mutex_unlock(&ctx->mcg_table_lock);
+                               kfree(group);
+                               return NULL;
+                       }
+               }
+               mutex_unlock(&group->lock);
+       }
+       mutex_unlock(&ctx->mcg_table_lock);
+
+       return NULL;
+}
+
+static ssize_t sysfs_show_group(struct device *dev,
+               struct device_attribute *attr, char *buf);
+
+/* Find a group by MGID, creating it if absent and @create is set.
+ * A group created for the zero MGID is parked on the context's mgid0
+ * list until the SM assigns a real MGID (see
+ * search_relocate_mgid0_group()); only non-zero groups are inserted
+ * into the rb-tree and exposed via sysfs. On success returns the
+ * group with its refcount incremented; otherwise
+ * ERR_PTR(-ENOENT/-ENOMEM/-EINVAL).
+ * Caller must hold ctx->mcg_table_lock. */
+static struct mcast_group *acquire_group(struct mlx4_ib_demux_ctx *ctx,
+                                        union ib_gid *mgid, int create,
+                                        gfp_t gfp_mask)
+{
+       struct mcast_group *group, *cur_group;
+       int is_mgid0;
+       int i;
+
+       is_mgid0 = !memcmp(&mgid0, mgid, sizeof mgid0);
+       if (!is_mgid0) {
+               group = mcast_find(ctx, mgid);
+               if (group)
+                       goto found;
+       }
+
+       if (!create)
+               return ERR_PTR(-ENOENT);
+
+       group = kzalloc(sizeof *group, gfp_mask);
+       if (!group)
+               return ERR_PTR(-ENOMEM);
+
+       group->demux = ctx;
+       group->rec.mgid = *mgid;
+       INIT_LIST_HEAD(&group->pending_list);
+       INIT_LIST_HEAD(&group->mgid0_list);
+       for (i = 0; i < MAX_VFS; ++i)
+               INIT_LIST_HEAD(&group->func[i].pending);
+       INIT_WORK(&group->work, mlx4_ib_mcg_work_handler);
+       INIT_DELAYED_WORK(&group->timeout_work, mlx4_ib_mcg_timeout_handler);
+       mutex_init(&group->lock);
+       sprintf(group->name, "%016llx%016llx",
+                       be64_to_cpu(group->rec.mgid.global.subnet_prefix),
+                       be64_to_cpu(group->rec.mgid.global.interface_id));
+       sysfs_attr_init(&group->dentry.attr);
+       group->dentry.show = sysfs_show_group;
+       group->dentry.store = NULL;
+       group->dentry.attr.name = group->name;
+       group->dentry.attr.mode = 0400;
+       group->state = MCAST_IDLE;
+
+       if (is_mgid0) {
+               list_add(&group->mgid0_list, &ctx->mcg_mgid0_list);
+               goto found;
+       }
+
+       cur_group = mcast_insert(ctx, group);
+       if (cur_group) {
+               mcg_warn("group just showed up %s - confused\n", cur_group->name);
+               kfree(group);
+               return ERR_PTR(-EINVAL);
+       }
+
+       add_sysfs_port_mcg_attr(ctx->dev, ctx->port, &group->dentry.attr);
+
+found:
+       atomic_inc(&group->refcount);
+       return group;
+}
+
+/* Enqueue a VF request on its group and kick the work handler.
+ * Takes two group references: one owned by the request (dropped when
+ * it is consumed) and one for the scheduled work (dropped immediately
+ * if the work was already queued). */
+static void queue_req(struct mcast_req *req)
+{
+       struct mcast_group *grp = req->group;
+
+       atomic_inc(&grp->refcount); /* for the request */
+       atomic_inc(&grp->refcount); /* for scheduling the work */
+       list_add_tail(&req->group_list, &grp->pending_list);
+       list_add_tail(&req->func_list, &grp->func[req->func].pending);
+       /* calls mlx4_ib_mcg_work_handler */
+       if (!queue_work(grp->demux->mcg_wq, &grp->work))
+               safe_atomic_dec(&grp->refcount);
+}
+
+/* Demultiplex an MCMember MAD arriving from the wire (SM responses).
+ * Join/leave responses are attached to their group (matched by MGID,
+ * or by TID via the MGID0 list when the join was sent with a zero
+ * MGID) and the work handler is scheduled to process them.
+ * Returns 1 when the MAD was consumed here, 0 to pass it through to
+ * the guest over the tunnel. */
+int mlx4_ib_mcg_demux_handler(struct ib_device *ibdev, int port, int slave,
+                             struct ib_sa_mad *mad)
+{
+       struct mlx4_ib_dev *dev = to_mdev(ibdev);
+       struct ib_sa_mcmember_data *rec = (struct ib_sa_mcmember_data *)mad->data;
+       struct mlx4_ib_demux_ctx *ctx = &dev->sriov.demux[port - 1];
+       struct mcast_group *group;
+
+       switch (mad->mad_hdr.method) {
+       case IB_MGMT_METHOD_GET_RESP:
+       case IB_SA_METHOD_DELETE_RESP:
+               mutex_lock(&ctx->mcg_table_lock);
+               group = acquire_group(ctx, &rec->mgid, 0, GFP_KERNEL);
+               mutex_unlock(&ctx->mcg_table_lock);
+               if (IS_ERR(group)) {
+                       if (mad->mad_hdr.method == IB_MGMT_METHOD_GET_RESP) {
+                               __be64 tid = mad->mad_hdr.tid;
+                               *(u8 *)(&tid) = (u8)slave; /* in group we kept the modified TID */
+                               group = search_relocate_mgid0_group(ctx, tid, &rec->mgid);
+                       } else
+                               group = NULL;
+               }
+
+               if (!group)
+                       return 1;
+
+               mutex_lock(&group->lock);
+               group->response_sa_mad = *mad;
+               group->prev_state = group->state;
+               group->state = MCAST_RESP_READY;
+               /* calls mlx4_ib_mcg_work_handler */
+               atomic_inc(&group->refcount);
+               if (!queue_work(ctx->mcg_wq, &group->work))
+                       safe_atomic_dec(&group->refcount);
+               mutex_unlock(&group->lock);
+               release_group(group, 0);
+               return 1; /* consumed */
+       case IB_MGMT_METHOD_SET:
+       case IB_SA_METHOD_GET_TABLE:
+       case IB_SA_METHOD_GET_TABLE_RESP:
+       case IB_SA_METHOD_DELETE:
+               return 0; /* not consumed, pass-through to guest over tunnel */
+       default:
+               mcg_warn("In demux, port %d: unexpected MCMember method: 0x%x, dropping\n",
+                       port, mad->mad_hdr.method);
+               return 1; /* consumed */
+       }
+}
+
+/* Multiplex an MCMember MAD arriving from a VF (slave).
+ * Joins (SET) and leaves (DELETE) are turned into requests queued on
+ * the group and processed by the work handler; other methods pass
+ * through. Returns 1 when consumed, 0 for pass-through, or a
+ * negative errno (-EAGAIN while the context is flushing, -ENOMEM on
+ * allocation failure or per-function request-queue overflow). */
+int mlx4_ib_mcg_multiplex_handler(struct ib_device *ibdev, int port,
+                                 int slave, struct ib_sa_mad *sa_mad)
+{
+       struct mlx4_ib_dev *dev = to_mdev(ibdev);
+       struct ib_sa_mcmember_data *rec = (struct ib_sa_mcmember_data *)sa_mad->data;
+       struct mlx4_ib_demux_ctx *ctx = &dev->sriov.demux[port - 1];
+       struct mcast_group *group;
+       struct mcast_req *req;
+       int may_create = 0;
+
+       if (ctx->flushing)
+               return -EAGAIN;
+
+       switch (sa_mad->mad_hdr.method) {
+       case IB_MGMT_METHOD_SET:
+               may_create = 1;
+               /* fall through: SET and DELETE share the request path,
+                * but only SET may create the group */
+       case IB_SA_METHOD_DELETE:
+               req = kzalloc(sizeof *req, GFP_KERNEL);
+               if (!req)
+                       return -ENOMEM;
+
+               req->func = slave;
+               req->sa_mad = *sa_mad;
+
+               mutex_lock(&ctx->mcg_table_lock);
+               group = acquire_group(ctx, &rec->mgid, may_create, GFP_KERNEL);
+               mutex_unlock(&ctx->mcg_table_lock);
+               if (IS_ERR(group)) {
+                       kfree(req);
+                       return PTR_ERR(group);
+               }
+               mutex_lock(&group->lock);
+               if (group->func[slave].num_pend_reqs > MAX_PEND_REQS_PER_FUNC) {
+                       mutex_unlock(&group->lock);
+                       mcg_warn_group(group, "Port %d, Func %d has too many pending requests (%d), dropping\n",
+                                      port, slave, MAX_PEND_REQS_PER_FUNC);
+                       release_group(group, 0);
+                       kfree(req);
+                       return -ENOMEM;
+               }
+               ++group->func[slave].num_pend_reqs;
+               req->group = group;
+               queue_req(req);
+               mutex_unlock(&group->lock);
+               release_group(group, 0);
+               return 1; /* consumed */
+       case IB_SA_METHOD_GET_TABLE:
+       case IB_MGMT_METHOD_GET_RESP:
+       case IB_SA_METHOD_GET_TABLE_RESP:
+       case IB_SA_METHOD_DELETE_RESP:
+               return 0; /* not consumed, pass-through */
+       default:
+               mcg_warn("In multiplex, port %d, func %d: unexpected MCMember method: 0x%x, dropping\n",
+                       port, slave, sa_mad->mad_hdr.method);
+               return 1; /* consumed */
+       }
+}
+
+/* sysfs .show for a group's attribute file: one line summarizing the
+ * group's scope/join state, per-type member counts, refcount, whether
+ * a request is pending (with its TID), the state machine state, each
+ * member VF with its join bits, and the raw group record fields. */
+static ssize_t sysfs_show_group(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct mcast_group *group =
+               container_of(attr, struct mcast_group, dentry);
+       struct mcast_req *req = NULL;
+       char pending_str[40];
+       char state_str[40];
+       ssize_t len = 0;
+       int f;
+
+       if (group->state == MCAST_IDLE)
+               sprintf(state_str, "%s", get_state_string(group->state));
+       else
+               sprintf(state_str, "%s(TID=0x%llx)",
+                               get_state_string(group->state),
+                               be64_to_cpu(group->last_req_tid));
+       if (list_empty(&group->pending_list)) {
+               sprintf(pending_str, "No");
+       } else {
+               req = list_first_entry(&group->pending_list, struct mcast_req, group_list);
+               sprintf(pending_str, "Yes(TID=0x%llx)",
+                               be64_to_cpu(req->sa_mad.mad_hdr.tid));
+       }
+       len += sprintf(buf + len, "%1d [%02d,%02d,%02d] %4d %4s %5s     ",
+                       group->rec.scope_join_state & 0xf,
+                       group->members[2], group->members[1], group->members[0],
+                       atomic_read(&group->refcount),
+                       pending_str,
+                       state_str);
+       for (f = 0; f < MAX_VFS; ++f)
+               if (group->func[f].state == MCAST_MEMBER)
+                       len += sprintf(buf + len, "%d[%1x] ",
+                                       f, group->func[f].join_state);
+
+       /* raw record dump: pkey qkey mtu-sel mtu tclass rate-sel rate
+        * sl flowlabel hoplimit proxy_join */
+       len += sprintf(buf + len, "\t\t(%4hx %4x %2x %2x %2x %2x %2x "
+               "%4x %4x %2x %2x)\n",
+               be16_to_cpu(group->rec.pkey),
+               be32_to_cpu(group->rec.qkey),
+               (group->rec.mtusel_mtu & 0xc0) >> 6,
+               group->rec.mtusel_mtu & 0x3f,
+               group->rec.tclass,
+               (group->rec.ratesel_rate & 0xc0) >> 6,
+               group->rec.ratesel_rate & 0x3f,
+               (be32_to_cpu(group->rec.sl_flowlabel_hoplimit) & 0xf0000000) >> 28,
+               (be32_to_cpu(group->rec.sl_flowlabel_hoplimit) & 0x0fffff00) >> 8,
+               be32_to_cpu(group->rec.sl_flowlabel_hoplimit) & 0x000000ff,
+               group->rec.proxy_join);
+
+       return len;
+}
+
+/* Per-port init of the mcast demux context: the TID counter, the
+ * single-threaded workqueue that serializes all group work items for
+ * this port, the group rb-tree and the pending-MGID0 list.
+ * Returns 0 on success or -ENOMEM if the workqueue cannot be created. */
+int mlx4_ib_mcg_port_init(struct mlx4_ib_demux_ctx *ctx)
+{
+       char name[20];
+
+       atomic_set(&ctx->tid, 0);
+       /* snprintf bounds the write: sprintf could overflow name[] for
+        * unexpectedly large port numbers */
+       snprintf(name, sizeof name, "mlx4_ib_mcg%d", ctx->port);
+       ctx->mcg_wq = create_singlethread_workqueue(name);
+       if (!ctx->mcg_wq)
+               return -ENOMEM;
+
+       mutex_init(&ctx->mcg_table_lock);
+       ctx->mcg_table = RB_ROOT;
+       INIT_LIST_HEAD(&ctx->mcg_mgid0_list);
+       ctx->flushing = 0;
+
+       return 0;
+}
+
+/*
+ * Unconditionally release a multicast group: free any requests still on
+ * its pending list, remove its sysfs attribute, unlink it from the
+ * port's rb-tree and free the group itself.  Called with
+ * ctx->mcg_table_lock held (see _mlx4_ib_mcg_port_cleanup()).
+ */
+static void force_clean_group(struct mcast_group *group)
+{
+       /* Fix: declaration was split across two lines with a dangling ';'. */
+       struct mcast_req *req, *tmp;
+
+       list_for_each_entry_safe(req, tmp, &group->pending_list, group_list) {
+               list_del(&req->group_list);
+               kfree(req);
+       }
+       del_sysfs_port_mcg_attr(group->demux->dev, group->demux->port, &group->dentry.attr);
+       rb_erase(&group->node, &group->demux->mcg_table);
+       kfree(group);
+}
+
+/*
+ * Tear down all multicast group state for one demux port.  First queue
+ * leave/cancel work for every VF, then poll (for up to MAD_TIMEOUT_MS +
+ * 3s) for the group table to empty on its own, flush the MCG workqueue,
+ * and finally force-free whatever groups remain.
+ * @destroy_wq: nonzero on full device teardown; also destroys mcg_wq and
+ *              deliberately leaves ctx->flushing set so cleanup cannot
+ *              re-enter afterwards.
+ */
+static void _mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy_wq)
+{
+       int i;
+       struct rb_node *p;
+       struct mcast_group *group;
+       unsigned long end;
+       int count;
+
+       /* Another cleanup is already in progress for this port. */
+       if (ctx->flushing)
+               return;
+
+       ctx->flushing = 1;
+       for (i = 0; i < MAX_VFS; ++i)
+               clean_vf_mcast(ctx, i);
+
+       /* Give in-flight leaves a chance to drain before forcing. */
+       end = jiffies + msecs_to_jiffies(MAD_TIMEOUT_MS + 3000);
+       do {
+               count = 0;
+               mutex_lock(&ctx->mcg_table_lock);
+               for (p = rb_first(&ctx->mcg_table); p; p = rb_next(p))
+                       ++count;
+               mutex_unlock(&ctx->mcg_table_lock);
+               if (!count)
+                       break;
+
+               msleep(1);
+       } while (time_after(end, jiffies));
+
+       flush_workqueue(ctx->mcg_wq);
+       if (destroy_wq)
+               destroy_workqueue(ctx->mcg_wq);
+
+       /* Anything still in the table timed out -- reclaim it anyway. */
+       mutex_lock(&ctx->mcg_table_lock);
+       while ((p = rb_first(&ctx->mcg_table)) != NULL) {
+               group = rb_entry(p, struct mcast_group, node);
+               if (atomic_read(&group->refcount))
+                       mcg_warn_group(group, "group refcount %d!!! (pointer %p)\n", atomic_read(&group->refcount), group);
+
+               force_clean_group(group);
+       }
+       mutex_unlock(&ctx->mcg_table_lock);
+
+       if (!destroy_wq)
+               ctx->flushing = 0;
+}
+
+/* Deferred-cleanup descriptor handed to mcg_clean_task() via clean_wq. */
+struct clean_work {
+       struct work_struct work;
+       struct mlx4_ib_demux_ctx *ctx;  /* port to clean up */
+       int destroy_wq;                 /* forwarded to _mlx4_ib_mcg_port_cleanup() */
+};
+
+/* Work handler: run the deferred port cleanup, then free the descriptor. */
+static void mcg_clean_task(struct work_struct *work)
+{
+       struct clean_work *cw = container_of(work, struct clean_work, work);
+
+       _mlx4_ib_mcg_port_cleanup(cw->ctx, cw->destroy_wq);
+       kfree(cw);
+}
+
+/*
+ * Public entry point for port MCG cleanup.  With @destroy_wq set
+ * (device teardown) the cleanup runs synchronously; otherwise it is
+ * deferred to the global clean_wq so the caller does not block.  If the
+ * deferred-work item cannot be allocated, cleanup is skipped with a
+ * warning rather than done inline.
+ */
+void mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy_wq)
+{
+       struct clean_work *work;
+
+       if (destroy_wq) {
+               _mlx4_ib_mcg_port_cleanup(ctx, destroy_wq);
+               return;
+       }
+
+       work = kmalloc(sizeof *work, GFP_KERNEL);
+       if (!work) {
+               mcg_warn("failed allocating work for cleanup\n");
+               return;
+       }
+
+       work->ctx = ctx;
+       work->destroy_wq = destroy_wq;
+       INIT_WORK(&work->work, mcg_clean_task);
+       queue_work(clean_wq, &work->work);
+}
+
+/* Turn the request's SA MAD into a leave by setting the DELETE method. */
+static void build_leave_mad(struct mcast_req *req)
+{
+       struct ib_sa_mad *mad = &req->sa_mad;
+
+       mad->mad_hdr.method = IB_SA_METHOD_DELETE;
+}
+
+
+/*
+ * Drop every pending MCG request submitted by VF @vf on @group.  If the
+ * group-wide head request belongs to this VF while a join/leave is in
+ * flight, its timeout work is cancelled first and the group returns to
+ * MCAST_IDLE; a request whose timeout could not be cancelled is left
+ * for the timeout handler to reap (pend = 1).  Called with group->lock
+ * held (see clean_vf_mcast()).
+ */
+static void clear_pending_reqs(struct mcast_group *group, int vf)
+{
+       struct mcast_req *req, *tmp, *group_first = NULL;
+       int clear;
+       int pend = 0;
+
+       if (!list_empty(&group->pending_list))
+               group_first = list_first_entry(&group->pending_list, struct mcast_req, group_list);
+
+       list_for_each_entry_safe(req, tmp, &group->func[vf].pending, func_list) {
+               clear = 1;
+               if (group_first == req &&
+                   (group->state == MCAST_JOIN_SENT ||
+                    group->state == MCAST_LEAVE_SENT)) {
+                       /* Only free the head request if its timeout was
+                        * actually cancelled; otherwise the handler owns it. */
+                       clear = cancel_delayed_work(&group->timeout_work);
+                       pend = !clear;
+                       group->state = MCAST_IDLE;
+               }
+               if (clear) {
+                       --group->func[vf].num_pend_reqs;
+                       list_del(&req->group_list);
+                       list_del(&req->func_list);
+                       kfree(req);
+                       atomic_dec(&group->refcount);
+               }
+       }
+
+       /* Sanity: unless one request was left for the timeout handler,
+        * both the list and the counter must now be empty/zero. */
+       if (!pend && (!list_empty(&group->func[vf].pending) || group->func[vf].num_pend_reqs)) {
+               mcg_warn_group(group, "DRIVER BUG: list_empty %d, num_pend_reqs %d\n",
+                              list_empty(&group->func[vf].pending), group->func[vf].num_pend_reqs);
+       }
+}
+
+/*
+ * Queue a synthetic "leave" (SA DELETE) request on behalf of @slave if
+ * it is still joined to @group, so teardown follows the normal leave
+ * flow.  Skipped when the slave's most recent pending request is
+ * already a cleanup one.  Returns 0 on success or -ENOMEM.
+ * NOTE(review): "deleteing" is a typo for "deleting"; left as-is since
+ * the name is referenced elsewhere in this file.
+ */
+static int push_deleteing_req(struct mcast_group *group, int slave)
+{
+       struct mcast_req *req;
+       struct mcast_req *pend_req;
+
+       /* Nothing to leave if the slave holds no join state. */
+       if (!group->func[slave].join_state)
+               return 0;
+
+       req = kzalloc(sizeof *req, GFP_KERNEL);
+       if (!req) {
+               mcg_warn_group(group, "failed allocation - may leave stall groups\n");
+               return -ENOMEM;
+       }
+
+       /* Already have a cleanup request queued last -- don't duplicate. */
+       if (!list_empty(&group->func[slave].pending)) {
+               pend_req = list_entry(group->func[slave].pending.prev, struct mcast_req, group_list);
+               if (pend_req->clean) {
+                       kfree(req);
+                       return 0;
+               }
+       }
+
+       req->clean = 1;
+       req->func = slave;
+       req->group = group;
+       ++group->func[slave].num_pend_reqs;
+       build_leave_mad(req);
+       queue_req(req);
+       return 0;
+}
+
+/*
+ * Detach VF @slave from every multicast group on the port: clear its
+ * pending requests and, where it is still joined, queue a leave request
+ * on its behalf.  Takes the table lock and each group's lock in turn.
+ */
+void clean_vf_mcast(struct mlx4_ib_demux_ctx *ctx, int slave)
+{
+       struct mcast_group *group;
+       struct rb_node *p;
+
+       mutex_lock(&ctx->mcg_table_lock);
+       for (p = rb_first(&ctx->mcg_table); p; p = rb_next(p)) {
+               group = rb_entry(p, struct mcast_group, node);
+               mutex_lock(&group->lock);
+               if (atomic_read(&group->refcount)) {
+                       /* clear pending requests of this VF */
+                       clear_pending_reqs(group, slave);
+                       push_deleteing_req(group, slave);
+               }
+               mutex_unlock(&group->lock);
+       }
+       mutex_unlock(&ctx->mcg_table_lock);
+}
+
+
+/*
+ * Module-level MCG init: create the global workqueue used for deferred
+ * port cleanups.  Returns 0 on success or -ENOMEM.
+ */
+int mlx4_ib_mcg_init(void)
+{
+       clean_wq = create_singlethread_workqueue("mlx4_ib_mcg");
+       if (!clean_wq)
+               return -ENOMEM;
+
+       return 0;
+}
+
+/* Module-level MCG teardown: destroy the global cleanup workqueue. */
+void mlx4_ib_mcg_destroy(void)
+{
+       destroy_workqueue(clean_wq);
+}
index c136bb618e291ae8b48cff2bdf8181ea99ca9220..e04cbc9a54a53ee68863c06f3440bf2be877df7e 100644 (file)
 #include <linux/compiler.h>
 #include <linux/list.h>
 #include <linux/mutex.h>
+#include <linux/idr.h>
 
 #include <rdma/ib_verbs.h>
 #include <rdma/ib_umem.h>
+#include <rdma/ib_mad.h>
+#include <rdma/ib_sa.h>
 
 #include <linux/mlx4/device.h>
 #include <linux/mlx4/doorbell.h>
@@ -62,6 +65,9 @@ enum {
 #define MLX4_IB_SQ_HEADROOM(shift)     ((MLX4_IB_MAX_HEADROOM >> (shift)) + 1)
 #define MLX4_IB_SQ_MAX_SPARE           (MLX4_IB_SQ_HEADROOM(MLX4_IB_SQ_MIN_WQE_SHIFT))
 
+/*module param to indicate if SM assigns the alias_GUID*/
+extern int mlx4_ib_sm_guid_assign;
+
 struct mlx4_ib_ucontext {
        struct ib_ucontext      ibucontext;
        struct mlx4_uar         uar;
@@ -133,8 +139,10 @@ struct mlx4_ib_wq {
 };
 
 enum mlx4_ib_qp_flags {
-       MLX4_IB_QP_LSO                          = 1 << 0,
-       MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK     = 1 << 1,
+       MLX4_IB_QP_LSO = IB_QP_CREATE_IPOIB_UD_LSO,
+       MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK = IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
+       MLX4_IB_SRIOV_TUNNEL_QP = 1 << 30,
+       MLX4_IB_SRIOV_SQP = 1 << 31,
 };
 
 struct mlx4_ib_gid_entry {
@@ -144,6 +152,80 @@ struct mlx4_ib_gid_entry {
        u8                      port;
 };
 
+enum mlx4_ib_qp_type {
+       /*
+        * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
+        * here (and in that order) since the MAD layer uses them as
+        * indices into a 2-entry table.
+        */
+       MLX4_IB_QPT_SMI = IB_QPT_SMI,
+       MLX4_IB_QPT_GSI = IB_QPT_GSI,
+
+       MLX4_IB_QPT_RC = IB_QPT_RC,
+       MLX4_IB_QPT_UC = IB_QPT_UC,
+       MLX4_IB_QPT_UD = IB_QPT_UD,
+       MLX4_IB_QPT_RAW_IPV6 = IB_QPT_RAW_IPV6,
+       MLX4_IB_QPT_RAW_ETHERTYPE = IB_QPT_RAW_ETHERTYPE,
+       MLX4_IB_QPT_RAW_PACKET = IB_QPT_RAW_PACKET,
+       MLX4_IB_QPT_XRC_INI = IB_QPT_XRC_INI,
+       MLX4_IB_QPT_XRC_TGT = IB_QPT_XRC_TGT,
+
+       MLX4_IB_QPT_PROXY_SMI_OWNER     = 1 << 16,
+       MLX4_IB_QPT_PROXY_SMI           = 1 << 17,
+       MLX4_IB_QPT_PROXY_GSI           = 1 << 18,
+       MLX4_IB_QPT_TUN_SMI_OWNER       = 1 << 19,
+       MLX4_IB_QPT_TUN_SMI             = 1 << 20,
+       MLX4_IB_QPT_TUN_GSI             = 1 << 21,
+};
+
+#define MLX4_IB_QPT_ANY_SRIOV  (MLX4_IB_QPT_PROXY_SMI_OWNER | \
+       MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI | MLX4_IB_QPT_TUN_SMI_OWNER | \
+       MLX4_IB_QPT_TUN_SMI | MLX4_IB_QPT_TUN_GSI)
+
+enum mlx4_ib_mad_ifc_flags {
+       MLX4_MAD_IFC_IGNORE_MKEY        = 1,
+       MLX4_MAD_IFC_IGNORE_BKEY        = 2,
+       MLX4_MAD_IFC_IGNORE_KEYS        = (MLX4_MAD_IFC_IGNORE_MKEY |
+                                          MLX4_MAD_IFC_IGNORE_BKEY),
+       MLX4_MAD_IFC_NET_VIEW           = 4,
+};
+
+enum {
+       MLX4_NUM_TUNNEL_BUFS            = 256,
+};
+
+/* Per-packet header prepended when tunneling a MAD between a slave's
+ * proxy QP and the master's tunnel QP (wire layout; big-endian fields). */
+struct mlx4_ib_tunnel_header {
+       struct mlx4_av av;      /* address vector for the eventual wire send */
+       __be32 remote_qpn;
+       __be32 qkey;
+       __be16 vlan;
+       u8 mac[6];
+       __be16 pkey_index;
+       u8 reserved[6];
+};
+
+/* A kernel buffer together with its DMA mapping. */
+struct mlx4_ib_buf {
+       void *addr;     /* CPU address (kmalloc'd) */
+       dma_addr_t map; /* DMA handle for the same buffer */
+};
+
+struct mlx4_rcv_tunnel_hdr {
+       __be32 flags_src_qp; /* flags[6:5] is defined for VLANs:
+                             * 0x0 - no vlan was in the packet
+                             * 0x01 - C-VLAN was in the packet */
+       u8 g_ml_path; /* gid bit stands for ipv6/4 header in RoCE */
+       u8 reserved;
+       __be16 pkey_index;
+       __be16 sl_vid;
+       __be16 slid_mac_47_32;
+       __be32 mac_31_0;
+};
+
+/* Receive-side layout on a proxy special QP: GRH followed by the
+ * tunnel header.  Packed: this mirrors a wire/DMA format. */
+struct mlx4_ib_proxy_sqp_hdr {
+       struct ib_grh grh;
+       struct mlx4_rcv_tunnel_hdr tun;
+}  __packed;
+
 struct mlx4_ib_qp {
        struct ib_qp            ibqp;
        struct mlx4_qp          mqp;
@@ -159,6 +241,7 @@ struct mlx4_ib_qp {
        int                     sq_spare_wqes;
        struct mlx4_ib_wq       sq;
 
+       enum mlx4_ib_qp_type    mlx4_ib_qp_type;
        struct ib_umem         *umem;
        struct mlx4_mtt         mtt;
        int                     buf_size;
@@ -174,6 +257,8 @@ struct mlx4_ib_qp {
        int                     mlx_type;
        struct list_head        gid_list;
        struct list_head        steering_rules;
+       struct mlx4_ib_buf      *sqp_proxy_rcv;
+
 };
 
 struct mlx4_ib_srq {
@@ -196,6 +281,138 @@ struct mlx4_ib_ah {
        union mlx4_ext_av       av;
 };
 
+/****************************************/
+/* alias guid support */
+/****************************************/
+#define NUM_PORT_ALIAS_GUID            2
+#define NUM_ALIAS_GUID_IN_REC          8
+#define NUM_ALIAS_GUID_REC_IN_PORT     16
+#define GUID_REC_SIZE                  8
+#define NUM_ALIAS_GUID_PER_PORT                128
+#define MLX4_NOT_SET_GUID              (0x00LL)
+#define MLX4_GUID_FOR_DELETE_VAL       (~(0x00LL))
+
+enum mlx4_guid_alias_rec_status {
+       MLX4_GUID_INFO_STATUS_IDLE,
+       MLX4_GUID_INFO_STATUS_SET,
+       MLX4_GUID_INFO_STATUS_PENDING,
+};
+
+enum mlx4_guid_alias_rec_ownership {
+       MLX4_GUID_DRIVER_ASSIGN,
+       MLX4_GUID_SYSADMIN_ASSIGN,
+       MLX4_GUID_NONE_ASSIGN, /*init state of each record*/
+};
+
+enum mlx4_guid_alias_rec_method {
+       MLX4_GUID_INFO_RECORD_SET       = IB_MGMT_METHOD_SET,
+       MLX4_GUID_INFO_RECORD_DELETE    = IB_SA_METHOD_DELETE,
+};
+
+struct mlx4_sriov_alias_guid_info_rec_det {
+       u8 all_recs[GUID_REC_SIZE * NUM_ALIAS_GUID_IN_REC];
+       ib_sa_comp_mask guid_indexes; /*indicates which of the 8 records are valid*/
+       enum mlx4_guid_alias_rec_status status; /*indicates the administrative status of the record.*/
+       u8 method; /*set or delete*/
+       enum mlx4_guid_alias_rec_ownership ownership; /*indicates who assign that alias_guid record*/
+};
+
+struct mlx4_sriov_alias_guid_port_rec_det {
+       struct mlx4_sriov_alias_guid_info_rec_det all_rec_per_port[NUM_ALIAS_GUID_REC_IN_PORT];
+       struct workqueue_struct *wq;
+       struct delayed_work alias_guid_work;
+       u8 port;
+       struct mlx4_sriov_alias_guid *parent;
+       struct list_head cb_list;
+};
+
+struct mlx4_sriov_alias_guid {
+       struct mlx4_sriov_alias_guid_port_rec_det ports_guid[MLX4_MAX_PORTS];
+       spinlock_t ag_work_lock;
+       struct ib_sa_client *sa_client;
+};
+
+struct mlx4_ib_demux_work {
+       struct work_struct      work;
+       struct mlx4_ib_dev     *dev;
+       int                     slave;
+       int                     do_init;
+       u8                      port;
+
+};
+
+struct mlx4_ib_tun_tx_buf {
+       struct mlx4_ib_buf buf;
+       struct ib_ah *ah;
+};
+
+struct mlx4_ib_demux_pv_qp {
+       struct ib_qp *qp;
+       enum ib_qp_type proxy_qpt;
+       struct mlx4_ib_buf *ring;
+       struct mlx4_ib_tun_tx_buf *tx_ring;
+       spinlock_t tx_lock;
+       unsigned tx_ix_head;
+       unsigned tx_ix_tail;
+};
+
+/* Lifecycle states of a paravirtualized demux context. */
+enum mlx4_ib_demux_pv_state {
+       DEMUX_PV_STATE_DOWN,            /* not running */
+       DEMUX_PV_STATE_STARTING,        /* bring-up in progress */
+       DEMUX_PV_STATE_ACTIVE,          /* fully operational */
+       DEMUX_PV_STATE_DOWNING,         /* teardown in progress */
+};
+
+struct mlx4_ib_demux_pv_ctx {
+       int port;
+       int slave;
+       enum mlx4_ib_demux_pv_state state;
+       int has_smi;
+       struct ib_device *ib_dev;
+       struct ib_cq *cq;
+       struct ib_pd *pd;
+       struct ib_mr *mr;
+       struct work_struct work;
+       struct workqueue_struct *wq;
+       struct mlx4_ib_demux_pv_qp qp[2];
+};
+
+struct mlx4_ib_demux_ctx {
+       struct ib_device *ib_dev;
+       int port;
+       struct workqueue_struct *wq;
+       struct workqueue_struct *ud_wq;
+       spinlock_t ud_lock;
+       __be64 subnet_prefix;
+       __be64 guid_cache[128];
+       struct mlx4_ib_dev *dev;
+       /* the following lock protects both mcg_table and mcg_mgid0_list */
+       struct mutex            mcg_table_lock;
+       struct rb_root          mcg_table;
+       struct list_head        mcg_mgid0_list;
+       struct workqueue_struct *mcg_wq;
+       struct mlx4_ib_demux_pv_ctx **tun;
+       atomic_t tid;
+       int    flushing; /* flushing the work queue */
+};
+
+struct mlx4_ib_sriov {
+       struct mlx4_ib_demux_ctx demux[MLX4_MAX_PORTS];
+       struct mlx4_ib_demux_pv_ctx *sqps[MLX4_MAX_PORTS];
+       /* when using this spinlock you should use "irq" because
+        * it may be called from interrupt context.*/
+       spinlock_t going_down_lock;
+       int is_going_down;
+
+       struct mlx4_sriov_alias_guid alias_guid;
+
+       /* CM paravirtualization fields */
+       struct list_head cm_list;
+       spinlock_t id_map_lock;
+       struct rb_root sl_id_map;
+       struct idr pv_id_table;
+};
+
 struct mlx4_ib_iboe {
        spinlock_t              lock;
        struct net_device      *netdevs[MLX4_MAX_PORTS];
@@ -203,6 +420,42 @@ struct mlx4_ib_iboe {
        union ib_gid            gid_table[MLX4_MAX_PORTS][128];
 };
 
+struct pkey_mgt {
+       u8                      virt2phys_pkey[MLX4_MFUNC_MAX][MLX4_MAX_PORTS][MLX4_MAX_PORT_PKEYS];
+       u16                     phys_pkey_cache[MLX4_MAX_PORTS][MLX4_MAX_PORT_PKEYS];
+       struct list_head        pkey_port_list[MLX4_MFUNC_MAX];
+       struct kobject         *device_parent[MLX4_MFUNC_MAX];
+};
+
+struct mlx4_ib_iov_sysfs_attr {
+       void *ctx;
+       struct kobject *kobj;
+       unsigned long data;
+       u32 entry_num;
+       char name[15];
+       struct device_attribute dentry;
+       struct device *dev;
+};
+
+struct mlx4_ib_iov_sysfs_attr_ar {
+       struct mlx4_ib_iov_sysfs_attr dentries[3 * NUM_ALIAS_GUID_PER_PORT + 1];
+};
+
+struct mlx4_ib_iov_port {
+       char name[100];
+       u8 num;
+       struct mlx4_ib_dev *dev;
+       struct list_head list;
+       struct mlx4_ib_iov_sysfs_attr_ar *dentr_ar;
+       struct ib_port_attr attr;
+       struct kobject  *cur_port;
+       struct kobject  *admin_alias_parent;
+       struct kobject  *gids_parent;
+       struct kobject  *pkeys_parent;
+       struct kobject  *mcgs_parent;
+       struct mlx4_ib_iov_sysfs_attr mcg_dentry;
+};
+
 struct mlx4_ib_dev {
        struct ib_device        ib_dev;
        struct mlx4_dev        *dev;
@@ -216,6 +469,7 @@ struct mlx4_ib_dev {
        struct ib_mad_agent    *send_agent[MLX4_MAX_PORTS][2];
        struct ib_ah           *sm_ah[MLX4_MAX_PORTS];
        spinlock_t              sm_lock;
+       struct mlx4_ib_sriov    sriov;
 
        struct mutex            cap_mask_mutex;
        bool                    ib_active;
@@ -223,6 +477,11 @@ struct mlx4_ib_dev {
        int                     counters[MLX4_MAX_PORTS];
        int                    *eq_table;
        int                     eq_added;
+       struct kobject         *iov_parent;
+       struct kobject         *ports_parent;
+       struct kobject         *dev_ports_parent[MLX4_MFUNC_MAX];
+       struct mlx4_ib_iov_port iov_ports[MLX4_MAX_PORTS];
+       struct pkey_mgt         pkeys;
 };
 
 struct ib_event_work {
@@ -231,6 +490,13 @@ struct ib_event_work {
        struct mlx4_eqe         ib_eqe;
 };
 
+struct mlx4_ib_qp_tunnel_init_attr {
+       struct ib_qp_init_attr init_attr;
+       int slave;
+       enum ib_qp_type proxy_qp_type;
+       u8 port;
+};
+
 static inline struct mlx4_ib_dev *to_mdev(struct ib_device *ibdev)
 {
        return container_of(ibdev, struct mlx4_ib_dev, ib_dev);
@@ -300,6 +566,9 @@ static inline struct mlx4_ib_ah *to_mah(struct ib_ah *ibah)
        return container_of(ibah, struct mlx4_ib_ah, ibah);
 }
 
+int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev);
+void mlx4_ib_close_sriov(struct mlx4_ib_dev *dev);
+
 int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context, unsigned long virt,
                        struct mlx4_db *db);
 void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_db *db);
@@ -356,7 +625,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 int mlx4_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
                      struct ib_recv_wr **bad_wr);
 
-int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int ignore_mkey, int ignore_bkey,
+int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int mad_ifc_flags,
                 int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
                 void *in_mad, void *response_mad);
 int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags,        u8 port_num,
@@ -371,6 +640,13 @@ int mlx4_ib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, int npages,
                         u64 iova);
 int mlx4_ib_unmap_fmr(struct list_head *fmr_list);
 int mlx4_ib_fmr_dealloc(struct ib_fmr *fmr);
+int __mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
+                        struct ib_port_attr *props, int netw_view);
+int __mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
+                        u16 *pkey, int netw_view);
+
+int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
+                       union ib_gid *gid, int netw_view);
 
 int mlx4_ib_resolve_grh(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah_attr,
                        u8 *mac, int *is_mcast, u8 port);
@@ -385,10 +661,69 @@ static inline int mlx4_ib_ah_grh_present(struct mlx4_ib_ah *ah)
        return !!(ah->av.ib.g_slid & 0x80);
 }
 
+int mlx4_ib_mcg_port_init(struct mlx4_ib_demux_ctx *ctx);
+void mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy_wq);
+void clean_vf_mcast(struct mlx4_ib_demux_ctx *ctx, int slave);
+int mlx4_ib_mcg_init(void);
+void mlx4_ib_mcg_destroy(void);
+
+int mlx4_ib_find_real_gid(struct ib_device *ibdev, u8 port, __be64 guid);
+
+int mlx4_ib_mcg_multiplex_handler(struct ib_device *ibdev, int port, int slave,
+                                 struct ib_sa_mad *sa_mad);
+int mlx4_ib_mcg_demux_handler(struct ib_device *ibdev, int port, int slave,
+                             struct ib_sa_mad *mad);
+
 int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
                   union ib_gid *gid);
 
 void mlx4_ib_dispatch_event(struct mlx4_ib_dev *dev, u8 port_num,
                            enum ib_event_type type);
 
+void mlx4_ib_tunnels_update_work(struct work_struct *work);
+
+int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
+                         enum ib_qp_type qpt, struct ib_wc *wc,
+                         struct ib_grh *grh, struct ib_mad *mad);
+int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
+                        enum ib_qp_type dest_qpt, u16 pkey_index, u32 remote_qpn,
+                        u32 qkey, struct ib_ah_attr *attr, struct ib_mad *mad);
+__be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx);
+
+int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave,
+               struct ib_mad *mad);
+
+int mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id,
+               struct ib_mad *mad);
+
+void mlx4_ib_cm_paravirt_init(struct mlx4_ib_dev *dev);
+void mlx4_ib_cm_paravirt_clean(struct mlx4_ib_dev *dev, int slave_id);
+
+/* alias guid support */
+void mlx4_ib_init_alias_guid_work(struct mlx4_ib_dev *dev, int port);
+int mlx4_ib_init_alias_guid_service(struct mlx4_ib_dev *dev);
+void mlx4_ib_destroy_alias_guid_service(struct mlx4_ib_dev *dev);
+void mlx4_ib_invalidate_all_guid_record(struct mlx4_ib_dev *dev, int port);
+
+void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
+                                         int block_num,
+                                         u8 port_num, u8 *p_data);
+
+void mlx4_ib_update_cache_on_guid_change(struct mlx4_ib_dev *dev,
+                                        int block_num, u8 port_num,
+                                        u8 *p_data);
+
+int add_sysfs_port_mcg_attr(struct mlx4_ib_dev *device, int port_num,
+                           struct attribute *attr);
+void del_sysfs_port_mcg_attr(struct mlx4_ib_dev *device, int port_num,
+                            struct attribute *attr);
+ib_sa_comp_mask mlx4_ib_get_aguid_comp_mask_from_ix(int index);
+
+int mlx4_ib_device_register_sysfs(struct mlx4_ib_dev *device) ;
+
+void mlx4_ib_device_unregister_sysfs(struct mlx4_ib_dev *device);
+
+__be64 mlx4_ib_gen_node_guid(void);
+
+
 #endif /* MLX4_IB_H */
index f585eddef4b7d5c7921575cebaa508eefde4f934..19e0637220b988a4ef15f421227fdca13586e76e 100644 (file)
@@ -38,6 +38,7 @@
 #include <rdma/ib_cache.h>
 #include <rdma/ib_pack.h>
 #include <rdma/ib_addr.h>
+#include <rdma/ib_mad.h>
 
 #include <linux/mlx4/qp.h>
 
@@ -110,16 +111,62 @@ static struct mlx4_ib_sqp *to_msqp(struct mlx4_ib_qp *mqp)
        return container_of(mqp, struct mlx4_ib_sqp, qp);
 }
 
+/*
+ * Return nonzero if @qp is one of the master's tunnel QPs, i.e. its QPN
+ * lies in [base_tunnel_sqpn, base_tunnel_sqpn + 8 * MLX4_MFUNC_MAX).
+ * Always false on non-master functions.
+ */
+static int is_tunnel_qp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
+{
+       if (!mlx4_is_master(dev->dev))
+               return 0;
+
+       return qp->mqp.qpn >= dev->dev->phys_caps.base_tunnel_sqpn &&
+              qp->mqp.qpn < dev->dev->phys_caps.base_tunnel_sqpn +
+               8 * MLX4_MFUNC_MAX;
+}
+
 static int is_sqp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
 {
-       return qp->mqp.qpn >= dev->dev->caps.sqp_start &&
-               qp->mqp.qpn <= dev->dev->caps.sqp_start + 3;
+       int proxy_sqp = 0;
+       int real_sqp = 0;
+       int i;
+       /* PPF or Native -- real SQP */
+       real_sqp = ((mlx4_is_master(dev->dev) || !mlx4_is_mfunc(dev->dev)) &&
+                   qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn &&
+                   qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 3);
+       if (real_sqp)
+               return 1;
+       /* VF or PF -- proxy SQP */
+       if (mlx4_is_mfunc(dev->dev)) {
+               for (i = 0; i < dev->dev->caps.num_ports; i++) {
+                       if (qp->mqp.qpn == dev->dev->caps.qp0_proxy[i] ||
+                           qp->mqp.qpn == dev->dev->caps.qp1_proxy[i]) {
+                               proxy_sqp = 1;
+                               break;
+                       }
+               }
+       }
+       return proxy_sqp;
 }
 
+/* used for INIT/CLOSE port logic */
 static int is_qp0(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
 {
-       return qp->mqp.qpn >= dev->dev->caps.sqp_start &&
-               qp->mqp.qpn <= dev->dev->caps.sqp_start + 1;
+       int proxy_qp0 = 0;
+       int real_qp0 = 0;
+       int i;
+       /* PPF or Native -- real QP0 */
+       real_qp0 = ((mlx4_is_master(dev->dev) || !mlx4_is_mfunc(dev->dev)) &&
+                   qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn &&
+                   qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 1);
+       if (real_qp0)
+               return 1;
+       /* VF or PF -- proxy QP0 */
+       if (mlx4_is_mfunc(dev->dev)) {
+               for (i = 0; i < dev->dev->caps.num_ports; i++) {
+                       if (qp->mqp.qpn == dev->dev->caps.qp0_proxy[i]) {
+                               proxy_qp0 = 1;
+                               break;
+                       }
+               }
+       }
+       return proxy_qp0;
 }
 
 static void *get_wqe(struct mlx4_ib_qp *qp, int offset)
@@ -270,7 +317,7 @@ static void mlx4_ib_qp_event(struct mlx4_qp *qp, enum mlx4_event type)
        }
 }
 
-static int send_wqe_overhead(enum ib_qp_type type, u32 flags)
+static int send_wqe_overhead(enum mlx4_ib_qp_type type, u32 flags)
 {
        /*
         * UD WQEs must have a datagram segment.
@@ -279,19 +326,29 @@ static int send_wqe_overhead(enum ib_qp_type type, u32 flags)
         * header and space for the ICRC).
         */
        switch (type) {
-       case IB_QPT_UD:
+       case MLX4_IB_QPT_UD:
                return sizeof (struct mlx4_wqe_ctrl_seg) +
                        sizeof (struct mlx4_wqe_datagram_seg) +
                        ((flags & MLX4_IB_QP_LSO) ? MLX4_IB_LSO_HEADER_SPARE : 0);
-       case IB_QPT_UC:
+       case MLX4_IB_QPT_PROXY_SMI_OWNER:
+       case MLX4_IB_QPT_PROXY_SMI:
+       case MLX4_IB_QPT_PROXY_GSI:
+               return sizeof (struct mlx4_wqe_ctrl_seg) +
+                       sizeof (struct mlx4_wqe_datagram_seg) + 64;
+       case MLX4_IB_QPT_TUN_SMI_OWNER:
+       case MLX4_IB_QPT_TUN_GSI:
+               return sizeof (struct mlx4_wqe_ctrl_seg) +
+                       sizeof (struct mlx4_wqe_datagram_seg);
+
+       case MLX4_IB_QPT_UC:
                return sizeof (struct mlx4_wqe_ctrl_seg) +
                        sizeof (struct mlx4_wqe_raddr_seg);
-       case IB_QPT_RC:
+       case MLX4_IB_QPT_RC:
                return sizeof (struct mlx4_wqe_ctrl_seg) +
                        sizeof (struct mlx4_wqe_atomic_seg) +
                        sizeof (struct mlx4_wqe_raddr_seg);
-       case IB_QPT_SMI:
-       case IB_QPT_GSI:
+       case MLX4_IB_QPT_SMI:
+       case MLX4_IB_QPT_GSI:
                return sizeof (struct mlx4_wqe_ctrl_seg) +
                        ALIGN(MLX4_IB_UD_HEADER_SIZE +
                              DIV_ROUND_UP(MLX4_IB_UD_HEADER_SIZE,
@@ -345,7 +402,7 @@ static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
 }
 
 static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
-                             enum ib_qp_type type, struct mlx4_ib_qp *qp)
+                             enum mlx4_ib_qp_type type, struct mlx4_ib_qp *qp)
 {
        int s;
 
@@ -360,7 +417,8 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
         * For MLX transport we need 2 extra S/G entries:
         * one for the header and one for the checksum at the end
         */
-       if ((type == IB_QPT_SMI || type == IB_QPT_GSI) &&
+       if ((type == MLX4_IB_QPT_SMI || type == MLX4_IB_QPT_GSI ||
+            type & (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_TUN_SMI_OWNER)) &&
            cap->max_send_sge + 2 > dev->dev->caps.max_sq_sg)
                return -EINVAL;
 
@@ -404,7 +462,9 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
         */
        if (dev->dev->caps.fw_ver >= MLX4_FW_VER_WQE_CTRL_NEC &&
            qp->sq_signal_bits && BITS_PER_LONG == 64 &&
-           type != IB_QPT_SMI && type != IB_QPT_GSI)
+           type != MLX4_IB_QPT_SMI && type != MLX4_IB_QPT_GSI &&
+           !(type & (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_PROXY_SMI |
+                     MLX4_IB_QPT_PROXY_GSI | MLX4_IB_QPT_TUN_SMI_OWNER)))
                qp->sq.wqe_shift = ilog2(64);
        else
                qp->sq.wqe_shift = ilog2(roundup_pow_of_two(s));
@@ -476,6 +536,54 @@ static int set_user_sq_size(struct mlx4_ib_dev *dev,
        return 0;
 }
 
+/*
+ * Allocate the receive staging buffers used by proxy/tunnel special
+ * QPs: one DMA-mapped mlx4_ib_proxy_sqp_hdr per RQ WQE.
+ * Returns 0 on success or -ENOMEM; on failure everything allocated so
+ * far has been unmapped and freed and qp->sqp_proxy_rcv is NULL.
+ */
+static int alloc_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp)
+{
+       int i;
+
+       qp->sqp_proxy_rcv =
+               kmalloc(sizeof (struct mlx4_ib_buf) * qp->rq.wqe_cnt,
+                       GFP_KERNEL);
+       if (!qp->sqp_proxy_rcv)
+               return -ENOMEM;
+       for (i = 0; i < qp->rq.wqe_cnt; i++) {
+               qp->sqp_proxy_rcv[i].addr =
+                       kmalloc(sizeof (struct mlx4_ib_proxy_sqp_hdr),
+                               GFP_KERNEL);
+               if (!qp->sqp_proxy_rcv[i].addr)
+                       goto err;
+               qp->sqp_proxy_rcv[i].map =
+                       ib_dma_map_single(dev, qp->sqp_proxy_rcv[i].addr,
+                                         sizeof (struct mlx4_ib_proxy_sqp_hdr),
+                                         DMA_FROM_DEVICE);
+               /* Fix: DMA mapping can fail; an unchecked bogus handle would
+                * later be posted to HW and "unmapped" in cleanup. */
+               if (ib_dma_mapping_error(dev, qp->sqp_proxy_rcv[i].map)) {
+                       kfree(qp->sqp_proxy_rcv[i].addr);
+                       goto err;
+               }
+       }
+       return 0;
+
+err:
+       /* Unwind: entries [0, i) are both allocated and mapped. */
+       while (i > 0) {
+               --i;
+               ib_dma_unmap_single(dev, qp->sqp_proxy_rcv[i].map,
+                                   sizeof (struct mlx4_ib_proxy_sqp_hdr),
+                                   DMA_FROM_DEVICE);
+               kfree(qp->sqp_proxy_rcv[i].addr);
+       }
+       kfree(qp->sqp_proxy_rcv);
+       qp->sqp_proxy_rcv = NULL;
+       return -ENOMEM;
+}
+
+/*
+ * Undo alloc_proxy_bufs(): unmap and free every per-WQE proxy receive
+ * buffer, then free the array itself.
+ */
+static void free_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp)
+{
+       int i;
+
+       for (i = 0; i < qp->rq.wqe_cnt; i++) {
+               ib_dma_unmap_single(dev, qp->sqp_proxy_rcv[i].map,
+                                   sizeof (struct mlx4_ib_proxy_sqp_hdr),
+                                   DMA_FROM_DEVICE);
+               kfree(qp->sqp_proxy_rcv[i].addr);
+       }
+       kfree(qp->sqp_proxy_rcv);
+}
+
 static int qp_has_rq(struct ib_qp_init_attr *attr)
 {
        if (attr->qp_type == IB_QPT_XRC_INI || attr->qp_type == IB_QPT_XRC_TGT)
@@ -486,10 +594,67 @@ static int qp_has_rq(struct ib_qp_init_attr *attr)
 
 static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
                            struct ib_qp_init_attr *init_attr,
-                           struct ib_udata *udata, int sqpn, struct mlx4_ib_qp *qp)
+                           struct ib_udata *udata, int sqpn, struct mlx4_ib_qp **caller_qp)
 {
        int qpn;
        int err;
+       struct mlx4_ib_sqp *sqp;
+       struct mlx4_ib_qp *qp;
+       enum mlx4_ib_qp_type qp_type = (enum mlx4_ib_qp_type) init_attr->qp_type;
+
+       /* When tunneling special qps, we use a plain UD qp */
+       if (sqpn) {
+               if (mlx4_is_mfunc(dev->dev) &&
+                   (!mlx4_is_master(dev->dev) ||
+                    !(init_attr->create_flags & MLX4_IB_SRIOV_SQP))) {
+                       if (init_attr->qp_type == IB_QPT_GSI)
+                               qp_type = MLX4_IB_QPT_PROXY_GSI;
+                       else if (mlx4_is_master(dev->dev))
+                               qp_type = MLX4_IB_QPT_PROXY_SMI_OWNER;
+                       else
+                               qp_type = MLX4_IB_QPT_PROXY_SMI;
+               }
+               qpn = sqpn;
+               /* add extra sg entry for tunneling */
+               init_attr->cap.max_recv_sge++;
+       } else if (init_attr->create_flags & MLX4_IB_SRIOV_TUNNEL_QP) {
+               struct mlx4_ib_qp_tunnel_init_attr *tnl_init =
+                       container_of(init_attr,
+                                    struct mlx4_ib_qp_tunnel_init_attr, init_attr);
+               if ((tnl_init->proxy_qp_type != IB_QPT_SMI &&
+                    tnl_init->proxy_qp_type != IB_QPT_GSI)   ||
+                   !mlx4_is_master(dev->dev))
+                       return -EINVAL;
+               if (tnl_init->proxy_qp_type == IB_QPT_GSI)
+                       qp_type = MLX4_IB_QPT_TUN_GSI;
+               else if (tnl_init->slave == mlx4_master_func_num(dev->dev))
+                       qp_type = MLX4_IB_QPT_TUN_SMI_OWNER;
+               else
+                       qp_type = MLX4_IB_QPT_TUN_SMI;
+               /* we are definitely in the PPF here, since we are creating
+                * tunnel QPs. base_tunnel_sqpn is therefore valid. */
+               qpn = dev->dev->phys_caps.base_tunnel_sqpn + 8 * tnl_init->slave
+                       + tnl_init->proxy_qp_type * 2 + tnl_init->port - 1;
+               sqpn = qpn;
+       }
+
+       if (!*caller_qp) {
+               if (qp_type == MLX4_IB_QPT_SMI || qp_type == MLX4_IB_QPT_GSI ||
+                   (qp_type & (MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_SMI_OWNER |
+                               MLX4_IB_QPT_PROXY_GSI | MLX4_IB_QPT_TUN_SMI_OWNER))) {
+                       sqp = kzalloc(sizeof (struct mlx4_ib_sqp), GFP_KERNEL);
+                       if (!sqp)
+                               return -ENOMEM;
+                       qp = &sqp->qp;
+               } else {
+                       qp = kzalloc(sizeof (struct mlx4_ib_qp), GFP_KERNEL);
+                       if (!qp)
+                               return -ENOMEM;
+               }
+       } else
+               qp = *caller_qp;
+
+       qp->mlx4_ib_qp_type = qp_type;
 
        mutex_init(&qp->mutex);
        spin_lock_init(&qp->sq.lock);
@@ -550,7 +715,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
                if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)
                        qp->flags |= MLX4_IB_QP_LSO;
 
-               err = set_kernel_sq_size(dev, &init_attr->cap, init_attr->qp_type, qp);
+               err = set_kernel_sq_size(dev, &init_attr->cap, qp_type, qp);
                if (err)
                        goto err;
 
@@ -586,7 +751,13 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
        }
 
        if (sqpn) {
-               qpn = sqpn;
+               if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER |
+                   MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI)) {
+                       if (alloc_proxy_bufs(pd->device, qp)) {
+                               err = -ENOMEM;
+                               goto err_wrid;
+                       }
+               }
        } else {
                /* Raw packet QPNs must be aligned to 8 bits. If not, the WQE
                 * BlueFlame setup flow wrongly causes VLAN insertion. */
@@ -595,7 +766,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
                else
                        err = mlx4_qp_reserve_range(dev->dev, 1, 1, &qpn);
                if (err)
-                       goto err_wrid;
+                       goto err_proxy;
        }
 
        err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp);
@@ -613,13 +784,16 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
        qp->doorbell_qpn = swab32(qp->mqp.qpn << 8);
 
        qp->mqp.event = mlx4_ib_qp_event;
-
+       if (!*caller_qp)
+               *caller_qp = qp;
        return 0;
 
 err_qpn:
        if (!sqpn)
                mlx4_qp_release_range(dev->dev, qpn, 1);
-
+err_proxy:
+       if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI)
+               free_proxy_bufs(pd->device, qp);
 err_wrid:
        if (pd->uobject) {
                if (qp_has_rq(init_attr))
@@ -643,6 +817,8 @@ err_db:
                mlx4_db_free(dev->dev, &qp->db);
 
 err:
+       if (!*caller_qp)
+               kfree(qp);
        return err;
 }
 
@@ -755,7 +931,7 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
 
        mlx4_qp_free(dev->dev, &qp->mqp);
 
-       if (!is_sqp(dev, qp))
+       if (!is_sqp(dev, qp) && !is_tunnel_qp(dev, qp))
                mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1);
 
        mlx4_mtt_cleanup(dev->dev, &qp->mtt);
@@ -768,6 +944,9 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
        } else {
                kfree(qp->sq.wrid);
                kfree(qp->rq.wrid);
+               if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER |
+                   MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI))
+                       free_proxy_bufs(&dev->ib_dev, qp);
                mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);
                if (qp->rq.wqe_cnt)
                        mlx4_db_free(dev->dev, &qp->db);
@@ -776,25 +955,46 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
        del_gid_entries(qp);
 }
 
+static u32 get_sqp_num(struct mlx4_ib_dev *dev, struct ib_qp_init_attr *attr)
+{
+       /* Native or PPF */
+       if (!mlx4_is_mfunc(dev->dev) ||
+           (mlx4_is_master(dev->dev) &&
+            attr->create_flags & MLX4_IB_SRIOV_SQP)) {
+               return  dev->dev->phys_caps.base_sqpn +
+                       (attr->qp_type == IB_QPT_SMI ? 0 : 2) +
+                       attr->port_num - 1;
+       }
+       /* PF or VF -- creating proxies */
+       if (attr->qp_type == IB_QPT_SMI)
+               return dev->dev->caps.qp0_proxy[attr->port_num - 1];
+       else
+               return dev->dev->caps.qp1_proxy[attr->port_num - 1];
+}
+
 struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
                                struct ib_qp_init_attr *init_attr,
                                struct ib_udata *udata)
 {
-       struct mlx4_ib_sqp *sqp;
-       struct mlx4_ib_qp *qp;
+       struct mlx4_ib_qp *qp = NULL;
        int err;
        u16 xrcdn = 0;
 
        /*
-        * We only support LSO and multicast loopback blocking, and
-        * only for kernel UD QPs.
+        * We only support LSO, vendor flag1, and multicast loopback blocking,
+        * and only for kernel UD QPs.
         */
-       if (init_attr->create_flags & ~(IB_QP_CREATE_IPOIB_UD_LSO |
-                                       IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK))
+       if (init_attr->create_flags & ~(MLX4_IB_QP_LSO |
+                                       MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK |
+                                       MLX4_IB_SRIOV_TUNNEL_QP | MLX4_IB_SRIOV_SQP))
                return ERR_PTR(-EINVAL);
 
        if (init_attr->create_flags &&
-           (udata || init_attr->qp_type != IB_QPT_UD))
+           (udata ||
+            ((init_attr->create_flags & ~MLX4_IB_SRIOV_SQP) &&
+             init_attr->qp_type != IB_QPT_UD) ||
+            ((init_attr->create_flags & MLX4_IB_SRIOV_SQP) &&
+             init_attr->qp_type > IB_QPT_GSI)))
                return ERR_PTR(-EINVAL);
 
        switch (init_attr->qp_type) {
@@ -810,18 +1010,17 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
                /* fall through */
        case IB_QPT_RC:
        case IB_QPT_UC:
-       case IB_QPT_UD:
        case IB_QPT_RAW_PACKET:
-       {
                qp = kzalloc(sizeof *qp, GFP_KERNEL);
                if (!qp)
                        return ERR_PTR(-ENOMEM);
-
-               err = create_qp_common(to_mdev(pd->device), pd, init_attr, udata, 0, qp);
-               if (err) {
-                       kfree(qp);
+               /* fall through */
+       case IB_QPT_UD:
+       {
+               err = create_qp_common(to_mdev(pd->device), pd, init_attr,
+                                      udata, 0, &qp);
+               if (err)
                        return ERR_PTR(err);
-               }
 
                qp->ibqp.qp_num = qp->mqp.qpn;
                qp->xrcdn = xrcdn;
@@ -835,21 +1034,11 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
                if (udata)
                        return ERR_PTR(-EINVAL);
 
-               sqp = kzalloc(sizeof *sqp, GFP_KERNEL);
-               if (!sqp)
-                       return ERR_PTR(-ENOMEM);
-
-               qp = &sqp->qp;
-
                err = create_qp_common(to_mdev(pd->device), pd, init_attr, udata,
-                                      to_mdev(pd->device)->dev->caps.sqp_start +
-                                      (init_attr->qp_type == IB_QPT_SMI ? 0 : 2) +
-                                      init_attr->port_num - 1,
-                                      qp);
-               if (err) {
-                       kfree(sqp);
+                                      get_sqp_num(to_mdev(pd->device), init_attr),
+                                      &qp);
+               if (err)
                        return ERR_PTR(err);
-               }
 
                qp->port        = init_attr->port_num;
                qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 : 1;
@@ -884,18 +1073,27 @@ int mlx4_ib_destroy_qp(struct ib_qp *qp)
        return 0;
 }
 
-static int to_mlx4_st(enum ib_qp_type type)
+static int to_mlx4_st(struct mlx4_ib_dev *dev, enum mlx4_ib_qp_type type)
 {
        switch (type) {
-       case IB_QPT_RC:         return MLX4_QP_ST_RC;
-       case IB_QPT_UC:         return MLX4_QP_ST_UC;
-       case IB_QPT_UD:         return MLX4_QP_ST_UD;
-       case IB_QPT_XRC_INI:
-       case IB_QPT_XRC_TGT:    return MLX4_QP_ST_XRC;
-       case IB_QPT_SMI:
-       case IB_QPT_GSI:
-       case IB_QPT_RAW_PACKET: return MLX4_QP_ST_MLX;
-       default:                return -1;
+       case MLX4_IB_QPT_RC:            return MLX4_QP_ST_RC;
+       case MLX4_IB_QPT_UC:            return MLX4_QP_ST_UC;
+       case MLX4_IB_QPT_UD:            return MLX4_QP_ST_UD;
+       case MLX4_IB_QPT_XRC_INI:
+       case MLX4_IB_QPT_XRC_TGT:       return MLX4_QP_ST_XRC;
+       case MLX4_IB_QPT_SMI:
+       case MLX4_IB_QPT_GSI:
+       case MLX4_IB_QPT_RAW_PACKET:    return MLX4_QP_ST_MLX;
+
+       case MLX4_IB_QPT_PROXY_SMI_OWNER:
+       case MLX4_IB_QPT_TUN_SMI_OWNER: return (mlx4_is_mfunc(dev->dev) ?
+                                               MLX4_QP_ST_MLX : -1);
+       case MLX4_IB_QPT_PROXY_SMI:
+       case MLX4_IB_QPT_TUN_SMI:
+       case MLX4_IB_QPT_PROXY_GSI:
+       case MLX4_IB_QPT_TUN_GSI:       return (mlx4_is_mfunc(dev->dev) ?
+                                               MLX4_QP_ST_UD : -1);
+       default:                        return -1;
        }
 }
 
@@ -1043,7 +1241,7 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
                return -ENOMEM;
 
        context->flags = cpu_to_be32((to_mlx4_state(new_state) << 28) |
-                                    (to_mlx4_st(ibqp->qp_type) << 16));
+                                    (to_mlx4_st(dev, qp->mlx4_ib_qp_type) << 16));
 
        if (!(attr_mask & IB_QP_PATH_MIG_STATE))
                context->flags |= cpu_to_be32(MLX4_QP_PM_MIGRATED << 11);
@@ -1121,13 +1319,16 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
        }
 
        if (attr_mask & IB_QP_PKEY_INDEX) {
+               if (qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV)
+                       context->pri_path.disable_pkey_check = 0x40;
                context->pri_path.pkey_index = attr->pkey_index;
                optpar |= MLX4_QP_OPTPAR_PKEY_INDEX;
        }
 
        if (attr_mask & IB_QP_AV) {
                if (mlx4_set_path(dev, &attr->ah_attr, &context->pri_path,
-                                 attr_mask & IB_QP_PORT ? attr->port_num : qp->port))
+                                 attr_mask & IB_QP_PORT ?
+                                 attr->port_num : qp->port))
                        goto out;
 
                optpar |= (MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH |
@@ -1210,8 +1411,24 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
        if (attr_mask & IB_QP_RQ_PSN)
                context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);
 
+       /* proxy and tunnel qp qkeys will be changed in modify-qp wrappers */
        if (attr_mask & IB_QP_QKEY) {
-               context->qkey = cpu_to_be32(attr->qkey);
+               if (qp->mlx4_ib_qp_type &
+                   (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_TUN_SMI_OWNER))
+                       context->qkey = cpu_to_be32(IB_QP_SET_QKEY);
+               else {
+                       if (mlx4_is_mfunc(dev->dev) &&
+                           !(qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV) &&
+                           (attr->qkey & MLX4_RESERVED_QKEY_MASK) ==
+                           MLX4_RESERVED_QKEY_BASE) {
+                               pr_err("Cannot use reserved QKEY"
+                                      " 0x%x (range 0xffff0000..0xffffffff"
+                                      " is reserved)\n", attr->qkey);
+                               err = -EINVAL;
+                               goto out;
+                       }
+                       context->qkey = cpu_to_be32(attr->qkey);
+               }
                optpar |= MLX4_QP_OPTPAR_Q_KEY;
        }
 
@@ -1227,10 +1444,17 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
             ibqp->qp_type == IB_QPT_UD ||
             ibqp->qp_type == IB_QPT_RAW_PACKET)) {
                context->pri_path.sched_queue = (qp->port - 1) << 6;
-               if (is_qp0(dev, qp))
+               if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_SMI ||
+                   qp->mlx4_ib_qp_type &
+                   (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_TUN_SMI_OWNER)) {
                        context->pri_path.sched_queue |= MLX4_IB_DEFAULT_QP0_SCHED_QUEUE;
-               else
+                       if (qp->mlx4_ib_qp_type != MLX4_IB_QPT_SMI)
+                               context->pri_path.fl = 0x80;
+               } else {
+                       if (qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV)
+                               context->pri_path.fl = 0x80;
                        context->pri_path.sched_queue |= MLX4_IB_DEFAULT_SCHED_QUEUE;
+               }
        }
 
        if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD  &&
@@ -1346,7 +1570,7 @@ int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
        }
 
        if ((attr_mask & IB_QP_PORT) &&
-           (attr->port_num == 0 || attr->port_num > dev->dev->caps.num_ports)) {
+           (attr->port_num == 0 || attr->port_num > dev->num_ports)) {
                pr_debug("qpn 0x%x: invalid port number (%d) specified "
                         "for transition %d to %d. qp_type %d\n",
                         ibqp->qp_num, attr->port_num, cur_state,
@@ -1400,6 +1624,114 @@ out:
        return err;
 }
 
+static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp,
+                                 struct ib_send_wr *wr,
+                                 void *wqe, unsigned *mlx_seg_len)
+{
+       struct mlx4_ib_dev *mdev = to_mdev(sqp->qp.ibqp.device);
+       struct ib_device *ib_dev = &mdev->ib_dev;
+       struct mlx4_wqe_mlx_seg *mlx = wqe;
+       struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx;
+       struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah);
+       u16 pkey;
+       u32 qkey;
+       int send_size;
+       int header_size;
+       int spc;
+       int i;
+
+       if (wr->opcode != IB_WR_SEND)
+               return -EINVAL;
+
+       send_size = 0;
+
+       for (i = 0; i < wr->num_sge; ++i)
+               send_size += wr->sg_list[i].length;
+
+       /* for proxy-qp0 sends, need to add in size of tunnel header */
+       /* for tunnel-qp0 sends, tunnel header is already in s/g list */
+       if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_SMI_OWNER)
+               send_size += sizeof (struct mlx4_ib_tunnel_header);
+
+       ib_ud_header_init(send_size, 1, 0, 0, 0, 0, &sqp->ud_header);
+
+       if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_SMI_OWNER) {
+               sqp->ud_header.lrh.service_level =
+                       be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28;
+               sqp->ud_header.lrh.destination_lid =
+                       cpu_to_be16(ah->av.ib.g_slid & 0x7f);
+               sqp->ud_header.lrh.source_lid =
+                       cpu_to_be16(ah->av.ib.g_slid & 0x7f);
+       }
+
+       mlx->flags &= cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);
+
+       /* force loopback */
+       mlx->flags |= cpu_to_be32(MLX4_WQE_MLX_VL15 | 0x1 | MLX4_WQE_MLX_SLR);
+       mlx->rlid = sqp->ud_header.lrh.destination_lid;
+
+       sqp->ud_header.lrh.virtual_lane    = 0;
+       sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED);
+       ib_get_cached_pkey(ib_dev, sqp->qp.port, 0, &pkey);
+       sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
+       if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_TUN_SMI_OWNER)
+               sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
+       else
+               sqp->ud_header.bth.destination_qpn =
+                       cpu_to_be32(mdev->dev->caps.qp0_tunnel[sqp->qp.port - 1]);
+
+       sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
+       if (mlx4_get_parav_qkey(mdev->dev, sqp->qp.mqp.qpn, &qkey))
+               return -EINVAL;
+       sqp->ud_header.deth.qkey = cpu_to_be32(qkey);
+       sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.mqp.qpn);
+
+       sqp->ud_header.bth.opcode        = IB_OPCODE_UD_SEND_ONLY;
+       sqp->ud_header.immediate_present = 0;
+
+       header_size = ib_ud_header_pack(&sqp->ud_header, sqp->header_buf);
+
+       /*
+        * Inline data segments may not cross a 64 byte boundary.  If
+        * our UD header is bigger than the space available up to the
+        * next 64 byte boundary in the WQE, use two inline data
+        * segments to hold the UD header.
+        */
+       spc = MLX4_INLINE_ALIGN -
+             ((unsigned long) (inl + 1) & (MLX4_INLINE_ALIGN - 1));
+       if (header_size <= spc) {
+               inl->byte_count = cpu_to_be32(1 << 31 | header_size);
+               memcpy(inl + 1, sqp->header_buf, header_size);
+               i = 1;
+       } else {
+               inl->byte_count = cpu_to_be32(1 << 31 | spc);
+               memcpy(inl + 1, sqp->header_buf, spc);
+
+               inl = (void *) (inl + 1) + spc;
+               memcpy(inl + 1, sqp->header_buf + spc, header_size - spc);
+               /*
+                * Need a barrier here to make sure all the data is
+                * visible before the byte_count field is set.
+                * Otherwise the HCA prefetcher could grab the 64-byte
+                * chunk with this inline segment and get a valid (!=
+                * 0xffffffff) byte count but stale data, and end up
+                * generating a packet with bad headers.
+                *
+                * The first inline segment's byte_count field doesn't
+                * need a barrier, because it comes after a
+                * control/MLX segment and therefore is at an offset
+                * of 16 mod 64.
+                */
+               wmb();
+               inl->byte_count = cpu_to_be32(1 << 31 | (header_size - spc));
+               i = 2;
+       }
+
+       *mlx_seg_len =
+       ALIGN(i * sizeof (struct mlx4_wqe_inline_seg) + header_size, 16);
+       return 0;
+}
+
 static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
                            void *wqe, unsigned *mlx_seg_len)
 {
@@ -1418,6 +1750,7 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
        int is_vlan = 0;
        int is_grh;
        u16 vlan;
+       int err = 0;
 
        send_size = 0;
        for (i = 0; i < wr->num_sge; ++i)
@@ -1426,8 +1759,24 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
        is_eth = rdma_port_get_link_layer(sqp->qp.ibqp.device, sqp->qp.port) == IB_LINK_LAYER_ETHERNET;
        is_grh = mlx4_ib_ah_grh_present(ah);
        if (is_eth) {
-               ib_get_cached_gid(ib_dev, be32_to_cpu(ah->av.ib.port_pd) >> 24,
-                                 ah->av.ib.gid_index, &sgid);
+               if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) {
+                       /* When multi-function is enabled, the ib_core gid
+                        * indexes don't necessarily match the hw ones, so
+                        * we must use our own cache */
+                       sgid.global.subnet_prefix =
+                               to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
+                               subnet_prefix;
+                       sgid.global.interface_id =
+                               to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
+                               guid_cache[ah->av.ib.gid_index];
+               } else  {
+                       err = ib_get_cached_gid(ib_dev,
+                                               be32_to_cpu(ah->av.ib.port_pd) >> 24,
+                                               ah->av.ib.gid_index, &sgid);
+                       if (err)
+                               return err;
+               }
+
                vlan = rdma_get_vlan_id(&sgid);
                is_vlan = vlan < 0x1000;
        }
@@ -1446,8 +1795,21 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
                sqp->ud_header.grh.flow_label    =
                        ah->av.ib.sl_tclass_flowlabel & cpu_to_be32(0xfffff);
                sqp->ud_header.grh.hop_limit     = ah->av.ib.hop_limit;
-               ib_get_cached_gid(ib_dev, be32_to_cpu(ah->av.ib.port_pd) >> 24,
-                                 ah->av.ib.gid_index, &sqp->ud_header.grh.source_gid);
+               if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) {
+                       /* When multi-function is enabled, the ib_core gid
+                        * indexes don't necessarily match the hw ones, so
+                        * we must use our own cache */
+                       sqp->ud_header.grh.source_gid.global.subnet_prefix =
+                               to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
+                                                      subnet_prefix;
+                       sqp->ud_header.grh.source_gid.global.interface_id =
+                               to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
+                                              guid_cache[ah->av.ib.gid_index];
+               } else
+                       ib_get_cached_gid(ib_dev,
+                                         be32_to_cpu(ah->av.ib.port_pd) >> 24,
+                                         ah->av.ib.gid_index,
+                                         &sqp->ud_header.grh.source_gid);
                memcpy(sqp->ud_header.grh.destination_gid.raw,
                       ah->av.ib.dgid, 16);
        }
@@ -1459,6 +1821,8 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
                                          (sqp->ud_header.lrh.destination_lid ==
                                           IB_LID_PERMISSIVE ? MLX4_WQE_MLX_SLR : 0) |
                                          (sqp->ud_header.lrh.service_level << 8));
+               if (ah->av.ib.port_pd & cpu_to_be32(0x80000000))
+                       mlx->flags |= cpu_to_be32(0x1); /* force loopback */
                mlx->rlid = sqp->ud_header.lrh.destination_lid;
        }
 
@@ -1667,6 +2031,63 @@ static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg,
        memcpy(dseg->mac, to_mah(wr->wr.ud.ah)->av.eth.mac, 6);
 }
 
+static void set_tunnel_datagram_seg(struct mlx4_ib_dev *dev,
+                                   struct mlx4_wqe_datagram_seg *dseg,
+                                   struct ib_send_wr *wr, enum ib_qp_type qpt)
+{
+       union mlx4_ext_av *av = &to_mah(wr->wr.ud.ah)->av;
+       struct mlx4_av sqp_av = {0};
+       int port = *((u8 *) &av->ib.port_pd) & 0x3;
+
+       /* force loopback */
+       sqp_av.port_pd = av->ib.port_pd | cpu_to_be32(0x80000000);
+       sqp_av.g_slid = av->ib.g_slid & 0x7f; /* no GRH */
+       sqp_av.sl_tclass_flowlabel = av->ib.sl_tclass_flowlabel &
+                       cpu_to_be32(0xf0000000);
+
+       memcpy(dseg->av, &sqp_av, sizeof (struct mlx4_av));
+       /* This function used only for sending on QP1 proxies */
+       dseg->dqpn = cpu_to_be32(dev->dev->caps.qp1_tunnel[port - 1]);
+       /* Use QKEY from the QP context, which is set by master */
+       dseg->qkey = cpu_to_be32(IB_QP_SET_QKEY);
+}
+
+static void build_tunnel_header(struct ib_send_wr *wr, void *wqe, unsigned *mlx_seg_len)
+{
+       struct mlx4_wqe_inline_seg *inl = wqe;
+       struct mlx4_ib_tunnel_header hdr;
+       struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah);
+       int spc;
+       int i;
+
+       memcpy(&hdr.av, &ah->av, sizeof hdr.av);
+       hdr.remote_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
+       hdr.pkey_index = cpu_to_be16(wr->wr.ud.pkey_index);
+       hdr.qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
+
+       spc = MLX4_INLINE_ALIGN -
+               ((unsigned long) (inl + 1) & (MLX4_INLINE_ALIGN - 1));
+       if (sizeof (hdr) <= spc) {
+               memcpy(inl + 1, &hdr, sizeof (hdr));
+               wmb();
+               inl->byte_count = cpu_to_be32(1 << 31 | sizeof (hdr));
+               i = 1;
+       } else {
+               memcpy(inl + 1, &hdr, spc);
+               wmb();
+               inl->byte_count = cpu_to_be32(1 << 31 | spc);
+
+               inl = (void *) (inl + 1) + spc;
+               memcpy(inl + 1, (void *) &hdr + spc, sizeof (hdr) - spc);
+               wmb();
+               inl->byte_count = cpu_to_be32(1 << 31 | (sizeof (hdr) - spc));
+               i = 2;
+       }
+
+       *mlx_seg_len =
+               ALIGN(i * sizeof (struct mlx4_wqe_inline_seg) + sizeof (hdr), 16);
+}
+
 static void set_mlx_icrc_seg(void *dseg)
 {
        u32 *t = dseg;
@@ -1748,6 +2169,13 @@ static __be32 send_ieth(struct ib_send_wr *wr)
        }
 }
 
+static void add_zero_len_inline(void *wqe)
+{
+       struct mlx4_wqe_inline_seg *inl = wqe;
+       memset(wqe, 0, 16);
+       inl->byte_count = cpu_to_be32(1 << 31);
+}
+
 int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                      struct ib_send_wr **bad_wr)
 {
@@ -1806,9 +2234,9 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                wqe += sizeof *ctrl;
                size = sizeof *ctrl / 16;
 
-               switch (ibqp->qp_type) {
-               case IB_QPT_RC:
-               case IB_QPT_UC:
+               switch (qp->mlx4_ib_qp_type) {
+               case MLX4_IB_QPT_RC:
+               case MLX4_IB_QPT_UC:
                        switch (wr->opcode) {
                        case IB_WR_ATOMIC_CMP_AND_SWP:
                        case IB_WR_ATOMIC_FETCH_AND_ADD:
@@ -1869,7 +2297,25 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                        }
                        break;
 
-               case IB_QPT_UD:
+               case MLX4_IB_QPT_TUN_SMI_OWNER:
+                       err =  build_sriov_qp0_header(to_msqp(qp), wr, ctrl, &seglen);
+                       if (unlikely(err)) {
+                               *bad_wr = wr;
+                               goto out;
+                       }
+                       wqe  += seglen;
+                       size += seglen / 16;
+                       break;
+               case MLX4_IB_QPT_TUN_SMI:
+               case MLX4_IB_QPT_TUN_GSI:
+                       /* this is a UD qp used in MAD responses to slaves. */
+                       set_datagram_seg(wqe, wr);
+                       /* set the forced-loopback bit in the data seg av */
+                       *(__be32 *) wqe |= cpu_to_be32(0x80000000);
+                       wqe  += sizeof (struct mlx4_wqe_datagram_seg);
+                       size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
+                       break;
+               case MLX4_IB_QPT_UD:
                        set_datagram_seg(wqe, wr);
                        wqe  += sizeof (struct mlx4_wqe_datagram_seg);
                        size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
@@ -1886,8 +2332,47 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                        }
                        break;
 
-               case IB_QPT_SMI:
-               case IB_QPT_GSI:
+               case MLX4_IB_QPT_PROXY_SMI_OWNER:
+                       if (unlikely(!mlx4_is_master(to_mdev(ibqp->device)->dev))) {
+                               err = -ENOSYS;
+                               *bad_wr = wr;
+                               goto out;
+                       }
+                       err = build_sriov_qp0_header(to_msqp(qp), wr, ctrl, &seglen);
+                       if (unlikely(err)) {
+                               *bad_wr = wr;
+                               goto out;
+                       }
+                       wqe  += seglen;
+                       size += seglen / 16;
+                       /* to start tunnel header on a cache-line boundary */
+                       add_zero_len_inline(wqe);
+                       wqe += 16;
+                       size++;
+                       build_tunnel_header(wr, wqe, &seglen);
+                       wqe  += seglen;
+                       size += seglen / 16;
+                       break;
+               case MLX4_IB_QPT_PROXY_SMI:
+                       /* don't allow QP0 sends on guests */
+                       err = -ENOSYS;
+                       *bad_wr = wr;
+                       goto out;
+               case MLX4_IB_QPT_PROXY_GSI:
+                       /* If we are tunneling special qps, this is a UD qp.
+                        * In this case we first add a UD segment targeting
+                        * the tunnel qp, and then add a header with address
+                        * information */
+                       set_tunnel_datagram_seg(to_mdev(ibqp->device), wqe, wr, ibqp->qp_type);
+                       wqe  += sizeof (struct mlx4_wqe_datagram_seg);
+                       size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
+                       build_tunnel_header(wr, wqe, &seglen);
+                       wqe  += seglen;
+                       size += seglen / 16;
+                       break;
+
+               case MLX4_IB_QPT_SMI:
+               case MLX4_IB_QPT_GSI:
                        err = build_mlx_header(to_msqp(qp), wr, ctrl, &seglen);
                        if (unlikely(err)) {
                                *bad_wr = wr;
@@ -1913,8 +2398,10 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                size += wr->num_sge * (sizeof (struct mlx4_wqe_data_seg) / 16);
 
                /* Add one more inline data segment for ICRC for MLX sends */
-               if (unlikely(qp->ibqp.qp_type == IB_QPT_SMI ||
-                            qp->ibqp.qp_type == IB_QPT_GSI)) {
+               if (unlikely(qp->mlx4_ib_qp_type == MLX4_IB_QPT_SMI ||
+                            qp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI ||
+                            qp->mlx4_ib_qp_type &
+                            (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_TUN_SMI_OWNER))) {
                        set_mlx_icrc_seg(dseg + 1);
                        size += sizeof (struct mlx4_wqe_data_seg) / 16;
                }
@@ -2006,8 +2493,10 @@ int mlx4_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
        int err = 0;
        int nreq;
        int ind;
+       int max_gs;
        int i;
 
+       max_gs = qp->rq.max_gs;
        spin_lock_irqsave(&qp->rq.lock, flags);
 
        ind = qp->rq.head & (qp->rq.wqe_cnt - 1);
@@ -2027,10 +2516,25 @@ int mlx4_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 
                scat = get_recv_wqe(qp, ind);
 
+               if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER |
+                   MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI)) {
+                       ib_dma_sync_single_for_device(ibqp->device,
+                                                     qp->sqp_proxy_rcv[ind].map,
+                                                     sizeof (struct mlx4_ib_proxy_sqp_hdr),
+                                                     DMA_FROM_DEVICE);
+                       scat->byte_count =
+                               cpu_to_be32(sizeof (struct mlx4_ib_proxy_sqp_hdr));
+                       /* use dma lkey from upper layer entry */
+                       scat->lkey = cpu_to_be32(wr->sg_list->lkey);
+                       scat->addr = cpu_to_be64(qp->sqp_proxy_rcv[ind].map);
+                       scat++;
+                       max_gs--;
+               }
+
                for (i = 0; i < wr->num_sge; ++i)
                        __set_data_seg(scat + i, wr->sg_list + i);
 
-               if (i < qp->rq.max_gs) {
+               if (i < max_gs) {
                        scat[i].byte_count = 0;
                        scat[i].lkey       = cpu_to_be32(MLX4_INVALID_LKEY);
                        scat[i].addr       = 0;
@@ -2225,6 +2729,10 @@ done:
        if (qp->flags & MLX4_IB_QP_LSO)
                qp_init_attr->create_flags |= IB_QP_CREATE_IPOIB_UD_LSO;
 
+       qp_init_attr->sq_sig_type =
+               qp->sq_signal_bits == cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE) ?
+               IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
+
 out:
        mutex_unlock(&qp->mutex);
        return err;
diff --git a/drivers/infiniband/hw/mlx4/sysfs.c b/drivers/infiniband/hw/mlx4/sysfs.c
new file mode 100644 (file)
index 0000000..5b2a01d
--- /dev/null
@@ -0,0 +1,794 @@
+/*
+ * Copyright (c) 2012 Mellanox Technologies.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*#include "core_priv.h"*/
+#include "mlx4_ib.h"
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/stat.h>
+
+#include <rdma/ib_mad.h>
+/*show_admin_alias_guid returns the administratively assigned value of that GUID.
+ * Values returned in buf parameter string:
+ *     0                       - requests opensm to assign a value.
+ *     ffffffffffffffff        - delete this entry.
+ *     other                   - value assigned by administrator.
+ */
+static ssize_t show_admin_alias_guid(struct device *dev,
+                             struct device_attribute *attr, char *buf)
+{
+       int record_num;/*0-15*/
+       int guid_index_in_rec; /*0 - 7*/
+       struct mlx4_ib_iov_sysfs_attr *mlx4_ib_iov_dentry =
+               container_of(attr, struct mlx4_ib_iov_sysfs_attr, dentry);
+       struct mlx4_ib_iov_port *port = mlx4_ib_iov_dentry->ctx;
+       struct mlx4_ib_dev *mdev = port->dev;
+
+       record_num = mlx4_ib_iov_dentry->entry_num / 8 ;
+       guid_index_in_rec = mlx4_ib_iov_dentry->entry_num % 8 ;
+
+       return sprintf(buf, "%llx\n",
+                      be64_to_cpu(*(__be64 *)&mdev->sriov.alias_guid.
+                                  ports_guid[port->num - 1].
+                                  all_rec_per_port[record_num].
+                                  all_recs[8 * guid_index_in_rec]));
+}
+
+/* store_admin_alias_guid stores the (new) administratively assigned value of that GUID.
+ * Values in buf parameter string:
+ *     0                       - requests opensm to assign a value.
+ *     0xffffffffffffffff      - delete this entry.
+ *     other                   - guid value assigned by the administrator.
+ */
+static ssize_t store_admin_alias_guid(struct device *dev,
+                                     struct device_attribute *attr,
+                                     const char *buf, size_t count)
+{
+       int record_num;/*0-15*/
+       int guid_index_in_rec; /*0 - 7*/
+       struct mlx4_ib_iov_sysfs_attr *mlx4_ib_iov_dentry =
+               container_of(attr, struct mlx4_ib_iov_sysfs_attr, dentry);
+       struct mlx4_ib_iov_port *port = mlx4_ib_iov_dentry->ctx;
+       struct mlx4_ib_dev *mdev = port->dev;
+       u64 sysadmin_ag_val;
+
+       record_num = mlx4_ib_iov_dentry->entry_num / 8;
+       guid_index_in_rec = mlx4_ib_iov_dentry->entry_num % 8;
+       if (0 == record_num && 0 == guid_index_in_rec) {
+               pr_err("GUID 0 block 0 is RO\n");
+               return count;
+       }
+       sscanf(buf, "%llx", &sysadmin_ag_val);
+       *(__be64 *)&mdev->sriov.alias_guid.ports_guid[port->num - 1].
+               all_rec_per_port[record_num].
+               all_recs[GUID_REC_SIZE * guid_index_in_rec] =
+                       cpu_to_be64(sysadmin_ag_val);
+
+       /* Change the state to be pending for update */
+       mdev->sriov.alias_guid.ports_guid[port->num - 1].all_rec_per_port[record_num].status
+               = MLX4_GUID_INFO_STATUS_IDLE ;
+
+       mdev->sriov.alias_guid.ports_guid[port->num - 1].all_rec_per_port[record_num].method
+               = MLX4_GUID_INFO_RECORD_SET;
+
+       switch (sysadmin_ag_val) {
+       case MLX4_GUID_FOR_DELETE_VAL:
+               mdev->sriov.alias_guid.ports_guid[port->num - 1].all_rec_per_port[record_num].method
+                       = MLX4_GUID_INFO_RECORD_DELETE;
+               mdev->sriov.alias_guid.ports_guid[port->num - 1].all_rec_per_port[record_num].ownership
+                       = MLX4_GUID_SYSADMIN_ASSIGN;
+               break;
+       /* The sysadmin requests the SM to re-assign */
+       case MLX4_NOT_SET_GUID:
+               mdev->sriov.alias_guid.ports_guid[port->num - 1].all_rec_per_port[record_num].ownership
+                       = MLX4_GUID_DRIVER_ASSIGN;
+               break;
+       /* The sysadmin requests a specific value.*/
+       default:
+               mdev->sriov.alias_guid.ports_guid[port->num - 1].all_rec_per_port[record_num].ownership
+                       = MLX4_GUID_SYSADMIN_ASSIGN;
+               break;
+       }
+
+       /* set the record index */
+       mdev->sriov.alias_guid.ports_guid[port->num - 1].all_rec_per_port[record_num].guid_indexes
+               = mlx4_ib_get_aguid_comp_mask_from_ix(guid_index_in_rec);
+
+       mlx4_ib_init_alias_guid_work(mdev, port->num - 1);
+
+       return count;
+}
+
+static ssize_t show_port_gid(struct device *dev,
+                            struct device_attribute *attr,
+                            char *buf)
+{
+       struct mlx4_ib_iov_sysfs_attr *mlx4_ib_iov_dentry =
+               container_of(attr, struct mlx4_ib_iov_sysfs_attr, dentry);
+       struct mlx4_ib_iov_port *port = mlx4_ib_iov_dentry->ctx;
+       struct mlx4_ib_dev *mdev = port->dev;
+       union ib_gid gid;
+       ssize_t ret;
+
+       ret = __mlx4_ib_query_gid(&mdev->ib_dev, port->num,
+                                 mlx4_ib_iov_dentry->entry_num, &gid, 1);
+       if (ret)
+               return ret;
+       ret = sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
+                     be16_to_cpu(((__be16 *) gid.raw)[0]),
+                     be16_to_cpu(((__be16 *) gid.raw)[1]),
+                     be16_to_cpu(((__be16 *) gid.raw)[2]),
+                     be16_to_cpu(((__be16 *) gid.raw)[3]),
+                     be16_to_cpu(((__be16 *) gid.raw)[4]),
+                     be16_to_cpu(((__be16 *) gid.raw)[5]),
+                     be16_to_cpu(((__be16 *) gid.raw)[6]),
+                     be16_to_cpu(((__be16 *) gid.raw)[7]));
+       return ret;
+}
+
+static ssize_t show_phys_port_pkey(struct device *dev,
+                                  struct device_attribute *attr,
+                                  char *buf)
+{
+       struct mlx4_ib_iov_sysfs_attr *mlx4_ib_iov_dentry =
+               container_of(attr, struct mlx4_ib_iov_sysfs_attr, dentry);
+       struct mlx4_ib_iov_port *port = mlx4_ib_iov_dentry->ctx;
+       struct mlx4_ib_dev *mdev = port->dev;
+       u16 pkey;
+       ssize_t ret;
+
+       ret = __mlx4_ib_query_pkey(&mdev->ib_dev, port->num,
+                                  mlx4_ib_iov_dentry->entry_num, &pkey, 1);
+       if (ret)
+               return ret;
+
+       return sprintf(buf, "0x%04x\n", pkey);
+}
+
+#define DENTRY_REMOVE(_dentry)                                         \
+do {                                                                   \
+       sysfs_remove_file((_dentry)->kobj, &(_dentry)->dentry.attr);    \
+} while (0);
+
+static int create_sysfs_entry(void *_ctx, struct mlx4_ib_iov_sysfs_attr *_dentry,
+                             char *_name, struct kobject *_kobj,
+                             ssize_t (*show)(struct device *dev,
+                                             struct device_attribute *attr,
+                                             char *buf),
+                             ssize_t (*store)(struct device *dev,
+                                              struct device_attribute *attr,
+                                              const char *buf, size_t count)
+                             )
+{
+       int ret = 0;
+       struct mlx4_ib_iov_sysfs_attr *vdentry = _dentry;
+
+       vdentry->ctx = _ctx;
+       vdentry->dentry.show = show;
+       vdentry->dentry.store = store;
+       sysfs_attr_init(&vdentry->dentry.attr);
+       vdentry->dentry.attr.name = vdentry->name;
+       vdentry->dentry.attr.mode = 0;
+       vdentry->kobj = _kobj;
+       snprintf(vdentry->name, 15, "%s", _name);
+
+       if (vdentry->dentry.store)
+               vdentry->dentry.attr.mode |= S_IWUSR;
+
+       if (vdentry->dentry.show)
+               vdentry->dentry.attr.mode |= S_IRUGO;
+
+       ret = sysfs_create_file(vdentry->kobj, &vdentry->dentry.attr);
+       if (ret) {
+               pr_err("failed to create %s\n", vdentry->dentry.attr.name);
+               vdentry->ctx = NULL;
+               return ret;
+       }
+
+       return ret;
+}
+
+int add_sysfs_port_mcg_attr(struct mlx4_ib_dev *device, int port_num,
+               struct attribute *attr)
+{
+       struct mlx4_ib_iov_port *port = &device->iov_ports[port_num - 1];
+       int ret;
+
+       ret = sysfs_create_file(port->mcgs_parent, attr);
+       if (ret)
+               pr_err("failed to create %s\n", attr->name);
+
+       return ret;
+}
+
+void del_sysfs_port_mcg_attr(struct mlx4_ib_dev *device, int port_num,
+               struct attribute *attr)
+{
+       struct mlx4_ib_iov_port *port = &device->iov_ports[port_num - 1];
+
+       sysfs_remove_file(port->mcgs_parent, attr);
+}
+
+static int add_port_entries(struct mlx4_ib_dev *device, int port_num)
+{
+       int i;
+       char buff[10];
+       struct mlx4_ib_iov_port *port = NULL;
+       int ret = 0 ;
+       struct ib_port_attr attr;
+
+       /* get the physical gid and pkey table sizes.*/
+       ret = __mlx4_ib_query_port(&device->ib_dev, port_num, &attr, 1);
+       if (ret)
+               goto err;
+
+       port = &device->iov_ports[port_num - 1];
+       port->dev = device;
+       port->num = port_num;
+       /* Directory structure:
+        * iov -
+        *   port num -
+        *      admin_guids
+        *      gids (operational)
+        *      mcg_table
+        */
+       port->dentr_ar = kzalloc(sizeof (struct mlx4_ib_iov_sysfs_attr_ar),
+                                GFP_KERNEL);
+       if (!port->dentr_ar) {
+               ret = -ENOMEM;
+               goto err;
+       }
+       sprintf(buff, "%d", port_num);
+       port->cur_port = kobject_create_and_add(buff,
+                                kobject_get(device->ports_parent));
+       if (!port->cur_port) {
+               ret = -ENOMEM;
+               goto kobj_create_err;
+       }
+       /* admin GUIDs */
+       port->admin_alias_parent = kobject_create_and_add("admin_guids",
+                                                 kobject_get(port->cur_port));
+       if (!port->admin_alias_parent) {
+               ret = -ENOMEM;
+               goto err_admin_guids;
+       }
+       for (i = 0 ; i < attr.gid_tbl_len; i++) {
+               sprintf(buff, "%d", i);
+               port->dentr_ar->dentries[i].entry_num = i;
+               ret = create_sysfs_entry(port, &port->dentr_ar->dentries[i],
+                                         buff, port->admin_alias_parent,
+                                         show_admin_alias_guid, store_admin_alias_guid);
+               if (ret)
+                       goto err_admin_alias_parent;
+       }
+
+       /* gids subdirectory (operational gids) */
+       port->gids_parent = kobject_create_and_add("gids",
+                                                 kobject_get(port->cur_port));
+       if (!port->gids_parent) {
+               ret = -ENOMEM;
+               goto err_gids;
+       }
+
+       for (i = 0 ; i < attr.gid_tbl_len; i++) {
+               sprintf(buff, "%d", i);
+               port->dentr_ar->dentries[attr.gid_tbl_len + i].entry_num = i;
+               ret = create_sysfs_entry(port,
+                                        &port->dentr_ar->dentries[attr.gid_tbl_len + i],
+                                        buff,
+                                        port->gids_parent, show_port_gid, NULL);
+               if (ret)
+                       goto err_gids_parent;
+       }
+
+       /* physical port pkey table */
+       port->pkeys_parent =
+               kobject_create_and_add("pkeys", kobject_get(port->cur_port));
+       if (!port->pkeys_parent) {
+               ret = -ENOMEM;
+               goto err_pkeys;
+       }
+
+       for (i = 0 ; i < attr.pkey_tbl_len; i++) {
+               sprintf(buff, "%d", i);
+               port->dentr_ar->dentries[2 * attr.gid_tbl_len + i].entry_num = i;
+               ret = create_sysfs_entry(port,
+                                        &port->dentr_ar->dentries[2 * attr.gid_tbl_len + i],
+                                        buff, port->pkeys_parent,
+                                        show_phys_port_pkey, NULL);
+               if (ret)
+                       goto err_pkeys_parent;
+       }
+
+       /* MCGs table */
+       port->mcgs_parent =
+               kobject_create_and_add("mcgs", kobject_get(port->cur_port));
+       if (!port->mcgs_parent) {
+               ret = -ENOMEM;
+               goto err_mcgs;
+       }
+       return 0;
+
+err_mcgs:
+       kobject_put(port->cur_port);
+
+err_pkeys_parent:
+       kobject_put(port->pkeys_parent);
+
+err_pkeys:
+       kobject_put(port->cur_port);
+
+err_gids_parent:
+       kobject_put(port->gids_parent);
+
+err_gids:
+       kobject_put(port->cur_port);
+
+err_admin_alias_parent:
+       kobject_put(port->admin_alias_parent);
+
+err_admin_guids:
+       kobject_put(port->cur_port);
+       kobject_put(port->cur_port); /* once more for create_and_add buff */
+
+kobj_create_err:
+       kobject_put(device->ports_parent);
+       kfree(port->dentr_ar);
+
+err:
+       pr_err("add_port_entries FAILED: for port:%d, error: %d\n",
+              port_num, ret);
+       return ret;
+}
+
+static void get_name(struct mlx4_ib_dev *dev, char *name, int i, int max)
+{
+       char base_name[9];
+
+       /* pci_name format is: bus:dev:func -> xxxx:yy:zz.n */
+       strlcpy(name, pci_name(dev->dev->pdev), max);
+       strncpy(base_name, name, 8); /*till xxxx:yy:*/
+       base_name[8] = '\0';
+       /* with no ARI only 3 last bits are used so when the fn is higher than 8
+        * need to add it to the dev num, so count in the last number will be
+        * modulo 8 */
+       sprintf(name, "%s%.2d.%d", base_name, (i/8), (i%8));
+}
+
+struct mlx4_port {
+       struct kobject         kobj;
+       struct mlx4_ib_dev    *dev;
+       struct attribute_group pkey_group;
+       struct attribute_group gid_group;
+       u8                     port_num;
+       int                    slave;
+};
+
+
+static void mlx4_port_release(struct kobject *kobj)
+{
+       struct mlx4_port *p = container_of(kobj, struct mlx4_port, kobj);
+       struct attribute *a;
+       int i;
+
+       for (i = 0; (a = p->pkey_group.attrs[i]); ++i)
+               kfree(a);
+       kfree(p->pkey_group.attrs);
+       for (i = 0; (a = p->gid_group.attrs[i]); ++i)
+               kfree(a);
+       kfree(p->gid_group.attrs);
+       kfree(p);
+}
+
+struct port_attribute {
+       struct attribute attr;
+       ssize_t (*show)(struct mlx4_port *, struct port_attribute *, char *buf);
+       ssize_t (*store)(struct mlx4_port *, struct port_attribute *,
+                        const char *buf, size_t count);
+};
+
+static ssize_t port_attr_show(struct kobject *kobj,
+                             struct attribute *attr, char *buf)
+{
+       struct port_attribute *port_attr =
+               container_of(attr, struct port_attribute, attr);
+       struct mlx4_port *p = container_of(kobj, struct mlx4_port, kobj);
+
+       if (!port_attr->show)
+               return -EIO;
+       return port_attr->show(p, port_attr, buf);
+}
+
+static ssize_t port_attr_store(struct kobject *kobj,
+                              struct attribute *attr,
+                              const char *buf, size_t size)
+{
+       struct port_attribute *port_attr =
+               container_of(attr, struct port_attribute, attr);
+       struct mlx4_port *p = container_of(kobj, struct mlx4_port, kobj);
+
+       if (!port_attr->store)
+               return -EIO;
+       return port_attr->store(p, port_attr, buf, size);
+}
+
+static const struct sysfs_ops port_sysfs_ops = {
+       .show = port_attr_show,
+       .store = port_attr_store,
+};
+
+static struct kobj_type port_type = {
+       .release    = mlx4_port_release,
+       .sysfs_ops  = &port_sysfs_ops,
+};
+
+struct port_table_attribute {
+       struct port_attribute   attr;
+       char                    name[8];
+       int                     index;
+};
+
+static ssize_t show_port_pkey(struct mlx4_port *p, struct port_attribute *attr,
+                             char *buf)
+{
+       struct port_table_attribute *tab_attr =
+               container_of(attr, struct port_table_attribute, attr);
+       ssize_t ret = -ENODEV;
+
+       if (p->dev->pkeys.virt2phys_pkey[p->slave][p->port_num - 1][tab_attr->index] >=
+           (p->dev->dev->caps.pkey_table_len[p->port_num]))
+               ret = sprintf(buf, "none\n");
+       else
+               ret = sprintf(buf, "%d\n",
+                             p->dev->pkeys.virt2phys_pkey[p->slave]
+                             [p->port_num - 1][tab_attr->index]);
+       return ret;
+}
+
+static ssize_t store_port_pkey(struct mlx4_port *p, struct port_attribute *attr,
+                              const char *buf, size_t count)
+{
+       struct port_table_attribute *tab_attr =
+               container_of(attr, struct port_table_attribute, attr);
+       int idx;
+       int err;
+
+       /* do not allow remapping Dom0 virtual pkey table */
+       if (p->slave == mlx4_master_func_num(p->dev->dev))
+               return -EINVAL;
+
+       if (!strncasecmp(buf, "no", 2))
+               idx = p->dev->dev->phys_caps.pkey_phys_table_len[p->port_num] - 1;
+       else if (sscanf(buf, "%i", &idx) != 1 ||
+                idx >= p->dev->dev->caps.pkey_table_len[p->port_num] ||
+                idx < 0)
+               return -EINVAL;
+
+       p->dev->pkeys.virt2phys_pkey[p->slave][p->port_num - 1]
+                                   [tab_attr->index] = idx;
+       mlx4_sync_pkey_table(p->dev->dev, p->slave, p->port_num,
+                            tab_attr->index, idx);
+       err = mlx4_gen_pkey_eqe(p->dev->dev, p->slave, p->port_num);
+       if (err) {
+               pr_err("mlx4_gen_pkey_eqe failed for slave %d,"
+                      " port %d, index %d\n", p->slave, p->port_num, idx);
+               return err;
+       }
+       return count;
+}
+
+static ssize_t show_port_gid_idx(struct mlx4_port *p,
+                                struct port_attribute *attr, char *buf)
+{
+       return sprintf(buf, "%d\n", p->slave);
+}
+
+static struct attribute **
+alloc_group_attrs(ssize_t (*show)(struct mlx4_port *,
+                                 struct port_attribute *, char *buf),
+                 ssize_t (*store)(struct mlx4_port *, struct port_attribute *,
+                                  const char *buf, size_t count),
+                 int len)
+{
+       struct attribute **tab_attr;
+       struct port_table_attribute *element;
+       int i;
+
+       tab_attr = kcalloc(1 + len, sizeof (struct attribute *), GFP_KERNEL);
+       if (!tab_attr)
+               return NULL;
+
+       for (i = 0; i < len; i++) {
+               element = kzalloc(sizeof (struct port_table_attribute),
+                                 GFP_KERNEL);
+               if (!element)
+                       goto err;
+               if (snprintf(element->name, sizeof (element->name),
+                            "%d", i) >= sizeof (element->name)) {
+                       kfree(element);
+                       goto err;
+               }
+               sysfs_attr_init(&element->attr.attr);
+               element->attr.attr.name  = element->name;
+               if (store) {
+                       element->attr.attr.mode  = S_IWUSR | S_IRUGO;
+                       element->attr.store      = store;
+               } else
+                       element->attr.attr.mode  = S_IRUGO;
+
+               element->attr.show       = show;
+               element->index           = i;
+               tab_attr[i] = &element->attr.attr;
+       }
+       return tab_attr;
+
+err:
+       while (--i >= 0)
+               kfree(tab_attr[i]);
+       kfree(tab_attr);
+       return NULL;
+}
+
+static int add_port(struct mlx4_ib_dev *dev, int port_num, int slave)
+{
+       struct mlx4_port *p;
+       int i;
+       int ret;
+
+       p = kzalloc(sizeof *p, GFP_KERNEL);
+       if (!p)
+               return -ENOMEM;
+
+       p->dev = dev;
+       p->port_num = port_num;
+       p->slave = slave;
+
+       ret = kobject_init_and_add(&p->kobj, &port_type,
+                                  kobject_get(dev->dev_ports_parent[slave]),
+                                  "%d", port_num);
+       if (ret)
+               goto err_alloc;
+
+       p->pkey_group.name  = "pkey_idx";
+       p->pkey_group.attrs =
+               alloc_group_attrs(show_port_pkey, store_port_pkey,
+                                 dev->dev->caps.pkey_table_len[port_num]);
+       if (!p->pkey_group.attrs)
+               goto err_alloc;
+
+       ret = sysfs_create_group(&p->kobj, &p->pkey_group);
+       if (ret)
+               goto err_free_pkey;
+
+       p->gid_group.name  = "gid_idx";
+       p->gid_group.attrs = alloc_group_attrs(show_port_gid_idx, NULL, 1);
+       if (!p->gid_group.attrs)
+               goto err_free_pkey;
+
+       ret = sysfs_create_group(&p->kobj, &p->gid_group);
+       if (ret)
+               goto err_free_gid;
+
+       list_add_tail(&p->kobj.entry, &dev->pkeys.pkey_port_list[slave]);
+       return 0;
+
+err_free_gid:
+       kfree(p->gid_group.attrs[0]);
+       kfree(p->gid_group.attrs);
+
+err_free_pkey:
+       for (i = 0; i < dev->dev->caps.pkey_table_len[port_num]; ++i)
+               kfree(p->pkey_group.attrs[i]);
+       kfree(p->pkey_group.attrs);
+
+err_alloc:
+       kobject_put(dev->dev_ports_parent[slave]);
+       kfree(p);
+       return ret;
+}
+
+static int register_one_pkey_tree(struct mlx4_ib_dev *dev, int slave)
+{
+       char name[32];
+       int err;
+       int port;
+       struct kobject *p, *t;
+       struct mlx4_port *mport;
+
+       get_name(dev, name, slave, sizeof name);
+
+       dev->pkeys.device_parent[slave] =
+               kobject_create_and_add(name, kobject_get(dev->iov_parent));
+
+       if (!dev->pkeys.device_parent[slave]) {
+               err = -ENOMEM;
+               goto fail_dev;
+       }
+
+       INIT_LIST_HEAD(&dev->pkeys.pkey_port_list[slave]);
+
+       dev->dev_ports_parent[slave] =
+               kobject_create_and_add("ports",
+                                      kobject_get(dev->pkeys.device_parent[slave]));
+
+       if (!dev->dev_ports_parent[slave]) {
+               err = -ENOMEM;
+               goto err_ports;
+       }
+
+       for (port = 1; port <= dev->dev->caps.num_ports; ++port) {
+               err = add_port(dev, port, slave);
+               if (err)
+                       goto err_add;
+       }
+       return 0;
+
+err_add:
+       list_for_each_entry_safe(p, t,
+                                &dev->pkeys.pkey_port_list[slave],
+                                entry) {
+               list_del(&p->entry);
+               mport = container_of(p, struct mlx4_port, kobj);
+               sysfs_remove_group(p, &mport->pkey_group);
+               sysfs_remove_group(p, &mport->gid_group);
+               kobject_put(p);
+       }
+       kobject_put(dev->dev_ports_parent[slave]);
+
+err_ports:
+       kobject_put(dev->pkeys.device_parent[slave]);
+       /* extra put for the device_parent create_and_add */
+       kobject_put(dev->pkeys.device_parent[slave]);
+
+fail_dev:
+       kobject_put(dev->iov_parent);
+       return err;
+}
+
+static int register_pkey_tree(struct mlx4_ib_dev *device)
+{
+       int i;
+
+       if (!mlx4_is_master(device->dev))
+               return 0;
+
+       for (i = 0; i <= device->dev->num_vfs; ++i)
+               register_one_pkey_tree(device, i);
+
+       return 0;
+}
+
+static void unregister_pkey_tree(struct mlx4_ib_dev *device)
+{
+       int slave;
+       struct kobject *p, *t;
+       struct mlx4_port *port;
+
+       if (!mlx4_is_master(device->dev))
+               return;
+
+       for (slave = device->dev->num_vfs; slave >= 0; --slave) {
+               list_for_each_entry_safe(p, t,
+                                        &device->pkeys.pkey_port_list[slave],
+                                        entry) {
+                       list_del(&p->entry);
+                       port = container_of(p, struct mlx4_port, kobj);
+                       sysfs_remove_group(p, &port->pkey_group);
+                       sysfs_remove_group(p, &port->gid_group);
+                       kobject_put(p);
+                       kobject_put(device->dev_ports_parent[slave]);
+               }
+               kobject_put(device->dev_ports_parent[slave]);
+               kobject_put(device->pkeys.device_parent[slave]);
+               kobject_put(device->pkeys.device_parent[slave]);
+               kobject_put(device->iov_parent);
+       }
+}
+
+int mlx4_ib_device_register_sysfs(struct mlx4_ib_dev *dev)
+{
+       int i;
+       int ret = 0;
+
+       if (!mlx4_is_master(dev->dev))
+               return 0;
+
+       dev->iov_parent =
+               kobject_create_and_add("iov",
+                                      kobject_get(dev->ib_dev.ports_parent->parent));
+       if (!dev->iov_parent) {
+               ret = -ENOMEM;
+               goto err;
+       }
+       dev->ports_parent =
+               kobject_create_and_add("ports",
+                                      kobject_get(dev->iov_parent));
+       if (!dev->iov_parent) {
+               ret = -ENOMEM;
+               goto err_ports;
+       }
+
+       for (i = 1; i <= dev->ib_dev.phys_port_cnt; ++i) {
+               ret = add_port_entries(dev, i);
+               if (ret)
+                       goto err_add_entries;
+       }
+
+       ret = register_pkey_tree(dev);
+       if (ret)
+               goto err_add_entries;
+       return 0;
+
+err_add_entries:
+       kobject_put(dev->ports_parent);
+
+err_ports:
+       kobject_put(dev->iov_parent);
+err:
+       kobject_put(dev->ib_dev.ports_parent->parent);
+       pr_err("mlx4_ib_device_register_sysfs error (%d)\n", ret);
+       return ret;
+}
+
+static void unregister_alias_guid_tree(struct mlx4_ib_dev *device)
+{
+       struct mlx4_ib_iov_port *p;
+       int i;
+
+       if (!mlx4_is_master(device->dev))
+               return;
+
+       for (i = 0; i < device->dev->caps.num_ports; i++) {
+               p = &device->iov_ports[i];
+               kobject_put(p->admin_alias_parent);
+               kobject_put(p->gids_parent);
+               kobject_put(p->pkeys_parent);
+               kobject_put(p->mcgs_parent);
+               kobject_put(p->cur_port);
+               kobject_put(p->cur_port);
+               kobject_put(p->cur_port);
+               kobject_put(p->cur_port);
+               kobject_put(p->cur_port);
+               kobject_put(p->dev->ports_parent);
+               kfree(p->dentr_ar);
+       }
+}
+
+void mlx4_ib_device_unregister_sysfs(struct mlx4_ib_dev *device)
+{
+       unregister_alias_guid_tree(device);
+       unregister_pkey_tree(device);
+       kobject_put(device->ports_parent);
+       kobject_put(device->iov_parent);
+       kobject_put(device->iov_parent);
+       kobject_put(device->ib_dev.ports_parent->parent);
+}
index c438e4691b3cde3ec4c3874a41c07f73ec4c59b9..0da62b904d00f3f51407522d6e4ae03eafbbf879 100644 (file)
@@ -399,11 +399,20 @@ static inline void nes_write8(void __iomem *addr, u8 val)
        writeb(val, addr);
 }
 
-
+enum nes_resource {
+       NES_RESOURCE_MW = 1,
+       NES_RESOURCE_FAST_MR,
+       NES_RESOURCE_PHYS_MR,
+       NES_RESOURCE_USER_MR,
+       NES_RESOURCE_PD,
+       NES_RESOURCE_QP,
+       NES_RESOURCE_CQ,
+       NES_RESOURCE_ARP
+};
 
 static inline int nes_alloc_resource(struct nes_adapter *nesadapter,
                unsigned long *resource_array, u32 max_resources,
-               u32 *req_resource_num, u32 *next)
+               u32 *req_resource_num, u32 *next, enum nes_resource resource_type)
 {
        unsigned long flags;
        u32 resource_num;
@@ -414,7 +423,7 @@ static inline int nes_alloc_resource(struct nes_adapter *nesadapter,
        if (resource_num >= max_resources) {
                resource_num = find_first_zero_bit(resource_array, max_resources);
                if (resource_num >= max_resources) {
-                       printk(KERN_ERR PFX "%s: No available resourcess.\n", __func__);
+                       printk(KERN_ERR PFX "%s: No available resources [type=%u].\n", __func__, resource_type);
                        spin_unlock_irqrestore(&nesadapter->resource_lock, flags);
                        return -EMFILE;
                }
index 020e95c4c4b9b08da6119b13bbe6b8b6a2710b88..cfaacaf6bf5f9c8e11ce0bb5c0dce62c54e15ae4 100644 (file)
@@ -430,6 +430,8 @@ static void form_cm_frame(struct sk_buff *skb,
        buf += sizeof(*tcph);
 
        skb->ip_summed = CHECKSUM_PARTIAL;
+       if (!(cm_node->netdev->features & NETIF_F_IP_CSUM))
+               skb->ip_summed = CHECKSUM_NONE;
        skb->protocol = htons(0x800);
        skb->data_len = 0;
        skb->mac_len = ETH_HLEN;
@@ -1356,7 +1358,7 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
        else
                netdev = nesvnic->netdev;
 
-       neigh = dst_neigh_lookup(&rt->dst, &dst_ip);
+       neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, netdev);
 
        rcu_read_lock();
        if (neigh) {
@@ -1465,12 +1467,8 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
        cm_node->loopbackpartner = NULL;
 
        /* get the mac addr for the remote node */
-       if (ipv4_is_loopback(htonl(cm_node->rem_addr))) {
-               arpindex = nes_arp_table(nesdev, ntohl(nesvnic->local_ipaddr), NULL, NES_ARP_RESOLVE);
-       } else {
-               oldarpindex = nes_arp_table(nesdev, cm_node->rem_addr, NULL, NES_ARP_RESOLVE);
-               arpindex = nes_addr_resolve_neigh(nesvnic, cm_info->rem_addr, oldarpindex);
-       }
+       oldarpindex = nes_arp_table(nesdev, cm_node->rem_addr, NULL, NES_ARP_RESOLVE);
+       arpindex = nes_addr_resolve_neigh(nesvnic, cm_info->rem_addr, oldarpindex);
        if (arpindex < 0) {
                kfree(cm_node);
                return NULL;
@@ -3153,11 +3151,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
        nesqp->nesqp_context->tcpPorts[1] =
                cpu_to_le16(ntohs(cm_id->remote_addr.sin_port));
 
-       if (ipv4_is_loopback(cm_id->remote_addr.sin_addr.s_addr))
-               nesqp->nesqp_context->ip0 =
-                       cpu_to_le32(ntohl(nesvnic->local_ipaddr));
-       else
-               nesqp->nesqp_context->ip0 =
+       nesqp->nesqp_context->ip0 =
                        cpu_to_le32(ntohl(cm_id->remote_addr.sin_addr.s_addr));
 
        nesqp->nesqp_context->misc2 |= cpu_to_le32(
@@ -3182,10 +3176,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
        memset(&nes_quad, 0, sizeof(nes_quad));
        nes_quad.DstIpAdrIndex =
                cpu_to_le32((u32)PCI_FUNC(nesdev->pcidev->devfn) << 24);
-       if (ipv4_is_loopback(cm_id->remote_addr.sin_addr.s_addr))
-               nes_quad.SrcIpadr = nesvnic->local_ipaddr;
-       else
-               nes_quad.SrcIpadr = cm_id->remote_addr.sin_addr.s_addr;
+       nes_quad.SrcIpadr = cm_id->remote_addr.sin_addr.s_addr;
        nes_quad.TcpPorts[0] = cm_id->remote_addr.sin_port;
        nes_quad.TcpPorts[1] = cm_id->local_addr.sin_port;
 
@@ -3538,11 +3529,7 @@ static void cm_event_connected(struct nes_cm_event *event)
                cpu_to_le16(ntohs(cm_id->local_addr.sin_port));
        nesqp->nesqp_context->tcpPorts[1] =
                cpu_to_le16(ntohs(cm_id->remote_addr.sin_port));
-       if (ipv4_is_loopback(cm_id->remote_addr.sin_addr.s_addr))
-               nesqp->nesqp_context->ip0 =
-                       cpu_to_le32(ntohl(nesvnic->local_ipaddr));
-       else
-               nesqp->nesqp_context->ip0 =
+       nesqp->nesqp_context->ip0 =
                        cpu_to_le32(ntohl(cm_id->remote_addr.sin_addr.s_addr));
 
        nesqp->nesqp_context->misc2 |= cpu_to_le32(
@@ -3571,10 +3558,7 @@ static void cm_event_connected(struct nes_cm_event *event)
 
        nes_quad.DstIpAdrIndex =
                cpu_to_le32((u32)PCI_FUNC(nesdev->pcidev->devfn) << 24);
-       if (ipv4_is_loopback(cm_id->remote_addr.sin_addr.s_addr))
-               nes_quad.SrcIpadr = nesvnic->local_ipaddr;
-       else
-               nes_quad.SrcIpadr = cm_id->remote_addr.sin_addr.s_addr;
+       nes_quad.SrcIpadr = cm_id->remote_addr.sin_addr.s_addr;
        nes_quad.TcpPorts[0] = cm_id->remote_addr.sin_port;
        nes_quad.TcpPorts[1] = cm_id->local_addr.sin_port;
 
index d42c9f435b1b7a62ad55a331c1f6bc76c84fdbab..fe7965ee40968fa2cedba5e77a774ae348fcf02d 100644 (file)
@@ -2679,11 +2679,9 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
                        }
                }
                if (nesadapter->phy_type[mac_index] == NES_PHY_TYPE_SFP_D) {
-                       if (nesdev->link_recheck)
-                               cancel_delayed_work(&nesdev->work);
                        nesdev->link_recheck = 1;
-                       schedule_delayed_work(&nesdev->work,
-                                             NES_LINK_RECHECK_DELAY);
+                       mod_delayed_work(system_wq, &nesdev->work,
+                                        NES_LINK_RECHECK_DELAY);
                }
        }
 
@@ -3577,10 +3575,10 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
        tcp_state = (aeq_info & NES_AEQE_TCP_STATE_MASK) >> NES_AEQE_TCP_STATE_SHIFT;
        iwarp_state = (aeq_info & NES_AEQE_IWARP_STATE_MASK) >> NES_AEQE_IWARP_STATE_SHIFT;
        nes_debug(NES_DBG_AEQ, "aeid = 0x%04X, qp-cq id = %d, aeqe = %p,"
-                       " Tcp state = %s, iWARP state = %s\n",
+                       " Tcp state = %d, iWARP state = %d\n",
                        async_event_id,
                        le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]), aeqe,
-                       nes_tcp_state_str[tcp_state], nes_iwarp_state_str[iwarp_state]);
+                       tcp_state, iwarp_state);
 
        aeqe_cq_id = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]);
        if (aeq_info & NES_AEQE_QP) {
index f3a3ecf8d09ebfbff9bcc37b1a4beac97d60ebeb..0564be757d82740842ca6e6582f8eeead8bf1d9a 100644 (file)
@@ -243,10 +243,9 @@ static int nes_netdev_open(struct net_device *netdev)
 
        spin_lock_irqsave(&nesdev->nesadapter->phy_lock, flags);
        if (nesdev->nesadapter->phy_type[nesdev->mac_index] == NES_PHY_TYPE_SFP_D) {
-               if (nesdev->link_recheck)
-                       cancel_delayed_work(&nesdev->work);
                nesdev->link_recheck = 1;
-               schedule_delayed_work(&nesdev->work, NES_LINK_RECHECK_DELAY);
+               mod_delayed_work(system_wq, &nesdev->work,
+                                NES_LINK_RECHECK_DELAY);
        }
        spin_unlock_irqrestore(&nesdev->nesadapter->phy_lock, flags);
 
@@ -385,24 +384,20 @@ static int nes_nic_send(struct sk_buff *skb, struct net_device *netdev)
        /* bump past the vlan tag */
        wqe_fragment_length++;
        /*      wqe_fragment_address = (u64 *)&nic_sqe->wqe_words[NES_NIC_SQ_WQE_FRAG0_LOW_IDX]; */
+       wqe_misc |= NES_NIC_SQ_WQE_COMPLETION;
 
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
-               tcph = tcp_hdr(skb);
-               if (1) {
-                       if (skb_is_gso(skb)) {
-                               /* nes_debug(NES_DBG_NIC_TX, "%s: TSO request... seg size = %u\n",
-                                               netdev->name, skb_is_gso(skb)); */
-                               wqe_misc |= NES_NIC_SQ_WQE_LSO_ENABLE |
-                                               NES_NIC_SQ_WQE_COMPLETION | (u16)skb_is_gso(skb);
-                               set_wqe_32bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_LSO_INFO_IDX,
-                                               ((u32)tcph->doff) |
-                                               (((u32)(((unsigned char *)tcph) - skb->data)) << 4));
-                       } else {
-                               wqe_misc |= NES_NIC_SQ_WQE_COMPLETION;
-                       }
+               if (skb_is_gso(skb)) {
+                       tcph = tcp_hdr(skb);
+                       /* nes_debug(NES_DBG_NIC_TX, "%s: TSO request... is_gso = %u seg size = %u\n",
+                                       netdev->name, skb_is_gso(skb), skb_shinfo(skb)->gso_size); */
+                       wqe_misc |= NES_NIC_SQ_WQE_LSO_ENABLE | (u16)skb_shinfo(skb)->gso_size;
+                       set_wqe_32bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_LSO_INFO_IDX,
+                                       ((u32)tcph->doff) |
+                                       (((u32)(((unsigned char *)tcph) - skb->data)) << 4));
                }
        } else {        /* CHECKSUM_HW */
-               wqe_misc |= NES_NIC_SQ_WQE_DISABLE_CHKSUM | NES_NIC_SQ_WQE_COMPLETION;
+               wqe_misc |= NES_NIC_SQ_WQE_DISABLE_CHKSUM;
        }
 
        set_wqe_32bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_TOTAL_LENGTH_IDX,
@@ -597,10 +592,10 @@ tso_sq_no_longer_full:
                                        nes_debug(NES_DBG_NIC_TX, "ERROR: SKB header too big, headlen=%u, FIRST_FRAG_SIZE=%u\n",
                                                        original_first_length, NES_FIRST_FRAG_SIZE);
                                        nes_debug(NES_DBG_NIC_TX, "%s Request to tx NIC packet length %u, headlen %u,"
-                                                       " (%u frags), tso_size=%u\n",
+                                                       " (%u frags), is_gso = %u tso_size=%u\n",
                                                        netdev->name,
                                                        skb->len, skb_headlen(skb),
-                                                       skb_shinfo(skb)->nr_frags, skb_is_gso(skb));
+                                                       skb_shinfo(skb)->nr_frags, skb_is_gso(skb), skb_shinfo(skb)->gso_size);
                                }
                                memcpy(&nesnic->first_frag_vbase[nesnic->sq_head].buffer,
                                                skb->data, min(((unsigned int)NES_FIRST_FRAG_SIZE),
@@ -652,8 +647,8 @@ tso_sq_no_longer_full:
                                } else {
                                        nesnic->tx_skb[nesnic->sq_head] = NULL;
                                }
-                               wqe_misc |= NES_NIC_SQ_WQE_COMPLETION | (u16)skb_is_gso(skb);
-                               if ((tso_wqe_length + original_first_length) > skb_is_gso(skb)) {
+                               wqe_misc |= NES_NIC_SQ_WQE_COMPLETION | (u16)skb_shinfo(skb)->gso_size;
+                               if ((tso_wqe_length + original_first_length) > skb_shinfo(skb)->gso_size) {
                                        wqe_misc |= NES_NIC_SQ_WQE_LSO_ENABLE;
                                } else {
                                        iph->tot_len = htons(tso_wqe_length + original_first_length - nhoffset);
@@ -1679,12 +1674,10 @@ struct net_device *nes_netdev_init(struct nes_device *nesdev,
        netdev->hard_header_len = ETH_HLEN;
        netdev->addr_len = ETH_ALEN;
        netdev->type = ARPHRD_ETHER;
-       netdev->features = NETIF_F_HIGHDMA;
        netdev->netdev_ops = &nes_netdev_ops;
        netdev->ethtool_ops = &nes_ethtool_ops;
        netif_napi_add(netdev, &nesvnic->napi, nes_netdev_poll, 128);
        nes_debug(NES_DBG_INIT, "Enabling VLAN Insert/Delete.\n");
-       netdev->features |= NETIF_F_HW_VLAN_TX;
 
        /* Fill in the port structure */
        nesvnic->netdev = netdev;
@@ -1711,11 +1704,11 @@ struct net_device *nes_netdev_init(struct nes_device *nesdev,
        netdev->dev_addr[5] = (u8)u64temp;
        memcpy(netdev->perm_addr, netdev->dev_addr, 6);
 
-       netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_IP_CSUM |
-                             NETIF_F_HW_VLAN_RX;
+       netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX;
        if ((nesvnic->logical_port < 2) || (nesdev->nesadapter->hw_rev != NE020_REV))
                netdev->hw_features |= NETIF_F_TSO;
-       netdev->features |= netdev->hw_features;
+
+       netdev->features = netdev->hw_features | NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_TX;
        netdev->hw_features |= NETIF_F_LRO;
 
        nes_debug(NES_DBG_INIT, "nesvnic = %p, reported features = 0x%lX, QPid = %d,"
index e98f4fc0b7683230136490c5fdf0f3567c01901d..2042c0f2975942a3284bce33eda298cfc32ca11d 100644 (file)
@@ -699,7 +699,7 @@ int nes_arp_table(struct nes_device *nesdev, u32 ip_addr, u8 *mac_addr, u32 acti
 
                arp_index = 0;
                err = nes_alloc_resource(nesadapter, nesadapter->allocated_arps,
-                               nesadapter->arp_table_size, (u32 *)&arp_index, &nesadapter->next_arp_index);
+                               nesadapter->arp_table_size, (u32 *)&arp_index, &nesadapter->next_arp_index, NES_RESOURCE_ARP);
                if (err) {
                        nes_debug(NES_DBG_NETDEV, "nes_alloc_resource returned error = %u\n", err);
                        return err;
index 8b8812de4b5c881486ed265f31d411f2427ac97b..1dadcf388c0278b1d2e8597e1baa1c5d833ef106 100644 (file)
@@ -80,7 +80,7 @@ static struct ib_mw *nes_alloc_mw(struct ib_pd *ibpd) {
        next_stag_index %= nesadapter->max_mr;
 
        ret = nes_alloc_resource(nesadapter, nesadapter->allocated_mrs,
-                       nesadapter->max_mr, &stag_index, &next_stag_index);
+                       nesadapter->max_mr, &stag_index, &next_stag_index, NES_RESOURCE_MW);
        if (ret) {
                return ERR_PTR(ret);
        }
@@ -404,7 +404,7 @@ static struct ib_mr *nes_alloc_fast_reg_mr(struct ib_pd *ibpd, int max_page_list
 
        err = nes_alloc_resource(nesadapter, nesadapter->allocated_mrs,
                                 nesadapter->max_mr, &stag_index,
-                                &next_stag_index);
+                                &next_stag_index, NES_RESOURCE_FAST_MR);
        if (err)
                return ERR_PTR(err);
 
@@ -780,7 +780,7 @@ static struct ib_pd *nes_alloc_pd(struct ib_device *ibdev,
                        netdev_refcnt_read(nesvnic->netdev));
 
        err = nes_alloc_resource(nesadapter, nesadapter->allocated_pds,
-                       nesadapter->max_pd, &pd_num, &nesadapter->next_pd);
+                       nesadapter->max_pd, &pd_num, &nesadapter->next_pd, NES_RESOURCE_PD);
        if (err) {
                return ERR_PTR(err);
        }
@@ -1157,7 +1157,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
                        nes_debug(NES_DBG_QP, "RQ size=%u, SQ Size=%u\n", rq_size, sq_size);
 
                        ret = nes_alloc_resource(nesadapter, nesadapter->allocated_qps,
-                                       nesadapter->max_qp, &qp_num, &nesadapter->next_qp);
+                                       nesadapter->max_qp, &qp_num, &nesadapter->next_qp, NES_RESOURCE_QP);
                        if (ret) {
                                return ERR_PTR(ret);
                        }
@@ -1546,7 +1546,7 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, int entries,
                return ERR_PTR(-EINVAL);
 
        err = nes_alloc_resource(nesadapter, nesadapter->allocated_cqs,
-                       nesadapter->max_cq, &cq_num, &nesadapter->next_cq);
+                       nesadapter->max_cq, &cq_num, &nesadapter->next_cq, NES_RESOURCE_CQ);
        if (err) {
                return ERR_PTR(err);
        }
@@ -2129,7 +2129,7 @@ static struct ib_mr *nes_reg_phys_mr(struct ib_pd *ib_pd,
                return ERR_PTR(-EINVAL);
 
        err = nes_alloc_resource(nesadapter, nesadapter->allocated_mrs, nesadapter->max_mr,
-                       &stag_index, &next_stag_index);
+                       &stag_index, &next_stag_index, NES_RESOURCE_PHYS_MR);
        if (err) {
                return ERR_PTR(err);
        }
@@ -2360,7 +2360,7 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                        next_stag_index %= nesadapter->max_mr;
 
                        err = nes_alloc_resource(nesadapter, nesadapter->allocated_mrs,
-                                       nesadapter->max_mr, &stag_index, &next_stag_index);
+                                       nesadapter->max_mr, &stag_index, &next_stag_index, NES_RESOURCE_USER_MR);
                        if (err) {
                                ib_umem_release(region);
                                return ERR_PTR(err);
index 145da4040883cb440987ce412277d6ec94bb4a67..d39e0183ff822ceb71fe19b3e212ef5aa1125dca 100644 (file)
@@ -285,7 +285,6 @@ struct qib_base_info {
 
 #ifndef QIB_KERN_TYPE
 #define QIB_KERN_TYPE 0
-#define QIB_IDSTR "QLogic kernel.org driver"
 #endif
 
 /*
@@ -301,6 +300,19 @@ struct qib_base_info {
 */
 #define QIB_KERN_SWVERSION ((QIB_KERN_TYPE << 31) | QIB_USER_SWVERSION)
 
+/*
+ * Define the driver version number.  This is something that refers only
+ * to the driver itself, not the software interfaces it supports.
+ */
+#define QIB_DRIVER_VERSION_BASE "1.11"
+
+/* create the final driver version string */
+#ifdef QIB_IDSTR
+#define QIB_DRIVER_VERSION QIB_DRIVER_VERSION_BASE " " QIB_IDSTR
+#else
+#define QIB_DRIVER_VERSION QIB_DRIVER_VERSION_BASE
+#endif
+
 /*
  * If the unit is specified via open, HCA choice is fixed.  If port is
  * specified, it's also fixed.  Otherwise we try to spread contexts
index e41e7f7fc763e8ad3659f9060e2efab5f911c317..5423edcab51f4404905e47674957cb32e5f33b9f 100644 (file)
@@ -46,7 +46,7 @@
  * The size has to be longer than this string, so we can append
  * board/chip information to it in the init code.
  */
-const char ib_qib_version[] = QIB_IDSTR "\n";
+const char ib_qib_version[] = QIB_DRIVER_VERSION "\n";
 
 DEFINE_SPINLOCK(qib_devs_lock);
 LIST_HEAD(qib_dev_list);
@@ -65,6 +65,7 @@ MODULE_PARM_DESC(compat_ddr_negotiate,
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("QLogic <support@qlogic.com>");
 MODULE_DESCRIPTION("QLogic IB driver");
+MODULE_VERSION(QIB_DRIVER_VERSION);
 
 /*
  * QIB_PIO_MAXIBHDR is the max IB header size allowed for in our
index cff8a6c32161a1cc7111e02671ce882e7152f505..65a2a23f6f8a8f537193ea775f2da2da4bce93d6 100644 (file)
@@ -61,8 +61,8 @@ static int qibfs_mknod(struct inode *dir, struct dentry *dentry,
 
        inode->i_ino = get_next_ino();
        inode->i_mode = mode;
-       inode->i_uid = 0;
-       inode->i_gid = 0;
+       inode->i_uid = GLOBAL_ROOT_UID;
+       inode->i_gid = GLOBAL_ROOT_GID;
        inode->i_blocks = 0;
        inode->i_atime = CURRENT_TIME;
        inode->i_mtime = inode->i_atime;
index e9486c74c2262b5997aca9599971d625e9399677..81c7b73695d26c4898735b7e58abcd49bca398d6 100644 (file)
@@ -186,8 +186,9 @@ int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
                goto bail;
 
        off = sge->addr - mr->user_base;
-       if (unlikely(sge->addr < mr->iova || off + sge->length > mr->length ||
-                    (mr->access_flags & acc) == 0))
+       if (unlikely(sge->addr < mr->user_base ||
+                    off + sge->length > mr->length ||
+                    (mr->access_flags & acc) != acc))
                goto bail;
        if (unlikely(!atomic_inc_not_zero(&mr->refcount)))
                goto bail;
index fc9b205c24124aa04f9cb0aa8b015d3c48a1fb25..ba51a4715a1dcdd5963261d84497dc1e6974c070 100644 (file)
@@ -2224,7 +2224,7 @@ int qib_register_ib_device(struct qib_devdata *dd)
        ibdev->dma_ops = &qib_dma_mapping_ops;
 
        snprintf(ibdev->node_desc, sizeof(ibdev->node_desc),
-                QIB_IDSTR " %s", init_utsname()->nodename);
+                "QLogic Infiniband HCA %s", init_utsname()->nodename);
 
        ret = ib_register_device(ibdev, qib_create_port_files);
        if (ret)
index 3090100f0de7598fac29096f9b9e490397d60338..e5430dd50764c9c6e13b871a51ee8c273a3f9941 100644 (file)
@@ -5,7 +5,8 @@ ib_ipoib-y                                      := ipoib_main.o \
                                                   ipoib_multicast.o \
                                                   ipoib_verbs.o \
                                                   ipoib_vlan.o \
-                                                  ipoib_ethtool.o
+                                                  ipoib_ethtool.o \
+                                                  ipoib_netlink.o
 ib_ipoib-$(CONFIG_INFINIBAND_IPOIB_CM)         += ipoib_cm.o
 ib_ipoib-$(CONFIG_INFINIBAND_IPOIB_DEBUG)      += ipoib_fs.o
 
index 0af216d21f8790c31af507022bbc722627f41671..196eb52f003519c15cafdf97dd0a08703e33111b 100644 (file)
@@ -104,6 +104,10 @@ enum {
 
        MAX_SEND_CQE              = 16,
        IPOIB_CM_COPYBREAK        = 256,
+
+       IPOIB_NON_CHILD           = 0,
+       IPOIB_LEGACY_CHILD        = 1,
+       IPOIB_RTNL_CHILD          = 2,
 };
 
 #define        IPOIB_OP_RECV   (1ul << 31)
@@ -353,6 +357,7 @@ struct ipoib_dev_priv {
        struct net_device *parent;
        struct list_head child_intfs;
        struct list_head list;
+       int    child_type;
 
 #ifdef CONFIG_INFINIBAND_IPOIB_CM
        struct ipoib_cm_dev_priv cm;
@@ -512,6 +517,17 @@ void ipoib_event(struct ib_event_handler *handler,
 int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey);
 int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey);
 
+int __ipoib_vlan_add(struct ipoib_dev_priv *ppriv, struct ipoib_dev_priv *priv,
+                    u16 pkey, int child_type);
+
+int  __init ipoib_netlink_init(void);
+void __exit ipoib_netlink_fini(void);
+
+void ipoib_set_umcast(struct net_device *ndev, int umcast_val);
+int  ipoib_set_mode(struct net_device *dev, const char *buf);
+
+void ipoib_setup(struct net_device *dev);
+
 void ipoib_pkey_poll(struct work_struct *work);
 int ipoib_pkey_dev_delay_open(struct net_device *dev);
 void ipoib_drain_cq(struct net_device *dev);
index 24683fda8e21cdbaca2973cc69454013a2bbb995..175581cf478c2188c101dbb14a264b4e35c14c1b 100644 (file)
@@ -1448,15 +1448,10 @@ static ssize_t show_mode(struct device *d, struct device_attribute *attr,
                return sprintf(buf, "datagram\n");
 }
 
-static ssize_t set_mode(struct device *d, struct device_attribute *attr,
-                       const char *buf, size_t count)
+int ipoib_set_mode(struct net_device *dev, const char *buf)
 {
-       struct net_device *dev = to_net_dev(d);
        struct ipoib_dev_priv *priv = netdev_priv(dev);
 
-       if (!rtnl_trylock())
-               return restart_syscall();
-
        /* flush paths if we switch modes so that connections are restarted */
        if (IPOIB_CM_SUPPORTED(dev->dev_addr) && !strcmp(buf, "connected\n")) {
                set_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
@@ -1467,7 +1462,8 @@ static ssize_t set_mode(struct device *d, struct device_attribute *attr,
                priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM;
 
                ipoib_flush_paths(dev);
-               return count;
+               rtnl_lock();
+               return 0;
        }
 
        if (!strcmp(buf, "datagram\n")) {
@@ -1476,14 +1472,32 @@ static ssize_t set_mode(struct device *d, struct device_attribute *attr,
                dev_set_mtu(dev, min(priv->mcast_mtu, dev->mtu));
                rtnl_unlock();
                ipoib_flush_paths(dev);
-
-               return count;
+               rtnl_lock();
+               return 0;
        }
-       rtnl_unlock();
 
        return -EINVAL;
 }
 
+/*
+ * sysfs "mode" store handler.  Takes the RTNL (restarting the syscall if
+ * the lock is contended) and delegates to ipoib_set_mode(), which holds
+ * the shared datagram/connected switching logic.  Returns the byte count
+ * consumed on success, or a negative errno from ipoib_set_mode().
+ */
+static ssize_t set_mode(struct device *d, struct device_attribute *attr,
+                       const char *buf, size_t count)
+{
+       struct net_device *dev = to_net_dev(d);
+       int ret;
+
+       if (!rtnl_trylock())
+               return restart_syscall();
+
+       ret = ipoib_set_mode(dev, buf);
+
+       rtnl_unlock();
+
+       if (!ret)
+               return count;
+
+       return ret;
+}
+
+
 static DEVICE_ATTR(mode, S_IWUSR | S_IRUGO, show_mode, set_mode);
 
 int ipoib_cm_add_mode_attr(struct net_device *dev)
index 1e19b5ae7c479a5865837ffa4b08f7f5c89bdcdc..d576c7aad89d491bf53f0a79e8b133f23531f0eb 100644 (file)
@@ -150,7 +150,7 @@ static int ipoib_stop(struct net_device *dev)
 
        netif_stop_queue(dev);
 
-       ipoib_ib_dev_down(dev, 0);
+       ipoib_ib_dev_down(dev, 1);
        ipoib_ib_dev_stop(dev, 0);
 
        if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
@@ -173,6 +173,11 @@ static int ipoib_stop(struct net_device *dev)
        return 0;
 }
 
+/*
+ * .ndo_uninit callback: runs the device cleanup during unregistration
+ * (ipoib_dev_cleanup() asserts the RTNL is held on this path).
+ */
+static void ipoib_uninit(struct net_device *dev)
+{
+       ipoib_dev_cleanup(dev);
+}
+
 static netdev_features_t ipoib_fix_features(struct net_device *dev, netdev_features_t features)
 {
        struct ipoib_dev_priv *priv = netdev_priv(dev);
@@ -1257,6 +1262,9 @@ out:
 void ipoib_dev_cleanup(struct net_device *dev)
 {
        struct ipoib_dev_priv *priv = netdev_priv(dev), *cpriv, *tcpriv;
+       LIST_HEAD(head);
+
+       ASSERT_RTNL();
 
        ipoib_delete_debug_files(dev);
 
@@ -1265,10 +1273,9 @@ void ipoib_dev_cleanup(struct net_device *dev)
                /* Stop GC on child */
                set_bit(IPOIB_STOP_NEIGH_GC, &cpriv->flags);
                cancel_delayed_work(&cpriv->neigh_reap_task);
-               unregister_netdev(cpriv->dev);
-               ipoib_dev_cleanup(cpriv->dev);
-               free_netdev(cpriv->dev);
+               unregister_netdevice_queue(cpriv->dev, &head);
        }
+       unregister_netdevice_many(&head);
 
        ipoib_ib_dev_cleanup(dev);
 
@@ -1286,6 +1293,7 @@ static const struct header_ops ipoib_header_ops = {
 };
 
 static const struct net_device_ops ipoib_netdev_ops = {
+       .ndo_uninit              = ipoib_uninit,
        .ndo_open                = ipoib_open,
        .ndo_stop                = ipoib_stop,
        .ndo_change_mtu          = ipoib_change_mtu,
@@ -1295,7 +1303,7 @@ static const struct net_device_ops ipoib_netdev_ops = {
        .ndo_set_rx_mode         = ipoib_set_mcast_list,
 };
 
-static void ipoib_setup(struct net_device *dev)
+void ipoib_setup(struct net_device *dev)
 {
        struct ipoib_dev_priv *priv = netdev_priv(dev);
 
@@ -1373,12 +1381,9 @@ static ssize_t show_umcast(struct device *dev,
        return sprintf(buf, "%d\n", test_bit(IPOIB_FLAG_UMCAST, &priv->flags));
 }
 
-static ssize_t set_umcast(struct device *dev,
-                         struct device_attribute *attr,
-                         const char *buf, size_t count)
+void ipoib_set_umcast(struct net_device *ndev, int umcast_val)
 {
-       struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(dev));
-       unsigned long umcast_val = simple_strtoul(buf, NULL, 0);
+       struct ipoib_dev_priv *priv = netdev_priv(ndev);
 
        if (umcast_val > 0) {
                set_bit(IPOIB_FLAG_UMCAST, &priv->flags);
@@ -1386,6 +1391,15 @@ static ssize_t set_umcast(struct device *dev,
                                "by userspace\n");
        } else
                clear_bit(IPOIB_FLAG_UMCAST, &priv->flags);
+}
+
+static ssize_t set_umcast(struct device *dev,
+                         struct device_attribute *attr,
+                         const char *buf, size_t count)
+{
+       unsigned long umcast_val = simple_strtoul(buf, NULL, 0);
+
+       ipoib_set_umcast(to_net_dev(dev), umcast_val);
 
        return count;
 }
@@ -1657,7 +1671,6 @@ static void ipoib_remove_one(struct ib_device *device)
                flush_workqueue(ipoib_workqueue);
 
                unregister_netdev(priv->dev);
-               ipoib_dev_cleanup(priv->dev);
                free_netdev(priv->dev);
        }
 
@@ -1709,8 +1722,15 @@ static int __init ipoib_init_module(void)
        if (ret)
                goto err_sa;
 
+       ret = ipoib_netlink_init();
+       if (ret)
+               goto err_client;
+
        return 0;
 
+err_client:
+       ib_unregister_client(&ipoib_client);
+
 err_sa:
        ib_sa_unregister_client(&ipoib_sa_client);
        destroy_workqueue(ipoib_workqueue);
@@ -1723,6 +1743,7 @@ err_fs:
 
 static void __exit ipoib_cleanup_module(void)
 {
+       ipoib_netlink_fini();
        ib_unregister_client(&ipoib_client);
        ib_sa_unregister_client(&ipoib_sa_client);
        ipoib_unregister_debugfs();
index 75367249f447f497851e76a0a4312b6000989ff2..cecb98a4c662b7d82053f152a3da46d653e4d6ca 100644 (file)
@@ -175,7 +175,9 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
 
        mcast->mcmember = *mcmember;
 
-       /* Set the cached Q_Key before we attach if it's the broadcast group */
+       /* Set the multicast MTU and cached Q_Key before we attach if it's
+        * the broadcast group.
+        */
        if (!memcmp(mcast->mcmember.mgid.raw, priv->dev->broadcast + 4,
                    sizeof (union ib_gid))) {
                spin_lock_irq(&priv->lock);
@@ -183,10 +185,17 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
                        spin_unlock_irq(&priv->lock);
                        return -EAGAIN;
                }
+               priv->mcast_mtu = IPOIB_UD_MTU(ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu));
                priv->qkey = be32_to_cpu(priv->broadcast->mcmember.qkey);
                spin_unlock_irq(&priv->lock);
                priv->tx_wr.wr.ud.remote_qkey = priv->qkey;
                set_qkey = 1;
+
+               if (!ipoib_cm_admin_enabled(dev)) {
+                       rtnl_lock();
+                       dev_set_mtu(dev, min(priv->mcast_mtu, priv->admin_mtu));
+                       rtnl_unlock();
+               }
        }
 
        if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
@@ -574,14 +583,6 @@ void ipoib_mcast_join_task(struct work_struct *work)
                return;
        }
 
-       priv->mcast_mtu = IPOIB_UD_MTU(ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu));
-
-       if (!ipoib_cm_admin_enabled(dev)) {
-               rtnl_lock();
-               dev_set_mtu(dev, min(priv->mcast_mtu, priv->admin_mtu));
-               rtnl_unlock();
-       }
-
        ipoib_dbg_mcast(priv, "successfully joined all multicast groups\n");
 
        clear_bit(IPOIB_MCAST_RUN, &priv->flags);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
new file mode 100644 (file)
index 0000000..7468593
--- /dev/null
@@ -0,0 +1,172 @@
+/*
+ * Copyright (c) 2012 Mellanox Technologies. -  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/netdevice.h>
+#include <linux/module.h>
+#include <net/rtnetlink.h>
+#include "ipoib.h"
+
+static const struct nla_policy ipoib_policy[IFLA_IPOIB_MAX + 1] = {
+       [IFLA_IPOIB_PKEY]       = { .type = NLA_U16 },
+       [IFLA_IPOIB_MODE]       = { .type = NLA_U16 },
+       [IFLA_IPOIB_UMCAST]     = { .type = NLA_U16 },
+};
+
+static int ipoib_fill_info(struct sk_buff *skb, const struct net_device *dev)
+{
+       struct ipoib_dev_priv *priv = netdev_priv(dev);
+       u16 val;
+
+       if (nla_put_u16(skb, IFLA_IPOIB_PKEY, priv->pkey))
+               goto nla_put_failure;
+
+       val = test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
+       if (nla_put_u16(skb, IFLA_IPOIB_MODE, val))
+               goto nla_put_failure;
+
+       val = test_bit(IPOIB_FLAG_UMCAST, &priv->flags);
+       if (nla_put_u16(skb, IFLA_IPOIB_UMCAST, val))
+               goto nla_put_failure;
+
+       return 0;
+
+nla_put_failure:
+       return -EMSGSIZE;
+}
+
+static int ipoib_changelink(struct net_device *dev,
+                           struct nlattr *tb[], struct nlattr *data[])
+{
+       u16 mode, umcast;
+       int ret = 0;
+
+       if (data[IFLA_IPOIB_MODE]) {
+               mode  = nla_get_u16(data[IFLA_IPOIB_MODE]);
+               if (mode == IPOIB_MODE_DATAGRAM)
+                       ret = ipoib_set_mode(dev, "datagram\n");
+               else if (mode == IPOIB_MODE_CONNECTED)
+                       ret = ipoib_set_mode(dev, "connected\n");
+               else
+                       ret = -EINVAL;
+
+               if (ret < 0)
+                       goto out_err;
+       }
+
+       if (data[IFLA_IPOIB_UMCAST]) {
+               umcast = nla_get_u16(data[IFLA_IPOIB_UMCAST]);
+               ipoib_set_umcast(dev, umcast);
+       }
+
+out_err:
+       return ret;
+}
+
+static int ipoib_new_child_link(struct net *src_net, struct net_device *dev,
+                              struct nlattr *tb[], struct nlattr *data[])
+{
+       struct net_device *pdev;
+       struct ipoib_dev_priv *ppriv;
+       u16 child_pkey;
+       int err;
+
+       if (!tb[IFLA_LINK])
+               return -EINVAL;
+
+       pdev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
+       if (!pdev)
+               return -ENODEV;
+
+       ppriv = netdev_priv(pdev);
+
+       if (test_bit(IPOIB_FLAG_SUBINTERFACE, &ppriv->flags)) {
+               ipoib_warn(ppriv, "child creation disallowed for child devices\n");
+               return -EINVAL;
+       }
+
+       if (!data || !data[IFLA_IPOIB_PKEY]) {
+               ipoib_dbg(ppriv, "no pkey specified, using parent pkey\n");
+               child_pkey  = ppriv->pkey;
+       } else
+               child_pkey  = nla_get_u16(data[IFLA_IPOIB_PKEY]);
+
+       err = __ipoib_vlan_add(ppriv, netdev_priv(dev), child_pkey, IPOIB_RTNL_CHILD);
+
+       if (!err && data)
+               err = ipoib_changelink(dev, tb, data);
+       return err;
+}
+
+static void ipoib_unregister_child_dev(struct net_device *dev, struct list_head *head)
+{
+       struct ipoib_dev_priv *priv, *ppriv;
+
+       priv = netdev_priv(dev);
+       ppriv = netdev_priv(priv->parent);
+
+       mutex_lock(&ppriv->vlan_mutex);
+       unregister_netdevice_queue(dev, head);
+       list_del(&priv->list);
+       mutex_unlock(&ppriv->vlan_mutex);
+}
+
+static size_t ipoib_get_size(const struct net_device *dev)
+{
+       return nla_total_size(2) +      /* IFLA_IPOIB_PKEY   */
+               nla_total_size(2) +     /* IFLA_IPOIB_MODE   */
+               nla_total_size(2);      /* IFLA_IPOIB_UMCAST */
+}
+
+static struct rtnl_link_ops ipoib_link_ops __read_mostly = {
+       .kind           = "ipoib",
+       .maxtype        = IFLA_IPOIB_MAX,
+       .policy         = ipoib_policy,
+       .priv_size      = sizeof(struct ipoib_dev_priv),
+       .setup          = ipoib_setup,
+       .newlink        = ipoib_new_child_link,
+       .changelink     = ipoib_changelink,
+       .dellink        = ipoib_unregister_child_dev,
+       .get_size       = ipoib_get_size,
+       .fill_info      = ipoib_fill_info,
+};
+
+int __init ipoib_netlink_init(void)
+{
+       return rtnl_link_register(&ipoib_link_ops);
+}
+
+void __exit ipoib_netlink_fini(void)
+{
+       rtnl_link_unregister(&ipoib_link_ops);
+}
+
+MODULE_ALIAS_RTNL_LINK("ipoib");
index d7e9740c724804afcdf20ba9c54fbe4fe91fdb87..8292554bccb5de2387d7e858eaaa1d27ddb6b8b4 100644 (file)
@@ -49,47 +49,11 @@ static ssize_t show_parent(struct device *d, struct device_attribute *attr,
 }
 static DEVICE_ATTR(parent, S_IRUGO, show_parent, NULL);
 
-int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
+int __ipoib_vlan_add(struct ipoib_dev_priv *ppriv, struct ipoib_dev_priv *priv,
+                    u16 pkey, int type)
 {
-       struct ipoib_dev_priv *ppriv, *priv;
-       char intf_name[IFNAMSIZ];
        int result;
 
-       if (!capable(CAP_NET_ADMIN))
-               return -EPERM;
-
-       ppriv = netdev_priv(pdev);
-
-       if (!rtnl_trylock())
-               return restart_syscall();
-       mutex_lock(&ppriv->vlan_mutex);
-
-       /*
-        * First ensure this isn't a duplicate. We check the parent device and
-        * then all of the child interfaces to make sure the Pkey doesn't match.
-        */
-       if (ppriv->pkey == pkey) {
-               result = -ENOTUNIQ;
-               priv = NULL;
-               goto err;
-       }
-
-       list_for_each_entry(priv, &ppriv->child_intfs, list) {
-               if (priv->pkey == pkey) {
-                       result = -ENOTUNIQ;
-                       priv = NULL;
-                       goto err;
-               }
-       }
-
-       snprintf(intf_name, sizeof intf_name, "%s.%04x",
-                ppriv->dev->name, pkey);
-       priv = ipoib_intf_alloc(intf_name);
-       if (!priv) {
-               result = -ENOMEM;
-               goto err;
-       }
-
        priv->max_ib_mtu = ppriv->max_ib_mtu;
        /* MTU will be reset when mcast join happens */
        priv->dev->mtu   = IPOIB_UD_MTU(priv->max_ib_mtu);
@@ -124,24 +88,27 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
 
        ipoib_create_debug_files(priv->dev);
 
-       if (ipoib_cm_add_mode_attr(priv->dev))
-               goto sysfs_failed;
-       if (ipoib_add_pkey_attr(priv->dev))
-               goto sysfs_failed;
-       if (ipoib_add_umcast_attr(priv->dev))
-               goto sysfs_failed;
-
-       if (device_create_file(&priv->dev->dev, &dev_attr_parent))
-               goto sysfs_failed;
+       /* RTNL children don't need proprietary sysfs entries */
+       if (type == IPOIB_LEGACY_CHILD) {
+               if (ipoib_cm_add_mode_attr(priv->dev))
+                       goto sysfs_failed;
+               if (ipoib_add_pkey_attr(priv->dev))
+                       goto sysfs_failed;
+               if (ipoib_add_umcast_attr(priv->dev))
+                       goto sysfs_failed;
+
+               if (device_create_file(&priv->dev->dev, &dev_attr_parent))
+                       goto sysfs_failed;
+       }
 
+       priv->child_type  = type;
+       priv->dev->iflink = ppriv->dev->ifindex;
        list_add_tail(&priv->list, &ppriv->child_intfs);
 
-       mutex_unlock(&ppriv->vlan_mutex);
-       rtnl_unlock();
-
        return 0;
 
 sysfs_failed:
+       result = -ENOMEM;
        ipoib_delete_debug_files(priv->dev);
        unregister_netdevice(priv->dev);
 
@@ -149,11 +116,60 @@ register_failed:
        ipoib_dev_cleanup(priv->dev);
 
 err:
+       return result;
+}
+
+int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
+{
+       struct ipoib_dev_priv *ppriv, *priv;
+       char intf_name[IFNAMSIZ];
+       struct ipoib_dev_priv *tpriv;
+       int result;
+
+       if (!capable(CAP_NET_ADMIN))
+               return -EPERM;
+
+       ppriv = netdev_priv(pdev);
+
+       snprintf(intf_name, sizeof intf_name, "%s.%04x",
+                ppriv->dev->name, pkey);
+       priv = ipoib_intf_alloc(intf_name);
+       if (!priv)
+               return -ENOMEM;
+
+       if (!rtnl_trylock())
+               return restart_syscall();
+
+       mutex_lock(&ppriv->vlan_mutex);
+
+       /*
+        * First ensure this isn't a duplicate. We check the parent device and
+        * then all of the legacy child interfaces to make sure the Pkey
+        * doesn't match.
+        */
+       if (ppriv->pkey == pkey) {
+               result = -ENOTUNIQ;
+               goto out;
+       }
+
+       list_for_each_entry(tpriv, &ppriv->child_intfs, list) {
+               if (tpriv->pkey == pkey &&
+                   tpriv->child_type == IPOIB_LEGACY_CHILD) {
+                       result = -ENOTUNIQ;
+                       goto out;
+               }
+       }
+
+       result = __ipoib_vlan_add(ppriv, priv, pkey, IPOIB_LEGACY_CHILD);
+
+out:
        mutex_unlock(&ppriv->vlan_mutex);
-       rtnl_unlock();
-       if (priv)
+
+       if (result)
                free_netdev(priv->dev);
 
+       rtnl_unlock();
+
        return result;
 }
 
@@ -171,9 +187,9 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
                return restart_syscall();
        mutex_lock(&ppriv->vlan_mutex);
        list_for_each_entry_safe(priv, tpriv, &ppriv->child_intfs, list) {
-               if (priv->pkey == pkey) {
+               if (priv->pkey == pkey &&
+                   priv->child_type == IPOIB_LEGACY_CHILD) {
                        unregister_netdevice(priv->dev);
-                       ipoib_dev_cleanup(priv->dev);
                        list_del(&priv->list);
                        dev = priv->dev;
                        break;
index 1b5b0c7300549cefee683b3d5745b39a446b5bca..922d845f76b0a25b0090d581efeef35578fdd868 100644 (file)
@@ -638,9 +638,9 @@ static void srp_reset_req(struct srp_target_port *target, struct srp_request *re
        struct scsi_cmnd *scmnd = srp_claim_req(target, req, NULL);
 
        if (scmnd) {
+               srp_free_req(target, req, scmnd, 0);
                scmnd->result = DID_RESET << 16;
                scmnd->scsi_done(scmnd);
-               srp_free_req(target, req, scmnd, 0);
        }
 }
 
@@ -1687,6 +1687,7 @@ static int srp_abort(struct scsi_cmnd *scmnd)
                          SRP_TSK_ABORT_TASK);
        srp_free_req(target, req, scmnd, 0);
        scmnd->result = DID_ABORT << 16;
+       scmnd->scsi_done(scmnd);
 
        return SUCCESS;
 }
index e90ee3d306132aa91348485110ee8ac3fd119665..650177a3c858ba13159bbed2dbc9148c66370ec3 100644 (file)
@@ -33,7 +33,7 @@ static void system_power_event(unsigned int keycode)
 }
 
 static void apmpower_event(struct input_handle *handle, unsigned int type,
-                       unsigned int code, int value)
+                          unsigned int code, int value)
 {
        /* only react on key down events */
        if (value != 1)
index 480eb9d9876a57a79f4b7b5c599aa38db909faea..f50f6dd92274c3d25f686c02e5f710d71a9f4102 100644 (file)
@@ -138,8 +138,8 @@ int input_ff_upload(struct input_dev *dev, struct ff_effect *effect,
 
        if (effect->id == -1) {
                for (id = 0; id < ff->max_effects; id++)
-                    if (!ff->effect_owners[id])
-                       break;
+                       if (!ff->effect_owners[id])
+                               break;
 
                if (id >= ff->max_effects) {
                        ret = -ENOSPC;
index b107922514fb378a89e8951d985a0e89d5ed481d..74c0d8c6002aca6f74957c7f3a596f63e550199c 100644 (file)
@@ -72,12 +72,14 @@ static const struct ff_envelope *get_envelope(const struct ff_effect *effect)
        static const struct ff_envelope empty_envelope;
 
        switch (effect->type) {
-               case FF_PERIODIC:
-                       return &effect->u.periodic.envelope;
-               case FF_CONSTANT:
-                       return &effect->u.constant.envelope;
-               default:
-                       return &empty_envelope;
+       case FF_PERIODIC:
+               return &effect->u.periodic.envelope;
+
+       case FF_CONSTANT:
+               return &effect->u.constant.envelope;
+
+       default:
+               return &empty_envelope;
        }
 }
 
index 5244f3d05b12af769cf8c2d2a3010e17feb21f05..ace3f7c4226d60325fb11b1959fae61180cb3869 100644 (file)
@@ -918,18 +918,10 @@ int input_set_keycode(struct input_dev *dev,
 }
 EXPORT_SYMBOL(input_set_keycode);
 
-#define MATCH_BIT(bit, max) \
-               for (i = 0; i < BITS_TO_LONGS(max); i++) \
-                       if ((id->bit[i] & dev->bit[i]) != id->bit[i]) \
-                               break; \
-               if (i != BITS_TO_LONGS(max)) \
-                       continue;
-
 static const struct input_device_id *input_match_device(struct input_handler *handler,
                                                        struct input_dev *dev)
 {
        const struct input_device_id *id;
-       int i;
 
        for (id = handler->id_table; id->flags || id->driver_info; id++) {
 
@@ -949,15 +941,32 @@ static const struct input_device_id *input_match_device(struct input_handler *ha
                        if (id->version != dev->id.version)
                                continue;
 
-               MATCH_BIT(evbit,  EV_MAX);
-               MATCH_BIT(keybit, KEY_MAX);
-               MATCH_BIT(relbit, REL_MAX);
-               MATCH_BIT(absbit, ABS_MAX);
-               MATCH_BIT(mscbit, MSC_MAX);
-               MATCH_BIT(ledbit, LED_MAX);
-               MATCH_BIT(sndbit, SND_MAX);
-               MATCH_BIT(ffbit,  FF_MAX);
-               MATCH_BIT(swbit,  SW_MAX);
+               if (!bitmap_subset(id->evbit, dev->evbit, EV_MAX))
+                       continue;
+
+               if (!bitmap_subset(id->keybit, dev->keybit, KEY_MAX))
+                       continue;
+
+               if (!bitmap_subset(id->relbit, dev->relbit, REL_MAX))
+                       continue;
+
+               if (!bitmap_subset(id->absbit, dev->absbit, ABS_MAX))
+                       continue;
+
+               if (!bitmap_subset(id->mscbit, dev->mscbit, MSC_MAX))
+                       continue;
+
+               if (!bitmap_subset(id->ledbit, dev->ledbit, LED_MAX))
+                       continue;
+
+               if (!bitmap_subset(id->sndbit, dev->sndbit, SND_MAX))
+                       continue;
+
+               if (!bitmap_subset(id->ffbit, dev->ffbit, FF_MAX))
+                       continue;
+
+               if (!bitmap_subset(id->swbit, dev->swbit, SW_MAX))
+                       continue;
 
                if (!handler->match || handler->match(handler, dev))
                        return id;
index 26043cc6a0165f80a36e7ee31436cc128a318de1..78f323ea1e4bff2821dcd8cd73bedf90ede73cbc 100644 (file)
@@ -711,7 +711,7 @@ static long joydev_ioctl(struct file *file,
 
        case JS_SET_ALL:
                retval = copy_from_user(&joydev->glue, argp,
-                                       sizeof(joydev->glue)) ? -EFAULT: 0;
+                                       sizeof(joydev->glue)) ? -EFAULT : 0;
                break;
 
        case JS_GET_ALL:
index cbb1add43d5e0b78e64d286a584abb3e799c6540..6a68041c261db45eafe9f4cadea39f1b032edb51 100644 (file)
@@ -43,11 +43,9 @@ struct gpio_button_data {
 };
 
 struct gpio_keys_drvdata {
+       const struct gpio_keys_platform_data *pdata;
        struct input_dev *input;
        struct mutex disable_lock;
-       unsigned int n_buttons;
-       int (*enable)(struct device *dev);
-       void (*disable)(struct device *dev);
        struct gpio_button_data data[0];
 };
 
@@ -171,7 +169,7 @@ static ssize_t gpio_keys_attr_show_helper(struct gpio_keys_drvdata *ddata,
        if (!bits)
                return -ENOMEM;
 
-       for (i = 0; i < ddata->n_buttons; i++) {
+       for (i = 0; i < ddata->pdata->nbuttons; i++) {
                struct gpio_button_data *bdata = &ddata->data[i];
 
                if (bdata->button->type != type)
@@ -219,7 +217,7 @@ static ssize_t gpio_keys_attr_store_helper(struct gpio_keys_drvdata *ddata,
                goto out;
 
        /* First validate */
-       for (i = 0; i < ddata->n_buttons; i++) {
+       for (i = 0; i < ddata->pdata->nbuttons; i++) {
                struct gpio_button_data *bdata = &ddata->data[i];
 
                if (bdata->button->type != type)
@@ -234,7 +232,7 @@ static ssize_t gpio_keys_attr_store_helper(struct gpio_keys_drvdata *ddata,
 
        mutex_lock(&ddata->disable_lock);
 
-       for (i = 0; i < ddata->n_buttons; i++) {
+       for (i = 0; i < ddata->pdata->nbuttons; i++) {
                struct gpio_button_data *bdata = &ddata->data[i];
 
                if (bdata->button->type != type)
@@ -346,6 +344,9 @@ static void gpio_keys_gpio_work_func(struct work_struct *work)
                container_of(work, struct gpio_button_data, work);
 
        gpio_keys_gpio_report_event(bdata);
+
+       if (bdata->button->wakeup)
+               pm_relax(bdata->input->dev.parent);
 }
 
 static void gpio_keys_gpio_timer(unsigned long _data)
@@ -361,6 +362,8 @@ static irqreturn_t gpio_keys_gpio_isr(int irq, void *dev_id)
 
        BUG_ON(irq != bdata->irq);
 
+       if (bdata->button->wakeup)
+               pm_stay_awake(bdata->input->dev.parent);
        if (bdata->timer_debounce)
                mod_timer(&bdata->timer,
                        jiffies + msecs_to_jiffies(bdata->timer_debounce));
@@ -397,6 +400,9 @@ static irqreturn_t gpio_keys_irq_isr(int irq, void *dev_id)
        spin_lock_irqsave(&bdata->lock, flags);
 
        if (!bdata->key_pressed) {
+               if (bdata->button->wakeup)
+                       pm_wakeup_event(bdata->input->dev.parent, 0);
+
                input_event(input, EV_KEY, button->code, 1);
                input_sync(input);
 
@@ -523,56 +529,64 @@ fail:
 static int gpio_keys_open(struct input_dev *input)
 {
        struct gpio_keys_drvdata *ddata = input_get_drvdata(input);
+       const struct gpio_keys_platform_data *pdata = ddata->pdata;
 
-       return ddata->enable ? ddata->enable(input->dev.parent) : 0;
+       return pdata->enable ? pdata->enable(input->dev.parent) : 0;
 }
 
 static void gpio_keys_close(struct input_dev *input)
 {
        struct gpio_keys_drvdata *ddata = input_get_drvdata(input);
+       const struct gpio_keys_platform_data *pdata = ddata->pdata;
 
-       if (ddata->disable)
-               ddata->disable(input->dev.parent);
+       if (pdata->disable)
+               pdata->disable(input->dev.parent);
 }
 
 /*
  * Handlers for alternative sources of platform_data
  */
+
 #ifdef CONFIG_OF
 /*
  * Translate OpenFirmware node properties into platform_data
  */
-static int gpio_keys_get_devtree_pdata(struct device *dev,
-                           struct gpio_keys_platform_data *pdata)
+static struct gpio_keys_platform_data * __devinit
+gpio_keys_get_devtree_pdata(struct device *dev)
 {
        struct device_node *node, *pp;
+       struct gpio_keys_platform_data *pdata;
+       struct gpio_keys_button *button;
+       int error;
+       int nbuttons;
        int i;
-       struct gpio_keys_button *buttons;
-       u32 reg;
 
        node = dev->of_node;
-       if (node == NULL)
-               return -ENODEV;
-
-       memset(pdata, 0, sizeof *pdata);
+       if (!node) {
+               error = -ENODEV;
+               goto err_out;
+       }
 
-       pdata->rep = !!of_get_property(node, "autorepeat", NULL);
+       nbuttons = of_get_child_count(node);
+       if (nbuttons == 0) {
+               error = -ENODEV;
+               goto err_out;
+       }
 
-       /* First count the subnodes */
-       pp = NULL;
-       while ((pp = of_get_next_child(node, pp)))
-               pdata->nbuttons++;
+       pdata = kzalloc(sizeof(*pdata) + nbuttons * (sizeof *button),
+                       GFP_KERNEL);
+       if (!pdata) {
+               error = -ENOMEM;
+               goto err_out;
+       }
 
-       if (pdata->nbuttons == 0)
-               return -ENODEV;
+       pdata->buttons = (struct gpio_keys_button *)(pdata + 1);
+       pdata->nbuttons = nbuttons;
 
-       buttons = kzalloc(pdata->nbuttons * (sizeof *buttons), GFP_KERNEL);
-       if (!buttons)
-               return -ENOMEM;
+       pdata->rep = !!of_get_property(node, "autorepeat", NULL);
 
-       pp = NULL;
        i = 0;
-       while ((pp = of_get_next_child(node, pp))) {
+       for_each_child_of_node(node, pp) {
                enum of_gpio_flags flags;
 
                if (!of_find_property(pp, "gpios", NULL)) {
@@ -580,39 +594,42 @@ static int gpio_keys_get_devtree_pdata(struct device *dev,
                        dev_warn(dev, "Found button without gpios\n");
                        continue;
                }
-               buttons[i].gpio = of_get_gpio_flags(pp, 0, &flags);
-               buttons[i].active_low = flags & OF_GPIO_ACTIVE_LOW;
 
-               if (of_property_read_u32(pp, "linux,code", &reg)) {
-                       dev_err(dev, "Button without keycode: 0x%x\n", buttons[i].gpio);
-                       goto out_fail;
-               }
-               buttons[i].code = reg;
+               button = &pdata->buttons[i++];
 
-               buttons[i].desc = of_get_property(pp, "label", NULL);
+               button->gpio = of_get_gpio_flags(pp, 0, &flags);
+               button->active_low = flags & OF_GPIO_ACTIVE_LOW;
 
-               if (of_property_read_u32(pp, "linux,input-type", &reg) == 0)
-                       buttons[i].type = reg;
-               else
-                       buttons[i].type = EV_KEY;
+               if (of_property_read_u32(pp, "linux,code", &button->code)) {
+                       dev_err(dev, "Button without keycode: 0x%x\n",
+                               button->gpio);
+                       error = -EINVAL;
+                       goto err_free_pdata;
+               }
 
-               buttons[i].wakeup = !!of_get_property(pp, "gpio-key,wakeup", NULL);
+               button->desc = of_get_property(pp, "label", NULL);
 
-               if (of_property_read_u32(pp, "debounce-interval", &reg) == 0)
-                       buttons[i].debounce_interval = reg;
-               else
-                       buttons[i].debounce_interval = 5;
+               if (of_property_read_u32(pp, "linux,input-type", &button->type))
+                       button->type = EV_KEY;
 
-               i++;
+               button->wakeup = !!of_get_property(pp, "gpio-key,wakeup", NULL);
+
+               if (of_property_read_u32(pp, "debounce-interval",
+                                        &button->debounce_interval))
+                       button->debounce_interval = 5;
        }
 
-       pdata->buttons = buttons;
+       if (pdata->nbuttons == 0) {
+               error = -EINVAL;
+               goto err_free_pdata;
+       }
 
-       return 0;
+       return pdata;
 
-out_fail:
-       kfree(buttons);
-       return -ENODEV;
+err_free_pdata:
+       kfree(pdata);
+err_out:
+       return ERR_PTR(error);
 }
 
 static struct of_device_id gpio_keys_of_match[] = {
@@ -623,14 +640,12 @@ MODULE_DEVICE_TABLE(of, gpio_keys_of_match);
 
 #else
 
-static int gpio_keys_get_devtree_pdata(struct device *dev,
-                           struct gpio_keys_platform_data *altp)
+static inline struct gpio_keys_platform_data *
+gpio_keys_get_devtree_pdata(struct device *dev)
 {
-       return -ENODEV;
+       return ERR_PTR(-ENODEV);
 }
 
-#define gpio_keys_of_match NULL
-
 #endif
 
 static void gpio_remove_key(struct gpio_button_data *bdata)
@@ -645,19 +660,17 @@ static void gpio_remove_key(struct gpio_button_data *bdata)
 
 static int __devinit gpio_keys_probe(struct platform_device *pdev)
 {
-       const struct gpio_keys_platform_data *pdata = pdev->dev.platform_data;
-       struct gpio_keys_drvdata *ddata;
        struct device *dev = &pdev->dev;
-       struct gpio_keys_platform_data alt_pdata;
+       const struct gpio_keys_platform_data *pdata = dev_get_platdata(dev);
+       struct gpio_keys_drvdata *ddata;
        struct input_dev *input;
        int i, error;
        int wakeup = 0;
 
        if (!pdata) {
-               error = gpio_keys_get_devtree_pdata(dev, &alt_pdata);
-               if (error)
-                       return error;
-               pdata = &alt_pdata;
+               pdata = gpio_keys_get_devtree_pdata(dev);
+               if (IS_ERR(pdata))
+                       return PTR_ERR(pdata);
        }
 
        ddata = kzalloc(sizeof(struct gpio_keys_drvdata) +
@@ -670,10 +683,8 @@ static int __devinit gpio_keys_probe(struct platform_device *pdev)
                goto fail1;
        }
 
+       ddata->pdata = pdata;
        ddata->input = input;
-       ddata->n_buttons = pdata->nbuttons;
-       ddata->enable = pdata->enable;
-       ddata->disable = pdata->disable;
        mutex_init(&ddata->disable_lock);
 
        platform_set_drvdata(pdev, ddata);
@@ -742,9 +753,9 @@ static int __devinit gpio_keys_probe(struct platform_device *pdev)
  fail1:
        input_free_device(input);
        kfree(ddata);
-       /* If we have no platform_data, we allocated buttons dynamically. */
-       if (!pdev->dev.platform_data)
-               kfree(pdata->buttons);
+       /* If we have no platform data, we allocated pdata dynamically. */
+       if (!dev_get_platdata(&pdev->dev))
+               kfree(pdata);
 
        return error;
 }
@@ -759,18 +770,14 @@ static int __devexit gpio_keys_remove(struct platform_device *pdev)
 
        device_init_wakeup(&pdev->dev, 0);
 
-       for (i = 0; i < ddata->n_buttons; i++)
+       for (i = 0; i < ddata->pdata->nbuttons; i++)
                gpio_remove_key(&ddata->data[i]);
 
        input_unregister_device(input);
 
-       /*
-        * If we had no platform_data, we allocated buttons dynamically, and
-        * must free them here. ddata->data[0].button is the pointer to the
-        * beginning of the allocated array.
-        */
-       if (!pdev->dev.platform_data)
-               kfree(ddata->data[0].button);
+       /* If we have no platform data, we allocated pdata dynamically. */
+       if (!dev_get_platdata(&pdev->dev))
+               kfree(ddata->pdata);
 
        kfree(ddata);
 
@@ -784,7 +791,7 @@ static int gpio_keys_suspend(struct device *dev)
        int i;
 
        if (device_may_wakeup(dev)) {
-               for (i = 0; i < ddata->n_buttons; i++) {
+               for (i = 0; i < ddata->pdata->nbuttons; i++) {
                        struct gpio_button_data *bdata = &ddata->data[i];
                        if (bdata->button->wakeup)
                                enable_irq_wake(bdata->irq);
@@ -799,7 +806,7 @@ static int gpio_keys_resume(struct device *dev)
        struct gpio_keys_drvdata *ddata = dev_get_drvdata(dev);
        int i;
 
-       for (i = 0; i < ddata->n_buttons; i++) {
+       for (i = 0; i < ddata->pdata->nbuttons; i++) {
                struct gpio_button_data *bdata = &ddata->data[i];
                if (bdata->button->wakeup && device_may_wakeup(dev))
                        disable_irq_wake(bdata->irq);
@@ -822,7 +829,7 @@ static struct platform_driver gpio_keys_device_driver = {
                .name   = "gpio-keys",
                .owner  = THIS_MODULE,
                .pm     = &gpio_keys_pm_ops,
-               .of_match_table = gpio_keys_of_match,
+               .of_match_table = of_match_ptr(gpio_keys_of_match),
        }
 };
 
index 20c8ab1722148251ebd748def95337fd2b25519e..f2142de789e76a44cc1fc316b90ef66a6fe46c0a 100644 (file)
@@ -25,6 +25,8 @@
 #include <linux/platform_device.h>
 #include <linux/gpio.h>
 #include <linux/gpio_keys.h>
+#include <linux/of_platform.h>
+#include <linux/of_gpio.h>
 
 #define DRV_NAME       "gpio-keys-polled"
 
@@ -38,7 +40,7 @@ struct gpio_keys_button_data {
 struct gpio_keys_polled_dev {
        struct input_polled_dev *poll_dev;
        struct device *dev;
-       struct gpio_keys_platform_data *pdata;
+       const struct gpio_keys_platform_data *pdata;
        struct gpio_keys_button_data data[0];
 };
 
@@ -67,11 +69,11 @@ static void gpio_keys_polled_check_state(struct input_dev *input,
 static void gpio_keys_polled_poll(struct input_polled_dev *dev)
 {
        struct gpio_keys_polled_dev *bdev = dev->private;
-       struct gpio_keys_platform_data *pdata = bdev->pdata;
+       const struct gpio_keys_platform_data *pdata = bdev->pdata;
        struct input_dev *input = dev->input;
        int i;
 
-       for (i = 0; i < bdev->pdata->nbuttons; i++) {
+       for (i = 0; i < pdata->nbuttons; i++) {
                struct gpio_keys_button_data *bdata = &bdev->data[i];
 
                if (bdata->count < bdata->threshold)
@@ -85,7 +87,7 @@ static void gpio_keys_polled_poll(struct input_polled_dev *dev)
 static void gpio_keys_polled_open(struct input_polled_dev *dev)
 {
        struct gpio_keys_polled_dev *bdev = dev->private;
-       struct gpio_keys_platform_data *pdata = bdev->pdata;
+       const struct gpio_keys_platform_data *pdata = bdev->pdata;
 
        if (pdata->enable)
                pdata->enable(bdev->dev);
@@ -94,31 +96,139 @@ static void gpio_keys_polled_open(struct input_polled_dev *dev)
 static void gpio_keys_polled_close(struct input_polled_dev *dev)
 {
        struct gpio_keys_polled_dev *bdev = dev->private;
-       struct gpio_keys_platform_data *pdata = bdev->pdata;
+       const struct gpio_keys_platform_data *pdata = bdev->pdata;
 
        if (pdata->disable)
                pdata->disable(bdev->dev);
 }
 
+#ifdef CONFIG_OF
+static struct gpio_keys_platform_data * __devinit
+gpio_keys_polled_get_devtree_pdata(struct device *dev)
+{
+       struct device_node *node, *pp;
+       struct gpio_keys_platform_data *pdata;
+       struct gpio_keys_button *button;
+       int error;
+       int nbuttons;
+       int i;
+
+       node = dev->of_node;
+       if (!node)
+               return NULL;
+
+       nbuttons = of_get_child_count(node);
+       if (nbuttons == 0)
+               return NULL;
+
+       pdata = kzalloc(sizeof(*pdata) + nbuttons * (sizeof *button),
+                       GFP_KERNEL);
+       if (!pdata) {
+               error = -ENOMEM;
+               goto err_out;
+       }
+
+       pdata->buttons = (struct gpio_keys_button *)(pdata + 1);
+       pdata->nbuttons = nbuttons;
+
+       pdata->rep = !!of_get_property(node, "autorepeat", NULL);
+       of_property_read_u32(node, "poll-interval", &pdata->poll_interval);
+
+       i = 0;
+       for_each_child_of_node(node, pp) {
+               enum of_gpio_flags flags;
+
+               if (!of_find_property(pp, "gpios", NULL)) {
+                       pdata->nbuttons--;
+                       dev_warn(dev, "Found button without gpios\n");
+                       continue;
+               }
+
+               button = &pdata->buttons[i++];
+
+               button->gpio = of_get_gpio_flags(pp, 0, &flags);
+               button->active_low = flags & OF_GPIO_ACTIVE_LOW;
+
+               if (of_property_read_u32(pp, "linux,code", &button->code)) {
+                       dev_err(dev, "Button without keycode: 0x%x\n",
+                               button->gpio);
+                       error = -EINVAL;
+                       goto err_free_pdata;
+               }
+
+               button->desc = of_get_property(pp, "label", NULL);
+
+               if (of_property_read_u32(pp, "linux,input-type", &button->type))
+                       button->type = EV_KEY;
+
+               button->wakeup = !!of_get_property(pp, "gpio-key,wakeup", NULL);
+
+               if (of_property_read_u32(pp, "debounce-interval",
+                                        &button->debounce_interval))
+                       button->debounce_interval = 5;
+       }
+
+       if (pdata->nbuttons == 0) {
+               error = -EINVAL;
+               goto err_free_pdata;
+       }
+
+       return pdata;
+
+err_free_pdata:
+       kfree(pdata);
+err_out:
+       return ERR_PTR(error);
+}
+
+static struct of_device_id gpio_keys_polled_of_match[] = {
+       { .compatible = "gpio-keys-polled", },
+       { },
+};
+MODULE_DEVICE_TABLE(of, gpio_keys_polled_of_match);
+
+#else
+
+static inline struct gpio_keys_platform_data *
+gpio_keys_polled_get_devtree_pdata(struct device *dev)
+{
+       return NULL;
+}
+#endif
+
 static int __devinit gpio_keys_polled_probe(struct platform_device *pdev)
 {
-       struct gpio_keys_platform_data *pdata = pdev->dev.platform_data;
        struct device *dev = &pdev->dev;
+       const struct gpio_keys_platform_data *pdata = dev_get_platdata(dev);
        struct gpio_keys_polled_dev *bdev;
        struct input_polled_dev *poll_dev;
        struct input_dev *input;
        int error;
        int i;
 
-       if (!pdata || !pdata->poll_interval)
-               return -EINVAL;
+       if (!pdata) {
+               pdata = gpio_keys_polled_get_devtree_pdata(dev);
+               if (IS_ERR(pdata))
+                       return PTR_ERR(pdata);
+               if (!pdata) {
+                       dev_err(dev, "missing platform data\n");
+                       return -EINVAL;
+               }
+       }
+
+       if (!pdata->poll_interval) {
+               dev_err(dev, "missing poll_interval value\n");
+               error = -EINVAL;
+               goto err_free_pdata;
+       }
 
        bdev = kzalloc(sizeof(struct gpio_keys_polled_dev) +
                       pdata->nbuttons * sizeof(struct gpio_keys_button_data),
                       GFP_KERNEL);
        if (!bdev) {
                dev_err(dev, "no memory for private data\n");
-               return -ENOMEM;
+               error = -ENOMEM;
+               goto err_free_pdata;
        }
 
        poll_dev = input_allocate_polled_device();
@@ -197,7 +307,7 @@ static int __devinit gpio_keys_polled_probe(struct platform_device *pdev)
        /* report initial state of the buttons */
        for (i = 0; i < pdata->nbuttons; i++)
                gpio_keys_polled_check_state(input, &pdata->buttons[i],
-                                        &bdev->data[i]);
+                                            &bdev->data[i]);
 
        return 0;
 
@@ -209,15 +319,20 @@ err_free_gpio:
 
 err_free_bdev:
        kfree(bdev);
-
        platform_set_drvdata(pdev, NULL);
+
+err_free_pdata:
+       /* If we have no platform_data, we allocated pdata dynamically.  */
+       if (!dev_get_platdata(&pdev->dev))
+               kfree(pdata);
+
        return error;
 }
 
 static int __devexit gpio_keys_polled_remove(struct platform_device *pdev)
 {
        struct gpio_keys_polled_dev *bdev = platform_get_drvdata(pdev);
-       struct gpio_keys_platform_data *pdata = bdev->pdata;
+       const struct gpio_keys_platform_data *pdata = bdev->pdata;
        int i;
 
        input_unregister_polled_device(bdev->poll_dev);
@@ -227,6 +342,13 @@ static int __devexit gpio_keys_polled_remove(struct platform_device *pdev)
 
        input_free_polled_device(bdev->poll_dev);
 
+       /*
+        * If we had no platform_data, we allocated pdata dynamically and
+        * must free it here.
+        */
+       if (!dev_get_platdata(&pdev->dev))
+               kfree(pdata);
+
        kfree(bdev);
        platform_set_drvdata(pdev, NULL);
 
@@ -239,6 +361,7 @@ static struct platform_driver gpio_keys_polled_driver = {
        .driver = {
                .name   = DRV_NAME,
                .owner  = THIS_MODULE,
+               .of_match_table = of_match_ptr(gpio_keys_polled_of_match),
        },
 };
 module_platform_driver(gpio_keys_polled_driver);
index 6d6b1427ae12fd6ce932c9188aba37ac44a3d321..4a5fcc8026f5630670f03cfdde498721eb55d97c 100644 (file)
@@ -179,7 +179,7 @@ static void omap_kp_tasklet(unsigned long data)
        memcpy(keypad_state, new_state, sizeof(keypad_state));
 
        if (key_down) {
-                int delay = HZ / 20;
+               int delay = HZ / 20;
                /* some key is pressed - keep irq disabled and use timer
                 * to poll the keypad */
                if (spurious)
@@ -346,9 +346,9 @@ err4:
 err3:
        device_remove_file(&pdev->dev, &dev_attr_enable);
 err2:
-       for (i = row_idx - 1; i >=0; i--)
+       for (i = row_idx - 1; i >= 0; i--)
                gpio_free(row_gpios[i]);
-       for (i = col_idx - 1; i >=0; i--)
+       for (i = col_idx - 1; i >= 0; i--)
                gpio_free(col_gpios[i]);
 
        kfree(omap_kp);
index e7a5e36e1203c26545ccb44dc7c4af82c9509011..76b7d430d03a4a26f18666469413be1449f8fc20 100644 (file)
@@ -156,8 +156,7 @@ static irqreturn_t qt2160_irq(int irq, void *_qt2160)
 
        spin_lock_irqsave(&qt2160->lock, flags);
 
-       __cancel_delayed_work(&qt2160->dwork);
-       schedule_delayed_work(&qt2160->dwork, 0);
+       mod_delayed_work(system_wq, &qt2160->dwork, 0);
 
        spin_unlock_irqrestore(&qt2160->lock, flags);
 
index a061ba603a29f05861b9cf0e28ba9ba0f377784f..277e26dc910e40c8522722a5895b240d51a6b6d8 100644 (file)
@@ -256,7 +256,7 @@ static struct samsung_keypad_platdata *samsung_keypad_parse_dt(
        struct matrix_keymap_data *keymap_data;
        uint32_t *keymap, num_rows = 0, num_cols = 0;
        struct device_node *np = dev->of_node, *key_np;
-       unsigned int key_count = 0;
+       unsigned int key_count;
 
        pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
        if (!pdata) {
@@ -280,9 +280,7 @@ static struct samsung_keypad_platdata *samsung_keypad_parse_dt(
        }
        pdata->keymap_data = keymap_data;
 
-       for_each_child_of_node(np, key_np)
-               key_count++;
-
+       key_count = of_get_child_count(np);
        keymap_data->keymap_size = key_count;
        keymap = devm_kzalloc(dev, sizeof(uint32_t) * key_count, GFP_KERNEL);
        if (!keymap) {
@@ -662,8 +660,6 @@ static const struct of_device_id samsung_keypad_dt_match[] = {
        {},
 };
 MODULE_DEVICE_TABLE(of, samsung_keypad_dt_match);
-#else
-#define samsung_keypad_dt_match NULL
 #endif
 
 static struct platform_device_id samsung_keypad_driver_ids[] = {
@@ -684,7 +680,7 @@ static struct platform_driver samsung_keypad_driver = {
        .driver         = {
                .name   = "samsung-keypad",
                .owner  = THIS_MODULE,
-               .of_match_table = samsung_keypad_dt_match,
+               .of_match_table = of_match_ptr(samsung_keypad_dt_match),
                .pm     = &samsung_keypad_pm_ops,
        },
        .id_table       = samsung_keypad_driver_ids,
index 2c1c9ed1bd9f6ad194405c73d1d14a368ee3ade9..5faaf2553e33e37eedd17d9138617b85248253e9 100644 (file)
@@ -29,8 +29,8 @@
 #include <linux/of.h>
 #include <linux/clk.h>
 #include <linux/slab.h>
+#include <linux/input/tegra_kbc.h>
 #include <mach/clk.h>
-#include <mach/kbc.h>
 
 #define KBC_MAX_DEBOUNCE_CNT   0x3ffu
 
index f07f784198b9e81139f8ed5a192f17578cdf6c75..99a49e4968d2bec4a1cf8d0d02779edd77e55de1 100644 (file)
 #include <linux/gpio.h>
 #include <linux/rotary_encoder.h>
 #include <linux/slab.h>
+#include <linux/of_platform.h>
+#include <linux/of_gpio.h>
 
 #define DRV_NAME "rotary-encoder"
 
 struct rotary_encoder {
        struct input_dev *input;
-       struct rotary_encoder_platform_data *pdata;
+       const struct rotary_encoder_platform_data *pdata;
 
        unsigned int axis;
        unsigned int pos;
@@ -43,7 +45,7 @@ struct rotary_encoder {
        char last_stable;
 };
 
-static int rotary_encoder_get_state(struct rotary_encoder_platform_data *pdata)
+static int rotary_encoder_get_state(const struct rotary_encoder_platform_data *pdata)
 {
        int a = !!gpio_get_value(pdata->gpio_a);
        int b = !!gpio_get_value(pdata->gpio_b);
@@ -56,7 +58,7 @@ static int rotary_encoder_get_state(struct rotary_encoder_platform_data *pdata)
 
 static void rotary_encoder_report_event(struct rotary_encoder *encoder)
 {
-       struct rotary_encoder_platform_data *pdata = encoder->pdata;
+       const struct rotary_encoder_platform_data *pdata = encoder->pdata;
 
        if (pdata->relative_axis) {
                input_report_rel(encoder->input,
@@ -140,36 +142,89 @@ static irqreturn_t rotary_encoder_half_period_irq(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
+#ifdef CONFIG_OF
+static struct of_device_id rotary_encoder_of_match[] = {
+       { .compatible = "rotary-encoder", },
+       { },
+};
+MODULE_DEVICE_TABLE(of, rotary_encoder_of_match);
+
+static struct rotary_encoder_platform_data * __devinit
+rotary_encoder_parse_dt(struct device *dev)
+{
+       const struct of_device_id *of_id =
+                               of_match_device(rotary_encoder_of_match, dev);
+       struct device_node *np = dev->of_node;
+       struct rotary_encoder_platform_data *pdata;
+       enum of_gpio_flags flags;
+
+       if (!of_id || !np)
+               return NULL;
+
+       pdata = kzalloc(sizeof(struct rotary_encoder_platform_data),
+                       GFP_KERNEL);
+       if (!pdata)
+               return ERR_PTR(-ENOMEM);
+
+       of_property_read_u32(np, "rotary-encoder,steps", &pdata->steps);
+       of_property_read_u32(np, "linux,axis", &pdata->axis);
+
+       pdata->gpio_a = of_get_gpio_flags(np, 0, &flags);
+       pdata->inverted_a = flags & OF_GPIO_ACTIVE_LOW;
+
+       pdata->gpio_b = of_get_gpio_flags(np, 1, &flags);
+       pdata->inverted_b = flags & OF_GPIO_ACTIVE_LOW;
+
+       pdata->relative_axis = !!of_get_property(np,
+                                       "rotary-encoder,relative-axis", NULL);
+       pdata->rollover = !!of_get_property(np,
+                                       "rotary-encoder,rollover", NULL);
+       pdata->half_period = !!of_get_property(np,
+                                       "rotary-encoder,half-period", NULL);
+
+       return pdata;
+}
+#else
+static inline struct rotary_encoder_platform_data *
+rotary_encoder_parse_dt(struct device *dev)
+{
+       return NULL;
+}
+#endif
+
 static int __devinit rotary_encoder_probe(struct platform_device *pdev)
 {
-       struct rotary_encoder_platform_data *pdata = pdev->dev.platform_data;
+       struct device *dev = &pdev->dev;
+       const struct rotary_encoder_platform_data *pdata = dev_get_platdata(dev);
        struct rotary_encoder *encoder;
        struct input_dev *input;
        irq_handler_t handler;
        int err;
 
        if (!pdata) {
-               dev_err(&pdev->dev, "missing platform data\n");
-               return -ENOENT;
+               pdata = rotary_encoder_parse_dt(dev);
+               if (IS_ERR(pdata))
+                       return PTR_ERR(pdata);
+
+               if (!pdata) {
+                       dev_err(dev, "missing platform data\n");
+                       return -EINVAL;
+               }
        }
 
        encoder = kzalloc(sizeof(struct rotary_encoder), GFP_KERNEL);
        input = input_allocate_device();
        if (!encoder || !input) {
-               dev_err(&pdev->dev, "failed to allocate memory for device\n");
                err = -ENOMEM;
                goto exit_free_mem;
        }
 
        encoder->input = input;
        encoder->pdata = pdata;
-       encoder->irq_a = gpio_to_irq(pdata->gpio_a);
-       encoder->irq_b = gpio_to_irq(pdata->gpio_b);
 
-       /* create and register the input driver */
        input->name = pdev->name;
        input->id.bustype = BUS_HOST;
-       input->dev.parent = &pdev->dev;
+       input->dev.parent = dev;
 
        if (pdata->relative_axis) {
                input->evbit[0] = BIT_MASK(EV_REL);
@@ -180,40 +235,21 @@ static int __devinit rotary_encoder_probe(struct platform_device *pdev)
                                     pdata->axis, 0, pdata->steps, 0, 1);
        }
 
-       err = input_register_device(input);
-       if (err) {
-               dev_err(&pdev->dev, "failed to register input device\n");
-               goto exit_free_mem;
-       }
-
        /* request the GPIOs */
-       err = gpio_request(pdata->gpio_a, DRV_NAME);
-       if (err) {
-               dev_err(&pdev->dev, "unable to request GPIO %d\n",
-                       pdata->gpio_a);
-               goto exit_unregister_input;
-       }
-
-       err = gpio_direction_input(pdata->gpio_a);
+       err = gpio_request_one(pdata->gpio_a, GPIOF_IN, dev_name(dev));
        if (err) {
-               dev_err(&pdev->dev, "unable to set GPIO %d for input\n",
-                       pdata->gpio_a);
-               goto exit_unregister_input;
+               dev_err(dev, "unable to request GPIO %d\n", pdata->gpio_a);
+               goto exit_free_mem;
        }
 
-       err = gpio_request(pdata->gpio_b, DRV_NAME);
+       err = gpio_request_one(pdata->gpio_b, GPIOF_IN, dev_name(dev));
        if (err) {
-               dev_err(&pdev->dev, "unable to request GPIO %d\n",
-                       pdata->gpio_b);
+               dev_err(dev, "unable to request GPIO %d\n", pdata->gpio_b);
                goto exit_free_gpio_a;
        }
 
-       err = gpio_direction_input(pdata->gpio_b);
-       if (err) {
-               dev_err(&pdev->dev, "unable to set GPIO %d for input\n",
-                       pdata->gpio_b);
-               goto exit_free_gpio_a;
-       }
+       encoder->irq_a = gpio_to_irq(pdata->gpio_a);
+       encoder->irq_b = gpio_to_irq(pdata->gpio_b);
 
        /* request the IRQs */
        if (pdata->half_period) {
@@ -227,8 +263,7 @@ static int __devinit rotary_encoder_probe(struct platform_device *pdev)
                          IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
                          DRV_NAME, encoder);
        if (err) {
-               dev_err(&pdev->dev, "unable to request IRQ %d\n",
-                       encoder->irq_a);
+               dev_err(dev, "unable to request IRQ %d\n", encoder->irq_a);
                goto exit_free_gpio_b;
        }
 
@@ -236,43 +271,55 @@ static int __devinit rotary_encoder_probe(struct platform_device *pdev)
                          IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
                          DRV_NAME, encoder);
        if (err) {
-               dev_err(&pdev->dev, "unable to request IRQ %d\n",
-                       encoder->irq_b);
+               dev_err(dev, "unable to request IRQ %d\n", encoder->irq_b);
                goto exit_free_irq_a;
        }
 
+       err = input_register_device(input);
+       if (err) {
+               dev_err(dev, "failed to register input device\n");
+               goto exit_free_irq_b;
+       }
+
        platform_set_drvdata(pdev, encoder);
 
        return 0;
 
+exit_free_irq_b:
+       free_irq(encoder->irq_b, encoder);
 exit_free_irq_a:
        free_irq(encoder->irq_a, encoder);
 exit_free_gpio_b:
        gpio_free(pdata->gpio_b);
 exit_free_gpio_a:
        gpio_free(pdata->gpio_a);
-exit_unregister_input:
-       input_unregister_device(input);
-       input = NULL; /* so we don't try to free it */
 exit_free_mem:
        input_free_device(input);
        kfree(encoder);
+       if (!dev_get_platdata(&pdev->dev))
+               kfree(pdata);
+
        return err;
 }
 
 static int __devexit rotary_encoder_remove(struct platform_device *pdev)
 {
        struct rotary_encoder *encoder = platform_get_drvdata(pdev);
-       struct rotary_encoder_platform_data *pdata = pdev->dev.platform_data;
+       const struct rotary_encoder_platform_data *pdata = encoder->pdata;
 
        free_irq(encoder->irq_a, encoder);
        free_irq(encoder->irq_b, encoder);
        gpio_free(pdata->gpio_a);
        gpio_free(pdata->gpio_b);
+
        input_unregister_device(encoder->input);
-       platform_set_drvdata(pdev, NULL);
        kfree(encoder);
 
+       if (!dev_get_platdata(&pdev->dev))
+               kfree(pdata);
+
+       platform_set_drvdata(pdev, NULL);
+
        return 0;
 }
 
@@ -282,6 +329,7 @@ static struct platform_driver rotary_encoder_driver = {
        .driver         = {
                .name   = DRV_NAME,
                .owner  = THIS_MODULE,
+               .of_match_table = of_match_ptr(rotary_encoder_of_match),
        }
 };
 module_platform_driver(rotary_encoder_driver);
index 38e4b507b94cc629ef330cf173b1b41d3765e904..b3dd96d6448b324569bdf8dc64b2af6a50e40d58 100644 (file)
@@ -42,6 +42,7 @@ static irqreturn_t powerbutton_irq(int irq, void *_pwr)
        err = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &value,
                                STS_HW_CONDITIONS);
        if (!err)  {
+               pm_wakeup_event(pwr->dev.parent, 0);
                input_report_key(pwr, KEY_POWER, value & PWR_PWRON_IRQ);
                input_sync(pwr);
        } else {
index 6b1797503e343a8cdae57e25ce84e9ac2dbe3a8c..a0a4bbaef02c242a8ee16e54bdb9337e8f80407e 100644 (file)
@@ -40,7 +40,8 @@
 #include <linux/input/mt.h>
 #include "../input-compat.h"
 
-static int uinput_dev_event(struct input_dev *dev, unsigned int type, unsigned int code, int value)
+static int uinput_dev_event(struct input_dev *dev,
+                           unsigned int type, unsigned int code, int value)
 {
        struct uinput_device    *udev = input_get_drvdata(dev);
 
@@ -56,10 +57,11 @@ static int uinput_dev_event(struct input_dev *dev, unsigned int type, unsigned i
 }
 
 /* Atomically allocate an ID for the given request. Returns 0 on success. */
-static int uinput_request_alloc_id(struct uinput_device *udev, struct uinput_request *request)
+static bool uinput_request_alloc_id(struct uinput_device *udev,
+                                   struct uinput_request *request)
 {
-       int id;
-       int err = -1;
+       unsigned int id;
+       bool reserved = false;
 
        spin_lock(&udev->requests_lock);
 
@@ -67,32 +69,35 @@ static int uinput_request_alloc_id(struct uinput_device *udev, struct uinput_req
                if (!udev->requests[id]) {
                        request->id = id;
                        udev->requests[id] = request;
-                       err = 0;
+                       reserved = true;
                        break;
                }
        }
 
        spin_unlock(&udev->requests_lock);
-       return err;
+       return reserved;
 }
 
-static struct uinput_request *uinput_request_find(struct uinput_device *udev, int id)
+static struct uinput_request *uinput_request_find(struct uinput_device *udev,
+                                                 unsigned int id)
 {
        /* Find an input request, by ID. Returns NULL if the ID isn't valid. */
-       if (id >= UINPUT_NUM_REQUESTS || id < 0)
+       if (id >= UINPUT_NUM_REQUESTS)
                return NULL;
 
        return udev->requests[id];
 }
 
-static inline int uinput_request_reserve_slot(struct uinput_device *udev, struct uinput_request *request)
+static int uinput_request_reserve_slot(struct uinput_device *udev,
+                                      struct uinput_request *request)
 {
        /* Allocate slot. If none are available right away, wait. */
        return wait_event_interruptible(udev->requests_waitq,
-                                       !uinput_request_alloc_id(udev, request));
+                                       uinput_request_alloc_id(udev, request));
 }
 
-static void uinput_request_done(struct uinput_device *udev, struct uinput_request *request)
+static void uinput_request_done(struct uinput_device *udev,
+                               struct uinput_request *request)
 {
        /* Mark slot as available */
        udev->requests[request->id] = NULL;
@@ -101,14 +106,11 @@ static void uinput_request_done(struct uinput_device *udev, struct uinput_reques
        complete(&request->done);
 }
 
-static int uinput_request_submit(struct uinput_device *udev, struct uinput_request *request)
+static int uinput_request_send(struct uinput_device *udev,
+                              struct uinput_request *request)
 {
        int retval;
 
-       retval = uinput_request_reserve_slot(udev, request);
-       if (retval)
-               return retval;
-
        retval = mutex_lock_interruptible(&udev->mutex);
        if (retval)
                return retval;
@@ -118,7 +120,12 @@ static int uinput_request_submit(struct uinput_device *udev, struct uinput_reque
                goto out;
        }
 
-       /* Tell our userspace app about this new request by queueing an input event */
+       init_completion(&request->done);
+
+       /*
+        * Tell our userspace application about this new request
+        * by queueing an input event.
+        */
        uinput_dev_event(udev->dev, EV_UINPUT, request->code, request->id);
 
  out:
@@ -126,8 +133,27 @@ static int uinput_request_submit(struct uinput_device *udev, struct uinput_reque
        return retval;
 }
 
+static int uinput_request_submit(struct uinput_device *udev,
+                                struct uinput_request *request)
+{
+       int error;
+
+       error = uinput_request_reserve_slot(udev, request);
+       if (error)
+               return error;
+
+       error = uinput_request_send(udev, request);
+       if (error) {
+               uinput_request_done(udev, request);
+               return error;
+       }
+
+       wait_for_completion(&request->done);
+       return request->retval;
+}
+
 /*
- * Fail all ouitstanding requests so handlers don't wait for the userspace
+ * Fail all outstanding requests so handlers don't wait for the userspace
  * to finish processing them.
  */
 static void uinput_flush_requests(struct uinput_device *udev)
@@ -163,11 +189,12 @@ static int uinput_dev_playback(struct input_dev *dev, int effect_id, int value)
        return uinput_dev_event(dev, EV_FF, effect_id, value);
 }
 
-static int uinput_dev_upload_effect(struct input_dev *dev, struct ff_effect *effect, struct ff_effect *old)
+static int uinput_dev_upload_effect(struct input_dev *dev,
+                                   struct ff_effect *effect,
+                                   struct ff_effect *old)
 {
        struct uinput_device *udev = input_get_drvdata(dev);
        struct uinput_request request;
-       int retval;
 
        /*
         * uinput driver does not currently support periodic effects with
@@ -180,42 +207,25 @@ static int uinput_dev_upload_effect(struct input_dev *dev, struct ff_effect *eff
                        effect->u.periodic.waveform == FF_CUSTOM)
                return -EINVAL;
 
-       request.id = -1;
-       init_completion(&request.done);
        request.code = UI_FF_UPLOAD;
        request.u.upload.effect = effect;
        request.u.upload.old = old;
 
-       retval = uinput_request_submit(udev, &request);
-       if (!retval) {
-               wait_for_completion(&request.done);
-               retval = request.retval;
-       }
-
-       return retval;
+       return uinput_request_submit(udev, &request);
 }
 
 static int uinput_dev_erase_effect(struct input_dev *dev, int effect_id)
 {
        struct uinput_device *udev = input_get_drvdata(dev);
        struct uinput_request request;
-       int retval;
 
        if (!test_bit(EV_FF, dev->evbit))
                return -ENOSYS;
 
-       request.id = -1;
-       init_completion(&request.done);
        request.code = UI_FF_ERASE;
        request.u.effect_id = effect_id;
 
-       retval = uinput_request_submit(udev, &request);
-       if (!retval) {
-               wait_for_completion(&request.done);
-               retval = request.retval;
-       }
-
-       return retval;
+       return uinput_request_submit(udev, &request);
 }
 
 static void uinput_destroy_device(struct uinput_device *udev)
@@ -347,7 +357,8 @@ static int uinput_allocate_device(struct uinput_device *udev)
        return 0;
 }
 
-static int uinput_setup_device(struct uinput_device *udev, const char __user *buffer, size_t count)
+static int uinput_setup_device(struct uinput_device *udev,
+                              const char __user *buffer, size_t count)
 {
        struct uinput_user_dev  *user_dev;
        struct input_dev        *dev;
@@ -419,7 +430,8 @@ static int uinput_setup_device(struct uinput_device *udev, const char __user *bu
        return retval;
 }
 
-static inline ssize_t uinput_inject_event(struct uinput_device *udev, const char __user *buffer, size_t count)
+static ssize_t uinput_inject_event(struct uinput_device *udev,
+                                  const char __user *buffer, size_t count)
 {
        struct input_event ev;
 
@@ -434,11 +446,15 @@ static inline ssize_t uinput_inject_event(struct uinput_device *udev, const char
        return input_event_size();
 }
 
-static ssize_t uinput_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
+static ssize_t uinput_write(struct file *file, const char __user *buffer,
+                           size_t count, loff_t *ppos)
 {
        struct uinput_device *udev = file->private_data;
        int retval;
 
+       if (count == 0)
+               return 0;
+
        retval = mutex_lock_interruptible(&udev->mutex);
        if (retval)
                return retval;
@@ -452,42 +468,74 @@ static ssize_t uinput_write(struct file *file, const char __user *buffer, size_t
        return retval;
 }
 
-static ssize_t uinput_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
+static bool uinput_fetch_next_event(struct uinput_device *udev,
+                                   struct input_event *event)
 {
-       struct uinput_device *udev = file->private_data;
-       int retval = 0;
+       bool have_event;
 
-       if (udev->state != UIST_CREATED)
-               return -ENODEV;
+       spin_lock_irq(&udev->dev->event_lock);
 
-       if (udev->head == udev->tail && (file->f_flags & O_NONBLOCK))
-               return -EAGAIN;
+       have_event = udev->head != udev->tail;
+       if (have_event) {
+               *event = udev->buff[udev->tail];
+               udev->tail = (udev->tail + 1) % UINPUT_BUFFER_SIZE;
+       }
 
-       retval = wait_event_interruptible(udev->waitq,
-                       udev->head != udev->tail || udev->state != UIST_CREATED);
-       if (retval)
-               return retval;
+       spin_unlock_irq(&udev->dev->event_lock);
 
-       retval = mutex_lock_interruptible(&udev->mutex);
-       if (retval)
-               return retval;
+       return have_event;
+}
 
-       if (udev->state != UIST_CREATED) {
-               retval = -ENODEV;
-               goto out;
-       }
+static ssize_t uinput_events_to_user(struct uinput_device *udev,
+                                    char __user *buffer, size_t count)
+{
+       struct input_event event;
+       size_t read = 0;
 
-       while (udev->head != udev->tail && retval + input_event_size() <= count) {
-               if (input_event_to_user(buffer + retval, &udev->buff[udev->tail])) {
-                       retval = -EFAULT;
-                       goto out;
-               }
-               udev->tail = (udev->tail + 1) % UINPUT_BUFFER_SIZE;
-               retval += input_event_size();
+       while (read + input_event_size() <= count &&
+              uinput_fetch_next_event(udev, &event)) {
+
+               if (input_event_to_user(buffer + read, &event))
+                       return -EFAULT;
+
+               read += input_event_size();
        }
 
- out:
-       mutex_unlock(&udev->mutex);
+       return read;
+}
+
+static ssize_t uinput_read(struct file *file, char __user *buffer,
+                          size_t count, loff_t *ppos)
+{
+       struct uinput_device *udev = file->private_data;
+       ssize_t retval;
+
+       if (count != 0 && count < input_event_size())
+               return -EINVAL;
+
+       do {
+               retval = mutex_lock_interruptible(&udev->mutex);
+               if (retval)
+                       return retval;
+
+               if (udev->state != UIST_CREATED)
+                       retval = -ENODEV;
+               else if (udev->head == udev->tail &&
+                        (file->f_flags & O_NONBLOCK))
+                       retval = -EAGAIN;
+               else
+                       retval = uinput_events_to_user(udev, buffer, count);
+
+               mutex_unlock(&udev->mutex);
+
+               if (retval || count == 0)
+                       break;
+
+               if (!(file->f_flags & O_NONBLOCK))
+                       retval = wait_event_interruptible(udev->waitq,
+                                                 udev->head != udev->tail ||
+                                                 udev->state != UIST_CREATED);
+       } while (retval == 0);
 
        return retval;
 }
@@ -516,8 +564,8 @@ static int uinput_release(struct inode *inode, struct file *file)
 
 #ifdef CONFIG_COMPAT
 struct uinput_ff_upload_compat {
-       int                     request_id;
-       int                     retval;
+       __u32                   request_id;
+       __s32                   retval;
        struct ff_effect_compat effect;
        struct ff_effect_compat old;
 };
@@ -703,7 +751,8 @@ static long uinput_ioctl_handler(struct file *file, unsigned int cmd,
                                break;
 
                        req = uinput_request_find(udev, ff_up.request_id);
-                       if (!req || req->code != UI_FF_UPLOAD || !req->u.upload.effect) {
+                       if (!req || req->code != UI_FF_UPLOAD ||
+                           !req->u.upload.effect) {
                                retval = -EINVAL;
                                break;
                        }
@@ -786,7 +835,8 @@ static long uinput_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 }
 
 #ifdef CONFIG_COMPAT
-static long uinput_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+static long uinput_compat_ioctl(struct file *file,
+                               unsigned int cmd, unsigned long arg)
 {
        return uinput_ioctl_handler(file, cmd, arg, compat_ptr(arg));
 }
@@ -831,4 +881,3 @@ MODULE_VERSION("0.3");
 
 module_init(uinput_init);
 module_exit(uinput_exit);
-
index 575f880727fe3fc0e8501c3658e5ab3e6671e439..62be888e83d03df3aaf587a4a49f9024596b59be 100644 (file)
@@ -334,11 +334,8 @@ static bool hgpk_is_byte_valid(struct psmouse *psmouse, unsigned char *packet)
 
        if (!valid)
                psmouse_dbg(psmouse,
-                           "bad data, mode %d (%d) %02x %02x %02x %02x %02x %02x\n",
-                           priv->mode, pktcnt,
-                           psmouse->packet[0], psmouse->packet[1],
-                           psmouse->packet[2], psmouse->packet[3],
-                           psmouse->packet[4], psmouse->packet[5]);
+                           "bad data, mode %d (%d) %*ph\n",
+                           priv->mode, pktcnt, 6, psmouse->packet);
 
        return valid;
 }
@@ -1030,7 +1027,7 @@ static enum hgpk_model_t hgpk_get_model(struct psmouse *psmouse)
                return -EIO;
        }
 
-       psmouse_dbg(psmouse, "ID: %02x %02x %02x\n", param[0], param[1], param[2]);
+       psmouse_dbg(psmouse, "ID: %*ph\n", 3, param);
 
        /* HGPK signature: 0x67, 0x00, 0x<model> */
        if (param[0] != 0x67 || param[1] != 0x00)
index 37033ade79d3c70881517f128d7a3d0141919099..12d12ca3fee030e0ab9ced706616c1160a1c584b 100644 (file)
 #define ABS_POS_BITS 13
 
 /*
- * Any position values from the hardware above the following limits are
- * treated as "wrapped around negative" values that have been truncated to
- * the 13-bit reporting range of the hardware. These are just reasonable
- * guesses and can be adjusted if hardware is found that operates outside
- * of these parameters.
+ * These values should represent the absolute maximum value that will
+ * be reported for a positive position value. Some Synaptics firmware
+ * uses this value to indicate a finger near the edge of the touchpad
+ * whose precise position cannot be determined.
+ *
+ * At least one touchpad is known to report positions in excess of this
+ * value which are actually negative values truncated to the 13-bit
+ * reporting range. These values have never been observed to be lower
+ * than 8184 (i.e. -8), so we treat all values greater than 8176 as
+ * negative and any other value as positive.
  */
-#define X_MAX_POSITIVE (((1 << ABS_POS_BITS) + XMAX) / 2)
-#define Y_MAX_POSITIVE (((1 << ABS_POS_BITS) + YMAX) / 2)
+#define X_MAX_POSITIVE 8176
+#define Y_MAX_POSITIVE 8176
 
 /*****************************************************************************
  *     Stuff we need even when we do not want native Synaptics support
@@ -604,11 +609,21 @@ static int synaptics_parse_hw_state(const unsigned char buf[],
                hw->right = (buf[0] & 0x02) ? 1 : 0;
        }
 
-       /* Convert wrap-around values to negative */
+       /*
+        * Convert wrap-around values to negative. (X|Y)_MAX_POSITIVE
+        * is used by some firmware to indicate a finger at the edge of
+        * the touchpad whose precise position cannot be determined, so
+        * convert these values to the maximum axis value.
+        */
        if (hw->x > X_MAX_POSITIVE)
                hw->x -= 1 << ABS_POS_BITS;
+       else if (hw->x == X_MAX_POSITIVE)
+               hw->x = XMAX;
+
        if (hw->y > Y_MAX_POSITIVE)
                hw->y -= 1 << ABS_POS_BITS;
+       else if (hw->y == Y_MAX_POSITIVE)
+               hw->y = YMAX;
 
        return 0;
 }
index f14675702c0f0bdd2bf36c85188c7da0a7a94ea4..063a174d3a88c4a22acb67a7e474b330e355b336 100644 (file)
@@ -376,12 +376,7 @@ static void synaptics_i2c_reschedule_work(struct synaptics_i2c *touch,
 
        spin_lock_irqsave(&touch->lock, flags);
 
-       /*
-        * If work is already scheduled then subsequent schedules will not
-        * change the scheduled time that's why we have to cancel it first.
-        */
-       __cancel_delayed_work(&touch->dwork);
-       schedule_delayed_work(&touch->dwork, delay);
+       mod_delayed_work(system_wq, &touch->dwork, delay);
 
        spin_unlock_irqrestore(&touch->lock, flags);
 }
index 0110b5a3a1678a7a0843672b8505e6f42c664fc1..964e43d81e29d9224a3261a0666f84dd500100c6 100644 (file)
@@ -551,17 +551,16 @@ static int mousedev_open(struct inode *inode, struct file *file)
                return -ENODEV;
 
        error = mutex_lock_interruptible(&mousedev_table_mutex);
-       if (error) {
+       if (error)
                return error;
-       }
+
        mousedev = mousedev_table[i];
        if (mousedev)
                get_device(&mousedev->dev);
        mutex_unlock(&mousedev_table_mutex);
 
-       if (!mousedev) {
+       if (!mousedev)
                return -ENODEV;
-       }
 
        client = kzalloc(sizeof(struct mousedev_client), GFP_KERNEL);
        if (!client) {
@@ -1088,7 +1087,7 @@ static int __init mousedev_init(void)
 #ifdef CONFIG_INPUT_MOUSEDEV_PSAUX
        error = misc_register(&psaux_mouse);
        if (error)
-               pr_warning("could not register psaux device, error: %d\n",
+               pr_warn("could not register psaux device, error: %d\n",
                           error);
        else
                psaux_registered = 1;
index 75fb040a3435c246b2077e96b5730f3d7030586b..a70aa555bbffe34d38d1e6f1378b697b9ee8665e 100644 (file)
@@ -180,11 +180,11 @@ int sparse_keymap_setup(struct input_dev *dev,
        for (e = keymap; e->type != KE_END; e++)
                map_size++;
 
-       map = kcalloc(map_size, sizeof (struct key_entry), GFP_KERNEL);
+       map = kcalloc(map_size, sizeof(struct key_entry), GFP_KERNEL);
        if (!map)
                return -ENOMEM;
 
-       memcpy(map, keymap, map_size * sizeof (struct key_entry));
+       memcpy(map, keymap, map_size * sizeof(struct key_entry));
 
        for (i = 0; i < map_size; i++) {
                entry = &map[i];
index 2a81ce375f756d0989bd3a6592efc6641137e9a5..08b462b6c0d8c9f9e8e8ee1c0dbc70127c434583 100644 (file)
@@ -606,7 +606,7 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
                                input_report_abs(input, ABS_WHEEL, 0);
                        }
 
-                       if (data[2] | (data[3] & 0x01) | data[4]) {
+                       if (data[2] | (data[3] & 0x01) | data[4] | data[5]) {
                                input_report_key(input, wacom->tool[1], 1);
                                input_report_abs(input, ABS_MISC, PAD_DEVICE_ID);
                        } else {
index df9e816d55e4da0ace4576b97aee9b08e6bd946f..549fa29548f8380907bec098dc032819dff0b8dc 100644 (file)
@@ -406,7 +406,7 @@ static int s3c2410ts_resume(struct device *dev)
        return 0;
 }
 
-static struct dev_pm_ops s3c_ts_pmops = {
+static const struct dev_pm_ops s3c_ts_pmops = {
        .suspend        = s3c2410ts_suspend,
        .resume         = s3c2410ts_resume,
 };
index e83410721e38b79f6d9a7a93e6f6a689ef73f1ab..52abb98a8ae5cadcaa38091daf74ca39e65f354e 100644 (file)
@@ -221,7 +221,7 @@ static void wm831x_ts_input_close(struct input_dev *idev)
        synchronize_irq(wm831x_ts->pd_irq);
 
        /* Make sure the IRQ completion work is quiesced */
-       flush_work_sync(&wm831x_ts->pd_data_work);
+       flush_work(&wm831x_ts->pd_data_work);
 
        /* If we ended up with the pen down then make sure we revert back
         * to pen detection state for the next time we start up.
index aa41485bc594beb300bf5ee96f07cfcd331bb83e..30a6b174fbb08ea6f55aa5baada5b23f0ec01ac4 100644 (file)
@@ -1123,7 +1123,6 @@ struct gigaset_driver *gigaset_initdriver(unsigned minor, unsigned minors,
        return drv;
 
 error:
-       kfree(drv->cs);
        kfree(drv);
        return NULL;
 }
index 2602be23f341287468524c9fbd79e33a18e21aa6..84b4b0f7eb990cc700bd36006f75cc8b9cd3e225 100644 (file)
@@ -116,7 +116,7 @@ mISDN_freedchannel(struct dchannel *ch)
        }
        skb_queue_purge(&ch->squeue);
        skb_queue_purge(&ch->rqueue);
-       flush_work_sync(&ch->workq);
+       flush_work(&ch->workq);
        return 0;
 }
 EXPORT_SYMBOL(mISDN_freedchannel);
index f56b6e7ffdac3ec36f3c2a5137f3ada6704f64ab..f6837b99908c46f6c963383aafb53a5c8e602198 100644 (file)
@@ -737,7 +737,7 @@ err_sysfs_remove:
        sysfs_remove_group(&led->cdev.dev->kobj, &lm3533_led_attribute_group);
 err_unregister:
        led_classdev_unregister(&led->cdev);
-       flush_work_sync(&led->work);
+       flush_work(&led->work);
 
        return ret;
 }
@@ -751,7 +751,7 @@ static int __devexit lm3533_led_remove(struct platform_device *pdev)
        lm3533_ctrlbank_disable(&led->cb);
        sysfs_remove_group(&led->cdev.dev->kobj, &lm3533_led_attribute_group);
        led_classdev_unregister(&led->cdev);
-       flush_work_sync(&led->work);
+       flush_work(&led->work);
 
        return 0;
 }
@@ -765,7 +765,7 @@ static void lm3533_led_shutdown(struct platform_device *pdev)
 
        lm3533_ctrlbank_disable(&led->cb);
        lm3533_led_set(&led->cdev, LED_OFF);            /* disable blink */
-       flush_work_sync(&led->work);
+       flush_work(&led->work);
 }
 
 static struct platform_driver lm3533_led_driver = {
index 0ade6ebfc914dba6e6809b46a428fc9ee219b83e..64009a176651984343c6ce7fc7c55c819d50aa25 100644 (file)
@@ -172,7 +172,7 @@ static int __devexit lp8788_led_remove(struct platform_device *pdev)
        struct lp8788_led *led = platform_get_drvdata(pdev);
 
        led_classdev_unregister(&led->led_dev);
-       flush_work_sync(&led->work);
+       flush_work(&led->work);
 
        return 0;
 }
index 918d4baff1c7257c65276bfcf4cb05b73350553c..4c62113f7a77d04a0eb7fb50de7fb84e089514bf 100644 (file)
@@ -275,7 +275,7 @@ static int wm8350_led_remove(struct platform_device *pdev)
        struct wm8350_led *led = platform_get_drvdata(pdev);
 
        led_classdev_unregister(&led->cdev);
-       flush_work_sync(&led->work);
+       flush_work(&led->work);
        wm8350_led_disable(led);
        regulator_put(led->dcdc);
        regulator_put(led->isink);
index 5c6a2d876562dd73f4a6f71fa958353182012dc5..36a4fdddd64a7eccdd38d657d4f8ee99cf247092 100644 (file)
@@ -226,7 +226,7 @@ void ams_sensor_detach(void)
         * We do this after ams_info.exit(), because an interrupt might
         * have arrived before disabling them.
         */
-       flush_work_sync(&ams_info.worker);
+       flush_work(&ams_info.worker);
 
        /* Remove device */
        of_device_unregister(ams_info.of_dev);
index 034233eefc8266eba122556fc6ffdb36e054eced..d778563a4ffd2ca829c42e3e480ed89cb908541e 100644 (file)
@@ -944,7 +944,7 @@ static void flush_multipath_work(struct multipath *m)
        flush_workqueue(kmpath_handlerd);
        multipath_wait_for_pg_init_completion(m);
        flush_workqueue(kmultipathd);
-       flush_work_sync(&m->trigger_event);
+       flush_work(&m->trigger_event);
 }
 
 static void multipath_dtr(struct dm_target *ti)
index bc5ddba8045b84a24e996235604a5db24d1d0e58..fd61f98ee1f68af3085297afc0b1c46dc0899e2f 100644 (file)
@@ -1146,7 +1146,7 @@ static void mirror_dtr(struct dm_target *ti)
 
        del_timer_sync(&ms->timer);
        flush_workqueue(ms->kmirrord_wq);
-       flush_work_sync(&ms->trigger_event);
+       flush_work(&ms->trigger_event);
        dm_kcopyd_client_destroy(ms->kcopyd_client);
        destroy_workqueue(ms->kmirrord_wq);
        free_context(ms, ti, ms->nr_mirrors);
index a087bf2a8d6666bbc699a44af28d003b4d2cfb88..e2f876539743feb800022f4b6da23fe776721fac 100644 (file)
@@ -199,7 +199,7 @@ static void stripe_dtr(struct dm_target *ti)
        for (i = 0; i < sc->stripes; i++)
                dm_put_device(ti, sc->stripe[i].dev);
 
-       flush_work_sync(&sc->trigger_event);
+       flush_work(&sc->trigger_event);
        kfree(sc);
 }
 
index 8766ce8c354dd908e1fbb9a493942e8669fe4259..c2117688aa23d8aad589d28a1e7a4cc1e0333024 100644 (file)
@@ -1329,8 +1329,8 @@ static int dvb_net_remove_if(struct dvb_net *dvbnet, unsigned long num)
                return -EBUSY;
 
        dvb_net_stop(net);
-       flush_work_sync(&priv->set_multicast_list_wq);
-       flush_work_sync(&priv->restart_net_feed_wq);
+       flush_work(&priv->set_multicast_list_wq);
+       flush_work(&priv->restart_net_feed_wq);
        printk("dvb_net: removed network interface %s\n", net->name);
        unregister_netdev(net);
        dvbnet->state[num]=0;
index 71ce52875c38ec1fdf5b3d822fc9de159e3a5bb7..909ff54868a3b183623b250849c166c70fdcbcf2 100644 (file)
@@ -111,7 +111,7 @@ void mantis_evmgr_exit(struct mantis_ca *ca)
        struct mantis_pci *mantis = ca->ca_priv;
 
        dprintk(MANTIS_DEBUG, 1, "Mantis Host I/F Event manager exiting");
-       flush_work_sync(&ca->hif_evm_work);
+       flush_work(&ca->hif_evm_work);
        mantis_hif_exit(ca);
        mantis_pcmcia_exit(ca);
 }
index 18340dafa426f3264c78688a0b3979f2972f1cf4..85e977861b4a68a8bbb44d76c9b6a010ec250c3f 100644 (file)
@@ -183,6 +183,6 @@ void mantis_uart_exit(struct mantis_pci *mantis)
 {
        /* disable interrupt */
        mmwrite(mmread(MANTIS_UART_CTL) & 0xffef, MANTIS_UART_CTL);
-       flush_work_sync(&mantis->uart_work);
+       flush_work(&mantis->uart_work);
 }
 EXPORT_SYMBOL_GPL(mantis_uart_exit);
index b58ff87db771750dfa7a511d7f382b924dba8814..2ce7179a386464ff8932e498a1766919e845ca9a 100644 (file)
@@ -196,7 +196,7 @@ static void request_modules(struct bttv *dev)
 
 static void flush_request_modules(struct bttv *dev)
 {
-       flush_work_sync(&dev->request_module_wk);
+       flush_work(&dev->request_module_wk);
 }
 #else
 #define request_modules(dev)
index 7e5ffd6f51786d3390b629b8794b4139aa410551..75c890907920b487bd38e424332344942d92d707 100644 (file)
@@ -272,7 +272,7 @@ static void request_modules(struct cx18 *dev)
 
 static void flush_request_modules(struct cx18 *dev)
 {
-       flush_work_sync(&dev->request_module_wk);
+       flush_work(&dev->request_module_wk);
 }
 #else
 #define request_modules(dev)
index 02d4d36735d39e6d68a1f8b82d3f3896f4a8e468..b84ebc54d91bce2279e1f1f7b8a8b09d4d0ff12d 100644 (file)
@@ -1002,7 +1002,7 @@ static void request_modules(struct cx231xx *dev)
 
 static void flush_request_modules(struct cx231xx *dev)
 {
-       flush_work_sync(&dev->request_module_wk);
+       flush_work(&dev->request_module_wk);
 }
 #else
 #define request_modules(dev)
index ce765e3f77bdf6b679ba1b56900035f4f38d51c2..bcbf7faf1bab34bcf2974d40e7204e921b223929 100644 (file)
@@ -231,9 +231,9 @@ static void cx23885_input_ir_stop(struct cx23885_dev *dev)
                v4l2_subdev_call(dev->sd_ir, ir, rx_s_parameters, &params);
                v4l2_subdev_call(dev->sd_ir, ir, rx_g_parameters, &params);
        }
-       flush_work_sync(&dev->cx25840_work);
-       flush_work_sync(&dev->ir_rx_work);
-       flush_work_sync(&dev->ir_tx_work);
+       flush_work(&dev->cx25840_work);
+       flush_work(&dev->ir_rx_work);
+       flush_work(&dev->ir_tx_work);
 }
 
 static void cx23885_input_ir_close(struct rc_dev *rc)
index cd5386ee210cf02f79fbfb4abaeb86895e88ab99..c04fb618e10b78a22209575c2506e4aae270e575 100644 (file)
@@ -70,7 +70,7 @@ static void request_modules(struct cx8802_dev *dev)
 
 static void flush_request_modules(struct cx8802_dev *dev)
 {
-       flush_work_sync(&dev->request_module_wk);
+       flush_work(&dev->request_module_wk);
 }
 #else
 #define request_modules(dev)
index ca62b9981380427b587efb684d36a60f88105a21..f7831e73f077d8da7282ac847cdec076c0499709 100644 (file)
@@ -2900,7 +2900,7 @@ static void request_modules(struct em28xx *dev)
 
 static void flush_request_modules(struct em28xx *dev)
 {
-       flush_work_sync(&dev->request_module_wk);
+       flush_work(&dev->request_module_wk);
 }
 #else
 #define request_modules(dev)
index e5015b0d5508cec9bd9af486c734fef3ff478179..8d7283bbd431af44a9d7ff4e564e264ac54b8a1a 100644 (file)
@@ -1198,7 +1198,7 @@ static int vidioc_streamoff(struct file *file, void *fh, enum v4l2_buf_type i)
 
        atomic_inc(&cam->reset_disable);
 
-       flush_work_sync(&cam->sensor_reset_work);
+       flush_work(&cam->sensor_reset_work);
 
        rval = videobuf_streamoff(q);
        if (!rval) {
@@ -1512,7 +1512,7 @@ static int omap24xxcam_release(struct file *file)
 
        atomic_inc(&cam->reset_disable);
 
-       flush_work_sync(&cam->sensor_reset_work);
+       flush_work(&cam->sensor_reset_work);
 
        /* stop streaming capture */
        videobuf_streamoff(&fh->vbq);
@@ -1536,7 +1536,7 @@ static int omap24xxcam_release(struct file *file)
         * not be scheduled anymore since streaming is already
         * disabled.)
         */
-       flush_work_sync(&cam->sensor_reset_work);
+       flush_work(&cam->sensor_reset_work);
 
        mutex_lock(&cam->mutex);
        if (atomic_dec_return(&cam->users) == 0) {
index 5fbb4e49495c8278f25da91b4dca454c405e5dc0..f2b37e05b96479e7f534537a3208d120b408f661 100644 (file)
@@ -170,7 +170,7 @@ static void request_submodules(struct saa7134_dev *dev)
 
 static void flush_request_submodules(struct saa7134_dev *dev)
 {
-       flush_work_sync(&dev->request_module_wk);
+       flush_work(&dev->request_module_wk);
 }
 
 #else
index dde361a9194e003ce5cd42a22785dea9975c4458..4df79c6569094311d6dc6cee081b72f62427cd2d 100644 (file)
@@ -556,7 +556,7 @@ static int empress_fini(struct saa7134_dev *dev)
 
        if (NULL == dev->empress_dev)
                return 0;
-       flush_work_sync(&dev->empress_workqueue);
+       flush_work(&dev->empress_workqueue);
        video_unregister_device(dev->empress_dev);
        dev->empress_dev = NULL;
        return 0;
index 034659b1317480a3c389ff9619dd3123c9aeb39d..307d8c5fb7cd771654a1bc08d0c0f94f45c2d103 100644 (file)
@@ -1074,7 +1074,7 @@ static void request_modules(struct tm6000_core *dev)
 
 static void flush_request_modules(struct tm6000_core *dev)
 {
-       flush_work_sync(&dev->request_module_wk);
+       flush_work(&dev->request_module_wk);
 }
 #else
 #define request_modules(dev)
index d99db5623acf45039f53dd9eb7e6b9620d88735d..fb69baa06ca88f8b0cdd60b6ac79fc6fbc7ba118 100644 (file)
@@ -1666,7 +1666,7 @@ mpt_mapresources(MPT_ADAPTER *ioc)
        if (pci_request_selected_regions(pdev, ioc->bars, "mpt")) {
                printk(MYIOC_s_ERR_FMT "pci_request_selected_regions() with "
                    "MEM failed\n", ioc->name);
-               return r;
+               goto out_pci_disable_device;
        }
 
        if (sizeof(dma_addr_t) > 4) {
@@ -1690,8 +1690,7 @@ mpt_mapresources(MPT_ADAPTER *ioc)
                } else {
                        printk(MYIOC_s_WARN_FMT "no suitable DMA mask for %s\n",
                            ioc->name, pci_name(pdev));
-                       pci_release_selected_regions(pdev, ioc->bars);
-                       return r;
+                       goto out_pci_release_region;
                }
        } else {
                if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
@@ -1704,8 +1703,7 @@ mpt_mapresources(MPT_ADAPTER *ioc)
                } else {
                        printk(MYIOC_s_WARN_FMT "no suitable DMA mask for %s\n",
                            ioc->name, pci_name(pdev));
-                       pci_release_selected_regions(pdev, ioc->bars);
-                       return r;
+                       goto out_pci_release_region;
                }
        }
 
@@ -1735,8 +1733,8 @@ mpt_mapresources(MPT_ADAPTER *ioc)
        if (mem == NULL) {
                printk(MYIOC_s_ERR_FMT ": ERROR - Unable to map adapter"
                        " memory!\n", ioc->name);
-               pci_release_selected_regions(pdev, ioc->bars);
-               return -EINVAL;
+               r = -EINVAL;
+               goto out_pci_release_region;
        }
        ioc->memmap = mem;
        dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "mem = %p, mem_phys = %llx\n",
@@ -1750,6 +1748,12 @@ mpt_mapresources(MPT_ADAPTER *ioc)
        ioc->pio_chip = (SYSIF_REGS __iomem *)port;
 
        return 0;
+
+out_pci_release_region:
+       pci_release_selected_regions(pdev, ioc->bars);
+out_pci_disable_device:
+       pci_disable_device(pdev);
+       return r;
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
index cb4910ac4d12edad3ba31be42a3e2be0678fcb8a..55d589981412ec6b956566727ed05bbdbdb15982 100644 (file)
@@ -1259,7 +1259,7 @@ static int menelaus_probe(struct i2c_client *client,
        return 0;
 fail2:
        free_irq(client->irq, menelaus);
-       flush_work_sync(&menelaus->work);
+       flush_work(&menelaus->work);
 fail1:
        kfree(menelaus);
        return err;
@@ -1270,7 +1270,7 @@ static int __exit menelaus_remove(struct i2c_client *client)
        struct menelaus_chip    *menelaus = i2c_get_clientdata(client);
 
        free_irq(client->irq, menelaus);
-       flush_work_sync(&menelaus->work);
+       flush_work(&menelaus->work);
        kfree(menelaus);
        the_menelaus = NULL;
        return 0;
index df03dd3bd0e2b331db3459f3a632ffd2f85f6155..6a7710603a904dcb2522738c18ab8e62d789324c 100644 (file)
@@ -487,7 +487,7 @@ static void __exit
 ioc4_exit(void)
 {
        /* Ensure ioc4_load_modules() has completed before exiting */
-       flush_work_sync(&ioc4_load_modules_work);
+       flush_work(&ioc4_load_modules_work);
        pci_unregister_driver(&ioc4_driver);
 }
 
index 597f189b44278caa4edd682ef27b6c57066eb1f2..ee2e16b170174df6e59c0d53a314d0058ddaff2a 100644 (file)
@@ -204,8 +204,8 @@ void mmc_host_clk_release(struct mmc_host *host)
        host->clk_requests--;
        if (mmc_host_may_gate_card(host->card) &&
            !host->clk_requests)
-               queue_delayed_work(system_nrt_wq, &host->clk_gate_work,
-                               msecs_to_jiffies(host->clkgate_delay));
+               schedule_delayed_work(&host->clk_gate_work,
+                                     msecs_to_jiffies(host->clkgate_delay));
        spin_unlock_irqrestore(&host->clk_lock, flags);
 }
 
index ad3fcea1269ebc179105563333b01cfdf139a9ed..bb4c2bf04d094f60cbbffc6a7be34e43effd38e0 100644 (file)
 #include <linux/gpio.h>
 #include <linux/regulator/consumer.h>
 #include <linux/module.h>
-#include <linux/fsl/mxs-dma.h>
 #include <linux/pinctrl/consumer.h>
 #include <linux/stmp_device.h>
 #include <linux/mmc/mxs-mmc.h>
+#include <linux/spi/mxs-spi.h>
 
 #define DRIVER_NAME    "mxs-mmc"
 
-/* card detect polling timeout */
-#define MXS_MMC_DETECT_TIMEOUT                 (HZ/2)
-
-#define ssp_is_old(host)       ((host)->devid == IMX23_MMC)
-
-/* SSP registers */
-#define HW_SSP_CTRL0                           0x000
-#define  BM_SSP_CTRL0_RUN                      (1 << 29)
-#define  BM_SSP_CTRL0_SDIO_IRQ_CHECK           (1 << 28)
-#define  BM_SSP_CTRL0_IGNORE_CRC               (1 << 26)
-#define  BM_SSP_CTRL0_READ                     (1 << 25)
-#define  BM_SSP_CTRL0_DATA_XFER                        (1 << 24)
-#define  BP_SSP_CTRL0_BUS_WIDTH                        (22)
-#define  BM_SSP_CTRL0_BUS_WIDTH                        (0x3 << 22)
-#define  BM_SSP_CTRL0_WAIT_FOR_IRQ             (1 << 21)
-#define  BM_SSP_CTRL0_LONG_RESP                        (1 << 19)
-#define  BM_SSP_CTRL0_GET_RESP                 (1 << 17)
-#define  BM_SSP_CTRL0_ENABLE                   (1 << 16)
-#define  BP_SSP_CTRL0_XFER_COUNT               (0)
-#define  BM_SSP_CTRL0_XFER_COUNT               (0xffff)
-#define HW_SSP_CMD0                            0x010
-#define  BM_SSP_CMD0_DBL_DATA_RATE_EN          (1 << 25)
-#define  BM_SSP_CMD0_SLOW_CLKING_EN            (1 << 22)
-#define  BM_SSP_CMD0_CONT_CLKING_EN            (1 << 21)
-#define  BM_SSP_CMD0_APPEND_8CYC               (1 << 20)
-#define  BP_SSP_CMD0_BLOCK_SIZE                        (16)
-#define  BM_SSP_CMD0_BLOCK_SIZE                        (0xf << 16)
-#define  BP_SSP_CMD0_BLOCK_COUNT               (8)
-#define  BM_SSP_CMD0_BLOCK_COUNT               (0xff << 8)
-#define  BP_SSP_CMD0_CMD                       (0)
-#define  BM_SSP_CMD0_CMD                       (0xff)
-#define HW_SSP_CMD1                            0x020
-#define HW_SSP_XFER_SIZE                       0x030
-#define HW_SSP_BLOCK_SIZE                      0x040
-#define  BP_SSP_BLOCK_SIZE_BLOCK_COUNT         (4)
-#define  BM_SSP_BLOCK_SIZE_BLOCK_COUNT         (0xffffff << 4)
-#define  BP_SSP_BLOCK_SIZE_BLOCK_SIZE          (0)
-#define  BM_SSP_BLOCK_SIZE_BLOCK_SIZE          (0xf)
-#define HW_SSP_TIMING(h)                       (ssp_is_old(h) ? 0x050 : 0x070)
-#define  BP_SSP_TIMING_TIMEOUT                 (16)
-#define  BM_SSP_TIMING_TIMEOUT                 (0xffff << 16)
-#define  BP_SSP_TIMING_CLOCK_DIVIDE            (8)
-#define  BM_SSP_TIMING_CLOCK_DIVIDE            (0xff << 8)
-#define  BP_SSP_TIMING_CLOCK_RATE              (0)
-#define  BM_SSP_TIMING_CLOCK_RATE              (0xff)
-#define HW_SSP_CTRL1(h)                                (ssp_is_old(h) ? 0x060 : 0x080)
-#define  BM_SSP_CTRL1_SDIO_IRQ                 (1 << 31)
-#define  BM_SSP_CTRL1_SDIO_IRQ_EN              (1 << 30)
-#define  BM_SSP_CTRL1_RESP_ERR_IRQ             (1 << 29)
-#define  BM_SSP_CTRL1_RESP_ERR_IRQ_EN          (1 << 28)
-#define  BM_SSP_CTRL1_RESP_TIMEOUT_IRQ         (1 << 27)
-#define  BM_SSP_CTRL1_RESP_TIMEOUT_IRQ_EN      (1 << 26)
-#define  BM_SSP_CTRL1_DATA_TIMEOUT_IRQ         (1 << 25)
-#define  BM_SSP_CTRL1_DATA_TIMEOUT_IRQ_EN      (1 << 24)
-#define  BM_SSP_CTRL1_DATA_CRC_IRQ             (1 << 23)
-#define  BM_SSP_CTRL1_DATA_CRC_IRQ_EN          (1 << 22)
-#define  BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ                (1 << 21)
-#define  BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ_EN     (1 << 20)
-#define  BM_SSP_CTRL1_RECV_TIMEOUT_IRQ         (1 << 17)
-#define  BM_SSP_CTRL1_RECV_TIMEOUT_IRQ_EN      (1 << 16)
-#define  BM_SSP_CTRL1_FIFO_OVERRUN_IRQ         (1 << 15)
-#define  BM_SSP_CTRL1_FIFO_OVERRUN_IRQ_EN      (1 << 14)
-#define  BM_SSP_CTRL1_DMA_ENABLE               (1 << 13)
-#define  BM_SSP_CTRL1_POLARITY                 (1 << 9)
-#define  BP_SSP_CTRL1_WORD_LENGTH              (4)
-#define  BM_SSP_CTRL1_WORD_LENGTH              (0xf << 4)
-#define  BP_SSP_CTRL1_SSP_MODE                 (0)
-#define  BM_SSP_CTRL1_SSP_MODE                 (0xf)
-#define HW_SSP_SDRESP0(h)                      (ssp_is_old(h) ? 0x080 : 0x0a0)
-#define HW_SSP_SDRESP1(h)                      (ssp_is_old(h) ? 0x090 : 0x0b0)
-#define HW_SSP_SDRESP2(h)                      (ssp_is_old(h) ? 0x0a0 : 0x0c0)
-#define HW_SSP_SDRESP3(h)                      (ssp_is_old(h) ? 0x0b0 : 0x0d0)
-#define HW_SSP_STATUS(h)                       (ssp_is_old(h) ? 0x0c0 : 0x100)
-#define  BM_SSP_STATUS_CARD_DETECT             (1 << 28)
-#define  BM_SSP_STATUS_SDIO_IRQ                        (1 << 17)
-
-#define BF_SSP(value, field)   (((value) << BP_SSP_##field) & BM_SSP_##field)
-
 #define MXS_MMC_IRQ_BITS       (BM_SSP_CTRL1_SDIO_IRQ          | \
                                 BM_SSP_CTRL1_RESP_ERR_IRQ      | \
                                 BM_SSP_CTRL1_RESP_TIMEOUT_IRQ  | \
                                 BM_SSP_CTRL1_RECV_TIMEOUT_IRQ  | \
                                 BM_SSP_CTRL1_FIFO_OVERRUN_IRQ)
 
-#define SSP_PIO_NUM    3
-
-enum mxs_mmc_id {
-       IMX23_MMC,
-       IMX28_MMC,
-};
+/* card detect polling timeout */
+#define MXS_MMC_DETECT_TIMEOUT                 (HZ/2)
 
 struct mxs_mmc_host {
+       struct mxs_ssp                  ssp;
+
        struct mmc_host                 *mmc;
        struct mmc_request              *mrq;
        struct mmc_command              *cmd;
        struct mmc_data                 *data;
 
-       void __iomem                    *base;
-       int                             dma_channel;
-       struct clk                      *clk;
-       unsigned int                    clk_rate;
-
-       struct dma_chan                 *dmach;
-       struct mxs_dma_data             dma_data;
-       unsigned int                    dma_dir;
-       enum dma_transfer_direction     slave_dirn;
-       u32                             ssp_pio_words[SSP_PIO_NUM];
-
-       enum mxs_mmc_id                 devid;
        unsigned char                   bus_width;
        spinlock_t                      lock;
        int                             sdio_irq_en;
@@ -186,16 +94,18 @@ static int mxs_mmc_get_ro(struct mmc_host *mmc)
 static int mxs_mmc_get_cd(struct mmc_host *mmc)
 {
        struct mxs_mmc_host *host = mmc_priv(mmc);
+       struct mxs_ssp *ssp = &host->ssp;
 
-       return !(readl(host->base + HW_SSP_STATUS(host)) &
+       return !(readl(ssp->base + HW_SSP_STATUS(ssp)) &
                 BM_SSP_STATUS_CARD_DETECT);
 }
 
 static void mxs_mmc_reset(struct mxs_mmc_host *host)
 {
+       struct mxs_ssp *ssp = &host->ssp;
        u32 ctrl0, ctrl1;
 
-       stmp_reset_block(host->base);
+       stmp_reset_block(ssp->base);
 
        ctrl0 = BM_SSP_CTRL0_IGNORE_CRC;
        ctrl1 = BF_SSP(0x3, CTRL1_SSP_MODE) |
@@ -211,15 +121,15 @@ static void mxs_mmc_reset(struct mxs_mmc_host *host)
        writel(BF_SSP(0xffff, TIMING_TIMEOUT) |
               BF_SSP(2, TIMING_CLOCK_DIVIDE) |
               BF_SSP(0, TIMING_CLOCK_RATE),
-              host->base + HW_SSP_TIMING(host));
+              ssp->base + HW_SSP_TIMING(ssp));
 
        if (host->sdio_irq_en) {
                ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
                ctrl1 |= BM_SSP_CTRL1_SDIO_IRQ_EN;
        }
 
-       writel(ctrl0, host->base + HW_SSP_CTRL0);
-       writel(ctrl1, host->base + HW_SSP_CTRL1(host));
+       writel(ctrl0, ssp->base + HW_SSP_CTRL0);
+       writel(ctrl1, ssp->base + HW_SSP_CTRL1(ssp));
 }
 
 static void mxs_mmc_start_cmd(struct mxs_mmc_host *host,
@@ -230,21 +140,22 @@ static void mxs_mmc_request_done(struct mxs_mmc_host *host)
        struct mmc_command *cmd = host->cmd;
        struct mmc_data *data = host->data;
        struct mmc_request *mrq = host->mrq;
+       struct mxs_ssp *ssp = &host->ssp;
 
        if (mmc_resp_type(cmd) & MMC_RSP_PRESENT) {
                if (mmc_resp_type(cmd) & MMC_RSP_136) {
-                       cmd->resp[3] = readl(host->base + HW_SSP_SDRESP0(host));
-                       cmd->resp[2] = readl(host->base + HW_SSP_SDRESP1(host));
-                       cmd->resp[1] = readl(host->base + HW_SSP_SDRESP2(host));
-                       cmd->resp[0] = readl(host->base + HW_SSP_SDRESP3(host));
+                       cmd->resp[3] = readl(ssp->base + HW_SSP_SDRESP0(ssp));
+                       cmd->resp[2] = readl(ssp->base + HW_SSP_SDRESP1(ssp));
+                       cmd->resp[1] = readl(ssp->base + HW_SSP_SDRESP2(ssp));
+                       cmd->resp[0] = readl(ssp->base + HW_SSP_SDRESP3(ssp));
                } else {
-                       cmd->resp[0] = readl(host->base + HW_SSP_SDRESP0(host));
+                       cmd->resp[0] = readl(ssp->base + HW_SSP_SDRESP0(ssp));
                }
        }
 
        if (data) {
                dma_unmap_sg(mmc_dev(host->mmc), data->sg,
-                            data->sg_len, host->dma_dir);
+                            data->sg_len, ssp->dma_dir);
                /*
                 * If there was an error on any block, we mark all
                 * data blocks as being in error.
@@ -277,13 +188,14 @@ static irqreturn_t mxs_mmc_irq_handler(int irq, void *dev_id)
        struct mxs_mmc_host *host = dev_id;
        struct mmc_command *cmd = host->cmd;
        struct mmc_data *data = host->data;
+       struct mxs_ssp *ssp = &host->ssp;
        u32 stat;
 
        spin_lock(&host->lock);
 
-       stat = readl(host->base + HW_SSP_CTRL1(host));
+       stat = readl(ssp->base + HW_SSP_CTRL1(ssp));
        writel(stat & MXS_MMC_IRQ_BITS,
-              host->base + HW_SSP_CTRL1(host) + STMP_OFFSET_REG_CLR);
+              ssp->base + HW_SSP_CTRL1(ssp) + STMP_OFFSET_REG_CLR);
 
        spin_unlock(&host->lock);
 
@@ -312,6 +224,7 @@ static irqreturn_t mxs_mmc_irq_handler(int irq, void *dev_id)
 static struct dma_async_tx_descriptor *mxs_mmc_prep_dma(
        struct mxs_mmc_host *host, unsigned long flags)
 {
+       struct mxs_ssp *ssp = &host->ssp;
        struct dma_async_tx_descriptor *desc;
        struct mmc_data *data = host->data;
        struct scatterlist * sgl;
@@ -320,24 +233,24 @@ static struct dma_async_tx_descriptor *mxs_mmc_prep_dma(
        if (data) {
                /* data */
                dma_map_sg(mmc_dev(host->mmc), data->sg,
-                          data->sg_len, host->dma_dir);
+                          data->sg_len, ssp->dma_dir);
                sgl = data->sg;
                sg_len = data->sg_len;
        } else {
                /* pio */
-               sgl = (struct scatterlist *) host->ssp_pio_words;
+               sgl = (struct scatterlist *) ssp->ssp_pio_words;
                sg_len = SSP_PIO_NUM;
        }
 
-       desc = dmaengine_prep_slave_sg(host->dmach,
-                               sgl, sg_len, host->slave_dirn, flags);
+       desc = dmaengine_prep_slave_sg(ssp->dmach,
+                               sgl, sg_len, ssp->slave_dirn, flags);
        if (desc) {
                desc->callback = mxs_mmc_dma_irq_callback;
                desc->callback_param = host;
        } else {
                if (data)
                        dma_unmap_sg(mmc_dev(host->mmc), data->sg,
-                                    data->sg_len, host->dma_dir);
+                                    data->sg_len, ssp->dma_dir);
        }
 
        return desc;
@@ -345,6 +258,7 @@ static struct dma_async_tx_descriptor *mxs_mmc_prep_dma(
 
 static void mxs_mmc_bc(struct mxs_mmc_host *host)
 {
+       struct mxs_ssp *ssp = &host->ssp;
        struct mmc_command *cmd = host->cmd;
        struct dma_async_tx_descriptor *desc;
        u32 ctrl0, cmd0, cmd1;
@@ -358,17 +272,17 @@ static void mxs_mmc_bc(struct mxs_mmc_host *host)
                cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
        }
 
-       host->ssp_pio_words[0] = ctrl0;
-       host->ssp_pio_words[1] = cmd0;
-       host->ssp_pio_words[2] = cmd1;
-       host->dma_dir = DMA_NONE;
-       host->slave_dirn = DMA_TRANS_NONE;
+       ssp->ssp_pio_words[0] = ctrl0;
+       ssp->ssp_pio_words[1] = cmd0;
+       ssp->ssp_pio_words[2] = cmd1;
+       ssp->dma_dir = DMA_NONE;
+       ssp->slave_dirn = DMA_TRANS_NONE;
        desc = mxs_mmc_prep_dma(host, DMA_CTRL_ACK);
        if (!desc)
                goto out;
 
        dmaengine_submit(desc);
-       dma_async_issue_pending(host->dmach);
+       dma_async_issue_pending(ssp->dmach);
        return;
 
 out:
@@ -378,6 +292,7 @@ out:
 
 static void mxs_mmc_ac(struct mxs_mmc_host *host)
 {
+       struct mxs_ssp *ssp = &host->ssp;
        struct mmc_command *cmd = host->cmd;
        struct dma_async_tx_descriptor *desc;
        u32 ignore_crc, get_resp, long_resp;
@@ -399,17 +314,17 @@ static void mxs_mmc_ac(struct mxs_mmc_host *host)
                cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
        }
 
-       host->ssp_pio_words[0] = ctrl0;
-       host->ssp_pio_words[1] = cmd0;
-       host->ssp_pio_words[2] = cmd1;
-       host->dma_dir = DMA_NONE;
-       host->slave_dirn = DMA_TRANS_NONE;
+       ssp->ssp_pio_words[0] = ctrl0;
+       ssp->ssp_pio_words[1] = cmd0;
+       ssp->ssp_pio_words[2] = cmd1;
+       ssp->dma_dir = DMA_NONE;
+       ssp->slave_dirn = DMA_TRANS_NONE;
        desc = mxs_mmc_prep_dma(host, DMA_CTRL_ACK);
        if (!desc)
                goto out;
 
        dmaengine_submit(desc);
-       dma_async_issue_pending(host->dmach);
+       dma_async_issue_pending(ssp->dmach);
        return;
 
 out:
@@ -447,6 +362,8 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host)
        unsigned int data_size = 0, log2_blksz;
        unsigned int blocks = data->blocks;
 
+       struct mxs_ssp *ssp = &host->ssp;
+
        u32 ignore_crc, get_resp, long_resp, read;
        u32 ctrl0, cmd0, cmd1, val;
 
@@ -489,15 +406,15 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host)
                blocks = 1;
 
        /* xfer count, block size and count need to be set differently */
-       if (ssp_is_old(host)) {
+       if (ssp_is_old(ssp)) {
                ctrl0 |= BF_SSP(data_size, CTRL0_XFER_COUNT);
                cmd0 |= BF_SSP(log2_blksz, CMD0_BLOCK_SIZE) |
                        BF_SSP(blocks - 1, CMD0_BLOCK_COUNT);
        } else {
-               writel(data_size, host->base + HW_SSP_XFER_SIZE);
+               writel(data_size, ssp->base + HW_SSP_XFER_SIZE);
                writel(BF_SSP(log2_blksz, BLOCK_SIZE_BLOCK_SIZE) |
                       BF_SSP(blocks - 1, BLOCK_SIZE_BLOCK_COUNT),
-                      host->base + HW_SSP_BLOCK_SIZE);
+                      ssp->base + HW_SSP_BLOCK_SIZE);
        }
 
        if ((cmd->opcode == MMC_STOP_TRANSMISSION) ||
@@ -512,18 +429,18 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host)
        }
 
        /* set the timeout count */
-       timeout = mxs_ns_to_ssp_ticks(host->clk_rate, data->timeout_ns);
-       val = readl(host->base + HW_SSP_TIMING(host));
+       timeout = mxs_ns_to_ssp_ticks(ssp->clk_rate, data->timeout_ns);
+       val = readl(ssp->base + HW_SSP_TIMING(ssp));
        val &= ~(BM_SSP_TIMING_TIMEOUT);
        val |= BF_SSP(timeout, TIMING_TIMEOUT);
-       writel(val, host->base + HW_SSP_TIMING(host));
+       writel(val, ssp->base + HW_SSP_TIMING(ssp));
 
        /* pio */
-       host->ssp_pio_words[0] = ctrl0;
-       host->ssp_pio_words[1] = cmd0;
-       host->ssp_pio_words[2] = cmd1;
-       host->dma_dir = DMA_NONE;
-       host->slave_dirn = DMA_TRANS_NONE;
+       ssp->ssp_pio_words[0] = ctrl0;
+       ssp->ssp_pio_words[1] = cmd0;
+       ssp->ssp_pio_words[2] = cmd1;
+       ssp->dma_dir = DMA_NONE;
+       ssp->slave_dirn = DMA_TRANS_NONE;
        desc = mxs_mmc_prep_dma(host, 0);
        if (!desc)
                goto out;
@@ -531,14 +448,14 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host)
        /* append data sg */
        WARN_ON(host->data != NULL);
        host->data = data;
-       host->dma_dir = dma_data_dir;
-       host->slave_dirn = slave_dirn;
+       ssp->dma_dir = dma_data_dir;
+       ssp->slave_dirn = slave_dirn;
        desc = mxs_mmc_prep_dma(host, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc)
                goto out;
 
        dmaengine_submit(desc);
-       dma_async_issue_pending(host->dmach);
+       dma_async_issue_pending(ssp->dmach);
        return;
 out:
        dev_warn(mmc_dev(host->mmc),
@@ -579,42 +496,6 @@ static void mxs_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
        mxs_mmc_start_cmd(host, mrq->cmd);
 }
 
-static void mxs_mmc_set_clk_rate(struct mxs_mmc_host *host, unsigned int rate)
-{
-       unsigned int ssp_clk, ssp_sck;
-       u32 clock_divide, clock_rate;
-       u32 val;
-
-       ssp_clk = clk_get_rate(host->clk);
-
-       for (clock_divide = 2; clock_divide <= 254; clock_divide += 2) {
-               clock_rate = DIV_ROUND_UP(ssp_clk, rate * clock_divide);
-               clock_rate = (clock_rate > 0) ? clock_rate - 1 : 0;
-               if (clock_rate <= 255)
-                       break;
-       }
-
-       if (clock_divide > 254) {
-               dev_err(mmc_dev(host->mmc),
-                       "%s: cannot set clock to %d\n", __func__, rate);
-               return;
-       }
-
-       ssp_sck = ssp_clk / clock_divide / (1 + clock_rate);
-
-       val = readl(host->base + HW_SSP_TIMING(host));
-       val &= ~(BM_SSP_TIMING_CLOCK_DIVIDE | BM_SSP_TIMING_CLOCK_RATE);
-       val |= BF_SSP(clock_divide, TIMING_CLOCK_DIVIDE);
-       val |= BF_SSP(clock_rate, TIMING_CLOCK_RATE);
-       writel(val, host->base + HW_SSP_TIMING(host));
-
-       host->clk_rate = ssp_sck;
-
-       dev_dbg(mmc_dev(host->mmc),
-               "%s: clock_divide %d, clock_rate %d, ssp_clk %d, rate_actual %d, rate_requested %d\n",
-               __func__, clock_divide, clock_rate, ssp_clk, ssp_sck, rate);
-}
-
 static void mxs_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 {
        struct mxs_mmc_host *host = mmc_priv(mmc);
@@ -627,12 +508,13 @@ static void mxs_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
                host->bus_width = 0;
 
        if (ios->clock)
-               mxs_mmc_set_clk_rate(host, ios->clock);
+               mxs_ssp_set_clk_rate(&host->ssp, ios->clock);
 }
 
 static void mxs_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
 {
        struct mxs_mmc_host *host = mmc_priv(mmc);
+       struct mxs_ssp *ssp = &host->ssp;
        unsigned long flags;
 
        spin_lock_irqsave(&host->lock, flags);
@@ -641,19 +523,19 @@ static void mxs_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
 
        if (enable) {
                writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK,
-                      host->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
+                      ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
                writel(BM_SSP_CTRL1_SDIO_IRQ_EN,
-                      host->base + HW_SSP_CTRL1(host) + STMP_OFFSET_REG_SET);
+                      ssp->base + HW_SSP_CTRL1(ssp) + STMP_OFFSET_REG_SET);
        } else {
                writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK,
-                      host->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
+                      ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
                writel(BM_SSP_CTRL1_SDIO_IRQ_EN,
-                      host->base + HW_SSP_CTRL1(host) + STMP_OFFSET_REG_CLR);
+                      ssp->base + HW_SSP_CTRL1(ssp) + STMP_OFFSET_REG_CLR);
        }
 
        spin_unlock_irqrestore(&host->lock, flags);
 
-       if (enable && readl(host->base + HW_SSP_STATUS(host)) &
+       if (enable && readl(ssp->base + HW_SSP_STATUS(ssp)) &
                        BM_SSP_STATUS_SDIO_IRQ)
                mmc_signal_sdio_irq(host->mmc);
 
@@ -670,34 +552,35 @@ static const struct mmc_host_ops mxs_mmc_ops = {
 static bool mxs_mmc_dma_filter(struct dma_chan *chan, void *param)
 {
        struct mxs_mmc_host *host = param;
+       struct mxs_ssp *ssp = &host->ssp;
 
        if (!mxs_dma_is_apbh(chan))
                return false;
 
-       if (chan->chan_id != host->dma_channel)
+       if (chan->chan_id != ssp->dma_channel)
                return false;
 
-       chan->private = &host->dma_data;
+       chan->private = &ssp->dma_data;
 
        return true;
 }
 
-static struct platform_device_id mxs_mmc_ids[] = {
+static struct platform_device_id mxs_ssp_ids[] = {
        {
                .name = "imx23-mmc",
-               .driver_data = IMX23_MMC,
+               .driver_data = IMX23_SSP,
        }, {
                .name = "imx28-mmc",
-               .driver_data = IMX28_MMC,
+               .driver_data = IMX28_SSP,
        }, {
                /* sentinel */
        }
 };
-MODULE_DEVICE_TABLE(platform, mxs_mmc_ids);
+MODULE_DEVICE_TABLE(platform, mxs_ssp_ids);
 
 static const struct of_device_id mxs_mmc_dt_ids[] = {
-       { .compatible = "fsl,imx23-mmc", .data = (void *) IMX23_MMC, },
-       { .compatible = "fsl,imx28-mmc", .data = (void *) IMX28_MMC, },
+       { .compatible = "fsl,imx23-mmc", .data = (void *) IMX23_SSP, },
+       { .compatible = "fsl,imx28-mmc", .data = (void *) IMX28_SSP, },
        { /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, mxs_mmc_dt_ids);
@@ -716,6 +599,7 @@ static int mxs_mmc_probe(struct platform_device *pdev)
        dma_cap_mask_t mask;
        struct regulator *reg_vmmc;
        enum of_gpio_flags flags;
+       struct mxs_ssp *ssp;
 
        iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        dmares = platform_get_resource(pdev, IORESOURCE_DMA, 0);
@@ -729,28 +613,30 @@ static int mxs_mmc_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        host = mmc_priv(mmc);
-       host->base = devm_request_and_ioremap(&pdev->dev, iores);
-       if (!host->base) {
+       ssp = &host->ssp;
+       ssp->dev = &pdev->dev;
+       ssp->base = devm_request_and_ioremap(&pdev->dev, iores);
+       if (!ssp->base) {
                ret = -EADDRNOTAVAIL;
                goto out_mmc_free;
        }
 
        if (np) {
-               host->devid = (enum mxs_mmc_id) of_id->data;
+               ssp->devid = (enum mxs_ssp_id) of_id->data;
                /*
                 * TODO: This is a temporary solution and should be changed
                 * to use generic DMA binding later when the helpers get in.
                 */
                ret = of_property_read_u32(np, "fsl,ssp-dma-channel",
-                                          &host->dma_channel);
+                                          &ssp->dma_channel);
                if (ret) {
                        dev_err(mmc_dev(host->mmc),
                                "failed to get dma channel\n");
                        goto out_mmc_free;
                }
        } else {
-               host->devid = pdev->id_entry->driver_data;
-               host->dma_channel = dmares->start;
+               ssp->devid = pdev->id_entry->driver_data;
+               ssp->dma_channel = dmares->start;
        }
 
        host->mmc = mmc;
@@ -772,20 +658,20 @@ static int mxs_mmc_probe(struct platform_device *pdev)
                goto out_mmc_free;
        }
 
-       host->clk = clk_get(&pdev->dev, NULL);
-       if (IS_ERR(host->clk)) {
-               ret = PTR_ERR(host->clk);
+       ssp->clk = clk_get(&pdev->dev, NULL);
+       if (IS_ERR(ssp->clk)) {
+               ret = PTR_ERR(ssp->clk);
                goto out_mmc_free;
        }
-       clk_prepare_enable(host->clk);
+       clk_prepare_enable(ssp->clk);
 
        mxs_mmc_reset(host);
 
        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);
-       host->dma_data.chan_irq = irq_dma;
-       host->dmach = dma_request_channel(mask, mxs_mmc_dma_filter, host);
-       if (!host->dmach) {
+       ssp->dma_data.chan_irq = irq_dma;
+       ssp->dmach = dma_request_channel(mask, mxs_mmc_dma_filter, host);
+       if (!ssp->dmach) {
                dev_err(mmc_dev(host->mmc),
                        "%s: failed to request dma\n", __func__);
                goto out_clk_put;
@@ -822,9 +708,9 @@ static int mxs_mmc_probe(struct platform_device *pdev)
 
        mmc->max_segs = 52;
        mmc->max_blk_size = 1 << 0xf;
-       mmc->max_blk_count = (ssp_is_old(host)) ? 0xff : 0xffffff;
-       mmc->max_req_size = (ssp_is_old(host)) ? 0xffff : 0xffffffff;
-       mmc->max_seg_size = dma_get_max_seg_size(host->dmach->device->dev);
+       mmc->max_blk_count = (ssp_is_old(ssp)) ? 0xff : 0xffffff;
+       mmc->max_req_size = (ssp_is_old(ssp)) ? 0xffff : 0xffffffff;
+       mmc->max_seg_size = dma_get_max_seg_size(ssp->dmach->device->dev);
 
        platform_set_drvdata(pdev, mmc);
 
@@ -844,11 +730,11 @@ static int mxs_mmc_probe(struct platform_device *pdev)
        return 0;
 
 out_free_dma:
-       if (host->dmach)
-               dma_release_channel(host->dmach);
+       if (ssp->dmach)
+               dma_release_channel(ssp->dmach);
 out_clk_put:
-       clk_disable_unprepare(host->clk);
-       clk_put(host->clk);
+       clk_disable_unprepare(ssp->clk);
+       clk_put(ssp->clk);
 out_mmc_free:
        mmc_free_host(mmc);
        return ret;
@@ -858,16 +744,17 @@ static int mxs_mmc_remove(struct platform_device *pdev)
 {
        struct mmc_host *mmc = platform_get_drvdata(pdev);
        struct mxs_mmc_host *host = mmc_priv(mmc);
+       struct mxs_ssp *ssp = &host->ssp;
 
        mmc_remove_host(mmc);
 
        platform_set_drvdata(pdev, NULL);
 
-       if (host->dmach)
-               dma_release_channel(host->dmach);
+       if (ssp->dmach)
+               dma_release_channel(ssp->dmach);
 
-       clk_disable_unprepare(host->clk);
-       clk_put(host->clk);
+       clk_disable_unprepare(ssp->clk);
+       clk_put(ssp->clk);
 
        mmc_free_host(mmc);
 
@@ -879,11 +766,12 @@ static int mxs_mmc_suspend(struct device *dev)
 {
        struct mmc_host *mmc = dev_get_drvdata(dev);
        struct mxs_mmc_host *host = mmc_priv(mmc);
+       struct mxs_ssp *ssp = &host->ssp;
        int ret = 0;
 
        ret = mmc_suspend_host(mmc);
 
-       clk_disable_unprepare(host->clk);
+       clk_disable_unprepare(ssp->clk);
 
        return ret;
 }
@@ -892,9 +780,10 @@ static int mxs_mmc_resume(struct device *dev)
 {
        struct mmc_host *mmc = dev_get_drvdata(dev);
        struct mxs_mmc_host *host = mmc_priv(mmc);
+       struct mxs_ssp *ssp = &host->ssp;
        int ret = 0;
 
-       clk_prepare_enable(host->clk);
+       clk_prepare_enable(ssp->clk);
 
        ret = mmc_resume_host(mmc);
 
@@ -910,7 +799,7 @@ static const struct dev_pm_ops mxs_mmc_pm_ops = {
 static struct platform_driver mxs_mmc_driver = {
        .probe          = mxs_mmc_probe,
        .remove         = mxs_mmc_remove,
-       .id_table       = mxs_mmc_ids,
+       .id_table       = mxs_ssp_ids,
        .driver         = {
                .name   = DRIVER_NAME,
                .owner  = THIS_MODULE,
index 551e316e4454d99f9b552a2924bea045784c7a8c..438737a1f59a3c882c5c6db3fade53696b341cd5 100644 (file)
@@ -387,8 +387,8 @@ static void mtdoops_notify_remove(struct mtd_info *mtd)
                printk(KERN_WARNING "mtdoops: could not unregister kmsg_dumper\n");
 
        cxt->mtd = NULL;
-       flush_work_sync(&cxt->work_erase);
-       flush_work_sync(&cxt->work_write);
+       flush_work(&cxt->work_erase);
+       flush_work(&cxt->work_write);
 }
 
 
index d518e4db8a0bf8665156fd2383ad40fcde0fe146..3a49e6de5e603e2641acc86acb50025bde9b69a6 100644 (file)
@@ -744,7 +744,7 @@ int parse_mtd_partitions(struct mtd_info *master, const char **types,
        return ret;
 }
 
-int mtd_is_partition(struct mtd_info *mtd)
+int mtd_is_partition(const struct mtd_info *mtd)
 {
        struct mtd_part *part;
        int ispart = 0;
@@ -760,3 +760,13 @@ int mtd_is_partition(struct mtd_info *mtd)
        return ispart;
 }
 EXPORT_SYMBOL_GPL(mtd_is_partition);
+
+/* Returns the size of the entire flash chip */
+uint64_t mtd_get_device_size(const struct mtd_info *mtd)
+{
+       if (!mtd_is_partition(mtd))
+               return mtd->size;
+
+       return PART(mtd)->master->size;
+}
+EXPORT_SYMBOL_GPL(mtd_get_device_size);
index ea4b95b5451c8b6c2ec25a0905c5e4af7e1e76ee..271a842f8c393125d517a0d91f6ca74e9a975a39 100644 (file)
@@ -27,20 +27,34 @@ config MTD_UBI_WL_THRESHOLD
          life-cycle less than 10000, the threshold should be lessened (e.g.,
          to 128 or 256, although it does not have to be power of 2).
 
-config MTD_UBI_BEB_RESERVE
-       int "Percentage of reserved eraseblocks for bad eraseblocks handling"
-       default 2
-       range 0 25
+config MTD_UBI_BEB_LIMIT
+       int "Maximum expected bad eraseblock count per 1024 eraseblocks"
+       default 20
+       range 0 768
        help
-         If the MTD device admits of bad eraseblocks (e.g. NAND flash), UBI
-         reserves some amount of physical eraseblocks to handle new bad
-         eraseblocks. For example, if a flash physical eraseblock becomes bad,
-         UBI uses these reserved physical eraseblocks to relocate the bad one.
-         This option specifies how many physical eraseblocks will be reserved
-         for bad eraseblock handling (percents of total number of good flash
-         eraseblocks). If the underlying flash does not admit of bad
-         eraseblocks (e.g. NOR flash), this value is ignored and nothing is
-         reserved. Leave the default value if unsure.
+         This option specifies the maximum bad physical eraseblocks UBI
+         expects on the MTD device (per 1024 eraseblocks). If the underlying
+         flash does not admit of bad eraseblocks (e.g. NOR flash), this value
+         is ignored.
+
+         NAND datasheets often specify the minimum and maximum NVM (Number of
+         Valid Blocks) for the flashes' endurance lifetime. The maximum
+         expected bad eraseblocks per 1024 eraseblocks then can be calculated
+         as "1024 * (1 - MinNVB / MaxNVB)", which gives 20 for most NANDs
+         (MaxNVB is basically the total count of eraseblocks on the chip).
+
+         To put it differently, if this value is 20, UBI will try to reserve
+         about 1.9% of physical eraseblocks for bad blocks handling. And that
+         will be 1.9% of eraseblocks on the entire NAND chip, not just the MTD
+         partition UBI attaches. This means that if you have, say, a NAND
+         flash chip which admits of a maximum of 40 bad eraseblocks, and it is
+         split into two MTD partitions of the same size, UBI will reserve 40
+         eraseblocks when attaching a partition.
+
+         This option can be overridden by the "mtd=" UBI module parameter or
+         by the "attach" ioctl.
+
+         Leave the default value if unsure.
 
 config MTD_UBI_GLUEBI
        tristate "MTD devices emulation driver (gluebi)"
index bd27cbbb4066df18dce282977d9879a011aa4e79..f7adf53e4f4511f7ce1bb423124aeb57b86f9f82 100644 (file)
@@ -79,7 +79,7 @@
  *     NAND), it is probably a PEB which was being erased when power cut
  *     happened, so this is corruption type 1. However, this is just a guess,
  *     which might be wrong.
- *   o Otherwise this it corruption type 2.
+ *   o Otherwise this is corruption type 2.
  */
 
 #include <linux/err.h>
@@ -378,8 +378,8 @@ static int compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
                        if (err == UBI_IO_BITFLIPS)
                                bitflips = 1;
                        else {
-                               ubi_err("VID of PEB %d header is bad, but it "
-                                       "was OK earlier, err %d", pnum, err);
+                               ubi_err("VID of PEB %d header is bad, but it was OK earlier, err %d",
+                                       pnum, err);
                                if (err > 0)
                                        err = -EIO;
 
@@ -790,12 +790,12 @@ static int check_corruption(struct ubi_device *ubi, struct ubi_vid_hdr *vid_hdr,
        if (ubi_check_pattern(ubi->peb_buf, 0xFF, ubi->leb_size))
                goto out_unlock;
 
-       ubi_err("PEB %d contains corrupted VID header, and the data does not "
-               "contain all 0xFF, this may be a non-UBI PEB or a severe VID "
-               "header corruption which requires manual inspection", pnum);
+       ubi_err("PEB %d contains corrupted VID header, and the data does not contain all 0xFF",
+               pnum);
+       ubi_err("this may be a non-UBI PEB or a severe VID header corruption which requires manual inspection");
        ubi_dump_vid_hdr(vid_hdr);
-       dbg_msg("hexdump of PEB %d offset %d, length %d",
-               pnum, ubi->leb_start, ubi->leb_size);
+       pr_err("hexdump of PEB %d offset %d, length %d",
+              pnum, ubi->leb_start, ubi->leb_size);
        ubi_dbg_print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
                               ubi->peb_buf, ubi->leb_size, 1);
        err = 1;
@@ -907,8 +907,8 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
                        ubi->image_seq = image_seq;
                if (ubi->image_seq && image_seq &&
                    ubi->image_seq != image_seq) {
-                       ubi_err("bad image sequence number %d in PEB %d, "
-                               "expected %d", image_seq, pnum, ubi->image_seq);
+                       ubi_err("bad image sequence number %d in PEB %d, expected %d",
+                               image_seq, pnum, ubi->image_seq);
                        ubi_dump_ec_hdr(ech);
                        return -EINVAL;
                }
@@ -975,7 +975,7 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
                        return err;
                goto adjust_mean_ec;
        case UBI_IO_FF:
-               if (ec_err)
+               if (ec_err || bitflips)
                        err = add_to_list(ai, pnum, UBI_UNKNOWN,
                                          UBI_UNKNOWN, ec, 1, &ai->erase);
                else
@@ -997,8 +997,8 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
                /* Unsupported internal volume */
                switch (vidh->compat) {
                case UBI_COMPAT_DELETE:
-                       ubi_msg("\"delete\" compatible internal volume %d:%d"
-                               " found, will remove it", vol_id, lnum);
+                       ubi_msg("\"delete\" compatible internal volume %d:%d found, will remove it",
+                               vol_id, lnum);
                        err = add_to_list(ai, pnum, vol_id, lnum,
                                          ec, 1, &ai->erase);
                        if (err)
@@ -1006,15 +1006,14 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
                        return 0;
 
                case UBI_COMPAT_RO:
-                       ubi_msg("read-only compatible internal volume %d:%d"
-                               " found, switch to read-only mode",
+                       ubi_msg("read-only compatible internal volume %d:%d found, switch to read-only mode",
                                vol_id, lnum);
                        ubi->ro_mode = 1;
                        break;
 
                case UBI_COMPAT_PRESERVE:
-                       ubi_msg("\"preserve\" compatible internal volume %d:%d"
-                               " found", vol_id, lnum);
+                       ubi_msg("\"preserve\" compatible internal volume %d:%d found",
+                               vol_id, lnum);
                        err = add_to_list(ai, pnum, vol_id, lnum,
                                          ec, 0, &ai->alien);
                        if (err)
@@ -1075,10 +1074,10 @@ static int late_analysis(struct ubi_device *ubi, struct ubi_attach_info *ai)
        if (ai->corr_peb_count) {
                ubi_err("%d PEBs are corrupted and preserved",
                        ai->corr_peb_count);
-               printk(KERN_ERR "Corrupted PEBs are:");
+               pr_err("Corrupted PEBs are:");
                list_for_each_entry(aeb, &ai->corr, u.list)
-                       printk(KERN_CONT " %d", aeb->pnum);
-               printk(KERN_CONT "\n");
+                       pr_cont(" %d", aeb->pnum);
+               pr_cont("\n");
 
                /*
                 * If too many PEBs are corrupted, we refuse attaching,
@@ -1112,8 +1111,7 @@ static int late_analysis(struct ubi_device *ubi, struct ubi_attach_info *ai)
                        get_random_bytes(&ubi->image_seq,
                                         sizeof(ubi->image_seq));
                } else {
-                       ubi_err("MTD device is not UBI-formatted and possibly "
-                               "contains non-UBI data - refusing it");
+                       ubi_err("MTD device is not UBI-formatted and possibly contains non-UBI data - refusing it");
                        return -EINVAL;
                }
 
@@ -1172,7 +1170,7 @@ static struct ubi_attach_info *scan_all(struct ubi_device *ubi)
                        goto out_vidh;
        }
 
-       dbg_msg("scanning is finished");
+       ubi_msg("scanning is finished");
 
        /* Calculate mean erase counter */
        if (ai->ec_count)
@@ -1244,7 +1242,7 @@ int ubi_attach(struct ubi_device *ubi)
        ubi->corr_peb_count = ai->corr_peb_count;
        ubi->max_ec = ai->max_ec;
        ubi->mean_ec = ai->mean_ec;
-       ubi_msg("max. sequence number:       %llu", ai->max_sqnum);
+       dbg_gen("max. sequence number:       %llu", ai->max_sqnum);
 
        err = ubi_read_volume_table(ubi, ai);
        if (err)
index 2c5ed5ca9c3370bdf64ec05ccfab068585975fdf..34977039850c033986285b8e207d66a653c4a791 100644 (file)
@@ -36,6 +36,7 @@
 #include <linux/namei.h>
 #include <linux/stat.h>
 #include <linux/miscdevice.h>
+#include <linux/mtd/partitions.h>
 #include <linux/log2.h>
 #include <linux/kthread.h>
 #include <linux/kernel.h>
 /* Maximum length of the 'mtd=' parameter */
 #define MTD_PARAM_LEN_MAX 64
 
+/* Maximum number of comma-separated items in the 'mtd=' parameter */
+#define MTD_PARAM_MAX_COUNT 3
+
+/* Maximum value for the number of bad PEBs per 1024 PEBs */
+#define MAX_MTD_UBI_BEB_LIMIT 768
+
 #ifdef CONFIG_MTD_UBI_MODULE
 #define ubi_is_module() 1
 #else
  * @name: MTD character device node path, MTD device name, or MTD device number
  *        string
  * @vid_hdr_offs: VID header offset
+ * @max_beb_per1024: maximum expected number of bad PEBs per 1024 PEBs
  */
 struct mtd_dev_param {
        char name[MTD_PARAM_LEN_MAX];
        int vid_hdr_offs;
+       int max_beb_per1024;
 };
 
 /* Numbers of elements set in the @mtd_dev_param array */
@@ -564,9 +573,38 @@ void ubi_free_internal_volumes(struct ubi_device *ubi)
        }
 }
 
+static int get_bad_peb_limit(const struct ubi_device *ubi, int max_beb_per1024)
+{
+       int limit, device_pebs;
+       uint64_t device_size;
+
+       if (!max_beb_per1024)
+               return 0;
+
+       /*
+        * Here we are using size of the entire flash chip and
+        * not just the MTD partition size because the maximum
+        * number of bad eraseblocks is a percentage of the
+        * whole device and bad eraseblocks are not fairly
+        * distributed over the flash chip. So the worst case
+        * is that all the bad eraseblocks of the chip are in
+        * the MTD partition we are attaching (ubi->mtd).
+        */
+       device_size = mtd_get_device_size(ubi->mtd);
+       device_pebs = mtd_div_by_eb(device_size, ubi->mtd);
+       limit = mult_frac(device_pebs, max_beb_per1024, 1024);
+
+       /* Round it up */
+       if (mult_frac(limit, 1024, max_beb_per1024) < device_pebs)
+               limit += 1;
+
+       return limit;
+}
+
 /**
  * io_init - initialize I/O sub-system for a given UBI device.
  * @ubi: UBI device description object
+ * @max_beb_per1024: maximum expected number of bad PEB per 1024 PEBs
  *
  * If @ubi->vid_hdr_offset or @ubi->leb_start is zero, default offsets are
  * assumed:
@@ -579,8 +617,11 @@ void ubi_free_internal_volumes(struct ubi_device *ubi)
  * This function returns zero in case of success and a negative error code in
  * case of failure.
  */
-static int io_init(struct ubi_device *ubi)
+static int io_init(struct ubi_device *ubi, int max_beb_per1024)
 {
+       dbg_gen("sizeof(struct ubi_ainf_peb) %zu", sizeof(struct ubi_ainf_peb));
+       dbg_gen("sizeof(struct ubi_wl_entry) %zu", sizeof(struct ubi_wl_entry));
+
        if (ubi->mtd->numeraseregions != 0) {
                /*
                 * Some flashes have several erase regions. Different regions
@@ -607,8 +648,10 @@ static int io_init(struct ubi_device *ubi)
        ubi->peb_count  = mtd_div_by_eb(ubi->mtd->size, ubi->mtd);
        ubi->flash_size = ubi->mtd->size;
 
-       if (mtd_can_have_bb(ubi->mtd))
+       if (mtd_can_have_bb(ubi->mtd)) {
                ubi->bad_allowed = 1;
+               ubi->bad_peb_limit = get_bad_peb_limit(ubi, max_beb_per1024);
+       }
 
        if (ubi->mtd->type == MTD_NORFLASH) {
                ubi_assert(ubi->mtd->writesize == 1);
@@ -650,11 +693,11 @@ static int io_init(struct ubi_device *ubi)
        ubi->ec_hdr_alsize = ALIGN(UBI_EC_HDR_SIZE, ubi->hdrs_min_io_size);
        ubi->vid_hdr_alsize = ALIGN(UBI_VID_HDR_SIZE, ubi->hdrs_min_io_size);
 
-       dbg_msg("min_io_size      %d", ubi->min_io_size);
-       dbg_msg("max_write_size   %d", ubi->max_write_size);
-       dbg_msg("hdrs_min_io_size %d", ubi->hdrs_min_io_size);
-       dbg_msg("ec_hdr_alsize    %d", ubi->ec_hdr_alsize);
-       dbg_msg("vid_hdr_alsize   %d", ubi->vid_hdr_alsize);
+       dbg_gen("min_io_size      %d", ubi->min_io_size);
+       dbg_gen("max_write_size   %d", ubi->max_write_size);
+       dbg_gen("hdrs_min_io_size %d", ubi->hdrs_min_io_size);
+       dbg_gen("ec_hdr_alsize    %d", ubi->ec_hdr_alsize);
+       dbg_gen("vid_hdr_alsize   %d", ubi->vid_hdr_alsize);
 
        if (ubi->vid_hdr_offset == 0)
                /* Default offset */
@@ -671,10 +714,10 @@ static int io_init(struct ubi_device *ubi)
        ubi->leb_start = ubi->vid_hdr_offset + UBI_VID_HDR_SIZE;
        ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size);
 
-       dbg_msg("vid_hdr_offset   %d", ubi->vid_hdr_offset);
-       dbg_msg("vid_hdr_aloffset %d", ubi->vid_hdr_aloffset);
-       dbg_msg("vid_hdr_shift    %d", ubi->vid_hdr_shift);
-       dbg_msg("leb_start        %d", ubi->leb_start);
+       dbg_gen("vid_hdr_offset   %d", ubi->vid_hdr_offset);
+       dbg_gen("vid_hdr_aloffset %d", ubi->vid_hdr_aloffset);
+       dbg_gen("vid_hdr_shift    %d", ubi->vid_hdr_shift);
+       dbg_gen("leb_start        %d", ubi->leb_start);
 
        /* The shift must be aligned to 32-bit boundary */
        if (ubi->vid_hdr_shift % 4) {
@@ -700,7 +743,7 @@ static int io_init(struct ubi_device *ubi)
        ubi->max_erroneous = ubi->peb_count / 10;
        if (ubi->max_erroneous < 16)
                ubi->max_erroneous = 16;
-       dbg_msg("max_erroneous    %d", ubi->max_erroneous);
+       dbg_gen("max_erroneous    %d", ubi->max_erroneous);
 
        /*
         * It may happen that EC and VID headers are situated in one minimal
@@ -708,30 +751,18 @@ static int io_init(struct ubi_device *ubi)
         * read-only mode.
         */
        if (ubi->vid_hdr_offset + UBI_VID_HDR_SIZE <= ubi->hdrs_min_io_size) {
-               ubi_warn("EC and VID headers are in the same minimal I/O unit, "
-                        "switch to read-only mode");
+               ubi_warn("EC and VID headers are in the same minimal I/O unit, switch to read-only mode");
                ubi->ro_mode = 1;
        }
 
        ubi->leb_size = ubi->peb_size - ubi->leb_start;
 
        if (!(ubi->mtd->flags & MTD_WRITEABLE)) {
-               ubi_msg("MTD device %d is write-protected, attach in "
-                       "read-only mode", ubi->mtd->index);
+               ubi_msg("MTD device %d is write-protected, attach in read-only mode",
+                       ubi->mtd->index);
                ubi->ro_mode = 1;
        }
 
-       ubi_msg("physical eraseblock size:   %d bytes (%d KiB)",
-               ubi->peb_size, ubi->peb_size >> 10);
-       ubi_msg("logical eraseblock size:    %d bytes", ubi->leb_size);
-       ubi_msg("smallest flash I/O unit:    %d", ubi->min_io_size);
-       if (ubi->hdrs_min_io_size != ubi->min_io_size)
-               ubi_msg("sub-page size:              %d",
-                       ubi->hdrs_min_io_size);
-       ubi_msg("VID header offset:          %d (aligned %d)",
-               ubi->vid_hdr_offset, ubi->vid_hdr_aloffset);
-       ubi_msg("data offset:                %d", ubi->leb_start);
-
        /*
         * Note, ideally, we have to initialize @ubi->bad_peb_count here. But
         * unfortunately, MTD does not provide this information. We should loop
@@ -759,6 +790,11 @@ static int autoresize(struct ubi_device *ubi, int vol_id)
        struct ubi_volume *vol = ubi->volumes[vol_id];
        int err, old_reserved_pebs = vol->reserved_pebs;
 
+       if (ubi->ro_mode) {
+               ubi_warn("skip auto-resize because of R/O mode");
+               return 0;
+       }
+
        /*
         * Clear the auto-resize flag in the volume in-memory copy of the
         * volume table, and 'ubi_resize_volume()' will propagate this change
@@ -800,6 +836,7 @@ static int autoresize(struct ubi_device *ubi, int vol_id)
  * @mtd: MTD device description object
  * @ubi_num: number to assign to the new UBI device
  * @vid_hdr_offset: VID header offset
+ * @max_beb_per1024: maximum expected number of bad PEB per 1024 PEBs
  *
  * This function attaches MTD device @mtd_dev to UBI and assign @ubi_num number
  * to the newly created UBI device, unless @ubi_num is %UBI_DEV_NUM_AUTO, in
@@ -810,11 +847,18 @@ static int autoresize(struct ubi_device *ubi, int vol_id)
  * Note, the invocations of this function has to be serialized by the
  * @ubi_devices_mutex.
  */
-int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
+int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
+                      int vid_hdr_offset, int max_beb_per1024)
 {
        struct ubi_device *ubi;
        int i, err, ref = 0;
 
+       if (max_beb_per1024 < 0 || max_beb_per1024 > MAX_MTD_UBI_BEB_LIMIT)
+               return -EINVAL;
+
+       if (!max_beb_per1024)
+               max_beb_per1024 = CONFIG_MTD_UBI_BEB_LIMIT;
+
        /*
         * Check if we already have the same MTD device attached.
         *
@@ -839,8 +883,8 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
         * no sense to attach emulated MTD devices, so we prohibit this.
         */
        if (mtd->type == MTD_UBIVOLUME) {
-               ubi_err("refuse attaching mtd%d - it is already emulated on "
-                       "top of UBI", mtd->index);
+               ubi_err("refuse attaching mtd%d - it is already emulated on top of UBI",
+                       mtd->index);
                return -EINVAL;
        }
 
@@ -880,10 +924,8 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
        spin_lock_init(&ubi->volumes_lock);
 
        ubi_msg("attaching mtd%d to ubi%d", mtd->index, ubi_num);
-       dbg_msg("sizeof(struct ubi_ainf_peb) %zu", sizeof(struct ubi_ainf_peb));
-       dbg_msg("sizeof(struct ubi_wl_entry) %zu", sizeof(struct ubi_wl_entry));
 
-       err = io_init(ubi);
+       err = io_init(ubi, max_beb_per1024);
        if (err)
                goto out_free;
 
@@ -924,23 +966,24 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
                goto out_debugfs;
        }
 
-       ubi_msg("attached mtd%d to ubi%d", mtd->index, ubi_num);
-       ubi_msg("MTD device name:            \"%s\"", mtd->name);
-       ubi_msg("MTD device size:            %llu MiB", ubi->flash_size >> 20);
-       ubi_msg("number of good PEBs:        %d", ubi->good_peb_count);
-       ubi_msg("number of bad PEBs:         %d", ubi->bad_peb_count);
-       ubi_msg("number of corrupted PEBs:   %d", ubi->corr_peb_count);
-       ubi_msg("max. allowed volumes:       %d", ubi->vtbl_slots);
-       ubi_msg("wear-leveling threshold:    %d", CONFIG_MTD_UBI_WL_THRESHOLD);
-       ubi_msg("number of internal volumes: %d", UBI_INT_VOL_COUNT);
-       ubi_msg("number of user volumes:     %d",
-               ubi->vol_count - UBI_INT_VOL_COUNT);
-       ubi_msg("available PEBs:             %d", ubi->avail_pebs);
-       ubi_msg("total number of reserved PEBs: %d", ubi->rsvd_pebs);
-       ubi_msg("number of PEBs reserved for bad PEB handling: %d",
-               ubi->beb_rsvd_pebs);
-       ubi_msg("max/mean erase counter: %d/%d", ubi->max_ec, ubi->mean_ec);
-       ubi_msg("image sequence number:  %d", ubi->image_seq);
+       ubi_msg("attached mtd%d (name \"%s\", size %llu MiB) to ubi%d",
+               mtd->index, mtd->name, ubi->flash_size >> 20, ubi_num);
+       ubi_msg("PEB size: %d bytes (%d KiB), LEB size: %d bytes",
+               ubi->peb_size, ubi->peb_size >> 10, ubi->leb_size);
+       ubi_msg("min./max. I/O unit sizes: %d/%d, sub-page size %d",
+               ubi->min_io_size, ubi->max_write_size, ubi->hdrs_min_io_size);
+       ubi_msg("VID header offset: %d (aligned %d), data offset: %d",
+               ubi->vid_hdr_offset, ubi->vid_hdr_aloffset, ubi->leb_start);
+       ubi_msg("good PEBs: %d, bad PEBs: %d, corrupted PEBs: %d",
+               ubi->good_peb_count, ubi->bad_peb_count, ubi->corr_peb_count);
+       ubi_msg("user volume: %d, internal volumes: %d, max. volumes count: %d",
+               ubi->vol_count - UBI_INT_VOL_COUNT, UBI_INT_VOL_COUNT,
+               ubi->vtbl_slots);
+       ubi_msg("max/mean erase counter: %d/%d, WL threshold: %d, image sequence number: %u",
+               ubi->max_ec, ubi->mean_ec, CONFIG_MTD_UBI_WL_THRESHOLD,
+               ubi->image_seq);
+       ubi_msg("available PEBs: %d, total reserved PEBs: %d, PEBs reserved for bad PEB handling: %d",
+               ubi->avail_pebs, ubi->rsvd_pebs, ubi->beb_rsvd_pebs);
 
        /*
         * The below lock makes sure we do not race with 'ubi_thread()' which
@@ -1017,7 +1060,7 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
 
        ubi_assert(ubi_num == ubi->ubi_num);
        ubi_notify_all(ubi, UBI_VOLUME_REMOVED, NULL);
-       dbg_msg("detaching mtd%d from ubi%d", ubi->mtd->index, ubi_num);
+       ubi_msg("detaching mtd%d from ubi%d", ubi->mtd->index, ubi_num);
 
        /*
         * Before freeing anything, we have to stop the background thread to
@@ -1172,7 +1215,7 @@ static int __init ubi_init(void)
 
                mutex_lock(&ubi_devices_mutex);
                err = ubi_attach_mtd_dev(mtd, UBI_DEV_NUM_AUTO,
-                                        p->vid_hdr_offs);
+                                        p->vid_hdr_offs, p->max_beb_per1024);
                mutex_unlock(&ubi_devices_mutex);
                if (err < 0) {
                        ubi_err("cannot attach mtd%d", mtd->index);
@@ -1218,7 +1261,7 @@ out:
        ubi_err("UBI error: cannot initialize UBI, error %d", err);
        return err;
 }
-module_init(ubi_init);
+late_initcall(ubi_init);
 
 static void __exit ubi_exit(void)
 {
@@ -1252,8 +1295,7 @@ static int __init bytes_str_to_int(const char *str)
 
        result = simple_strtoul(str, &endp, 0);
        if (str == endp || result >= INT_MAX) {
-               printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
-                      str);
+               ubi_err("UBI error: incorrect bytes count: \"%s\"\n", str);
                return -EINVAL;
        }
 
@@ -1269,8 +1311,7 @@ static int __init bytes_str_to_int(const char *str)
        case '\0':
                break;
        default:
-               printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
-                      str);
+               ubi_err("UBI error: incorrect bytes count: \"%s\"\n", str);
                return -EINVAL;
        }
 
@@ -1291,27 +1332,26 @@ static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
        struct mtd_dev_param *p;
        char buf[MTD_PARAM_LEN_MAX];
        char *pbuf = &buf[0];
-       char *tokens[2] = {NULL, NULL};
+       char *tokens[MTD_PARAM_MAX_COUNT];
 
        if (!val)
                return -EINVAL;
 
        if (mtd_devs == UBI_MAX_DEVICES) {
-               printk(KERN_ERR "UBI error: too many parameters, max. is %d\n",
-                      UBI_MAX_DEVICES);
+               ubi_err("UBI error: too many parameters, max. is %d\n",
+                       UBI_MAX_DEVICES);
                return -EINVAL;
        }
 
        len = strnlen(val, MTD_PARAM_LEN_MAX);
        if (len == MTD_PARAM_LEN_MAX) {
-               printk(KERN_ERR "UBI error: parameter \"%s\" is too long, "
-                      "max. is %d\n", val, MTD_PARAM_LEN_MAX);
+               ubi_err("UBI error: parameter \"%s\" is too long, max. is %d\n",
+                       val, MTD_PARAM_LEN_MAX);
                return -EINVAL;
        }
 
        if (len == 0) {
-               printk(KERN_WARNING "UBI warning: empty 'mtd=' parameter - "
-                      "ignored\n");
+               pr_warn("UBI warning: empty 'mtd=' parameter - ignored\n");
                return 0;
        }
 
@@ -1321,12 +1361,11 @@ static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
        if (buf[len - 1] == '\n')
                buf[len - 1] = '\0';
 
-       for (i = 0; i < 2; i++)
+       for (i = 0; i < MTD_PARAM_MAX_COUNT; i++)
                tokens[i] = strsep(&pbuf, ",");
 
        if (pbuf) {
-               printk(KERN_ERR "UBI error: too many arguments at \"%s\"\n",
-                      val);
+               ubi_err("UBI error: too many arguments at \"%s\"\n", val);
                return -EINVAL;
        }
 
@@ -1339,23 +1378,32 @@ static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
        if (p->vid_hdr_offs < 0)
                return p->vid_hdr_offs;
 
+       if (tokens[2]) {
+               int err = kstrtoint(tokens[2], 10, &p->max_beb_per1024);
+
+               if (err) {
+                       ubi_err("UBI error: bad value for max_beb_per1024 parameter: %s",
+                               tokens[2]);
+                       return -EINVAL;
+               }
+       }
+
        mtd_devs += 1;
        return 0;
 }
 
 module_param_call(mtd, ubi_mtd_param_parse, NULL, NULL, 000);
-MODULE_PARM_DESC(mtd, "MTD devices to attach. Parameter format: "
-                     "mtd=<name|num|path>[,<vid_hdr_offs>].\n"
+MODULE_PARM_DESC(mtd, "MTD devices to attach. Parameter format: mtd=<name|num|path>[,<vid_hdr_offs>[,max_beb_per1024]].\n"
                      "Multiple \"mtd\" parameters may be specified.\n"
-                     "MTD devices may be specified by their number, name, or "
-                     "path to the MTD character device node.\n"
-                     "Optional \"vid_hdr_offs\" parameter specifies UBI VID "
-                     "header position to be used by UBI.\n"
-                     "Example 1: mtd=/dev/mtd0 - attach MTD device "
-                     "/dev/mtd0.\n"
-                     "Example 2: mtd=content,1984 mtd=4 - attach MTD device "
-                     "with name \"content\" using VID header offset 1984, and "
-                     "MTD device number 4 with default VID header offset.");
+                     "MTD devices may be specified by their number, name, or path to the MTD character device node.\n"
+                     "Optional \"vid_hdr_offs\" parameter specifies UBI VID header position to be used by UBI. (default value if 0)\n"
+                     "Optional \"max_beb_per1024\" parameter specifies the maximum expected bad eraseblock per 1024 eraseblocks. (default value ("
+                     __stringify(CONFIG_MTD_UBI_BEB_LIMIT) ") if 0)\n"
+                     "\n"
+                     "Example 1: mtd=/dev/mtd0 - attach MTD device /dev/mtd0.\n"
+                     "Example 2: mtd=content,1984 mtd=4 - attach MTD device with name \"content\" using VID header offset 1984, and MTD device number 4 with default VID header offset.\n"
+                     "Example 3: mtd=/dev/mtd1,0,25 - attach MTD device /dev/mtd1 using default VID header offset and reserve 25*nand_size_in_blocks/1024 erase blocks for bad block handling.\n"
+                     "\t(e.g. if the NAND *chipset* has 4096 PEB, 100 will be reserved for this UBI device).");
 
 MODULE_VERSION(__stringify(UBI_VERSION));
 MODULE_DESCRIPTION("UBI - Unsorted Block Images");
index fb55678781813da69b91f81c4aaa5430aef93974..dfcc65b33e99f35d5289cede948d72550f86f843 100644 (file)
@@ -140,9 +140,9 @@ static int vol_cdev_release(struct inode *inode, struct file *file)
                vol->updating = 0;
                vfree(vol->upd_buf);
        } else if (vol->changing_leb) {
-               dbg_gen("only %lld of %lld bytes received for atomic LEB change"
-                       " for volume %d:%d, cancel", vol->upd_received,
-                       vol->upd_bytes, vol->ubi->ubi_num, vol->vol_id);
+               dbg_gen("only %lld of %lld bytes received for atomic LEB change for volume %d:%d, cancel",
+                       vol->upd_received, vol->upd_bytes, vol->ubi->ubi_num,
+                       vol->vol_id);
                vol->changing_leb = 0;
                vfree(vol->upd_buf);
        }
@@ -189,7 +189,8 @@ static loff_t vol_cdev_llseek(struct file *file, loff_t offset, int origin)
        return new_offset;
 }
 
-static int vol_cdev_fsync(struct file *file, loff_t start, loff_t end, int datasync)
+static int vol_cdev_fsync(struct file *file, loff_t start, loff_t end,
+                         int datasync)
 {
        struct ubi_volume_desc *desc = file->private_data;
        struct ubi_device *ubi = desc->vol->ubi;
@@ -753,7 +754,7 @@ static int rename_volumes(struct ubi_device *ubi,
                re->new_name_len = name_len;
                memcpy(re->new_name, name, name_len);
                list_add_tail(&re->list, &rename_list);
-               dbg_msg("will rename volume %d from \"%s\" to \"%s\"",
+               dbg_gen("will rename volume %d from \"%s\" to \"%s\"",
                        vol_id, re->desc->vol->name, name);
        }
 
@@ -811,7 +812,7 @@ static int rename_volumes(struct ubi_device *ubi,
                re1->remove = 1;
                re1->desc = desc;
                list_add(&re1->list, &rename_list);
-               dbg_msg("will remove volume %d, name \"%s\"",
+               dbg_gen("will remove volume %d, name \"%s\"",
                        re1->desc->vol->vol_id, re1->desc->vol->name);
        }
 
@@ -942,7 +943,7 @@ static long ubi_cdev_ioctl(struct file *file, unsigned int cmd,
        {
                struct ubi_rnvol_req *req;
 
-               dbg_msg("re-name volumes");
+               dbg_gen("re-name volumes");
                req = kmalloc(sizeof(struct ubi_rnvol_req), GFP_KERNEL);
                if (!req) {
                        err = -ENOMEM;
@@ -1010,7 +1011,8 @@ static long ctrl_cdev_ioctl(struct file *file, unsigned int cmd,
                 * 'ubi_attach_mtd_dev()'.
                 */
                mutex_lock(&ubi_devices_mutex);
-               err = ubi_attach_mtd_dev(mtd, req.ubi_num, req.vid_hdr_offset);
+               err = ubi_attach_mtd_dev(mtd, req.ubi_num, req.vid_hdr_offset,
+                                        req.max_beb_per1024);
                mutex_unlock(&ubi_devices_mutex);
                if (err < 0)
                        put_mtd_device(mtd);
index 7c1380305219724bc0837cb6c1dc93320d4087c1..26908a59506b09925accb2eace4e492c88fe4b97 100644 (file)
@@ -43,8 +43,8 @@ void ubi_dump_flash(struct ubi_device *ubi, int pnum, int offset, int len)
                return;
        err = mtd_read(ubi->mtd, addr, len, &read, buf);
        if (err && err != -EUCLEAN) {
-               ubi_err("error %d while reading %d bytes from PEB %d:%d, "
-                       "read %zd bytes", err, len, pnum, offset, read);
+               ubi_err("error %d while reading %d bytes from PEB %d:%d, read %zd bytes",
+                       err, len, pnum, offset, read);
                goto out;
        }
 
@@ -62,21 +62,15 @@ out:
  */
 void ubi_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr)
 {
-       printk(KERN_DEBUG "Erase counter header dump:\n");
-       printk(KERN_DEBUG "\tmagic          %#08x\n",
-              be32_to_cpu(ec_hdr->magic));
-       printk(KERN_DEBUG "\tversion        %d\n", (int)ec_hdr->version);
-       printk(KERN_DEBUG "\tec             %llu\n",
-              (long long)be64_to_cpu(ec_hdr->ec));
-       printk(KERN_DEBUG "\tvid_hdr_offset %d\n",
-              be32_to_cpu(ec_hdr->vid_hdr_offset));
-       printk(KERN_DEBUG "\tdata_offset    %d\n",
-              be32_to_cpu(ec_hdr->data_offset));
-       printk(KERN_DEBUG "\timage_seq      %d\n",
-              be32_to_cpu(ec_hdr->image_seq));
-       printk(KERN_DEBUG "\thdr_crc        %#08x\n",
-              be32_to_cpu(ec_hdr->hdr_crc));
-       printk(KERN_DEBUG "erase counter header hexdump:\n");
+       pr_err("Erase counter header dump:\n");
+       pr_err("\tmagic          %#08x\n", be32_to_cpu(ec_hdr->magic));
+       pr_err("\tversion        %d\n", (int)ec_hdr->version);
+       pr_err("\tec             %llu\n", (long long)be64_to_cpu(ec_hdr->ec));
+       pr_err("\tvid_hdr_offset %d\n", be32_to_cpu(ec_hdr->vid_hdr_offset));
+       pr_err("\tdata_offset    %d\n", be32_to_cpu(ec_hdr->data_offset));
+       pr_err("\timage_seq      %d\n", be32_to_cpu(ec_hdr->image_seq));
+       pr_err("\thdr_crc        %#08x\n", be32_to_cpu(ec_hdr->hdr_crc));
+       pr_err("erase counter header hexdump:\n");
        print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
                       ec_hdr, UBI_EC_HDR_SIZE, 1);
 }
@@ -87,21 +81,21 @@ void ubi_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr)
  */
 void ubi_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr)
 {
-       printk(KERN_DEBUG "Volume identifier header dump:\n");
-       printk(KERN_DEBUG "\tmagic     %08x\n", be32_to_cpu(vid_hdr->magic));
-       printk(KERN_DEBUG "\tversion   %d\n",  (int)vid_hdr->version);
-       printk(KERN_DEBUG "\tvol_type  %d\n",  (int)vid_hdr->vol_type);
-       printk(KERN_DEBUG "\tcopy_flag %d\n",  (int)vid_hdr->copy_flag);
-       printk(KERN_DEBUG "\tcompat    %d\n",  (int)vid_hdr->compat);
-       printk(KERN_DEBUG "\tvol_id    %d\n",  be32_to_cpu(vid_hdr->vol_id));
-       printk(KERN_DEBUG "\tlnum      %d\n",  be32_to_cpu(vid_hdr->lnum));
-       printk(KERN_DEBUG "\tdata_size %d\n",  be32_to_cpu(vid_hdr->data_size));
-       printk(KERN_DEBUG "\tused_ebs  %d\n",  be32_to_cpu(vid_hdr->used_ebs));
-       printk(KERN_DEBUG "\tdata_pad  %d\n",  be32_to_cpu(vid_hdr->data_pad));
-       printk(KERN_DEBUG "\tsqnum     %llu\n",
+       pr_err("Volume identifier header dump:\n");
+       pr_err("\tmagic     %08x\n", be32_to_cpu(vid_hdr->magic));
+       pr_err("\tversion   %d\n",  (int)vid_hdr->version);
+       pr_err("\tvol_type  %d\n",  (int)vid_hdr->vol_type);
+       pr_err("\tcopy_flag %d\n",  (int)vid_hdr->copy_flag);
+       pr_err("\tcompat    %d\n",  (int)vid_hdr->compat);
+       pr_err("\tvol_id    %d\n",  be32_to_cpu(vid_hdr->vol_id));
+       pr_err("\tlnum      %d\n",  be32_to_cpu(vid_hdr->lnum));
+       pr_err("\tdata_size %d\n",  be32_to_cpu(vid_hdr->data_size));
+       pr_err("\tused_ebs  %d\n",  be32_to_cpu(vid_hdr->used_ebs));
+       pr_err("\tdata_pad  %d\n",  be32_to_cpu(vid_hdr->data_pad));
+       pr_err("\tsqnum     %llu\n",
                (unsigned long long)be64_to_cpu(vid_hdr->sqnum));
-       printk(KERN_DEBUG "\thdr_crc   %08x\n", be32_to_cpu(vid_hdr->hdr_crc));
-       printk(KERN_DEBUG "Volume identifier header hexdump:\n");
+       pr_err("\thdr_crc   %08x\n", be32_to_cpu(vid_hdr->hdr_crc));
+       pr_err("Volume identifier header hexdump:\n");
        print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
                       vid_hdr, UBI_VID_HDR_SIZE, 1);
 }
@@ -112,25 +106,25 @@ void ubi_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr)
  */
 void ubi_dump_vol_info(const struct ubi_volume *vol)
 {
-       printk(KERN_DEBUG "Volume information dump:\n");
-       printk(KERN_DEBUG "\tvol_id          %d\n", vol->vol_id);
-       printk(KERN_DEBUG "\treserved_pebs   %d\n", vol->reserved_pebs);
-       printk(KERN_DEBUG "\talignment       %d\n", vol->alignment);
-       printk(KERN_DEBUG "\tdata_pad        %d\n", vol->data_pad);
-       printk(KERN_DEBUG "\tvol_type        %d\n", vol->vol_type);
-       printk(KERN_DEBUG "\tname_len        %d\n", vol->name_len);
-       printk(KERN_DEBUG "\tusable_leb_size %d\n", vol->usable_leb_size);
-       printk(KERN_DEBUG "\tused_ebs        %d\n", vol->used_ebs);
-       printk(KERN_DEBUG "\tused_bytes      %lld\n", vol->used_bytes);
-       printk(KERN_DEBUG "\tlast_eb_bytes   %d\n", vol->last_eb_bytes);
-       printk(KERN_DEBUG "\tcorrupted       %d\n", vol->corrupted);
-       printk(KERN_DEBUG "\tupd_marker      %d\n", vol->upd_marker);
+       pr_err("Volume information dump:\n");
+       pr_err("\tvol_id          %d\n", vol->vol_id);
+       pr_err("\treserved_pebs   %d\n", vol->reserved_pebs);
+       pr_err("\talignment       %d\n", vol->alignment);
+       pr_err("\tdata_pad        %d\n", vol->data_pad);
+       pr_err("\tvol_type        %d\n", vol->vol_type);
+       pr_err("\tname_len        %d\n", vol->name_len);
+       pr_err("\tusable_leb_size %d\n", vol->usable_leb_size);
+       pr_err("\tused_ebs        %d\n", vol->used_ebs);
+       pr_err("\tused_bytes      %lld\n", vol->used_bytes);
+       pr_err("\tlast_eb_bytes   %d\n", vol->last_eb_bytes);
+       pr_err("\tcorrupted       %d\n", vol->corrupted);
+       pr_err("\tupd_marker      %d\n", vol->upd_marker);
 
        if (vol->name_len <= UBI_VOL_NAME_MAX &&
            strnlen(vol->name, vol->name_len + 1) == vol->name_len) {
-               printk(KERN_DEBUG "\tname            %s\n", vol->name);
+               pr_err("\tname            %s\n", vol->name);
        } else {
-               printk(KERN_DEBUG "\t1st 5 characters of name: %c%c%c%c%c\n",
+               pr_err("\t1st 5 characters of name: %c%c%c%c%c\n",
                       vol->name[0], vol->name[1], vol->name[2],
                       vol->name[3], vol->name[4]);
        }
@@ -145,29 +139,28 @@ void ubi_dump_vtbl_record(const struct ubi_vtbl_record *r, int idx)
 {
        int name_len = be16_to_cpu(r->name_len);
 
-       printk(KERN_DEBUG "Volume table record %d dump:\n", idx);
-       printk(KERN_DEBUG "\treserved_pebs   %d\n",
-              be32_to_cpu(r->reserved_pebs));
-       printk(KERN_DEBUG "\talignment       %d\n", be32_to_cpu(r->alignment));
-       printk(KERN_DEBUG "\tdata_pad        %d\n", be32_to_cpu(r->data_pad));
-       printk(KERN_DEBUG "\tvol_type        %d\n", (int)r->vol_type);
-       printk(KERN_DEBUG "\tupd_marker      %d\n", (int)r->upd_marker);
-       printk(KERN_DEBUG "\tname_len        %d\n", name_len);
+       pr_err("Volume table record %d dump:\n", idx);
+       pr_err("\treserved_pebs   %d\n", be32_to_cpu(r->reserved_pebs));
+       pr_err("\talignment       %d\n", be32_to_cpu(r->alignment));
+       pr_err("\tdata_pad        %d\n", be32_to_cpu(r->data_pad));
+       pr_err("\tvol_type        %d\n", (int)r->vol_type);
+       pr_err("\tupd_marker      %d\n", (int)r->upd_marker);
+       pr_err("\tname_len        %d\n", name_len);
 
        if (r->name[0] == '\0') {
-               printk(KERN_DEBUG "\tname            NULL\n");
+               pr_err("\tname            NULL\n");
                return;
        }
 
        if (name_len <= UBI_VOL_NAME_MAX &&
            strnlen(&r->name[0], name_len + 1) == name_len) {
-               printk(KERN_DEBUG "\tname            %s\n", &r->name[0]);
+               pr_err("\tname            %s\n", &r->name[0]);
        } else {
-               printk(KERN_DEBUG "\t1st 5 characters of name: %c%c%c%c%c\n",
+               pr_err("\t1st 5 characters of name: %c%c%c%c%c\n",
                        r->name[0], r->name[1], r->name[2], r->name[3],
                        r->name[4]);
        }
-       printk(KERN_DEBUG "\tcrc             %#08x\n", be32_to_cpu(r->crc));
+       pr_err("\tcrc             %#08x\n", be32_to_cpu(r->crc));
 }
 
 /**
@@ -176,15 +169,15 @@ void ubi_dump_vtbl_record(const struct ubi_vtbl_record *r, int idx)
  */
 void ubi_dump_av(const struct ubi_ainf_volume *av)
 {
-       printk(KERN_DEBUG "Volume attaching information dump:\n");
-       printk(KERN_DEBUG "\tvol_id         %d\n", av->vol_id);
-       printk(KERN_DEBUG "\thighest_lnum   %d\n", av->highest_lnum);
-       printk(KERN_DEBUG "\tleb_count      %d\n", av->leb_count);
-       printk(KERN_DEBUG "\tcompat         %d\n", av->compat);
-       printk(KERN_DEBUG "\tvol_type       %d\n", av->vol_type);
-       printk(KERN_DEBUG "\tused_ebs       %d\n", av->used_ebs);
-       printk(KERN_DEBUG "\tlast_data_size %d\n", av->last_data_size);
-       printk(KERN_DEBUG "\tdata_pad       %d\n", av->data_pad);
+       pr_err("Volume attaching information dump:\n");
+       pr_err("\tvol_id         %d\n", av->vol_id);
+       pr_err("\thighest_lnum   %d\n", av->highest_lnum);
+       pr_err("\tleb_count      %d\n", av->leb_count);
+       pr_err("\tcompat         %d\n", av->compat);
+       pr_err("\tvol_type       %d\n", av->vol_type);
+       pr_err("\tused_ebs       %d\n", av->used_ebs);
+       pr_err("\tlast_data_size %d\n", av->last_data_size);
+       pr_err("\tdata_pad       %d\n", av->data_pad);
 }
 
 /**
@@ -194,13 +187,13 @@ void ubi_dump_av(const struct ubi_ainf_volume *av)
  */
 void ubi_dump_aeb(const struct ubi_ainf_peb *aeb, int type)
 {
-       printk(KERN_DEBUG "eraseblock attaching information dump:\n");
-       printk(KERN_DEBUG "\tec       %d\n", aeb->ec);
-       printk(KERN_DEBUG "\tpnum     %d\n", aeb->pnum);
+       pr_err("eraseblock attaching information dump:\n");
+       pr_err("\tec       %d\n", aeb->ec);
+       pr_err("\tpnum     %d\n", aeb->pnum);
        if (type == 0) {
-               printk(KERN_DEBUG "\tlnum     %d\n", aeb->lnum);
-               printk(KERN_DEBUG "\tscrub    %d\n", aeb->scrub);
-               printk(KERN_DEBUG "\tsqnum    %llu\n", aeb->sqnum);
+               pr_err("\tlnum     %d\n", aeb->lnum);
+               pr_err("\tscrub    %d\n", aeb->scrub);
+               pr_err("\tsqnum    %llu\n", aeb->sqnum);
        }
 }
 
@@ -212,16 +205,16 @@ void ubi_dump_mkvol_req(const struct ubi_mkvol_req *req)
 {
        char nm[17];
 
-       printk(KERN_DEBUG "Volume creation request dump:\n");
-       printk(KERN_DEBUG "\tvol_id    %d\n",   req->vol_id);
-       printk(KERN_DEBUG "\talignment %d\n",   req->alignment);
-       printk(KERN_DEBUG "\tbytes     %lld\n", (long long)req->bytes);
-       printk(KERN_DEBUG "\tvol_type  %d\n",   req->vol_type);
-       printk(KERN_DEBUG "\tname_len  %d\n",   req->name_len);
+       pr_err("Volume creation request dump:\n");
+       pr_err("\tvol_id    %d\n",   req->vol_id);
+       pr_err("\talignment %d\n",   req->alignment);
+       pr_err("\tbytes     %lld\n", (long long)req->bytes);
+       pr_err("\tvol_type  %d\n",   req->vol_type);
+       pr_err("\tname_len  %d\n",   req->name_len);
 
        memcpy(nm, req->name, 16);
        nm[16] = 0;
-       printk(KERN_DEBUG "\t1st 16 characters of name: %s\n", nm);
+       pr_err("\t1st 16 characters of name: %s\n", nm);
 }
 
 /**
index d5d2645b51a78b899fdd8a0e0f4a57087f646b30..3dbc877d96633b5324c725f6a47810811b64e30c 100644 (file)
@@ -29,22 +29,18 @@ void ubi_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr);
 
 #define ubi_assert(expr)  do {                                               \
        if (unlikely(!(expr))) {                                             \
-               printk(KERN_CRIT "UBI assert failed in %s at %u (pid %d)\n", \
+               pr_crit("UBI assert failed in %s at %u (pid %d)\n",          \
                       __func__, __LINE__, current->pid);                    \
                dump_stack();                                                \
        }                                                                    \
 } while (0)
 
-#define ubi_dbg_print_hex_dump(l, ps, pt, r, g, b, len, a)  \
+#define ubi_dbg_print_hex_dump(l, ps, pt, r, g, b, len, a)                   \
                print_hex_dump(l, ps, pt, r, g, b, len, a)
 
 #define ubi_dbg_msg(type, fmt, ...) \
-       pr_debug("UBI DBG " type ": " fmt "\n", ##__VA_ARGS__)
-
-/* Just a debugging messages not related to any specific UBI subsystem */
-#define dbg_msg(fmt, ...)                                    \
-       printk(KERN_DEBUG "UBI DBG (pid %d): %s: " fmt "\n", \
-              current->pid, __func__, ##__VA_ARGS__)
+       pr_debug("UBI DBG " type " (pid %d): " fmt "\n", current->pid,       \
+                ##__VA_ARGS__)
 
 /* General debugging messages */
 #define dbg_gen(fmt, ...) ubi_dbg_msg("gen", fmt, ##__VA_ARGS__)
index b703ac7729cf8b44619848696bb7fe470ebba8a7..a26d7d253174ad114f248b77bb5f83febd88ebfd 100644 (file)
@@ -420,9 +420,8 @@ retry:
                                 */
                                if (err == UBI_IO_BAD_HDR_EBADMSG ||
                                    err == UBI_IO_BAD_HDR) {
-                                       ubi_warn("corrupted VID header at PEB "
-                                                "%d, LEB %d:%d", pnum, vol_id,
-                                                lnum);
+                                       ubi_warn("corrupted VID header at PEB %d, LEB %d:%d",
+                                                pnum, vol_id, lnum);
                                        err = -EBADMSG;
                                } else
                                        ubi_ro_mode(ubi);
@@ -660,9 +659,8 @@ retry:
        if (len) {
                err = ubi_io_write_data(ubi, buf, pnum, offset, len);
                if (err) {
-                       ubi_warn("failed to write %d bytes at offset %d of "
-                                "LEB %d:%d, PEB %d", len, offset, vol_id,
-                                lnum, pnum);
+                       ubi_warn("failed to write %d bytes at offset %d of LEB %d:%d, PEB %d",
+                                len, offset, vol_id, lnum, pnum);
                        goto write_error;
                }
        }
@@ -1040,9 +1038,8 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
         * cancel it.
         */
        if (vol->eba_tbl[lnum] != from) {
-               dbg_wl("LEB %d:%d is no longer mapped to PEB %d, mapped to "
-                      "PEB %d, cancel", vol_id, lnum, from,
-                      vol->eba_tbl[lnum]);
+               dbg_wl("LEB %d:%d is no longer mapped to PEB %d, mapped to PEB %d, cancel",
+                      vol_id, lnum, from, vol->eba_tbl[lnum]);
                err = MOVE_CANCEL_RACE;
                goto out_unlock_leb;
        }
@@ -1107,8 +1104,8 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
        err = ubi_io_read_vid_hdr(ubi, to, vid_hdr, 1);
        if (err) {
                if (err != UBI_IO_BITFLIPS) {
-                       ubi_warn("error %d while reading VID header back from "
-                                 "PEB %d", err, to);
+                       ubi_warn("error %d while reading VID header back from PEB %d",
+                                err, to);
                        if (is_error_sane(err))
                                err = MOVE_TARGET_RD_ERR;
                } else
@@ -1134,8 +1131,8 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
                err = ubi_io_read_data(ubi, ubi->peb_buf, to, 0, aldata_size);
                if (err) {
                        if (err != UBI_IO_BITFLIPS) {
-                               ubi_warn("error %d while reading data back "
-                                        "from PEB %d", err, to);
+                               ubi_warn("error %d while reading data back from PEB %d",
+                                        err, to);
                                if (is_error_sane(err))
                                        err = MOVE_TARGET_RD_ERR;
                        } else
@@ -1146,8 +1143,8 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
                cond_resched();
 
                if (crc != crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size)) {
-                       ubi_warn("read data back from PEB %d and it is "
-                                "different", to);
+                       ubi_warn("read data back from PEB %d and it is different",
+                                to);
                        err = -EINVAL;
                        goto out_unlock_buf;
                }
@@ -1197,11 +1194,11 @@ static void print_rsvd_warning(struct ubi_device *ubi,
                        return;
        }
 
-       ubi_warn("cannot reserve enough PEBs for bad PEB handling, reserved %d,"
-                " need %d", ubi->beb_rsvd_pebs, ubi->beb_rsvd_level);
+       ubi_warn("cannot reserve enough PEBs for bad PEB handling, reserved %d, need %d",
+                ubi->beb_rsvd_pebs, ubi->beb_rsvd_level);
        if (ubi->corr_peb_count)
                ubi_warn("%d PEBs are corrupted and not used",
-                       ubi->corr_peb_count);
+                        ubi->corr_peb_count);
 }
 
 /**
index 4e44bee4c564b4bf410fd4684126ff8ff2967c12..4bd4db8c84c96cd4c925d59b567eaa39f72c92e2 100644 (file)
@@ -41,7 +41,7 @@
 #include "ubi-media.h"
 
 #define err_msg(fmt, ...)                                   \
-       printk(KERN_DEBUG "gluebi (pid %d): %s: " fmt "\n", \
+       pr_err("gluebi (pid %d): %s: " fmt "\n",            \
               current->pid, __func__, ##__VA_ARGS__)
 
 /**
@@ -341,9 +341,8 @@ static int gluebi_create(struct ubi_device_info *di,
        mutex_lock(&devices_mutex);
        g = find_gluebi_nolock(vi->ubi_num, vi->vol_id);
        if (g)
-               err_msg("gluebi MTD device %d form UBI device %d volume %d "
-                       "already exists", g->mtd.index, vi->ubi_num,
-                       vi->vol_id);
+               err_msg("gluebi MTD device %d form UBI device %d volume %d already exists",
+                       g->mtd.index, vi->ubi_num, vi->vol_id);
        mutex_unlock(&devices_mutex);
 
        if (mtd_device_register(mtd, NULL, 0)) {
@@ -376,8 +375,8 @@ static int gluebi_remove(struct ubi_volume_info *vi)
        mutex_lock(&devices_mutex);
        gluebi = find_gluebi_nolock(vi->ubi_num, vi->vol_id);
        if (!gluebi) {
-               err_msg("got remove notification for unknown UBI device %d "
-                       "volume %d", vi->ubi_num, vi->vol_id);
+               err_msg("got remove notification for unknown UBI device %d volume %d",
+                       vi->ubi_num, vi->vol_id);
                err = -ENOENT;
        } else if (gluebi->refcnt)
                err = -EBUSY;
@@ -390,9 +389,8 @@ static int gluebi_remove(struct ubi_volume_info *vi)
        mtd = &gluebi->mtd;
        err = mtd_device_unregister(mtd);
        if (err) {
-               err_msg("cannot remove fake MTD device %d, UBI device %d, "
-                       "volume %d, error %d", mtd->index, gluebi->ubi_num,
-                       gluebi->vol_id, err);
+               err_msg("cannot remove fake MTD device %d, UBI device %d, volume %d, error %d",
+                       mtd->index, gluebi->ubi_num, gluebi->vol_id, err);
                mutex_lock(&devices_mutex);
                list_add_tail(&gluebi->list, &gluebi_devices);
                mutex_unlock(&devices_mutex);
@@ -422,8 +420,8 @@ static int gluebi_updated(struct ubi_volume_info *vi)
        gluebi = find_gluebi_nolock(vi->ubi_num, vi->vol_id);
        if (!gluebi) {
                mutex_unlock(&devices_mutex);
-               err_msg("got update notification for unknown UBI device %d "
-                       "volume %d", vi->ubi_num, vi->vol_id);
+               err_msg("got update notification for unknown UBI device %d volume %d",
+                       vi->ubi_num, vi->vol_id);
                return -ENOENT;
        }
 
@@ -449,8 +447,8 @@ static int gluebi_resized(struct ubi_volume_info *vi)
        gluebi = find_gluebi_nolock(vi->ubi_num, vi->vol_id);
        if (!gluebi) {
                mutex_unlock(&devices_mutex);
-               err_msg("got update notification for unknown UBI device %d "
-                       "volume %d", vi->ubi_num, vi->vol_id);
+               err_msg("got update notification for unknown UBI device %d volume %d",
+                       vi->ubi_num, vi->vol_id);
                return -ENOENT;
        }
        gluebi->mtd.size = vi->used_bytes;
@@ -507,9 +505,9 @@ static void __exit ubi_gluebi_exit(void)
 
                err = mtd_device_unregister(mtd);
                if (err)
-                       err_msg("error %d while removing gluebi MTD device %d, "
-                               "UBI device %d, volume %d - ignoring", err,
-                               mtd->index, gluebi->ubi_num, gluebi->vol_id);
+                       err_msg("error %d while removing gluebi MTD device %d, UBI device %d, volume %d - ignoring",
+                               err, mtd->index, gluebi->ubi_num,
+                               gluebi->vol_id);
                kfree(mtd->name);
                kfree(gluebi);
        }
index a8d523794b525f8b9147887fe8291214e41dd713..78a1dcbf210758536ac3da2ebf83419ef6af8de0 100644 (file)
@@ -177,21 +177,20 @@ retry:
                         * enabled. A corresponding message will be printed
                         * later, when it is has been scrubbed.
                         */
-                       dbg_msg("fixable bit-flip detected at PEB %d", pnum);
+                       ubi_msg("fixable bit-flip detected at PEB %d", pnum);
                        ubi_assert(len == read);
                        return UBI_IO_BITFLIPS;
                }
 
                if (retries++ < UBI_IO_RETRIES) {
-                       ubi_warn("error %d%s while reading %d bytes from PEB "
-                                "%d:%d, read only %zd bytes, retry",
+                       ubi_warn("error %d%s while reading %d bytes from PEB %d:%d, read only %zd bytes, retry",
                                 err, errstr, len, pnum, offset, read);
                        yield();
                        goto retry;
                }
 
-               ubi_err("error %d%s while reading %d bytes from PEB %d:%d, "
-                       "read %zd bytes", err, errstr, len, pnum, offset, read);
+               ubi_err("error %d%s while reading %d bytes from PEB %d:%d, read %zd bytes",
+                       err, errstr, len, pnum, offset, read);
                dump_stack();
 
                /*
@@ -274,8 +273,8 @@ int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset,
        }
 
        if (ubi_dbg_is_write_failure(ubi)) {
-               ubi_err("cannot write %d bytes to PEB %d:%d "
-                       "(emulated)", len, pnum, offset);
+               ubi_err("cannot write %d bytes to PEB %d:%d (emulated)",
+                       len, pnum, offset);
                dump_stack();
                return -EIO;
        }
@@ -283,8 +282,8 @@ int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset,
        addr = (loff_t)pnum * ubi->peb_size + offset;
        err = mtd_write(ubi->mtd, addr, len, &written, buf);
        if (err) {
-               ubi_err("error %d while writing %d bytes to PEB %d:%d, written "
-                       "%zd bytes", err, len, pnum, offset, written);
+               ubi_err("error %d while writing %d bytes to PEB %d:%d, written %zd bytes",
+                       err, len, pnum, offset, written);
                dump_stack();
                ubi_dump_flash(ubi, pnum, offset, len);
        } else
@@ -685,8 +684,7 @@ static int validate_ec_hdr(const struct ubi_device *ubi,
        leb_start = be32_to_cpu(ec_hdr->data_offset);
 
        if (ec_hdr->version != UBI_VERSION) {
-               ubi_err("node with incompatible UBI version found: "
-                       "this UBI version is %d, image version is %d",
+               ubi_err("node with incompatible UBI version found: this UBI version is %d, image version is %d",
                        UBI_VERSION, (int)ec_hdr->version);
                goto bad;
        }
@@ -777,10 +775,10 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
                if (ubi_check_pattern(ec_hdr, 0xFF, UBI_EC_HDR_SIZE)) {
                        /* The physical eraseblock is supposedly empty */
                        if (verbose)
-                               ubi_warn("no EC header found at PEB %d, "
-                                        "only 0xFF bytes", pnum);
-                       dbg_bld("no EC header found at PEB %d, "
-                               "only 0xFF bytes", pnum);
+                               ubi_warn("no EC header found at PEB %d, only 0xFF bytes",
+                                        pnum);
+                       dbg_bld("no EC header found at PEB %d, only 0xFF bytes",
+                               pnum);
                        if (!read_err)
                                return UBI_IO_FF;
                        else
@@ -792,12 +790,12 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
                 * 0xFF bytes. Report that the header is corrupted.
                 */
                if (verbose) {
-                       ubi_warn("bad magic number at PEB %d: %08x instead of "
-                                "%08x", pnum, magic, UBI_EC_HDR_MAGIC);
+                       ubi_warn("bad magic number at PEB %d: %08x instead of %08x",
+                                pnum, magic, UBI_EC_HDR_MAGIC);
                        ubi_dump_ec_hdr(ec_hdr);
                }
-               dbg_bld("bad magic number at PEB %d: %08x instead of "
-                       "%08x", pnum, magic, UBI_EC_HDR_MAGIC);
+               dbg_bld("bad magic number at PEB %d: %08x instead of %08x",
+                       pnum, magic, UBI_EC_HDR_MAGIC);
                return UBI_IO_BAD_HDR;
        }
 
@@ -806,12 +804,12 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
 
        if (hdr_crc != crc) {
                if (verbose) {
-                       ubi_warn("bad EC header CRC at PEB %d, calculated "
-                                "%#08x, read %#08x", pnum, crc, hdr_crc);
+                       ubi_warn("bad EC header CRC at PEB %d, calculated %#08x, read %#08x",
+                                pnum, crc, hdr_crc);
                        ubi_dump_ec_hdr(ec_hdr);
                }
-               dbg_bld("bad EC header CRC at PEB %d, calculated "
-                       "%#08x, read %#08x", pnum, crc, hdr_crc);
+               dbg_bld("bad EC header CRC at PEB %d, calculated %#08x, read %#08x",
+                       pnum, crc, hdr_crc);
 
                if (!read_err)
                        return UBI_IO_BAD_HDR;
@@ -1032,10 +1030,10 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
 
                if (ubi_check_pattern(vid_hdr, 0xFF, UBI_VID_HDR_SIZE)) {
                        if (verbose)
-                               ubi_warn("no VID header found at PEB %d, "
-                                        "only 0xFF bytes", pnum);
-                       dbg_bld("no VID header found at PEB %d, "
-                               "only 0xFF bytes", pnum);
+                               ubi_warn("no VID header found at PEB %d, only 0xFF bytes",
+                                        pnum);
+                       dbg_bld("no VID header found at PEB %d, only 0xFF bytes",
+                               pnum);
                        if (!read_err)
                                return UBI_IO_FF;
                        else
@@ -1043,12 +1041,12 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
                }
 
                if (verbose) {
-                       ubi_warn("bad magic number at PEB %d: %08x instead of "
-                                "%08x", pnum, magic, UBI_VID_HDR_MAGIC);
+                       ubi_warn("bad magic number at PEB %d: %08x instead of %08x",
+                                pnum, magic, UBI_VID_HDR_MAGIC);
                        ubi_dump_vid_hdr(vid_hdr);
                }
-               dbg_bld("bad magic number at PEB %d: %08x instead of "
-                       "%08x", pnum, magic, UBI_VID_HDR_MAGIC);
+               dbg_bld("bad magic number at PEB %d: %08x instead of %08x",
+                       pnum, magic, UBI_VID_HDR_MAGIC);
                return UBI_IO_BAD_HDR;
        }
 
@@ -1057,12 +1055,12 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
 
        if (hdr_crc != crc) {
                if (verbose) {
-                       ubi_warn("bad CRC at PEB %d, calculated %#08x, "
-                                "read %#08x", pnum, crc, hdr_crc);
+                       ubi_warn("bad CRC at PEB %d, calculated %#08x, read %#08x",
+                                pnum, crc, hdr_crc);
                        ubi_dump_vid_hdr(vid_hdr);
                }
-               dbg_bld("bad CRC at PEB %d, calculated %#08x, "
-                       "read %#08x", pnum, crc, hdr_crc);
+               dbg_bld("bad CRC at PEB %d, calculated %#08x, read %#08x",
+                       pnum, crc, hdr_crc);
                if (!read_err)
                        return UBI_IO_BAD_HDR;
                else
@@ -1300,8 +1298,8 @@ static int self_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum)
        crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_EC_HDR_SIZE_CRC);
        hdr_crc = be32_to_cpu(vid_hdr->hdr_crc);
        if (hdr_crc != crc) {
-               ubi_err("bad VID header CRC at PEB %d, calculated %#08x, "
-                       "read %#08x", pnum, crc, hdr_crc);
+               ubi_err("bad VID header CRC at PEB %d, calculated %#08x, read %#08x",
+                       pnum, crc, hdr_crc);
                ubi_err("self-check failed for PEB %d", pnum);
                ubi_dump_vid_hdr(vid_hdr);
                dump_stack();
@@ -1411,15 +1409,15 @@ int ubi_self_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len)
 
        err = mtd_read(ubi->mtd, addr, len, &read, buf);
        if (err && !mtd_is_bitflip(err)) {
-               ubi_err("error %d while reading %d bytes from PEB %d:%d, "
-                       "read %zd bytes", err, len, pnum, offset, read);
+               ubi_err("error %d while reading %d bytes from PEB %d:%d, read %zd bytes",
+                       err, len, pnum, offset, read);
                goto error;
        }
 
        err = ubi_check_pattern(buf, 0xFF, len);
        if (err == 0) {
-               ubi_err("flash region at PEB %d:%d, length %d does not "
-                       "contain all 0xFF bytes", pnum, offset, len);
+               ubi_err("flash region at PEB %d:%d, length %d does not contain all 0xFF bytes",
+                       pnum, offset, len);
                goto fail;
        }
 
index 8bbfb444b89525cbb9c0a071914a4de0af65f1fe..f913d701a5b3d96222ca9eeb71007c6ee982e209 100644 (file)
@@ -121,10 +121,16 @@ void ubi_update_reserved(struct ubi_device *ubi)
  */
 void ubi_calculate_reserved(struct ubi_device *ubi)
 {
-       ubi->beb_rsvd_level = ubi->good_peb_count/100;
-       ubi->beb_rsvd_level *= CONFIG_MTD_UBI_BEB_RESERVE;
-       if (ubi->beb_rsvd_level < MIN_RESEVED_PEBS)
-               ubi->beb_rsvd_level = MIN_RESEVED_PEBS;
+       /*
+        * Calculate the actual number of PEBs currently needed to be reserved
+        * for future bad eraseblock handling.
+        */
+       ubi->beb_rsvd_level = ubi->bad_peb_limit - ubi->bad_peb_count;
+       if (ubi->beb_rsvd_level < 0) {
+               ubi->beb_rsvd_level = 0;
+               ubi_warn("number of bad PEBs (%d) is above the expected limit (%d), not reserving any PEBs for bad PEB handling, will use available PEBs (if any)",
+                        ubi->bad_peb_count, ubi->bad_peb_limit);
+       }
 }
 
 /**
index 84f66e3fa05d7d39ef265a657ec296fd1be1448c..383ee43d2425841932f5676ea833275091f41577 100644 (file)
 #define UBI_NAME_STR "ubi"
 
 /* Normal UBI messages */
-#define ubi_msg(fmt, ...) printk(KERN_NOTICE "UBI: " fmt "\n", ##__VA_ARGS__)
+#define ubi_msg(fmt, ...) pr_notice("UBI: " fmt "\n", ##__VA_ARGS__)
 /* UBI warning messages */
-#define ubi_warn(fmt, ...) printk(KERN_WARNING "UBI warning: %s: " fmt "\n", \
-                                 __func__, ##__VA_ARGS__)
+#define ubi_warn(fmt, ...) pr_warn("UBI warning: %s: " fmt "\n",  \
+                                  __func__, ##__VA_ARGS__)
 /* UBI error messages */
-#define ubi_err(fmt, ...) printk(KERN_ERR "UBI error: %s: " fmt "\n", \
+#define ubi_err(fmt, ...) pr_err("UBI error: %s: " fmt "\n",      \
                                 __func__, ##__VA_ARGS__)
 
-/* Lowest number PEBs reserved for bad PEB handling */
-#define MIN_RESEVED_PEBS 2
-
 /* Background thread name pattern */
 #define UBI_BGT_NAME_PATTERN "ubi_bgt%dd"
 
@@ -363,6 +360,7 @@ struct ubi_wl_entry;
  * @flash_size: underlying MTD device size (in bytes)
  * @peb_count: count of physical eraseblocks on the MTD device
  * @peb_size: physical eraseblock size
+ * @bad_peb_limit: top limit of expected bad physical eraseblocks
  * @bad_peb_count: count of bad physical eraseblocks
  * @good_peb_count: count of good physical eraseblocks
  * @corr_peb_count: count of corrupted physical eraseblocks (preserved and not
@@ -410,6 +408,7 @@ struct ubi_device {
        int avail_pebs;
        int beb_rsvd_pebs;
        int beb_rsvd_level;
+       int bad_peb_limit;
 
        int autoresize_vol_id;
        int vtbl_slots;
@@ -694,7 +693,8 @@ int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
                         struct ubi_vid_hdr *vid_hdr);
 
 /* build.c */
-int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset);
+int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
+                      int vid_hdr_offset, int max_beb_per1024);
 int ubi_detach_mtd_dev(int ubi_num, int anyway);
 struct ubi_device *ubi_get_device(int ubi_num);
 void ubi_put_device(struct ubi_device *ubi);
index 568307cc7caf8d882034011aa5303ad47eaf3bed..926e3df14fb2a83776650a820742f4882b41893f 100644 (file)
@@ -270,8 +270,8 @@ static int vtbl_check(const struct ubi_device *ubi,
 
                        if (len1 > 0 && len1 == len2 &&
                            !strncmp(vtbl[i].name, vtbl[n].name, len1)) {
-                               ubi_err("volumes %d and %d have the same name"
-                                       " \"%s\"", i, n, vtbl[i].name);
+                               ubi_err("volumes %d and %d have the same name \"%s\"",
+                                       i, n, vtbl[i].name);
                                ubi_dump_vtbl_record(&vtbl[i], i);
                                ubi_dump_vtbl_record(&vtbl[n], n);
                                return -EINVAL;
@@ -304,7 +304,7 @@ static int create_vtbl(struct ubi_device *ubi, struct ubi_attach_info *ai,
        struct ubi_vid_hdr *vid_hdr;
        struct ubi_ainf_peb *new_aeb;
 
-       ubi_msg("create volume table (copy #%d)", copy + 1);
+       dbg_gen("create volume table (copy #%d)", copy + 1);
 
        vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
        if (!vid_hdr)
@@ -562,8 +562,8 @@ static int init_volumes(struct ubi_device *ubi,
                if (vtbl[i].flags & UBI_VTBL_AUTORESIZE_FLG) {
                        /* Auto re-size flag may be set only for one volume */
                        if (ubi->autoresize_vol_id != -1) {
-                               ubi_err("more than one auto-resize volume (%d "
-                                       "and %d)", ubi->autoresize_vol_id, i);
+                               ubi_err("more than one auto-resize volume (%d and %d)",
+                                       ubi->autoresize_vol_id, i);
                                kfree(vol);
                                return -EINVAL;
                        }
index b6be644e7b85f5194c9f0e1303ffa4d2a51e9cf6..032fc57f109004e7e6d462cbd36c2971a281cf45 100644 (file)
@@ -978,9 +978,10 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
                        int cancel)
 {
        struct ubi_wl_entry *e = wl_wrk->e;
-       int pnum = e->pnum, err, need;
+       int pnum = e->pnum;
        int vol_id = wl_wrk->vol_id;
        int lnum = wl_wrk->lnum;
+       int err, available_consumed = 0;
 
        if (cancel) {
                dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
@@ -1045,20 +1046,14 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
        }
 
        spin_lock(&ubi->volumes_lock);
-       need = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs + 1;
-       if (need > 0) {
-               need = ubi->avail_pebs >= need ? need : ubi->avail_pebs;
-               ubi->avail_pebs -= need;
-               ubi->rsvd_pebs += need;
-               ubi->beb_rsvd_pebs += need;
-               if (need > 0)
-                       ubi_msg("reserve more %d PEBs", need);
-       }
-
        if (ubi->beb_rsvd_pebs == 0) {
-               spin_unlock(&ubi->volumes_lock);
-               ubi_err("no reserved physical eraseblocks");
-               goto out_ro;
+               if (ubi->avail_pebs == 0) {
+                       spin_unlock(&ubi->volumes_lock);
+                       ubi_err("no reserved/available physical eraseblocks");
+                       goto out_ro;
+               }
+               ubi->avail_pebs -= 1;
+               available_consumed = 1;
        }
        spin_unlock(&ubi->volumes_lock);
 
@@ -1068,19 +1063,36 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
                goto out_ro;
 
        spin_lock(&ubi->volumes_lock);
-       ubi->beb_rsvd_pebs -= 1;
+       if (ubi->beb_rsvd_pebs > 0) {
+               if (available_consumed) {
+                       /*
+                        * The amount of reserved PEBs increased since we last
+                        * checked.
+                        */
+                       ubi->avail_pebs += 1;
+                       available_consumed = 0;
+               }
+               ubi->beb_rsvd_pebs -= 1;
+       }
        ubi->bad_peb_count += 1;
        ubi->good_peb_count -= 1;
        ubi_calculate_reserved(ubi);
-       if (ubi->beb_rsvd_pebs)
+       if (available_consumed)
+               ubi_warn("no PEBs in the reserved pool, used an available PEB");
+       else if (ubi->beb_rsvd_pebs)
                ubi_msg("%d PEBs left in the reserve", ubi->beb_rsvd_pebs);
        else
-               ubi_warn("last PEB from the reserved pool was used");
+               ubi_warn("last PEB from the reserve was used");
        spin_unlock(&ubi->volumes_lock);
 
        return err;
 
 out_ro:
+       if (available_consumed) {
+               spin_lock(&ubi->volumes_lock);
+               ubi->avail_pebs += 1;
+               spin_unlock(&ubi->volumes_lock);
+       }
        ubi_ro_mode(ubi);
        return err;
 }
@@ -1189,7 +1201,7 @@ int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
 {
        struct ubi_wl_entry *e;
 
-       dbg_msg("schedule PEB %d for scrubbing", pnum);
+       ubi_msg("schedule PEB %d for scrubbing", pnum);
 
 retry:
        spin_lock(&ubi->wl_lock);
index 0c2bd806950e16313605ee62151e25e2c4a3730b..6a70184c3f237d832a265afbef7779e659686df2 100644 (file)
@@ -107,8 +107,6 @@ config MII
          or internal device.  It is safe to say Y or M here even if your
          ethernet card lacks MII.
 
-source "drivers/ieee802154/Kconfig"
-
 config IFB
        tristate "Intermediate Functional Block support"
        depends on NET_CLS_ACT
@@ -151,6 +149,19 @@ config MACVTAP
          To compile this driver as a module, choose M here: the module
          will be called macvtap.
 
+config VXLAN
+       tristate "Virtual eXtensible Local Area Network (VXLAN)"
+       depends on EXPERIMENTAL && INET
+       ---help---
+         This allows one to create vxlan virtual interfaces that provide
+         Layer 2 Networks over Layer 3 Networks. VXLAN is often used
+         to tunnel virtual network infrastructure in virtualized environments.
+         For more information see:
+           http://tools.ietf.org/html/draft-mahalingam-dutt-dcops-vxlan-02
+
+         To compile this driver as a module, choose M here: the module
+         will be called vxlan.
+
 config NETCONSOLE
        tristate "Network console logging support"
        ---help---
@@ -290,6 +301,8 @@ source "drivers/net/wimax/Kconfig"
 
 source "drivers/net/wan/Kconfig"
 
+source "drivers/net/ieee802154/Kconfig"
+
 config XEN_NETDEV_FRONTEND
        tristate "Xen network device frontend driver"
        depends on XEN
index 3d375ca128a6d1a657782a467db56e9116587f74..335db78fd987a289b4db696c60a89f682be5c5ed 100644 (file)
@@ -21,6 +21,7 @@ obj-$(CONFIG_NET_TEAM) += team/
 obj-$(CONFIG_TUN) += tun.o
 obj-$(CONFIG_VETH) += veth.o
 obj-$(CONFIG_VIRTIO_NET) += virtio_net.o
+obj-$(CONFIG_VXLAN) += vxlan.o
 
 #
 # Networking Drivers
@@ -53,6 +54,7 @@ obj-$(CONFIG_SUNGEM_PHY) += sungem_phy.o
 obj-$(CONFIG_WAN) += wan/
 obj-$(CONFIG_WLAN) += wireless/
 obj-$(CONFIG_WIMAX) += wimax/
+obj-$(CONFIG_IEEE802154) += ieee802154/
 
 obj-$(CONFIG_VMXNET3) += vmxnet3/
 obj-$(CONFIG_XEN_NETDEV_FRONTEND) += xen-netfront.o
index d688a8af432c52a8ea3dc5e45607c2801e9fb831..7858c58df4a3a12c47de7c1a015b613781352c19 100644 (file)
@@ -1120,10 +1120,10 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
                        write_unlock_bh(&bond->curr_slave_lock);
                        read_unlock(&bond->lock);
 
-                       netdev_bonding_change(bond->dev, NETDEV_BONDING_FAILOVER);
+                       call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, bond->dev);
                        if (should_notify_peers)
-                               netdev_bonding_change(bond->dev,
-                                                     NETDEV_NOTIFY_PEERS);
+                               call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
+                                                        bond->dev);
 
                        read_lock(&bond->lock);
                        write_lock_bh(&bond->curr_slave_lock);
@@ -1558,8 +1558,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
                                 bond_dev->name,
                                 bond_dev->type, slave_dev->type);
 
-                       res = netdev_bonding_change(bond_dev,
-                                                   NETDEV_PRE_TYPE_CHANGE);
+                       res = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE,
+                                                      bond_dev);
                        res = notifier_to_errno(res);
                        if (res) {
                                pr_err("%s: refused to change device type\n",
@@ -1579,8 +1579,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
                                bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
                        }
 
-                       netdev_bonding_change(bond_dev,
-                                             NETDEV_POST_TYPE_CHANGE);
+                       call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE,
+                                                bond_dev);
                }
        } else if (bond_dev->type != slave_dev->type) {
                pr_err("%s ether type (%d) is different from other slaves (%d), can not enslave it.\n",
@@ -1941,7 +1941,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
        }
 
        block_netpoll_tx();
-       netdev_bonding_change(bond_dev, NETDEV_RELEASE);
+       call_netdevice_notifiers(NETDEV_RELEASE, bond_dev);
        write_lock_bh(&bond->lock);
 
        slave = bond_get_slave_by_dev(bond, slave_dev);
@@ -2584,7 +2584,7 @@ re_arm:
                        read_unlock(&bond->lock);
                        return;
                }
-               netdev_bonding_change(bond->dev, NETDEV_NOTIFY_PEERS);
+               call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev);
                rtnl_unlock();
        }
 }
@@ -2811,12 +2811,13 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
                                            arp_work.work);
        struct slave *slave, *oldcurrent;
        int do_failover = 0;
-       int delta_in_ticks;
+       int delta_in_ticks, extra_ticks;
        int i;
 
        read_lock(&bond->lock);
 
        delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
+       extra_ticks = delta_in_ticks / 2;
 
        if (bond->slave_cnt == 0)
                goto re_arm;
@@ -2839,10 +2840,10 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
                if (slave->link != BOND_LINK_UP) {
                        if (time_in_range(jiffies,
                                trans_start - delta_in_ticks,
-                               trans_start + delta_in_ticks) &&
+                               trans_start + delta_in_ticks + extra_ticks) &&
                            time_in_range(jiffies,
                                slave->dev->last_rx - delta_in_ticks,
-                               slave->dev->last_rx + delta_in_ticks)) {
+                               slave->dev->last_rx + delta_in_ticks + extra_ticks)) {
 
                                slave->link  = BOND_LINK_UP;
                                bond_set_active_slave(slave);
@@ -2872,10 +2873,10 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
                         */
                        if (!time_in_range(jiffies,
                                trans_start - delta_in_ticks,
-                               trans_start + 2 * delta_in_ticks) ||
+                               trans_start + 2 * delta_in_ticks + extra_ticks) ||
                            !time_in_range(jiffies,
                                slave->dev->last_rx - delta_in_ticks,
-                               slave->dev->last_rx + 2 * delta_in_ticks)) {
+                               slave->dev->last_rx + 2 * delta_in_ticks + extra_ticks)) {
 
                                slave->link  = BOND_LINK_DOWN;
                                bond_set_backup_slave(slave);
@@ -2933,6 +2934,14 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
        struct slave *slave;
        int i, commit = 0;
        unsigned long trans_start;
+       int extra_ticks;
+
+       /* All the time comparisons below need some extra time. Otherwise, on
+        * fast networks the ARP probe/reply may arrive within the same jiffy
+        * as it was sent.  Then, the next time the ARP monitor is run, one
+        * arp_interval will already have passed in the comparisons.
+        */
+       extra_ticks = delta_in_ticks / 2;
 
        bond_for_each_slave(bond, slave, i) {
                slave->new_link = BOND_LINK_NOCHANGE;
@@ -2940,7 +2949,7 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
                if (slave->link != BOND_LINK_UP) {
                        if (time_in_range(jiffies,
                                slave_last_rx(bond, slave) - delta_in_ticks,
-                               slave_last_rx(bond, slave) + delta_in_ticks)) {
+                               slave_last_rx(bond, slave) + delta_in_ticks + extra_ticks)) {
 
                                slave->new_link = BOND_LINK_UP;
                                commit++;
@@ -2956,7 +2965,7 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
                 */
                if (time_in_range(jiffies,
                                  slave->jiffies - delta_in_ticks,
-                                 slave->jiffies + 2 * delta_in_ticks))
+                                 slave->jiffies + 2 * delta_in_ticks + extra_ticks))
                        continue;
 
                /*
@@ -2976,7 +2985,7 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
                    !bond->current_arp_slave &&
                    !time_in_range(jiffies,
                        slave_last_rx(bond, slave) - delta_in_ticks,
-                       slave_last_rx(bond, slave) + 3 * delta_in_ticks)) {
+                       slave_last_rx(bond, slave) + 3 * delta_in_ticks + extra_ticks)) {
 
                        slave->new_link = BOND_LINK_DOWN;
                        commit++;
@@ -2992,10 +3001,10 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
                if (bond_is_active_slave(slave) &&
                    (!time_in_range(jiffies,
                        trans_start - delta_in_ticks,
-                       trans_start + 2 * delta_in_ticks) ||
+                       trans_start + 2 * delta_in_ticks + extra_ticks) ||
                     !time_in_range(jiffies,
                        slave_last_rx(bond, slave) - delta_in_ticks,
-                       slave_last_rx(bond, slave) + 2 * delta_in_ticks))) {
+                       slave_last_rx(bond, slave) + 2 * delta_in_ticks + extra_ticks))) {
 
                        slave->new_link = BOND_LINK_DOWN;
                        commit++;
@@ -3027,7 +3036,7 @@ static void bond_ab_arp_commit(struct bonding *bond, int delta_in_ticks)
                        if ((!bond->curr_active_slave &&
                             time_in_range(jiffies,
                                           trans_start - delta_in_ticks,
-                                          trans_start + delta_in_ticks)) ||
+                                          trans_start + delta_in_ticks + delta_in_ticks / 2)) ||
                            bond->curr_active_slave != slave) {
                                slave->link = BOND_LINK_UP;
                                if (bond->current_arp_slave) {
@@ -3203,7 +3212,7 @@ re_arm:
                        read_unlock(&bond->lock);
                        return;
                }
-               netdev_bonding_change(bond->dev, NETDEV_NOTIFY_PEERS);
+               call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev);
                rtnl_unlock();
        }
 }
@@ -3351,57 +3360,94 @@ static struct notifier_block bond_netdev_notifier = {
 
 /*---------------------------- Hashing Policies -----------------------------*/
 
+/*
+ * Hash for the output device based upon layer 2 data
+ */
+static int bond_xmit_hash_policy_l2(struct sk_buff *skb, int count)
+{
+       struct ethhdr *data = (struct ethhdr *)skb->data;
+
+       if (skb_headlen(skb) >= offsetof(struct ethhdr, h_proto))
+               return (data->h_dest[5] ^ data->h_source[5]) % count;
+
+       return 0;
+}
+
 /*
  * Hash for the output device based upon layer 2 and layer 3 data. If
- * the packet is not IP mimic bond_xmit_hash_policy_l2()
+ * the packet is not IP, fall back on bond_xmit_hash_policy_l2()
  */
 static int bond_xmit_hash_policy_l23(struct sk_buff *skb, int count)
 {
        struct ethhdr *data = (struct ethhdr *)skb->data;
-       struct iphdr *iph = ip_hdr(skb);
-
-       if (skb->protocol == htons(ETH_P_IP)) {
+       struct iphdr *iph;
+       struct ipv6hdr *ipv6h;
+       u32 v6hash;
+       __be32 *s, *d;
+
+       if (skb->protocol == htons(ETH_P_IP) &&
+           skb_network_header_len(skb) >= sizeof(*iph)) {
+               iph = ip_hdr(skb);
                return ((ntohl(iph->saddr ^ iph->daddr) & 0xffff) ^
                        (data->h_dest[5] ^ data->h_source[5])) % count;
+       } else if (skb->protocol == htons(ETH_P_IPV6) &&
+                  skb_network_header_len(skb) >= sizeof(*ipv6h)) {
+               ipv6h = ipv6_hdr(skb);
+               s = &ipv6h->saddr.s6_addr32[0];
+               d = &ipv6h->daddr.s6_addr32[0];
+               v6hash = (s[1] ^ d[1]) ^ (s[2] ^ d[2]) ^ (s[3] ^ d[3]);
+               v6hash ^= (v6hash >> 24) ^ (v6hash >> 16) ^ (v6hash >> 8);
+               return (v6hash ^ data->h_dest[5] ^ data->h_source[5]) % count;
        }
 
-       return (data->h_dest[5] ^ data->h_source[5]) % count;
+       return bond_xmit_hash_policy_l2(skb, count);
 }
 
 /*
  * Hash for the output device based upon layer 3 and layer 4 data. If
  * the packet is a frag or not TCP or UDP, just use layer 3 data.  If it is
- * altogether not IP, mimic bond_xmit_hash_policy_l2()
+ * altogether not IP, fall back on bond_xmit_hash_policy_l2()
  */
 static int bond_xmit_hash_policy_l34(struct sk_buff *skb, int count)
 {
-       struct ethhdr *data = (struct ethhdr *)skb->data;
-       struct iphdr *iph = ip_hdr(skb);
-       __be16 *layer4hdr = (__be16 *)((u32 *)iph + iph->ihl);
-       int layer4_xor = 0;
-
-       if (skb->protocol == htons(ETH_P_IP)) {
+       u32 layer4_xor = 0;
+       struct iphdr *iph;
+       struct ipv6hdr *ipv6h;
+       __be32 *s, *d;
+       __be16 *layer4hdr;
+
+       if (skb->protocol == htons(ETH_P_IP) &&
+           skb_network_header_len(skb) >= sizeof(*iph)) {
+               iph = ip_hdr(skb);
                if (!ip_is_fragment(iph) &&
                    (iph->protocol == IPPROTO_TCP ||
-                    iph->protocol == IPPROTO_UDP)) {
-                       layer4_xor = ntohs((*layer4hdr ^ *(layer4hdr + 1)));
+                    iph->protocol == IPPROTO_UDP) &&
+                   (skb_headlen(skb) - skb_network_offset(skb) >=
+                    iph->ihl * sizeof(u32) + sizeof(*layer4hdr) * 2)) {
+                       layer4hdr = (__be16 *)((u32 *)iph + iph->ihl);
+                       layer4_xor = ntohs(*layer4hdr ^ *(layer4hdr + 1));
                }
                return (layer4_xor ^
                        ((ntohl(iph->saddr ^ iph->daddr)) & 0xffff)) % count;
-
+       } else if (skb->protocol == htons(ETH_P_IPV6) &&
+                  skb_network_header_len(skb) >= sizeof(*ipv6h)) {
+               ipv6h = ipv6_hdr(skb);
+               if ((ipv6h->nexthdr == IPPROTO_TCP ||
+                    ipv6h->nexthdr == IPPROTO_UDP) &&
+                   (skb_headlen(skb) - skb_network_offset(skb) >=
+                    sizeof(*ipv6h) + sizeof(*layer4hdr) * 2)) {
+                       layer4hdr = (__be16 *)(ipv6h + 1);
+                       layer4_xor = ntohs(*layer4hdr ^ *(layer4hdr + 1));
+               }
+               s = &ipv6h->saddr.s6_addr32[0];
+               d = &ipv6h->daddr.s6_addr32[0];
+               layer4_xor ^= (s[1] ^ d[1]) ^ (s[2] ^ d[2]) ^ (s[3] ^ d[3]);
+               layer4_xor ^= (layer4_xor >> 24) ^ (layer4_xor >> 16) ^
+                              (layer4_xor >> 8);
+               return layer4_xor % count;
        }
 
-       return (data->h_dest[5] ^ data->h_source[5]) % count;
-}
-
-/*
- * Hash for the output device based upon layer 2 data
- */
-static int bond_xmit_hash_policy_l2(struct sk_buff *skb, int count)
-{
-       struct ethhdr *data = (struct ethhdr *)skb->data;
-
-       return (data->h_dest[5] ^ data->h_source[5]) % count;
+       return bond_xmit_hash_policy_l2(skb, count);
 }
 
 /*-------------------------- Device entry points ----------------------------*/
index 4c538e3886553f678729ebe2aa42affd021cc755..e5180dfddba54dc6918b8dbb3e2c8643316e130b 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/if_ether.h>
 #include <linux/list.h>
 #include <linux/io.h>
+#include <linux/pm_runtime.h>
 
 #include <linux/can.h>
 #include <linux/can/dev.h>
@@ -45,6 +46,9 @@
 #define IF_ENUM_REG_LEN                11
 #define C_CAN_IFACE(reg, iface)        (C_CAN_IF1_##reg + (iface) * IF_ENUM_REG_LEN)
 
+/* control extension register D_CAN specific */
+#define CONTROL_EX_PDR         BIT(8)
+
 /* control register */
 #define CONTROL_TEST           BIT(7)
 #define CONTROL_CCE            BIT(6)
@@ -64,6 +68,7 @@
 #define TEST_BASIC             BIT(2)
 
 /* status register */
+#define STATUS_PDA             BIT(10)
 #define STATUS_BOFF            BIT(7)
 #define STATUS_EWARN           BIT(6)
 #define STATUS_EPASS           BIT(5)
 /* minimum timeout for checking BUSY status */
 #define MIN_TIMEOUT_VALUE      6
 
+/* Wait for ~1 sec for INIT bit */
+#define INIT_WAIT_MS           1000
+
 /* napi related */
 #define C_CAN_NAPI_WEIGHT      C_CAN_MSG_OBJ_RX_NUM
 
@@ -201,6 +209,30 @@ static const struct can_bittiming_const c_can_bittiming_const = {
        .brp_inc = 1,
 };
 
+static inline void c_can_pm_runtime_enable(const struct c_can_priv *priv)
+{
+       if (priv->device)
+               pm_runtime_enable(priv->device);
+}
+
+static inline void c_can_pm_runtime_disable(const struct c_can_priv *priv)
+{
+       if (priv->device)
+               pm_runtime_disable(priv->device);
+}
+
+static inline void c_can_pm_runtime_get_sync(const struct c_can_priv *priv)
+{
+       if (priv->device)
+               pm_runtime_get_sync(priv->device);
+}
+
+static inline void c_can_pm_runtime_put_sync(const struct c_can_priv *priv)
+{
+       if (priv->device)
+               pm_runtime_put_sync(priv->device);
+}
+
 static inline int get_tx_next_msg_obj(const struct c_can_priv *priv)
 {
        return (priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) +
@@ -673,11 +705,15 @@ static int c_can_get_berr_counter(const struct net_device *dev,
        unsigned int reg_err_counter;
        struct c_can_priv *priv = netdev_priv(dev);
 
+       c_can_pm_runtime_get_sync(priv);
+
        reg_err_counter = priv->read_reg(priv, C_CAN_ERR_CNT_REG);
        bec->rxerr = (reg_err_counter & ERR_CNT_REC_MASK) >>
                                ERR_CNT_REC_SHIFT;
        bec->txerr = reg_err_counter & ERR_CNT_TEC_MASK;
 
+       c_can_pm_runtime_put_sync(priv);
+
        return 0;
 }
 
@@ -1053,11 +1089,13 @@ static int c_can_open(struct net_device *dev)
        int err;
        struct c_can_priv *priv = netdev_priv(dev);
 
+       c_can_pm_runtime_get_sync(priv);
+
        /* open the can device */
        err = open_candev(dev);
        if (err) {
                netdev_err(dev, "failed to open can device\n");
-               return err;
+               goto exit_open_fail;
        }
 
        /* register interrupt handler */
@@ -1079,6 +1117,8 @@ static int c_can_open(struct net_device *dev)
 
 exit_irq_fail:
        close_candev(dev);
+exit_open_fail:
+       c_can_pm_runtime_put_sync(priv);
        return err;
 }
 
@@ -1091,6 +1131,7 @@ static int c_can_close(struct net_device *dev)
        c_can_stop(dev);
        free_irq(dev->irq, dev);
        close_candev(dev);
+       c_can_pm_runtime_put_sync(priv);
 
        return 0;
 }
@@ -1119,6 +1160,77 @@ struct net_device *alloc_c_can_dev(void)
 }
 EXPORT_SYMBOL_GPL(alloc_c_can_dev);
 
+#ifdef CONFIG_PM
+int c_can_power_down(struct net_device *dev)
+{
+       u32 val;
+       unsigned long time_out;
+       struct c_can_priv *priv = netdev_priv(dev);
+
+       if (!(dev->flags & IFF_UP))
+               return 0;
+
+       WARN_ON(priv->type != BOSCH_D_CAN);
+
+       /* set PDR value so the device goes to power down mode */
+       val = priv->read_reg(priv, C_CAN_CTRL_EX_REG);
+       val |= CONTROL_EX_PDR;
+       priv->write_reg(priv, C_CAN_CTRL_EX_REG, val);
+
+       /* Wait for the PDA bit to get set */
+       time_out = jiffies + msecs_to_jiffies(INIT_WAIT_MS);
+       while (!(priv->read_reg(priv, C_CAN_STS_REG) & STATUS_PDA) &&
+                               time_after(time_out, jiffies))
+               cpu_relax();
+
+       if (time_after(jiffies, time_out))
+               return -ETIMEDOUT;
+
+       c_can_stop(dev);
+
+       c_can_pm_runtime_put_sync(priv);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(c_can_power_down);
+
+int c_can_power_up(struct net_device *dev)
+{
+       u32 val;
+       unsigned long time_out;
+       struct c_can_priv *priv = netdev_priv(dev);
+
+       if (!(dev->flags & IFF_UP))
+               return 0;
+
+       WARN_ON(priv->type != BOSCH_D_CAN);
+
+       c_can_pm_runtime_get_sync(priv);
+
+       /* Clear PDR and INIT bits */
+       val = priv->read_reg(priv, C_CAN_CTRL_EX_REG);
+       val &= ~CONTROL_EX_PDR;
+       priv->write_reg(priv, C_CAN_CTRL_EX_REG, val);
+       val = priv->read_reg(priv, C_CAN_CTRL_REG);
+       val &= ~CONTROL_INIT;
+       priv->write_reg(priv, C_CAN_CTRL_REG, val);
+
+       /* Wait for the PDA bit to get clear */
+       time_out = jiffies + msecs_to_jiffies(INIT_WAIT_MS);
+       while ((priv->read_reg(priv, C_CAN_STS_REG) & STATUS_PDA) &&
+                               time_after(time_out, jiffies))
+               cpu_relax();
+
+       if (time_after(jiffies, time_out))
+               return -ETIMEDOUT;
+
+       c_can_start(dev);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(c_can_power_up);
+#endif
+
 void free_c_can_dev(struct net_device *dev)
 {
        free_candev(dev);
@@ -1133,10 +1245,19 @@ static const struct net_device_ops c_can_netdev_ops = {
 
 int register_c_can_dev(struct net_device *dev)
 {
+       struct c_can_priv *priv = netdev_priv(dev);
+       int err;
+
+       c_can_pm_runtime_enable(priv);
+
        dev->flags |= IFF_ECHO; /* we support local echo */
        dev->netdev_ops = &c_can_netdev_ops;
 
-       return register_candev(dev);
+       err = register_candev(dev);
+       if (err)
+               c_can_pm_runtime_disable(priv);
+
+       return err;
 }
 EXPORT_SYMBOL_GPL(register_c_can_dev);
 
@@ -1144,10 +1265,9 @@ void unregister_c_can_dev(struct net_device *dev)
 {
        struct c_can_priv *priv = netdev_priv(dev);
 
-       /* disable all interrupts */
-       c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS);
-
        unregister_candev(dev);
+
+       c_can_pm_runtime_disable(priv);
 }
 EXPORT_SYMBOL_GPL(unregister_c_can_dev);
 
index 01a7049ab990eacd173c76edecede0b912e14382..e5ed41dafa1b94aa234749064813c587aa79c468 100644 (file)
@@ -24,6 +24,7 @@
 
 enum reg {
        C_CAN_CTRL_REG = 0,
+       C_CAN_CTRL_EX_REG,
        C_CAN_STS_REG,
        C_CAN_ERR_CNT_REG,
        C_CAN_BTR_REG,
@@ -104,6 +105,7 @@ static const u16 reg_map_c_can[] = {
 
 static const u16 reg_map_d_can[] = {
        [C_CAN_CTRL_REG]        = 0x00,
+       [C_CAN_CTRL_EX_REG]     = 0x02,
        [C_CAN_STS_REG]         = 0x04,
        [C_CAN_ERR_CNT_REG]     = 0x08,
        [C_CAN_BTR_REG]         = 0x0C,
@@ -143,8 +145,9 @@ static const u16 reg_map_d_can[] = {
 };
 
 enum c_can_dev_id {
-       C_CAN_DEVTYPE,
-       D_CAN_DEVTYPE,
+       BOSCH_C_CAN_PLATFORM,
+       BOSCH_C_CAN,
+       BOSCH_D_CAN,
 };
 
 /* c_can private data structure */
@@ -152,6 +155,7 @@ struct c_can_priv {
        struct can_priv can;    /* must be the first member */
        struct napi_struct napi;
        struct net_device *dev;
+       struct device *device;
        int tx_object;
        int current_status;
        int last_status;
@@ -164,6 +168,7 @@ struct c_can_priv {
        unsigned int tx_echo;
        void *priv;             /* for board-specific data */
        u16 irqstatus;
+       enum c_can_dev_id type;
 };
 
 struct net_device *alloc_c_can_dev(void);
@@ -171,4 +176,9 @@ void free_c_can_dev(struct net_device *dev);
 int register_c_can_dev(struct net_device *dev);
 void unregister_c_can_dev(struct net_device *dev);
 
+#ifdef CONFIG_PM
+int c_can_power_up(struct net_device *dev);
+int c_can_power_down(struct net_device *dev);
+#endif
+
 #endif /* C_CAN_H */
index 1011146ea51319accbdcf6658d9c22de723e3be7..3d7830bcd2bf83fa55346754f74c2205eae4b3f7 100644 (file)
@@ -120,10 +120,10 @@ static int __devinit c_can_pci_probe(struct pci_dev *pdev,
 
        /* Configure CAN type */
        switch (c_can_pci_data->type) {
-       case C_CAN_DEVTYPE:
+       case BOSCH_C_CAN:
                priv->regs = reg_map_c_can;
                break;
-       case D_CAN_DEVTYPE:
+       case BOSCH_D_CAN:
                priv->regs = reg_map_d_can;
                priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
                break;
@@ -192,7 +192,7 @@ static void __devexit c_can_pci_remove(struct pci_dev *pdev)
 }
 
 static struct c_can_pci_data c_can_sta2x11= {
-       .type = C_CAN_DEVTYPE,
+       .type = BOSCH_C_CAN,
        .reg_align = C_CAN_REG_ALIGN_32,
        .freq = 52000000, /* 52 Mhz */
 };
index 6ff7ad006c300b5a9c499e6bf3465e4cb8b7f2bc..ee1416132aba2e1f9582b7911f67e94eb21b3084 100644 (file)
@@ -30,6 +30,9 @@
 #include <linux/io.h>
 #include <linux/platform_device.h>
 #include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/consumer.h>
 
 #include <linux/can/dev.h>
 
@@ -65,17 +68,58 @@ static void c_can_plat_write_reg_aligned_to_32bit(struct c_can_priv *priv,
        writew(val, priv->base + 2 * priv->regs[index]);
 }
 
+static struct platform_device_id c_can_id_table[] = {
+       [BOSCH_C_CAN_PLATFORM] = {
+               .name = KBUILD_MODNAME,
+               .driver_data = BOSCH_C_CAN,
+       },
+       [BOSCH_C_CAN] = {
+               .name = "c_can",
+               .driver_data = BOSCH_C_CAN,
+       },
+       [BOSCH_D_CAN] = {
+               .name = "d_can",
+               .driver_data = BOSCH_D_CAN,
+       }, {
+       }
+};
+
+static const struct of_device_id c_can_of_table[] = {
+       { .compatible = "bosch,c_can", .data = &c_can_id_table[BOSCH_C_CAN] },
+       { .compatible = "bosch,d_can", .data = &c_can_id_table[BOSCH_D_CAN] },
+       { /* sentinel */ },
+};
+
 static int __devinit c_can_plat_probe(struct platform_device *pdev)
 {
        int ret;
        void __iomem *addr;
        struct net_device *dev;
        struct c_can_priv *priv;
+       const struct of_device_id *match;
        const struct platform_device_id *id;
+       struct pinctrl *pinctrl;
        struct resource *mem;
        int irq;
        struct clk *clk;
 
+       if (pdev->dev.of_node) {
+               match = of_match_device(c_can_of_table, &pdev->dev);
+               if (!match) {
+                       dev_err(&pdev->dev, "Failed to find matching dt id\n");
+                       ret = -EINVAL;
+                       goto exit;
+               }
+               id = match->data;
+       } else {
+               id = platform_get_device_id(pdev);
+       }
+
+       pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
+       if (IS_ERR(pinctrl))
+               dev_warn(&pdev->dev,
+                       "failed to configure pins from driver\n");
+
        /* get the appropriate clk */
        clk = clk_get(&pdev->dev, NULL);
        if (IS_ERR(clk)) {
@@ -114,9 +158,8 @@ static int __devinit c_can_plat_probe(struct platform_device *pdev)
        }
 
        priv = netdev_priv(dev);
-       id = platform_get_device_id(pdev);
        switch (id->driver_data) {
-       case C_CAN_DEVTYPE:
+       case BOSCH_C_CAN:
                priv->regs = reg_map_c_can;
                switch (mem->flags & IORESOURCE_MEM_TYPE_MASK) {
                case IORESOURCE_MEM_32BIT:
@@ -130,7 +173,7 @@ static int __devinit c_can_plat_probe(struct platform_device *pdev)
                        break;
                }
                break;
-       case D_CAN_DEVTYPE:
+       case BOSCH_D_CAN:
                priv->regs = reg_map_d_can;
                priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
                priv->read_reg = c_can_plat_read_reg_aligned_to_16bit;
@@ -143,8 +186,10 @@ static int __devinit c_can_plat_probe(struct platform_device *pdev)
 
        dev->irq = irq;
        priv->base = addr;
+       priv->device = &pdev->dev;
        priv->can.clock.freq = clk_get_rate(clk);
        priv->priv = clk;
+       priv->type = id->driver_data;
 
        platform_set_drvdata(pdev, dev);
        SET_NETDEV_DEV(dev, &pdev->dev);
@@ -195,27 +240,75 @@ static int __devexit c_can_plat_remove(struct platform_device *pdev)
        return 0;
 }
 
-static const struct platform_device_id c_can_id_table[] = {
-       {
-               .name = KBUILD_MODNAME,
-               .driver_data = C_CAN_DEVTYPE,
-       }, {
-               .name = "c_can",
-               .driver_data = C_CAN_DEVTYPE,
-       }, {
-               .name = "d_can",
-               .driver_data = D_CAN_DEVTYPE,
-       }, {
+#ifdef CONFIG_PM
+static int c_can_suspend(struct platform_device *pdev, pm_message_t state)
+{
+       int ret;
+       struct net_device *ndev = platform_get_drvdata(pdev);
+       struct c_can_priv *priv = netdev_priv(ndev);
+
+       if (priv->type != BOSCH_D_CAN) {
+               dev_warn(&pdev->dev, "Not supported\n");
+               return 0;
        }
-};
+
+       if (netif_running(ndev)) {
+               netif_stop_queue(ndev);
+               netif_device_detach(ndev);
+       }
+
+       ret = c_can_power_down(ndev);
+       if (ret) {
+               netdev_err(ndev, "failed to enter power down mode\n");
+               return ret;
+       }
+
+       priv->can.state = CAN_STATE_SLEEPING;
+
+       return 0;
+}
+
+static int c_can_resume(struct platform_device *pdev)
+{
+       int ret;
+       struct net_device *ndev = platform_get_drvdata(pdev);
+       struct c_can_priv *priv = netdev_priv(ndev);
+
+       if (priv->type != BOSCH_D_CAN) {
+               dev_warn(&pdev->dev, "Not supported\n");
+               return 0;
+       }
+
+       ret = c_can_power_up(ndev);
+       if (ret) {
+               netdev_err(ndev, "Still in power down mode\n");
+               return ret;
+       }
+
+       priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+       if (netif_running(ndev)) {
+               netif_device_attach(ndev);
+               netif_start_queue(ndev);
+       }
+
+       return 0;
+}
+#else
+#define c_can_suspend NULL
+#define c_can_resume NULL
+#endif
 
 static struct platform_driver c_can_plat_driver = {
        .driver = {
                .name = KBUILD_MODNAME,
                .owner = THIS_MODULE,
+               .of_match_table = of_match_ptr(c_can_of_table),
        },
        .probe = c_can_plat_probe,
        .remove = __devexit_p(c_can_plat_remove),
+       .suspend = c_can_suspend,
+       .resume = c_can_resume,
        .id_table = c_can_id_table,
 };
 
index c5f143165f80cec199b870a17227a4bd42c3a610..c78ecfca1e4582b8e57a2a42a2e14152f3640483 100644 (file)
 
 #define FLEXCAN_MB_CODE_MASK           (0xf0ffffff)
 
+/* FLEXCAN hardware feature flags */
+#define FLEXCAN_HAS_V10_FEATURES       BIT(1) /* For core version >= 10 */
+#define FLEXCAN_HAS_BROKEN_ERR_STATE   BIT(2) /* Broken error state handling */
+
 /* Structure of the message buffer */
 struct flexcan_mb {
        u32 can_ctrl;
@@ -178,7 +182,7 @@ struct flexcan_regs {
 };
 
 struct flexcan_devtype_data {
-       u32 hw_ver;     /* hardware controller version */
+       u32 features;   /* hardware controller features */
 };
 
 struct flexcan_priv {
@@ -197,11 +201,11 @@ struct flexcan_priv {
 };
 
 static struct flexcan_devtype_data fsl_p1010_devtype_data = {
-       .hw_ver = 3,
+       .features = FLEXCAN_HAS_BROKEN_ERR_STATE,
 };
-
+static struct flexcan_devtype_data fsl_imx28_devtype_data;
 static struct flexcan_devtype_data fsl_imx6q_devtype_data = {
-       .hw_ver = 10,
+       .features = FLEXCAN_HAS_V10_FEATURES | FLEXCAN_HAS_BROKEN_ERR_STATE,
 };
 
 static const struct can_bittiming_const flexcan_bittiming_const = {
@@ -741,15 +745,19 @@ static int flexcan_chip_start(struct net_device *dev)
         * enable tx and rx warning interrupt
         * enable bus off interrupt
         * (== FLEXCAN_CTRL_ERR_STATE)
-        *
-        * _note_: we enable the "error interrupt"
-        * (FLEXCAN_CTRL_ERR_MSK), too. Otherwise we don't get any
-        * warning or bus passive interrupts.
         */
        reg_ctrl = flexcan_read(&regs->ctrl);
        reg_ctrl &= ~FLEXCAN_CTRL_TSYN;
        reg_ctrl |= FLEXCAN_CTRL_BOFF_REC | FLEXCAN_CTRL_LBUF |
-               FLEXCAN_CTRL_ERR_STATE | FLEXCAN_CTRL_ERR_MSK;
+               FLEXCAN_CTRL_ERR_STATE;
+       /*
+        * enable the "error interrupt" (FLEXCAN_CTRL_ERR_MSK),
+        * on most Flexcan cores, too. Otherwise we don't get
+        * any error warning or passive interrupts.
+        */
+       if (priv->devtype_data->features & FLEXCAN_HAS_BROKEN_ERR_STATE ||
+           priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
+               reg_ctrl |= FLEXCAN_CTRL_ERR_MSK;
 
        /* save for later use */
        priv->reg_ctrl_default = reg_ctrl;
@@ -772,7 +780,7 @@ static int flexcan_chip_start(struct net_device *dev)
        flexcan_write(0x0, &regs->rx14mask);
        flexcan_write(0x0, &regs->rx15mask);
 
-       if (priv->devtype_data->hw_ver >= 10)
+       if (priv->devtype_data->features & FLEXCAN_HAS_V10_FEATURES)
                flexcan_write(0x0, &regs->rxfgmask);
 
        flexcan_transceiver_switch(priv, 1);
@@ -954,6 +962,7 @@ static void __devexit unregister_flexcandev(struct net_device *dev)
 
 static const struct of_device_id flexcan_of_match[] = {
        { .compatible = "fsl,p1010-flexcan", .data = &fsl_p1010_devtype_data, },
+       { .compatible = "fsl,imx28-flexcan", .data = &fsl_imx28_devtype_data, },
        { .compatible = "fsl,imx6q-flexcan", .data = &fsl_imx6q_devtype_data, },
        { /* sentinel */ },
 };
index 8a8df82988d13d238e2b7b8448e6b691bc82300a..c975999bb05587e400e635ecd65c7d51c8dd2d24 100644 (file)
@@ -181,7 +181,7 @@ static u32 __devinit mpc512x_can_get_clock(struct platform_device *ofdev,
 
                if (!clock_name || !strcmp(clock_name, "sys")) {
                        sys_clk = clk_get(&ofdev->dev, "sys_clk");
-                       if (!sys_clk) {
+                       if (IS_ERR(sys_clk)) {
                                dev_err(&ofdev->dev, "couldn't get sys_clk\n");
                                goto exit_unmap;
                        }
@@ -204,7 +204,7 @@ static u32 __devinit mpc512x_can_get_clock(struct platform_device *ofdev,
 
                if (clocksrc < 0) {
                        ref_clk = clk_get(&ofdev->dev, "ref_clk");
-                       if (!ref_clk) {
+                       if (IS_ERR(ref_clk)) {
                                dev_err(&ofdev->dev, "couldn't get ref_clk\n");
                                goto exit_unmap;
                        }
index 4c4f33d482d2faa44d36bb1cfe669470032eb8b3..25011dbe1b96f15b951ba555477f4ba14be94b0f 100644 (file)
@@ -156,8 +156,13 @@ static void set_normal_mode(struct net_device *dev)
                }
 
                /* set chip to normal mode */
-               priv->write_reg(priv, REG_MOD, 0x00);
+               if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
+                       priv->write_reg(priv, REG_MOD, MOD_LOM);
+               else
+                       priv->write_reg(priv, REG_MOD, 0x00);
+
                udelay(10);
+
                status = priv->read_reg(priv, REG_MOD);
        }
 
@@ -310,7 +315,10 @@ static netdev_tx_t sja1000_start_xmit(struct sk_buff *skb,
 
        can_put_echo_skb(skb, dev, 0);
 
-       sja1000_write_cmdreg(priv, CMD_TR);
+       if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
+               sja1000_write_cmdreg(priv, CMD_TR | CMD_AT);
+       else
+               sja1000_write_cmdreg(priv, CMD_TR);
 
        return NETDEV_TX_OK;
 }
@@ -505,10 +513,18 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
                        netdev_warn(dev, "wakeup interrupt\n");
 
                if (isrc & IRQ_TI) {
-                       /* transmission complete interrupt */
-                       stats->tx_bytes += priv->read_reg(priv, REG_FI) & 0xf;
-                       stats->tx_packets++;
-                       can_get_echo_skb(dev, 0);
+                       /* transmission buffer released */
+                       if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT &&
+                           !(status & SR_TCS)) {
+                               stats->tx_errors++;
+                               can_free_echo_skb(dev, 0);
+                       } else {
+                               /* transmission complete */
+                               stats->tx_bytes +=
+                                       priv->read_reg(priv, REG_FI) & 0xf;
+                               stats->tx_packets++;
+                               can_get_echo_skb(dev, 0);
+                       }
                        netif_wake_queue(dev);
                }
                if (isrc & IRQ_RI) {
@@ -605,7 +621,8 @@ struct net_device *alloc_sja1000dev(int sizeof_priv)
        priv->can.do_set_mode = sja1000_set_mode;
        priv->can.do_get_berr_counter = sja1000_get_berr_counter;
        priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES |
-               CAN_CTRLMODE_BERR_REPORTING;
+               CAN_CTRLMODE_BERR_REPORTING | CAN_CTRLMODE_LISTENONLY |
+               CAN_CTRLMODE_ONE_SHOT;
 
        spin_lock_init(&priv->cmdreg_lock);
 
index d2f91f737871889e59f83b227abed4751f04c171..c4643c400d462bfc63ce7b4df6c76558e4e92e50 100644 (file)
@@ -53,7 +53,7 @@ static struct peak_usb_adapter *peak_usb_adapters_list[] = {
  * dump memory
  */
 #define DUMP_WIDTH     16
-void dump_mem(char *prompt, void *p, int l)
+void pcan_dump_mem(char *prompt, void *p, int l)
 {
        pr_info("%s dumping %s (%d bytes):\n",
                PCAN_USB_DRIVER_NAME, prompt ? prompt : "memory", l);
@@ -203,9 +203,9 @@ static void peak_usb_read_bulk_callback(struct urb *urb)
                if (dev->state & PCAN_USB_STATE_STARTED) {
                        err = dev->adapter->dev_decode_buf(dev, urb);
                        if (err)
-                               dump_mem("received usb message",
-                                       urb->transfer_buffer,
-                                       urb->transfer_buffer_length);
+                               pcan_dump_mem("received usb message",
+                                             urb->transfer_buffer,
+                                             urb->transfer_buffer_length);
                }
        }
 
index 4c775b620be287b9a08eac5ed27a8d390f1930ba..c8e5e91d7cb571f350eef5ff2b0e26cf317d6460 100644 (file)
@@ -131,7 +131,7 @@ struct peak_usb_device {
        struct peak_usb_device *next_siblings;
 };
 
-void dump_mem(char *prompt, void *p, int l);
+void pcan_dump_mem(char *prompt, void *p, int l);
 
 /* common timestamp management */
 void peak_usb_init_time_ref(struct peak_time_ref *time_ref,
index 629c4ba5d49d95792717f75b3a8919543a60b5d0..e1626d92511adc88d084345f7fdbf098a4aafb09 100644 (file)
@@ -292,8 +292,8 @@ static int pcan_usb_pro_wait_rsp(struct peak_usb_device *dev,
                        if (!rec_len) {
                                netdev_err(dev->netdev,
                                           "got unprocessed record in msg\n");
-                               dump_mem("rcvd rsp msg", pum->u.rec_buffer,
-                                        actual_length);
+                               pcan_dump_mem("rcvd rsp msg", pum->u.rec_buffer,
+                                             actual_length);
                                break;
                        }
 
@@ -756,8 +756,8 @@ static int pcan_usb_pro_decode_buf(struct peak_usb_device *dev, struct urb *urb)
 
 fail:
        if (err)
-               dump_mem("received msg",
-                        urb->transfer_buffer, urb->actual_length);
+               pcan_dump_mem("received msg",
+                             urb->transfer_buffer, urb->actual_length);
 
        return err;
 }
index a11af5cc484477283ccb624df88bf072fc2f8088..e4ff38949112d8f245df2e481c28ba0ac7ede17c 100644 (file)
@@ -89,15 +89,6 @@ source "drivers/net/ethernet/marvell/Kconfig"
 source "drivers/net/ethernet/mellanox/Kconfig"
 source "drivers/net/ethernet/micrel/Kconfig"
 source "drivers/net/ethernet/microchip/Kconfig"
-
-config MIPS_SIM_NET
-       tristate "MIPS simulator Network device"
-       depends on MIPS_SIM
-       ---help---
-         The MIPSNET device is a simple Ethernet network device which is
-         emulated by the MIPS Simulator.
-         If you are not using a MIPSsim or are unsure, say N.
-
 source "drivers/net/ethernet/myricom/Kconfig"
 
 config FEALNX
index 878ad32b93f21c8fd8191c152c721c409bd81d25..d4473072654abcf0da782651a76c8d036f1fca9a 100644 (file)
@@ -40,7 +40,6 @@ obj-$(CONFIG_NET_VENDOR_MARVELL) += marvell/
 obj-$(CONFIG_NET_VENDOR_MELLANOX) += mellanox/
 obj-$(CONFIG_NET_VENDOR_MICREL) += micrel/
 obj-$(CONFIG_NET_VENDOR_MICROCHIP) += microchip/
-obj-$(CONFIG_MIPS_SIM_NET) += mipsnet.o
 obj-$(CONFIG_NET_VENDOR_MYRI) += myricom/
 obj-$(CONFIG_FEALNX) += fealnx.o
 obj-$(CONFIG_NET_VENDOR_NATSEMI) += natsemi/
index f15e72e81ac4db2c1fa623c8f09a9820e59e9408..4bd416b72e65a9a5423930e9a52340bb087f2174 100644 (file)
@@ -101,6 +101,7 @@ config TIGON3
        tristate "Broadcom Tigon3 support"
        depends on PCI
        select PHYLIB
+       select HWMON
        ---help---
          This driver supports Broadcom Tigon3 based gigabit Ethernet cards.
 
index eac25236856c85b174cf5c124dab5612d3bb68ab..72897c47b8c849c31cf387c6cd63f42d624be07c 100644 (file)
@@ -23,8 +23,8 @@
  * (you will need to reboot afterwards) */
 /* #define BNX2X_STOP_ON_ERROR */
 
-#define DRV_MODULE_VERSION      "1.72.51-0"
-#define DRV_MODULE_RELDATE      "2012/06/18"
+#define DRV_MODULE_VERSION      "1.78.00-0"
+#define DRV_MODULE_RELDATE      "2012/09/27"
 #define BNX2X_BC_VER            0x040200
 
 #if defined(CONFIG_DCB)
index e8e97a7d1d06df9a209c741406af32c19a1313e2..30f04a389227bbfcb9cdaf29b563c005afbfa26a 100644 (file)
@@ -2285,7 +2285,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
        /* Wait for all pending SP commands to complete */
        if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) {
                BNX2X_ERR("Timeout waiting for SP elements to complete\n");
-               bnx2x_nic_unload(bp, UNLOAD_CLOSE);
+               bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
                return -EBUSY;
        }
 
@@ -2333,7 +2333,7 @@ load_error0:
 }
 
 /* must be called with rtnl_lock */
-int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
+int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
 {
        int i;
        bool global = false;
@@ -2395,7 +2395,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
 
        /* Cleanup the chip if needed */
        if (unload_mode != UNLOAD_RECOVERY)
-               bnx2x_chip_cleanup(bp, unload_mode);
+               bnx2x_chip_cleanup(bp, unload_mode, keep_link);
        else {
                /* Send the UNLOAD_REQUEST to the MCP */
                bnx2x_send_unload_req(bp, unload_mode);
@@ -2419,7 +2419,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
                bnx2x_free_irq(bp);
 
                /* Report UNLOAD_DONE to MCP */
-               bnx2x_send_unload_done(bp);
+               bnx2x_send_unload_done(bp, false);
        }
 
        /*
@@ -3026,8 +3026,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
        first_bd = tx_start_bd;
 
        tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
-       SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
-                mac_type);
+       SET_FLAG(tx_start_bd->general_data,
+                ETH_TX_START_BD_PARSE_NBDS,
+                0);
 
        /* header nbd */
        SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
@@ -3077,13 +3078,20 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
                                              &pbd_e2->dst_mac_addr_lo,
                                              eth->h_dest);
                }
+
+               SET_FLAG(pbd_e2_parsing_data,
+                        ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
        } else {
+               u16 global_data = 0;
                pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
                memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
                /* Set PBD in checksum offload case */
                if (xmit_type & XMIT_CSUM)
                        hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
 
+               SET_FLAG(global_data,
+                        ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
+               pbd_e1x->global_data |= cpu_to_le16(global_data);
        }
 
        /* Setup the data pointer of the first BD of the packet */
@@ -3770,7 +3778,7 @@ int bnx2x_reload_if_running(struct net_device *dev)
        if (unlikely(!netif_running(dev)))
                return 0;
 
-       bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+       bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
        return bnx2x_nic_load(bp, LOAD_NORMAL);
 }
 
@@ -3967,7 +3975,7 @@ int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
 
        netif_device_detach(dev);
 
-       bnx2x_nic_unload(bp, UNLOAD_CLOSE);
+       bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
 
        bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
 
index dfd86a55f1dcab583ad08342a21eaae9a97be53f..9c5ea6c5b4c7597059644c57d27f2943cb65b792 100644 (file)
@@ -83,8 +83,9 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode);
  * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP.
  *
  * @bp:                driver handle
+ * @keep_link:         true iff link should be kept up
  */
-void bnx2x_send_unload_done(struct bnx2x *bp);
+void bnx2x_send_unload_done(struct bnx2x *bp, bool keep_link);
 
 /**
  * bnx2x_config_rss_pf - configure RSS parameters in a PF.
@@ -152,6 +153,14 @@ u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode);
  */
 void bnx2x_link_set(struct bnx2x *bp);
 
+/**
+ * bnx2x_force_link_reset - Forces link reset, and put the PHY
+ * in reset as well.
+ *
+ * @bp:                driver handle
+ */
+void bnx2x_force_link_reset(struct bnx2x *bp);
+
 /**
  * bnx2x_link_test - query link status.
  *
@@ -312,12 +321,13 @@ void bnx2x_set_num_queues(struct bnx2x *bp);
  *
  * @bp:                        driver handle
  * @unload_mode:       COMMON, PORT, FUNCTION
+ * @keep_link:         true iff link should be kept up.
  *
  * - Cleanup MAC configuration.
  * - Closes clients.
  * - etc.
  */
-void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode);
+void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link);
 
 /**
  * bnx2x_acquire_hw_lock - acquire HW lock.
@@ -446,7 +456,7 @@ void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl);
 bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err);
 
 /* dev_close main block */
-int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
+int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link);
 
 /* dev_open main block */
 int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
index 8a73374e52a763ba16d8526a2d3f1aa8b80fb11e..2245c3895409d149c402fa70bdff529790046e28 100644 (file)
@@ -91,25 +91,21 @@ static void bnx2x_pfc_set(struct bnx2x *bp)
        /*
         * Rx COS configuration
         * Changing PFC RX configuration .
-        * In RX COS0 will always be configured to lossy and COS1 to lossless
+        * In RX COS0 will always be configured to lossless and COS1 to lossy
         */
        for (i = 0 ; i < MAX_PFC_PRIORITIES ; i++) {
                pri_bit = 1 << i;
 
-               if (pri_bit & DCBX_PFC_PRI_PAUSE_MASK(bp))
+               if (!(pri_bit & DCBX_PFC_PRI_PAUSE_MASK(bp)))
                        val |= 1 << (i * 4);
        }
 
        pfc_params.pkt_priority_to_cos = val;
 
        /* RX COS0 */
-       pfc_params.llfc_low_priority_classes = 0;
+       pfc_params.llfc_low_priority_classes = DCBX_PFC_PRI_PAUSE_MASK(bp);
        /* RX COS1 */
-       pfc_params.llfc_high_priority_classes = DCBX_PFC_PRI_PAUSE_MASK(bp);
-
-       /* BRB configuration */
-       pfc_params.cos0_pauseable = false;
-       pfc_params.cos1_pauseable = true;
+       pfc_params.llfc_high_priority_classes = 0;
 
        bnx2x_acquire_phy_lock(bp);
        bp->link_params.feature_config_flags |= FEATURE_CONFIG_PFC_ENABLED;
index ebf40cd7aa1050d716683e806eda505050bf1e40..c65295dded39aa5b1965a9dd07e00974863ca297 100644 (file)
@@ -905,6 +905,7 @@ static int bnx2x_nway_reset(struct net_device *dev)
 
        if (netif_running(dev)) {
                bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+               bnx2x_force_link_reset(bp);
                bnx2x_link_set(bp);
        }
 
@@ -1606,7 +1607,7 @@ static int bnx2x_set_pauseparam(struct net_device *dev,
        return 0;
 }
 
-static char *bnx2x_tests_str_arr[BNX2X_NUM_TESTS_SF] = {
+static const char bnx2x_tests_str_arr[BNX2X_NUM_TESTS_SF][ETH_GSTRING_LEN] = {
        "register_test (offline)    ",
        "memory_test (offline)      ",
        "int_loopback_test (offline)",
@@ -1653,7 +1654,7 @@ static int bnx2x_get_eee(struct net_device *dev, struct ethtool_eee *edata)
                return -EOPNOTSUPP;
        }
 
-       eee_cfg = SHMEM2_RD(bp, eee_status[BP_PORT(bp)]);
+       eee_cfg = bp->link_vars.eee_status;
 
        edata->supported =
                bnx2x_eee_to_adv((eee_cfg & SHMEM_EEE_SUPPORTED_MASK) >>
@@ -1690,7 +1691,7 @@ static int bnx2x_set_eee(struct net_device *dev, struct ethtool_eee *edata)
                return -EOPNOTSUPP;
        }
 
-       eee_cfg = SHMEM2_RD(bp, eee_status[BP_PORT(bp)]);
+       eee_cfg = bp->link_vars.eee_status;
 
        if (!(eee_cfg & SHMEM_EEE_SUPPORTED_MASK)) {
                DP(BNX2X_MSG_ETHTOOL, "Board does not support EEE!\n");
@@ -1739,6 +1740,7 @@ static int bnx2x_set_eee(struct net_device *dev, struct ethtool_eee *edata)
        /* Restart link to propogate changes */
        if (netif_running(dev)) {
                bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+               bnx2x_force_link_reset(bp);
                bnx2x_link_set(bp);
        }
 
@@ -2038,8 +2040,6 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
        u16 pkt_prod, bd_prod;
        struct sw_tx_bd *tx_buf;
        struct eth_tx_start_bd *tx_start_bd;
-       struct eth_tx_parse_bd_e1x  *pbd_e1x = NULL;
-       struct eth_tx_parse_bd_e2  *pbd_e2 = NULL;
        dma_addr_t mapping;
        union eth_rx_cqe *cqe;
        u8 cqe_fp_flags, cqe_fp_type;
@@ -2130,22 +2130,33 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
        tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
        tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
        tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
-       SET_FLAG(tx_start_bd->general_data,
-                ETH_TX_START_BD_ETH_ADDR_TYPE,
-                UNICAST_ADDRESS);
        SET_FLAG(tx_start_bd->general_data,
                 ETH_TX_START_BD_HDR_NBDS,
                 1);
+       SET_FLAG(tx_start_bd->general_data,
+                ETH_TX_START_BD_PARSE_NBDS,
+                0);
 
        /* turn on parsing and get a BD */
        bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
 
-       pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
-       pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
-
-       memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
-       memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
-
+       if (CHIP_IS_E1x(bp)) {
+               u16 global_data = 0;
+               struct eth_tx_parse_bd_e1x  *pbd_e1x =
+                       &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
+               memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
+               SET_FLAG(global_data,
+                        ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, UNICAST_ADDRESS);
+               pbd_e1x->global_data = cpu_to_le16(global_data);
+       } else {
+               u32 parsing_data = 0;
+               struct eth_tx_parse_bd_e2  *pbd_e2 =
+                       &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
+               memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
+               SET_FLAG(parsing_data,
+                        ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, UNICAST_ADDRESS);
+               pbd_e2->parsing_data = cpu_to_le32(parsing_data);
+       }
        wmb();
 
        txdata->tx_db.data.prod += 2;
@@ -2263,7 +2274,7 @@ static int bnx2x_test_ext_loopback(struct bnx2x *bp)
        if (!netif_running(bp->dev))
                return BNX2X_EXT_LOOPBACK_FAILED;
 
-       bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+       bnx2x_nic_unload(bp, UNLOAD_NORMAL, false);
        rc = bnx2x_nic_load(bp, LOAD_LOOPBACK_EXT);
        if (rc) {
                DP(BNX2X_MSG_ETHTOOL,
@@ -2414,7 +2425,7 @@ static void bnx2x_self_test(struct net_device *dev,
 
                link_up = bp->link_vars.link_up;
 
-               bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+               bnx2x_nic_unload(bp, UNLOAD_NORMAL, false);
                rc = bnx2x_nic_load(bp, LOAD_DIAG);
                if (rc) {
                        etest->flags |= ETH_TEST_FL_FAILED;
@@ -2446,7 +2457,7 @@ static void bnx2x_self_test(struct net_device *dev,
                        etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
                }
 
-               bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+               bnx2x_nic_unload(bp, UNLOAD_NORMAL, false);
 
                /* restore input for TX port IF */
                REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
@@ -2534,7 +2545,7 @@ static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
 {
        struct bnx2x *bp = netdev_priv(dev);
-       int i, j, k, offset, start;
+       int i, j, k, start;
        char queue_name[MAX_QUEUE_NAME_LEN+1];
 
        switch (stringset) {
@@ -2570,13 +2581,8 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
                        start = 0;
                else
                        start = 4;
-               for (i = 0, j = start; j < (start + BNX2X_NUM_TESTS(bp));
-                    i++, j++) {
-                       offset = sprintf(buf+32*i, "%s",
-                                        bnx2x_tests_str_arr[j]);
-                       *(buf+offset) = '\0';
-               }
-               break;
+               memcpy(buf, bnx2x_tests_str_arr + start,
+                      ETH_GSTRING_LEN * BNX2X_NUM_TESTS(bp));
        }
 }
 
@@ -2940,7 +2946,7 @@ static int bnx2x_set_channels(struct net_device *dev,
                bnx2x_change_num_queues(bp, channels->combined_count);
                return 0;
        }
-       bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+       bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
        bnx2x_change_num_queues(bp, channels->combined_count);
        return bnx2x_nic_load(bp, LOAD_NORMAL);
 }
index bbc66ced9c25af262f4884de5549c20659f9bc6e..620fe939ecfd357ed1e852bd0350c3687886a8b5 100644 (file)
@@ -88,9 +88,6 @@
 #define TSTORM_ASSERT_LIST_INDEX_OFFSET        (IRO[102].base)
 #define TSTORM_ASSERT_LIST_OFFSET(assertListEntry) \
        (IRO[101].base + ((assertListEntry) * IRO[101].m1))
-#define TSTORM_COMMON_SAFC_WORKAROUND_ENABLE_OFFSET (IRO[107].base)
-#define TSTORM_COMMON_SAFC_WORKAROUND_TIMEOUT_10USEC_OFFSET \
-       (IRO[108].base)
 #define TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(pfId) \
        (IRO[201].base + ((pfId) * IRO[201].m1))
 #define TSTORM_FUNC_EN_OFFSET(funcId) \
index 76b6e65790f8f5c54dab782a84d2c20c2393f381..18704929e6422ec15f0c59ec8c4abcee8802e532 100644 (file)
@@ -1286,6 +1286,9 @@ struct drv_func_mb {
        #define DRV_MSG_CODE_SET_MF_BW_MIN_MASK         0x00ff0000
        #define DRV_MSG_CODE_SET_MF_BW_MAX_MASK         0xff000000
 
+       #define DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET     0x00000002
+
+       #define DRV_MSG_CODE_LOAD_REQ_WITH_LFA          0x0000100a
        u32 fw_mb_header;
        #define FW_MSG_CODE_MASK                        0xffff0000
        #define FW_MSG_CODE_DRV_LOAD_COMMON             0x10100000
@@ -1909,6 +1912,54 @@ struct lldp_local_mib {
 };
 /***END OF DCBX STRUCTURES DECLARATIONS***/
 
+/***********************************************************/
+/*                         Elink section                   */
+/***********************************************************/
+#define SHMEM_LINK_CONFIG_SIZE 2
+struct shmem_lfa {
+       u32 req_duplex;
+       #define REQ_DUPLEX_PHY0_MASK        0x0000ffff
+       #define REQ_DUPLEX_PHY0_SHIFT       0
+       #define REQ_DUPLEX_PHY1_MASK        0xffff0000
+       #define REQ_DUPLEX_PHY1_SHIFT       16
+       u32 req_flow_ctrl;
+       #define REQ_FLOW_CTRL_PHY0_MASK     0x0000ffff
+       #define REQ_FLOW_CTRL_PHY0_SHIFT    0
+       #define REQ_FLOW_CTRL_PHY1_MASK     0xffff0000
+       #define REQ_FLOW_CTRL_PHY1_SHIFT    16
+       u32 req_line_speed; /* Also determine AutoNeg */
+       #define REQ_LINE_SPD_PHY0_MASK      0x0000ffff
+       #define REQ_LINE_SPD_PHY0_SHIFT     0
+       #define REQ_LINE_SPD_PHY1_MASK      0xffff0000
+       #define REQ_LINE_SPD_PHY1_SHIFT     16
+       u32 speed_cap_mask[SHMEM_LINK_CONFIG_SIZE];
+       u32 additional_config;
+       #define REQ_FC_AUTO_ADV_MASK        0x0000ffff
+       #define REQ_FC_AUTO_ADV0_SHIFT      0
+       #define NO_LFA_DUE_TO_DCC_MASK      0x00010000
+       u32 lfa_sts;
+       #define LFA_LINK_FLAP_REASON_OFFSET             0
+       #define LFA_LINK_FLAP_REASON_MASK               0x000000ff
+               #define LFA_LINK_DOWN                       0x1
+               #define LFA_LOOPBACK_ENABLED            0x2
+               #define LFA_DUPLEX_MISMATCH                 0x3
+               #define LFA_MFW_IS_TOO_OLD                  0x4
+               #define LFA_LINK_SPEED_MISMATCH         0x5
+               #define LFA_FLOW_CTRL_MISMATCH          0x6
+               #define LFA_SPEED_CAP_MISMATCH          0x7
+               #define LFA_DCC_LFA_DISABLED            0x8
+               #define LFA_EEE_MISMATCH                0x9
+
+       #define LINK_FLAP_AVOIDANCE_COUNT_OFFSET        8
+       #define LINK_FLAP_AVOIDANCE_COUNT_MASK          0x0000ff00
+
+       #define LINK_FLAP_COUNT_OFFSET                  16
+       #define LINK_FLAP_COUNT_MASK                    0x00ff0000
+
+       #define LFA_FLAGS_MASK                          0xff000000
+       #define SHMEM_LFA_DONT_CLEAR_STAT               (1<<24)
+};
+
 struct ncsi_oem_fcoe_features {
        u32 fcoe_features1;
        #define FCOE_FEATURES1_IOS_PER_CONNECTION_MASK          0x0000FFFF
@@ -2738,8 +2789,8 @@ struct afex_stats {
 };
 
 #define BCM_5710_FW_MAJOR_VERSION                      7
-#define BCM_5710_FW_MINOR_VERSION                      2
-#define BCM_5710_FW_REVISION_VERSION                   51
+#define BCM_5710_FW_MINOR_VERSION                      8
+#define BCM_5710_FW_REVISION_VERSION           2
 #define BCM_5710_FW_ENGINEERING_VERSION                        0
 #define BCM_5710_FW_COMPILE_FLAGS                      1
 
@@ -3861,10 +3912,8 @@ struct eth_rss_update_ramrod_data {
 #define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY_SHIFT 4
 #define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY (0x1<<5)
 #define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY_SHIFT 5
-#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY (0x1<<6)
-#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY_SHIFT 6
-#define __ETH_RSS_UPDATE_RAMROD_DATA_RESERVED0 (0x1<<7)
-#define __ETH_RSS_UPDATE_RAMROD_DATA_RESERVED0_SHIFT 7
+#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY (0x1<<7)
+#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY_SHIFT 7
        u8 rss_result_mask;
        u8 rss_mode;
        __le32 __reserved2;
@@ -4080,27 +4129,29 @@ struct eth_tx_start_bd {
 #define ETH_TX_START_BD_HDR_NBDS_SHIFT 0
 #define ETH_TX_START_BD_FORCE_VLAN_MODE (0x1<<4)
 #define ETH_TX_START_BD_FORCE_VLAN_MODE_SHIFT 4
-#define ETH_TX_START_BD_RESREVED (0x1<<5)
-#define ETH_TX_START_BD_RESREVED_SHIFT 5
-#define ETH_TX_START_BD_ETH_ADDR_TYPE (0x3<<6)
-#define ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT 6
+#define ETH_TX_START_BD_PARSE_NBDS (0x3<<5)
+#define ETH_TX_START_BD_PARSE_NBDS_SHIFT 5
+#define ETH_TX_START_BD_RESREVED (0x1<<7)
+#define ETH_TX_START_BD_RESREVED_SHIFT 7
 };
 
 /*
  * Tx parsing BD structure for ETH E1/E1h
  */
 struct eth_tx_parse_bd_e1x {
-       u8 global_data;
+       __le16 global_data;
 #define ETH_TX_PARSE_BD_E1X_IP_HDR_START_OFFSET_W (0xF<<0)
 #define ETH_TX_PARSE_BD_E1X_IP_HDR_START_OFFSET_W_SHIFT 0
-#define ETH_TX_PARSE_BD_E1X_RESERVED0 (0x1<<4)
-#define ETH_TX_PARSE_BD_E1X_RESERVED0_SHIFT 4
-#define ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN (0x1<<5)
-#define ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN_SHIFT 5
-#define ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN (0x1<<6)
-#define ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT 6
-#define ETH_TX_PARSE_BD_E1X_NS_FLG (0x1<<7)
-#define ETH_TX_PARSE_BD_E1X_NS_FLG_SHIFT 7
+#define ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE (0x3<<4)
+#define ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE_SHIFT 4
+#define ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN (0x1<<6)
+#define ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN_SHIFT 6
+#define ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN (0x1<<7)
+#define ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT 7
+#define ETH_TX_PARSE_BD_E1X_NS_FLG (0x1<<8)
+#define ETH_TX_PARSE_BD_E1X_NS_FLG_SHIFT 8
+#define ETH_TX_PARSE_BD_E1X_RESERVED0 (0x7F<<9)
+#define ETH_TX_PARSE_BD_E1X_RESERVED0_SHIFT 9
        u8 tcp_flags;
 #define ETH_TX_PARSE_BD_E1X_FIN_FLG (0x1<<0)
 #define ETH_TX_PARSE_BD_E1X_FIN_FLG_SHIFT 0
@@ -4119,7 +4170,6 @@ struct eth_tx_parse_bd_e1x {
 #define ETH_TX_PARSE_BD_E1X_CWR_FLG (0x1<<7)
 #define ETH_TX_PARSE_BD_E1X_CWR_FLG_SHIFT 7
        u8 ip_hlen_w;
-       s8 reserved;
        __le16 total_hlen_w;
        __le16 tcp_pseudo_csum;
        __le16 lso_mss;
@@ -4138,14 +4188,16 @@ struct eth_tx_parse_bd_e2 {
        __le16 src_mac_addr_mid;
        __le16 src_mac_addr_hi;
        __le32 parsing_data;
-#define ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W (0x1FFF<<0)
+#define ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W (0x7FF<<0)
 #define ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT 0
-#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW (0xF<<13)
-#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT 13
-#define ETH_TX_PARSE_BD_E2_LSO_MSS (0x3FFF<<17)
-#define ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT 17
-#define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR (0x1<<31)
-#define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR_SHIFT 31
+#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW (0xF<<11)
+#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT 11
+#define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR (0x1<<15)
+#define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR_SHIFT 15
+#define ETH_TX_PARSE_BD_E2_LSO_MSS (0x3FFF<<16)
+#define ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT 16
+#define ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE (0x3<<30)
+#define ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE_SHIFT 30
 };
 
 /*
@@ -4913,7 +4965,8 @@ struct flow_control_configuration {
  *
  */
 struct function_start_data {
-       __le16 function_mode;
+       u8 function_mode;
+       u8 reserved;
        __le16 sd_vlan_tag;
        __le16 vif_id;
        u8 path_id;
index 559c396d45cce465ae77999fa0d22508b6d60624..c8f10f0e8a0dea6db58e412989f2d35eddc6528d 100644 (file)
@@ -566,7 +566,7 @@ static const struct {
                u32 e2;         /* 57712 */
                u32 e3;         /* 578xx */
        } reg_mask;             /* Register mask (all valid bits) */
-       char name[7];           /* Block's longest name is 6 characters long
+       char name[8];           /* Block's longest name is 7 characters long
                                 * (name + suffix)
                                 */
 } bnx2x_blocks_parity_data[] = {
index b046beb435b2c490f70ef3bc2f67bf3af16bfcb5..e2e45ee5df33fcc75c491c78643dfe406297aabb 100644 (file)
 #define EDC_MODE_LIMITING                              0x0044
 #define EDC_MODE_PASSIVE_DAC                   0x0055
 
-/* BRB default for class 0 E2 */
-#define DEFAULT0_E2_BRB_MAC_PAUSE_XOFF_THR     170
-#define DEFAULT0_E2_BRB_MAC_PAUSE_XON_THR              250
-#define DEFAULT0_E2_BRB_MAC_FULL_XOFF_THR              10
-#define DEFAULT0_E2_BRB_MAC_FULL_XON_THR               50
-
-/* BRB thresholds for E2*/
-#define PFC_E2_BRB_MAC_PAUSE_XOFF_THR_PAUSE            170
-#define PFC_E2_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE                0
-
-#define PFC_E2_BRB_MAC_PAUSE_XON_THR_PAUSE             250
-#define PFC_E2_BRB_MAC_PAUSE_XON_THR_NON_PAUSE         0
-
-#define PFC_E2_BRB_MAC_FULL_XOFF_THR_PAUSE             10
-#define PFC_E2_BRB_MAC_FULL_XOFF_THR_NON_PAUSE         90
-
-#define PFC_E2_BRB_MAC_FULL_XON_THR_PAUSE                      50
-#define PFC_E2_BRB_MAC_FULL_XON_THR_NON_PAUSE          250
-
-/* BRB default for class 0 E3A0 */
-#define DEFAULT0_E3A0_BRB_MAC_PAUSE_XOFF_THR   290
-#define DEFAULT0_E3A0_BRB_MAC_PAUSE_XON_THR    410
-#define DEFAULT0_E3A0_BRB_MAC_FULL_XOFF_THR    10
-#define DEFAULT0_E3A0_BRB_MAC_FULL_XON_THR     50
-
-/* BRB thresholds for E3A0 */
-#define PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_PAUSE          290
-#define PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE              0
-
-#define PFC_E3A0_BRB_MAC_PAUSE_XON_THR_PAUSE           410
-#define PFC_E3A0_BRB_MAC_PAUSE_XON_THR_NON_PAUSE               0
-
-#define PFC_E3A0_BRB_MAC_FULL_XOFF_THR_PAUSE           10
-#define PFC_E3A0_BRB_MAC_FULL_XOFF_THR_NON_PAUSE               170
-
-#define PFC_E3A0_BRB_MAC_FULL_XON_THR_PAUSE            50
-#define PFC_E3A0_BRB_MAC_FULL_XON_THR_NON_PAUSE                410
-
-/* BRB default for E3B0 */
-#define DEFAULT0_E3B0_BRB_MAC_PAUSE_XOFF_THR   330
-#define DEFAULT0_E3B0_BRB_MAC_PAUSE_XON_THR    490
-#define DEFAULT0_E3B0_BRB_MAC_FULL_XOFF_THR    15
-#define DEFAULT0_E3B0_BRB_MAC_FULL_XON_THR     55
-
-/* BRB thresholds for E3B0 2 port mode*/
-#define PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_PAUSE               1025
-#define PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE   0
-
-#define PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_PAUSE                1025
-#define PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE    0
-
-#define PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_PAUSE                10
-#define PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE    1025
-
-#define PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_PAUSE         50
-#define PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_NON_PAUSE     1025
-
-/* only for E3B0*/
-#define PFC_E3B0_2P_BRB_FULL_LB_XOFF_THR                       1025
-#define PFC_E3B0_2P_BRB_FULL_LB_XON_THR                        1025
-
-/* Lossy +Lossless GUARANTIED == GUART */
-#define PFC_E3B0_2P_MIX_PAUSE_LB_GUART                 284
-/* Lossless +Lossless*/
-#define PFC_E3B0_2P_PAUSE_LB_GUART                     236
-/* Lossy +Lossy*/
-#define PFC_E3B0_2P_NON_PAUSE_LB_GUART                 342
-
-/* Lossy +Lossless*/
-#define PFC_E3B0_2P_MIX_PAUSE_MAC_0_CLASS_T_GUART              284
-/* Lossless +Lossless*/
-#define PFC_E3B0_2P_PAUSE_MAC_0_CLASS_T_GUART          236
-/* Lossy +Lossy*/
-#define PFC_E3B0_2P_NON_PAUSE_MAC_0_CLASS_T_GUART              336
-#define PFC_E3B0_2P_BRB_MAC_0_CLASS_T_GUART_HYST               80
-
-#define PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART            0
-#define PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART_HYST               0
-
-/* BRB thresholds for E3B0 4 port mode */
-#define PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_PAUSE               304
-#define PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE   0
-
-#define PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_PAUSE                384
-#define PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE    0
-
-#define PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_PAUSE                10
-#define PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE    304
-
-#define PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_PAUSE         50
-#define PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_NON_PAUSE     384
-
-/* only for E3B0*/
-#define PFC_E3B0_4P_BRB_FULL_LB_XOFF_THR                       304
-#define PFC_E3B0_4P_BRB_FULL_LB_XON_THR                        384
-#define PFC_E3B0_4P_LB_GUART           120
-
-#define PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART            120
-#define PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART_HYST       80
-
-#define PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART            80
-#define PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART_HYST       120
-
-/* Pause defines*/
-#define DEFAULT_E3B0_BRB_FULL_LB_XOFF_THR                      330
-#define DEFAULT_E3B0_BRB_FULL_LB_XON_THR                       490
-#define DEFAULT_E3B0_LB_GUART          40
-
-#define DEFAULT_E3B0_BRB_MAC_0_CLASS_T_GUART           40
-#define DEFAULT_E3B0_BRB_MAC_0_CLASS_T_GUART_HYST      0
-
-#define DEFAULT_E3B0_BRB_MAC_1_CLASS_T_GUART           40
-#define DEFAULT_E3B0_BRB_MAC_1_CLASS_T_GUART_HYST      0
-
 /* ETS defines*/
 #define DCBX_INVALID_COS                                       (0xFF)
 
@@ -321,6 +207,127 @@ static u32 bnx2x_bits_dis(struct bnx2x *bp, u32 reg, u32 bits)
        return val;
 }
 
+/*
+ * bnx2x_check_lfa - This function checks if link reinitialization is required,
+ *                   or whether the link flap can be avoided.
+ *
+ * @params:    link parameters
+ * Returns 0 if the Link Flap Avoidance conditions are met; otherwise, the
+ *         failed condition code.
+ */
+static int bnx2x_check_lfa(struct link_params *params)
+{
+       u32 link_status, cfg_idx, lfa_mask, cfg_size;
+       u32 cur_speed_cap_mask, cur_req_fc_auto_adv, additional_config;
+       u32 saved_val, req_val, eee_status;
+       struct bnx2x *bp = params->bp;
+
+       additional_config =
+               REG_RD(bp, params->lfa_base +
+                          offsetof(struct shmem_lfa, additional_config));
+
+       /* NOTE: this must be the first condition checked -
+        * to verify that the DCC bit is cleared in any case!
+        */
+       if (additional_config & NO_LFA_DUE_TO_DCC_MASK) {
+               DP(NETIF_MSG_LINK, "No LFA due to DCC flap after clp exit\n");
+               REG_WR(bp, params->lfa_base +
+                          offsetof(struct shmem_lfa, additional_config),
+                      additional_config & ~NO_LFA_DUE_TO_DCC_MASK);
+               return LFA_DCC_LFA_DISABLED;
+       }
+
+       /* Verify that link is up */
+       link_status = REG_RD(bp, params->shmem_base +
+                            offsetof(struct shmem_region,
+                                     port_mb[params->port].link_status));
+       if (!(link_status & LINK_STATUS_LINK_UP))
+               return LFA_LINK_DOWN;
+
+       /* Verify that loopback mode is not set */
+       if (params->loopback_mode)
+               return LFA_LOOPBACK_ENABLED;
+
+       /* Verify that MFW supports LFA */
+       if (!params->lfa_base)
+               return LFA_MFW_IS_TOO_OLD;
+
+       if (params->num_phys == 3) {
+               cfg_size = 2;
+               lfa_mask = 0xffffffff;
+       } else {
+               cfg_size = 1;
+               lfa_mask = 0xffff;
+       }
+
+       /* Compare Duplex */
+       saved_val = REG_RD(bp, params->lfa_base +
+                          offsetof(struct shmem_lfa, req_duplex));
+       req_val = params->req_duplex[0] | (params->req_duplex[1] << 16);
+       if ((saved_val & lfa_mask) != (req_val & lfa_mask)) {
+               DP(NETIF_MSG_LINK, "Duplex mismatch %x vs. %x\n",
+                              (saved_val & lfa_mask), (req_val & lfa_mask));
+               return LFA_DUPLEX_MISMATCH;
+       }
+       /* Compare Flow Control */
+       saved_val = REG_RD(bp, params->lfa_base +
+                          offsetof(struct shmem_lfa, req_flow_ctrl));
+       req_val = params->req_flow_ctrl[0] | (params->req_flow_ctrl[1] << 16);
+       if ((saved_val & lfa_mask) != (req_val & lfa_mask)) {
+               DP(NETIF_MSG_LINK, "Flow control mismatch %x vs. %x\n",
+                              (saved_val & lfa_mask), (req_val & lfa_mask));
+               return LFA_FLOW_CTRL_MISMATCH;
+       }
+       /* Compare Link Speed */
+       saved_val = REG_RD(bp, params->lfa_base +
+                          offsetof(struct shmem_lfa, req_line_speed));
+       req_val = params->req_line_speed[0] | (params->req_line_speed[1] << 16);
+       if ((saved_val & lfa_mask) != (req_val & lfa_mask)) {
+               DP(NETIF_MSG_LINK, "Link speed mismatch %x vs. %x\n",
+                              (saved_val & lfa_mask), (req_val & lfa_mask));
+               return LFA_LINK_SPEED_MISMATCH;
+       }
+
+       for (cfg_idx = 0; cfg_idx < cfg_size; cfg_idx++) {
+               cur_speed_cap_mask = REG_RD(bp, params->lfa_base +
+                                           offsetof(struct shmem_lfa,
+                                                    speed_cap_mask[cfg_idx]));
+
+               if (cur_speed_cap_mask != params->speed_cap_mask[cfg_idx]) {
+                       DP(NETIF_MSG_LINK, "Speed Cap mismatch %x vs. %x\n",
+                                      cur_speed_cap_mask,
+                                      params->speed_cap_mask[cfg_idx]);
+                       return LFA_SPEED_CAP_MISMATCH;
+               }
+       }
+
+       cur_req_fc_auto_adv =
+               REG_RD(bp, params->lfa_base +
+                      offsetof(struct shmem_lfa, additional_config)) &
+               REQ_FC_AUTO_ADV_MASK;
+
+       if ((u16)cur_req_fc_auto_adv != params->req_fc_auto_adv) {
+               DP(NETIF_MSG_LINK, "Flow Ctrl AN mismatch %x vs. %x\n",
+                              cur_req_fc_auto_adv, params->req_fc_auto_adv);
+               return LFA_FLOW_CTRL_MISMATCH;
+       }
+
+       eee_status = REG_RD(bp, params->shmem2_base +
+                           offsetof(struct shmem2_region,
+                                    eee_status[params->port]));
+
+       if (((eee_status & SHMEM_EEE_LPI_REQUESTED_BIT) ^
+            (params->eee_mode & EEE_MODE_ENABLE_LPI)) ||
+           ((eee_status & SHMEM_EEE_REQUESTED_BIT) ^
+            (params->eee_mode & EEE_MODE_ADV_LPI))) {
+               DP(NETIF_MSG_LINK, "EEE mismatch %x vs. %x\n", params->eee_mode,
+                              eee_status);
+               return LFA_EEE_MISMATCH;
+       }
+
+       /* LFA conditions are met */
+       return 0;
+}
 /******************************************************************/
 /*                     EPIO/GPIO section                         */
 /******************************************************************/
@@ -1306,93 +1313,6 @@ int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
        return 0;
 }
 
-/******************************************************************/
-/*                     EEE section                                */
-/******************************************************************/
-static u8 bnx2x_eee_has_cap(struct link_params *params)
-{
-       struct bnx2x *bp = params->bp;
-
-       if (REG_RD(bp, params->shmem2_base) <=
-                  offsetof(struct shmem2_region, eee_status[params->port]))
-               return 0;
-
-       return 1;
-}
-
-static int bnx2x_eee_nvram_to_time(u32 nvram_mode, u32 *idle_timer)
-{
-       switch (nvram_mode) {
-       case PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED:
-               *idle_timer = EEE_MODE_NVRAM_BALANCED_TIME;
-               break;
-       case PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE:
-               *idle_timer = EEE_MODE_NVRAM_AGGRESSIVE_TIME;
-               break;
-       case PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY:
-               *idle_timer = EEE_MODE_NVRAM_LATENCY_TIME;
-               break;
-       default:
-               *idle_timer = 0;
-               break;
-       }
-
-       return 0;
-}
-
-static int bnx2x_eee_time_to_nvram(u32 idle_timer, u32 *nvram_mode)
-{
-       switch (idle_timer) {
-       case EEE_MODE_NVRAM_BALANCED_TIME:
-               *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED;
-               break;
-       case EEE_MODE_NVRAM_AGGRESSIVE_TIME:
-               *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE;
-               break;
-       case EEE_MODE_NVRAM_LATENCY_TIME:
-               *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY;
-               break;
-       default:
-               *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED;
-               break;
-       }
-
-       return 0;
-}
-
-static u32 bnx2x_eee_calc_timer(struct link_params *params)
-{
-       u32 eee_mode, eee_idle;
-       struct bnx2x *bp = params->bp;
-
-       if (params->eee_mode & EEE_MODE_OVERRIDE_NVRAM) {
-               if (params->eee_mode & EEE_MODE_OUTPUT_TIME) {
-                       /* time value in eee_mode --> used directly*/
-                       eee_idle = params->eee_mode & EEE_MODE_TIMER_MASK;
-               } else {
-                       /* hsi value in eee_mode --> time */
-                       if (bnx2x_eee_nvram_to_time(params->eee_mode &
-                                                   EEE_MODE_NVRAM_MASK,
-                                                   &eee_idle))
-                               return 0;
-               }
-       } else {
-               /* hsi values in nvram --> time*/
-               eee_mode = ((REG_RD(bp, params->shmem_base +
-                                   offsetof(struct shmem_region, dev_info.
-                                   port_feature_config[params->port].
-                                   eee_power_mode)) &
-                            PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
-                           PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
-
-               if (bnx2x_eee_nvram_to_time(eee_mode, &eee_idle))
-                       return 0;
-       }
-
-       return eee_idle;
-}
-
-
 /******************************************************************/
 /*                     PFC section                               */
 /******************************************************************/
@@ -1606,16 +1526,23 @@ static void bnx2x_set_xumac_nig(struct link_params *params,
               NIG_REG_P0_MAC_PAUSE_OUT_EN, tx_pause_en);
 }
 
-static void bnx2x_umac_disable(struct link_params *params)
+static void bnx2x_set_umac_rxtx(struct link_params *params, u8 en)
 {
        u32 umac_base = params->port ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
+       u32 val;
        struct bnx2x *bp = params->bp;
        if (!(REG_RD(bp, MISC_REG_RESET_REG_2) &
                   (MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port)))
                return;
-
+       val = REG_RD(bp, umac_base + UMAC_REG_COMMAND_CONFIG);
+       if (en)
+               val |= (UMAC_COMMAND_CONFIG_REG_TX_ENA |
+                       UMAC_COMMAND_CONFIG_REG_RX_ENA);
+       else
+               val &= ~(UMAC_COMMAND_CONFIG_REG_TX_ENA |
+                        UMAC_COMMAND_CONFIG_REG_RX_ENA);
        /* Disable RX and TX */
-       REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, 0);
+       REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val);
 }
 
 static void bnx2x_umac_enable(struct link_params *params,
@@ -1671,6 +1598,16 @@ static void bnx2x_umac_enable(struct link_params *params,
        REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val);
        udelay(50);
 
+       /* Configure UMAC for EEE */
+       if (vars->eee_status & SHMEM_EEE_ADV_STATUS_MASK) {
+               DP(NETIF_MSG_LINK, "configured UMAC for EEE\n");
+               REG_WR(bp, umac_base + UMAC_REG_UMAC_EEE_CTRL,
+                      UMAC_UMAC_EEE_CTRL_REG_EEE_EN);
+               REG_WR(bp, umac_base + UMAC_REG_EEE_WAKE_TIMER, 0x11);
+       } else {
+               REG_WR(bp, umac_base + UMAC_REG_UMAC_EEE_CTRL, 0x0);
+       }
+
        /* Set MAC address for source TX Pause/PFC frames (under SW reset) */
        REG_WR(bp, umac_base + UMAC_REG_MAC_ADDR0,
               ((params->mac_addr[2] << 24) |
@@ -1766,11 +1703,12 @@ static void bnx2x_xmac_init(struct link_params *params, u32 max_speed)
 
 }
 
-static void bnx2x_xmac_disable(struct link_params *params)
+static void bnx2x_set_xmac_rxtx(struct link_params *params, u8 en)
 {
        u8 port = params->port;
        struct bnx2x *bp = params->bp;
        u32 pfc_ctrl, xmac_base = (port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
+       u32 val;
 
        if (REG_RD(bp, MISC_REG_RESET_REG_2) &
            MISC_REGISTERS_RESET_REG_2_XMAC) {
@@ -1784,7 +1722,12 @@ static void bnx2x_xmac_disable(struct link_params *params)
                REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI,
                       (pfc_ctrl | (1<<1)));
                DP(NETIF_MSG_LINK, "Disable XMAC on port %x\n", port);
-               REG_WR(bp, xmac_base + XMAC_REG_CTRL, 0);
+               val = REG_RD(bp, xmac_base + XMAC_REG_CTRL);
+               if (en)
+                       val |= (XMAC_CTRL_REG_TX_EN | XMAC_CTRL_REG_RX_EN);
+               else
+                       val &= ~(XMAC_CTRL_REG_TX_EN | XMAC_CTRL_REG_RX_EN);
+               REG_WR(bp, xmac_base + XMAC_REG_CTRL, val);
        }
 }
 
@@ -2087,391 +2030,6 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params,
        REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2);
 }
 
-/* PFC BRB internal port configuration params */
-struct bnx2x_pfc_brb_threshold_val {
-       u32 pause_xoff;
-       u32 pause_xon;
-       u32 full_xoff;
-       u32 full_xon;
-};
-
-struct bnx2x_pfc_brb_e3b0_val {
-       u32 per_class_guaranty_mode;
-       u32 lb_guarantied_hyst;
-       u32 full_lb_xoff_th;
-       u32 full_lb_xon_threshold;
-       u32 lb_guarantied;
-       u32 mac_0_class_t_guarantied;
-       u32 mac_0_class_t_guarantied_hyst;
-       u32 mac_1_class_t_guarantied;
-       u32 mac_1_class_t_guarantied_hyst;
-};
-
-struct bnx2x_pfc_brb_th_val {
-       struct bnx2x_pfc_brb_threshold_val pauseable_th;
-       struct bnx2x_pfc_brb_threshold_val non_pauseable_th;
-       struct bnx2x_pfc_brb_threshold_val default_class0;
-       struct bnx2x_pfc_brb_threshold_val default_class1;
-
-};
-static int bnx2x_pfc_brb_get_config_params(
-                               struct link_params *params,
-                               struct bnx2x_pfc_brb_th_val *config_val)
-{
-       struct bnx2x *bp = params->bp;
-       DP(NETIF_MSG_LINK, "Setting PFC BRB configuration\n");
-
-       config_val->default_class1.pause_xoff = 0;
-       config_val->default_class1.pause_xon = 0;
-       config_val->default_class1.full_xoff = 0;
-       config_val->default_class1.full_xon = 0;
-
-       if (CHIP_IS_E2(bp)) {
-               /* Class0 defaults */
-               config_val->default_class0.pause_xoff =
-                       DEFAULT0_E2_BRB_MAC_PAUSE_XOFF_THR;
-               config_val->default_class0.pause_xon =
-                       DEFAULT0_E2_BRB_MAC_PAUSE_XON_THR;
-               config_val->default_class0.full_xoff =
-                       DEFAULT0_E2_BRB_MAC_FULL_XOFF_THR;
-               config_val->default_class0.full_xon =
-                       DEFAULT0_E2_BRB_MAC_FULL_XON_THR;
-               /* Pause able*/
-               config_val->pauseable_th.pause_xoff =
-                       PFC_E2_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
-               config_val->pauseable_th.pause_xon =
-                       PFC_E2_BRB_MAC_PAUSE_XON_THR_PAUSE;
-               config_val->pauseable_th.full_xoff =
-                       PFC_E2_BRB_MAC_FULL_XOFF_THR_PAUSE;
-               config_val->pauseable_th.full_xon =
-                       PFC_E2_BRB_MAC_FULL_XON_THR_PAUSE;
-               /* Non pause able*/
-               config_val->non_pauseable_th.pause_xoff =
-                       PFC_E2_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
-               config_val->non_pauseable_th.pause_xon =
-                       PFC_E2_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
-               config_val->non_pauseable_th.full_xoff =
-                       PFC_E2_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
-               config_val->non_pauseable_th.full_xon =
-                       PFC_E2_BRB_MAC_FULL_XON_THR_NON_PAUSE;
-       } else if (CHIP_IS_E3A0(bp)) {
-               /* Class0 defaults */
-               config_val->default_class0.pause_xoff =
-                       DEFAULT0_E3A0_BRB_MAC_PAUSE_XOFF_THR;
-               config_val->default_class0.pause_xon =
-                       DEFAULT0_E3A0_BRB_MAC_PAUSE_XON_THR;
-               config_val->default_class0.full_xoff =
-                       DEFAULT0_E3A0_BRB_MAC_FULL_XOFF_THR;
-               config_val->default_class0.full_xon =
-                       DEFAULT0_E3A0_BRB_MAC_FULL_XON_THR;
-               /* Pause able */
-               config_val->pauseable_th.pause_xoff =
-                       PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
-               config_val->pauseable_th.pause_xon =
-                       PFC_E3A0_BRB_MAC_PAUSE_XON_THR_PAUSE;
-               config_val->pauseable_th.full_xoff =
-                       PFC_E3A0_BRB_MAC_FULL_XOFF_THR_PAUSE;
-               config_val->pauseable_th.full_xon =
-                       PFC_E3A0_BRB_MAC_FULL_XON_THR_PAUSE;
-               /* Non pause able*/
-               config_val->non_pauseable_th.pause_xoff =
-                       PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
-               config_val->non_pauseable_th.pause_xon =
-                       PFC_E3A0_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
-               config_val->non_pauseable_th.full_xoff =
-                       PFC_E3A0_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
-               config_val->non_pauseable_th.full_xon =
-                       PFC_E3A0_BRB_MAC_FULL_XON_THR_NON_PAUSE;
-       } else if (CHIP_IS_E3B0(bp)) {
-               /* Class0 defaults */
-               config_val->default_class0.pause_xoff =
-                       DEFAULT0_E3B0_BRB_MAC_PAUSE_XOFF_THR;
-               config_val->default_class0.pause_xon =
-                   DEFAULT0_E3B0_BRB_MAC_PAUSE_XON_THR;
-               config_val->default_class0.full_xoff =
-                   DEFAULT0_E3B0_BRB_MAC_FULL_XOFF_THR;
-               config_val->default_class0.full_xon =
-                   DEFAULT0_E3B0_BRB_MAC_FULL_XON_THR;
-
-               if (params->phy[INT_PHY].flags &
-                   FLAGS_4_PORT_MODE) {
-                       config_val->pauseable_th.pause_xoff =
-                               PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
-                       config_val->pauseable_th.pause_xon =
-                               PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_PAUSE;
-                       config_val->pauseable_th.full_xoff =
-                               PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_PAUSE;
-                       config_val->pauseable_th.full_xon =
-                               PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_PAUSE;
-                       /* Non pause able*/
-                       config_val->non_pauseable_th.pause_xoff =
-                       PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
-                       config_val->non_pauseable_th.pause_xon =
-                       PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
-                       config_val->non_pauseable_th.full_xoff =
-                       PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
-                       config_val->non_pauseable_th.full_xon =
-                       PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_NON_PAUSE;
-               } else {
-                       config_val->pauseable_th.pause_xoff =
-                               PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
-                       config_val->pauseable_th.pause_xon =
-                               PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_PAUSE;
-                       config_val->pauseable_th.full_xoff =
-                               PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_PAUSE;
-                       config_val->pauseable_th.full_xon =
-                               PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_PAUSE;
-                       /* Non pause able*/
-                       config_val->non_pauseable_th.pause_xoff =
-                               PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
-                       config_val->non_pauseable_th.pause_xon =
-                               PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
-                       config_val->non_pauseable_th.full_xoff =
-                               PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
-                       config_val->non_pauseable_th.full_xon =
-                               PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_NON_PAUSE;
-               }
-       } else
-           return -EINVAL;
-
-       return 0;
-}
-
-static void bnx2x_pfc_brb_get_e3b0_config_params(
-               struct link_params *params,
-               struct bnx2x_pfc_brb_e3b0_val
-               *e3b0_val,
-               struct bnx2x_nig_brb_pfc_port_params *pfc_params,
-               const u8 pfc_enabled)
-{
-       if (pfc_enabled && pfc_params) {
-               e3b0_val->per_class_guaranty_mode = 1;
-               e3b0_val->lb_guarantied_hyst = 80;
-
-               if (params->phy[INT_PHY].flags &
-                   FLAGS_4_PORT_MODE) {
-                       e3b0_val->full_lb_xoff_th =
-                               PFC_E3B0_4P_BRB_FULL_LB_XOFF_THR;
-                       e3b0_val->full_lb_xon_threshold =
-                               PFC_E3B0_4P_BRB_FULL_LB_XON_THR;
-                       e3b0_val->lb_guarantied =
-                               PFC_E3B0_4P_LB_GUART;
-                       e3b0_val->mac_0_class_t_guarantied =
-                               PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART;
-                       e3b0_val->mac_0_class_t_guarantied_hyst =
-                               PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART_HYST;
-                       e3b0_val->mac_1_class_t_guarantied =
-                               PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART;
-                       e3b0_val->mac_1_class_t_guarantied_hyst =
-                               PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART_HYST;
-               } else {
-                       e3b0_val->full_lb_xoff_th =
-                               PFC_E3B0_2P_BRB_FULL_LB_XOFF_THR;
-                       e3b0_val->full_lb_xon_threshold =
-                               PFC_E3B0_2P_BRB_FULL_LB_XON_THR;
-                       e3b0_val->mac_0_class_t_guarantied_hyst =
-                               PFC_E3B0_2P_BRB_MAC_0_CLASS_T_GUART_HYST;
-                       e3b0_val->mac_1_class_t_guarantied =
-                               PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART;
-                       e3b0_val->mac_1_class_t_guarantied_hyst =
-                               PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART_HYST;
-
-                       if (pfc_params->cos0_pauseable !=
-                               pfc_params->cos1_pauseable) {
-                               /* Nonpauseable= Lossy + pauseable = Lossless*/
-                               e3b0_val->lb_guarantied =
-                                       PFC_E3B0_2P_MIX_PAUSE_LB_GUART;
-                               e3b0_val->mac_0_class_t_guarantied =
-                              PFC_E3B0_2P_MIX_PAUSE_MAC_0_CLASS_T_GUART;
-                       } else if (pfc_params->cos0_pauseable) {
-                               /* Lossless +Lossless*/
-                               e3b0_val->lb_guarantied =
-                                       PFC_E3B0_2P_PAUSE_LB_GUART;
-                               e3b0_val->mac_0_class_t_guarantied =
-                                  PFC_E3B0_2P_PAUSE_MAC_0_CLASS_T_GUART;
-                       } else {
-                               /* Lossy +Lossy*/
-                               e3b0_val->lb_guarantied =
-                                       PFC_E3B0_2P_NON_PAUSE_LB_GUART;
-                               e3b0_val->mac_0_class_t_guarantied =
-                              PFC_E3B0_2P_NON_PAUSE_MAC_0_CLASS_T_GUART;
-                       }
-               }
-       } else {
-               e3b0_val->per_class_guaranty_mode = 0;
-               e3b0_val->lb_guarantied_hyst = 0;
-               e3b0_val->full_lb_xoff_th =
-                       DEFAULT_E3B0_BRB_FULL_LB_XOFF_THR;
-               e3b0_val->full_lb_xon_threshold =
-                       DEFAULT_E3B0_BRB_FULL_LB_XON_THR;
-               e3b0_val->lb_guarantied =
-                       DEFAULT_E3B0_LB_GUART;
-               e3b0_val->mac_0_class_t_guarantied =
-                       DEFAULT_E3B0_BRB_MAC_0_CLASS_T_GUART;
-               e3b0_val->mac_0_class_t_guarantied_hyst =
-                       DEFAULT_E3B0_BRB_MAC_0_CLASS_T_GUART_HYST;
-               e3b0_val->mac_1_class_t_guarantied =
-                       DEFAULT_E3B0_BRB_MAC_1_CLASS_T_GUART;
-               e3b0_val->mac_1_class_t_guarantied_hyst =
-                       DEFAULT_E3B0_BRB_MAC_1_CLASS_T_GUART_HYST;
-       }
-}
-static int bnx2x_update_pfc_brb(struct link_params *params,
-                               struct link_vars *vars,
-                               struct bnx2x_nig_brb_pfc_port_params
-                               *pfc_params)
-{
-       struct bnx2x *bp = params->bp;
-       struct bnx2x_pfc_brb_th_val config_val = { {0} };
-       struct bnx2x_pfc_brb_threshold_val *reg_th_config =
-               &config_val.pauseable_th;
-       struct bnx2x_pfc_brb_e3b0_val e3b0_val = {0};
-       const int set_pfc = params->feature_config_flags &
-               FEATURE_CONFIG_PFC_ENABLED;
-       const u8 pfc_enabled = (set_pfc && pfc_params);
-       int bnx2x_status = 0;
-       u8 port = params->port;
-
-       /* default - pause configuration */
-       reg_th_config = &config_val.pauseable_th;
-       bnx2x_status = bnx2x_pfc_brb_get_config_params(params, &config_val);
-       if (bnx2x_status)
-               return bnx2x_status;
-
-       if (pfc_enabled) {
-               /* First COS */
-               if (pfc_params->cos0_pauseable)
-                       reg_th_config = &config_val.pauseable_th;
-               else
-                       reg_th_config = &config_val.non_pauseable_th;
-       } else
-               reg_th_config = &config_val.default_class0;
-       /* The number of free blocks below which the pause signal to class 0
-        * of MAC #n is asserted. n=0,1
-        */
-       REG_WR(bp, (port) ? BRB1_REG_PAUSE_0_XOFF_THRESHOLD_1 :
-              BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 ,
-              reg_th_config->pause_xoff);
-       /* The number of free blocks above which the pause signal to class 0
-        * of MAC #n is de-asserted. n=0,1
-        */
-       REG_WR(bp, (port) ? BRB1_REG_PAUSE_0_XON_THRESHOLD_1 :
-              BRB1_REG_PAUSE_0_XON_THRESHOLD_0 , reg_th_config->pause_xon);
-       /* The number of free blocks below which the full signal to class 0
-        * of MAC #n is asserted. n=0,1
-        */
-       REG_WR(bp, (port) ? BRB1_REG_FULL_0_XOFF_THRESHOLD_1 :
-              BRB1_REG_FULL_0_XOFF_THRESHOLD_0 , reg_th_config->full_xoff);
-       /* The number of free blocks above which the full signal to class 0
-        * of MAC #n is de-asserted. n=0,1
-        */
-       REG_WR(bp, (port) ? BRB1_REG_FULL_0_XON_THRESHOLD_1 :
-              BRB1_REG_FULL_0_XON_THRESHOLD_0 , reg_th_config->full_xon);
-
-       if (pfc_enabled) {
-               /* Second COS */
-               if (pfc_params->cos1_pauseable)
-                       reg_th_config = &config_val.pauseable_th;
-               else
-                       reg_th_config = &config_val.non_pauseable_th;
-       } else
-               reg_th_config = &config_val.default_class1;
-       /* The number of free blocks below which the pause signal to
-        * class 1 of MAC #n is asserted. n=0,1
-        */
-       REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XOFF_THRESHOLD_1 :
-              BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0,
-              reg_th_config->pause_xoff);
-
-       /* The number of free blocks above which the pause signal to
-        * class 1 of MAC #n is de-asserted. n=0,1
-        */
-       REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XON_THRESHOLD_1 :
-              BRB1_REG_PAUSE_1_XON_THRESHOLD_0,
-              reg_th_config->pause_xon);
-       /* The number of free blocks below which the full signal to
-        * class 1 of MAC #n is asserted. n=0,1
-        */
-       REG_WR(bp, (port) ? BRB1_REG_FULL_1_XOFF_THRESHOLD_1 :
-              BRB1_REG_FULL_1_XOFF_THRESHOLD_0,
-              reg_th_config->full_xoff);
-       /* The number of free blocks above which the full signal to
-        * class 1 of MAC #n is de-asserted. n=0,1
-        */
-       REG_WR(bp, (port) ? BRB1_REG_FULL_1_XON_THRESHOLD_1 :
-              BRB1_REG_FULL_1_XON_THRESHOLD_0,
-              reg_th_config->full_xon);
-
-       if (CHIP_IS_E3B0(bp)) {
-               bnx2x_pfc_brb_get_e3b0_config_params(
-                       params,
-                       &e3b0_val,
-                       pfc_params,
-                       pfc_enabled);
-
-               REG_WR(bp, BRB1_REG_PER_CLASS_GUARANTY_MODE,
-                          e3b0_val.per_class_guaranty_mode);
-
-               /* The hysteresis on the guarantied buffer space for the Lb
-                * port before signaling XON.
-                */
-               REG_WR(bp, BRB1_REG_LB_GUARANTIED_HYST,
-                          e3b0_val.lb_guarantied_hyst);
-
-               /* The number of free blocks below which the full signal to the
-                * LB port is asserted.
-                */
-               REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD,
-                      e3b0_val.full_lb_xoff_th);
-               /* The number of free blocks above which the full signal to the
-                * LB port is de-asserted.
-                */
-               REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD,
-                      e3b0_val.full_lb_xon_threshold);
-               /* The number of blocks guarantied for the MAC #n port. n=0,1
-                */
-
-               /* The number of blocks guarantied for the LB port. */
-               REG_WR(bp, BRB1_REG_LB_GUARANTIED,
-                      e3b0_val.lb_guarantied);
-
-               /* The number of blocks guarantied for the MAC #n port. */
-               REG_WR(bp, BRB1_REG_MAC_GUARANTIED_0,
-                      2 * e3b0_val.mac_0_class_t_guarantied);
-               REG_WR(bp, BRB1_REG_MAC_GUARANTIED_1,
-                      2 * e3b0_val.mac_1_class_t_guarantied);
-               /* The number of blocks guarantied for class #t in MAC0. t=0,1
-                */
-               REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED,
-                      e3b0_val.mac_0_class_t_guarantied);
-               REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED,
-                      e3b0_val.mac_0_class_t_guarantied);
-               /* The hysteresis on the guarantied buffer space for class in
-                * MAC0.  t=0,1
-                */
-               REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED_HYST,
-                      e3b0_val.mac_0_class_t_guarantied_hyst);
-               REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED_HYST,
-                      e3b0_val.mac_0_class_t_guarantied_hyst);
-
-               /* The number of blocks guarantied for class #t in MAC1.t=0,1
-                */
-               REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED,
-                      e3b0_val.mac_1_class_t_guarantied);
-               REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED,
-                      e3b0_val.mac_1_class_t_guarantied);
-               /* The hysteresis on the guarantied buffer space for class #t
-                * in MAC1.  t=0,1
-                */
-               REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED_HYST,
-                      e3b0_val.mac_1_class_t_guarantied_hyst);
-               REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED_HYST,
-                      e3b0_val.mac_1_class_t_guarantied_hyst);
-       }
-
-       return bnx2x_status;
-}
-
 /******************************************************************************
 * Description:
 *  This function is needed because NIG ARB_CREDIT_WEIGHT_X are
@@ -2529,16 +2087,6 @@ static void bnx2x_update_mng(struct link_params *params, u32 link_status)
                        port_mb[params->port].link_status), link_status);
 }
 
-static void bnx2x_update_mng_eee(struct link_params *params, u32 eee_status)
-{
-       struct bnx2x *bp = params->bp;
-
-       if (bnx2x_eee_has_cap(params))
-               REG_WR(bp, params->shmem2_base +
-                      offsetof(struct shmem2_region,
-                               eee_status[params->port]), eee_status);
-}
-
 static void bnx2x_update_pfc_nig(struct link_params *params,
                struct link_vars *vars,
                struct bnx2x_nig_brb_pfc_port_params *nig_params)
@@ -2658,11 +2206,6 @@ int bnx2x_update_pfc(struct link_params *params,
        /* Update NIG params */
        bnx2x_update_pfc_nig(params, vars, pfc_params);
 
-       /* Update BRB params */
-       bnx2x_status = bnx2x_update_pfc_brb(params, vars, pfc_params);
-       if (bnx2x_status)
-               return bnx2x_status;
-
        if (!vars->link_up)
                return bnx2x_status;
 
@@ -2827,16 +2370,18 @@ static int bnx2x_bmac2_enable(struct link_params *params,
 
 static int bnx2x_bmac_enable(struct link_params *params,
                             struct link_vars *vars,
-                            u8 is_lb)
+                            u8 is_lb, u8 reset_bmac)
 {
        int rc = 0;
        u8 port = params->port;
        struct bnx2x *bp = params->bp;
        u32 val;
        /* Reset and unreset the BigMac */
-       REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
-              (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
-       usleep_range(1000, 2000);
+       if (reset_bmac) {
+               REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+                      (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
+               usleep_range(1000, 2000);
+       }
 
        REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
               (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
@@ -2868,37 +2413,28 @@ static int bnx2x_bmac_enable(struct link_params *params,
        return rc;
 }
 
-static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port)
+static void bnx2x_set_bmac_rx(struct bnx2x *bp, u32 chip_id, u8 port, u8 en)
 {
        u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
                        NIG_REG_INGRESS_BMAC0_MEM;
        u32 wb_data[2];
        u32 nig_bmac_enable = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4);
 
+       if (CHIP_IS_E2(bp))
+               bmac_addr += BIGMAC2_REGISTER_BMAC_CONTROL;
+       else
+               bmac_addr += BIGMAC_REGISTER_BMAC_CONTROL;
        /* Only if the bmac is out of reset */
        if (REG_RD(bp, MISC_REG_RESET_REG_2) &
                        (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port) &&
            nig_bmac_enable) {
-
-               if (CHIP_IS_E2(bp)) {
-                       /* Clear Rx Enable bit in BMAC_CONTROL register */
-                       REG_RD_DMAE(bp, bmac_addr +
-                                   BIGMAC2_REGISTER_BMAC_CONTROL,
-                                   wb_data, 2);
-                       wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
-                       REG_WR_DMAE(bp, bmac_addr +
-                                   BIGMAC2_REGISTER_BMAC_CONTROL,
-                                   wb_data, 2);
-               } else {
-                       /* Clear Rx Enable bit in BMAC_CONTROL register */
-                       REG_RD_DMAE(bp, bmac_addr +
-                                       BIGMAC_REGISTER_BMAC_CONTROL,
-                                       wb_data, 2);
+               /* Set or clear Rx Enable bit in BMAC_CONTROL register */
+               REG_RD_DMAE(bp, bmac_addr, wb_data, 2);
+               if (en)
+                       wb_data[0] |= BMAC_CONTROL_RX_ENABLE;
+               else
                        wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
-                       REG_WR_DMAE(bp, bmac_addr +
-                                       BIGMAC_REGISTER_BMAC_CONTROL,
-                                       wb_data, 2);
-               }
+               REG_WR_DMAE(bp, bmac_addr, wb_data, 2);
                usleep_range(1000, 2000);
        }
 }
@@ -3233,6 +2769,245 @@ static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
                               EMAC_MDIO_STATUS_10MB);
        return rc;
 }
+
+/******************************************************************/
+/*                     EEE section                                */
+/******************************************************************/
+static u8 bnx2x_eee_has_cap(struct link_params *params)
+{
+       struct bnx2x *bp = params->bp;
+
+       if (REG_RD(bp, params->shmem2_base) <=
+                  offsetof(struct shmem2_region, eee_status[params->port]))
+               return 0;
+
+       return 1;
+}
+
+static int bnx2x_eee_nvram_to_time(u32 nvram_mode, u32 *idle_timer)
+{
+       switch (nvram_mode) {
+       case PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED:
+               *idle_timer = EEE_MODE_NVRAM_BALANCED_TIME;
+               break;
+       case PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE:
+               *idle_timer = EEE_MODE_NVRAM_AGGRESSIVE_TIME;
+               break;
+       case PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY:
+               *idle_timer = EEE_MODE_NVRAM_LATENCY_TIME;
+               break;
+       default:
+               *idle_timer = 0;
+               break;
+       }
+
+       return 0;
+}
+
+static int bnx2x_eee_time_to_nvram(u32 idle_timer, u32 *nvram_mode)
+{
+       switch (idle_timer) {
+       case EEE_MODE_NVRAM_BALANCED_TIME:
+               *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED;
+               break;
+       case EEE_MODE_NVRAM_AGGRESSIVE_TIME:
+               *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE;
+               break;
+       case EEE_MODE_NVRAM_LATENCY_TIME:
+               *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY;
+               break;
+       default:
+               *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED;
+               break;
+       }
+
+       return 0;
+}
+
+static u32 bnx2x_eee_calc_timer(struct link_params *params)
+{
+       u32 eee_mode, eee_idle;
+       struct bnx2x *bp = params->bp;
+
+       if (params->eee_mode & EEE_MODE_OVERRIDE_NVRAM) {
+               if (params->eee_mode & EEE_MODE_OUTPUT_TIME) {
+                       /* time value in eee_mode --> used directly */
+                       eee_idle = params->eee_mode & EEE_MODE_TIMER_MASK;
+               } else {
+                       /* hsi value in eee_mode --> time */
+                       if (bnx2x_eee_nvram_to_time(params->eee_mode &
+                                                   EEE_MODE_NVRAM_MASK,
+                                                   &eee_idle))
+                               return 0;
+               }
+       } else {
+               /* hsi values in nvram --> time*/
+               eee_mode = ((REG_RD(bp, params->shmem_base +
+                                   offsetof(struct shmem_region, dev_info.
+                                   port_feature_config[params->port].
+                                   eee_power_mode)) &
+                            PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
+                           PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
+
+               if (bnx2x_eee_nvram_to_time(eee_mode, &eee_idle))
+                       return 0;
+       }
+
+       return eee_idle;
+}
+
+static int bnx2x_eee_set_timers(struct link_params *params,
+                                  struct link_vars *vars)
+{
+       u32 eee_idle = 0, eee_mode;
+       struct bnx2x *bp = params->bp;
+
+       eee_idle = bnx2x_eee_calc_timer(params);
+
+       if (eee_idle) {
+               REG_WR(bp, MISC_REG_CPMU_LP_IDLE_THR_P0 + (params->port << 2),
+                      eee_idle);
+       } else if ((params->eee_mode & EEE_MODE_ENABLE_LPI) &&
+                  (params->eee_mode & EEE_MODE_OVERRIDE_NVRAM) &&
+                  (params->eee_mode & EEE_MODE_OUTPUT_TIME)) {
+               DP(NETIF_MSG_LINK, "Error: Tx LPI is enabled with timer 0\n");
+               return -EINVAL;
+       }
+
+       vars->eee_status &= ~(SHMEM_EEE_TIMER_MASK | SHMEM_EEE_TIME_OUTPUT_BIT);
+       if (params->eee_mode & EEE_MODE_OUTPUT_TIME) {
+               /* eee_idle in 1u --> eee_status in 16u */
+               eee_idle >>= 4;
+               vars->eee_status |= (eee_idle & SHMEM_EEE_TIMER_MASK) |
+                                   SHMEM_EEE_TIME_OUTPUT_BIT;
+       } else {
+               if (bnx2x_eee_time_to_nvram(eee_idle, &eee_mode))
+                       return -EINVAL;
+               vars->eee_status |= eee_mode;
+       }
+
+       return 0;
+}
+
+static int bnx2x_eee_initial_config(struct link_params *params,
+                                    struct link_vars *vars, u8 mode)
+{
+       vars->eee_status |= ((u32) mode) << SHMEM_EEE_SUPPORTED_SHIFT;
+
+       /* Propagate params' bits --> vars (for migration exposure) */
+       if (params->eee_mode & EEE_MODE_ENABLE_LPI)
+               vars->eee_status |= SHMEM_EEE_LPI_REQUESTED_BIT;
+       else
+               vars->eee_status &= ~SHMEM_EEE_LPI_REQUESTED_BIT;
+
+       if (params->eee_mode & EEE_MODE_ADV_LPI)
+               vars->eee_status |= SHMEM_EEE_REQUESTED_BIT;
+       else
+               vars->eee_status &= ~SHMEM_EEE_REQUESTED_BIT;
+
+       return bnx2x_eee_set_timers(params, vars);
+}
+
+static int bnx2x_eee_disable(struct bnx2x_phy *phy,
+                               struct link_params *params,
+                               struct link_vars *vars)
+{
+       struct bnx2x *bp = params->bp;
+
+       /* Make certain LPI is disabled */
+       REG_WR(bp, MISC_REG_CPMU_LP_FW_ENABLE_P0 + (params->port << 2), 0);
+
+       bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, 0x0);
+
+       vars->eee_status &= ~SHMEM_EEE_ADV_STATUS_MASK;
+
+       return 0;
+}
+
+static int bnx2x_eee_advertise(struct bnx2x_phy *phy,
+                                 struct link_params *params,
+                                 struct link_vars *vars, u8 modes)
+{
+       struct bnx2x *bp = params->bp;
+       u16 val = 0;
+
+       /* Mask events preventing LPI generation */
+       REG_WR(bp, MISC_REG_CPMU_LP_MASK_EXT_P0 + (params->port << 2), 0xfc20);
+
+       if (modes & SHMEM_EEE_10G_ADV) {
+               DP(NETIF_MSG_LINK, "Advertise 10GBase-T EEE\n");
+               val |= 0x8;
+       }
+       if (modes & SHMEM_EEE_1G_ADV) {
+               DP(NETIF_MSG_LINK, "Advertise 1GBase-T EEE\n");
+               val |= 0x4;
+       }
+
+       bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, val);
+
+       vars->eee_status &= ~SHMEM_EEE_ADV_STATUS_MASK;
+       vars->eee_status |= (modes << SHMEM_EEE_ADV_STATUS_SHIFT);
+
+       return 0;
+}
+
+static void bnx2x_update_mng_eee(struct link_params *params, u32 eee_status)
+{
+       struct bnx2x *bp = params->bp;
+
+       if (bnx2x_eee_has_cap(params))
+               REG_WR(bp, params->shmem2_base +
+                      offsetof(struct shmem2_region,
+                               eee_status[params->port]), eee_status);
+}
+
+static void bnx2x_eee_an_resolve(struct bnx2x_phy *phy,
+                                 struct link_params *params,
+                                 struct link_vars *vars)
+{
+       struct bnx2x *bp = params->bp;
+       u16 adv = 0, lp = 0;
+       u32 lp_adv = 0;
+       u8 neg = 0;
+
+       bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, &adv);
+       bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_LP_EEE_ADV, &lp);
+
+       if (lp & 0x2) {
+               lp_adv |= SHMEM_EEE_100M_ADV;
+               if (adv & 0x2) {
+                       if (vars->line_speed == SPEED_100)
+                               neg = 1;
+                       DP(NETIF_MSG_LINK, "EEE negotiated - 100M\n");
+               }
+       }
+       if (lp & 0x14) {
+               lp_adv |= SHMEM_EEE_1G_ADV;
+               if (adv & 0x14) {
+                       if (vars->line_speed == SPEED_1000)
+                               neg = 1;
+                       DP(NETIF_MSG_LINK, "EEE negotiated - 1G\n");
+               }
+       }
+       if (lp & 0x68) {
+               lp_adv |= SHMEM_EEE_10G_ADV;
+               if (adv & 0x68) {
+                       if (vars->line_speed == SPEED_10000)
+                               neg = 1;
+                       DP(NETIF_MSG_LINK, "EEE negotiated - 10G\n");
+               }
+       }
+
+       vars->eee_status &= ~SHMEM_EEE_LP_ADV_STATUS_MASK;
+       vars->eee_status |= (lp_adv << SHMEM_EEE_LP_ADV_STATUS_SHIFT);
+
+       if (neg) {
+               DP(NETIF_MSG_LINK, "EEE is active\n");
+               vars->eee_status |= SHMEM_EEE_ACTIVE_BIT;
+       }
+
+}
+
 /******************************************************************/
 /*                     BSC access functions from E3              */
 /******************************************************************/
@@ -3754,6 +3529,19 @@ static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy,
  * init configuration, and set/clear SGMII flag. Internal
  * phy init is done purely in phy_init stage.
  */
+
+static void bnx2x_warpcore_set_lpi_passthrough(struct bnx2x_phy *phy,
+                                              struct link_params *params)
+{
+       struct bnx2x *bp = params->bp;
+
+       DP(NETIF_MSG_LINK, "Configure WC for LPI pass through\n");
+       bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+                        MDIO_WC_REG_EEE_COMBO_CONTROL0, 0x7c);
+       bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+                                MDIO_WC_REG_DIGITAL4_MISC5, 0xc000);
+}
+
 static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
                                        struct link_params *params,
                                        struct link_vars *vars) {
@@ -4013,13 +3801,7 @@ static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy,
        bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
                                 MDIO_WC_REG_DIGITAL4_MISC3, 0x8080);
 
-       /* Enable LPI pass through */
-       DP(NETIF_MSG_LINK, "Configure WC for LPI pass through\n");
-       bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
-                        MDIO_WC_REG_EEE_COMBO_CONTROL0,
-                        0x7c);
-       bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
-                                MDIO_WC_REG_DIGITAL4_MISC5, 0xc000);
+       bnx2x_warpcore_set_lpi_passthrough(phy, params);
 
        /* 10G XFI Full Duplex */
        bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
@@ -4116,6 +3898,8 @@ static void bnx2x_warpcore_set_sgmii_speed(struct bnx2x_phy *phy,
        bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
                         MDIO_WC_REG_RX66_CONTROL, val16 & ~(3<<13));
 
+       bnx2x_warpcore_set_lpi_passthrough(phy, params);
+
        if (always_autoneg || phy->req_line_speed == SPEED_AUTO_NEG) {
                /* SGMII Autoneg */
                bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
@@ -4409,7 +4193,7 @@ static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
                           "serdes_net_if = 0x%x\n",
                       vars->line_speed, serdes_net_if);
        bnx2x_set_aer_mmd(params, phy);
-
+       bnx2x_warpcore_reset_lane(bp, phy, 1);
        vars->phy_flags |= PHY_XGXS_FLAG;
        if ((serdes_net_if == PORT_HW_CFG_NET_SERDES_IF_SGMII) ||
            (phy->req_line_speed &&
@@ -4718,6 +4502,10 @@ void bnx2x_link_status_update(struct link_params *params,
        vars->link_status = REG_RD(bp, params->shmem_base +
                                   offsetof(struct shmem_region,
                                            port_mb[port].link_status));
+       if (bnx2x_eee_has_cap(params))
+               vars->eee_status = REG_RD(bp, params->shmem2_base +
+                                         offsetof(struct shmem2_region,
+                                                  eee_status[params->port]));
 
        vars->phy_flags = PHY_XGXS_FLAG;
        bnx2x_sync_link(params, vars);
@@ -6530,25 +6318,21 @@ static int bnx2x_update_link_down(struct link_params *params,
        usleep_range(10000, 20000);
        /* Reset BigMac/Xmac */
        if (CHIP_IS_E1x(bp) ||
-           CHIP_IS_E2(bp)) {
-               bnx2x_bmac_rx_disable(bp, params->port);
-               REG_WR(bp, GRCBASE_MISC +
-                      MISC_REGISTERS_RESET_REG_2_CLEAR,
-              (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
-       }
+           CHIP_IS_E2(bp))
+               bnx2x_set_bmac_rx(bp, params->chip_id, params->port, 0);
+
        if (CHIP_IS_E3(bp)) {
                /* Prevent LPI Generation by chip */
                REG_WR(bp, MISC_REG_CPMU_LP_FW_ENABLE_P0 + (params->port << 2),
                       0);
-               REG_WR(bp, MISC_REG_CPMU_LP_DR_ENABLE, 0);
                REG_WR(bp, MISC_REG_CPMU_LP_MASK_ENT_P0 + (params->port << 2),
                       0);
                vars->eee_status &= ~(SHMEM_EEE_LP_ADV_STATUS_MASK |
                                      SHMEM_EEE_ACTIVE_BIT);
 
                bnx2x_update_mng_eee(params, vars->eee_status);
-               bnx2x_xmac_disable(params);
-               bnx2x_umac_disable(params);
+               bnx2x_set_xmac_rxtx(params, 0);
+               bnx2x_set_umac_rxtx(params, 0);
        }
 
        return 0;
@@ -6600,7 +6384,7 @@ static int bnx2x_update_link_up(struct link_params *params,
        if ((CHIP_IS_E1x(bp) ||
             CHIP_IS_E2(bp))) {
                if (link_10g) {
-                       if (bnx2x_bmac_enable(params, vars, 0) ==
+                       if (bnx2x_bmac_enable(params, vars, 0, 1) ==
                            -ESRCH) {
                                DP(NETIF_MSG_LINK, "Found errors on BMAC\n");
                                vars->link_up = 0;
@@ -7207,6 +6991,22 @@ static void bnx2x_8073_set_pause_cl37(struct link_params *params,
        msleep(500);
 }
 
+static void bnx2x_8073_specific_func(struct bnx2x_phy *phy,
+                                    struct link_params *params,
+                                    u32 action)
+{
+       struct bnx2x *bp = params->bp;
+       switch (action) {
+       case PHY_INIT:
+               /* Enable LASI */
+               bnx2x_cl45_write(bp, phy,
+                                MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, (1<<2));
+               bnx2x_cl45_write(bp, phy,
+                                MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL,  0x0004);
+               break;
+       }
+}
+
 static int bnx2x_8073_config_init(struct bnx2x_phy *phy,
                                  struct link_params *params,
                                  struct link_vars *vars)
@@ -7227,12 +7027,7 @@ static int bnx2x_8073_config_init(struct bnx2x_phy *phy,
        bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
                       MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
 
-       /* Enable LASI */
-       bnx2x_cl45_write(bp, phy,
-                        MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, (1<<2));
-       bnx2x_cl45_write(bp, phy,
-                        MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL,  0x0004);
-
+       bnx2x_8073_specific_func(phy, params, PHY_INIT);
        bnx2x_8073_set_pause_cl37(params, phy, vars);
 
        bnx2x_cl45_read(bp, phy,
@@ -8267,7 +8062,7 @@ static void bnx2x_8727_specific_func(struct bnx2x_phy *phy,
                                     u32 action)
 {
        struct bnx2x *bp = params->bp;
-
+       u16 val;
        switch (action) {
        case DISABLE_TX:
                bnx2x_sfp_set_transmitter(params, phy, 0);
@@ -8276,6 +8071,40 @@ static void bnx2x_8727_specific_func(struct bnx2x_phy *phy,
                if (!(phy->flags & FLAGS_SFP_NOT_APPROVED))
                        bnx2x_sfp_set_transmitter(params, phy, 1);
                break;
+       case PHY_INIT:
+               bnx2x_cl45_write(bp, phy,
+                                MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
+                                (1<<2) | (1<<5));
+               bnx2x_cl45_write(bp, phy,
+                                MDIO_PMA_DEVAD, MDIO_PMA_LASI_TXCTRL,
+                                0);
+               bnx2x_cl45_write(bp, phy,
+                                MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x0006);
+               /* Make MOD_ABS give interrupt on change */
+               bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
+                               MDIO_PMA_REG_8727_PCS_OPT_CTRL,
+                               &val);
+               val |= (1<<12);
+               if (phy->flags & FLAGS_NOC)
+                       val |= (3<<5);
+               /* Set 8727 GPIOs to input to allow reading from the 8727 GPIO0
+                * status which reflects SFP+ module over-current
+                */
+               if (!(phy->flags & FLAGS_NOC))
+                       val &= 0xff8f; /* Reset bits 4-6 */
+               bnx2x_cl45_write(bp, phy,
+                                MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_OPT_CTRL,
+                                val);
+
+               /* Set 2-wire transfer rate of SFP+ module EEPROM
+                * to 100Khz since some DACs(direct attached cables) do
+                * not work at 400Khz.
+                */
+               bnx2x_cl45_write(bp, phy,
+                                MDIO_PMA_DEVAD,
+                                MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR,
+                                0xa001);
+               break;
        default:
                DP(NETIF_MSG_LINK, "Function 0x%x not supported by 8727\n",
                   action);
@@ -9058,28 +8887,15 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
                                  struct link_vars *vars)
 {
        u32 tx_en_mode;
-       u16 tmp1, val, mod_abs, tmp2;
-       u16 rx_alarm_ctrl_val;
-       u16 lasi_ctrl_val;
+       u16 tmp1, mod_abs, tmp2;
        struct bnx2x *bp = params->bp;
        /* Enable PMD link, MOD_ABS_FLT, and 1G link alarm */
 
        bnx2x_wait_reset_complete(bp, phy, params);
-       rx_alarm_ctrl_val = (1<<2) | (1<<5) ;
-       /* Should be 0x6 to enable XS on Tx side. */
-       lasi_ctrl_val = 0x0006;
 
        DP(NETIF_MSG_LINK, "Initializing BCM8727\n");
-       /* Enable LASI */
-       bnx2x_cl45_write(bp, phy,
-                        MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
-                        rx_alarm_ctrl_val);
-       bnx2x_cl45_write(bp, phy,
-                        MDIO_PMA_DEVAD, MDIO_PMA_LASI_TXCTRL,
-                        0);
-       bnx2x_cl45_write(bp, phy,
-                        MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, lasi_ctrl_val);
 
+       bnx2x_8727_specific_func(phy, params, PHY_INIT);
        /* Initially configure MOD_ABS to interrupt when module is
         * presence( bit 8)
         */
@@ -9095,25 +8911,9 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
        bnx2x_cl45_write(bp, phy,
                         MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
 
-
        /* Enable/Disable PHY transmitter output */
        bnx2x_set_disable_pmd_transmit(params, phy, 0);
 
-       /* Make MOD_ABS give interrupt on change */
-       bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_OPT_CTRL,
-                       &val);
-       val |= (1<<12);
-       if (phy->flags & FLAGS_NOC)
-               val |= (3<<5);
-
-       /* Set 8727 GPIOs to input to allow reading from the 8727 GPIO0
-        * status which reflect SFP+ module over-current
-        */
-       if (!(phy->flags & FLAGS_NOC))
-               val &= 0xff8f; /* Reset bits 4-6 */
-       bnx2x_cl45_write(bp, phy,
-                        MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_OPT_CTRL, val);
-
        bnx2x_8727_power_module(bp, phy, 1);
 
        bnx2x_cl45_read(bp, phy,
@@ -9123,13 +8923,7 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
                        MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &tmp1);
 
        bnx2x_8727_config_speed(phy, params);
-       /* Set 2-wire transfer rate of SFP+ module EEPROM
-        * to 100Khz since some DACs(direct attached cables) do
-        * not work at 400Khz.
-        */
-       bnx2x_cl45_write(bp, phy,
-                        MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR,
-                        0xa001);
+
 
        /* Set TX PreEmphasis if needed */
        if ((params->feature_config_flags &
@@ -9558,6 +9352,29 @@ static void bnx2x_848xx_set_led(struct bnx2x *bp,
                         0xFFFB, 0xFFFD);
 }
 
+static void bnx2x_848xx_specific_func(struct bnx2x_phy *phy,
+                                     struct link_params *params,
+                                     u32 action)
+{
+       struct bnx2x *bp = params->bp;
+       switch (action) {
+       case PHY_INIT:
+               if (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
+                       /* Save spirom version */
+                       bnx2x_save_848xx_spirom_version(phy, bp, params->port);
+               }
+               /* This phy uses the NIG latch mechanism since link indication
+                * arrives through its LED4 and not via its LASI signal, so we
+                * get steady signal instead of clear on read
+                */
+               bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4,
+                             1 << NIG_LATCH_BC_ENABLE_MI_INT);
+
+               bnx2x_848xx_set_led(bp, phy);
+               break;
+       }
+}
+
 static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
                                       struct link_params *params,
                                       struct link_vars *vars)
@@ -9565,22 +9382,10 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
        struct bnx2x *bp = params->bp;
        u16 autoneg_val, an_1000_val, an_10_100_val, an_10g_val;
 
-       if (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
-               /* Save spirom version */
-               bnx2x_save_848xx_spirom_version(phy, bp, params->port);
-       }
-       /* This phy uses the NIG latch mechanism since link indication
-        * arrives through its LED4 and not via its LASI signal, so we
-        * get steady signal instead of clear on read
-        */
-       bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4,
-                     1 << NIG_LATCH_BC_ENABLE_MI_INT);
-
+       bnx2x_848xx_specific_func(phy, params, PHY_INIT);
        bnx2x_cl45_write(bp, phy,
                         MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x0000);
 
-       bnx2x_848xx_set_led(bp, phy);
-
        /* set 1000 speed advertisement */
        bnx2x_cl45_read(bp, phy,
                        MDIO_AN_DEVAD, MDIO_AN_REG_8481_1000T_CTRL,
@@ -9887,39 +9692,6 @@ static int bnx2x_84833_hw_reset_phy(struct bnx2x_phy *phy,
        return 0;
 }
 
-static int bnx2x_8483x_eee_timers(struct link_params *params,
-                                  struct link_vars *vars)
-{
-       u32 eee_idle = 0, eee_mode;
-       struct bnx2x *bp = params->bp;
-
-       eee_idle = bnx2x_eee_calc_timer(params);
-
-       if (eee_idle) {
-               REG_WR(bp, MISC_REG_CPMU_LP_IDLE_THR_P0 + (params->port << 2),
-                      eee_idle);
-       } else if ((params->eee_mode & EEE_MODE_ENABLE_LPI) &&
-                  (params->eee_mode & EEE_MODE_OVERRIDE_NVRAM) &&
-                  (params->eee_mode & EEE_MODE_OUTPUT_TIME)) {
-               DP(NETIF_MSG_LINK, "Error: Tx LPI is enabled with timer 0\n");
-               return -EINVAL;
-       }
-
-       vars->eee_status &= ~(SHMEM_EEE_TIMER_MASK | SHMEM_EEE_TIME_OUTPUT_BIT);
-       if (params->eee_mode & EEE_MODE_OUTPUT_TIME) {
-               /* eee_idle in 1u --> eee_status in 16u */
-               eee_idle >>= 4;
-               vars->eee_status |= (eee_idle & SHMEM_EEE_TIMER_MASK) |
-                                   SHMEM_EEE_TIME_OUTPUT_BIT;
-       } else {
-               if (bnx2x_eee_time_to_nvram(eee_idle, &eee_mode))
-                       return -EINVAL;
-               vars->eee_status |= eee_mode;
-       }
-
-       return 0;
-}
-
 static int bnx2x_8483x_disable_eee(struct bnx2x_phy *phy,
                                   struct link_params *params,
                                   struct link_vars *vars)
@@ -9930,10 +9702,6 @@ static int bnx2x_8483x_disable_eee(struct bnx2x_phy *phy,
 
        DP(NETIF_MSG_LINK, "Don't Advertise 10GBase-T EEE\n");
 
-       /* Make Certain LPI is disabled */
-       REG_WR(bp, MISC_REG_CPMU_LP_FW_ENABLE_P0 + (params->port << 2), 0);
-       REG_WR(bp, MISC_REG_CPMU_LP_DR_ENABLE, 0);
-
        /* Prevent Phy from working in EEE and advertising it */
        rc = bnx2x_84833_cmd_hdlr(phy, params,
                PHY84833_CMD_SET_EEE_MODE, &cmd_args, 1);
@@ -9942,10 +9710,7 @@ static int bnx2x_8483x_disable_eee(struct bnx2x_phy *phy,
                return rc;
        }
 
-       bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, 0);
-       vars->eee_status &= ~SHMEM_EEE_ADV_STATUS_MASK;
-
-       return 0;
+       return bnx2x_eee_disable(phy, params, vars);
 }
 
 static int bnx2x_8483x_enable_eee(struct bnx2x_phy *phy,
@@ -9956,8 +9721,6 @@ static int bnx2x_8483x_enable_eee(struct bnx2x_phy *phy,
        struct bnx2x *bp = params->bp;
        u16 cmd_args = 1;
 
-       DP(NETIF_MSG_LINK, "Advertise 10GBase-T EEE\n");
-
        rc = bnx2x_84833_cmd_hdlr(phy, params,
                PHY84833_CMD_SET_EEE_MODE, &cmd_args, 1);
        if (rc) {
@@ -9965,15 +9728,7 @@ static int bnx2x_8483x_enable_eee(struct bnx2x_phy *phy,
                return rc;
        }
 
-       bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, 0x8);
-
-       /* Mask events preventing LPI generation */
-       REG_WR(bp, MISC_REG_CPMU_LP_MASK_EXT_P0 + (params->port << 2), 0xfc20);
-
-       vars->eee_status &= ~SHMEM_EEE_ADV_STATUS_MASK;
-       vars->eee_status |= (SHMEM_EEE_10G_ADV << SHMEM_EEE_ADV_STATUS_SHIFT);
-
-       return 0;
+       return bnx2x_eee_advertise(phy, params, vars, SHMEM_EEE_10G_ADV);
 }
 
 #define PHY84833_CONSTANT_LATENCY 1193
@@ -10105,22 +9860,10 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
                        MDIO_84833_TOP_CFG_FW_REV, &val);
 
        /* Configure EEE support */
-       if ((val >= MDIO_84833_TOP_CFG_FW_EEE) && bnx2x_eee_has_cap(params)) {
-               phy->flags |= FLAGS_EEE_10GBT;
-               vars->eee_status |= SHMEM_EEE_10G_ADV <<
-                                   SHMEM_EEE_SUPPORTED_SHIFT;
-               /* Propogate params' bits --> vars (for migration exposure) */
-               if (params->eee_mode & EEE_MODE_ENABLE_LPI)
-                       vars->eee_status |= SHMEM_EEE_LPI_REQUESTED_BIT;
-               else
-                       vars->eee_status &= ~SHMEM_EEE_LPI_REQUESTED_BIT;
-
-               if (params->eee_mode & EEE_MODE_ADV_LPI)
-                       vars->eee_status |= SHMEM_EEE_REQUESTED_BIT;
-               else
-                       vars->eee_status &= ~SHMEM_EEE_REQUESTED_BIT;
-
-               rc = bnx2x_8483x_eee_timers(params, vars);
+       if ((val >= MDIO_84833_TOP_CFG_FW_EEE) &&
+           (val != MDIO_84833_TOP_CFG_FW_NO_EEE) &&
+           bnx2x_eee_has_cap(params)) {
+               rc = bnx2x_eee_initial_config(params, vars, SHMEM_EEE_10G_ADV);
                if (rc) {
                        DP(NETIF_MSG_LINK, "Failed to configure EEE timers\n");
                        bnx2x_8483x_disable_eee(phy, params, vars);
@@ -10139,7 +9882,6 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
                        return rc;
                }
        } else {
-               phy->flags &= ~FLAGS_EEE_10GBT;
                vars->eee_status &= ~SHMEM_EEE_SUPPORTED_MASK;
        }
 
@@ -10278,29 +10020,8 @@ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
                                LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
 
                /* Determine if EEE was negotiated */
-               if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
-                       u32 eee_shmem = 0;
-
-                       bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
-                                       MDIO_AN_REG_EEE_ADV, &val1);
-                       bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
-                                       MDIO_AN_REG_LP_EEE_ADV, &val2);
-                       if ((val1 & val2) & 0x8) {
-                               DP(NETIF_MSG_LINK, "EEE negotiated\n");
-                               vars->eee_status |= SHMEM_EEE_ACTIVE_BIT;
-                       }
-
-                       if (val2 & 0x12)
-                               eee_shmem |= SHMEM_EEE_100M_ADV;
-                       if (val2 & 0x4)
-                               eee_shmem |= SHMEM_EEE_1G_ADV;
-                       if (val2 & 0x68)
-                               eee_shmem |= SHMEM_EEE_10G_ADV;
-
-                       vars->eee_status &= ~SHMEM_EEE_LP_ADV_STATUS_MASK;
-                       vars->eee_status |= (eee_shmem <<
-                                            SHMEM_EEE_LP_ADV_STATUS_SHIFT);
-               }
+               if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+                       bnx2x_eee_an_resolve(phy, params, vars);
        }
 
        return link_up;
@@ -10569,6 +10290,35 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
 /******************************************************************/
 /*                     54618SE PHY SECTION                       */
 /******************************************************************/
+static void bnx2x_54618se_specific_func(struct bnx2x_phy *phy,
+                                       struct link_params *params,
+                                       u32 action)
+{
+       struct bnx2x *bp = params->bp;
+       u16 temp;
+       switch (action) {
+       case PHY_INIT:
+               /* Configure LED4: set to INTR (0x6). */
+               /* Accessing shadow register 0xe. */
+               bnx2x_cl22_write(bp, phy,
+                                MDIO_REG_GPHY_SHADOW,
+                                MDIO_REG_GPHY_SHADOW_LED_SEL2);
+               bnx2x_cl22_read(bp, phy,
+                               MDIO_REG_GPHY_SHADOW,
+                               &temp);
+               temp &= ~(0xf << 4);
+               temp |= (0x6 << 4);
+               bnx2x_cl22_write(bp, phy,
+                                MDIO_REG_GPHY_SHADOW,
+                                MDIO_REG_GPHY_SHADOW_WR_ENA | temp);
+               /* Configure INTR based on link status change. */
+               bnx2x_cl22_write(bp, phy,
+                                MDIO_REG_INTR_MASK,
+                                ~MDIO_REG_INTR_MASK_LINK_STATUS);
+               break;
+       }
+}
+
 static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
                                               struct link_params *params,
                                               struct link_vars *vars)
@@ -10606,24 +10356,8 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
        /* Wait for GPHY to reset */
        msleep(50);
 
-       /* Configure LED4: set to INTR (0x6). */
-       /* Accessing shadow register 0xe. */
-       bnx2x_cl22_write(bp, phy,
-                       MDIO_REG_GPHY_SHADOW,
-                       MDIO_REG_GPHY_SHADOW_LED_SEL2);
-       bnx2x_cl22_read(bp, phy,
-                       MDIO_REG_GPHY_SHADOW,
-                       &temp);
-       temp &= ~(0xf << 4);
-       temp |= (0x6 << 4);
-       bnx2x_cl22_write(bp, phy,
-                       MDIO_REG_GPHY_SHADOW,
-                       MDIO_REG_GPHY_SHADOW_WR_ENA | temp);
-       /* Configure INTR based on link status change. */
-       bnx2x_cl22_write(bp, phy,
-                       MDIO_REG_INTR_MASK,
-                       ~MDIO_REG_INTR_MASK_LINK_STATUS);
 
+       bnx2x_54618se_specific_func(phy, params, PHY_INIT);
        /* Flip the signal detect polarity (set 0x1c.0x1e[8]). */
        bnx2x_cl22_write(bp, phy,
                        MDIO_REG_GPHY_SHADOW,
@@ -10728,28 +10462,52 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
                DP(NETIF_MSG_LINK, "Setting 10M force\n");
        }
 
-       /* Check if we should turn on Auto-GrEEEn */
-       bnx2x_cl22_read(bp, phy, MDIO_REG_GPHY_PHYID_LSB, &temp);
-       if (temp == MDIO_REG_GPHY_ID_54618SE) {
-               if (params->feature_config_flags &
-                   FEATURE_CONFIG_AUTOGREEEN_ENABLED) {
-                       temp = 6;
-                       DP(NETIF_MSG_LINK, "Enabling Auto-GrEEEn\n");
+       if ((phy->flags & FLAGS_EEE) && bnx2x_eee_has_cap(params)) {
+               int rc;
+
+               bnx2x_cl22_write(bp, phy, MDIO_REG_GPHY_EXP_ACCESS,
+                                MDIO_REG_GPHY_EXP_ACCESS_TOP |
+                                MDIO_REG_GPHY_EXP_TOP_2K_BUF);
+               bnx2x_cl22_read(bp, phy, MDIO_REG_GPHY_EXP_ACCESS_GATE, &temp);
+               temp &= 0xfffe;
+               bnx2x_cl22_write(bp, phy, MDIO_REG_GPHY_EXP_ACCESS_GATE, temp);
+
+               rc = bnx2x_eee_initial_config(params, vars, SHMEM_EEE_1G_ADV);
+               if (rc) {
+                       DP(NETIF_MSG_LINK, "Failed to configure EEE timers\n");
+                       bnx2x_eee_disable(phy, params, vars);
+               } else if ((params->eee_mode & EEE_MODE_ADV_LPI) &&
+                          (phy->req_duplex == DUPLEX_FULL) &&
+                          (bnx2x_eee_calc_timer(params) ||
+                           !(params->eee_mode & EEE_MODE_ENABLE_LPI))) {
+                       /* Need to advertise EEE only when requested,
+                        * and either no LPI assertion was requested,
+                        * or it was requested and a valid timer was set.
+                        * Also notice full duplex is required for EEE.
+                        */
+                       bnx2x_eee_advertise(phy, params, vars,
+                                           SHMEM_EEE_1G_ADV);
                } else {
-                       temp = 0;
-                       DP(NETIF_MSG_LINK, "Disabling Auto-GrEEEn\n");
+                       DP(NETIF_MSG_LINK, "Don't Advertise 1GBase-T EEE\n");
+                       bnx2x_eee_disable(phy, params, vars);
+               }
+       } else {
+               vars->eee_status &= ~SHMEM_EEE_1G_ADV <<
+                                   SHMEM_EEE_SUPPORTED_SHIFT;
+
+               if (phy->flags & FLAGS_EEE) {
+                       /* Handle legacy auto-grEEEn */
+                       if (params->feature_config_flags &
+                           FEATURE_CONFIG_AUTOGREEEN_ENABLED) {
+                               temp = 6;
+                               DP(NETIF_MSG_LINK, "Enabling Auto-GrEEEn\n");
+                       } else {
+                               temp = 0;
+                               DP(NETIF_MSG_LINK, "Don't Adv. EEE\n");
+                       }
+                       bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+                                        MDIO_AN_REG_EEE_ADV, temp);
                }
-               bnx2x_cl22_write(bp, phy,
-                                MDIO_REG_GPHY_CL45_ADDR_REG, MDIO_AN_DEVAD);
-               bnx2x_cl22_write(bp, phy,
-                                MDIO_REG_GPHY_CL45_DATA_REG,
-                                MDIO_REG_GPHY_EEE_ADV);
-               bnx2x_cl22_write(bp, phy,
-                                MDIO_REG_GPHY_CL45_ADDR_REG,
-                                (0x1 << 14) | MDIO_AN_DEVAD);
-               bnx2x_cl22_write(bp, phy,
-                                MDIO_REG_GPHY_CL45_DATA_REG,
-                                temp);
        }
 
        bnx2x_cl22_write(bp, phy,
@@ -10896,29 +10654,6 @@ static u8 bnx2x_54618se_read_status(struct bnx2x_phy *phy,
                DP(NETIF_MSG_LINK, "BCM54618SE: link speed is %d\n",
                           vars->line_speed);
 
-               /* Report whether EEE is resolved. */
-               bnx2x_cl22_read(bp, phy, MDIO_REG_GPHY_PHYID_LSB, &val);
-               if (val == MDIO_REG_GPHY_ID_54618SE) {
-                       if (vars->link_status &
-                           LINK_STATUS_AUTO_NEGOTIATE_COMPLETE)
-                               val = 0;
-                       else {
-                               bnx2x_cl22_write(bp, phy,
-                                       MDIO_REG_GPHY_CL45_ADDR_REG,
-                                       MDIO_AN_DEVAD);
-                               bnx2x_cl22_write(bp, phy,
-                                       MDIO_REG_GPHY_CL45_DATA_REG,
-                                       MDIO_REG_GPHY_EEE_RESOLVED);
-                               bnx2x_cl22_write(bp, phy,
-                                       MDIO_REG_GPHY_CL45_ADDR_REG,
-                                       (0x1 << 14) | MDIO_AN_DEVAD);
-                               bnx2x_cl22_read(bp, phy,
-                                       MDIO_REG_GPHY_CL45_DATA_REG,
-                                       &val);
-                       }
-                       DP(NETIF_MSG_LINK, "EEE resolution: 0x%x\n", val);
-               }
-
                bnx2x_ext_phy_resolve_fc(phy, params, vars);
 
                if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) {
@@ -10948,6 +10683,10 @@ static u8 bnx2x_54618se_read_status(struct bnx2x_phy *phy,
                        if (val & (1<<11))
                                vars->link_status |=
                                  LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE;
+
+                       if ((phy->flags & FLAGS_EEE) &&
+                           bnx2x_eee_has_cap(params))
+                               bnx2x_eee_an_resolve(phy, params, vars);
                }
        }
        return link_up;
@@ -11353,7 +11092,7 @@ static struct bnx2x_phy phy_8073 = {
        .format_fw_ver  = (format_fw_ver_t)bnx2x_format_ver,
        .hw_reset       = (hw_reset_t)NULL,
        .set_link_led   = (set_link_led_t)NULL,
-       .phy_specific_func = (phy_specific_func_t)NULL
+       .phy_specific_func = (phy_specific_func_t)bnx2x_8073_specific_func
 };
 static struct bnx2x_phy phy_8705 = {
        .type           = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705,
@@ -11546,7 +11285,7 @@ static struct bnx2x_phy phy_84823 = {
        .format_fw_ver  = (format_fw_ver_t)bnx2x_848xx_format_ver,
        .hw_reset       = (hw_reset_t)NULL,
        .set_link_led   = (set_link_led_t)bnx2x_848xx_set_link_led,
-       .phy_specific_func = (phy_specific_func_t)NULL
+       .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func
 };
 
 static struct bnx2x_phy phy_84833 = {
@@ -11555,8 +11294,7 @@ static struct bnx2x_phy phy_84833 = {
        .def_md_devad   = 0,
        .flags          = (FLAGS_FAN_FAILURE_DET_REQ |
                           FLAGS_REARM_LATCH_SIGNAL |
-                          FLAGS_TX_ERROR_CHECK |
-                          FLAGS_EEE_10GBT),
+                          FLAGS_TX_ERROR_CHECK),
        .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
        .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
        .mdio_ctrl      = 0,
@@ -11582,7 +11320,7 @@ static struct bnx2x_phy phy_84833 = {
        .format_fw_ver  = (format_fw_ver_t)bnx2x_848xx_format_ver,
        .hw_reset       = (hw_reset_t)bnx2x_84833_hw_reset_phy,
        .set_link_led   = (set_link_led_t)bnx2x_848xx_set_link_led,
-       .phy_specific_func = (phy_specific_func_t)NULL
+       .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func
 };
 
 static struct bnx2x_phy phy_54618se = {
@@ -11616,7 +11354,7 @@ static struct bnx2x_phy phy_54618se = {
        .format_fw_ver  = (format_fw_ver_t)NULL,
        .hw_reset       = (hw_reset_t)NULL,
        .set_link_led   = (set_link_led_t)bnx2x_5461x_set_link_led,
-       .phy_specific_func = (phy_specific_func_t)NULL
+       .phy_specific_func = (phy_specific_func_t)bnx2x_54618se_specific_func
 };
 /*****************************************************************/
 /*                                                               */
@@ -11862,6 +11600,8 @@ static int bnx2x_populate_ext_phy(struct bnx2x *bp,
        case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54616:
        case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE:
                *phy = phy_54618se;
+               if (phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE)
+                       phy->flags |= FLAGS_EEE;
                break;
        case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
                *phy = phy_7101;
@@ -12141,7 +11881,7 @@ void bnx2x_init_bmac_loopback(struct link_params *params,
                bnx2x_xgxs_deassert(params);
 
                /* set bmac loopback */
-               bnx2x_bmac_enable(params, vars, 1);
+               bnx2x_bmac_enable(params, vars, 1, 1);
 
                REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
 }
@@ -12233,7 +11973,7 @@ void bnx2x_init_xgxs_loopback(struct link_params *params,
                if (USES_WARPCORE(bp))
                        bnx2x_xmac_enable(params, vars, 0);
                else
-                       bnx2x_bmac_enable(params, vars, 0);
+                       bnx2x_bmac_enable(params, vars, 0, 1);
        }
 
                if (params->loopback_mode == LOOPBACK_XGXS) {
@@ -12258,8 +11998,161 @@ void bnx2x_init_xgxs_loopback(struct link_params *params,
        bnx2x_set_led(params, vars, LED_MODE_OPER, vars->line_speed);
 }
 
+static void bnx2x_set_rx_filter(struct link_params *params, u8 en)
+{
+       struct bnx2x *bp = params->bp;
+       u8 val = en * 0x1F;
+
+       /* Open the gate between the NIG to the BRB */
+       if (!CHIP_IS_E1x(bp))
+               val |= en * 0x20;
+       REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + params->port*4, val);
+
+       if (!CHIP_IS_E1(bp)) {
+               REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + params->port*4,
+                      en*0x3);
+       }
+
+       REG_WR(bp, (params->port ? NIG_REG_LLH1_BRB1_NOT_MCP :
+                   NIG_REG_LLH0_BRB1_NOT_MCP), en);
+}
+static int bnx2x_avoid_link_flap(struct link_params *params,
+                                           struct link_vars *vars)
+{
+       u32 phy_idx;
+       u32 dont_clear_stat, lfa_sts;
+       struct bnx2x *bp = params->bp;
+
+       /* Sync the link parameters */
+       bnx2x_link_status_update(params, vars);
+
+       /*
+        * The module verification was already done by previous link owner,
+        * so this call is meant only to get warning message
+        */
+
+       for (phy_idx = INT_PHY; phy_idx < params->num_phys; phy_idx++) {
+               struct bnx2x_phy *phy = &params->phy[phy_idx];
+               if (phy->phy_specific_func) {
+                       DP(NETIF_MSG_LINK, "Calling PHY specific func\n");
+                       phy->phy_specific_func(phy, params, PHY_INIT);
+               }
+               if ((phy->media_type == ETH_PHY_SFPP_10G_FIBER) ||
+                   (phy->media_type == ETH_PHY_SFP_1G_FIBER) ||
+                   (phy->media_type == ETH_PHY_DA_TWINAX))
+                       bnx2x_verify_sfp_module(phy, params);
+       }
+       lfa_sts = REG_RD(bp, params->lfa_base +
+                        offsetof(struct shmem_lfa,
+                                 lfa_sts));
+
+       dont_clear_stat = lfa_sts & SHMEM_LFA_DONT_CLEAR_STAT;
+
+       /* Re-enable the NIG/MAC */
+       if (CHIP_IS_E3(bp)) {
+               if (!dont_clear_stat) {
+                       REG_WR(bp, GRCBASE_MISC +
+                              MISC_REGISTERS_RESET_REG_2_CLEAR,
+                              (MISC_REGISTERS_RESET_REG_2_MSTAT0 <<
+                               params->port));
+                       REG_WR(bp, GRCBASE_MISC +
+                              MISC_REGISTERS_RESET_REG_2_SET,
+                              (MISC_REGISTERS_RESET_REG_2_MSTAT0 <<
+                               params->port));
+               }
+               if (vars->line_speed < SPEED_10000)
+                       bnx2x_umac_enable(params, vars, 0);
+               else
+                       bnx2x_xmac_enable(params, vars, 0);
+       } else {
+               if (vars->line_speed < SPEED_10000)
+                       bnx2x_emac_enable(params, vars, 0);
+               else
+                       bnx2x_bmac_enable(params, vars, 0, !dont_clear_stat);
+       }
+
+       /* Increment LFA count */
+       lfa_sts = ((lfa_sts & ~LINK_FLAP_AVOIDANCE_COUNT_MASK) |
+                  (((((lfa_sts & LINK_FLAP_AVOIDANCE_COUNT_MASK) >>
+                      LINK_FLAP_AVOIDANCE_COUNT_OFFSET) + 1) & 0xff)
+                   << LINK_FLAP_AVOIDANCE_COUNT_OFFSET));
+       /* Clear link flap reason */
+       lfa_sts &= ~LFA_LINK_FLAP_REASON_MASK;
+
+       REG_WR(bp, params->lfa_base +
+              offsetof(struct shmem_lfa, lfa_sts), lfa_sts);
+
+       /* Disable NIG DRAIN */
+       REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
+
+       /* Enable interrupts */
+       bnx2x_link_int_enable(params);
+       return 0;
+}
+
+static void bnx2x_cannot_avoid_link_flap(struct link_params *params,
+                                        struct link_vars *vars,
+                                        int lfa_status)
+{
+       u32 lfa_sts, cfg_idx, tmp_val;
+       struct bnx2x *bp = params->bp;
+
+       bnx2x_link_reset(params, vars, 1);
+
+       if (!params->lfa_base)
+               return;
+       /* Store the new link parameters */
+       REG_WR(bp, params->lfa_base +
+              offsetof(struct shmem_lfa, req_duplex),
+              params->req_duplex[0] | (params->req_duplex[1] << 16));
+
+       REG_WR(bp, params->lfa_base +
+              offsetof(struct shmem_lfa, req_flow_ctrl),
+              params->req_flow_ctrl[0] | (params->req_flow_ctrl[1] << 16));
+
+       REG_WR(bp, params->lfa_base +
+              offsetof(struct shmem_lfa, req_line_speed),
+              params->req_line_speed[0] | (params->req_line_speed[1] << 16));
+
+       for (cfg_idx = 0; cfg_idx < SHMEM_LINK_CONFIG_SIZE; cfg_idx++) {
+               REG_WR(bp, params->lfa_base +
+                      offsetof(struct shmem_lfa,
+                               speed_cap_mask[cfg_idx]),
+                      params->speed_cap_mask[cfg_idx]);
+       }
+
+       tmp_val = REG_RD(bp, params->lfa_base +
+                        offsetof(struct shmem_lfa, additional_config));
+       tmp_val &= ~REQ_FC_AUTO_ADV_MASK;
+       tmp_val |= params->req_fc_auto_adv;
+
+       REG_WR(bp, params->lfa_base +
+              offsetof(struct shmem_lfa, additional_config), tmp_val);
+
+       lfa_sts = REG_RD(bp, params->lfa_base +
+                        offsetof(struct shmem_lfa, lfa_sts));
+
+       /* Clear the "Don't Clear Statistics" bit, and set reason */
+       lfa_sts &= ~SHMEM_LFA_DONT_CLEAR_STAT;
+
+       /* Set link flap reason */
+       lfa_sts &= ~LFA_LINK_FLAP_REASON_MASK;
+       lfa_sts |= ((lfa_status & LFA_LINK_FLAP_REASON_MASK) <<
+                   LFA_LINK_FLAP_REASON_OFFSET);
+
+       /* Increment link flap counter */
+       lfa_sts = ((lfa_sts & ~LINK_FLAP_COUNT_MASK) |
+                  (((((lfa_sts & LINK_FLAP_COUNT_MASK) >>
+                      LINK_FLAP_COUNT_OFFSET) + 1) & 0xff)
+                   << LINK_FLAP_COUNT_OFFSET));
+       REG_WR(bp, params->lfa_base +
+              offsetof(struct shmem_lfa, lfa_sts), lfa_sts);
+       /* Proceed with regular link initialization */
+}
+
 int bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
 {
+       int lfa_status;
        struct bnx2x *bp = params->bp;
        DP(NETIF_MSG_LINK, "Phy Initialization started\n");
        DP(NETIF_MSG_LINK, "(1) req_speed %d, req_flowctrl %d\n",
@@ -12274,6 +12167,19 @@ int bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
        vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
        vars->mac_type = MAC_TYPE_NONE;
        vars->phy_flags = 0;
+       /* Driver opens NIG-BRB filters */
+       bnx2x_set_rx_filter(params, 1);
+       /* Check if link flap can be avoided */
+       lfa_status = bnx2x_check_lfa(params);
+
+       if (lfa_status == 0) {
+               DP(NETIF_MSG_LINK, "Link Flap Avoidance in progress\n");
+               return bnx2x_avoid_link_flap(params, vars);
+       }
+
+       DP(NETIF_MSG_LINK, "Cannot avoid link flap lfa_sta=0x%x\n",
+                      lfa_status);
+       bnx2x_cannot_avoid_link_flap(params, vars, lfa_status);
 
        /* Disable attentions */
        bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + params->port*4,
@@ -12356,13 +12262,12 @@ int bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
                REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0);
        }
 
-       /* Stop BigMac rx */
-       if (!CHIP_IS_E3(bp))
-               bnx2x_bmac_rx_disable(bp, port);
-       else {
-               bnx2x_xmac_disable(params);
-               bnx2x_umac_disable(params);
-       }
+               if (!CHIP_IS_E3(bp)) {
+                       bnx2x_set_bmac_rx(bp, params->chip_id, port, 0);
+               } else {
+                       bnx2x_set_xmac_rxtx(params, 0);
+                       bnx2x_set_umac_rxtx(params, 0);
+               }
        /* Disable emac */
        if (!CHIP_IS_E3(bp))
                REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
@@ -12420,6 +12325,56 @@ int bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
        vars->phy_flags = 0;
        return 0;
 }
+int bnx2x_lfa_reset(struct link_params *params,
+                              struct link_vars *vars)
+{
+       struct bnx2x *bp = params->bp;
+       vars->link_up = 0;
+       vars->phy_flags = 0;
+       if (!params->lfa_base)
+               return bnx2x_link_reset(params, vars, 1);
+       /*
+        * Activate NIG drain so that during this time the device won't send
+        * anything while it is unable to response.
+        */
+       REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 1);
+
+       /*
+        * Close gracefully the gate from BMAC to NIG such that no half packets
+        * are passed.
+        */
+       if (!CHIP_IS_E3(bp))
+               bnx2x_set_bmac_rx(bp, params->chip_id, params->port, 0);
+
+       if (CHIP_IS_E3(bp)) {
+               bnx2x_set_xmac_rxtx(params, 0);
+               bnx2x_set_umac_rxtx(params, 0);
+       }
+       /* Wait 10ms for the pipe to clean up*/
+       usleep_range(10000, 20000);
+
+       /* Clean the NIG-BRB using the network filters in a way that will
+        * not cut a packet in the middle.
+        */
+       bnx2x_set_rx_filter(params, 0);
+
+       /*
+        * Re-open the gate between the BMAC and the NIG, after verifying the
+        * gate to the BRB is closed, otherwise packets may arrive to the
+        * firmware before driver had initialized it. The target is to achieve
+        * minimum management protocol down time.
+        */
+       if (!CHIP_IS_E3(bp))
+               bnx2x_set_bmac_rx(bp, params->chip_id, params->port, 1);
+
+       if (CHIP_IS_E3(bp)) {
+               bnx2x_set_xmac_rxtx(params, 1);
+               bnx2x_set_umac_rxtx(params, 1);
+       }
+       /* Disable NIG drain */
+       REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
+       return 0;
+}
 
 /****************************************************************************/
 /*                             Common function                             */
index 51cac8130051e27a0994c288712346698280d740..9165b89a4b1923d6948bcefba018f2d6cc707633 100644 (file)
@@ -155,7 +155,7 @@ struct bnx2x_phy {
 #define FLAGS_DUMMY_READ               (1<<9)
 #define FLAGS_MDC_MDIO_WA_B0           (1<<10)
 #define FLAGS_TX_ERROR_CHECK           (1<<12)
-#define FLAGS_EEE_10GBT                        (1<<13)
+#define FLAGS_EEE                      (1<<13)
 
        /* preemphasis values for the rx side */
        u16 rx_preemphasis[4];
@@ -216,6 +216,7 @@ struct bnx2x_phy {
        phy_specific_func_t phy_specific_func;
 #define DISABLE_TX     1
 #define ENABLE_TX      2
+#define PHY_INIT       3
 };
 
 /* Inputs parameters to the CLC */
@@ -304,6 +305,8 @@ struct link_params {
        struct bnx2x *bp;
        u16 req_fc_auto_adv; /* Should be set to TX / BOTH when
                                req_flow_ctrl is set to AUTO */
+       u16 rsrv1;
+       u32 lfa_base;
 };
 
 /* Output parameters */
@@ -356,7 +359,7 @@ int bnx2x_phy_init(struct link_params *params, struct link_vars *vars);
    to 0 */
 int bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
                     u8 reset_ext_phy);
-
+int bnx2x_lfa_reset(struct link_params *params, struct link_vars *vars);
 /* bnx2x_link_update should be called upon link interrupt */
 int bnx2x_link_update(struct link_params *params, struct link_vars *vars);
 
index e11485ca037dc223fd6f9e7420387c16c4251a50..f7ed122f40717ee4392041e09e1b2396e640370b 100644 (file)
@@ -2166,7 +2166,6 @@ void bnx2x_link_set(struct bnx2x *bp)
 {
        if (!BP_NOMCP(bp)) {
                bnx2x_acquire_phy_lock(bp);
-               bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
                bnx2x_phy_init(&bp->link_params, &bp->link_vars);
                bnx2x_release_phy_lock(bp);
 
@@ -2179,12 +2178,19 @@ static void bnx2x__link_reset(struct bnx2x *bp)
 {
        if (!BP_NOMCP(bp)) {
                bnx2x_acquire_phy_lock(bp);
-               bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
+               bnx2x_lfa_reset(&bp->link_params, &bp->link_vars);
                bnx2x_release_phy_lock(bp);
        } else
                BNX2X_ERR("Bootcode is missing - can not reset link\n");
 }
 
+void bnx2x_force_link_reset(struct bnx2x *bp)
+{
+       bnx2x_acquire_phy_lock(bp);
+       bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
+       bnx2x_release_phy_lock(bp);
+}
+
 u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
 {
        u8 rc = 0;
@@ -6751,7 +6757,6 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
        u32 low, high;
        u32 val;
 
-       bnx2x__link_reset(bp);
 
        DP(NETIF_MSG_HW, "starting port init  port %d\n", port);
 
@@ -8244,12 +8249,15 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
  * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP.
  *
  * @bp:                driver handle
+ * @keep_link:         true iff link should be kept up
  */
-void bnx2x_send_unload_done(struct bnx2x *bp)
+void bnx2x_send_unload_done(struct bnx2x *bp, bool keep_link)
 {
+       u32 reset_param = keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0;
+
        /* Report UNLOAD_DONE to MCP */
        if (!BP_NOMCP(bp))
-               bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
+               bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, reset_param);
 }
 
 static int bnx2x_func_wait_started(struct bnx2x *bp)
@@ -8318,7 +8326,7 @@ static int bnx2x_func_wait_started(struct bnx2x *bp)
        return 0;
 }
 
-void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
+void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link)
 {
        int port = BP_PORT(bp);
        int i, rc = 0;
@@ -8440,7 +8448,7 @@ unload_error:
 
 
        /* Report UNLOAD_DONE to MCP */
-       bnx2x_send_unload_done(bp);
+       bnx2x_send_unload_done(bp, keep_link);
 }
 
 void bnx2x_disable_close_the_gate(struct bnx2x *bp)
@@ -8852,7 +8860,8 @@ int bnx2x_leader_reset(struct bnx2x *bp)
         * driver is owner of the HW
         */
        if (!global && !BP_NOMCP(bp)) {
-               load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
+               load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ,
+                                            DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
                if (!load_code) {
                        BNX2X_ERR("MCP response failure, aborting\n");
                        rc = -EAGAIN;
@@ -8958,7 +8967,7 @@ static void bnx2x_parity_recover(struct bnx2x *bp)
 
                        /* Stop the driver */
                        /* If interface has been removed - break */
-                       if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
+                       if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY, false))
                                return;
 
                        bp->recovery_state = BNX2X_RECOVERY_WAIT;
@@ -9124,7 +9133,7 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work)
                bp->sp_rtnl_state = 0;
                smp_mb();
 
-               bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+               bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
                bnx2x_nic_load(bp, LOAD_NORMAL);
 
                goto sp_rtnl_exit;
@@ -9310,7 +9319,8 @@ static void __devinit bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 port,
 
 static int __devinit bnx2x_prev_mcp_done(struct bnx2x *bp)
 {
-       u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
+       u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE,
+                                 DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET);
        if (!rc) {
                BNX2X_ERR("MCP response failure, aborting\n");
                return -EBUSY;
@@ -11000,7 +11010,7 @@ static int bnx2x_close(struct net_device *dev)
        struct bnx2x *bp = netdev_priv(dev);
 
        /* Unload the driver, release IRQs */
-       bnx2x_nic_unload(bp, UNLOAD_CLOSE);
+       bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
 
        /* Power off */
        bnx2x_set_power_state(bp, PCI_D3hot);
index 28a0bcfe61ff9a4224d5b1f3442400a363bce79b..1b1999d34c7180f41649b606695ecccfbcb237c5 100644 (file)
 #define UMAC_COMMAND_CONFIG_REG_SW_RESET                        (0x1<<13)
 #define UMAC_COMMAND_CONFIG_REG_TX_ENA                          (0x1<<0)
 #define UMAC_REG_COMMAND_CONFIG                                         0x8
+/* [RW 16] This is the duration for which MAC must wait to go back to ACTIVE
+ * state from LPI state when it receives packet for transmission. The
+ * decrement unit is 1 micro-second. */
+#define UMAC_REG_EEE_WAKE_TIMER                                         0x6c
 /* [RW 32] Register Bit 0 refers to Bit 16 of the MAC address; Bit 1 refers
  * to bit 17 of the MAC address etc. */
 #define UMAC_REG_MAC_ADDR0                                      0xc
 /* [RW 14] Defines a 14-Bit maximum frame length used by the MAC receive
  * logic to check frames. */
 #define UMAC_REG_MAXFR                                          0x14
+#define UMAC_REG_UMAC_EEE_CTRL                                  0x64
+#define UMAC_UMAC_EEE_CTRL_REG_EEE_EN                           (0x1<<3)
 /* [RW 8] The event id for aggregated interrupt 0 */
 #define USDM_REG_AGG_INT_EVENT_0                                0xc4038
 #define USDM_REG_AGG_INT_EVENT_1                                0xc403c
@@ -6992,6 +6998,7 @@ Theotherbitsarereservedandshouldbezero*/
 /* BCM84833 only */
 #define MDIO_84833_TOP_CFG_FW_REV                      0x400f
 #define MDIO_84833_TOP_CFG_FW_EEE              0x10b1
+#define MDIO_84833_TOP_CFG_FW_NO_EEE           0x1f81
 #define MDIO_84833_TOP_CFG_XGPHY_STRAP1                        0x401a
 #define MDIO_84833_SUPER_ISOLATE               0x8000
 /* These are mailbox register set used by 84833. */
@@ -7160,10 +7167,11 @@ Theotherbitsarereservedandshouldbezero*/
 #define MDIO_REG_GPHY_ID_54618SE               0x5cd5
 #define MDIO_REG_GPHY_CL45_ADDR_REG                    0xd
 #define MDIO_REG_GPHY_CL45_DATA_REG                    0xe
-#define MDIO_REG_GPHY_EEE_ADV                  0x3c
-#define MDIO_REG_GPHY_EEE_1G           (0x1 << 2)
-#define MDIO_REG_GPHY_EEE_100          (0x1 << 1)
 #define MDIO_REG_GPHY_EEE_RESOLVED             0x803e
+#define MDIO_REG_GPHY_EXP_ACCESS_GATE                  0x15
+#define MDIO_REG_GPHY_EXP_ACCESS                       0x17
+#define MDIO_REG_GPHY_EXP_ACCESS_TOP           0xd00
+#define MDIO_REG_GPHY_EXP_TOP_2K_BUF           0x40
 #define MDIO_REG_GPHY_AUX_STATUS                       0x19
 #define MDIO_REG_INTR_STATUS                           0x1a
 #define MDIO_REG_INTR_MASK                             0x1b
index 62f754bd0dfe65704a1af826a754a720dc8cfd43..71971a161bd199746595d501691f19300c5ff2ad 100644 (file)
@@ -229,8 +229,7 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
                         */
                        list_add_tail(&spacer.link, &o->pending_comp);
                        mb();
-                       list_del(&elem->link);
-                       list_add_tail(&elem->link, &o->pending_comp);
+                       list_move_tail(&elem->link, &o->pending_comp);
                        list_del(&spacer.link);
                } else
                        break;
@@ -5620,7 +5619,7 @@ static inline int bnx2x_func_send_start(struct bnx2x *bp,
        memset(rdata, 0, sizeof(*rdata));
 
        /* Fill the ramrod data with provided parameters */
-       rdata->function_mode = cpu_to_le16(start_params->mf_mode);
+       rdata->function_mode = (u8)start_params->mf_mode;
        rdata->sd_vlan_tag   = cpu_to_le16(start_params->sd_vlan_tag);
        rdata->path_id       = BP_PATH(bp);
        rdata->network_cos_mode = start_params->network_cos_mode;
index a1d0446b39b356dd69e0b77e69f6285ba37edb63..348ed02d3c69928c3991d7c8de046ccf95c0a3c9 100644 (file)
@@ -39,14 +39,39 @@ static inline long bnx2x_hilo(u32 *hiref)
 #endif
 }
 
-static u16 bnx2x_get_port_stats_dma_len(struct bnx2x *bp)
+static inline u16 bnx2x_get_port_stats_dma_len(struct bnx2x *bp)
 {
-       u16 res = sizeof(struct host_port_stats) >> 2;
+       u16 res = 0;
 
-       /* if PFC stats are not supported by the MFW, don't DMA them */
-       if (!(bp->flags &  BC_SUPPORTS_PFC_STATS))
-               res -= (sizeof(u32)*4) >> 2;
+       /* 'newest' convention - shmem2 contains the size of the port stats */
+       if (SHMEM2_HAS(bp, sizeof_port_stats)) {
+               u32 size = SHMEM2_RD(bp, sizeof_port_stats);
+               if (size)
+                       res = size;
 
+               /* prevent newer BC from causing buffer overflow */
+               if (res > sizeof(struct host_port_stats))
+                       res = sizeof(struct host_port_stats);
+       }
+
+       /* Older convention - all BCs support the port stats' fields up until
+        * the 'not_used' field
+        */
+       if (!res) {
+               res = offsetof(struct host_port_stats, not_used) + 4;
+
+               /* if PFC stats are supported by the MFW, DMA them as well */
+               if (bp->flags & BC_SUPPORTS_PFC_STATS) {
+                       res += offsetof(struct host_port_stats,
+                                       pfc_frames_rx_lo) -
+                              offsetof(struct host_port_stats,
+                                       pfc_frames_tx_hi) + 4 ;
+               }
+       }
+
+       res >>= 2;
+
+       WARN_ON(res > 2 * DMAE_LEN32_RD_MAX);
        return res;
 }
 
index 3b4fc61f24cfe1cb047dbbd16e86d7180a102046..cc8434fd606e2a089e20708f4ee12794fdfdd1e1 100644 (file)
@@ -823,10 +823,8 @@ static void cnic_free_context(struct cnic_dev *dev)
        }
 }
 
-static void __cnic_free_uio(struct cnic_uio_dev *udev)
+static void __cnic_free_uio_rings(struct cnic_uio_dev *udev)
 {
-       uio_unregister_device(&udev->cnic_uinfo);
-
        if (udev->l2_buf) {
                dma_free_coherent(&udev->pdev->dev, udev->l2_buf_size,
                                  udev->l2_buf, udev->l2_buf_map);
@@ -839,6 +837,14 @@ static void __cnic_free_uio(struct cnic_uio_dev *udev)
                udev->l2_ring = NULL;
        }
 
+}
+
+static void __cnic_free_uio(struct cnic_uio_dev *udev)
+{
+       uio_unregister_device(&udev->cnic_uinfo);
+
+       __cnic_free_uio_rings(udev);
+
        pci_dev_put(udev->pdev);
        kfree(udev);
 }
@@ -862,6 +868,8 @@ static void cnic_free_resc(struct cnic_dev *dev)
        if (udev) {
                udev->dev = NULL;
                cp->udev = NULL;
+               if (udev->uio_dev == -1)
+                       __cnic_free_uio_rings(udev);
        }
 
        cnic_free_context(dev);
@@ -996,6 +1004,34 @@ static int cnic_alloc_kcq(struct cnic_dev *dev, struct kcq_info *info,
        return 0;
 }
 
+static int __cnic_alloc_uio_rings(struct cnic_uio_dev *udev, int pages)
+{
+       struct cnic_local *cp = udev->dev->cnic_priv;
+
+       if (udev->l2_ring)
+               return 0;
+
+       udev->l2_ring_size = pages * BCM_PAGE_SIZE;
+       udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
+                                          &udev->l2_ring_map,
+                                          GFP_KERNEL | __GFP_COMP);
+       if (!udev->l2_ring)
+               return -ENOMEM;
+
+       udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
+       udev->l2_buf_size = PAGE_ALIGN(udev->l2_buf_size);
+       udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size,
+                                         &udev->l2_buf_map,
+                                         GFP_KERNEL | __GFP_COMP);
+       if (!udev->l2_buf) {
+               __cnic_free_uio_rings(udev);
+               return -ENOMEM;
+       }
+
+       return 0;
+
+}
+
 static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
 {
        struct cnic_local *cp = dev->cnic_priv;
@@ -1005,6 +1041,11 @@ static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
        list_for_each_entry(udev, &cnic_udev_list, list) {
                if (udev->pdev == dev->pcidev) {
                        udev->dev = dev;
+                       if (__cnic_alloc_uio_rings(udev, pages)) {
+                               udev->dev = NULL;
+                               read_unlock(&cnic_dev_lock);
+                               return -ENOMEM;
+                       }
                        cp->udev = udev;
                        read_unlock(&cnic_dev_lock);
                        return 0;
@@ -1020,20 +1061,9 @@ static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
 
        udev->dev = dev;
        udev->pdev = dev->pcidev;
-       udev->l2_ring_size = pages * BCM_PAGE_SIZE;
-       udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
-                                          &udev->l2_ring_map,
-                                          GFP_KERNEL | __GFP_COMP);
-       if (!udev->l2_ring)
-               goto err_udev;
 
-       udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
-       udev->l2_buf_size = PAGE_ALIGN(udev->l2_buf_size);
-       udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size,
-                                         &udev->l2_buf_map,
-                                         GFP_KERNEL | __GFP_COMP);
-       if (!udev->l2_buf)
-               goto err_dma;
+       if (__cnic_alloc_uio_rings(udev, pages))
+               goto err_udev;
 
        write_lock(&cnic_dev_lock);
        list_add(&udev->list, &cnic_udev_list);
@@ -1044,9 +1074,7 @@ static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
        cp->udev = udev;
 
        return 0;
- err_dma:
-       dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
-                         udev->l2_ring, udev->l2_ring_map);
+
  err_udev:
        kfree(udev);
        return -ENOMEM;
@@ -1260,7 +1288,7 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
        if (ret)
                goto error;
 
-       if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
+       if (CNIC_SUPPORTS_FCOE(cp)) {
                ret = cnic_alloc_kcq(dev, &cp->kcq2, true);
                if (ret)
                        goto error;
@@ -1275,6 +1303,9 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
        if (ret)
                goto error;
 
+       if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
+               return 0;
+
        cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk;
 
        cp->l2_rx_ring_size = 15;
@@ -3050,6 +3081,22 @@ static void cnic_ack_bnx2x_e2_msix(struct cnic_dev *dev)
                        IGU_INT_DISABLE, 0);
 }
 
+static void cnic_arm_bnx2x_msix(struct cnic_dev *dev, u32 idx)
+{
+       struct cnic_local *cp = dev->cnic_priv;
+
+       cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, idx,
+                          IGU_INT_ENABLE, 1);
+}
+
+static void cnic_arm_bnx2x_e2_msix(struct cnic_dev *dev, u32 idx)
+{
+       struct cnic_local *cp = dev->cnic_priv;
+
+       cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, idx,
+                       IGU_INT_ENABLE, 1);
+}
+
 static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
 {
        u32 last_status = *info->status_idx_ptr;
@@ -3086,9 +3133,8 @@ static void cnic_service_bnx2x_bh(unsigned long data)
                CNIC_WR16(dev, cp->kcq1.io_addr,
                          cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);
 
-               if (!BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
-                       cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID,
-                                          status_idx, IGU_INT_ENABLE, 1);
+               if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE) {
+                       cp->arm_int(dev, status_idx);
                        break;
                }
 
@@ -4845,6 +4891,9 @@ static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
        buf_map = udev->l2_buf_map;
        for (i = 0; i < MAX_TX_DESC_CNT; i += 3, txbd += 3) {
                struct eth_tx_start_bd *start_bd = &txbd->start_bd;
+               struct eth_tx_parse_bd_e1x *pbd_e1x =
+                       &((txbd + 1)->parse_bd_e1x);
+               struct eth_tx_parse_bd_e2 *pbd_e2 = &((txbd + 1)->parse_bd_e2);
                struct eth_tx_bd *reg_bd = &((txbd + 2)->reg_bd);
 
                start_bd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
@@ -4854,10 +4903,15 @@ static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
                start_bd->nbytes = cpu_to_le16(0x10);
                start_bd->nbd = cpu_to_le16(3);
                start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
-               start_bd->general_data = (UNICAST_ADDRESS <<
-                       ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
+               start_bd->general_data &= ~ETH_TX_START_BD_PARSE_NBDS;
                start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
 
+               if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id))
+                       pbd_e2->parsing_data = (UNICAST_ADDRESS <<
+                                ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE_SHIFT);
+               else
+                        pbd_e1x->global_data = (UNICAST_ADDRESS <<
+                               ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE_SHIFT);
        }
 
        val = (u64) ring_map >> 32;
@@ -5308,7 +5362,7 @@ static void cnic_stop_hw(struct cnic_dev *dev)
                /* Need to wait for the ring shutdown event to complete
                 * before clearing the CNIC_UP flag.
                 */
-               while (cp->udev->uio_dev != -1 && i < 15) {
+               while (cp->udev && cp->udev->uio_dev != -1 && i < 15) {
                        msleep(100);
                        i++;
                }
@@ -5473,8 +5527,7 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
 
        if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI))
                cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
-       if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) &&
-           !(ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE))
+       if (CNIC_SUPPORTS_FCOE(cp))
                cdev->max_fcoe_conn = ethdev->max_fcoe_conn;
 
        if (cdev->max_fcoe_conn > BNX2X_FCOE_NUM_CONNECTIONS)
@@ -5492,10 +5545,13 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
        cp->stop_cm = cnic_cm_stop_bnx2x_hw;
        cp->enable_int = cnic_enable_bnx2x_int;
        cp->disable_int_sync = cnic_disable_bnx2x_int_sync;
-       if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id))
+       if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
                cp->ack_int = cnic_ack_bnx2x_e2_msix;
-       else
+               cp->arm_int = cnic_arm_bnx2x_e2_msix;
+       } else {
                cp->ack_int = cnic_ack_bnx2x_msix;
+               cp->arm_int = cnic_arm_bnx2x_msix;
+       }
        cp->close_conn = cnic_close_bnx2x_conn;
        return cdev;
 }
index 30328097f516375ee7db7fe9179e178c71cf3375..148604c3fa0c79c51bf0fd1dc75edc3f7598c670 100644 (file)
@@ -334,6 +334,7 @@ struct cnic_local {
        void                    (*enable_int)(struct cnic_dev *);
        void                    (*disable_int_sync)(struct cnic_dev *);
        void                    (*ack_int)(struct cnic_dev *);
+       void                    (*arm_int)(struct cnic_dev *, u32 index);
        void                    (*close_conn)(struct cnic_sock *, u32 opcode);
 };
 
@@ -474,6 +475,10 @@ struct bnx2x_bd_chain_next {
          MAX_STAT_COUNTER_ID_E1))
 #endif
 
+#define CNIC_SUPPORTS_FCOE(cp)                                 \
+       (BNX2X_CHIP_IS_E2_PLUS((cp)->chip_id) &&                \
+        !((cp)->ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE))
+
 #define CNIC_RAMROD_TMO                        (HZ / 4)
 
 #endif
index 382c98b0cc0c6feb959cd0f4e9dca7a078159c84..ede3db35d757e9c51a5f07cef320ecf323515295 100644 (file)
@@ -896,7 +896,7 @@ struct tstorm_tcp_tcp_ag_context_section {
        u32 snd_nxt;
        u32 rtt_seq;
        u32 rtt_time;
-       u32 __reserved66;
+       u32 wnd_right_edge_local;
        u32 wnd_right_edge;
        u32 tcp_agg_vars1;
 #define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_FIN_SENT_FLAG (0x1<<0)
index 5cb88881bba1134776731b7f438d876ed47dc441..865095aad1f6494d985f4dbe15ab1ae198e7e74f 100644 (file)
@@ -14,8 +14,8 @@
 
 #include "bnx2x/bnx2x_mfw_req.h"
 
-#define CNIC_MODULE_VERSION    "2.5.12"
-#define CNIC_MODULE_RELDATE    "June 29, 2012"
+#define CNIC_MODULE_VERSION    "2.5.14"
+#define CNIC_MODULE_RELDATE    "Sep 30, 2012"
 
 #define CNIC_ULP_RDMA          0
 #define CNIC_ULP_ISCSI         1
index 388d3221393701cfb2366204f06aa6e191fc2947..46280ba4c5d415930746c0cd7dc7c7e8b5790a95 100644 (file)
 #include <linux/prefetch.h>
 #include <linux/dma-mapping.h>
 #include <linux/firmware.h>
-#if IS_ENABLED(CONFIG_HWMON)
 #include <linux/hwmon.h>
 #include <linux/hwmon-sysfs.h>
-#endif
 
 #include <net/checksum.h>
 #include <net/ip.h>
@@ -92,10 +90,10 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
 
 #define DRV_MODULE_NAME                "tg3"
 #define TG3_MAJ_NUM                    3
-#define TG3_MIN_NUM                    124
+#define TG3_MIN_NUM                    125
 #define DRV_MODULE_VERSION     \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
-#define DRV_MODULE_RELDATE     "March 21, 2012"
+#define DRV_MODULE_RELDATE     "September 26, 2012"
 
 #define RESET_KIND_SHUTDOWN    0
 #define RESET_KIND_INIT                1
@@ -6263,7 +6261,7 @@ static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
                u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
 
                tp->rx_refill = false;
-               for (i = 1; i < tp->irq_cnt; i++)
+               for (i = 1; i <= tp->rxq_cnt; i++)
                        err |= tg3_rx_prodring_xfer(tp, dpr,
                                                    &tp->napi[i].prodring);
 
@@ -7592,15 +7590,11 @@ static int tg3_init_rings(struct tg3 *tp)
        return 0;
 }
 
-/*
- * Must not be invoked with interrupt sources disabled and
- * the hardware shutdown down.
- */
-static void tg3_free_consistent(struct tg3 *tp)
+static void tg3_mem_tx_release(struct tg3 *tp)
 {
        int i;
 
-       for (i = 0; i < tp->irq_cnt; i++) {
+       for (i = 0; i < tp->irq_max; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];
 
                if (tnapi->tx_ring) {
@@ -7611,17 +7605,114 @@ static void tg3_free_consistent(struct tg3 *tp)
 
                kfree(tnapi->tx_buffers);
                tnapi->tx_buffers = NULL;
+       }
+}
 
-               if (tnapi->rx_rcb) {
-                       dma_free_coherent(&tp->pdev->dev,
-                                         TG3_RX_RCB_RING_BYTES(tp),
-                                         tnapi->rx_rcb,
-                                         tnapi->rx_rcb_mapping);
-                       tnapi->rx_rcb = NULL;
-               }
+static int tg3_mem_tx_acquire(struct tg3 *tp)
+{
+       int i;
+       struct tg3_napi *tnapi = &tp->napi[0];
+
+       /* If multivector TSS is enabled, vector 0 does not handle
+        * tx interrupts.  Don't allocate any resources for it.
+        */
+       if (tg3_flag(tp, ENABLE_TSS))
+               tnapi++;
+
+       for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
+               tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
+                                           TG3_TX_RING_SIZE, GFP_KERNEL);
+               if (!tnapi->tx_buffers)
+                       goto err_out;
+
+               tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
+                                                   TG3_TX_RING_BYTES,
+                                                   &tnapi->tx_desc_mapping,
+                                                   GFP_KERNEL);
+               if (!tnapi->tx_ring)
+                       goto err_out;
+       }
+
+       return 0;
+
+err_out:
+       tg3_mem_tx_release(tp);
+       return -ENOMEM;
+}
+
+static void tg3_mem_rx_release(struct tg3 *tp)
+{
+       int i;
+
+       for (i = 0; i < tp->irq_max; i++) {
+               struct tg3_napi *tnapi = &tp->napi[i];
 
                tg3_rx_prodring_fini(tp, &tnapi->prodring);
 
+               if (!tnapi->rx_rcb)
+                       continue;
+
+               dma_free_coherent(&tp->pdev->dev,
+                                 TG3_RX_RCB_RING_BYTES(tp),
+                                 tnapi->rx_rcb,
+                                 tnapi->rx_rcb_mapping);
+               tnapi->rx_rcb = NULL;
+       }
+}
+
+static int tg3_mem_rx_acquire(struct tg3 *tp)
+{
+       unsigned int i, limit;
+
+       limit = tp->rxq_cnt;
+
+       /* If RSS is enabled, we need a (dummy) producer ring
+        * set on vector zero.  This is the true hw prodring.
+        */
+       if (tg3_flag(tp, ENABLE_RSS))
+               limit++;
+
+       for (i = 0; i < limit; i++) {
+               struct tg3_napi *tnapi = &tp->napi[i];
+
+               if (tg3_rx_prodring_init(tp, &tnapi->prodring))
+                       goto err_out;
+
+               /* If multivector RSS is enabled, vector 0
+                * does not handle rx or tx interrupts.
+                * Don't allocate any resources for it.
+                */
+               if (!i && tg3_flag(tp, ENABLE_RSS))
+                       continue;
+
+               tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
+                                                  TG3_RX_RCB_RING_BYTES(tp),
+                                                  &tnapi->rx_rcb_mapping,
+                                                  GFP_KERNEL);
+               if (!tnapi->rx_rcb)
+                       goto err_out;
+
+               memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
+       }
+
+       return 0;
+
+err_out:
+       tg3_mem_rx_release(tp);
+       return -ENOMEM;
+}
+
+/*
+ * Must not be invoked with interrupt sources disabled and
+ * the hardware shutdown down.
+ */
+static void tg3_free_consistent(struct tg3 *tp)
+{
+       int i;
+
+       for (i = 0; i < tp->irq_cnt; i++) {
+               struct tg3_napi *tnapi = &tp->napi[i];
+
                if (tnapi->hw_status) {
                        dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
                                          tnapi->hw_status,
@@ -7630,6 +7721,9 @@ static void tg3_free_consistent(struct tg3 *tp)
                }
        }
 
+       tg3_mem_rx_release(tp);
+       tg3_mem_tx_release(tp);
+
        if (tp->hw_stats) {
                dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
                                  tp->hw_stats, tp->stats_mapping);
@@ -7668,72 +7762,38 @@ static int tg3_alloc_consistent(struct tg3 *tp)
                memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
                sblk = tnapi->hw_status;
 
-               if (tg3_rx_prodring_init(tp, &tnapi->prodring))
-                       goto err_out;
+               if (tg3_flag(tp, ENABLE_RSS)) {
+                       u16 *prodptr = 0;
 
-               /* If multivector TSS is enabled, vector 0 does not handle
-                * tx interrupts.  Don't allocate any resources for it.
-                */
-               if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
-                   (i && tg3_flag(tp, ENABLE_TSS))) {
-                       tnapi->tx_buffers = kzalloc(
-                                              sizeof(struct tg3_tx_ring_info) *
-                                              TG3_TX_RING_SIZE, GFP_KERNEL);
-                       if (!tnapi->tx_buffers)
-                               goto err_out;
-
-                       tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
-                                                           TG3_TX_RING_BYTES,
-                                                       &tnapi->tx_desc_mapping,
-                                                           GFP_KERNEL);
-                       if (!tnapi->tx_ring)
-                               goto err_out;
-               }
-
-               /*
-                * When RSS is enabled, the status block format changes
-                * slightly.  The "rx_jumbo_consumer", "reserved",
-                * and "rx_mini_consumer" members get mapped to the
-                * other three rx return ring producer indexes.
-                */
-               switch (i) {
-               default:
-                       if (tg3_flag(tp, ENABLE_RSS)) {
-                               tnapi->rx_rcb_prod_idx = NULL;
+                       /*
+                        * When RSS is enabled, the status block format changes
+                        * slightly.  The "rx_jumbo_consumer", "reserved",
+                        * and "rx_mini_consumer" members get mapped to the
+                        * other three rx return ring producer indexes.
+                        */
+                       switch (i) {
+                       case 1:
+                               prodptr = &sblk->idx[0].rx_producer;
+                               break;
+                       case 2:
+                               prodptr = &sblk->rx_jumbo_consumer;
+                               break;
+                       case 3:
+                               prodptr = &sblk->reserved;
+                               break;
+                       case 4:
+                               prodptr = &sblk->rx_mini_consumer;
                                break;
                        }
-                       /* Fall through */
-               case 1:
+                       tnapi->rx_rcb_prod_idx = prodptr;
+               } else {
                        tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
-                       break;
-               case 2:
-                       tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
-                       break;
-               case 3:
-                       tnapi->rx_rcb_prod_idx = &sblk->reserved;
-                       break;
-               case 4:
-                       tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
-                       break;
                }
-
-               /*
-                * If multivector RSS is enabled, vector 0 does not handle
-                * rx or tx interrupts.  Don't allocate any resources for it.
-                */
-               if (!i && tg3_flag(tp, ENABLE_RSS))
-                       continue;
-
-               tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
-                                                  TG3_RX_RCB_RING_BYTES(tp),
-                                                  &tnapi->rx_rcb_mapping,
-                                                  GFP_KERNEL);
-               if (!tnapi->rx_rcb)
-                       goto err_out;
-
-               memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
        }
 
+       if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
+               goto err_out;
+
        return 0;
 
 err_out:
@@ -8247,9 +8307,10 @@ static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
                              nic_addr);
 }
 
-static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
+
+static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
 {
-       int i;
+       int i = 0;
 
        if (!tg3_flag(tp, ENABLE_TSS)) {
                tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
@@ -8259,31 +8320,43 @@ static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
                tw32(HOSTCC_TXCOL_TICKS, 0);
                tw32(HOSTCC_TXMAX_FRAMES, 0);
                tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
+
+               for (; i < tp->txq_cnt; i++) {
+                       u32 reg;
+
+                       reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
+                       tw32(reg, ec->tx_coalesce_usecs);
+                       reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
+                       tw32(reg, ec->tx_max_coalesced_frames);
+                       reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
+                       tw32(reg, ec->tx_max_coalesced_frames_irq);
+               }
        }
 
+       for (; i < tp->irq_max - 1; i++) {
+               tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
+               tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
+               tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
+       }
+}
+
+static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
+{
+       int i = 0;
+       u32 limit = tp->rxq_cnt;
+
        if (!tg3_flag(tp, ENABLE_RSS)) {
                tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
                tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
                tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
+               limit--;
        } else {
                tw32(HOSTCC_RXCOL_TICKS, 0);
                tw32(HOSTCC_RXMAX_FRAMES, 0);
                tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
        }
 
-       if (!tg3_flag(tp, 5705_PLUS)) {
-               u32 val = ec->stats_block_coalesce_usecs;
-
-               tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
-               tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
-
-               if (!netif_carrier_ok(tp->dev))
-                       val = 0;
-
-               tw32(HOSTCC_STAT_COAL_TICKS, val);
-       }
-
-       for (i = 0; i < tp->irq_cnt - 1; i++) {
+       for (; i < limit; i++) {
                u32 reg;
 
                reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
@@ -8292,27 +8365,30 @@ static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
                tw32(reg, ec->rx_max_coalesced_frames);
                reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
                tw32(reg, ec->rx_max_coalesced_frames_irq);
-
-               if (tg3_flag(tp, ENABLE_TSS)) {
-                       reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
-                       tw32(reg, ec->tx_coalesce_usecs);
-                       reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
-                       tw32(reg, ec->tx_max_coalesced_frames);
-                       reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
-                       tw32(reg, ec->tx_max_coalesced_frames_irq);
-               }
        }
 
        for (; i < tp->irq_max - 1; i++) {
                tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
                tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
                tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
+       }
+}
 
-               if (tg3_flag(tp, ENABLE_TSS)) {
-                       tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
-                       tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
-                       tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
-               }
+static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
+{
+       tg3_coal_tx_init(tp, ec);
+       tg3_coal_rx_init(tp, ec);
+
+       if (!tg3_flag(tp, 5705_PLUS)) {
+               u32 val = ec->stats_block_coalesce_usecs;
+
+               tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
+               tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
+
+               if (!netif_carrier_ok(tp->dev))
+                       val = 0;
+
+               tw32(HOSTCC_STAT_COAL_TICKS, val);
        }
 }
 
@@ -8570,13 +8646,12 @@ static void __tg3_set_rx_mode(struct net_device *dev)
        }
 }
 
-static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp)
+static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
 {
        int i;
 
        for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
-               tp->rss_ind_tbl[i] =
-                       ethtool_rxfh_indir_default(i, tp->irq_cnt - 1);
+               tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
 }
 
 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
@@ -8598,7 +8673,7 @@ static void tg3_rss_check_indir_tbl(struct tg3 *tp)
        }
 
        if (i != TG3_RSS_INDIR_TBL_SIZE)
-               tg3_rss_init_dflt_indir_tbl(tp);
+               tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
 }
 
 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
@@ -9495,7 +9570,6 @@ static int tg3_init_hw(struct tg3 *tp, int reset_phy)
        return tg3_reset_hw(tp, reset_phy);
 }
 
-#if IS_ENABLED(CONFIG_HWMON)
 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
 {
        int i;
@@ -9548,22 +9622,17 @@ static const struct attribute_group tg3_group = {
        .attrs = tg3_attributes,
 };
 
-#endif
-
 static void tg3_hwmon_close(struct tg3 *tp)
 {
-#if IS_ENABLED(CONFIG_HWMON)
        if (tp->hwmon_dev) {
                hwmon_device_unregister(tp->hwmon_dev);
                tp->hwmon_dev = NULL;
                sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
        }
-#endif
 }
 
 static void tg3_hwmon_open(struct tg3 *tp)
 {
-#if IS_ENABLED(CONFIG_HWMON)
        int i, err;
        u32 size = 0;
        struct pci_dev *pdev = tp->pdev;
@@ -9595,7 +9664,6 @@ static void tg3_hwmon_open(struct tg3 *tp)
                dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
                sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
        }
-#endif
 }
 
 
@@ -10119,21 +10187,43 @@ static int tg3_request_firmware(struct tg3 *tp)
        return 0;
 }
 
-static bool tg3_enable_msix(struct tg3 *tp)
+static u32 tg3_irq_count(struct tg3 *tp)
 {
-       int i, rc;
-       struct msix_entry msix_ent[tp->irq_max];
+       u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
 
-       tp->irq_cnt = netif_get_num_default_rss_queues();
-       if (tp->irq_cnt > 1) {
+       if (irq_cnt > 1) {
                /* We want as many rx rings enabled as there are cpus.
                 * In multiqueue MSI-X mode, the first MSI-X vector
                 * only deals with link interrupts, etc, so we add
                 * one to the number of vectors we are requesting.
                 */
-               tp->irq_cnt = min_t(unsigned, tp->irq_cnt + 1, tp->irq_max);
+               irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
        }
 
+       return irq_cnt;
+}
+
+static bool tg3_enable_msix(struct tg3 *tp)
+{
+       int i, rc;
+       struct msix_entry msix_ent[tp->irq_max];
+
+       tp->txq_cnt = tp->txq_req;
+       tp->rxq_cnt = tp->rxq_req;
+       if (!tp->rxq_cnt)
+               tp->rxq_cnt = netif_get_num_default_rss_queues();
+       if (tp->rxq_cnt > tp->rxq_max)
+               tp->rxq_cnt = tp->rxq_max;
+
+       /* Disable multiple TX rings by default.  Simple round-robin hardware
+        * scheduling of the TX rings can cause starvation of rings with
+        * small packets when other rings have TSO or jumbo packets.
+        */
+       if (!tp->txq_req)
+               tp->txq_cnt = 1;
+
+       tp->irq_cnt = tg3_irq_count(tp);
+
        for (i = 0; i < tp->irq_max; i++) {
                msix_ent[i].entry  = i;
                msix_ent[i].vector = 0;
@@ -10148,27 +10238,28 @@ static bool tg3_enable_msix(struct tg3 *tp)
                netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
                              tp->irq_cnt, rc);
                tp->irq_cnt = rc;
+               tp->rxq_cnt = max(rc - 1, 1);
+               if (tp->txq_cnt)
+                       tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
        }
 
        for (i = 0; i < tp->irq_max; i++)
                tp->napi[i].irq_vec = msix_ent[i].vector;
 
-       netif_set_real_num_tx_queues(tp->dev, 1);
-       rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
-       if (netif_set_real_num_rx_queues(tp->dev, rc)) {
+       if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
                pci_disable_msix(tp->pdev);
                return false;
        }
 
-       if (tp->irq_cnt > 1) {
-               tg3_flag_set(tp, ENABLE_RSS);
+       if (tp->irq_cnt == 1)
+               return true;
 
-               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
-                   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
-                       tg3_flag_set(tp, ENABLE_TSS);
-                       netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
-               }
-       }
+       tg3_flag_set(tp, ENABLE_RSS);
+
+       if (tp->txq_cnt > 1)
+               tg3_flag_set(tp, ENABLE_TSS);
+
+       netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
 
        return true;
 }
@@ -10202,6 +10293,11 @@ defcfg:
        if (!tg3_flag(tp, USING_MSIX)) {
                tp->irq_cnt = 1;
                tp->napi[0].irq_vec = tp->pdev->irq;
+       }
+
+       if (tp->irq_cnt == 1) {
+               tp->txq_cnt = 1;
+               tp->rxq_cnt = 1;
                netif_set_real_num_tx_queues(tp->dev, 1);
                netif_set_real_num_rx_queues(tp->dev, 1);
        }
@@ -10219,38 +10315,11 @@ static void tg3_ints_fini(struct tg3 *tp)
        tg3_flag_clear(tp, ENABLE_TSS);
 }
 
-static int tg3_open(struct net_device *dev)
+static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq)
 {
-       struct tg3 *tp = netdev_priv(dev);
+       struct net_device *dev = tp->dev;
        int i, err;
 
-       if (tp->fw_needed) {
-               err = tg3_request_firmware(tp);
-               if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
-                       if (err)
-                               return err;
-               } else if (err) {
-                       netdev_warn(tp->dev, "TSO capability disabled\n");
-                       tg3_flag_clear(tp, TSO_CAPABLE);
-               } else if (!tg3_flag(tp, TSO_CAPABLE)) {
-                       netdev_notice(tp->dev, "TSO capability restored\n");
-                       tg3_flag_set(tp, TSO_CAPABLE);
-               }
-       }
-
-       netif_carrier_off(tp->dev);
-
-       err = tg3_power_up(tp);
-       if (err)
-               return err;
-
-       tg3_full_lock(tp, 0);
-
-       tg3_disable_ints(tp);
-       tg3_flag_clear(tp, INIT_COMPLETE);
-
-       tg3_full_unlock(tp);
-
        /*
         * Setup interrupts first so we know how
         * many NAPI resources to allocate
@@ -10284,7 +10353,7 @@ static int tg3_open(struct net_device *dev)
 
        tg3_full_lock(tp, 0);
 
-       err = tg3_init_hw(tp, 1);
+       err = tg3_init_hw(tp, reset_phy);
        if (err) {
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                tg3_free_rings(tp);
@@ -10295,7 +10364,7 @@ static int tg3_open(struct net_device *dev)
        if (err)
                goto err_out3;
 
-       if (tg3_flag(tp, USING_MSI)) {
+       if (test_irq && tg3_flag(tp, USING_MSI)) {
                err = tg3_test_msi(tp);
 
                if (err) {
@@ -10351,20 +10420,18 @@ err_out2:
 
 err_out1:
        tg3_ints_fini(tp);
-       tg3_frob_aux_power(tp, false);
-       pci_set_power_state(tp->pdev, PCI_D3hot);
+
        return err;
 }
 
-static int tg3_close(struct net_device *dev)
+static void tg3_stop(struct tg3 *tp)
 {
        int i;
-       struct tg3 *tp = netdev_priv(dev);
 
        tg3_napi_disable(tp);
        tg3_reset_task_cancel(tp);
 
-       netif_tx_stop_all_queues(dev);
+       netif_tx_disable(tp->dev);
 
        tg3_timer_stop(tp);
 
@@ -10389,13 +10456,60 @@ static int tg3_close(struct net_device *dev)
 
        tg3_ints_fini(tp);
 
-       /* Clear stats across close / open calls */
-       memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
-       memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
-
        tg3_napi_fini(tp);
 
        tg3_free_consistent(tp);
+}
+
+static int tg3_open(struct net_device *dev)
+{
+       struct tg3 *tp = netdev_priv(dev);
+       int err;
+
+       if (tp->fw_needed) {
+               err = tg3_request_firmware(tp);
+               if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
+                       if (err)
+                               return err;
+               } else if (err) {
+                       netdev_warn(tp->dev, "TSO capability disabled\n");
+                       tg3_flag_clear(tp, TSO_CAPABLE);
+               } else if (!tg3_flag(tp, TSO_CAPABLE)) {
+                       netdev_notice(tp->dev, "TSO capability restored\n");
+                       tg3_flag_set(tp, TSO_CAPABLE);
+               }
+       }
+
+       netif_carrier_off(tp->dev);
+
+       err = tg3_power_up(tp);
+       if (err)
+               return err;
+
+       tg3_full_lock(tp, 0);
+
+       tg3_disable_ints(tp);
+       tg3_flag_clear(tp, INIT_COMPLETE);
+
+       tg3_full_unlock(tp);
+
+       err = tg3_start(tp, true, true);
+       if (err) {
+               tg3_frob_aux_power(tp, false);
+               pci_set_power_state(tp->pdev, PCI_D3hot);
+       }
+       return err;
+}
+
+static int tg3_close(struct net_device *dev)
+{
+       struct tg3 *tp = netdev_priv(dev);
+
+       tg3_stop(tp);
+
+       /* Clear stats across close / open calls */
+       memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
+       memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
 
        tg3_power_down(tp);
 
@@ -11185,11 +11299,11 @@ static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
        switch (info->cmd) {
        case ETHTOOL_GRXRINGS:
                if (netif_running(tp->dev))
-                       info->data = tp->irq_cnt;
+                       info->data = tp->rxq_cnt;
                else {
                        info->data = num_online_cpus();
-                       if (info->data > TG3_IRQ_MAX_VECS_RSS)
-                               info->data = TG3_IRQ_MAX_VECS_RSS;
+                       if (info->data > TG3_RSS_MAX_NUM_QS)
+                               info->data = TG3_RSS_MAX_NUM_QS;
                }
 
                /* The first interrupt vector only
@@ -11246,6 +11360,58 @@ static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
        return 0;
 }
 
+static void tg3_get_channels(struct net_device *dev,
+                            struct ethtool_channels *channel)
+{
+       struct tg3 *tp = netdev_priv(dev);
+       u32 deflt_qs = netif_get_num_default_rss_queues();
+
+       channel->max_rx = tp->rxq_max;
+       channel->max_tx = tp->txq_max;
+
+       if (netif_running(dev)) {
+               channel->rx_count = tp->rxq_cnt;
+               channel->tx_count = tp->txq_cnt;
+       } else {
+               if (tp->rxq_req)
+                       channel->rx_count = tp->rxq_req;
+               else
+                       channel->rx_count = min(deflt_qs, tp->rxq_max);
+
+               if (tp->txq_req)
+                       channel->tx_count = tp->txq_req;
+               else
+                       channel->tx_count = min(deflt_qs, tp->txq_max);
+       }
+}
+
+static int tg3_set_channels(struct net_device *dev,
+                           struct ethtool_channels *channel)
+{
+       struct tg3 *tp = netdev_priv(dev);
+
+       if (!tg3_flag(tp, SUPPORT_MSIX))
+               return -EOPNOTSUPP;
+
+       if (channel->rx_count > tp->rxq_max ||
+           channel->tx_count > tp->txq_max)
+               return -EINVAL;
+
+       tp->rxq_req = channel->rx_count;
+       tp->txq_req = channel->tx_count;
+
+       if (!netif_running(dev))
+               return 0;
+
+       tg3_stop(tp);
+
+       netif_carrier_off(dev);
+
+       tg3_start(tp, true, false);
+
+       return 0;
+}
+
 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
 {
        switch (stringset) {
@@ -12494,6 +12660,8 @@ static const struct ethtool_ops tg3_ethtool_ops = {
        .get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
        .get_rxfh_indir         = tg3_get_rxfh_indir,
        .set_rxfh_indir         = tg3_set_rxfh_indir,
+       .get_channels           = tg3_get_channels,
+       .set_channels           = tg3_set_channels,
        .get_ts_info            = ethtool_op_get_ts_info,
 };
 
@@ -14510,10 +14678,20 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
                if (tg3_flag(tp, 57765_PLUS)) {
                        tg3_flag_set(tp, SUPPORT_MSIX);
                        tp->irq_max = TG3_IRQ_MAX_VECS;
-                       tg3_rss_init_dflt_indir_tbl(tp);
                }
        }
 
+       tp->txq_max = 1;
+       tp->rxq_max = 1;
+       if (tp->irq_max > 1) {
+               tp->rxq_max = TG3_RSS_MAX_NUM_QS;
+               tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
+
+               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
+                   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
+                       tp->txq_max = tp->irq_max - 1;
+       }
+
        if (tg3_flag(tp, 5755_PLUS) ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
                tg3_flag_set(tp, SHORT_DMA_BUG);
index 6d52cb28682674b6bab84f39450b8149eb0ca569..d9308c32102e998fb22c45f9ee5c21482286072a 100644 (file)
@@ -2860,7 +2860,8 @@ struct tg3_rx_prodring_set {
        dma_addr_t                      rx_jmb_mapping;
 };
 
-#define TG3_IRQ_MAX_VECS_RSS           5
+#define TG3_RSS_MAX_NUM_QS             4
+#define TG3_IRQ_MAX_VECS_RSS           (TG3_RSS_MAX_NUM_QS + 1)
 #define TG3_IRQ_MAX_VECS               TG3_IRQ_MAX_VECS_RSS
 
 struct tg3_napi {
@@ -3037,6 +3038,9 @@ struct tg3 {
        void                            (*write32_tx_mbox) (struct tg3 *, u32,
                                                            u32);
        u32                             dma_limit;
+       u32                             txq_req;
+       u32                             txq_cnt;
+       u32                             txq_max;
 
        /* begin "rx thread" cacheline section */
        struct tg3_napi                 napi[TG3_IRQ_MAX_VECS];
@@ -3051,6 +3055,9 @@ struct tg3 {
        u32                             rx_std_max_post;
        u32                             rx_offset;
        u32                             rx_pkt_map_sz;
+       u32                             rxq_req;
+       u32                             rxq_cnt;
+       u32                             rxq_max;
        bool                            rx_refill;
 
 
index b441f33258e7c48c2cd1ce5afd5a2c021cbde98a..ce1eac529470de89cdbc2107a649affbfedb3761 100644 (file)
@@ -3268,6 +3268,7 @@ bnad_pci_probe(struct pci_dev *pdev,
         *      Output : using_dac = 1 for 64 bit DMA
         *                         = 0 for 32 bit DMA
         */
+       using_dac = false;
        err = bnad_pci_init(bnad, pdev, &using_dac);
        if (err)
                goto unlock_mutex;
index 875bbb999aa24d1e786e05da4548f9cfa6142a33..9c9f3260344a346175c149824c249cd3ebf72be1 100644 (file)
@@ -1394,7 +1394,7 @@ static int offload_close(struct t3cdev *tdev)
        sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);
 
        /* Flush work scheduled while releasing TIDs */
-       flush_work_sync(&td->tid_release_task);
+       flush_work(&td->tid_release_task);
 
        tdev->lldev = NULL;
        cxgb3_set_dummy_ops(tdev);
index ec2dafe8ae5bbb15b0bfaad2be9c5cd4bd2bf2a7..745a1f53361f379b9c075d190f7919c0f0866e4c 100644 (file)
@@ -67,12 +67,12 @@ enum {
 };
 
 enum {
-       MEMWIN0_APERTURE = 65536,
-       MEMWIN0_BASE     = 0x30000,
+       MEMWIN0_APERTURE = 2048,
+       MEMWIN0_BASE     = 0x1b800,
        MEMWIN1_APERTURE = 32768,
        MEMWIN1_BASE     = 0x28000,
-       MEMWIN2_APERTURE = 2048,
-       MEMWIN2_BASE     = 0x1b800,
+       MEMWIN2_APERTURE = 65536,
+       MEMWIN2_BASE     = 0x30000,
 };
 
 enum dev_master {
@@ -211,6 +211,9 @@ struct tp_err_stats {
 struct tp_params {
        unsigned int ntxchan;        /* # of Tx channels */
        unsigned int tre;            /* log2 of core clocks per TP tick */
+
+       uint32_t dack_re;            /* DACK timer resolution */
+       unsigned short tx_modq[NCHAN];  /* channel to modulation queue map */
 };
 
 struct vpd_params {
@@ -315,6 +318,10 @@ enum {                                 /* adapter flags */
        USING_MSI          = (1 << 1),
        USING_MSIX         = (1 << 2),
        FW_OK              = (1 << 4),
+       RSS_TNLALLLOOKUP   = (1 << 5),
+       USING_SOFT_PARAMS  = (1 << 6),
+       MASTER_PF          = (1 << 7),
+       FW_OFLD_CONN       = (1 << 9),
 };
 
 struct rx_sw_desc;
@@ -467,6 +474,11 @@ struct sge {
        u16 rdma_rxq[NCHAN];
        u16 timer_val[SGE_NTIMERS];
        u8 counter_val[SGE_NCOUNTERS];
+       u32 fl_pg_order;            /* large page allocation size */
+       u32 stat_len;               /* length of status page at ring end */
+       u32 pktshift;               /* padding between CPL & packet data */
+       u32 fl_align;               /* response queue message alignment */
+       u32 fl_starve_thres;        /* Free List starvation threshold */
        unsigned int starve_thres;
        u8 idma_state[2];
        unsigned int egr_start;
@@ -511,6 +523,8 @@ struct adapter {
        struct net_device *port[MAX_NPORTS];
        u8 chan_map[NCHAN];                   /* channel -> port map */
 
+       unsigned int l2t_start;
+       unsigned int l2t_end;
        struct l2t_data *l2t;
        void *uld_handle[CXGB4_ULD_MAX];
        struct list_head list_node;
@@ -619,7 +633,7 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
 int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
                          struct net_device *dev, unsigned int iqid);
 irqreturn_t t4_sge_intr_msix(int irq, void *cookie);
-void t4_sge_init(struct adapter *adap);
+int t4_sge_init(struct adapter *adap);
 void t4_sge_start(struct adapter *adap);
 void t4_sge_stop(struct adapter *adap);
 extern int dbfifo_int_thresh;
@@ -638,6 +652,14 @@ static inline unsigned int us_to_core_ticks(const struct adapter *adap,
        return (us * adap->params.vpd.cclk) / 1000;
 }
 
+static inline unsigned int core_ticks_to_us(const struct adapter *adapter,
+                                           unsigned int ticks)
+{
+       /* add Core Clock / 2 to round ticks to nearest uS */
+       return ((ticks * 1000 + adapter->params.vpd.cclk/2) /
+               adapter->params.vpd.cclk);
+}
+
 void t4_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask,
                      u32 val);
 
@@ -656,6 +678,9 @@ static inline int t4_wr_mbox_ns(struct adapter *adap, int mbox, const void *cmd,
        return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, false);
 }
 
+void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
+                      unsigned int data_reg, const u32 *vals,
+                      unsigned int nregs, unsigned int start_idx);
 void t4_intr_enable(struct adapter *adapter);
 void t4_intr_disable(struct adapter *adapter);
 int t4_slow_intr_handler(struct adapter *adapter);
@@ -664,8 +689,12 @@ int t4_wait_dev_ready(struct adapter *adap);
 int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
                  struct link_config *lc);
 int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port);
+int t4_memory_write(struct adapter *adap, int mtype, u32 addr, u32 len,
+                   __be32 *buf);
 int t4_seeprom_wp(struct adapter *adapter, bool enable);
+int get_vpd_params(struct adapter *adapter, struct vpd_params *p);
 int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size);
+unsigned int t4_flash_cfg_addr(struct adapter *adapter);
 int t4_check_fw_version(struct adapter *adapter);
 int t4_prep_adapter(struct adapter *adapter);
 int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
@@ -680,6 +709,8 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
 
 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p);
 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log);
+void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
+                           unsigned int mask, unsigned int val);
 void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
                         struct tp_tcp_stats *v6);
 void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
@@ -695,6 +726,16 @@ int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
 int t4_fw_bye(struct adapter *adap, unsigned int mbox);
 int t4_early_init(struct adapter *adap, unsigned int mbox);
 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset);
+int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force);
+int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset);
+int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
+                 const u8 *fw_data, unsigned int size, int force);
+int t4_fw_config_file(struct adapter *adap, unsigned int mbox,
+                     unsigned int mtype, unsigned int maddr,
+                     u32 *finiver, u32 *finicsum, u32 *cfcsum);
+int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
+                         unsigned int cache_line_size);
+int t4_fw_initialize(struct adapter *adap, unsigned int mbox);
 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
                    unsigned int vf, unsigned int nparams, const u32 *params,
                    u32 *val);
index 933985420acbbfc7d0503bba1913125961963ab1..6b9f6bb2f7edb9e505c69f8ce40507b2093df131 100644 (file)
  */
 #define MAX_SGE_TIMERVAL 200U
 
-#ifdef CONFIG_PCI_IOV
-/*
- * Virtual Function provisioning constants.  We need two extra Ingress Queues
- * with Interrupt capability to serve as the VF's Firmware Event Queue and
- * Forwarded Interrupt Queue (when using MSI mode) -- neither will have Free
- * Lists associated with them).  For each Ethernet/Control Egress Queue and
- * for each Free List, we need an Egress Context.
- */
 enum {
+       /*
+        * Physical Function provisioning constants.
+        */
+       PFRES_NVI = 4,                  /* # of Virtual Interfaces */
+       PFRES_NETHCTRL = 128,           /* # of EQs used for ETH or CTRL Qs */
+       PFRES_NIQFLINT = 128,           /* # of ingress Qs/w Free List(s)/intr
+                                        */
+       PFRES_NEQ = 256,                /* # of egress queues */
+       PFRES_NIQ = 0,                  /* # of ingress queues */
+       PFRES_TC = 0,                   /* PCI-E traffic class */
+       PFRES_NEXACTF = 128,            /* # of exact MPS filters */
+
+       PFRES_R_CAPS = FW_CMD_CAP_PF,
+       PFRES_WX_CAPS = FW_CMD_CAP_PF,
+
+#ifdef CONFIG_PCI_IOV
+       /*
+        * Virtual Function provisioning constants.  We need two extra Ingress
+        * Queues with Interrupt capability to serve as the VF's Firmware
+        * Event Queue and Forwarded Interrupt Queue (when using MSI mode) --
+        * neither will have Free Lists associated with them).  For each
+        * Ethernet/Control Egress Queue and for each Free List, we need an
+        * Egress Context.
+        */
        VFRES_NPORTS = 1,               /* # of "ports" per VF */
        VFRES_NQSETS = 2,               /* # of "Queue Sets" per VF */
 
        VFRES_NVI = VFRES_NPORTS,       /* # of Virtual Interfaces */
        VFRES_NETHCTRL = VFRES_NQSETS,  /* # of EQs used for ETH or CTRL Qs */
        VFRES_NIQFLINT = VFRES_NQSETS+2,/* # of ingress Qs/w Free List(s)/intr */
-       VFRES_NIQ = 0,                  /* # of non-fl/int ingress queues */
        VFRES_NEQ = VFRES_NQSETS*2,     /* # of egress queues */
+       VFRES_NIQ = 0,                  /* # of non-fl/int ingress queues */
        VFRES_TC = 0,                   /* PCI-E traffic class */
        VFRES_NEXACTF = 16,             /* # of exact MPS filters */
 
        VFRES_R_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF|FW_CMD_CAP_PORT,
        VFRES_WX_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF,
+#endif
 };
 
 /*
@@ -146,7 +163,6 @@ static unsigned int pfvfres_pmask(struct adapter *adapter,
        }
        /*NOTREACHED*/
 }
-#endif
 
 enum {
        MAX_TXQ_ENTRIES      = 16384,
@@ -193,6 +209,7 @@ static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
 };
 
 #define FW_FNAME "cxgb4/t4fw.bin"
+#define FW_CFNAME "cxgb4/t4-config.txt"
 
 MODULE_DESCRIPTION(DRV_DESC);
 MODULE_AUTHOR("Chelsio Communications");
@@ -201,6 +218,28 @@ MODULE_VERSION(DRV_VERSION);
 MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
 MODULE_FIRMWARE(FW_FNAME);
 
+/*
+ * Normally we're willing to become the firmware's Master PF but will be happy
+ * if another PF has already become the Master and initialized the adapter.
+ * Setting "force_init" will cause this driver to forcibly establish itself as
+ * the Master PF and initialize the adapter.
+ */
+static uint force_init;
+
+module_param(force_init, uint, 0644);
+MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter");
+
+/*
+ * Normally if the firmware we connect to has Configuration File support, we
+ * use that and only fall back to the old Driver-based initialization if the
+ * Configuration File fails for some reason.  If force_old_init is set, then
+ * we'll always use the old Driver-based initialization sequence.
+ */
+static uint force_old_init;
+
+module_param(force_old_init, uint, 0644);
+MODULE_PARM_DESC(force_old_init, "Force old initialization sequence");
+
 static int dflt_msg_enable = DFLT_MSG_ENABLE;
 
 module_param(dflt_msg_enable, int, 0644);
@@ -236,6 +275,20 @@ module_param_array(intr_cnt, uint, NULL, 0644);
 MODULE_PARM_DESC(intr_cnt,
                 "thresholds 1..3 for queue interrupt packet counters");
 
+/*
+ * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
+ * offset by 2 bytes in order to have the IP headers line up on 4-byte
+ * boundaries.  This is a requirement for many architectures which will throw
+ * a machine check fault if an attempt is made to access one of the 4-byte IP
+ * header fields on a non-4-byte boundary.  And it's a major performance issue
+ * even on some architectures which allow it like some implementations of the
+ * x86 ISA.  However, some architectures don't mind this and for some very
+ * edge-case performance sensitive applications (like forwarding large volumes
+ * of small packets), setting this DMA offset to 0 will decrease the number of
+ * PCI-E Bus transfers enough to measurably affect performance.
+ */
+static int rx_dma_offset = 2;
+
 static bool vf_acls;
 
 #ifdef CONFIG_PCI_IOV
@@ -248,6 +301,30 @@ module_param_array(num_vf, uint, NULL, 0644);
 MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
 #endif
 
+/*
+ * The filter TCAM has a fixed portion and a variable portion.  The fixed
+ * portion can match on source/destination IP IPv4/IPv6 addresses and TCP/UDP
+ * ports.  The variable portion is 36 bits which can include things like Exact
+ * Match MAC Index (9 bits), Ether Type (16 bits), IP Protocol (8 bits),
+ * [Inner] VLAN Tag (17 bits), etc. which, if all were somehow selected, would
+ * far exceed the 36-bit budget for this "compressed" header portion of the
+ * filter.  Thus, we have a scarce resource which must be carefully managed.
+ *
+ * By default we set this up to mostly match the set of filter matching
+ * capabilities of T3 but with accommodations for some of T4's more
+ * interesting features:
+ *
+ *   { IP Fragment (1), MPS Match Type (3), IP Protocol (8),
+ *     [Inner] VLAN (17), Port (3), FCoE (1) }
+ */
+enum {
+       TP_VLAN_PRI_MAP_DEFAULT = HW_TPL_FR_MT_PR_IV_P_FC,
+       TP_VLAN_PRI_MAP_FIRST = FCOE_SHIFT,
+       TP_VLAN_PRI_MAP_LAST = FRAGMENTATION_SHIFT,
+};
+
+static unsigned int tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
+
 static struct dentry *cxgb4_debugfs_root;
 
 static LIST_HEAD(adapter_list);
@@ -852,11 +929,25 @@ static int upgrade_fw(struct adapter *adap)
         */
        if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != FW_VERSION_MAJOR ||
            vers > adap->params.fw_vers) {
-               ret = -t4_load_fw(adap, fw->data, fw->size);
+               dev_info(dev, "upgrading firmware ...\n");
+               ret = t4_fw_upgrade(adap, adap->mbox, fw->data, fw->size,
+                                   /*force=*/false);
                if (!ret)
-                       dev_info(dev, "firmware upgraded to version %pI4 from "
-                                FW_FNAME "\n", &hdr->fw_ver);
+                       dev_info(dev, "firmware successfully upgraded to "
+                                FW_FNAME " (%d.%d.%d.%d)\n",
+                                FW_HDR_FW_VER_MAJOR_GET(vers),
+                                FW_HDR_FW_VER_MINOR_GET(vers),
+                                FW_HDR_FW_VER_MICRO_GET(vers),
+                                FW_HDR_FW_VER_BUILD_GET(vers));
+               else
+                       dev_err(dev, "firmware upgrade failed! err=%d\n", -ret);
+       } else {
+               /*
+                * Tell our caller that we didn't upgrade the firmware.
+                */
+               ret = -EINVAL;
        }
+
 out:   release_firmware(fw);
        return ret;
 }
@@ -2470,8 +2561,8 @@ int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
                else
                        delta = size - hw_pidx + pidx;
                wmb();
-               t4_write_reg(adap, MYPF_REG(A_SGE_PF_KDOORBELL),
-                            V_QID(qid) | V_PIDX(delta));
+               t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
+                            QID(qid) | PIDX(delta));
        }
 out:
        return ret;
@@ -2579,8 +2670,8 @@ static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
                else
                        delta = q->size - hw_pidx + q->db_pidx;
                wmb();
-               t4_write_reg(adap, MYPF_REG(A_SGE_PF_KDOORBELL),
-                               V_QID(q->cntxt_id) | V_PIDX(delta));
+               t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
+                            QID(q->cntxt_id) | PIDX(delta));
        }
 out:
        q->db_disabled = 0;
@@ -2617,9 +2708,9 @@ static void process_db_full(struct work_struct *work)
 
        notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
        drain_db_fifo(adap, dbfifo_drain_delay);
-       t4_set_reg_field(adap, A_SGE_INT_ENABLE3,
-                       F_DBFIFO_HP_INT | F_DBFIFO_LP_INT,
-                       F_DBFIFO_HP_INT | F_DBFIFO_LP_INT);
+       t4_set_reg_field(adap, SGE_INT_ENABLE3,
+                        DBFIFO_HP_INT | DBFIFO_LP_INT,
+                        DBFIFO_HP_INT | DBFIFO_LP_INT);
        notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
 }
 
@@ -2639,8 +2730,8 @@ static void process_db_drop(struct work_struct *work)
 
 void t4_db_full(struct adapter *adap)
 {
-       t4_set_reg_field(adap, A_SGE_INT_ENABLE3,
-                       F_DBFIFO_HP_INT | F_DBFIFO_LP_INT, 0);
+       t4_set_reg_field(adap, SGE_INT_ENABLE3,
+                        DBFIFO_HP_INT | DBFIFO_LP_INT, 0);
        queue_work(workq, &adap->db_full_task);
 }
 
@@ -3076,6 +3167,10 @@ static void setup_memwin(struct adapter *adap)
        t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
                     (bar0 + MEMWIN2_BASE) | BIR(0) |
                     WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
+}
+
+static void setup_memwin_rdma(struct adapter *adap)
+{
        if (adap->vres.ocq.size) {
                unsigned int start, sz_kb;
 
@@ -3153,6 +3248,488 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
  */
 #define MAX_ATIDS 8192U
 
+/*
+ * Phase 0 of initialization: contact FW, obtain config, perform basic init.
+ *
+ * If the firmware we're dealing with has Configuration File support, then
+ * we use that to perform all configuration
+ */
+
+/*
+ * Tweak configuration based on module parameters, etc.  Most of these have
+ * defaults assigned to them by Firmware Configuration Files (if we're using
+ * them) but need to be explicitly set if we're using hard-coded
+ * initialization.  But even in the case of using Firmware Configuration
+ * Files, we'd like to expose the ability to change these via module
+ * parameters so these are essentially common tweaks/settings for
+ * Configuration Files and hard-coded initialization ...
+ */
+static int adap_init0_tweaks(struct adapter *adapter)
+{
+       /*
+        * Fix up various Host-Dependent Parameters like Page Size, Cache
+        * Line Size, etc.  The firmware default is for a 4KB Page Size and
+        * 64B Cache Line Size ...
+        */
+       t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);
+
+       /*
+        * Process module parameters which affect early initialization.
+        */
+       if (rx_dma_offset != 2 && rx_dma_offset != 0) {
+               dev_err(&adapter->pdev->dev,
+                       "Ignoring illegal rx_dma_offset=%d, using 2\n",
+                       rx_dma_offset);
+               rx_dma_offset = 2;
+       }
+       t4_set_reg_field(adapter, SGE_CONTROL,
+                        PKTSHIFT_MASK,
+                        PKTSHIFT(rx_dma_offset));
+
+       /*
+        * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
+        * adds the pseudo header itself.
+        */
+       t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG,
+                              CSUM_HAS_PSEUDO_HDR, 0);
+
+       return 0;
+}
+
+/*
+ * Attempt to initialize the adapter via a Firmware Configuration File.
+ *
+ * @adapter: the adapter being initialized
+ * @reset: non-zero if the device should be reset before configuration
+ *
+ * Returns 0 on success or a negative errno on failure.  -ENOENT indicates
+ * that no Configuration File was found; callers treat that as a common,
+ * non-fatal condition and fall back to hard-wired initialization.
+ */
+static int adap_init0_config(struct adapter *adapter, int reset)
+{
+       struct fw_caps_config_cmd caps_cmd;
+       const struct firmware *cf;
+       unsigned long mtype = 0, maddr = 0;
+       u32 finiver, finicsum, cfcsum;
+       int ret, using_flash;
+
+       /*
+        * Reset device if necessary.
+        */
+       if (reset) {
+               ret = t4_fw_reset(adapter, adapter->mbox,
+                                 PIORSTMODE | PIORST);
+               if (ret < 0)
+                       goto bye;
+       }
+
+       /*
+        * If we have a T4 configuration file under /lib/firmware/cxgb4/,
+        * then use that.  Otherwise, use the configuration file stored
+        * in the adapter flash ...
+        *
+        * In either case mtype/maddr end up describing where the firmware
+        * should read the Configuration File image from (memory type and
+        * address), and using_flash records the source for the final
+        * success message.
+        */
+       ret = request_firmware(&cf, FW_CFNAME, adapter->pdev_dev);
+       if (ret < 0) {
+               using_flash = 1;
+               mtype = FW_MEMTYPE_CF_FLASH;
+               maddr = t4_flash_cfg_addr(adapter);
+       } else {
+               u32 params[7], val[7];
+
+               using_flash = 0;
+               if (cf->size >= FLASH_CFG_MAX_SIZE)
+                       ret = -ENOMEM;
+               else {
+                       params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+                            FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
+                       ret = t4_query_params(adapter, adapter->mbox,
+                                             adapter->fn, 0, 1, params, val);
+                       if (ret == 0) {
+                               /*
+                                * For t4_memory_write() below addresses and
+                                * sizes have to be in terms of multiples of 4
+                                * bytes.  So, if the Configuration File isn't
+                                * a multiple of 4 bytes in length we'll have
+                                * to write that out separately since we can't
+                                * guarantee that the bytes following the
+                                * residual byte in the buffer returned by
+                                * request_firmware() are zeroed out ...
+                                */
+                               size_t resid = cf->size & 0x3;
+                               size_t size = cf->size & ~0x3;
+                               __be32 *data = (__be32 *)cf->data;
+
+                               mtype = FW_PARAMS_PARAM_Y_GET(val[0]);
+                               maddr = FW_PARAMS_PARAM_Z_GET(val[0]) << 16;
+
+                               ret = t4_memory_write(adapter, mtype, maddr,
+                                                     size, data);
+                               if (ret == 0 && resid != 0) {
+                                       union {
+                                               __be32 word;
+                                               char buf[4];
+                                       } last;
+                                       int i;
+
+                                       /* Zero-pad the trailing partial word
+                                        * before writing it out. */
+                                       last.word = data[size >> 2];
+                                       for (i = resid; i < 4; i++)
+                                               last.buf[i] = 0;
+                                       ret = t4_memory_write(adapter, mtype,
+                                                             maddr + size,
+                                                             4, &last.word);
+                               }
+                       }
+               }
+
+               release_firmware(cf);
+               if (ret)
+                       goto bye;
+       }
+
+       /*
+        * Issue a Capability Configuration command to the firmware to get it
+        * to parse the Configuration File.  We don't use t4_fw_config_file()
+        * because we want the ability to modify various features after we've
+        * processed the configuration file ...
+        */
+       memset(&caps_cmd, 0, sizeof(caps_cmd));
+       caps_cmd.op_to_write =
+               htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
+                     FW_CMD_REQUEST |
+                     FW_CMD_READ);
+       caps_cmd.retval_len16 =
+               htonl(FW_CAPS_CONFIG_CMD_CFVALID |
+                     FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
+                     FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
+                     FW_LEN16(caps_cmd));
+       ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
+                        &caps_cmd);
+       if (ret < 0)
+               goto bye;
+
+       /* A checksum mismatch between the [fini] section and the computed
+        * value is reported but deliberately not treated as fatal. */
+       finiver = ntohl(caps_cmd.finiver);
+       finicsum = ntohl(caps_cmd.finicsum);
+       cfcsum = ntohl(caps_cmd.cfcsum);
+       if (finicsum != cfcsum)
+               dev_warn(adapter->pdev_dev, "Configuration File checksum "\
+                        "mismatch: [fini] csum=%#x, computed csum=%#x\n",
+                        finicsum, cfcsum);
+
+       /*
+        * If we're a pure NIC driver then disable all offloading facilities.
+        * This will allow the firmware to optimize aspects of the hardware
+        * configuration which will result in improved performance.
+        */
+       caps_cmd.ofldcaps = 0;
+       caps_cmd.iscsicaps = 0;
+       caps_cmd.rdmacaps = 0;
+       caps_cmd.fcoecaps = 0;
+
+       /*
+        * And now tell the firmware to use the configuration we just loaded.
+        */
+       caps_cmd.op_to_write =
+               htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
+                     FW_CMD_REQUEST |
+                     FW_CMD_WRITE);
+       caps_cmd.retval_len16 = htonl(FW_LEN16(caps_cmd));
+       ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
+                        NULL);
+       if (ret < 0)
+               goto bye;
+
+       /*
+        * Tweak configuration based on system architecture, module
+        * parameters, etc.
+        */
+       ret = adap_init0_tweaks(adapter);
+       if (ret < 0)
+               goto bye;
+
+       /*
+        * And finally tell the firmware to initialize itself using the
+        * parameters from the Configuration File.
+        */
+       ret = t4_fw_initialize(adapter, adapter->mbox);
+       if (ret < 0)
+               goto bye;
+
+       /*
+        * Return successfully and note that we're operating with parameters
+        * not supplied by the driver, rather than from hard-wired
+        * initialization constants buried in the driver.
+        */
+       adapter->flags |= USING_SOFT_PARAMS;
+       dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
+                "Configuration File %s, version %#x, computed checksum %#x\n",
+                (using_flash
+                 ? "in device FLASH"
+                 : "/lib/firmware/" FW_CFNAME),
+                finiver, cfcsum);
+       return 0;
+
+       /*
+        * Something bad happened.  Return the error ...  (If the "error"
+        * is that there's no Configuration File on the adapter we don't
+        * want to issue a warning since this is fairly common.)
+        */
+bye:
+       if (ret != -ENOENT)
+               dev_warn(adapter->pdev_dev, "Configuration file error %d\n",
+                        -ret);
+       return ret;
+}
+
+/*
+ * Attempt to initialize the adapter via hard-coded, driver supplied
+ * parameters ...
+ *
+ * @adapter: the adapter being initialized
+ * @reset: non-zero if the device should be reset before configuration
+ *
+ * Returns 0 on success or a negative errno on failure.
+ */
+static int adap_init0_no_config(struct adapter *adapter, int reset)
+{
+       struct sge *s = &adapter->sge;
+       struct fw_caps_config_cmd caps_cmd;
+       u32 v;
+       int i, ret;
+
+       /*
+        * Reset device if necessary
+        */
+       if (reset) {
+               ret = t4_fw_reset(adapter, adapter->mbox,
+                                 PIORSTMODE | PIORST);
+               if (ret < 0)
+                       goto bye;
+       }
+
+       /*
+        * Get device capabilities and select which we'll be using.
+        */
+       memset(&caps_cmd, 0, sizeof(caps_cmd));
+       caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
+                                    FW_CMD_REQUEST | FW_CMD_READ);
+       caps_cmd.retval_len16 = htonl(FW_LEN16(caps_cmd));
+       ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
+                        &caps_cmd);
+       if (ret < 0)
+               goto bye;
+
+#ifndef CONFIG_CHELSIO_T4_OFFLOAD
+       /*
+        * If we're a pure NIC driver then disable all offloading facilities.
+        * This will allow the firmware to optimize aspects of the hardware
+        * configuration which will result in improved performance.
+        */
+       caps_cmd.ofldcaps = 0;
+       caps_cmd.iscsicaps = 0;
+       caps_cmd.rdmacaps = 0;
+       caps_cmd.fcoecaps = 0;
+#endif
+
+       if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
+               if (!vf_acls)
+                       caps_cmd.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
+               else
+                       caps_cmd.niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
+       } else if (vf_acls) {
+               dev_err(adapter->pdev_dev, "virtualization ACLs not supported");
+               /* ret is 0 here from the successful mailbox command above;
+                * set a real error so the caller doesn't see success. */
+               ret = -EINVAL;
+               goto bye;
+       }
+       caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
+                             FW_CMD_REQUEST | FW_CMD_WRITE);
+       ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
+                        NULL);
+       if (ret < 0)
+               goto bye;
+
+       /*
+        * Tweak configuration based on system architecture, module
+        * parameters, etc.
+        */
+       ret = adap_init0_tweaks(adapter);
+       if (ret < 0)
+               goto bye;
+
+       /*
+        * Select RSS Global Mode we want to use.  We use "Basic Virtual"
+        * mode which maps each Virtual Interface to its own section of
+        * the RSS Table and we turn on all map and hash enables ...
+        */
+       adapter->flags |= RSS_TNLALLLOOKUP;
+       ret = t4_config_glbl_rss(adapter, adapter->mbox,
+                                FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
+                                FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
+                                FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ |
+                                ((adapter->flags & RSS_TNLALLLOOKUP) ?
+                                       FW_RSS_GLB_CONFIG_CMD_TNLALLLKP : 0));
+       if (ret < 0)
+               goto bye;
+
+       /*
+        * Set up our own fundamental resource provisioning ...
+        */
+       ret = t4_cfg_pfvf(adapter, adapter->mbox, adapter->fn, 0,
+                         PFRES_NEQ, PFRES_NETHCTRL,
+                         PFRES_NIQFLINT, PFRES_NIQ,
+                         PFRES_TC, PFRES_NVI,
+                         FW_PFVF_CMD_CMASK_MASK,
+                         pfvfres_pmask(adapter, adapter->fn, 0),
+                         PFRES_NEXACTF,
+                         PFRES_R_CAPS, PFRES_WX_CAPS);
+       if (ret < 0)
+               goto bye;
+
+       /*
+        * Perform low level SGE initialization.  We need to do this before we
+        * send the firmware the INITIALIZE command because that will cause
+        * any other PF Drivers which are waiting for the Master
+        * Initialization to proceed forward.
+        */
+       for (i = 0; i < SGE_NTIMERS - 1; i++)
+               s->timer_val[i] = min(intr_holdoff[i], MAX_SGE_TIMERVAL);
+       s->timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
+       s->counter_val[0] = 1;
+       for (i = 1; i < SGE_NCOUNTERS; i++)
+               s->counter_val[i] = min(intr_cnt[i - 1],
+                                       THRESHOLD_0_GET(THRESHOLD_0_MASK));
+       t4_sge_init(adapter);
+
+#ifdef CONFIG_PCI_IOV
+       /*
+        * Provision resource limits for Virtual Functions.  We currently
+        * grant them all the same static resource limits except for the Port
+        * Access Rights Mask which we're assigning based on the PF.  All of
+        * the static provisioning stuff for both the PF and VF really needs
+        * to be managed in a persistent manner for each device which the
+        * firmware controls.
+        */
+       {
+               int pf, vf;
+
+               for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) {
+                       if (num_vf[pf] <= 0)
+                               continue;
+
+                       /* VF numbering starts at 1! */
+                       for (vf = 1; vf <= num_vf[pf]; vf++) {
+                               ret = t4_cfg_pfvf(adapter, adapter->mbox,
+                                                 pf, vf,
+                                                 VFRES_NEQ, VFRES_NETHCTRL,
+                                                 VFRES_NIQFLINT, VFRES_NIQ,
+                                                 VFRES_TC, VFRES_NVI,
+                                                 FW_PFVF_CMD_CMASK_GET(
+                                                 FW_PFVF_CMD_CMASK_MASK),
+                                                 pfvfres_pmask(
+                                                 adapter, pf, vf),
+                                                 VFRES_NEXACTF,
+                                                 VFRES_R_CAPS, VFRES_WX_CAPS);
+                               if (ret < 0)
+                                       dev_warn(adapter->pdev_dev,
+                                                "failed to "\
+                                                "provision pf/vf=%d/%d; "
+                                                "err=%d\n", pf, vf, ret);
+                       }
+               }
+       }
+#endif
+
+       /*
+        * Set up the default filter mode.  Later we'll want to implement this
+        * via a firmware command, etc. ...  This needs to be done before the
+        * firmware initialization command ...  If the selected set of fields
+        * isn't equal to the default value, we'll need to make sure that the
+        * field selections will fit in the 36-bit budget.
+        */
+       if (tp_vlan_pri_map != TP_VLAN_PRI_MAP_DEFAULT) {
+               int i, bits = 0;
+
+               for (i = TP_VLAN_PRI_MAP_FIRST; i <= TP_VLAN_PRI_MAP_LAST; i++)
+                       switch (tp_vlan_pri_map & (1 << i)) {
+                       case 0:
+                               /* compressed filter field not enabled */
+                               break;
+                       case FCOE_MASK:
+                               bits +=  1;
+                               break;
+                       case PORT_MASK:
+                               bits +=  3;
+                               break;
+                       case VNIC_ID_MASK:
+                               bits += 17;
+                               break;
+                       case VLAN_MASK:
+                               bits += 17;
+                               break;
+                       case TOS_MASK:
+                               bits +=  8;
+                               break;
+                       case PROTOCOL_MASK:
+                               bits +=  8;
+                               break;
+                       case ETHERTYPE_MASK:
+                               bits += 16;
+                               break;
+                       case MACMATCH_MASK:
+                               bits +=  9;
+                               break;
+                       case MPSHITTYPE_MASK:
+                               bits +=  3;
+                               break;
+                       case FRAGMENTATION_MASK:
+                               bits +=  1;
+                               break;
+                       }
+
+               if (bits > 36) {
+                       dev_err(adapter->pdev_dev,
+                               "tp_vlan_pri_map=%#x needs %d bits > 36;"\
+                               " using %#x\n", tp_vlan_pri_map, bits,
+                               TP_VLAN_PRI_MAP_DEFAULT);
+                       tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
+               }
+       }
+       v = tp_vlan_pri_map;
+       t4_write_indirect(adapter, TP_PIO_ADDR, TP_PIO_DATA,
+                         &v, 1, TP_VLAN_PRI_MAP);
+
+       /*
+        * We need Five Tuple Lookup mode to be set in TP_GLOBAL_CONFIG order
+        * to support any of the compressed filter fields above.  Newer
+        * versions of the firmware do this automatically but it doesn't hurt
+        * to set it here.  Meanwhile, we do _not_ need to set Lookup Every
+        * Packet in TP_INGRESS_CONFIG to support matching non-TCP packets
+        * since the firmware automatically turns this on and off when we have
+        * a non-zero number of filters active (since it does have a
+        * performance impact).
+        */
+       if (tp_vlan_pri_map)
+               t4_set_reg_field(adapter, TP_GLOBAL_CONFIG,
+                                FIVETUPLELOOKUP_MASK,
+                                FIVETUPLELOOKUP_MASK);
+
+       /*
+        * Tweak some settings.
+        */
+       t4_write_reg(adapter, TP_SHIFT_CNT, SYNSHIFTMAX(6) |
+                    RXTSHIFTMAXR1(4) | RXTSHIFTMAXR2(15) |
+                    PERSHIFTBACKOFFMAX(8) | PERSHIFTMAX(8) |
+                    KEEPALIVEMAXR1(4) | KEEPALIVEMAXR2(9));
+
+       /*
+        * Get basic stuff going by issuing the Firmware Initialize command.
+        * Note that this _must_ be after all PFVF commands ...
+        */
+       ret = t4_fw_initialize(adapter, adapter->mbox);
+       if (ret < 0)
+               goto bye;
+
+       /*
+        * Return successfully!
+        */
+       dev_info(adapter->pdev_dev, "Successfully configured using built-in "\
+                "driver parameters\n");
+       return 0;
+
+       /*
+        * Something bad happened.  Return the error ...
+        */
+bye:
+       return ret;
+}
+
 /*
  * Phase 0 of initialization: contact FW, obtain config, perform basic init.
  */
@@ -3162,72 +3739,216 @@ static int adap_init0(struct adapter *adap)
        u32 v, port_vec;
        enum dev_state state;
        u32 params[7], val[7];
-       struct fw_caps_config_cmd c;
-
-       ret = t4_check_fw_version(adap);
-       if (ret == -EINVAL || ret > 0) {
-               if (upgrade_fw(adap) >= 0)             /* recache FW version */
-                       ret = t4_check_fw_version(adap);
-       }
-       if (ret < 0)
-               return ret;
+       int reset = 1, j;
 
-       /* contact FW, request master */
-       ret = t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, &state);
+       /*
+        * Contact FW, advertising Master capability (and potentially forcing
+        * ourselves as the Master PF if our module parameter force_init is
+        * set).
+        */
+       ret = t4_fw_hello(adap, adap->mbox, adap->fn,
+                         force_init ? MASTER_MUST : MASTER_MAY,
+                         &state);
        if (ret < 0) {
                dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
                        ret);
                return ret;
        }
+       if (ret == adap->mbox)
+               adap->flags |= MASTER_PF;
+       if (force_init && state == DEV_STATE_INIT)
+               state = DEV_STATE_UNINIT;
 
-       /* reset device */
-       ret = t4_fw_reset(adap, adap->fn, PIORSTMODE | PIORST);
-       if (ret < 0)
-               goto bye;
-
-       for (v = 0; v < SGE_NTIMERS - 1; v++)
-               adap->sge.timer_val[v] = min(intr_holdoff[v], MAX_SGE_TIMERVAL);
-       adap->sge.timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
-       adap->sge.counter_val[0] = 1;
-       for (v = 1; v < SGE_NCOUNTERS; v++)
-               adap->sge.counter_val[v] = min(intr_cnt[v - 1],
-                                              THRESHOLD_3_MASK);
-#define FW_PARAM_DEV(param) \
-       (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
-        FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
+       /*
+        * If we're the Master PF Driver and the device is uninitialized,
+        * then let's consider upgrading the firmware ...  (We always want
+        * to check the firmware version number in order to A. get it for
+        * later reporting and B. to warn if the currently loaded firmware
+        * is excessively mismatched relative to the driver.)
+        */
+       ret = t4_check_fw_version(adap);
+       if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
+               if (ret == -EINVAL || ret > 0) {
+                       if (upgrade_fw(adap) >= 0) {
+                               /*
+                                * Note that the chip was reset as part of the
+                                * firmware upgrade so we don't reset it again
+                                * below and grab the new firmware version.
+                                */
+                               reset = 0;
+                               ret = t4_check_fw_version(adap);
+                       }
+               }
+               if (ret < 0)
+                       return ret;
+       }
 
-       params[0] = FW_PARAM_DEV(CCLK);
-       ret = t4_query_params(adap, adap->fn, adap->fn, 0, 1, params, val);
+       /*
+        * Grab VPD parameters.  This should be done after we establish a
+        * connection to the firmware since some of the VPD parameters
+        * (notably the Core Clock frequency) are retrieved via requests to
+        * the firmware.  On the other hand, we need these fairly early on
+        * so we do this right after getting ahold of the firmware.
+        */
+       ret = get_vpd_params(adap, &adap->params.vpd);
        if (ret < 0)
                goto bye;
-       adap->params.vpd.cclk = val[0];
 
-       ret = adap_init1(adap, &c);
+       /*
+        * Find out what ports are available to us.  Note that we need to do
+        * this before calling adap_init0_no_config() since it needs nports
+        * and portvec ...
+        */
+       v =
+           FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+           FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
+       ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, &v, &port_vec);
        if (ret < 0)
                goto bye;
 
+       adap->params.nports = hweight32(port_vec);
+       adap->params.portvec = port_vec;
+
+       /*
+        * If the firmware is initialized already (and we're not forcing a
+        * master initialization), note that we're living with existing
+        * adapter parameters.  Otherwise, it's time to try initializing the
+        * adapter ...
+        */
+       if (state == DEV_STATE_INIT) {
+               dev_info(adap->pdev_dev, "Coming up as %s: "\
+                        "Adapter already initialized\n",
+                        adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
+               adap->flags |= USING_SOFT_PARAMS;
+       } else {
+               dev_info(adap->pdev_dev, "Coming up as MASTER: "\
+                        "Initializing adapter\n");
+
+               /*
+                * If the firmware doesn't support Configuration
+                * Files warn user and exit,
+                */
+               if (ret < 0)
+                       dev_warn(adap->pdev_dev, "Firmware doesn't support "
+                                "configuration file.\n");
+               if (force_old_init)
+                       ret = adap_init0_no_config(adap, reset);
+               else {
+                       /*
+                        * Find out whether we're dealing with a version of
+                        * the firmware which has configuration file support.
+                        */
+                       params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+                                    FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
+                       ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
+                                             params, val);
+
+                       /*
+                        * If the firmware doesn't support Configuration
+                        * Files, use the old Driver-based, hard-wired
+                        * initialization.  Otherwise, try using the
+                        * Configuration File support and fall back to the
+                        * Driver-based initialization if there's no
+                        * Configuration File found.
+                        */
+                       if (ret < 0)
+                               ret = adap_init0_no_config(adap, reset);
+                       else {
+                               /*
+                                * The firmware provides us with a memory
+                                * buffer where we can load a Configuration
+                                * File from the host if we want to override
+                                * the Configuration File in flash.
+                                */
+
+                               ret = adap_init0_config(adap, reset);
+                               if (ret == -ENOENT) {
+                                       dev_info(adap->pdev_dev,
+                                           "No Configuration File present "
+                                           "on adapter.  Using hard-wired "
+                                           "configuration parameters.\n");
+                                       ret = adap_init0_no_config(adap, reset);
+                               }
+                       }
+               }
+               if (ret < 0) {
+                       dev_err(adap->pdev_dev,
+                               "could not initialize adapter, error %d\n",
+                               -ret);
+                       goto bye;
+               }
+       }
+
+       /*
+        * If we're living with non-hard-coded parameters (either from a
+        * Firmware Configuration File or values programmed by a different PF
+        * Driver), give the SGE code a chance to pull in anything that it
+        * needs ...  Note that this must be called after we retrieve our VPD
+        * parameters in order to know how to convert core ticks to seconds.
+        */
+       if (adap->flags & USING_SOFT_PARAMS) {
+               ret = t4_sge_init(adap);
+               if (ret < 0)
+                       goto bye;
+       }
+
+       /*
+        * Grab some of our basic fundamental operating parameters.
+        */
+#define FW_PARAM_DEV(param) \
+       (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
+       FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
+
 #define FW_PARAM_PFVF(param) \
-       (FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
-        FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param) | \
-        FW_PARAMS_PARAM_Y(adap->fn))
+       FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
+       FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)|  \
+       FW_PARAMS_PARAM_Y(0) | \
+       FW_PARAMS_PARAM_Z(0)
 
-       params[0] = FW_PARAM_DEV(PORTVEC);
+       params[0] = FW_PARAM_PFVF(EQ_START);
        params[1] = FW_PARAM_PFVF(L2T_START);
        params[2] = FW_PARAM_PFVF(L2T_END);
        params[3] = FW_PARAM_PFVF(FILTER_START);
        params[4] = FW_PARAM_PFVF(FILTER_END);
        params[5] = FW_PARAM_PFVF(IQFLINT_START);
-       params[6] = FW_PARAM_PFVF(EQ_START);
-       ret = t4_query_params(adap, adap->fn, adap->fn, 0, 7, params, val);
+       ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params, val);
        if (ret < 0)
                goto bye;
-       port_vec = val[0];
+       adap->sge.egr_start = val[0];
+       adap->l2t_start = val[1];
+       adap->l2t_end = val[2];
        adap->tids.ftid_base = val[3];
        adap->tids.nftids = val[4] - val[3] + 1;
        adap->sge.ingr_start = val[5];
-       adap->sge.egr_start = val[6];
 
-       if (c.ofldcaps) {
+       /* query params related to active filter region */
+       params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
+       params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
+       ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
+       /* If Active filter size is set we enable establishing
+        * offload connection through firmware work request
+        */
+       if ((val[0] != val[1]) && (ret >= 0)) {
+               adap->flags |= FW_OFLD_CONN;
+               adap->tids.aftid_base = val[0];
+               adap->tids.aftid_end = val[1];
+       }
+
+#ifdef CONFIG_CHELSIO_T4_OFFLOAD
+       /*
+        * Get device capabilities so we can determine what resources we need
+        * to manage.
+        */
+       memset(&caps_cmd, 0, sizeof(caps_cmd));
+       caps_cmd.op_to_write = htonl(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
+                                    FW_CMD_REQUEST | FW_CMD_READ);
+       caps_cmd.retval_len16 = htonl(FW_LEN16(caps_cmd));
+       ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
+                        &caps_cmd);
+       if (ret < 0)
+               goto bye;
+
+       if (caps_cmd.ofldcaps) {
                /* query offload-related parameters */
                params[0] = FW_PARAM_DEV(NTID);
                params[1] = FW_PARAM_PFVF(SERVER_START);
@@ -3235,28 +3956,55 @@ static int adap_init0(struct adapter *adap)
                params[3] = FW_PARAM_PFVF(TDDP_START);
                params[4] = FW_PARAM_PFVF(TDDP_END);
                params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
-               ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params,
-                                     val);
+               ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
+                                     params, val);
                if (ret < 0)
                        goto bye;
                adap->tids.ntids = val[0];
                adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
                adap->tids.stid_base = val[1];
                adap->tids.nstids = val[2] - val[1] + 1;
+               /*
+                * Setup server filter region. Divide the available filter
+                * region into two parts. Regular filters get 1/3rd and server
+                * filters get 2/3rd part. This is only enabled if workaround
+                * path is enabled.
+                * 1. For regular filters.
+                * 2. Server filter: These are special filters which are used
+                * to redirect SYN packets to offload queue.
+                */
+               if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) {
+                       adap->tids.sftid_base = adap->tids.ftid_base +
+                                       DIV_ROUND_UP(adap->tids.nftids, 3);
+                       adap->tids.nsftids = adap->tids.nftids -
+                                        DIV_ROUND_UP(adap->tids.nftids, 3);
+                       adap->tids.nftids = adap->tids.sftid_base -
+                                               adap->tids.ftid_base;
+               }
                adap->vres.ddp.start = val[3];
                adap->vres.ddp.size = val[4] - val[3] + 1;
                adap->params.ofldq_wr_cred = val[5];
+
+               params[0] = FW_PARAM_PFVF(ETHOFLD_START);
+               params[1] = FW_PARAM_PFVF(ETHOFLD_END);
+               ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
+                                     params, val);
+               if ((val[0] != val[1]) && (ret >= 0)) {
+                       adap->tids.uotid_base = val[0];
+                       adap->tids.nuotids = val[1] - val[0] + 1;
+               }
+
                adap->params.offload = 1;
        }
-       if (c.rdmacaps) {
+       if (caps_cmd.rdmacaps) {
                params[0] = FW_PARAM_PFVF(STAG_START);
                params[1] = FW_PARAM_PFVF(STAG_END);
                params[2] = FW_PARAM_PFVF(RQ_START);
                params[3] = FW_PARAM_PFVF(RQ_END);
                params[4] = FW_PARAM_PFVF(PBL_START);
                params[5] = FW_PARAM_PFVF(PBL_END);
-               ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params,
-                                     val);
+               ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
+                                     params, val);
                if (ret < 0)
                        goto bye;
                adap->vres.stag.start = val[0];
@@ -3272,8 +4020,7 @@ static int adap_init0(struct adapter *adap)
                params[3] = FW_PARAM_PFVF(CQ_END);
                params[4] = FW_PARAM_PFVF(OCQ_START);
                params[5] = FW_PARAM_PFVF(OCQ_END);
-               ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params,
-                                     val);
+               ret = t4_query_params(adap, 0, 0, 0, 6, params, val);
                if (ret < 0)
                        goto bye;
                adap->vres.qp.start = val[0];
@@ -3283,11 +4030,11 @@ static int adap_init0(struct adapter *adap)
                adap->vres.ocq.start = val[4];
                adap->vres.ocq.size = val[5] - val[4] + 1;
        }
-       if (c.iscsicaps) {
+       if (caps_cmd.iscsicaps) {
                params[0] = FW_PARAM_PFVF(ISCSI_START);
                params[1] = FW_PARAM_PFVF(ISCSI_END);
-               ret = t4_query_params(adap, adap->fn, adap->fn, 0, 2, params,
-                                     val);
+               ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
+                                     params, val);
                if (ret < 0)
                        goto bye;
                adap->vres.iscsi.start = val[0];
@@ -3295,63 +4042,33 @@ static int adap_init0(struct adapter *adap)
        }
 #undef FW_PARAM_PFVF
 #undef FW_PARAM_DEV
+#endif /* CONFIG_CHELSIO_T4_OFFLOAD */
 
-       adap->params.nports = hweight32(port_vec);
-       adap->params.portvec = port_vec;
-       adap->flags |= FW_OK;
-
-       /* These are finalized by FW initialization, load their values now */
+       /*
+        * These are finalized by FW initialization, load their values now.
+        */
        v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
        adap->params.tp.tre = TIMERRESOLUTION_GET(v);
+       adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v);
        t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
        t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
                     adap->params.b_wnd);
 
-#ifdef CONFIG_PCI_IOV
-       /*
-        * Provision resource limits for Virtual Functions.  We currently
-        * grant them all the same static resource limits except for the Port
-        * Access Rights Mask which we're assigning based on the PF.  All of
-        * the static provisioning stuff for both the PF and VF really needs
-        * to be managed in a persistent manner for each device which the
-        * firmware controls.
-        */
-       {
-               int pf, vf;
-
-               for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) {
-                       if (num_vf[pf] <= 0)
-                               continue;
+       /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
+       for (j = 0; j < NCHAN; j++)
+               adap->params.tp.tx_modq[j] = j;
 
-                       /* VF numbering starts at 1! */
-                       for (vf = 1; vf <= num_vf[pf]; vf++) {
-                               ret = t4_cfg_pfvf(adap, adap->fn, pf, vf,
-                                                 VFRES_NEQ, VFRES_NETHCTRL,
-                                                 VFRES_NIQFLINT, VFRES_NIQ,
-                                                 VFRES_TC, VFRES_NVI,
-                                                 FW_PFVF_CMD_CMASK_MASK,
-                                                 pfvfres_pmask(adap, pf, vf),
-                                                 VFRES_NEXACTF,
-                                                 VFRES_R_CAPS, VFRES_WX_CAPS);
-                               if (ret < 0)
-                                       dev_warn(adap->pdev_dev, "failed to "
-                                                "provision pf/vf=%d/%d; "
-                                                "err=%d\n", pf, vf, ret);
-                       }
-               }
-       }
-#endif
-
-       setup_memwin(adap);
+       adap->flags |= FW_OK;
        return 0;
 
        /*
-        * If a command timed out or failed with EIO FW does not operate within
-        * its spec or something catastrophic happened to HW/FW, stop issuing
-        * commands.
+        * Something bad happened.  If a command timed out or failed with EIO
+        * FW does not operate within its spec or something catastrophic
+        * happened to HW/FW, stop issuing commands.
         */
-bye:   if (ret != -ETIMEDOUT && ret != -EIO)
-               t4_fw_bye(adap, adap->fn);
+bye:
+       if (ret != -ETIMEDOUT && ret != -EIO)
+               t4_fw_bye(adap, adap->mbox);
        return ret;
 }
 
@@ -3806,7 +4523,9 @@ static int __devinit init_one(struct pci_dev *pdev,
        err = t4_prep_adapter(adapter);
        if (err)
                goto out_unmap_bar;
+       setup_memwin(adapter);
        err = adap_init0(adapter);
+       setup_memwin_rdma(adapter);
        if (err)
                goto out_unmap_bar;
 
@@ -3948,8 +4667,11 @@ static void __devexit remove_one(struct pci_dev *pdev)
 {
        struct adapter *adapter = pci_get_drvdata(pdev);
 
+#ifdef CONFIG_PCI_IOV
        pci_disable_sriov(pdev);
 
+#endif
+
        if (adapter) {
                int i;
 
index d79980c5fc630e44396b3b66af7b4a1ded1cf36c..1b899fea1a91427bac31b171cd799fad2fc384f0 100644 (file)
@@ -100,6 +100,8 @@ struct tid_info {
 
        unsigned int nftids;
        unsigned int ftid_base;
+       unsigned int aftid_base;
+       unsigned int aftid_end;
 
        spinlock_t atid_lock ____cacheline_aligned_in_smp;
        union aopen_entry *afree;
index d49933ed551f7a4fca4d6b71f0528aaf9f6a80d6..3ecc087d732d12ea3e61214235f1a99254f87924 100644 (file)
@@ -68,9 +68,6 @@
  */
 #define RX_PKT_SKB_LEN   512
 
-/* Ethernet header padding prepended to RX_PKTs */
-#define RX_PKT_PAD 2
-
 /*
  * Max number of Tx descriptors we clean up at a time.  Should be modest as
  * freeing skbs isn't cheap and it happens while holding locks.  We just need
  */
 #define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN
 
-enum {
-       /* packet alignment in FL buffers */
-       FL_ALIGN = L1_CACHE_BYTES < 32 ? 32 : L1_CACHE_BYTES,
-       /* egress status entry size */
-       STAT_LEN = L1_CACHE_BYTES > 64 ? 128 : 64
-};
-
 struct tx_sw_desc {                /* SW state per Tx descriptor */
        struct sk_buff *skb;
        struct ulptx_sgl *sgl;
@@ -155,16 +145,57 @@ struct rx_sw_desc {                /* SW state per Rx descriptor */
 };
 
 /*
- * The low bits of rx_sw_desc.dma_addr have special meaning.
+ * Rx buffer sizes for "useskbs" Free List buffers (one ingress packet per skb
+ * buffer).  We currently only support two sizes for 1500- and 9000-byte MTUs.
+ * We could easily support more but there doesn't seem to be much need for
+ * that ...
+ */
+#define FL_MTU_SMALL 1500
+#define FL_MTU_LARGE 9000
+
+static inline unsigned int fl_mtu_bufsize(struct adapter *adapter,
+                                         unsigned int mtu)
+{
+       struct sge *s = &adapter->sge;
+
+       return ALIGN(s->pktshift + ETH_HLEN + VLAN_HLEN + mtu, s->fl_align);
+}
+
+#define FL_MTU_SMALL_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_SMALL)
+#define FL_MTU_LARGE_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_LARGE)
+
+/*
+ * Bits 0..3 of rx_sw_desc.dma_addr have special meaning.  The hardware uses
+ * these to specify the buffer size as an index into the SGE Free List Buffer
+ * Size register array.  We also use bit 4, when the buffer has been unmapped
+ * for DMA, but this is of course never sent to the hardware and is only used
+ * to prevent double unmappings.  All of the above requires that the Free List
+ * Buffers which we allocate have the bottom 5 bits free (0) -- i.e. are
+ * 32-byte or a power of 2 greater in alignment.  Since the SGE's minimal
+ * Free List Buffer alignment is 32 bytes, this works out for us ...
  */
 enum {
-       RX_LARGE_BUF    = 1 << 0, /* buffer is larger than PAGE_SIZE */
-       RX_UNMAPPED_BUF = 1 << 1, /* buffer is not mapped */
+       RX_BUF_FLAGS     = 0x1f,   /* bottom five bits are special */
+       RX_BUF_SIZE      = 0x0f,   /* bottom four bits are for buf sizes */
+       RX_UNMAPPED_BUF  = 0x10,   /* buffer is not mapped */
+
+       /*
+        * XXX We shouldn't depend on being able to use these indices.
+        * XXX Especially when some other Master PF has initialized the
+        * XXX adapter or we use the Firmware Configuration File.  We
+        * XXX should really search through the Host Buffer Size register
+        * XXX array for the appropriately sized buffer indices.
+        */
+       RX_SMALL_PG_BUF  = 0x0,   /* small (PAGE_SIZE) page buffer */
+       RX_LARGE_PG_BUF  = 0x1,   /* large (FL_PG_ORDER) page buffer */
+
+       RX_SMALL_MTU_BUF = 0x2,   /* small MTU buffer */
+       RX_LARGE_MTU_BUF = 0x3,   /* large MTU buffer */
 };
 
 static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *d)
 {
-       return d->dma_addr & ~(dma_addr_t)(RX_LARGE_BUF | RX_UNMAPPED_BUF);
+       return d->dma_addr & ~(dma_addr_t)RX_BUF_FLAGS;
 }
 
 static inline bool is_buf_mapped(const struct rx_sw_desc *d)
@@ -392,14 +423,35 @@ static inline void reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
        }
 }
 
-static inline int get_buf_size(const struct rx_sw_desc *d)
+static inline int get_buf_size(struct adapter *adapter,
+                              const struct rx_sw_desc *d)
 {
-#if FL_PG_ORDER > 0
-       return (d->dma_addr & RX_LARGE_BUF) ? (PAGE_SIZE << FL_PG_ORDER) :
-                                             PAGE_SIZE;
-#else
-       return PAGE_SIZE;
-#endif
+       struct sge *s = &adapter->sge;
+       unsigned int rx_buf_size_idx = d->dma_addr & RX_BUF_SIZE;
+       int buf_size;
+
+       switch (rx_buf_size_idx) {
+       case RX_SMALL_PG_BUF:
+               buf_size = PAGE_SIZE;
+               break;
+
+       case RX_LARGE_PG_BUF:
+               buf_size = PAGE_SIZE << s->fl_pg_order;
+               break;
+
+       case RX_SMALL_MTU_BUF:
+               buf_size = FL_MTU_SMALL_BUFSIZE(adapter);
+               break;
+
+       case RX_LARGE_MTU_BUF:
+               buf_size = FL_MTU_LARGE_BUFSIZE(adapter);
+               break;
+
+       default:
+               BUG_ON(1);
+       }
+
+       return buf_size;
 }
 
 /**
@@ -418,7 +470,8 @@ static void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n)
 
                if (is_buf_mapped(d))
                        dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
-                                      get_buf_size(d), PCI_DMA_FROMDEVICE);
+                                      get_buf_size(adap, d),
+                                      PCI_DMA_FROMDEVICE);
                put_page(d->page);
                d->page = NULL;
                if (++q->cidx == q->size)
@@ -444,7 +497,7 @@ static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q)
 
        if (is_buf_mapped(d))
                dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
-                              get_buf_size(d), PCI_DMA_FROMDEVICE);
+                              get_buf_size(adap, d), PCI_DMA_FROMDEVICE);
        d->page = NULL;
        if (++q->cidx == q->size)
                q->cidx = 0;
@@ -485,6 +538,7 @@ static inline void set_rx_sw_desc(struct rx_sw_desc *sd, struct page *pg,
 static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
                              gfp_t gfp)
 {
+       struct sge *s = &adap->sge;
        struct page *pg;
        dma_addr_t mapping;
        unsigned int cred = q->avail;
@@ -493,25 +547,27 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
 
        gfp |= __GFP_NOWARN | __GFP_COLD;
 
-#if FL_PG_ORDER > 0
+       if (s->fl_pg_order == 0)
+               goto alloc_small_pages;
+
        /*
         * Prefer large buffers
         */
        while (n) {
-               pg = alloc_pages(gfp | __GFP_COMP, FL_PG_ORDER);
+               pg = alloc_pages(gfp | __GFP_COMP, s->fl_pg_order);
                if (unlikely(!pg)) {
                        q->large_alloc_failed++;
                        break;       /* fall back to single pages */
                }
 
                mapping = dma_map_page(adap->pdev_dev, pg, 0,
-                                      PAGE_SIZE << FL_PG_ORDER,
+                                      PAGE_SIZE << s->fl_pg_order,
                                       PCI_DMA_FROMDEVICE);
                if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
-                       __free_pages(pg, FL_PG_ORDER);
+                       __free_pages(pg, s->fl_pg_order);
                        goto out;   /* do not try small pages for this error */
                }
-               mapping |= RX_LARGE_BUF;
+               mapping |= RX_LARGE_PG_BUF;
                *d++ = cpu_to_be64(mapping);
 
                set_rx_sw_desc(sd, pg, mapping);
@@ -525,8 +581,8 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
                }
                n--;
        }
-#endif
 
+alloc_small_pages:
        while (n--) {
                pg = __skb_alloc_page(gfp, NULL);
                if (unlikely(!pg)) {
@@ -769,8 +825,8 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
        wmb();            /* write descriptors before telling HW */
        spin_lock(&q->db_lock);
        if (!q->db_disabled) {
-               t4_write_reg(adap, MYPF_REG(A_SGE_PF_KDOORBELL),
-                            V_QID(q->cntxt_id) | V_PIDX(n));
+               t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
+                            QID(q->cntxt_id) | PIDX(n));
        }
        q->db_pidx = q->pidx;
        spin_unlock(&q->db_lock);
@@ -1519,6 +1575,8 @@ static noinline int handle_trace_pkt(struct adapter *adap,
 static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
                   const struct cpl_rx_pkt *pkt)
 {
+       struct adapter *adapter = rxq->rspq.adap;
+       struct sge *s = &adapter->sge;
        int ret;
        struct sk_buff *skb;
 
@@ -1529,8 +1587,8 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
                return;
        }
 
-       copy_frags(skb, gl, RX_PKT_PAD);
-       skb->len = gl->tot_len - RX_PKT_PAD;
+       copy_frags(skb, gl, s->pktshift);
+       skb->len = gl->tot_len - s->pktshift;
        skb->data_len = skb->len;
        skb->truesize += skb->data_len;
        skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1566,6 +1624,7 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
        struct sk_buff *skb;
        const struct cpl_rx_pkt *pkt;
        struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
+       struct sge *s = &q->adap->sge;
 
        if (unlikely(*(u8 *)rsp == CPL_TRACE_PKT))
                return handle_trace_pkt(q->adap, si);
@@ -1585,7 +1644,7 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
                return 0;
        }
 
-       __skb_pull(skb, RX_PKT_PAD);      /* remove ethernet header padding */
+       __skb_pull(skb, s->pktshift);      /* remove ethernet header padding */
        skb->protocol = eth_type_trans(skb, q->netdev);
        skb_record_rx_queue(skb, q->idx);
        if (skb->dev->features & NETIF_F_RXHASH)
@@ -1696,6 +1755,8 @@ static int process_responses(struct sge_rspq *q, int budget)
        int budget_left = budget;
        const struct rsp_ctrl *rc;
        struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
+       struct adapter *adapter = q->adap;
+       struct sge *s = &adapter->sge;
 
        while (likely(budget_left)) {
                rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
@@ -1722,7 +1783,7 @@ static int process_responses(struct sge_rspq *q, int budget)
                        /* gather packet fragments */
                        for (frags = 0, fp = si.frags; ; frags++, fp++) {
                                rsd = &rxq->fl.sdesc[rxq->fl.cidx];
-                               bufsz = get_buf_size(rsd);
+                               bufsz = get_buf_size(adapter, rsd);
                                fp->page = rsd->page;
                                fp->offset = q->offset;
                                fp->size = min(bufsz, len);
@@ -1747,7 +1808,7 @@ static int process_responses(struct sge_rspq *q, int budget)
                        si.nfrags = frags + 1;
                        ret = q->handler(q, q->cur_desc, &si);
                        if (likely(ret == 0))
-                               q->offset += ALIGN(fp->size, FL_ALIGN);
+                               q->offset += ALIGN(fp->size, s->fl_align);
                        else
                                restore_rx_bufs(&si, &rxq->fl, frags);
                } else if (likely(rsp_type == RSP_TYPE_CPL)) {
@@ -1983,6 +2044,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
 {
        int ret, flsz = 0;
        struct fw_iq_cmd c;
+       struct sge *s = &adap->sge;
        struct port_info *pi = netdev_priv(dev);
 
        /* Size needs to be multiple of 16, including status entry. */
@@ -2015,11 +2077,11 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
                fl->size = roundup(fl->size, 8);
                fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64),
                                      sizeof(struct rx_sw_desc), &fl->addr,
-                                     &fl->sdesc, STAT_LEN, NUMA_NO_NODE);
+                                     &fl->sdesc, s->stat_len, NUMA_NO_NODE);
                if (!fl->desc)
                        goto fl_nomem;
 
-               flsz = fl->size / 8 + STAT_LEN / sizeof(struct tx_desc);
+               flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc);
                c.iqns_to_fl0congen = htonl(FW_IQ_CMD_FL0PACKEN |
                                            FW_IQ_CMD_FL0FETCHRO(1) |
                                            FW_IQ_CMD_FL0DATARO(1) |
@@ -2096,14 +2158,15 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
 {
        int ret, nentries;
        struct fw_eq_eth_cmd c;
+       struct sge *s = &adap->sge;
        struct port_info *pi = netdev_priv(dev);
 
        /* Add status entries */
-       nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);
+       nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
 
        txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
                        sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
-                       &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN,
+                       &txq->q.phys_addr, &txq->q.sdesc, s->stat_len,
                        netdev_queue_numa_node_read(netdevq));
        if (!txq->q.desc)
                return -ENOMEM;
@@ -2149,10 +2212,11 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
 {
        int ret, nentries;
        struct fw_eq_ctrl_cmd c;
+       struct sge *s = &adap->sge;
        struct port_info *pi = netdev_priv(dev);
 
        /* Add status entries */
-       nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);
+       nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
 
        txq->q.desc = alloc_ring(adap->pdev_dev, nentries,
                                 sizeof(struct tx_desc), 0, &txq->q.phys_addr,
@@ -2200,14 +2264,15 @@ int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
 {
        int ret, nentries;
        struct fw_eq_ofld_cmd c;
+       struct sge *s = &adap->sge;
        struct port_info *pi = netdev_priv(dev);
 
        /* Add status entries */
-       nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);
+       nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
 
        txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
                        sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
-                       &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN,
+                       &txq->q.phys_addr, &txq->q.sdesc, s->stat_len,
                        NUMA_NO_NODE);
        if (!txq->q.desc)
                return -ENOMEM;
@@ -2251,8 +2316,10 @@ int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
 
 static void free_txq(struct adapter *adap, struct sge_txq *q)
 {
+       struct sge *s = &adap->sge;
+
        dma_free_coherent(adap->pdev_dev,
-                         q->size * sizeof(struct tx_desc) + STAT_LEN,
+                         q->size * sizeof(struct tx_desc) + s->stat_len,
                          q->desc, q->phys_addr);
        q->cntxt_id = 0;
        q->sdesc = NULL;
@@ -2262,6 +2329,7 @@ static void free_txq(struct adapter *adap, struct sge_txq *q)
 static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
                         struct sge_fl *fl)
 {
+       struct sge *s = &adap->sge;
        unsigned int fl_id = fl ? fl->cntxt_id : 0xffff;
 
        adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL;
@@ -2276,7 +2344,7 @@ static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
 
        if (fl) {
                free_rx_bufs(adap, fl, fl->avail);
-               dma_free_coherent(adap->pdev_dev, fl->size * 8 + STAT_LEN,
+               dma_free_coherent(adap->pdev_dev, fl->size * 8 + s->stat_len,
                                  fl->desc, fl->addr);
                kfree(fl->sdesc);
                fl->sdesc = NULL;
@@ -2408,18 +2476,112 @@ void t4_sge_stop(struct adapter *adap)
  *     Performs SGE initialization needed every time after a chip reset.
  *     We do not initialize any of the queues here, instead the driver
  *     top-level must request them individually.
+ *
+ *     Called in two different modes:
+ *
+ *      1. Perform actual hardware initialization and record hard-coded
+ *         parameters which were used.  This gets used when we're the
+ *         Master PF and the Firmware Configuration File support didn't
+ *         work for some reason.
+ *
+ *      2. We're not the Master PF or initialization was performed with
+ *         a Firmware Configuration File.  In this case we need to grab
+ *         any of the SGE operating parameters that we need to have in
+ *         order to do our job and make sure we can live with them ...
  */
-void t4_sge_init(struct adapter *adap)
+
+static int t4_sge_init_soft(struct adapter *adap)
 {
-       unsigned int i, v;
        struct sge *s = &adap->sge;
-       unsigned int fl_align_log = ilog2(FL_ALIGN);
+       u32 fl_small_pg, fl_large_pg, fl_small_mtu, fl_large_mtu;
+       u32 timer_value_0_and_1, timer_value_2_and_3, timer_value_4_and_5;
+       u32 ingress_rx_threshold;
 
-       t4_set_reg_field(adap, SGE_CONTROL, PKTSHIFT_MASK |
-                        INGPADBOUNDARY_MASK | EGRSTATUSPAGESIZE,
-                        INGPADBOUNDARY(fl_align_log - 5) | PKTSHIFT(2) |
-                        RXPKTCPLMODE |
-                        (STAT_LEN == 128 ? EGRSTATUSPAGESIZE : 0));
+       /*
+        * Verify that CPL messages are going to the Ingress Queue for
+        * process_responses() and that only packet data is going to the
+        * Free Lists.
+        */
+       if ((t4_read_reg(adap, SGE_CONTROL) & RXPKTCPLMODE_MASK) !=
+           RXPKTCPLMODE(X_RXPKTCPLMODE_SPLIT)) {
+               dev_err(adap->pdev_dev, "bad SGE CPL MODE\n");
+               return -EINVAL;
+       }
+
+       /*
+        * Validate the Host Buffer Register Array indices that we want to
+        * use ...
+        *
+        * XXX Note that we should really read through the Host Buffer Size
+        * XXX register array and find the indices of the Buffer Sizes which
+        * XXX meet our needs!
+        */
+       #define READ_FL_BUF(x) \
+               t4_read_reg(adap, SGE_FL_BUFFER_SIZE0+(x)*sizeof(u32))
+
+       fl_small_pg = READ_FL_BUF(RX_SMALL_PG_BUF);
+       fl_large_pg = READ_FL_BUF(RX_LARGE_PG_BUF);
+       fl_small_mtu = READ_FL_BUF(RX_SMALL_MTU_BUF);
+       fl_large_mtu = READ_FL_BUF(RX_LARGE_MTU_BUF);
+
+       #undef READ_FL_BUF
+
+       if (fl_small_pg != PAGE_SIZE ||
+           (fl_large_pg != 0 && (fl_large_pg <= fl_small_pg ||
+                                 (fl_large_pg & (fl_large_pg-1)) != 0))) {
+               dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n",
+                       fl_small_pg, fl_large_pg);
+               return -EINVAL;
+       }
+       if (fl_large_pg)
+               s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT;
+
+       if (fl_small_mtu < FL_MTU_SMALL_BUFSIZE(adap) ||
+           fl_large_mtu < FL_MTU_LARGE_BUFSIZE(adap)) {
+               dev_err(adap->pdev_dev, "bad SGE FL MTU sizes [%d, %d]\n",
+                       fl_small_mtu, fl_large_mtu);
+               return -EINVAL;
+       }
+
+       /*
+        * Retrieve our RX interrupt holdoff timer values and counter
+        * threshold values from the SGE parameters.
+        */
+       timer_value_0_and_1 = t4_read_reg(adap, SGE_TIMER_VALUE_0_AND_1);
+       timer_value_2_and_3 = t4_read_reg(adap, SGE_TIMER_VALUE_2_AND_3);
+       timer_value_4_and_5 = t4_read_reg(adap, SGE_TIMER_VALUE_4_AND_5);
+       s->timer_val[0] = core_ticks_to_us(adap,
+               TIMERVALUE0_GET(timer_value_0_and_1));
+       s->timer_val[1] = core_ticks_to_us(adap,
+               TIMERVALUE1_GET(timer_value_0_and_1));
+       s->timer_val[2] = core_ticks_to_us(adap,
+               TIMERVALUE2_GET(timer_value_2_and_3));
+       s->timer_val[3] = core_ticks_to_us(adap,
+               TIMERVALUE3_GET(timer_value_2_and_3));
+       s->timer_val[4] = core_ticks_to_us(adap,
+               TIMERVALUE4_GET(timer_value_4_and_5));
+       s->timer_val[5] = core_ticks_to_us(adap,
+               TIMERVALUE5_GET(timer_value_4_and_5));
+
+       ingress_rx_threshold = t4_read_reg(adap, SGE_INGRESS_RX_THRESHOLD);
+       s->counter_val[0] = THRESHOLD_0_GET(ingress_rx_threshold);
+       s->counter_val[1] = THRESHOLD_1_GET(ingress_rx_threshold);
+       s->counter_val[2] = THRESHOLD_2_GET(ingress_rx_threshold);
+       s->counter_val[3] = THRESHOLD_3_GET(ingress_rx_threshold);
+
+       return 0;
+}
+
+static int t4_sge_init_hard(struct adapter *adap)
+{
+       struct sge *s = &adap->sge;
+
+       /*
+        * Set up our basic SGE mode to deliver CPL messages to our Ingress
+        * Queue and Packet Data to the Free List.
+        */
+       t4_set_reg_field(adap, SGE_CONTROL, RXPKTCPLMODE_MASK,
+                        RXPKTCPLMODE_MASK);
 
        /*
         * Set up to drop DOORBELL writes when the DOORBELL FIFO overflows
@@ -2433,13 +2595,24 @@ void t4_sge_init(struct adapter *adap)
        t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_ENABLE_DROP,
                        F_ENABLE_DROP);
 
-       for (i = v = 0; i < 32; i += 4)
-               v |= (PAGE_SHIFT - 10) << i;
-       t4_write_reg(adap, SGE_HOST_PAGE_SIZE, v);
-       t4_write_reg(adap, SGE_FL_BUFFER_SIZE0, PAGE_SIZE);
-#if FL_PG_ORDER > 0
-       t4_write_reg(adap, SGE_FL_BUFFER_SIZE1, PAGE_SIZE << FL_PG_ORDER);
-#endif
+       /*
+        * SGE_FL_BUFFER_SIZE0 (RX_SMALL_PG_BUF) is set up by
+        * t4_fixup_host_params().
+        */
+       s->fl_pg_order = FL_PG_ORDER;
+       if (s->fl_pg_order)
+               t4_write_reg(adap,
+                            SGE_FL_BUFFER_SIZE0+RX_LARGE_PG_BUF*sizeof(u32),
+                            PAGE_SIZE << FL_PG_ORDER);
+       t4_write_reg(adap, SGE_FL_BUFFER_SIZE0+RX_SMALL_MTU_BUF*sizeof(u32),
+                    FL_MTU_SMALL_BUFSIZE(adap));
+       t4_write_reg(adap, SGE_FL_BUFFER_SIZE0+RX_LARGE_MTU_BUF*sizeof(u32),
+                    FL_MTU_LARGE_BUFSIZE(adap));
+
+       /*
+        * Note that the SGE Ingress Packet Count Interrupt Threshold and
+        * Timer Holdoff values must be supplied by our caller.
+        */
        t4_write_reg(adap, SGE_INGRESS_RX_THRESHOLD,
                     THRESHOLD_0(s->counter_val[0]) |
                     THRESHOLD_1(s->counter_val[1]) |
@@ -2449,14 +2622,54 @@ void t4_sge_init(struct adapter *adap)
                     TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[0])) |
                     TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[1])));
        t4_write_reg(adap, SGE_TIMER_VALUE_2_AND_3,
-                    TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[2])) |
-                    TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[3])));
+                    TIMERVALUE2(us_to_core_ticks(adap, s->timer_val[2])) |
+                    TIMERVALUE3(us_to_core_ticks(adap, s->timer_val[3])));
        t4_write_reg(adap, SGE_TIMER_VALUE_4_AND_5,
-                    TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[4])) |
-                    TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[5])));
+                    TIMERVALUE4(us_to_core_ticks(adap, s->timer_val[4])) |
+                    TIMERVALUE5(us_to_core_ticks(adap, s->timer_val[5])));
+
+       return 0;
+}
+
+int t4_sge_init(struct adapter *adap)
+{
+       struct sge *s = &adap->sge;
+       u32 sge_control;
+       int ret;
+
+       /*
+        * Ingress Padding Boundary and Egress Status Page Size are set up by
+        * t4_fixup_host_params().
+        */
+       sge_control = t4_read_reg(adap, SGE_CONTROL);
+       s->pktshift = PKTSHIFT_GET(sge_control);
+       s->stat_len = (sge_control & EGRSTATUSPAGESIZE_MASK) ? 128 : 64;
+       s->fl_align = 1 << (INGPADBOUNDARY_GET(sge_control) +
+                           X_INGPADBOUNDARY_SHIFT);
+
+       if (adap->flags & USING_SOFT_PARAMS)
+               ret = t4_sge_init_soft(adap);
+       else
+               ret = t4_sge_init_hard(adap);
+       if (ret < 0)
+               return ret;
+
+       /*
+        * A FL with <= fl_starve_thres buffers is starving and a periodic
+        * timer will attempt to refill it.  This needs to be larger than the
+        * SGE's Egress Congestion Threshold.  If it isn't, then we can get
+        * stuck waiting for new packets while the SGE is waiting for us to
+        * give it more Free List entries.  (Note that the SGE's Egress
+        * Congestion Threshold is in units of 2 Free List pointers.)
+        */
+       s->fl_starve_thres
+               = EGRTHRESHOLD_GET(t4_read_reg(adap, SGE_CONM_CTRL))*2 + 1;
+
        setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap);
        setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adap);
        s->starve_thres = core_ticks_per_usec(adap) * 1000000;  /* 1 s */
        s->idma_state[0] = s->idma_state[1] = 0;
        spin_lock_init(&s->intrq_lock);
+
+       return 0;
 }
index af16013231733212e0e2ca3642d9910799bce118..35b81d8b59e90707fdb37518f76300871b5fd627 100644 (file)
@@ -120,6 +120,28 @@ static void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
        }
 }
 
+/**
+ *     t4_write_indirect - write indirectly addressed registers
+ *     @adap: the adapter
+ *     @addr_reg: register holding the indirect addresses
+ *     @data_reg: register holding the value for the indirect registers
+ *     @vals: values to write
+ *     @nregs: how many indirect registers to write
+ *     @start_idx: address of first indirect register to write
+ *
+ *     Writes a sequential block of registers that are accessed indirectly
+ *     through an address/data register pair.
+ */
+void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
+                      unsigned int data_reg, const u32 *vals,
+                      unsigned int nregs, unsigned int start_idx)
+{
+       while (nregs--) {
+               t4_write_reg(adap, addr_reg, start_idx++);
+               t4_write_reg(adap, data_reg, *vals++);
+       }
+}
+
 /*
  * Get the reply to a mailbox command and store it in @rpl in big-endian order.
  */
@@ -330,6 +352,143 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
        return 0;
 }
 
+/*
+ *     t4_mem_win_rw - read/write memory through PCIE memory window
+ *     @adap: the adapter
+ *     @addr: address of first byte requested
+ *     @data: MEMWIN0_APERTURE bytes of data containing the requested address
+ *     @dir: direction of transfer 1 => read, 0 => write
+ *
+ *     Read/write MEMWIN0_APERTURE bytes of data from MC starting at a
+ *     MEMWIN0_APERTURE-byte-aligned address that covers the requested
+ *     address @addr.
+ */
+static int t4_mem_win_rw(struct adapter *adap, u32 addr, __be32 *data, int dir)
+{
+       int i;
+
+       /*
+        * Setup offset into PCIE memory window.  Address must be a
+        * MEMWIN0_APERTURE-byte-aligned address.  (Read back MA register to
+        * ensure that changes propagate before we attempt to use the new
+        * values.)
+        */
+       t4_write_reg(adap, PCIE_MEM_ACCESS_OFFSET,
+                    addr & ~(MEMWIN0_APERTURE - 1));
+       t4_read_reg(adap, PCIE_MEM_ACCESS_OFFSET);
+
+       /* Collecting data 4 bytes at a time up to MEMWIN0_APERTURE */
+       for (i = 0; i < MEMWIN0_APERTURE; i = i+0x4) {
+               if (dir)
+                       *data++ = t4_read_reg(adap, (MEMWIN0_BASE + i));
+               else
+                       t4_write_reg(adap, (MEMWIN0_BASE + i), *data++);
+       }
+
+       return 0;
+}
+
+/**
+ *     t4_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
+ *     @adap: the adapter
+ *     @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
+ *     @addr: address within indicated memory type
+ *     @len: amount of memory to transfer
+ *     @buf: host memory buffer
+ *     @dir: direction of transfer 1 => read, 0 => write
+ *
+ *     Reads/writes an [almost] arbitrary memory region in the firmware: the
+ *     firmware memory address, length and host buffer must be aligned on
+ *     32-bit boundaries.  The memory is transferred as a raw byte sequence
+ *     from/to the firmware's memory.  If this memory contains data
+ *     structures which contain multi-byte integers, it's the caller's
+ *     responsibility to perform appropriate byte order conversions.
+ */
+static int t4_memory_rw(struct adapter *adap, int mtype, u32 addr, u32 len,
+                       __be32 *buf, int dir)
+{
+       u32 pos, start, end, offset, memoffset;
+       int ret;
+
+       /*
+        * Argument sanity checks ...
+        */
+       if ((addr & 0x3) || (len & 0x3))
+               return -EINVAL;
+
+       /*
+        * Offset into the region of memory which is being accessed
+        * MEM_EDC0 = 0
+        * MEM_EDC1 = 1
+        * MEM_MC   = 2
+        */
+       memoffset = (mtype * (5 * 1024 * 1024));
+
+       /* Determine the PCIE_MEM_ACCESS_OFFSET */
+       addr = addr + memoffset;
+
+       /*
+        * The underlying EDC/MC read routines read MEMWIN0_APERTURE bytes
+        * at a time so we need to round down the start and round up the end.
+        * We'll start copying out of the first line at (addr - start) a word
+        * at a time.
+        */
+       start = addr & ~(MEMWIN0_APERTURE-1);
+       end = (addr + len + MEMWIN0_APERTURE-1) & ~(MEMWIN0_APERTURE-1);
+       offset = (addr - start)/sizeof(__be32);
+
+       for (pos = start; pos < end; pos += MEMWIN0_APERTURE, offset = 0) {
+               __be32 data[MEMWIN0_APERTURE/sizeof(__be32)];
+
+               /*
+                * If we're writing, copy the data from the caller's memory
+                * buffer
+                */
+               if (!dir) {
+                       /*
+                        * If we're doing a partial write, then we need to do
+                        * a read-modify-write ...
+                        */
+                       if (offset || len < MEMWIN0_APERTURE) {
+                               ret = t4_mem_win_rw(adap, pos, data, 1);
+                               if (ret)
+                                       return ret;
+                       }
+                       while (offset < (MEMWIN0_APERTURE/sizeof(__be32)) &&
+                              len > 0) {
+                               data[offset++] = *buf++;
+                               len -= sizeof(__be32);
+                       }
+               }
+
+               /*
+                * Transfer a block of memory and bail if there's an error.
+                */
+               ret = t4_mem_win_rw(adap, pos, data, dir);
+               if (ret)
+                       return ret;
+
+               /*
+                * If we're reading, copy the data into the caller's memory
+                * buffer.
+                */
+               if (dir)
+                       while (offset < (MEMWIN0_APERTURE/sizeof(__be32)) &&
+                              len > 0) {
+                               *buf++ = data[offset++];
+                               len -= sizeof(__be32);
+                       }
+       }
+
+       return 0;
+}
+
+int t4_memory_write(struct adapter *adap, int mtype, u32 addr, u32 len,
+                   __be32 *buf)
+{
+       return t4_memory_rw(adap, mtype, addr, len, buf, 0);
+}
+
 #define EEPROM_STAT_ADDR   0x7bfc
 #define VPD_BASE           0
 #define VPD_LEN            512
@@ -355,8 +514,9 @@ int t4_seeprom_wp(struct adapter *adapter, bool enable)
  *
  *     Reads card parameters stored in VPD EEPROM.
  */
-static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
+int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
 {
+       u32 cclk_param, cclk_val;
        int i, ret;
        int ec, sn;
        u8 vpd[VPD_LEN], csum;
@@ -418,6 +578,19 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
        i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
        memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
        strim(p->sn);
+
+       /*
+        * Ask firmware for the Core Clock since it knows how to translate the
+        * Reference Clock ('V2') VPD field into a Core Clock value ...
+        */
+       cclk_param = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+                     FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK));
+       ret = t4_query_params(adapter, adapter->mbox, 0, 0,
+                             1, &cclk_param, &cclk_val);
+       if (ret)
+               return ret;
+       p->cclk = cclk_val;
+
        return 0;
 }
 
@@ -717,6 +890,77 @@ static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
        return ret;
 }
 
+/**
+ *     t4_flash_cfg_addr - return the address of the flash configuration file
+ *     @adapter: the adapter
+ *
+ *     Return the address within the flash where the Firmware Configuration
+ *     File is stored.
+ */
+unsigned int t4_flash_cfg_addr(struct adapter *adapter)
+{
+       if (adapter->params.sf_size == 0x100000)
+               return FLASH_FPGA_CFG_START;
+       else
+               return FLASH_CFG_START;
+}
+
+/**
+ *     t4_load_cfg - download config file
+ *     @adap: the adapter
+ *     @cfg_data: the cfg text file to write
+ *     @size: text file size
+ *
+ *     Write the supplied config text file to the card's serial flash.
+ */
+int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
+{
+       int ret, i, n;
+       unsigned int addr;
+       unsigned int flash_cfg_start_sec;
+       unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
+
+       addr = t4_flash_cfg_addr(adap);
+       flash_cfg_start_sec = addr / SF_SEC_SIZE;
+
+       if (size > FLASH_CFG_MAX_SIZE) {
+               dev_err(adap->pdev_dev, "cfg file too large, max is %u bytes\n",
+                       FLASH_CFG_MAX_SIZE);
+               return -EFBIG;
+       }
+
+       i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE,    /* # of sectors spanned */
+                        sf_sec_size);
+       ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
+                                    flash_cfg_start_sec + i - 1);
+       /*
+        * If size == 0 then we're simply erasing the FLASH sectors associated
+        * with the on-adapter Firmware Configuration File.
+        */
+       if (ret || size == 0)
+               goto out;
+
+       /* this will write to the flash up to SF_PAGE_SIZE at a time */
+       for (i = 0; i < size; i += SF_PAGE_SIZE) {
+               if ((size - i) <  SF_PAGE_SIZE)
+                       n = size - i;
+               else
+                       n = SF_PAGE_SIZE;
+               ret = t4_write_flash(adap, addr, n, cfg_data);
+               if (ret)
+                       goto out;
+
+               addr += SF_PAGE_SIZE;
+               cfg_data += SF_PAGE_SIZE;
+       }
+
+out:
+       if (ret)
+               dev_err(adap->pdev_dev, "config file %s failed %d\n",
+                       (size == 0 ? "clear" : "download"), ret);
+       return ret;
+}
+
 /**
  *     t4_load_fw - download firmware
  *     @adap: the adapter
@@ -1018,9 +1262,9 @@ static void sge_intr_handler(struct adapter *adapter)
                { ERR_INVALID_CIDX_INC,
                  "SGE GTS CIDX increment too large", -1, 0 },
                { ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
-               { F_DBFIFO_LP_INT, NULL, -1, 0, t4_db_full },
-               { F_DBFIFO_HP_INT, NULL, -1, 0, t4_db_full },
-               { F_ERR_DROPPED_DB, NULL, -1, 0, t4_db_dropped },
+               { DBFIFO_LP_INT, NULL, -1, 0, t4_db_full },
+               { DBFIFO_HP_INT, NULL, -1, 0, t4_db_full },
+               { ERR_DROPPED_DB, NULL, -1, 0, t4_db_dropped },
                { ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0,
                  "SGE IQID > 1023 received CPL for FL", -1, 0 },
                { ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
@@ -1520,7 +1764,7 @@ void t4_intr_enable(struct adapter *adapter)
                     ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 |
                     ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO |
                     ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR |
-                    F_DBFIFO_HP_INT | F_DBFIFO_LP_INT |
+                    DBFIFO_HP_INT | DBFIFO_LP_INT |
                     EGRESS_SIZE_ERR);
        t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), PF_INTR_MASK);
        t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << pf);
@@ -1716,6 +1960,23 @@ void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
        }
 }
 
+/**
+ *     t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
+ *     @adap: the adapter
+ *     @addr: the indirect TP register address
+ *     @mask: specifies the field within the register to modify
+ *     @val: new value for the field
+ *
+ *     Sets a field of an indirect TP register to the given value.
+ */
+void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
+                           unsigned int mask, unsigned int val)
+{
+       t4_write_reg(adap, TP_PIO_ADDR, addr);
+       val |= t4_read_reg(adap, TP_PIO_DATA) & ~mask;
+       t4_write_reg(adap, TP_PIO_DATA, val);
+}
+
 /**
  *     init_cong_ctrl - initialize congestion control parameters
  *     @a: the alpha values for congestion control
@@ -2000,9 +2261,9 @@ int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
        struct fw_ldst_cmd c;
 
        memset(&c, 0, sizeof(c));
-       c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
-                           F_FW_CMD_WRITE |
-                           V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE));
+       c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
+                           FW_CMD_WRITE |
+                           FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE));
        c.cycles_to_len16 = htonl(FW_LEN16(c));
        c.u.addrval.addr = htonl(addr);
        c.u.addrval.val = htonl(val);
@@ -2033,8 +2294,8 @@ int t4_mem_win_read_len(struct adapter *adap, u32 addr, __be32 *data, int len)
        if ((addr & 3) || (len + off) > MEMWIN0_APERTURE)
                return -EINVAL;
 
-       t4_write_reg(adap, A_PCIE_MEM_ACCESS_OFFSET, addr & ~15);
-       t4_read_reg(adap, A_PCIE_MEM_ACCESS_OFFSET);
+       t4_write_reg(adap, PCIE_MEM_ACCESS_OFFSET, addr & ~15);
+       t4_read_reg(adap, PCIE_MEM_ACCESS_OFFSET);
 
        for (i = 0; i < len; i += 4)
                *data++ = t4_read_reg(adap, (MEMWIN0_BASE + off + i));
@@ -2102,39 +2363,129 @@ int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
 }
 
 /**
- *     t4_fw_hello - establish communication with FW
- *     @adap: the adapter
- *     @mbox: mailbox to use for the FW command
- *     @evt_mbox: mailbox to receive async FW events
- *     @master: specifies the caller's willingness to be the device master
- *     @state: returns the current device state
+ *      t4_fw_hello - establish communication with FW
+ *      @adap: the adapter
+ *      @mbox: mailbox to use for the FW command
+ *      @evt_mbox: mailbox to receive async FW events
+ *      @master: specifies the caller's willingness to be the device master
+ *     @state: returns the current device state (if non-NULL)
  *
- *     Issues a command to establish communication with FW.
+ *     Issues a command to establish communication with FW.  Returns either
+ *     an error (negative integer) or the mailbox of the Master PF.
  */
 int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
                enum dev_master master, enum dev_state *state)
 {
        int ret;
        struct fw_hello_cmd c;
+       u32 v;
+       unsigned int master_mbox;
+       int retries = FW_CMD_HELLO_RETRIES;
 
+retry:
+       memset(&c, 0, sizeof(c));
        INIT_CMD(c, HELLO, WRITE);
        c.err_to_mbasyncnot = htonl(
                FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
                FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
-               FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox : 0xff) |
-               FW_HELLO_CMD_MBASYNCNOT(evt_mbox));
+               FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox :
+                                     FW_HELLO_CMD_MBMASTER_MASK) |
+               FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
+               FW_HELLO_CMD_STAGE(fw_hello_cmd_stage_os) |
+               FW_HELLO_CMD_CLEARINIT);
 
+       /*
+        * Issue the HELLO command to the firmware.  If it's not successful
+        * but indicates that we got a "busy" or "timeout" condition, retry
+        * the HELLO until we exhaust our retry limit.
+        */
        ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
-       if (ret == 0 && state) {
-               u32 v = ntohl(c.err_to_mbasyncnot);
-               if (v & FW_HELLO_CMD_INIT)
-                       *state = DEV_STATE_INIT;
-               else if (v & FW_HELLO_CMD_ERR)
+       if (ret < 0) {
+               if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
+                       goto retry;
+               return ret;
+       }
+
+       v = ntohl(c.err_to_mbasyncnot);
+       master_mbox = FW_HELLO_CMD_MBMASTER_GET(v);
+       if (state) {
+               if (v & FW_HELLO_CMD_ERR)
                        *state = DEV_STATE_ERR;
+               else if (v & FW_HELLO_CMD_INIT)
+                       *state = DEV_STATE_INIT;
                else
                        *state = DEV_STATE_UNINIT;
        }
-       return ret;
+
+       /*
+        * If we're not the Master PF then we need to wait around for the
+        * Master PF Driver to finish setting up the adapter.
+        *
+        * Note that we also do this wait if we're a non-Master-capable PF and
+        * there is no current Master PF; a Master PF may show up momentarily
+        * and we wouldn't want to fail pointlessly.  (This can happen when an
+        * OS loads lots of different drivers rapidly at the same time).  In
+        * this case, the Master PF returned by the firmware will be
+        * FW_PCIE_FW_MASTER_MASK so the test below will work ...
+        */
+       if ((v & (FW_HELLO_CMD_ERR|FW_HELLO_CMD_INIT)) == 0 &&
+           master_mbox != mbox) {
+               int waiting = FW_CMD_HELLO_TIMEOUT;
+
+               /*
+                * Wait for the firmware to either indicate an error or
+                * initialized state.  If we see either of these we bail out
+                * and report the issue to the caller.  If we exhaust the
+                * "hello timeout" and we haven't exhausted our retries, try
+                * again.  Otherwise bail with a timeout error.
+                */
+               for (;;) {
+                       u32 pcie_fw;
+
+                       msleep(50);
+                       waiting -= 50;
+
+                       /*
+                        * If neither Error nor Initialized is indicated
+                        * by the firmware, keep waiting till we exhaust our
+                        * timeout ... and then retry if we haven't exhausted
+                        * our retries ...
+                        */
+                       pcie_fw = t4_read_reg(adap, MA_PCIE_FW);
+                       if (!(pcie_fw & (FW_PCIE_FW_ERR|FW_PCIE_FW_INIT))) {
+                               if (waiting <= 0) {
+                                       if (retries-- > 0)
+                                               goto retry;
+
+                                       return -ETIMEDOUT;
+                               }
+                               continue;
+                       }
+
+                       /*
+                        * We either have an Error or Initialized condition
+                        * report errors preferentially.
+                        */
+                       if (state) {
+                               if (pcie_fw & FW_PCIE_FW_ERR)
+                                       *state = DEV_STATE_ERR;
+                               else if (pcie_fw & FW_PCIE_FW_INIT)
+                                       *state = DEV_STATE_INIT;
+                       }
+
+                       /*
+                        * If we arrived before a Master PF was selected and
+                        * there's now a valid Master PF, grab its identity
+                        * for our caller.
+                        */
+                       if (master_mbox == FW_PCIE_FW_MASTER_MASK &&
+                           (pcie_fw & FW_PCIE_FW_MASTER_VLD))
+                               master_mbox = FW_PCIE_FW_MASTER_GET(pcie_fw);
+                       break;
+               }
+       }
+
+       return master_mbox;
 }
 
 /**
@@ -2185,6 +2536,334 @@ int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
        return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 }
 
+/**
+ *     t4_fw_halt - issue a reset/halt to FW and put uP into RESET
+ *     @adap: the adapter
+ *     @mbox: mailbox to use for the FW RESET command (if desired)
+ *     @force: force uP into RESET even if FW RESET command fails
+ *
+ *     Issues a RESET command to firmware (if desired) with a HALT indication
+ *     and then puts the microprocessor into RESET state.  The RESET command
+ *     will only be issued if a legitimate mailbox is provided (mbox <=
+ *     FW_PCIE_FW_MASTER_MASK).
+ *
+ *     This is generally used in order for the host to safely manipulate the
+ *     adapter without fear of conflicting with whatever the firmware might
+ *     be doing.  The only way out of this state is to RESTART the firmware
+ *     ...
+ */
+int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
+{
+       int ret = 0;
+
+       /*
+        * If a legitimate mailbox is provided, issue a RESET command
+        * with a HALT indication.
+        */
+       if (mbox <= FW_PCIE_FW_MASTER_MASK) {
+               struct fw_reset_cmd c;
+
+               memset(&c, 0, sizeof(c));
+               INIT_CMD(c, RESET, WRITE);
+               c.val = htonl(PIORST | PIORSTMODE);
+               c.halt_pkd = htonl(FW_RESET_CMD_HALT(1U));
+               ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+       }
+
+       /*
+        * Normally we won't complete the operation if the firmware RESET
+        * command fails but if our caller insists we'll go ahead and put the
+        * uP into RESET.  This can be useful if the firmware is hung or even
+        * missing ...  We'll have to take the risk of putting the uP into
+        * RESET without the cooperation of firmware in that case.
+        *
+        * We also force the firmware's HALT flag to be on in case we bypassed
+        * the firmware RESET command above or we're dealing with old firmware
+        * which doesn't have the HALT capability.  This will serve as a flag
+        * for the incoming firmware to know that it's coming out of a HALT
+        * rather than a RESET ... if it's new enough to understand that ...
+        */
+       if (ret == 0 || force) {
+               t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, UPCRST);
+               t4_set_reg_field(adap, PCIE_FW, FW_PCIE_FW_HALT,
+                                FW_PCIE_FW_HALT);
+       }
+
+       /*
+        * And we always return the result of the firmware RESET command
+        * even when we force the uP into RESET ...
+        */
+       return ret;
+}
+
+/**
+ *     t4_fw_restart - restart the firmware by taking the uP out of RESET
+ *     @adap: the adapter
+ *     @reset: if we want to do a RESET to restart things
+ *
+ *     Restart firmware previously halted by t4_fw_halt().  On successful
+ *     return the previous PF Master remains as the new PF Master and there
+ *     is no need to issue a new HELLO command, etc.
+ *
+ *     We do this in two ways:
+ *
+ *      1. If we're dealing with newer firmware we'll simply want to take
+ *         the chip's microprocessor out of RESET.  This will cause the
+ *         firmware to start up from its start vector.  And then we'll loop
+ *         until the firmware indicates it's started again (PCIE_FW.HALT
+ *         reset to 0) or we timeout.
+ *
+ *      2. If we're dealing with older firmware then we'll need to RESET
+ *         the chip since older firmware won't recognize the PCIE_FW.HALT
+ *         flag and automatically RESET itself on startup.
+ */
+int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
+{
+       if (reset) {
+               /*
+                * Since we're directing the RESET instead of the firmware
+                * doing it automatically, we need to clear the PCIE_FW.HALT
+                * bit.
+                */
+               t4_set_reg_field(adap, PCIE_FW, FW_PCIE_FW_HALT, 0);
+
+               /*
+                * If we've been given a valid mailbox, first try to get the
+                * firmware to do the RESET.  If that works, great and we can
+                * return success.  Otherwise, if we haven't been given a
+                * valid mailbox or the RESET command failed, fall back to
+                * hitting the chip with a hammer.
+                */
+               if (mbox <= FW_PCIE_FW_MASTER_MASK) {
+                       t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, 0);
+                       msleep(100);
+                       if (t4_fw_reset(adap, mbox,
+                                       PIORST | PIORSTMODE) == 0)
+                               return 0;
+               }
+
+               t4_write_reg(adap, PL_RST, PIORST | PIORSTMODE);
+               msleep(2000);
+       } else {
+               int ms;
+
+               t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, 0);
+               for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
+                       if (!(t4_read_reg(adap, PCIE_FW) & FW_PCIE_FW_HALT))
+                               return 0;
+                       msleep(100);
+                       ms += 100;
+               }
+               return -ETIMEDOUT;
+       }
+       return 0;
+}
+
+/**
+ *     t4_fw_upgrade - perform all of the steps necessary to upgrade FW
+ *     @adap: the adapter
+ *     @mbox: mailbox to use for the FW RESET command (if desired)
+ *     @fw_data: the firmware image to write
+ *     @size: image size
+ *     @force: force upgrade even if firmware doesn't cooperate
+ *
+ *     Perform all of the steps necessary for upgrading an adapter's
+ *     firmware image.  Normally this requires the cooperation of the
+ *     existing firmware in order to halt all existing activities
+ *     but if an invalid mailbox token is passed in we skip that step
+ *     (though we'll still put the adapter microprocessor into RESET in
+ *     that case).
+ *
+ *     On successful return the new firmware will have been loaded and
+ *     the adapter will have been fully RESET losing all previous setup
+ *     state.  On unsuccessful return the adapter may be completely hosed ...
+ *     positive errno indicates that the adapter is ~probably~ intact, a
+ *     negative errno indicates that things are looking bad ...
+ */
+int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
+                 const u8 *fw_data, unsigned int size, int force)
+{
+       const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
+       int reset, ret;
+
+       ret = t4_fw_halt(adap, mbox, force);
+       if (ret < 0 && !force)
+               return ret;
+
+       ret = t4_load_fw(adap, fw_data, size);
+       if (ret < 0)
+               return ret;
+
+       /*
+        * Older versions of the firmware don't understand the new
+        * PCIE_FW.HALT flag and so won't know to perform a RESET when they
+        * restart.  So for newly loaded older firmware we'll have to do the
+        * RESET for it so it starts up on a clean slate.  We can tell if
+        * the newly loaded firmware will handle this right by checking
+        * its header flags to see if it advertises the capability.
+        */
+       reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
+       return t4_fw_restart(adap, mbox, reset);
+}
+
+
+/**
+ *     t4_fw_config_file - setup an adapter via a Configuration File
+ *     @adap: the adapter
+ *     @mbox: mailbox to use for the FW command
+ *     @mtype: the memory type where the Configuration File is located
+ *     @maddr: the memory address where the Configuration File is located
+ *     @finiver: return value for CF [fini] version
+ *     @finicsum: return value for CF [fini] checksum
+ *     @cfcsum: return value for CF computed checksum
+ *
+ *     Issue a command to get the firmware to process the Configuration
+ *     File located at the specified mtype/maddress.  If the Configuration
+ *     File is processed successfully and return value pointers are
+ *     provided, the Configuration File "[fini]" section version and
+ *     checksum values will be returned along with the computed checksum.
+ *     It's up to the caller to decide how it wants to respond to the
+ *     checksums not matching but it is recommended that a prominent warning
+ *     be emitted in order to help people rapidly identify changed or
+ *     corrupted Configuration Files.
+ *
+ *     Also note that it's possible to modify things like "niccaps",
+ *     "toecaps",etc. between processing the Configuration File and telling
+ *     the firmware to use the new configuration.  Callers which want to
+ *     do this will need to "hand-roll" their own CAPS_CONFIGS commands for
+ *     Configuration Files if they want to do this.
+ */
+int t4_fw_config_file(struct adapter *adap, unsigned int mbox,
+                     unsigned int mtype, unsigned int maddr,
+                     u32 *finiver, u32 *finicsum, u32 *cfcsum)
+{
+       struct fw_caps_config_cmd caps_cmd;
+       int ret;
+
+       /*
+        * Tell the firmware to process the indicated Configuration File.
+        * If there are no errors and the caller has provided return value
+        * pointers for the [fini] section version, checksum and computed
+        * checksum, pass those back to the caller.
+        */
+       memset(&caps_cmd, 0, sizeof(caps_cmd));
+       caps_cmd.op_to_write =
+               htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
+                     FW_CMD_REQUEST |
+                     FW_CMD_READ);
+       caps_cmd.retval_len16 =
+               htonl(FW_CAPS_CONFIG_CMD_CFVALID |
+                     FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
+                     FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
+                     FW_LEN16(caps_cmd));
+       ret = t4_wr_mbox(adap, mbox, &caps_cmd, sizeof(caps_cmd), &caps_cmd);
+       if (ret < 0)
+               return ret;
+
+       if (finiver)
+               *finiver = ntohl(caps_cmd.finiver);
+       if (finicsum)
+               *finicsum = ntohl(caps_cmd.finicsum);
+       if (cfcsum)
+               *cfcsum = ntohl(caps_cmd.cfcsum);
+
+       /*
+        * And now tell the firmware to use the configuration we just loaded.
+        */
+       caps_cmd.op_to_write =
+               htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
+                     FW_CMD_REQUEST |
+                     FW_CMD_WRITE);
+       caps_cmd.retval_len16 = htonl(FW_LEN16(caps_cmd));
+       return t4_wr_mbox(adap, mbox, &caps_cmd, sizeof(caps_cmd), NULL);
+}
+
+/**
+ *     t4_fixup_host_params - fix up host-dependent parameters
+ *     @adap: the adapter
+ *     @page_size: the host's Base Page Size
+ *     @cache_line_size: the host's Cache Line Size
+ *
+ *     Various registers in T4 contain values which are dependent on the
+ *     host's Base Page and Cache Line Sizes.  This function will fix all of
+ *     those registers with the appropriate values as passed in ...
+ */
+int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
+                        unsigned int cache_line_size)
+{
+       unsigned int page_shift = fls(page_size) - 1;
+       unsigned int sge_hps = page_shift - 10;
+       unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
+       unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
+       unsigned int fl_align_log = fls(fl_align) - 1;
+
+       t4_write_reg(adap, SGE_HOST_PAGE_SIZE,
+                    HOSTPAGESIZEPF0(sge_hps) |
+                    HOSTPAGESIZEPF1(sge_hps) |
+                    HOSTPAGESIZEPF2(sge_hps) |
+                    HOSTPAGESIZEPF3(sge_hps) |
+                    HOSTPAGESIZEPF4(sge_hps) |
+                    HOSTPAGESIZEPF5(sge_hps) |
+                    HOSTPAGESIZEPF6(sge_hps) |
+                    HOSTPAGESIZEPF7(sge_hps));
+
+       t4_set_reg_field(adap, SGE_CONTROL,
+                        INGPADBOUNDARY(INGPADBOUNDARY_MASK) |
+                        EGRSTATUSPAGESIZE_MASK,
+                        INGPADBOUNDARY(fl_align_log - 5) |
+                        EGRSTATUSPAGESIZE(stat_len != 64));
+
+       /*
+        * Adjust various SGE Free List Host Buffer Sizes.
+        *
+        * This is something of a crock since we're using fixed indices into
+        * the array which are also known by the sge.c code and the T4
+        * Firmware Configuration File.  We need to come up with a much better
+        * approach to managing this array.  For now, the first four entries
+        * are:
+        *
+        *   0: Host Page Size
+        *   1: 64KB
+        *   2: Buffer size corresponding to 1500 byte MTU (unpacked mode)
+        *   3: Buffer size corresponding to 9000 byte MTU (unpacked mode)
+        *
+        * For the single-MTU buffers in unpacked mode we need to include
+        * space for the SGE Control Packet Shift, 14 byte Ethernet header,
+        * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
+        * Padding boundary.  All of these are accommodated in the Factory
+        * Default Firmware Configuration File but we need to adjust it for
+        * this host's cache line size.
+        */
+       t4_write_reg(adap, SGE_FL_BUFFER_SIZE0, page_size);
+       t4_write_reg(adap, SGE_FL_BUFFER_SIZE2,
+                    (t4_read_reg(adap, SGE_FL_BUFFER_SIZE2) + fl_align-1)
+                    & ~(fl_align-1));
+       t4_write_reg(adap, SGE_FL_BUFFER_SIZE3,
+                    (t4_read_reg(adap, SGE_FL_BUFFER_SIZE3) + fl_align-1)
+                    & ~(fl_align-1));
+
+       t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(page_shift - 12));
+
+       return 0;
+}
+
+/**
+ *     t4_fw_initialize - ask FW to initialize the device
+ *     @adap: the adapter
+ *     @mbox: mailbox to use for the FW command
+ *
+ *     Issues a command to FW to partially initialize the device.  This
+ *     performs initialization that generally doesn't depend on user input.
+ */
+int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
+{
+       struct fw_initialize_cmd c;
+
+       memset(&c, 0, sizeof(c));
+       INIT_CMD(c, INITIALIZE, WRITE);
+       return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
 /**
  *     t4_query_params - query FW or device parameters
  *     @adap: the adapter
@@ -2835,10 +3514,6 @@ int __devinit t4_prep_adapter(struct adapter *adapter)
                return ret;
        }
 
-       ret = get_vpd_params(adapter, &adapter->params.vpd);
-       if (ret < 0)
-               return ret;
-
        init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
 
        /*
@@ -2846,6 +3521,7 @@ int __devinit t4_prep_adapter(struct adapter *adapter)
         */
        adapter->params.nports = 1;
        adapter->params.portvec = 1;
+       adapter->params.vpd.cclk = 50000;
        return 0;
 }
 
index c26b455f37de54c1075a93e6d100282ef8c6e134..f534ed7e10e9db34b55a5be07f717d93e860a7df 100644 (file)
@@ -58,6 +58,7 @@ enum {
 
 enum {
        SF_PAGE_SIZE = 256,           /* serial flash page size */
+       SF_SEC_SIZE = 64 * 1024,      /* serial flash sector size */
 };
 
 enum { RSP_TYPE_FLBUF, RSP_TYPE_CPL, RSP_TYPE_INTR }; /* response entry types */
@@ -137,4 +138,83 @@ struct rsp_ctrl {
 #define QINTR_CNT_EN       0x1
 #define QINTR_TIMER_IDX(x) ((x) << 1)
 #define QINTR_TIMER_IDX_GET(x) (((x) >> 1) & 0x7)
+
+/*
+ * Flash layout.
+ */
+#define FLASH_START(start)     ((start) * SF_SEC_SIZE)
+#define FLASH_MAX_SIZE(nsecs)  ((nsecs) * SF_SEC_SIZE)
+
+enum {
+       /*
+        * Various Expansion-ROM boot images, etc.
+        */
+       FLASH_EXP_ROM_START_SEC = 0,
+       FLASH_EXP_ROM_NSECS = 6,
+       FLASH_EXP_ROM_START = FLASH_START(FLASH_EXP_ROM_START_SEC),
+       FLASH_EXP_ROM_MAX_SIZE = FLASH_MAX_SIZE(FLASH_EXP_ROM_NSECS),
+
+       /*
+        * iSCSI Boot Firmware Table (iBFT) and other driver-related
+        * parameters ...
+        */
+       FLASH_IBFT_START_SEC = 6,
+       FLASH_IBFT_NSECS = 1,
+       FLASH_IBFT_START = FLASH_START(FLASH_IBFT_START_SEC),
+       FLASH_IBFT_MAX_SIZE = FLASH_MAX_SIZE(FLASH_IBFT_NSECS),
+
+       /*
+        * Boot configuration data.
+        */
+       FLASH_BOOTCFG_START_SEC = 7,
+       FLASH_BOOTCFG_NSECS = 1,
+       FLASH_BOOTCFG_START = FLASH_START(FLASH_BOOTCFG_START_SEC),
+       FLASH_BOOTCFG_MAX_SIZE = FLASH_MAX_SIZE(FLASH_BOOTCFG_NSECS),
+
+       /*
+        * Location of firmware image in FLASH.
+        */
+       FLASH_FW_START_SEC = 8,
+       FLASH_FW_NSECS = 8,
+       FLASH_FW_START = FLASH_START(FLASH_FW_START_SEC),
+       FLASH_FW_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FW_NSECS),
+
+       /*
+        * iSCSI persistent/crash information.
+        */
+       FLASH_ISCSI_CRASH_START_SEC = 29,
+       FLASH_ISCSI_CRASH_NSECS = 1,
+       FLASH_ISCSI_CRASH_START = FLASH_START(FLASH_ISCSI_CRASH_START_SEC),
+       FLASH_ISCSI_CRASH_MAX_SIZE = FLASH_MAX_SIZE(FLASH_ISCSI_CRASH_NSECS),
+
+       /*
+        * FCoE persistent/crash information.
+        */
+       FLASH_FCOE_CRASH_START_SEC = 30,
+       FLASH_FCOE_CRASH_NSECS = 1,
+       FLASH_FCOE_CRASH_START = FLASH_START(FLASH_FCOE_CRASH_START_SEC),
+       FLASH_FCOE_CRASH_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FCOE_CRASH_NSECS),
+
+       /*
+        * Location of Firmware Configuration File in FLASH.  Since the FPGA
+        * "FLASH" is smaller we need to store the Configuration File in a
+        * different location -- which will overlap the end of the firmware
+        * image if firmware ever gets that large ...
+        */
+       FLASH_CFG_START_SEC = 31,
+       FLASH_CFG_NSECS = 1,
+       FLASH_CFG_START = FLASH_START(FLASH_CFG_START_SEC),
+       FLASH_CFG_MAX_SIZE = FLASH_MAX_SIZE(FLASH_CFG_NSECS),
+
+       FLASH_FPGA_CFG_START_SEC = 15,
+       FLASH_FPGA_CFG_START = FLASH_START(FLASH_FPGA_CFG_START_SEC),
+
+       /*
+        * Sectors 32-63 are reserved for FLASH failover.
+        */
+};
+
+#undef FLASH_START
+#undef FLASH_MAX_SIZE
+
 #endif /* __T4_HW_H */
index 111fc323f155c4dbab831ac1fab85e2228a3bb6a..a1a8b57200f607971f8f450495a8b7abe9478d5c 100644 (file)
 #define  CIDXINC_SHIFT     0
 #define  CIDXINC(x)        ((x) << CIDXINC_SHIFT)
 
+#define X_RXPKTCPLMODE_SPLIT     1
+#define X_INGPADBOUNDARY_SHIFT 5
+
 #define SGE_CONTROL 0x1008
 #define  DCASYSTYPE             0x00080000U
-#define  RXPKTCPLMODE           0x00040000U
-#define  EGRSTATUSPAGESIZE      0x00020000U
+#define  RXPKTCPLMODE_MASK      0x00040000U
+#define  RXPKTCPLMODE_SHIFT     18
+#define  RXPKTCPLMODE(x)        ((x) << RXPKTCPLMODE_SHIFT)
+#define  EGRSTATUSPAGESIZE_MASK  0x00020000U
+#define  EGRSTATUSPAGESIZE_SHIFT 17
+#define  EGRSTATUSPAGESIZE(x)    ((x) << EGRSTATUSPAGESIZE_SHIFT)
 #define  PKTSHIFT_MASK          0x00001c00U
 #define  PKTSHIFT_SHIFT         10
 #define  PKTSHIFT(x)            ((x) << PKTSHIFT_SHIFT)
 #define  GLOBALENABLE           0x00000001U
 
 #define SGE_HOST_PAGE_SIZE 0x100c
+
+#define  HOSTPAGESIZEPF7_MASK   0x0000000fU
+#define  HOSTPAGESIZEPF7_SHIFT  28
+#define  HOSTPAGESIZEPF7(x)     ((x) << HOSTPAGESIZEPF7_SHIFT)
+
+#define  HOSTPAGESIZEPF6_MASK   0x0000000fU
+#define  HOSTPAGESIZEPF6_SHIFT  24
+#define  HOSTPAGESIZEPF6(x)     ((x) << HOSTPAGESIZEPF6_SHIFT)
+
+#define  HOSTPAGESIZEPF5_MASK   0x0000000fU
+#define  HOSTPAGESIZEPF5_SHIFT  20
+#define  HOSTPAGESIZEPF5(x)     ((x) << HOSTPAGESIZEPF5_SHIFT)
+
+#define  HOSTPAGESIZEPF4_MASK   0x0000000fU
+#define  HOSTPAGESIZEPF4_SHIFT  16
+#define  HOSTPAGESIZEPF4(x)     ((x) << HOSTPAGESIZEPF4_SHIFT)
+
+#define  HOSTPAGESIZEPF3_MASK   0x0000000fU
+#define  HOSTPAGESIZEPF3_SHIFT  12
+#define  HOSTPAGESIZEPF3(x)     ((x) << HOSTPAGESIZEPF3_SHIFT)
+
+#define  HOSTPAGESIZEPF2_MASK   0x0000000fU
+#define  HOSTPAGESIZEPF2_SHIFT  8
+#define  HOSTPAGESIZEPF2(x)     ((x) << HOSTPAGESIZEPF2_SHIFT)
+
+#define  HOSTPAGESIZEPF1_MASK   0x0000000fU
+#define  HOSTPAGESIZEPF1_SHIFT  4
+#define  HOSTPAGESIZEPF1(x)     ((x) << HOSTPAGESIZEPF1_SHIFT)
+
 #define  HOSTPAGESIZEPF0_MASK   0x0000000fU
 #define  HOSTPAGESIZEPF0_SHIFT  0
 #define  HOSTPAGESIZEPF0(x)     ((x) << HOSTPAGESIZEPF0_SHIFT)
 #define SGE_INT_ENABLE3 0x1040
 #define SGE_FL_BUFFER_SIZE0 0x1044
 #define SGE_FL_BUFFER_SIZE1 0x1048
+#define SGE_FL_BUFFER_SIZE2 0x104c
+#define SGE_FL_BUFFER_SIZE3 0x1050
 #define SGE_INGRESS_RX_THRESHOLD 0x10a0
 #define  THRESHOLD_0_MASK   0x3f000000U
 #define  THRESHOLD_0_SHIFT  24
 #define  THRESHOLD_3(x)     ((x) << THRESHOLD_3_SHIFT)
 #define  THRESHOLD_3_GET(x) (((x) & THRESHOLD_3_MASK) >> THRESHOLD_3_SHIFT)
 
+#define SGE_CONM_CTRL 0x1094
+#define  EGRTHRESHOLD_MASK   0x00003f00U
+#define  EGRTHRESHOLDshift   8
+#define  EGRTHRESHOLD(x)     ((x) << EGRTHRESHOLDshift)
+#define  EGRTHRESHOLD_GET(x) (((x) & EGRTHRESHOLD_MASK) >> EGRTHRESHOLDshift)
+
 #define SGE_TIMER_VALUE_0_AND_1 0x10b8
 #define  TIMERVALUE0_MASK   0xffff0000U
 #define  TIMERVALUE0_SHIFT  16
 #define  TIMERVALUE1_GET(x) (((x) & TIMERVALUE1_MASK) >> TIMERVALUE1_SHIFT)
 
 #define SGE_TIMER_VALUE_2_AND_3 0x10bc
+#define  TIMERVALUE2_MASK   0xffff0000U
+#define  TIMERVALUE2_SHIFT  16
+#define  TIMERVALUE2(x)     ((x) << TIMERVALUE2_SHIFT)
+#define  TIMERVALUE2_GET(x) (((x) & TIMERVALUE2_MASK) >> TIMERVALUE2_SHIFT)
+#define  TIMERVALUE3_MASK   0x0000ffffU
+#define  TIMERVALUE3_SHIFT  0
+#define  TIMERVALUE3(x)     ((x) << TIMERVALUE3_SHIFT)
+#define  TIMERVALUE3_GET(x) (((x) & TIMERVALUE3_MASK) >> TIMERVALUE3_SHIFT)
+
 #define SGE_TIMER_VALUE_4_AND_5 0x10c0
+#define  TIMERVALUE4_MASK   0xffff0000U
+#define  TIMERVALUE4_SHIFT  16
+#define  TIMERVALUE4(x)     ((x) << TIMERVALUE4_SHIFT)
+#define  TIMERVALUE4_GET(x) (((x) & TIMERVALUE4_MASK) >> TIMERVALUE4_SHIFT)
+#define  TIMERVALUE5_MASK   0x0000ffffU
+#define  TIMERVALUE5_SHIFT  0
+#define  TIMERVALUE5(x)     ((x) << TIMERVALUE5_SHIFT)
+#define  TIMERVALUE5_GET(x) (((x) & TIMERVALUE5_MASK) >> TIMERVALUE5_SHIFT)
+
 #define SGE_DEBUG_INDEX 0x10cc
 #define SGE_DEBUG_DATA_HIGH 0x10d0
 #define SGE_DEBUG_DATA_LOW 0x10d4
 #define SGE_INGRESS_QUEUES_PER_PAGE_PF 0x10f4
 
-#define S_LP_INT_THRESH    12
-#define V_LP_INT_THRESH(x) ((x) << S_LP_INT_THRESH)
 #define S_HP_INT_THRESH    28
+#define M_HP_INT_THRESH 0xfU
 #define V_HP_INT_THRESH(x) ((x) << S_HP_INT_THRESH)
+#define M_HP_COUNT 0x7ffU
+#define S_HP_COUNT 16
+#define G_HP_COUNT(x) (((x) >> S_HP_COUNT) & M_HP_COUNT)
+#define S_LP_INT_THRESH    12
+#define M_LP_INT_THRESH 0xfU
+#define V_LP_INT_THRESH(x) ((x) << S_LP_INT_THRESH)
+#define M_LP_COUNT 0x7ffU
+#define S_LP_COUNT 0
+#define G_LP_COUNT(x) (((x) >> S_LP_COUNT) & M_LP_COUNT)
 #define A_SGE_DBFIFO_STATUS 0x10a4
 
 #define S_ENABLE_DROP    13
 #define V_ENABLE_DROP(x) ((x) << S_ENABLE_DROP)
 #define F_ENABLE_DROP    V_ENABLE_DROP(1U)
-#define A_SGE_DOORBELL_CONTROL 0x10a8
-
-#define A_SGE_CTXT_CMD 0x11fc
-#define A_SGE_DBQ_CTXT_BADDR 0x1084
-
-#define A_SGE_PF_KDOORBELL 0x0
-
-#define S_QID 15
-#define V_QID(x) ((x) << S_QID)
-
-#define S_PIDX 0
-#define V_PIDX(x) ((x) << S_PIDX)
-
-#define M_LP_COUNT 0x7ffU
-#define S_LP_COUNT 0
-#define G_LP_COUNT(x) (((x) >> S_LP_COUNT) & M_LP_COUNT)
-
-#define M_HP_COUNT 0x7ffU
-#define S_HP_COUNT 16
-#define G_HP_COUNT(x) (((x) >> S_HP_COUNT) & M_HP_COUNT)
-
-#define A_SGE_INT_ENABLE3 0x1040
-
-#define S_DBFIFO_HP_INT 8
-#define V_DBFIFO_HP_INT(x) ((x) << S_DBFIFO_HP_INT)
-#define F_DBFIFO_HP_INT V_DBFIFO_HP_INT(1U)
-
-#define S_DBFIFO_LP_INT 7
-#define V_DBFIFO_LP_INT(x) ((x) << S_DBFIFO_LP_INT)
-#define F_DBFIFO_LP_INT V_DBFIFO_LP_INT(1U)
-
 #define S_DROPPED_DB 0
 #define V_DROPPED_DB(x) ((x) << S_DROPPED_DB)
 #define F_DROPPED_DB V_DROPPED_DB(1U)
+#define A_SGE_DOORBELL_CONTROL 0x10a8
 
-#define S_ERR_DROPPED_DB 18
-#define V_ERR_DROPPED_DB(x) ((x) << S_ERR_DROPPED_DB)
-#define F_ERR_DROPPED_DB V_ERR_DROPPED_DB(1U)
-
-#define A_PCIE_MEM_ACCESS_OFFSET 0x306c
-
-#define M_HP_INT_THRESH 0xfU
-#define M_LP_INT_THRESH 0xfU
+#define A_SGE_CTXT_CMD 0x11fc
+#define A_SGE_DBQ_CTXT_BADDR 0x1084
 
 #define PCIE_PF_CLI 0x44
 #define PCIE_INT_CAUSE 0x3004
 #define  WINDOW(x)       ((x) << WINDOW_SHIFT)
 #define PCIE_MEM_ACCESS_OFFSET 0x306c
 
+#define PCIE_FW 0x30b8
+
 #define PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS 0x5908
 #define  RNPP 0x80000000U
 #define  RPCP 0x20000000U
 #define  MEM_WRAP_CLIENT_NUM_MASK   0x0000000fU
 #define  MEM_WRAP_CLIENT_NUM_SHIFT  0
 #define  MEM_WRAP_CLIENT_NUM_GET(x) (((x) & MEM_WRAP_CLIENT_NUM_MASK) >> MEM_WRAP_CLIENT_NUM_SHIFT)
-
+#define MA_PCIE_FW 0x30b8
 #define MA_PARITY_ERROR_STATUS 0x77f4
 
 #define EDC_0_BASE_ADDR 0x7900
 
 #define CIM_BOOT_CFG 0x7b00
 #define  BOOTADDR_MASK 0xffffff00U
+#define  UPCRST        0x1U
 
 #define CIM_PF_MAILBOX_DATA 0x240
 #define CIM_PF_MAILBOX_CTRL 0x280
 #define  VLANEXTENABLE_MASK  0x0000f000U
 #define  VLANEXTENABLE_SHIFT 12
 
+#define TP_GLOBAL_CONFIG 0x7d08
+#define  FIVETUPLELOOKUP_SHIFT  17
+#define  FIVETUPLELOOKUP_MASK   0x00060000U
+#define  FIVETUPLELOOKUP(x)     ((x) << FIVETUPLELOOKUP_SHIFT)
+#define  FIVETUPLELOOKUP_GET(x) (((x) & FIVETUPLELOOKUP_MASK) >> \
+                               FIVETUPLELOOKUP_SHIFT)
+
 #define TP_PARA_REG2 0x7d68
 #define  MAXRXDATA_MASK    0xffff0000U
 #define  MAXRXDATA_SHIFT   16
 #define  TIMERRESOLUTION_MASK   0x00ff0000U
 #define  TIMERRESOLUTION_SHIFT  16
 #define  TIMERRESOLUTION_GET(x) (((x) & TIMERRESOLUTION_MASK) >> TIMERRESOLUTION_SHIFT)
+#define  DELAYEDACKRESOLUTION_MASK 0x000000ffU
+#define  DELAYEDACKRESOLUTION_SHIFT     0
+#define  DELAYEDACKRESOLUTION_GET(x) \
+       (((x) & DELAYEDACKRESOLUTION_MASK) >> DELAYEDACKRESOLUTION_SHIFT)
 
 #define TP_SHIFT_CNT 0x7dc0
+#define  SYNSHIFTMAX_SHIFT         24
+#define  SYNSHIFTMAX_MASK          0xff000000U
+#define  SYNSHIFTMAX(x)            ((x) << SYNSHIFTMAX_SHIFT)
+#define  SYNSHIFTMAX_GET(x)        (((x) & SYNSHIFTMAX_MASK) >> \
+                                  SYNSHIFTMAX_SHIFT)
+#define  RXTSHIFTMAXR1_SHIFT       20
+#define  RXTSHIFTMAXR1_MASK        0x00f00000U
+#define  RXTSHIFTMAXR1(x)          ((x) << RXTSHIFTMAXR1_SHIFT)
+#define  RXTSHIFTMAXR1_GET(x)      (((x) & RXTSHIFTMAXR1_MASK) >> \
+                                  RXTSHIFTMAXR1_SHIFT)
+#define  RXTSHIFTMAXR2_SHIFT       16
+#define  RXTSHIFTMAXR2_MASK        0x000f0000U
+#define  RXTSHIFTMAXR2(x)          ((x) << RXTSHIFTMAXR2_SHIFT)
+#define  RXTSHIFTMAXR2_GET(x)      (((x) & RXTSHIFTMAXR2_MASK) >> \
+                                  RXTSHIFTMAXR2_SHIFT)
+#define  PERSHIFTBACKOFFMAX_SHIFT  12
+#define  PERSHIFTBACKOFFMAX_MASK   0x0000f000U
+#define  PERSHIFTBACKOFFMAX(x)     ((x) << PERSHIFTBACKOFFMAX_SHIFT)
+#define  PERSHIFTBACKOFFMAX_GET(x) (((x) & PERSHIFTBACKOFFMAX_MASK) >> \
+                                  PERSHIFTBACKOFFMAX_SHIFT)
+#define  PERSHIFTMAX_SHIFT         8
+#define  PERSHIFTMAX_MASK          0x00000f00U
+#define  PERSHIFTMAX(x)            ((x) << PERSHIFTMAX_SHIFT)
+#define  PERSHIFTMAX_GET(x)        (((x) & PERSHIFTMAX_MASK) >> \
+                                  PERSHIFTMAX_SHIFT)
+#define  KEEPALIVEMAXR1_SHIFT      4
+#define  KEEPALIVEMAXR1_MASK       0x000000f0U
+#define  KEEPALIVEMAXR1(x)         ((x) << KEEPALIVEMAXR1_SHIFT)
+#define  KEEPALIVEMAXR1_GET(x)     (((x) & KEEPALIVEMAXR1_MASK) >> \
+                                  KEEPALIVEMAXR1_SHIFT)
+#define KEEPALIVEMAXR2_SHIFT       0
+#define KEEPALIVEMAXR2_MASK        0x0000000fU
+#define KEEPALIVEMAXR2(x)          ((x) << KEEPALIVEMAXR2_SHIFT)
+#define KEEPALIVEMAXR2_GET(x)      (((x) & KEEPALIVEMAXR2_MASK) >> \
+                                  KEEPALIVEMAXR2_SHIFT)
 
 #define TP_CCTRL_TABLE 0x7ddc
 #define TP_MTU_TABLE 0x7de4
 #define TP_INT_CAUSE 0x7e74
 #define  FLMTXFLSTEMPTY 0x40000000U
 
+#define TP_VLAN_PRI_MAP 0x140
+#define  FRAGMENTATION_SHIFT 9
+#define  FRAGMENTATION_MASK  0x00000200U
+#define  MPSHITTYPE_MASK     0x00000100U
+#define  MACMATCH_MASK       0x00000080U
+#define  ETHERTYPE_MASK      0x00000040U
+#define  PROTOCOL_MASK       0x00000020U
+#define  TOS_MASK            0x00000010U
+#define  VLAN_MASK           0x00000008U
+#define  VNIC_ID_MASK        0x00000004U
+#define  PORT_MASK           0x00000002U
+#define  FCOE_SHIFT          0
+#define  FCOE_MASK           0x00000001U
+
 #define TP_INGRESS_CONFIG 0x141
 #define  VNIC                0x00000800U
 #define  CSUM_HAS_PSEUDO_HDR 0x00000400U
index ad53f796b574ca6668d8f2af4658aa222cc1b50d..a6364632b490a7d1a7e57bbb086c74afcbe3181a 100644 (file)
@@ -79,6 +79,8 @@ struct fw_wr_hdr {
 #define FW_WR_FLOWID(x)        ((x) << 8)
 #define FW_WR_LEN16(x) ((x) << 0)
 
+#define HW_TPL_FR_MT_PR_IV_P_FC         0X32B
+
 struct fw_ulptx_wr {
        __be32 op_to_compl;
        __be32 flowid_len16;
@@ -155,6 +157,17 @@ struct fw_eth_tx_pkt_vm_wr {
 
 #define FW_CMD_MAX_TIMEOUT 3000
 
+/*
+ * If a host driver does a HELLO and discovers that there's already a MASTER
+ * selected, we may have to wait for that MASTER to finish issuing RESET,
+ * configuration and INITIALIZE commands.  Also, there's a possibility that
+ * our own HELLO may get lost if it happens right as the MASTER is issuing a
+ * RESET command, so we need to be willing to make a few retries of our HELLO.
+ */
+#define FW_CMD_HELLO_TIMEOUT   (3 * FW_CMD_MAX_TIMEOUT)
+#define FW_CMD_HELLO_RETRIES   3
+
+
 enum fw_cmd_opcodes {
        FW_LDST_CMD                    = 0x01,
        FW_RESET_CMD                   = 0x03,
@@ -304,7 +317,17 @@ struct fw_reset_cmd {
        __be32 op_to_write;
        __be32 retval_len16;
        __be32 val;
-       __be32 r3;
+       __be32 halt_pkd;
+};
+
+#define FW_RESET_CMD_HALT_SHIFT    31
+#define FW_RESET_CMD_HALT_MASK     0x1
+#define FW_RESET_CMD_HALT(x)       ((x) << FW_RESET_CMD_HALT_SHIFT)
+#define FW_RESET_CMD_HALT_GET(x)  \
+       (((x) >> FW_RESET_CMD_HALT_SHIFT) & FW_RESET_CMD_HALT_MASK)
+
+enum fw_hellow_cmd {
+       fw_hello_cmd_stage_os           = 0x0
 };
 
 struct fw_hello_cmd {
@@ -315,8 +338,14 @@ struct fw_hello_cmd {
 #define FW_HELLO_CMD_INIT          (1U << 30)
 #define FW_HELLO_CMD_MASTERDIS(x)   ((x) << 29)
 #define FW_HELLO_CMD_MASTERFORCE(x) ((x) << 28)
-#define FW_HELLO_CMD_MBMASTER(x)    ((x) << 24)
+#define FW_HELLO_CMD_MBMASTER_MASK   0xfU
+#define FW_HELLO_CMD_MBMASTER_SHIFT  24
+#define FW_HELLO_CMD_MBMASTER(x)     ((x) << FW_HELLO_CMD_MBMASTER_SHIFT)
+#define FW_HELLO_CMD_MBMASTER_GET(x) \
+       (((x) >> FW_HELLO_CMD_MBMASTER_SHIFT) & FW_HELLO_CMD_MBMASTER_MASK)
 #define FW_HELLO_CMD_MBASYNCNOT(x)  ((x) << 20)
+#define FW_HELLO_CMD_STAGE(x)       ((x) << 17)
+#define FW_HELLO_CMD_CLEARINIT      (1U << 16)
        __be32 fwrev;
 };
 
@@ -401,6 +430,14 @@ enum fw_caps_config_fcoe {
        FW_CAPS_CONFIG_FCOE_TARGET      = 0x00000002,
 };
 
+enum fw_memtype_cf {
+       FW_MEMTYPE_CF_EDC0              = 0x0,
+       FW_MEMTYPE_CF_EDC1              = 0x1,
+       FW_MEMTYPE_CF_EXTMEM            = 0x2,
+       FW_MEMTYPE_CF_FLASH             = 0x4,
+       FW_MEMTYPE_CF_INTERNAL          = 0x5,
+};
+
 struct fw_caps_config_cmd {
        __be32 op_to_write;
        __be32 retval_len16;
@@ -416,10 +453,15 @@ struct fw_caps_config_cmd {
        __be16 r4;
        __be16 iscsicaps;
        __be16 fcoecaps;
-       __be32 r5;
-       __be64 r6;
+       __be32 cfcsum;
+       __be32 finiver;
+       __be32 finicsum;
 };
 
+#define FW_CAPS_CONFIG_CMD_CFVALID          (1U << 27)
+#define FW_CAPS_CONFIG_CMD_MEMTYPE_CF(x)    ((x) << 24)
+#define FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(x) ((x) << 16)
+
 /*
  * params command mnemonics
  */
@@ -451,6 +493,7 @@ enum fw_params_param_dev {
        FW_PARAMS_PARAM_DEV_INTVER_FCOE = 0x0A,
        FW_PARAMS_PARAM_DEV_FWREV = 0x0B,
        FW_PARAMS_PARAM_DEV_TPREV = 0x0C,
+       FW_PARAMS_PARAM_DEV_CF = 0x0D,
 };
 
 /*
@@ -492,6 +535,8 @@ enum fw_params_param_pfvf {
        FW_PARAMS_PARAM_PFVF_IQFLINT_END = 0x2A,
        FW_PARAMS_PARAM_PFVF_EQ_START   = 0x2B,
        FW_PARAMS_PARAM_PFVF_EQ_END     = 0x2C,
+       FW_PARAMS_PARAM_PFVF_ACTIVE_FILTER_START = 0x2D,
+       FW_PARAMS_PARAM_PFVF_ACTIVE_FILTER_END = 0x2E
 };
 
 /*
@@ -507,8 +552,16 @@ enum fw_params_param_dmaq {
 
 #define FW_PARAMS_MNEM(x)      ((x) << 24)
 #define FW_PARAMS_PARAM_X(x)   ((x) << 16)
-#define FW_PARAMS_PARAM_Y(x)   ((x) << 8)
-#define FW_PARAMS_PARAM_Z(x)   ((x) << 0)
+#define FW_PARAMS_PARAM_Y_SHIFT  8
+#define FW_PARAMS_PARAM_Y_MASK   0xffU
+#define FW_PARAMS_PARAM_Y(x)     ((x) << FW_PARAMS_PARAM_Y_SHIFT)
+#define FW_PARAMS_PARAM_Y_GET(x) (((x) >> FW_PARAMS_PARAM_Y_SHIFT) &\
+               FW_PARAMS_PARAM_Y_MASK)
+#define FW_PARAMS_PARAM_Z_SHIFT  0
+#define FW_PARAMS_PARAM_Z_MASK   0xffu
+#define FW_PARAMS_PARAM_Z(x)     ((x) << FW_PARAMS_PARAM_Z_SHIFT)
+#define FW_PARAMS_PARAM_Z_GET(x) (((x) >> FW_PARAMS_PARAM_Z_SHIFT) &\
+               FW_PARAMS_PARAM_Z_MASK)
 #define FW_PARAMS_PARAM_XYZ(x) ((x) << 0)
 #define FW_PARAMS_PARAM_YZ(x)  ((x) << 0)
 
@@ -1599,6 +1652,16 @@ struct fw_debug_cmd {
        } u;
 };
 
+#define FW_PCIE_FW_ERR           (1U << 31)
+#define FW_PCIE_FW_INIT          (1U << 30)
+#define FW_PCIE_FW_HALT          (1U << 29)
+#define FW_PCIE_FW_MASTER_VLD    (1U << 15)
+#define FW_PCIE_FW_MASTER_MASK   0x7
+#define FW_PCIE_FW_MASTER_SHIFT  12
+#define FW_PCIE_FW_MASTER(x)     ((x) << FW_PCIE_FW_MASTER_SHIFT)
+#define FW_PCIE_FW_MASTER_GET(x) (((x) >> FW_PCIE_FW_MASTER_SHIFT) & \
+                                FW_PCIE_FW_MASTER_MASK)
+
 struct fw_hdr {
        u8 ver;
        u8 reserved1;
@@ -1613,7 +1676,11 @@ struct fw_hdr {
        u8 intfver_iscsi;
        u8 intfver_fcoe;
        u8 reserved2;
-       __be32  reserved3[27];
+       __u32   reserved3;
+       __u32   reserved4;
+       __u32   reserved5;
+       __be32  flags;
+       __be32  reserved6[23];
 };
 
 #define FW_HDR_FW_VER_MAJOR_GET(x) (((x) >> 24) & 0xff)
@@ -1621,18 +1688,8 @@ struct fw_hdr {
 #define FW_HDR_FW_VER_MICRO_GET(x) (((x) >> 8) & 0xff)
 #define FW_HDR_FW_VER_BUILD_GET(x) (((x) >> 0) & 0xff)
 
-#define S_FW_CMD_OP 24
-#define V_FW_CMD_OP(x) ((x) << S_FW_CMD_OP)
-
-#define S_FW_CMD_REQUEST 23
-#define V_FW_CMD_REQUEST(x) ((x) << S_FW_CMD_REQUEST)
-#define F_FW_CMD_REQUEST V_FW_CMD_REQUEST(1U)
-
-#define S_FW_CMD_WRITE 21
-#define V_FW_CMD_WRITE(x) ((x) << S_FW_CMD_WRITE)
-#define F_FW_CMD_WRITE V_FW_CMD_WRITE(1U)
-
-#define S_FW_LDST_CMD_ADDRSPACE 0
-#define V_FW_LDST_CMD_ADDRSPACE(x) ((x) << S_FW_LDST_CMD_ADDRSPACE)
+enum fw_hdr_flags {
+       FW_HDR_FLAGS_RESET_HALT = 0x00000001,
+};
 
 #endif /* _T4FW_INTERFACE_H_ */
index 8877fbfefb639ed5e7d6282981dd6f722dd6f5f5..f16745f4b36bf2b2c30bbe8740029aa4327b5020 100644 (file)
@@ -2421,7 +2421,7 @@ int t4vf_sge_init(struct adapter *adapter)
                        fl0, fl1);
                return -EINVAL;
        }
-       if ((sge_params->sge_control & RXPKTCPLMODE) == 0) {
+       if ((sge_params->sge_control & RXPKTCPLMODE_MASK) == 0) {
                dev_err(adapter->pdev_dev, "bad SGE CPL MODE\n");
                return -EINVAL;
        }
@@ -2431,7 +2431,8 @@ int t4vf_sge_init(struct adapter *adapter)
         */
        if (fl1)
                FL_PG_ORDER = ilog2(fl1) - PAGE_SHIFT;
-       STAT_LEN = ((sge_params->sge_control & EGRSTATUSPAGESIZE) ? 128 : 64);
+       STAT_LEN = ((sge_params->sge_control & EGRSTATUSPAGESIZE_MASK)
+                   ? 128 : 64);
        PKTSHIFT = PKTSHIFT_GET(sge_params->sge_control);
        FL_ALIGN = 1 << (INGPADBOUNDARY_GET(sge_params->sge_control) +
                         SGE_INGPADBOUNDARY_SHIFT);
index d266c86a53f71245a3d344c2169696949f9afcb0..cf4c05bdf5fe71262abf8fd4ac10e11e0cb0240a 100644 (file)
@@ -110,6 +110,7 @@ static inline char *nic_name(struct pci_dev *pdev)
 #define MAX_RX_POST            BE_NAPI_WEIGHT /* Frags posted at a time */
 #define RX_FRAGS_REFILL_WM     (RX_Q_LEN - MAX_RX_POST)
 
+#define MAX_VFS                        30 /* Max VFs supported by BE3 FW */
 #define FW_VER_LEN             32
 
 struct be_dma_mem {
@@ -336,7 +337,6 @@ struct phy_info {
        u16 auto_speeds_supported;
        u16 fixed_speeds_supported;
        int link_speed;
-       int forced_port_speed;
        u32 dac_cable_len;
        u32 advertising;
        u32 supported;
index 8c63d06ab12b6ccf899fae8fa13f75904beb22c1..af60bb26e33023ac523b672f76e68dcbdd172f92 100644 (file)
@@ -120,7 +120,7 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
 
                if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
                        dev_warn(&adapter->pdev->dev,
-                                "opcode %d-%d is not permitted\n",
+                                "VF is not privileged to issue opcode %d-%d\n",
                                 opcode, subsystem);
                } else {
                        extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
@@ -165,14 +165,13 @@ static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
        }
 }
 
-/* Grp5 QOS Speed evt */
+/* Grp5 QOS Speed evt: qos_link_speed is in units of 10 Mbps */
 static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
                struct be_async_event_grp5_qos_link_speed *evt)
 {
-       if (evt->physical_port == adapter->port_num) {
-               /* qos_link_speed is in units of 10 Mbps */
-               adapter->phy.link_speed = evt->qos_link_speed * 10;
-       }
+       if (adapter->phy.link_speed >= 0 &&
+           evt->physical_port == adapter->port_num)
+               adapter->phy.link_speed = le16_to_cpu(evt->qos_link_speed) * 10;
 }
 
 /*Grp5 PVID evt*/
@@ -717,7 +716,7 @@ int be_cmd_eq_create(struct be_adapter *adapter,
 
 /* Use MCC */
 int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
-                       u8 type, bool permanent, u32 if_handle, u32 pmac_id)
+                         bool permanent, u32 if_handle, u32 pmac_id)
 {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_mac_query *req;
@@ -734,7 +733,7 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
 
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb, NULL);
-       req->type = type;
+       req->type = MAC_ADDRESS_TYPE_NETWORK;
        if (permanent) {
                req->permanent = 1;
        } else {
@@ -1326,9 +1325,28 @@ err:
        return status;
 }
 
-/* Uses synchronous mcc */
-int be_cmd_link_status_query(struct be_adapter *adapter, u8 *mac_speed,
-                            u16 *link_speed, u8 *link_status, u32 dom)
+static int be_mac_to_link_speed(int mac_speed)
+{
+       switch (mac_speed) {
+       case PHY_LINK_SPEED_ZERO:
+               return 0;
+       case PHY_LINK_SPEED_10MBPS:
+               return 10;
+       case PHY_LINK_SPEED_100MBPS:
+               return 100;
+       case PHY_LINK_SPEED_1GBPS:
+               return 1000;
+       case PHY_LINK_SPEED_10GBPS:
+               return 10000;
+       }
+       return 0;
+}
+
+/* Uses synchronous mcc
+ * Returns link_speed in Mbps
+ */
+int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
+                            u8 *link_status, u32 dom)
 {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_link_status *req;
@@ -1357,11 +1375,13 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u8 *mac_speed,
        status = be_mcc_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
-               if (resp->mac_speed != PHY_LINK_SPEED_ZERO) {
-                       if (link_speed)
-                               *link_speed = le16_to_cpu(resp->link_speed);
-                       if (mac_speed)
-                               *mac_speed = resp->mac_speed;
+               if (link_speed) {
+                       *link_speed = resp->link_speed ?
+                                     le16_to_cpu(resp->link_speed) * 10 :
+                                     be_mac_to_link_speed(resp->mac_speed);
+
+                       if (!resp->logical_link_status)
+                               *link_speed = 0;
                }
                if (link_status)
                        *link_status = resp->logical_link_status;
@@ -2405,6 +2425,9 @@ int be_cmd_req_native_mode(struct be_adapter *adapter)
                struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb);
                adapter->be3_native = le32_to_cpu(resp->cap_flags) &
                                        CAPABILITY_BE3_NATIVE_ERX_API;
+               if (!adapter->be3_native)
+                       dev_warn(&adapter->pdev->dev,
+                                "adapter not in advanced mode\n");
        }
 err:
        mutex_unlock(&adapter->mbox_lock);
index 250f19b5f7b6c3887282f28af82dce11bfc8fd53..0936e21e3cff3d6cdf3767dbf1d0e708744f9b26 100644 (file)
@@ -1687,7 +1687,7 @@ struct be_cmd_req_set_ext_fat_caps {
 extern int be_pci_fnum_get(struct be_adapter *adapter);
 extern int be_fw_wait_ready(struct be_adapter *adapter);
 extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
-                       u8 type, bool permanent, u32 if_handle, u32 pmac_id);
+                                bool permanent, u32 if_handle, u32 pmac_id);
 extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
                        u32 if_id, u32 *pmac_id, u32 domain);
 extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id,
@@ -1714,8 +1714,8 @@ extern int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
                        int type);
 extern int be_cmd_rxq_destroy(struct be_adapter *adapter,
                        struct be_queue_info *q);
-extern int be_cmd_link_status_query(struct be_adapter *adapter, u8 *mac_speed,
-                                   u16 *link_speed, u8 *link_status, u32 dom);
+extern int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
+                                   u8 *link_status, u32 dom);
 extern int be_cmd_reset(struct be_adapter *adapter);
 extern int be_cmd_get_stats(struct be_adapter *adapter,
                        struct be_dma_mem *nonemb_cmd);
index c0e700653f965ef204ba38286b33dae76289d897..8e6fb0ba6aa9631132686566859a8228c0c1fb86 100644 (file)
@@ -512,28 +512,6 @@ static u32 convert_to_et_setting(u32 if_type, u32 if_speeds)
        return val;
 }
 
-static int convert_to_et_speed(u32 be_speed)
-{
-       int et_speed = SPEED_10000;
-
-       switch (be_speed) {
-       case PHY_LINK_SPEED_10MBPS:
-               et_speed = SPEED_10;
-               break;
-       case PHY_LINK_SPEED_100MBPS:
-               et_speed = SPEED_100;
-               break;
-       case PHY_LINK_SPEED_1GBPS:
-               et_speed = SPEED_1000;
-               break;
-       case PHY_LINK_SPEED_10GBPS:
-               et_speed = SPEED_10000;
-               break;
-       }
-
-       return et_speed;
-}
-
 bool be_pause_supported(struct be_adapter *adapter)
 {
        return (adapter->phy.interface_type == PHY_TYPE_SFP_PLUS_10GB ||
@@ -544,27 +522,16 @@ bool be_pause_supported(struct be_adapter *adapter)
 static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
-       u8 port_speed = 0;
-       u16 link_speed = 0;
        u8 link_status;
-       u32 et_speed = 0;
+       u16 link_speed = 0;
        int status;
 
-       if (adapter->phy.link_speed < 0 || !(netdev->flags & IFF_UP)) {
-               if (adapter->phy.forced_port_speed < 0) {
-                       status = be_cmd_link_status_query(adapter, &port_speed,
-                                               &link_speed, &link_status, 0);
-                       if (!status)
-                               be_link_status_update(adapter, link_status);
-                       if (link_speed)
-                               et_speed = link_speed * 10;
-                       else if (link_status)
-                               et_speed = convert_to_et_speed(port_speed);
-               } else {
-                       et_speed = adapter->phy.forced_port_speed;
-               }
-
-               ethtool_cmd_speed_set(ecmd, et_speed);
+       if (adapter->phy.link_speed < 0) {
+               status = be_cmd_link_status_query(adapter, &link_speed,
+                                                 &link_status, 0);
+               if (!status)
+                       be_link_status_update(adapter, link_status);
+               ethtool_cmd_speed_set(ecmd, link_speed);
 
                status = be_cmd_get_phy_info(adapter);
                if (status)
@@ -773,8 +740,8 @@ static void
 be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
-       u8 mac_speed = 0;
-       u16 qos_link_speed = 0;
+       int status;
+       u8 link_status = 0;
 
        memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM);
 
@@ -798,11 +765,11 @@ be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
                test->flags |= ETH_TEST_FL_FAILED;
        }
 
-       if (be_cmd_link_status_query(adapter, &mac_speed,
-                                    &qos_link_speed, NULL, 0) != 0) {
+       status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
+       if (status) {
                test->flags |= ETH_TEST_FL_FAILED;
                data[4] = -1;
-       } else if (!mac_speed) {
+       } else if (!link_status) {
                test->flags |= ETH_TEST_FL_FAILED;
                data[4] = 1;
        }
index 95d10472f236c2b98f6a107cb0b7ddb0e4b975d4..eb3f2cb3b93bbef296948f8479ab229783c01a94 100644 (file)
@@ -20,6 +20,7 @@
 #include "be.h"
 #include "be_cmds.h"
 #include <asm/div64.h>
+#include <linux/aer.h>
 
 MODULE_VERSION(DRV_VER);
 MODULE_DEVICE_TABLE(pci, be_dev_ids);
@@ -240,9 +241,8 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;
 
-       status = be_cmd_mac_addr_query(adapter, current_mac,
-                               MAC_ADDRESS_TYPE_NETWORK, false,
-                               adapter->if_handle, 0);
+       status = be_cmd_mac_addr_query(adapter, current_mac, false,
+                                      adapter->if_handle, 0);
        if (status)
                goto err;
 
@@ -1075,7 +1075,7 @@ static int be_set_vf_tx_rate(struct net_device *netdev,
 static int be_find_vfs(struct be_adapter *adapter, int vf_state)
 {
        struct pci_dev *dev, *pdev = adapter->pdev;
-       int vfs = 0, assigned_vfs = 0, pos, vf_fn;
+       int vfs = 0, assigned_vfs = 0, pos;
        u16 offset, stride;
 
        pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
@@ -1086,9 +1086,7 @@ static int be_find_vfs(struct be_adapter *adapter, int vf_state)
 
        dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
        while (dev) {
-               vf_fn = (pdev->devfn + offset + stride * vfs) & 0xFFFF;
-               if (dev->is_virtfn && dev->devfn == vf_fn &&
-                       dev->bus->number == pdev->bus->number) {
+               if (dev->is_virtfn && pci_physfn(dev) == pdev) {
                        vfs++;
                        if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
                                assigned_vfs++;
@@ -1896,6 +1894,8 @@ static int be_tx_qs_create(struct be_adapter *adapter)
                        return status;
        }
 
+       dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
+                adapter->num_tx_qs);
        return 0;
 }
 
@@ -1946,10 +1946,9 @@ static int be_rx_cqs_create(struct be_adapter *adapter)
                        return rc;
        }
 
-       if (adapter->num_rx_qs != MAX_RX_QS)
-               dev_info(&adapter->pdev->dev,
-                       "Created only %d receive queues\n", adapter->num_rx_qs);
-
+       dev_info(&adapter->pdev->dev,
+                "created %d RSS queue(s) and 1 default RX queue\n",
+                adapter->num_rx_qs - 1);
        return 0;
 }
 
@@ -2176,8 +2175,7 @@ static uint be_num_rss_want(struct be_adapter *adapter)
 {
        u32 num = 0;
        if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
-            !sriov_want(adapter) && be_physfn(adapter) &&
-            !be_is_mc(adapter)) {
+            !sriov_want(adapter) && be_physfn(adapter)) {
                num = (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
                num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
        }
@@ -2188,6 +2186,7 @@ static void be_msix_enable(struct be_adapter *adapter)
 {
 #define BE_MIN_MSIX_VECTORS            1
        int i, status, num_vec, num_roce_vec = 0;
+       struct device *dev = &adapter->pdev->dev;
 
        /* If RSS queues are not used, need a vec for default RX Q */
        num_vec = min(be_num_rss_want(adapter), num_online_cpus());
@@ -2212,6 +2211,8 @@ static void be_msix_enable(struct be_adapter *adapter)
                                num_vec) == 0)
                        goto done;
        }
+
+       dev_warn(dev, "MSIx enable failed\n");
        return;
 done:
        if (be_roce_supported(adapter)) {
@@ -2225,6 +2226,7 @@ done:
                }
        } else
                adapter->num_msix_vec = num_vec;
+       dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
        return;
 }
 
@@ -2441,8 +2443,7 @@ static int be_open(struct net_device *netdev)
                be_eq_notify(adapter, eqo->q.id, true, false, 0);
        }
 
-       status = be_cmd_link_status_query(adapter, NULL, NULL,
-                                         &link_status, 0);
+       status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
        if (!status)
                be_link_status_update(adapter, link_status);
 
@@ -2646,8 +2647,8 @@ static int be_vf_setup(struct be_adapter *adapter)
        }
 
        for_all_vfs(adapter, vf_cfg, vf) {
-               status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
-                                                 NULL, vf + 1);
+               lnk_speed = 1000;
+               status = be_cmd_set_qos(adapter, lnk_speed, vf + 1);
                if (status)
                        goto err;
                vf_cfg->tx_rate = lnk_speed * 10;
@@ -2671,7 +2672,6 @@ static void be_setup_init(struct be_adapter *adapter)
        adapter->be3_native = false;
        adapter->promiscuous = false;
        adapter->eq_next_idx = 0;
-       adapter->phy.forced_port_speed = -1;
 }
 
 static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
@@ -2693,21 +2693,16 @@ static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
                status = be_cmd_get_mac_from_list(adapter, mac,
                                                  active_mac, pmac_id, 0);
                if (*active_mac) {
-                       status = be_cmd_mac_addr_query(adapter, mac,
-                                                      MAC_ADDRESS_TYPE_NETWORK,
-                                                      false, if_handle,
-                                                      *pmac_id);
+                       status = be_cmd_mac_addr_query(adapter, mac, false,
+                                                      if_handle, *pmac_id);
                }
        } else if (be_physfn(adapter)) {
                /* For BE3, for PF get permanent MAC */
-               status = be_cmd_mac_addr_query(adapter, mac,
-                                              MAC_ADDRESS_TYPE_NETWORK, true,
-                                              0, 0);
+               status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
                *active_mac = false;
        } else {
                /* For BE3, for VF get soft MAC assigned by PF*/
-               status = be_cmd_mac_addr_query(adapter, mac,
-                                              MAC_ADDRESS_TYPE_NETWORK, false,
+               status = be_cmd_mac_addr_query(adapter, mac, false,
                                               if_handle, 0);
                *active_mac = true;
        }
@@ -2724,6 +2719,8 @@ static int be_get_config(struct be_adapter *adapter)
        if (pos) {
                pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
                                     &dev_num_vfs);
+               if (!lancer_chip(adapter))
+                       dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
                adapter->dev_num_vfs = dev_num_vfs;
        }
        return 0;
@@ -3437,6 +3434,7 @@ static void be_ctrl_cleanup(struct be_adapter *adapter)
        if (mem->va)
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);
+       kfree(adapter->pmac_id);
 }
 
 static int be_ctrl_init(struct be_adapter *adapter)
@@ -3473,6 +3471,12 @@ static int be_ctrl_init(struct be_adapter *adapter)
        }
        memset(rx_filter->va, 0, rx_filter->size);
 
+       /* primary mac needs 1 pmac entry */
+       adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
+                                  sizeof(*adapter->pmac_id), GFP_KERNEL);
+       if (!adapter->pmac_id)
+               return -ENOMEM;
+
        mutex_init(&adapter->mbox_lock);
        spin_lock_init(&adapter->mcc_lock);
        spin_lock_init(&adapter->mcc_cq_lock);
@@ -3543,6 +3547,8 @@ static void __devexit be_remove(struct pci_dev *pdev)
 
        be_ctrl_cleanup(adapter);
 
+       pci_disable_pcie_error_reporting(pdev);
+
        pci_set_drvdata(pdev, NULL);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
@@ -3609,12 +3615,6 @@ static int be_get_initial_config(struct be_adapter *adapter)
        else
                adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
 
-       /* primary mac needs 1 pmac entry */
-       adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
-                                 sizeof(u32), GFP_KERNEL);
-       if (!adapter->pmac_id)
-               return -ENOMEM;
-
        status = be_cmd_get_cntl_attributes(adapter);
        if (status)
                return status;
@@ -3800,6 +3800,23 @@ static bool be_reset_required(struct be_adapter *adapter)
        return be_find_vfs(adapter, ENABLED) > 0 ? false : true;
 }
 
+static char *mc_name(struct be_adapter *adapter)
+{
+       if (adapter->function_mode & FLEX10_MODE)
+               return "FLEX10";
+       else if (adapter->function_mode & VNIC_MODE)
+               return "vNIC";
+       else if (adapter->function_mode & UMC_ENABLED)
+               return "UMC";
+       else
+               return "";
+}
+
+static inline char *func_name(struct be_adapter *adapter)
+{
+       return be_physfn(adapter) ? "PF" : "VF";
+}
+
 static int __devinit be_probe(struct pci_dev *pdev,
                        const struct pci_device_id *pdev_id)
 {
@@ -3844,6 +3861,10 @@ static int __devinit be_probe(struct pci_dev *pdev,
                }
        }
 
+       status = pci_enable_pcie_error_reporting(pdev);
+       if (status)
+               dev_err(&pdev->dev, "Could not use PCIe error reporting\n");
+
        status = be_ctrl_init(adapter);
        if (status)
                goto free_netdev;
@@ -3886,7 +3907,7 @@ static int __devinit be_probe(struct pci_dev *pdev,
 
        status = be_setup(adapter);
        if (status)
-               goto msix_disable;
+               goto stats_clean;
 
        be_netdev_init(netdev);
        status = register_netdev(netdev);
@@ -3900,15 +3921,13 @@ static int __devinit be_probe(struct pci_dev *pdev,
 
        be_cmd_query_port_name(adapter, &port_name);
 
-       dev_info(&pdev->dev, "%s: %s port %c\n", netdev->name, nic_name(pdev),
-                port_name);
+       dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
+                func_name(adapter), mc_name(adapter), port_name);
 
        return 0;
 
 unsetup:
        be_clear(adapter);
-msix_disable:
-       be_msix_disable(adapter);
 stats_clean:
        be_stats_cleanup(adapter);
 ctrl_clean:
@@ -4066,6 +4085,7 @@ static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
        if (status)
                return PCI_ERS_RESULT_DISCONNECT;
 
+       pci_cleanup_aer_uncorrect_error_status(pdev);
        return PCI_ERS_RESULT_RECOVERED;
 }
 
index 3574e1499dfc30db059160e80b8a77766cd8392f..feff51664dcf76974cc62a58be978bbbe83f0cf7 100644 (file)
@@ -62,6 +62,13 @@ config FSL_PQ_MDIO
        ---help---
          This driver supports the MDIO bus used by the gianfar and UCC drivers.
 
+config FSL_XGMAC_MDIO
+       tristate "Freescale XGMAC MDIO"
+       depends on FSL_SOC
+       select PHYLIB
+       ---help---
+         This driver supports the MDIO bus on the Fman 10G Ethernet MACs.
+
 config UCC_GETH
        tristate "Freescale QE Gigabit Ethernet"
        depends on QUICC_ENGINE
index 1752488c9ee5af1b3dadc54110932fa5b4e46ae8..3d1839afff6574ac96a9f4f2c7955739a8574567 100644 (file)
@@ -9,6 +9,7 @@ ifeq ($(CONFIG_FEC_MPC52xx_MDIO),y)
 endif
 obj-$(CONFIG_FS_ENET) += fs_enet/
 obj-$(CONFIG_FSL_PQ_MDIO) += fsl_pq_mdio.o
+obj-$(CONFIG_FSL_XGMAC_MDIO) += xgmac_mdio.o
 obj-$(CONFIG_GIANFAR) += gianfar_driver.o
 obj-$(CONFIG_PTP_1588_CLOCK_GIANFAR) += gianfar_ptp.o
 gianfar_driver-objs := gianfar.o \
index 9527b28d70d1976374b812d9a37fbfaaf1c79aaf..c93a05654b46125b6eeda59cf6e0bbfc83c5f9ef 100644 (file)
 #include <linux/kernel.h>
 #include <linux/string.h>
 #include <linux/errno.h>
-#include <linux/unistd.h>
 #include <linux/slab.h>
-#include <linux/interrupt.h>
 #include <linux/init.h>
 #include <linux/delay.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/spinlock.h>
-#include <linux/mm.h>
 #include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/crc32.h>
 #include <linux/mii.h>
-#include <linux/phy.h>
-#include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_mdio.h>
-#include <linux/of_platform.h>
+#include <linux/of_device.h>
 
 #include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/uaccess.h>
-#include <asm/ucc.h>
+#include <asm/ucc.h>   /* for ucc_set_qe_mux_mii_mng() */
 
 #include "gianfar.h"
-#include "fsl_pq_mdio.h"
+
+#define MIIMIND_BUSY           0x00000001
+#define MIIMIND_NOTVALID       0x00000004
+#define MIIMCFG_INIT_VALUE     0x00000007
+#define MIIMCFG_RESET          0x80000000
+
+#define MII_READ_COMMAND       0x00000001
+
+struct fsl_pq_mii {
+       u32 miimcfg;    /* MII management configuration reg */
+       u32 miimcom;    /* MII management command reg */
+       u32 miimadd;    /* MII management address reg */
+       u32 miimcon;    /* MII management control reg */
+       u32 miimstat;   /* MII management status reg */
+       u32 miimind;    /* MII management indication reg */
+};
+
+struct fsl_pq_mdio {
+       u8 res1[16];
+       u32 ieventm;    /* MDIO Interrupt event register (for etsec2)*/
+       u32 imaskm;     /* MDIO Interrupt mask register (for etsec2)*/
+       u8 res2[4];
+       u32 emapm;      /* MDIO Event mapping register (for etsec2)*/
+       u8 res3[1280];
+       struct fsl_pq_mii mii;
+       u8 res4[28];
+       u32 utbipar;    /* TBI phy address reg (only on UCC) */
+       u8 res5[2728];
+} __packed;
 
 /* Number of microseconds to wait for an MII register to respond */
 #define MII_TIMEOUT    1000
 
 struct fsl_pq_mdio_priv {
        void __iomem *map;
-       struct fsl_pq_mdio __iomem *regs;
+       struct fsl_pq_mii __iomem *regs;
+       int irqs[PHY_MAX_ADDR];
+};
+
+/*
+ * Per-device-type data.  Each type of device tree node that we support gets
+ * one of these.
+ *
+ * @mii_offset: the offset of the MII registers within the memory map of the
+ * node.  Some nodes define only the MII registers, and some define the whole
+ * MAC (which includes the MII registers).
+ *
+ * @get_tbipa: determines the address of the TBIPA register
+ *
+ * @ucc_configure: a special function for extra QE configuration
+ */
+struct fsl_pq_mdio_data {
+       unsigned int mii_offset;        /* offset of the MII registers */
+       uint32_t __iomem * (*get_tbipa)(void __iomem *p);
+       void (*ucc_configure)(phys_addr_t start, phys_addr_t end);
 };
 
 /*
- * Write value to the PHY at mii_id at register regnum,
- * on the bus attached to the local interface, which may be different from the
- * generic mdio bus (tied to a single interface), waiting until the write is
- * done before returning. This is helpful in programming interfaces like
- * the TBI which control interfaces like onchip SERDES and are always tied to
- * the local mdio pins, which may not be the same as system mdio bus, used for
+ * Write value to the PHY at mii_id at register regnum, on the bus attached
+ * to the local interface, which may be different from the generic mdio bus
+ * (tied to a single interface), waiting until the write is done before
+ * returning. This is helpful in programming interfaces like the TBI which
+ * control interfaces like onchip SERDES and are always tied to the local
+ * mdio pins, which may not be the same as system mdio bus, used for
  * controlling the external PHYs, for example.
  */
-int fsl_pq_local_mdio_write(struct fsl_pq_mdio __iomem *regs, int mii_id,
-               int regnum, u16 value)
+static int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
+               u16 value)
 {
+       struct fsl_pq_mdio_priv *priv = bus->priv;
+       struct fsl_pq_mii __iomem *regs = priv->regs;
        u32 status;
 
        /* Set the PHY address and the register address we want to write */
@@ -83,20 +119,21 @@ int fsl_pq_local_mdio_write(struct fsl_pq_mdio __iomem *regs, int mii_id,
 }
 
 /*
- * Read the bus for PHY at addr mii_id, register regnum, and
- * return the value.  Clears miimcom first.  All PHY operation
- * done on the bus attached to the local interface,
- * which may be different from the generic mdio bus
- * This is helpful in programming interfaces like
- * the TBI which, in turn, control interfaces like onchip SERDES
- * and are always tied to the local mdio pins, which may not be the
+ * Read the bus for PHY at addr mii_id, register regnum, and return the value.
+ * Clears miimcom first.
+ *
+ * All PHY operation done on the bus attached to the local interface, which
+ * may be different from the generic mdio bus.  This is helpful in programming
+ * interfaces like the TBI which, in turn, control interfaces like on-chip
+ * SERDES and are always tied to the local mdio pins, which may not be the
  * same as system mdio bus, used for controlling the external PHYs, for eg.
  */
-int fsl_pq_local_mdio_read(struct fsl_pq_mdio __iomem *regs,
-               int mii_id, int regnum)
+static int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
 {
-       u16 value;
+       struct fsl_pq_mdio_priv *priv = bus->priv;
+       struct fsl_pq_mii __iomem *regs = priv->regs;
        u32 status;
+       u16 value;
 
        /* Set the PHY address and the register address we want to read */
        out_be32(&regs->miimadd, (mii_id << 8) | regnum);
@@ -115,44 +152,15 @@ int fsl_pq_local_mdio_read(struct fsl_pq_mdio __iomem *regs,
        /* Grab the value of the register from miimstat */
        value = in_be32(&regs->miimstat);
 
+       dev_dbg(&bus->dev, "read %04x from address %x/%x\n", value, mii_id, regnum);
        return value;
 }
 
-static struct fsl_pq_mdio __iomem *fsl_pq_mdio_get_regs(struct mii_bus *bus)
-{
-       struct fsl_pq_mdio_priv *priv = bus->priv;
-
-       return priv->regs;
-}
-
-/*
- * Write value to the PHY at mii_id at register regnum,
- * on the bus, waiting until the write is done before returning.
- */
-int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value)
-{
-       struct fsl_pq_mdio __iomem *regs = fsl_pq_mdio_get_regs(bus);
-
-       /* Write to the local MII regs */
-       return fsl_pq_local_mdio_write(regs, mii_id, regnum, value);
-}
-
-/*
- * Read the bus for PHY at addr mii_id, register regnum, and
- * return the value.  Clears miimcom first.
- */
-int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
-{
-       struct fsl_pq_mdio __iomem *regs = fsl_pq_mdio_get_regs(bus);
-
-       /* Read the local MII regs */
-       return fsl_pq_local_mdio_read(regs, mii_id, regnum);
-}
-
 /* Reset the MIIM registers, and wait for the bus to free */
 static int fsl_pq_mdio_reset(struct mii_bus *bus)
 {
-       struct fsl_pq_mdio __iomem *regs = fsl_pq_mdio_get_regs(bus);
+       struct fsl_pq_mdio_priv *priv = bus->priv;
+       struct fsl_pq_mii __iomem *regs = priv->regs;
        u32 status;
 
        mutex_lock(&bus->mdio_lock);
@@ -170,234 +178,291 @@ static int fsl_pq_mdio_reset(struct mii_bus *bus)
        mutex_unlock(&bus->mdio_lock);
 
        if (!status) {
-               printk(KERN_ERR "%s: The MII Bus is stuck!\n",
-                               bus->name);
+               dev_err(&bus->dev, "timeout waiting for MII bus\n");
                return -EBUSY;
        }
 
        return 0;
 }
 
-void fsl_pq_mdio_bus_name(char *name, struct device_node *np)
+#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
+/*
+ * This is mildly evil, but so is our hardware for doing this.
+ * Also, we have to cast back to struct gfar because of
+ * definition weirdness done in gianfar.h.
+ */
+static uint32_t __iomem *get_gfar_tbipa(void __iomem *p)
 {
-       const u32 *addr;
-       u64 taddr = OF_BAD_ADDR;
-
-       addr = of_get_address(np, 0, NULL, NULL);
-       if (addr)
-               taddr = of_translate_address(np, addr);
+       struct gfar __iomem *enet_regs = p;
 
-       snprintf(name, MII_BUS_ID_SIZE, "%s@%llx", np->name,
-               (unsigned long long)taddr);
+       return &enet_regs->tbipa;
 }
-EXPORT_SYMBOL_GPL(fsl_pq_mdio_bus_name);
 
+/*
+ * Return the TBIPAR address for an eTSEC2 node
+ */
+static uint32_t __iomem *get_etsec_tbipa(void __iomem *p)
+{
+       return p;
+}
+#endif
 
-static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs, struct device_node *np)
+#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
+/*
+ * Return the TBIPAR address for a QE MDIO node
+ */
+static uint32_t __iomem *get_ucc_tbipa(void __iomem *p)
 {
-#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
-       struct gfar __iomem *enet_regs;
+       struct fsl_pq_mdio __iomem *mdio = p;
 
-       /*
-        * This is mildly evil, but so is our hardware for doing this.
-        * Also, we have to cast back to struct gfar because of
-        * definition weirdness done in gianfar.h.
-        */
-       if(of_device_is_compatible(np, "fsl,gianfar-mdio") ||
-               of_device_is_compatible(np, "fsl,gianfar-tbi") ||
-               of_device_is_compatible(np, "gianfar")) {
-               enet_regs = (struct gfar __iomem *)regs;
-               return &enet_regs->tbipa;
-       } else if (of_device_is_compatible(np, "fsl,etsec2-mdio") ||
-                       of_device_is_compatible(np, "fsl,etsec2-tbi")) {
-               return of_iomap(np, 1);
-       }
-#endif
-       return NULL;
+       return &mdio->utbipar;
 }
 
-
-static int get_ucc_id_for_range(u64 start, u64 end, u32 *ucc_id)
+/*
+ * Find the UCC node that controls the given MDIO node
+ *
+ * For some reason, the QE MDIO nodes are not children of the UCC devices
+ * that control them.  Therefore, we need to scan all UCC nodes looking for
+ * the one that encompases the given MDIO node.  We do this by comparing
+ * physical addresses.  The 'start' and 'end' addresses of the MDIO node are
+ * passed, and the correct UCC node will cover the entire address range.
+ *
+ * This assumes that there is only one QE MDIO node in the entire device tree.
+ */
+static void ucc_configure(phys_addr_t start, phys_addr_t end)
 {
-#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
+       static bool found_mii_master;
        struct device_node *np = NULL;
-       int err = 0;
 
-       for_each_compatible_node(np, NULL, "ucc_geth") {
-               struct resource tempres;
+       if (found_mii_master)
+               return;
 
-               err = of_address_to_resource(np, 0, &tempres);
-               if (err)
+       for_each_compatible_node(np, NULL, "ucc_geth") {
+               struct resource res;
+               const uint32_t *iprop;
+               uint32_t id;
+               int ret;
+
+               ret = of_address_to_resource(np, 0, &res);
+               if (ret < 0) {
+                       pr_debug("fsl-pq-mdio: no address range in node %s\n",
+                                np->full_name);
                        continue;
+               }
 
                /* if our mdio regs fall within this UCC regs range */
-               if ((start >= tempres.start) && (end <= tempres.end)) {
-                       /* Find the id of the UCC */
-                       const u32 *id;
-
-                       id = of_get_property(np, "cell-index", NULL);
-                       if (!id) {
-                               id = of_get_property(np, "device-id", NULL);
-                               if (!id)
-                                       continue;
+               if ((start < res.start) || (end > res.end))
+                       continue;
+
+               iprop = of_get_property(np, "cell-index", NULL);
+               if (!iprop) {
+                       iprop = of_get_property(np, "device-id", NULL);
+                       if (!iprop) {
+                               pr_debug("fsl-pq-mdio: no UCC ID in node %s\n",
+                                        np->full_name);
+                               continue;
                        }
+               }
 
-                       *ucc_id = *id;
+               id = be32_to_cpup(iprop);
 
-                       return 0;
+               /*
+                * cell-index and device-id for QE nodes are
+                * numbered from 1, not 0.
+                */
+               if (ucc_set_qe_mux_mii_mng(id - 1) < 0) {
+                       pr_debug("fsl-pq-mdio: invalid UCC ID in node %s\n",
+                                np->full_name);
+                       continue;
                }
+
+               pr_debug("fsl-pq-mdio: setting node UCC%u to MII master\n", id);
+               found_mii_master = true;
        }
+}
 
-       if (err)
-               return err;
-       else
-               return -EINVAL;
-#else
-       return -ENODEV;
 #endif
-}
 
-static int fsl_pq_mdio_probe(struct platform_device *ofdev)
+static struct of_device_id fsl_pq_mdio_match[] = {
+#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
+       {
+               .compatible = "fsl,gianfar-tbi",
+               .data = &(struct fsl_pq_mdio_data) {
+                       .mii_offset = 0,
+                       .get_tbipa = get_gfar_tbipa,
+               },
+       },
+       {
+               .compatible = "fsl,gianfar-mdio",
+               .data = &(struct fsl_pq_mdio_data) {
+                       .mii_offset = 0,
+                       .get_tbipa = get_gfar_tbipa,
+               },
+       },
+       {
+               .type = "mdio",
+               .compatible = "gianfar",
+               .data = &(struct fsl_pq_mdio_data) {
+                       .mii_offset = offsetof(struct fsl_pq_mdio, mii),
+                       .get_tbipa = get_gfar_tbipa,
+               },
+       },
+       {
+               .compatible = "fsl,etsec2-tbi",
+               .data = &(struct fsl_pq_mdio_data) {
+                       .mii_offset = offsetof(struct fsl_pq_mdio, mii),
+                       .get_tbipa = get_etsec_tbipa,
+               },
+       },
+       {
+               .compatible = "fsl,etsec2-mdio",
+               .data = &(struct fsl_pq_mdio_data) {
+                       .mii_offset = offsetof(struct fsl_pq_mdio, mii),
+                       .get_tbipa = get_etsec_tbipa,
+               },
+       },
+#endif
+#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
+       {
+               .compatible = "fsl,ucc-mdio",
+               .data = &(struct fsl_pq_mdio_data) {
+                       .mii_offset = 0,
+                       .get_tbipa = get_ucc_tbipa,
+                       .ucc_configure = ucc_configure,
+               },
+       },
+       {
+               /* Legacy UCC MDIO node */
+               .type = "mdio",
+               .compatible = "ucc_geth_phy",
+               .data = &(struct fsl_pq_mdio_data) {
+                       .mii_offset = 0,
+                       .get_tbipa = get_ucc_tbipa,
+                       .ucc_configure = ucc_configure,
+               },
+       },
+#endif
+       /* No Kconfig option for Fman support yet */
+       {
+               .compatible = "fsl,fman-mdio",
+               .data = &(struct fsl_pq_mdio_data) {
+                       .mii_offset = 0,
+                       /* Fman TBI operations are handled elsewhere */
+               },
+       },
+
+       {},
+};
+MODULE_DEVICE_TABLE(of, fsl_pq_mdio_match);
+
+static int fsl_pq_mdio_probe(struct platform_device *pdev)
 {
-       struct device_node *np = ofdev->dev.of_node;
+       const struct of_device_id *id =
+               of_match_device(fsl_pq_mdio_match, &pdev->dev);
+       const struct fsl_pq_mdio_data *data = id->data;
+       struct device_node *np = pdev->dev.of_node;
+       struct resource res;
        struct device_node *tbi;
        struct fsl_pq_mdio_priv *priv;
-       struct fsl_pq_mdio __iomem *regs = NULL;
-       void __iomem *map;
-       u32 __iomem *tbipa;
        struct mii_bus *new_bus;
-       int tbiaddr = -1;
-       const u32 *addrp;
-       u64 addr = 0, size = 0;
        int err;
 
-       priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-       if (!priv)
-               return -ENOMEM;
+       dev_dbg(&pdev->dev, "found %s compatible node\n", id->compatible);
 
-       new_bus = mdiobus_alloc();
-       if (!new_bus) {
-               err = -ENOMEM;
-               goto err_free_priv;
-       }
+       new_bus = mdiobus_alloc_size(sizeof(*priv));
+       if (!new_bus)
+               return -ENOMEM;
 
+       priv = new_bus->priv;
        new_bus->name = "Freescale PowerQUICC MII Bus",
-       new_bus->read = &fsl_pq_mdio_read,
-       new_bus->write = &fsl_pq_mdio_write,
-       new_bus->reset = &fsl_pq_mdio_reset,
-       new_bus->priv = priv;
-       fsl_pq_mdio_bus_name(new_bus->id, np);
-
-       addrp = of_get_address(np, 0, &size, NULL);
-       if (!addrp) {
-               err = -EINVAL;
-               goto err_free_bus;
+       new_bus->read = &fsl_pq_mdio_read;
+       new_bus->write = &fsl_pq_mdio_write;
+       new_bus->reset = &fsl_pq_mdio_reset;
+       new_bus->irq = priv->irqs;
+
+       err = of_address_to_resource(np, 0, &res);
+       if (err < 0) {
+               dev_err(&pdev->dev, "could not obtain address information\n");
+               goto error;
        }
 
-       /* Set the PHY base address */
-       addr = of_translate_address(np, addrp);
-       if (addr == OF_BAD_ADDR) {
-               err = -EINVAL;
-               goto err_free_bus;
-       }
+       snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s@%llx", np->name,
+               (unsigned long long)res.start);
 
-       map = ioremap(addr, size);
-       if (!map) {
+       priv->map = of_iomap(np, 0);
+       if (!priv->map) {
                err = -ENOMEM;
-               goto err_free_bus;
+               goto error;
        }
-       priv->map = map;
-
-       if (of_device_is_compatible(np, "fsl,gianfar-mdio") ||
-                       of_device_is_compatible(np, "fsl,gianfar-tbi") ||
-                       of_device_is_compatible(np, "fsl,ucc-mdio") ||
-                       of_device_is_compatible(np, "ucc_geth_phy"))
-               map -= offsetof(struct fsl_pq_mdio, miimcfg);
-       regs = map;
-       priv->regs = regs;
-
-       new_bus->irq = kcalloc(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL);
 
-       if (NULL == new_bus->irq) {
-               err = -ENOMEM;
-               goto err_unmap_regs;
+       /*
+        * Some device tree nodes represent only the MII registers, and
+        * others represent the MAC and MII registers.  The 'mii_offset' field
+        * contains the offset of the MII registers inside the mapped register
+        * space.
+        */
+       if (data->mii_offset > resource_size(&res)) {
+               dev_err(&pdev->dev, "invalid register map\n");
+               err = -EINVAL;
+               goto error;
        }
+       priv->regs = priv->map + data->mii_offset;
 
-       new_bus->parent = &ofdev->dev;
-       dev_set_drvdata(&ofdev->dev, new_bus);
-
-       if (of_device_is_compatible(np, "fsl,gianfar-mdio") ||
-                       of_device_is_compatible(np, "fsl,gianfar-tbi") ||
-                       of_device_is_compatible(np, "fsl,etsec2-mdio") ||
-                       of_device_is_compatible(np, "fsl,etsec2-tbi") ||
-                       of_device_is_compatible(np, "gianfar")) {
-               tbipa = get_gfar_tbipa(regs, np);
-               if (!tbipa) {
-                       err = -EINVAL;
-                       goto err_free_irqs;
-               }
-       } else if (of_device_is_compatible(np, "fsl,ucc-mdio") ||
-                       of_device_is_compatible(np, "ucc_geth_phy")) {
-               u32 id;
-               static u32 mii_mng_master;
-
-               tbipa = &regs->utbipar;
-
-               if ((err = get_ucc_id_for_range(addr, addr + size, &id)))
-                       goto err_free_irqs;
+       new_bus->parent = &pdev->dev;
+       dev_set_drvdata(&pdev->dev, new_bus);
 
-               if (!mii_mng_master) {
-                       mii_mng_master = id;
-                       ucc_set_qe_mux_mii_mng(id - 1);
+       if (data->get_tbipa) {
+               for_each_child_of_node(np, tbi) {
+                       if (strcmp(tbi->type, "tbi-phy") == 0) {
+                               dev_dbg(&pdev->dev, "found TBI PHY node %s\n",
+                                       strrchr(tbi->full_name, '/') + 1);
+                               break;
+                       }
                }
-       } else {
-               err = -ENODEV;
-               goto err_free_irqs;
-       }
 
-       for_each_child_of_node(np, tbi) {
-               if (!strncmp(tbi->type, "tbi-phy", 8))
-                       break;
-       }
+               if (tbi) {
+                       const u32 *prop = of_get_property(tbi, "reg", NULL);
+                       uint32_t __iomem *tbipa;
 
-       if (tbi) {
-               const u32 *prop = of_get_property(tbi, "reg", NULL);
+                       if (!prop) {
+                               dev_err(&pdev->dev,
+                                       "missing 'reg' property in node %s\n",
+                                       tbi->full_name);
+                               err = -EBUSY;
+                               goto error;
+                       }
 
-               if (prop)
-                       tbiaddr = *prop;
+                       tbipa = data->get_tbipa(priv->map);
 
-               if (tbiaddr == -1) {
-                       err = -EBUSY;
-                       goto err_free_irqs;
-               } else {
-                       out_be32(tbipa, tbiaddr);
+                       out_be32(tbipa, be32_to_cpup(prop));
                }
        }
 
+       if (data->ucc_configure)
+               data->ucc_configure(res.start, res.end);
+
        err = of_mdiobus_register(new_bus, np);
        if (err) {
-               printk (KERN_ERR "%s: Cannot register as MDIO bus\n",
-                               new_bus->name);
-               goto err_free_irqs;
+               dev_err(&pdev->dev, "cannot register %s as MDIO bus\n",
+                       new_bus->name);
+               goto error;
        }
 
        return 0;
 
-err_free_irqs:
-       kfree(new_bus->irq);
-err_unmap_regs:
-       iounmap(priv->map);
-err_free_bus:
+error:
+       if (priv->map)
+               iounmap(priv->map);
+
        kfree(new_bus);
-err_free_priv:
-       kfree(priv);
+
        return err;
 }
 
 
-static int fsl_pq_mdio_remove(struct platform_device *ofdev)
+static int fsl_pq_mdio_remove(struct platform_device *pdev)
 {
-       struct device *device = &ofdev->dev;
+       struct device *device = &pdev->dev;
        struct mii_bus *bus = dev_get_drvdata(device);
        struct fsl_pq_mdio_priv *priv = bus->priv;
 
@@ -406,41 +471,11 @@ static int fsl_pq_mdio_remove(struct platform_device *ofdev)
        dev_set_drvdata(device, NULL);
 
        iounmap(priv->map);
-       bus->priv = NULL;
        mdiobus_free(bus);
-       kfree(priv);
 
        return 0;
 }
 
-static struct of_device_id fsl_pq_mdio_match[] = {
-       {
-               .type = "mdio",
-               .compatible = "ucc_geth_phy",
-       },
-       {
-               .type = "mdio",
-               .compatible = "gianfar",
-       },
-       {
-               .compatible = "fsl,ucc-mdio",
-       },
-       {
-               .compatible = "fsl,gianfar-tbi",
-       },
-       {
-               .compatible = "fsl,gianfar-mdio",
-       },
-       {
-               .compatible = "fsl,etsec2-tbi",
-       },
-       {
-               .compatible = "fsl,etsec2-mdio",
-       },
-       {},
-};
-MODULE_DEVICE_TABLE(of, fsl_pq_mdio_match);
-
 static struct platform_driver fsl_pq_mdio_driver = {
        .driver = {
                .name = "fsl-pq_mdio",
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.h b/drivers/net/ethernet/freescale/fsl_pq_mdio.h
deleted file mode 100644 (file)
index bd17a2a..0000000
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Freescale PowerQUICC MDIO Driver -- MII Management Bus Implementation
- * Driver for the MDIO bus controller on Freescale PowerQUICC processors
- *
- * Author: Andy Fleming
- * Modifier: Sandeep Gopalpet
- *
- * Copyright 2002-2004, 2008-2009 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- *
- */
-#ifndef __FSL_PQ_MDIO_H
-#define __FSL_PQ_MDIO_H
-
-#define MIIMIND_BUSY            0x00000001
-#define MIIMIND_NOTVALID        0x00000004
-#define MIIMCFG_INIT_VALUE     0x00000007
-#define MIIMCFG_RESET           0x80000000
-
-#define MII_READ_COMMAND       0x00000001
-
-struct fsl_pq_mdio {
-       u8 res1[16];
-       u32 ieventm;    /* MDIO Interrupt event register (for etsec2)*/
-       u32 imaskm;     /* MDIO Interrupt mask register (for etsec2)*/
-       u8 res2[4];
-       u32 emapm;      /* MDIO Event mapping register (for etsec2)*/
-       u8 res3[1280];
-       u32 miimcfg;            /* MII management configuration reg */
-       u32 miimcom;            /* MII management command reg */
-       u32 miimadd;            /* MII management address reg */
-       u32 miimcon;            /* MII management control reg */
-       u32 miimstat;           /* MII management status reg */
-       u32 miimind;            /* MII management indication reg */
-       u8 reserved[28];        /* Space holder */
-       u32 utbipar;            /* TBI phy address reg (only on UCC) */
-       u8 res4[2728];
-} __packed;
-
-int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum);
-int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value);
-int fsl_pq_local_mdio_write(struct fsl_pq_mdio __iomem *regs, int mii_id,
-                         int regnum, u16 value);
-int fsl_pq_local_mdio_read(struct fsl_pq_mdio __iomem *regs, int mii_id, int regnum);
-int __init fsl_pq_mdio_init(void);
-void fsl_pq_mdio_exit(void);
-void fsl_pq_mdio_bus_name(char *name, struct device_node *np);
-#endif /* FSL_PQ_MDIO_H */
index d3233f59a82e47b1d70a372c0973e1447d0ef90b..a1b52ec3b930981e240cad7ba4d3522936ed2eab 100644 (file)
 #include <linux/of_net.h>
 
 #include "gianfar.h"
-#include "fsl_pq_mdio.h"
 
 #define TX_TIMEOUT      (1*HZ)
 
@@ -395,7 +394,13 @@ static void gfar_init_mac(struct net_device *ndev)
        if (ndev->features & NETIF_F_IP_CSUM)
                tctrl |= TCTRL_INIT_CSUM;
 
-       tctrl |= TCTRL_TXSCHED_PRIO;
+       if (priv->prio_sched_en)
+               tctrl |= TCTRL_TXSCHED_PRIO;
+       else {
+               tctrl |= TCTRL_TXSCHED_WRRS;
+               gfar_write(&regs->tr03wt, DEFAULT_WRRS_WEIGHT);
+               gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT);
+       }
 
        gfar_write(&regs->tctrl, tctrl);
 
@@ -1161,6 +1166,9 @@ static int gfar_probe(struct platform_device *ofdev)
        priv->rx_filer_enable = 1;
        /* Enable most messages by default */
        priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1;
+       /* use priority h/w tx queue scheduling for single queue devices */
+       if (priv->num_tx_queues == 1)
+               priv->prio_sched_en = 1;
 
        /* Carrier starts down, phylib will bring it up */
        netif_carrier_off(dev);
index 2136c7ff5e6d2331bf22195194b7f8b028f98587..4141ef2ddafc3974563b2e16e433d312bd1cb894 100644 (file)
@@ -301,8 +301,16 @@ extern const char gfar_driver_version[];
 #define TCTRL_TFCPAUSE         0x00000008
 #define TCTRL_TXSCHED_MASK     0x00000006
 #define TCTRL_TXSCHED_INIT     0x00000000
+/* priority scheduling */
 #define TCTRL_TXSCHED_PRIO     0x00000002
+/* weighted round-robin scheduling (WRRS) */
 #define TCTRL_TXSCHED_WRRS     0x00000004
+/* default WRRS weight and policy setting,
+ * tailored to the tr03wt and tr47wt registers:
+ * equal weight for all Tx Qs, measured in 64byte units
+ */
+#define DEFAULT_WRRS_WEIGHT    0x18181818
+
 #define TCTRL_INIT_CSUM                (TCTRL_TUCSEN | TCTRL_IPCSEN)
 
 #define IEVENT_INIT_CLEAR      0xffffffff
@@ -1098,7 +1106,8 @@ struct gfar_private {
                extended_hash:1,
                bd_stash_en:1,
                rx_filer_enable:1,
-               wol_en:1; /* Wake-on-LAN enabled */
+               wol_en:1, /* Wake-on-LAN enabled */
+               prio_sched_en:1; /* Enable priorty based Tx scheduling in Hw */
        unsigned short padding;
 
        /* PHY stuff */
index 0daa66b8eca088735974f7ee45595d22c6be4c56..b9db0e0405636780bf02bc598b12e79ea5f71074 100644 (file)
@@ -510,7 +510,7 @@ static int gianfar_ptp_probe(struct platform_device *dev)
 
        spin_unlock_irqrestore(&etsects->lock, flags);
 
-       etsects->clock = ptp_clock_register(&etsects->caps);
+       etsects->clock = ptp_clock_register(&etsects->caps, &dev->dev);
        if (IS_ERR(etsects->clock)) {
                err = PTR_ERR(etsects->clock);
                goto no_clock;
index 21c6574c5f15cdecd5dd5ce2ccfc02f0e60d423e..164288439220c69f59f755c091afb2c53289c420 100644 (file)
@@ -42,7 +42,6 @@
 #include <asm/machdep.h>
 
 #include "ucc_geth.h"
-#include "fsl_pq_mdio.h"
 
 #undef DEBUG
 
diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c
new file mode 100644 (file)
index 0000000..1afb5ea
--- /dev/null
@@ -0,0 +1,274 @@
+/*
+ * QorIQ 10G MDIO Controller
+ *
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * Authors: Andy Fleming <afleming@freescale.com>
+ *          Timur Tabi <timur@freescale.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2.  This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/phy.h>
+#include <linux/mdio.h>
+#include <linux/of_platform.h>
+#include <linux/of_mdio.h>
+
+/* Number of microseconds to wait for a register to respond */
+#define TIMEOUT        1000
+
+struct tgec_mdio_controller {
+       __be32  reserved[12];
+       __be32  mdio_stat;      /* MDIO configuration and status */
+       __be32  mdio_ctl;       /* MDIO control */
+       __be32  mdio_data;      /* MDIO data */
+       __be32  mdio_addr;      /* MDIO address */
+} __packed;
+
+#define MDIO_STAT_CLKDIV(x)    (((x>>1) & 0xff) << 8)
+#define MDIO_STAT_BSY          (1 << 0)
+#define MDIO_STAT_RD_ER                (1 << 1)
+#define MDIO_CTL_DEV_ADDR(x)   (x & 0x1f)
+#define MDIO_CTL_PORT_ADDR(x)  ((x & 0x1f) << 5)
+#define MDIO_CTL_PRE_DIS       (1 << 10)
+#define MDIO_CTL_SCAN_EN       (1 << 11)
+#define MDIO_CTL_POST_INC      (1 << 14)
+#define MDIO_CTL_READ          (1 << 15)
+
+#define MDIO_DATA(x)           (x & 0xffff)
+#define MDIO_DATA_BSY          (1 << 31)
+
+/*
+ * Wait until the MDIO bus is free
+ */
+static int xgmac_wait_until_free(struct device *dev,
+                                struct tgec_mdio_controller __iomem *regs)
+{
+       uint32_t status;
+
+       /* Wait till the bus is free */
+       status = spin_event_timeout(
+               !((in_be32(&regs->mdio_stat)) & MDIO_STAT_BSY), TIMEOUT, 0);
+       if (!status) {
+               dev_err(dev, "timeout waiting for bus to be free\n");
+               return -ETIMEDOUT;
+       }
+
+       return 0;
+}
+
+/*
+ * Wait till the MDIO read or write operation is complete
+ */
+static int xgmac_wait_until_done(struct device *dev,
+                                struct tgec_mdio_controller __iomem *regs)
+{
+       uint32_t status;
+
+       /* Wait till the MDIO write is complete */
+       status = spin_event_timeout(
+               !((in_be32(&regs->mdio_data)) & MDIO_DATA_BSY), TIMEOUT, 0);
+       if (!status) {
+               dev_err(dev, "timeout waiting for operation to complete\n");
+               return -ETIMEDOUT;
+       }
+
+       return 0;
+}
+
+/*
+ * Write value to the PHY for this device to the register at regnum, waiting
+ * until the write is done before it returns.  All PHY configuration has to be
+ * done through the TSEC1 MIIM regs.
+ */
+static int xgmac_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value)
+{
+       struct tgec_mdio_controller __iomem *regs = bus->priv;
+       uint16_t dev_addr = regnum >> 16;
+       int ret;
+
+       /* Setup the MII Mgmt clock speed */
+       out_be32(&regs->mdio_stat, MDIO_STAT_CLKDIV(100));
+
+       ret = xgmac_wait_until_free(&bus->dev, regs);
+       if (ret)
+               return ret;
+
+       /* Set the port and dev addr */
+       out_be32(&regs->mdio_ctl,
+                MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr));
+
+       /* Set the register address */
+       out_be32(&regs->mdio_addr, regnum & 0xffff);
+
+       ret = xgmac_wait_until_free(&bus->dev, regs);
+       if (ret)
+               return ret;
+
+       /* Write the value to the register */
+       out_be32(&regs->mdio_data, MDIO_DATA(value));
+
+       ret = xgmac_wait_until_done(&bus->dev, regs);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+/*
+ * Reads from register regnum in the PHY for device dev, returning the value.
+ * Clears miimcom first.  All PHY configuration has to be done through the
+ * TSEC1 MIIM regs.
+ */
+static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
+{
+       struct tgec_mdio_controller __iomem *regs = bus->priv;
+       uint16_t dev_addr = regnum >> 16;
+       uint32_t mdio_ctl;
+       uint16_t value;
+       int ret;
+
+       /* Setup the MII Mgmt clock speed */
+       out_be32(&regs->mdio_stat, MDIO_STAT_CLKDIV(100));
+
+       ret = xgmac_wait_until_free(&bus->dev, regs);
+       if (ret)
+               return ret;
+
+       /* Set the Port and Device Addrs */
+       mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr);
+       out_be32(&regs->mdio_ctl, mdio_ctl);
+
+       /* Set the register address */
+       out_be32(&regs->mdio_addr, regnum & 0xffff);
+
+       ret = xgmac_wait_until_free(&bus->dev, regs);
+       if (ret)
+               return ret;
+
+       /* Initiate the read */
+       out_be32(&regs->mdio_ctl, mdio_ctl | MDIO_CTL_READ);
+
+       ret = xgmac_wait_until_done(&bus->dev, regs);
+       if (ret)
+               return ret;
+
+       /* Return all Fs if nothing was there */
+       if (in_be32(&regs->mdio_stat) & MDIO_STAT_RD_ER) {
+               dev_err(&bus->dev, "MDIO read error\n");
+               return 0xffff;
+       }
+
+       value = in_be32(&regs->mdio_data) & 0xffff;
+       dev_dbg(&bus->dev, "read %04x\n", value);
+
+       return value;
+}
+
+/* Reset the MIIM registers, and wait for the bus to free */
+static int xgmac_mdio_reset(struct mii_bus *bus)
+{
+       struct tgec_mdio_controller __iomem *regs = bus->priv;
+       int ret;
+
+       mutex_lock(&bus->mdio_lock);
+
+       /* Setup the MII Mgmt clock speed */
+       out_be32(&regs->mdio_stat, MDIO_STAT_CLKDIV(100));
+
+       ret = xgmac_wait_until_free(&bus->dev, regs);
+
+       mutex_unlock(&bus->mdio_lock);
+
+       return ret;
+}
+
+static int __devinit xgmac_mdio_probe(struct platform_device *pdev)
+{
+       struct device_node *np = pdev->dev.of_node;
+       struct mii_bus *bus;
+       struct resource res;
+       int ret;
+
+       ret = of_address_to_resource(np, 0, &res);
+       if (ret) {
+               dev_err(&pdev->dev, "could not obtain address\n");
+               return ret;
+       }
+
+       bus = mdiobus_alloc_size(PHY_MAX_ADDR * sizeof(int));
+       if (!bus)
+               return -ENOMEM;
+
+       bus->name = "Freescale XGMAC MDIO Bus";
+       bus->read = xgmac_mdio_read;
+       bus->write = xgmac_mdio_write;
+       bus->reset = xgmac_mdio_reset;
+       bus->irq = bus->priv;
+       bus->parent = &pdev->dev;
+       snprintf(bus->id, MII_BUS_ID_SIZE, "%llx", (unsigned long long)res.start);
+
+       /* Set the PHY base address */
+       bus->priv = of_iomap(np, 0);
+       if (!bus->priv) {
+               ret = -ENOMEM;
+               goto err_ioremap;
+       }
+
+       ret = of_mdiobus_register(bus, np);
+       if (ret) {
+               dev_err(&pdev->dev, "cannot register MDIO bus\n");
+               goto err_registration;
+       }
+
+       dev_set_drvdata(&pdev->dev, bus);
+
+       return 0;
+
+err_registration:
+       iounmap(bus->priv);
+
+err_ioremap:
+       mdiobus_free(bus);
+
+       return ret;
+}
+
+static int __devexit xgmac_mdio_remove(struct platform_device *pdev)
+{
+       struct mii_bus *bus = dev_get_drvdata(&pdev->dev);
+
+       mdiobus_unregister(bus);
+       iounmap(bus->priv);
+       mdiobus_free(bus);
+
+       return 0;
+}
+
+static struct of_device_id xgmac_mdio_match[] = {
+       {
+               .compatible = "fsl,fman-xmdio",
+       },
+       {},
+};
+MODULE_DEVICE_TABLE(of, xgmac_mdio_match);
+
+static struct platform_driver xgmac_mdio_driver = {
+       .driver = {
+               .name = "fsl-fman_xmdio",
+               .of_match_table = xgmac_mdio_match,
+       },
+       .probe = xgmac_mdio_probe,
+       .remove = xgmac_mdio_remove,
+};
+
+module_platform_driver(xgmac_mdio_driver);
+
+MODULE_DESCRIPTION("Freescale QorIQ 10G MDIO Controller");
+MODULE_LICENSE("GPL v2");
index fed5080a6b621a07da4837b45b095e1107119d4b..959faf7388e21ba77316d1817f66e337d6f89315 100644 (file)
@@ -150,7 +150,7 @@ config SUN3_82586
 
 config ZNET
        tristate "Zenith Z-Note support (EXPERIMENTAL)"
-       depends on EXPERIMENTAL && ISA_DMA_API
+       depends on EXPERIMENTAL && ISA_DMA_API && X86
        ---help---
          The Zenith Z-Note notebook computer has a built-in network
          (Ethernet) card, and this is the Linux driver for it. Note that the
index ba4e0cea3506f80da5cc36a69f22994a7a3e470e..c9479e081b8aa0507a980d6bb1bc11af1d1adbc0 100644 (file)
@@ -865,14 +865,14 @@ static void hardware_init(struct net_device *dev)
        disable_dma(znet->rx_dma);              /* reset by an interrupting task. */
        clear_dma_ff(znet->rx_dma);
        set_dma_mode(znet->rx_dma, DMA_RX_MODE);
-       set_dma_addr(znet->rx_dma, (unsigned int) znet->rx_start);
+       set_dma_addr(znet->rx_dma, isa_virt_to_bus(znet->rx_start));
        set_dma_count(znet->rx_dma, RX_BUF_SIZE);
        enable_dma(znet->rx_dma);
        /* Now set up the Tx channel. */
        disable_dma(znet->tx_dma);
        clear_dma_ff(znet->tx_dma);
        set_dma_mode(znet->tx_dma, DMA_TX_MODE);
-       set_dma_addr(znet->tx_dma, (unsigned int) znet->tx_start);
+       set_dma_addr(znet->tx_dma, isa_virt_to_bus(znet->tx_start));
        set_dma_count(znet->tx_dma, znet->tx_buf_len<<1);
        enable_dma(znet->tx_dma);
        release_dma_lock(flags);
index 736a7d987db599fb1f855cc74d89dcdc0d3ce640..9089d00f14216431b9bf33db13a51b35414184b9 100644 (file)
@@ -174,6 +174,20 @@ static int e1000_get_settings(struct net_device *netdev,
 
        ecmd->autoneg = ((hw->media_type == e1000_media_type_fiber) ||
                         hw->autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+
+       /* MDI-X => 1; MDI => 0 */
+       if ((hw->media_type == e1000_media_type_copper) &&
+           netif_carrier_ok(netdev))
+               ecmd->eth_tp_mdix = (!!adapter->phy_info.mdix_mode ?
+                                                       ETH_TP_MDI_X :
+                                                       ETH_TP_MDI);
+       else
+               ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
+
+       if (hw->mdix == AUTO_ALL_MODES)
+               ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
+       else
+               ecmd->eth_tp_mdix_ctrl = hw->mdix;
        return 0;
 }
 
@@ -183,6 +197,22 @@ static int e1000_set_settings(struct net_device *netdev,
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
 
+       /*
+        * MDI setting is only allowed when autoneg enabled because
+        * some hardware doesn't allow MDI setting when speed or
+        * duplex is forced.
+        */
+       if (ecmd->eth_tp_mdix_ctrl) {
+               if (hw->media_type != e1000_media_type_copper)
+                       return -EOPNOTSUPP;
+
+               if ((ecmd->eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) &&
+                   (ecmd->autoneg != AUTONEG_ENABLE)) {
+                       e_err(drv, "forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n");
+                       return -EINVAL;
+               }
+       }
+
        while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
                msleep(1);
 
@@ -199,12 +229,21 @@ static int e1000_set_settings(struct net_device *netdev,
                ecmd->advertising = hw->autoneg_advertised;
        } else {
                u32 speed = ethtool_cmd_speed(ecmd);
+               /* calling this overrides forced MDI setting */
                if (e1000_set_spd_dplx(adapter, speed, ecmd->duplex)) {
                        clear_bit(__E1000_RESETTING, &adapter->flags);
                        return -EINVAL;
                }
        }
 
+       /* MDI-X => 2; MDI => 1; Auto => 3 */
+       if (ecmd->eth_tp_mdix_ctrl) {
+               if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO)
+                       hw->mdix = AUTO_ALL_MODES;
+               else
+                       hw->mdix = ecmd->eth_tp_mdix_ctrl;
+       }
+
        /* reset the link */
 
        if (netif_running(adapter->netdev)) {
index f3f9aeb7d1e189f045b294a6716e27ce473133a0..222bfaff4622959df30eb7b89f25a2f7764dcc32 100644 (file)
@@ -2014,6 +2014,7 @@ static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
                e1000_unmap_and_free_tx_resource(adapter, buffer_info);
        }
 
+       netdev_reset_queue(adapter->netdev);
        size = sizeof(struct e1000_buffer) * tx_ring->count;
        memset(tx_ring->buffer_info, 0, size);
 
@@ -3273,6 +3274,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
                             nr_frags, mss);
 
        if (count) {
+               netdev_sent_queue(netdev, skb->len);
                skb_tx_timestamp(skb);
 
                e1000_tx_queue(adapter, tx_ring, tx_flags, count);
@@ -3860,6 +3862,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
        unsigned int i, eop;
        unsigned int count = 0;
        unsigned int total_tx_bytes=0, total_tx_packets=0;
+       unsigned int bytes_compl = 0, pkts_compl = 0;
 
        i = tx_ring->next_to_clean;
        eop = tx_ring->buffer_info[i].next_to_watch;
@@ -3877,6 +3880,11 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
                        if (cleaned) {
                                total_tx_packets += buffer_info->segs;
                                total_tx_bytes += buffer_info->bytecount;
+                               if (buffer_info->skb) {
+                                       bytes_compl += buffer_info->skb->len;
+                                       pkts_compl++;
+                               }
+
                        }
                        e1000_unmap_and_free_tx_resource(adapter, buffer_info);
                        tx_desc->upper.data = 0;
@@ -3890,6 +3898,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
 
        tx_ring->next_to_clean = i;
 
+       netdev_completed_queue(netdev, pkts_compl, bytes_compl);
+
 #define TX_WAKE_THRESHOLD 32
        if (unlikely(count && netif_carrier_ok(netdev) &&
                     E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
@@ -4950,6 +4960,10 @@ int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
        default:
                goto err_inval;
        }
+
+       /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
+       hw->mdix = AUTO_ALL_MODES;
+
        return 0;
 
 err_inval:
index 080c89093feb5df6393c6d8f58421ff254f2b05d..c98586408005a0dbbb557e4ae22fc3f49ba04a48 100644 (file)
@@ -653,7 +653,7 @@ static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw)
  **/
 static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active)
 {
-       u16 data = er32(POEMB);
+       u32 data = er32(POEMB);
 
        if (active)
                data |= E1000_PHY_CTRL_D0A_LPLU;
@@ -677,7 +677,7 @@ static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active)
  **/
 static s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active)
 {
-       u16 data = er32(POEMB);
+       u32 data = er32(POEMB);
 
        if (!active) {
                data &= ~E1000_PHY_CTRL_NOND0A_LPLU;
index 0349e2478df8f294b04c4c54fc036cc627312afe..c11ac2756667bbf7205be4b5d1c903b00e7c3e4b 100644 (file)
@@ -199,6 +199,11 @@ static int e1000_get_settings(struct net_device *netdev,
        else
                ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
 
+       if (hw->phy.mdix == AUTO_ALL_MODES)
+               ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
+       else
+               ecmd->eth_tp_mdix_ctrl = hw->phy.mdix;
+
        return 0;
 }
 
@@ -241,6 +246,10 @@ static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
        default:
                goto err_inval;
        }
+
+       /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
+       adapter->hw.phy.mdix = AUTO_ALL_MODES;
+
        return 0;
 
 err_inval:
@@ -264,6 +273,22 @@ static int e1000_set_settings(struct net_device *netdev,
                return -EINVAL;
        }
 
+       /*
+        * MDI setting is only allowed when autoneg enabled because
+        * some hardware doesn't allow MDI setting when speed or
+        * duplex is forced.
+        */
+       if (ecmd->eth_tp_mdix_ctrl) {
+               if (hw->phy.media_type != e1000_media_type_copper)
+                       return -EOPNOTSUPP;
+
+               if ((ecmd->eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) &&
+                   (ecmd->autoneg != AUTONEG_ENABLE)) {
+                       e_err("forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n");
+                       return -EINVAL;
+               }
+       }
+
        while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
                usleep_range(1000, 2000);
 
@@ -282,20 +307,32 @@ static int e1000_set_settings(struct net_device *netdev,
                        hw->fc.requested_mode = e1000_fc_default;
        } else {
                u32 speed = ethtool_cmd_speed(ecmd);
+               /* calling this overrides forced MDI setting */
                if (e1000_set_spd_dplx(adapter, speed, ecmd->duplex)) {
                        clear_bit(__E1000_RESETTING, &adapter->state);
                        return -EINVAL;
                }
        }
 
+       /* MDI-X => 2; MDI => 1; Auto => 3 */
+       if (ecmd->eth_tp_mdix_ctrl) {
+               /*
+                * fix up the value for auto (3 => 0) as zero is mapped
+                * internally to auto
+                */
+               if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO)
+                       hw->phy.mdix = AUTO_ALL_MODES;
+               else
+                       hw->phy.mdix = ecmd->eth_tp_mdix_ctrl;
+       }
+
        /* reset the link */
 
        if (netif_running(adapter->netdev)) {
                e1000e_down(adapter);
                e1000e_up(adapter);
-       } else {
+       } else
                e1000e_reset(adapter);
-       }
 
        clear_bit(__E1000_RESETTING, &adapter->state);
        return 0;
@@ -1905,7 +1942,8 @@ static int e1000_set_coalesce(struct net_device *netdev,
                return -EINVAL;
 
        if (ec->rx_coalesce_usecs == 4) {
-               adapter->itr = adapter->itr_setting = 4;
+               adapter->itr_setting = 4;
+               adapter->itr = adapter->itr_setting;
        } else if (ec->rx_coalesce_usecs <= 3) {
                adapter->itr = 20000;
                adapter->itr_setting = ec->rx_coalesce_usecs;
index 3f0223ac4c7c2152ca8b125ec582303f3546d980..fb659dd8db038941842e9eb937b94eed8640d98e 100644 (file)
@@ -56,7 +56,7 @@
 
 #define DRV_EXTRAVERSION "-k"
 
-#define DRV_VERSION "2.0.0" DRV_EXTRAVERSION
+#define DRV_VERSION "2.1.4" DRV_EXTRAVERSION
 char e1000e_driver_name[] = "e1000e";
 const char e1000e_driver_version[] = DRV_VERSION;
 
@@ -3446,7 +3446,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
 
                        /*
                         * if short on Rx space, Rx wins and must trump Tx
-                        * adjustment or use Early Receive if available
+                        * adjustment
                         */
                        if (pba < min_rx_space)
                                pba = min_rx_space;
@@ -3755,6 +3755,10 @@ static irqreturn_t e1000_intr_msi_test(int irq, void *data)
        e_dbg("icr is %08X\n", icr);
        if (icr & E1000_ICR_RXSEQ) {
                adapter->flags &= ~FLAG_MSI_TEST_FAILED;
+               /*
+                * Force memory writes to complete before acknowledging the
+                * interrupt is handled.
+                */
                wmb();
        }
 
@@ -3796,6 +3800,10 @@ static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
                goto msi_test_failed;
        }
 
+       /*
+        * Force memory writes to complete before enabling and firing an
+        * interrupt.
+        */
        wmb();
 
        e1000_irq_enable(adapter);
@@ -3807,7 +3815,7 @@ static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
 
        e1000_irq_disable(adapter);
 
-       rmb();
+       rmb();                  /* read flags after interrupt has been fired */
 
        if (adapter->flags & FLAG_MSI_TEST_FAILED) {
                adapter->int_mode = E1000E_INT_MODE_LEGACY;
@@ -4670,7 +4678,7 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
        struct e1000_buffer *buffer_info;
        unsigned int i;
        u32 cmd_length = 0;
-       u16 ipcse = 0, tucse, mss;
+       u16 ipcse = 0, mss;
        u8 ipcss, ipcso, tucss, tucso, hdr_len;
 
        if (!skb_is_gso(skb))
@@ -4704,7 +4712,6 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
        ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
        tucss = skb_transport_offset(skb);
        tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
-       tucse = 0;
 
        cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
                       E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
@@ -4718,7 +4725,7 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
        context_desc->lower_setup.ip_fields.ipcse  = cpu_to_le16(ipcse);
        context_desc->upper_setup.tcp_fields.tucss = tucss;
        context_desc->upper_setup.tcp_fields.tucso = tucso;
-       context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
+       context_desc->upper_setup.tcp_fields.tucse = 0;
        context_desc->tcp_seg_setup.fields.mss     = cpu_to_le16(mss);
        context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
        context_desc->cmd_and_length = cpu_to_le32(cmd_length);
index b860d4f7ea2a950a7b24d0db8ca6f15446f1bfd3..fc62a3f3a5bec8b3e7ce0fba893ae50add89f22a 100644 (file)
@@ -84,8 +84,9 @@ static const u16 e1000_igp_2_cable_length_table[] = {
 #define I82577_PHY_STATUS2_SPEED_1000MBPS 0x0200
 
 /* I82577 PHY Control 2 */
-#define I82577_PHY_CTRL2_AUTO_MDIX        0x0400
-#define I82577_PHY_CTRL2_FORCE_MDI_MDIX   0x0200
+#define I82577_PHY_CTRL2_MANUAL_MDIX      0x0200
+#define I82577_PHY_CTRL2_AUTO_MDI_MDIX    0x0400
+#define I82577_PHY_CTRL2_MDIX_CFG_MASK    0x0600
 
 /* I82577 PHY Diagnostics Status */
 #define I82577_DSTATUS_CABLE_LENGTH       0x03FC
@@ -702,6 +703,32 @@ s32 e1000_copper_link_setup_82577(struct e1000_hw *hw)
        if (ret_val)
                return ret_val;
 
+       /* Set MDI/MDIX mode */
+       ret_val = e1e_rphy(hw, I82577_PHY_CTRL_2, &phy_data);
+       if (ret_val)
+               return ret_val;
+       phy_data &= ~I82577_PHY_CTRL2_MDIX_CFG_MASK;
+       /*
+        * Options:
+        *   0 - Auto (default)
+        *   1 - MDI mode
+        *   2 - MDI-X mode
+        */
+       switch (hw->phy.mdix) {
+       case 1:
+               break;
+       case 2:
+               phy_data |= I82577_PHY_CTRL2_MANUAL_MDIX;
+               break;
+       case 0:
+       default:
+               phy_data |= I82577_PHY_CTRL2_AUTO_MDI_MDIX;
+               break;
+       }
+       ret_val = e1e_wphy(hw, I82577_PHY_CTRL_2, phy_data);
+       if (ret_val)
+               return ret_val;
+
        return e1000_set_master_slave_mode(hw);
 }
 
index ba994fb4cec69bc60baaff7c9407faf9553d40be..ca4641e2f74870c1bac8c147ad231b19ba660e12 100644 (file)
@@ -2223,11 +2223,10 @@ out:
 s32 igb_set_eee_i350(struct e1000_hw *hw)
 {
        s32 ret_val = 0;
-       u32 ipcnfg, eeer, ctrl_ext;
+       u32 ipcnfg, eeer;
 
-       ctrl_ext = rd32(E1000_CTRL_EXT);
-       if ((hw->mac.type != e1000_i350) ||
-           (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK))
+       if ((hw->mac.type < e1000_i350) ||
+           (hw->phy.media_type != e1000_media_type_copper))
                goto out;
        ipcnfg = rd32(E1000_IPCNFG);
        eeer = rd32(E1000_EEER);
@@ -2240,6 +2239,14 @@ s32 igb_set_eee_i350(struct e1000_hw *hw)
                        E1000_EEER_RX_LPI_EN |
                        E1000_EEER_LPI_FC);
 
+               /* keep the LPI clock running before EEE is enabled */
+               if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) {
+                       u32 eee_su;
+                       eee_su = rd32(E1000_EEE_SU);
+                       eee_su &= ~E1000_EEE_SU_LPI_CLK_STP;
+                       wr32(E1000_EEE_SU, eee_su);
+               }
+
        } else {
                ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN |
                        E1000_IPCNFG_EEE_100M_AN);
@@ -2249,6 +2256,8 @@ s32 igb_set_eee_i350(struct e1000_hw *hw)
        }
        wr32(E1000_IPCNFG, ipcnfg);
        wr32(E1000_EEER, eeer);
+       rd32(E1000_IPCNFG);
+       rd32(E1000_EEER);
 out:
 
        return ret_val;
index ec7e4fe3e3ee24d240880aeb6cee95eec215cd12..de4b41ec3c402da0829357edc5f77c1821369a5e 100644 (file)
 #define E1000_FCRTC_RTH_COAL_SHIFT      4
 #define E1000_PCIEMISC_LX_DECISION      0x00000080 /* Lx power decision */
 
+/* Timestamp in Rx buffer */
+#define E1000_RXPBS_CFG_TS_EN           0x80000000
+
 /* SerDes Control */
 #define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400
 
 #define E1000_ICR_RXDMT0        0x00000010 /* rx desc min. threshold (0) */
 #define E1000_ICR_RXT0          0x00000080 /* rx timer intr (ring 0) */
 #define E1000_ICR_VMMB          0x00000100 /* VM MB event */
+#define E1000_ICR_TS            0x00080000 /* Time Sync Interrupt */
 #define E1000_ICR_DRSTA         0x40000000 /* Device Reset Asserted */
 /* If this bit asserted, the driver should claim the interrupt */
 #define E1000_ICR_INT_ASSERTED  0x80000000
 #define E1000_IMS_TXDW      E1000_ICR_TXDW      /* Transmit desc written back */
 #define E1000_IMS_LSC       E1000_ICR_LSC       /* Link Status Change */
 #define E1000_IMS_VMMB      E1000_ICR_VMMB      /* Mail box activity */
+#define E1000_IMS_TS        E1000_ICR_TS        /* Time Sync Interrupt */
 #define E1000_IMS_RXSEQ     E1000_ICR_RXSEQ     /* rx sequence error */
 #define E1000_IMS_RXDMT0    E1000_ICR_RXDMT0    /* rx desc min. threshold */
 #define E1000_IMS_RXT0      E1000_ICR_RXT0      /* rx timer intr */
 
 #define E1000_TIMINCA_16NS_SHIFT 24
 
+#define E1000_TSICR_TXTS 0x00000002
+#define E1000_TSIM_TXTS 0x00000002
+
 #define E1000_MDICNFG_EXT_MDIO    0x80000000      /* MDI ext/int destination */
 #define E1000_MDICNFG_COM_MDIO    0x40000000      /* MDI shared w/ lan 0 */
 #define E1000_MDICNFG_PHY_MASK    0x03E00000
 #define E1000_IPCNFG_EEE_100M_AN     0x00000004  /* EEE Enable 100M AN */
 #define E1000_EEER_TX_LPI_EN         0x00010000  /* EEE Tx LPI Enable */
 #define E1000_EEER_RX_LPI_EN         0x00020000  /* EEE Rx LPI Enable */
-#define E1000_EEER_FRC_AN            0x10000000 /* Enable EEE in loopback */
+#define E1000_EEER_FRC_AN            0x10000000  /* Enable EEE in loopback */
 #define E1000_EEER_LPI_FC            0x00040000  /* EEE Enable on FC */
+#define E1000_EEE_SU_LPI_CLK_STP     0X00800000  /* EEE LPI Clock Stop */
 
 /* SerDes Control */
 #define E1000_GEN_CTL_READY             0x80000000
index 7be98b6f105235f85446972378cfd24badfdf5f0..3404bc79f4cadf76382c5dbfca62b4d4ebe1f693 100644 (file)
@@ -464,6 +464,32 @@ s32 igb_copper_link_setup_82580(struct e1000_hw *hw)
        phy_data |= I82580_CFG_ENABLE_DOWNSHIFT;
 
        ret_val = phy->ops.write_reg(hw, I82580_CFG_REG, phy_data);
+       if (ret_val)
+               goto out;
+
+       /* Set MDI/MDIX mode */
+       ret_val = phy->ops.read_reg(hw, I82580_PHY_CTRL_2, &phy_data);
+       if (ret_val)
+               goto out;
+       phy_data &= ~I82580_PHY_CTRL2_MDIX_CFG_MASK;
+       /*
+        * Options:
+        *   0 - Auto (default)
+        *   1 - MDI mode
+        *   2 - MDI-X mode
+        */
+       switch (hw->phy.mdix) {
+       case 1:
+               break;
+       case 2:
+               phy_data |= I82580_PHY_CTRL2_MANUAL_MDIX;
+               break;
+       case 0:
+       default:
+               phy_data |= I82580_PHY_CTRL2_AUTO_MDI_MDIX;
+               break;
+       }
+       ret_val = hw->phy.ops.write_reg(hw, I82580_PHY_CTRL_2, phy_data);
 
 out:
        return ret_val;
@@ -2246,8 +2272,7 @@ s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw)
        if (ret_val)
                goto out;
 
-       phy_data &= ~I82580_PHY_CTRL2_AUTO_MDIX;
-       phy_data &= ~I82580_PHY_CTRL2_FORCE_MDI_MDIX;
+       phy_data &= ~I82580_PHY_CTRL2_MDIX_CFG_MASK;
 
        ret_val = phy->ops.write_reg(hw, I82580_PHY_CTRL_2, phy_data);
        if (ret_val)
index 34e40619f16b908618b35b9708054d91b3a749f9..6ac3299bfcb9fefe23845b294a72e2119563ec36 100644 (file)
@@ -111,8 +111,9 @@ s32  igb_check_polarity_m88(struct e1000_hw *hw);
 #define I82580_PHY_STATUS2_SPEED_100MBPS  0x0100
 
 /* I82580 PHY Control 2 */
-#define I82580_PHY_CTRL2_AUTO_MDIX        0x0400
-#define I82580_PHY_CTRL2_FORCE_MDI_MDIX   0x0200
+#define I82580_PHY_CTRL2_MANUAL_MDIX      0x0200
+#define I82580_PHY_CTRL2_AUTO_MDI_MDIX    0x0400
+#define I82580_PHY_CTRL2_MDIX_CFG_MASK    0x0600
 
 /* I82580 PHY Diagnostics Status */
 #define I82580_DSTATUS_CABLE_LENGTH       0x03FC
index 28394bea5253fc280e1aba973b20958ed1b9662f..e5db48594e8a929daab778dca1fe7c9b92f0e7fa 100644 (file)
@@ -91,6 +91,8 @@
 #define E1000_TIMINCA    0x0B608 /* Increment attributes register - RW */
 #define E1000_TSAUXC     0x0B640 /* Timesync Auxiliary Control register */
 #define E1000_SYSTIMR    0x0B6F8 /* System time register Residue */
+#define E1000_TSICR      0x0B66C /* Interrupt Cause Register */
+#define E1000_TSIM       0x0B674 /* Interrupt Mask Register */
 
 /* Filtering Registers */
 #define E1000_SAQF(_n) (0x5980 + 4 * (_n))
 /* Energy Efficient Ethernet "EEE" register */
 #define E1000_IPCNFG  0x0E38  /* Internal PHY Configuration */
 #define E1000_EEER    0x0E30  /* Energy Efficient Ethernet */
+#define E1000_EEE_SU  0X0E34  /* EEE Setup */
 
 /* Thermal Sensor Register */
 #define E1000_THSTAT    0x08110 /* Thermal Sensor Status */
index 9e572dd29ab288e98bd72c4859f16fbf839c9c3b..8aad230c0592e03f5f3b753f636f0a38dd35d4a3 100644 (file)
 #include "e1000_mac.h"
 #include "e1000_82575.h"
 
+#ifdef CONFIG_IGB_PTP
 #include <linux/clocksource.h>
 #include <linux/net_tstamp.h>
 #include <linux/ptp_clock_kernel.h>
+#endif /* CONFIG_IGB_PTP */
 #include <linux/bitops.h>
 #include <linux/if_vlan.h>
 
@@ -99,7 +101,6 @@ struct vf_data_storage {
        u16 pf_vlan; /* When set, guest VLAN config not allowed. */
        u16 pf_qos;
        u16 tx_rate;
-       struct pci_dev *vfdev;
 };
 
 #define IGB_VF_FLAG_CTS            0x00000001 /* VF is clear to send data */
@@ -131,9 +132,9 @@ struct vf_data_storage {
 #define MAXIMUM_ETHERNET_VLAN_SIZE 1522
 
 /* Supported Rx Buffer Sizes */
-#define IGB_RXBUFFER_512   512
+#define IGB_RXBUFFER_256   256
 #define IGB_RXBUFFER_16384 16384
-#define IGB_RX_HDR_LEN     IGB_RXBUFFER_512
+#define IGB_RX_HDR_LEN     IGB_RXBUFFER_256
 
 /* How many Tx Descriptors do we need to call netif_wake_queue ? */
 #define IGB_TX_QUEUE_WAKE      16
@@ -167,8 +168,8 @@ struct igb_tx_buffer {
        unsigned int bytecount;
        u16 gso_segs;
        __be16 protocol;
-       dma_addr_t dma;
-       u32 length;
+       DEFINE_DMA_UNMAP_ADDR(dma);
+       DEFINE_DMA_UNMAP_LEN(len);
        u32 tx_flags;
 };
 
@@ -212,7 +213,6 @@ struct igb_q_vector {
        struct igb_ring_container rx, tx;
 
        struct napi_struct napi;
-       int numa_node;
 
        u16 itr_val;
        u8 set_itr;
@@ -257,7 +257,6 @@ struct igb_ring {
        };
        /* Items past this point are only used during ring alloc / free */
        dma_addr_t dma;                /* phys address of the ring */
-       int numa_node;                  /* node to alloc ring memory on */
 };
 
 enum e1000_ring_flags_t {
@@ -342,7 +341,6 @@ struct igb_adapter {
 
        /* OS defined structs */
        struct pci_dev *pdev;
-       struct hwtstamp_config hwtstamp_config;
 
        spinlock_t stats64_lock;
        struct rtnl_link_stats64 stats64;
@@ -373,15 +371,19 @@ struct igb_adapter {
        int vf_rate_link_speed;
        u32 rss_queues;
        u32 wvbr;
-       int node;
        u32 *shadow_vfta;
 
+#ifdef CONFIG_IGB_PTP
        struct ptp_clock *ptp_clock;
-       struct ptp_clock_info caps;
-       struct delayed_work overflow_work;
+       struct ptp_clock_info ptp_caps;
+       struct delayed_work ptp_overflow_work;
+       struct work_struct ptp_tx_work;
+       struct sk_buff *ptp_tx_skb;
        spinlock_t tmreg_lock;
        struct cyclecounter cc;
        struct timecounter tc;
+#endif /* CONFIG_IGB_PTP */
+
        char fw_version[32];
 };
 
@@ -390,6 +392,7 @@ struct igb_adapter {
 #define IGB_FLAG_QUAD_PORT_A       (1 << 2)
 #define IGB_FLAG_QUEUE_PAIRS       (1 << 3)
 #define IGB_FLAG_DMAC              (1 << 4)
+#define IGB_FLAG_PTP               (1 << 5)
 
 /* DMA Coalescing defines */
 #define IGB_MIN_TXPBSIZE           20408
@@ -435,13 +438,17 @@ extern void igb_power_up_link(struct igb_adapter *);
 extern void igb_set_fw_version(struct igb_adapter *);
 #ifdef CONFIG_IGB_PTP
 extern void igb_ptp_init(struct igb_adapter *adapter);
-extern void igb_ptp_remove(struct igb_adapter *adapter);
-
-extern void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
-                                  struct skb_shared_hwtstamps *hwtstamps,
-                                  u64 systim);
+extern void igb_ptp_stop(struct igb_adapter *adapter);
+extern void igb_ptp_reset(struct igb_adapter *adapter);
+extern void igb_ptp_tx_work(struct work_struct *work);
+extern void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter);
+extern void igb_ptp_rx_hwtstamp(struct igb_q_vector *q_vector,
+                               union e1000_adv_rx_desc *rx_desc,
+                               struct sk_buff *skb);
+extern int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
+                                 struct ifreq *ifr, int cmd);
+#endif /* CONFIG_IGB_PTP */
 
-#endif
 static inline s32 igb_reset_phy(struct e1000_hw *hw)
 {
        if (hw->phy.ops.reset)
index 70591117051bf2faa446af5a3b81055f091d491d..2ea012849825224af910ba7189aa850b58610806 100644 (file)
@@ -148,9 +148,9 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
                                   SUPPORTED_100baseT_Full |
                                   SUPPORTED_1000baseT_Full|
                                   SUPPORTED_Autoneg |
-                                  SUPPORTED_TP);
-               ecmd->advertising = (ADVERTISED_TP |
-                                    ADVERTISED_Pause);
+                                  SUPPORTED_TP |
+                                  SUPPORTED_Pause);
+               ecmd->advertising = ADVERTISED_TP;
 
                if (hw->mac.autoneg == 1) {
                        ecmd->advertising |= ADVERTISED_Autoneg;
@@ -158,6 +158,21 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
                        ecmd->advertising |= hw->phy.autoneg_advertised;
                }
 
+               if (hw->mac.autoneg != 1)
+                       ecmd->advertising &= ~(ADVERTISED_Pause |
+                                              ADVERTISED_Asym_Pause);
+
+               if (hw->fc.requested_mode == e1000_fc_full)
+                       ecmd->advertising |= ADVERTISED_Pause;
+               else if (hw->fc.requested_mode == e1000_fc_rx_pause)
+                       ecmd->advertising |= (ADVERTISED_Pause |
+                                             ADVERTISED_Asym_Pause);
+               else if (hw->fc.requested_mode == e1000_fc_tx_pause)
+                       ecmd->advertising |=  ADVERTISED_Asym_Pause;
+               else
+                       ecmd->advertising &= ~(ADVERTISED_Pause |
+                                              ADVERTISED_Asym_Pause);
+
                ecmd->port = PORT_TP;
                ecmd->phy_address = hw->phy.addr;
        } else {
@@ -198,6 +213,19 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
        }
 
        ecmd->autoneg = hw->mac.autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+
+       /* MDI-X => 2; MDI =>1; Invalid =>0 */
+       if (hw->phy.media_type == e1000_media_type_copper)
+               ecmd->eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X :
+                                                     ETH_TP_MDI;
+       else
+               ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
+
+       if (hw->phy.mdix == AUTO_ALL_MODES)
+               ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
+       else
+               ecmd->eth_tp_mdix_ctrl = hw->phy.mdix;
+
        return 0;
 }
 
@@ -214,6 +242,22 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
                return -EINVAL;
        }
 
+       /*
+        * MDI setting is only allowed when autoneg enabled because
+        * some hardware doesn't allow MDI setting when speed or
+        * duplex is forced.
+        */
+       if (ecmd->eth_tp_mdix_ctrl) {
+               if (hw->phy.media_type != e1000_media_type_copper)
+                       return -EOPNOTSUPP;
+
+               if ((ecmd->eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) &&
+                   (ecmd->autoneg != AUTONEG_ENABLE)) {
+                       dev_err(&adapter->pdev->dev, "forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n");
+                       return -EINVAL;
+               }
+       }
+
        while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
                msleep(1);
 
@@ -227,12 +271,25 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
                        hw->fc.requested_mode = e1000_fc_default;
        } else {
                u32 speed = ethtool_cmd_speed(ecmd);
+               /* calling this overrides forced MDI setting */
                if (igb_set_spd_dplx(adapter, speed, ecmd->duplex)) {
                        clear_bit(__IGB_RESETTING, &adapter->state);
                        return -EINVAL;
                }
        }
 
+       /* MDI-X => 2; MDI => 1; Auto => 3 */
+       if (ecmd->eth_tp_mdix_ctrl) {
+               /*
+                * fix up the value for auto (3 => 0) as zero is mapped
+                * internally to auto
+                */
+               if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO)
+                       hw->phy.mdix = AUTO_ALL_MODES;
+               else
+                       hw->phy.mdix = ecmd->eth_tp_mdix_ctrl;
+       }
+
        /* reset the link */
        if (netif_running(adapter->netdev)) {
                igb_down(adapter);
@@ -1469,33 +1526,22 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
 {
        struct e1000_hw *hw = &adapter->hw;
        u32 ctrl_reg = 0;
-       u16 phy_reg = 0;
 
        hw->mac.autoneg = false;
 
-       switch (hw->phy.type) {
-       case e1000_phy_m88:
-               /* Auto-MDI/MDIX Off */
-               igb_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808);
-               /* reset to update Auto-MDI/MDIX */
-               igb_write_phy_reg(hw, PHY_CONTROL, 0x9140);
-               /* autoneg off */
-               igb_write_phy_reg(hw, PHY_CONTROL, 0x8140);
-               break;
-       case e1000_phy_82580:
-               /* enable MII loopback */
-               igb_write_phy_reg(hw, I82580_PHY_LBK_CTRL, 0x8041);
-               break;
-       case e1000_phy_i210:
-               /* set loopback speed in PHY */
-               igb_read_phy_reg(hw, (GS40G_PAGE_SELECT & GS40G_PAGE_2),
-                                       &phy_reg);
-               phy_reg |= GS40G_MAC_SPEED_1G;
-               igb_write_phy_reg(hw, (GS40G_PAGE_SELECT & GS40G_PAGE_2),
-                                       phy_reg);
-               ctrl_reg = rd32(E1000_CTRL_EXT);
-       default:
-               break;
+       if (hw->phy.type == e1000_phy_m88) {
+               if (hw->phy.id != I210_I_PHY_ID) {
+                       /* Auto-MDI/MDIX Off */
+                       igb_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808);
+                       /* reset to update Auto-MDI/MDIX */
+                       igb_write_phy_reg(hw, PHY_CONTROL, 0x9140);
+                       /* autoneg off */
+                       igb_write_phy_reg(hw, PHY_CONTROL, 0x8140);
+               } else {
+                       /* force 1000, set loopback  */
+                       igb_write_phy_reg(hw, I347AT4_PAGE_SELECT, 0);
+                       igb_write_phy_reg(hw, PHY_CONTROL, 0x4140);
+               }
        }
 
        /* add small delay to avoid loopback test failure */
@@ -1513,7 +1559,7 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
                     E1000_CTRL_FD |     /* Force Duplex to FULL */
                     E1000_CTRL_SLU);    /* Set link up enable bit */
 
-       if ((hw->phy.type == e1000_phy_m88) || (hw->phy.type == e1000_phy_i210))
+       if (hw->phy.type == e1000_phy_m88)
                ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
 
        wr32(E1000_CTRL, ctrl_reg);
@@ -1521,11 +1567,10 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
        /* Disable the receiver on the PHY so when a cable is plugged in, the
         * PHY does not begin to autoneg when a cable is reconnected to the NIC.
         */
-       if ((hw->phy.type == e1000_phy_m88) || (hw->phy.type == e1000_phy_i210))
+       if (hw->phy.type == e1000_phy_m88)
                igb_phy_disable_receiver(adapter);
 
-       udelay(500);
-
+       mdelay(500);
        return 0;
 }
 
@@ -1785,13 +1830,6 @@ static int igb_loopback_test(struct igb_adapter *adapter, u64 *data)
                *data = 0;
                goto out;
        }
-       if ((adapter->hw.mac.type == e1000_i210)
-               || (adapter->hw.mac.type == e1000_i211)) {
-               dev_err(&adapter->pdev->dev,
-                       "Loopback test not supported on this part at this time.\n");
-               *data = 0;
-               goto out;
-       }
        *data = igb_setup_desc_rings(adapter);
        if (*data)
                goto out;
@@ -2257,6 +2295,54 @@ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
        }
 }
 
+static int igb_get_ts_info(struct net_device *dev,
+                          struct ethtool_ts_info *info)
+{
+       struct igb_adapter *adapter = netdev_priv(dev);
+
+       switch (adapter->hw.mac.type) {
+#ifdef CONFIG_IGB_PTP
+       case e1000_82576:
+       case e1000_82580:
+       case e1000_i350:
+       case e1000_i210:
+       case e1000_i211:
+               info->so_timestamping =
+                       SOF_TIMESTAMPING_TX_HARDWARE |
+                       SOF_TIMESTAMPING_RX_HARDWARE |
+                       SOF_TIMESTAMPING_RAW_HARDWARE;
+
+               if (adapter->ptp_clock)
+                       info->phc_index = ptp_clock_index(adapter->ptp_clock);
+               else
+                       info->phc_index = -1;
+
+               info->tx_types =
+                       (1 << HWTSTAMP_TX_OFF) |
+                       (1 << HWTSTAMP_TX_ON);
+
+               info->rx_filters = 1 << HWTSTAMP_FILTER_NONE;
+
+               /* 82576 does not support timestamping all packets. */
+               if (adapter->hw.mac.type >= e1000_82580)
+                       info->rx_filters |= 1 << HWTSTAMP_FILTER_ALL;
+               else
+                       info->rx_filters |=
+                               (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+                               (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+                               (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
+                               (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+                               (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
+                               (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
+                               (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
+
+               return 0;
+#endif /* CONFIG_IGB_PTP */
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
 static int igb_ethtool_begin(struct net_device *netdev)
 {
        struct igb_adapter *adapter = netdev_priv(netdev);
@@ -2270,38 +2356,6 @@ static void igb_ethtool_complete(struct net_device *netdev)
        pm_runtime_put(&adapter->pdev->dev);
 }
 
-#ifdef CONFIG_IGB_PTP
-static int igb_ethtool_get_ts_info(struct net_device *dev,
-                                  struct ethtool_ts_info *info)
-{
-       struct igb_adapter *adapter = netdev_priv(dev);
-
-       info->so_timestamping =
-               SOF_TIMESTAMPING_TX_HARDWARE |
-               SOF_TIMESTAMPING_RX_HARDWARE |
-               SOF_TIMESTAMPING_RAW_HARDWARE;
-
-       if (adapter->ptp_clock)
-               info->phc_index = ptp_clock_index(adapter->ptp_clock);
-       else
-               info->phc_index = -1;
-
-       info->tx_types =
-               (1 << HWTSTAMP_TX_OFF) |
-               (1 << HWTSTAMP_TX_ON);
-
-       info->rx_filters =
-               (1 << HWTSTAMP_FILTER_NONE) |
-               (1 << HWTSTAMP_FILTER_ALL) |
-               (1 << HWTSTAMP_FILTER_SOME) |
-               (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
-               (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
-               (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
-
-       return 0;
-}
-
-#endif
 static const struct ethtool_ops igb_ethtool_ops = {
        .get_settings           = igb_get_settings,
        .set_settings           = igb_set_settings,
@@ -2328,11 +2382,9 @@ static const struct ethtool_ops igb_ethtool_ops = {
        .get_ethtool_stats      = igb_get_ethtool_stats,
        .get_coalesce           = igb_get_coalesce,
        .set_coalesce           = igb_set_coalesce,
+       .get_ts_info            = igb_get_ts_info,
        .begin                  = igb_ethtool_begin,
        .complete               = igb_ethtool_complete,
-#ifdef CONFIG_IGB_PTP
-       .get_ts_info            = igb_ethtool_get_ts_info,
-#endif
 };
 
 void igb_set_ethtool_ops(struct net_device *netdev)
index f88c822e57a6d3b3a9f54ac3659ecf1852f36b28..e1ceb37ef12e406fd7ff6dff13b08c18548fcea6 100644 (file)
@@ -172,8 +172,7 @@ static void igb_check_vf_rate_limit(struct igb_adapter *);
 
 #ifdef CONFIG_PCI_IOV
 static int igb_vf_configure(struct igb_adapter *adapter, int vf);
-static int igb_find_enabled_vfs(struct igb_adapter *adapter);
-static int igb_check_vf_assignment(struct igb_adapter *adapter);
+static bool igb_vfs_are_assigned(struct igb_adapter *adapter);
 #endif
 
 #ifdef CONFIG_PM
@@ -404,8 +403,8 @@ static void igb_dump(struct igb_adapter *adapter)
                buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
                pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
                        n, tx_ring->next_to_use, tx_ring->next_to_clean,
-                       (u64)buffer_info->dma,
-                       buffer_info->length,
+                       (u64)dma_unmap_addr(buffer_info, dma),
+                       dma_unmap_len(buffer_info, len),
                        buffer_info->next_to_watch,
                        (u64)buffer_info->time_stamp);
        }
@@ -456,8 +455,8 @@ static void igb_dump(struct igb_adapter *adapter)
                                " %04X  %p %016llX %p%s\n", i,
                                le64_to_cpu(u0->a),
                                le64_to_cpu(u0->b),
-                               (u64)buffer_info->dma,
-                               buffer_info->length,
+                               (u64)dma_unmap_addr(buffer_info, dma),
+                               dma_unmap_len(buffer_info, len),
                                buffer_info->next_to_watch,
                                (u64)buffer_info->time_stamp,
                                buffer_info->skb, next_desc);
@@ -466,7 +465,8 @@ static void igb_dump(struct igb_adapter *adapter)
                                print_hex_dump(KERN_INFO, "",
                                        DUMP_PREFIX_ADDRESS,
                                        16, 1, buffer_info->skb->data,
-                                       buffer_info->length, true);
+                                       dma_unmap_len(buffer_info, len),
+                                       true);
                }
        }
 
@@ -683,52 +683,29 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
 {
        struct igb_ring *ring;
        int i;
-       int orig_node = adapter->node;
 
        for (i = 0; i < adapter->num_tx_queues; i++) {
-               if (orig_node == -1) {
-                       int cur_node = next_online_node(adapter->node);
-                       if (cur_node == MAX_NUMNODES)
-                               cur_node = first_online_node;
-                       adapter->node = cur_node;
-               }
-               ring = kzalloc_node(sizeof(struct igb_ring), GFP_KERNEL,
-                                   adapter->node);
-               if (!ring)
-                       ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
+               ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
                if (!ring)
                        goto err;
                ring->count = adapter->tx_ring_count;
                ring->queue_index = i;
                ring->dev = &adapter->pdev->dev;
                ring->netdev = adapter->netdev;
-               ring->numa_node = adapter->node;
                /* For 82575, context index must be unique per ring. */
                if (adapter->hw.mac.type == e1000_82575)
                        set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);
                adapter->tx_ring[i] = ring;
        }
-       /* Restore the adapter's original node */
-       adapter->node = orig_node;
 
        for (i = 0; i < adapter->num_rx_queues; i++) {
-               if (orig_node == -1) {
-                       int cur_node = next_online_node(adapter->node);
-                       if (cur_node == MAX_NUMNODES)
-                               cur_node = first_online_node;
-                       adapter->node = cur_node;
-               }
-               ring = kzalloc_node(sizeof(struct igb_ring), GFP_KERNEL,
-                                   adapter->node);
-               if (!ring)
-                       ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
+               ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
                if (!ring)
                        goto err;
                ring->count = adapter->rx_ring_count;
                ring->queue_index = i;
                ring->dev = &adapter->pdev->dev;
                ring->netdev = adapter->netdev;
-               ring->numa_node = adapter->node;
                /* set flag indicating ring supports SCTP checksum offload */
                if (adapter->hw.mac.type >= e1000_82576)
                        set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
@@ -742,16 +719,12 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
 
                adapter->rx_ring[i] = ring;
        }
-       /* Restore the adapter's original node */
-       adapter->node = orig_node;
 
        igb_cache_ring_register(adapter);
 
        return 0;
 
 err:
-       /* Restore the adapter's original node */
-       adapter->node = orig_node;
        igb_free_queues(adapter);
 
        return -ENOMEM;
@@ -1117,24 +1090,10 @@ static int igb_alloc_q_vectors(struct igb_adapter *adapter)
        struct igb_q_vector *q_vector;
        struct e1000_hw *hw = &adapter->hw;
        int v_idx;
-       int orig_node = adapter->node;
 
        for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
-               if ((adapter->num_q_vectors == (adapter->num_rx_queues +
-                                               adapter->num_tx_queues)) &&
-                   (adapter->num_rx_queues == v_idx))
-                       adapter->node = orig_node;
-               if (orig_node == -1) {
-                       int cur_node = next_online_node(adapter->node);
-                       if (cur_node == MAX_NUMNODES)
-                               cur_node = first_online_node;
-                       adapter->node = cur_node;
-               }
-               q_vector = kzalloc_node(sizeof(struct igb_q_vector), GFP_KERNEL,
-                                       adapter->node);
-               if (!q_vector)
-                       q_vector = kzalloc(sizeof(struct igb_q_vector),
-                                          GFP_KERNEL);
+               q_vector = kzalloc(sizeof(struct igb_q_vector),
+                                  GFP_KERNEL);
                if (!q_vector)
                        goto err_out;
                q_vector->adapter = adapter;
@@ -1143,14 +1102,10 @@ static int igb_alloc_q_vectors(struct igb_adapter *adapter)
                netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
                adapter->q_vector[v_idx] = q_vector;
        }
-       /* Restore the adapter's original node */
-       adapter->node = orig_node;
 
        return 0;
 
 err_out:
-       /* Restore the adapter's original node */
-       adapter->node = orig_node;
        igb_free_q_vectors(adapter);
        return -ENOMEM;
 }
@@ -1751,6 +1706,11 @@ void igb_reset(struct igb_adapter *adapter)
        /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
        wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
 
+#ifdef CONFIG_IGB_PTP
+       /* Re-enable PTP, where applicable. */
+       igb_ptp_reset(adapter);
+#endif /* CONFIG_IGB_PTP */
+
        igb_get_phy_info(hw);
 }
 
@@ -2180,11 +2140,12 @@ static int __devinit igb_probe(struct pci_dev *pdev,
        }
 
 #endif
+
 #ifdef CONFIG_IGB_PTP
        /* do hw tstamp init after resetting */
        igb_ptp_init(adapter);
+#endif /* CONFIG_IGB_PTP */
 
-#endif
        dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
        /* print bus type/speed/width info */
        dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
@@ -2259,9 +2220,9 @@ static void __devexit igb_remove(struct pci_dev *pdev)
 
        pm_runtime_get_noresume(&pdev->dev);
 #ifdef CONFIG_IGB_PTP
-       igb_ptp_remove(adapter);
+       igb_ptp_stop(adapter);
+#endif /* CONFIG_IGB_PTP */
 
-#endif
        /*
         * The watchdog timer may be rescheduled, so explicitly
         * disable watchdog from being rescheduled.
@@ -2294,11 +2255,11 @@ static void __devexit igb_remove(struct pci_dev *pdev)
        /* reclaim resources allocated to VFs */
        if (adapter->vf_data) {
                /* disable iov and allow time for transactions to clear */
-               if (!igb_check_vf_assignment(adapter)) {
+               if (igb_vfs_are_assigned(adapter)) {
+                       dev_info(&pdev->dev, "Unloading driver while VFs are assigned - VFs will not be deallocated\n");
+               } else {
                        pci_disable_sriov(pdev);
                        msleep(500);
-               } else {
-                       dev_info(&pdev->dev, "VF(s) assigned to guests!\n");
                }
 
                kfree(adapter->vf_data);
@@ -2338,7 +2299,7 @@ static void __devinit igb_probe_vfs(struct igb_adapter * adapter)
 #ifdef CONFIG_PCI_IOV
        struct pci_dev *pdev = adapter->pdev;
        struct e1000_hw *hw = &adapter->hw;
-       int old_vfs = igb_find_enabled_vfs(adapter);
+       int old_vfs = pci_num_vf(adapter->pdev);
        int i;
 
        /* Virtualization features not supported on i210 family. */
@@ -2418,8 +2379,6 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
                                  VLAN_HLEN;
        adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
 
-       adapter->node = -1;
-
        spin_lock_init(&adapter->stats64_lock);
 #ifdef CONFIG_PCI_IOV
        switch (hw->mac.type) {
@@ -2666,13 +2625,11 @@ static int igb_close(struct net_device *netdev)
 int igb_setup_tx_resources(struct igb_ring *tx_ring)
 {
        struct device *dev = tx_ring->dev;
-       int orig_node = dev_to_node(dev);
        int size;
 
        size = sizeof(struct igb_tx_buffer) * tx_ring->count;
-       tx_ring->tx_buffer_info = vzalloc_node(size, tx_ring->numa_node);
-       if (!tx_ring->tx_buffer_info)
-               tx_ring->tx_buffer_info = vzalloc(size);
+
+       tx_ring->tx_buffer_info = vzalloc(size);
        if (!tx_ring->tx_buffer_info)
                goto err;
 
@@ -2680,18 +2637,10 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring)
        tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
        tx_ring->size = ALIGN(tx_ring->size, 4096);
 
-       set_dev_node(dev, tx_ring->numa_node);
        tx_ring->desc = dma_alloc_coherent(dev,
                                           tx_ring->size,
                                           &tx_ring->dma,
                                           GFP_KERNEL);
-       set_dev_node(dev, orig_node);
-       if (!tx_ring->desc)
-               tx_ring->desc = dma_alloc_coherent(dev,
-                                                  tx_ring->size,
-                                                  &tx_ring->dma,
-                                                  GFP_KERNEL);
-
        if (!tx_ring->desc)
                goto err;
 
@@ -2702,8 +2651,8 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring)
 
 err:
        vfree(tx_ring->tx_buffer_info);
-       dev_err(dev,
-               "Unable to allocate memory for the transmit descriptor ring\n");
+       tx_ring->tx_buffer_info = NULL;
+       dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
        return -ENOMEM;
 }
 
@@ -2820,34 +2769,23 @@ static void igb_configure_tx(struct igb_adapter *adapter)
 int igb_setup_rx_resources(struct igb_ring *rx_ring)
 {
        struct device *dev = rx_ring->dev;
-       int orig_node = dev_to_node(dev);
-       int size, desc_len;
+       int size;
 
        size = sizeof(struct igb_rx_buffer) * rx_ring->count;
-       rx_ring->rx_buffer_info = vzalloc_node(size, rx_ring->numa_node);
-       if (!rx_ring->rx_buffer_info)
-               rx_ring->rx_buffer_info = vzalloc(size);
+
+       rx_ring->rx_buffer_info = vzalloc(size);
        if (!rx_ring->rx_buffer_info)
                goto err;
 
-       desc_len = sizeof(union e1000_adv_rx_desc);
 
        /* Round up to nearest 4K */
-       rx_ring->size = rx_ring->count * desc_len;
+       rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc);
        rx_ring->size = ALIGN(rx_ring->size, 4096);
 
-       set_dev_node(dev, rx_ring->numa_node);
        rx_ring->desc = dma_alloc_coherent(dev,
                                           rx_ring->size,
                                           &rx_ring->dma,
                                           GFP_KERNEL);
-       set_dev_node(dev, orig_node);
-       if (!rx_ring->desc)
-               rx_ring->desc = dma_alloc_coherent(dev,
-                                                  rx_ring->size,
-                                                  &rx_ring->dma,
-                                                  GFP_KERNEL);
-
        if (!rx_ring->desc)
                goto err;
 
@@ -2859,8 +2797,7 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
 err:
        vfree(rx_ring->rx_buffer_info);
        rx_ring->rx_buffer_info = NULL;
-       dev_err(dev, "Unable to allocate memory for the receive descriptor"
-               " ring\n");
+       dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
        return -ENOMEM;
 }
 
@@ -2898,57 +2835,48 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
 {
        struct e1000_hw *hw = &adapter->hw;
        u32 mrqc, rxcsum;
-       u32 j, num_rx_queues, shift = 0, shift2 = 0;
-       union e1000_reta {
-               u32 dword;
-               u8  bytes[4];
-       } reta;
-       static const u8 rsshash[40] = {
-               0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67,
-               0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb,
-               0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30,
-               0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa };
+       u32 j, num_rx_queues, shift = 0;
+       static const u32 rsskey[10] = { 0xDA565A6D, 0xC20E5B25, 0x3D256741,
+                                       0xB08FA343, 0xCB2BCAD0, 0xB4307BAE,
+                                       0xA32DCB77, 0x0CF23080, 0x3BB7426A,
+                                       0xFA01ACBE };
 
        /* Fill out hash function seeds */
-       for (j = 0; j < 10; j++) {
-               u32 rsskey = rsshash[(j * 4)];
-               rsskey |= rsshash[(j * 4) + 1] << 8;
-               rsskey |= rsshash[(j * 4) + 2] << 16;
-               rsskey |= rsshash[(j * 4) + 3] << 24;
-               array_wr32(E1000_RSSRK(0), j, rsskey);
-       }
+       for (j = 0; j < 10; j++)
+               wr32(E1000_RSSRK(j), rsskey[j]);
 
        num_rx_queues = adapter->rss_queues;
 
-       if (adapter->vfs_allocated_count) {
-               /* 82575 and 82576 supports 2 RSS queues for VMDq */
-               switch (hw->mac.type) {
-               case e1000_i350:
-               case e1000_82580:
-                       num_rx_queues = 1;
-                       shift = 0;
-                       break;
-               case e1000_82576:
+       switch (hw->mac.type) {
+       case e1000_82575:
+               shift = 6;
+               break;
+       case e1000_82576:
+               /* 82576 supports 2 RSS queues for SR-IOV */
+               if (adapter->vfs_allocated_count) {
                        shift = 3;
                        num_rx_queues = 2;
-                       break;
-               case e1000_82575:
-                       shift = 2;
-                       shift2 = 6;
-               default:
-                       break;
                }
-       } else {
-               if (hw->mac.type == e1000_82575)
-                       shift = 6;
+               break;
+       default:
+               break;
        }
 
-       for (j = 0; j < (32 * 4); j++) {
-               reta.bytes[j & 3] = (j % num_rx_queues) << shift;
-               if (shift2)
-                       reta.bytes[j & 3] |= num_rx_queues << shift2;
-               if ((j & 3) == 3)
-                       wr32(E1000_RETA(j >> 2), reta.dword);
+       /*
+        * Populate the indirection table 4 entries at a time.  To do this
+        * we are generating the results for n and n+2 and then interleaving
+        * those with the results with n+1 and n+3.
+        */
+       for (j = 0; j < 32; j++) {
+               /* first pass generates n and n+2 */
+               u32 base = ((j * 0x00040004) + 0x00020000) * num_rx_queues;
+               u32 reta = (base & 0x07800780) >> (7 - shift);
+
+               /* second pass generates n+1 and n+3 */
+               base += 0x00010001 * num_rx_queues;
+               reta |= (base & 0x07800780) << (1 + shift);
+
+               wr32(E1000_RETA(j), reta);
        }
 
        /*
@@ -3184,8 +3112,10 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
        srrctl |= (PAGE_SIZE / 2) >> E1000_SRRCTL_BSIZEPKT_SHIFT;
 #endif
        srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
+#ifdef CONFIG_IGB_PTP
        if (hw->mac.type >= e1000_82580)
                srrctl |= E1000_SRRCTL_TIMESTAMP;
+#endif /* CONFIG_IGB_PTP */
        /* Only set Drop Enable if we are supporting multiple queues */
        if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
                srrctl |= E1000_SRRCTL_DROP_EN;
@@ -3269,20 +3199,20 @@ void igb_unmap_and_free_tx_resource(struct igb_ring *ring,
 {
        if (tx_buffer->skb) {
                dev_kfree_skb_any(tx_buffer->skb);
-               if (tx_buffer->dma)
+               if (dma_unmap_len(tx_buffer, len))
                        dma_unmap_single(ring->dev,
-                                        tx_buffer->dma,
-                                        tx_buffer->length,
+                                        dma_unmap_addr(tx_buffer, dma),
+                                        dma_unmap_len(tx_buffer, len),
                                         DMA_TO_DEVICE);
-       } else if (tx_buffer->dma) {
+       } else if (dma_unmap_len(tx_buffer, len)) {
                dma_unmap_page(ring->dev,
-                              tx_buffer->dma,
-                              tx_buffer->length,
+                              dma_unmap_addr(tx_buffer, dma),
+                              dma_unmap_len(tx_buffer, len),
                               DMA_TO_DEVICE);
        }
        tx_buffer->next_to_watch = NULL;
        tx_buffer->skb = NULL;
-       tx_buffer->dma = 0;
+       dma_unmap_len_set(tx_buffer, len, 0);
        /* buffer_info must be completely set up in the transmit path */
 }
 
@@ -4229,9 +4159,11 @@ static __le32 igb_tx_cmd_type(u32 tx_flags)
        if (tx_flags & IGB_TX_FLAGS_VLAN)
                cmd_type |= cpu_to_le32(E1000_ADVTXD_DCMD_VLE);
 
+#ifdef CONFIG_IGB_PTP
        /* set timestamp bit if present */
-       if (tx_flags & IGB_TX_FLAGS_TSTAMP)
+       if (unlikely(tx_flags & IGB_TX_FLAGS_TSTAMP))
                cmd_type |= cpu_to_le32(E1000_ADVTXD_MAC_TSTAMP);
+#endif /* CONFIG_IGB_PTP */
 
        /* set segmentation bits for TSO */
        if (tx_flags & IGB_TX_FLAGS_TSO)
@@ -4275,7 +4207,7 @@ static void igb_tx_map(struct igb_ring *tx_ring,
                       const u8 hdr_len)
 {
        struct sk_buff *skb = first->skb;
-       struct igb_tx_buffer *tx_buffer_info;
+       struct igb_tx_buffer *tx_buffer;
        union e1000_adv_tx_desc *tx_desc;
        dma_addr_t dma;
        struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
@@ -4296,8 +4228,8 @@ static void igb_tx_map(struct igb_ring *tx_ring,
                goto dma_error;
 
        /* record length, and DMA address */
-       first->length = size;
-       first->dma = dma;
+       dma_unmap_len_set(first, len, size);
+       dma_unmap_addr_set(first, dma, dma);
        tx_desc->read.buffer_addr = cpu_to_le64(dma);
 
        for (;;) {
@@ -4339,9 +4271,9 @@ static void igb_tx_map(struct igb_ring *tx_ring,
                if (dma_mapping_error(tx_ring->dev, dma))
                        goto dma_error;
 
-               tx_buffer_info = &tx_ring->tx_buffer_info[i];
-               tx_buffer_info->length = size;
-               tx_buffer_info->dma = dma;
+               tx_buffer = &tx_ring->tx_buffer_info[i];
+               dma_unmap_len_set(tx_buffer, len, size);
+               dma_unmap_addr_set(tx_buffer, dma, dma);
 
                tx_desc->read.olinfo_status = 0;
                tx_desc->read.buffer_addr = cpu_to_le64(dma);
@@ -4392,9 +4324,9 @@ dma_error:
 
        /* clear dma mappings for failed tx_buffer_info map */
        for (;;) {
-               tx_buffer_info = &tx_ring->tx_buffer_info[i];
-               igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
-               if (tx_buffer_info == first)
+               tx_buffer = &tx_ring->tx_buffer_info[i];
+               igb_unmap_and_free_tx_resource(tx_ring, tx_buffer);
+               if (tx_buffer == first)
                        break;
                if (i == 0)
                        i = tx_ring->count;
@@ -4440,6 +4372,9 @@ static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
 netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
                                struct igb_ring *tx_ring)
 {
+#ifdef CONFIG_IGB_PTP
+       struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
+#endif /* CONFIG_IGB_PTP */
        struct igb_tx_buffer *first;
        int tso;
        u32 tx_flags = 0;
@@ -4462,10 +4397,17 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
        first->bytecount = skb->len;
        first->gso_segs = 1;
 
-       if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
+#ifdef CONFIG_IGB_PTP
+       if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+                    !(adapter->ptp_tx_skb))) {
                skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
                tx_flags |= IGB_TX_FLAGS_TSTAMP;
+
+               adapter->ptp_tx_skb = skb_get(skb);
+               if (adapter->hw.mac.type == e1000_82576)
+                       schedule_work(&adapter->ptp_tx_work);
        }
+#endif /* CONFIG_IGB_PTP */
 
        if (vlan_tx_tag_present(skb)) {
                tx_flags |= IGB_TX_FLAGS_VLAN;
@@ -4661,11 +4603,13 @@ void igb_update_stats(struct igb_adapter *adapter,
        bytes = 0;
        packets = 0;
        for (i = 0; i < adapter->num_rx_queues; i++) {
-               u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF;
+               u32 rqdpc = rd32(E1000_RQDPC(i));
                struct igb_ring *ring = adapter->rx_ring[i];
 
-               ring->rx_stats.drops += rqdpc_tmp;
-               net_stats->rx_fifo_errors += rqdpc_tmp;
+               if (rqdpc) {
+                       ring->rx_stats.drops += rqdpc;
+                       net_stats->rx_fifo_errors += rqdpc;
+               }
 
                do {
                        start = u64_stats_fetch_begin_bh(&ring->rx_syncp);
@@ -4755,7 +4699,11 @@ void igb_update_stats(struct igb_adapter *adapter,
        reg = rd32(E1000_CTRL_EXT);
        if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) {
                adapter->stats.rxerrc += rd32(E1000_RXERRC);
-               adapter->stats.tncrs += rd32(E1000_TNCRS);
+
+               /* this stat has invalid values on i210/i211 */
+               if ((hw->mac.type != e1000_i210) &&
+                   (hw->mac.type != e1000_i211))
+                       adapter->stats.tncrs += rd32(E1000_TNCRS);
        }
 
        adapter->stats.tsctc += rd32(E1000_TSCTC);
@@ -4852,6 +4800,19 @@ static irqreturn_t igb_msix_other(int irq, void *data)
                        mod_timer(&adapter->watchdog_timer, jiffies + 1);
        }
 
+#ifdef CONFIG_IGB_PTP
+       if (icr & E1000_ICR_TS) {
+               u32 tsicr = rd32(E1000_TSICR);
+
+               if (tsicr & E1000_TSICR_TXTS) {
+                       /* acknowledge the interrupt */
+                       wr32(E1000_TSICR, E1000_TSICR_TXTS);
+                       /* retrieve hardware timestamp */
+                       schedule_work(&adapter->ptp_tx_work);
+               }
+       }
+#endif /* CONFIG_IGB_PTP */
+
        wr32(E1000_EIMS, adapter->eims_other);
 
        return IRQ_HANDLED;
@@ -5002,102 +4963,43 @@ static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
 static int igb_vf_configure(struct igb_adapter *adapter, int vf)
 {
        unsigned char mac_addr[ETH_ALEN];
-       struct pci_dev *pdev = adapter->pdev;
-       struct e1000_hw *hw = &adapter->hw;
-       struct pci_dev *pvfdev;
-       unsigned int device_id;
-       u16 thisvf_devfn;
 
        eth_random_addr(mac_addr);
        igb_set_vf_mac(adapter, vf, mac_addr);
 
-       switch (adapter->hw.mac.type) {
-       case e1000_82576:
-               device_id = IGB_82576_VF_DEV_ID;
-               /* VF Stride for 82576 is 2 */
-               thisvf_devfn = (pdev->devfn + 0x80 + (vf << 1)) |
-                       (pdev->devfn & 1);
-               break;
-       case e1000_i350:
-               device_id = IGB_I350_VF_DEV_ID;
-               /* VF Stride for I350 is 4 */
-               thisvf_devfn = (pdev->devfn + 0x80 + (vf << 2)) |
-                               (pdev->devfn & 3);
-               break;
-       default:
-               device_id = 0;
-               thisvf_devfn = 0;
-               break;
-       }
-
-       pvfdev = pci_get_device(hw->vendor_id, device_id, NULL);
-       while (pvfdev) {
-               if (pvfdev->devfn == thisvf_devfn)
-                       break;
-               pvfdev = pci_get_device(hw->vendor_id,
-                                       device_id, pvfdev);
-       }
-
-       if (pvfdev)
-               adapter->vf_data[vf].vfdev = pvfdev;
-       else
-               dev_err(&pdev->dev,
-                       "Couldn't find pci dev ptr for VF %4.4x\n",
-                       thisvf_devfn);
-       return pvfdev != NULL;
+       return 0;
 }
 
-static int igb_find_enabled_vfs(struct igb_adapter *adapter)
+static bool igb_vfs_are_assigned(struct igb_adapter *adapter)
 {
-       struct e1000_hw *hw = &adapter->hw;
        struct pci_dev *pdev = adapter->pdev;
-       struct pci_dev *pvfdev;
-       u16 vf_devfn = 0;
-       u16 vf_stride;
-       unsigned int device_id;
-       int vfs_found = 0;
+       struct pci_dev *vfdev;
+       int dev_id;
 
        switch (adapter->hw.mac.type) {
        case e1000_82576:
-               device_id = IGB_82576_VF_DEV_ID;
-               /* VF Stride for 82576 is 2 */
-               vf_stride = 2;
+               dev_id = IGB_82576_VF_DEV_ID;
                break;
        case e1000_i350:
-               device_id = IGB_I350_VF_DEV_ID;
-               /* VF Stride for I350 is 4 */
-               vf_stride = 4;
+               dev_id = IGB_I350_VF_DEV_ID;
                break;
        default:
-               device_id = 0;
-               vf_stride = 0;
-               break;
-       }
-
-       vf_devfn = pdev->devfn + 0x80;
-       pvfdev = pci_get_device(hw->vendor_id, device_id, NULL);
-       while (pvfdev) {
-               if (pvfdev->devfn == vf_devfn &&
-                   (pvfdev->bus->number >= pdev->bus->number))
-                       vfs_found++;
-               vf_devfn += vf_stride;
-               pvfdev = pci_get_device(hw->vendor_id,
-                                       device_id, pvfdev);
+               return false;
        }
 
-       return vfs_found;
-}
-
-static int igb_check_vf_assignment(struct igb_adapter *adapter)
-{
-       int i;
-       for (i = 0; i < adapter->vfs_allocated_count; i++) {
-               if (adapter->vf_data[i].vfdev) {
-                       if (adapter->vf_data[i].vfdev->dev_flags &
-                           PCI_DEV_FLAGS_ASSIGNED)
+       /* loop through all the VFs to see if we own any that are assigned */
+       vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_id, NULL);
+       while (vfdev) {
+               /* if we don't own it we don't care */
+               if (vfdev->is_virtfn && vfdev->physfn == pdev) {
+                       /* if it is assigned we cannot release it */
+                       if (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
                                return true;
                }
+
+               vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_id, vfdev);
        }
+
        return false;
 }
 
@@ -5643,6 +5545,19 @@ static irqreturn_t igb_intr_msi(int irq, void *data)
                        mod_timer(&adapter->watchdog_timer, jiffies + 1);
        }
 
+#ifdef CONFIG_IGB_PTP
+       if (icr & E1000_ICR_TS) {
+               u32 tsicr = rd32(E1000_TSICR);
+
+               if (tsicr & E1000_TSICR_TXTS) {
+                       /* acknowledge the interrupt */
+                       wr32(E1000_TSICR, E1000_TSICR_TXTS);
+                       /* retrieve hardware timestamp */
+                       schedule_work(&adapter->ptp_tx_work);
+               }
+       }
+#endif /* CONFIG_IGB_PTP */
+
        napi_schedule(&q_vector->napi);
 
        return IRQ_HANDLED;
@@ -5684,6 +5599,19 @@ static irqreturn_t igb_intr(int irq, void *data)
                        mod_timer(&adapter->watchdog_timer, jiffies + 1);
        }
 
+#ifdef CONFIG_IGB_PTP
+       if (icr & E1000_ICR_TS) {
+               u32 tsicr = rd32(E1000_TSICR);
+
+               if (tsicr & E1000_TSICR_TXTS) {
+                       /* acknowledge the interrupt */
+                       wr32(E1000_TSICR, E1000_TSICR_TXTS);
+                       /* retrieve hardware timestamp */
+                       schedule_work(&adapter->ptp_tx_work);
+               }
+       }
+#endif /* CONFIG_IGB_PTP */
+
        napi_schedule(&q_vector->napi);
 
        return IRQ_HANDLED;
@@ -5743,37 +5671,6 @@ static int igb_poll(struct napi_struct *napi, int budget)
        return 0;
 }
 
-#ifdef CONFIG_IGB_PTP
-/**
- * igb_tx_hwtstamp - utility function which checks for TX time stamp
- * @q_vector: pointer to q_vector containing needed info
- * @buffer: pointer to igb_tx_buffer structure
- *
- * If we were asked to do hardware stamping and such a time stamp is
- * available, then it must have been for this skb here because we only
- * allow only one such packet into the queue.
- */
-static void igb_tx_hwtstamp(struct igb_q_vector *q_vector,
-                           struct igb_tx_buffer *buffer_info)
-{
-       struct igb_adapter *adapter = q_vector->adapter;
-       struct e1000_hw *hw = &adapter->hw;
-       struct skb_shared_hwtstamps shhwtstamps;
-       u64 regval;
-
-       /* if skb does not support hw timestamp or TX stamp not valid exit */
-       if (likely(!(buffer_info->tx_flags & IGB_TX_FLAGS_TSTAMP)) ||
-           !(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID))
-               return;
-
-       regval = rd32(E1000_TXSTMPL);
-       regval |= (u64)rd32(E1000_TXSTMPH) << 32;
-
-       igb_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
-       skb_tstamp_tx(buffer_info->skb, &shhwtstamps);
-}
-
-#endif
 /**
  * igb_clean_tx_irq - Reclaim resources after transmit completes
  * @q_vector: pointer to q_vector containing needed info
@@ -5785,7 +5682,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
        struct igb_adapter *adapter = q_vector->adapter;
        struct igb_ring *tx_ring = q_vector->tx.ring;
        struct igb_tx_buffer *tx_buffer;
-       union e1000_adv_tx_desc *tx_desc, *eop_desc;
+       union e1000_adv_tx_desc *tx_desc;
        unsigned int total_bytes = 0, total_packets = 0;
        unsigned int budget = q_vector->tx.work_limit;
        unsigned int i = tx_ring->next_to_clean;
@@ -5797,16 +5694,16 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
        tx_desc = IGB_TX_DESC(tx_ring, i);
        i -= tx_ring->count;
 
-       for (; budget; budget--) {
-               eop_desc = tx_buffer->next_to_watch;
-
-               /* prevent any other reads prior to eop_desc */
-               rmb();
+       do {
+               union e1000_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
 
                /* if next_to_watch is not set then there is no work pending */
                if (!eop_desc)
                        break;
 
+               /* prevent any other reads prior to eop_desc */
+               rmb();
+
                /* if DD is not set pending work has not been completed */
                if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
                        break;
@@ -5818,25 +5715,21 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
                total_bytes += tx_buffer->bytecount;
                total_packets += tx_buffer->gso_segs;
 
-#ifdef CONFIG_IGB_PTP
-               /* retrieve hardware timestamp */
-               igb_tx_hwtstamp(q_vector, tx_buffer);
-
-#endif
                /* free the skb */
                dev_kfree_skb_any(tx_buffer->skb);
-               tx_buffer->skb = NULL;
 
                /* unmap skb header data */
                dma_unmap_single(tx_ring->dev,
-                                tx_buffer->dma,
-                                tx_buffer->length,
+                                dma_unmap_addr(tx_buffer, dma),
+                                dma_unmap_len(tx_buffer, len),
                                 DMA_TO_DEVICE);
 
+               /* clear tx_buffer data */
+               tx_buffer->skb = NULL;
+               dma_unmap_len_set(tx_buffer, len, 0);
+
                /* clear last DMA location and unmap remaining buffers */
                while (tx_desc != eop_desc) {
-                       tx_buffer->dma = 0;
-
                        tx_buffer++;
                        tx_desc++;
                        i++;
@@ -5847,17 +5740,15 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
                        }
 
                        /* unmap any remaining paged data */
-                       if (tx_buffer->dma) {
+                       if (dma_unmap_len(tx_buffer, len)) {
                                dma_unmap_page(tx_ring->dev,
-                                              tx_buffer->dma,
-                                              tx_buffer->length,
+                                              dma_unmap_addr(tx_buffer, dma),
+                                              dma_unmap_len(tx_buffer, len),
                                               DMA_TO_DEVICE);
+                               dma_unmap_len_set(tx_buffer, len, 0);
                        }
                }
 
-               /* clear last DMA location */
-               tx_buffer->dma = 0;
-
                /* move us one more past the eop_desc for start of next pkt */
                tx_buffer++;
                tx_desc++;
@@ -5867,7 +5758,13 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
                        tx_buffer = tx_ring->tx_buffer_info;
                        tx_desc = IGB_TX_DESC(tx_ring, 0);
                }
-       }
+
+               /* issue prefetch for next Tx descriptor */
+               prefetch(tx_desc);
+
+               /* update budget accounting */
+               budget--;
+       } while (likely(budget));
 
        netdev_tx_completed_queue(txring_txq(tx_ring),
                                  total_packets, total_bytes);
@@ -5883,12 +5780,10 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
        if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
                struct e1000_hw *hw = &adapter->hw;
 
-               eop_desc = tx_buffer->next_to_watch;
-
                /* Detect a transmit hang in hardware, this serializes the
                 * check with the clearing of time_stamp and movement of i */
                clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
-               if (eop_desc &&
+               if (tx_buffer->next_to_watch &&
                    time_after(jiffies, tx_buffer->time_stamp +
                               (adapter->tx_timeout_factor * HZ)) &&
                    !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {
@@ -5912,9 +5807,9 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
                                tx_ring->next_to_use,
                                tx_ring->next_to_clean,
                                tx_buffer->time_stamp,
-                               eop_desc,
+                               tx_buffer->next_to_watch,
                                jiffies,
-                               eop_desc->wb.status);
+                               tx_buffer->next_to_watch->wb.status);
                        netif_stop_subqueue(tx_ring->netdev,
                                            tx_ring->queue_index);
 
@@ -5994,47 +5889,6 @@ static inline void igb_rx_hash(struct igb_ring *ring,
                skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
 }
 
-#ifdef CONFIG_IGB_PTP
-static void igb_rx_hwtstamp(struct igb_q_vector *q_vector,
-                           union e1000_adv_rx_desc *rx_desc,
-                           struct sk_buff *skb)
-{
-       struct igb_adapter *adapter = q_vector->adapter;
-       struct e1000_hw *hw = &adapter->hw;
-       u64 regval;
-
-       if (!igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP |
-                                      E1000_RXDADV_STAT_TS))
-               return;
-
-       /*
-        * If this bit is set, then the RX registers contain the time stamp. No
-        * other packet will be time stamped until we read these registers, so
-        * read the registers to make them available again. Because only one
-        * packet can be time stamped at a time, we know that the register
-        * values must belong to this one here and therefore we don't need to
-        * compare any of the additional attributes stored for it.
-        *
-        * If nothing went wrong, then it should have a shared tx_flags that we
-        * can turn into a skb_shared_hwtstamps.
-        */
-       if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
-               u32 *stamp = (u32 *)skb->data;
-               regval = le32_to_cpu(*(stamp + 2));
-               regval |= (u64)le32_to_cpu(*(stamp + 3)) << 32;
-               skb_pull(skb, IGB_TS_HDR_LEN);
-       } else {
-               if(!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
-                       return;
-
-               regval = rd32(E1000_RXSTMPL);
-               regval |= (u64)rd32(E1000_RXSTMPH) << 32;
-       }
-
-       igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
-}
-
-#endif
 static void igb_rx_vlan(struct igb_ring *ring,
                        union e1000_adv_rx_desc *rx_desc,
                        struct sk_buff *skb)
@@ -6146,8 +6000,8 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
                }
 
 #ifdef CONFIG_IGB_PTP
-               igb_rx_hwtstamp(q_vector, rx_desc, skb);
-#endif
+               igb_ptp_rx_hwtstamp(q_vector, rx_desc, skb);
+#endif /* CONFIG_IGB_PTP */
                igb_rx_hash(rx_ring, rx_desc, skb);
                igb_rx_checksum(rx_ring, rx_desc, skb);
                igb_rx_vlan(rx_ring, rx_desc, skb);
@@ -6340,181 +6194,6 @@ static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
        return 0;
 }
 
-/**
- * igb_hwtstamp_ioctl - control hardware time stamping
- * @netdev:
- * @ifreq:
- * @cmd:
- *
- * Outgoing time stamping can be enabled and disabled. Play nice and
- * disable it when requested, although it shouldn't case any overhead
- * when no packet needs it. At most one packet in the queue may be
- * marked for time stamping, otherwise it would be impossible to tell
- * for sure to which packet the hardware time stamp belongs.
- *
- * Incoming time stamping has to be configured via the hardware
- * filters. Not all combinations are supported, in particular event
- * type has to be specified. Matching the kind of event packet is
- * not supported, with the exception of "all V2 events regardless of
- * level 2 or 4".
- *
- **/
-static int igb_hwtstamp_ioctl(struct net_device *netdev,
-                             struct ifreq *ifr, int cmd)
-{
-       struct igb_adapter *adapter = netdev_priv(netdev);
-       struct e1000_hw *hw = &adapter->hw;
-       struct hwtstamp_config config;
-       u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
-       u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
-       u32 tsync_rx_cfg = 0;
-       bool is_l4 = false;
-       bool is_l2 = false;
-       u32 regval;
-
-       if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
-               return -EFAULT;
-
-       /* reserved for future extensions */
-       if (config.flags)
-               return -EINVAL;
-
-       switch (config.tx_type) {
-       case HWTSTAMP_TX_OFF:
-               tsync_tx_ctl = 0;
-       case HWTSTAMP_TX_ON:
-               break;
-       default:
-               return -ERANGE;
-       }
-
-       switch (config.rx_filter) {
-       case HWTSTAMP_FILTER_NONE:
-               tsync_rx_ctl = 0;
-               break;
-       case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
-       case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
-       case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
-       case HWTSTAMP_FILTER_ALL:
-               /*
-                * register TSYNCRXCFG must be set, therefore it is not
-                * possible to time stamp both Sync and Delay_Req messages
-                * => fall back to time stamping all packets
-                */
-               tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
-               config.rx_filter = HWTSTAMP_FILTER_ALL;
-               break;
-       case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
-               tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
-               tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
-               is_l4 = true;
-               break;
-       case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
-               tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
-               tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
-               is_l4 = true;
-               break;
-       case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
-       case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
-               tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
-               tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
-               is_l2 = true;
-               is_l4 = true;
-               config.rx_filter = HWTSTAMP_FILTER_SOME;
-               break;
-       case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
-       case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
-               tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
-               tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
-               is_l2 = true;
-               is_l4 = true;
-               config.rx_filter = HWTSTAMP_FILTER_SOME;
-               break;
-       case HWTSTAMP_FILTER_PTP_V2_EVENT:
-       case HWTSTAMP_FILTER_PTP_V2_SYNC:
-       case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
-               tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
-               config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
-               is_l2 = true;
-               is_l4 = true;
-               break;
-       default:
-               return -ERANGE;
-       }
-
-       if (hw->mac.type == e1000_82575) {
-               if (tsync_rx_ctl | tsync_tx_ctl)
-                       return -EINVAL;
-               return 0;
-       }
-
-       /*
-        * Per-packet timestamping only works if all packets are
-        * timestamped, so enable timestamping in all packets as
-        * long as one rx filter was configured.
-        */
-       if ((hw->mac.type >= e1000_82580) && tsync_rx_ctl) {
-               tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
-               tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
-       }
-
-       /* enable/disable TX */
-       regval = rd32(E1000_TSYNCTXCTL);
-       regval &= ~E1000_TSYNCTXCTL_ENABLED;
-       regval |= tsync_tx_ctl;
-       wr32(E1000_TSYNCTXCTL, regval);
-
-       /* enable/disable RX */
-       regval = rd32(E1000_TSYNCRXCTL);
-       regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
-       regval |= tsync_rx_ctl;
-       wr32(E1000_TSYNCRXCTL, regval);
-
-       /* define which PTP packets are time stamped */
-       wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);
-
-       /* define ethertype filter for timestamped packets */
-       if (is_l2)
-               wr32(E1000_ETQF(3),
-                               (E1000_ETQF_FILTER_ENABLE | /* enable filter */
-                                E1000_ETQF_1588 | /* enable timestamping */
-                                ETH_P_1588));     /* 1588 eth protocol type */
-       else
-               wr32(E1000_ETQF(3), 0);
-
-#define PTP_PORT 319
-       /* L4 Queue Filter[3]: filter by destination port and protocol */
-       if (is_l4) {
-               u32 ftqf = (IPPROTO_UDP /* UDP */
-                       | E1000_FTQF_VF_BP /* VF not compared */
-                       | E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */
-                       | E1000_FTQF_MASK); /* mask all inputs */
-               ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */
-
-               wr32(E1000_IMIR(3), htons(PTP_PORT));
-               wr32(E1000_IMIREXT(3),
-                    (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
-               if (hw->mac.type == e1000_82576) {
-                       /* enable source port check */
-                       wr32(E1000_SPQF(3), htons(PTP_PORT));
-                       ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
-               }
-               wr32(E1000_FTQF(3), ftqf);
-       } else {
-               wr32(E1000_FTQF(3), E1000_FTQF_MASK);
-       }
-       wrfl();
-
-       adapter->hwtstamp_config = config;
-
-       /* clear TX/RX time stamp registers, just to be sure */
-       regval = rd32(E1000_TXSTMPH);
-       regval = rd32(E1000_RXSTMPH);
-
-       return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
-               -EFAULT : 0;
-}
-
 /**
  * igb_ioctl -
  * @netdev:
@@ -6528,8 +6207,10 @@ static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
        case SIOCGMIIREG:
        case SIOCSMIIREG:
                return igb_mii_ioctl(netdev, ifr, cmd);
+#ifdef CONFIG_IGB_PTP
        case SIOCSHWTSTAMP:
-               return igb_hwtstamp_ioctl(netdev, ifr, cmd);
+               return igb_ptp_hwtstamp_ioctl(netdev, ifr, cmd);
+#endif /* CONFIG_IGB_PTP */
        default:
                return -EOPNOTSUPP;
        }
@@ -6667,6 +6348,10 @@ int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx)
        default:
                goto err_inval;
        }
+
+       /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
+       adapter->hw.phy.mdix = AUTO_ALL_MODES;
+
        return 0;
 
 err_inval:
index c846ea9131a3ab514a410165de0e15e8ff1697b4..ee21445157a3e8907a7b01267806d10b3ffab6e2 100644 (file)
  *   2^40 * 10^-9 /  60  = 18.3 minutes.
  */
 
-#define IGB_OVERFLOW_PERIOD    (HZ * 60 * 9)
-#define INCPERIOD_82576                (1 << E1000_TIMINCA_16NS_SHIFT)
-#define INCVALUE_82576_MASK    ((1 << E1000_TIMINCA_16NS_SHIFT) - 1)
-#define INCVALUE_82576         (16 << IGB_82576_TSYNC_SHIFT)
-#define IGB_NBITS_82580                40
+#define IGB_SYSTIM_OVERFLOW_PERIOD     (HZ * 60 * 9)
+#define INCPERIOD_82576                        (1 << E1000_TIMINCA_16NS_SHIFT)
+#define INCVALUE_82576_MASK            ((1 << E1000_TIMINCA_16NS_SHIFT) - 1)
+#define INCVALUE_82576                 (16 << IGB_82576_TSYNC_SHIFT)
+#define IGB_NBITS_82580                        40
 
 /*
  * SYSTIM read access for the 82576
  */
 
-static cycle_t igb_82576_systim_read(const struct cyclecounter *cc)
+static cycle_t igb_ptp_read_82576(const struct cyclecounter *cc)
 {
-       u64 val;
-       u32 lo, hi;
        struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc);
        struct e1000_hw *hw = &igb->hw;
+       u64 val;
+       u32 lo, hi;
 
        lo = rd32(E1000_SYSTIML);
        hi = rd32(E1000_SYSTIMH);
@@ -99,12 +99,12 @@ static cycle_t igb_82576_systim_read(const struct cyclecounter *cc)
  * SYSTIM read access for the 82580
  */
 
-static cycle_t igb_82580_systim_read(const struct cyclecounter *cc)
+static cycle_t igb_ptp_read_82580(const struct cyclecounter *cc)
 {
-       u64 val;
-       u32 lo, hi, jk;
        struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc);
        struct e1000_hw *hw = &igb->hw;
+       u64 val;
+       u32 lo, hi, jk;
 
        /*
         * The timestamp latches on lowest register read. For the 82580
@@ -121,17 +121,102 @@ static cycle_t igb_82580_systim_read(const struct cyclecounter *cc)
        return val;
 }
 
+/*
+ * SYSTIM read access for I210/I211
+ */
+
+static void igb_ptp_read_i210(struct igb_adapter *adapter, struct timespec *ts)
+{
+       struct e1000_hw *hw = &adapter->hw;
+       u32 sec, nsec, jk;
+
+       /*
+        * The timestamp latches on lowest register read. For I210/I211, the
+        * lowest register is SYSTIMR. Since we only need to provide nanosecond
+        * resolution, we can ignore it.
+        */
+       jk = rd32(E1000_SYSTIMR);
+       nsec = rd32(E1000_SYSTIML);
+       sec = rd32(E1000_SYSTIMH);
+
+       ts->tv_sec = sec;
+       ts->tv_nsec = nsec;
+}
+
+static void igb_ptp_write_i210(struct igb_adapter *adapter,
+                              const struct timespec *ts)
+{
+       struct e1000_hw *hw = &adapter->hw;
+
+       /*
+        * Writing the SYSTIMR register is not necessary as it only provides
+        * sub-nanosecond resolution.
+        */
+       wr32(E1000_SYSTIML, ts->tv_nsec);
+       wr32(E1000_SYSTIMH, ts->tv_sec);
+}
+
+/**
+ * igb_ptp_systim_to_hwtstamp - convert system time value to hw timestamp
+ * @adapter: board private structure
+ * @hwtstamps: timestamp structure to update
+ * @systim: unsigned 64bit system time value.
+ *
+ * We need to convert the system time value stored in the RX/TXSTMP registers
+ * into a hwtstamp which can be used by the upper level timestamping functions.
+ *
+ * The 'tmreg_lock' spinlock is used to protect the consistency of the
+ * system time value. This is needed because reading the 64 bit time
+ * value involves reading two (or three) 32 bit registers. The first
+ * read latches the value. Ditto for writing.
+ *
+ * In addition, here have extended the system time with an overflow
+ * counter in software.
+ **/
+static void igb_ptp_systim_to_hwtstamp(struct igb_adapter *adapter,
+                                      struct skb_shared_hwtstamps *hwtstamps,
+                                      u64 systim)
+{
+       unsigned long flags;
+       u64 ns;
+
+       switch (adapter->hw.mac.type) {
+       case e1000_82576:
+       case e1000_82580:
+       case e1000_i350:
+               spin_lock_irqsave(&adapter->tmreg_lock, flags);
+
+               ns = timecounter_cyc2time(&adapter->tc, systim);
+
+               spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
+
+               memset(hwtstamps, 0, sizeof(*hwtstamps));
+               hwtstamps->hwtstamp = ns_to_ktime(ns);
+               break;
+       case e1000_i210:
+       case e1000_i211:
+               memset(hwtstamps, 0, sizeof(*hwtstamps));
+               /* Upper 32 bits contain s, lower 32 bits contain ns. */
+               hwtstamps->hwtstamp = ktime_set(systim >> 32,
+                                               systim & 0xFFFFFFFF);
+               break;
+       default:
+               break;
+       }
+}
+
 /*
  * PTP clock operations
  */
 
-static int ptp_82576_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+static int igb_ptp_adjfreq_82576(struct ptp_clock_info *ptp, s32 ppb)
 {
+       struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
+                                              ptp_caps);
+       struct e1000_hw *hw = &igb->hw;
+       int neg_adj = 0;
        u64 rate;
        u32 incvalue;
-       int neg_adj = 0;
-       struct igb_adapter *igb = container_of(ptp, struct igb_adapter, caps);
-       struct e1000_hw *hw = &igb->hw;
 
        if (ppb < 0) {
                neg_adj = 1;
@@ -153,13 +238,14 @@ static int ptp_82576_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
        return 0;
 }
 
-static int ptp_82580_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+static int igb_ptp_adjfreq_82580(struct ptp_clock_info *ptp, s32 ppb)
 {
+       struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
+                                              ptp_caps);
+       struct e1000_hw *hw = &igb->hw;
+       int neg_adj = 0;
        u64 rate;
        u32 inca;
-       int neg_adj = 0;
-       struct igb_adapter *igb = container_of(ptp, struct igb_adapter, caps);
-       struct e1000_hw *hw = &igb->hw;
 
        if (ppb < 0) {
                neg_adj = 1;
@@ -178,11 +264,12 @@ static int ptp_82580_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
        return 0;
 }
 
-static int igb_adjtime(struct ptp_clock_info *ptp, s64 delta)
+static int igb_ptp_adjtime_82576(struct ptp_clock_info *ptp, s64 delta)
 {
-       s64 now;
+       struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
+                                              ptp_caps);
        unsigned long flags;
-       struct igb_adapter *igb = container_of(ptp, struct igb_adapter, caps);
+       s64 now;
 
        spin_lock_irqsave(&igb->tmreg_lock, flags);
 
@@ -195,12 +282,32 @@ static int igb_adjtime(struct ptp_clock_info *ptp, s64 delta)
        return 0;
 }
 
-static int igb_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+static int igb_ptp_adjtime_i210(struct ptp_clock_info *ptp, s64 delta)
 {
+       struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
+                                              ptp_caps);
+       unsigned long flags;
+       struct timespec now, then = ns_to_timespec(delta);
+
+       spin_lock_irqsave(&igb->tmreg_lock, flags);
+
+       igb_ptp_read_i210(igb, &now);
+       now = timespec_add(now, then);
+       igb_ptp_write_i210(igb, (const struct timespec *)&now);
+
+       spin_unlock_irqrestore(&igb->tmreg_lock, flags);
+
+       return 0;
+}
+
+static int igb_ptp_gettime_82576(struct ptp_clock_info *ptp,
+                                struct timespec *ts)
+{
+       struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
+                                              ptp_caps);
+       unsigned long flags;
        u64 ns;
        u32 remainder;
-       unsigned long flags;
-       struct igb_adapter *igb = container_of(ptp, struct igb_adapter, caps);
 
        spin_lock_irqsave(&igb->tmreg_lock, flags);
 
@@ -214,11 +321,29 @@ static int igb_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
        return 0;
 }
 
-static int igb_settime(struct ptp_clock_info *ptp, const struct timespec *ts)
+static int igb_ptp_gettime_i210(struct ptp_clock_info *ptp,
+                               struct timespec *ts)
 {
-       u64 ns;
+       struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
+                                              ptp_caps);
        unsigned long flags;
-       struct igb_adapter *igb = container_of(ptp, struct igb_adapter, caps);
+
+       spin_lock_irqsave(&igb->tmreg_lock, flags);
+
+       igb_ptp_read_i210(igb, ts);
+
+       spin_unlock_irqrestore(&igb->tmreg_lock, flags);
+
+       return 0;
+}
+
+static int igb_ptp_settime_82576(struct ptp_clock_info *ptp,
+                                const struct timespec *ts)
+{
+       struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
+                                              ptp_caps);
+       unsigned long flags;
+       u64 ns;
 
        ns = ts->tv_sec * 1000000000ULL;
        ns += ts->tv_nsec;
@@ -232,77 +357,369 @@ static int igb_settime(struct ptp_clock_info *ptp, const struct timespec *ts)
        return 0;
 }
 
-static int ptp_82576_enable(struct ptp_clock_info *ptp,
-                           struct ptp_clock_request *rq, int on)
+static int igb_ptp_settime_i210(struct ptp_clock_info *ptp,
+                               const struct timespec *ts)
 {
-       return -EOPNOTSUPP;
+       struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
+                                              ptp_caps);
+       unsigned long flags;
+
+       spin_lock_irqsave(&igb->tmreg_lock, flags);
+
+       igb_ptp_write_i210(igb, ts);
+
+       spin_unlock_irqrestore(&igb->tmreg_lock, flags);
+
+       return 0;
 }
 
-static int ptp_82580_enable(struct ptp_clock_info *ptp,
-                           struct ptp_clock_request *rq, int on)
+static int igb_ptp_enable(struct ptp_clock_info *ptp,
+                         struct ptp_clock_request *rq, int on)
 {
        return -EOPNOTSUPP;
 }
 
-static void igb_overflow_check(struct work_struct *work)
+/**
+ * igb_ptp_tx_work
+ * @work: pointer to work struct
+ *
+ * This work function polls the TSYNCTXCTL valid bit to determine when a
+ * timestamp has been taken for the current stored skb.
+ */
+void igb_ptp_tx_work(struct work_struct *work)
+{
+       struct igb_adapter *adapter = container_of(work, struct igb_adapter,
+                                                  ptp_tx_work);
+       struct e1000_hw *hw = &adapter->hw;
+       u32 tsynctxctl;
+
+       if (!adapter->ptp_tx_skb)
+               return;
+
+       tsynctxctl = rd32(E1000_TSYNCTXCTL);
+       if (tsynctxctl & E1000_TSYNCTXCTL_VALID)
+               igb_ptp_tx_hwtstamp(adapter);
+       else
+               /* reschedule to check later */
+               schedule_work(&adapter->ptp_tx_work);
+}
+
+static void igb_ptp_overflow_check(struct work_struct *work)
 {
-       struct timespec ts;
        struct igb_adapter *igb =
-               container_of(work, struct igb_adapter, overflow_work.work);
+               container_of(work, struct igb_adapter, ptp_overflow_work.work);
+       struct timespec ts;
 
-       igb_gettime(&igb->caps, &ts);
+       igb->ptp_caps.gettime(&igb->ptp_caps, &ts);
 
        pr_debug("igb overflow check at %ld.%09lu\n", ts.tv_sec, ts.tv_nsec);
 
-       schedule_delayed_work(&igb->overflow_work, IGB_OVERFLOW_PERIOD);
+       schedule_delayed_work(&igb->ptp_overflow_work,
+                             IGB_SYSTIM_OVERFLOW_PERIOD);
+}
+
+/**
+ * igb_ptp_tx_hwtstamp - utility function which checks for TX time stamp
+ * @adapter: Board private structure.
+ *
+ * If we were asked to do hardware stamping and such a time stamp is
+ * available, then it must have been for this skb here because we only
+ * allow only one such packet into the queue.
+ */
+void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
+{
+       struct e1000_hw *hw = &adapter->hw;
+       struct skb_shared_hwtstamps shhwtstamps;
+       u64 regval;
+
+       regval = rd32(E1000_TXSTMPL);
+       regval |= (u64)rd32(E1000_TXSTMPH) << 32;
+
+       igb_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
+       skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps);
+       dev_kfree_skb_any(adapter->ptp_tx_skb);
+       adapter->ptp_tx_skb = NULL;
+}
+
+void igb_ptp_rx_hwtstamp(struct igb_q_vector *q_vector,
+                        union e1000_adv_rx_desc *rx_desc,
+                        struct sk_buff *skb)
+{
+       struct igb_adapter *adapter = q_vector->adapter;
+       struct e1000_hw *hw = &adapter->hw;
+       u64 regval;
+
+       if (!igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP |
+                                      E1000_RXDADV_STAT_TS))
+               return;
+
+       /*
+        * If this bit is set, then the RX registers contain the time stamp. No
+        * other packet will be time stamped until we read these registers, so
+        * read the registers to make them available again. Because only one
+        * packet can be time stamped at a time, we know that the register
+        * values must belong to this one here and therefore we don't need to
+        * compare any of the additional attributes stored for it.
+        *
+        * If nothing went wrong, then it should have a shared tx_flags that we
+        * can turn into a skb_shared_hwtstamps.
+        */
+       if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
+               u32 *stamp = (u32 *)skb->data;
+               regval = le32_to_cpu(*(stamp + 2));
+               regval |= (u64)le32_to_cpu(*(stamp + 3)) << 32;
+               skb_pull(skb, IGB_TS_HDR_LEN);
+       } else {
+               if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
+                       return;
+
+               regval = rd32(E1000_RXSTMPL);
+               regval |= (u64)rd32(E1000_RXSTMPH) << 32;
+       }
+
+       igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
+}
+
+/**
+ * igb_ptp_hwtstamp_ioctl - control hardware time stamping
+ * @netdev:
+ * @ifreq:
+ * @cmd:
+ *
+ * Outgoing time stamping can be enabled and disabled. Play nice and
+ * disable it when requested, although it shouldn't case any overhead
+ * when no packet needs it. At most one packet in the queue may be
+ * marked for time stamping, otherwise it would be impossible to tell
+ * for sure to which packet the hardware time stamp belongs.
+ *
+ * Incoming time stamping has to be configured via the hardware
+ * filters. Not all combinations are supported, in particular event
+ * type has to be specified. Matching the kind of event packet is
+ * not supported, with the exception of "all V2 events regardless of
+ * level 2 or 4".
+ *
+ **/
+int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
+                          struct ifreq *ifr, int cmd)
+{
+       struct igb_adapter *adapter = netdev_priv(netdev);
+       struct e1000_hw *hw = &adapter->hw;
+       struct hwtstamp_config config;
+       u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
+       u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
+       u32 tsync_rx_cfg = 0;
+       bool is_l4 = false;
+       bool is_l2 = false;
+       u32 regval;
+
+       if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+               return -EFAULT;
+
+       /* reserved for future extensions */
+       if (config.flags)
+               return -EINVAL;
+
+       switch (config.tx_type) {
+       case HWTSTAMP_TX_OFF:
+               tsync_tx_ctl = 0;
+       case HWTSTAMP_TX_ON:
+               break;
+       default:
+               return -ERANGE;
+       }
+
+       switch (config.rx_filter) {
+       case HWTSTAMP_FILTER_NONE:
+               tsync_rx_ctl = 0;
+               break;
+       case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+       case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+       case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+       case HWTSTAMP_FILTER_ALL:
+               /*
+                * register TSYNCRXCFG must be set, therefore it is not
+                * possible to time stamp both Sync and Delay_Req messages
+                * => fall back to time stamping all packets
+                */
+               tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
+               config.rx_filter = HWTSTAMP_FILTER_ALL;
+               break;
+       case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+               tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
+               tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
+               is_l4 = true;
+               break;
+       case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+               tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
+               tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
+               is_l4 = true;
+               break;
+       case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+       case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+               tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
+               tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
+               is_l2 = true;
+               is_l4 = true;
+               config.rx_filter = HWTSTAMP_FILTER_SOME;
+               break;
+       case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+       case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+               tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
+               tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
+               is_l2 = true;
+               is_l4 = true;
+               config.rx_filter = HWTSTAMP_FILTER_SOME;
+               break;
+       case HWTSTAMP_FILTER_PTP_V2_EVENT:
+       case HWTSTAMP_FILTER_PTP_V2_SYNC:
+       case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+               tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
+               config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+               is_l2 = true;
+               is_l4 = true;
+               break;
+       default:
+               return -ERANGE;
+       }
+
+       if (hw->mac.type == e1000_82575) {
+               if (tsync_rx_ctl | tsync_tx_ctl)
+                       return -EINVAL;
+               return 0;
+       }
+
+       /*
+        * Per-packet timestamping only works if all packets are
+        * timestamped, so enable timestamping in all packets as
+        * long as one rx filter was configured.
+        */
+       if ((hw->mac.type >= e1000_82580) && tsync_rx_ctl) {
+               tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
+               tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
+
+               if ((hw->mac.type == e1000_i210) ||
+                   (hw->mac.type == e1000_i211)) {
+                       regval = rd32(E1000_RXPBS);
+                       regval |= E1000_RXPBS_CFG_TS_EN;
+                       wr32(E1000_RXPBS, regval);
+               }
+       }
+
+       /* enable/disable TX */
+       regval = rd32(E1000_TSYNCTXCTL);
+       regval &= ~E1000_TSYNCTXCTL_ENABLED;
+       regval |= tsync_tx_ctl;
+       wr32(E1000_TSYNCTXCTL, regval);
+
+       /* enable/disable RX */
+       regval = rd32(E1000_TSYNCRXCTL);
+       regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
+       regval |= tsync_rx_ctl;
+       wr32(E1000_TSYNCRXCTL, regval);
+
+       /* define which PTP packets are time stamped */
+       wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);
+
+       /* define ethertype filter for timestamped packets */
+       if (is_l2)
+               wr32(E1000_ETQF(3),
+                    (E1000_ETQF_FILTER_ENABLE | /* enable filter */
+                     E1000_ETQF_1588 | /* enable timestamping */
+                     ETH_P_1588));     /* 1588 eth protocol type */
+       else
+               wr32(E1000_ETQF(3), 0);
+
+#define PTP_PORT 319
+       /* L4 Queue Filter[3]: filter by destination port and protocol */
+       if (is_l4) {
+               u32 ftqf = (IPPROTO_UDP /* UDP */
+                       | E1000_FTQF_VF_BP /* VF not compared */
+                       | E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */
+                       | E1000_FTQF_MASK); /* mask all inputs */
+               ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */
+
+               wr32(E1000_IMIR(3), htons(PTP_PORT));
+               wr32(E1000_IMIREXT(3),
+                    (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
+               if (hw->mac.type == e1000_82576) {
+                       /* enable source port check */
+                       wr32(E1000_SPQF(3), htons(PTP_PORT));
+                       ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
+               }
+               wr32(E1000_FTQF(3), ftqf);
+       } else {
+               wr32(E1000_FTQF(3), E1000_FTQF_MASK);
+       }
+       wrfl();
+
+       /* clear TX/RX time stamp registers, just to be sure */
+       regval = rd32(E1000_TXSTMPL);
+       regval = rd32(E1000_TXSTMPH);
+       regval = rd32(E1000_RXSTMPL);
+       regval = rd32(E1000_RXSTMPH);
+
+       return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+               -EFAULT : 0;
 }
 
 void igb_ptp_init(struct igb_adapter *adapter)
 {
        struct e1000_hw *hw = &adapter->hw;
+       struct net_device *netdev = adapter->netdev;
 
        switch (hw->mac.type) {
-       case e1000_i210:
-       case e1000_i211:
-       case e1000_i350:
+       case e1000_82576:
+               snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
+               adapter->ptp_caps.owner = THIS_MODULE;
+               adapter->ptp_caps.max_adj = 1000000000;
+               adapter->ptp_caps.n_ext_ts = 0;
+               adapter->ptp_caps.pps = 0;
+               adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82576;
+               adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576;
+               adapter->ptp_caps.gettime = igb_ptp_gettime_82576;
+               adapter->ptp_caps.settime = igb_ptp_settime_82576;
+               adapter->ptp_caps.enable = igb_ptp_enable;
+               adapter->cc.read = igb_ptp_read_82576;
+               adapter->cc.mask = CLOCKSOURCE_MASK(64);
+               adapter->cc.mult = 1;
+               adapter->cc.shift = IGB_82576_TSYNC_SHIFT;
+               /* Dial the nominal frequency. */
+               wr32(E1000_TIMINCA, INCPERIOD_82576 | INCVALUE_82576);
+               break;
        case e1000_82580:
-               adapter->caps.owner     = THIS_MODULE;
-               strcpy(adapter->caps.name, "igb-82580");
-               adapter->caps.max_adj   = 62499999;
-               adapter->caps.n_ext_ts  = 0;
-               adapter->caps.pps       = 0;
-               adapter->caps.adjfreq   = ptp_82580_adjfreq;
-               adapter->caps.adjtime   = igb_adjtime;
-               adapter->caps.gettime   = igb_gettime;
-               adapter->caps.settime   = igb_settime;
-               adapter->caps.enable    = ptp_82580_enable;
-               adapter->cc.read        = igb_82580_systim_read;
-               adapter->cc.mask        = CLOCKSOURCE_MASK(IGB_NBITS_82580);
-               adapter->cc.mult        = 1;
-               adapter->cc.shift       = 0;
+       case e1000_i350:
+               snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
+               adapter->ptp_caps.owner = THIS_MODULE;
+               adapter->ptp_caps.max_adj = 62499999;
+               adapter->ptp_caps.n_ext_ts = 0;
+               adapter->ptp_caps.pps = 0;
+               adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82580;
+               adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576;
+               adapter->ptp_caps.gettime = igb_ptp_gettime_82576;
+               adapter->ptp_caps.settime = igb_ptp_settime_82576;
+               adapter->ptp_caps.enable = igb_ptp_enable;
+               adapter->cc.read = igb_ptp_read_82580;
+               adapter->cc.mask = CLOCKSOURCE_MASK(IGB_NBITS_82580);
+               adapter->cc.mult = 1;
+               adapter->cc.shift = 0;
                /* Enable the timer functions by clearing bit 31. */
                wr32(E1000_TSAUXC, 0x0);
                break;
-
-       case e1000_82576:
-               adapter->caps.owner     = THIS_MODULE;
-               strcpy(adapter->caps.name, "igb-82576");
-               adapter->caps.max_adj   = 1000000000;
-               adapter->caps.n_ext_ts  = 0;
-               adapter->caps.pps       = 0;
-               adapter->caps.adjfreq   = ptp_82576_adjfreq;
-               adapter->caps.adjtime   = igb_adjtime;
-               adapter->caps.gettime   = igb_gettime;
-               adapter->caps.settime   = igb_settime;
-               adapter->caps.enable    = ptp_82576_enable;
-               adapter->cc.read        = igb_82576_systim_read;
-               adapter->cc.mask        = CLOCKSOURCE_MASK(64);
-               adapter->cc.mult        = 1;
-               adapter->cc.shift       = IGB_82576_TSYNC_SHIFT;
-               /* Dial the nominal frequency. */
-               wr32(E1000_TIMINCA, INCPERIOD_82576 | INCVALUE_82576);
+       case e1000_i210:
+       case e1000_i211:
+               snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
+               adapter->ptp_caps.owner = THIS_MODULE;
+               adapter->ptp_caps.max_adj = 62499999;
+               adapter->ptp_caps.n_ext_ts = 0;
+               adapter->ptp_caps.pps = 0;
+               adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82580;
+               adapter->ptp_caps.adjtime = igb_ptp_adjtime_i210;
+               adapter->ptp_caps.gettime = igb_ptp_gettime_i210;
+               adapter->ptp_caps.settime = igb_ptp_settime_i210;
+               adapter->ptp_caps.enable = igb_ptp_enable;
+               /* Enable the timer functions by clearing bit 31. */
+               wr32(E1000_TSAUXC, 0x0);
                break;
-
        default:
                adapter->ptp_clock = NULL;
                return;
@@ -310,86 +727,114 @@ void igb_ptp_init(struct igb_adapter *adapter)
 
        wrfl();
 
-       timecounter_init(&adapter->tc, &adapter->cc,
-                        ktime_to_ns(ktime_get_real()));
+       spin_lock_init(&adapter->tmreg_lock);
+       INIT_WORK(&adapter->ptp_tx_work, igb_ptp_tx_work);
+
+       /* Initialize the clock and overflow work for devices that need it. */
+       if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) {
+               struct timespec ts = ktime_to_timespec(ktime_get_real());
 
-       INIT_DELAYED_WORK(&adapter->overflow_work, igb_overflow_check);
+               igb_ptp_settime_i210(&adapter->ptp_caps, &ts);
+       } else {
+               timecounter_init(&adapter->tc, &adapter->cc,
+                                ktime_to_ns(ktime_get_real()));
 
-       spin_lock_init(&adapter->tmreg_lock);
+               INIT_DELAYED_WORK(&adapter->ptp_overflow_work,
+                                 igb_ptp_overflow_check);
 
-       schedule_delayed_work(&adapter->overflow_work, IGB_OVERFLOW_PERIOD);
+               schedule_delayed_work(&adapter->ptp_overflow_work,
+                                     IGB_SYSTIM_OVERFLOW_PERIOD);
+       }
+
+       /* Initialize the time sync interrupts for devices that support it. */
+       if (hw->mac.type >= e1000_82580) {
+               wr32(E1000_TSIM, E1000_TSIM_TXTS);
+               wr32(E1000_IMS, E1000_IMS_TS);
+       }
 
-       adapter->ptp_clock = ptp_clock_register(&adapter->caps);
+       adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
+                                               &adapter->pdev->dev);
        if (IS_ERR(adapter->ptp_clock)) {
                adapter->ptp_clock = NULL;
                dev_err(&adapter->pdev->dev, "ptp_clock_register failed\n");
-       } else
+       } else {
                dev_info(&adapter->pdev->dev, "added PHC on %s\n",
                         adapter->netdev->name);
+               adapter->flags |= IGB_FLAG_PTP;
+       }
 }
 
-void igb_ptp_remove(struct igb_adapter *adapter)
+/**
+ * igb_ptp_stop - Disable PTP device and stop the overflow check.
+ * @adapter: Board private structure.
+ *
+ * This function stops the PTP support and cancels the delayed work.
+ **/
+void igb_ptp_stop(struct igb_adapter *adapter)
 {
        switch (adapter->hw.mac.type) {
-       case e1000_i211:
-       case e1000_i210:
-       case e1000_i350:
-       case e1000_82580:
        case e1000_82576:
-               cancel_delayed_work_sync(&adapter->overflow_work);
+       case e1000_82580:
+       case e1000_i350:
+               cancel_delayed_work_sync(&adapter->ptp_overflow_work);
+               break;
+       case e1000_i210:
+       case e1000_i211:
+               /* No delayed work to cancel. */
                break;
        default:
                return;
        }
 
+       cancel_work_sync(&adapter->ptp_tx_work);
+
        if (adapter->ptp_clock) {
                ptp_clock_unregister(adapter->ptp_clock);
                dev_info(&adapter->pdev->dev, "removed PHC on %s\n",
                         adapter->netdev->name);
+               adapter->flags &= ~IGB_FLAG_PTP;
        }
 }
 
 /**
- * igb_systim_to_hwtstamp - convert system time value to hw timestamp
- * @adapter: board private structure
- * @hwtstamps: timestamp structure to update
- * @systim: unsigned 64bit system time value.
- *
- * We need to convert the system time value stored in the RX/TXSTMP registers
- * into a hwtstamp which can be used by the upper level timestamping functions.
+ * igb_ptp_reset - Re-enable the adapter for PTP following a reset.
+ * @adapter: Board private structure.
  *
- * The 'tmreg_lock' spinlock is used to protect the consistency of the
- * system time value. This is needed because reading the 64 bit time
- * value involves reading two (or three) 32 bit registers. The first
- * read latches the value. Ditto for writing.
- *
- * In addition, here have extended the system time with an overflow
- * counter in software.
+ * This function handles the reset work required to re-enable the PTP device.
  **/
-void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
-                           struct skb_shared_hwtstamps *hwtstamps,
-                           u64 systim)
+void igb_ptp_reset(struct igb_adapter *adapter)
 {
-       u64 ns;
-       unsigned long flags;
+       struct e1000_hw *hw = &adapter->hw;
+
+       if (!(adapter->flags & IGB_FLAG_PTP))
+               return;
 
        switch (adapter->hw.mac.type) {
+       case e1000_82576:
+               /* Dial the nominal frequency. */
+               wr32(E1000_TIMINCA, INCPERIOD_82576 | INCVALUE_82576);
+               break;
+       case e1000_82580:
+       case e1000_i350:
        case e1000_i210:
        case e1000_i211:
-       case e1000_i350:
-       case e1000_82580:
-       case e1000_82576:
+               /* Enable the timer functions and interrupts. */
+               wr32(E1000_TSAUXC, 0x0);
+               wr32(E1000_TSIM, E1000_TSIM_TXTS);
+               wr32(E1000_IMS, E1000_IMS_TS);
                break;
        default:
+               /* No work to do. */
                return;
        }
 
-       spin_lock_irqsave(&adapter->tmreg_lock, flags);
+       /* Re-initialize the timer. */
+       if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) {
+               struct timespec ts = ktime_to_timespec(ktime_get_real());
 
-       ns = timecounter_cyc2time(&adapter->tc, systim);
-
-       spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
-
-       memset(hwtstamps, 0, sizeof(*hwtstamps));
-       hwtstamps->hwtstamp = ns_to_ktime(ns);
+               igb_ptp_settime_i210(&adapter->ptp_caps, &ts);
+       } else {
+               timecounter_init(&adapter->tc, &adapter->cc,
+                                ktime_to_ns(ktime_get_real()));
+       }
 }
index 5fd5d04c26c9a850543d966655b6b32956293a3b..89f40e51fc134f0537fe8cf6f9f9c3efca44f94e 100644 (file)
@@ -32,7 +32,7 @@
 
 obj-$(CONFIG_IXGBE) += ixgbe.o
 
-ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
+ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o ixgbe_debugfs.o\
               ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
               ixgbe_mbx.o ixgbe_x540.o ixgbe_lib.o
 
index b9623e9ea895334e4c2ac6cf2b7b3ce86d28b821..5bd26763554c8926282739bf18d308d92c892990 100644 (file)
@@ -78,6 +78,9 @@
 
 /* Supported Rx Buffer Sizes */
 #define IXGBE_RXBUFFER_256    256  /* Used for skb receive header */
+#define IXGBE_RXBUFFER_2K    2048
+#define IXGBE_RXBUFFER_3K    3072
+#define IXGBE_RXBUFFER_4K    4096
 #define IXGBE_MAX_RXBUFFER  16384  /* largest size for a single descriptor */
 
 /*
 #define IXGBE_TX_FLAGS_FSO             (u32)(1 << 6)
 #define IXGBE_TX_FLAGS_TXSW            (u32)(1 << 7)
 #define IXGBE_TX_FLAGS_TSTAMP          (u32)(1 << 8)
+#define IXGBE_TX_FLAGS_NO_IFCS         (u32)(1 << 9)
 #define IXGBE_TX_FLAGS_VLAN_MASK       0xffff0000
 #define IXGBE_TX_FLAGS_VLAN_PRIO_MASK  0xe0000000
 #define IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT  29
@@ -293,16 +297,25 @@ struct ixgbe_ring_feature {
  * this is twice the size of a half page we need to double the page order
  * for FCoE enabled Rx queues.
  */
-#if defined(IXGBE_FCOE) && (PAGE_SIZE < 8192)
-static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring)
+static inline unsigned int ixgbe_rx_bufsz(struct ixgbe_ring *ring)
 {
-       return test_bit(__IXGBE_RX_FCOE, &ring->state) ? 1 : 0;
+#ifdef IXGBE_FCOE
+       if (test_bit(__IXGBE_RX_FCOE, &ring->state))
+               return (PAGE_SIZE < 8192) ? IXGBE_RXBUFFER_4K :
+                                           IXGBE_RXBUFFER_3K;
+#endif
+       return IXGBE_RXBUFFER_2K;
 }
-#else
-#define ixgbe_rx_pg_order(_ring) 0
+
+static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring)
+{
+#ifdef IXGBE_FCOE
+       if (test_bit(__IXGBE_RX_FCOE, &ring->state))
+               return (PAGE_SIZE < 8192) ? 1 : 0;
 #endif
+       return 0;
+}
 #define ixgbe_rx_pg_size(_ring) (PAGE_SIZE << ixgbe_rx_pg_order(_ring))
-#define ixgbe_rx_bufsz(_ring) ((PAGE_SIZE / 2) << ixgbe_rx_pg_order(_ring))
 
 struct ixgbe_ring_container {
        struct ixgbe_ring *ring;        /* pointer to linked list of rings */
@@ -584,6 +597,9 @@ struct ixgbe_adapter {
 #ifdef CONFIG_IXGBE_HWMON
        struct hwmon_buff ixgbe_hwmon_buff;
 #endif /* CONFIG_IXGBE_HWMON */
+#ifdef CONFIG_DEBUG_FS
+       struct dentry *ixgbe_dbg_adapter;
+#endif /*CONFIG_DEBUG_FS*/
 };
 
 struct ixgbe_fdir_filter {
@@ -712,7 +728,12 @@ extern int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
                                  struct netdev_fcoe_hbainfo *info);
 extern u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter);
 #endif /* IXGBE_FCOE */
-
+#ifdef CONFIG_DEBUG_FS
+extern void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter);
+extern void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter);
+extern void ixgbe_dbg_init(void);
+extern void ixgbe_dbg_exit(void);
+#endif /* CONFIG_DEBUG_FS */
 static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring)
 {
        return netdev_get_tx_queue(ring->netdev, ring->queue_index);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
new file mode 100644 (file)
index 0000000..8d3a218
--- /dev/null
@@ -0,0 +1,300 @@
+/*******************************************************************************
+
+  Intel 10 Gigabit PCI Express Linux driver
+  Copyright(c) 1999 - 2012 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/debugfs.h>
+#include <linux/module.h>
+
+#include "ixgbe.h"
+
+static struct dentry *ixgbe_dbg_root;
+
+static char ixgbe_dbg_reg_ops_buf[256] = "";
+
+/**
+ * ixgbe_dbg_reg_ops_open - prep the debugfs pokee data item when opened
+ * @inode: inode that was opened
+ * @filp:  file info
+ *
+ * Stash the adapter pointer hiding in the inode into the file pointer where
+ * we can find it later in the read and write calls
+ **/
+static int ixgbe_dbg_reg_ops_open(struct inode *inode, struct file *filp)
+{
+       filp->private_data = inode->i_private;
+       return 0;
+}
+
+/**
+ * ixgbe_dbg_reg_ops_read - read for reg_ops datum
+ * @filp: the opened file
+ * @buffer: where to write the data for the user to read
+ * @count: the size of the user's buffer
+ * @ppos: file position offset
+ **/
+static ssize_t ixgbe_dbg_reg_ops_read(struct file *filp, char __user *buffer,
+                                   size_t count, loff_t *ppos)
+{
+       struct ixgbe_adapter *adapter = filp->private_data;
+       char buf[256];
+       int bytes_not_copied;
+       int len;
+
+       /* don't allow partial reads */
+       if (*ppos != 0)
+               return 0;
+
+       len = snprintf(buf, sizeof(buf), "%s: %s\n",
+                      adapter->netdev->name, ixgbe_dbg_reg_ops_buf);
+       if (count < len)
+               return -ENOSPC;
+       bytes_not_copied = copy_to_user(buffer, buf, len);
+       if (bytes_not_copied < 0)
+               return bytes_not_copied;
+
+       *ppos = len;
+       return len;
+}
+
+/**
+ * ixgbe_dbg_reg_ops_write - write into reg_ops datum
+ * @filp: the opened file
+ * @buffer: where to find the user's data
+ * @count: the length of the user's data
+ * @ppos: file position offset
+ **/
+static ssize_t ixgbe_dbg_reg_ops_write(struct file *filp,
+                                    const char __user *buffer,
+                                    size_t count, loff_t *ppos)
+{
+       struct ixgbe_adapter *adapter = filp->private_data;
+       int bytes_not_copied;
+
+       /* don't allow partial writes */
+       if (*ppos != 0)
+               return 0;
+       if (count >= sizeof(ixgbe_dbg_reg_ops_buf))
+               return -ENOSPC;
+
+       bytes_not_copied = copy_from_user(ixgbe_dbg_reg_ops_buf, buffer, count);
+       if (bytes_not_copied < 0)
+               return bytes_not_copied;
+       else if (bytes_not_copied < count)
+               count -= bytes_not_copied;
+       else
+               return -ENOSPC;
+       ixgbe_dbg_reg_ops_buf[count] = '\0';
+
+       if (strncmp(ixgbe_dbg_reg_ops_buf, "write", 5) == 0) {
+               u32 reg, value;
+               int cnt;
+               cnt = sscanf(&ixgbe_dbg_reg_ops_buf[5], "%x %x", &reg, &value);
+               if (cnt == 2) {
+                       IXGBE_WRITE_REG(&adapter->hw, reg, value);
+                       value = IXGBE_READ_REG(&adapter->hw, reg);
+                       e_dev_info("write: 0x%08x = 0x%08x\n", reg, value);
+               } else {
+                       e_dev_info("write <reg> <value>\n");
+               }
+       } else if (strncmp(ixgbe_dbg_reg_ops_buf, "read", 4) == 0) {
+               u32 reg, value;
+               int cnt;
+               cnt = sscanf(&ixgbe_dbg_reg_ops_buf[4], "%x", &reg);
+               if (cnt == 1) {
+                       value = IXGBE_READ_REG(&adapter->hw, reg);
+                       e_dev_info("read 0x%08x = 0x%08x\n", reg, value);
+               } else {
+                       e_dev_info("read <reg>\n");
+               }
+       } else {
+               e_dev_info("Unknown command %s\n", ixgbe_dbg_reg_ops_buf);
+               e_dev_info("Available commands:\n");
+               e_dev_info("   read <reg>\n");
+               e_dev_info("   write <reg> <value>\n");
+       }
+       return count;
+}
+
+static const struct file_operations ixgbe_dbg_reg_ops_fops = {
+       .owner = THIS_MODULE,
+       .open =  ixgbe_dbg_reg_ops_open,
+       .read =  ixgbe_dbg_reg_ops_read,
+       .write = ixgbe_dbg_reg_ops_write,
+};
+
+static char ixgbe_dbg_netdev_ops_buf[256] = "";
+
+/**
+ * ixgbe_dbg_netdev_ops_open - prep the debugfs netdev_ops data item
+ * @inode: inode that was opened
+ * @filp: file info
+ *
+ * Stash the adapter pointer hiding in the inode into the file pointer
+ * where we can find it later in the read and write calls
+ **/
+static int ixgbe_dbg_netdev_ops_open(struct inode *inode, struct file *filp)
+{
+       filp->private_data = inode->i_private;
+       return 0;
+}
+
+/**
+ * ixgbe_dbg_netdev_ops_read - read for netdev_ops datum
+ * @filp: the opened file
+ * @buffer: where to write the data for the user to read
+ * @count: the size of the user's buffer
+ * @ppos: file position offset
+ **/
+static ssize_t ixgbe_dbg_netdev_ops_read(struct file *filp,
+                                        char __user *buffer,
+                                        size_t count, loff_t *ppos)
+{
+       struct ixgbe_adapter *adapter = filp->private_data;
+       char buf[256];
+       int bytes_not_copied;
+       int len;
+
+       /* don't allow partial reads */
+       if (*ppos != 0)
+               return 0;
+
+       len = snprintf(buf, sizeof(buf), "%s: %s\n",
+                      adapter->netdev->name, ixgbe_dbg_netdev_ops_buf);
+       if (count < len)
+               return -ENOSPC;
+       bytes_not_copied = copy_to_user(buffer, buf, len);
+       if (bytes_not_copied < 0)
+               return bytes_not_copied;
+
+       *ppos = len;
+       return len;
+}
+
+/**
+ * ixgbe_dbg_netdev_ops_write - write into netdev_ops datum
+ * @filp: the opened file
+ * @buffer: where to find the user's data
+ * @count: the length of the user's data
+ * @ppos: file position offset
+ **/
+static ssize_t ixgbe_dbg_netdev_ops_write(struct file *filp,
+                                         const char __user *buffer,
+                                         size_t count, loff_t *ppos)
+{
+       struct ixgbe_adapter *adapter = filp->private_data;
+       int bytes_not_copied;
+
+       /* don't allow partial writes */
+       if (*ppos != 0)
+               return 0;
+       if (count >= sizeof(ixgbe_dbg_netdev_ops_buf))
+               return -ENOSPC;
+
+       bytes_not_copied = copy_from_user(ixgbe_dbg_netdev_ops_buf,
+                                         buffer, count);
+       if (bytes_not_copied < 0)
+               return bytes_not_copied;
+       else if (bytes_not_copied < count)
+               count -= bytes_not_copied;
+       else
+               return -ENOSPC;
+       ixgbe_dbg_netdev_ops_buf[count] = '\0';
+
+       if (strncmp(ixgbe_dbg_netdev_ops_buf, "tx_timeout", 10) == 0) {
+               adapter->netdev->netdev_ops->ndo_tx_timeout(adapter->netdev);
+               e_dev_info("tx_timeout called\n");
+       } else {
+               e_dev_info("Unknown command: %s\n", ixgbe_dbg_netdev_ops_buf);
+               e_dev_info("Available commands:\n");
+               e_dev_info("    tx_timeout\n");
+       }
+       return count;
+}
+
+static const struct file_operations ixgbe_dbg_netdev_ops_fops = {
+       .owner = THIS_MODULE,
+       .open = ixgbe_dbg_netdev_ops_open,
+       .read = ixgbe_dbg_netdev_ops_read,
+       .write = ixgbe_dbg_netdev_ops_write,
+};
+
+/**
+ * ixgbe_dbg_adapter_init - setup the debugfs directory for the adapter
+ * @adapter: the adapter that is starting up
+ **/
+void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter)
+{
+       const char *name = pci_name(adapter->pdev);
+       struct dentry *pfile;
+       adapter->ixgbe_dbg_adapter = debugfs_create_dir(name, ixgbe_dbg_root);
+       if (adapter->ixgbe_dbg_adapter) {
+               pfile = debugfs_create_file("reg_ops", 0600,
+                                           adapter->ixgbe_dbg_adapter, adapter,
+                                           &ixgbe_dbg_reg_ops_fops);
+               if (!pfile)
+                       e_dev_err("debugfs reg_ops for %s failed\n", name);
+               pfile = debugfs_create_file("netdev_ops", 0600,
+                                           adapter->ixgbe_dbg_adapter, adapter,
+                                           &ixgbe_dbg_netdev_ops_fops);
+               if (!pfile)
+                       e_dev_err("debugfs netdev_ops for %s failed\n", name);
+       } else {
+               e_dev_err("debugfs entry for %s failed\n", name);
+       }
+}
+
+/**
+ * ixgbe_dbg_adapter_exit - clear out the adapter's debugfs entries
+ * @adapter: the adapter whose debugfs entries are being removed
+ **/
+void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter)
+{
+       if (adapter->ixgbe_dbg_adapter)
+               debugfs_remove_recursive(adapter->ixgbe_dbg_adapter);
+       adapter->ixgbe_dbg_adapter = NULL;
+}
+
+/**
+ * ixgbe_dbg_init - start up debugfs for the driver
+ **/
+void ixgbe_dbg_init(void)
+{
+       ixgbe_dbg_root = debugfs_create_dir(ixgbe_driver_name, NULL);
+       if (ixgbe_dbg_root == NULL)
+               pr_err("init of debugfs failed\n");
+}
+
+/**
+ * ixgbe_dbg_exit - clean out the driver's debugfs entries
+ **/
+void ixgbe_dbg_exit(void)
+{
+       debugfs_remove_recursive(ixgbe_dbg_root);
+}
+
+#endif /* CONFIG_DEBUG_FS */
index ee61819d6088e2f35e4586b9dd664ef40e6da06f..868af693821957bfc4c9541e8c99c7117b1d2247 100644 (file)
@@ -1167,7 +1167,7 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
        }
 
        bi->dma = dma;
-       bi->page_offset ^= ixgbe_rx_bufsz(rx_ring);
+       bi->page_offset = 0;
 
        return true;
 }
@@ -1320,29 +1320,6 @@ static unsigned int ixgbe_get_headlen(unsigned char *data,
                return max_len;
 }
 
-static void ixgbe_get_rsc_cnt(struct ixgbe_ring *rx_ring,
-                             union ixgbe_adv_rx_desc *rx_desc,
-                             struct sk_buff *skb)
-{
-       __le32 rsc_enabled;
-       u32 rsc_cnt;
-
-       if (!ring_is_rsc_enabled(rx_ring))
-               return;
-
-       rsc_enabled = rx_desc->wb.lower.lo_dword.data &
-                     cpu_to_le32(IXGBE_RXDADV_RSCCNT_MASK);
-
-       /* If this is an RSC frame rsc_cnt should be non-zero */
-       if (!rsc_enabled)
-               return;
-
-       rsc_cnt = le32_to_cpu(rsc_enabled);
-       rsc_cnt >>= IXGBE_RXDADV_RSCCNT_SHIFT;
-
-       IXGBE_CB(skb)->append_cnt += rsc_cnt - 1;
-}
-
 static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring,
                                   struct sk_buff *skb)
 {
@@ -1440,16 +1417,28 @@ static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring,
 
        prefetch(IXGBE_RX_DESC(rx_ring, ntc));
 
-       if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
-               return false;
+       /* update RSC append count if present */
+       if (ring_is_rsc_enabled(rx_ring)) {
+               __le32 rsc_enabled = rx_desc->wb.lower.lo_dword.data &
+                                    cpu_to_le32(IXGBE_RXDADV_RSCCNT_MASK);
+
+               if (unlikely(rsc_enabled)) {
+                       u32 rsc_cnt = le32_to_cpu(rsc_enabled);
+
+                       rsc_cnt >>= IXGBE_RXDADV_RSCCNT_SHIFT;
+                       IXGBE_CB(skb)->append_cnt += rsc_cnt - 1;
 
-       /* append_cnt indicates packet is RSC, if so fetch nextp */
-       if (IXGBE_CB(skb)->append_cnt) {
-               ntc = le32_to_cpu(rx_desc->wb.upper.status_error);
-               ntc &= IXGBE_RXDADV_NEXTP_MASK;
-               ntc >>= IXGBE_RXDADV_NEXTP_SHIFT;
+                       /* update ntc based on RSC value */
+                       ntc = le32_to_cpu(rx_desc->wb.upper.status_error);
+                       ntc &= IXGBE_RXDADV_NEXTP_MASK;
+                       ntc >>= IXGBE_RXDADV_NEXTP_SHIFT;
+               }
        }
 
+       /* if we are the last buffer then there is nothing else to do */
+       if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
+               return false;
+
        /* place skb in next buffer to be received */
        rx_ring->rx_buffer_info[ntc].skb = skb;
        rx_ring->rx_stats.non_eop_descs++;
@@ -1457,6 +1446,78 @@ static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring,
        return true;
 }
 
+/**
+ * ixgbe_pull_tail - ixgbe specific version of skb_pull_tail
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @skb: pointer to current skb being adjusted
+ *
+ * This function is an ixgbe specific version of __pskb_pull_tail.  The
+ * main difference between this version and the original function is that
+ * this function can make several assumptions about the state of things
+ * that allow for significant optimizations versus the standard function.
+ * As a result we can do things like drop a frag and maintain an accurate
+ * truesize for the skb.
+ */
+static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring,
+                           struct sk_buff *skb)
+{
+       struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+       unsigned char *va;
+       unsigned int pull_len;
+
+       /*
+        * it is valid to use page_address instead of kmap since we are
+        * working with pages allocated out of the lomem pool per
+        * alloc_page(GFP_ATOMIC)
+        */
+       va = skb_frag_address(frag);
+
+       /*
+        * we need the header to contain the greater of either ETH_HLEN or
+        * 60 bytes if the skb->len is less than 60 for skb_pad.
+        */
+       pull_len = ixgbe_get_headlen(va, IXGBE_RX_HDR_SIZE);
+
+       /* align pull length to size of long to optimize memcpy performance */
+       skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
+
+       /* update all of the pointers */
+       skb_frag_size_sub(frag, pull_len);
+       frag->page_offset += pull_len;
+       skb->data_len -= pull_len;
+       skb->tail += pull_len;
+}
+
+/**
+ * ixgbe_dma_sync_frag - perform DMA sync for first frag of SKB
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @skb: pointer to current skb being updated
+ *
+ * This function provides a basic DMA sync up for the first fragment of an
+ * skb.  The reason for doing this is that the first fragment cannot be
+ * unmapped until we have reached the end of packet descriptor for a buffer
+ * chain.
+ */
+static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
+                               struct sk_buff *skb)
+{
+       /* if the page was released unmap it, else just sync our portion */
+       if (unlikely(IXGBE_CB(skb)->page_released)) {
+               dma_unmap_page(rx_ring->dev, IXGBE_CB(skb)->dma,
+                              ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
+               IXGBE_CB(skb)->page_released = false;
+       } else {
+               struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+
+               dma_sync_single_range_for_cpu(rx_ring->dev,
+                                             IXGBE_CB(skb)->dma,
+                                             frag->page_offset,
+                                             ixgbe_rx_bufsz(rx_ring),
+                                             DMA_FROM_DEVICE);
+       }
+       IXGBE_CB(skb)->dma = 0;
+}
+
 /**
  * ixgbe_cleanup_headers - Correct corrupted or empty headers
  * @rx_ring: rx descriptor ring packet is being transacted on
@@ -1479,24 +1540,7 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
                                  union ixgbe_adv_rx_desc *rx_desc,
                                  struct sk_buff *skb)
 {
-       struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
        struct net_device *netdev = rx_ring->netdev;
-       unsigned char *va;
-       unsigned int pull_len;
-
-       /* if the page was released unmap it, else just sync our portion */
-       if (unlikely(IXGBE_CB(skb)->page_released)) {
-               dma_unmap_page(rx_ring->dev, IXGBE_CB(skb)->dma,
-                              ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
-               IXGBE_CB(skb)->page_released = false;
-       } else {
-               dma_sync_single_range_for_cpu(rx_ring->dev,
-                                             IXGBE_CB(skb)->dma,
-                                             frag->page_offset,
-                                             ixgbe_rx_bufsz(rx_ring),
-                                             DMA_FROM_DEVICE);
-       }
-       IXGBE_CB(skb)->dma = 0;
 
        /* verify that the packet does not have any known errors */
        if (unlikely(ixgbe_test_staterr(rx_desc,
@@ -1506,40 +1550,9 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
                return true;
        }
 
-       /*
-        * it is valid to use page_address instead of kmap since we are
-        * working with pages allocated out of the lomem pool per
-        * alloc_page(GFP_ATOMIC)
-        */
-       va = skb_frag_address(frag);
-
-       /*
-        * we need the header to contain the greater of either ETH_HLEN or
-        * 60 bytes if the skb->len is less than 60 for skb_pad.
-        */
-       pull_len = skb_frag_size(frag);
-       if (pull_len > IXGBE_RX_HDR_SIZE)
-               pull_len = ixgbe_get_headlen(va, IXGBE_RX_HDR_SIZE);
-
-       /* align pull length to size of long to optimize memcpy performance */
-       skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
-
-       /* update all of the pointers */
-       skb_frag_size_sub(frag, pull_len);
-       frag->page_offset += pull_len;
-       skb->data_len -= pull_len;
-       skb->tail += pull_len;
-
-       /*
-        * if we sucked the frag empty then we should free it,
-        * if there are other frags here something is screwed up in hardware
-        */
-       if (skb_frag_size(frag) == 0) {
-               BUG_ON(skb_shinfo(skb)->nr_frags != 1);
-               skb_shinfo(skb)->nr_frags = 0;
-               __skb_frag_unref(frag);
-               skb->truesize -= ixgbe_rx_bufsz(rx_ring);
-       }
+       /* place header in linear portion of buffer */
+       if (skb_is_nonlinear(skb))
+               ixgbe_pull_tail(rx_ring, skb);
 
 #ifdef IXGBE_FCOE
        /* do not attempt to pad FCoE Frames as this will disrupt DDP */
@@ -1559,34 +1572,18 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
        return false;
 }
 
-/**
- * ixgbe_can_reuse_page - determine if we can reuse a page
- * @rx_buffer: pointer to rx_buffer containing the page we want to reuse
- *
- * Returns true if page can be reused in another Rx buffer
- **/
-static inline bool ixgbe_can_reuse_page(struct ixgbe_rx_buffer *rx_buffer)
-{
-       struct page *page = rx_buffer->page;
-
-       /* if we are only owner of page and it is local we can reuse it */
-       return likely(page_count(page) == 1) &&
-              likely(page_to_nid(page) == numa_node_id());
-}
-
 /**
  * ixgbe_reuse_rx_page - page flip buffer and store it back on the ring
  * @rx_ring: rx descriptor ring to store buffers on
  * @old_buff: donor buffer to have page reused
  *
- * Syncronizes page for reuse by the adapter
+ * Synchronizes page for reuse by the adapter
  **/
 static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
                                struct ixgbe_rx_buffer *old_buff)
 {
        struct ixgbe_rx_buffer *new_buff;
        u16 nta = rx_ring->next_to_alloc;
-       u16 bufsz = ixgbe_rx_bufsz(rx_ring);
 
        new_buff = &rx_ring->rx_buffer_info[nta];
 
@@ -1597,17 +1594,13 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
        /* transfer page from old buffer to new buffer */
        new_buff->page = old_buff->page;
        new_buff->dma = old_buff->dma;
-
-       /* flip page offset to other buffer and store to new_buff */
-       new_buff->page_offset = old_buff->page_offset ^ bufsz;
+       new_buff->page_offset = old_buff->page_offset;
 
        /* sync the buffer for use by the device */
        dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma,
-                                        new_buff->page_offset, bufsz,
+                                        new_buff->page_offset,
+                                        ixgbe_rx_bufsz(rx_ring),
                                         DMA_FROM_DEVICE);
-
-       /* bump ref count on page before it is given to the stack */
-       get_page(new_buff->page);
 }
 
 /**
@@ -1617,20 +1610,159 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
  * @rx_desc: descriptor containing length of buffer written by hardware
  * @skb: sk_buff to place the data into
  *
- * This function is based on skb_add_rx_frag.  I would have used that
- * function however it doesn't handle the truesize case correctly since we
- * are allocating more memory than might be used for a single receive.
+ * This function will add the data contained in rx_buffer->page to the skb.
+ * This is done either through a direct copy if the data in the buffer is
+ * less than the skb header size, otherwise it will just attach the page as
+ * a frag to the skb.
+ *
+ * The function will then update the page offset if necessary and return
+ * true if the buffer can be reused by the adapter.
  **/
-static void ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
+static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
                              struct ixgbe_rx_buffer *rx_buffer,
-                             struct sk_buff *skb, int size)
+                             union ixgbe_adv_rx_desc *rx_desc,
+                             struct sk_buff *skb)
 {
-       skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
-                          rx_buffer->page, rx_buffer->page_offset,
-                          size);
-       skb->len += size;
-       skb->data_len += size;
-       skb->truesize += ixgbe_rx_bufsz(rx_ring);
+       struct page *page = rx_buffer->page;
+       unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
+#if (PAGE_SIZE < 8192)
+       unsigned int truesize = ixgbe_rx_bufsz(rx_ring);
+#else
+       unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
+       unsigned int last_offset = ixgbe_rx_pg_size(rx_ring) -
+                                  ixgbe_rx_bufsz(rx_ring);
+#endif
+
+       if ((size <= IXGBE_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) {
+               unsigned char *va = page_address(page) + rx_buffer->page_offset;
+
+               memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
+
+               /* we can reuse buffer as-is, just make sure it is local */
+               if (likely(page_to_nid(page) == numa_node_id()))
+                       return true;
+
+               /* this page cannot be reused so discard it */
+               put_page(page);
+               return false;
+       }
+
+       skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+                       rx_buffer->page_offset, size, truesize);
+
+       /* avoid re-using remote pages */
+       if (unlikely(page_to_nid(page) != numa_node_id()))
+               return false;
+
+#if (PAGE_SIZE < 8192)
+       /* if we are only owner of page we can reuse it */
+       if (unlikely(page_count(page) != 1))
+               return false;
+
+       /* flip page offset to other buffer */
+       rx_buffer->page_offset ^= truesize;
+
+       /*
+        * since we are the only owner of the page and we need to
+        * increment it, just set the value to 2 in order to avoid
+        * an unnecessary locked operation
+        */
+       atomic_set(&page->_count, 2);
+#else
+       /* move offset up to the next cache line */
+       rx_buffer->page_offset += truesize;
+
+       if (rx_buffer->page_offset > last_offset)
+               return false;
+
+       /* bump ref count on page before it is given to the stack */
+       get_page(page);
+#endif
+
+       return true;
+}
+
+static struct sk_buff *ixgbe_fetch_rx_buffer(struct ixgbe_ring *rx_ring,
+                                            union ixgbe_adv_rx_desc *rx_desc)
+{
+       struct ixgbe_rx_buffer *rx_buffer;
+       struct sk_buff *skb;
+       struct page *page;
+
+       rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+       page = rx_buffer->page;
+       prefetchw(page);
+
+       skb = rx_buffer->skb;
+
+       if (likely(!skb)) {
+               void *page_addr = page_address(page) +
+                                 rx_buffer->page_offset;
+
+               /* prefetch first cache line of first page */
+               prefetch(page_addr);
+#if L1_CACHE_BYTES < 128
+               prefetch(page_addr + L1_CACHE_BYTES);
+#endif
+
+               /* allocate a skb to store the frags */
+               skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
+                                               IXGBE_RX_HDR_SIZE);
+               if (unlikely(!skb)) {
+                       rx_ring->rx_stats.alloc_rx_buff_failed++;
+                       return NULL;
+               }
+
+               /*
+                * we will be copying header into skb->data in
+                * pskb_may_pull so it is in our interest to prefetch
+                * it now to avoid a possible cache miss
+                */
+               prefetchw(skb->data);
+
+               /*
+                * Delay unmapping of the first packet. It carries the
+                * header information, HW may still access the header
+                * after the writeback.  Only unmap it when EOP is
+                * reached
+                */
+               if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
+                       goto dma_sync;
+
+               IXGBE_CB(skb)->dma = rx_buffer->dma;
+       } else {
+               if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
+                       ixgbe_dma_sync_frag(rx_ring, skb);
+
+dma_sync:
+               /* we are reusing so sync this buffer for CPU use */
+               dma_sync_single_range_for_cpu(rx_ring->dev,
+                                             rx_buffer->dma,
+                                             rx_buffer->page_offset,
+                                             ixgbe_rx_bufsz(rx_ring),
+                                             DMA_FROM_DEVICE);
+       }
+
+       /* pull page into skb */
+       if (ixgbe_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
+               /* hand second half of page back to the ring */
+               ixgbe_reuse_rx_page(rx_ring, rx_buffer);
+       } else if (IXGBE_CB(skb)->dma == rx_buffer->dma) {
+               /* the page has been released from the ring */
+               IXGBE_CB(skb)->page_released = true;
+       } else {
+               /* we are not reusing the buffer so unmap it */
+               dma_unmap_page(rx_ring->dev, rx_buffer->dma,
+                              ixgbe_rx_pg_size(rx_ring),
+                              DMA_FROM_DEVICE);
+       }
+
+       /* clear contents of buffer_info */
+       rx_buffer->skb = NULL;
+       rx_buffer->dma = 0;
+       rx_buffer->page = NULL;
+
+       return skb;
 }
 
 /**
@@ -1653,16 +1785,14 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
        unsigned int total_rx_bytes = 0, total_rx_packets = 0;
 #ifdef IXGBE_FCOE
        struct ixgbe_adapter *adapter = q_vector->adapter;
-       int ddp_bytes = 0;
+       int ddp_bytes;
+       unsigned int mss = 0;
 #endif /* IXGBE_FCOE */
        u16 cleaned_count = ixgbe_desc_unused(rx_ring);
 
        do {
-               struct ixgbe_rx_buffer *rx_buffer;
                union ixgbe_adv_rx_desc *rx_desc;
                struct sk_buff *skb;
-               struct page *page;
-               u16 ntc;
 
                /* return some buffers to hardware, one at a time is too slow */
                if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
@@ -1670,9 +1800,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                        cleaned_count = 0;
                }
 
-               ntc = rx_ring->next_to_clean;
-               rx_desc = IXGBE_RX_DESC(rx_ring, ntc);
-               rx_buffer = &rx_ring->rx_buffer_info[ntc];
+               rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
 
                if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD))
                        break;
@@ -1684,75 +1812,12 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                 */
                rmb();
 
-               page = rx_buffer->page;
-               prefetchw(page);
-
-               skb = rx_buffer->skb;
-
-               if (likely(!skb)) {
-                       void *page_addr = page_address(page) +
-                                         rx_buffer->page_offset;
-
-                       /* prefetch first cache line of first page */
-                       prefetch(page_addr);
-#if L1_CACHE_BYTES < 128
-                       prefetch(page_addr + L1_CACHE_BYTES);
-#endif
+               /* retrieve a buffer from the ring */
+               skb = ixgbe_fetch_rx_buffer(rx_ring, rx_desc);
 
-                       /* allocate a skb to store the frags */
-                       skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
-                                                       IXGBE_RX_HDR_SIZE);
-                       if (unlikely(!skb)) {
-                               rx_ring->rx_stats.alloc_rx_buff_failed++;
-                               break;
-                       }
-
-                       /*
-                        * we will be copying header into skb->data in
-                        * pskb_may_pull so it is in our interest to prefetch
-                        * it now to avoid a possible cache miss
-                        */
-                       prefetchw(skb->data);
-
-                       /*
-                        * Delay unmapping of the first packet. It carries the
-                        * header information, HW may still access the header
-                        * after the writeback.  Only unmap it when EOP is
-                        * reached
-                        */
-                       IXGBE_CB(skb)->dma = rx_buffer->dma;
-               } else {
-                       /* we are reusing so sync this buffer for CPU use */
-                       dma_sync_single_range_for_cpu(rx_ring->dev,
-                                                     rx_buffer->dma,
-                                                     rx_buffer->page_offset,
-                                                     ixgbe_rx_bufsz(rx_ring),
-                                                     DMA_FROM_DEVICE);
-               }
-
-               /* pull page into skb */
-               ixgbe_add_rx_frag(rx_ring, rx_buffer, skb,
-                                 le16_to_cpu(rx_desc->wb.upper.length));
-
-               if (ixgbe_can_reuse_page(rx_buffer)) {
-                       /* hand second half of page back to the ring */
-                       ixgbe_reuse_rx_page(rx_ring, rx_buffer);
-               } else if (IXGBE_CB(skb)->dma == rx_buffer->dma) {
-                       /* the page has been released from the ring */
-                       IXGBE_CB(skb)->page_released = true;
-               } else {
-                       /* we are not reusing the buffer so unmap it */
-                       dma_unmap_page(rx_ring->dev, rx_buffer->dma,
-                                      ixgbe_rx_pg_size(rx_ring),
-                                      DMA_FROM_DEVICE);
-               }
-
-               /* clear contents of buffer_info */
-               rx_buffer->skb = NULL;
-               rx_buffer->dma = 0;
-               rx_buffer->page = NULL;
-
-               ixgbe_get_rsc_cnt(rx_ring, rx_desc, skb);
+               /* exit if we failed to retrieve a buffer */
+               if (!skb)
+                       break;
 
                cleaned_count++;
 
@@ -1775,6 +1840,20 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                /* if ddp, not passing to ULD unless for FCP_RSP or error */
                if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) {
                        ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
+                       /* include DDPed FCoE data */
+                       if (ddp_bytes > 0) {
+                               if (!mss) {
+                                       mss = rx_ring->netdev->mtu -
+                                               sizeof(struct fcoe_hdr) -
+                                               sizeof(struct fc_frame_header) -
+                                               sizeof(struct fcoe_crc_eof);
+                                       if (mss > 512)
+                                               mss &= ~511;
+                               }
+                               total_rx_bytes += ddp_bytes;
+                               total_rx_packets += DIV_ROUND_UP(ddp_bytes,
+                                                                mss);
+                       }
                        if (!ddp_bytes) {
                                dev_kfree_skb_any(skb);
                                continue;
@@ -1788,21 +1867,6 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                budget--;
        } while (likely(budget));
 
-#ifdef IXGBE_FCOE
-       /* include DDPed FCoE data */
-       if (ddp_bytes > 0) {
-               unsigned int mss;
-
-               mss = rx_ring->netdev->mtu - sizeof(struct fcoe_hdr) -
-                       sizeof(struct fc_frame_header) -
-                       sizeof(struct fcoe_crc_eof);
-               if (mss > 512)
-                       mss &= ~511;
-               total_rx_bytes += ddp_bytes;
-               total_rx_packets += DIV_ROUND_UP(ddp_bytes, mss);
-       }
-
-#endif /* IXGBE_FCOE */
        u64_stats_update_begin(&rx_ring->syncp);
        rx_ring->stats.packets += total_rx_packets;
        rx_ring->stats.bytes += total_rx_bytes;
@@ -2868,11 +2932,7 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
        srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
 
        /* configure the packet buffer length */
-#if PAGE_SIZE > IXGBE_MAX_RXBUFFER
-       srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
-#else
        srrctl |= ixgbe_rx_bufsz(rx_ring) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
-#endif
 
        /* configure descriptor type */
        srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
@@ -2980,13 +3040,7 @@ static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
         * total size of max desc * buf_len is not greater
         * than 65536
         */
-#if (PAGE_SIZE <= 8192)
        rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
-#elif (PAGE_SIZE <= 16384)
-       rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
-#else
-       rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
-#endif
        IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
 }
 
@@ -3606,8 +3660,6 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
        if (hw->mac.type == ixgbe_mac_82598EB)
                netif_set_gso_max_size(adapter->netdev, 32768);
 
-       hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);
-
 #ifdef IXGBE_FCOE
        if (adapter->netdev->features & NETIF_F_FCOE_MTU)
                max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
@@ -3807,6 +3859,11 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
 #ifdef CONFIG_IXGBE_DCB
        ixgbe_configure_dcb(adapter);
 #endif
+       /*
+        * We must restore virtualization before VLANs or else
+        * the VLVF registers will not be populated
+        */
+       ixgbe_configure_virtualization(adapter);
 
        ixgbe_set_rx_mode(adapter->netdev);
        ixgbe_restore_vlan(adapter);
@@ -3838,8 +3895,6 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
                break;
        }
 
-       ixgbe_configure_virtualization(adapter);
-
 #ifdef IXGBE_FCOE
        /* configure FCoE L2 filters, redirection table, and Rx control */
        ixgbe_configure_fcoe(adapter);
@@ -4129,27 +4184,6 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
                hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
 }
 
-/**
- * ixgbe_init_rx_page_offset - initialize page offset values for Rx buffers
- * @rx_ring: ring to setup
- *
- * On many IA platforms the L1 cache has a critical stride of 4K, this
- * results in each receive buffer starting in the same cache set.  To help
- * reduce the pressure on this cache set we can interleave the offsets so
- * that only every other buffer will be in the same cache set.
- **/
-static void ixgbe_init_rx_page_offset(struct ixgbe_ring *rx_ring)
-{
-       struct ixgbe_rx_buffer *rx_buffer = rx_ring->rx_buffer_info;
-       u16 i;
-
-       for (i = 0; i < rx_ring->count; i += 2) {
-               rx_buffer[0].page_offset = 0;
-               rx_buffer[1].page_offset = ixgbe_rx_bufsz(rx_ring);
-               rx_buffer = &rx_buffer[2];
-       }
-}
-
 /**
  * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
  * @rx_ring: ring to free buffers from
@@ -4195,8 +4229,6 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
        size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
        memset(rx_ring->rx_buffer_info, 0, size);
 
-       ixgbe_init_rx_page_offset(rx_ring);
-
        /* Zero out the descriptor ring */
        memset(rx_ring->desc, 0, rx_ring->size);
 
@@ -4646,8 +4678,6 @@ int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)
        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;
 
-       ixgbe_init_rx_page_offset(rx_ring);
-
        return 0;
 err:
        vfree(rx_ring->rx_buffer_info);
@@ -5530,8 +5560,9 @@ static void ixgbe_spoof_check(struct ixgbe_adapter *adapter)
 {
        u32 ssvpc;
 
-       /* Do not perform spoof check for 82598 */
-       if (adapter->hw.mac.type == ixgbe_mac_82598EB)
+       /* Do not perform spoof check for 82598 or if not in IOV mode */
+       if (adapter->hw.mac.type == ixgbe_mac_82598EB ||
+           adapter->num_vfs == 0)
                return;
 
        ssvpc = IXGBE_READ_REG(&adapter->hw, IXGBE_SSVPC);
@@ -5543,7 +5574,7 @@ static void ixgbe_spoof_check(struct ixgbe_adapter *adapter)
        if (!ssvpc)
                return;
 
-       e_warn(drv, "%d Spoofed packets detected\n", ssvpc);
+       e_warn(drv, "%u Spoofed packets detected\n", ssvpc);
 }
 
 /**
@@ -5874,9 +5905,12 @@ static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
        u32 type_tucmd = 0;
 
        if (skb->ip_summed != CHECKSUM_PARTIAL) {
-               if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN) &&
-                   !(first->tx_flags & IXGBE_TX_FLAGS_TXSW))
-                       return;
+               if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN)) {
+                       if (unlikely(skb->no_fcs))
+                               first->tx_flags |= IXGBE_TX_FLAGS_NO_IFCS;
+                       if (!(first->tx_flags & IXGBE_TX_FLAGS_TXSW))
+                               return;
+               }
        } else {
                u8 l4_hdr = 0;
                switch (first->protocol) {
@@ -5938,7 +5972,6 @@ static __le32 ixgbe_tx_cmd_type(u32 tx_flags)
 {
        /* set type for advanced descriptor with frame checksum insertion */
        __le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA |
-                                     IXGBE_ADVTXD_DCMD_IFCS |
                                      IXGBE_ADVTXD_DCMD_DEXT);
 
        /* set HW vlan bit if vlan is present */
@@ -5958,6 +5991,10 @@ static __le32 ixgbe_tx_cmd_type(u32 tx_flags)
 #endif
                cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE);
 
+       /* insert frame checksum */
+       if (!(tx_flags & IXGBE_TX_FLAGS_NO_IFCS))
+               cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_IFCS);
+
        return cmd_type;
 }
 
@@ -6063,8 +6100,6 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
                if (likely(!data_len))
                        break;
 
-               if (unlikely(skb->no_fcs))
-                       cmd_type &= ~(cpu_to_le32(IXGBE_ADVTXD_DCMD_IFCS));
                tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
 
                i++;
@@ -6854,9 +6889,9 @@ static int ixgbe_set_features(struct net_device *netdev,
        return 0;
 }
 
-static int ixgbe_ndo_fdb_add(struct ndmsg *ndm,
+static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
                             struct net_device *dev,
-                            unsigned char *addr,
+                            const unsigned char *addr,
                             u16 flags)
 {
        struct ixgbe_adapter *adapter = netdev_priv(dev);
@@ -6893,7 +6928,7 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm,
 
 static int ixgbe_ndo_fdb_del(struct ndmsg *ndm,
                             struct net_device *dev,
-                            unsigned char *addr)
+                            const unsigned char *addr)
 {
        struct ixgbe_adapter *adapter = netdev_priv(dev);
        int err = -EOPNOTSUPP;
@@ -7136,11 +7171,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
                goto err_ioremap;
        }
 
-       for (i = 1; i <= 5; i++) {
-               if (pci_resource_len(pdev, i) == 0)
-                       continue;
-       }
-
        netdev->netdev_ops = &ixgbe_netdev_ops;
        ixgbe_set_ethtool_ops(netdev);
        netdev->watchdog_timeo = 5 * HZ;
@@ -7419,6 +7449,10 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
                e_err(probe, "failed to allocate sysfs resources\n");
 #endif /* CONFIG_IXGBE_HWMON */
 
+#ifdef CONFIG_DEBUG_FS
+       ixgbe_dbg_adapter_init(adapter);
+#endif /* CONFIG_DEBUG_FS */
+
        return 0;
 
 err_register:
@@ -7453,6 +7487,10 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
        struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;
 
+#ifdef CONFIG_DEBUG_FS
+       ixgbe_dbg_adapter_exit(adapter);
+#endif /* CONFIG_DEBUG_FS */
+
        set_bit(__IXGBE_DOWN, &adapter->state);
        cancel_work_sync(&adapter->service_task);
 
@@ -7708,6 +7746,10 @@ static int __init ixgbe_init_module(void)
        pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version);
        pr_info("%s\n", ixgbe_copyright);
 
+#ifdef CONFIG_DEBUG_FS
+       ixgbe_dbg_init();
+#endif /* CONFIG_DEBUG_FS */
+
 #ifdef CONFIG_IXGBE_DCA
        dca_register_notify(&dca_notifier);
 #endif
@@ -7730,6 +7772,11 @@ static void __exit ixgbe_exit_module(void)
        dca_unregister_notify(&dca_notifier);
 #endif
        pci_unregister_driver(&ixgbe_driver);
+
+#ifdef CONFIG_DEBUG_FS
+       ixgbe_dbg_exit();
+#endif /* CONFIG_DEBUG_FS */
+
        rcu_barrier(); /* Wait for completion of call_rcu()'s */
 }
 
index 3456d56171437cfbab401c39e3db6ad08c153782..39881cb17a4b5fe8958b8e0aead9188feaaf0370 100644 (file)
@@ -960,7 +960,8 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
        /* (Re)start the overflow check */
        adapter->flags2 |= IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED;
 
-       adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps);
+       adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
+                                               &adapter->pdev->dev);
        if (IS_ERR(adapter->ptp_clock)) {
                adapter->ptp_clock = NULL;
                e_dev_err("ptp_clock_register failed\n");
index 4fea8716ab64a2952b9bb052c98ffa5ccbb2014a..dce48bf64d9616beacb3ea9b081b6d7aea0730c2 100644 (file)
@@ -346,6 +346,10 @@ void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
 static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid,
                             u32 vf)
 {
+       /* VLAN 0 is a special case, don't allow it to be removed */
+       if (!vid && !add)
+               return 0;
+
        return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add);
 }
 
@@ -414,6 +418,7 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
                                  VLAN_PRIO_SHIFT)), vf);
                ixgbe_set_vmolr(hw, vf, false);
        } else {
+               ixgbe_set_vf_vlan(adapter, true, 0, vf);
                ixgbe_set_vmvir(adapter, 0, vf);
                ixgbe_set_vmolr(hw, vf, true);
        }
@@ -810,9 +815,9 @@ out:
        return err;
 }
 
-static int ixgbe_link_mbps(int internal_link_speed)
+static int ixgbe_link_mbps(struct ixgbe_adapter *adapter)
 {
-       switch (internal_link_speed) {
+       switch (adapter->link_speed) {
        case IXGBE_LINK_SPEED_100_FULL:
                return 100;
        case IXGBE_LINK_SPEED_1GB_FULL:
@@ -824,27 +829,30 @@ static int ixgbe_link_mbps(int internal_link_speed)
        }
 }
 
-static void ixgbe_set_vf_rate_limit(struct ixgbe_hw *hw, int vf, int tx_rate,
-                                   int link_speed)
+static void ixgbe_set_vf_rate_limit(struct ixgbe_adapter *adapter, int vf)
 {
-       int rf_dec, rf_int;
-       u32 bcnrc_val;
+       struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
+       struct ixgbe_hw *hw = &adapter->hw;
+       u32 bcnrc_val = 0;
+       u16 queue, queues_per_pool;
+       u16 tx_rate = adapter->vfinfo[vf].tx_rate;
+
+       if (tx_rate) {
+               /* start with base link speed value */
+               bcnrc_val = adapter->vf_rate_link_speed;
 
-       if (tx_rate != 0) {
                /* Calculate the rate factor values to set */
-               rf_int = link_speed / tx_rate;
-               rf_dec = (link_speed - (rf_int * tx_rate));
-               rf_dec = (rf_dec * (1<<IXGBE_RTTBCNRC_RF_INT_SHIFT)) / tx_rate;
-
-               bcnrc_val = IXGBE_RTTBCNRC_RS_ENA;
-               bcnrc_val |= ((rf_int<<IXGBE_RTTBCNRC_RF_INT_SHIFT) &
-                              IXGBE_RTTBCNRC_RF_INT_MASK);
-               bcnrc_val |= (rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK);
-       } else {
-               bcnrc_val = 0;
+               bcnrc_val <<= IXGBE_RTTBCNRC_RF_INT_SHIFT;
+               bcnrc_val /= tx_rate;
+
+               /* clear everything but the rate factor */
+               bcnrc_val &= IXGBE_RTTBCNRC_RF_INT_MASK |
+                            IXGBE_RTTBCNRC_RF_DEC_MASK;
+
+               /* enable the rate scheduler */
+               bcnrc_val |= IXGBE_RTTBCNRC_RS_ENA;
        }
 
-       IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, 2*vf); /* vf Y uses queue 2*Y */
        /*
         * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
         * register. Typically MMW_SIZE=0x014 if 9728-byte jumbo is supported
@@ -861,53 +869,68 @@ static void ixgbe_set_vf_rate_limit(struct ixgbe_hw *hw, int vf, int tx_rate,
                break;
        }
 
-       IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
+       /* determine how many queues per pool based on VMDq mask */
+       queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
+
+       /* write value for all Tx queues belonging to VF */
+       for (queue = 0; queue < queues_per_pool; queue++) {
+               unsigned int reg_idx = (vf * queues_per_pool) + queue;
+
+               IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, reg_idx);
+               IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
+       }
 }
 
 void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter)
 {
-       int actual_link_speed, i;
-       bool reset_rate = false;
+       int i;
 
        /* VF Tx rate limit was not set */
-       if (adapter->vf_rate_link_speed == 0)
+       if (!adapter->vf_rate_link_speed)
                return;
 
-       actual_link_speed = ixgbe_link_mbps(adapter->link_speed);
-       if (actual_link_speed != adapter->vf_rate_link_speed) {
-               reset_rate = true;
+       if (ixgbe_link_mbps(adapter) != adapter->vf_rate_link_speed) {
                adapter->vf_rate_link_speed = 0;
                dev_info(&adapter->pdev->dev,
-                        "Link speed has been changed. VF Transmit rate "
-                        "is disabled\n");
+                        "Link speed has been changed. VF Transmit rate is disabled\n");
        }
 
        for (i = 0; i < adapter->num_vfs; i++) {
-               if (reset_rate)
+               if (!adapter->vf_rate_link_speed)
                        adapter->vfinfo[i].tx_rate = 0;
 
-               ixgbe_set_vf_rate_limit(&adapter->hw, i,
-                                       adapter->vfinfo[i].tx_rate,
-                                       actual_link_speed);
+               ixgbe_set_vf_rate_limit(adapter, i);
        }
 }
 
 int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
-       struct ixgbe_hw *hw = &adapter->hw;
-       int actual_link_speed;
+       int link_speed;
+
+       /* verify VF is active */
+       if (vf >= adapter->num_vfs)
+               return -EINVAL;
 
-       actual_link_speed = ixgbe_link_mbps(adapter->link_speed);
-       if ((vf >= adapter->num_vfs) || (!adapter->link_up) ||
-           (tx_rate > actual_link_speed) || (actual_link_speed != 10000) ||
-           ((tx_rate != 0) && (tx_rate <= 10)))
-           /* rate limit cannot be set to 10Mb or less in 10Gb adapters */
+       /* verify link is up */
+       if (!adapter->link_up)
                return -EINVAL;
 
-       adapter->vf_rate_link_speed = actual_link_speed;
-       adapter->vfinfo[vf].tx_rate = (u16)tx_rate;
-       ixgbe_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed);
+       /* verify we are linked at 10Gbps */
+       link_speed = ixgbe_link_mbps(adapter);
+       if (link_speed != 10000)
+               return -EINVAL;
+
+       /* rate limit cannot be less than 10 Mb/s or greater than link speed */
+       if (tx_rate && ((tx_rate <= 10) || (tx_rate > link_speed)))
+               return -EINVAL;
+
+       /* store values */
+       adapter->vf_rate_link_speed = link_speed;
+       adapter->vfinfo[vf].tx_rate = tx_rate;
+
+       /* update hardware configuration */
+       ixgbe_set_vf_rate_limit(adapter, vf);
 
        return 0;
 }
index 418af827b23080bba6596630c271a17c0f008ed1..da17ccf5c09db9b8647a724c55d9c98eae4fa94f 100644 (file)
@@ -272,5 +272,6 @@ struct ixgbe_adv_tx_context_desc {
 /* Error Codes */
 #define IXGBE_ERR_INVALID_MAC_ADDR              -1
 #define IXGBE_ERR_RESET_FAILED                  -2
+#define IXGBE_ERR_INVALID_ARGUMENT              -3
 
 #endif /* _IXGBEVF_DEFINES_H_ */
index 98cadb0c4dab68a46dc3e13f87a3bcc596e1fe69..383b4e1cd17532682c9dfc087d7d9a08a969b81a 100644 (file)
@@ -101,7 +101,9 @@ struct ixgbevf_ring {
 
 /* Supported Rx Buffer Sizes */
 #define IXGBEVF_RXBUFFER_256   256    /* Used for packet split */
-#define IXGBEVF_RXBUFFER_2048  2048
+#define IXGBEVF_RXBUFFER_3K    3072
+#define IXGBEVF_RXBUFFER_7K    7168
+#define IXGBEVF_RXBUFFER_15K   15360
 #define IXGBEVF_MAX_RXBUFFER   16384  /* largest size for single descriptor */
 
 #define IXGBEVF_RX_HDR_SIZE IXGBEVF_RXBUFFER_256
@@ -259,6 +261,11 @@ enum ixbgevf_state_t {
        __IXGBEVF_DOWN
 };
 
+struct ixgbevf_cb {
+       struct sk_buff *prev;
+};
+#define IXGBE_CB(skb) ((struct ixgbevf_cb *)(skb)->cb)
+
 enum ixgbevf_boards {
        board_82599_vf,
        board_X540_vf,
index 6647383c4ddc85f9639eb010e0f3c5f470f4c3b9..0ee9bd4819f444b392d087403da514db125af0bd 100644 (file)
@@ -263,6 +263,8 @@ cont_loop:
        tx_ring->total_bytes += total_bytes;
        tx_ring->total_packets += total_packets;
        u64_stats_update_end(&tx_ring->syncp);
+       q_vector->tx.total_bytes += total_bytes;
+       q_vector->tx.total_packets += total_packets;
 
        return count < tx_ring->count;
 }
@@ -272,12 +274,10 @@ cont_loop:
  * @q_vector: structure containing interrupt and ring information
  * @skb: packet to send up
  * @status: hardware indication of status of receive
- * @rx_ring: rx descriptor ring (for a specific queue) to setup
  * @rx_desc: rx descriptor
  **/
 static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
                                struct sk_buff *skb, u8 status,
-                               struct ixgbevf_ring *ring,
                                union ixgbe_adv_rx_desc *rx_desc)
 {
        struct ixgbevf_adapter *adapter = q_vector->adapter;
@@ -433,11 +433,21 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
 
                if (!(staterr & IXGBE_RXD_STAT_EOP)) {
                        skb->next = next_buffer->skb;
-                       skb->next->prev = skb;
+                       IXGBE_CB(skb->next)->prev = skb;
                        adapter->non_eop_descs++;
                        goto next_desc;
                }
 
+               /* we should not be chaining buffers, if we did drop the skb */
+               if (IXGBE_CB(skb)->prev) {
+                       do {
+                               struct sk_buff *this = skb;
+                               skb = IXGBE_CB(skb)->prev;
+                               dev_kfree_skb(this);
+                       } while (skb);
+                       goto next_desc;
+               }
+
                /* ERR_MASK will only have valid bits if EOP set */
                if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) {
                        dev_kfree_skb_irq(skb);
@@ -461,7 +471,7 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
                }
                skb->protocol = eth_type_trans(skb, rx_ring->netdev);
 
-               ixgbevf_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);
+               ixgbevf_receive_skb(q_vector, skb, staterr, rx_desc);
 
 next_desc:
                rx_desc->wb.upper.status_error = 0;
@@ -490,6 +500,8 @@ next_desc:
        rx_ring->total_packets += total_rx_packets;
        rx_ring->total_bytes += total_rx_bytes;
        u64_stats_update_end(&rx_ring->syncp);
+       q_vector->rx.total_packets += total_rx_packets;
+       q_vector->rx.total_bytes += total_rx_bytes;
 
        return !!budget;
 }
@@ -716,40 +728,15 @@ static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
        }
 }
 
-static irqreturn_t ixgbevf_msix_mbx(int irq, void *data)
+static irqreturn_t ixgbevf_msix_other(int irq, void *data)
 {
        struct ixgbevf_adapter *adapter = data;
        struct ixgbe_hw *hw = &adapter->hw;
-       u32 msg;
-       bool got_ack = false;
-
-       if (!hw->mbx.ops.check_for_ack(hw))
-               got_ack = true;
 
-       if (!hw->mbx.ops.check_for_msg(hw)) {
-               hw->mbx.ops.read(hw, &msg, 1);
+       hw->mac.get_link_status = 1;
 
-               if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG)
-                       mod_timer(&adapter->watchdog_timer,
-                                 round_jiffies(jiffies + 1));
-
-               if (msg & IXGBE_VT_MSGTYPE_NACK)
-                       pr_warn("Last Request of type %2.2x to PF Nacked\n",
-                               msg & 0xFF);
-               /*
-                * Restore the PFSTS bit in case someone is polling for a
-                * return message from the PF
-                */
-               hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFSTS;
-       }
-
-       /*
-        * checking for the ack clears the PFACK bit.  Place
-        * it back in the v2p_mailbox cache so that anyone
-        * polling for an ack will not miss it
-        */
-       if (got_ack)
-               hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK;
+       if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
+               mod_timer(&adapter->watchdog_timer, jiffies);
 
        IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);
 
@@ -899,10 +886,10 @@ static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
        }
 
        err = request_irq(adapter->msix_entries[vector].vector,
-                         &ixgbevf_msix_mbx, 0, netdev->name, adapter);
+                         &ixgbevf_msix_other, 0, netdev->name, adapter);
        if (err) {
                hw_dbg(&adapter->hw,
-                      "request_irq for msix_mbx failed: %d\n", err);
+                      "request_irq for msix_other failed: %d\n", err);
                goto free_queue_irqs;
        }
 
@@ -1057,15 +1044,46 @@ static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
 
        srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
 
-       if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
-               srrctl |= IXGBEVF_RXBUFFER_2048 >>
-                       IXGBE_SRRCTL_BSIZEPKT_SHIFT;
-       else
-               srrctl |= rx_ring->rx_buf_len >>
-                       IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+       srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >>
+                 IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+
        IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
 }
 
+static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       struct net_device *netdev = adapter->netdev;
+       int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+       int i;
+       u16 rx_buf_len;
+
+       /* notify the PF of our intent to use this size of frame */
+       ixgbevf_rlpml_set_vf(hw, max_frame);
+
+       /* PF will allow an extra 4 bytes past for vlan tagged frames */
+       max_frame += VLAN_HLEN;
+
+       /*
+        * Make best use of allocation by using all but 1K of a
+        * power of 2 allocation that will be used for skb->head.
+        */
+       if ((hw->mac.type == ixgbe_mac_X540_vf) &&
+           (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE))
+               rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
+       else if (max_frame <= IXGBEVF_RXBUFFER_3K)
+               rx_buf_len = IXGBEVF_RXBUFFER_3K;
+       else if (max_frame <= IXGBEVF_RXBUFFER_7K)
+               rx_buf_len = IXGBEVF_RXBUFFER_7K;
+       else if (max_frame <= IXGBEVF_RXBUFFER_15K)
+               rx_buf_len = IXGBEVF_RXBUFFER_15K;
+       else
+               rx_buf_len = IXGBEVF_MAX_RXBUFFER;
+
+       for (i = 0; i < adapter->num_rx_queues; i++)
+               adapter->rx_ring[i].rx_buf_len = rx_buf_len;
+}
+
 /**
  * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
  * @adapter: board private structure
@@ -1076,18 +1094,14 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
 {
        u64 rdba;
        struct ixgbe_hw *hw = &adapter->hw;
-       struct net_device *netdev = adapter->netdev;
-       int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
        int i, j;
        u32 rdlen;
-       int rx_buf_len;
 
        /* PSRTYPE must be initialized in 82599 */
        IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
-       if (netdev->mtu <= ETH_DATA_LEN)
-               rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
-       else
-               rx_buf_len = ALIGN(max_frame, 1024);
+
+       /* set_rx_buffer_len must be called before ring initialization */
+       ixgbevf_set_rx_buffer_len(adapter);
 
        rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
        /* Setup the HW Rx Head and Tail Descriptor Pointers and
@@ -1103,7 +1117,6 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
                IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0);
                adapter->rx_ring[i].head = IXGBE_VFRDH(j);
                adapter->rx_ring[i].tail = IXGBE_VFRDT(j);
-               adapter->rx_ring[i].rx_buf_len = rx_buf_len;
 
                ixgbevf_configure_srrctl(adapter, j);
        }
@@ -1113,36 +1126,47 @@ static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
 {
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
+       int err;
+
+       if (!hw->mac.ops.set_vfta)
+               return -EOPNOTSUPP;
 
        spin_lock(&adapter->mbx_lock);
 
        /* add VID to filter table */
-       if (hw->mac.ops.set_vfta)
-               hw->mac.ops.set_vfta(hw, vid, 0, true);
+       err = hw->mac.ops.set_vfta(hw, vid, 0, true);
 
        spin_unlock(&adapter->mbx_lock);
 
+       /* translate error return types so error makes sense */
+       if (err == IXGBE_ERR_MBX)
+               return -EIO;
+
+       if (err == IXGBE_ERR_INVALID_ARGUMENT)
+               return -EACCES;
+
        set_bit(vid, adapter->active_vlans);
 
-       return 0;
+       return err;
 }
 
 static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 {
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
+       int err = -EOPNOTSUPP;
 
        spin_lock(&adapter->mbx_lock);
 
        /* remove VID from filter table */
        if (hw->mac.ops.set_vfta)
-               hw->mac.ops.set_vfta(hw, vid, 0, false);
+               err = hw->mac.ops.set_vfta(hw, vid, 0, false);
 
        spin_unlock(&adapter->mbx_lock);
 
        clear_bit(vid, adapter->active_vlans);
 
-       return 0;
+       return err;
 }
 
 static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
@@ -1308,6 +1332,25 @@ static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
        adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
 }
 
+static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       int api[] = { ixgbe_mbox_api_10,
+                     ixgbe_mbox_api_unknown };
+       int err = 0, idx = 0;
+
+       spin_lock(&adapter->mbx_lock);
+
+       while (api[idx] != ixgbe_mbox_api_unknown) {
+               err = ixgbevf_negotiate_api_version(hw, api[idx]);
+               if (!err)
+                       break;
+               idx++;
+       }
+
+       spin_unlock(&adapter->mbx_lock);
+}
+
 static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
 {
        struct net_device *netdev = adapter->netdev;
@@ -1315,7 +1358,6 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
        int i, j = 0;
        int num_rx_rings = adapter->num_rx_queues;
        u32 txdctl, rxdctl;
-       u32 msg[2];
 
        for (i = 0; i < adapter->num_tx_queues; i++) {
                j = adapter->tx_ring[i].reg_idx;
@@ -1356,10 +1398,6 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
                        hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
        }
 
-       msg[0] = IXGBE_VF_SET_LPE;
-       msg[1] = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
-       hw->mbx.ops.write_posted(hw, msg, 2);
-
        spin_unlock(&adapter->mbx_lock);
 
        clear_bit(__IXGBEVF_DOWN, &adapter->state);
@@ -1371,6 +1409,7 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
        ixgbevf_save_reset_stats(adapter);
        ixgbevf_init_last_counter_stats(adapter);
 
+       hw->mac.get_link_status = 1;
        mod_timer(&adapter->watchdog_timer, jiffies);
 }
 
@@ -1378,6 +1417,8 @@ void ixgbevf_up(struct ixgbevf_adapter *adapter)
 {
        struct ixgbe_hw *hw = &adapter->hw;
 
+       ixgbevf_negotiate_api(adapter);
+
        ixgbevf_configure(adapter);
 
        ixgbevf_up_complete(adapter);
@@ -1419,7 +1460,7 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
                        rx_buffer_info->skb = NULL;
                        do {
                                struct sk_buff *this = skb;
-                               skb = skb->prev;
+                               skb = IXGBE_CB(skb)->prev;
                                dev_kfree_skb(this);
                        } while (skb);
                }
@@ -1547,8 +1588,6 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter)
 
 void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
 {
-       struct ixgbe_hw *hw = &adapter->hw;
-
        WARN_ON(in_interrupt());
 
        while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
@@ -1561,10 +1600,8 @@ void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
         * watchdog task will continue to schedule reset tasks until
         * the PF is up and running.
         */
-       if (!hw->mac.ops.reset_hw(hw)) {
-               ixgbevf_down(adapter);
-               ixgbevf_up(adapter);
-       }
+       ixgbevf_down(adapter);
+       ixgbevf_up(adapter);
 
        clear_bit(__IXGBEVF_RESETTING, &adapter->state);
 }
@@ -1866,6 +1903,22 @@ err_set_interrupt:
        return err;
 }
 
+/**
+ * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings
+ * @adapter: board private structure to clear interrupt scheme on
+ *
+ * We go through and clear interrupt specific resources and reset the structure
+ * to pre-load conditions
+ **/
+static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
+{
+       adapter->num_tx_queues = 0;
+       adapter->num_rx_queues = 0;
+
+       ixgbevf_free_q_vectors(adapter);
+       ixgbevf_reset_interrupt_capability(adapter);
+}
+
 /**
  * ixgbevf_sw_init - Initialize general software structures
  * (struct ixgbevf_adapter)
@@ -2351,6 +2404,8 @@ static int ixgbevf_open(struct net_device *netdev)
                }
        }
 
+       ixgbevf_negotiate_api(adapter);
+
        /* allocate transmit descriptors */
        err = ixgbevf_setup_all_tx_resources(adapter);
        if (err)
@@ -2860,10 +2915,8 @@ static int ixgbevf_set_mac(struct net_device *netdev, void *p)
 static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
 {
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
-       struct ixgbe_hw *hw = &adapter->hw;
        int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
        int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;
-       u32 msg[2];
 
        if (adapter->hw.mac.type == ixgbe_mac_X540_vf)
                max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
@@ -2877,35 +2930,91 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
        /* must set new MTU before calling down or up */
        netdev->mtu = new_mtu;
 
-       if (!netif_running(netdev)) {
-               msg[0] = IXGBE_VF_SET_LPE;
-               msg[1] = max_frame;
-               hw->mbx.ops.write_posted(hw, msg, 2);
-       }
-
        if (netif_running(netdev))
                ixgbevf_reinit_locked(adapter);
 
        return 0;
 }
 
-static void ixgbevf_shutdown(struct pci_dev *pdev)
+static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
 {
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+#ifdef CONFIG_PM
+       int retval = 0;
+#endif
 
        netif_device_detach(netdev);
 
        if (netif_running(netdev)) {
+               rtnl_lock();
                ixgbevf_down(adapter);
                ixgbevf_free_irq(adapter);
                ixgbevf_free_all_tx_resources(adapter);
                ixgbevf_free_all_rx_resources(adapter);
+               rtnl_unlock();
        }
 
-       pci_save_state(pdev);
+       ixgbevf_clear_interrupt_scheme(adapter);
 
+#ifdef CONFIG_PM
+       retval = pci_save_state(pdev);
+       if (retval)
+               return retval;
+
+#endif
        pci_disable_device(pdev);
+
+       return 0;
+}
+
+#ifdef CONFIG_PM
+static int ixgbevf_resume(struct pci_dev *pdev)
+{
+       struct ixgbevf_adapter *adapter = pci_get_drvdata(pdev);
+       struct net_device *netdev = adapter->netdev;
+       u32 err;
+
+       pci_set_power_state(pdev, PCI_D0);
+       pci_restore_state(pdev);
+       /*
+        * pci_restore_state clears dev->state_saved so call
+        * pci_save_state to restore it.
+        */
+       pci_save_state(pdev);
+
+       err = pci_enable_device_mem(pdev);
+       if (err) {
+               dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
+               return err;
+       }
+       pci_set_master(pdev);
+
+       rtnl_lock();
+       err = ixgbevf_init_interrupt_scheme(adapter);
+       rtnl_unlock();
+       if (err) {
+               dev_err(&pdev->dev, "Cannot initialize interrupts\n");
+               return err;
+       }
+
+       ixgbevf_reset(adapter);
+
+       if (netif_running(netdev)) {
+               err = ixgbevf_open(netdev);
+               if (err)
+                       return err;
+       }
+
+       netif_device_attach(netdev);
+
+       return err;
+}
+
+#endif /* CONFIG_PM */
+static void ixgbevf_shutdown(struct pci_dev *pdev)
+{
+       ixgbevf_suspend(pdev, PMSG_SUSPEND);
 }
 
 static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
@@ -2946,7 +3055,7 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
        return stats;
 }
 
-static const struct net_device_ops ixgbe_netdev_ops = {
+static const struct net_device_ops ixgbevf_netdev_ops = {
        .ndo_open               = ixgbevf_open,
        .ndo_stop               = ixgbevf_close,
        .ndo_start_xmit         = ixgbevf_xmit_frame,
@@ -2962,7 +3071,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
 
 static void ixgbevf_assign_netdev_ops(struct net_device *dev)
 {
-       dev->netdev_ops = &ixgbe_netdev_ops;
+       dev->netdev_ops = &ixgbevf_netdev_ops;
        ixgbevf_set_ethtool_ops(dev);
        dev->watchdog_timeo = 5 * HZ;
 }
@@ -3131,6 +3240,7 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
        return 0;
 
 err_register:
+       ixgbevf_clear_interrupt_scheme(adapter);
 err_sw_init:
        ixgbevf_reset_interrupt_capability(adapter);
        iounmap(hw->hw_addr);
@@ -3168,6 +3278,7 @@ static void __devexit ixgbevf_remove(struct pci_dev *pdev)
        if (netdev->reg_state == NETREG_REGISTERED)
                unregister_netdev(netdev);
 
+       ixgbevf_clear_interrupt_scheme(adapter);
        ixgbevf_reset_interrupt_capability(adapter);
 
        iounmap(adapter->hw.hw_addr);
@@ -3267,6 +3378,11 @@ static struct pci_driver ixgbevf_driver = {
        .id_table = ixgbevf_pci_tbl,
        .probe    = ixgbevf_probe,
        .remove   = __devexit_p(ixgbevf_remove),
+#ifdef CONFIG_PM
+       /* Power Management Hooks */
+       .suspend  = ixgbevf_suspend,
+       .resume   = ixgbevf_resume,
+#endif
        .shutdown = ixgbevf_shutdown,
        .err_handler = &ixgbevf_err_handler
 };
index 9c955900fe649deb1b7287443f2f2a5d9f21728a..d5028ddf4b318c5721d9f5b46ead7a76bb3b81b4 100644 (file)
@@ -86,14 +86,17 @@ static s32 ixgbevf_poll_for_ack(struct ixgbe_hw *hw)
 static s32 ixgbevf_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
 {
        struct ixgbe_mbx_info *mbx = &hw->mbx;
-       s32 ret_val = IXGBE_ERR_MBX;
+       s32 ret_val = -IXGBE_ERR_MBX;
+
+       if (!mbx->ops.read)
+               goto out;
 
        ret_val = ixgbevf_poll_for_msg(hw);
 
        /* if ack received read message, otherwise we timed out */
        if (!ret_val)
                ret_val = mbx->ops.read(hw, msg, size);
-
+out:
        return ret_val;
 }
 
@@ -109,7 +112,11 @@ static s32 ixgbevf_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
 static s32 ixgbevf_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
 {
        struct ixgbe_mbx_info *mbx = &hw->mbx;
-       s32 ret_val;
+       s32 ret_val = -IXGBE_ERR_MBX;
+
+       /* exit if either we can't write or there isn't a defined timeout */
+       if (!mbx->ops.write || !mbx->timeout)
+               goto out;
 
        /* send msg */
        ret_val = mbx->ops.write(hw, msg, size);
@@ -117,7 +124,7 @@ static s32 ixgbevf_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
        /* if msg sent wait until we receive an ack */
        if (!ret_val)
                ret_val = ixgbevf_poll_for_ack(hw);
-
+out:
        return ret_val;
 }
 
index cf9131c5c1150aa6746e5201f280ec5a87c9bf91..946ce86f337f702701f10133faa8c71b099d81c3 100644 (file)
 /* bits 23:16 are used for exra info for certain messages */
 #define IXGBE_VT_MSGINFO_MASK     (0xFF << IXGBE_VT_MSGINFO_SHIFT)
 
+/* definitions to support mailbox API version negotiation */
+
+/*
+ * each element denotes a version of the API; existing numbers may not
+ * change; any additions must go at the end
+ */
+enum ixgbe_pfvf_api_rev {
+       ixgbe_mbox_api_10,      /* API version 1.0, linux/freebsd VF driver */
+       ixgbe_mbox_api_20,      /* API version 2.0, solaris Phase1 VF driver */
+       /* This value should always be last */
+       ixgbe_mbox_api_unknown, /* indicates that API version is not known */
+};
+
+/* mailbox API, legacy requests */
 #define IXGBE_VF_RESET            0x01 /* VF requests reset */
 #define IXGBE_VF_SET_MAC_ADDR     0x02 /* VF requests PF to set MAC addr */
 #define IXGBE_VF_SET_MULTICAST    0x03 /* VF requests PF to set MC addr */
 #define IXGBE_VF_SET_VLAN         0x04 /* VF requests PF to set VLAN */
-#define IXGBE_VF_SET_LPE          0x05 /* VF requests PF to set VMOLR.LPE */
-#define IXGBE_VF_SET_MACVLAN      0x06 /* VF requests PF for unicast filter */
+
+/* mailbox API, version 1.0 VF requests */
+#define IXGBE_VF_SET_LPE       0x05 /* VF requests PF to set VMOLR.LPE */
+#define IXGBE_VF_SET_MACVLAN   0x06 /* VF requests PF for unicast filter */
+#define IXGBE_VF_API_NEGOTIATE 0x08 /* negotiate API version */
 
 /* length of permanent address message returned from PF */
 #define IXGBE_VF_PERMADDR_MSG_LEN 4
index ec89b86f7ca4a66603c7128b3353d762928faf9c..0c7447e6fcc84a3a42044b11d0f85fbe3f2d0904 100644 (file)
@@ -79,6 +79,9 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
        /* Call adapter stop to disable tx/rx and clear interrupts */
        hw->mac.ops.stop_adapter(hw);
 
+       /* reset the api version */
+       hw->api_version = ixgbe_mbox_api_10;
+
        IXGBE_WRITE_REG(hw, IXGBE_VFCTRL, IXGBE_CTRL_RST);
        IXGBE_WRITE_FLUSH(hw);
 
@@ -97,7 +100,7 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
        msgbuf[0] = IXGBE_VF_RESET;
        mbx->ops.write_posted(hw, msgbuf, 1);
 
-       msleep(10);
+       mdelay(10);
 
        /* set our "perm_addr" based on info provided by PF */
        /* also set up the mc_filter_type which is piggy backed
@@ -346,16 +349,32 @@ static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,
 static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
                               bool vlan_on)
 {
+       struct ixgbe_mbx_info *mbx = &hw->mbx;
        u32 msgbuf[2];
+       s32 err;
 
        msgbuf[0] = IXGBE_VF_SET_VLAN;
        msgbuf[1] = vlan;
        /* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
        msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT;
 
-       ixgbevf_write_msg_read_ack(hw, msgbuf, 2);
+       err = mbx->ops.write_posted(hw, msgbuf, 2);
+       if (err)
+               goto mbx_err;
 
-       return 0;
+       err = mbx->ops.read_posted(hw, msgbuf, 2);
+       if (err)
+               goto mbx_err;
+
+       /* remove extra bits from the message */
+       msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
+       msgbuf[0] &= ~(0xFF << IXGBE_VT_MSGINFO_SHIFT);
+
+       if (msgbuf[0] != (IXGBE_VF_SET_VLAN | IXGBE_VT_MSGTYPE_ACK))
+               err = IXGBE_ERR_INVALID_ARGUMENT;
+
+mbx_err:
+       return err;
 }
 
 /**
@@ -389,20 +408,23 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
                                     bool *link_up,
                                     bool autoneg_wait_to_complete)
 {
+       struct ixgbe_mbx_info *mbx = &hw->mbx;
+       struct ixgbe_mac_info *mac = &hw->mac;
+       s32 ret_val = 0;
        u32 links_reg;
+       u32 in_msg = 0;
 
-       if (!(hw->mbx.ops.check_for_rst(hw))) {
-               *link_up = false;
-               *speed = 0;
-               return -1;
-       }
+       /* If we were hit with a reset drop the link */
+       if (!mbx->ops.check_for_rst(hw) || !mbx->timeout)
+               mac->get_link_status = true;
 
-       links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+       if (!mac->get_link_status)
+               goto out;
 
-       if (links_reg & IXGBE_LINKS_UP)
-               *link_up = true;
-       else
-               *link_up = false;
+       /* if link status is down no point in checking to see if pf is up */
+       links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+       if (!(links_reg & IXGBE_LINKS_UP))
+               goto out;
 
        switch (links_reg & IXGBE_LINKS_SPEED_82599) {
        case IXGBE_LINKS_SPEED_10G_82599:
@@ -416,7 +438,79 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
                break;
        }
 
-       return 0;
+       /* if the read failed it could just be a mailbox collision, best wait
+        * until we are called again and don't report an error */
+       if (mbx->ops.read(hw, &in_msg, 1))
+               goto out;
+
+       if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) {
+               /* msg is not CTS and is NACK we must have lost CTS status */
+               if (in_msg & IXGBE_VT_MSGTYPE_NACK)
+                       ret_val = -1;
+               goto out;
+       }
+
+       /* the pf is talking, if we timed out in the past we reinit */
+       if (!mbx->timeout) {
+               ret_val = -1;
+               goto out;
+       }
+
+       /* if we passed all the tests above then the link is up and we no
+        * longer need to check for link */
+       mac->get_link_status = false;
+
+out:
+       *link_up = !mac->get_link_status;
+       return ret_val;
+}
+
+/**
+ *  ixgbevf_rlpml_set_vf - Set the maximum receive packet length
+ *  @hw: pointer to the HW structure
+ *  @max_size: value to assign to max frame size
+ **/
+void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size)
+{
+       u32 msgbuf[2];
+
+       msgbuf[0] = IXGBE_VF_SET_LPE;
+       msgbuf[1] = max_size;
+       ixgbevf_write_msg_read_ack(hw, msgbuf, 2);
+}
+
+/**
+ *  ixgbevf_negotiate_api_version - Negotiate supported API version
+ *  @hw: pointer to the HW structure
+ *  @api: integer containing requested API version
+ **/
+int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api)
+{
+       int err;
+       u32 msg[3];
+
+       /* Negotiate the mailbox API version */
+       msg[0] = IXGBE_VF_API_NEGOTIATE;
+       msg[1] = api;
+       msg[2] = 0;
+       err = hw->mbx.ops.write_posted(hw, msg, 3);
+
+       if (!err)
+               err = hw->mbx.ops.read_posted(hw, msg, 3);
+
+       if (!err) {
+               msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
+
+               /* Store value and return 0 on success */
+               if (msg[0] == (IXGBE_VF_API_NEGOTIATE | IXGBE_VT_MSGTYPE_ACK)) {
+                       hw->api_version = api;
+                       return 0;
+               }
+
+               err = IXGBE_ERR_INVALID_ARGUMENT;
+       }
+
+       return err;
 }
 
 static const struct ixgbe_mac_operations ixgbevf_mac_ops = {
index 25c951daee5d3042c85e54b9944d0fda3fb855a0..47f11a584d8c04f9f4deaeecc643729362ea6917 100644 (file)
@@ -137,6 +137,8 @@ struct ixgbe_hw {
 
        u8  revision_id;
        bool adapter_stopped;
+
+       int api_version;
 };
 
 struct ixgbevf_hw_stats {
@@ -170,5 +172,7 @@ struct ixgbevf_info {
        const struct ixgbe_mac_operations *mac_ops;
 };
 
+void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size);
+int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api);
 #endif /* __IXGBE_VF_H__ */
 
index c8fef435302155d32aa8236e0118c2406241c48c..3d1899ff1076e753cfa5a07f7f199a4f01904b2e 100644 (file)
@@ -40,6 +40,7 @@
 
 #include <linux/mlx4/cmd.h>
 #include <linux/semaphore.h>
+#include <rdma/ib_smi.h>
 
 #include <asm/io.h>
 
@@ -394,7 +395,8 @@ static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
        struct mlx4_vhcr_cmd *vhcr = priv->mfunc.vhcr;
        int ret;
 
-       down(&priv->cmd.slave_sem);
+       mutex_lock(&priv->cmd.slave_cmd_mutex);
+
        vhcr->in_param = cpu_to_be64(in_param);
        vhcr->out_param = out_param ? cpu_to_be64(*out_param) : 0;
        vhcr->in_modifier = cpu_to_be32(in_modifier);
@@ -402,6 +404,7 @@ static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
        vhcr->token = cpu_to_be16(CMD_POLL_TOKEN);
        vhcr->status = 0;
        vhcr->flags = !!(priv->cmd.use_events) << 6;
+
        if (mlx4_is_master(dev)) {
                ret = mlx4_master_process_vhcr(dev, dev->caps.function, vhcr);
                if (!ret) {
@@ -438,7 +441,8 @@ static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
                        mlx4_err(dev, "failed execution of VHCR_POST command"
                                 "opcode 0x%x\n", op);
        }
-       up(&priv->cmd.slave_sem);
+
+       mutex_unlock(&priv->cmd.slave_cmd_mutex);
        return ret;
 }
 
@@ -627,6 +631,162 @@ static int mlx4_ACCESS_MEM(struct mlx4_dev *dev, u64 master_addr,
                            MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
 }
 
+static int query_pkey_block(struct mlx4_dev *dev, u8 port, u16 index, u16 *pkey,
+                              struct mlx4_cmd_mailbox *inbox,
+                              struct mlx4_cmd_mailbox *outbox)
+{
+       struct ib_smp *in_mad = (struct ib_smp *)(inbox->buf);
+       struct ib_smp *out_mad = (struct ib_smp *)(outbox->buf);
+       int err;
+       int i;
+
+       if (index & 0x1f)
+               return -EINVAL;
+
+       in_mad->attr_mod = cpu_to_be32(index / 32);
+
+       err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, port, 3,
+                          MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
+                          MLX4_CMD_NATIVE);
+       if (err)
+               return err;
+
+       for (i = 0; i < 32; ++i)
+               pkey[i] = be16_to_cpu(((__be16 *) out_mad->data)[i]);
+
+       return err;
+}
+
+static int get_full_pkey_table(struct mlx4_dev *dev, u8 port, u16 *table,
+                              struct mlx4_cmd_mailbox *inbox,
+                              struct mlx4_cmd_mailbox *outbox)
+{
+       int i;
+       int err;
+
+       for (i = 0; i < dev->caps.pkey_table_len[port]; i += 32) {
+               err = query_pkey_block(dev, port, i, table + i, inbox, outbox);
+               if (err)
+                       return err;
+       }
+
+       return 0;
+}
+#define PORT_CAPABILITY_LOCATION_IN_SMP 20
+#define PORT_STATE_OFFSET 32
+
+static enum ib_port_state vf_port_state(struct mlx4_dev *dev, int port, int vf)
+{
+       if (mlx4_get_slave_port_state(dev, vf, port) == SLAVE_PORT_UP)
+               return IB_PORT_ACTIVE;
+       else
+               return IB_PORT_DOWN;
+}
+
+static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
+                               struct mlx4_vhcr *vhcr,
+                               struct mlx4_cmd_mailbox *inbox,
+                               struct mlx4_cmd_mailbox *outbox,
+                               struct mlx4_cmd_info *cmd)
+{
+       struct ib_smp *smp = inbox->buf;
+       u32 index;
+       u8 port;
+       u16 *table;
+       int err;
+       int vidx, pidx;
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct ib_smp *outsmp = outbox->buf;
+       __be16 *outtab = (__be16 *)(outsmp->data);
+       __be32 slave_cap_mask;
+       __be64 slave_node_guid;
+       port = vhcr->in_modifier;
+
+       if (smp->base_version == 1 &&
+           smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED &&
+           smp->class_version == 1) {
+               if (smp->method == IB_MGMT_METHOD_GET) {
+                       if (smp->attr_id == IB_SMP_ATTR_PKEY_TABLE) {
+                               index = be32_to_cpu(smp->attr_mod);
+                               if (port < 1 || port > dev->caps.num_ports)
+                                       return -EINVAL;
+                               table = kcalloc(dev->caps.pkey_table_len[port], sizeof *table, GFP_KERNEL);
+                               if (!table)
+                                       return -ENOMEM;
+                               /* need to get the full pkey table because the paravirtualized
+                                * pkeys may be scattered among several pkey blocks.
+                                */
+                               err = get_full_pkey_table(dev, port, table, inbox, outbox);
+                               if (!err) {
+                                       for (vidx = index * 32; vidx < (index + 1) * 32; ++vidx) {
+                                               pidx = priv->virt2phys_pkey[slave][port - 1][vidx];
+                                               outtab[vidx % 32] = cpu_to_be16(table[pidx]);
+                                       }
+                               }
+                               kfree(table);
+                               return err;
+                       }
+                       if (smp->attr_id == IB_SMP_ATTR_PORT_INFO) {
+                               /*get the slave specific caps:*/
+                               /*do the command */
+                               err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
+                                           vhcr->in_modifier, vhcr->op_modifier,
+                                           vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
+                               /* modify the response for slaves */
+                               if (!err && slave != mlx4_master_func_num(dev)) {
+                                       u8 *state = outsmp->data + PORT_STATE_OFFSET;
+
+                                       *state = (*state & 0xf0) | vf_port_state(dev, port, slave);
+                                       slave_cap_mask = priv->mfunc.master.slave_state[slave].ib_cap_mask[port];
+                                       memcpy(outsmp->data + PORT_CAPABILITY_LOCATION_IN_SMP, &slave_cap_mask, 4);
+                               }
+                               return err;
+                       }
+                       if (smp->attr_id == IB_SMP_ATTR_GUID_INFO) {
+                               /* compute slave's gid block */
+                               smp->attr_mod = cpu_to_be32(slave / 8);
+                               /* execute cmd */
+                               err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
+                                            vhcr->in_modifier, vhcr->op_modifier,
+                                            vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
+                               if (!err) {
+                                       /* if needed, move slave gid to index 0 */
+                                       if (slave % 8)
+                                               memcpy(outsmp->data,
+                                                      outsmp->data + (slave % 8) * 8, 8);
+                                       /* delete all other gids */
+                                       memset(outsmp->data + 8, 0, 56);
+                               }
+                               return err;
+                       }
+                       if (smp->attr_id == IB_SMP_ATTR_NODE_INFO) {
+                               err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
+                                            vhcr->in_modifier, vhcr->op_modifier,
+                                            vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
+                               if (!err) {
+                                       slave_node_guid =  mlx4_get_slave_node_guid(dev, slave);
+                                       memcpy(outsmp->data + 12, &slave_node_guid, 8);
+                               }
+                               return err;
+                       }
+               }
+       }
+       if (slave != mlx4_master_func_num(dev) &&
+           ((smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) ||
+            (smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED &&
+             smp->method == IB_MGMT_METHOD_SET))) {
+               mlx4_err(dev, "slave %d is trying to execute a Subnet MGMT MAD, "
+                        "class 0x%x, method 0x%x for attr 0x%x. Rejecting\n",
+                        slave, smp->mgmt_class, smp->method,
+                        be16_to_cpu(smp->attr_id));
+               return -EPERM;
+       }
+       /*default:*/
+       return mlx4_cmd_box(dev, inbox->dma, outbox->dma,
+                                   vhcr->in_modifier, vhcr->op_modifier,
+                                   vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
+}
+
 int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave,
                     struct mlx4_vhcr *vhcr,
                     struct mlx4_cmd_mailbox *inbox,
@@ -950,7 +1110,7 @@ static struct mlx4_cmd_info cmd_info[] = {
                .out_is_imm = false,
                .encode_slave_id = false,
                .verify = NULL,
-               .wrapper = mlx4_GEN_QP_wrapper
+               .wrapper = mlx4_INIT2INIT_QP_wrapper
        },
        {
                .opcode = MLX4_CMD_INIT2RTR_QP,
@@ -968,7 +1128,7 @@ static struct mlx4_cmd_info cmd_info[] = {
                .out_is_imm = false,
                .encode_slave_id = false,
                .verify = NULL,
-               .wrapper = mlx4_GEN_QP_wrapper
+               .wrapper = mlx4_RTR2RTS_QP_wrapper
        },
        {
                .opcode = MLX4_CMD_RTS2RTS_QP,
@@ -977,7 +1137,7 @@ static struct mlx4_cmd_info cmd_info[] = {
                .out_is_imm = false,
                .encode_slave_id = false,
                .verify = NULL,
-               .wrapper = mlx4_GEN_QP_wrapper
+               .wrapper = mlx4_RTS2RTS_QP_wrapper
        },
        {
                .opcode = MLX4_CMD_SQERR2RTS_QP,
@@ -986,7 +1146,7 @@ static struct mlx4_cmd_info cmd_info[] = {
                .out_is_imm = false,
                .encode_slave_id = false,
                .verify = NULL,
-               .wrapper = mlx4_GEN_QP_wrapper
+               .wrapper = mlx4_SQERR2RTS_QP_wrapper
        },
        {
                .opcode = MLX4_CMD_2ERR_QP,
@@ -1013,7 +1173,7 @@ static struct mlx4_cmd_info cmd_info[] = {
                .out_is_imm = false,
                .encode_slave_id = false,
                .verify = NULL,
-               .wrapper = mlx4_GEN_QP_wrapper
+               .wrapper = mlx4_SQD2SQD_QP_wrapper
        },
        {
                .opcode = MLX4_CMD_SQD2RTS_QP,
@@ -1022,7 +1182,7 @@ static struct mlx4_cmd_info cmd_info[] = {
                .out_is_imm = false,
                .encode_slave_id = false,
                .verify = NULL,
-               .wrapper = mlx4_GEN_QP_wrapper
+               .wrapper = mlx4_SQD2RTS_QP_wrapper
        },
        {
                .opcode = MLX4_CMD_2RST_QP,
@@ -1060,6 +1220,24 @@ static struct mlx4_cmd_info cmd_info[] = {
                .verify = NULL,
                .wrapper = mlx4_GEN_QP_wrapper
        },
+       {
+               .opcode = MLX4_CMD_CONF_SPECIAL_QP,
+               .has_inbox = false,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL, /* XXX verify: only demux can do this */
+               .wrapper = NULL
+       },
+       {
+               .opcode = MLX4_CMD_MAD_IFC,
+               .has_inbox = true,
+               .has_outbox = true,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_MAD_IFC_wrapper
+       },
        {
                .opcode = MLX4_CMD_QUERY_IF_STAT,
                .has_inbox = false,
@@ -1340,6 +1518,8 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
                if (MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd)
                        goto inform_slave_state;
 
+               mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_SHUTDOWN, slave);
+
                /* write the version in the event field */
                reply |= mlx4_comm_get_version();
 
@@ -1376,19 +1556,21 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
                        goto reset_slave;
                slave_state[slave].vhcr_dma |= param;
                slave_state[slave].active = true;
+               mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_INIT, slave);
                break;
        case MLX4_COMM_CMD_VHCR_POST:
                if ((slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_EN) &&
                    (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_POST))
                        goto reset_slave;
-               down(&priv->cmd.slave_sem);
+
+               mutex_lock(&priv->cmd.slave_cmd_mutex);
                if (mlx4_master_process_vhcr(dev, slave, NULL)) {
                        mlx4_err(dev, "Failed processing vhcr for slave:%d,"
                                 " resetting slave.\n", slave);
-                       up(&priv->cmd.slave_sem);
+                       mutex_unlock(&priv->cmd.slave_cmd_mutex);
                        goto reset_slave;
                }
-               up(&priv->cmd.slave_sem);
+               mutex_unlock(&priv->cmd.slave_cmd_mutex);
                break;
        default:
                mlx4_warn(dev, "Bad comm cmd:%d from slave:%d\n", cmd, slave);
@@ -1529,14 +1711,6 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
        struct mlx4_slave_state *s_state;
        int i, j, err, port;
 
-       priv->mfunc.vhcr = dma_alloc_coherent(&(dev->pdev->dev), PAGE_SIZE,
-                                           &priv->mfunc.vhcr_dma,
-                                           GFP_KERNEL);
-       if (!priv->mfunc.vhcr) {
-               mlx4_err(dev, "Couldn't allocate vhcr.\n");
-               return -ENOMEM;
-       }
-
        if (mlx4_is_master(dev))
                priv->mfunc.comm =
                ioremap(pci_resource_start(dev->pdev, priv->fw.comm_bar) +
@@ -1590,6 +1764,7 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
                INIT_WORK(&priv->mfunc.master.slave_flr_event_work,
                          mlx4_master_handle_slave_flr);
                spin_lock_init(&priv->mfunc.master.slave_state_lock);
+               spin_lock_init(&priv->mfunc.master.slave_eq.event_lock);
                priv->mfunc.master.comm_wq =
                        create_singlethread_workqueue("mlx4_comm");
                if (!priv->mfunc.master.comm_wq)
@@ -1598,7 +1773,6 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
                if (mlx4_init_resource_tracker(dev))
                        goto err_thread;
 
-               sema_init(&priv->cmd.slave_sem, 1);
                err = mlx4_ARM_COMM_CHANNEL(dev);
                if (err) {
                        mlx4_err(dev, " Failed to arm comm channel eq: %x\n",
@@ -1612,8 +1786,6 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
                        mlx4_err(dev, "Couldn't sync toggles\n");
                        goto err_comm;
                }
-
-               sema_init(&priv->cmd.slave_sem, 1);
        }
        return 0;
 
@@ -1643,6 +1815,7 @@ int mlx4_cmd_init(struct mlx4_dev *dev)
        struct mlx4_priv *priv = mlx4_priv(dev);
 
        mutex_init(&priv->cmd.hcr_mutex);
+       mutex_init(&priv->cmd.slave_cmd_mutex);
        sema_init(&priv->cmd.poll_sem, 1);
        priv->cmd.use_events = 0;
        priv->cmd.toggle     = 1;
@@ -1659,14 +1832,30 @@ int mlx4_cmd_init(struct mlx4_dev *dev)
                }
        }
 
+       if (mlx4_is_mfunc(dev)) {
+               priv->mfunc.vhcr = dma_alloc_coherent(&(dev->pdev->dev), PAGE_SIZE,
+                                                     &priv->mfunc.vhcr_dma,
+                                                     GFP_KERNEL);
+               if (!priv->mfunc.vhcr) {
+                       mlx4_err(dev, "Couldn't allocate VHCR.\n");
+                       goto err_hcr;
+               }
+       }
+
        priv->cmd.pool = pci_pool_create("mlx4_cmd", dev->pdev,
                                         MLX4_MAILBOX_SIZE,
                                         MLX4_MAILBOX_SIZE, 0);
        if (!priv->cmd.pool)
-               goto err_hcr;
+               goto err_vhcr;
 
        return 0;
 
+err_vhcr:
+       if (mlx4_is_mfunc(dev))
+               dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
+                                 priv->mfunc.vhcr, priv->mfunc.vhcr_dma);
+       priv->mfunc.vhcr = NULL;
+
 err_hcr:
        if (!mlx4_is_slave(dev))
                iounmap(priv->cmd.hcr);
@@ -1689,9 +1878,6 @@ void mlx4_multi_func_cleanup(struct mlx4_dev *dev)
        }
 
        iounmap(priv->mfunc.comm);
-       dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
-                    priv->mfunc.vhcr, priv->mfunc.vhcr_dma);
-       priv->mfunc.vhcr = NULL;
 }
 
 void mlx4_cmd_cleanup(struct mlx4_dev *dev)
@@ -1702,6 +1888,10 @@ void mlx4_cmd_cleanup(struct mlx4_dev *dev)
 
        if (!mlx4_is_slave(dev))
                iounmap(priv->cmd.hcr);
+       if (mlx4_is_mfunc(dev))
+               dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
+                                 priv->mfunc.vhcr, priv->mfunc.vhcr_dma);
+       priv->mfunc.vhcr = NULL;
 }
 
 /*
index 10bba09c44ea508d047123aeafb5e001b6bbd1be..c10e3a6de09f042a02be933d8aa1ca928207fe6d 100644 (file)
@@ -712,10 +712,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
        if (bounce)
                tx_desc = mlx4_en_bounce_to_desc(priv, ring, index, desc_size);
 
-       /* Run destructor before passing skb to HW */
-       if (likely(!skb_shared(skb)))
-               skb_orphan(skb);
-
        if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tag) {
                *(__be32 *) (&tx_desc->ctrl.vlan_tag) |= cpu_to_be32(ring->doorbell_qpn);
                op_own |= htonl((bf_index & 0xffff) << 8);
index 99a04648fab079c41084ad1a601bac6e903ceaab..51c764901ad257bce17199496d895f788a647f8b 100644 (file)
@@ -164,13 +164,16 @@ static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_slave_event_eq *slave_eq = &priv->mfunc.master.slave_eq;
-       struct mlx4_eqe *s_eqe =
-               &slave_eq->event_eqe[slave_eq->prod & (SLAVE_EVENT_EQ_SIZE - 1)];
+       struct mlx4_eqe *s_eqe;
+       unsigned long flags;
 
+       spin_lock_irqsave(&slave_eq->event_lock, flags);
+       s_eqe = &slave_eq->event_eqe[slave_eq->prod & (SLAVE_EVENT_EQ_SIZE - 1)];
        if ((!!(s_eqe->owner & 0x80)) ^
            (!!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE))) {
                mlx4_warn(dev, "Master failed to generate an EQE for slave: %d. "
                          "No free EQE on slave events queue\n", slave);
+               spin_unlock_irqrestore(&slave_eq->event_lock, flags);
                return;
        }
 
@@ -183,6 +186,7 @@ static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe)
 
        queue_work(priv->mfunc.master.comm_wq,
                   &priv->mfunc.master.slave_event_work);
+       spin_unlock_irqrestore(&slave_eq->event_lock, flags);
 }
 
 static void mlx4_slave_event(struct mlx4_dev *dev, int slave,
@@ -200,6 +204,196 @@ static void mlx4_slave_event(struct mlx4_dev *dev, int slave,
        slave_event(dev, slave, eqe);
 }
 
+int mlx4_gen_pkey_eqe(struct mlx4_dev *dev, int slave, u8 port)
+{
+       struct mlx4_eqe eqe;
+
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_slave_state *s_slave = &priv->mfunc.master.slave_state[slave];
+
+       if (!s_slave->active)
+               return 0;
+
+       memset(&eqe, 0, sizeof eqe);
+
+       eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT;
+       eqe.subtype = MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE;
+       eqe.event.port_mgmt_change.port = port;
+
+       return mlx4_GEN_EQE(dev, slave, &eqe);
+}
+EXPORT_SYMBOL(mlx4_gen_pkey_eqe);
+
+int mlx4_gen_guid_change_eqe(struct mlx4_dev *dev, int slave, u8 port)
+{
+       struct mlx4_eqe eqe;
+
+       /*don't send if we don't have that slave */
+       if (dev->num_vfs < slave)
+               return 0;
+       memset(&eqe, 0, sizeof eqe);
+
+       eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT;
+       eqe.subtype = MLX4_DEV_PMC_SUBTYPE_GUID_INFO;
+       eqe.event.port_mgmt_change.port = port;
+
+       return mlx4_GEN_EQE(dev, slave, &eqe);
+}
+EXPORT_SYMBOL(mlx4_gen_guid_change_eqe);
+
+int mlx4_gen_port_state_change_eqe(struct mlx4_dev *dev, int slave, u8 port,
+                                  u8 port_subtype_change)
+{
+       struct mlx4_eqe eqe;
+
+       /*don't send if we don't have that slave */
+       if (dev->num_vfs < slave)
+               return 0;
+       memset(&eqe, 0, sizeof eqe);
+
+       eqe.type = MLX4_EVENT_TYPE_PORT_CHANGE;
+       eqe.subtype = port_subtype_change;
+       eqe.event.port_change.port = cpu_to_be32(port << 28);
+
+       mlx4_dbg(dev, "%s: sending: %d to slave: %d on port: %d\n", __func__,
+                port_subtype_change, slave, port);
+       return mlx4_GEN_EQE(dev, slave, &eqe);
+}
+EXPORT_SYMBOL(mlx4_gen_port_state_change_eqe);
+
+enum slave_port_state mlx4_get_slave_port_state(struct mlx4_dev *dev, int slave, u8 port)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_slave_state *s_state = priv->mfunc.master.slave_state;
+       if (slave >= dev->num_slaves || port > MLX4_MAX_PORTS) {
+               pr_err("%s: Error: asking for slave:%d, port:%d\n",
+                      __func__, slave, port);
+               return SLAVE_PORT_DOWN;
+       }
+       return s_state[slave].port_state[port];
+}
+EXPORT_SYMBOL(mlx4_get_slave_port_state);
+
+static int mlx4_set_slave_port_state(struct mlx4_dev *dev, int slave, u8 port,
+                                    enum slave_port_state state)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_slave_state *s_state = priv->mfunc.master.slave_state;
+
+       if (slave >= dev->num_slaves || port > MLX4_MAX_PORTS || port == 0) {
+               pr_err("%s: Error: asking for slave:%d, port:%d\n",
+                      __func__, slave, port);
+               return -1;
+       }
+       s_state[slave].port_state[port] = state;
+
+       return 0;
+}
+
+static void set_all_slave_state(struct mlx4_dev *dev, u8 port, int event)
+{
+       int i;
+       enum slave_port_gen_event gen_event;
+
+       for (i = 0; i < dev->num_slaves; i++)
+               set_and_calc_slave_port_state(dev, i, port, event, &gen_event);
+}
+/**************************************************************************
+       The function gets as input the new event for that port,
+       and according to the previous state changes the slave's port state.
+       The events are:
+               MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN,
+               MLX4_PORT_STATE_DEV_EVENT_PORT_UP
+               MLX4_PORT_STATE_IB_EVENT_GID_VALID
+               MLX4_PORT_STATE_IB_EVENT_GID_INVALID
+***************************************************************************/
+int set_and_calc_slave_port_state(struct mlx4_dev *dev, int slave,
+                                 u8 port, int event,
+                                 enum slave_port_gen_event *gen_event)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_slave_state *ctx = NULL;
+       unsigned long flags;
+       int ret = -1;
+       enum slave_port_state cur_state =
+               mlx4_get_slave_port_state(dev, slave, port);
+
+       *gen_event = SLAVE_PORT_GEN_EVENT_NONE;
+
+       if (slave >= dev->num_slaves || port > MLX4_MAX_PORTS || port == 0) {
+               pr_err("%s: Error: asking for slave:%d, port:%d\n",
+                      __func__, slave, port);
+               return ret;
+       }
+
+       ctx = &priv->mfunc.master.slave_state[slave];
+       spin_lock_irqsave(&ctx->lock, flags);
+
+       mlx4_dbg(dev, "%s: slave: %d, current state: %d new event :%d\n",
+                __func__, slave, cur_state, event);
+
+       switch (cur_state) {
+       case SLAVE_PORT_DOWN:
+               if (MLX4_PORT_STATE_DEV_EVENT_PORT_UP == event)
+                       mlx4_set_slave_port_state(dev, slave, port,
+                                                 SLAVE_PENDING_UP);
+               break;
+       case SLAVE_PENDING_UP:
+               if (MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN == event)
+                       mlx4_set_slave_port_state(dev, slave, port,
+                                                 SLAVE_PORT_DOWN);
+               else if (MLX4_PORT_STATE_IB_PORT_STATE_EVENT_GID_VALID == event) {
+                       mlx4_set_slave_port_state(dev, slave, port,
+                                                 SLAVE_PORT_UP);
+                       *gen_event = SLAVE_PORT_GEN_EVENT_UP;
+               }
+               break;
+       case SLAVE_PORT_UP:
+               if (MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN == event) {
+                       mlx4_set_slave_port_state(dev, slave, port,
+                                                 SLAVE_PORT_DOWN);
+                       *gen_event = SLAVE_PORT_GEN_EVENT_DOWN;
+               } else if (MLX4_PORT_STATE_IB_EVENT_GID_INVALID ==
+                               event) {
+                       mlx4_set_slave_port_state(dev, slave, port,
+                                                 SLAVE_PENDING_UP);
+                       *gen_event = SLAVE_PORT_GEN_EVENT_DOWN;
+               }
+               break;
+       default:
+               pr_err("%s: BUG!!! UNKNOWN state: "
+                      "slave:%d, port:%d\n", __func__, slave, port);
+                       goto out;
+       }
+       ret = mlx4_get_slave_port_state(dev, slave, port);
+       mlx4_dbg(dev, "%s: slave: %d, current state: %d new event"
+                " :%d gen_event: %d\n",
+                __func__, slave, cur_state, event, *gen_event);
+
+out:
+       spin_unlock_irqrestore(&ctx->lock, flags);
+       return ret;
+}
+
+EXPORT_SYMBOL(set_and_calc_slave_port_state);
+
+int mlx4_gen_slaves_port_mgt_ev(struct mlx4_dev *dev, u8 port, int attr)
+{
+       struct mlx4_eqe eqe;
+
+       memset(&eqe, 0, sizeof eqe);
+
+       eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT;
+       eqe.subtype = MLX4_DEV_PMC_SUBTYPE_PORT_INFO;
+       eqe.event.port_mgmt_change.port = port;
+       eqe.event.port_mgmt_change.params.port_info.changed_attr =
+               cpu_to_be32((u32) attr);
+
+       slave_event(dev, ALL_SLAVES, &eqe);
+       return 0;
+}
+EXPORT_SYMBOL(mlx4_gen_slaves_port_mgt_ev);
+
 void mlx4_master_handle_slave_flr(struct work_struct *work)
 {
        struct mlx4_mfunc_master_ctx *master =
@@ -251,6 +445,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
        u32 flr_slave;
        u8 update_slave_state;
        int i;
+       enum slave_port_gen_event gen_event;
 
        while ((eqe = next_eqe_sw(eq))) {
                /*
@@ -347,35 +542,49 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
                case MLX4_EVENT_TYPE_PORT_CHANGE:
                        port = be32_to_cpu(eqe->event.port_change.port) >> 28;
                        if (eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN) {
-                               mlx4_dispatch_event(dev,
-                                                   MLX4_DEV_EVENT_PORT_DOWN,
+                               mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_DOWN,
                                                    port);
                                mlx4_priv(dev)->sense.do_sense_port[port] = 1;
-                               if (mlx4_is_master(dev))
-                                       /*change the state of all slave's port
-                                       * to down:*/
-                                       for (i = 0; i < dev->num_slaves; i++) {
-                                               mlx4_dbg(dev, "%s: Sending "
-                                                        "MLX4_PORT_CHANGE_SUBTYPE_DOWN"
+                               if (!mlx4_is_master(dev))
+                                       break;
+                               for (i = 0; i < dev->num_slaves; i++) {
+                                       if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) {
+                                               if (i == mlx4_master_func_num(dev))
+                                                       continue;
+                                               mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN"
                                                         " to slave: %d, port:%d\n",
                                                         __func__, i, port);
-                                               if (i == dev->caps.function)
-                                                       continue;
                                                mlx4_slave_event(dev, i, eqe);
+                                       } else {  /* IB port */
+                                               set_and_calc_slave_port_state(dev, i, port,
+                                                                             MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN,
+                                                                             &gen_event);
+                                               /*we may be in pending state; if so, do not send a port_down event*/
+                                               if (SLAVE_PORT_GEN_EVENT_DOWN ==  gen_event) {
+                                                       if (i == mlx4_master_func_num(dev))
+                                                               continue;
+                                                       mlx4_slave_event(dev, i, eqe);
+                                               }
                                        }
+                               }
                        } else {
-                               mlx4_dispatch_event(dev,
-                                                   MLX4_DEV_EVENT_PORT_UP,
-                                                   port);
+                               mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_UP, port);
+
                                mlx4_priv(dev)->sense.do_sense_port[port] = 0;
 
-                               if (mlx4_is_master(dev)) {
+                               if (!mlx4_is_master(dev))
+                                       break;
+                               if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
                                        for (i = 0; i < dev->num_slaves; i++) {
-                                               if (i == dev->caps.function)
+                                               if (i == mlx4_master_func_num(dev))
                                                        continue;
                                                mlx4_slave_event(dev, i, eqe);
                                        }
-                               }
+                               else /* IB port */
+                                       /* port-up event will be sent to a slave when the
+                                        * slave's alias-guid is set. This is done in alias_GUID.c
+                                        */
+                                       set_all_slave_state(dev, port, MLX4_DEV_EVENT_PORT_UP);
                        }
                        break;
 
index c69648487321121e390a5cb0bb84d17c24809bde..4f30b99324cf196a46e4c09aed191adfc6323a9e 100644 (file)
@@ -183,7 +183,7 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
 #define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET                0x24
 #define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET                0x28
 #define QUERY_FUNC_CAP_MAX_EQ_OFFSET           0x2c
-#define QUERY_FUNC_CAP_RESERVED_EQ_OFFSET      0X30
+#define QUERY_FUNC_CAP_RESERVED_EQ_OFFSET      0x30
 
 #define QUERY_FUNC_CAP_FMR_FLAG                        0x80
 #define QUERY_FUNC_CAP_FLAG_RDMA               0x40
@@ -194,21 +194,39 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
 #define QUERY_FUNC_CAP_RDMA_PROPS_OFFSET       0x8
 #define QUERY_FUNC_CAP_ETH_PROPS_OFFSET                0xc
 
+#define QUERY_FUNC_CAP_QP0_TUNNEL              0x10
+#define QUERY_FUNC_CAP_QP0_PROXY               0x14
+#define QUERY_FUNC_CAP_QP1_TUNNEL              0x18
+#define QUERY_FUNC_CAP_QP1_PROXY               0x1c
+
 #define QUERY_FUNC_CAP_ETH_PROPS_FORCE_MAC     0x40
 #define QUERY_FUNC_CAP_ETH_PROPS_FORCE_VLAN    0x80
 
 #define QUERY_FUNC_CAP_RDMA_PROPS_FORCE_PHY_WQE_GID 0x80
 
        if (vhcr->op_modifier == 1) {
-               field = vhcr->in_modifier;
-               MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
-
                field = 0;
                /* ensure force vlan and force mac bits are not set */
                MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_ETH_PROPS_OFFSET);
                /* ensure that phy_wqe_gid bit is not set */
                MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_RDMA_PROPS_OFFSET);
 
+               field = vhcr->in_modifier; /* phys-port = logical-port */
+               MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
+
+               /* size is now the QP number */
+               size = dev->phys_caps.base_tunnel_sqpn + 8 * slave + field - 1;
+               MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP0_TUNNEL);
+
+               size += 2;
+               MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP1_TUNNEL);
+
+               size = dev->phys_caps.base_proxy_sqpn + 8 * slave + field - 1;
+               MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP0_PROXY);
+
+               size += 2;
+               MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP1_PROXY);
+
        } else if (vhcr->op_modifier == 0) {
                /* enable rdma and ethernet interfaces */
                field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA);
@@ -253,99 +271,118 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
        return err;
 }
 
-int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, struct mlx4_func_cap *func_cap)
+int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port,
+                       struct mlx4_func_cap *func_cap)
 {
        struct mlx4_cmd_mailbox *mailbox;
        u32                     *outbox;
-       u8                      field;
+       u8                      field, op_modifier;
        u32                     size;
-       int                     i;
        int                     err = 0;
 
+       op_modifier = !!gen_or_port; /* 0 = general, 1 = logical port */
 
        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);
 
-       err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_FUNC_CAP,
+       err = mlx4_cmd_box(dev, 0, mailbox->dma, gen_or_port, op_modifier,
+                          MLX4_CMD_QUERY_FUNC_CAP,
                           MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
        if (err)
                goto out;
 
        outbox = mailbox->buf;
 
-       MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS_OFFSET);
-       if (!(field & (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA))) {
-               mlx4_err(dev, "The host supports neither eth nor rdma interfaces\n");
-               err = -EPROTONOSUPPORT;
-               goto out;
-       }
-       func_cap->flags = field;
+       if (!op_modifier) {
+               MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS_OFFSET);
+               if (!(field & (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA))) {
+                       mlx4_err(dev, "The host supports neither eth nor rdma interfaces\n");
+                       err = -EPROTONOSUPPORT;
+                       goto out;
+               }
+               func_cap->flags = field;
+
+               MLX4_GET(field, outbox, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
+               func_cap->num_ports = field;
 
-       MLX4_GET(field, outbox, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
-       func_cap->num_ports = field;
+               MLX4_GET(size, outbox, QUERY_FUNC_CAP_PF_BHVR_OFFSET);
+               func_cap->pf_context_behaviour = size;
 
-       MLX4_GET(size, outbox, QUERY_FUNC_CAP_PF_BHVR_OFFSET);
-       func_cap->pf_context_behaviour = size;
+               MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
+               func_cap->qp_quota = size & 0xFFFFFF;
 
-       MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
-       func_cap->qp_quota = size & 0xFFFFFF;
+               MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
+               func_cap->srq_quota = size & 0xFFFFFF;
 
-       MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
-       func_cap->srq_quota = size & 0xFFFFFF;
+               MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
+               func_cap->cq_quota = size & 0xFFFFFF;
 
-       MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
-       func_cap->cq_quota = size & 0xFFFFFF;
+               MLX4_GET(size, outbox, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
+               func_cap->max_eq = size & 0xFFFFFF;
 
-       MLX4_GET(size, outbox, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
-       func_cap->max_eq = size & 0xFFFFFF;
+               MLX4_GET(size, outbox, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
+               func_cap->reserved_eq = size & 0xFFFFFF;
 
-       MLX4_GET(size, outbox, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
-       func_cap->reserved_eq = size & 0xFFFFFF;
+               MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
+               func_cap->mpt_quota = size & 0xFFFFFF;
 
-       MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
-       func_cap->mpt_quota = size & 0xFFFFFF;
+               MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
+               func_cap->mtt_quota = size & 0xFFFFFF;
 
-       MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
-       func_cap->mtt_quota = size & 0xFFFFFF;
+               MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
+               func_cap->mcg_quota = size & 0xFFFFFF;
+               goto out;
+       }
 
-       MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
-       func_cap->mcg_quota = size & 0xFFFFFF;
+       /* logical port query */
+       if (gen_or_port > dev->caps.num_ports) {
+               err = -EINVAL;
+               goto out;
+       }
 
-       for (i = 1; i <= func_cap->num_ports; ++i) {
-               err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 1,
-                                  MLX4_CMD_QUERY_FUNC_CAP,
-                                  MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
-               if (err)
+       if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_ETH) {
+               MLX4_GET(field, outbox, QUERY_FUNC_CAP_ETH_PROPS_OFFSET);
+               if (field & QUERY_FUNC_CAP_ETH_PROPS_FORCE_VLAN) {
+                       mlx4_err(dev, "VLAN is enforced on this port\n");
+                       err = -EPROTONOSUPPORT;
                        goto out;
+               }
 
-               if (dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH) {
-                       MLX4_GET(field, outbox, QUERY_FUNC_CAP_ETH_PROPS_OFFSET);
-                       if (field & QUERY_FUNC_CAP_ETH_PROPS_FORCE_VLAN) {
-                               mlx4_err(dev, "VLAN is enforced on this port\n");
-                               err = -EPROTONOSUPPORT;
-                               goto out;
-                       }
-
-                       if (field & QUERY_FUNC_CAP_ETH_PROPS_FORCE_MAC) {
-                               mlx4_err(dev, "Force mac is enabled on this port\n");
-                               err = -EPROTONOSUPPORT;
-                               goto out;
-                       }
-               } else if (dev->caps.port_type[i] == MLX4_PORT_TYPE_IB) {
-                       MLX4_GET(field, outbox, QUERY_FUNC_CAP_RDMA_PROPS_OFFSET);
-                       if (field & QUERY_FUNC_CAP_RDMA_PROPS_FORCE_PHY_WQE_GID) {
-                               mlx4_err(dev, "phy_wqe_gid is "
-                                        "enforced on this ib port\n");
-                               err = -EPROTONOSUPPORT;
-                               goto out;
-                       }
+               if (field & QUERY_FUNC_CAP_ETH_PROPS_FORCE_MAC) {
+                       mlx4_err(dev, "Force mac is enabled on this port\n");
+                       err = -EPROTONOSUPPORT;
+                       goto out;
                }
+       } else if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_IB) {
+               MLX4_GET(field, outbox, QUERY_FUNC_CAP_RDMA_PROPS_OFFSET);
+               if (field & QUERY_FUNC_CAP_RDMA_PROPS_FORCE_PHY_WQE_GID) {
+                       mlx4_err(dev, "phy_wqe_gid is "
+                                "enforced on this ib port\n");
+                       err = -EPROTONOSUPPORT;
+                       goto out;
+               }
+       }
 
-               MLX4_GET(field, outbox, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
-               func_cap->physical_port[i] = field;
+       MLX4_GET(field, outbox, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
+       func_cap->physical_port = field;
+       if (func_cap->physical_port != gen_or_port) {
+               err = -ENOSYS;
+               goto out;
        }
 
+       MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP0_TUNNEL);
+       func_cap->qp0_tunnel_qpn = size & 0xFFFFFF;
+
+       MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP0_PROXY);
+       func_cap->qp0_proxy_qpn = size & 0xFFFFFF;
+
+       MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP1_TUNNEL);
+       func_cap->qp1_tunnel_qpn = size & 0xFFFFFF;
+
+       MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP1_PROXY);
+       func_cap->qp1_proxy_qpn = size & 0xFFFFFF;
+
        /* All other resources are allocated by the master, but we still report
         * 'num' and 'reserved' capabilities as follows:
         * - num remains the maximum resource index
@@ -559,7 +596,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
        dev_cap->max_pds = 1 << (field & 0x3f);
        MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_XRC_OFFSET);
        dev_cap->reserved_xrcds = field >> 4;
-       MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PD_OFFSET);
+       MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_XRC_OFFSET);
        dev_cap->max_xrcds = 1 << (field & 0x1f);
 
        MLX4_GET(size, outbox, QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET);
@@ -715,6 +752,7 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
                               struct mlx4_cmd_mailbox *outbox,
                               struct mlx4_cmd_info *cmd)
 {
+       u64     flags;
        int     err = 0;
        u8      field;
 
@@ -723,6 +761,11 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
        if (err)
                return err;
 
+       /* add port mng change event capability unconditionally to slaves */
+       MLX4_GET(flags, outbox->buf, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
+       flags |= MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV;
+       MLX4_PUT(outbox->buf, flags, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
+
        /* For guests, report Blueflame disabled */
        MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_BF_OFFSET);
        field &= 0x7f;
@@ -1345,6 +1388,19 @@ out:
        return err;
 }
 
+/* for IB-type ports only in SRIOV mode. Checks that both proxy QP0
+ * and real QP0 are active, so that the paravirtualized QP0 is ready
+ * to operate */
+static int check_qp0_state(struct mlx4_dev *dev, int function, int port)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       /* irrelevant if not infiniband */
+       if (priv->mfunc.master.qp0_state[port].proxy_qp0_active &&
+           priv->mfunc.master.qp0_state[port].qp0_active)
+               return 1;
+       return 0;
+}
+
 int mlx4_INIT_PORT_wrapper(struct mlx4_dev *dev, int slave,
                           struct mlx4_vhcr *vhcr,
                           struct mlx4_cmd_mailbox *inbox,
@@ -1358,17 +1414,29 @@ int mlx4_INIT_PORT_wrapper(struct mlx4_dev *dev, int slave,
        if (priv->mfunc.master.slave_state[slave].init_port_mask & (1 << port))
                return 0;
 
-       if (dev->caps.port_mask[port] == MLX4_PORT_TYPE_IB)
-               return -ENODEV;
-
-       /* Enable port only if it was previously disabled */
-       if (!priv->mfunc.master.init_port_ref[port]) {
-               err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
-                              MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
-               if (err)
-                       return err;
+       if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) {
+               /* Enable port only if it was previously disabled */
+               if (!priv->mfunc.master.init_port_ref[port]) {
+                       err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
+                                      MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
+                       if (err)
+                               return err;
+               }
+               priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
+       } else {
+               if (slave == mlx4_master_func_num(dev)) {
+                       if (check_qp0_state(dev, slave, port) &&
+                           !priv->mfunc.master.qp0_state[port].port_active) {
+                               err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
+                                              MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
+                               if (err)
+                                       return err;
+                               priv->mfunc.master.qp0_state[port].port_active = 1;
+                               priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
+                       }
+               } else
+                       priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
        }
-       priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
        ++priv->mfunc.master.init_port_ref[port];
        return 0;
 }
@@ -1441,15 +1509,29 @@ int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
            (1 << port)))
                return 0;
 
-       if (dev->caps.port_mask[port] == MLX4_PORT_TYPE_IB)
-               return -ENODEV;
-       if (priv->mfunc.master.init_port_ref[port] == 1) {
-               err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000,
-                              MLX4_CMD_NATIVE);
-               if (err)
-                       return err;
+       if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) {
+               if (priv->mfunc.master.init_port_ref[port] == 1) {
+                       err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
+                                      1000, MLX4_CMD_NATIVE);
+                       if (err)
+                               return err;
+               }
+               priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
+       } else {
+               /* infiniband port */
+               if (slave == mlx4_master_func_num(dev)) {
+                       if (!priv->mfunc.master.qp0_state[port].qp0_active &&
+                           priv->mfunc.master.qp0_state[port].port_active) {
+                               err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
+                                              1000, MLX4_CMD_NATIVE);
+                               if (err)
+                                       return err;
+                               priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
+                               priv->mfunc.master.qp0_state[port].port_active = 0;
+                       }
+               } else
+                       priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
        }
-       priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
        --priv->mfunc.master.init_port_ref[port];
        return 0;
 }
index 83fcbbf1b1692f831d5219102ca8d792eb1dc394..85abe9c11a2262e199682f33f6ec77ac92062624 100644 (file)
@@ -134,8 +134,12 @@ struct mlx4_func_cap {
        int     max_eq;
        int     reserved_eq;
        int     mcg_quota;
-       u8      physical_port[MLX4_MAX_PORTS + 1];
-       u8      port_flags[MLX4_MAX_PORTS + 1];
+       u32     qp0_tunnel_qpn;
+       u32     qp0_proxy_qpn;
+       u32     qp1_tunnel_qpn;
+       u32     qp1_proxy_qpn;
+       u8      physical_port;
+       u8      port_flags;
 };
 
 struct mlx4_adapter {
@@ -192,7 +196,8 @@ struct mlx4_set_ib_param {
 };
 
 int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap);
-int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, struct mlx4_func_cap *func_cap);
+int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port,
+                       struct mlx4_func_cap *func_cap);
 int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
                                struct mlx4_vhcr *vhcr,
                                struct mlx4_cmd_mailbox *inbox,
index dd6ea942625cf687be0b5dbca4bda71d105a74ac..80df2ab0177c398122725a85c82133788b711198 100644 (file)
@@ -95,8 +95,6 @@ MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, that defines the num"
                                         " Not in use with device managed"
                                         " flow steering");
 
-#define MLX4_VF                                        (1 << 0)
-
 #define HCA_GLOBAL_CAP_MASK            0
 #define PF_CONTEXT_BEHAVIOUR_MASK      0
 
@@ -299,9 +297,12 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
        mlx4_dbg(dev, "Steering mode is: %s\n",
                 mlx4_steering_mode_str(dev->caps.steering_mode));
 
-       /* Sense port always allowed on supported devices for ConnectX1 and 2 */
-       if (dev->pdev->device != 0x1003)
+       /* Sense port always allowed on supported devices for ConnectX-1 and -2 */
+       if (mlx4_priv(dev)->pci_dev_data & MLX4_PCI_DEV_FORCE_SENSE_PORT)
                dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;
+       /* Don't do sense port on multifunction devices (for now at least) */
+       if (mlx4_is_mfunc(dev))
+               dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;
 
        dev->caps.log_num_macs  = log_num_mac;
        dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS;
@@ -384,6 +385,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
                dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] +
                dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH];
 
+       dev->caps.sqp_demux = (mlx4_is_master(dev)) ? MLX4_MAX_NUM_SLAVES : 0;
        return 0;
 }
 /*The function checks if there are live vf, return the num of them*/
@@ -409,20 +411,54 @@ static int mlx4_how_many_lives_vf(struct mlx4_dev *dev)
 int mlx4_get_parav_qkey(struct mlx4_dev *dev, u32 qpn, u32 *qkey)
 {
        u32 qk = MLX4_RESERVED_QKEY_BASE;
-       if (qpn >= dev->caps.base_tunnel_sqpn + 8 * MLX4_MFUNC_MAX ||
-           qpn < dev->caps.sqp_start)
+
+       if (qpn >= dev->phys_caps.base_tunnel_sqpn + 8 * MLX4_MFUNC_MAX ||
+           qpn < dev->phys_caps.base_proxy_sqpn)
                return -EINVAL;
 
-       if (qpn >= dev->caps.base_tunnel_sqpn)
+       if (qpn >= dev->phys_caps.base_tunnel_sqpn)
                /* tunnel qp */
-               qk += qpn - dev->caps.base_tunnel_sqpn;
+               qk += qpn - dev->phys_caps.base_tunnel_sqpn;
        else
-               qk += qpn - dev->caps.sqp_start;
+               qk += qpn - dev->phys_caps.base_proxy_sqpn;
        *qkey = qk;
        return 0;
 }
 EXPORT_SYMBOL(mlx4_get_parav_qkey);
 
+void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port, int i, int val)
+{
+       struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);
+
+       if (!mlx4_is_master(dev))
+               return;
+
+       priv->virt2phys_pkey[slave][port - 1][i] = val;
+}
+EXPORT_SYMBOL(mlx4_sync_pkey_table);
+
+void mlx4_put_slave_node_guid(struct mlx4_dev *dev, int slave, __be64 guid)
+{
+       struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);
+
+       if (!mlx4_is_master(dev))
+               return;
+
+       priv->slave_node_guids[slave] = guid;
+}
+EXPORT_SYMBOL(mlx4_put_slave_node_guid);
+
+__be64 mlx4_get_slave_node_guid(struct mlx4_dev *dev, int slave)
+{
+       struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);
+
+       if (!mlx4_is_master(dev))
+               return 0;
+
+       return priv->slave_node_guids[slave];
+}
+EXPORT_SYMBOL(mlx4_get_slave_node_guid);
+
 int mlx4_is_slave_active(struct mlx4_dev *dev, int slave)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
@@ -493,9 +529,10 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
        }
 
        memset(&func_cap, 0, sizeof(func_cap));
-       err = mlx4_QUERY_FUNC_CAP(dev, &func_cap);
+       err = mlx4_QUERY_FUNC_CAP(dev, 0, &func_cap);
        if (err) {
-               mlx4_err(dev, "QUERY_FUNC_CAP command failed, aborting.\n");
+               mlx4_err(dev, "QUERY_FUNC_CAP general command failed, aborting (%d).\n",
+                         err);
                return err;
        }
 
@@ -523,12 +560,33 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
                return -ENODEV;
        }
 
+       dev->caps.qp0_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL);
+       dev->caps.qp0_proxy = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL);
+       dev->caps.qp1_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL);
+       dev->caps.qp1_proxy = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL);
+
+       if (!dev->caps.qp0_tunnel || !dev->caps.qp0_proxy ||
+           !dev->caps.qp1_tunnel || !dev->caps.qp1_proxy) {
+               err = -ENOMEM;
+               goto err_mem;
+       }
+
        for (i = 1; i <= dev->caps.num_ports; ++i) {
+               err = mlx4_QUERY_FUNC_CAP(dev, (u32) i, &func_cap);
+               if (err) {
+                       mlx4_err(dev, "QUERY_FUNC_CAP port command failed for"
+                                " port %d, aborting (%d).\n", i, err);
+                       goto err_mem;
+               }
+               dev->caps.qp0_tunnel[i - 1] = func_cap.qp0_tunnel_qpn;
+               dev->caps.qp0_proxy[i - 1] = func_cap.qp0_proxy_qpn;
+               dev->caps.qp1_tunnel[i - 1] = func_cap.qp1_tunnel_qpn;
+               dev->caps.qp1_proxy[i - 1] = func_cap.qp1_proxy_qpn;
                dev->caps.port_mask[i] = dev->caps.port_type[i];
                if (mlx4_get_slave_pkey_gid_tbl_len(dev, i,
                                                    &dev->caps.gid_table_len[i],
                                                    &dev->caps.pkey_table_len[i]))
-                       return -ENODEV;
+                       goto err_mem;
        }
 
        if (dev->caps.uar_page_size * (dev->caps.num_uars -
@@ -538,10 +596,20 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
                         "PCI resource 2 size of 0x%llx, aborting.\n",
                         dev->caps.uar_page_size * dev->caps.num_uars,
                         (unsigned long long) pci_resource_len(dev->pdev, 2));
-               return -ENODEV;
+               goto err_mem;
        }
 
        return 0;
+
+err_mem:
+       kfree(dev->caps.qp0_tunnel);
+       kfree(dev->caps.qp0_proxy);
+       kfree(dev->caps.qp1_tunnel);
+       kfree(dev->caps.qp1_proxy);
+       dev->caps.qp0_tunnel = dev->caps.qp0_proxy =
+               dev->caps.qp1_tunnel = dev->caps.qp1_proxy = NULL;
+
+       return err;
 }
 
 /*
@@ -1092,10 +1160,10 @@ static void mlx4_slave_exit(struct mlx4_dev *dev)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
 
-       down(&priv->cmd.slave_sem);
+       mutex_lock(&priv->cmd.slave_cmd_mutex);
        if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_TIME))
                mlx4_warn(dev, "Failed to close slave function.\n");
-       up(&priv->cmd.slave_sem);
+       mutex_unlock(&priv->cmd.slave_cmd_mutex);
 }
 
 static int map_bf_area(struct mlx4_dev *dev)
@@ -1147,7 +1215,7 @@ static int mlx4_init_slave(struct mlx4_dev *dev)
        u32 slave_read;
        u32 cmd_channel_ver;
 
-       down(&priv->cmd.slave_sem);
+       mutex_lock(&priv->cmd.slave_cmd_mutex);
        priv->cmd.max_cmds = 1;
        mlx4_warn(dev, "Sending reset\n");
        ret_from_reset = mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0,
@@ -1196,12 +1264,13 @@ static int mlx4_init_slave(struct mlx4_dev *dev)
                goto err;
        if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_EN, dma, MLX4_COMM_TIME))
                goto err;
-       up(&priv->cmd.slave_sem);
+
+       mutex_unlock(&priv->cmd.slave_cmd_mutex);
        return 0;
 
 err:
        mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, 0);
-       up(&priv->cmd.slave_sem);
+       mutex_unlock(&priv->cmd.slave_cmd_mutex);
        return -EIO;
 }
 
@@ -1848,7 +1917,7 @@ static void mlx4_free_ownership(struct mlx4_dev *dev)
        iounmap(owner);
 }
 
-static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
 {
        struct mlx4_priv *priv;
        struct mlx4_dev *dev;
@@ -1871,12 +1940,11 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
        /*
         * Check for BARs.
         */
-       if (((id == NULL) || !(id->driver_data & MLX4_VF)) &&
+       if (!(pci_dev_data & MLX4_PCI_DEV_IS_VF) &&
            !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
                dev_err(&pdev->dev, "Missing DCS, aborting."
-                       "(id == 0X%p, id->driver_data: 0x%lx,"
-                       " pci_resource_flags(pdev, 0):0x%lx)\n", id,
-                       id ? id->driver_data : 0, pci_resource_flags(pdev, 0));
+                       "(driver_data: 0x%x, pci_resource_flags(pdev, 0):0x%lx)\n",
+                       pci_dev_data, pci_resource_flags(pdev, 0));
                err = -ENODEV;
                goto err_disable_pdev;
        }
@@ -1941,7 +2009,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 
        dev->rev_id = pdev->revision;
        /* Detect if this device is a virtual function */
-       if (id && id->driver_data & MLX4_VF) {
+       if (pci_dev_data & MLX4_PCI_DEV_IS_VF) {
                /* When acting as pf, we normally skip vfs unless explicitly
                 * requested to probe them. */
                if (num_vfs && extended_func_num(pdev) > probe_vf) {
@@ -1969,12 +2037,11 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
                }
 
                if (num_vfs) {
-                       mlx4_warn(dev, "Enabling sriov with:%d vfs\n", num_vfs);
+                       mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", num_vfs);
                        err = pci_enable_sriov(pdev, num_vfs);
                        if (err) {
-                               mlx4_err(dev, "Failed to enable sriov,"
-                                        "continuing without sriov enabled"
-                                        " (err = %d).\n", err);
+                               mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d).\n",
+                                        err);
                                err = 0;
                        } else {
                                mlx4_warn(dev, "Running in master mode\n");
@@ -2089,6 +2156,7 @@ slave_start:
        mlx4_sense_init(dev);
        mlx4_start_sense(dev);
 
+       priv->pci_dev_data = pci_dev_data;
        pci_set_drvdata(pdev, dev);
 
        return 0;
@@ -2158,7 +2226,7 @@ static int __devinit mlx4_init_one(struct pci_dev *pdev,
 {
        printk_once(KERN_INFO "%s", mlx4_version);
 
-       return __mlx4_init_one(pdev, id);
+       return __mlx4_init_one(pdev, id->driver_data);
 }
 
 static void mlx4_remove_one(struct pci_dev *pdev)
@@ -2217,12 +2285,18 @@ static void mlx4_remove_one(struct pci_dev *pdev)
                if (dev->flags & MLX4_FLAG_MSI_X)
                        pci_disable_msix(pdev);
                if (dev->flags & MLX4_FLAG_SRIOV) {
-                       mlx4_warn(dev, "Disabling sriov\n");
+                       mlx4_warn(dev, "Disabling SR-IOV\n");
                        pci_disable_sriov(pdev);
                }
 
                if (!mlx4_is_slave(dev))
                        mlx4_free_ownership(dev);
+
+               kfree(dev->caps.qp0_tunnel);
+               kfree(dev->caps.qp0_proxy);
+               kfree(dev->caps.qp1_tunnel);
+               kfree(dev->caps.qp1_proxy);
+
                kfree(priv);
                pci_release_regions(pdev);
                pci_disable_device(pdev);
@@ -2232,41 +2306,46 @@ static void mlx4_remove_one(struct pci_dev *pdev)
 
 int mlx4_restart_one(struct pci_dev *pdev)
 {
+       struct mlx4_dev  *dev  = pci_get_drvdata(pdev);
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       int               pci_dev_data;
+
+       pci_dev_data = priv->pci_dev_data;
        mlx4_remove_one(pdev);
-       return __mlx4_init_one(pdev, NULL);
+       return __mlx4_init_one(pdev, pci_dev_data);
 }
 
 static DEFINE_PCI_DEVICE_TABLE(mlx4_pci_table) = {
        /* MT25408 "Hermon" SDR */
-       { PCI_VDEVICE(MELLANOX, 0x6340), 0 },
+       { PCI_VDEVICE(MELLANOX, 0x6340), MLX4_PCI_DEV_FORCE_SENSE_PORT },
        /* MT25408 "Hermon" DDR */
-       { PCI_VDEVICE(MELLANOX, 0x634a), 0 },
+       { PCI_VDEVICE(MELLANOX, 0x634a), MLX4_PCI_DEV_FORCE_SENSE_PORT },
        /* MT25408 "Hermon" QDR */
-       { PCI_VDEVICE(MELLANOX, 0x6354), 0 },
+       { PCI_VDEVICE(MELLANOX, 0x6354), MLX4_PCI_DEV_FORCE_SENSE_PORT },
        /* MT25408 "Hermon" DDR PCIe gen2 */
-       { PCI_VDEVICE(MELLANOX, 0x6732), 0 },
+       { PCI_VDEVICE(MELLANOX, 0x6732), MLX4_PCI_DEV_FORCE_SENSE_PORT },
        /* MT25408 "Hermon" QDR PCIe gen2 */
-       { PCI_VDEVICE(MELLANOX, 0x673c), 0 },
+       { PCI_VDEVICE(MELLANOX, 0x673c), MLX4_PCI_DEV_FORCE_SENSE_PORT },
        /* MT25408 "Hermon" EN 10GigE */
-       { PCI_VDEVICE(MELLANOX, 0x6368), 0 },
+       { PCI_VDEVICE(MELLANOX, 0x6368), MLX4_PCI_DEV_FORCE_SENSE_PORT },
        /* MT25408 "Hermon" EN 10GigE PCIe gen2 */
-       { PCI_VDEVICE(MELLANOX, 0x6750), 0 },
+       { PCI_VDEVICE(MELLANOX, 0x6750), MLX4_PCI_DEV_FORCE_SENSE_PORT },
        /* MT25458 ConnectX EN 10GBASE-T 10GigE */
-       { PCI_VDEVICE(MELLANOX, 0x6372), 0 },
+       { PCI_VDEVICE(MELLANOX, 0x6372), MLX4_PCI_DEV_FORCE_SENSE_PORT },
        /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */
-       { PCI_VDEVICE(MELLANOX, 0x675a), 0 },
+       { PCI_VDEVICE(MELLANOX, 0x675a), MLX4_PCI_DEV_FORCE_SENSE_PORT },
        /* MT26468 ConnectX EN 10GigE PCIe gen2*/
-       { PCI_VDEVICE(MELLANOX, 0x6764), 0 },
+       { PCI_VDEVICE(MELLANOX, 0x6764), MLX4_PCI_DEV_FORCE_SENSE_PORT },
        /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
-       { PCI_VDEVICE(MELLANOX, 0x6746), 0 },
+       { PCI_VDEVICE(MELLANOX, 0x6746), MLX4_PCI_DEV_FORCE_SENSE_PORT },
        /* MT26478 ConnectX2 40GigE PCIe gen2 */
-       { PCI_VDEVICE(MELLANOX, 0x676e), 0 },
+       { PCI_VDEVICE(MELLANOX, 0x676e), MLX4_PCI_DEV_FORCE_SENSE_PORT },
        /* MT25400 Family [ConnectX-2 Virtual Function] */
-       { PCI_VDEVICE(MELLANOX, 0x1002), MLX4_VF },
+       { PCI_VDEVICE(MELLANOX, 0x1002), MLX4_PCI_DEV_IS_VF },
        /* MT27500 Family [ConnectX-3] */
        { PCI_VDEVICE(MELLANOX, 0x1003), 0 },
        /* MT27500 Family [ConnectX-3 Virtual Function] */
-       { PCI_VDEVICE(MELLANOX, 0x1004), MLX4_VF },
+       { PCI_VDEVICE(MELLANOX, 0x1004), MLX4_PCI_DEV_IS_VF },
        { PCI_VDEVICE(MELLANOX, 0x1005), 0 }, /* MT27510 Family */
        { PCI_VDEVICE(MELLANOX, 0x1006), 0 }, /* MT27511 Family */
        { PCI_VDEVICE(MELLANOX, 0x1007), 0 }, /* MT27520 Family */
@@ -2295,7 +2374,7 @@ static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
 
 static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev)
 {
-       int ret = __mlx4_init_one(pdev, NULL);
+       int ret = __mlx4_init_one(pdev, 0);
 
        return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
 }
index dba69d98734a29b9a10038e22eff8e93714147ed..1cf42036d7bbe5fec8831497032dbb245e9fd605 100644 (file)
@@ -452,6 +452,7 @@ struct mlx4_slave_state {
        /*initialized via the kzalloc*/
        u8 is_slave_going_down;
        u32 cookie;
+       enum slave_port_state port_state[MLX4_MAX_PORTS + 1];
 };
 
 struct slave_list {
@@ -472,6 +473,7 @@ struct mlx4_slave_event_eq {
        u32 eqn;
        u32 cons;
        u32 prod;
+       spinlock_t event_lock;
        struct mlx4_eqe event_eqe[SLAVE_EVENT_EQ_SIZE];
 };
 
@@ -511,9 +513,9 @@ struct mlx4_cmd {
        struct pci_pool        *pool;
        void __iomem           *hcr;
        struct mutex            hcr_mutex;
+       struct mutex            slave_cmd_mutex;
        struct semaphore        poll_sem;
        struct semaphore        event_sem;
-       struct semaphore        slave_sem;
        int                     max_cmds;
        spinlock_t              context_lock;
        int                     free_head;
@@ -766,6 +768,11 @@ struct _rule_hw {
        };
 };
 
+enum {
+       MLX4_PCI_DEV_IS_VF              = 1 << 0,
+       MLX4_PCI_DEV_FORCE_SENSE_PORT   = 1 << 1,
+};
+
 struct mlx4_priv {
        struct mlx4_dev         dev;
 
@@ -773,6 +780,8 @@ struct mlx4_priv {
        struct list_head        ctx_list;
        spinlock_t              ctx_lock;
 
+       int                     pci_dev_data;
+
        struct list_head        pgdir_list;
        struct mutex            pgdir_mutex;
 
@@ -807,6 +816,9 @@ struct mlx4_priv {
        struct io_mapping       *bf_mapping;
        int                     reserved_mtts;
        int                     fs_hash_mode;
+       u8 virt2phys_pkey[MLX4_MFUNC_MAX][MLX4_MAX_PORTS][MLX4_MAX_PORT_PKEYS];
+       __be64                  slave_node_guids[MLX4_MFUNC_MAX];
+
 };
 
 static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev)
@@ -1011,16 +1023,61 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
                             struct mlx4_cmd_mailbox *inbox,
                             struct mlx4_cmd_mailbox *outbox,
                             struct mlx4_cmd_info *cmd);
+int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
+                             struct mlx4_vhcr *vhcr,
+                             struct mlx4_cmd_mailbox *inbox,
+                             struct mlx4_cmd_mailbox *outbox,
+                             struct mlx4_cmd_info *cmd);
 int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
                             struct mlx4_vhcr *vhcr,
                             struct mlx4_cmd_mailbox *inbox,
                             struct mlx4_cmd_mailbox *outbox,
                             struct mlx4_cmd_info *cmd);
+int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
+                           struct mlx4_vhcr *vhcr,
+                           struct mlx4_cmd_mailbox *inbox,
+                           struct mlx4_cmd_mailbox *outbox,
+                           struct mlx4_cmd_info *cmd);
+int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
+                           struct mlx4_vhcr *vhcr,
+                           struct mlx4_cmd_mailbox *inbox,
+                           struct mlx4_cmd_mailbox *outbox,
+                           struct mlx4_cmd_info *cmd);
+int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
+                             struct mlx4_vhcr *vhcr,
+                             struct mlx4_cmd_mailbox *inbox,
+                             struct mlx4_cmd_mailbox *outbox,
+                             struct mlx4_cmd_info *cmd);
+int mlx4_2ERR_QP_wrapper(struct mlx4_dev *dev, int slave,
+                        struct mlx4_vhcr *vhcr,
+                        struct mlx4_cmd_mailbox *inbox,
+                        struct mlx4_cmd_mailbox *outbox,
+                        struct mlx4_cmd_info *cmd);
+int mlx4_RTS2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
+                           struct mlx4_vhcr *vhcr,
+                           struct mlx4_cmd_mailbox *inbox,
+                           struct mlx4_cmd_mailbox *outbox,
+                           struct mlx4_cmd_info *cmd);
+int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
+                           struct mlx4_vhcr *vhcr,
+                           struct mlx4_cmd_mailbox *inbox,
+                           struct mlx4_cmd_mailbox *outbox,
+                           struct mlx4_cmd_info *cmd);
+int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
+                           struct mlx4_vhcr *vhcr,
+                           struct mlx4_cmd_mailbox *inbox,
+                           struct mlx4_cmd_mailbox *outbox,
+                           struct mlx4_cmd_info *cmd);
 int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
                         struct mlx4_vhcr *vhcr,
                         struct mlx4_cmd_mailbox *inbox,
                         struct mlx4_cmd_mailbox *outbox,
                         struct mlx4_cmd_info *cmd);
+int mlx4_QUERY_QP_wrapper(struct mlx4_dev *dev, int slave,
+                         struct mlx4_vhcr *vhcr,
+                         struct mlx4_cmd_mailbox *inbox,
+                         struct mlx4_cmd_mailbox *outbox,
+                         struct mlx4_cmd_info *cmd);
 
 int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe);
 
index e36dd0f2fa733fdadc1573fa991395cac25057fe..4c51b05efa284d7d921650481678c5e4a1f1a324 100644 (file)
@@ -732,6 +732,16 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
                new_cap_mask = ((__be32 *) inbox->buf)[1];
        }
 
+       /* slave may not set the IS_SM capability for the port */
+       if (slave != mlx4_master_func_num(dev) &&
+           (be32_to_cpu(new_cap_mask) & MLX4_PORT_CAP_IS_SM))
+               return -EINVAL;
+
+       /* No DEV_MGMT in multifunc mode */
+       if (mlx4_is_mfunc(dev) &&
+           (be32_to_cpu(new_cap_mask) & MLX4_PORT_CAP_DEV_MGMT_SUP))
+               return -EINVAL;
+
        agg_cap_mask = 0;
        slave_cap_mask =
                priv->mfunc.master.slave_state[slave].ib_cap_mask[port];
index fb2b36759cbf657d66dfa7c245d49306d5a06c58..81e2abe07bbbf656689ba327ab0d4d22368961df 100644 (file)
@@ -67,10 +67,18 @@ void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type)
                complete(&qp->free);
 }
 
-static int is_qp0(struct mlx4_dev *dev, struct mlx4_qp *qp)
+/* used for INIT/CLOSE port logic */
+static int is_master_qp0(struct mlx4_dev *dev, struct mlx4_qp *qp, int *real_qp0, int *proxy_qp0)
 {
-       return qp->qpn >= dev->caps.sqp_start &&
-               qp->qpn <= dev->caps.sqp_start + 1;
+       /* this procedure is called after we already know we are on the master */
+       /* qp0 is either the proxy qp0, or the real qp0 */
+       u32 pf_proxy_offset = dev->phys_caps.base_proxy_sqpn + 8 * mlx4_master_func_num(dev);
+       *proxy_qp0 = qp->qpn >= pf_proxy_offset && qp->qpn <= pf_proxy_offset + 1;
+
+       *real_qp0 = qp->qpn >= dev->phys_caps.base_sqpn &&
+               qp->qpn <= dev->phys_caps.base_sqpn + 1;
+
+       return *real_qp0 || *proxy_qp0;
 }
 
 static int __mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
@@ -122,6 +130,8 @@ static int __mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_cmd_mailbox *mailbox;
        int ret = 0;
+       int real_qp0 = 0;
+       int proxy_qp0 = 0;
        u8 port;
 
        if (cur_state >= MLX4_QP_NUM_STATE || new_state >= MLX4_QP_NUM_STATE ||
@@ -133,9 +143,12 @@ static int __mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
                        MLX4_CMD_2RST_QP, MLX4_CMD_TIME_CLASS_A, native);
                if (mlx4_is_master(dev) && cur_state != MLX4_QP_STATE_ERR &&
                    cur_state != MLX4_QP_STATE_RST &&
-                   is_qp0(dev, qp)) {
+                   is_master_qp0(dev, qp, &real_qp0, &proxy_qp0)) {
                        port = (qp->qpn & 1) + 1;
-                       priv->mfunc.master.qp0_state[port].qp0_active = 0;
+                       if (proxy_qp0)
+                               priv->mfunc.master.qp0_state[port].proxy_qp0_active = 0;
+                       else
+                               priv->mfunc.master.qp0_state[port].qp0_active = 0;
                }
                return ret;
        }
@@ -162,6 +175,23 @@ static int __mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
                       new_state == MLX4_QP_STATE_RST ? 2 : 0,
                       op[cur_state][new_state], MLX4_CMD_TIME_CLASS_C, native);
 
+       if (mlx4_is_master(dev) && is_master_qp0(dev, qp, &real_qp0, &proxy_qp0)) {
+               port = (qp->qpn & 1) + 1;
+               if (cur_state != MLX4_QP_STATE_ERR &&
+                   cur_state != MLX4_QP_STATE_RST &&
+                   new_state == MLX4_QP_STATE_ERR) {
+                       if (proxy_qp0)
+                               priv->mfunc.master.qp0_state[port].proxy_qp0_active = 0;
+                       else
+                               priv->mfunc.master.qp0_state[port].qp0_active = 0;
+               } else if (new_state == MLX4_QP_STATE_RTR) {
+                       if (proxy_qp0)
+                               priv->mfunc.master.qp0_state[port].proxy_qp0_active = 1;
+                       else
+                               priv->mfunc.master.qp0_state[port].qp0_active = 1;
+               }
+       }
+
        mlx4_free_cmd_mailbox(dev, mailbox);
        return ret;
 }
@@ -392,6 +422,7 @@ int mlx4_init_qp_table(struct mlx4_dev *dev)
        struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
        int err;
        int reserved_from_top = 0;
+       int k;
 
        spin_lock_init(&qp_table->lock);
        INIT_RADIX_TREE(&dev->qp_table_tree, GFP_ATOMIC);
@@ -406,7 +437,7 @@ int mlx4_init_qp_table(struct mlx4_dev *dev)
         * We also reserve the MSB of the 24-bit QP number to indicate
         * that a QP is an XRC QP.
         */
-       dev->caps.sqp_start =
+       dev->phys_caps.base_sqpn =
                ALIGN(dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 8);
 
        {
@@ -437,13 +468,66 @@ int mlx4_init_qp_table(struct mlx4_dev *dev)
 
        }
 
+       /* Reserve 8 real SQPs in both native and SRIOV modes.
+       * In addition, in SRIOV mode, reserve 8 proxy SQPs per function
+       * (for all PFs and VFs), and 8 corresponding tunnel QPs.
+       * Each proxy SQP works opposite its own tunnel QP.
+       *
+       * The QPs are arranged as follows:
+       * a. 8 real SQPs
+       * b. All the proxy SQPs (8 per function)
+       * c. All the tunnel QPs (8 per function)
+       */
+
        err = mlx4_bitmap_init(&qp_table->bitmap, dev->caps.num_qps,
-                              (1 << 23) - 1, dev->caps.sqp_start + 8,
+                              (1 << 23) - 1, dev->phys_caps.base_sqpn + 8 +
+                              16 * MLX4_MFUNC_MAX * !!mlx4_is_master(dev),
                               reserved_from_top);
        if (err)
                return err;
 
-       return mlx4_CONF_SPECIAL_QP(dev, dev->caps.sqp_start);
+       if (mlx4_is_mfunc(dev)) {
+               /* for PPF use */
+               dev->phys_caps.base_proxy_sqpn = dev->phys_caps.base_sqpn + 8;
+               dev->phys_caps.base_tunnel_sqpn = dev->phys_caps.base_sqpn + 8 + 8 * MLX4_MFUNC_MAX;
+
+               /* In mfunc, calculate proxy and tunnel qp offsets for the PF here,
+                * since the PF does not call mlx4_slave_caps */
+               dev->caps.qp0_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL);
+               dev->caps.qp0_proxy = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL);
+               dev->caps.qp1_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL);
+               dev->caps.qp1_proxy = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL);
+
+               if (!dev->caps.qp0_tunnel || !dev->caps.qp0_proxy ||
+                   !dev->caps.qp1_tunnel || !dev->caps.qp1_proxy) {
+                       err = -ENOMEM;
+                       goto err_mem;
+               }
+
+               for (k = 0; k < dev->caps.num_ports; k++) {
+                       dev->caps.qp0_proxy[k] = dev->phys_caps.base_proxy_sqpn +
+                               8 * mlx4_master_func_num(dev) + k;
+                       dev->caps.qp0_tunnel[k] = dev->caps.qp0_proxy[k] + 8 * MLX4_MFUNC_MAX;
+                       dev->caps.qp1_proxy[k] = dev->phys_caps.base_proxy_sqpn +
+                               8 * mlx4_master_func_num(dev) + MLX4_MAX_PORTS + k;
+                       dev->caps.qp1_tunnel[k] = dev->caps.qp1_proxy[k] + 8 * MLX4_MFUNC_MAX;
+               }
+       }
+
+
+       err = mlx4_CONF_SPECIAL_QP(dev, dev->phys_caps.base_sqpn);
+       if (err)
+               goto err_mem;
+       return 0;
+
+err_mem:
+       kfree(dev->caps.qp0_tunnel);
+       kfree(dev->caps.qp0_proxy);
+       kfree(dev->caps.qp1_tunnel);
+       kfree(dev->caps.qp1_proxy);
+       dev->caps.qp0_tunnel = dev->caps.qp0_proxy =
+               dev->caps.qp1_tunnel = dev->caps.qp1_proxy = NULL;
+       return err;
 }
 
 void mlx4_cleanup_qp_table(struct mlx4_dev *dev)
index 293c9e820c49b5d470dce7eda95f2252b94d4251..ba6506ff4abb4553ec498ba93807c8cd1772e52a 100644 (file)
@@ -242,6 +242,15 @@ static int res_tracker_insert(struct rb_root *root, struct res_common *res)
        return 0;
 }
 
+enum qp_transition {
+       QP_TRANS_INIT2RTR,
+       QP_TRANS_RTR2RTS,
+       QP_TRANS_RTS2RTS,
+       QP_TRANS_SQERR2RTS,
+       QP_TRANS_SQD2SQD,
+       QP_TRANS_SQD2RTS
+};
+
 /* For Debug uses */
 static const char *ResourceType(enum mlx4_resource rt)
 {
@@ -308,14 +317,41 @@ void mlx4_free_resource_tracker(struct mlx4_dev *dev,
        }
 }
 
-static void update_ud_gid(struct mlx4_dev *dev,
-                         struct mlx4_qp_context *qp_ctx, u8 slave)
+static void update_pkey_index(struct mlx4_dev *dev, int slave,
+                             struct mlx4_cmd_mailbox *inbox)
 {
-       u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
+       u8 sched = *(u8 *)(inbox->buf + 64);
+       u8 orig_index = *(u8 *)(inbox->buf + 35);
+       u8 new_index;
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       int port;
+
+       port = (sched >> 6 & 1) + 1;
+
+       new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
+       *(u8 *)(inbox->buf + 35) = new_index;
+
+       mlx4_dbg(dev, "port = %d, orig pkey index = %d, "
+                "new pkey index = %d\n", port, orig_index, new_index);
+}
+
+static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
+                      u8 slave)
+{
+       struct mlx4_qp_context  *qp_ctx = inbox->buf + 8;
+       enum mlx4_qp_optpar     optpar = be32_to_cpu(*(__be32 *) inbox->buf);
+       u32                     ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
 
        if (MLX4_QP_ST_UD == ts)
                qp_ctx->pri_path.mgid_index = 0x80 | slave;
 
+       if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_UC == ts) {
+               if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH)
+                       qp_ctx->pri_path.mgid_index = slave & 0x7F;
+               if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH)
+                       qp_ctx->alt_path.mgid_index = slave & 0x7F;
+       }
+
        mlx4_dbg(dev, "slave %d, new gid index: 0x%x ",
                slave, qp_ctx->pri_path.mgid_index);
 }
@@ -360,8 +396,6 @@ static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
 
        r->from_state = r->state;
        r->state = RES_ANY_BUSY;
-       mlx4_dbg(dev, "res %s id 0x%llx to busy\n",
-                ResourceType(type), r->res_id);
 
        if (res)
                *((struct res_common **)res) = r;
@@ -1105,7 +1139,13 @@ static void res_end_move(struct mlx4_dev *dev, int slave,
 
 static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
 {
-       return mlx4_is_qp_reserved(dev, qpn);
+       return mlx4_is_qp_reserved(dev, qpn) &&
+               (mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
+}
+
+static int fw_reserved(struct mlx4_dev *dev, int qpn)
+{
+       return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
 }
 
 static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
@@ -1145,7 +1185,7 @@ static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                if (err)
                        return err;
 
-               if (!valid_reserved(dev, slave, qpn)) {
+               if (!fw_reserved(dev, qpn)) {
                        err = __mlx4_qp_alloc_icm(dev, qpn);
                        if (err) {
                                res_abort_move(dev, slave, RES_QP, qpn);
@@ -1498,7 +1538,7 @@ static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                if (err)
                        return err;
 
-               if (!valid_reserved(dev, slave, qpn))
+               if (!fw_reserved(dev, qpn))
                        __mlx4_qp_free_icm(dev, qpn);
 
                res_end_move(dev, slave, RES_QP, qpn);
@@ -1938,6 +1978,19 @@ static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
        return be32_to_cpu(qpc->srqn) & 0x1ffffff;
 }
 
+static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
+                                 struct mlx4_qp_context *context)
+{
+       u32 qpn = vhcr->in_modifier & 0xffffff;
+       u32 qkey = 0;
+
+       if (mlx4_get_parav_qkey(dev, qpn, &qkey))
+               return;
+
+       /* adjust qkey in qp context */
+       context->qkey = cpu_to_be32(qkey);
+}
+
 int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
                             struct mlx4_vhcr *vhcr,
                             struct mlx4_cmd_mailbox *inbox,
@@ -1990,6 +2043,8 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
                        goto ex_put_scq;
        }
 
+       adjust_proxy_tun_qkey(dev, vhcr, qpc);
+       update_pkey_index(dev, slave, inbox);
        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
        if (err)
                goto ex_put_srq;
@@ -2135,6 +2190,48 @@ static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
        return err;
 }
 
+static int verify_qp_parameters(struct mlx4_dev *dev,
+                               struct mlx4_cmd_mailbox *inbox,
+                               enum qp_transition transition, u8 slave)
+{
+       u32                     qp_type;
+       struct mlx4_qp_context  *qp_ctx;
+       enum mlx4_qp_optpar     optpar;
+
+       qp_ctx  = inbox->buf + 8;
+       qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
+       optpar  = be32_to_cpu(*(__be32 *) inbox->buf);
+
+       switch (qp_type) {
+       case MLX4_QP_ST_RC:
+       case MLX4_QP_ST_UC:
+               switch (transition) {
+               case QP_TRANS_INIT2RTR:
+               case QP_TRANS_RTR2RTS:
+               case QP_TRANS_RTS2RTS:
+               case QP_TRANS_SQD2SQD:
+               case QP_TRANS_SQD2RTS:
+                       if (slave != mlx4_master_func_num(dev))
+                               /* slaves have only gid index 0 */
+                               if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH)
+                                       if (qp_ctx->pri_path.mgid_index)
+                                               return -EINVAL;
+                               if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH)
+                                       if (qp_ctx->alt_path.mgid_index)
+                                               return -EINVAL;
+                       break;
+               default:
+                       break;
+               }
+
+               break;
+       default:
+               break;
+       }
+
+       return 0;
+}
+
 int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
                           struct mlx4_vhcr *vhcr,
                           struct mlx4_cmd_mailbox *inbox,
@@ -2622,16 +2719,123 @@ out:
        return err;
 }
 
+int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
+                             struct mlx4_vhcr *vhcr,
+                             struct mlx4_cmd_mailbox *inbox,
+                             struct mlx4_cmd_mailbox *outbox,
+                             struct mlx4_cmd_info *cmd)
+{
+       struct mlx4_qp_context *context = inbox->buf + 8;
+       adjust_proxy_tun_qkey(dev, vhcr, context);
+       update_pkey_index(dev, slave, inbox);
+       return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+}
+
 int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
                             struct mlx4_vhcr *vhcr,
                             struct mlx4_cmd_mailbox *inbox,
                             struct mlx4_cmd_mailbox *outbox,
                             struct mlx4_cmd_info *cmd)
 {
+       int err;
        struct mlx4_qp_context *qpc = inbox->buf + 8;
 
-       update_ud_gid(dev, qpc, (u8)slave);
+       err = verify_qp_parameters(dev, inbox, QP_TRANS_INIT2RTR, slave);
+       if (err)
+               return err;
+
+       update_pkey_index(dev, slave, inbox);
+       update_gid(dev, inbox, (u8)slave);
+       adjust_proxy_tun_qkey(dev, vhcr, qpc);
+
+       return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+}
+
+int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
+                           struct mlx4_vhcr *vhcr,
+                           struct mlx4_cmd_mailbox *inbox,
+                           struct mlx4_cmd_mailbox *outbox,
+                           struct mlx4_cmd_info *cmd)
+{
+       int err;
+       struct mlx4_qp_context *context = inbox->buf + 8;
+
+       err = verify_qp_parameters(dev, inbox, QP_TRANS_RTR2RTS, slave);
+       if (err)
+               return err;
+
+       update_pkey_index(dev, slave, inbox);
+       update_gid(dev, inbox, (u8)slave);
+       adjust_proxy_tun_qkey(dev, vhcr, context);
+       return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+}
+
+int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
+                           struct mlx4_vhcr *vhcr,
+                           struct mlx4_cmd_mailbox *inbox,
+                           struct mlx4_cmd_mailbox *outbox,
+                           struct mlx4_cmd_info *cmd)
+{
+       int err;
+       struct mlx4_qp_context *context = inbox->buf + 8;
+
+       err = verify_qp_parameters(dev, inbox, QP_TRANS_RTS2RTS, slave);
+       if (err)
+               return err;
+
+       update_pkey_index(dev, slave, inbox);
+       update_gid(dev, inbox, (u8)slave);
+       adjust_proxy_tun_qkey(dev, vhcr, context);
+       return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+}
+
+
+int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
+                             struct mlx4_vhcr *vhcr,
+                             struct mlx4_cmd_mailbox *inbox,
+                             struct mlx4_cmd_mailbox *outbox,
+                             struct mlx4_cmd_info *cmd)
+{
+       struct mlx4_qp_context *context = inbox->buf + 8;
+       adjust_proxy_tun_qkey(dev, vhcr, context);
+       return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+}
+
+int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
+                           struct mlx4_vhcr *vhcr,
+                           struct mlx4_cmd_mailbox *inbox,
+                           struct mlx4_cmd_mailbox *outbox,
+                           struct mlx4_cmd_info *cmd)
+{
+       int err;
+       struct mlx4_qp_context *context = inbox->buf + 8;
+
+       err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2SQD, slave);
+       if (err)
+               return err;
+
+       adjust_proxy_tun_qkey(dev, vhcr, context);
+       update_gid(dev, inbox, (u8)slave);
+       update_pkey_index(dev, slave, inbox);
+       return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+}
+
+int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
+                           struct mlx4_vhcr *vhcr,
+                           struct mlx4_cmd_mailbox *inbox,
+                           struct mlx4_cmd_mailbox *outbox,
+                           struct mlx4_cmd_info *cmd)
+{
+       int err;
+       struct mlx4_qp_context *context = inbox->buf + 8;
+
+       err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2RTS, slave);
+       if (err)
+               return err;
 
+       adjust_proxy_tun_qkey(dev, vhcr, context);
+       update_gid(dev, inbox, (u8)slave);
+       update_pkey_index(dev, slave, inbox);
        return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
 }
 
index 34ee09bae36e98dcb28c8ff9b4c8d12a2da97292..094773d88f809d00789ca720e23caa2f183bec27 100644 (file)
@@ -139,5 +139,5 @@ void  mlx4_sense_init(struct mlx4_dev *dev)
        for (port = 1; port <= dev->caps.num_ports; port++)
                sense->do_sense_port[port] = 1;
 
-       INIT_DELAYED_WORK_DEFERRABLE(&sense->sense_poll, mlx4_sense_port);
+       INIT_DEFERRABLE_WORK(&sense->sense_poll, mlx4_sense_port);
 }
diff --git a/drivers/net/ethernet/mipsnet.c b/drivers/net/ethernet/mipsnet.c
deleted file mode 100644 (file)
index db5285b..0000000
+++ /dev/null
@@ -1,345 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/platform_device.h>
-#include <asm/mips-boards/simint.h>
-
-#define MIPSNET_VERSION "2007-11-17"
-
-/*
- * Net status/control block as seen by sw in the core.
- */
-struct mipsnet_regs {
-       /*
-        * Device info for probing, reads as MIPSNET%d where %d is some
-        * form of version.
-        */
-       u64 devId;              /*0x00 */
-
-       /*
-        * read only busy flag.
-        * Set and cleared by the Net Device to indicate that an rx or a tx
-        * is in progress.
-        */
-       u32 busy;               /*0x08 */
-
-       /*
-        * Set by the Net Device.
-        * The device will set it once data has been received.
-        * The value is the number of bytes that should be read from
-        * rxDataBuffer.  The value will decrease till 0 until all the data
-        * from rxDataBuffer has been read.
-        */
-       u32 rxDataCount;        /*0x0c */
-#define MIPSNET_MAX_RXTX_DATACOUNT (1 << 16)
-
-       /*
-        * Settable from the MIPS core, cleared by the Net Device.
-        * The core should set the number of bytes it wants to send,
-        * then it should write those bytes of data to txDataBuffer.
-        * The device will clear txDataCount has been processed (not
-        * necessarily sent).
-        */
-       u32 txDataCount;        /*0x10 */
-
-       /*
-        * Interrupt control
-        *
-        * Used to clear the interrupted generated by this dev.
-        * Write a 1 to clear the interrupt. (except bit31).
-        *
-        * Bit0 is set if it was a tx-done interrupt.
-        * Bit1 is set when new rx-data is available.
-        *    Until this bit is cleared there will be no other RXs.
-        *
-        * Bit31 is used for testing, it clears after a read.
-        *    Writing 1 to this bit will cause an interrupt to be generated.
-        *    To clear the test interrupt, write 0 to this register.
-        */
-       u32 interruptControl;   /*0x14 */
-#define MIPSNET_INTCTL_TXDONE     (1u << 0)
-#define MIPSNET_INTCTL_RXDONE     (1u << 1)
-#define MIPSNET_INTCTL_TESTBIT    (1u << 31)
-
-       /*
-        * Readonly core-specific interrupt info for the device to signal
-        * the core. The meaning of the contents of this field might change.
-        */
-       /* XXX: the whole memIntf interrupt scheme is messy: the device
-        * should have no control what so ever of what VPE/register set is
-        * being used.
-        * The MemIntf should only expose interrupt lines, and something in
-        * the config should be responsible for the line<->core/vpe bindings.
-        */
-       u32 interruptInfo;      /*0x18 */
-
-       /*
-        * This is where the received data is read out.
-        * There is more data to read until rxDataReady is 0.
-        * Only 1 byte at this regs offset is used.
-        */
-       u32 rxDataBuffer;       /*0x1c */
-
-       /*
-        * This is where the data to transmit is written.
-        * Data should be written for the amount specified in the
-        * txDataCount register.
-        * Only 1 byte at this regs offset is used.
-        */
-       u32 txDataBuffer;       /*0x20 */
-};
-
-#define regaddr(dev, field) \
-  (dev->base_addr + offsetof(struct mipsnet_regs, field))
-
-static char mipsnet_string[] = "mipsnet";
-
-/*
- * Copy data from the MIPSNET rx data port
- */
-static int ioiocpy_frommipsnet(struct net_device *dev, unsigned char *kdata,
-                       int len)
-{
-       for (; len > 0; len--, kdata++)
-               *kdata = inb(regaddr(dev, rxDataBuffer));
-
-       return inl(regaddr(dev, rxDataCount));
-}
-
-static inline void mipsnet_put_todevice(struct net_device *dev,
-       struct sk_buff *skb)
-{
-       int count_to_go = skb->len;
-       char *buf_ptr = skb->data;
-
-       outl(skb->len, regaddr(dev, txDataCount));
-
-       for (; count_to_go; buf_ptr++, count_to_go--)
-               outb(*buf_ptr, regaddr(dev, txDataBuffer));
-
-       dev->stats.tx_packets++;
-       dev->stats.tx_bytes += skb->len;
-
-       dev_kfree_skb(skb);
-}
-
-static int mipsnet_xmit(struct sk_buff *skb, struct net_device *dev)
-{
-       /*
-        * Only one packet at a time. Once TXDONE interrupt is serviced, the
-        * queue will be restarted.
-        */
-       netif_stop_queue(dev);
-       mipsnet_put_todevice(dev, skb);
-
-       return NETDEV_TX_OK;
-}
-
-static inline ssize_t mipsnet_get_fromdev(struct net_device *dev, size_t len)
-{
-       struct sk_buff *skb;
-
-       if (!len)
-               return len;
-
-       skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);
-       if (!skb) {
-               dev->stats.rx_dropped++;
-               return -ENOMEM;
-       }
-
-       skb_reserve(skb, NET_IP_ALIGN);
-       if (ioiocpy_frommipsnet(dev, skb_put(skb, len), len))
-               return -EFAULT;
-
-       skb->protocol = eth_type_trans(skb, dev);
-       skb->ip_summed = CHECKSUM_UNNECESSARY;
-
-       netif_rx(skb);
-
-       dev->stats.rx_packets++;
-       dev->stats.rx_bytes += len;
-
-       return len;
-}
-
-static irqreturn_t mipsnet_interrupt(int irq, void *dev_id)
-{
-       struct net_device *dev = dev_id;
-       u32 int_flags;
-       irqreturn_t ret = IRQ_NONE;
-
-       if (irq != dev->irq)
-               goto out_badirq;
-
-       /* TESTBIT is cleared on read. */
-       int_flags = inl(regaddr(dev, interruptControl));
-       if (int_flags & MIPSNET_INTCTL_TESTBIT) {
-               /* TESTBIT takes effect after a write with 0. */
-               outl(0, regaddr(dev, interruptControl));
-               ret = IRQ_HANDLED;
-       } else if (int_flags & MIPSNET_INTCTL_TXDONE) {
-               /* Only one packet at a time, we are done. */
-               dev->stats.tx_packets++;
-               netif_wake_queue(dev);
-               outl(MIPSNET_INTCTL_TXDONE,
-                    regaddr(dev, interruptControl));
-               ret = IRQ_HANDLED;
-       } else if (int_flags & MIPSNET_INTCTL_RXDONE) {
-               mipsnet_get_fromdev(dev, inl(regaddr(dev, rxDataCount)));
-               outl(MIPSNET_INTCTL_RXDONE, regaddr(dev, interruptControl));
-               ret = IRQ_HANDLED;
-       }
-       return ret;
-
-out_badirq:
-       printk(KERN_INFO "%s: %s(): irq %d for unknown device\n",
-              dev->name, __func__, irq);
-       return ret;
-}
-
-static int mipsnet_open(struct net_device *dev)
-{
-       int err;
-
-       err = request_irq(dev->irq, mipsnet_interrupt,
-                         IRQF_SHARED, dev->name, (void *) dev);
-       if (err) {
-               release_region(dev->base_addr, sizeof(struct mipsnet_regs));
-               return err;
-       }
-
-       netif_start_queue(dev);
-
-       /* test interrupt handler */
-       outl(MIPSNET_INTCTL_TESTBIT, regaddr(dev, interruptControl));
-
-       return 0;
-}
-
-static int mipsnet_close(struct net_device *dev)
-{
-       netif_stop_queue(dev);
-       free_irq(dev->irq, dev);
-       return 0;
-}
-
-static void mipsnet_set_mclist(struct net_device *dev)
-{
-}
-
-static const struct net_device_ops mipsnet_netdev_ops = {
-       .ndo_open               = mipsnet_open,
-       .ndo_stop               = mipsnet_close,
-       .ndo_start_xmit         = mipsnet_xmit,
-       .ndo_set_rx_mode        = mipsnet_set_mclist,
-       .ndo_change_mtu         = eth_change_mtu,
-       .ndo_validate_addr      = eth_validate_addr,
-       .ndo_set_mac_address    = eth_mac_addr,
-};
-
-static int __devinit mipsnet_probe(struct platform_device *dev)
-{
-       struct net_device *netdev;
-       int err;
-
-       netdev = alloc_etherdev(0);
-       if (!netdev) {
-               err = -ENOMEM;
-               goto out;
-       }
-
-       platform_set_drvdata(dev, netdev);
-
-       netdev->netdev_ops = &mipsnet_netdev_ops;
-
-       /*
-        * TODO: probe for these or load them from PARAM
-        */
-       netdev->base_addr = 0x4200;
-       netdev->irq = MIPS_CPU_IRQ_BASE + MIPSCPU_INT_MB0 +
-                     inl(regaddr(netdev, interruptInfo));
-
-       /* Get the io region now, get irq on open() */
-       if (!request_region(netdev->base_addr, sizeof(struct mipsnet_regs),
-                           "mipsnet")) {
-               err = -EBUSY;
-               goto out_free_netdev;
-       }
-
-       /*
-        * Lacking any better mechanism to allocate a MAC address we use a
-        * random one ...
-        */
-       eth_hw_addr_random(netdev);
-
-       err = register_netdev(netdev);
-       if (err) {
-               printk(KERN_ERR "MIPSNet: failed to register netdev.\n");
-               goto out_free_region;
-       }
-
-       return 0;
-
-out_free_region:
-       release_region(netdev->base_addr, sizeof(struct mipsnet_regs));
-
-out_free_netdev:
-       free_netdev(netdev);
-
-out:
-       return err;
-}
-
-static int __devexit mipsnet_device_remove(struct platform_device *device)
-{
-       struct net_device *dev = platform_get_drvdata(device);
-
-       unregister_netdev(dev);
-       release_region(dev->base_addr, sizeof(struct mipsnet_regs));
-       free_netdev(dev);
-       platform_set_drvdata(device, NULL);
-
-       return 0;
-}
-
-static struct platform_driver mipsnet_driver = {
-       .driver = {
-               .name           = mipsnet_string,
-               .owner          = THIS_MODULE,
-       },
-       .probe          = mipsnet_probe,
-       .remove         = __devexit_p(mipsnet_device_remove),
-};
-
-static int __init mipsnet_init_module(void)
-{
-       int err;
-
-       printk(KERN_INFO "MIPSNet Ethernet driver. Version: %s. "
-              "(c)2005 MIPS Technologies, Inc.\n", MIPSNET_VERSION);
-
-       err = platform_driver_register(&mipsnet_driver);
-       if (err)
-               printk(KERN_ERR "Driver registration failed\n");
-
-       return err;
-}
-
-static void __exit mipsnet_exit_module(void)
-{
-       platform_driver_unregister(&mipsnet_driver);
-}
-
-module_init(mipsnet_init_module);
-module_exit(mipsnet_exit_module);
index cfa71a30dc8daece534e1226a20aa2d00c210b00..3e5b7509502c6321b9065e14a39156b547b83878 100644 (file)
@@ -3521,7 +3521,7 @@ static void vxge_device_unregister(struct __vxge_hw_device *hldev)
 
        strncpy(buf, dev->name, IFNAMSIZ);
 
-       flush_work_sync(&vdev->reset_task);
+       flush_work(&vdev->reset_task);
 
        /* in 2.6 will call stop() if device is up */
        unregister_netdev(dev);
index f45def01a98e46333873ee05366ad206f54b4fc9..876beceaf2d7154f07d9de46bbb75fba3b1f0c43 100644 (file)
@@ -3409,7 +3409,7 @@ set_speed:
 
        pause_flags = 0;
        /* setup pause frame */
-       if (np->duplex != 0) {
+       if (netif_running(dev) && (np->duplex != 0)) {
                if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) {
                        adv_pause = adv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
                        lpa_pause = lpa & (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
@@ -4435,7 +4435,7 @@ static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void
 
        regs->version = FORCEDETH_REGS_VER;
        spin_lock_irq(&np->lock);
-       for (i = 0; i <= np->register_size/sizeof(u32); i++)
+       for (i = 0; i < np->register_size/sizeof(u32); i++)
                rbuf[i] = readl(base + i*sizeof(u32));
        spin_unlock_irq(&np->lock);
 }
@@ -5455,6 +5455,7 @@ static int nv_close(struct net_device *dev)
 
        netif_stop_queue(dev);
        spin_lock_irq(&np->lock);
+       nv_update_pause(dev, 0); /* otherwise stop_tx bricks NIC */
        nv_stop_rxtx(dev);
        nv_txrx_reset(dev);
 
@@ -5904,11 +5905,19 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
                goto out_error;
        }
 
+       netif_carrier_off(dev);
+
+       /* Some NICs freeze when TX pause is enabled while NIC is
+        * down, and this stays across warm reboots. The sequence
+        * below should be enough to recover from that state.
+        */
+       nv_update_pause(dev, 0);
+       nv_start_tx(dev);
+       nv_stop_tx(dev);
+
        if (id->driver_data & DEV_HAS_VLAN)
                nv_vlan_mode(dev, dev->features);
 
-       netif_carrier_off(dev);
-
        dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n",
                 dev->name, np->phy_oui, np->phyaddr, dev->dev_addr);
 
index a7cc56007b330457c53b49ef4c51145d4990221d..e7ff886e8047ac3d3a926e8c0384b3ec7568068b 100644 (file)
@@ -77,7 +77,7 @@
 static const int multicast_filter_limit = 32;
 
 #define MAX_READ_REQUEST_SHIFT 12
-#define TX_DMA_BURST   6       /* Maximum PCI burst, '6' is 1024 */
+#define TX_DMA_BURST   7       /* Maximum PCI burst, '7' is unlimited */
 #define SafeMtu                0x1c20  /* ... actually life sucks beyond ~7k */
 #define InterFrameGap  0x03    /* 3 means InterFrameGap = the shortest one */
 
@@ -287,6 +287,8 @@ static DEFINE_PCI_DEVICE_TABLE(rtl8169_pci_tbl) = {
        { PCI_DEVICE(PCI_VENDOR_ID_REALTEK,     0x8167), 0, 0, RTL_CFG_0 },
        { PCI_DEVICE(PCI_VENDOR_ID_REALTEK,     0x8168), 0, 0, RTL_CFG_1 },
        { PCI_DEVICE(PCI_VENDOR_ID_REALTEK,     0x8169), 0, 0, RTL_CFG_0 },
+       { PCI_VENDOR_ID_DLINK,                  0x4300,
+               PCI_VENDOR_ID_DLINK, 0x4b10,             0, 0, RTL_CFG_1 },
        { PCI_DEVICE(PCI_VENDOR_ID_DLINK,       0x4300), 0, 0, RTL_CFG_0 },
        { PCI_DEVICE(PCI_VENDOR_ID_DLINK,       0x4302), 0, 0, RTL_CFG_0 },
        { PCI_DEVICE(PCI_VENDOR_ID_AT,          0xc107), 0, 0, RTL_CFG_0 },
index fb3cbc27063cccb5ae821989c516195527e32e65..25906c1d1b1590825502bc192dffa1aa8d8e494e 100644 (file)
@@ -34,3 +34,10 @@ config SFC_SRIOV
          This enables support for the SFC9000 I/O Virtualization
          features, allowing accelerated network performance in
          virtualized environments.
+config SFC_PTP
+       bool "Solarflare SFC9000-family PTP support"
+       depends on SFC && PTP_1588_CLOCK && !(SFC=y && PTP_1588_CLOCK=m)
+       default y
+       ---help---
+         This enables support for the Precision Time Protocol (PTP)
+         on SFC9000-family NICs
index ea1f8db5731811cbe7c7005931b1c4e4e8db5593..e11f2ecf69d9b5dfd0ae5600e33bb0fe1d7382fc 100644 (file)
@@ -5,5 +5,6 @@ sfc-y                   += efx.o nic.o falcon.o siena.o tx.o rx.o filter.o \
                           mcdi.o mcdi_phy.o mcdi_mon.o
 sfc-$(CONFIG_SFC_MTD)  += mtd.o
 sfc-$(CONFIG_SFC_SRIOV)        += siena_sriov.o
+sfc-$(CONFIG_SFC_PTP)  += ptp.o
 
 obj-$(CONFIG_SFC)      += sfc.o
index b26a954c27fcba84764e4c4310e5dcb91dd17a61..5400a33f254f0d8727dd882036b033305eba0d9d 100644 (file)
@@ -120,10 +120,10 @@ typedef union efx_oword {
  * [0,high-low), with garbage in bits [high-low+1,...).
  */
 #define EFX_EXTRACT_NATIVE(native_element, min, max, low, high)                \
-       (((low > max) || (high < min)) ? 0 :                            \
-        ((low > min) ?                                                 \
-         ((native_element) >> (low - min)) :                           \
-         ((native_element) << (min - low))))
+       ((low) > (max) || (high) < (min) ? 0 :                          \
+        (low) > (min) ?                                                \
+        (native_element) >> ((low) - (min)) :                          \
+        (native_element) << ((min) - (low)))
 
 /*
  * Extract bit field portion [low,high) from the 64-bit little-endian
@@ -142,27 +142,27 @@ typedef union efx_oword {
 #define EFX_EXTRACT_OWORD64(oword, low, high)                          \
        ((EFX_EXTRACT64((oword).u64[0], 0, 63, low, high) |             \
          EFX_EXTRACT64((oword).u64[1], 64, 127, low, high)) &          \
-        EFX_MASK64(high + 1 - low))
+        EFX_MASK64((high) + 1 - (low)))
 
 #define EFX_EXTRACT_QWORD64(qword, low, high)                          \
        (EFX_EXTRACT64((qword).u64[0], 0, 63, low, high) &              \
-        EFX_MASK64(high + 1 - low))
+        EFX_MASK64((high) + 1 - (low)))
 
 #define EFX_EXTRACT_OWORD32(oword, low, high)                          \
        ((EFX_EXTRACT32((oword).u32[0], 0, 31, low, high) |             \
          EFX_EXTRACT32((oword).u32[1], 32, 63, low, high) |            \
          EFX_EXTRACT32((oword).u32[2], 64, 95, low, high) |            \
          EFX_EXTRACT32((oword).u32[3], 96, 127, low, high)) &          \
-        EFX_MASK32(high + 1 - low))
+        EFX_MASK32((high) + 1 - (low)))
 
 #define EFX_EXTRACT_QWORD32(qword, low, high)                          \
        ((EFX_EXTRACT32((qword).u32[0], 0, 31, low, high) |             \
          EFX_EXTRACT32((qword).u32[1], 32, 63, low, high)) &           \
-        EFX_MASK32(high + 1 - low))
+        EFX_MASK32((high) + 1 - (low)))
 
 #define EFX_EXTRACT_DWORD(dword, low, high)                    \
        (EFX_EXTRACT32((dword).u32[0], 0, 31, low, high) &      \
-        EFX_MASK32(high + 1 - low))
+        EFX_MASK32((high) + 1 - (low)))
 
 #define EFX_OWORD_FIELD64(oword, field)                                \
        EFX_EXTRACT_OWORD64(oword, EFX_LOW_BIT(field),          \
@@ -442,10 +442,10 @@ typedef union efx_oword {
        cpu_to_le32(EFX_INSERT_NATIVE(min, max, low, high, value))
 
 #define EFX_INPLACE_MASK64(min, max, low, high)                                \
-       EFX_INSERT64(min, max, low, high, EFX_MASK64(high + 1 - low))
+       EFX_INSERT64(min, max, low, high, EFX_MASK64((high) + 1 - (low)))
 
 #define EFX_INPLACE_MASK32(min, max, low, high)                                \
-       EFX_INSERT32(min, max, low, high, EFX_MASK32(high + 1 - low))
+       EFX_INSERT32(min, max, low, high, EFX_MASK32((high) + 1 - (low)))
 
 #define EFX_SET_OWORD64(oword, low, high, value) do {                  \
        (oword).u64[0] = (((oword).u64[0]                               \
index 65a8d49106a4c63c6a7faf04c8e3334421ce6472..96bd980e828da5d28b0bed6329cb85e3655d598f 100644 (file)
@@ -202,11 +202,21 @@ static void efx_stop_all(struct efx_nic *efx);
 
 #define EFX_ASSERT_RESET_SERIALISED(efx)               \
        do {                                            \
-               if ((efx->state == STATE_RUNNING) ||    \
+               if ((efx->state == STATE_READY) ||      \
                    (efx->state == STATE_DISABLED))     \
                        ASSERT_RTNL();                  \
        } while (0)
 
+static int efx_check_disabled(struct efx_nic *efx)
+{
+       if (efx->state == STATE_DISABLED) {
+               netif_err(efx, drv, efx->net_dev,
+                         "device is disabled due to earlier errors\n");
+               return -EIO;
+       }
+       return 0;
+}
+
 /**************************************************************************
  *
  * Event queue processing
@@ -630,6 +640,16 @@ static void efx_start_datapath(struct efx_nic *efx)
        efx->rx_buffer_order = get_order(efx->rx_buffer_len +
                                         sizeof(struct efx_rx_page_state));
 
+       /* We must keep at least one descriptor in a TX ring empty.
+        * We could avoid this when the queue size does not exactly
+        * match the hardware ring size, but it's not that important.
+        * Therefore we stop the queue when one more skb might fill
+        * the ring completely.  We wake it when half way back to
+        * empty.
+        */
+       efx->txq_stop_thresh = efx->txq_entries - efx_tx_max_skb_descs(efx);
+       efx->txq_wake_thresh = efx->txq_stop_thresh / 2;
+
        /* Initialise the channels */
        efx_for_each_channel(channel, efx) {
                efx_for_each_channel_tx_queue(tx_queue, channel)
@@ -714,6 +734,7 @@ static void efx_remove_channel(struct efx_channel *channel)
        efx_for_each_possible_channel_tx_queue(tx_queue, channel)
                efx_remove_tx_queue(tx_queue);
        efx_remove_eventq(channel);
+       channel->type->post_remove(channel);
 }
 
 static void efx_remove_channels(struct efx_nic *efx)
@@ -730,7 +751,11 @@ efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
        struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
        u32 old_rxq_entries, old_txq_entries;
        unsigned i, next_buffer_table = 0;
-       int rc = 0;
+       int rc;
+
+       rc = efx_check_disabled(efx);
+       if (rc)
+               return rc;
 
        /* Not all channels should be reallocated. We must avoid
         * reallocating their buffer table entries.
@@ -828,6 +853,7 @@ void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
 
 static const struct efx_channel_type efx_default_channel_type = {
        .pre_probe              = efx_channel_dummy_op_int,
+       .post_remove            = efx_channel_dummy_op_void,
        .get_name               = efx_get_channel_name,
        .copy                   = efx_copy_channel,
        .keep_eventq            = false,
@@ -838,6 +864,10 @@ int efx_channel_dummy_op_int(struct efx_channel *channel)
        return 0;
 }
 
+void efx_channel_dummy_op_void(struct efx_channel *channel)
+{
+}
+
 /**************************************************************************
  *
  * Port handling
@@ -1365,6 +1395,8 @@ static void efx_start_interrupts(struct efx_nic *efx, bool may_keep_eventq)
 {
        struct efx_channel *channel;
 
+       BUG_ON(efx->state == STATE_DISABLED);
+
        if (efx->legacy_irq)
                efx->legacy_irq_enabled = true;
        efx_nic_enable_interrupts(efx);
@@ -1382,6 +1414,9 @@ static void efx_stop_interrupts(struct efx_nic *efx, bool may_keep_eventq)
 {
        struct efx_channel *channel;
 
+       if (efx->state == STATE_DISABLED)
+               return;
+
        efx_mcdi_mode_poll(efx);
 
        efx_nic_disable_interrupts(efx);
@@ -1422,10 +1457,16 @@ static void efx_set_channels(struct efx_nic *efx)
        efx->tx_channel_offset =
                separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;
 
-       /* We need to adjust the TX queue numbers if we have separate
+       /* We need to mark which channels really have RX and TX
+        * queues, and adjust the TX queue numbers if we have separate
         * RX-only and TX-only channels.
         */
        efx_for_each_channel(channel, efx) {
+               if (channel->channel < efx->n_rx_channels)
+                       channel->rx_queue.core_index = channel->channel;
+               else
+                       channel->rx_queue.core_index = -1;
+
                efx_for_each_channel_tx_queue(tx_queue, channel)
                        tx_queue->queue -= (efx->tx_channel_offset *
                                            EFX_TXQ_TYPES);
@@ -1533,22 +1574,21 @@ static int efx_probe_all(struct efx_nic *efx)
        return rc;
 }
 
-/* Called after previous invocation(s) of efx_stop_all, restarts the port,
- * kernel transmit queues and NAPI processing, and ensures that the port is
- * scheduled to be reconfigured. This function is safe to call multiple
- * times when the NIC is in any state.
+/* If the interface is supposed to be running but is not, start
+ * the hardware and software data path, regular activity for the port
+ * (MAC statistics, link polling, etc.) and schedule the port to be
+ * reconfigured.  Interrupts must already be enabled.  This function
+ * is safe to call multiple times, so long as the NIC is not disabled.
+ * Requires the RTNL lock.
  */
 static void efx_start_all(struct efx_nic *efx)
 {
        EFX_ASSERT_RESET_SERIALISED(efx);
+       BUG_ON(efx->state == STATE_DISABLED);
 
        /* Check that it is appropriate to restart the interface. All
         * of these flags are safe to read under just the rtnl lock */
-       if (efx->port_enabled)
-               return;
-       if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT))
-               return;
-       if (!netif_running(efx->net_dev))
+       if (efx->port_enabled || !netif_running(efx->net_dev))
                return;
 
        efx_start_port(efx);
@@ -1582,11 +1622,11 @@ static void efx_flush_all(struct efx_nic *efx)
        cancel_work_sync(&efx->mac_work);
 }
 
-/* Quiesce hardware and software without bringing the link down.
- * Safe to call multiple times, when the nic and interface is in any
- * state. The caller is guaranteed to subsequently be in a position
- * to modify any hardware and software state they see fit without
- * taking locks. */
+/* Quiesce the hardware and software data path, and regular activity
+ * for the port without bringing the link down.  Safe to call multiple
+ * times with the NIC in almost any state, but interrupts should be
+ * enabled.  Requires the RTNL lock.
+ */
 static void efx_stop_all(struct efx_nic *efx)
 {
        EFX_ASSERT_RESET_SERIALISED(efx);
@@ -1739,7 +1779,8 @@ static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
        struct efx_nic *efx = netdev_priv(net_dev);
        struct mii_ioctl_data *data = if_mii(ifr);
 
-       EFX_ASSERT_RESET_SERIALISED(efx);
+       if (cmd == SIOCSHWTSTAMP)
+               return efx_ptp_ioctl(efx, ifr, cmd);
 
        /* Convert phy_id from older PRTAD/DEVAD format */
        if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) &&
@@ -1820,13 +1861,14 @@ static void efx_netpoll(struct net_device *net_dev)
 static int efx_net_open(struct net_device *net_dev)
 {
        struct efx_nic *efx = netdev_priv(net_dev);
-       EFX_ASSERT_RESET_SERIALISED(efx);
+       int rc;
 
        netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n",
                  raw_smp_processor_id());
 
-       if (efx->state == STATE_DISABLED)
-               return -EIO;
+       rc = efx_check_disabled(efx);
+       if (rc)
+               return rc;
        if (efx->phy_mode & PHY_MODE_SPECIAL)
                return -EBUSY;
        if (efx_mcdi_poll_reboot(efx) && efx_reset(efx, RESET_TYPE_ALL))
@@ -1852,10 +1894,8 @@ static int efx_net_stop(struct net_device *net_dev)
        netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
                  raw_smp_processor_id());
 
-       if (efx->state != STATE_DISABLED) {
-               /* Stop the device and flush all the channels */
-               efx_stop_all(efx);
-       }
+       /* Stop the device and flush all the channels */
+       efx_stop_all(efx);
 
        return 0;
 }
@@ -1915,9 +1955,11 @@ static void efx_watchdog(struct net_device *net_dev)
 static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
 {
        struct efx_nic *efx = netdev_priv(net_dev);
+       int rc;
 
-       EFX_ASSERT_RESET_SERIALISED(efx);
-
+       rc = efx_check_disabled(efx);
+       if (rc)
+               return rc;
        if (new_mtu > EFX_MAX_MTU)
                return -EINVAL;
 
@@ -1926,8 +1968,6 @@ static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
        netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
 
        mutex_lock(&efx->mac_lock);
-       /* Reconfigure the MAC before enabling the dma queues so that
-        * the RX buffers don't overflow */
        net_dev->mtu = new_mtu;
        efx->type->reconfigure_mac(efx);
        mutex_unlock(&efx->mac_lock);
@@ -1942,8 +1982,6 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data)
        struct sockaddr *addr = data;
        char *new_addr = addr->sa_data;
 
-       EFX_ASSERT_RESET_SERIALISED(efx);
-
        if (!is_valid_ether_addr(new_addr)) {
                netif_err(efx, drv, efx->net_dev,
                          "invalid ethernet MAC address requested: %pM\n",
@@ -2079,11 +2117,27 @@ static int efx_register_netdev(struct efx_nic *efx)
 
        rtnl_lock();
 
+       /* Enable resets to be scheduled and check whether any were
+        * already requested.  If so, the NIC is probably hosed so we
+        * abort.
+        */
+       efx->state = STATE_READY;
+       smp_mb(); /* ensure we change state before checking reset_pending */
+       if (efx->reset_pending) {
+               netif_err(efx, probe, efx->net_dev,
+                         "aborting probe due to scheduled reset\n");
+               rc = -EIO;
+               goto fail_locked;
+       }
+
        rc = dev_alloc_name(net_dev, net_dev->name);
        if (rc < 0)
                goto fail_locked;
        efx_update_name(efx);
 
+       /* Always start with carrier off; PHY events will detect the link */
+       netif_carrier_off(net_dev);
+
        rc = register_netdevice(net_dev);
        if (rc)
                goto fail_locked;
@@ -2094,9 +2148,6 @@ static int efx_register_netdev(struct efx_nic *efx)
                        efx_init_tx_queue_core_txq(tx_queue);
        }
 
-       /* Always start with carrier off; PHY events will detect the link */
-       netif_carrier_off(net_dev);
-
        rtnl_unlock();
 
        rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
@@ -2108,14 +2159,14 @@ static int efx_register_netdev(struct efx_nic *efx)
 
        return 0;
 
+fail_registered:
+       rtnl_lock();
+       unregister_netdevice(net_dev);
 fail_locked:
+       efx->state = STATE_UNINIT;
        rtnl_unlock();
        netif_err(efx, drv, efx->net_dev, "could not register net dev\n");
        return rc;
-
-fail_registered:
-       unregister_netdev(net_dev);
-       return rc;
 }
 
 static void efx_unregister_netdev(struct efx_nic *efx)
@@ -2138,7 +2189,11 @@ static void efx_unregister_netdev(struct efx_nic *efx)
 
        strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
        device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
-       unregister_netdev(efx->net_dev);
+
+       rtnl_lock();
+       unregister_netdevice(efx->net_dev);
+       efx->state = STATE_UNINIT;
+       rtnl_unlock();
 }
 
 /**************************************************************************
@@ -2154,9 +2209,9 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method)
        EFX_ASSERT_RESET_SERIALISED(efx);
 
        efx_stop_all(efx);
-       mutex_lock(&efx->mac_lock);
-
        efx_stop_interrupts(efx, false);
+
+       mutex_lock(&efx->mac_lock);
        if (efx->port_initialized && method != RESET_TYPE_INVISIBLE)
                efx->phy_op->fini(efx);
        efx->type->fini(efx);
@@ -2276,16 +2331,15 @@ static void efx_reset_work(struct work_struct *data)
        if (!pending)
                return;
 
-       /* If we're not RUNNING then don't reset. Leave the reset_pending
-        * flags set so that efx_pci_probe_main will be retried */
-       if (efx->state != STATE_RUNNING) {
-               netif_info(efx, drv, efx->net_dev,
-                          "scheduled reset quenched. NIC not RUNNING\n");
-               return;
-       }
-
        rtnl_lock();
-       (void)efx_reset(efx, fls(pending) - 1);
+
+       /* We checked the state in efx_schedule_reset() but it may
+        * have changed by now.  Now that we have the RTNL lock,
+        * it cannot change again.
+        */
+       if (efx->state == STATE_READY)
+               (void)efx_reset(efx, fls(pending) - 1);
+
        rtnl_unlock();
 }
 
@@ -2311,6 +2365,13 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
        }
 
        set_bit(method, &efx->reset_pending);
+       smp_mb(); /* ensure we change reset_pending before checking state */
+
+       /* If we're not READY then just leave the flags set as the cue
+        * to abort probing or reschedule the reset later.
+        */
+       if (ACCESS_ONCE(efx->state) != STATE_READY)
+               return;
 
        /* efx_process_channel() will no longer read events once a
         * reset is scheduled. So switch back to poll'd MCDI completions. */
@@ -2376,13 +2437,12 @@ static const struct efx_phy_operations efx_dummy_phy_operations = {
 /* This zeroes out and then fills in the invariants in a struct
  * efx_nic (including all sub-structures).
  */
-static int efx_init_struct(struct efx_nic *efx, const struct efx_nic_type *type,
+static int efx_init_struct(struct efx_nic *efx,
                           struct pci_dev *pci_dev, struct net_device *net_dev)
 {
        int i;
 
        /* Initialise common structures */
-       memset(efx, 0, sizeof(*efx));
        spin_lock_init(&efx->biu_lock);
 #ifdef CONFIG_SFC_MTD
        INIT_LIST_HEAD(&efx->mtd_list);
@@ -2392,7 +2452,7 @@ static int efx_init_struct(struct efx_nic *efx, const struct efx_nic_type *type,
        INIT_DELAYED_WORK(&efx->selftest_work, efx_selftest_async_work);
        efx->pci_dev = pci_dev;
        efx->msg_enable = debug;
-       efx->state = STATE_INIT;
+       efx->state = STATE_UNINIT;
        strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
 
        efx->net_dev = net_dev;
@@ -2409,8 +2469,6 @@ static int efx_init_struct(struct efx_nic *efx, const struct efx_nic_type *type,
                        goto fail;
        }
 
-       efx->type = type;
-
        EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS);
 
        /* Higher numbered interrupt modes are less capable! */
@@ -2455,6 +2513,12 @@ static void efx_fini_struct(struct efx_nic *efx)
  */
 static void efx_pci_remove_main(struct efx_nic *efx)
 {
+       /* Flush reset_work. It can no longer be scheduled since we
+        * are not READY.
+        */
+       BUG_ON(efx->state == STATE_READY);
+       cancel_work_sync(&efx->reset_work);
+
 #ifdef CONFIG_RFS_ACCEL
        free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
        efx->net_dev->rx_cpu_rmap = NULL;
@@ -2480,24 +2544,15 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
 
        /* Mark the NIC as fini, then stop the interface */
        rtnl_lock();
-       efx->state = STATE_FINI;
        dev_close(efx->net_dev);
-
-       /* Allow any queued efx_resets() to complete */
+       efx_stop_interrupts(efx, false);
        rtnl_unlock();
 
-       efx_stop_interrupts(efx, false);
        efx_sriov_fini(efx);
        efx_unregister_netdev(efx);
 
        efx_mtd_remove(efx);
 
-       /* Wait for any scheduled resets to complete. No more will be
-        * scheduled from this point because efx_stop_all() has been
-        * called, we are no longer registered with driverlink, and
-        * the net_device's have been removed. */
-       cancel_work_sync(&efx->reset_work);
-
        efx_pci_remove_main(efx);
 
        efx_fini_io(efx);
@@ -2617,7 +2672,6 @@ static int efx_pci_probe_main(struct efx_nic *efx)
 static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
                                   const struct pci_device_id *entry)
 {
-       const struct efx_nic_type *type = (const struct efx_nic_type *) entry->driver_data;
        struct net_device *net_dev;
        struct efx_nic *efx;
        int rc;
@@ -2627,10 +2681,12 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
                                     EFX_MAX_RX_QUEUES);
        if (!net_dev)
                return -ENOMEM;
-       net_dev->features |= (type->offload_features | NETIF_F_SG |
+       efx = netdev_priv(net_dev);
+       efx->type = (const struct efx_nic_type *) entry->driver_data;
+       net_dev->features |= (efx->type->offload_features | NETIF_F_SG |
                              NETIF_F_HIGHDMA | NETIF_F_TSO |
                              NETIF_F_RXCSUM);
-       if (type->offload_features & NETIF_F_V6_CSUM)
+       if (efx->type->offload_features & NETIF_F_V6_CSUM)
                net_dev->features |= NETIF_F_TSO6;
        /* Mask for features that also apply to VLAN devices */
        net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG |
@@ -2638,10 +2694,9 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
                                   NETIF_F_RXCSUM);
        /* All offloads can be toggled */
        net_dev->hw_features = net_dev->features & ~NETIF_F_HIGHDMA;
-       efx = netdev_priv(net_dev);
        pci_set_drvdata(pci_dev, efx);
        SET_NETDEV_DEV(net_dev, &pci_dev->dev);
-       rc = efx_init_struct(efx, type, pci_dev, net_dev);
+       rc = efx_init_struct(efx, pci_dev, net_dev);
        if (rc)
                goto fail1;
 
@@ -2656,28 +2711,9 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
                goto fail2;
 
        rc = efx_pci_probe_main(efx);
-
-       /* Serialise against efx_reset(). No more resets will be
-        * scheduled since efx_stop_all() has been called, and we have
-        * not and never have been registered.
-        */
-       cancel_work_sync(&efx->reset_work);
-
        if (rc)
                goto fail3;
 
-       /* If there was a scheduled reset during probe, the NIC is
-        * probably hosed anyway.
-        */
-       if (efx->reset_pending) {
-               rc = -EIO;
-               goto fail4;
-       }
-
-       /* Switch to the running state before we expose the device to the OS,
-        * so that dev_open()|efx_start_all() will actually start the device */
-       efx->state = STATE_RUNNING;
-
        rc = efx_register_netdev(efx);
        if (rc)
                goto fail4;
@@ -2717,12 +2753,18 @@ static int efx_pm_freeze(struct device *dev)
 {
        struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
 
-       efx->state = STATE_FINI;
+       rtnl_lock();
 
-       netif_device_detach(efx->net_dev);
+       if (efx->state != STATE_DISABLED) {
+               efx->state = STATE_UNINIT;
 
-       efx_stop_all(efx);
-       efx_stop_interrupts(efx, false);
+               netif_device_detach(efx->net_dev);
+
+               efx_stop_all(efx);
+               efx_stop_interrupts(efx, false);
+       }
+
+       rtnl_unlock();
 
        return 0;
 }
@@ -2731,21 +2773,25 @@ static int efx_pm_thaw(struct device *dev)
 {
        struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
 
-       efx->state = STATE_INIT;
+       rtnl_lock();
 
-       efx_start_interrupts(efx, false);
+       if (efx->state != STATE_DISABLED) {
+               efx_start_interrupts(efx, false);
 
-       mutex_lock(&efx->mac_lock);
-       efx->phy_op->reconfigure(efx);
-       mutex_unlock(&efx->mac_lock);
+               mutex_lock(&efx->mac_lock);
+               efx->phy_op->reconfigure(efx);
+               mutex_unlock(&efx->mac_lock);
 
-       efx_start_all(efx);
+               efx_start_all(efx);
 
-       netif_device_attach(efx->net_dev);
+               netif_device_attach(efx->net_dev);
 
-       efx->state = STATE_RUNNING;
+               efx->state = STATE_READY;
 
-       efx->type->resume_wol(efx);
+               efx->type->resume_wol(efx);
+       }
+
+       rtnl_unlock();
 
        /* Reschedule any quenched resets scheduled during efx_pm_freeze() */
        queue_work(reset_workqueue, &efx->reset_work);
index 70755c97251aaab4e9cafe969b0a8b95821a1cbb..f11170bc48bf4f292d38c4ac5c31a55db10499ee 100644 (file)
@@ -102,6 +102,7 @@ static inline void efx_filter_rfs_expire(struct efx_channel *channel) {}
 
 /* Channels */
 extern int efx_channel_dummy_op_int(struct efx_channel *channel);
+extern void efx_channel_dummy_op_void(struct efx_channel *channel);
 extern void efx_process_channel_now(struct efx_channel *channel);
 extern int
 efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries);
index 5faedd855b779272342b37c6d992a1a93562a70b..90f078eff8e60d86b491763c7a2b40edf7012052 100644 (file)
@@ -337,7 +337,8 @@ static int efx_fill_loopback_test(struct efx_nic *efx,
                                  unsigned int test_index,
                                  struct ethtool_string *strings, u64 *data)
 {
-       struct efx_channel *channel = efx_get_channel(efx, 0);
+       struct efx_channel *channel =
+               efx_get_channel(efx, efx->tx_channel_offset);
        struct efx_tx_queue *tx_queue;
 
        efx_for_each_channel_tx_queue(tx_queue, channel) {
@@ -529,9 +530,7 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
        if (!efx_tests)
                goto fail;
 
-
-       ASSERT_RTNL();
-       if (efx->state != STATE_RUNNING) {
+       if (efx->state != STATE_READY) {
                rc = -EIO;
                goto fail1;
        }
@@ -962,9 +961,7 @@ static int efx_ethtool_set_class_rule(struct efx_nic *efx,
        int rc;
 
        /* Check that user wants us to choose the location */
-       if (rule->location != RX_CLS_LOC_ANY &&
-           rule->location != RX_CLS_LOC_FIRST &&
-           rule->location != RX_CLS_LOC_LAST)
+       if (rule->location != RX_CLS_LOC_ANY)
                return -EINVAL;
 
        /* Range-check ring_cookie */
@@ -978,9 +975,7 @@ static int efx_ethtool_set_class_rule(struct efx_nic *efx,
             rule->m_ext.data[1]))
                return -EINVAL;
 
-       efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL,
-                          (rule->location == RX_CLS_LOC_FIRST) ?
-                          EFX_FILTER_FLAG_RX_OVERRIDE_IP : 0,
+       efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, 0,
                           (rule->ring_cookie == RX_CLS_FLOW_DISC) ?
                           0xfff : rule->ring_cookie);
 
@@ -1176,6 +1171,7 @@ const struct ethtool_ops efx_ethtool_ops = {
        .get_rxfh_indir_size    = efx_ethtool_get_rxfh_indir_size,
        .get_rxfh_indir         = efx_ethtool_get_rxfh_indir,
        .set_rxfh_indir         = efx_ethtool_set_rxfh_indir,
+       .get_ts_info            = efx_ptp_get_ts_info,
        .get_module_info        = efx_ethtool_get_module_info,
        .get_module_eeprom      = efx_ethtool_get_module_eeprom,
 };
index 8687a6c3db0dc19cb11ecbe5327db5e00f869094..ec1e99d0dcad9e32f038f8b436e2416ac6d2ff4e 100644 (file)
@@ -380,7 +380,7 @@ static ssize_t set_phy_flash_cfg(struct device *dev,
                new_mode = PHY_MODE_SPECIAL;
        if (!((old_mode ^ new_mode) & PHY_MODE_SPECIAL)) {
                err = 0;
-       } else if (efx->state != STATE_RUNNING || netif_running(efx->net_dev)) {
+       } else if (efx->state != STATE_READY || netif_running(efx->net_dev)) {
                err = -EBUSY;
        } else {
                /* Reset the PHY, reconfigure the MAC and enable/disable
index c3fd61f0a95c5d0680a1a286f48000d82021b46b..8af42cd1feda7d7dddbd860b76cdddbf69ff01fa 100644 (file)
@@ -161,10 +161,6 @@ static void efx_filter_push_rx_config(struct efx_nic *efx)
                        filter_ctl, FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED,
                        !!(table->spec[EFX_FILTER_INDEX_UC_DEF].flags &
                           EFX_FILTER_FLAG_RX_RSS));
-               EFX_SET_OWORD_FIELD(
-                       filter_ctl, FRF_CZ_UNICAST_NOMATCH_IP_OVERRIDE,
-                       !!(table->spec[EFX_FILTER_INDEX_UC_DEF].flags &
-                          EFX_FILTER_FLAG_RX_OVERRIDE_IP));
                EFX_SET_OWORD_FIELD(
                        filter_ctl, FRF_CZ_MULTICAST_NOMATCH_Q_ID,
                        table->spec[EFX_FILTER_INDEX_MC_DEF].dmaq_id);
@@ -172,10 +168,6 @@ static void efx_filter_push_rx_config(struct efx_nic *efx)
                        filter_ctl, FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED,
                        !!(table->spec[EFX_FILTER_INDEX_MC_DEF].flags &
                           EFX_FILTER_FLAG_RX_RSS));
-               EFX_SET_OWORD_FIELD(
-                       filter_ctl, FRF_CZ_MULTICAST_NOMATCH_IP_OVERRIDE,
-                       !!(table->spec[EFX_FILTER_INDEX_MC_DEF].flags &
-                          EFX_FILTER_FLAG_RX_OVERRIDE_IP));
        }
 
        efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
@@ -480,14 +472,12 @@ static u32 efx_filter_build(efx_oword_t *filter, struct efx_filter_spec *spec)
 
        case EFX_FILTER_TABLE_RX_MAC: {
                bool is_wild = spec->type == EFX_FILTER_MAC_WILD;
-               EFX_POPULATE_OWORD_8(
+               EFX_POPULATE_OWORD_7(
                        *filter,
                        FRF_CZ_RMFT_RSS_EN,
                        !!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
                        FRF_CZ_RMFT_SCATTER_EN,
                        !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
-                       FRF_CZ_RMFT_IP_OVERRIDE,
-                       !!(spec->flags & EFX_FILTER_FLAG_RX_OVERRIDE_IP),
                        FRF_CZ_RMFT_RXQ_ID, spec->dmaq_id,
                        FRF_CZ_RMFT_WILDCARD_MATCH, is_wild,
                        FRF_CZ_RMFT_DEST_MAC_HI, spec->data[2],
@@ -567,49 +557,62 @@ static int efx_filter_search(struct efx_filter_table *table,
 }
 
 /*
- * Construct/deconstruct external filter IDs.  These must be ordered
- * by matching priority, for RX NFC semantics.
+ * Construct/deconstruct external filter IDs.  At least the RX filter
+ * IDs must be ordered by matching priority, for RX NFC semantics.
  *
- * Each RX MAC filter entry has a flag for whether it can override an
- * RX IP filter that also matches.  So we assign locations for MAC
- * filters with overriding behaviour, then for IP filters, then for
- * MAC filters without overriding behaviour.
+ * Deconstruction needs to be robust against invalid IDs so that
+ * efx_filter_remove_id_safe() and efx_filter_get_filter_safe() can
+ * accept user-provided IDs.
  */
 
-#define EFX_FILTER_MATCH_PRI_RX_MAC_OVERRIDE_IP        0
-#define EFX_FILTER_MATCH_PRI_RX_DEF_OVERRIDE_IP        1
-#define EFX_FILTER_MATCH_PRI_NORMAL_BASE       2
+#define EFX_FILTER_MATCH_PRI_COUNT     5
+
+static const u8 efx_filter_type_match_pri[EFX_FILTER_TYPE_COUNT] = {
+       [EFX_FILTER_TCP_FULL]   = 0,
+       [EFX_FILTER_UDP_FULL]   = 0,
+       [EFX_FILTER_TCP_WILD]   = 1,
+       [EFX_FILTER_UDP_WILD]   = 1,
+       [EFX_FILTER_MAC_FULL]   = 2,
+       [EFX_FILTER_MAC_WILD]   = 3,
+       [EFX_FILTER_UC_DEF]     = 4,
+       [EFX_FILTER_MC_DEF]     = 4,
+};
+
+static const enum efx_filter_table_id efx_filter_range_table[] = {
+       EFX_FILTER_TABLE_RX_IP,         /* RX match pri 0 */
+       EFX_FILTER_TABLE_RX_IP,
+       EFX_FILTER_TABLE_RX_MAC,
+       EFX_FILTER_TABLE_RX_MAC,
+       EFX_FILTER_TABLE_RX_DEF,        /* RX match pri 4 */
+       EFX_FILTER_TABLE_COUNT,         /* TX match pri 0; invalid */
+       EFX_FILTER_TABLE_COUNT,         /* invalid */
+       EFX_FILTER_TABLE_TX_MAC,
+       EFX_FILTER_TABLE_TX_MAC,        /* TX match pri 3 */
+};
 
 #define EFX_FILTER_INDEX_WIDTH 13
 #define EFX_FILTER_INDEX_MASK  ((1 << EFX_FILTER_INDEX_WIDTH) - 1)
 
-static inline u32 efx_filter_make_id(enum efx_filter_table_id table_id,
-                                    unsigned int index, u8 flags)
+static inline u32
+efx_filter_make_id(const struct efx_filter_spec *spec, unsigned int index)
 {
-       unsigned int match_pri = EFX_FILTER_MATCH_PRI_NORMAL_BASE + table_id;
+       unsigned int range;
 
-       if (flags & EFX_FILTER_FLAG_RX_OVERRIDE_IP) {
-               if (table_id == EFX_FILTER_TABLE_RX_MAC)
-                       match_pri = EFX_FILTER_MATCH_PRI_RX_MAC_OVERRIDE_IP;
-               else if (table_id == EFX_FILTER_TABLE_RX_DEF)
-                       match_pri = EFX_FILTER_MATCH_PRI_RX_DEF_OVERRIDE_IP;
-       }
+       range = efx_filter_type_match_pri[spec->type];
+       if (!(spec->flags & EFX_FILTER_FLAG_RX))
+               range += EFX_FILTER_MATCH_PRI_COUNT;
 
-       return match_pri << EFX_FILTER_INDEX_WIDTH | index;
+       return range << EFX_FILTER_INDEX_WIDTH | index;
 }
 
 static inline enum efx_filter_table_id efx_filter_id_table_id(u32 id)
 {
-       unsigned int match_pri = id >> EFX_FILTER_INDEX_WIDTH;
+       unsigned int range = id >> EFX_FILTER_INDEX_WIDTH;
 
-       switch (match_pri) {
-       case EFX_FILTER_MATCH_PRI_RX_MAC_OVERRIDE_IP:
-               return EFX_FILTER_TABLE_RX_MAC;
-       case EFX_FILTER_MATCH_PRI_RX_DEF_OVERRIDE_IP:
-               return EFX_FILTER_TABLE_RX_DEF;
-       default:
-               return match_pri - EFX_FILTER_MATCH_PRI_NORMAL_BASE;
-       }
+       if (range < ARRAY_SIZE(efx_filter_range_table))
+               return efx_filter_range_table[range];
+       else
+               return EFX_FILTER_TABLE_COUNT; /* invalid */
 }
 
 static inline unsigned int efx_filter_id_index(u32 id)
@@ -619,12 +622,9 @@ static inline unsigned int efx_filter_id_index(u32 id)
 
 static inline u8 efx_filter_id_flags(u32 id)
 {
-       unsigned int match_pri = id >> EFX_FILTER_INDEX_WIDTH;
+       unsigned int range = id >> EFX_FILTER_INDEX_WIDTH;
 
-       if (match_pri < EFX_FILTER_MATCH_PRI_NORMAL_BASE)
-               return EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_RX_OVERRIDE_IP;
-       else if (match_pri <=
-                EFX_FILTER_MATCH_PRI_NORMAL_BASE + EFX_FILTER_TABLE_RX_DEF)
+       if (range < EFX_FILTER_MATCH_PRI_COUNT)
                return EFX_FILTER_FLAG_RX;
        else
                return EFX_FILTER_FLAG_TX;
@@ -633,14 +633,15 @@ static inline u8 efx_filter_id_flags(u32 id)
 u32 efx_filter_get_rx_id_limit(struct efx_nic *efx)
 {
        struct efx_filter_state *state = efx->filter_state;
-       unsigned int table_id = EFX_FILTER_TABLE_RX_DEF;
+       unsigned int range = EFX_FILTER_MATCH_PRI_COUNT - 1;
+       enum efx_filter_table_id table_id;
 
        do {
+               table_id = efx_filter_range_table[range];
                if (state->table[table_id].size != 0)
-                       return ((EFX_FILTER_MATCH_PRI_NORMAL_BASE + table_id)
-                               << EFX_FILTER_INDEX_WIDTH) +
+                       return range << EFX_FILTER_INDEX_WIDTH |
                                state->table[table_id].size;
-       } while (table_id--);
+       } while (range--);
 
        return 0;
 }
@@ -718,7 +719,7 @@ s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
        netif_vdbg(efx, hw, efx->net_dev,
                   "%s: filter type %d index %d rxq %u set",
                   __func__, spec->type, filter_idx, spec->dmaq_id);
-       rc = efx_filter_make_id(table->id, filter_idx, spec->flags);
+       rc = efx_filter_make_id(spec, filter_idx);
 
 out:
        spin_unlock_bh(&state->lock);
@@ -781,8 +782,7 @@ int efx_filter_remove_id_safe(struct efx_nic *efx,
        spin_lock_bh(&state->lock);
 
        if (test_bit(filter_idx, table->used_bitmap) &&
-           spec->priority == priority &&
-           !((spec->flags ^ filter_flags) & EFX_FILTER_FLAG_RX_OVERRIDE_IP)) {
+           spec->priority == priority) {
                efx_filter_table_clear_entry(efx, table, filter_idx);
                if (table->used == 0)
                        efx_filter_table_reset_search_depth(table);
@@ -833,8 +833,7 @@ int efx_filter_get_filter_safe(struct efx_nic *efx,
        spin_lock_bh(&state->lock);
 
        if (test_bit(filter_idx, table->used_bitmap) &&
-           spec->priority == priority &&
-           !((spec->flags ^ filter_flags) & EFX_FILTER_FLAG_RX_OVERRIDE_IP)) {
+           spec->priority == priority) {
                *spec_buf = *spec;
                rc = 0;
        } else {
@@ -927,8 +926,7 @@ s32 efx_filter_get_rx_ids(struct efx_nic *efx,
                                        goto out;
                                }
                                buf[count++] = efx_filter_make_id(
-                                       table_id, filter_idx,
-                                       table->spec[filter_idx].flags);
+                                       &table->spec[filter_idx], filter_idx);
                        }
                }
        }
index 3c77802aed6c63e02adf29ca9d2858b4d2c100c0..5cb54723b8244bf5b4c8b21e8c655f8e9f9b0916 100644 (file)
@@ -61,16 +61,12 @@ enum efx_filter_priority {
  *     according to the indirection table.
  * @EFX_FILTER_FLAG_RX_SCATTER: Enable DMA scatter on the receiving
  *     queue.
- * @EFX_FILTER_FLAG_RX_OVERRIDE_IP: Enables a MAC filter to override
- *     any IP filter that matches the same packet.  By default, IP
- *     filters take precedence.
  * @EFX_FILTER_FLAG_RX: Filter is for RX
  * @EFX_FILTER_FLAG_TX: Filter is for TX
  */
 enum efx_filter_flags {
        EFX_FILTER_FLAG_RX_RSS = 0x01,
        EFX_FILTER_FLAG_RX_SCATTER = 0x02,
-       EFX_FILTER_FLAG_RX_OVERRIDE_IP = 0x04,
        EFX_FILTER_FLAG_RX = 0x08,
        EFX_FILTER_FLAG_TX = 0x10,
 };
@@ -88,8 +84,7 @@ enum efx_filter_flags {
  *
  * The @priority field is used by software to determine whether a new
  * filter may replace an old one.  The hardware priority of a filter
- * depends on the filter type and %EFX_FILTER_FLAG_RX_OVERRIDE_IP
- * flag.
+ * depends on the filter type.
  */
 struct efx_filter_spec {
        u8      type:4;
index fc5e7bbcbc9e5b9ea2ff8cf61eee36703371865d..aea43cbd05200acc1f19cd21e9f96d9e26782ca2 100644 (file)
@@ -320,14 +320,20 @@ static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno,
                efx_mcdi_complete(mcdi);
 }
 
-/* Issue the given command by writing the data into the shared memory PDU,
- * ring the doorbell and wait for completion. Copyout the result. */
 int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
                 const u8 *inbuf, size_t inlen, u8 *outbuf, size_t outlen,
                 size_t *outlen_actual)
+{
+       efx_mcdi_rpc_start(efx, cmd, inbuf, inlen);
+       return efx_mcdi_rpc_finish(efx, cmd, inlen,
+                                  outbuf, outlen, outlen_actual);
+}
+
+void efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd, const u8 *inbuf,
+                       size_t inlen)
 {
        struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
-       int rc;
+
        BUG_ON(efx_nic_rev(efx) < EFX_REV_SIENA_A0);
 
        efx_mcdi_acquire(mcdi);
@@ -338,6 +344,15 @@ int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
        spin_unlock_bh(&mcdi->iface_lock);
 
        efx_mcdi_copyin(efx, cmd, inbuf, inlen);
+}
+
+int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
+                       u8 *outbuf, size_t outlen, size_t *outlen_actual)
+{
+       struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+       int rc;
+
+       BUG_ON(efx_nic_rev(efx) < EFX_REV_SIENA_A0);
 
        if (mcdi->mode == MCDI_MODE_POLL)
                rc = efx_mcdi_poll(efx);
@@ -563,6 +578,11 @@ void efx_mcdi_process_event(struct efx_channel *channel,
        case MCDI_EVENT_CODE_FLR:
                efx_sriov_flr(efx, MCDI_EVENT_FIELD(*event, FLR_VF));
                break;
+       case MCDI_EVENT_CODE_PTP_RX:
+       case MCDI_EVENT_CODE_PTP_FAULT:
+       case MCDI_EVENT_CODE_PTP_PPS:
+               efx_ptp_event(efx, event);
+               break;
 
        default:
                netif_err(efx, hw, efx->net_dev, "Unknown MCDI event 0x%x\n",
@@ -641,9 +661,8 @@ int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
                           u16 *fw_subtype_list, u32 *capabilities)
 {
        uint8_t outbuf[MC_CMD_GET_BOARD_CFG_OUT_LENMIN];
-       size_t outlen;
+       size_t outlen, offset, i;
        int port_num = efx_port_num(efx);
-       int offset;
        int rc;
 
        BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_IN_LEN != 0);
@@ -663,11 +682,18 @@ int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
                : MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST;
        if (mac_address)
                memcpy(mac_address, outbuf + offset, ETH_ALEN);
-       if (fw_subtype_list)
-               memcpy(fw_subtype_list,
-                      outbuf + MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST,
-                      MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MINNUM *
-                      sizeof(fw_subtype_list[0]));
+       if (fw_subtype_list) {
+               /* Byte-swap and truncate or zero-pad as necessary */
+               offset = MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST;
+               for (i = 0;
+                    i < MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM;
+                    i++) {
+                       fw_subtype_list[i] =
+                               (offset + 2 <= outlen) ?
+                               le16_to_cpup((__le16 *)(outbuf + offset)) : 0;
+                       offset += 2;
+               }
+       }
        if (capabilities) {
                if (port_num)
                        *capabilities = MCDI_DWORD(outbuf,
@@ -1169,6 +1195,9 @@ int efx_mcdi_flush_rxqs(struct efx_nic *efx)
        __le32 *qid;
        int rc, count;
 
+       BUILD_BUG_ON(EFX_MAX_CHANNELS >
+                    MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM);
+
        qid = kmalloc(EFX_MAX_CHANNELS * sizeof(*qid), GFP_KERNEL);
        if (qid == NULL)
                return -ENOMEM;
index 0bdf3e33183253d19803803cdf73d7519c9bdaf6..3ba2e5b5a9cc98eac81a90c9b3d2e0e650b63af0 100644 (file)
@@ -71,6 +71,12 @@ extern int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, const u8 *inbuf,
                        size_t inlen, u8 *outbuf, size_t outlen,
                        size_t *outlen_actual);
 
+extern void efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
+                              const u8 *inbuf, size_t inlen);
+extern int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
+                              u8 *outbuf, size_t outlen,
+                              size_t *outlen_actual);
+
 extern int efx_mcdi_poll_reboot(struct efx_nic *efx);
 extern void efx_mcdi_mode_poll(struct efx_nic *efx);
 extern void efx_mcdi_mode_event(struct efx_nic *efx);
@@ -107,11 +113,13 @@ extern void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
 #define MCDI_EVENT_FIELD(_ev, _field)                  \
        EFX_QWORD_FIELD(_ev, MCDI_EVENT_ ## _field)
 #define MCDI_ARRAY_FIELD(_buf, _field1, _type, _index, _field2)                \
-       EFX_DWORD_FIELD(                                                \
+       EFX_EXTRACT_DWORD(                                              \
                *((efx_dword_t *)                                       \
                  (MCDI_ARRAY_PTR(_buf, _field1, _type, _index) +       \
                   (MC_CMD_ ## _type ## _TYPEDEF_ ## _field2 ## _OFST & ~3))), \
-               MC_CMD_ ## _type ## _TYPEDEF_ ## _field2)
+               MC_CMD_ ## _type ## _TYPEDEF_ ## _field2 ## _LBN & 0x1f, \
+               (MC_CMD_ ## _type ## _TYPEDEF_ ## _field2 ## _LBN & 0x1f) + \
+               MC_CMD_ ## _type ## _TYPEDEF_ ## _field2 ## _WIDTH - 1)
 
 extern void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len);
 extern int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
index db4beed97669c94a299d0ec6b96069d969e26b1e..9d426d0457bdd5140709b1da8186d817d5b2bbd2 100644 (file)
 #define          MCDI_EVENT_CODE_TX_FLUSH  0xc /* enum */
 #define          MCDI_EVENT_CODE_PTP_RX  0xd /* enum */
 #define          MCDI_EVENT_CODE_PTP_FAULT  0xe /* enum */
+#define          MCDI_EVENT_CODE_PTP_PPS  0xf /* enum */
 #define       MCDI_EVENT_CMDDONE_DATA_OFST 0
 #define       MCDI_EVENT_CMDDONE_DATA_LBN 0
 #define       MCDI_EVENT_CMDDONE_DATA_WIDTH 32
 
 /* MC_CMD_GET_FPGAREG_OUT msgresponse */
 #define    MC_CMD_GET_FPGAREG_OUT_LENMIN 1
-#define    MC_CMD_GET_FPGAREG_OUT_LENMAX 255
+#define    MC_CMD_GET_FPGAREG_OUT_LENMAX 252
 #define    MC_CMD_GET_FPGAREG_OUT_LEN(num) (0+1*(num))
 #define       MC_CMD_GET_FPGAREG_OUT_BUFFER_OFST 0
 #define       MC_CMD_GET_FPGAREG_OUT_BUFFER_LEN 1
 #define       MC_CMD_GET_FPGAREG_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_GET_FPGAREG_OUT_BUFFER_MAXNUM 255
+#define       MC_CMD_GET_FPGAREG_OUT_BUFFER_MAXNUM 252
 
 
 /***********************************/
 
 /* MC_CMD_PUT_FPGAREG_IN msgrequest */
 #define    MC_CMD_PUT_FPGAREG_IN_LENMIN 5
-#define    MC_CMD_PUT_FPGAREG_IN_LENMAX 255
+#define    MC_CMD_PUT_FPGAREG_IN_LENMAX 252
 #define    MC_CMD_PUT_FPGAREG_IN_LEN(num) (4+1*(num))
 #define       MC_CMD_PUT_FPGAREG_IN_ADDR_OFST 0
 #define       MC_CMD_PUT_FPGAREG_IN_BUFFER_OFST 4
 #define       MC_CMD_PUT_FPGAREG_IN_BUFFER_LEN 1
 #define       MC_CMD_PUT_FPGAREG_IN_BUFFER_MINNUM 1
-#define       MC_CMD_PUT_FPGAREG_IN_BUFFER_MAXNUM 251
+#define       MC_CMD_PUT_FPGAREG_IN_BUFFER_MAXNUM 248
 
 /* MC_CMD_PUT_FPGAREG_OUT msgresponse */
 #define    MC_CMD_PUT_FPGAREG_OUT_LEN 0
 
 /* MC_CMD_PTP_IN_TRANSMIT msgrequest */
 #define    MC_CMD_PTP_IN_TRANSMIT_LENMIN 13
-#define    MC_CMD_PTP_IN_TRANSMIT_LENMAX 255
+#define    MC_CMD_PTP_IN_TRANSMIT_LENMAX 252
 #define    MC_CMD_PTP_IN_TRANSMIT_LEN(num) (12+1*(num))
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
 #define       MC_CMD_PTP_IN_TRANSMIT_PACKET_OFST 12
 #define       MC_CMD_PTP_IN_TRANSMIT_PACKET_LEN 1
 #define       MC_CMD_PTP_IN_TRANSMIT_PACKET_MINNUM 1
-#define       MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM 243
+#define       MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM 240
 
 /* MC_CMD_PTP_IN_READ_NIC_TIME msgrequest */
 #define    MC_CMD_PTP_IN_READ_NIC_TIME_LEN 8
 
 /* MC_CMD_PUTS_IN msgrequest */
 #define    MC_CMD_PUTS_IN_LENMIN 13
-#define    MC_CMD_PUTS_IN_LENMAX 255
+#define    MC_CMD_PUTS_IN_LENMAX 252
 #define    MC_CMD_PUTS_IN_LEN(num) (12+1*(num))
 #define       MC_CMD_PUTS_IN_DEST_OFST 0
 #define        MC_CMD_PUTS_IN_UART_LBN 0
 #define       MC_CMD_PUTS_IN_STRING_OFST 12
 #define       MC_CMD_PUTS_IN_STRING_LEN 1
 #define       MC_CMD_PUTS_IN_STRING_MINNUM 1
-#define       MC_CMD_PUTS_IN_STRING_MAXNUM 243
+#define       MC_CMD_PUTS_IN_STRING_MAXNUM 240
 
 /* MC_CMD_PUTS_OUT msgresponse */
 #define    MC_CMD_PUTS_OUT_LEN 0
 
 /* MC_CMD_NVRAM_READ_OUT msgresponse */
 #define    MC_CMD_NVRAM_READ_OUT_LENMIN 1
-#define    MC_CMD_NVRAM_READ_OUT_LENMAX 255
+#define    MC_CMD_NVRAM_READ_OUT_LENMAX 252
 #define    MC_CMD_NVRAM_READ_OUT_LEN(num) (0+1*(num))
 #define       MC_CMD_NVRAM_READ_OUT_READ_BUFFER_OFST 0
 #define       MC_CMD_NVRAM_READ_OUT_READ_BUFFER_LEN 1
 #define       MC_CMD_NVRAM_READ_OUT_READ_BUFFER_MINNUM 1
-#define       MC_CMD_NVRAM_READ_OUT_READ_BUFFER_MAXNUM 255
+#define       MC_CMD_NVRAM_READ_OUT_READ_BUFFER_MAXNUM 252
 
 
 /***********************************/
 
 /* MC_CMD_NVRAM_WRITE_IN msgrequest */
 #define    MC_CMD_NVRAM_WRITE_IN_LENMIN 13
-#define    MC_CMD_NVRAM_WRITE_IN_LENMAX 255
+#define    MC_CMD_NVRAM_WRITE_IN_LENMAX 252
 #define    MC_CMD_NVRAM_WRITE_IN_LEN(num) (12+1*(num))
 #define       MC_CMD_NVRAM_WRITE_IN_TYPE_OFST 0
 /*            Enum values, see field(s): */
 #define       MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_OFST 12
 #define       MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_LEN 1
 #define       MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MINNUM 1
-#define       MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MAXNUM 243
+#define       MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MAXNUM 240
 
 /* MC_CMD_NVRAM_WRITE_OUT msgresponse */
 #define    MC_CMD_NVRAM_WRITE_OUT_LEN 0
 
 /* MC_CMD_GET_PHY_MEDIA_INFO_OUT msgresponse */
 #define    MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMIN 5
-#define    MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX 255
+#define    MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX 252
 #define    MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(num) (4+1*(num))
 #define       MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_OFST 0
 #define       MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST 4
 #define       MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_LEN 1
 #define       MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MINNUM 1
-#define       MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MAXNUM 251
+#define       MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MAXNUM 248
 
 
 /***********************************/
index 758148379b0e19d34e0e413884649060efa9090f..08f825b71ac8c5a69db6995fda028626e702adae 100644 (file)
@@ -585,6 +585,7 @@ static const struct siena_nvram_type_info siena_nvram_types[] = {
        [MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1]   = { 1, "sfc_exp_rom_cfg" },
        [MC_CMD_NVRAM_TYPE_PHY_PORT0]           = { 0, "sfc_phy_fw" },
        [MC_CMD_NVRAM_TYPE_PHY_PORT1]           = { 1, "sfc_phy_fw" },
+       [MC_CMD_NVRAM_TYPE_FPGA]                = { 0, "sfc_fpga" },
 };
 
 static int siena_mtd_probe_partition(struct efx_nic *efx,
@@ -598,7 +599,8 @@ static int siena_mtd_probe_partition(struct efx_nic *efx,
        bool protected;
        int rc;
 
-       if (type >= ARRAY_SIZE(siena_nvram_types))
+       if (type >= ARRAY_SIZE(siena_nvram_types) ||
+           siena_nvram_types[type].name == NULL)
                return -ENODEV;
 
        info = &siena_nvram_types[type];
@@ -627,7 +629,8 @@ static int siena_mtd_get_fw_subtypes(struct efx_nic *efx,
                                     struct efx_mtd *efx_mtd)
 {
        struct efx_mtd_partition *part;
-       uint16_t fw_subtype_list[MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MINNUM];
+       uint16_t fw_subtype_list[
+               MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM];
        int rc;
 
        rc = efx_mcdi_get_board_cfg(efx, NULL, fw_subtype_list, NULL);
index cd9c0a989692b5a547c74136c9be28e208f6951c..c1a010cda89b92f20afc5657354a42bc592f8568 100644 (file)
@@ -37,7 +37,7 @@
  *
  **************************************************************************/
 
-#define EFX_DRIVER_VERSION     "3.1"
+#define EFX_DRIVER_VERSION     "3.2"
 
 #ifdef DEBUG
 #define EFX_BUG_ON_PARANOID(x) BUG_ON(x)
@@ -56,7 +56,8 @@
 #define EFX_MAX_CHANNELS 32U
 #define EFX_MAX_RX_QUEUES EFX_MAX_CHANNELS
 #define EFX_EXTRA_CHANNEL_IOV  0
-#define EFX_MAX_EXTRA_CHANNELS 1U
+#define EFX_EXTRA_CHANNEL_PTP  1
+#define EFX_MAX_EXTRA_CHANNELS 2U
 
 /* Checksum generation is a per-queue option in hardware, so each
  * queue visible to the networking core is backed by two hardware TX
@@ -68,6 +69,9 @@
 #define EFX_TXQ_TYPES          4
 #define EFX_MAX_TX_QUEUES      (EFX_TXQ_TYPES * EFX_MAX_CHANNELS)
 
+/* Forward declare Precision Time Protocol (PTP) support structure. */
+struct efx_ptp_data;
+
 struct efx_self_tests;
 
 /**
@@ -91,29 +95,31 @@ struct efx_special_buffer {
 };
 
 /**
- * struct efx_tx_buffer - An Efx TX buffer
- * @skb: The associated socket buffer.
- *     Set only on the final fragment of a packet; %NULL for all other
- *     fragments.  When this fragment completes, then we can free this
- *     skb.
- * @tsoh: The associated TSO header structure, or %NULL if this
- *     buffer is not a TSO header.
+ * struct efx_tx_buffer - buffer state for a TX descriptor
+ * @skb: When @flags & %EFX_TX_BUF_SKB, the associated socket buffer to be
+ *     freed when descriptor completes
+ * @heap_buf: When @flags & %EFX_TX_BUF_HEAP, the associated heap buffer to be
+ *     freed when descriptor completes.
  * @dma_addr: DMA address of the fragment.
+ * @flags: Flags for allocation and DMA mapping type
  * @len: Length of this fragment.
  *     This field is zero when the queue slot is empty.
- * @continuation: True if this fragment is not the end of a packet.
- * @unmap_single: True if dma_unmap_single should be used.
  * @unmap_len: Length of this fragment to unmap
  */
 struct efx_tx_buffer {
-       const struct sk_buff *skb;
-       struct efx_tso_header *tsoh;
+       union {
+               const struct sk_buff *skb;
+               void *heap_buf;
+       };
        dma_addr_t dma_addr;
+       unsigned short flags;
        unsigned short len;
-       bool continuation;
-       bool unmap_single;
        unsigned short unmap_len;
 };
+#define EFX_TX_BUF_CONT                1       /* not last descriptor of packet */
+#define EFX_TX_BUF_SKB         2       /* buffer is last part of skb */
+#define EFX_TX_BUF_HEAP                4       /* buffer was allocated with kmalloc() */
+#define EFX_TX_BUF_MAP_SINGLE  8       /* buffer was mapped with dma_map_single() */
 
 /**
  * struct efx_tx_queue - An Efx TX queue
@@ -133,6 +139,7 @@ struct efx_tx_buffer {
  * @channel: The associated channel
  * @core_txq: The networking core TX queue structure
  * @buffer: The software buffer ring
+ * @tsoh_page: Array of pages of TSO header buffers
  * @txd: The hardware descriptor ring
  * @ptr_mask: The size of the ring minus 1.
  * @initialised: Has hardware queue been initialised?
@@ -156,9 +163,6 @@ struct efx_tx_buffer {
  *     variable indicates that the queue is full.  This is to
  *     avoid cache-line ping-pong between the xmit path and the
  *     completion path.
- * @tso_headers_free: A list of TSO headers allocated for this TX queue
- *     that are not in use, and so available for new TSO sends. The list
- *     is protected by the TX queue lock.
  * @tso_bursts: Number of times TSO xmit invoked by kernel
  * @tso_long_headers: Number of packets with headers too long for standard
  *     blocks
@@ -175,6 +179,7 @@ struct efx_tx_queue {
        struct efx_channel *channel;
        struct netdev_queue *core_txq;
        struct efx_tx_buffer *buffer;
+       struct efx_buffer *tsoh_page;
        struct efx_special_buffer txd;
        unsigned int ptr_mask;
        bool initialised;
@@ -187,7 +192,6 @@ struct efx_tx_queue {
        unsigned int insert_count ____cacheline_aligned_in_smp;
        unsigned int write_count;
        unsigned int old_read_count;
-       struct efx_tso_header *tso_headers_free;
        unsigned int tso_bursts;
        unsigned int tso_long_headers;
        unsigned int tso_packets;
@@ -242,6 +246,8 @@ struct efx_rx_page_state {
 /**
  * struct efx_rx_queue - An Efx RX queue
  * @efx: The associated Efx NIC
+ * @core_index:  Index of network core RX queue.  Will be >= 0 iff this
+ *     is associated with a real RX queue.
  * @buffer: The software buffer ring
  * @rxd: The hardware descriptor ring
  * @ptr_mask: The size of the ring minus 1.
@@ -263,6 +269,7 @@ struct efx_rx_page_state {
  */
 struct efx_rx_queue {
        struct efx_nic *efx;
+       int core_index;
        struct efx_rx_buffer *buffer;
        struct efx_special_buffer rxd;
        unsigned int ptr_mask;
@@ -390,14 +397,17 @@ struct efx_channel {
  * @get_name: Generate the channel's name (used for its IRQ handler)
  * @copy: Copy the channel state prior to reallocation.  May be %NULL if
  *     reallocation is not supported.
+ * @receive_skb: Handle an skb ready to be passed to netif_receive_skb()
  * @keep_eventq: Flag for whether event queue should be kept initialised
  *     while the device is stopped
  */
 struct efx_channel_type {
        void (*handle_no_channel)(struct efx_nic *);
        int (*pre_probe)(struct efx_channel *);
+       void (*post_remove)(struct efx_channel *);
        void (*get_name)(struct efx_channel *, char *buf, size_t len);
        struct efx_channel *(*copy)(const struct efx_channel *);
+       void (*receive_skb)(struct efx_channel *, struct sk_buff *);
        bool keep_eventq;
 };
 
@@ -430,11 +440,9 @@ enum efx_int_mode {
 #define EFX_INT_MODE_USE_MSI(x) (((x)->interrupt_mode) <= EFX_INT_MODE_MSI)
 
 enum nic_state {
-       STATE_INIT = 0,
-       STATE_RUNNING = 1,
-       STATE_FINI = 2,
-       STATE_DISABLED = 3,
-       STATE_MAX,
+       STATE_UNINIT = 0,       /* device being probed/removed or is frozen */
+       STATE_READY = 1,        /* hardware ready and netdev registered */
+       STATE_DISABLED = 2,     /* device disabled due to hardware errors */
 };
 
 /*
@@ -654,7 +662,7 @@ struct vfdi_status;
  * @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues
  * @irq_rx_moderation: IRQ moderation time for RX event queues
  * @msg_enable: Log message enable flags
- * @state: Device state flag. Serialised by the rtnl_lock.
+ * @state: Device state number (%STATE_*). Serialised by the rtnl_lock.
  * @reset_pending: Bitmask for pending resets
  * @tx_queue: TX DMA queues
  * @rx_queue: RX DMA queues
@@ -664,6 +672,8 @@ struct vfdi_status;
  *     should be allocated for this NIC
  * @rxq_entries: Size of receive queues requested by user.
  * @txq_entries: Size of transmit queues requested by user.
+ * @txq_stop_thresh: TX queue fill level at or above which we stop it.
+ * @txq_wake_thresh: TX queue fill level at or below which we wake it.
  * @tx_dc_base: Base qword address in SRAM of TX queue descriptor caches
  * @rx_dc_base: Base qword address in SRAM of RX queue descriptor caches
  * @sram_lim_qw: Qword address limit of SRAM
@@ -730,6 +740,7 @@ struct vfdi_status;
  *     %local_addr_list. Protected by %local_lock.
  * @local_lock: Mutex protecting %local_addr_list and %local_page_list.
  * @peer_work: Work item to broadcast peer addresses to VMs.
+ * @ptp_data: PTP state data
  * @monitor_work: Hardware monitor workitem
  * @biu_lock: BIU (bus interface unit) lock
  * @last_irq_cpu: Last CPU to handle a possible test interrupt.  This
@@ -774,6 +785,9 @@ struct efx_nic {
 
        unsigned rxq_entries;
        unsigned txq_entries;
+       unsigned int txq_stop_thresh;
+       unsigned int txq_wake_thresh;
+
        unsigned tx_dc_base;
        unsigned rx_dc_base;
        unsigned sram_lim_qw;
@@ -854,6 +868,10 @@ struct efx_nic {
        struct work_struct peer_work;
 #endif
 
+#ifdef CONFIG_SFC_PTP
+       struct efx_ptp_data *ptp_data;
+#endif
+
        /* The following fields may be written more often */
 
        struct delayed_work monitor_work ____cacheline_aligned_in_smp;
@@ -1044,7 +1062,7 @@ static inline bool efx_tx_queue_used(struct efx_tx_queue *tx_queue)
 
 static inline bool efx_channel_has_rx_queue(struct efx_channel *channel)
 {
-       return channel->channel < channel->efx->n_rx_channels;
+       return channel->rx_queue.core_index >= 0;
 }
 
 static inline struct efx_rx_queue *
@@ -1116,5 +1134,13 @@ static inline void clear_bit_le(unsigned nr, unsigned char *addr)
 #define EFX_MAX_FRAME_LEN(mtu) \
        ((((mtu) + ETH_HLEN + VLAN_HLEN + 4/* FCS */ + 7) & ~7) + 16)
 
+static inline bool efx_xmit_with_hwtstamp(struct sk_buff *skb)
+{
+       return skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP;
+}
+static inline void efx_xmit_hwtstamp_pending(struct sk_buff *skb)
+{
+       skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+}
 
 #endif /* EFX_NET_DRIVER_H */
index 326d799762d644b1c665a18c5b2e3bdca9a22206..cdff40b65729ad79a8a744933ca9453958305162 100644 (file)
@@ -298,7 +298,7 @@ efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
 /**************************************************************************
  *
  * Generic buffer handling
- * These buffers are used for interrupt status and MAC stats
+ * These buffers are used for interrupt status, MAC stats, etc.
  *
  **************************************************************************/
 
@@ -401,8 +401,10 @@ void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
                ++tx_queue->write_count;
 
                /* Create TX descriptor ring entry */
+               BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
                EFX_POPULATE_QWORD_4(*txd,
-                                    FSF_AZ_TX_KER_CONT, buffer->continuation,
+                                    FSF_AZ_TX_KER_CONT,
+                                    buffer->flags & EFX_TX_BUF_CONT,
                                     FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
                                     FSF_AZ_TX_KER_BUF_REGION, 0,
                                     FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
index bab5cd9f5740bb8e9e7476849f089ba513346b9f..438cef11f7270bd620ed04c42fb1c8ee8a9d3621 100644 (file)
@@ -11,6 +11,7 @@
 #ifndef EFX_NIC_H
 #define EFX_NIC_H
 
+#include <linux/net_tstamp.h>
 #include <linux/i2c-algo-bit.h>
 #include "net_driver.h"
 #include "efx.h"
@@ -250,6 +251,41 @@ extern int efx_sriov_get_vf_config(struct net_device *dev, int vf,
 extern int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf,
                                     bool spoofchk);
 
+struct ethtool_ts_info;
+#ifdef CONFIG_SFC_PTP
+extern void efx_ptp_probe(struct efx_nic *efx);
+extern int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd);
+extern int efx_ptp_get_ts_info(struct net_device *net_dev,
+                              struct ethtool_ts_info *ts_info);
+extern bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
+extern int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
+extern void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev);
+#else
+static inline void efx_ptp_probe(struct efx_nic *efx) {}
+static inline int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd)
+{
+       return -EOPNOTSUPP;
+}
+static inline int efx_ptp_get_ts_info(struct net_device *net_dev,
+                                     struct ethtool_ts_info *ts_info)
+{
+       ts_info->so_timestamping = (SOF_TIMESTAMPING_SOFTWARE |
+                                   SOF_TIMESTAMPING_RX_SOFTWARE);
+       ts_info->phc_index = -1;
+
+       return 0;
+}
+static inline bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb)
+{
+       return false;
+}
+static inline int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb)
+{
+       return NETDEV_TX_OK;
+}
+static inline void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev) {}
+#endif
+
 extern const struct efx_nic_type falcon_a1_nic_type;
 extern const struct efx_nic_type falcon_b0_nic_type;
 extern const struct efx_nic_type siena_a0_nic_type;
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
new file mode 100644 (file)
index 0000000..5b3dd02
--- /dev/null
@@ -0,0 +1,1484 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2011 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+/* Theory of operation:
+ *
+ * PTP support is assisted by firmware running on the MC, which provides
+ * the hardware timestamping capabilities.  Both transmitted and received
+ * PTP event packets are queued onto internal queues for subsequent processing;
+ * this is because the MC operations are relatively long and would block
+ * NAPI/interrupt operation.
+ *
+ * Receive event processing:
+ *     The event contains the packet's UUID and sequence number, together
+ *     with the hardware timestamp.  The PTP receive packet queue is searched
+ *     for this UUID/sequence number and, if found, put on a pending queue.
+ *     Packets not matching are delivered without timestamps (MCDI events will
+ *     always arrive after the actual packet).
+ *     It is important for the operation of the PTP protocol that the ordering
+ *     of packets between the event and general port is maintained.
+ *
+ * Work queue processing:
+ *     If work waiting, synchronise host/hardware time
+ *
+ *     Transmit: send packet through MC, which returns the transmission time
+ *     that is converted to an appropriate timestamp.
+ *
+ *     Receive: the packet's reception time is converted to an appropriate
+ *     timestamp.
+ */
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <linux/time.h>
+#include <linux/ktime.h>
+#include <linux/module.h>
+#include <linux/net_tstamp.h>
+#include <linux/pps_kernel.h>
+#include <linux/ptp_clock_kernel.h>
+#include "net_driver.h"
+#include "efx.h"
+#include "mcdi.h"
+#include "mcdi_pcol.h"
+#include "io.h"
+#include "regs.h"
+#include "nic.h"
+
+/* Maximum number of events expected to make up a PTP event */
+#define        MAX_EVENT_FRAGS                 3
+
+/* Maximum delay, ms, to begin synchronisation */
+#define        MAX_SYNCHRONISE_WAIT_MS         2
+
+/* How long, at most, to spend synchronising */
+#define        SYNCHRONISE_PERIOD_NS           250000
+
+/* How often to update the shared memory time */
+#define        SYNCHRONISATION_GRANULARITY_NS  200
+
+/* Minimum permitted length of a (corrected) synchronisation time */
+#define        MIN_SYNCHRONISATION_NS          120
+
+/* Maximum permitted length of a (corrected) synchronisation time */
+#define        MAX_SYNCHRONISATION_NS          1000
+
+/* How many (MC) receive events that can be queued */
+#define        MAX_RECEIVE_EVENTS              8
+
+/* Length of (modified) moving average. */
+#define        AVERAGE_LENGTH                  16
+
+/* How long an unmatched event or packet can be held */
+#define PKT_EVENT_LIFETIME_MS          10
+
+/* Offsets into PTP packet for identification.  These offsets are from the
+ * start of the IP header, not the MAC header.  Note that neither PTP V1 nor
+ * PTP V2 permit the use of IPV4 options.
+ */
+#define PTP_DPORT_OFFSET       22
+
+#define PTP_V1_VERSION_LENGTH  2
+#define PTP_V1_VERSION_OFFSET  28
+
+#define PTP_V1_UUID_LENGTH     6
+#define PTP_V1_UUID_OFFSET     50
+
+#define PTP_V1_SEQUENCE_LENGTH 2
+#define PTP_V1_SEQUENCE_OFFSET 58
+
+/* The minimum length of a PTP V1 packet for offsets, etc. to be valid:
+ * includes IP header.
+ */
+#define        PTP_V1_MIN_LENGTH       64
+
+#define PTP_V2_VERSION_LENGTH  1
+#define PTP_V2_VERSION_OFFSET  29
+
+/* Although PTP V2 UUIDs are comprised of a ClockIdentity (8) and PortNumber (2),
+ * the MC only captures the last six bytes of the clock identity. These values
+ * reflect those, not the ones used in the standard.  The standard permits
+ * mapping of V1 UUIDs to V2 UUIDs with these same values.
+ */
+#define PTP_V2_MC_UUID_LENGTH  6
+#define PTP_V2_MC_UUID_OFFSET  50
+
+#define PTP_V2_SEQUENCE_LENGTH 2
+#define PTP_V2_SEQUENCE_OFFSET 58
+
+/* The minimum length of a PTP V2 packet for offsets, etc. to be valid:
+ * includes IP header.
+ */
+#define        PTP_V2_MIN_LENGTH       63
+
+#define        PTP_MIN_LENGTH          63
+
+#define PTP_ADDRESS            0xe0000181      /* 224.0.1.129 */
+#define PTP_EVENT_PORT         319
+#define PTP_GENERAL_PORT       320
+
+/* Annoyingly the formats of the version numbers are different between
+ * versions 1 and 2 so it isn't possible to simply look for 1 or 2.
+ */
+#define        PTP_VERSION_V1          1
+
+#define        PTP_VERSION_V2          2
+#define        PTP_VERSION_V2_MASK     0x0f
+
+enum ptp_packet_state {
+       PTP_PACKET_STATE_UNMATCHED = 0,
+       PTP_PACKET_STATE_MATCHED,
+       PTP_PACKET_STATE_TIMED_OUT,
+       PTP_PACKET_STATE_MATCH_UNWANTED
+};
+
+/* NIC synchronised with single word of time only comprising
+ * partial seconds and full nanoseconds: 10^9 ~ 2^30 so 2 bits for seconds.
+ */
+#define        MC_NANOSECOND_BITS      30
+#define        MC_NANOSECOND_MASK      ((1 << MC_NANOSECOND_BITS) - 1)
+#define        MC_SECOND_MASK          ((1 << (32 - MC_NANOSECOND_BITS)) - 1)
+
+/* Maximum parts-per-billion adjustment that is acceptable */
+#define MAX_PPB                        1000000
+
+/* Number of bits required to hold the above */
+#define        MAX_PPB_BITS            20
+
+/* Number of extra bits allowed when calculating fractional ns.
+ * EXTRA_BITS + MC_CMD_PTP_IN_ADJUST_BITS + MAX_PPB_BITS should
+ * be less than 63.
+ */
+#define        PPB_EXTRA_BITS          2
+
+/* Precalculate scale word to avoid long long division at runtime */
+#define        PPB_SCALE_WORD  ((1LL << (PPB_EXTRA_BITS + MC_CMD_PTP_IN_ADJUST_BITS +\
+                       MAX_PPB_BITS)) / 1000000000LL)
+
+#define PTP_SYNC_ATTEMPTS      4
+
+/**
+ * struct efx_ptp_match - Matching structure, stored in sk_buff's cb area.
+ * @words: UUID and (partial) sequence number
+ * @expiry: Time after which the packet should be delivered irrespective of
+ *            event arrival.
+ * @state: The state of the packet - whether it is ready for processing or
+ *         whether that is of no interest.
+ */
+struct efx_ptp_match {
+       u32 words[DIV_ROUND_UP(PTP_V1_UUID_LENGTH, 4)];
+       unsigned long expiry;
+       enum ptp_packet_state state;
+};
+
+/**
+ * struct efx_ptp_event_rx - A PTP receive event (from MC)
+ * @seq0: First part of (PTP) UUID
+ * @seq1: Second part of (PTP) UUID and sequence number
+ * @hwtimestamp: Event timestamp
+ */
+struct efx_ptp_event_rx {
+       struct list_head link;
+       u32 seq0;
+       u32 seq1;
+       ktime_t hwtimestamp;
+       unsigned long expiry;
+};
+
+/**
+ * struct efx_ptp_timeset - Synchronisation between host and MC
+ * @host_start: Host time immediately before hardware timestamp taken
+ * @seconds: Hardware timestamp, seconds
+ * @nanoseconds: Hardware timestamp, nanoseconds
+ * @host_end: Host time immediately after hardware timestamp taken
+ * @waitns: Number of nanoseconds between hardware timestamp being read and
+ *          host end time being seen
+ * @window: Difference of host_end and host_start
+ * @valid: Whether this timeset is valid
+ */
+struct efx_ptp_timeset {
+       u32 host_start;
+       u32 seconds;
+       u32 nanoseconds;
+       u32 host_end;
+       u32 waitns;
+       u32 window;     /* Derived: end - start, allowing for wrap */
+};
+
+/**
+ * struct efx_ptp_data - Precision Time Protocol (PTP) state
+ * @channel: The PTP channel
+ * @rxq: Receive queue (awaiting timestamps)
+ * @txq: Transmit queue
+ * @evt_list: List of MC receive events awaiting packets
+ * @evt_free_list: List of free events
+ * @evt_lock: Lock for manipulating evt_list and evt_free_list
+ * @rx_evts: Instantiated events (on evt_list and evt_free_list)
+ * @workwq: Work queue for processing pending PTP operations
+ * @work: Work task
+ * @reset_required: A serious error has occurred and the PTP task needs to be
+ *                  reset (disable, enable).
+ * @rxfilter_event: Receive filter when operating
+ * @rxfilter_general: Receive filter when operating
+ * @config: Current timestamp configuration
+ * @enabled: PTP operation enabled
+ * @mode: Mode in which PTP operating (PTP version)
+ * @evt_frags: Partly assembled PTP events
+ * @evt_frag_idx: Current fragment number
+ * @evt_code: Last event code
+ * @start: Address at which MC indicates ready for synchronisation
+ * @host_time_pps: Host time at last PPS
+ * @last_sync_ns: Last number of nanoseconds between readings when synchronising
+ * @base_sync_ns: Number of nanoseconds for last synchronisation.
+ * @base_sync_valid: Whether base_sync_ns is valid.
+ * @current_adjfreq: Current ppb adjustment.
+ * @phc_clock: Pointer to registered phc device
+ * @phc_clock_info: Registration structure for phc device
+ * @pps_work: pps work task for handling pps events
+ * @pps_workwq: pps work queue
+ * @nic_ts_enabled: Flag indicating if NIC generated TS events are handled
+ * @txbuf: Buffer for use when transmitting (PTP) packets to MC (avoids
+ *         allocations in main data path).
+ * @debug_ptp_dir: PTP debugfs directory
+ * @missed_rx_sync: Number of packets received without synchronisation.
+ * @good_syncs: Number of successful synchronisations.
+ * @no_time_syncs: Number of synchronisations with no good times.
+ * @bad_sync_durations: Number of synchronisations with bad durations.
+ * @bad_syncs: Number of failed synchronisations.
+ * @last_sync_time: Number of nanoseconds for last synchronisation.
+ * @sync_timeouts: Number of synchronisation timeouts
+ * @fast_syncs: Number of synchronisations requiring short delay
+ * @min_sync_delta: Minimum time between event and synchronisation
+ * @max_sync_delta: Maximum time between event and synchronisation
+ * @average_sync_delta: Average time between event and synchronisation.
+ *                      Modified moving average.
+ * @last_sync_delta: Last time between event and synchronisation
+ * @mc_stats: Context value for MC statistics
+ * @timeset: Last set of synchronisation statistics.
+ */
+struct efx_ptp_data {
+       struct efx_channel *channel;
+       struct sk_buff_head rxq;
+       struct sk_buff_head txq;
+       struct list_head evt_list;
+       struct list_head evt_free_list;
+       spinlock_t evt_lock;
+       struct efx_ptp_event_rx rx_evts[MAX_RECEIVE_EVENTS];
+       struct workqueue_struct *workwq;
+       struct work_struct work;
+       bool reset_required;
+       u32 rxfilter_event;
+       u32 rxfilter_general;
+       bool rxfilter_installed;
+       struct hwtstamp_config config;
+       bool enabled;
+       unsigned int mode;
+       efx_qword_t evt_frags[MAX_EVENT_FRAGS];
+       int evt_frag_idx;
+       int evt_code;
+       struct efx_buffer start;
+       struct pps_event_time host_time_pps;
+       unsigned last_sync_ns;
+       unsigned base_sync_ns;
+       bool base_sync_valid;
+       s64 current_adjfreq;
+       struct ptp_clock *phc_clock;
+       struct ptp_clock_info phc_clock_info;
+       struct work_struct pps_work;
+       struct workqueue_struct *pps_workwq;
+       bool nic_ts_enabled;
+       u8 txbuf[ALIGN(MC_CMD_PTP_IN_TRANSMIT_LEN(
+                              MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM), 4)];
+       struct efx_ptp_timeset
+       timeset[MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM];
+};
+
+static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta);
+static int efx_phc_adjtime(struct ptp_clock_info *ptp, s64 delta);
+static int efx_phc_gettime(struct ptp_clock_info *ptp, struct timespec *ts);
+static int efx_phc_settime(struct ptp_clock_info *ptp,
+                          const struct timespec *e_ts);
+static int efx_phc_enable(struct ptp_clock_info *ptp,
+                         struct ptp_clock_request *request, int on);
+
+/* Enable MCDI PTP support. */
+static int efx_ptp_enable(struct efx_nic *efx)
+{
+       u8 inbuf[MC_CMD_PTP_IN_ENABLE_LEN];
+
+       MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ENABLE);
+       MCDI_SET_DWORD(inbuf, PTP_IN_ENABLE_QUEUE,
+                      efx->ptp_data->channel->channel);
+       MCDI_SET_DWORD(inbuf, PTP_IN_ENABLE_MODE, efx->ptp_data->mode);
+
+       return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+                           NULL, 0, NULL);
+}
+
+/* Disable MCDI PTP support.
+ *
+ * Note that this function should never rely on the presence of ptp_data -
+ * may be called before that exists.
+ */
+static int efx_ptp_disable(struct efx_nic *efx)
+{
+       u8 inbuf[MC_CMD_PTP_IN_DISABLE_LEN];
+
+       MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_DISABLE);
+       return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+                           NULL, 0, NULL);
+}
+
+static void efx_ptp_deliver_rx_queue(struct sk_buff_head *q)
+{
+       struct sk_buff *skb;
+
+       while ((skb = skb_dequeue(q))) {
+               local_bh_disable();
+               netif_receive_skb(skb);
+               local_bh_enable();
+       }
+}
+
+static void efx_ptp_handle_no_channel(struct efx_nic *efx)
+{
+       netif_err(efx, drv, efx->net_dev,
+                 "ERROR: PTP requires MSI-X and 1 additional interrupt "
+                 "vector. PTP disabled\n");
+}
+
+/* Repeatedly send the host time to the MC which will capture the hardware
+ * time.
+ */
+static void efx_ptp_send_times(struct efx_nic *efx,
+                              struct pps_event_time *last_time)
+{
+       struct pps_event_time now;
+       struct timespec limit;
+       struct efx_ptp_data *ptp = efx->ptp_data;
+       struct timespec start;
+       int *mc_running = ptp->start.addr;
+
+       pps_get_ts(&now);
+       start = now.ts_real;
+       limit = now.ts_real;
+       timespec_add_ns(&limit, SYNCHRONISE_PERIOD_NS);
+
+       /* Write host time for specified period or until MC is done */
+       while ((timespec_compare(&now.ts_real, &limit) < 0) &&
+              ACCESS_ONCE(*mc_running)) {
+               struct timespec update_time;
+               unsigned int host_time;
+
+               /* Don't update continuously to avoid saturating the PCIe bus */
+               update_time = now.ts_real;
+               timespec_add_ns(&update_time, SYNCHRONISATION_GRANULARITY_NS);
+               do {
+                       pps_get_ts(&now);
+               } while ((timespec_compare(&now.ts_real, &update_time) < 0) &&
+                        ACCESS_ONCE(*mc_running));
+
+               /* Synchronise NIC with single word of time only */
+               host_time = (now.ts_real.tv_sec << MC_NANOSECOND_BITS |
+                            now.ts_real.tv_nsec);
+               /* Update host time in NIC memory */
+               _efx_writed(efx, cpu_to_le32(host_time),
+                           FR_CZ_MC_TREG_SMEM + MC_SMEM_P0_PTP_TIME_OFST);
+       }
+       *last_time = now;
+}
+
+/* Read a timeset from the MC's results and partial process. */
+static void efx_ptp_read_timeset(u8 *data, struct efx_ptp_timeset *timeset)
+{
+       unsigned start_ns, end_ns;
+
+       timeset->host_start = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_HOSTSTART);
+       timeset->seconds = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_SECONDS);
+       timeset->nanoseconds = MCDI_DWORD(data,
+                                        PTP_OUT_SYNCHRONIZE_NANOSECONDS);
+       timeset->host_end = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_HOSTEND),
+       timeset->waitns = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_WAITNS);
+
+       /* Ignore seconds */
+       start_ns = timeset->host_start & MC_NANOSECOND_MASK;
+       end_ns = timeset->host_end & MC_NANOSECOND_MASK;
+       /* Allow for rollover */
+       if (end_ns < start_ns)
+               end_ns += NSEC_PER_SEC;
+       /* Determine duration of operation */
+       timeset->window = end_ns - start_ns;
+}
+
+/* Process times received from MC.
+ *
+ * Extract times from returned results, and establish the minimum value
+ * seen.  The minimum value represents the "best" possible time and events
+ * too much greater than this are rejected - the machine is, perhaps, too
+ * busy. A number of readings are taken so that, hopefully, at least one good
+ * synchronisation will be seen in the results.
+ */
+static int efx_ptp_process_times(struct efx_nic *efx, u8 *synch_buf,
+                                size_t response_length,
+                                const struct pps_event_time *last_time)
+{
+       unsigned number_readings = (response_length /
+                              MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_LEN);
+       unsigned i;
+       unsigned min;
+       unsigned min_set = 0;
+       unsigned total;
+       unsigned ngood = 0;
+       unsigned last_good = 0;
+       struct efx_ptp_data *ptp = efx->ptp_data;
+       bool min_valid = false;
+       u32 last_sec;
+       u32 start_sec;
+       struct timespec delta;
+
+       if (number_readings == 0)
+               return -EAGAIN;
+
+       /* Find minimum value in this set of results, discarding clearly
+        * erroneous results.
+        */
+       for (i = 0; i < number_readings; i++) {
+               efx_ptp_read_timeset(synch_buf, &ptp->timeset[i]);
+               synch_buf += MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_LEN;
+               if (ptp->timeset[i].window > SYNCHRONISATION_GRANULARITY_NS) {
+                       if (min_valid) {
+                               if (ptp->timeset[i].window < min_set)
+                                       min_set = ptp->timeset[i].window;
+                       } else {
+                               min_valid = true;
+                               min_set = ptp->timeset[i].window;
+                       }
+               }
+       }
+
+       if (min_valid) {
+               if (ptp->base_sync_valid && (min_set > ptp->base_sync_ns))
+                       min = ptp->base_sync_ns;
+               else
+                       min = min_set;
+       } else {
+               min = SYNCHRONISATION_GRANULARITY_NS;
+       }
+
+       /* Discard excessively long synchronise durations.  The MC times
+        * when it finishes reading the host time so the corrected window
+        * time should be fairly constant for a given platform.
+        */
+       total = 0;
+       for (i = 0; i < number_readings; i++)
+               if (ptp->timeset[i].window > ptp->timeset[i].waitns) {
+                       unsigned win;
+
+                       win = ptp->timeset[i].window - ptp->timeset[i].waitns;
+                       if (win >= MIN_SYNCHRONISATION_NS &&
+                           win < MAX_SYNCHRONISATION_NS) {
+                               total += ptp->timeset[i].window;
+                               ngood++;
+                               last_good = i;
+                       }
+               }
+
+       if (ngood == 0) {
+               netif_warn(efx, drv, efx->net_dev,
+                          "PTP no suitable synchronisations %dns %dns\n",
+                          ptp->base_sync_ns, min_set);
+               return -EAGAIN;
+       }
+
+       /* Average minimum this synchronisation */
+       ptp->last_sync_ns = DIV_ROUND_UP(total, ngood);
+       if (!ptp->base_sync_valid || (ptp->last_sync_ns < ptp->base_sync_ns)) {
+               ptp->base_sync_valid = true;
+               ptp->base_sync_ns = ptp->last_sync_ns;
+       }
+
+       /* Calculate delay from actual PPS to last_time */
+       delta.tv_nsec =
+               ptp->timeset[last_good].nanoseconds +
+               last_time->ts_real.tv_nsec -
+               (ptp->timeset[last_good].host_start & MC_NANOSECOND_MASK);
+
+       /* It is possible that the seconds rolled over between taking
+        * the start reading and the last value written by the host.  The
+        * timescales are such that a gap of more than one second is never
+        * expected.
+        */
+       start_sec = ptp->timeset[last_good].host_start >> MC_NANOSECOND_BITS;
+       last_sec = last_time->ts_real.tv_sec & MC_SECOND_MASK;
+       if (start_sec != last_sec) {
+               if (((start_sec + 1) & MC_SECOND_MASK) != last_sec) {
+                       netif_warn(efx, hw, efx->net_dev,
+                                  "PTP bad synchronisation seconds\n");
+                       return -EAGAIN;
+               } else {
+                       delta.tv_sec = 1;
+               }
+       } else {
+               delta.tv_sec = 0;
+       }
+
+       ptp->host_time_pps = *last_time;
+       pps_sub_ts(&ptp->host_time_pps, delta);
+
+       return 0;
+}
+
+/* Synchronize times between the host and the MC */
+static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
+{
+       struct efx_ptp_data *ptp = efx->ptp_data;
+       u8 synch_buf[MC_CMD_PTP_OUT_SYNCHRONIZE_LENMAX];
+       size_t response_length;
+       int rc;
+       unsigned long timeout;
+       struct pps_event_time last_time = {};
+       unsigned int loops = 0;
+       int *start = ptp->start.addr;
+
+       MCDI_SET_DWORD(synch_buf, PTP_IN_OP, MC_CMD_PTP_OP_SYNCHRONIZE);
+       MCDI_SET_DWORD(synch_buf, PTP_IN_SYNCHRONIZE_NUMTIMESETS,
+                      num_readings);
+       MCDI_SET_DWORD(synch_buf, PTP_IN_SYNCHRONIZE_START_ADDR_LO,
+                      (u32)ptp->start.dma_addr);
+       MCDI_SET_DWORD(synch_buf, PTP_IN_SYNCHRONIZE_START_ADDR_HI,
+                      (u32)((u64)ptp->start.dma_addr >> 32));
+
+       /* Clear flag that signals MC ready */
+       ACCESS_ONCE(*start) = 0;
+       efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
+                          MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
+
+       /* Wait for start from MCDI (or timeout) */
+       timeout = jiffies + msecs_to_jiffies(MAX_SYNCHRONISE_WAIT_MS);
+       while (!ACCESS_ONCE(*start) && (time_before(jiffies, timeout))) {
+               udelay(20);     /* Usually start MCDI execution quickly */
+               loops++;
+       }
+
+       if (ACCESS_ONCE(*start))
+               efx_ptp_send_times(efx, &last_time);
+
+       /* Collect results */
+       rc = efx_mcdi_rpc_finish(efx, MC_CMD_PTP,
+                                MC_CMD_PTP_IN_SYNCHRONIZE_LEN,
+                                synch_buf, sizeof(synch_buf),
+                                &response_length);
+       if (rc == 0)
+               rc = efx_ptp_process_times(efx, synch_buf, response_length,
+                                          &last_time);
+
+       return rc;
+}
+
+/* Transmit a PTP packet, via the MCDI interface, to the wire. */
+static int efx_ptp_xmit_skb(struct efx_nic *efx, struct sk_buff *skb)
+{
+       u8 *txbuf = efx->ptp_data->txbuf;
+       struct skb_shared_hwtstamps timestamps;
+       int rc = -EIO;
+       /* MCDI driver requires word aligned lengths */
+       size_t len = ALIGN(MC_CMD_PTP_IN_TRANSMIT_LEN(skb->len), 4);
+       u8 txtime[MC_CMD_PTP_OUT_TRANSMIT_LEN];
+
+       MCDI_SET_DWORD(txbuf, PTP_IN_OP, MC_CMD_PTP_OP_TRANSMIT);
+       MCDI_SET_DWORD(txbuf, PTP_IN_TRANSMIT_LENGTH, skb->len);
+       if (skb_shinfo(skb)->nr_frags != 0) {
+               rc = skb_linearize(skb);
+               if (rc != 0)
+                       goto fail;
+       }
+
+       if (skb->ip_summed == CHECKSUM_PARTIAL) {
+               rc = skb_checksum_help(skb);
+               if (rc != 0)
+                       goto fail;
+       }
+       skb_copy_from_linear_data(skb,
+                                 &txbuf[MC_CMD_PTP_IN_TRANSMIT_PACKET_OFST],
+                                 len);
+       rc = efx_mcdi_rpc(efx, MC_CMD_PTP, txbuf, len, txtime,
+                         sizeof(txtime), &len);
+       if (rc != 0)
+               goto fail;
+
+       memset(&timestamps, 0, sizeof(timestamps));
+       timestamps.hwtstamp = ktime_set(
+               MCDI_DWORD(txtime, PTP_OUT_TRANSMIT_SECONDS),
+               MCDI_DWORD(txtime, PTP_OUT_TRANSMIT_NANOSECONDS));
+
+       skb_tstamp_tx(skb, &timestamps);
+
+       rc = 0;
+
+fail:
+       dev_kfree_skb(skb);
+
+       return rc;
+}
+
+static void efx_ptp_drop_time_expired_events(struct efx_nic *efx)
+{
+       struct efx_ptp_data *ptp = efx->ptp_data;
+       struct list_head *cursor;
+       struct list_head *next;
+
+       /* Drop time-expired events */
+       spin_lock_bh(&ptp->evt_lock);
+       if (!list_empty(&ptp->evt_list)) {
+               list_for_each_safe(cursor, next, &ptp->evt_list) {
+                       struct efx_ptp_event_rx *evt;
+
+                       evt = list_entry(cursor, struct efx_ptp_event_rx,
+                                        link);
+                       if (time_after(jiffies, evt->expiry)) {
+                               list_del(&evt->link);
+                               list_add(&evt->link, &ptp->evt_free_list);
+                               netif_warn(efx, hw, efx->net_dev,
+                                          "PTP rx event dropped\n");
+                       }
+               }
+       }
+       spin_unlock_bh(&ptp->evt_lock);
+}
+
+static enum ptp_packet_state efx_ptp_match_rx(struct efx_nic *efx,
+                                             struct sk_buff *skb)
+{
+       struct efx_ptp_data *ptp = efx->ptp_data;
+       bool evts_waiting;
+       struct list_head *cursor;
+       struct list_head *next;
+       struct efx_ptp_match *match;
+       enum ptp_packet_state rc = PTP_PACKET_STATE_UNMATCHED;
+
+       spin_lock_bh(&ptp->evt_lock);
+       evts_waiting = !list_empty(&ptp->evt_list);
+       spin_unlock_bh(&ptp->evt_lock);
+
+       if (!evts_waiting)
+               return PTP_PACKET_STATE_UNMATCHED;
+
+       match = (struct efx_ptp_match *)skb->cb;
+       /* Look for a matching timestamp in the event queue */
+       spin_lock_bh(&ptp->evt_lock);
+       list_for_each_safe(cursor, next, &ptp->evt_list) {
+               struct efx_ptp_event_rx *evt;
+
+               evt = list_entry(cursor, struct efx_ptp_event_rx, link);
+               if ((evt->seq0 == match->words[0]) &&
+                   (evt->seq1 == match->words[1])) {
+                       struct skb_shared_hwtstamps *timestamps;
+
+                       /* Match - add in hardware timestamp */
+                       timestamps = skb_hwtstamps(skb);
+                       timestamps->hwtstamp = evt->hwtimestamp;
+
+                       match->state = PTP_PACKET_STATE_MATCHED;
+                       rc = PTP_PACKET_STATE_MATCHED;
+                       list_del(&evt->link);
+                       list_add(&evt->link, &ptp->evt_free_list);
+                       break;
+               }
+       }
+       spin_unlock_bh(&ptp->evt_lock);
+
+       return rc;
+}
+
+/* Process any queued receive events and corresponding packets
+ *
+ * q is returned with all the packets that are ready for delivery.
+ * true is returned if at least one of those packets requires
+ * synchronisation.
+ */
+static bool efx_ptp_process_events(struct efx_nic *efx, struct sk_buff_head *q)
+{
+       struct efx_ptp_data *ptp = efx->ptp_data;
+       bool rc = false;
+       struct sk_buff *skb;
+
+       while ((skb = skb_dequeue(&ptp->rxq))) {
+               struct efx_ptp_match *match;
+
+               match = (struct efx_ptp_match *)skb->cb;
+               if (match->state == PTP_PACKET_STATE_MATCH_UNWANTED) {
+                       __skb_queue_tail(q, skb);
+               } else if (efx_ptp_match_rx(efx, skb) ==
+                          PTP_PACKET_STATE_MATCHED) {
+                       rc = true;
+                       __skb_queue_tail(q, skb);
+               } else if (time_after(jiffies, match->expiry)) {
+                       match->state = PTP_PACKET_STATE_TIMED_OUT;
+                       netif_warn(efx, rx_err, efx->net_dev,
+                                  "PTP packet - no timestamp seen\n");
+                       __skb_queue_tail(q, skb);
+               } else {
+                       /* Replace unprocessed entry and stop */
+                       skb_queue_head(&ptp->rxq, skb);
+                       break;
+               }
+       }
+
+       return rc;
+}
+
+/* Complete processing of a received packet */
+static inline void efx_ptp_process_rx(struct efx_nic *efx, struct sk_buff *skb)
+{
+       local_bh_disable();
+       netif_receive_skb(skb);
+       local_bh_enable();
+}
+
+/* Start PTP processing: install RX filters for the PTP event and general
+ * UDP ports, then enable PTP on the MC.  On failure, remove whatever
+ * filters were installed; returns 0 or a negative error code.
+ */
+static int efx_ptp_start(struct efx_nic *efx)
+{
+       struct efx_ptp_data *ptp = efx->ptp_data;
+       struct efx_filter_spec rxfilter;
+       int rc;
+
+       ptp->reset_required = false;
+
+       /* Must filter on both event and general ports to ensure
+        * that there is no packet re-ordering.
+        */
+       efx_filter_init_rx(&rxfilter, EFX_FILTER_PRI_REQUIRED, 0,
+                          efx_rx_queue_index(
+                                  efx_channel_get_rx_queue(ptp->channel)));
+       rc = efx_filter_set_ipv4_local(&rxfilter, IPPROTO_UDP,
+                                      htonl(PTP_ADDRESS),
+                                      htons(PTP_EVENT_PORT));
+       if (rc != 0)
+               return rc;
+
+       /* efx_filter_insert_filter() returns the filter ID on success */
+       rc = efx_filter_insert_filter(efx, &rxfilter, true);
+       if (rc < 0)
+               return rc;
+       ptp->rxfilter_event = rc;
+
+       efx_filter_init_rx(&rxfilter, EFX_FILTER_PRI_REQUIRED, 0,
+                          efx_rx_queue_index(
+                                  efx_channel_get_rx_queue(ptp->channel)));
+       rc = efx_filter_set_ipv4_local(&rxfilter, IPPROTO_UDP,
+                                      htonl(PTP_ADDRESS),
+                                      htons(PTP_GENERAL_PORT));
+       if (rc != 0)
+               goto fail;
+
+       rc = efx_filter_insert_filter(efx, &rxfilter, true);
+       if (rc < 0)
+               goto fail;
+       ptp->rxfilter_general = rc;
+
+       rc = efx_ptp_enable(efx);
+       if (rc != 0)
+               goto fail2;
+
+       ptp->evt_frag_idx = 0;
+       ptp->current_adjfreq = 0;
+       ptp->rxfilter_installed = true;
+
+       return 0;
+
+fail2:
+       efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
+                                 ptp->rxfilter_general);
+fail:
+       efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
+                                 ptp->rxfilter_event);
+
+       return rc;
+}
+
+/* Stop PTP processing: disable PTP on the MC, remove the RX filters,
+ * flush the deferred RX/TX queues and recycle any pending receive
+ * timestamp events.  Returns the result of the MC disable operation.
+ */
+static int efx_ptp_stop(struct efx_nic *efx)
+{
+       struct efx_ptp_data *ptp = efx->ptp_data;
+       int rc = efx_ptp_disable(efx);
+       struct list_head *cursor;
+       struct list_head *next;
+
+       if (ptp->rxfilter_installed) {
+               efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
+                                         ptp->rxfilter_general);
+               efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
+                                         ptp->rxfilter_event);
+               ptp->rxfilter_installed = false;
+       }
+
+       /* Make sure RX packets are really delivered */
+       efx_ptp_deliver_rx_queue(&ptp->rxq);
+       skb_queue_purge(&ptp->txq);
+
+       /* Drop any pending receive events back onto the free list */
+       spin_lock_bh(&ptp->evt_lock);
+       list_for_each_safe(cursor, next, &ptp->evt_list)
+               list_move(cursor, &ptp->evt_free_list);
+       spin_unlock_bh(&ptp->evt_lock);
+
+       return rc;
+}
+
+/* PPS work item: re-synchronize host and NIC clocks, then report a
+ * PTP_CLOCK_PPSUSR event to the PHC subsystem using the host time
+ * captured during synchronization.  Bails out silently if
+ * synchronization fails.
+ */
+static void efx_ptp_pps_worker(struct work_struct *work)
+{
+       struct efx_ptp_data *ptp =
+               container_of(work, struct efx_ptp_data, pps_work);
+       struct efx_nic *efx = ptp->channel->efx;
+       struct ptp_clock_event ptp_evt;
+
+       if (efx_ptp_synchronize(efx, PTP_SYNC_ATTEMPTS))
+               return;
+
+       ptp_evt.type = PTP_CLOCK_PPSUSR;
+       ptp_evt.pps_times = ptp->host_time_pps;
+       ptp_clock_event(ptp->phc_clock, &ptp_evt);
+}
+
+/* Process any pending transmissions and timestamp any received packets.
+ * If a reset was requested (e.g. after an MC event error), restart PTP
+ * instead.  Packets moved onto tempq by efx_ptp_process_events() have
+ * been matched (or timed out) and can be delivered to the stack.
+ */
+static void efx_ptp_worker(struct work_struct *work)
+{
+       struct efx_ptp_data *ptp_data =
+               container_of(work, struct efx_ptp_data, work);
+       struct efx_nic *efx = ptp_data->channel->efx;
+       struct sk_buff *skb;
+       struct sk_buff_head tempq;
+
+       if (ptp_data->reset_required) {
+               efx_ptp_stop(efx);
+               efx_ptp_start(efx);
+               return;
+       }
+
+       efx_ptp_drop_time_expired_events(efx);
+
+       __skb_queue_head_init(&tempq);
+       if (efx_ptp_process_events(efx, &tempq) ||
+           !skb_queue_empty(&ptp_data->txq)) {
+
+               while ((skb = skb_dequeue(&ptp_data->txq)))
+                       efx_ptp_xmit_skb(efx, skb);
+       }
+
+       while ((skb = __skb_dequeue(&tempq)))
+               efx_ptp_process_rx(efx, skb);
+}
+
+/* Initialise PTP channel and state.
+ *
+ * Setting core_index to zero causes the queue to be initialised and doesn't
+ * overlap with 'rxq0' because ptp.c doesn't use skb_record_rx_queue.
+ *
+ * Returns 0 on success or a negative error code, unwinding all partially
+ * created resources on failure.
+ */
+static int efx_ptp_probe_channel(struct efx_channel *channel)
+{
+       struct efx_nic *efx = channel->efx;
+       struct efx_ptp_data *ptp;
+       int rc = 0;
+       unsigned int pos;
+
+       channel->irq_moderation = 0;
+       channel->rx_queue.core_index = 0;
+
+       ptp = kzalloc(sizeof(struct efx_ptp_data), GFP_KERNEL);
+       efx->ptp_data = ptp;
+       if (!efx->ptp_data)
+               return -ENOMEM;
+
+       rc = efx_nic_alloc_buffer(efx, &ptp->start, sizeof(int));
+       if (rc != 0)
+               goto fail1;
+
+       ptp->channel = channel;
+       skb_queue_head_init(&ptp->rxq);
+       skb_queue_head_init(&ptp->txq);
+       ptp->workwq = create_singlethread_workqueue("sfc_ptp");
+       if (!ptp->workwq) {
+               rc = -ENOMEM;
+               goto fail2;
+       }
+
+       INIT_WORK(&ptp->work, efx_ptp_worker);
+       ptp->config.flags = 0;
+       ptp->config.tx_type = HWTSTAMP_TX_OFF;
+       ptp->config.rx_filter = HWTSTAMP_FILTER_NONE;
+       INIT_LIST_HEAD(&ptp->evt_list);
+       INIT_LIST_HEAD(&ptp->evt_free_list);
+       spin_lock_init(&ptp->evt_lock);
+       for (pos = 0; pos < MAX_RECEIVE_EVENTS; pos++)
+               list_add(&ptp->rx_evts[pos].link, &ptp->evt_free_list);
+
+       ptp->phc_clock_info.owner = THIS_MODULE;
+       snprintf(ptp->phc_clock_info.name,
+                sizeof(ptp->phc_clock_info.name),
+                "%pm", efx->net_dev->perm_addr);
+       ptp->phc_clock_info.max_adj = MAX_PPB;
+       ptp->phc_clock_info.n_alarm = 0;
+       ptp->phc_clock_info.n_ext_ts = 0;
+       ptp->phc_clock_info.n_per_out = 0;
+       ptp->phc_clock_info.pps = 1;
+       ptp->phc_clock_info.adjfreq = efx_phc_adjfreq;
+       ptp->phc_clock_info.adjtime = efx_phc_adjtime;
+       ptp->phc_clock_info.gettime = efx_phc_gettime;
+       ptp->phc_clock_info.settime = efx_phc_settime;
+       ptp->phc_clock_info.enable = efx_phc_enable;
+
+       /* ptp_clock_register() reports failure via ERR_PTR, not NULL;
+        * propagate the encoded error rather than returning success.
+        */
+       ptp->phc_clock = ptp_clock_register(&ptp->phc_clock_info,
+                                           &efx->pci_dev->dev);
+       if (IS_ERR(ptp->phc_clock)) {
+               rc = PTR_ERR(ptp->phc_clock);
+               goto fail3;
+       }
+
+       INIT_WORK(&ptp->pps_work, efx_ptp_pps_worker);
+       ptp->pps_workwq = create_singlethread_workqueue("sfc_pps");
+       if (!ptp->pps_workwq) {
+               rc = -ENOMEM;
+               goto fail4;
+       }
+       ptp->nic_ts_enabled = false;
+
+       return 0;
+fail4:
+       ptp_clock_unregister(ptp->phc_clock);
+
+fail3:
+       destroy_workqueue(ptp->workwq);
+
+fail2:
+       efx_nic_free_buffer(efx, &ptp->start);
+
+fail1:
+       kfree(ptp);
+       efx->ptp_data = NULL;
+
+       return rc;
+}
+
+/* Tear down the PTP channel: disable PTP on the MC, cancel outstanding
+ * work, drop queued packets and release all resources created by
+ * efx_ptp_probe_channel().  No-op if PTP was never probed.
+ */
+static void efx_ptp_remove_channel(struct efx_channel *channel)
+{
+       struct efx_nic *efx = channel->efx;
+
+       if (!efx->ptp_data)
+               return;
+
+       (void)efx_ptp_disable(channel->efx);
+
+       cancel_work_sync(&efx->ptp_data->work);
+       cancel_work_sync(&efx->ptp_data->pps_work);
+
+       skb_queue_purge(&efx->ptp_data->rxq);
+       skb_queue_purge(&efx->ptp_data->txq);
+
+       ptp_clock_unregister(efx->ptp_data->phc_clock);
+
+       destroy_workqueue(efx->ptp_data->workwq);
+       destroy_workqueue(efx->ptp_data->pps_workwq);
+
+       efx_nic_free_buffer(efx, &efx->ptp_data->start);
+       kfree(efx->ptp_data);
+       /* Clear the pointer so later paths see PTP as not present */
+       efx->ptp_data = NULL;
+}
+
+/* Name the PTP channel after the NIC, e.g. "<nic-name>-ptp" */
+static void efx_ptp_get_channel_name(struct efx_channel *channel,
+                                    char *buf, size_t len)
+{
+       snprintf(buf, len, "%s-ptp", channel->efx->name);
+}
+
+/* Determine whether this packet should be processed by the PTP module
+ * or transmitted conventionally.  Only IPv4/UDP packets addressed to
+ * the PTP event port, within the size limits the MC can transmit, are
+ * diverted - and only while PTP is enabled.
+ */
+bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb)
+{
+       return efx->ptp_data &&
+               efx->ptp_data->enabled &&
+               skb->len >= PTP_MIN_LENGTH &&
+               skb->len <= MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM &&
+               likely(skb->protocol == htons(ETH_P_IP)) &&
+               ip_hdr(skb)->protocol == IPPROTO_UDP &&
+               udp_hdr(skb)->dest == htons(PTP_EVENT_PORT);
+}
+
+/* Receive a PTP packet.  Packets are queued until the arrival of
+ * the receive timestamp from the MC - this will probably occur after the
+ * packet arrival because of the processing in the MC.
+ *
+ * Packets that are too short or carry an unexpected PTP version are
+ * delivered straight to the stack without timestamping.  Matching state
+ * is kept in skb->cb (struct efx_ptp_match).
+ */
+static void efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
+{
+       struct efx_nic *efx = channel->efx;
+       struct efx_ptp_data *ptp = efx->ptp_data;
+       struct efx_ptp_match *match = (struct efx_ptp_match *)skb->cb;
+       u8 *data;
+       unsigned int version;
+
+       match->expiry = jiffies + msecs_to_jiffies(PKT_EVENT_LIFETIME_MS);
+
+       /* Correct version? */
+       if (ptp->mode == MC_CMD_PTP_MODE_V1) {
+               if (skb->len < PTP_V1_MIN_LENGTH) {
+                       netif_receive_skb(skb);
+                       return;
+               }
+               version = ntohs(*(__be16 *)&skb->data[PTP_V1_VERSION_OFFSET]);
+               if (version != PTP_VERSION_V1) {
+                       netif_receive_skb(skb);
+                       return;
+               }
+       } else {
+               if (skb->len < PTP_V2_MIN_LENGTH) {
+                       netif_receive_skb(skb);
+                       return;
+               }
+               version = skb->data[PTP_V2_VERSION_OFFSET];
+
+               /* The V1 field offsets are reused below for V2 packets;
+                * these assertions guarantee the layouts coincide.
+                */
+               BUG_ON(ptp->mode != MC_CMD_PTP_MODE_V2);
+               BUILD_BUG_ON(PTP_V1_UUID_OFFSET != PTP_V2_MC_UUID_OFFSET);
+               BUILD_BUG_ON(PTP_V1_UUID_LENGTH != PTP_V2_MC_UUID_LENGTH);
+               BUILD_BUG_ON(PTP_V1_SEQUENCE_OFFSET != PTP_V2_SEQUENCE_OFFSET);
+               BUILD_BUG_ON(PTP_V1_SEQUENCE_LENGTH != PTP_V2_SEQUENCE_LENGTH);
+
+               if ((version & PTP_VERSION_V2_MASK) != PTP_VERSION_V2) {
+                       netif_receive_skb(skb);
+                       return;
+               }
+       }
+
+       /* Does this packet require timestamping? */
+       if (ntohs(*(__be16 *)&skb->data[PTP_DPORT_OFFSET]) == PTP_EVENT_PORT) {
+               struct skb_shared_hwtstamps *timestamps;
+
+               match->state = PTP_PACKET_STATE_UNMATCHED;
+
+               /* Clear all timestamps held: filled in later */
+               timestamps = skb_hwtstamps(skb);
+               memset(timestamps, 0, sizeof(*timestamps));
+
+               /* Extract UUID/Sequence information used to pair this
+                * packet with its MC timestamp event.
+                */
+               data = skb->data + PTP_V1_UUID_OFFSET;
+               match->words[0] = (data[0]         |
+                                  (data[1] << 8)  |
+                                  (data[2] << 16) |
+                                  (data[3] << 24));
+               match->words[1] = (data[4]         |
+                                  (data[5] << 8)  |
+                                  (skb->data[PTP_V1_SEQUENCE_OFFSET +
+                                             PTP_V1_SEQUENCE_LENGTH - 1] <<
+                                   16));
+       } else {
+               match->state = PTP_PACKET_STATE_MATCH_UNWANTED;
+       }
+
+       skb_queue_tail(&ptp->rxq, skb);
+       queue_work(ptp->workwq, &ptp->work);
+}
+
+/* Transmit a PTP packet.  This has to be transmitted by the MC
+ * itself, through an MCDI call.  MCDI calls aren't permitted
+ * in the transmit path so defer the actual transmission to a suitable worker.
+ */
+int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb)
+{
+       struct efx_ptp_data *ptp = efx->ptp_data;
+
+       skb_queue_tail(&ptp->txq, skb);
+
+       /* Only event-port packets the MC can handle get a TX timestamp */
+       if ((udp_hdr(skb)->dest == htons(PTP_EVENT_PORT)) &&
+           (skb->len <= MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM))
+               efx_xmit_hwtstamp_pending(skb);
+       queue_work(ptp->workwq, &ptp->work);
+
+       return NETDEV_TX_OK;
+}
+
+/* Enable, disable or change the operating mode of PTP processing.
+ * A mode change while enabled requires a stop/start cycle; enabling
+ * also requires a successful baseline clock synchronisation.
+ * Does nothing if the requested state already holds.
+ */
+static int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
+                              unsigned int new_mode)
+{
+       if ((enable_wanted != efx->ptp_data->enabled) ||
+           (enable_wanted && (efx->ptp_data->mode != new_mode))) {
+               int rc;
+
+               if (enable_wanted) {
+                       /* Change of mode requires disable */
+                       if (efx->ptp_data->enabled &&
+                           (efx->ptp_data->mode != new_mode)) {
+                               efx->ptp_data->enabled = false;
+                               rc = efx_ptp_stop(efx);
+                               if (rc != 0)
+                                       return rc;
+                       }
+
+                       /* Set new operating mode and establish
+                        * baseline synchronisation, which must
+                        * succeed.
+                        */
+                       efx->ptp_data->mode = new_mode;
+                       rc = efx_ptp_start(efx);
+                       if (rc == 0) {
+                               rc = efx_ptp_synchronize(efx,
+                                                        PTP_SYNC_ATTEMPTS * 2);
+                               if (rc != 0)
+                                       efx_ptp_stop(efx);
+                       }
+               } else {
+                       rc = efx_ptp_stop(efx);
+               }
+
+               if (rc != 0)
+                       return rc;
+
+               efx->ptp_data->enabled = enable_wanted;
+       }
+
+       return 0;
+}
+
+/* Validate a hardware timestamping configuration request and map the
+ * requested rx_filter onto an MC PTP operating mode, applying it via
+ * efx_ptp_change_mode().  On success the (possibly adjusted) config is
+ * stored for later SIOCGHWTSTAMP-style retrieval.
+ */
+static int efx_ptp_ts_init(struct efx_nic *efx, struct hwtstamp_config *init)
+{
+       bool enable_wanted = false;
+       unsigned int new_mode;
+       int rc;
+
+       if (init->flags)
+               return -EINVAL;
+
+       if ((init->tx_type != HWTSTAMP_TX_OFF) &&
+           (init->tx_type != HWTSTAMP_TX_ON))
+               return -ERANGE;
+
+       new_mode = efx->ptp_data->mode;
+       /* Determine whether any PTP HW operations are required */
+       switch (init->rx_filter) {
+       case HWTSTAMP_FILTER_NONE:
+               break;
+       case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+       case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+       case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+               init->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+               new_mode = MC_CMD_PTP_MODE_V1;
+               enable_wanted = true;
+               break;
+       case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+       case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+       case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+               /* Although these three are accepted only IPV4 packets will be
+                * timestamped
+                */
+               init->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
+               new_mode = MC_CMD_PTP_MODE_V2;
+               enable_wanted = true;
+               break;
+       case HWTSTAMP_FILTER_PTP_V2_EVENT:
+       case HWTSTAMP_FILTER_PTP_V2_SYNC:
+       case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+       case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+       case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+       case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+               /* Non-IP + IPv6 timestamping not supported */
+               return -ERANGE;
+       default:
+               return -ERANGE;
+       }
+
+       if (init->tx_type != HWTSTAMP_TX_OFF)
+               enable_wanted = true;
+
+       rc = efx_ptp_change_mode(efx, enable_wanted, new_mode);
+       if (rc != 0)
+               return rc;
+
+       efx->ptp_data->config = *init;
+
+       return 0;
+}
+
+/* ethtool get_ts_info: report the timestamping capabilities and the
+ * PHC index for this device.  Returns -EOPNOTSUPP on non-PTP ports.
+ */
+int
+efx_ptp_get_ts_info(struct net_device *net_dev, struct ethtool_ts_info *ts_info)
+{
+       struct efx_nic *efx = netdev_priv(net_dev);
+       struct efx_ptp_data *ptp = efx->ptp_data;
+
+       if (!ptp)
+               return -EOPNOTSUPP;
+
+       ts_info->so_timestamping = (SOF_TIMESTAMPING_TX_HARDWARE |
+                                   SOF_TIMESTAMPING_RX_HARDWARE |
+                                   SOF_TIMESTAMPING_RAW_HARDWARE);
+       ts_info->phc_index = ptp_clock_index(ptp->phc_clock);
+       ts_info->tx_types = 1 << HWTSTAMP_TX_OFF | 1 << HWTSTAMP_TX_ON;
+       /* Only the L4 (UDP/IPv4) filter modes are supported */
+       ts_info->rx_filters = (1 << HWTSTAMP_FILTER_NONE |
+                              1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT |
+                              1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC |
+                              1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ |
+                              1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT |
+                              1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC |
+                              1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ);
+       return 0;
+}
+
+/* Handle the SIOCSHWTSTAMP ioctl: copy the user's hwtstamp_config in,
+ * apply it, then copy the (possibly adjusted) config back to user space.
+ */
+int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd)
+{
+       struct hwtstamp_config config;
+       int rc;
+
+       /* Not a PTP enabled port */
+       if (!efx->ptp_data)
+               return -EOPNOTSUPP;
+
+       if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+               return -EFAULT;
+
+       rc = efx_ptp_ts_init(efx, &config);
+       if (rc != 0)
+               return rc;
+
+       return copy_to_user(ifr->ifr_data, &config, sizeof(config))
+               ? -EFAULT : 0;
+}
+
+/* Report a malformed MC event (unexpected fragment count) and request a
+ * PTP restart via the worker, which honours reset_required.
+ */
+static void ptp_event_failure(struct efx_nic *efx, int expected_frag_len)
+{
+       struct efx_ptp_data *ptp = efx->ptp_data;
+
+       netif_err(efx, hw, efx->net_dev,
+               "PTP unexpected event length: got %d expected %d\n",
+               ptp->evt_frag_idx, expected_frag_len);
+       ptp->reset_required = true;
+       queue_work(ptp->workwq, &ptp->work);
+}
+
+/* Process a completed receive event.  Put it on the event queue and
+ * start the worker thread.  This is required because events and their
+ * corresponding packets may come in either order.
+ */
+static void ptp_event_rx(struct efx_nic *efx, struct efx_ptp_data *ptp)
+{
+       struct efx_ptp_event_rx *evt = NULL;
+
+       /* A complete RX timestamp event arrives in exactly 3 fragments */
+       if (ptp->evt_frag_idx != 3) {
+               ptp_event_failure(efx, 3);
+               return;
+       }
+
+       spin_lock_bh(&ptp->evt_lock);
+       if (!list_empty(&ptp->evt_free_list)) {
+               evt = list_first_entry(&ptp->evt_free_list,
+                                      struct efx_ptp_event_rx, link);
+               list_del(&evt->link);
+
+               /* Reassemble the UUID/sequence match words and the
+                * hardware timestamp from the three event fragments.
+                */
+               evt->seq0 = EFX_QWORD_FIELD(ptp->evt_frags[2], MCDI_EVENT_DATA);
+               evt->seq1 = (EFX_QWORD_FIELD(ptp->evt_frags[2],
+                                            MCDI_EVENT_SRC)        |
+                            (EFX_QWORD_FIELD(ptp->evt_frags[1],
+                                             MCDI_EVENT_SRC) << 8) |
+                            (EFX_QWORD_FIELD(ptp->evt_frags[0],
+                                             MCDI_EVENT_SRC) << 16));
+               evt->hwtimestamp = ktime_set(
+                       EFX_QWORD_FIELD(ptp->evt_frags[0], MCDI_EVENT_DATA),
+                       EFX_QWORD_FIELD(ptp->evt_frags[1], MCDI_EVENT_DATA));
+               evt->expiry = jiffies + msecs_to_jiffies(PKT_EVENT_LIFETIME_MS);
+               list_add_tail(&evt->link, &ptp->evt_list);
+
+               queue_work(ptp->workwq, &ptp->work);
+       } else {
+               netif_err(efx, rx_err, efx->net_dev, "No free PTP event");
+       }
+       spin_unlock_bh(&ptp->evt_lock);
+}
+
+/* Handle a PTP fault event from the MC: a single-fragment event whose
+ * data word carries the error code, which is logged.
+ */
+static void ptp_event_fault(struct efx_nic *efx, struct efx_ptp_data *ptp)
+{
+       int code;
+
+       if (ptp->evt_frag_idx != 1) {
+               ptp_event_failure(efx, 1);
+               return;
+       }
+
+       code = EFX_QWORD_FIELD(ptp->evt_frags[0], MCDI_EVENT_DATA);
+       netif_err(efx, hw, efx->net_dev, "PTP error %d\n", code);
+}
+
+/* Handle a PPS event from the MC: kick the PPS worker if NIC
+ * timestamping has been enabled via efx_phc_enable().
+ */
+static void ptp_event_pps(struct efx_nic *efx, struct efx_ptp_data *ptp)
+{
+       if (ptp->nic_ts_enabled)
+               queue_work(ptp->pps_workwq, &ptp->pps_work);
+}
+
+/* Accumulate MC event fragments until a complete event (no CONT flag)
+ * has been received, then dispatch it by event code.  Fragments whose
+ * code does not match the one in progress are treated as the start of a
+ * new event.
+ */
+void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev)
+{
+       struct efx_ptp_data *ptp = efx->ptp_data;
+       int code = EFX_QWORD_FIELD(*ev, MCDI_EVENT_CODE);
+
+       if (!ptp->enabled)
+               return;
+
+       if (ptp->evt_frag_idx == 0) {
+               ptp->evt_code = code;
+       } else if (ptp->evt_code != code) {
+               netif_err(efx, hw, efx->net_dev,
+                         "PTP out of sequence event %d\n", code);
+               ptp->evt_frag_idx = 0;
+       }
+
+       ptp->evt_frags[ptp->evt_frag_idx++] = *ev;
+       if (!MCDI_EVENT_FIELD(*ev, CONT)) {
+               /* Process resulting event */
+               switch (code) {
+               case MCDI_EVENT_CODE_PTP_RX:
+                       ptp_event_rx(efx, ptp);
+                       break;
+               case MCDI_EVENT_CODE_PTP_FAULT:
+                       ptp_event_fault(efx, ptp);
+                       break;
+               case MCDI_EVENT_CODE_PTP_PPS:
+                       ptp_event_pps(efx, ptp);
+                       break;
+               default:
+                       netif_err(efx, hw, efx->net_dev,
+                                 "PTP unknown event %d\n", code);
+                       break;
+               }
+               ptp->evt_frag_idx = 0;
+       } else if (MAX_EVENT_FRAGS == ptp->evt_frag_idx) {
+               netif_err(efx, hw, efx->net_dev,
+                         "PTP too many event fragments\n");
+               ptp->evt_frag_idx = 0;
+       }
+}
+
+/* PHC adjfreq callback: clamp the requested frequency offset (ppb) to
+ * +/-MAX_PPB, convert it to the MC's fixed-point ns representation and
+ * issue an MC_CMD_PTP_OP_ADJUST with zero time offset.
+ */
+static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
+{
+       struct efx_ptp_data *ptp_data = container_of(ptp,
+                                                    struct efx_ptp_data,
+                                                    phc_clock_info);
+       struct efx_nic *efx = ptp_data->channel->efx;
+       u8 inadj[MC_CMD_PTP_IN_ADJUST_LEN];
+       s64 adjustment_ns;
+       int rc;
+
+       if (delta > MAX_PPB)
+               delta = MAX_PPB;
+       else if (delta < -MAX_PPB)
+               delta = -MAX_PPB;
+
+       /* Convert ppb to fixed point ns. */
+       adjustment_ns = (((s64)delta * PPB_SCALE_WORD) >>
+                        (PPB_EXTRA_BITS + MAX_PPB_BITS));
+
+       MCDI_SET_DWORD(inadj, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST);
+       MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_FREQ_LO, (u32)adjustment_ns);
+       MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_FREQ_HI,
+                      (u32)(adjustment_ns >> 32));
+       MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_SECONDS, 0);
+       MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_NANOSECONDS, 0);
+       rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inadj, sizeof(inadj),
+                         NULL, 0, NULL);
+       if (rc != 0)
+               return rc;
+
+       ptp_data->current_adjfreq = delta;
+       return 0;
+}
+
+/* PHC adjtime callback: step the NIC clock by delta nanoseconds via an
+ * MC_CMD_PTP_OP_ADJUST with zero frequency adjustment.
+ */
+static int efx_phc_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+       struct efx_ptp_data *ptp_data = container_of(ptp,
+                                                    struct efx_ptp_data,
+                                                    phc_clock_info);
+       struct efx_nic *efx = ptp_data->channel->efx;
+       struct timespec delta_ts = ns_to_timespec(delta);
+       u8 inbuf[MC_CMD_PTP_IN_ADJUST_LEN];
+
+       MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST);
+       MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_FREQ_LO, 0);
+       MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_FREQ_HI, 0);
+       MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_SECONDS, (u32)delta_ts.tv_sec);
+       MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_NANOSECONDS, (u32)delta_ts.tv_nsec);
+       return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+                           NULL, 0, NULL);
+}
+
+/* PHC gettime callback: read the current NIC time from the MC via
+ * MC_CMD_PTP_OP_READ_NIC_TIME.
+ */
+static int efx_phc_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+{
+       struct efx_ptp_data *ptp_data = container_of(ptp,
+                                                    struct efx_ptp_data,
+                                                    phc_clock_info);
+       struct efx_nic *efx = ptp_data->channel->efx;
+       u8 inbuf[MC_CMD_PTP_IN_READ_NIC_TIME_LEN];
+       u8 outbuf[MC_CMD_PTP_OUT_READ_NIC_TIME_LEN];
+       int rc;
+
+       MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_READ_NIC_TIME);
+
+       rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+                         outbuf, sizeof(outbuf), NULL);
+       if (rc != 0)
+               return rc;
+
+       ts->tv_sec = MCDI_DWORD(outbuf, PTP_OUT_READ_NIC_TIME_SECONDS);
+       ts->tv_nsec = MCDI_DWORD(outbuf, PTP_OUT_READ_NIC_TIME_NANOSECONDS);
+       return 0;
+}
+
+/* PHC settime callback.
+ *
+ * Get the current NIC time via efx_phc_gettime(), subtract it from the
+ * desired time to obtain the offset, then apply the offset with
+ * efx_phc_adjtime().  Returns 0 or a negative error code.
+ */
+static int efx_phc_settime(struct ptp_clock_info *ptp,
+                          const struct timespec *e_ts)
+{
+       int rc;
+       struct timespec time_now;
+       struct timespec delta;
+
+       rc = efx_phc_gettime(ptp, &time_now);
+       if (rc != 0)
+               return rc;
+
+       delta = timespec_sub(*e_ts, time_now);
+
+       /* Capture the result so adjtime failures are not silently lost */
+       rc = efx_phc_adjtime(ptp, timespec_to_ns(&delta));
+       if (rc != 0)
+               return rc;
+
+       return 0;
+}
+
+/* PHC enable callback: only PPS requests are supported; record whether
+ * PPS reporting is wanted so ptp_event_pps() knows to queue work.
+ */
+static int efx_phc_enable(struct ptp_clock_info *ptp,
+                         struct ptp_clock_request *request,
+                         int enable)
+{
+       struct efx_ptp_data *ptp_data = container_of(ptp,
+                                                    struct efx_ptp_data,
+                                                    phc_clock_info);
+       if (request->type != PTP_CLK_REQ_PPS)
+               return -EOPNOTSUPP;
+
+       ptp_data->nic_ts_enabled = !!enable;
+       return 0;
+}
+
+/* Channel-type operations for the dedicated PTP channel.  receive_skb
+ * diverts received packets into efx_ptp_rx() instead of the normal
+ * delivery path.
+ */
+static const struct efx_channel_type efx_ptp_channel_type = {
+       .handle_no_channel      = efx_ptp_handle_no_channel,
+       .pre_probe              = efx_ptp_probe_channel,
+       .post_remove            = efx_ptp_remove_channel,
+       .get_name               = efx_ptp_get_channel_name,
+       /* no copy operation; there is no need to reallocate this channel */
+       .receive_skb            = efx_ptp_rx,
+       .keep_eventq            = false,
+};
+
+/* Probe for PTP support and, if present, register the PTP channel type
+ * so an extra channel is created for PTP traffic.
+ */
+void efx_ptp_probe(struct efx_nic *efx)
+{
+       /* Check whether PTP is implemented on this NIC.  The DISABLE
+        * operation will succeed if and only if it is implemented.
+        */
+       if (efx_ptp_disable(efx) == 0)
+               efx->extra_channel_type[EFX_EXTRA_CHANNEL_PTP] =
+                       &efx_ptp_channel_type;
+}
index 719319b89d7a8086f315559a1c68cfe28a82acd3..9e0ad1b75c335c0bf014fd1b47962c6719705767 100644 (file)
@@ -479,7 +479,7 @@ static void efx_rx_packet_gro(struct efx_channel *channel,
                skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
                                  CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
 
-               skb_record_rx_queue(skb, channel->channel);
+               skb_record_rx_queue(skb, channel->rx_queue.core_index);
 
                gro_result = napi_gro_frags(napi);
        } else {
@@ -571,8 +571,14 @@ static void efx_rx_deliver(struct efx_channel *channel,
        /* Set the SKB flags */
        skb_checksum_none_assert(skb);
 
+       /* Record the rx_queue */
+       skb_record_rx_queue(skb, channel->rx_queue.core_index);
+
        /* Pass the packet up */
-       netif_receive_skb(skb);
+       if (channel->type->receive_skb)
+               channel->type->receive_skb(channel, skb);
+       else
+               netif_receive_skb(skb);
 
        /* Update allocation strategy method */
        channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
@@ -608,13 +614,14 @@ void __efx_rx_packet(struct efx_channel *channel, struct efx_rx_buffer *rx_buf)
                 * at the ethernet header */
                skb->protocol = eth_type_trans(skb, efx->net_dev);
 
-               skb_record_rx_queue(skb, channel->channel);
+               skb_record_rx_queue(skb, channel->rx_queue.core_index);
        }
 
        if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
                rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;
 
-       if (likely(rx_buf->flags & (EFX_RX_BUF_PAGE | EFX_RX_PKT_CSUMMED)))
+       if (likely(rx_buf->flags & (EFX_RX_BUF_PAGE | EFX_RX_PKT_CSUMMED)) &&
+           !channel->type->receive_skb)
                efx_rx_packet_gro(channel, rx_buf, eh);
        else
                efx_rx_deliver(channel, rx_buf);
@@ -624,6 +631,11 @@ void efx_rx_strategy(struct efx_channel *channel)
 {
        enum efx_rx_alloc_method method = rx_alloc_method;
 
+       if (channel->type->receive_skb) {
+               channel->rx_alloc_push_pages = false;
+               return;
+       }
+
        /* Only makes sense to use page based allocation if GRO is enabled */
        if (!(channel->efx->net_dev->features & NETIF_F_GRO)) {
                method = RX_ALLOC_METHOD_SKB;
index 96068d15b601f5afc285618ea6fecd86b7713caf..ce72ae4f399fdf3861262967aeb8222d5856bd85 100644 (file)
@@ -614,7 +614,8 @@ static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests,
 {
        enum efx_loopback_mode mode;
        struct efx_loopback_state *state;
-       struct efx_channel *channel = efx_get_channel(efx, 0);
+       struct efx_channel *channel =
+               efx_get_channel(efx, efx->tx_channel_offset);
        struct efx_tx_queue *tx_queue;
        int rc = 0;
 
index 6bafd216e55e6eb668d12b5e636f14975ae7f110..84b41bf08a38a22ad8e9f0954ec32631628b4ad7 100644 (file)
@@ -335,6 +335,7 @@ static int siena_probe_nic(struct efx_nic *efx)
                goto fail5;
 
        efx_sriov_probe(efx);
+       efx_ptp_probe(efx);
 
        return 0;
 
index 9cb3b84ecae99a24553f0bd039d5a5fdbaafbd8e..d49b53dc2a500a2602093a2df63daba5c5ad2538 100644 (file)
@@ -21,6 +21,9 @@
 /* Number of longs required to track all the VIs in a VF */
 #define VI_MASK_LENGTH BITS_TO_LONGS(1 << EFX_VI_SCALE_MAX)
 
+/* Maximum number of RX queues supported */
+#define VF_MAX_RX_QUEUES 63
+
 /**
  * enum efx_vf_tx_filter_mode - TX MAC filtering behaviour
  * @VF_TX_FILTER_OFF: Disabled
@@ -578,6 +581,7 @@ static int efx_vfdi_init_rxq(struct efx_vf *vf)
        efx_oword_t reg;
 
        if (bad_vf_index(efx, vf_evq) || bad_vf_index(efx, vf_rxq) ||
+           vf_rxq >= VF_MAX_RX_QUEUES ||
            bad_buf_count(buf_count, EFX_MAX_DMAQ_SIZE)) {
                if (net_ratelimit())
                        netif_err(efx, hw, efx->net_dev,
@@ -683,6 +687,9 @@ static int efx_vfdi_fini_all_queues(struct efx_vf *vf)
        __le32 *rxqs;
        int rc;
 
+       BUILD_BUG_ON(VF_MAX_RX_QUEUES >
+                    MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM);
+
        rxqs = kmalloc(count * sizeof(*rxqs), GFP_KERNEL);
        if (rxqs == NULL)
                return VFDI_RC_ENOMEM;
@@ -1028,6 +1035,7 @@ efx_sriov_get_channel_name(struct efx_channel *channel, char *buf, size_t len)
 static const struct efx_channel_type efx_sriov_channel_type = {
        .handle_no_channel      = efx_sriov_handle_no_channel,
        .pre_probe              = efx_sriov_probe_channel,
+       .post_remove            = efx_channel_dummy_op_void,
        .get_name               = efx_sriov_get_channel_name,
        /* no copy operation; channel must not be reallocated */
        .keep_eventq            = true,
index 18713436b44345a110ed263d54c5411d1e93c6fa..5e090e54298e667a732c53ece789bb03e40aef5e 100644 (file)
 #include "nic.h"
 #include "workarounds.h"
 
-/*
- * TX descriptor ring full threshold
- *
- * The tx_queue descriptor ring fill-level must fall below this value
- * before we restart the netif queue
- */
-#define EFX_TXQ_THRESHOLD(_efx) ((_efx)->txq_entries / 2u)
-
 static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
                               struct efx_tx_buffer *buffer,
                               unsigned int *pkts_compl,
@@ -39,67 +31,32 @@ static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
                struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
                dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len -
                                         buffer->unmap_len);
-               if (buffer->unmap_single)
+               if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
                        dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
                                       DMA_TO_DEVICE);
                buffer->unmap_len = 0;
-               buffer->unmap_single = false;
        }
 
-       if (buffer->skb) {
+       if (buffer->flags & EFX_TX_BUF_SKB) {
                (*pkts_compl)++;
                (*bytes_compl) += buffer->skb->len;
                dev_kfree_skb_any((struct sk_buff *) buffer->skb);
-               buffer->skb = NULL;
                netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
                           "TX queue %d transmission id %x complete\n",
                           tx_queue->queue, tx_queue->read_count);
+       } else if (buffer->flags & EFX_TX_BUF_HEAP) {
+               kfree(buffer->heap_buf);
        }
-}
 
-/**
- * struct efx_tso_header - a DMA mapped buffer for packet headers
- * @next: Linked list of free ones.
- *     The list is protected by the TX queue lock.
- * @dma_unmap_len: Length to unmap for an oversize buffer, or 0.
- * @dma_addr: The DMA address of the header below.
- *
- * This controls the memory used for a TSO header.  Use TSOH_DATA()
- * to find the packet header data.  Use TSOH_SIZE() to calculate the
- * total size required for a given packet header length.  TSO headers
- * in the free list are exactly %TSOH_STD_SIZE bytes in size.
- */
-struct efx_tso_header {
-       union {
-               struct efx_tso_header *next;
-               size_t unmap_len;
-       };
-       dma_addr_t dma_addr;
-};
+       buffer->len = 0;
+       buffer->flags = 0;
+}
 
 static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
                               struct sk_buff *skb);
-static void efx_fini_tso(struct efx_tx_queue *tx_queue);
-static void efx_tsoh_heap_free(struct efx_tx_queue *tx_queue,
-                              struct efx_tso_header *tsoh);
-
-static void efx_tsoh_free(struct efx_tx_queue *tx_queue,
-                         struct efx_tx_buffer *buffer)
-{
-       if (buffer->tsoh) {
-               if (likely(!buffer->tsoh->unmap_len)) {
-                       buffer->tsoh->next = tx_queue->tso_headers_free;
-                       tx_queue->tso_headers_free = buffer->tsoh;
-               } else {
-                       efx_tsoh_heap_free(tx_queue, buffer->tsoh);
-               }
-               buffer->tsoh = NULL;
-       }
-}
-
 
 static inline unsigned
 efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
@@ -138,6 +95,56 @@ unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
        return max_descs;
 }
 
+/* Get partner of a TX queue, seen as part of the same net core queue */
+static struct efx_tx_queue *efx_tx_queue_partner(struct efx_tx_queue *tx_queue)
+{
+       if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
+               return tx_queue - EFX_TXQ_TYPE_OFFLOAD;
+       else
+               return tx_queue + EFX_TXQ_TYPE_OFFLOAD;
+}
+
+static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
+{
+       /* We need to consider both queues that the net core sees as one */
+       struct efx_tx_queue *txq2 = efx_tx_queue_partner(txq1);
+       struct efx_nic *efx = txq1->efx;
+       unsigned int fill_level;
+
+       fill_level = max(txq1->insert_count - txq1->old_read_count,
+                        txq2->insert_count - txq2->old_read_count);
+       if (likely(fill_level < efx->txq_stop_thresh))
+               return;
+
+       /* We used the stale old_read_count above, which gives us a
+        * pessimistic estimate of the fill level (which may even
+        * validly be >= efx->txq_entries).  Now try again using
+        * read_count (more likely to be a cache miss).
+        *
+        * If we read read_count and then conditionally stop the
+        * queue, it is possible for the completion path to race with
+        * us and complete all outstanding descriptors in the middle,
+        * after which there will be no more completions to wake it.
+        * Therefore we stop the queue first, then read read_count
+        * (with a memory barrier to ensure the ordering), then
+        * restart the queue if the fill level turns out to be low
+        * enough.
+        */
+       netif_tx_stop_queue(txq1->core_txq);
+       smp_mb();
+       txq1->old_read_count = ACCESS_ONCE(txq1->read_count);
+       txq2->old_read_count = ACCESS_ONCE(txq2->read_count);
+
+       fill_level = max(txq1->insert_count - txq1->old_read_count,
+                        txq2->insert_count - txq2->old_read_count);
+       EFX_BUG_ON_PARANOID(fill_level >= efx->txq_entries);
+       if (likely(fill_level < efx->txq_stop_thresh)) {
+               smp_mb();
+               if (likely(!efx->loopback_selftest))
+                       netif_tx_start_queue(txq1->core_txq);
+       }
+}
+
 /*
  * Add a socket buffer to a TX queue
  *
@@ -151,7 +158,7 @@ unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
  * This function is split out from efx_hard_start_xmit to allow the
  * loopback test to direct packets via specific TX queues.
  *
- * Returns NETDEV_TX_OK or NETDEV_TX_BUSY
+ * Returns NETDEV_TX_OK.
  * You must hold netif_tx_lock() to call this function.
  */
 netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
@@ -160,12 +167,11 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
        struct device *dma_dev = &efx->pci_dev->dev;
        struct efx_tx_buffer *buffer;
        skb_frag_t *fragment;
-       unsigned int len, unmap_len = 0, fill_level, insert_ptr;
+       unsigned int len, unmap_len = 0, insert_ptr;
        dma_addr_t dma_addr, unmap_addr = 0;
        unsigned int dma_len;
-       bool unmap_single;
-       int q_space, i = 0;
-       netdev_tx_t rc = NETDEV_TX_OK;
+       unsigned short dma_flags;
+       int i = 0;
 
        EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);
 
@@ -183,14 +189,11 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
                        return NETDEV_TX_OK;
        }
 
-       fill_level = tx_queue->insert_count - tx_queue->old_read_count;
-       q_space = efx->txq_entries - 1 - fill_level;
-
        /* Map for DMA.  Use dma_map_single rather than dma_map_page
         * since this is more efficient on machines with sparse
         * memory.
         */
-       unmap_single = true;
+       dma_flags = EFX_TX_BUF_MAP_SINGLE;
        dma_addr = dma_map_single(dma_dev, skb->data, len, PCI_DMA_TODEVICE);
 
        /* Process all fragments */
@@ -205,39 +208,10 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 
                /* Add to TX queue, splitting across DMA boundaries */
                do {
-                       if (unlikely(q_space-- <= 0)) {
-                               /* It might be that completions have
-                                * happened since the xmit path last
-                                * checked.  Update the xmit path's
-                                * copy of read_count.
-                                */
-                               netif_tx_stop_queue(tx_queue->core_txq);
-                               /* This memory barrier protects the
-                                * change of queue state from the access
-                                * of read_count. */
-                               smp_mb();
-                               tx_queue->old_read_count =
-                                       ACCESS_ONCE(tx_queue->read_count);
-                               fill_level = (tx_queue->insert_count
-                                             - tx_queue->old_read_count);
-                               q_space = efx->txq_entries - 1 - fill_level;
-                               if (unlikely(q_space-- <= 0)) {
-                                       rc = NETDEV_TX_BUSY;
-                                       goto unwind;
-                               }
-                               smp_mb();
-                               if (likely(!efx->loopback_selftest))
-                                       netif_tx_start_queue(
-                                               tx_queue->core_txq);
-                       }
-
                        insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
                        buffer = &tx_queue->buffer[insert_ptr];
-                       efx_tsoh_free(tx_queue, buffer);
-                       EFX_BUG_ON_PARANOID(buffer->tsoh);
-                       EFX_BUG_ON_PARANOID(buffer->skb);
+                       EFX_BUG_ON_PARANOID(buffer->flags);
                        EFX_BUG_ON_PARANOID(buffer->len);
-                       EFX_BUG_ON_PARANOID(!buffer->continuation);
                        EFX_BUG_ON_PARANOID(buffer->unmap_len);
 
                        dma_len = efx_max_tx_len(efx, dma_addr);
@@ -247,13 +221,14 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
                        /* Fill out per descriptor fields */
                        buffer->len = dma_len;
                        buffer->dma_addr = dma_addr;
+                       buffer->flags = EFX_TX_BUF_CONT;
                        len -= dma_len;
                        dma_addr += dma_len;
                        ++tx_queue->insert_count;
                } while (len);
 
                /* Transfer ownership of the unmapping to the final buffer */
-               buffer->unmap_single = unmap_single;
+               buffer->flags = EFX_TX_BUF_CONT | dma_flags;
                buffer->unmap_len = unmap_len;
                unmap_len = 0;
 
@@ -264,20 +239,22 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
                len = skb_frag_size(fragment);
                i++;
                /* Map for DMA */
-               unmap_single = false;
+               dma_flags = 0;
                dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len,
                                            DMA_TO_DEVICE);
        }
 
        /* Transfer ownership of the skb to the final buffer */
        buffer->skb = skb;
-       buffer->continuation = false;
+       buffer->flags = EFX_TX_BUF_SKB | dma_flags;
 
        netdev_tx_sent_queue(tx_queue->core_txq, skb->len);
 
        /* Pass off to hardware */
        efx_nic_push_buffers(tx_queue);
 
+       efx_tx_maybe_stop_queue(tx_queue);
+
        return NETDEV_TX_OK;
 
  dma_err:
@@ -289,7 +266,6 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
        /* Mark the packet as transmitted, and free the SKB ourselves */
        dev_kfree_skb_any(skb);
 
- unwind:
        /* Work backwards until we hit the original insert pointer value */
        while (tx_queue->insert_count != tx_queue->write_count) {
                unsigned int pkts_compl = 0, bytes_compl = 0;
@@ -297,12 +273,11 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
                insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
                buffer = &tx_queue->buffer[insert_ptr];
                efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
-               buffer->len = 0;
        }
 
        /* Free the fragment we were mid-way through pushing */
        if (unmap_len) {
-               if (unmap_single)
+               if (dma_flags & EFX_TX_BUF_MAP_SINGLE)
                        dma_unmap_single(dma_dev, unmap_addr, unmap_len,
                                         DMA_TO_DEVICE);
                else
@@ -310,7 +285,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
                                       DMA_TO_DEVICE);
        }
 
-       return rc;
+       return NETDEV_TX_OK;
 }
 
 /* Remove packets from the TX queue
@@ -340,8 +315,6 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
                }
 
                efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);
-               buffer->continuation = true;
-               buffer->len = 0;
 
                ++tx_queue->read_count;
                read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
@@ -366,6 +339,12 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
 
        EFX_WARN_ON_PARANOID(!netif_device_present(net_dev));
 
+       /* PTP "event" packet */
+       if (unlikely(efx_xmit_with_hwtstamp(skb)) &&
+           unlikely(efx_ptp_is_ptp_tx(efx, skb))) {
+               return efx_ptp_tx(efx, skb);
+       }
+
        index = skb_get_queue_mapping(skb);
        type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
        if (index >= efx->n_tx_channels) {
@@ -450,6 +429,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
 {
        unsigned fill_level;
        struct efx_nic *efx = tx_queue->efx;
+       struct efx_tx_queue *txq2;
        unsigned int pkts_compl = 0, bytes_compl = 0;
 
        EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);
@@ -457,15 +437,18 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
        efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
        netdev_tx_completed_queue(tx_queue->core_txq, pkts_compl, bytes_compl);
 
-       /* See if we need to restart the netif queue.  This barrier
-        * separates the update of read_count from the test of the
-        * queue state. */
+       /* See if we need to restart the netif queue.  This memory
+        * barrier ensures that we write read_count (inside
+        * efx_dequeue_buffers()) before reading the queue status.
+        */
        smp_mb();
        if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
            likely(efx->port_enabled) &&
            likely(netif_device_present(efx->net_dev))) {
-               fill_level = tx_queue->insert_count - tx_queue->read_count;
-               if (fill_level < EFX_TXQ_THRESHOLD(efx))
+               txq2 = efx_tx_queue_partner(tx_queue);
+               fill_level = max(tx_queue->insert_count - tx_queue->read_count,
+                                txq2->insert_count - txq2->read_count);
+               if (fill_level <= efx->txq_wake_thresh)
                        netif_tx_wake_queue(tx_queue->core_txq);
        }
 
@@ -480,11 +463,26 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
        }
 }
 
+/* Size of page-based TSO header buffers.  Larger blocks must be
+ * allocated from the heap.
+ */
+#define TSOH_STD_SIZE  128
+#define TSOH_PER_PAGE  (PAGE_SIZE / TSOH_STD_SIZE)
+
+/* At most half the descriptors in the queue at any time will refer to
+ * a TSO header buffer, since they must always be followed by a
+ * payload descriptor referring to an skb.
+ */
+static unsigned int efx_tsoh_page_count(struct efx_tx_queue *tx_queue)
+{
+       return DIV_ROUND_UP(tx_queue->ptr_mask + 1, 2 * TSOH_PER_PAGE);
+}
+
 int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
 {
        struct efx_nic *efx = tx_queue->efx;
        unsigned int entries;
-       int i, rc;
+       int rc;
 
        /* Create the smallest power-of-two aligned ring */
        entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
@@ -500,17 +498,28 @@ int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
                                   GFP_KERNEL);
        if (!tx_queue->buffer)
                return -ENOMEM;
-       for (i = 0; i <= tx_queue->ptr_mask; ++i)
-               tx_queue->buffer[i].continuation = true;
+
+       if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD) {
+               tx_queue->tsoh_page =
+                       kcalloc(efx_tsoh_page_count(tx_queue),
+                               sizeof(tx_queue->tsoh_page[0]), GFP_KERNEL);
+               if (!tx_queue->tsoh_page) {
+                       rc = -ENOMEM;
+                       goto fail1;
+               }
+       }
 
        /* Allocate hardware ring */
        rc = efx_nic_probe_tx(tx_queue);
        if (rc)
-               goto fail;
+               goto fail2;
 
        return 0;
 
- fail:
+fail2:
+       kfree(tx_queue->tsoh_page);
+       tx_queue->tsoh_page = NULL;
+fail1:
        kfree(tx_queue->buffer);
        tx_queue->buffer = NULL;
        return rc;
@@ -546,8 +555,6 @@ void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
                unsigned int pkts_compl = 0, bytes_compl = 0;
                buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
                efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
-               buffer->continuation = true;
-               buffer->len = 0;
 
                ++tx_queue->read_count;
        }
@@ -568,13 +575,12 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
        efx_nic_fini_tx(tx_queue);
 
        efx_release_tx_buffers(tx_queue);
-
-       /* Free up TSO header cache */
-       efx_fini_tso(tx_queue);
 }
 
 void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
 {
+       int i;
+
        if (!tx_queue->buffer)
                return;
 
@@ -582,6 +588,14 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
                  "destroying TX queue %d\n", tx_queue->queue);
        efx_nic_remove_tx(tx_queue);
 
+       if (tx_queue->tsoh_page) {
+               for (i = 0; i < efx_tsoh_page_count(tx_queue); i++)
+                       efx_nic_free_buffer(tx_queue->efx,
+                                           &tx_queue->tsoh_page[i]);
+               kfree(tx_queue->tsoh_page);
+               tx_queue->tsoh_page = NULL;
+       }
+
        kfree(tx_queue->buffer);
        tx_queue->buffer = NULL;
 }
@@ -604,22 +618,7 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
 #define TSOH_OFFSET    NET_IP_ALIGN
 #endif
 
-#define TSOH_BUFFER(tsoh)      ((u8 *)(tsoh + 1) + TSOH_OFFSET)
-
-/* Total size of struct efx_tso_header, buffer and padding */
-#define TSOH_SIZE(hdr_len)                                     \
-       (sizeof(struct efx_tso_header) + TSOH_OFFSET + hdr_len)
-
-/* Size of blocks on free list.  Larger blocks must be allocated from
- * the heap.
- */
-#define TSOH_STD_SIZE          128
-
 #define PTR_DIFF(p1, p2)  ((u8 *)(p1) - (u8 *)(p2))
-#define ETH_HDR_LEN(skb)  (skb_network_header(skb) - (skb)->data)
-#define SKB_TCP_OFF(skb)  PTR_DIFF(tcp_hdr(skb), (skb)->data)
-#define SKB_IPV4_OFF(skb) PTR_DIFF(ip_hdr(skb), (skb)->data)
-#define SKB_IPV6_OFF(skb) PTR_DIFF(ipv6_hdr(skb), (skb)->data)
 
 /**
  * struct tso_state - TSO state for an SKB
@@ -631,10 +630,12 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
  * @in_len: Remaining length in current SKB fragment
  * @unmap_len: Length of SKB fragment
  * @unmap_addr: DMA address of SKB fragment
- * @unmap_single: DMA single vs page mapping flag
+ * @dma_flags: TX buffer flags for DMA mapping - %EFX_TX_BUF_MAP_SINGLE or 0
  * @protocol: Network protocol (after any VLAN header)
+ * @ip_off: Offset of IP header
+ * @tcp_off: Offset of TCP header
  * @header_len: Number of bytes of header
- * @full_packet_size: Number of bytes to put in each outgoing segment
+ * @ip_base_len: IPv4 tot_len or IPv6 payload_len, before TCP payload
  *
  * The state used during segmentation.  It is put into this data structure
  * just to make it easy to pass into inline functions.
@@ -651,11 +652,13 @@ struct tso_state {
        unsigned in_len;
        unsigned unmap_len;
        dma_addr_t unmap_addr;
-       bool unmap_single;
+       unsigned short dma_flags;
 
        __be16 protocol;
+       unsigned int ip_off;
+       unsigned int tcp_off;
        unsigned header_len;
-       int full_packet_size;
+       unsigned int ip_base_len;
 };
 
 
@@ -687,91 +690,43 @@ static __be16 efx_tso_check_protocol(struct sk_buff *skb)
        return protocol;
 }
 
-
-/*
- * Allocate a page worth of efx_tso_header structures, and string them
- * into the tx_queue->tso_headers_free linked list. Return 0 or -ENOMEM.
- */
-static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue)
+static u8 *efx_tsoh_get_buffer(struct efx_tx_queue *tx_queue,
+                              struct efx_tx_buffer *buffer, unsigned int len)
 {
-       struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
-       struct efx_tso_header *tsoh;
-       dma_addr_t dma_addr;
-       u8 *base_kva, *kva;
+       u8 *result;
 
-       base_kva = dma_alloc_coherent(dma_dev, PAGE_SIZE, &dma_addr, GFP_ATOMIC);
-       if (base_kva == NULL) {
-               netif_err(tx_queue->efx, tx_err, tx_queue->efx->net_dev,
-                         "Unable to allocate page for TSO headers\n");
-               return -ENOMEM;
-       }
-
-       /* dma_alloc_coherent() allocates pages. */
-       EFX_BUG_ON_PARANOID(dma_addr & (PAGE_SIZE - 1u));
-
-       for (kva = base_kva; kva < base_kva + PAGE_SIZE; kva += TSOH_STD_SIZE) {
-               tsoh = (struct efx_tso_header *)kva;
-               tsoh->dma_addr = dma_addr + (TSOH_BUFFER(tsoh) - base_kva);
-               tsoh->next = tx_queue->tso_headers_free;
-               tx_queue->tso_headers_free = tsoh;
-       }
-
-       return 0;
-}
-
-
-/* Free up a TSO header, and all others in the same page. */
-static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue,
-                               struct efx_tso_header *tsoh,
-                               struct device *dma_dev)
-{
-       struct efx_tso_header **p;
-       unsigned long base_kva;
-       dma_addr_t base_dma;
-
-       base_kva = (unsigned long)tsoh & PAGE_MASK;
-       base_dma = tsoh->dma_addr & PAGE_MASK;
-
-       p = &tx_queue->tso_headers_free;
-       while (*p != NULL) {
-               if (((unsigned long)*p & PAGE_MASK) == base_kva)
-                       *p = (*p)->next;
-               else
-                       p = &(*p)->next;
-       }
+       EFX_BUG_ON_PARANOID(buffer->len);
+       EFX_BUG_ON_PARANOID(buffer->flags);
+       EFX_BUG_ON_PARANOID(buffer->unmap_len);
 
-       dma_free_coherent(dma_dev, PAGE_SIZE, (void *)base_kva, base_dma);
-}
+       if (likely(len <= TSOH_STD_SIZE - TSOH_OFFSET)) {
+               unsigned index =
+                       (tx_queue->insert_count & tx_queue->ptr_mask) / 2;
+               struct efx_buffer *page_buf =
+                       &tx_queue->tsoh_page[index / TSOH_PER_PAGE];
+               unsigned offset =
+                       TSOH_STD_SIZE * (index % TSOH_PER_PAGE) + TSOH_OFFSET;
+
+               if (unlikely(!page_buf->addr) &&
+                   efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE))
+                       return NULL;
+
+               result = (u8 *)page_buf->addr + offset;
+               buffer->dma_addr = page_buf->dma_addr + offset;
+               buffer->flags = EFX_TX_BUF_CONT;
+       } else {
+               tx_queue->tso_long_headers++;
 
-static struct efx_tso_header *
-efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len)
-{
-       struct efx_tso_header *tsoh;
-
-       tsoh = kmalloc(TSOH_SIZE(header_len), GFP_ATOMIC | GFP_DMA);
-       if (unlikely(!tsoh))
-               return NULL;
-
-       tsoh->dma_addr = dma_map_single(&tx_queue->efx->pci_dev->dev,
-                                       TSOH_BUFFER(tsoh), header_len,
-                                       DMA_TO_DEVICE);
-       if (unlikely(dma_mapping_error(&tx_queue->efx->pci_dev->dev,
-                                      tsoh->dma_addr))) {
-               kfree(tsoh);
-               return NULL;
+               buffer->heap_buf = kmalloc(TSOH_OFFSET + len, GFP_ATOMIC);
+               if (unlikely(!buffer->heap_buf))
+                       return NULL;
+               result = (u8 *)buffer->heap_buf + TSOH_OFFSET;
+               buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_HEAP;
        }
 
-       tsoh->unmap_len = header_len;
-       return tsoh;
-}
+       buffer->len = len;
 
-static void
-efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh)
-{
-       dma_unmap_single(&tx_queue->efx->pci_dev->dev,
-                        tsoh->dma_addr, tsoh->unmap_len,
-                        DMA_TO_DEVICE);
-       kfree(tsoh);
+       return result;
 }
 
 /**
@@ -781,47 +736,19 @@ efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh)
  * @len:               Length of fragment
  * @final_buffer:      The final buffer inserted into the queue
  *
- * Push descriptors onto the TX queue.  Return 0 on success or 1 if
- * @tx_queue full.
+ * Push descriptors onto the TX queue.
  */
-static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
-                              dma_addr_t dma_addr, unsigned len,
-                              struct efx_tx_buffer **final_buffer)
+static void efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
+                               dma_addr_t dma_addr, unsigned len,
+                               struct efx_tx_buffer **final_buffer)
 {
        struct efx_tx_buffer *buffer;
        struct efx_nic *efx = tx_queue->efx;
-       unsigned dma_len, fill_level, insert_ptr;
-       int q_space;
+       unsigned dma_len, insert_ptr;
 
        EFX_BUG_ON_PARANOID(len <= 0);
 
-       fill_level = tx_queue->insert_count - tx_queue->old_read_count;
-       /* -1 as there is no way to represent all descriptors used */
-       q_space = efx->txq_entries - 1 - fill_level;
-
        while (1) {
-               if (unlikely(q_space-- <= 0)) {
-                       /* It might be that completions have happened
-                        * since the xmit path last checked.  Update
-                        * the xmit path's copy of read_count.
-                        */
-                       netif_tx_stop_queue(tx_queue->core_txq);
-                       /* This memory barrier protects the change of
-                        * queue state from the access of read_count. */
-                       smp_mb();
-                       tx_queue->old_read_count =
-                               ACCESS_ONCE(tx_queue->read_count);
-                       fill_level = (tx_queue->insert_count
-                                     - tx_queue->old_read_count);
-                       q_space = efx->txq_entries - 1 - fill_level;
-                       if (unlikely(q_space-- <= 0)) {
-                               *final_buffer = NULL;
-                               return 1;
-                       }
-                       smp_mb();
-                       netif_tx_start_queue(tx_queue->core_txq);
-               }
-
                insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
                buffer = &tx_queue->buffer[insert_ptr];
                ++tx_queue->insert_count;
@@ -830,12 +757,9 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
                                    tx_queue->read_count >=
                                    efx->txq_entries);
 
-               efx_tsoh_free(tx_queue, buffer);
                EFX_BUG_ON_PARANOID(buffer->len);
                EFX_BUG_ON_PARANOID(buffer->unmap_len);
-               EFX_BUG_ON_PARANOID(buffer->skb);
-               EFX_BUG_ON_PARANOID(!buffer->continuation);
-               EFX_BUG_ON_PARANOID(buffer->tsoh);
+               EFX_BUG_ON_PARANOID(buffer->flags);
 
                buffer->dma_addr = dma_addr;
 
@@ -845,7 +769,8 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
                if (dma_len >= len)
                        break;
 
-               buffer->len = dma_len; /* Don't set the other members */
+               buffer->len = dma_len;
+               buffer->flags = EFX_TX_BUF_CONT;
                dma_addr += dma_len;
                len -= dma_len;
        }
@@ -853,7 +778,6 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
        EFX_BUG_ON_PARANOID(!len);
        buffer->len = len;
        *final_buffer = buffer;
-       return 0;
 }
 
 
@@ -864,54 +788,42 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
  * a single fragment, and we know it doesn't cross a page boundary.  It
  * also allows us to not worry about end-of-packet etc.
  */
-static void efx_tso_put_header(struct efx_tx_queue *tx_queue,
-                              struct efx_tso_header *tsoh, unsigned len)
+static int efx_tso_put_header(struct efx_tx_queue *tx_queue,
+                             struct efx_tx_buffer *buffer, u8 *header)
 {
-       struct efx_tx_buffer *buffer;
-
-       buffer = &tx_queue->buffer[tx_queue->insert_count & tx_queue->ptr_mask];
-       efx_tsoh_free(tx_queue, buffer);
-       EFX_BUG_ON_PARANOID(buffer->len);
-       EFX_BUG_ON_PARANOID(buffer->unmap_len);
-       EFX_BUG_ON_PARANOID(buffer->skb);
-       EFX_BUG_ON_PARANOID(!buffer->continuation);
-       EFX_BUG_ON_PARANOID(buffer->tsoh);
-       buffer->len = len;
-       buffer->dma_addr = tsoh->dma_addr;
-       buffer->tsoh = tsoh;
+       if (unlikely(buffer->flags & EFX_TX_BUF_HEAP)) {
+               buffer->dma_addr = dma_map_single(&tx_queue->efx->pci_dev->dev,
+                                                 header, buffer->len,
+                                                 DMA_TO_DEVICE);
+               if (unlikely(dma_mapping_error(&tx_queue->efx->pci_dev->dev,
+                                              buffer->dma_addr))) {
+                       kfree(buffer->heap_buf);
+                       buffer->len = 0;
+                       buffer->flags = 0;
+                       return -ENOMEM;
+               }
+               buffer->unmap_len = buffer->len;
+               buffer->flags |= EFX_TX_BUF_MAP_SINGLE;
+       }
 
        ++tx_queue->insert_count;
+       return 0;
 }
 
 
-/* Remove descriptors put into a tx_queue. */
+/* Remove buffers put into a tx_queue.  None of the buffers must have
+ * an skb attached.
+ */
 static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
 {
        struct efx_tx_buffer *buffer;
-       dma_addr_t unmap_addr;
 
        /* Work backwards until we hit the original insert pointer value */
        while (tx_queue->insert_count != tx_queue->write_count) {
                --tx_queue->insert_count;
                buffer = &tx_queue->buffer[tx_queue->insert_count &
                                           tx_queue->ptr_mask];
-               efx_tsoh_free(tx_queue, buffer);
-               EFX_BUG_ON_PARANOID(buffer->skb);
-               if (buffer->unmap_len) {
-                       unmap_addr = (buffer->dma_addr + buffer->len -
-                                     buffer->unmap_len);
-                       if (buffer->unmap_single)
-                               dma_unmap_single(&tx_queue->efx->pci_dev->dev,
-                                                unmap_addr, buffer->unmap_len,
-                                                DMA_TO_DEVICE);
-                       else
-                               dma_unmap_page(&tx_queue->efx->pci_dev->dev,
-                                              unmap_addr, buffer->unmap_len,
-                                              DMA_TO_DEVICE);
-                       buffer->unmap_len = 0;
-               }
-               buffer->len = 0;
-               buffer->continuation = true;
+               efx_dequeue_buffer(tx_queue, buffer, NULL, NULL);
        }
 }
 
@@ -919,17 +831,16 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
 /* Parse the SKB header and initialise state. */
 static void tso_start(struct tso_state *st, const struct sk_buff *skb)
 {
-       /* All ethernet/IP/TCP headers combined size is TCP header size
-        * plus offset of TCP header relative to start of packet.
-        */
-       st->header_len = ((tcp_hdr(skb)->doff << 2u)
-                         + PTR_DIFF(tcp_hdr(skb), skb->data));
-       st->full_packet_size = st->header_len + skb_shinfo(skb)->gso_size;
-
-       if (st->protocol == htons(ETH_P_IP))
+       st->ip_off = skb_network_header(skb) - skb->data;
+       st->tcp_off = skb_transport_header(skb) - skb->data;
+       st->header_len = st->tcp_off + (tcp_hdr(skb)->doff << 2u);
+       if (st->protocol == htons(ETH_P_IP)) {
+               st->ip_base_len = st->header_len - st->ip_off;
                st->ipv4_id = ntohs(ip_hdr(skb)->id);
-       else
+       } else {
+               st->ip_base_len = st->header_len - st->tcp_off;
                st->ipv4_id = 0;
+       }
        st->seqnum = ntohl(tcp_hdr(skb)->seq);
 
        EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg);
@@ -938,7 +849,7 @@ static void tso_start(struct tso_state *st, const struct sk_buff *skb)
 
        st->out_len = skb->len - st->header_len;
        st->unmap_len = 0;
-       st->unmap_single = false;
+       st->dma_flags = 0;
 }
 
 static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
@@ -947,7 +858,7 @@ static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
        st->unmap_addr = skb_frag_dma_map(&efx->pci_dev->dev, frag, 0,
                                          skb_frag_size(frag), DMA_TO_DEVICE);
        if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
-               st->unmap_single = false;
+               st->dma_flags = 0;
                st->unmap_len = skb_frag_size(frag);
                st->in_len = skb_frag_size(frag);
                st->dma_addr = st->unmap_addr;
@@ -965,7 +876,7 @@ static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx,
        st->unmap_addr = dma_map_single(&efx->pci_dev->dev, skb->data + hl,
                                        len, DMA_TO_DEVICE);
        if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
-               st->unmap_single = true;
+               st->dma_flags = EFX_TX_BUF_MAP_SINGLE;
                st->unmap_len = len;
                st->in_len = len;
                st->dma_addr = st->unmap_addr;
@@ -982,20 +893,19 @@ static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx,
  * @st:                        TSO state
  *
  * Form descriptors for the current fragment, until we reach the end
- * of fragment or end-of-packet.  Return 0 on success, 1 if not enough
- * space in @tx_queue.
+ * of fragment or end-of-packet.
  */
-static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
-                                        const struct sk_buff *skb,
-                                        struct tso_state *st)
+static void tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
+                                         const struct sk_buff *skb,
+                                         struct tso_state *st)
 {
        struct efx_tx_buffer *buffer;
-       int n, end_of_packet, rc;
+       int n;
 
        if (st->in_len == 0)
-               return 0;
+               return;
        if (st->packet_space == 0)
-               return 0;
+               return;
 
        EFX_BUG_ON_PARANOID(st->in_len <= 0);
        EFX_BUG_ON_PARANOID(st->packet_space <= 0);
@@ -1006,25 +916,24 @@ static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
        st->out_len -= n;
        st->in_len -= n;
 
-       rc = efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer);
-       if (likely(rc == 0)) {
-               if (st->out_len == 0)
-                       /* Transfer ownership of the skb */
-                       buffer->skb = skb;
+       efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer);
 
-               end_of_packet = st->out_len == 0 || st->packet_space == 0;
-               buffer->continuation = !end_of_packet;
+       if (st->out_len == 0) {
+               /* Transfer ownership of the skb */
+               buffer->skb = skb;
+               buffer->flags = EFX_TX_BUF_SKB;
+       } else if (st->packet_space != 0) {
+               buffer->flags = EFX_TX_BUF_CONT;
+       }
 
-               if (st->in_len == 0) {
-                       /* Transfer ownership of the DMA mapping */
-                       buffer->unmap_len = st->unmap_len;
-                       buffer->unmap_single = st->unmap_single;
-                       st->unmap_len = 0;
-               }
+       if (st->in_len == 0) {
+               /* Transfer ownership of the DMA mapping */
+               buffer->unmap_len = st->unmap_len;
+               buffer->flags |= st->dma_flags;
+               st->unmap_len = 0;
        }
 
        st->dma_addr += n;
-       return rc;
 }
 
 
@@ -1035,36 +944,25 @@ static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
  * @st:                        TSO state
  *
  * Generate a new header and prepare for the new packet.  Return 0 on
- * success, or -1 if failed to alloc header.
+ * success, or -%ENOMEM if failed to alloc header.
  */
 static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
                                const struct sk_buff *skb,
                                struct tso_state *st)
 {
-       struct efx_tso_header *tsoh;
+       struct efx_tx_buffer *buffer =
+               &tx_queue->buffer[tx_queue->insert_count & tx_queue->ptr_mask];
        struct tcphdr *tsoh_th;
        unsigned ip_length;
        u8 *header;
+       int rc;
 
-       /* Allocate a DMA-mapped header buffer. */
-       if (likely(TSOH_SIZE(st->header_len) <= TSOH_STD_SIZE)) {
-               if (tx_queue->tso_headers_free == NULL) {
-                       if (efx_tsoh_block_alloc(tx_queue))
-                               return -1;
-               }
-               EFX_BUG_ON_PARANOID(!tx_queue->tso_headers_free);
-               tsoh = tx_queue->tso_headers_free;
-               tx_queue->tso_headers_free = tsoh->next;
-               tsoh->unmap_len = 0;
-       } else {
-               tx_queue->tso_long_headers++;
-               tsoh = efx_tsoh_heap_alloc(tx_queue, st->header_len);
-               if (unlikely(!tsoh))
-                       return -1;
-       }
+       /* Allocate and insert a DMA-mapped header buffer. */
+       header = efx_tsoh_get_buffer(tx_queue, buffer, st->header_len);
+       if (!header)
+               return -ENOMEM;
 
-       header = TSOH_BUFFER(tsoh);
-       tsoh_th = (struct tcphdr *)(header + SKB_TCP_OFF(skb));
+       tsoh_th = (struct tcphdr *)(header + st->tcp_off);
 
        /* Copy and update the headers. */
        memcpy(header, skb->data, st->header_len);
@@ -1073,19 +971,19 @@ static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
        st->seqnum += skb_shinfo(skb)->gso_size;
        if (st->out_len > skb_shinfo(skb)->gso_size) {
                /* This packet will not finish the TSO burst. */
-               ip_length = st->full_packet_size - ETH_HDR_LEN(skb);
+               st->packet_space = skb_shinfo(skb)->gso_size;
                tsoh_th->fin = 0;
                tsoh_th->psh = 0;
        } else {
                /* This packet will be the last in the TSO burst. */
-               ip_length = st->header_len - ETH_HDR_LEN(skb) + st->out_len;
+               st->packet_space = st->out_len;
                tsoh_th->fin = tcp_hdr(skb)->fin;
                tsoh_th->psh = tcp_hdr(skb)->psh;
        }
+       ip_length = st->ip_base_len + st->packet_space;
 
        if (st->protocol == htons(ETH_P_IP)) {
-               struct iphdr *tsoh_iph =
-                       (struct iphdr *)(header + SKB_IPV4_OFF(skb));
+               struct iphdr *tsoh_iph = (struct iphdr *)(header + st->ip_off);
 
                tsoh_iph->tot_len = htons(ip_length);
 
@@ -1094,16 +992,16 @@ static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
                st->ipv4_id++;
        } else {
                struct ipv6hdr *tsoh_iph =
-                       (struct ipv6hdr *)(header + SKB_IPV6_OFF(skb));
+                       (struct ipv6hdr *)(header + st->ip_off);
 
-               tsoh_iph->payload_len = htons(ip_length - sizeof(*tsoh_iph));
+               tsoh_iph->payload_len = htons(ip_length);
        }
 
-       st->packet_space = skb_shinfo(skb)->gso_size;
-       ++tx_queue->tso_packets;
+       rc = efx_tso_put_header(tx_queue, buffer, header);
+       if (unlikely(rc))
+               return rc;
 
-       /* Form a descriptor for this header. */
-       efx_tso_put_header(tx_queue, tsoh, st->header_len);
+       ++tx_queue->tso_packets;
 
        return 0;
 }
@@ -1118,13 +1016,13 @@ static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
  *
  * Add socket buffer @skb to @tx_queue, doing TSO or return != 0 if
  * @skb was not enqueued.  In all cases @skb is consumed.  Return
- * %NETDEV_TX_OK or %NETDEV_TX_BUSY.
+ * %NETDEV_TX_OK.
  */
 static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
                               struct sk_buff *skb)
 {
        struct efx_nic *efx = tx_queue->efx;
-       int frag_i, rc, rc2 = NETDEV_TX_OK;
+       int frag_i, rc;
        struct tso_state state;
 
        /* Find the packet protocol and sanity-check it */
@@ -1156,11 +1054,7 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
                goto mem_err;
 
        while (1) {
-               rc = tso_fill_packet_with_fragment(tx_queue, skb, &state);
-               if (unlikely(rc)) {
-                       rc2 = NETDEV_TX_BUSY;
-                       goto unwind;
-               }
+               tso_fill_packet_with_fragment(tx_queue, skb, &state);
 
                /* Move onto the next fragment? */
                if (state.in_len == 0) {
@@ -1184,6 +1078,8 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
        /* Pass off to hardware */
        efx_nic_push_buffers(tx_queue);
 
+       efx_tx_maybe_stop_queue(tx_queue);
+
        tx_queue->tso_bursts++;
        return NETDEV_TX_OK;
 
@@ -1192,10 +1088,9 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
                  "Out of memory for TSO headers, or DMA mapping error\n");
        dev_kfree_skb_any(skb);
 
- unwind:
        /* Free the DMA mapping we were in the process of writing out */
        if (state.unmap_len) {
-               if (state.unmap_single)
+               if (state.dma_flags & EFX_TX_BUF_MAP_SINGLE)
                        dma_unmap_single(&efx->pci_dev->dev, state.unmap_addr,
                                         state.unmap_len, DMA_TO_DEVICE);
                else
@@ -1204,25 +1099,5 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
        }
 
        efx_enqueue_unwind(tx_queue);
-       return rc2;
-}
-
-
-/*
- * Free up all TSO datastructures associated with tx_queue. This
- * routine should be called only once the tx_queue is both empty and
- * will no longer be used.
- */
-static void efx_fini_tso(struct efx_tx_queue *tx_queue)
-{
-       unsigned i;
-
-       if (tx_queue->buffer) {
-               for (i = 0; i <= tx_queue->ptr_mask; ++i)
-                       efx_tsoh_free(tx_queue, &tx_queue->buffer[i]);
-       }
-
-       while (tx_queue->tso_headers_free != NULL)
-               efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free,
-                                   &tx_queue->efx->pci_dev->dev);
+       return NETDEV_TX_OK;
 }
index ade108232048aca7771e2a3d0ea9d6d5a15f7a45..0376a5e6b2bf9c0584e45bf2e34c6f5ee8608033 100644 (file)
@@ -177,7 +177,7 @@ int stmmac_mdio_register(struct net_device *ndev)
        new_bus->write = &stmmac_mdio_write;
        new_bus->reset = &stmmac_mdio_reset;
        snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s-%x",
-               new_bus->name, mdio_bus_data->bus_id);
+               new_bus->name, priv->plat->bus_id);
        new_bus->priv = ndev;
        new_bus->irq = irqlist;
        new_bus->phy_mask = mdio_bus_data->phy_mask;
@@ -213,12 +213,10 @@ int stmmac_mdio_register(struct net_device *ndev)
                         * and no PHY number was provided to the MAC,
                         * use the one probed here.
                         */
-                       if ((priv->plat->bus_id == mdio_bus_data->bus_id) &&
-                           (priv->plat->phy_addr == -1))
+                       if (priv->plat->phy_addr == -1)
                                priv->plat->phy_addr = addr;
 
-                       act = (priv->plat->bus_id == mdio_bus_data->bus_id) &&
-                               (priv->plat->phy_addr == addr);
+                       act = (priv->plat->phy_addr == addr);
                        switch (phydev->irq) {
                        case PHY_POLL:
                                irq_str = "POLL";
@@ -258,6 +256,9 @@ int stmmac_mdio_unregister(struct net_device *ndev)
 {
        struct stmmac_priv *priv = netdev_priv(ndev);
 
+       if (!priv->mii)
+               return 0;
+
        mdiobus_unregister(priv->mii);
        priv->mii->priv = NULL;
        mdiobus_free(priv->mii);
index 13afb8edfadca49892f068e22554855dc930aa49..1f069b0f6af592342ecc1f6a023b9334acc4099b 100644 (file)
@@ -40,7 +40,6 @@ static void stmmac_default_data(void)
        plat_dat.has_gmac = 1;
        plat_dat.force_sf_dma_mode = 1;
 
-       mdio_data.bus_id = 1;
        mdio_data.phy_reset = NULL;
        mdio_data.phy_mask = 0;
        plat_dat.mdio_bus_data = &mdio_data;
index b93245c11995bc15329321e6879be78d4c1b44dc..ed112b55ae7f95a587150a0f5ed23d58ca318f97 100644 (file)
@@ -78,6 +78,7 @@ static int __devinit stmmac_pltfr_probe(struct platform_device *pdev)
 {
        int ret = 0;
        struct resource *res;
+       struct device *dev = &pdev->dev;
        void __iomem *addr = NULL;
        struct stmmac_priv *priv = NULL;
        struct plat_stmmacenet_data *plat_dat = NULL;
@@ -87,18 +88,10 @@ static int __devinit stmmac_pltfr_probe(struct platform_device *pdev)
        if (!res)
                return -ENODEV;
 
-       if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
-               pr_err("%s: ERROR: memory allocation failed"
-                      "cannot get the I/O addr 0x%x\n",
-                      __func__, (unsigned int)res->start);
-               return -EBUSY;
-       }
-
-       addr = ioremap(res->start, resource_size(res));
+       addr = devm_request_and_ioremap(dev, res);
        if (!addr) {
                pr_err("%s: ERROR: memory mapping failed", __func__);
-               ret = -ENOMEM;
-               goto out_release_region;
+               return -ENOMEM;
        }
 
        if (pdev->dev.of_node) {
@@ -107,14 +100,13 @@ static int __devinit stmmac_pltfr_probe(struct platform_device *pdev)
                                        GFP_KERNEL);
                if (!plat_dat) {
                        pr_err("%s: ERROR: no memory", __func__);
-                       ret = -ENOMEM;
-                       goto out_unmap;
+                       return  -ENOMEM;
                }
 
                ret = stmmac_probe_config_dt(pdev, plat_dat, &mac);
                if (ret) {
                        pr_err("%s: main dt probe failed", __func__);
-                       goto out_unmap;
+                       return ret;
                }
        } else {
                plat_dat = pdev->dev.platform_data;
@@ -124,13 +116,13 @@ static int __devinit stmmac_pltfr_probe(struct platform_device *pdev)
        if (plat_dat->init) {
                ret = plat_dat->init(pdev);
                if (unlikely(ret))
-                       goto out_unmap;
+                       return ret;
        }
 
        priv = stmmac_dvr_probe(&(pdev->dev), plat_dat, addr);
        if (!priv) {
                pr_err("%s: main driver probe failed", __func__);
-               goto out_unmap;
+               return -ENODEV;
        }
 
        /* Get MAC address if available (DT) */
@@ -142,8 +134,7 @@ static int __devinit stmmac_pltfr_probe(struct platform_device *pdev)
        if (priv->dev->irq == -ENXIO) {
                pr_err("%s: ERROR: MAC IRQ configuration "
                       "information not found\n", __func__);
-               ret = -ENXIO;
-               goto out_unmap;
+               return -ENXIO;
        }
 
        /*
@@ -165,15 +156,6 @@ static int __devinit stmmac_pltfr_probe(struct platform_device *pdev)
        pr_debug("STMMAC platform driver registration completed");
 
        return 0;
-
-out_unmap:
-       iounmap(addr);
-       platform_set_drvdata(pdev, NULL);
-
-out_release_region:
-       release_mem_region(res->start, resource_size(res));
-
-       return ret;
 }
 
 /**
@@ -186,7 +168,6 @@ static int stmmac_pltfr_remove(struct platform_device *pdev)
 {
        struct net_device *ndev = platform_get_drvdata(pdev);
        struct stmmac_priv *priv = netdev_priv(ndev);
-       struct resource *res;
        int ret = stmmac_dvr_remove(ndev);
 
        if (priv->plat->exit)
@@ -194,10 +175,6 @@ static int stmmac_pltfr_remove(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, NULL);
 
-       iounmap((void __force __iomem *)priv->ioaddr);
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       release_mem_region(res->start, resource_size(res));
-
        return ret;
 }
 
index ce4df61b4b5684e1ab3a6d92c3773803a1613beb..c8251be104d6ad6bde2d37b6cea7f2a0c9810429 100644 (file)
@@ -3890,7 +3890,7 @@ static int cas_change_mtu(struct net_device *dev, int new_mtu)
        schedule_work(&cp->reset_task);
 #endif
 
-       flush_work_sync(&cp->reset_task);
+       flush_work(&cp->reset_task);
        return 0;
 }
 
index 3208dca66758c51ff251a910b1cf1bd06a97c07c..8419bf385e08b89633948c237e4cce6821e40e77 100644 (file)
@@ -9927,7 +9927,7 @@ static int niu_suspend(struct pci_dev *pdev, pm_message_t state)
        if (!netif_running(dev))
                return 0;
 
-       flush_work_sync(&np->reset_task);
+       flush_work(&np->reset_task);
        niu_netif_stop(np);
 
        del_timer_sync(&np->timer);
index 967fe8cb476e56eda49013963a347f262712536d..c9c977bf02ace45e1e4be1b863d682995d39185f 100644 (file)
@@ -212,7 +212,6 @@ static void bigmac_clean_rings(struct bigmac *bp)
 static void bigmac_init_rings(struct bigmac *bp, int from_irq)
 {
        struct bmac_init_block *bb = bp->bmac_block;
-       struct net_device *dev = bp->dev;
        int i;
        gfp_t gfp_flags = GFP_KERNEL;
 
index 1b173a6145d642fb3c2fe26c58c8d8b995ce50b8..b26cbda5efa9b5264dd4e2bb885d35ea7e26b692 100644 (file)
@@ -32,7 +32,7 @@ config TI_DAVINCI_EMAC
 
 config TI_DAVINCI_MDIO
        tristate "TI DaVinci MDIO Support"
-       depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 )
+       depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 || SOC_AM33XX )
        select PHYLIB
        ---help---
          This driver supports TI's DaVinci MDIO module.
@@ -42,7 +42,7 @@ config TI_DAVINCI_MDIO
 
 config TI_DAVINCI_CPDMA
        tristate "TI DaVinci CPDMA Support"
-       depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 )
+       depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 || SOC_AM33XX )
        ---help---
          This driver supports TI's DaVinci CPDMA dma engine.
 
index 1e5d85b06e71b1dea196be8e0f4db1a928c62c4b..df55e240374646e2f076d9b4764fab6dc0a07bab 100644 (file)
@@ -28,6 +28,9 @@
 #include <linux/workqueue.h>
 #include <linux/delay.h>
 #include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/of_device.h>
 
 #include <linux/platform_data/cpsw.h>
 
@@ -383,6 +386,11 @@ static void _cpsw_adjust_link(struct cpsw_slave *slave,
                        mac_control |= BIT(7);  /* GIGABITEN    */
                if (phy->duplex)
                        mac_control |= BIT(0);  /* FULLDUPLEXEN */
+
+               /* set speed_in input in case RMII mode is used in 100Mbps */
+               if (phy->speed == 100)
+                       mac_control |= BIT(15);
+
                *link = true;
        } else {
                mac_control = 0;
@@ -709,6 +717,158 @@ static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv)
        slave->sliver   = regs + data->sliver_reg_ofs;
 }
 
+static int cpsw_probe_dt(struct cpsw_platform_data *data,
+                        struct platform_device *pdev)
+{
+       struct device_node *node = pdev->dev.of_node;
+       struct device_node *slave_node;
+       int i = 0, ret;
+       u32 prop;
+
+       if (!node)
+               return -EINVAL;
+
+       if (of_property_read_u32(node, "slaves", &prop)) {
+               pr_err("Missing slaves property in the DT.\n");
+               return -EINVAL;
+       }
+       data->slaves = prop;
+
+       data->slave_data = kzalloc(sizeof(struct cpsw_slave_data) *
+                                  data->slaves, GFP_KERNEL);
+       if (!data->slave_data) {
+               pr_err("Could not allocate slave memory.\n");
+               return -EINVAL;
+       }
+
+       data->no_bd_ram = of_property_read_bool(node, "no_bd_ram");
+
+       if (of_property_read_u32(node, "cpdma_channels", &prop)) {
+               pr_err("Missing cpdma_channels property in the DT.\n");
+               ret = -EINVAL;
+               goto error_ret;
+       }
+       data->channels = prop;
+
+       if (of_property_read_u32(node, "host_port_no", &prop)) {
+               pr_err("Missing host_port_no property in the DT.\n");
+               ret = -EINVAL;
+               goto error_ret;
+       }
+       data->host_port_num = prop;
+
+       if (of_property_read_u32(node, "cpdma_reg_ofs", &prop)) {
+               pr_err("Missing cpdma_reg_ofs property in the DT.\n");
+               ret = -EINVAL;
+               goto error_ret;
+       }
+       data->cpdma_reg_ofs = prop;
+
+       if (of_property_read_u32(node, "cpdma_sram_ofs", &prop)) {
+               pr_err("Missing cpdma_sram_ofs property in the DT.\n");
+               ret = -EINVAL;
+               goto error_ret;
+       }
+       data->cpdma_sram_ofs = prop;
+
+       if (of_property_read_u32(node, "ale_reg_ofs", &prop)) {
+               pr_err("Missing ale_reg_ofs property in the DT.\n");
+               ret = -EINVAL;
+               goto error_ret;
+       }
+       data->ale_reg_ofs = prop;
+
+       if (of_property_read_u32(node, "ale_entries", &prop)) {
+               pr_err("Missing ale_entries property in the DT.\n");
+               ret = -EINVAL;
+               goto error_ret;
+       }
+       data->ale_entries = prop;
+
+       if (of_property_read_u32(node, "host_port_reg_ofs", &prop)) {
+               pr_err("Missing host_port_reg_ofs property in the DT.\n");
+               ret = -EINVAL;
+               goto error_ret;
+       }
+       data->host_port_reg_ofs = prop;
+
+       if (of_property_read_u32(node, "hw_stats_reg_ofs", &prop)) {
+               pr_err("Missing hw_stats_reg_ofs property in the DT.\n");
+               ret = -EINVAL;
+               goto error_ret;
+       }
+       data->hw_stats_reg_ofs = prop;
+
+       if (of_property_read_u32(node, "bd_ram_ofs", &prop)) {
+               pr_err("Missing bd_ram_ofs property in the DT.\n");
+               ret = -EINVAL;
+               goto error_ret;
+       }
+       data->bd_ram_ofs = prop;
+
+       if (of_property_read_u32(node, "bd_ram_size", &prop)) {
+               pr_err("Missing bd_ram_size property in the DT.\n");
+               ret = -EINVAL;
+               goto error_ret;
+       }
+       data->bd_ram_size = prop;
+
+       if (of_property_read_u32(node, "rx_descs", &prop)) {
+               pr_err("Missing rx_descs property in the DT.\n");
+               ret = -EINVAL;
+               goto error_ret;
+       }
+       data->rx_descs = prop;
+
+       if (of_property_read_u32(node, "mac_control", &prop)) {
+               pr_err("Missing mac_control property in the DT.\n");
+               ret = -EINVAL;
+               goto error_ret;
+       }
+       data->mac_control = prop;
+
+       for_each_child_of_node(node, slave_node) {
+               struct cpsw_slave_data *slave_data = data->slave_data + i;
+               const char *phy_id = NULL;
+               const void *mac_addr = NULL;
+
+               if (of_property_read_string(slave_node, "phy_id", &phy_id)) {
+                       pr_err("Missing slave[%d] phy_id property\n", i);
+                       ret = -EINVAL;
+                       goto error_ret;
+               }
+               slave_data->phy_id = phy_id;
+
+               if (of_property_read_u32(slave_node, "slave_reg_ofs", &prop)) {
+                       pr_err("Missing slave[%d] slave_reg_ofs property\n", i);
+                       ret = -EINVAL;
+                       goto error_ret;
+               }
+               slave_data->slave_reg_ofs = prop;
+
+               if (of_property_read_u32(slave_node, "sliver_reg_ofs",
+                                        &prop)) {
+                       pr_err("Missing slave[%d] sliver_reg_ofs property\n",
+                               i);
+                       ret = -EINVAL;
+                       goto error_ret;
+               }
+               slave_data->sliver_reg_ofs = prop;
+
+               mac_addr = of_get_mac_address(slave_node);
+               if (mac_addr)
+                       memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN);
+
+               i++;
+       }
+
+       return 0;
+
+error_ret:
+       kfree(data->slave_data);
+       return ret;
+}
+
 static int __devinit cpsw_probe(struct platform_device *pdev)
 {
        struct cpsw_platform_data       *data = pdev->dev.platform_data;
@@ -720,11 +880,6 @@ static int __devinit cpsw_probe(struct platform_device *pdev)
        struct resource                 *res;
        int ret = 0, i, k = 0;
 
-       if (!data) {
-               pr_err("platform data missing\n");
-               return -ENODEV;
-       }
-
        ndev = alloc_etherdev(sizeof(struct cpsw_priv));
        if (!ndev) {
                pr_err("error allocating net_device\n");
@@ -734,13 +889,19 @@ static int __devinit cpsw_probe(struct platform_device *pdev)
        platform_set_drvdata(pdev, ndev);
        priv = netdev_priv(ndev);
        spin_lock_init(&priv->lock);
-       priv->data = *data;
        priv->pdev = pdev;
        priv->ndev = ndev;
        priv->dev  = &ndev->dev;
        priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
        priv->rx_packet_max = max(rx_packet_max, 128);
 
+       if (cpsw_probe_dt(&priv->data, pdev)) {
+               pr_err("cpsw: platform data missing\n");
+               ret = -ENODEV;
+               goto clean_ndev_ret;
+       }
+       data = &priv->data;
+
        if (is_valid_ether_addr(data->slave_data[0].mac_addr)) {
                memcpy(priv->mac_addr, data->slave_data[0].mac_addr, ETH_ALEN);
                pr_info("Detected MACID = %pM", priv->mac_addr);
@@ -996,11 +1157,17 @@ static const struct dev_pm_ops cpsw_pm_ops = {
        .resume         = cpsw_resume,
 };
 
+static const struct of_device_id cpsw_of_mtable[] = {
+       { .compatible = "ti,cpsw", },
+       { /* sentinel */ },
+};
+
 static struct platform_driver cpsw_driver = {
        .driver = {
                .name    = "cpsw",
                .owner   = THIS_MODULE,
                .pm      = &cpsw_pm_ops,
+               .of_match_table = of_match_ptr(cpsw_of_mtable),
        },
        .probe = cpsw_probe,
        .remove = __devexit_p(cpsw_remove),
index a9ca4a03d31b2fe2adc68818da636edb8d5aeb2b..51a96dbee9accbae9dbe32e8eb3d81a367819307 100644 (file)
@@ -36,6 +36,8 @@
 #include <linux/io.h>
 #include <linux/pm_runtime.h>
 #include <linux/davinci_emac.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
 
 /*
  * This timeout definition is a worst-case ultra defensive measure against
@@ -289,6 +291,25 @@ static int davinci_mdio_write(struct mii_bus *bus, int phy_id,
        return 0;
 }
 
+static int davinci_mdio_probe_dt(struct mdio_platform_data *data,
+                        struct platform_device *pdev)
+{
+       struct device_node *node = pdev->dev.of_node;
+       u32 prop;
+
+       if (!node)
+               return -EINVAL;
+
+       if (of_property_read_u32(node, "bus_freq", &prop)) {
+               pr_err("Missing bus_freq property in the DT.\n");
+               return -EINVAL;
+       }
+       data->bus_freq = prop;
+
+       return 0;
+}
+
+
 static int __devinit davinci_mdio_probe(struct platform_device *pdev)
 {
        struct mdio_platform_data *pdata = pdev->dev.platform_data;
@@ -304,8 +325,6 @@ static int __devinit davinci_mdio_probe(struct platform_device *pdev)
                return -ENOMEM;
        }
 
-       data->pdata = pdata ? (*pdata) : default_pdata;
-
        data->bus = mdiobus_alloc();
        if (!data->bus) {
                dev_err(dev, "failed to alloc mii bus\n");
@@ -313,14 +332,22 @@ static int __devinit davinci_mdio_probe(struct platform_device *pdev)
                goto bail_out;
        }
 
+       if (dev->of_node) {
+               if (davinci_mdio_probe_dt(&data->pdata, pdev))
+                       data->pdata = default_pdata;
+               snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s", pdev->name);
+       } else {
+               data->pdata = pdata ? (*pdata) : default_pdata;
+               snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s-%x",
+                        pdev->name, pdev->id);
+       }
+
        data->bus->name         = dev_name(dev);
        data->bus->read         = davinci_mdio_read,
        data->bus->write        = davinci_mdio_write,
        data->bus->reset        = davinci_mdio_reset,
        data->bus->parent       = dev;
        data->bus->priv         = data;
-       snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s-%x",
-               pdev->name, pdev->id);
 
        pm_runtime_enable(&pdev->dev);
        pm_runtime_get_sync(&pdev->dev);
@@ -456,11 +483,17 @@ static const struct dev_pm_ops davinci_mdio_pm_ops = {
        .resume         = davinci_mdio_resume,
 };
 
+static const struct of_device_id davinci_mdio_of_mtable[] = {
+       { .compatible = "ti,davinci_mdio", },
+       { /* sentinel */ },
+};
+
 static struct platform_driver davinci_mdio_driver = {
        .driver = {
                .name    = "davinci_mdio",
                .owner   = THIS_MODULE,
                .pm      = &davinci_mdio_pm_ops,
+               .of_match_table = of_match_ptr(davinci_mdio_of_mtable),
        },
        .probe = davinci_mdio_probe,
        .remove = __devexit_p(davinci_mdio_remove),
index 277c93e9ff4d7201470fd0fdea50a691c28f24de..8fa947a2d9290f2dd7dc612968199aa0f46fdde1 100644 (file)
@@ -1358,7 +1358,6 @@ static int tsi108_open(struct net_device *dev)
                        break;
                }
 
-               data->rxskbs[i] = skb;
                data->rxskbs[i] = skb;
                data->rxring[i].buf0 = virt_to_phys(data->rxskbs[i]->data);
                data->rxring[i].misc = TSI108_RX_OWN | TSI108_RX_INT;
index a5826a3111a6ed7f1570e763c2ef36ce98ddb175..2c08bf6e7bf3b326f828585a3a8fefc96d62ddb3 100644 (file)
@@ -637,8 +637,7 @@ static int __devinit w5100_hw_probe(struct platform_device *pdev)
        if (data && is_valid_ether_addr(data->mac_addr)) {
                memcpy(ndev->dev_addr, data->mac_addr, ETH_ALEN);
        } else {
-               eth_random_addr(ndev->dev_addr);
-               ndev->addr_assign_type |= NET_ADDR_RANDOM;
+               eth_hw_addr_random(ndev);
        }
 
        mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
index bdd8891c215ad3c1b4ca4fa6b5ae6ddc3724a7df..88943d90c7653565283a064e3a8ed25f65d5d213 100644 (file)
@@ -557,8 +557,7 @@ static int __devinit w5300_hw_probe(struct platform_device *pdev)
        if (data && is_valid_ether_addr(data->mac_addr)) {
                memcpy(ndev->dev_addr, data->mac_addr, ETH_ALEN);
        } else {
-               eth_random_addr(ndev->dev_addr);
-               ndev->addr_assign_type |= NET_ADDR_RANDOM;
+               eth_hw_addr_random(ndev);
        }
 
        mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
index 95ceb35930437be5103e23ac99b83becfdd669bc..5fd6f4674326f0d3236ae8a97127b842ffffa008 100644 (file)
@@ -35,6 +35,7 @@ struct hv_netvsc_packet;
 /* Represent the xfer page packet which contains 1 or more netvsc packet */
 struct xferpage_packet {
        struct list_head list_ent;
+       u32 status;
 
        /* # of netvsc packets this xfer packet contains */
        u32 count;
@@ -47,6 +48,7 @@ struct xferpage_packet {
 struct hv_netvsc_packet {
        /* Bookkeeping stuff */
        struct list_head list_ent;
+       u32 status;
 
        struct hv_device *device;
        bool is_data_pkt;
@@ -465,8 +467,6 @@ struct nvsp_message {
 
 #define NETVSC_RECEIVE_BUFFER_ID               0xcafe
 
-#define NETVSC_RECEIVE_SG_COUNT                        1
-
 /* Preallocated receive packets */
 #define NETVSC_RECEIVE_PACKETLIST_COUNT                256
 
index 4a1a5f58fa73ffd7899429bcaf28062e29f83dc3..1cd77483da50114c2c309ab70a69dff134c3a919 100644 (file)
@@ -558,7 +558,7 @@ int netvsc_send(struct hv_device *device,
 }
 
 static void netvsc_send_recv_completion(struct hv_device *device,
-                                       u64 transaction_id)
+                                       u64 transaction_id, u32 status)
 {
        struct nvsp_message recvcompMessage;
        int retries = 0;
@@ -571,9 +571,7 @@ static void netvsc_send_recv_completion(struct hv_device *device,
        recvcompMessage.hdr.msg_type =
                                NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE;
 
-       /* FIXME: Pass in the status */
-       recvcompMessage.msg.v1_msg.send_rndis_pkt_complete.status =
-               NVSP_STAT_SUCCESS;
+       recvcompMessage.msg.v1_msg.send_rndis_pkt_complete.status = status;
 
 retry_send_cmplt:
        /* Send the completion */
@@ -613,6 +611,7 @@ static void netvsc_receive_completion(void *context)
        bool fsend_receive_comp = false;
        unsigned long flags;
        struct net_device *ndev;
+       u32 status = NVSP_STAT_NONE;
 
        /*
         * Even though it seems logical to do a GetOutboundNetDevice() here to
@@ -627,6 +626,9 @@ static void netvsc_receive_completion(void *context)
        /* Overloading use of the lock. */
        spin_lock_irqsave(&net_device->recv_pkt_list_lock, flags);
 
+       if (packet->status != NVSP_STAT_SUCCESS)
+               packet->xfer_page_pkt->status = NVSP_STAT_FAIL;
+
        packet->xfer_page_pkt->count--;
 
        /*
@@ -636,6 +638,7 @@ static void netvsc_receive_completion(void *context)
        if (packet->xfer_page_pkt->count == 0) {
                fsend_receive_comp = true;
                transaction_id = packet->completion.recv.recv_completion_tid;
+               status = packet->xfer_page_pkt->status;
                list_add_tail(&packet->xfer_page_pkt->list_ent,
                              &net_device->recv_pkt_list);
 
@@ -647,7 +650,7 @@ static void netvsc_receive_completion(void *context)
 
        /* Send a receive completion for the xfer page packet */
        if (fsend_receive_comp)
-               netvsc_send_recv_completion(device, transaction_id);
+               netvsc_send_recv_completion(device, transaction_id, status);
 
 }
 
@@ -736,7 +739,8 @@ static void netvsc_receive(struct hv_device *device,
                                       flags);
 
                netvsc_send_recv_completion(device,
-                                           vmxferpage_packet->d.trans_id);
+                                           vmxferpage_packet->d.trans_id,
+                                           NVSP_STAT_FAIL);
 
                return;
        }
@@ -744,6 +748,7 @@ static void netvsc_receive(struct hv_device *device,
        /* Remove the 1st packet to represent the xfer page packet itself */
        xferpage_packet = (struct xferpage_packet *)listHead.next;
        list_del(&xferpage_packet->list_ent);
+       xferpage_packet->status = NVSP_STAT_SUCCESS;
 
        /* This is how much we can satisfy */
        xferpage_packet->count = count - 1;
@@ -760,6 +765,7 @@ static void netvsc_receive(struct hv_device *device,
                list_del(&netvsc_packet->list_ent);
 
                /* Initialize the netvsc packet */
+               netvsc_packet->status = NVSP_STAT_SUCCESS;
                netvsc_packet->xfer_page_pkt = xferpage_packet;
                netvsc_packet->completion.recv.recv_completion =
                                        netvsc_receive_completion;
@@ -904,9 +910,7 @@ int netvsc_device_add(struct hv_device *device, void *additional_info)
        INIT_LIST_HEAD(&net_device->recv_pkt_list);
 
        for (i = 0; i < NETVSC_RECEIVE_PACKETLIST_COUNT; i++) {
-               packet = kzalloc(sizeof(struct hv_netvsc_packet) +
-                                (NETVSC_RECEIVE_SG_COUNT *
-                                 sizeof(struct hv_page_buffer)), GFP_KERNEL);
+               packet = kzalloc(sizeof(struct hv_netvsc_packet), GFP_KERNEL);
                if (!packet)
                        break;
 
index 8c5a1c43c81d257c09a67385e01da93f3c24465f..f825a629a699cfe5fac73803353da4b47ca18974 100644 (file)
@@ -265,6 +265,7 @@ int netvsc_recv_callback(struct hv_device *device_obj,
        if (!net) {
                netdev_err(net, "got receive callback but net device"
                        " not initialized yet\n");
+               packet->status = NVSP_STAT_FAIL;
                return 0;
        }
 
@@ -272,6 +273,7 @@ int netvsc_recv_callback(struct hv_device *device_obj,
        skb = netdev_alloc_skb_ip_align(net, packet->total_data_buflen);
        if (unlikely(!skb)) {
                ++net->stats.rx_dropped;
+               packet->status = NVSP_STAT_FAIL;
                return 0;
        }
 
@@ -400,7 +402,7 @@ static void netvsc_send_garp(struct work_struct *w)
        ndev_ctx = container_of(w, struct net_device_context, dwork.work);
        net_device = hv_get_drvdata(ndev_ctx->device_ctx);
        net = net_device->ndev;
-       netif_notify_peers(net);
+       netdev_notify_peers(net);
 }
 
 
index 1e88a1095934a6d7b62c84cf1fff0e083f088881..928148cc32207a90da8cf173258c0ef8cc2b7e36 100644 (file)
 #include "hyperv_net.h"
 
 
+#define RNDIS_EXT_LEN 100
 struct rndis_request {
        struct list_head list_ent;
        struct completion  wait_event;
 
+       struct rndis_message response_msg;
        /*
-        * FIXME: We assumed a fixed size response here. If we do ever need to
-        * handle a bigger response, we can either define a max response
-        * message or add a response buffer variable above this field
+        * The buffer for extended info after the RNDIS response message. It's
+        * referenced based on the data offset in the RNDIS message. Its size
+        * is enough for current needs, and should be sufficient for the near
+        * future.
         */
-       struct rndis_message response_msg;
+       u8 response_ext[RNDIS_EXT_LEN];
 
        /* Simplify allocation by having a netvsc packet inline */
        struct hv_netvsc_packet pkt;
-       struct hv_page_buffer buf;
-       /* FIXME: We assumed a fixed size request here. */
+       /* Set 2 pages for rndis requests crossing page boundary */
+       struct hv_page_buffer buf[2];
+
        struct rndis_message request_msg;
-       u8 ext[100];
+       /*
+        * The buffer for the extended info after the RNDIS request message.
+        * It is referenced and sized in a similar way as response_ext.
+        */
+       u8 request_ext[RNDIS_EXT_LEN];
 };
 
 static void rndis_filter_send_completion(void *ctx);
@@ -221,6 +229,18 @@ static int rndis_filter_send_request(struct rndis_device *dev,
        packet->page_buf[0].offset =
                (unsigned long)&req->request_msg & (PAGE_SIZE - 1);
 
+       /* Add one page_buf when request_msg crossing page boundary */
+       if (packet->page_buf[0].offset + packet->page_buf[0].len > PAGE_SIZE) {
+               packet->page_buf_cnt++;
+               packet->page_buf[0].len = PAGE_SIZE -
+                       packet->page_buf[0].offset;
+               packet->page_buf[1].pfn = virt_to_phys((void *)&req->request_msg
+                       + packet->page_buf[0].len) >> PAGE_SHIFT;
+               packet->page_buf[1].offset = 0;
+               packet->page_buf[1].len = req->request_msg.msg_len -
+                       packet->page_buf[0].len;
+       }
+
        packet->completion.send.send_completion_ctx = req;/* packet; */
        packet->completion.send.send_completion =
                rndis_filter_send_request_completion;
@@ -255,7 +275,8 @@ static void rndis_filter_receive_response(struct rndis_device *dev,
        spin_unlock_irqrestore(&dev->request_lock, flags);
 
        if (found) {
-               if (resp->msg_len <= sizeof(struct rndis_message)) {
+               if (resp->msg_len <=
+                   sizeof(struct rndis_message) + RNDIS_EXT_LEN) {
                        memcpy(&request->response_msg, resp,
                               resp->msg_len);
                } else {
@@ -392,9 +413,12 @@ int rndis_filter_receive(struct hv_device *dev,
        struct rndis_device *rndis_dev;
        struct rndis_message *rndis_msg;
        struct net_device *ndev;
+       int ret = 0;
 
-       if (!net_dev)
-               return -EINVAL;
+       if (!net_dev) {
+               ret = -EINVAL;
+               goto exit;
+       }
 
        ndev = net_dev->ndev;
 
@@ -402,14 +426,16 @@ int rndis_filter_receive(struct hv_device *dev,
        if (!net_dev->extension) {
                netdev_err(ndev, "got rndis message but no rndis device - "
                          "dropping this message!\n");
-               return -ENODEV;
+               ret = -ENODEV;
+               goto exit;
        }
 
        rndis_dev = (struct rndis_device *)net_dev->extension;
        if (rndis_dev->state == RNDIS_DEV_UNINITIALIZED) {
                netdev_err(ndev, "got rndis message but rndis device "
                           "uninitialized...dropping this message!\n");
-               return -ENODEV;
+               ret = -ENODEV;
+               goto exit;
        }
 
        rndis_msg = pkt->data;
@@ -441,7 +467,11 @@ int rndis_filter_receive(struct hv_device *dev,
                break;
        }
 
-       return 0;
+exit:
+       if (ret != 0)
+               pkt->status = NVSP_STAT_FAIL;
+
+       return ret;
 }
 
 static int rndis_filter_query_device(struct rndis_device *dev, u32 oid,
@@ -641,6 +671,7 @@ int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter)
        if (t == 0) {
                netdev_err(ndev,
                        "timeout before we got a set response...\n");
+               ret = -ETIMEDOUT;
                /*
                 * We can't deallocate the request since we may still receive a
                 * send completion for it.
@@ -678,8 +709,7 @@ static int rndis_filter_init_device(struct rndis_device *dev)
        init = &request->request_msg.msg.init_req;
        init->major_ver = RNDIS_MAJOR_VERSION;
        init->minor_ver = RNDIS_MINOR_VERSION;
-       /* FIXME: Use 1536 - rounded ethernet frame size */
-       init->max_xfer_size = 2048;
+       init->max_xfer_size = 0x4000;
 
        dev->state = RNDIS_DEV_INITIALIZING;
 
diff --git a/drivers/net/ieee802154/Kconfig b/drivers/net/ieee802154/Kconfig
new file mode 100644 (file)
index 0000000..08ae465
--- /dev/null
@@ -0,0 +1,47 @@
+menuconfig IEEE802154_DRIVERS
+       tristate "IEEE 802.15.4 drivers"
+       depends on NETDEVICES && IEEE802154
+       default y
+       ---help---
+         Say Y here to get to see options for IEEE 802.15.4 Low-Rate
+         Wireless Personal Area Network device drivers. This option alone
+         does not add any kernel code.
+
+         If you say N, all options in this submenu will be skipped and
+         disabled.
+
+config IEEE802154_FAKEHARD
+       tristate "Fake LR-WPAN driver with several interconnected devices"
+       depends on  IEEE802154_DRIVERS
+       ---help---
+         Say Y here to enable the fake driver that serves as an example
+          of HardMAC device driver.
+
+          This driver can also be built as a module. To do so say M here.
+         The module will be called 'fakehard'.
+
+config IEEE802154_FAKELB
+       depends on IEEE802154_DRIVERS && MAC802154
+       tristate "IEEE 802.15.4 loopback driver"
+       ---help---
+         Say Y here to enable the fake driver that can emulate a net
+         of several interconnected radio devices.
+
+         This driver can also be built as a module. To do so say M here.
+         The module will be called 'fakelb'.
+
+config IEEE802154_AT86RF230
+        depends on IEEE802154_DRIVERS && MAC802154
+        tristate "AT86RF230/231 transceiver driver"
+        depends on SPI
+
+config IEEE802154_MRF24J40
+       tristate "Microchip MRF24J40 transceiver driver"
+       depends on IEEE802154_DRIVERS && MAC802154
+       depends on SPI
+       ---help---
+         Say Y here to enable the MRF24J40 SPI 802.15.4 wireless
+         controller.
+
+         This driver can also be built as a module. To do so, say M here.
+         The module will be called 'mrf24j40'.
diff --git a/drivers/net/ieee802154/Makefile b/drivers/net/ieee802154/Makefile
new file mode 100644 (file)
index 0000000..abb0c08
--- /dev/null
@@ -0,0 +1,4 @@
+obj-$(CONFIG_IEEE802154_FAKEHARD) += fakehard.o
+obj-$(CONFIG_IEEE802154_FAKELB) += fakelb.o
+obj-$(CONFIG_IEEE802154_AT86RF230) += at86rf230.o
+obj-$(CONFIG_IEEE802154_MRF24J40) += mrf24j40.o
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
new file mode 100644 (file)
index 0000000..ba753d8
--- /dev/null
@@ -0,0 +1,958 @@
+/*
+ * AT86RF230/RF231 driver
+ *
+ * Copyright (C) 2009-2012 Siemens AG
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Written by:
+ * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
+ * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include <linux/spinlock.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/at86rf230.h>
+#include <linux/skbuff.h>
+
+#include <net/mac802154.h>
+#include <net/wpan-phy.h>
+
+struct at86rf230_local {
+       struct spi_device *spi;
+       int rstn, slp_tr, dig2;
+
+       u8 part;
+       u8 vers;
+
+       u8 buf[2];
+       struct mutex bmux;
+
+       struct work_struct irqwork;
+       struct completion tx_complete;
+
+       struct ieee802154_dev *dev;
+
+       spinlock_t lock;
+       bool irq_disabled;
+       bool is_tx;
+};
+
+#define        RG_TRX_STATUS   (0x01)
+#define        SR_TRX_STATUS           0x01, 0x1f, 0
+#define        SR_RESERVED_01_3        0x01, 0x20, 5
+#define        SR_CCA_STATUS           0x01, 0x40, 6
+#define        SR_CCA_DONE             0x01, 0x80, 7
+#define        RG_TRX_STATE    (0x02)
+#define        SR_TRX_CMD              0x02, 0x1f, 0
+#define        SR_TRAC_STATUS          0x02, 0xe0, 5
+#define        RG_TRX_CTRL_0   (0x03)
+#define        SR_CLKM_CTRL            0x03, 0x07, 0
+#define        SR_CLKM_SHA_SEL         0x03, 0x08, 3
+#define        SR_PAD_IO_CLKM          0x03, 0x30, 4
+#define        SR_PAD_IO               0x03, 0xc0, 6
+#define        RG_TRX_CTRL_1   (0x04)
+#define        SR_IRQ_POLARITY         0x04, 0x01, 0
+#define        SR_IRQ_MASK_MODE        0x04, 0x02, 1
+#define        SR_SPI_CMD_MODE         0x04, 0x0c, 2
+#define        SR_RX_BL_CTRL           0x04, 0x10, 4
+#define        SR_TX_AUTO_CRC_ON       0x04, 0x20, 5
+#define        SR_IRQ_2_EXT_EN         0x04, 0x40, 6
+#define        SR_PA_EXT_EN            0x04, 0x80, 7
+#define        RG_PHY_TX_PWR   (0x05)
+#define        SR_TX_PWR               0x05, 0x0f, 0
+#define        SR_PA_LT                0x05, 0x30, 4
+#define        SR_PA_BUF_LT            0x05, 0xc0, 6
+#define        RG_PHY_RSSI     (0x06)
+#define        SR_RSSI                 0x06, 0x1f, 0
+#define        SR_RND_VALUE            0x06, 0x60, 5
+#define        SR_RX_CRC_VALID         0x06, 0x80, 7
+#define        RG_PHY_ED_LEVEL (0x07)
+#define        SR_ED_LEVEL             0x07, 0xff, 0
+#define        RG_PHY_CC_CCA   (0x08)
+#define        SR_CHANNEL              0x08, 0x1f, 0
+#define        SR_CCA_MODE             0x08, 0x60, 5
+#define        SR_CCA_REQUEST          0x08, 0x80, 7
+#define        RG_CCA_THRES    (0x09)
+#define        SR_CCA_ED_THRES         0x09, 0x0f, 0
+#define        SR_RESERVED_09_1        0x09, 0xf0, 4
+#define        RG_RX_CTRL      (0x0a)
+#define        SR_PDT_THRES            0x0a, 0x0f, 0
+#define        SR_RESERVED_0a_1        0x0a, 0xf0, 4
+#define        RG_SFD_VALUE    (0x0b)
+#define        SR_SFD_VALUE            0x0b, 0xff, 0
+#define        RG_TRX_CTRL_2   (0x0c)
+#define        SR_OQPSK_DATA_RATE      0x0c, 0x03, 0
+#define        SR_RESERVED_0c_2        0x0c, 0x7c, 2
+#define        SR_RX_SAFE_MODE         0x0c, 0x80, 7
+#define        RG_ANT_DIV      (0x0d)
+#define        SR_ANT_CTRL             0x0d, 0x03, 0
+#define        SR_ANT_EXT_SW_EN        0x0d, 0x04, 2
+#define        SR_ANT_DIV_EN           0x0d, 0x08, 3
+#define        SR_RESERVED_0d_2        0x0d, 0x70, 4
+#define        SR_ANT_SEL              0x0d, 0x80, 7
+#define        RG_IRQ_MASK     (0x0e)
+#define        SR_IRQ_MASK             0x0e, 0xff, 0
+#define        RG_IRQ_STATUS   (0x0f)
+#define        SR_IRQ_0_PLL_LOCK       0x0f, 0x01, 0
+#define        SR_IRQ_1_PLL_UNLOCK     0x0f, 0x02, 1
+#define        SR_IRQ_2_RX_START       0x0f, 0x04, 2
+#define        SR_IRQ_3_TRX_END        0x0f, 0x08, 3
+#define        SR_IRQ_4_CCA_ED_DONE    0x0f, 0x10, 4
+#define        SR_IRQ_5_AMI            0x0f, 0x20, 5
+#define        SR_IRQ_6_TRX_UR         0x0f, 0x40, 6
+#define        SR_IRQ_7_BAT_LOW        0x0f, 0x80, 7
+#define        RG_VREG_CTRL    (0x10)
+#define        SR_RESERVED_10_6        0x10, 0x03, 0
+#define        SR_DVDD_OK              0x10, 0x04, 2
+#define        SR_DVREG_EXT            0x10, 0x08, 3
+#define        SR_RESERVED_10_3        0x10, 0x30, 4
+#define        SR_AVDD_OK              0x10, 0x40, 6
+#define        SR_AVREG_EXT            0x10, 0x80, 7
+#define        RG_BATMON       (0x11)
+#define        SR_BATMON_VTH           0x11, 0x0f, 0
+#define        SR_BATMON_HR            0x11, 0x10, 4
+#define        SR_BATMON_OK            0x11, 0x20, 5
+#define        SR_RESERVED_11_1        0x11, 0xc0, 6
+#define        RG_XOSC_CTRL    (0x12)
+#define        SR_XTAL_TRIM            0x12, 0x0f, 0
+#define        SR_XTAL_MODE            0x12, 0xf0, 4
+#define        RG_RX_SYN       (0x15)
+#define        SR_RX_PDT_LEVEL         0x15, 0x0f, 0
+#define        SR_RESERVED_15_2        0x15, 0x70, 4
+#define        SR_RX_PDT_DIS           0x15, 0x80, 7
+#define        RG_XAH_CTRL_1   (0x17)
+#define        SR_RESERVED_17_8        0x17, 0x01, 0
+#define        SR_AACK_PROM_MODE       0x17, 0x02, 1
+#define        SR_AACK_ACK_TIME        0x17, 0x04, 2
+#define        SR_RESERVED_17_5        0x17, 0x08, 3
+#define        SR_AACK_UPLD_RES_FT     0x17, 0x10, 4
+#define        SR_AACK_FLTR_RES_FT     0x17, 0x20, 5
+#define        SR_RESERVED_17_2        0x17, 0x40, 6
+#define        SR_RESERVED_17_1        0x17, 0x80, 7
+#define        RG_FTN_CTRL     (0x18)
+#define        SR_RESERVED_18_2        0x18, 0x7f, 0
+#define        SR_FTN_START            0x18, 0x80, 7
+#define        RG_PLL_CF       (0x1a)
+#define        SR_RESERVED_1a_2        0x1a, 0x7f, 0
+#define        SR_PLL_CF_START         0x1a, 0x80, 7
+#define        RG_PLL_DCU      (0x1b)
+#define        SR_RESERVED_1b_3        0x1b, 0x3f, 0
+#define        SR_RESERVED_1b_2        0x1b, 0x40, 6
+#define        SR_PLL_DCU_START        0x1b, 0x80, 7
+#define        RG_PART_NUM     (0x1c)
+#define        SR_PART_NUM             0x1c, 0xff, 0
+#define        RG_VERSION_NUM  (0x1d)
+#define        SR_VERSION_NUM          0x1d, 0xff, 0
+#define        RG_MAN_ID_0     (0x1e)
+#define        SR_MAN_ID_0             0x1e, 0xff, 0
+#define        RG_MAN_ID_1     (0x1f)
+#define        SR_MAN_ID_1             0x1f, 0xff, 0
+#define        RG_SHORT_ADDR_0 (0x20)
+#define        SR_SHORT_ADDR_0         0x20, 0xff, 0
+#define        RG_SHORT_ADDR_1 (0x21)
+#define        SR_SHORT_ADDR_1         0x21, 0xff, 0
+#define        RG_PAN_ID_0     (0x22)
+#define        SR_PAN_ID_0             0x22, 0xff, 0
+#define        RG_PAN_ID_1     (0x23)
+#define        SR_PAN_ID_1             0x23, 0xff, 0
+#define        RG_IEEE_ADDR_0  (0x24)
+#define        SR_IEEE_ADDR_0          0x24, 0xff, 0
+#define        RG_IEEE_ADDR_1  (0x25)
+#define        SR_IEEE_ADDR_1          0x25, 0xff, 0
+#define        RG_IEEE_ADDR_2  (0x26)
+#define        SR_IEEE_ADDR_2          0x26, 0xff, 0
+#define        RG_IEEE_ADDR_3  (0x27)
+#define        SR_IEEE_ADDR_3          0x27, 0xff, 0
+#define        RG_IEEE_ADDR_4  (0x28)
+#define        SR_IEEE_ADDR_4          0x28, 0xff, 0
+#define        RG_IEEE_ADDR_5  (0x29)
+#define        SR_IEEE_ADDR_5          0x29, 0xff, 0
+#define        RG_IEEE_ADDR_6  (0x2a)
+#define        SR_IEEE_ADDR_6          0x2a, 0xff, 0
+#define        RG_IEEE_ADDR_7  (0x2b)
+#define        SR_IEEE_ADDR_7          0x2b, 0xff, 0
+#define        RG_XAH_CTRL_0   (0x2c)
+#define        SR_SLOTTED_OPERATION    0x2c, 0x01, 0
+#define        SR_MAX_CSMA_RETRIES     0x2c, 0x0e, 1
+#define        SR_MAX_FRAME_RETRIES    0x2c, 0xf0, 4
+#define        RG_CSMA_SEED_0  (0x2d)
+#define        SR_CSMA_SEED_0          0x2d, 0xff, 0
+#define        RG_CSMA_SEED_1  (0x2e)
+#define        SR_CSMA_SEED_1          0x2e, 0x07, 0
+#define        SR_AACK_I_AM_COORD      0x2e, 0x08, 3
+#define        SR_AACK_DIS_ACK         0x2e, 0x10, 4
+#define        SR_AACK_SET_PD          0x2e, 0x20, 5
+#define        SR_AACK_FVN_MODE        0x2e, 0xc0, 6
+#define        RG_CSMA_BE      (0x2f)
+#define        SR_MIN_BE               0x2f, 0x0f, 0
+#define        SR_MAX_BE               0x2f, 0xf0, 4
+
+#define CMD_REG                0x80
+#define CMD_REG_MASK   0x3f
+#define CMD_WRITE      0x40
+#define CMD_FB         0x20
+
+#define IRQ_BAT_LOW    (1 << 7)
+#define IRQ_TRX_UR     (1 << 6)
+#define IRQ_AMI                (1 << 5)
+#define IRQ_CCA_ED     (1 << 4)
+#define IRQ_TRX_END    (1 << 3)
+#define IRQ_RX_START   (1 << 2)
+#define IRQ_PLL_UNL    (1 << 1)
+#define IRQ_PLL_LOCK   (1 << 0)
+
+#define STATE_P_ON             0x00    /* BUSY */
+#define STATE_BUSY_RX          0x01
+#define STATE_BUSY_TX          0x02
+#define STATE_FORCE_TRX_OFF    0x03
+#define STATE_FORCE_TX_ON      0x04    /* IDLE */
+/* 0x05 */                             /* INVALID_PARAMETER */
+#define STATE_RX_ON            0x06
+/* 0x07 */                             /* SUCCESS */
+#define STATE_TRX_OFF          0x08
+#define STATE_TX_ON            0x09
+/* 0x0a - 0x0e */                      /* 0x0a - UNSUPPORTED_ATTRIBUTE */
+#define STATE_SLEEP            0x0F
+#define STATE_BUSY_RX_AACK     0x11
+#define STATE_BUSY_TX_ARET     0x12
+#define STATE_BUSY_RX_AACK_ON  0x16
+#define STATE_BUSY_TX_ARET_ON  0x19
+#define STATE_RX_ON_NOCLK      0x1C
+#define STATE_RX_AACK_ON_NOCLK 0x1D
+#define STATE_BUSY_RX_AACK_NOCLK 0x1E
+#define STATE_TRANSITION_IN_PROGRESS 0x1F
+
+static int
+__at86rf230_write(struct at86rf230_local *lp, u8 addr, u8 data)
+{
+       u8 *buf = lp->buf;
+       int status;
+       struct spi_message msg;
+       struct spi_transfer xfer = {
+               .len    = 2,
+               .tx_buf = buf,
+       };
+
+       buf[0] = (addr & CMD_REG_MASK) | CMD_REG | CMD_WRITE;
+       buf[1] = data;
+       dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]);
+       dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]);
+       spi_message_init(&msg);
+       spi_message_add_tail(&xfer, &msg);
+
+       status = spi_sync(lp->spi, &msg);
+       dev_vdbg(&lp->spi->dev, "status = %d\n", status);
+       if (msg.status)
+               status = msg.status;
+
+       dev_vdbg(&lp->spi->dev, "status = %d\n", status);
+       dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]);
+       dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]);
+
+       return status;
+}
+
+static int
+__at86rf230_read_subreg(struct at86rf230_local *lp,
+                       u8 addr, u8 mask, int shift, u8 *data)
+{
+       u8 *buf = lp->buf;
+       int status;
+       struct spi_message msg;
+       struct spi_transfer xfer = {
+               .len    = 2,
+               .tx_buf = buf,
+               .rx_buf = buf,
+       };
+
+       buf[0] = (addr & CMD_REG_MASK) | CMD_REG;
+       buf[1] = 0xff;
+       dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]);
+       spi_message_init(&msg);
+       spi_message_add_tail(&xfer, &msg);
+
+       status = spi_sync(lp->spi, &msg);
+       dev_vdbg(&lp->spi->dev, "status = %d\n", status);
+       if (msg.status)
+               status = msg.status;
+
+       dev_vdbg(&lp->spi->dev, "status = %d\n", status);
+       dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]);
+       dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]);
+
+       if (status == 0)
+               *data = buf[1];
+
+       return status;
+}
+
+static int
+at86rf230_read_subreg(struct at86rf230_local *lp,
+                     u8 addr, u8 mask, int shift, u8 *data)
+{
+       int status;
+
+       mutex_lock(&lp->bmux);
+       status = __at86rf230_read_subreg(lp, addr, mask, shift, data);
+       mutex_unlock(&lp->bmux);
+
+       return status;
+}
+
+static int
+at86rf230_write_subreg(struct at86rf230_local *lp,
+                      u8 addr, u8 mask, int shift, u8 data)
+{
+       int status;
+       u8 val;
+
+       mutex_lock(&lp->bmux);
+       status = __at86rf230_read_subreg(lp, addr, 0xff, 0, &val);
+       if (status)
+               goto out;
+
+       val &= ~mask;
+       val |= (data << shift) & mask;
+
+       status = __at86rf230_write(lp, addr, val);
+out:
+       mutex_unlock(&lp->bmux);
+
+       return status;
+}
+
+static int
+at86rf230_write_fbuf(struct at86rf230_local *lp, u8 *data, u8 len)
+{
+       u8 *buf = lp->buf;
+       int status;
+       struct spi_message msg;
+       struct spi_transfer xfer_head = {
+               .len            = 2,
+               .tx_buf         = buf,
+
+       };
+       struct spi_transfer xfer_buf = {
+               .len            = len,
+               .tx_buf         = data,
+       };
+
+       mutex_lock(&lp->bmux);
+       buf[0] = CMD_WRITE | CMD_FB;
+       buf[1] = len + 2; /* 2 bytes for CRC that isn't written */
+
+       dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]);
+       dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]);
+
+       spi_message_init(&msg);
+       spi_message_add_tail(&xfer_head, &msg);
+       spi_message_add_tail(&xfer_buf, &msg);
+
+       status = spi_sync(lp->spi, &msg);
+       dev_vdbg(&lp->spi->dev, "status = %d\n", status);
+       if (msg.status)
+               status = msg.status;
+
+       dev_vdbg(&lp->spi->dev, "status = %d\n", status);
+       dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]);
+       dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]);
+
+       mutex_unlock(&lp->bmux);
+       return status;
+}
+
+static int
+at86rf230_read_fbuf(struct at86rf230_local *lp, u8 *data, u8 *len, u8 *lqi)
+{
+       u8 *buf = lp->buf;
+       int status;
+       struct spi_message msg;
+       struct spi_transfer xfer_head = {
+               .len            = 2,
+               .tx_buf         = buf,
+               .rx_buf         = buf,
+       };
+       struct spi_transfer xfer_head1 = {
+               .len            = 2,
+               .tx_buf         = buf,
+               .rx_buf         = buf,
+       };
+       struct spi_transfer xfer_buf = {
+               .len            = 0,
+               .rx_buf         = data,
+       };
+
+       mutex_lock(&lp->bmux);
+
+       buf[0] = CMD_FB;
+       buf[1] = 0x00;
+
+       spi_message_init(&msg);
+       spi_message_add_tail(&xfer_head, &msg);
+
+       status = spi_sync(lp->spi, &msg);
+       dev_vdbg(&lp->spi->dev, "status = %d\n", status);
+
+       xfer_buf.len = *(buf + 1) + 1;
+       *len = buf[1];
+
+       buf[0] = CMD_FB;
+       buf[1] = 0x00;
+
+       spi_message_init(&msg);
+       spi_message_add_tail(&xfer_head1, &msg);
+       spi_message_add_tail(&xfer_buf, &msg);
+
+       status = spi_sync(lp->spi, &msg);
+
+       if (msg.status)
+               status = msg.status;
+
+       dev_vdbg(&lp->spi->dev, "status = %d\n", status);
+       dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]);
+       dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]);
+
+       if (status) {
+               if (lqi && (*len > lp->buf[1]))
+                       *lqi = data[lp->buf[1]];
+       }
+       mutex_unlock(&lp->bmux);
+
+       return status;
+}
+
+static int
+at86rf230_ed(struct ieee802154_dev *dev, u8 *level)
+{
+       might_sleep();
+       BUG_ON(!level);
+       *level = 0xbe;
+       return 0;
+}
+
+static int
+at86rf230_state(struct ieee802154_dev *dev, int state)
+{
+       struct at86rf230_local *lp = dev->priv;
+       int rc;
+       u8 val;
+       u8 desired_status;
+
+       might_sleep();
+
+       if (state == STATE_FORCE_TX_ON)
+               desired_status = STATE_TX_ON;
+       else if (state == STATE_FORCE_TRX_OFF)
+               desired_status = STATE_TRX_OFF;
+       else
+               desired_status = state;
+
+       do {
+               rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &val);
+               if (rc)
+                       goto err;
+       } while (val == STATE_TRANSITION_IN_PROGRESS);
+
+       if (val == desired_status)
+               return 0;
+
+       /* state is equal to phy states */
+       rc = at86rf230_write_subreg(lp, SR_TRX_CMD, state);
+       if (rc)
+               goto err;
+
+       do {
+               rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &val);
+               if (rc)
+                       goto err;
+       } while (val == STATE_TRANSITION_IN_PROGRESS);
+
+
+       if (val == desired_status)
+               return 0;
+
+       pr_err("unexpected state change: %d, asked for %d\n", val, state);
+       return -EBUSY;
+
+err:
+       pr_err("error: %d\n", rc);
+       return rc;
+}
+
+static int
+at86rf230_start(struct ieee802154_dev *dev)
+{
+       struct at86rf230_local *lp = dev->priv;
+       u8 rc;
+
+       rc = at86rf230_write_subreg(lp, SR_RX_SAFE_MODE, 1);
+       if (rc)
+               return rc;
+
+       return at86rf230_state(dev, STATE_RX_ON);
+}
+
+static void
+at86rf230_stop(struct ieee802154_dev *dev)
+{
+       at86rf230_state(dev, STATE_FORCE_TRX_OFF);
+}
+
+static int
+at86rf230_channel(struct ieee802154_dev *dev, int page, int channel)
+{
+       struct at86rf230_local *lp = dev->priv;
+       int rc;
+
+       might_sleep();
+
+       /* Only channel page 0, channels 11-26 (2.4 GHz band) are valid. */
+       if (page != 0 || channel < 11 || channel > 26) {
+               WARN_ON(1);
+               return -EINVAL;
+       }
+
+       rc = at86rf230_write_subreg(lp, SR_CHANNEL, channel);
+       /* Previously 'rc' was ignored here: a failed SPI write still
+        * reported success and cached a channel the radio never tuned to.
+        */
+       if (rc)
+               return rc;
+
+       msleep(1); /* Wait for PLL */
+       dev->phy->current_channel = channel;
+
+       return 0;
+}
+
+static int
+at86rf230_xmit(struct ieee802154_dev *dev, struct sk_buff *skb)
+{
+       struct at86rf230_local *lp = dev->priv;
+       int rc;
+       unsigned long flags;
+
+       /* Refuse to transmit while the ISR has masked the line and the
+        * irqwork has not yet run - the transceiver is busy with an event.
+        */
+       spin_lock(&lp->lock);
+       if  (lp->irq_disabled) {
+               spin_unlock(&lp->lock);
+               return -EBUSY;
+       }
+       spin_unlock(&lp->lock);
+
+       might_sleep();
+
+       rc = at86rf230_state(dev, STATE_FORCE_TX_ON);
+       if (rc)
+               goto err;
+
+       /* Mark a TX in progress so the irqwork interprets the next TRX_END
+        * interrupt as "transmit done" rather than "frame received".
+        */
+       spin_lock_irqsave(&lp->lock, flags);
+       lp->is_tx = 1;
+       INIT_COMPLETION(lp->tx_complete);
+       spin_unlock_irqrestore(&lp->lock, flags);
+
+       rc = at86rf230_write_fbuf(lp, skb->data, skb->len);
+       if (rc)
+               goto err_rx;
+
+       rc = at86rf230_write_subreg(lp, SR_TRX_CMD, STATE_BUSY_TX);
+       if (rc)
+               goto err_rx;
+
+       /* Sleep until irqwork signals TRX_END (or a signal interrupts us). */
+       rc = wait_for_completion_interruptible(&lp->tx_complete);
+       if (rc < 0)
+               goto err_rx;
+
+       /* Return to receive mode after a successful transmit. */
+       rc = at86rf230_start(dev);
+
+       return rc;
+
+err_rx:
+       at86rf230_start(dev);
+err:
+       pr_err("error: %d\n", rc);
+
+       spin_lock_irqsave(&lp->lock, flags);
+       lp->is_tx = 0;
+       spin_unlock_irqrestore(&lp->lock, flags);
+
+       return rc;
+}
+
+/* Pull a received frame out of the radio's frame buffer and hand it to
+ * the mac802154 stack.  Called from the irqwork on an RX TRX_END event.
+ */
+static int at86rf230_rx(struct at86rf230_local *lp)
+{
+       /* 128 bytes covers the radio's whole frame buffer. */
+       u8 len = 128, lqi = 0;
+       struct sk_buff *skb;
+
+       skb = alloc_skb(len, GFP_KERNEL);
+
+       if (!skb)
+               return -ENOMEM;
+
+       /* read_fbuf updates 'len' to the real frame length and fills 'lqi'. */
+       if (at86rf230_read_fbuf(lp, skb_put(skb, len), &len, &lqi))
+               goto err;
+
+       /* A valid frame carries at least the 2-byte CRC. */
+       if (len < 2)
+               goto err;
+
+       skb_trim(skb, len - 2); /* We do not put CRC into the frame */
+
+       ieee802154_rx_irqsafe(lp->dev, skb, lqi);
+
+       dev_dbg(&lp->spi->dev, "READ_FBUF: %d %x\n", len, lqi);
+
+       return 0;
+err:
+       /* NOTE(review): this message is also printed when the frame-buffer
+        * read itself failed, not only for short frames.
+        */
+       pr_debug("received frame is too small\n");
+
+       kfree_skb(skb);
+       return -EINVAL;
+}
+
+/* Callbacks exported to the mac802154 (SoftMAC) layer. */
+static struct ieee802154_ops at86rf230_ops = {
+       .owner = THIS_MODULE,
+       .xmit = at86rf230_xmit,
+       .ed = at86rf230_ed,
+       .set_channel = at86rf230_channel,
+       .start = at86rf230_start,
+       .stop = at86rf230_stop,
+};
+
+/* Bottom half for the radio interrupt: reads (and thereby clears) the
+ * IRQ status register, dispatches TRX_END events, then re-arms the line.
+ */
+static void at86rf230_irqwork(struct work_struct *work)
+{
+       struct at86rf230_local *lp =
+               container_of(work, struct at86rf230_local, irqwork);
+       u8 status = 0, val;
+       int rc;
+       unsigned long flags;
+
+       rc = at86rf230_read_subreg(lp, RG_IRQ_STATUS, 0xff, 0, &val);
+       /* On SPI failure 'val' is uninitialized; previously it was OR-ed
+        * into 'status' regardless.  Skip event processing on error, but
+        * still fall through so the interrupt line is re-enabled.
+        */
+       if (rc)
+               goto out;
+       status |= val;
+
+       status &= ~IRQ_PLL_LOCK; /* ignore */
+       status &= ~IRQ_RX_START; /* ignore */
+       status &= ~IRQ_AMI; /* ignore */
+       status &= ~IRQ_TRX_UR; /* FIXME: possibly handle ???*/
+
+       if (status & IRQ_TRX_END) {
+               spin_lock_irqsave(&lp->lock, flags);
+               status &= ~IRQ_TRX_END;
+               if (lp->is_tx) {
+                       /* TX finished: wake the sleeping xmit path. */
+                       lp->is_tx = 0;
+                       spin_unlock_irqrestore(&lp->lock, flags);
+                       complete(&lp->tx_complete);
+               } else {
+                       /* Frame received: fetch it from the buffer. */
+                       spin_unlock_irqrestore(&lp->lock, flags);
+                       at86rf230_rx(lp);
+               }
+       }
+
+out:
+       /* Re-arm the interrupt that the ISR disabled. */
+       spin_lock_irqsave(&lp->lock, flags);
+       lp->irq_disabled = 0;
+       spin_unlock_irqrestore(&lp->lock, flags);
+
+       enable_irq(lp->spi->irq);
+}
+
+/* Hard IRQ handler.  SPI transfers may sleep, so all register access is
+ * deferred to the workqueue; the line stays masked until irqwork has run.
+ */
+static irqreturn_t at86rf230_isr(int irq, void *data)
+{
+       struct at86rf230_local *lp = data;
+
+       disable_irq_nosync(irq);
+
+       /* Flag checked by at86rf230_xmit() to back off while busy. */
+       spin_lock(&lp->lock);
+       lp->irq_disabled = 1;
+       spin_unlock(&lp->lock);
+
+       schedule_work(&lp->irqwork);
+
+       return IRQ_HANDLED;
+}
+
+
+/* Bring the transceiver out of P_ON into TX_ON, unmask its interrupts,
+ * disable the CLKM clock output and verify the supply-voltage status bits.
+ */
+static int at86rf230_hw_init(struct at86rf230_local *lp)
+{
+       u8 status;
+       int rc;
+
+       rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &status);
+       if (rc)
+               return rc;
+
+       dev_info(&lp->spi->dev, "Status: %02x\n", status);
+       if (status == STATE_P_ON) {
+               /* Fresh power-on: step down to TRX_OFF first. */
+               rc = at86rf230_write_subreg(lp, SR_TRX_CMD, STATE_TRX_OFF);
+               if (rc)
+                       return rc;
+               msleep(1);
+               rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &status);
+               if (rc)
+                       return rc;
+               dev_info(&lp->spi->dev, "Status: %02x\n", status);
+       }
+
+       rc = at86rf230_write_subreg(lp, SR_IRQ_MASK, 0xff); /* IRQ_TRX_UR |
+                                                            * IRQ_CCA_ED |
+                                                            * IRQ_TRX_END |
+                                                            * IRQ_PLL_UNL |
+                                                            * IRQ_PLL_LOCK
+                                                            */
+       if (rc)
+               return rc;
+
+       /* CLKM changes are applied immediately */
+       rc = at86rf230_write_subreg(lp, SR_CLKM_SHA_SEL, 0x00);
+       if (rc)
+               return rc;
+
+       /* Turn CLKM Off */
+       rc = at86rf230_write_subreg(lp, SR_CLKM_CTRL, 0x00);
+       if (rc)
+               return rc;
+       /* Wait the next SLEEP cycle */
+       msleep(100);
+
+       rc = at86rf230_write_subreg(lp, SR_TRX_CMD, STATE_TX_ON);
+       if (rc)
+               return rc;
+       msleep(1);
+
+       rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &status);
+       if (rc)
+               return rc;
+       dev_info(&lp->spi->dev, "Status: %02x\n", status);
+
+       /* Both supply rails must report OK before the radio is usable. */
+       rc = at86rf230_read_subreg(lp, SR_DVDD_OK, &status);
+       if (rc)
+               return rc;
+       if (!status) {
+               dev_err(&lp->spi->dev, "DVDD error\n");
+               return -EINVAL;
+       }
+
+       rc = at86rf230_read_subreg(lp, SR_AVDD_OK, &status);
+       if (rc)
+               return rc;
+       if (!status) {
+               dev_err(&lp->spi->dev, "AVDD error\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+/* No device state is saved or restored across suspend; these are stubs
+ * that merely satisfy the spi_driver PM interface.
+ */
+static int at86rf230_suspend(struct spi_device *spi, pm_message_t message)
+{
+       return 0;
+}
+
+static int at86rf230_resume(struct spi_device *spi)
+{
+       return 0;
+}
+
+/* Copy the board-specific GPIO numbers (reset, sleep/TR, DIG2) from
+ * platform data into the driver state.  Returns -EINVAL when the board
+ * provided no platform data.
+ */
+static int at86rf230_fill_data(struct spi_device *spi)
+{
+       struct at86rf230_local *lp = spi_get_drvdata(spi);
+       struct at86rf230_platform_data *pdata = spi->dev.platform_data;
+
+       if (!pdata) {
+               dev_err(&spi->dev, "no platform_data\n");
+               return -EINVAL;
+       }
+
+       lp->rstn = pdata->rstn;
+       lp->slp_tr = pdata->slp_tr;
+       lp->dig2 = pdata->dig2;
+
+       return 0;
+}
+
+/* Probe: allocate the ieee802154 device, claim GPIOs, reset and identify
+ * the chip, initialize the hardware and register with the stack.
+ * Cleanup removed from this version: a redundant 'dev->priv = lp'
+ * self-assignment and an unreachable unregister call that sat between
+ * 'return rc;' and the first error label.
+ */
+static int __devinit at86rf230_probe(struct spi_device *spi)
+{
+       struct ieee802154_dev *dev;
+       struct at86rf230_local *lp;
+       u8 man_id_0, man_id_1;
+       int rc;
+       const char *chip;
+       int supported = 0;
+
+       if (!spi->irq) {
+               dev_err(&spi->dev, "no IRQ specified\n");
+               return -EINVAL;
+       }
+
+       dev = ieee802154_alloc_device(sizeof(*lp), &at86rf230_ops);
+       if (!dev)
+               return -ENOMEM;
+
+       lp = dev->priv;
+       lp->dev = dev;
+
+       lp->spi = spi;
+
+       dev->parent = &spi->dev;
+       dev->extra_tx_headroom = 0;
+       /* We do support only 2.4 Ghz */
+       dev->phy->channels_supported[0] = 0x7FFF800;
+       dev->flags = IEEE802154_HW_OMIT_CKSUM;
+
+       mutex_init(&lp->bmux);
+       INIT_WORK(&lp->irqwork, at86rf230_irqwork);
+       spin_lock_init(&lp->lock);
+       init_completion(&lp->tx_complete);
+
+       spi_set_drvdata(spi, lp);
+
+       rc = at86rf230_fill_data(spi);
+       if (rc)
+               goto err_fill;
+
+       rc = gpio_request(lp->rstn, "rstn");
+       if (rc)
+               goto err_rstn;
+
+       if (gpio_is_valid(lp->slp_tr)) {
+               rc = gpio_request(lp->slp_tr, "slp_tr");
+               if (rc)
+                       goto err_slp_tr;
+       }
+
+       rc = gpio_direction_output(lp->rstn, 1);
+       if (rc)
+               goto err_gpio_dir;
+
+       if (gpio_is_valid(lp->slp_tr)) {
+               rc = gpio_direction_output(lp->slp_tr, 0);
+               if (rc)
+                       goto err_gpio_dir;
+       }
+
+       /* Reset pulse: drive RSTN low for ~1 ms, then release. */
+       msleep(1);
+       gpio_set_value(lp->rstn, 0);
+       msleep(1);
+       gpio_set_value(lp->rstn, 1);
+       msleep(1);
+
+       /* Identify the chip; Atmel's manufacturer ID is 0x001f. */
+       rc = at86rf230_read_subreg(lp, SR_MAN_ID_0, &man_id_0);
+       if (rc)
+               goto err_gpio_dir;
+       rc = at86rf230_read_subreg(lp, SR_MAN_ID_1, &man_id_1);
+       if (rc)
+               goto err_gpio_dir;
+
+       if (man_id_1 != 0x00 || man_id_0 != 0x1f) {
+               dev_err(&spi->dev, "Non-Atmel dev found (MAN_ID %02x %02x)\n",
+                       man_id_1, man_id_0);
+               rc = -EINVAL;
+               goto err_gpio_dir;
+       }
+
+       rc = at86rf230_read_subreg(lp, SR_PART_NUM, &lp->part);
+       if (rc)
+               goto err_gpio_dir;
+
+       rc = at86rf230_read_subreg(lp, SR_VERSION_NUM, &lp->vers);
+       if (rc)
+               goto err_gpio_dir;
+
+       switch (lp->part) {
+       case 2:
+               chip = "at86rf230";
+               /* supported = 1;  FIXME: should be easy to support; */
+               break;
+       case 3:
+               chip = "at86rf231";
+               supported = 1;
+               break;
+       default:
+               chip = "UNKNOWN";
+               break;
+       }
+
+       dev_info(&spi->dev, "Detected %s chip version %d\n", chip, lp->vers);
+       if (!supported) {
+               rc = -ENOTSUPP;
+               goto err_gpio_dir;
+       }
+
+       rc = at86rf230_hw_init(lp);
+       if (rc)
+               goto err_gpio_dir;
+
+       rc = request_irq(spi->irq, at86rf230_isr, IRQF_SHARED,
+                        dev_name(&spi->dev), lp);
+       if (rc)
+               goto err_gpio_dir;
+
+       rc = ieee802154_register_device(lp->dev);
+       if (rc)
+               goto err_irq;
+
+       return rc;
+
+err_irq:
+       free_irq(spi->irq, lp);
+       flush_work(&lp->irqwork);
+err_gpio_dir:
+       if (gpio_is_valid(lp->slp_tr))
+               gpio_free(lp->slp_tr);
+err_slp_tr:
+       gpio_free(lp->rstn);
+err_rstn:
+err_fill:
+       spi_set_drvdata(spi, NULL);
+       mutex_destroy(&lp->bmux);
+       ieee802154_free_device(lp->dev);
+       return rc;
+}
+
+/* Tear-down in strict reverse order of probe. */
+static int __devexit at86rf230_remove(struct spi_device *spi)
+{
+       struct at86rf230_local *lp = spi_get_drvdata(spi);
+
+       ieee802154_unregister_device(lp->dev);
+
+       /* Drop the IRQ and drain any pending irqwork before freeing. */
+       free_irq(spi->irq, lp);
+       flush_work(&lp->irqwork);
+
+       if (gpio_is_valid(lp->slp_tr))
+               gpio_free(lp->slp_tr);
+       gpio_free(lp->rstn);
+
+       spi_set_drvdata(spi, NULL);
+       mutex_destroy(&lp->bmux);
+       ieee802154_free_device(lp->dev);
+
+       dev_dbg(&spi->dev, "unregistered at86rf230\n");
+       return 0;
+}
+
+/* SPI driver glue: binds to boards declaring an "at86rf230" SPI device. */
+static struct spi_driver at86rf230_driver = {
+       .driver = {
+               .name   = "at86rf230",
+               .owner  = THIS_MODULE,
+       },
+       .probe      = at86rf230_probe,
+       .remove     = __devexit_p(at86rf230_remove),
+       .suspend    = at86rf230_suspend,
+       .resume     = at86rf230_resume,
+};
+
+module_spi_driver(at86rf230_driver);
+
+MODULE_DESCRIPTION("AT86RF230 Transceiver Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ieee802154/fakehard.c b/drivers/net/ieee802154/fakehard.c
new file mode 100644 (file)
index 0000000..7d39add
--- /dev/null
@@ -0,0 +1,448 @@
+/*
+ * Sample driver for HardMAC IEEE 802.15.4 devices
+ *
+ * Copyright (C) 2009 Siemens AG
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Written by:
+ * Dmitry Eremin-Solenikov <dmitry.baryshkov@siemens.com>
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+
+#include <net/af_ieee802154.h>
+#include <net/ieee802154_netdev.h>
+#include <net/ieee802154.h>
+#include <net/nl802154.h>
+#include <net/wpan-phy.h>
+
+/* Per-netdev private data: only carries the backing wpan-phy. */
+struct fakehard_priv {
+       struct wpan_phy *phy;
+};
+
+/* Fetch the wpan-phy handle stored in the netdev's private area. */
+static struct wpan_phy *fake_to_phy(const struct net_device *dev)
+{
+       const struct fakehard_priv *priv = netdev_priv(dev);
+
+       return priv->phy;
+}
+
+/**
+ * fake_get_phy - Return a phy corresponding to this device.
+ * @dev: The network device for which to return the wan-phy object
+ *
+ * This function returns a wpan-phy object corresponding to the passed
+ * network device. Reference counter for wpan-phy object is incremented,
+ * so when the wpan-phy isn't necessary, you should drop the reference
+ * via @wpan_phy_put() call.
+ */
+static struct wpan_phy *fake_get_phy(const struct net_device *dev)
+{
+       /* Bump the embedded device refcount before handing the phy out;
+        * the caller releases it with wpan_phy_put().
+        */
+       return to_phy(get_device(&fake_to_phy(dev)->dev));
+}
+
+/**
+ * fake_get_pan_id - Retrieve the PAN ID of the device.
+ * @dev: The network device to retrieve the PAN of.
+ *
+ * Return the ID of the PAN from the PIB.
+ */
+static u16 fake_get_pan_id(const struct net_device *dev)
+{
+       BUG_ON(dev->type != ARPHRD_IEEE802154);
+
+       return 0xeba1; /* hard-coded PAN ID of the emulated network */
+}
+
+/**
+ * fake_get_short_addr - Retrieve the short address of the device.
+ * @dev: The network device to retrieve the short address of.
+ *
+ * Returns the IEEE 802.15.4 short-form address cached for this
+ * device. If the device has not yet had a short address assigned
+ * then this should return 0xFFFF to indicate a lack of association.
+ */
+static u16 fake_get_short_addr(const struct net_device *dev)
+{
+       BUG_ON(dev->type != ARPHRD_IEEE802154);
+
+       return 0x1; /* hard-coded emulated short address (never 0xffff) */
+}
+
+/**
+ * fake_get_dsn - Retrieve the DSN of the device.
+ * @dev: The network device to retrieve the DSN for.
+ *
+ * Returns the IEEE 802.15.4 DSN for the network device.
+ * The DSN is the sequence number which will be added to each
+ * packet or MAC command frame by the MAC during transmission.
+ *
+ * DSN means 'Data Sequence Number'.
+ *
+ * Note: This is in section 7.2.1.2 of the IEEE 802.15.4-2006
+ *       document.
+ */
+static u8 fake_get_dsn(const struct net_device *dev)
+{
+       BUG_ON(dev->type != ARPHRD_IEEE802154);
+
+       return 0x00; /* DSN are implemented in HW, so return just 0 */
+}
+
+/**
+ * fake_get_bsn - Retrieve the BSN of the device.
+ * @dev: The network device to retrieve the BSN for.
+ *
+ * Returns the IEEE 802.15.4 BSN for the network device.
+ * The BSN is the sequence number which will be added to each
+ * beacon frame sent by the MAC.
+ *
+ * BSN means 'Beacon Sequence Number'.
+ *
+ * Note: This is in section 7.2.1.2 of the IEEE 802.15.4-2006
+ *       document.
+ */
+static u8 fake_get_bsn(const struct net_device *dev)
+{
+       BUG_ON(dev->type != ARPHRD_IEEE802154);
+
+       return 0x00; /* BSN are implemented in HW, so return just 0 */
+}
+
+/**
+ * fake_assoc_req - Make an association request to the HW.
+ * @dev: The network device which we are associating to a network.
+ * @addr: The coordinator with which we wish to associate.
+ * @channel: The channel on which to associate.
+ * @cap: The capability information field to use in the association.
+ *
+ * Start an association with a coordinator. The coordinator's address
+ * and PAN ID can be found in @addr.
+ *
+ * Note: This is in section 7.3.1 and 7.5.3.1 of the IEEE
+ *       802.15.4-2006 document.
+ */
+static int fake_assoc_req(struct net_device *dev,
+               struct ieee802154_addr *addr, u8 channel, u8 page, u8 cap)
+{
+       struct wpan_phy *phy = fake_to_phy(dev);
+
+       /* Record the requested channel/page in the PIB under its lock. */
+       mutex_lock(&phy->pib_lock);
+       phy->current_channel = channel;
+       phy->current_page = page;
+       mutex_unlock(&phy->pib_lock);
+
+       /* We simply emulate it here */
+       return ieee802154_nl_assoc_confirm(dev, fake_get_short_addr(dev),
+                       IEEE802154_SUCCESS);
+}
+
+/**
+ * fake_assoc_resp - Send an association response to a device.
+ * @dev: The network device on which to send the response.
+ * @addr: The address of the device to respond to.
+ * @short_addr: The assigned short address for the device (if any).
+ * @status: The result of the association request.
+ *
+ * Queue the association response of the coordinator to another
+ * device's attempt to associate with the network which we
+ * coordinate. This is then added to the indirect-send queue to be
+ * transmitted to the end device when it polls for data.
+ *
+ * Note: This is in section 7.3.2 and 7.5.3.1 of the IEEE
+ *       802.15.4-2006 document.
+ */
+static int fake_assoc_resp(struct net_device *dev,
+               struct ieee802154_addr *addr, u16 short_addr, u8 status)
+{
+       return 0; /* nothing to queue on emulated hardware */
+}
+
+/**
+ * fake_disassoc_req - Disassociate a device from a network.
+ * @dev: The network device on which we're disassociating a device.
+ * @addr: The device to disassociate from the network.
+ * @reason: The reason to give to the device for being disassociated.
+ *
+ * This sends a disassociation notification to the device being
+ * disassociated from the network.
+ *
+ * Note: This is in section 7.5.3.2 of the IEEE 802.15.4-2006
+ *       document, with the reason described in 7.3.3.2.
+ */
+static int fake_disassoc_req(struct net_device *dev,
+               struct ieee802154_addr *addr, u8 reason)
+{
+       /* Confirm success immediately; no frame is actually transmitted. */
+       return ieee802154_nl_disassoc_confirm(dev, IEEE802154_SUCCESS);
+}
+
+/**
+ * fake_start_req - Start an IEEE 802.15.4 PAN.
+ * @dev: The network device on which to start the PAN.
+ * @addr: The coordinator address to use when starting the PAN.
+ * @channel: The channel on which to start the PAN.
+ * @bcn_ord: Beacon order.
+ * @sf_ord: Superframe order.
+ * @pan_coord: Whether or not we are the PAN coordinator or just
+ *             requesting a realignment perhaps?
+ * @blx: Battery Life Extension feature bitfield.
+ * @coord_realign: Something to realign something else.
+ *
+ * If pan_coord is non-zero then this starts a network with the
+ * provided parameters, otherwise it attempts a coordinator
+ * realignment of the stated network instead.
+ *
+ * Note: This is in section 7.5.2.3 of the IEEE 802.15.4-2006
+ * document, with 7.3.8 describing coordinator realignment.
+ */
+static int fake_start_req(struct net_device *dev, struct ieee802154_addr *addr,
+                               u8 channel, u8 page,
+                               u8 bcn_ord, u8 sf_ord, u8 pan_coord, u8 blx,
+                               u8 coord_realign)
+{
+       struct wpan_phy *phy = fake_to_phy(dev);
+
+       /* Record the requested channel/page in the PIB under its lock. */
+       mutex_lock(&phy->pib_lock);
+       phy->current_channel = channel;
+       phy->current_page = page;
+       mutex_unlock(&phy->pib_lock);
+
+       /* We don't emulate beacons here at all, so START should fail */
+       ieee802154_nl_start_confirm(dev, IEEE802154_INVALID_PARAMETER);
+       return 0;
+}
+
+/**
+ * fake_scan_req - Start a channel scan.
+ * @dev: The network device on which to perform a channel scan.
+ * @type: The type of scan to perform.
+ * @channels: The channel bitmask to scan.
+ * @duration: How long to spend on each channel.
+ *
+ * This starts either a passive (energy) scan or an active (PAN) scan
+ * on the channels indicated in the @channels bitmask. The duration of
+ * the scan is measured in terms of superframe duration. Specifically,
+ * the scan will spend aBaseSuperFrameDuration * ((2^n) + 1) on each
+ * channel.
+ *
+ * Note: This is in section 7.5.2.1 of the IEEE 802.15.4-2006 document.
+ */
+static int fake_scan_req(struct net_device *dev, u8 type, u32 channels,
+               u8 page, u8 duration)
+{
+       /* 27 zeroed energy-detect levels; only passed for an ED scan. */
+       u8 edl[27] = {};
+       return ieee802154_nl_scan_confirm(dev, IEEE802154_SUCCESS, type,
+                       channels, page,
+                       type == IEEE802154_MAC_SCAN_ED ? edl : NULL);
+}
+
+/* MLME callbacks; installed into the netdev via dev->ml_priv in probe. */
+static struct ieee802154_mlme_ops fake_mlme = {
+       .assoc_req = fake_assoc_req,
+       .assoc_resp = fake_assoc_resp,
+       .disassoc_req = fake_disassoc_req,
+       .start_req = fake_start_req,
+       .scan_req = fake_scan_req,
+
+       .get_phy = fake_get_phy,
+
+       .get_pan_id = fake_get_pan_id,
+       .get_short_addr = fake_get_short_addr,
+       .get_dsn = fake_get_dsn,
+       .get_bsn = fake_get_bsn,
+};
+
+/* Netdev open: just start the TX queue; there is no hardware to wake. */
+static int ieee802154_fake_open(struct net_device *dev)
+{
+       netif_start_queue(dev);
+       return 0;
+}
+
+/* Netdev stop: stop the TX queue. */
+static int ieee802154_fake_close(struct net_device *dev)
+{
+       netif_stop_queue(dev);
+       return 0;
+}
+
+/* Transmit path: count the packet, then drop it (no real radio). */
+static netdev_tx_t ieee802154_fake_xmit(struct sk_buff *skb,
+                                             struct net_device *dev)
+{
+       dev->stats.tx_packets++;
+       dev->stats.tx_bytes += skb->len;
+
+       /* FIXME: do hardware work here ... */
+
+       dev_kfree_skb(skb);
+       return NETDEV_TX_OK;
+}
+
+
+/* SIOCGIFADDR handler: report the (emulated) PAN ID and short address
+ * as an AF_IEEE802154 sockaddr.  All other ioctls fall through.
+ */
+static int ieee802154_fake_ioctl(struct net_device *dev, struct ifreq *ifr,
+               int cmd)
+{
+       struct sockaddr_ieee802154 *sa =
+               (struct sockaddr_ieee802154 *)&ifr->ifr_addr;
+       u16 pan_id, short_addr;
+
+       switch (cmd) {
+       case SIOCGIFADDR:
+               /* FIXME: fixed here, get from device IRL */
+               pan_id = fake_get_pan_id(dev);
+               short_addr = fake_get_short_addr(dev);
+               /* Broadcast values mean "not associated yet". */
+               if (pan_id == IEEE802154_PANID_BROADCAST ||
+                   short_addr == IEEE802154_ADDR_BROADCAST)
+                       return -EADDRNOTAVAIL;
+
+               sa->family = AF_IEEE802154;
+               sa->addr.addr_type = IEEE802154_ADDR_SHORT;
+               sa->addr.pan_id = pan_id;
+               sa->addr.short_addr = short_addr;
+               return 0;
+       }
+       return -ENOIOCTLCMD;
+}
+
+static int ieee802154_fake_mac_addr(struct net_device *dev, void *p)
+{
+       return -EBUSY; /* HW address is built into the device */
+}
+
+/* Standard netdev callbacks for the emulated interface. */
+static const struct net_device_ops fake_ops = {
+       .ndo_open               = ieee802154_fake_open,
+       .ndo_stop               = ieee802154_fake_close,
+       .ndo_start_xmit         = ieee802154_fake_xmit,
+       .ndo_do_ioctl           = ieee802154_fake_ioctl,
+       .ndo_set_mac_address    = ieee802154_fake_mac_addr,
+};
+
+/* Netdev destructor: unregister and release the phy along with the dev. */
+static void ieee802154_fake_destruct(struct net_device *dev)
+{
+       struct wpan_phy *phy = fake_to_phy(dev);
+
+       wpan_phy_unregister(phy);
+       free_netdev(dev);
+       wpan_phy_free(phy);
+}
+
+/* alloc_netdev() setup callback: configure 802.15.4 link parameters. */
+static void ieee802154_fake_setup(struct net_device *dev)
+{
+       dev->addr_len           = IEEE802154_ADDR_LEN;
+       memset(dev->broadcast, 0xff, IEEE802154_ADDR_LEN);
+       dev->features           = NETIF_F_HW_CSUM;
+       dev->needed_tailroom    = 2; /* FCS */
+       dev->mtu                = 127;
+       dev->tx_queue_len       = 10;
+       dev->type               = ARPHRD_IEEE802154;
+       dev->flags              = IFF_NOARP | IFF_BROADCAST;
+       dev->watchdog_timeo     = 0;
+       dev->destructor         = ieee802154_fake_destruct;
+}
+
+
+/* Probe: allocate a wpan-phy plus netdev pair, register both, and clean
+ * up properly on failure.  The original single 'out:' label called
+ * unregister_netdev() even when the netdev was never registered (invalid)
+ * and leaked both objects when wpan_phy_register() failed.
+ */
+static int __devinit ieee802154fake_probe(struct platform_device *pdev)
+{
+       struct net_device *dev;
+       struct fakehard_priv *priv;
+       struct wpan_phy *phy = wpan_phy_alloc(0);
+       int err;
+
+       if (!phy)
+               return -ENOMEM;
+
+       dev = alloc_netdev(sizeof(struct fakehard_priv), "hardwpan%d", ieee802154_fake_setup);
+       if (!dev) {
+               wpan_phy_free(phy);
+               return -ENOMEM;
+       }
+
+       memcpy(dev->dev_addr, "\xba\xbe\xca\xfe\xde\xad\xbe\xef",
+                       dev->addr_len);
+       memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
+
+       /*
+        * For now we'd like to emulate 2.4 GHz-only device,
+        * both O-QPSK and CSS
+        */
+       /* 2.4 GHz O-QPSK 802.15.4-2003 */
+       phy->channels_supported[0] |= 0x7FFF800;
+       /* 2.4 GHz CSS 802.15.4a-2007 */
+       phy->channels_supported[3] |= 0x3fff;
+
+       phy->transmit_power = 0xbf;
+
+       dev->netdev_ops = &fake_ops;
+       dev->ml_priv = &fake_mlme;
+
+       priv = netdev_priv(dev);
+       priv->phy = phy;
+
+       wpan_phy_set_dev(phy, &pdev->dev);
+       SET_NETDEV_DEV(dev, &phy->dev);
+
+       platform_set_drvdata(pdev, dev);
+
+       err = wpan_phy_register(phy);
+       if (err)
+               goto err_phy_reg;
+
+       err = register_netdev(dev);
+       if (err < 0)
+               goto err_netdev_reg;
+
+       dev_info(&pdev->dev, "Added ieee802154 HardMAC hardware\n");
+       return 0;
+
+err_netdev_reg:
+       wpan_phy_unregister(phy);
+err_phy_reg:
+       /* The netdev was never registered, so its destructor will not run;
+        * release both objects by hand.
+        */
+       free_netdev(dev);
+       wpan_phy_free(phy);
+       return err;
+}
+
+/* Remove: unregistering the netdev triggers ieee802154_fake_destruct(),
+ * which also tears down and frees the phy.
+ */
+static int __devexit ieee802154fake_remove(struct platform_device *pdev)
+{
+       struct net_device *dev = platform_get_drvdata(pdev);
+       unregister_netdev(dev);
+       return 0;
+}
+
+static struct platform_device *ieee802154fake_dev;
+
+static struct platform_driver ieee802154fake_driver = {
+       .probe = ieee802154fake_probe,
+       .remove = __devexit_p(ieee802154fake_remove),
+       .driver = {
+                       .name = "ieee802154hardmac",
+                       .owner = THIS_MODULE,
+       },
+};
+
+static __init int fake_init(void)
+{
+       /* NOTE(review): the result of platform_device_register_simple()
+        * is not checked, and the device is not unregistered if driver
+        * registration fails - consider tightening this up.
+        */
+       ieee802154fake_dev = platform_device_register_simple(
+                       "ieee802154hardmac", -1, NULL, 0);
+       return platform_driver_register(&ieee802154fake_driver);
+}
+
+static __exit void fake_exit(void)
+{
+       platform_driver_unregister(&ieee802154fake_driver);
+       platform_device_unregister(ieee802154fake_dev);
+}
+
+module_init(fake_init);
+module_exit(fake_exit);
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ieee802154/fakelb.c b/drivers/net/ieee802154/fakelb.c
new file mode 100644 (file)
index 0000000..e7456fc
--- /dev/null
@@ -0,0 +1,294 @@
+/*
+ * Loopback IEEE 802.15.4 interface
+ *
+ * Copyright 2007-2012 Siemens AG
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Written by:
+ * Sergey Lapin <slapin@ossfans.org>
+ * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
+ * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
+ */
+
+#include <linux/module.h>
+#include <linux/timer.h>
+#include <linux/platform_device.h>
+#include <linux/netdevice.h>
+#include <linux/spinlock.h>
+#include <net/mac802154.h>
+#include <net/wpan-phy.h>
+
+static int numlbs = 1;
+
+/* State of one emulated radio. */
+struct fakelb_dev_priv {
+       struct ieee802154_dev *dev;
+
+       struct list_head list;
+       struct fakelb_priv *fake;
+
+       /* protects 'working' */
+       spinlock_t lock;
+       bool working;
+};
+
+/* Shared state: the list of all emulated radios on this platform device. */
+struct fakelb_priv {
+       struct list_head list;
+       /* protects 'list' */
+       rwlock_t lock;
+};
+
+static int
+fakelb_hw_ed(struct ieee802154_dev *dev, u8 *level)
+{
+       /* Energy detection on a loopback medium: report a fixed level. */
+       BUG_ON(!level);
+       might_sleep();
+
+       *level = 0xbe;
+       return 0;
+}
+
+/* Set channel: no real radio, so just cache page/channel on the phy. */
+static int
+fakelb_hw_channel(struct ieee802154_dev *dev, int page, int channel)
+{
+       pr_debug("set channel to %d\n", channel);
+
+       might_sleep();
+       dev->phy->current_page = page;
+       dev->phy->current_channel = channel;
+
+       return 0;
+}
+
+/* Deliver a copy of 'skb' to one emulated radio, if it is started. */
+static void
+fakelb_hw_deliver(struct fakelb_dev_priv *priv, struct sk_buff *skb)
+{
+       struct sk_buff *newskb;
+
+       spin_lock(&priv->lock);
+       if (priv->working) {
+               newskb = pskb_copy(skb, GFP_ATOMIC);
+               /* pskb_copy() returns NULL under memory pressure; drop the
+                * frame rather than handing a NULL skb up the stack.
+                */
+               if (newskb)
+                       ieee802154_rx_irqsafe(priv->dev, newskb, 0xcc);
+       }
+       spin_unlock(&priv->lock);
+}
+
+/* Transmit: loop the frame back to ourselves when we are the only
+ * device, otherwise deliver it to every OTHER device tuned to the same
+ * channel.
+ */
+static int
+fakelb_hw_xmit(struct ieee802154_dev *dev, struct sk_buff *skb)
+{
+       struct fakelb_dev_priv *priv = dev->priv;
+       struct fakelb_priv *fake = priv->fake;
+
+       might_sleep();
+
+       read_lock_bh(&fake->lock);
+       /* next == prev holds only for a single-entry list. */
+       if (priv->list.next == priv->list.prev) {
+               /* we are the only one device */
+               fakelb_hw_deliver(priv, skb);
+       } else {
+               struct fakelb_dev_priv *dp;
+               list_for_each_entry(dp, &priv->fake->list, list) {
+                       if (dp != priv &&
+                           (dp->dev->phy->current_channel ==
+                            priv->dev->phy->current_channel))
+                               fakelb_hw_deliver(dp, skb);
+               }
+       }
+       read_unlock_bh(&fake->lock);
+
+       return 0;
+}
+
+/* Start: mark the device as working; -EBUSY if it already was. */
+static int
+fakelb_hw_start(struct ieee802154_dev *dev) {
+       struct fakelb_dev_priv *priv = dev->priv;
+       int ret = 0;
+
+       spin_lock(&priv->lock);
+       if (priv->working)
+               ret = -EBUSY;
+       else
+               priv->working = 1;
+       spin_unlock(&priv->lock);
+
+       return ret;
+}
+
+/* Stop: clear the working flag so fakelb_hw_deliver() drops frames. */
+static void
+fakelb_hw_stop(struct ieee802154_dev *dev) {
+       struct fakelb_dev_priv *priv = dev->priv;
+
+       spin_lock(&priv->lock);
+       priv->working = 0;
+       spin_unlock(&priv->lock);
+}
+
+/* Callbacks exported to the mac802154 (SoftMAC) layer. */
+static struct ieee802154_ops fakelb_ops = {
+       .owner = THIS_MODULE,
+       .xmit = fakelb_hw_xmit,
+       .ed = fakelb_hw_ed,
+       .set_channel = fakelb_hw_channel,
+       .start = fakelb_hw_start,
+       .stop = fakelb_hw_stop,
+};
+
+/* Number of dummy devices to be set up by this module. */
+module_param(numlbs, int, 0);
+MODULE_PARM_DESC(numlbs, " number of pseudo devices");
+
+/* Allocate, configure and register one emulated radio, then link it
+ * into the shared device list under the write lock.
+ */
+static int fakelb_add_one(struct device *dev, struct fakelb_priv *fake)
+{
+       struct fakelb_dev_priv *priv;
+       int err;
+       struct ieee802154_dev *ieee;
+
+       ieee = ieee802154_alloc_device(sizeof(*priv), &fakelb_ops);
+       if (!ieee)
+               return -ENOMEM;
+
+       priv = ieee->priv;
+       priv->dev = ieee;
+
+       /* Advertise every channel page/band defined by the standard. */
+       /* 868 MHz BPSK 802.15.4-2003 */
+       ieee->phy->channels_supported[0] |= 1;
+       /* 915 MHz BPSK 802.15.4-2003 */
+       ieee->phy->channels_supported[0] |= 0x7fe;
+       /* 2.4 GHz O-QPSK 802.15.4-2003 */
+       ieee->phy->channels_supported[0] |= 0x7FFF800;
+       /* 868 MHz ASK 802.15.4-2006 */
+       ieee->phy->channels_supported[1] |= 1;
+       /* 915 MHz ASK 802.15.4-2006 */
+       ieee->phy->channels_supported[1] |= 0x7fe;
+       /* 868 MHz O-QPSK 802.15.4-2006 */
+       ieee->phy->channels_supported[2] |= 1;
+       /* 915 MHz O-QPSK 802.15.4-2006 */
+       ieee->phy->channels_supported[2] |= 0x7fe;
+       /* 2.4 GHz CSS 802.15.4a-2007 */
+       ieee->phy->channels_supported[3] |= 0x3fff;
+       /* UWB Sub-gigahertz 802.15.4a-2007 */
+       ieee->phy->channels_supported[4] |= 1;
+       /* UWB Low band 802.15.4a-2007 */
+       ieee->phy->channels_supported[4] |= 0x1e;
+       /* UWB High band 802.15.4a-2007 */
+       ieee->phy->channels_supported[4] |= 0xffe0;
+       /* 750 MHz O-QPSK 802.15.4c-2009 */
+       ieee->phy->channels_supported[5] |= 0xf;
+       /* 750 MHz MPSK 802.15.4c-2009 */
+       ieee->phy->channels_supported[5] |= 0xf0;
+       /* 950 MHz BPSK 802.15.4d-2009 */
+       ieee->phy->channels_supported[6] |= 0x3ff;
+       /* 950 MHz GFSK 802.15.4d-2009 */
+       ieee->phy->channels_supported[6] |= 0x3ffc00;
+
+       INIT_LIST_HEAD(&priv->list);
+       priv->fake = fake;
+
+       spin_lock_init(&priv->lock);
+
+       ieee->parent = dev;
+
+       err = ieee802154_register_device(ieee);
+       if (err)
+               goto err_reg;
+
+       write_lock_bh(&fake->lock);
+       list_add_tail(&priv->list, &fake->list);
+       write_unlock_bh(&fake->lock);
+
+       return 0;
+
+err_reg:
+       ieee802154_free_device(priv->dev);
+       return err;
+}
+
+/* Unlink one emulated radio from the shared list, then unregister and
+ * free it.  Callers iterating the list must use a deletion-safe iterator.
+ */
+static void fakelb_del(struct fakelb_dev_priv *priv)
+{
+       write_lock_bh(&priv->fake->lock);
+       list_del(&priv->list);
+       write_unlock_bh(&priv->fake->lock);
+
+       ieee802154_unregister_device(priv->dev);
+       ieee802154_free_device(priv->dev);
+}
+
+/* Probe: create 'numlbs' emulated radios sharing one loopback medium. */
+static int __devinit fakelb_probe(struct platform_device *pdev)
+{
+       struct fakelb_priv *priv;
+       struct fakelb_dev_priv *dp, *temp;
+       int err = -ENOMEM;
+       int i;
+
+       priv = kzalloc(sizeof(struct fakelb_priv), GFP_KERNEL);
+       if (!priv)
+               goto err_alloc;
+
+       INIT_LIST_HEAD(&priv->list);
+       rwlock_init(&priv->lock);
+
+       for (i = 0; i < numlbs; i++) {
+               err = fakelb_add_one(&pdev->dev, priv);
+               if (err < 0)
+                       goto err_slave;
+       }
+
+       platform_set_drvdata(pdev, priv);
+       dev_info(&pdev->dev, "added ieee802154 hardware\n");
+       return 0;
+
+err_slave:
+       /* fakelb_del() unlinks and frees each entry, so a deletion-safe
+        * iterator is required here (plain list_for_each_entry would read
+        * freed memory) - same pattern as fakelb_remove().
+        */
+       list_for_each_entry_safe(dp, temp, &priv->list, list)
+               fakelb_del(dp);
+       kfree(priv);
+err_alloc:
+       return err;
+}
+
+/* Remove: tear down every emulated radio; _safe iteration is required
+ * because fakelb_del() unlinks and frees each entry.
+ */
+static int __devexit fakelb_remove(struct platform_device *pdev)
+{
+       struct fakelb_priv *priv = platform_get_drvdata(pdev);
+       struct fakelb_dev_priv *dp, *temp;
+
+       list_for_each_entry_safe(dp, temp, &priv->list, list)
+               fakelb_del(dp);
+       kfree(priv);
+
+       return 0;
+}
+
+static struct platform_device *ieee802154fake_dev;
+
+static struct platform_driver ieee802154fake_driver = {
+       .probe = fakelb_probe,
+       .remove = __devexit_p(fakelb_remove),
+       .driver = {
+                       .name = "ieee802154fakelb",
+                       .owner = THIS_MODULE,
+       },
+};
+
+static __init int fakelb_init_module(void)
+{
+       /* NOTE(review): the result of platform_device_register_simple()
+        * is not checked, and the device is not unregistered if driver
+        * registration fails - consider tightening this up.
+        */
+       ieee802154fake_dev = platform_device_register_simple(
+                            "ieee802154fakelb", -1, NULL, 0);
+       return platform_driver_register(&ieee802154fake_driver);
+}
+
+static __exit void fake_remove_module(void)
+{
+       platform_driver_unregister(&ieee802154fake_driver);
+       platform_device_unregister(ieee802154fake_dev);
+}
+
+module_init(fakelb_init_module);
+module_exit(fake_remove_module);
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c
new file mode 100644 (file)
index 0000000..ed75216
--- /dev/null
@@ -0,0 +1,767 @@
+/*
+ * Driver for Microchip MRF24J40 802.15.4 Wireless-PAN Networking controller
+ *
+ * Copyright (C) 2012 Alan Ott <alan@signal11.us>
+ *                    Signal 11 Software
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/spi/spi.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <net/wpan-phy.h>
+#include <net/mac802154.h>
+
+/* MRF24J40 Short Address Registers */
+#define REG_RXMCR    0x00  /* Receive MAC control */
+#define REG_PANIDL   0x01  /* PAN ID (low) */
+#define REG_PANIDH   0x02  /* PAN ID (high) */
+#define REG_SADRL    0x03  /* Short address (low) */
+#define REG_SADRH    0x04  /* Short address (high) */
+#define REG_EADR0    0x05  /* Long address (low) (high is EADR7) */
+#define REG_TXMCR    0x11  /* Transmit MAC control */
+#define REG_PACON0   0x16  /* Power Amplifier Control */
+#define REG_PACON1   0x17  /* Power Amplifier Control */
+#define REG_PACON2   0x18  /* Power Amplifier Control */
+#define REG_TXNCON   0x1B  /* Transmit Normal FIFO Control */
+#define REG_TXSTAT   0x24  /* TX MAC Status Register */
+#define REG_SOFTRST  0x2A  /* Soft Reset */
+#define REG_TXSTBL   0x2E  /* TX Stabilization */
+#define REG_INTSTAT  0x31  /* Interrupt Status */
+#define REG_INTCON   0x32  /* Interrupt Control */
+#define REG_RFCTL    0x36  /* RF Control Mode Register */
+#define REG_BBREG1   0x39  /* Baseband Registers */
+#define REG_BBREG2   0x3A  /* */
+#define REG_BBREG6   0x3E  /* */
+#define REG_CCAEDTH  0x3F  /* Energy Detection Threshold */
+
+/* MRF24J40 Long Address Registers */
+#define REG_RFCON0     0x200  /* RF Control Registers */
+#define REG_RFCON1     0x201
+#define REG_RFCON2     0x202
+#define REG_RFCON3     0x203
+#define REG_RFCON5     0x205
+#define REG_RFCON6     0x206
+#define REG_RFCON7     0x207
+#define REG_RFCON8     0x208
+#define REG_RSSI       0x210
+#define REG_SLPCON0    0x211  /* Sleep Clock Control Registers */
+#define REG_SLPCON1    0x220
+#define REG_WAKETIMEL  0x222  /* Wake-up Time Match Value Low */
+#define REG_WAKETIMEH  0x223  /* Wake-up Time Match Value High */
+#define REG_RX_FIFO    0x300  /* Receive FIFO */
+
+/* Device configuration: Only channels 11-26 on page 0 are supported. */
+#define MRF24J40_CHAN_MIN 11
+#define MRF24J40_CHAN_MAX 26
+#define CHANNEL_MASK (((u32)1 << (MRF24J40_CHAN_MAX + 1)) \
+                     - ((u32)1 << MRF24J40_CHAN_MIN))
+
+#define TX_FIFO_SIZE 128 /* From datasheet */
+#define RX_FIFO_SIZE 144 /* From datasheet */
+#define SET_CHANNEL_DELAY_US 192 /* From datasheet */
+
+/* Device Private Data */
+struct mrf24j40 {
+       struct spi_device *spi;
+       struct ieee802154_dev *dev; /* mac802154 device; ->priv points back here */
+
+       struct mutex buffer_mutex; /* only used to protect buf */
+       struct completion tx_complete; /* completed by irqwork on TX-done IRQ */
+       struct work_struct irqwork; /* bottom half: reads INTSTAT over SPI */
+       u8 *buf; /* 3 bytes. Used for SPI single-register transfers. */
+};
+
+/* Read/Write SPI Commands for Short and Long Address registers. */
+#define MRF24J40_READSHORT(reg) ((reg) << 1)
+#define MRF24J40_WRITESHORT(reg) ((reg) << 1 | 1)
+#define MRF24J40_READLONG(reg) (1 << 15 | (reg) << 5)
+#define MRF24J40_WRITELONG(reg) (1 << 15 | (reg) << 5 | 1 << 4)
+
+/* Maximum speed to run the device at. TODO: Get the real max value from
+ * someone at Microchip since it isn't in the datasheet. */
+#define MAX_SPI_SPEED_HZ 1000000
+
+#define printdev(X) (&X->spi->dev)
+
+/* Write one byte to a short-address register (0x00-0x3F) over SPI.
+ * Uses devrec->buf as the transfer buffer, serialized by buffer_mutex.
+ * Returns 0 on success or the negative spi_sync() error code. */
+static int write_short_reg(struct mrf24j40 *devrec, u8 reg, u8 value)
+{
+       int ret;
+       struct spi_message msg;
+       struct spi_transfer xfer = {
+               .len = 2,
+               .tx_buf = devrec->buf,
+               .rx_buf = devrec->buf,
+       };
+
+       spi_message_init(&msg);
+       spi_message_add_tail(&xfer, &msg);
+
+       mutex_lock(&devrec->buffer_mutex);
+       devrec->buf[0] = MRF24J40_WRITESHORT(reg);
+       devrec->buf[1] = value;
+
+       ret = spi_sync(devrec->spi, &msg);
+       if (ret)
+               dev_err(printdev(devrec),
+                       "SPI write Failed for short register 0x%hhx\n", reg);
+
+       mutex_unlock(&devrec->buffer_mutex);
+       return ret;
+}
+
+/* Read one byte from a short-address register (0x00-0x3F) over SPI into
+ * *val. Returns 0 on success or the negative spi_sync() error code. */
+static int read_short_reg(struct mrf24j40 *devrec, u8 reg, u8 *val)
+{
+       int ret = -1; /* NOTE(review): dead initializer; overwritten by spi_sync() */
+       struct spi_message msg;
+       struct spi_transfer xfer = {
+               .len = 2,
+               .tx_buf = devrec->buf,
+               .rx_buf = devrec->buf,
+       };
+
+       spi_message_init(&msg);
+       spi_message_add_tail(&xfer, &msg);
+
+       mutex_lock(&devrec->buffer_mutex);
+       devrec->buf[0] = MRF24J40_READSHORT(reg);
+       devrec->buf[1] = 0;
+
+       ret = spi_sync(devrec->spi, &msg);
+       if (ret)
+               dev_err(printdev(devrec),
+                       "SPI read Failed for short register 0x%hhx\n", reg);
+       else
+               *val = devrec->buf[1];
+
+       mutex_unlock(&devrec->buffer_mutex);
+       return ret;
+}
+
+/* Read one byte from a long-address register (e.g. 0x200+) over SPI.
+ * The 16-bit command word (bit 15 set = long addressing) is sent MSB
+ * first, then one dummy byte is clocked to receive the value. */
+static int read_long_reg(struct mrf24j40 *devrec, u16 reg, u8 *value)
+{
+       int ret;
+       u16 cmd;
+       struct spi_message msg;
+       struct spi_transfer xfer = {
+               .len = 3,
+               .tx_buf = devrec->buf,
+               .rx_buf = devrec->buf,
+       };
+
+       spi_message_init(&msg);
+       spi_message_add_tail(&xfer, &msg);
+
+       cmd = MRF24J40_READLONG(reg);
+       mutex_lock(&devrec->buffer_mutex);
+       devrec->buf[0] = cmd >> 8 & 0xff;
+       devrec->buf[1] = cmd & 0xff;
+       devrec->buf[2] = 0;
+
+       ret = spi_sync(devrec->spi, &msg);
+       if (ret)
+               dev_err(printdev(devrec),
+                       "SPI read Failed for long register 0x%hx\n", reg);
+       else
+               *value = devrec->buf[2];
+
+       mutex_unlock(&devrec->buffer_mutex);
+       return ret;
+}
+
+/* Write one byte to a long-address register over SPI. Mirrors
+ * read_long_reg(): 16-bit command word then the data byte. */
+static int write_long_reg(struct mrf24j40 *devrec, u16 reg, u8 val)
+{
+       int ret;
+       u16 cmd;
+       struct spi_message msg;
+       struct spi_transfer xfer = {
+               .len = 3,
+               .tx_buf = devrec->buf,
+               .rx_buf = devrec->buf,
+       };
+
+       spi_message_init(&msg);
+       spi_message_add_tail(&xfer, &msg);
+
+       cmd = MRF24J40_WRITELONG(reg);
+       mutex_lock(&devrec->buffer_mutex);
+       devrec->buf[0] = cmd >> 8 & 0xff;
+       devrec->buf[1] = cmd & 0xff;
+       devrec->buf[2] = val;
+
+       ret = spi_sync(devrec->spi, &msg);
+       if (ret)
+               dev_err(printdev(devrec),
+                       "SPI write Failed for long register 0x%hx\n", reg);
+
+       mutex_unlock(&devrec->buffer_mutex);
+       return ret;
+}
+
+/* This function relies on an undocumented write method. Once a write command
+   and address is set, as many bytes of data as desired can be clocked into
+   the device. The datasheet only shows setting one byte at a time. */
+/* Copy a frame into the device's normal TX FIFO at long address 'reg'.
+ * The transfer is: 2-byte write command, 2 length bytes (header len,
+ * total len), then the payload. Over-long frames are truncated with an
+ * error message rather than rejected. */
+static int write_tx_buf(struct mrf24j40 *devrec, u16 reg,
+                       const u8 *data, size_t length)
+{
+       int ret;
+       u16 cmd;
+       u8 lengths[2];
+       struct spi_message msg;
+       struct spi_transfer addr_xfer = {
+               .len = 2,
+               .tx_buf = devrec->buf,
+       };
+       struct spi_transfer lengths_xfer = {
+               .len = 2,
+               .tx_buf = &lengths, /* TODO: Is DMA really required for SPI? */
+               /* NOTE(review): 'lengths' is on the stack; SPI transfer
+                * buffers are normally expected to be DMA-safe (heap)
+                * memory — confirm against the SPI master in use. */
+       };
+       struct spi_transfer data_xfer = {
+               .len = length,
+               .tx_buf = data,
+       };
+
+       /* Range check the length. 2 bytes are used for the length fields.*/
+       if (length > TX_FIFO_SIZE-2) {
+               dev_err(printdev(devrec), "write_tx_buf() was passed too large a buffer. Performing short write.\n");
+               length = TX_FIFO_SIZE-2;
+       }
+
+       spi_message_init(&msg);
+       spi_message_add_tail(&addr_xfer, &msg);
+       spi_message_add_tail(&lengths_xfer, &msg);
+       spi_message_add_tail(&data_xfer, &msg);
+
+       cmd = MRF24J40_WRITELONG(reg);
+       mutex_lock(&devrec->buffer_mutex);
+       devrec->buf[0] = cmd >> 8 & 0xff;
+       devrec->buf[1] = cmd & 0xff;
+       lengths[0] = 0x0; /* Header Length. Set to 0 for now. TODO */
+       lengths[1] = length; /* Total length */
+
+       ret = spi_sync(devrec->spi, &msg);
+       if (ret)
+               dev_err(printdev(devrec), "SPI write Failed for TX buf\n");
+
+       mutex_unlock(&devrec->buffer_mutex);
+       return ret;
+}
+
+/* Drain one received frame from the RX FIFO.
+ * On entry *len is the capacity of 'data'; on success it is updated to
+ * the number of bytes actually read and *lqi holds the appended LQI.
+ * NOTE(review): 'addr' and 'lqi_rssi' are stack buffers used as SPI
+ * transfer buffers — confirm DMA-safety against the SPI master. */
+static int mrf24j40_read_rx_buf(struct mrf24j40 *devrec,
+                               u8 *data, u8 *len, u8 *lqi)
+{
+       u8 rx_len;
+       u8 addr[2];
+       u8 lqi_rssi[2];
+       u16 cmd;
+       int ret;
+       struct spi_message msg;
+       struct spi_transfer addr_xfer = {
+               .len = 2,
+               .tx_buf = &addr,
+       };
+       struct spi_transfer data_xfer = {
+               .len = 0x0, /* set below */
+               .rx_buf = data,
+       };
+       struct spi_transfer status_xfer = {
+               .len = 2,
+               .rx_buf = &lqi_rssi,
+       };
+
+       /* Get the length of the data in the RX FIFO. The length in this
+        * register excludes the 1-byte length field at the beginning. */
+       ret = read_long_reg(devrec, REG_RX_FIFO, &rx_len);
+       if (ret)
+               goto out;
+
+       /* Range check the RX FIFO length, accounting for the one-byte
+        * length field at the beginning. */
+       if (rx_len > RX_FIFO_SIZE-1) {
+               dev_err(printdev(devrec), "Invalid length read from device. Performing short read.\n");
+               rx_len = RX_FIFO_SIZE-1;
+       }
+
+       if (rx_len > *len) {
+               /* Passed in buffer wasn't big enough. Should never happen. */
+               dev_err(printdev(devrec), "Buffer not big enough. Performing short read\n");
+               rx_len = *len;
+       }
+
+       /* Set up the commands to read the data. */
+       cmd = MRF24J40_READLONG(REG_RX_FIFO+1);
+       addr[0] = cmd >> 8 & 0xff;
+       addr[1] = cmd & 0xff;
+       data_xfer.len = rx_len;
+
+       spi_message_init(&msg);
+       spi_message_add_tail(&addr_xfer, &msg);
+       spi_message_add_tail(&data_xfer, &msg);
+       spi_message_add_tail(&status_xfer, &msg);
+
+       ret = spi_sync(devrec->spi, &msg);
+       if (ret) {
+               dev_err(printdev(devrec), "SPI RX Buffer Read Failed.\n");
+               goto out;
+       }
+
+       *lqi = lqi_rssi[0];
+       *len = rx_len;
+
+#ifdef DEBUG
+       print_hex_dump(KERN_DEBUG, "mrf24j40 rx: ",
+               DUMP_PREFIX_OFFSET, 16, 1, data, *len, 0);
+       printk(KERN_DEBUG "mrf24j40 rx: lqi: %02hhx rssi: %02hhx\n",
+               lqi_rssi[0], lqi_rssi[1]);
+#endif
+
+out:
+       return ret;
+}
+
+/* mac802154 .xmit callback: synchronous transmit. Copies the frame to
+ * the TX FIFO, triggers transmission, and blocks (up to 5s) until the
+ * TX-complete interrupt fires. Returns 0 on success, negative errno
+ * on failure/timeout. */
+static int mrf24j40_tx(struct ieee802154_dev *dev, struct sk_buff *skb)
+{
+       struct mrf24j40 *devrec = dev->priv;
+       u8 val;
+       int ret = 0;
+
+       dev_dbg(printdev(devrec), "tx packet of %d bytes\n", skb->len);
+
+       ret = write_tx_buf(devrec, 0x000, skb->data, skb->len);
+       if (ret)
+               goto err;
+
+       /* Set TXNTRIG bit of TXNCON to send packet */
+       ret = read_short_reg(devrec, REG_TXNCON, &val);
+       if (ret)
+               goto err;
+       val |= 0x1;
+       val &= ~0x4;
+       /* NOTE(review): return value ignored here; a failed trigger write
+        * would be reported only as a wait timeout below. */
+       write_short_reg(devrec, REG_TXNCON, val);
+
+       INIT_COMPLETION(devrec->tx_complete);
+
+       /* Wait for the device to send the TX complete interrupt. */
+       ret = wait_for_completion_interruptible_timeout(
+                                               &devrec->tx_complete,
+                                               5 * HZ);
+       if (ret == -ERESTARTSYS)
+               goto err;
+       if (ret == 0) {
+               ret = -ETIMEDOUT;
+               goto err;
+       }
+
+       /* Check for send error from the device. */
+       ret = read_short_reg(devrec, REG_TXSTAT, &val);
+       if (ret)
+               goto err;
+       if (val & 0x1) {
+               dev_err(printdev(devrec), "Error Sending. Retry count exceeded\n");
+               ret = -ECOMM; /* TODO: Better error code ? */
+       } else
+               dev_dbg(printdev(devrec), "Packet Sent\n");
+
+err:
+
+       return ret;
+}
+
+/* mac802154 .ed callback (energy detect). Not implemented yet; always
+ * reports level 0 and success. */
+static int mrf24j40_ed(struct ieee802154_dev *dev, u8 *level)
+{
+       /* TODO: */
+       printk(KERN_WARNING "mrf24j40: ed not implemented\n");
+       *level = 0;
+       return 0;
+}
+
+/* mac802154 .start callback: enable TX-done and RX interrupts.
+ * INTCON bits are active-low masks, so clearing enables. */
+static int mrf24j40_start(struct ieee802154_dev *dev)
+{
+       struct mrf24j40 *devrec = dev->priv;
+       u8 val;
+       int ret;
+
+       dev_dbg(printdev(devrec), "start\n");
+
+       ret = read_short_reg(devrec, REG_INTCON, &val);
+       if (ret)
+               return ret;
+       val &= ~(0x1|0x8); /* Clear TXNIE and RXIE. Enable interrupts */
+       write_short_reg(devrec, REG_INTCON, val);
+
+       return 0;
+}
+
+/* mac802154 .stop callback: mask TX-done and RX interrupts. A failed
+ * INTCON read is silently ignored (void return). */
+static void mrf24j40_stop(struct ieee802154_dev *dev)
+{
+       struct mrf24j40 *devrec = dev->priv;
+       u8 val;
+       int ret;
+       dev_dbg(printdev(devrec), "stop\n");
+
+       ret = read_short_reg(devrec, REG_INTCON, &val);
+       if (ret)
+               return;
+       val |= 0x1|0x8; /* Set TXNIE and RXIE. Disable Interrupts */
+       write_short_reg(devrec, REG_INTCON, val);
+
+       return; /* NOTE(review): redundant trailing return */
+}
+
+/* mac802154 .set_channel callback. Only page 0, channels 11-26 are
+ * valid (enforced by WARN_ON only). Programs RFCON0 and performs the
+ * RF state-machine reset the datasheet requires after a channel change,
+ * then waits the documented settle time. */
+static int mrf24j40_set_channel(struct ieee802154_dev *dev,
+                               int page, int channel)
+{
+       struct mrf24j40 *devrec = dev->priv;
+       u8 val;
+       int ret;
+
+       dev_dbg(printdev(devrec), "Set Channel %d\n", channel);
+
+       WARN_ON(page != 0);
+       WARN_ON(channel < MRF24J40_CHAN_MIN);
+       WARN_ON(channel > MRF24J40_CHAN_MAX);
+
+       /* Set Channel TODO */
+       val = (channel-11) << 4 | 0x03;
+       write_long_reg(devrec, REG_RFCON0, val);
+
+       /* RF Reset */
+       ret = read_short_reg(devrec, REG_RFCTL, &val);
+       if (ret)
+               return ret;
+       val |= 0x04;
+       write_short_reg(devrec, REG_RFCTL, val);
+       val &= ~0x04;
+       write_short_reg(devrec, REG_RFCTL, val);
+
+       udelay(SET_CHANNEL_DELAY_US); /* per datasheet */
+
+       return 0;
+}
+
+/* mac802154 .set_hw_addr_filt callback: push the changed address-filter
+ * fields (short addr, extended addr, PAN ID, PAN-coordinator flag) into
+ * the corresponding device registers. */
+static int mrf24j40_filter(struct ieee802154_dev *dev,
+                          struct ieee802154_hw_addr_filt *filt,
+                          unsigned long changed)
+{
+       struct mrf24j40 *devrec = dev->priv;
+
+       dev_dbg(printdev(devrec), "filter\n");
+
+       if (changed & IEEE802515_AFILT_SADDR_CHANGED) {
+               /* Short Addr */
+               u8 addrh, addrl;
+               addrh = filt->short_addr >> 8 & 0xff;
+               addrl = filt->short_addr & 0xff;
+
+               write_short_reg(devrec, REG_SADRH, addrh);
+               write_short_reg(devrec, REG_SADRL, addrl);
+               dev_dbg(printdev(devrec),
+                       "Set short addr to %04hx\n", filt->short_addr);
+       }
+
+       if (changed & IEEE802515_AFILT_IEEEADDR_CHANGED) {
+               /* Device Address */
+               int i;
+               /* EADR0..EADR7 hold the 8-byte extended address. */
+               for (i = 0; i < 8; i++)
+                       write_short_reg(devrec, REG_EADR0+i,
+                                       filt->ieee_addr[i]);
+
+#ifdef DEBUG
+               printk(KERN_DEBUG "Set long addr to: ");
+               for (i = 0; i < 8; i++)
+                       printk("%02hhx ", filt->ieee_addr[i]);
+               printk(KERN_DEBUG "\n");
+#endif
+       }
+
+       if (changed & IEEE802515_AFILT_PANID_CHANGED) {
+               /* PAN ID */
+               u8 panidl, panidh;
+               panidh = filt->pan_id >> 8 & 0xff;
+               panidl = filt->pan_id & 0xff;
+               write_short_reg(devrec, REG_PANIDH, panidh);
+               write_short_reg(devrec, REG_PANIDL, panidl);
+
+               dev_dbg(printdev(devrec), "Set PANID to %04hx\n", filt->pan_id);
+       }
+
+       if (changed & IEEE802515_AFILT_PANC_CHANGED) {
+               /* Pan Coordinator */
+               u8 val;
+               int ret;
+
+               ret = read_short_reg(devrec, REG_RXMCR, &val);
+               if (ret)
+                       return ret;
+               if (filt->pan_coord)
+                       val |= 0x8;
+               else
+                       val &= ~0x8;
+               write_short_reg(devrec, REG_RXMCR, val);
+
+               /* REG_SLOTTED is maintained as default (unslotted/CSMA-CA).
+                * REG_ORDER is maintained as default (no beacon/superframe).
+                */
+
+               dev_dbg(printdev(devrec), "Set Pan Coord to %s\n",
+                                       filt->pan_coord ? "on" : "off");
+       }
+
+       return 0;
+}
+
+/* Pull one frame out of the RX FIFO and hand it to mac802154.
+ * RX is inhibited (RXDECINV) for the duration so the hardware cannot
+ * overwrite the FIFO mid-read, and re-enabled on every exit path. */
+static int mrf24j40_handle_rx(struct mrf24j40 *devrec)
+{
+       u8 len = RX_FIFO_SIZE;
+       u8 lqi = 0;
+       u8 val;
+       int ret = 0;
+       struct sk_buff *skb;
+
+       /* Turn off reception of packets off the air. This prevents the
+        * device from overwriting the buffer while we're reading it. */
+       ret = read_short_reg(devrec, REG_BBREG1, &val);
+       if (ret)
+               goto out;
+       val |= 4; /* SET RXDECINV */
+       write_short_reg(devrec, REG_BBREG1, val);
+
+       skb = alloc_skb(len, GFP_KERNEL);
+       if (!skb) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       ret = mrf24j40_read_rx_buf(devrec, skb_put(skb, len), &len, &lqi);
+       if (ret < 0) {
+               dev_err(printdev(devrec), "Failure reading RX FIFO\n");
+               kfree_skb(skb);
+               ret = -EINVAL;
+               goto out;
+       }
+
+       /* Cut off the checksum */
+       /* NOTE(review): assumes len >= 2 (frame always carries a 2-byte
+        * FCS) — a shorter read would underflow here; confirm hardware
+        * guarantees a minimum frame length. */
+       skb_trim(skb, len-2);
+
+       /* TODO: Other drivers call ieee20154_rx_irqsafe() here (eg: cc2040,
+        * also from a workqueue).  I think irqsafe is not necessary here.
+        * Can someone confirm? */
+       ieee802154_rx_irqsafe(devrec->dev, skb, lqi);
+
+       dev_dbg(printdev(devrec), "RX Handled\n");
+
+out:
+       /* Turn back on reception of packets off the air. */
+       ret = read_short_reg(devrec, REG_BBREG1, &val);
+       if (ret)
+               return ret;
+       val &= ~0x4; /* Clear RXDECINV */
+       write_short_reg(devrec, REG_BBREG1, val);
+
+       return ret;
+}
+
+/* mac802154 operations vector for this driver. */
+static struct ieee802154_ops mrf24j40_ops = {
+       .owner = THIS_MODULE,
+       .xmit = mrf24j40_tx,
+       .ed = mrf24j40_ed,
+       .start = mrf24j40_start,
+       .stop = mrf24j40_stop,
+       .set_channel = mrf24j40_set_channel,
+       .set_hw_addr_filt = mrf24j40_filter,
+};
+
+/* Hard IRQ handler: SPI access can sleep, so just mask the line and
+ * defer all work to the workqueue; irqwork re-enables the IRQ. */
+static irqreturn_t mrf24j40_isr(int irq, void *data)
+{
+       struct mrf24j40 *devrec = data;
+
+       disable_irq_nosync(irq);
+
+       schedule_work(&devrec->irqwork);
+
+       return IRQ_HANDLED;
+}
+
+/* Interrupt bottom half: read INTSTAT (clears it on read), dispatch
+ * TX-complete and RX events, then unmask the IRQ disabled by the ISR. */
+static void mrf24j40_isrwork(struct work_struct *work)
+{
+       struct mrf24j40 *devrec = container_of(work, struct mrf24j40, irqwork);
+       u8 intstat;
+       int ret;
+
+       /* Read the interrupt status */
+       ret = read_short_reg(devrec, REG_INTSTAT, &intstat);
+       if (ret)
+               goto out;
+
+       /* Check for TX complete */
+       if (intstat & 0x1)
+               complete(&devrec->tx_complete);
+
+       /* Check for Rx */
+       if (intstat & 0x8)
+               mrf24j40_handle_rx(devrec);
+
+out:
+       enable_irq(devrec->spi->irq);
+}
+
+/* SPI probe: allocate driver state, register with mac802154, run the
+ * datasheet init sequence, and hook the interrupt. Errors unwind in
+ * strict reverse order via the labelled cleanup chain. */
+static int __devinit mrf24j40_probe(struct spi_device *spi)
+{
+       int ret = -ENOMEM;
+       u8 val;
+       struct mrf24j40 *devrec;
+
+       printk(KERN_INFO "mrf24j40: probe(). IRQ: %d\n", spi->irq);
+
+       devrec = kzalloc(sizeof(struct mrf24j40), GFP_KERNEL);
+       if (!devrec)
+               goto err_devrec;
+       /* Separate kmalloc'd buffer so SPI transfers use DMA-safe memory. */
+       devrec->buf = kzalloc(3, GFP_KERNEL);
+       if (!devrec->buf)
+               goto err_buf;
+
+       spi->mode = SPI_MODE_0; /* TODO: Is this appropriate for right here? */
+       /* NOTE(review): spi_setup() is not called after changing mode /
+        * max_speed_hz — confirm the SPI core applies these per-transfer. */
+       if (spi->max_speed_hz > MAX_SPI_SPEED_HZ)
+               spi->max_speed_hz = MAX_SPI_SPEED_HZ;
+
+       mutex_init(&devrec->buffer_mutex);
+       init_completion(&devrec->tx_complete);
+       INIT_WORK(&devrec->irqwork, mrf24j40_isrwork);
+       devrec->spi = spi;
+       dev_set_drvdata(&spi->dev, devrec);
+
+       /* Register with the 802154 subsystem */
+
+       devrec->dev = ieee802154_alloc_device(0, &mrf24j40_ops);
+       if (!devrec->dev)
+               goto err_alloc_dev;
+
+       devrec->dev->priv = devrec;
+       devrec->dev->parent = &devrec->spi->dev;
+       devrec->dev->phy->channels_supported[0] = CHANNEL_MASK;
+       devrec->dev->flags = IEEE802154_HW_OMIT_CKSUM|IEEE802154_HW_AACK;
+
+       dev_dbg(printdev(devrec), "registered mrf24j40\n");
+       ret = ieee802154_register_device(devrec->dev);
+       if (ret)
+               goto err_register_device;
+
+       /* Initialize the device.
+               From datasheet section 3.2: Initialization. */
+       write_short_reg(devrec, REG_SOFTRST, 0x07);
+       write_short_reg(devrec, REG_PACON2, 0x98);
+       write_short_reg(devrec, REG_TXSTBL, 0x95);
+       write_long_reg(devrec, REG_RFCON0, 0x03);
+       write_long_reg(devrec, REG_RFCON1, 0x01);
+       write_long_reg(devrec, REG_RFCON2, 0x80);
+       write_long_reg(devrec, REG_RFCON6, 0x90);
+       write_long_reg(devrec, REG_RFCON7, 0x80);
+       write_long_reg(devrec, REG_RFCON8, 0x10);
+       write_long_reg(devrec, REG_SLPCON1, 0x21);
+       write_short_reg(devrec, REG_BBREG2, 0x80);
+       write_short_reg(devrec, REG_CCAEDTH, 0x60);
+       write_short_reg(devrec, REG_BBREG6, 0x40);
+       write_short_reg(devrec, REG_RFCTL, 0x04);
+       write_short_reg(devrec, REG_RFCTL, 0x0);
+       udelay(192); /* RF settle time after reset, per datasheet */
+
+       /* Set RX Mode. RXMCR<1:0>: 0x0 normal, 0x1 promisc, 0x2 error */
+       ret = read_short_reg(devrec, REG_RXMCR, &val);
+       if (ret)
+               goto err_read_reg;
+       val &= ~0x3; /* Clear RX mode (normal) */
+       write_short_reg(devrec, REG_RXMCR, val);
+
+       ret = request_irq(spi->irq,
+                         mrf24j40_isr,
+                         IRQF_TRIGGER_FALLING,
+                         dev_name(&spi->dev),
+                         devrec);
+
+       if (ret) {
+               dev_err(printdev(devrec), "Unable to get IRQ");
+               goto err_irq;
+       }
+
+       return 0;
+
+err_irq:
+err_read_reg:
+       ieee802154_unregister_device(devrec->dev);
+err_register_device:
+       ieee802154_free_device(devrec->dev);
+err_alloc_dev:
+       kfree(devrec->buf);
+err_buf:
+       kfree(devrec);
+err_devrec:
+       return ret;
+}
+
+/* SPI remove: release the IRQ first so no new work is queued, flush
+ * outstanding bottom-half work, then unwind the 802154 registration and
+ * free driver state. */
+static int __devexit mrf24j40_remove(struct spi_device *spi)
+{
+       struct mrf24j40 *devrec = dev_get_drvdata(&spi->dev);
+
+       dev_dbg(printdev(devrec), "remove\n");
+
+       free_irq(spi->irq, devrec);
+       flush_work(&devrec->irqwork); /* TODO: Is this the right call? */
+       ieee802154_unregister_device(devrec->dev);
+       ieee802154_free_device(devrec->dev);
+       /* TODO: Will ieee802154_free_device() wait until ->xmit() is
+        * complete? */
+
+       /* Clean up the SPI stuff. */
+       dev_set_drvdata(&spi->dev, NULL);
+       kfree(devrec->buf);
+       kfree(devrec);
+       return 0;
+}
+
+/* SPI device-ID table: bare chip and the MA module variant. */
+static const struct spi_device_id mrf24j40_ids[] = {
+       { "mrf24j40", 0 },
+       { "mrf24j40ma", 0 },
+       { },
+};
+MODULE_DEVICE_TABLE(spi, mrf24j40_ids);
+
+static struct spi_driver mrf24j40_driver = {
+       .driver = {
+               .name = "mrf24j40",
+               .bus = &spi_bus_type,
+               .owner = THIS_MODULE,
+       },
+       .id_table = mrf24j40_ids,
+       .probe = mrf24j40_probe,
+       .remove = __devexit_p(mrf24j40_remove),
+};
+
+/* Module init/exit: plain spi_driver registration. */
+static int __init mrf24j40_init(void)
+{
+       return spi_register_driver(&mrf24j40_driver);
+}
+
+static void __exit mrf24j40_exit(void)
+{
+       spi_unregister_driver(&mrf24j40_driver);
+}
+
+module_init(mrf24j40_init);
+module_exit(mrf24j40_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Alan Ott");
+MODULE_DESCRIPTION("MRF24J40 SPI 802.15.4 Controller Driver");
index e2a06fd996d51409ed711fb360105d7f423f27a8..81f8f9e31db510892acae3cdb39a799de7e21be9 100644 (file)
@@ -157,7 +157,7 @@ static const struct net_device_ops loopback_ops = {
  */
 static void loopback_setup(struct net_device *dev)
 {
-       dev->mtu                = (16 * 1024) + 20 + 20 + 12;
+       dev->mtu                = 64 * 1024;
        dev->hard_header_len    = ETH_HLEN;     /* 14   */
        dev->addr_len           = ETH_ALEN;     /* 6    */
        dev->tx_queue_len       = 0;
@@ -197,6 +197,7 @@ static __net_init int loopback_net_init(struct net *net)
        if (err)
                goto out_free_netdev;
 
+       BUG_ON(dev->ifindex != LOOPBACK_IFINDEX);
        net->loopback_dev = dev;
        return 0;
 
index 66a9bfe7b1c87f40c8da94c34b3245e1ff3fcc0c..68a43fe602e7a89ccffa54d1a5b82e2da4e5330c 100644 (file)
@@ -546,9 +546,9 @@ static int macvlan_vlan_rx_kill_vid(struct net_device *dev,
        return 0;
 }
 
-static int macvlan_fdb_add(struct ndmsg *ndm,
+static int macvlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
                           struct net_device *dev,
-                          unsigned char *addr,
+                          const unsigned char *addr,
                           u16 flags)
 {
        struct macvlan_dev *vlan = netdev_priv(dev);
@@ -567,7 +567,7 @@ static int macvlan_fdb_add(struct ndmsg *ndm,
 
 static int macvlan_fdb_del(struct ndmsg *ndm,
                           struct net_device *dev,
-                          unsigned char *addr)
+                          const unsigned char *addr)
 {
        struct macvlan_dev *vlan = netdev_priv(dev);
        int err = -EINVAL;
index 3090dc65a6f131541d055a17cb96e7c8c3e9d06a..983bbf4d5ef6a7437118ac2d21ba2cb66599bc8c 100644 (file)
@@ -159,6 +159,19 @@ config MDIO_BUS_MUX_GPIO
          several child MDIO busses to a parent bus.  Child bus
          selection is under the control of GPIO lines.
 
+config MDIO_BUS_MUX_MMIOREG
+       tristate "Support for MMIO device-controlled MDIO bus multiplexers"
+       depends on OF_MDIO
+       select MDIO_BUS_MUX
+       help
+         This module provides a driver for MDIO bus multiplexers that
+         are controlled via a simple memory-mapped device, like an FPGA.
+         The multiplexer connects one of several child MDIO busses to a
+         parent bus.  Child bus selection is under the control of one of
+         the FPGA's registers.
+
+         Currently, only 8-bit registers are supported.
+
 endif # PHYLIB
 
 config MICREL_KS8995MA
index 6d2dc6c94f2e4bc30c41729b9c9d2cdbfe4de120..426674debae44dfb9967e305ad571f7012010afb 100644 (file)
@@ -28,3 +28,4 @@ obj-$(CONFIG_MICREL_KS8995MA) += spi_ks8995.o
 obj-$(CONFIG_AMD_PHY)          += amd.o
 obj-$(CONFIG_MDIO_BUS_MUX)     += mdio-mux.o
 obj-$(CONFIG_MDIO_BUS_MUX_GPIO)        += mdio-mux-gpio.o
+obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o
index b0da0226661f76b1c9af768cc1eeb6f21d5e1b61..24e05c43bff872e34e35e1deb4d385884057ae4a 100644 (file)
@@ -980,7 +980,7 @@ static int dp83640_probe(struct phy_device *phydev)
 
        if (choose_this_phy(clock, phydev)) {
                clock->chosen = dp83640;
-               clock->ptp_clock = ptp_clock_register(&clock->caps);
+               clock->ptp_clock = ptp_clock_register(&clock->caps, &phydev->dev);
                if (IS_ERR(clock->ptp_clock)) {
                        err = PTR_ERR(clock->ptp_clock);
                        goto no_register;
index 6d1e3fcc43e237b8076c2be6a2b5c41db5473d38..ec40ba882f612dba77a72b0a4a9e5689c7898ce5 100644 (file)
@@ -122,6 +122,123 @@ static int lxt971_config_intr(struct phy_device *phydev)
        return err;
 }
 
+/*
+ * The A2 version of the LXT973 chip has an erratum: it randomly returns
+ * the contents of the previous even register when an odd register is
+ * read normally.
+ */
+
+/* Refresh phydev->link working around the A2 erratum: do one dummy BMSR
+ * read, then re-read BMSR (up to 8 tries) until it differs from the
+ * previously-read BMCR value, which indicates a genuine BMSR result. */
+static int lxt973a2_update_link(struct phy_device *phydev)
+{
+       int status;
+       int control;
+       int retry = 8; /* we try 8 times */
+
+       /* Do a fake read */
+       status = phy_read(phydev, MII_BMSR);
+
+       if (status < 0)
+               return status;
+
+       control = phy_read(phydev, MII_BMCR);
+       if (control < 0)
+               return control;
+
+       do {
+               /* Read link and autonegotiation status */
+               status = phy_read(phydev, MII_BMSR);
+       } while (status >= 0 && retry-- && status == control);
+
+       if (status < 0)
+               return status;
+
+       if ((status & BMSR_LSTATUS) == 0)
+               phydev->link = 0;
+       else
+               phydev->link = 1;
+
+       return 0;
+}
+
+/* Erratum-aware .read_status for the LXT973-A2: like genphy_read_status()
+ * but re-reads LPA once if it equals ADVERTISE (possible stale even-
+ * register data per the erratum).
+ * NOTE(review): not declared static although only used in this file's
+ * phy_driver table. Also 'lpagb' is never read from hardware, so the
+ * LPA_1000* branch can never be taken — presumably copied from the
+ * gigabit template; harmless for this 10/100 PHY but dead code. */
+int lxt973a2_read_status(struct phy_device *phydev)
+{
+       int adv;
+       int err;
+       int lpa;
+       int lpagb = 0;
+
+       /* Update the link, but return if there was an error */
+       err = lxt973a2_update_link(phydev);
+       if (err)
+               return err;
+
+       if (AUTONEG_ENABLE == phydev->autoneg) {
+               int retry = 1;
+
+               adv = phy_read(phydev, MII_ADVERTISE);
+
+               if (adv < 0)
+                       return adv;
+
+               do {
+                       lpa = phy_read(phydev, MII_LPA);
+
+                       if (lpa < 0)
+                               return lpa;
+
+                       /* If both registers are equal, it is suspect but not
+                       * impossible, hence a new try
+                       */
+               } while (lpa == adv && retry--);
+
+               lpa &= adv;
+
+               phydev->speed = SPEED_10;
+               phydev->duplex = DUPLEX_HALF;
+               phydev->pause = phydev->asym_pause = 0;
+
+               if (lpagb & (LPA_1000FULL | LPA_1000HALF)) {
+                       phydev->speed = SPEED_1000;
+
+                       if (lpagb & LPA_1000FULL)
+                               phydev->duplex = DUPLEX_FULL;
+               } else if (lpa & (LPA_100FULL | LPA_100HALF)) {
+                       phydev->speed = SPEED_100;
+
+                       if (lpa & LPA_100FULL)
+                               phydev->duplex = DUPLEX_FULL;
+               } else {
+                       if (lpa & LPA_10FULL)
+                               phydev->duplex = DUPLEX_FULL;
+               }
+
+               if (phydev->duplex == DUPLEX_FULL) {
+                       phydev->pause = lpa & LPA_PAUSE_CAP ? 1 : 0;
+                       phydev->asym_pause = lpa & LPA_PAUSE_ASYM ? 1 : 0;
+               }
+       } else {
+               /* Autoneg off: derive forced speed/duplex from BMCR. */
+               int bmcr = phy_read(phydev, MII_BMCR);
+
+               if (bmcr < 0)
+                       return bmcr;
+
+               if (bmcr & BMCR_FULLDPLX)
+                       phydev->duplex = DUPLEX_FULL;
+               else
+                       phydev->duplex = DUPLEX_HALF;
+
+               if (bmcr & BMCR_SPEED1000)
+                       phydev->speed = SPEED_1000;
+               else if (bmcr & BMCR_SPEED100)
+                       phydev->speed = SPEED_100;
+               else
+                       phydev->speed = SPEED_10;
+
+               phydev->pause = phydev->asym_pause = 0;
+       }
+
+       return 0;
+}
+
 static int lxt973_probe(struct phy_device *phydev)
 {
        int val = phy_read(phydev, MII_LXT973_PCR);
@@ -173,6 +290,16 @@ static struct phy_driver lxt97x_driver[] = {
        .ack_interrupt  = lxt971_ack_interrupt,
        .config_intr    = lxt971_config_intr,
        .driver         = { .owner = THIS_MODULE,},
+}, {
+       .phy_id         = 0x00137a10,
+       .name           = "LXT973-A2",
+       .phy_id_mask    = 0xffffffff,
+       .features       = PHY_BASIC_FEATURES,
+       .flags          = 0,
+       .probe          = lxt973_probe,
+       .config_aneg    = lxt973_config_aneg,
+       .read_status    = lxt973a2_read_status,
+       .driver         = { .owner = THIS_MODULE,},
 }, {
        .phy_id         = 0x00137a10,
        .name           = "LXT973",
index 7189adf54bd18eb376cd5fc83745c4c4c0ec130b..899274f2f9b1dd1da0aac0442807b6e42b0cddc2 100644 (file)
 #include <linux/gpio.h>
 #include <linux/mdio-gpio.h>
 
-#ifdef CONFIG_OF_GPIO
 #include <linux/of_gpio.h>
 #include <linux/of_mdio.h>
-#include <linux/of_platform.h>
-#endif
 
 struct mdio_gpio_info {
        struct mdiobb_ctrl ctrl;
        int mdc, mdio;
 };
 
+static void *mdio_gpio_of_get_data(struct platform_device *pdev)
+{
+       struct device_node *np = pdev->dev.of_node;
+       struct mdio_gpio_platform_data *pdata;
+       int ret;
+
+       pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+       if (!pdata)
+               return NULL;
+
+       ret = of_get_gpio(np, 0);
+       if (ret < 0)
+               return NULL;
+
+       pdata->mdc = ret;
+
+       ret = of_get_gpio(np, 1);
+       if (ret < 0)
+               return NULL;
+       pdata->mdio = ret;
+
+       return pdata;
+}
+
 static void mdio_dir(struct mdiobb_ctrl *ctrl, int dir)
 {
        struct mdio_gpio_info *bitbang =
@@ -162,10 +183,15 @@ static void __devexit mdio_gpio_bus_destroy(struct device *dev)
 
 static int __devinit mdio_gpio_probe(struct platform_device *pdev)
 {
-       struct mdio_gpio_platform_data *pdata = pdev->dev.platform_data;
+       struct mdio_gpio_platform_data *pdata;
        struct mii_bus *new_bus;
        int ret;
 
+       if (pdev->dev.of_node)
+               pdata = mdio_gpio_of_get_data(pdev);
+       else
+               pdata = pdev->dev.platform_data;
+
        if (!pdata)
                return -ENODEV;
 
@@ -173,7 +199,11 @@ static int __devinit mdio_gpio_probe(struct platform_device *pdev)
        if (!new_bus)
                return -ENODEV;
 
-       ret = mdiobus_register(new_bus);
+       if (pdev->dev.of_node)
+               ret = of_mdiobus_register(new_bus, pdev->dev.of_node);
+       else
+               ret = mdiobus_register(new_bus);
+
        if (ret)
                mdio_gpio_bus_deinit(&pdev->dev);
 
@@ -187,112 +217,30 @@ static int __devexit mdio_gpio_remove(struct platform_device *pdev)
        return 0;
 }
 
-#ifdef CONFIG_OF_GPIO
-
-static int __devinit mdio_ofgpio_probe(struct platform_device *ofdev)
-{
-       struct mdio_gpio_platform_data *pdata;
-       struct mii_bus *new_bus;
-       int ret;
-
-       pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
-       if (!pdata)
-               return -ENOMEM;
-
-       ret = of_get_gpio(ofdev->dev.of_node, 0);
-       if (ret < 0)
-               goto out_free;
-       pdata->mdc = ret;
-
-       ret = of_get_gpio(ofdev->dev.of_node, 1);
-       if (ret < 0)
-               goto out_free;
-       pdata->mdio = ret;
-
-       new_bus = mdio_gpio_bus_init(&ofdev->dev, pdata, pdata->mdc);
-       if (!new_bus)
-               goto out_free;
-
-       ret = of_mdiobus_register(new_bus, ofdev->dev.of_node);
-       if (ret)
-               mdio_gpio_bus_deinit(&ofdev->dev);
-
-       return ret;
-
-out_free:
-       kfree(pdata);
-       return -ENODEV;
-}
-
-static int __devexit mdio_ofgpio_remove(struct platform_device *ofdev)
-{
-       mdio_gpio_bus_destroy(&ofdev->dev);
-       kfree(ofdev->dev.platform_data);
-
-       return 0;
-}
-
-static struct of_device_id mdio_ofgpio_match[] = {
-       {
-               .compatible = "virtual,mdio-gpio",
-       },
-       {},
-};
-MODULE_DEVICE_TABLE(of, mdio_ofgpio_match);
-
-static struct platform_driver mdio_ofgpio_driver = {
-       .driver = {
-               .name = "mdio-ofgpio",
-               .owner = THIS_MODULE,
-               .of_match_table = mdio_ofgpio_match,
-       },
-       .probe = mdio_ofgpio_probe,
-       .remove = __devexit_p(mdio_ofgpio_remove),
+static struct of_device_id mdio_gpio_of_match[] = {
+       { .compatible = "virtual,mdio-gpio", },
+       { /* sentinel */ }
 };
 
-static inline int __init mdio_ofgpio_init(void)
-{
-       return platform_driver_register(&mdio_ofgpio_driver);
-}
-
-static inline void mdio_ofgpio_exit(void)
-{
-       platform_driver_unregister(&mdio_ofgpio_driver);
-}
-#else
-static inline int __init mdio_ofgpio_init(void) { return 0; }
-static inline void mdio_ofgpio_exit(void) { }
-#endif /* CONFIG_OF_GPIO */
-
 static struct platform_driver mdio_gpio_driver = {
        .probe = mdio_gpio_probe,
        .remove = __devexit_p(mdio_gpio_remove),
        .driver         = {
                .name   = "mdio-gpio",
                .owner  = THIS_MODULE,
+               .of_match_table = mdio_gpio_of_match,
        },
 };
 
 static int __init mdio_gpio_init(void)
 {
-       int ret;
-
-       ret = mdio_ofgpio_init();
-       if (ret)
-               return ret;
-
-       ret = platform_driver_register(&mdio_gpio_driver);
-       if (ret)
-               mdio_ofgpio_exit();
-
-       return ret;
+       return platform_driver_register(&mdio_gpio_driver);
 }
 module_init(mdio_gpio_init);
 
 static void __exit mdio_gpio_exit(void)
 {
        platform_driver_unregister(&mdio_gpio_driver);
-       mdio_ofgpio_exit();
 }
 module_exit(mdio_gpio_exit);
 
diff --git a/drivers/net/phy/mdio-mux-mmioreg.c b/drivers/net/phy/mdio-mux-mmioreg.c
new file mode 100644 (file)
index 0000000..9061ba6
--- /dev/null
@@ -0,0 +1,171 @@
+/*
+ * Simple memory-mapped device MDIO MUX driver
+ *
+ * Author: Timur Tabi <timur@freescale.com>
+ *
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2.  This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/of_address.h>
+#include <linux/of_mdio.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/phy.h>
+#include <linux/mdio-mux.h>
+
+struct mdio_mux_mmioreg_state {
+       void *mux_handle;
+       phys_addr_t phys;
+       uint8_t mask;
+};
+
+/*
+ * MDIO multiplexing switch function
+ *
+ * This function is called by the mdio-mux layer when it thinks the mdio bus
+ * multiplexer needs to switch.
+ *
+ * 'current_child' is the current value of the mux register (masked via
+ * s->mask).
+ *
+ * 'desired_child' is the value of the 'reg' property of the target child MDIO
+ * node.
+ *
+ * The first time this function is called, current_child == -1.
+ *
+ * If current_child == desired_child, then the mux is already set to the
+ * correct bus.
+ */
+static int mdio_mux_mmioreg_switch_fn(int current_child, int desired_child,
+                                     void *data)
+{
+       struct mdio_mux_mmioreg_state *s = data;
+
+       if (current_child ^ desired_child) {
+               void *p = ioremap(s->phys, 1);
+               uint8_t x, y;
+
+               if (!p)
+                       return -ENOMEM;
+
+               x = ioread8(p);
+               y = (x & ~s->mask) | desired_child;
+               if (x != y) {
+                       iowrite8((x & ~s->mask) | desired_child, p);
+                       pr_debug("%s: %02x -> %02x\n", __func__, x, y);
+               }
+
+               iounmap(p);
+       }
+
+       return 0;
+}
+
+static int __devinit mdio_mux_mmioreg_probe(struct platform_device *pdev)
+{
+       struct device_node *np2, *np = pdev->dev.of_node;
+       struct mdio_mux_mmioreg_state *s;
+       struct resource res;
+       const __be32 *iprop;
+       int len, ret;
+
+       dev_dbg(&pdev->dev, "probing node %s\n", np->full_name);
+
+       s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL);
+       if (!s)
+               return -ENOMEM;
+
+       ret = of_address_to_resource(np, 0, &res);
+       if (ret) {
+               dev_err(&pdev->dev, "could not obtain memory map for node %s\n",
+                       np->full_name);
+               return ret;
+       }
+       s->phys = res.start;
+
+       if (resource_size(&res) != sizeof(uint8_t)) {
+               dev_err(&pdev->dev, "only 8-bit registers are supported\n");
+               return -EINVAL;
+       }
+
+       iprop = of_get_property(np, "mux-mask", &len);
+       if (!iprop || len != sizeof(uint32_t)) {
+               dev_err(&pdev->dev, "missing or invalid mux-mask property\n");
+               return -ENODEV;
+       }
+       if (be32_to_cpup(iprop) > 255) {
+               dev_err(&pdev->dev, "only 8-bit registers are supported\n");
+               return -EINVAL;
+       }
+       s->mask = be32_to_cpup(iprop);
+
+       /*
+        * Verify that the 'reg' property of each child MDIO bus does not
+        * set any bits outside of the 'mask'.
+        */
+       for_each_available_child_of_node(np, np2) {
+               iprop = of_get_property(np2, "reg", &len);
+               if (!iprop || len != sizeof(uint32_t)) {
+                       dev_err(&pdev->dev, "mdio-mux child node %s is "
+                               "missing a 'reg' property\n", np2->full_name);
+                       return -ENODEV;
+               }
+               if (be32_to_cpup(iprop) & ~s->mask) {
+                       dev_err(&pdev->dev, "mdio-mux child node %s has "
+                               "a 'reg' value with unmasked bits\n",
+                               np2->full_name);
+                       return -ENODEV;
+               }
+       }
+
+       ret = mdio_mux_init(&pdev->dev, mdio_mux_mmioreg_switch_fn,
+                           &s->mux_handle, s);
+       if (ret) {
+               dev_err(&pdev->dev, "failed to register mdio-mux bus %s\n",
+                       np->full_name);
+               return ret;
+       }
+
+       pdev->dev.platform_data = s;
+
+       return 0;
+}
+
+static int __devexit mdio_mux_mmioreg_remove(struct platform_device *pdev)
+{
+       struct mdio_mux_mmioreg_state *s = dev_get_platdata(&pdev->dev);
+
+       mdio_mux_uninit(s->mux_handle);
+
+       return 0;
+}
+
+static struct of_device_id mdio_mux_mmioreg_match[] = {
+       {
+               .compatible = "mdio-mux-mmioreg",
+       },
+       {},
+};
+MODULE_DEVICE_TABLE(of, mdio_mux_mmioreg_match);
+
+static struct platform_driver mdio_mux_mmioreg_driver = {
+       .driver = {
+               .name           = "mdio-mux-mmioreg",
+               .owner          = THIS_MODULE,
+               .of_match_table = mdio_mux_mmioreg_match,
+       },
+       .probe          = mdio_mux_mmioreg_probe,
+       .remove         = __devexit_p(mdio_mux_mmioreg_remove),
+};
+
+module_platform_driver(mdio_mux_mmioreg_driver);
+
+MODULE_AUTHOR("Timur Tabi <timur@freescale.com>");
+MODULE_DESCRIPTION("Memory-mapped device MDIO MUX driver");
+MODULE_LICENSE("GPL v2");
index 7ca2ff97c368d6027b5c1a2cb1f94566741616b9..ef9ea924822349217f536ad41d124a791aecd2fb 100644 (file)
@@ -1035,66 +1035,6 @@ static void phy_write_mmd_indirect(struct mii_bus *bus, int prtad, int devad,
        bus->write(bus, addr, MII_MMD_DATA, data);
 }
 
-static u32 phy_eee_to_adv(u16 eee_adv)
-{
-       u32 adv = 0;
-
-       if (eee_adv & MDIO_EEE_100TX)
-               adv |= ADVERTISED_100baseT_Full;
-       if (eee_adv & MDIO_EEE_1000T)
-               adv |= ADVERTISED_1000baseT_Full;
-       if (eee_adv & MDIO_EEE_10GT)
-               adv |= ADVERTISED_10000baseT_Full;
-       if (eee_adv & MDIO_EEE_1000KX)
-               adv |= ADVERTISED_1000baseKX_Full;
-       if (eee_adv & MDIO_EEE_10GKX4)
-               adv |= ADVERTISED_10000baseKX4_Full;
-       if (eee_adv & MDIO_EEE_10GKR)
-               adv |= ADVERTISED_10000baseKR_Full;
-
-       return adv;
-}
-
-static u32 phy_eee_to_supported(u16 eee_caported)
-{
-       u32 supported = 0;
-
-       if (eee_caported & MDIO_EEE_100TX)
-               supported |= SUPPORTED_100baseT_Full;
-       if (eee_caported & MDIO_EEE_1000T)
-               supported |= SUPPORTED_1000baseT_Full;
-       if (eee_caported & MDIO_EEE_10GT)
-               supported |= SUPPORTED_10000baseT_Full;
-       if (eee_caported & MDIO_EEE_1000KX)
-               supported |= SUPPORTED_1000baseKX_Full;
-       if (eee_caported & MDIO_EEE_10GKX4)
-               supported |= SUPPORTED_10000baseKX4_Full;
-       if (eee_caported & MDIO_EEE_10GKR)
-               supported |= SUPPORTED_10000baseKR_Full;
-
-       return supported;
-}
-
-static u16 phy_adv_to_eee(u32 adv)
-{
-       u16 reg = 0;
-
-       if (adv & ADVERTISED_100baseT_Full)
-               reg |= MDIO_EEE_100TX;
-       if (adv & ADVERTISED_1000baseT_Full)
-               reg |= MDIO_EEE_1000T;
-       if (adv & ADVERTISED_10000baseT_Full)
-               reg |= MDIO_EEE_10GT;
-       if (adv & ADVERTISED_1000baseKX_Full)
-               reg |= MDIO_EEE_1000KX;
-       if (adv & ADVERTISED_10000baseKX4_Full)
-               reg |= MDIO_EEE_10GKX4;
-       if (adv & ADVERTISED_10000baseKR_Full)
-               reg |= MDIO_EEE_10GKR;
-
-       return reg;
-}
-
 /**
  * phy_init_eee - init and check the EEE feature
  * @phydev: target phy_device struct
@@ -1132,7 +1072,7 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
                if (eee_cap < 0)
                        return eee_cap;
 
-               cap = phy_eee_to_supported(eee_cap);
+               cap = mmd_eee_cap_to_ethtool_sup_t(eee_cap);
                if (!cap)
                        goto eee_exit;
 
@@ -1149,8 +1089,8 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
                if (eee_adv < 0)
                        return eee_adv;
 
-               adv = phy_eee_to_adv(eee_adv);
-               lp = phy_eee_to_adv(eee_lp);
+               adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv);
+               lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp);
                idx = phy_find_setting(phydev->speed, phydev->duplex);
                if ((lp & adv & settings[idx].setting))
                        goto eee_exit;
@@ -1210,21 +1150,21 @@ int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data)
                                    MDIO_MMD_PCS, phydev->addr);
        if (val < 0)
                return val;
-       data->supported = phy_eee_to_supported(val);
+       data->supported = mmd_eee_cap_to_ethtool_sup_t(val);
 
        /* Get advertisement EEE */
        val = phy_read_mmd_indirect(phydev->bus, MDIO_AN_EEE_ADV,
                                    MDIO_MMD_AN, phydev->addr);
        if (val < 0)
                return val;
-       data->advertised = phy_eee_to_adv(val);
+       data->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
 
        /* Get LP advertisement EEE */
        val = phy_read_mmd_indirect(phydev->bus, MDIO_AN_EEE_LPABLE,
                                    MDIO_MMD_AN, phydev->addr);
        if (val < 0)
                return val;
-       data->lp_advertised = phy_eee_to_adv(val);
+       data->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
 
        return 0;
 }
@@ -1241,7 +1181,7 @@ int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data)
 {
        int val;
 
-       val = phy_adv_to_eee(data->advertised);
+       val = ethtool_adv_to_mmd_eee_adv_t(data->advertised);
        phy_write_mmd_indirect(phydev->bus, MDIO_AN_EEE_ADV, MDIO_MMD_AN,
                               phydev->addr, val);
 
index 5c0557222f20b26a10fa450146098b699ff4e98e..eb3f5cefeba3c6ddcbd53a44cb63a759d9b803ef 100644 (file)
@@ -93,6 +93,18 @@ struct ppp_file {
 #define PF_TO_PPP(pf)          PF_TO_X(pf, struct ppp)
 #define PF_TO_CHANNEL(pf)      PF_TO_X(pf, struct channel)
 
+/*
+ * Data structure to hold primary network stats for which
+ * we want to use 64 bit storage.  Other network stats
+ * are stored in dev->stats of the ppp structure.
+ */
+struct ppp_link_stats {
+       u64 rx_packets;
+       u64 tx_packets;
+       u64 rx_bytes;
+       u64 tx_bytes;
+};
+
 /*
  * Data structure describing one ppp unit.
  * A ppp unit corresponds to a ppp network interface device
@@ -136,6 +148,7 @@ struct ppp {
        unsigned pass_len, active_len;
 #endif /* CONFIG_PPP_FILTER */
        struct net      *ppp_net;       /* the net we belong to */
+       struct ppp_link_stats stats64;  /* 64 bit network stats */
 };
 
 /*
@@ -1021,9 +1034,34 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
        return err;
 }
 
+struct rtnl_link_stats64*
+ppp_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats64)
+{
+       struct ppp *ppp = netdev_priv(dev);
+
+       ppp_recv_lock(ppp);
+       stats64->rx_packets = ppp->stats64.rx_packets;
+       stats64->rx_bytes   = ppp->stats64.rx_bytes;
+       ppp_recv_unlock(ppp);
+
+       ppp_xmit_lock(ppp);
+       stats64->tx_packets = ppp->stats64.tx_packets;
+       stats64->tx_bytes   = ppp->stats64.tx_bytes;
+       ppp_xmit_unlock(ppp);
+
+       stats64->rx_errors        = dev->stats.rx_errors;
+       stats64->tx_errors        = dev->stats.tx_errors;
+       stats64->rx_dropped       = dev->stats.rx_dropped;
+       stats64->tx_dropped       = dev->stats.tx_dropped;
+       stats64->rx_length_errors = dev->stats.rx_length_errors;
+
+       return stats64;
+}
+
 static const struct net_device_ops ppp_netdev_ops = {
-       .ndo_start_xmit = ppp_start_xmit,
-       .ndo_do_ioctl   = ppp_net_ioctl,
+       .ndo_start_xmit  = ppp_start_xmit,
+       .ndo_do_ioctl    = ppp_net_ioctl,
+       .ndo_get_stats64 = ppp_get_stats64,
 };
 
 static void ppp_setup(struct net_device *dev)
@@ -1157,8 +1195,8 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
 #endif /* CONFIG_PPP_FILTER */
        }
 
-       ++ppp->dev->stats.tx_packets;
-       ppp->dev->stats.tx_bytes += skb->len - 2;
+       ++ppp->stats64.tx_packets;
+       ppp->stats64.tx_bytes += skb->len - 2;
 
        switch (proto) {
        case PPP_IP:
@@ -1745,8 +1783,8 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
                break;
        }
 
-       ++ppp->dev->stats.rx_packets;
-       ppp->dev->stats.rx_bytes += skb->len - 2;
+       ++ppp->stats64.rx_packets;
+       ppp->stats64.rx_bytes += skb->len - 2;
 
        npi = proto_to_npindex(proto);
        if (npi < 0) {
@@ -2570,12 +2608,12 @@ ppp_get_stats(struct ppp *ppp, struct ppp_stats *st)
        struct slcompress *vj = ppp->vj;
 
        memset(st, 0, sizeof(*st));
-       st->p.ppp_ipackets = ppp->dev->stats.rx_packets;
+       st->p.ppp_ipackets = ppp->stats64.rx_packets;
        st->p.ppp_ierrors = ppp->dev->stats.rx_errors;
-       st->p.ppp_ibytes = ppp->dev->stats.rx_bytes;
-       st->p.ppp_opackets = ppp->dev->stats.tx_packets;
+       st->p.ppp_ibytes = ppp->stats64.rx_bytes;
+       st->p.ppp_opackets = ppp->stats64.tx_packets;
        st->p.ppp_oerrors = ppp->dev->stats.tx_errors;
-       st->p.ppp_obytes = ppp->dev->stats.tx_bytes;
+       st->p.ppp_obytes = ppp->stats64.tx_bytes;
        if (!vj)
                return;
        st->vj.vjs_packets = vj->sls_o_compressed + vj->sls_o_uncompressed;
index 6a7260b03a1e0a91c2186a913bf134b45e6d1047..6b08bd419fba912b239e5f6ad2c9151922e6649a 100644 (file)
@@ -21,7 +21,7 @@ config NET_TEAM_MODE_BROADCAST
        ---help---
          Basic mode where packets are transmitted always by all suitable ports.
 
-         All added ports are setup to have team's mac address.
+         All added ports are setup to have team's device address.
 
          To compile this team mode as a module, choose M here: the module
          will be called team_mode_broadcast.
@@ -33,7 +33,7 @@ config NET_TEAM_MODE_ROUNDROBIN
          Basic mode where port used for transmitting packets is selected in
          round-robin fashion using packet counter.
 
-         All added ports are setup to have team's mac address.
+         All added ports are setup to have team's device address.
 
          To compile this team mode as a module, choose M here: the module
          will be called team_mode_roundrobin.
index f8cd61f449a4772da5a50a446cd07fc80d5928c8..5c7547c4f802550426c8f9769f9d9ba9a93becf9 100644 (file)
@@ -54,29 +54,29 @@ static struct team_port *team_port_get_rtnl(const struct net_device *dev)
 }
 
 /*
- * Since the ability to change mac address for open port device is tested in
+ * Since the ability to change device address for open port device is tested in
  * team_port_add, this function can be called without control of return value
  */
-static int __set_port_mac(struct net_device *port_dev,
-                         const unsigned char *dev_addr)
+static int __set_port_dev_addr(struct net_device *port_dev,
+                              const unsigned char *dev_addr)
 {
        struct sockaddr addr;
 
-       memcpy(addr.sa_data, dev_addr, ETH_ALEN);
-       addr.sa_family = ARPHRD_ETHER;
+       memcpy(addr.sa_data, dev_addr, port_dev->addr_len);
+       addr.sa_family = port_dev->type;
        return dev_set_mac_address(port_dev, &addr);
 }
 
-static int team_port_set_orig_mac(struct team_port *port)
+static int team_port_set_orig_dev_addr(struct team_port *port)
 {
-       return __set_port_mac(port->dev, port->orig.dev_addr);
+       return __set_port_dev_addr(port->dev, port->orig.dev_addr);
 }
 
-int team_port_set_team_mac(struct team_port *port)
+int team_port_set_team_dev_addr(struct team_port *port)
 {
-       return __set_port_mac(port->dev, port->team->dev->dev_addr);
+       return __set_port_dev_addr(port->dev, port->team->dev->dev_addr);
 }
-EXPORT_SYMBOL(team_port_set_team_mac);
+EXPORT_SYMBOL(team_port_set_team_dev_addr);
 
 static void team_refresh_port_linkup(struct team_port *port)
 {
@@ -658,6 +658,122 @@ static rx_handler_result_t team_handle_frame(struct sk_buff **pskb)
 }
 
 
+/*************************************
+ * Multiqueue Tx port select override
+ *************************************/
+
+static int team_queue_override_init(struct team *team)
+{
+       struct list_head *listarr;
+       unsigned int queue_cnt = team->dev->num_tx_queues - 1;
+       unsigned int i;
+
+       if (!queue_cnt)
+               return 0;
+       listarr = kmalloc(sizeof(struct list_head) * queue_cnt, GFP_KERNEL);
+       if (!listarr)
+               return -ENOMEM;
+       team->qom_lists = listarr;
+       for (i = 0; i < queue_cnt; i++)
+               INIT_LIST_HEAD(listarr++);
+       return 0;
+}
+
+static void team_queue_override_fini(struct team *team)
+{
+       kfree(team->qom_lists);
+}
+
+static struct list_head *__team_get_qom_list(struct team *team, u16 queue_id)
+{
+       return &team->qom_lists[queue_id - 1];
+}
+
+/*
+ * note: already called with rcu_read_lock
+ */
+static bool team_queue_override_transmit(struct team *team, struct sk_buff *skb)
+{
+       struct list_head *qom_list;
+       struct team_port *port;
+
+       if (!team->queue_override_enabled || !skb->queue_mapping)
+               return false;
+       qom_list = __team_get_qom_list(team, skb->queue_mapping);
+       list_for_each_entry_rcu(port, qom_list, qom_list) {
+               if (!team_dev_queue_xmit(team, port, skb))
+                       return true;
+       }
+       return false;
+}
+
+static void __team_queue_override_port_del(struct team *team,
+                                          struct team_port *port)
+{
+       list_del_rcu(&port->qom_list);
+       synchronize_rcu();
+       INIT_LIST_HEAD(&port->qom_list);
+}
+
+static bool team_queue_override_port_has_gt_prio_than(struct team_port *port,
+                                                     struct team_port *cur)
+{
+       if (port->priority < cur->priority)
+               return true;
+       if (port->priority > cur->priority)
+               return false;
+       if (port->index < cur->index)
+               return true;
+       return false;
+}
+
+static void __team_queue_override_port_add(struct team *team,
+                                          struct team_port *port)
+{
+       struct team_port *cur;
+       struct list_head *qom_list;
+       struct list_head *node;
+
+       if (!port->queue_id || !team_port_enabled(port))
+               return;
+
+       qom_list = __team_get_qom_list(team, port->queue_id);
+       node = qom_list;
+       list_for_each_entry(cur, qom_list, qom_list) {
+               if (team_queue_override_port_has_gt_prio_than(port, cur))
+                       break;
+               node = &cur->qom_list;
+       }
+       list_add_tail_rcu(&port->qom_list, node);
+}
+
+static void __team_queue_override_enabled_check(struct team *team)
+{
+       struct team_port *port;
+       bool enabled = false;
+
+       list_for_each_entry(port, &team->port_list, list) {
+               if (!list_empty(&port->qom_list)) {
+                       enabled = true;
+                       break;
+               }
+       }
+       if (enabled == team->queue_override_enabled)
+               return;
+       netdev_dbg(team->dev, "%s queue override\n",
+                  enabled ? "Enabling" : "Disabling");
+       team->queue_override_enabled = enabled;
+}
+
+static void team_queue_override_port_refresh(struct team *team,
+                                            struct team_port *port)
+{
+       __team_queue_override_port_del(team, port);
+       __team_queue_override_port_add(team, port);
+       __team_queue_override_enabled_check(team);
+}
+
+
 /****************
  * Port handling
  ****************/
@@ -688,6 +804,7 @@ static void team_port_enable(struct team *team,
        hlist_add_head_rcu(&port->hlist,
                           team_port_index_hash(team, port->index));
        team_adjust_ops(team);
+       team_queue_override_port_refresh(team, port);
        if (team->ops.port_enabled)
                team->ops.port_enabled(team, port);
 }
@@ -716,6 +833,7 @@ static void team_port_disable(struct team *team,
        hlist_del_rcu(&port->hlist);
        __reconstruct_port_hlist(team, port->index);
        port->index = -1;
+       team_queue_override_port_refresh(team, port);
        __team_adjust_ops(team, team->en_port_count - 1);
        /*
         * Wait until readers see adjusted ops. This ensures that
@@ -849,6 +967,8 @@ static struct netpoll_info *team_netpoll_info(struct team *team)
 #endif
 
 static void __team_port_change_port_added(struct team_port *port, bool linkup);
+static int team_dev_type_check_change(struct net_device *dev,
+                                     struct net_device *port_dev);
 
 static int team_port_add(struct team *team, struct net_device *port_dev)
 {
@@ -857,9 +977,8 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
        char *portname = port_dev->name;
        int err;
 
-       if (port_dev->flags & IFF_LOOPBACK ||
-           port_dev->type != ARPHRD_ETHER) {
-               netdev_err(dev, "Device %s is of an unsupported type\n",
+       if (port_dev->flags & IFF_LOOPBACK) {
+               netdev_err(dev, "Device %s is loopback device. Loopback devices can't be added as a team port\n",
                           portname);
                return -EINVAL;
        }
@@ -870,6 +989,17 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
                return -EBUSY;
        }
 
+       if (port_dev->features & NETIF_F_VLAN_CHALLENGED &&
+           vlan_uses_dev(dev)) {
+               netdev_err(dev, "Device %s is VLAN challenged and team device has VLAN set up\n",
+                          portname);
+               return -EPERM;
+       }
+
+       err = team_dev_type_check_change(dev, port_dev);
+       if (err)
+               return err;
+
        if (port_dev->flags & IFF_UP) {
                netdev_err(dev, "Device %s is up. Set it down before adding it as a team port\n",
                           portname);
@@ -883,6 +1013,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
 
        port->dev = port_dev;
        port->team = team;
+       INIT_LIST_HEAD(&port->qom_list);
 
        port->orig.mtu = port_dev->mtu;
        err = dev_set_mtu(port_dev, dev->mtu);
@@ -891,7 +1022,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
                goto err_set_mtu;
        }
 
-       memcpy(port->orig.dev_addr, port_dev->dev_addr, ETH_ALEN);
+       memcpy(port->orig.dev_addr, port_dev->dev_addr, port_dev->addr_len);
 
        err = team_port_enter(team, port);
        if (err) {
@@ -972,7 +1103,7 @@ err_vids_add:
 
 err_dev_open:
        team_port_leave(team, port);
-       team_port_set_orig_mac(port);
+       team_port_set_orig_dev_addr(port);
 
 err_port_enter:
        dev_set_mtu(port_dev, port->orig.mtu);
@@ -1010,7 +1141,7 @@ static int team_port_del(struct team *team, struct net_device *port_dev)
        vlan_vids_del_by_dev(port_dev, dev);
        dev_close(port_dev);
        team_port_leave(team, port);
-       team_port_set_orig_mac(port);
+       team_port_set_orig_dev_addr(port);
        dev_set_mtu(port_dev, port->orig.mtu);
        synchronize_rcu();
        kfree(port);
@@ -1095,6 +1226,49 @@ static int team_user_linkup_en_option_set(struct team *team,
        return 0;
 }
 
+static int team_priority_option_get(struct team *team,
+                                   struct team_gsetter_ctx *ctx)
+{
+       struct team_port *port = ctx->info->port;
+
+       ctx->data.s32_val = port->priority;
+       return 0;
+}
+
+static int team_priority_option_set(struct team *team,
+                                   struct team_gsetter_ctx *ctx)
+{
+       struct team_port *port = ctx->info->port;
+
+       port->priority = ctx->data.s32_val;
+       team_queue_override_port_refresh(team, port);
+       return 0;
+}
+
+static int team_queue_id_option_get(struct team *team,
+                                   struct team_gsetter_ctx *ctx)
+{
+       struct team_port *port = ctx->info->port;
+
+       ctx->data.u32_val = port->queue_id;
+       return 0;
+}
+
+static int team_queue_id_option_set(struct team *team,
+                                   struct team_gsetter_ctx *ctx)
+{
+       struct team_port *port = ctx->info->port;
+
+       if (port->queue_id == ctx->data.u32_val)
+               return 0;
+       if (ctx->data.u32_val >= team->dev->real_num_tx_queues)
+               return -EINVAL;
+       port->queue_id = ctx->data.u32_val;
+       team_queue_override_port_refresh(team, port);
+       return 0;
+}
+
+
 static const struct team_option team_options[] = {
        {
                .name = "mode",
@@ -1123,6 +1297,20 @@ static const struct team_option team_options[] = {
                .getter = team_user_linkup_en_option_get,
                .setter = team_user_linkup_en_option_set,
        },
+       {
+               .name = "priority",
+               .type = TEAM_OPTION_TYPE_S32,
+               .per_port = true,
+               .getter = team_priority_option_get,
+               .setter = team_priority_option_set,
+       },
+       {
+               .name = "queue_id",
+               .type = TEAM_OPTION_TYPE_U32,
+               .per_port = true,
+               .getter = team_queue_id_option_get,
+               .setter = team_queue_id_option_set,
+       },
 };
 
 static struct lock_class_key team_netdev_xmit_lock_key;
@@ -1158,6 +1346,9 @@ static int team_init(struct net_device *dev)
        for (i = 0; i < TEAM_PORT_HASHENTRIES; i++)
                INIT_HLIST_HEAD(&team->en_port_hlist[i]);
        INIT_LIST_HEAD(&team->port_list);
+       err = team_queue_override_init(team);
+       if (err)
+               goto err_team_queue_override_init;
 
        team_adjust_ops(team);
 
@@ -1173,6 +1364,8 @@ static int team_init(struct net_device *dev)
        return 0;
 
 err_options_register:
+       team_queue_override_fini(team);
+err_team_queue_override_init:
        free_percpu(team->pcpu_stats);
 
        return err;
@@ -1190,6 +1383,7 @@ static void team_uninit(struct net_device *dev)
 
        __team_change_mode(team, NULL); /* cleanup */
        __team_options_unregister(team, team_options, ARRAY_SIZE(team_options));
+       team_queue_override_fini(team);
        mutex_unlock(&team->lock);
 }
 
@@ -1219,10 +1413,12 @@ static int team_close(struct net_device *dev)
 static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct team *team = netdev_priv(dev);
-       bool tx_success = false;
+       bool tx_success;
        unsigned int len = skb->len;
 
-       tx_success = team->ops.transmit(team, skb);
+       tx_success = team_queue_override_transmit(team, skb);
+       if (!tx_success)
+               tx_success = team->ops.transmit(team, skb);
        if (tx_success) {
                struct team_pcpu_stats *pcpu_stats;
 
@@ -1296,17 +1492,18 @@ static void team_set_rx_mode(struct net_device *dev)
 
 static int team_set_mac_address(struct net_device *dev, void *p)
 {
+       struct sockaddr *addr = p;
        struct team *team = netdev_priv(dev);
        struct team_port *port;
-       int err;
 
-       err = eth_mac_addr(dev, p);
-       if (err)
-               return err;
+       if (dev->type == ARPHRD_ETHER && !is_valid_ether_addr(addr->sa_data))
+               return -EADDRNOTAVAIL;
+       memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+       dev->addr_assign_type &= ~NET_ADDR_RANDOM;
        rcu_read_lock();
        list_for_each_entry_rcu(port, &team->port_list, list)
-               if (team->ops.port_change_mac)
-                       team->ops.port_change_mac(team, port);
+               if (team->ops.port_change_dev_addr)
+                       team->ops.port_change_dev_addr(team, port);
        rcu_read_unlock();
        return 0;
 }
@@ -1537,6 +1734,45 @@ static const struct net_device_ops team_netdev_ops = {
  * rt netlink interface
  ***********************/
 
+static void team_setup_by_port(struct net_device *dev,
+                              struct net_device *port_dev)
+{
+       dev->header_ops = port_dev->header_ops;
+       dev->type = port_dev->type;
+       dev->hard_header_len = port_dev->hard_header_len;
+       dev->addr_len = port_dev->addr_len;
+       dev->mtu = port_dev->mtu;
+       memcpy(dev->broadcast, port_dev->broadcast, port_dev->addr_len);
+       memcpy(dev->dev_addr, port_dev->dev_addr, port_dev->addr_len);
+       dev->addr_assign_type &= ~NET_ADDR_RANDOM;
+}
+
+static int team_dev_type_check_change(struct net_device *dev,
+                                     struct net_device *port_dev)
+{
+       struct team *team = netdev_priv(dev);
+       char *portname = port_dev->name;
+       int err;
+
+       if (dev->type == port_dev->type)
+               return 0;
+       if (!list_empty(&team->port_list)) {
+               netdev_err(dev, "Device %s is of different type\n", portname);
+               return -EBUSY;
+       }
+       err = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE, dev);
+       err = notifier_to_errno(err);
+       if (err) {
+               netdev_err(dev, "Refused to change device type\n");
+               return err;
+       }
+       dev_uc_flush(dev);
+       dev_mc_flush(dev);
+       team_setup_by_port(dev, port_dev);
+       call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
+       return 0;
+}
+
 static void team_setup(struct net_device *dev)
 {
        ether_setup(dev);
@@ -1651,7 +1887,7 @@ static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
        if (!msg)
                return -ENOMEM;
 
-       hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq,
+       hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
                          &team_nl_family, 0, TEAM_CMD_NOOP);
        if (!hdr) {
                err = -EMSGSIZE;
@@ -1660,7 +1896,7 @@ static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
 
        genlmsg_end(msg, hdr);
 
-       return genlmsg_unicast(genl_info_net(info), msg, info->snd_pid);
+       return genlmsg_unicast(genl_info_net(info), msg, info->snd_portid);
 
 err_msg_put:
        nlmsg_free(msg);
@@ -1717,7 +1953,7 @@ static int team_nl_send_generic(struct genl_info *info, struct team *team,
        if (err < 0)
                goto err_fill;
 
-       err = genlmsg_unicast(genl_info_net(info), skb, info->snd_pid);
+       err = genlmsg_unicast(genl_info_net(info), skb, info->snd_portid);
        return err;
 
 err_fill:
@@ -1726,11 +1962,11 @@ err_fill:
 }
 
 typedef int team_nl_send_func_t(struct sk_buff *skb,
-                               struct team *team, u32 pid);
+                               struct team *team, u32 portid);
 
-static int team_nl_send_unicast(struct sk_buff *skb, struct team *team, u32 pid)
+static int team_nl_send_unicast(struct sk_buff *skb, struct team *team, u32 portid)
 {
-       return genlmsg_unicast(dev_net(team->dev), skb, pid);
+       return genlmsg_unicast(dev_net(team->dev), skb, portid);
 }
 
 static int team_nl_fill_one_option_get(struct sk_buff *skb, struct team *team,
@@ -1790,6 +2026,12 @@ static int team_nl_fill_one_option_get(struct sk_buff *skb, struct team *team,
                    nla_put_flag(skb, TEAM_ATTR_OPTION_DATA))
                        goto nest_cancel;
                break;
+       case TEAM_OPTION_TYPE_S32:
+               if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_S32))
+                       goto nest_cancel;
+               if (nla_put_s32(skb, TEAM_ATTR_OPTION_DATA, ctx.data.s32_val))
+                       goto nest_cancel;
+               break;
        default:
                BUG();
        }
@@ -1809,13 +2051,13 @@ nest_cancel:
 }
 
 static int __send_and_alloc_skb(struct sk_buff **pskb,
-                               struct team *team, u32 pid,
+                               struct team *team, u32 portid,
                                team_nl_send_func_t *send_func)
 {
        int err;
 
        if (*pskb) {
-               err = send_func(*pskb, team, pid);
+               err = send_func(*pskb, team, portid);
                if (err)
                        return err;
        }
@@ -1825,7 +2067,7 @@ static int __send_and_alloc_skb(struct sk_buff **pskb,
        return 0;
 }
 
-static int team_nl_send_options_get(struct team *team, u32 pid, u32 seq,
+static int team_nl_send_options_get(struct team *team, u32 portid, u32 seq,
                                    int flags, team_nl_send_func_t *send_func,
                                    struct list_head *sel_opt_inst_list)
 {
@@ -1842,11 +2084,11 @@ static int team_nl_send_options_get(struct team *team, u32 pid, u32 seq,
                                    struct team_option_inst, tmp_list);
 
 start_again:
-       err = __send_and_alloc_skb(&skb, team, pid, send_func);
+       err = __send_and_alloc_skb(&skb, team, portid, send_func);
        if (err)
                return err;
 
-       hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags | NLM_F_MULTI,
+       hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI,
                          TEAM_CMD_OPTIONS_GET);
        if (!hdr)
                return -EMSGSIZE;
@@ -1879,15 +2121,15 @@ start_again:
                goto start_again;
 
 send_done:
-       nlh = nlmsg_put(skb, pid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI);
+       nlh = nlmsg_put(skb, portid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI);
        if (!nlh) {
-               err = __send_and_alloc_skb(&skb, team, pid, send_func);
+               err = __send_and_alloc_skb(&skb, team, portid, send_func);
                if (err)
                        goto errout;
                goto send_done;
        }
 
-       return send_func(skb, team, pid);
+       return send_func(skb, team, portid);
 
 nla_put_failure:
        err = -EMSGSIZE;
@@ -1910,7 +2152,7 @@ static int team_nl_cmd_options_get(struct sk_buff *skb, struct genl_info *info)
 
        list_for_each_entry(opt_inst, &team->option_inst_list, list)
                list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
-       err = team_nl_send_options_get(team, info->snd_pid, info->snd_seq,
+       err = team_nl_send_options_get(team, info->snd_portid, info->snd_seq,
                                       NLM_F_ACK, team_nl_send_unicast,
                                       &sel_opt_inst_list);
 
@@ -1978,6 +2220,9 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
                case NLA_FLAG:
                        opt_type = TEAM_OPTION_TYPE_BOOL;
                        break;
+               case NLA_S32:
+                       opt_type = TEAM_OPTION_TYPE_S32;
+                       break;
                default:
                        goto team_put;
                }
@@ -2034,6 +2279,9 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
                        case TEAM_OPTION_TYPE_BOOL:
                                ctx.data.bool_val = attr_data ? true : false;
                                break;
+                       case TEAM_OPTION_TYPE_S32:
+                               ctx.data.s32_val = nla_get_s32(attr_data);
+                               break;
                        default:
                                BUG();
                        }
@@ -2058,7 +2306,7 @@ team_put:
 }
 
 static int team_nl_fill_port_list_get(struct sk_buff *skb,
-                                     u32 pid, u32 seq, int flags,
+                                     u32 portid, u32 seq, int flags,
                                      struct team *team,
                                      bool fillall)
 {
@@ -2066,7 +2314,7 @@ static int team_nl_fill_port_list_get(struct sk_buff *skb,
        void *hdr;
        struct team_port *port;
 
-       hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags,
+       hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags,
                          TEAM_CMD_PORT_LIST_GET);
        if (!hdr)
                return -EMSGSIZE;
@@ -2115,7 +2363,7 @@ static int team_nl_fill_port_list_get_all(struct sk_buff *skb,
                                          struct genl_info *info, int flags,
                                          struct team *team)
 {
-       return team_nl_fill_port_list_get(skb, info->snd_pid,
+       return team_nl_fill_port_list_get(skb, info->snd_portid,
                                          info->snd_seq, NLM_F_ACK,
                                          team, true);
 }
@@ -2168,7 +2416,7 @@ static struct genl_multicast_group team_change_event_mcgrp = {
 };
 
 static int team_nl_send_multicast(struct sk_buff *skb,
-                                 struct team *team, u32 pid)
+                                 struct team *team, u32 portid)
 {
        return genlmsg_multicast_netns(dev_net(team->dev), skb, 0,
                                       team_change_event_mcgrp.id, GFP_KERNEL);
@@ -2246,7 +2494,7 @@ static void __team_options_change_check(struct team *team)
                        list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
        }
        err = team_nl_send_event_options_get(team, &sel_opt_inst_list);
-       if (err)
+       if (err && err != -ESRCH)
                netdev_warn(team->dev, "Failed to send options change via netlink (err %d)\n",
                            err);
 }
@@ -2275,9 +2523,9 @@ static void __team_port_change_send(struct team_port *port, bool linkup)
 
 send_event:
        err = team_nl_send_event_port_list_get(port->team);
-       if (err)
-               netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink\n",
-                           port->dev->name);
+       if (err && err != -ESRCH)
+               netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink (err %d)\n",
+                           port->dev->name, err);
 
 }
 
index c96e4d2967f01e1588d5b44d07ba27ceae6853d6..9db0171e93669f483eeae19791ed4e090c333bb0 100644 (file)
@@ -48,18 +48,18 @@ static bool bc_transmit(struct team *team, struct sk_buff *skb)
 
 static int bc_port_enter(struct team *team, struct team_port *port)
 {
-       return team_port_set_team_mac(port);
+       return team_port_set_team_dev_addr(port);
 }
 
-static void bc_port_change_mac(struct team *team, struct team_port *port)
+static void bc_port_change_dev_addr(struct team *team, struct team_port *port)
 {
-       team_port_set_team_mac(port);
+       team_port_set_team_dev_addr(port);
 }
 
 static const struct team_mode_ops bc_mode_ops = {
        .transmit               = bc_transmit,
        .port_enter             = bc_port_enter,
-       .port_change_mac        = bc_port_change_mac,
+       .port_change_dev_addr   = bc_port_change_dev_addr,
 };
 
 static const struct team_mode bc_mode = {
index ad7ed0ec544c436fe8fa7df1b3a4360ae011d1f0..105135aa8f0586844927b641951087110a834cc1 100644 (file)
@@ -66,18 +66,18 @@ drop:
 
 static int rr_port_enter(struct team *team, struct team_port *port)
 {
-       return team_port_set_team_mac(port);
+       return team_port_set_team_dev_addr(port);
 }
 
-static void rr_port_change_mac(struct team *team, struct team_port *port)
+static void rr_port_change_dev_addr(struct team *team, struct team_port *port)
 {
-       team_port_set_team_mac(port);
+       team_port_set_team_dev_addr(port);
 }
 
 static const struct team_mode_ops rr_mode_ops = {
        .transmit               = rr_transmit,
        .port_enter             = rr_port_enter,
-       .port_change_mac        = rr_port_change_mac,
+       .port_change_dev_addr   = rr_port_change_dev_addr,
 };
 
 static const struct team_mode rr_mode = {
index 3a16d4fdaa052817135c885b23cb28d90d872ec6..0873cdcf39bebb09d75b1cadbffce4c2d614f593 100644 (file)
@@ -68,6 +68,7 @@
 #include <net/netns/generic.h>
 #include <net/rtnetlink.h>
 #include <net/sock.h>
+#include <net/cls_cgroup.h>
 
 #include <asm/uaccess.h>
 
@@ -120,8 +121,8 @@ struct tun_sock;
 struct tun_struct {
        struct tun_file         *tfile;
        unsigned int            flags;
-       uid_t                   owner;
-       gid_t                   group;
+       kuid_t                  owner;
+       kgid_t                  group;
 
        struct net_device       *dev;
        netdev_features_t       set_features;
@@ -1031,8 +1032,8 @@ static void tun_setup(struct net_device *dev)
 {
        struct tun_struct *tun = netdev_priv(dev);
 
-       tun->owner = -1;
-       tun->group = -1;
+       tun->owner = INVALID_UID;
+       tun->group = INVALID_GID;
 
        dev->ethtool_ops = &tun_ethtool_ops;
        dev->destructor = tun_free_netdev;
@@ -1155,14 +1156,20 @@ static ssize_t tun_show_owner(struct device *dev, struct device_attribute *attr,
                              char *buf)
 {
        struct tun_struct *tun = netdev_priv(to_net_dev(dev));
-       return sprintf(buf, "%d\n", tun->owner);
+       return uid_valid(tun->owner)?
+               sprintf(buf, "%u\n",
+                       from_kuid_munged(current_user_ns(), tun->owner)):
+               sprintf(buf, "-1\n");
 }
 
 static ssize_t tun_show_group(struct device *dev, struct device_attribute *attr,
                              char *buf)
 {
        struct tun_struct *tun = netdev_priv(to_net_dev(dev));
-       return sprintf(buf, "%d\n", tun->group);
+       return gid_valid(tun->group) ?
+               sprintf(buf, "%u\n",
+                       from_kgid_munged(current_user_ns(), tun->group)):
+               sprintf(buf, "-1\n");
 }
 
 static DEVICE_ATTR(tun_flags, 0444, tun_show_flags, NULL);
@@ -1189,8 +1196,8 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
                else
                        return -EINVAL;
 
-               if (((tun->owner != -1 && cred->euid != tun->owner) ||
-                    (tun->group != -1 && !in_egroup_p(tun->group))) &&
+               if (((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
+                    (gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
                    !capable(CAP_NET_ADMIN))
                        return -EPERM;
                err = security_tun_dev_attach(tun->socket.sk);
@@ -1374,6 +1381,8 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
        void __user* argp = (void __user*)arg;
        struct sock_fprog fprog;
        struct ifreq ifr;
+       kuid_t owner;
+       kgid_t group;
        int sndbuf;
        int vnet_hdr_sz;
        int ret;
@@ -1447,16 +1456,26 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
 
        case TUNSETOWNER:
                /* Set owner of the device */
-               tun->owner = (uid_t) arg;
-
-               tun_debug(KERN_INFO, tun, "owner set to %d\n", tun->owner);
+               owner = make_kuid(current_user_ns(), arg);
+               if (!uid_valid(owner)) {
+                       ret = -EINVAL;
+                       break;
+               }
+               tun->owner = owner;
+               tun_debug(KERN_INFO, tun, "owner set to %d\n",
+                         from_kuid(&init_user_ns, tun->owner));
                break;
 
        case TUNSETGROUP:
                /* Set group of the device */
-               tun->group= (gid_t) arg;
-
-               tun_debug(KERN_INFO, tun, "group set to %d\n", tun->group);
+               group = make_kgid(current_user_ns(), arg);
+               if (!gid_valid(group)) {
+                       ret = -EINVAL;
+                       break;
+               }
+               tun->group = group;
+               tun_debug(KERN_INFO, tun, "group set to %d\n",
+                         from_kgid(&init_user_ns, tun->group));
                break;
 
        case TUNSETLINK:
index 32e31c5c5dc6bfe95b4d38e377a508448d85c890..33ab824773c5a795dc49cc91fd28e67bb3e501a5 100644 (file)
@@ -221,7 +221,8 @@ static int ax88172_bind(struct usbnet *dev, struct usb_interface *intf)
        /* Get the MAC address */
        ret = asix_read_cmd(dev, AX88172_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf);
        if (ret < 0) {
-               dbg("read AX_CMD_READ_NODE_ID failed: %d", ret);
+               netdev_dbg(dev->net, "read AX_CMD_READ_NODE_ID failed: %d\n",
+                          ret);
                goto out;
        }
        memcpy(dev->net->dev_addr, buf, ETH_ALEN);
@@ -303,7 +304,7 @@ static int ax88772_reset(struct usbnet *dev)
 
        ret = asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, embd_phy, 0, 0, NULL);
        if (ret < 0) {
-               dbg("Select PHY #1 failed: %d", ret);
+               netdev_dbg(dev->net, "Select PHY #1 failed: %d\n", ret);
                goto out;
        }
 
@@ -331,13 +332,13 @@ static int ax88772_reset(struct usbnet *dev)
 
        msleep(150);
        rx_ctl = asix_read_rx_ctl(dev);
-       dbg("RX_CTL is 0x%04x after software reset", rx_ctl);
+       netdev_dbg(dev->net, "RX_CTL is 0x%04x after software reset\n", rx_ctl);
        ret = asix_write_rx_ctl(dev, 0x0000);
        if (ret < 0)
                goto out;
 
        rx_ctl = asix_read_rx_ctl(dev);
-       dbg("RX_CTL is 0x%04x setting to 0x0000", rx_ctl);
+       netdev_dbg(dev->net, "RX_CTL is 0x%04x setting to 0x0000\n", rx_ctl);
 
        ret = asix_sw_reset(dev, AX_SWRESET_PRL);
        if (ret < 0)
@@ -364,7 +365,7 @@ static int ax88772_reset(struct usbnet *dev)
                                AX88772_IPG0_DEFAULT | AX88772_IPG1_DEFAULT,
                                AX88772_IPG2_DEFAULT, 0, NULL);
        if (ret < 0) {
-               dbg("Write IPG,IPG1,IPG2 failed: %d", ret);
+               netdev_dbg(dev->net, "Write IPG,IPG1,IPG2 failed: %d\n", ret);
                goto out;
        }
 
@@ -381,10 +382,13 @@ static int ax88772_reset(struct usbnet *dev)
                goto out;
 
        rx_ctl = asix_read_rx_ctl(dev);
-       dbg("RX_CTL is 0x%04x after all initializations", rx_ctl);
+       netdev_dbg(dev->net, "RX_CTL is 0x%04x after all initializations\n",
+                  rx_ctl);
 
        rx_ctl = asix_read_medium_status(dev);
-       dbg("Medium Status is 0x%04x after all initializations", rx_ctl);
+       netdev_dbg(dev->net,
+                  "Medium Status is 0x%04x after all initializations\n",
+                  rx_ctl);
 
        return 0;
 
@@ -416,7 +420,7 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
        /* Get the MAC address */
        ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf);
        if (ret < 0) {
-               dbg("Failed to read MAC address: %d", ret);
+               netdev_dbg(dev->net, "Failed to read MAC address: %d\n", ret);
                return ret;
        }
        memcpy(dev->net->dev_addr, buf, ETH_ALEN);
@@ -439,7 +443,7 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
        /* Reset the PHY to normal operation mode */
        ret = asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, embd_phy, 0, 0, NULL);
        if (ret < 0) {
-               dbg("Select PHY #1 failed: %d", ret);
+               netdev_dbg(dev->net, "Select PHY #1 failed: %d\n", ret);
                return ret;
        }
 
@@ -459,7 +463,7 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
 
        /* Read PHYID register *AFTER* the PHY was reset properly */
        phyid = asix_get_phyid(dev);
-       dbg("PHYID=0x%08x", phyid);
+       netdev_dbg(dev->net, "PHYID=0x%08x\n", phyid);
 
        /* Asix framing packs multiple eth frames into a 2K usb bulk transfer */
        if (dev->driver_info->flags & FLAG_FRAMING_AX) {
@@ -575,13 +579,13 @@ static int ax88178_reset(struct usbnet *dev)
        u32 phyid;
 
        asix_read_cmd(dev, AX_CMD_READ_GPIOS, 0, 0, 1, &status);
-       dbg("GPIO Status: 0x%04x", status);
+       netdev_dbg(dev->net, "GPIO Status: 0x%04x\n", status);
 
        asix_write_cmd(dev, AX_CMD_WRITE_ENABLE, 0, 0, 0, NULL);
        asix_read_cmd(dev, AX_CMD_READ_EEPROM, 0x0017, 0, 2, &eeprom);
        asix_write_cmd(dev, AX_CMD_WRITE_DISABLE, 0, 0, 0, NULL);
 
-       dbg("EEPROM index 0x17 is 0x%04x", eeprom);
+       netdev_dbg(dev->net, "EEPROM index 0x17 is 0x%04x\n", eeprom);
 
        if (eeprom == cpu_to_le16(0xffff)) {
                data->phymode = PHY_MODE_MARVELL;
@@ -592,7 +596,7 @@ static int ax88178_reset(struct usbnet *dev)
                data->ledmode = le16_to_cpu(eeprom) >> 8;
                gpio0 = (le16_to_cpu(eeprom) & 0x80) ? 0 : 1;
        }
-       dbg("GPIO0: %d, PhyMode: %d", gpio0, data->phymode);
+       netdev_dbg(dev->net, "GPIO0: %d, PhyMode: %d\n", gpio0, data->phymode);
 
        /* Power up external GigaPHY through AX88178 GPIO pin */
        asix_write_gpio(dev, AX_GPIO_RSE | AX_GPIO_GPO_1 | AX_GPIO_GPO1EN, 40);
@@ -601,14 +605,14 @@ static int ax88178_reset(struct usbnet *dev)
                asix_write_gpio(dev, 0x001c, 300);
                asix_write_gpio(dev, 0x003c, 30);
        } else {
-               dbg("gpio phymode == 1 path");
+               netdev_dbg(dev->net, "gpio phymode == 1 path\n");
                asix_write_gpio(dev, AX_GPIO_GPO1EN, 30);
                asix_write_gpio(dev, AX_GPIO_GPO1EN | AX_GPIO_GPO_1, 30);
        }
 
        /* Read PHYID register *AFTER* powering up PHY */
        phyid = asix_get_phyid(dev);
-       dbg("PHYID=0x%08x", phyid);
+       netdev_dbg(dev->net, "PHYID=0x%08x\n", phyid);
 
        /* Set AX88178 to enable MII/GMII/RGMII interface for external PHY */
        asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, 0, 0, 0, NULL);
@@ -770,7 +774,7 @@ static int ax88178_bind(struct usbnet *dev, struct usb_interface *intf)
        /* Get the MAC address */
        ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf);
        if (ret < 0) {
-               dbg("Failed to read MAC address: %d", ret);
+               netdev_dbg(dev->net, "Failed to read MAC address: %d\n", ret);
                return ret;
        }
        memcpy(dev->net->dev_addr, buf, ETH_ALEN);
@@ -929,6 +933,10 @@ static const struct usb_device_id  products [] = {
        // JVC MP-PRX1 Port Replicator
        USB_DEVICE (0x04f1, 0x3008),
        .driver_info = (unsigned long) &ax8817x_info,
+}, {
+       // Lenovo U2L100P 10/100
+       USB_DEVICE (0x17ef, 0x7203),
+       .driver_info = (unsigned long) &ax88772_info,
 }, {
        // ASIX AX88772B 10/100
        USB_DEVICE (0x0b95, 0x772b),
index 26c5bebd9ecab4c39c23044bbc45d0adc84a5897..18d9579123ea82a2b649e0f2bdcdaa6a9f2ca80c 100644 (file)
@@ -236,7 +236,8 @@ static void catc_rx_done(struct urb *urb)
        }
 
        if (status) {
-               dbg("rx_done, status %d, length %d", status, urb->actual_length);
+               dev_dbg(&urb->dev->dev, "rx_done, status %d, length %d\n",
+                       status, urb->actual_length);
                return;
        }
 
@@ -275,10 +276,11 @@ static void catc_rx_done(struct urb *urb)
                if (atomic_read(&catc->recq_sz)) {
                        int state;
                        atomic_dec(&catc->recq_sz);
-                       dbg("getting extra packet");
+                       netdev_dbg(catc->netdev, "getting extra packet\n");
                        urb->dev = catc->usbdev;
                        if ((state = usb_submit_urb(urb, GFP_ATOMIC)) < 0) {
-                               dbg("submit(rx_urb) status %d", state);
+                               netdev_dbg(catc->netdev,
+                                          "submit(rx_urb) status %d\n", state);
                        }
                } else {
                        clear_bit(RX_RUNNING, &catc->flags);
@@ -317,18 +319,20 @@ static void catc_irq_done(struct urb *urb)
                return;
        /* -EPIPE:  should clear the halt */
        default:                /* error */
-               dbg("irq_done, status %d, data %02x %02x.", status, data[0], data[1]);
+               dev_dbg(&urb->dev->dev,
+                       "irq_done, status %d, data %02x %02x.\n",
+                       status, data[0], data[1]);
                goto resubmit;
        }
 
        if (linksts == LinkGood) {
                netif_carrier_on(catc->netdev);
-               dbg("link ok");
+               netdev_dbg(catc->netdev, "link ok\n");
        }
 
        if (linksts == LinkBad) {
                netif_carrier_off(catc->netdev);
-               dbg("link bad");
+               netdev_dbg(catc->netdev, "link bad\n");
        }
 
        if (hasdata) {
@@ -385,7 +389,7 @@ static void catc_tx_done(struct urb *urb)
        int r, status = urb->status;
 
        if (status == -ECONNRESET) {
-               dbg("Tx Reset.");
+               dev_dbg(&urb->dev->dev, "Tx Reset.\n");
                urb->status = 0;
                catc->netdev->trans_start = jiffies;
                catc->netdev->stats.tx_errors++;
@@ -395,7 +399,8 @@ static void catc_tx_done(struct urb *urb)
        }
 
        if (status) {
-               dbg("tx_done, status %d, length %d", status, urb->actual_length);
+               dev_dbg(&urb->dev->dev, "tx_done, status %d, length %d\n",
+                       status, urb->actual_length);
                return;
        }
 
@@ -511,7 +516,8 @@ static void catc_ctrl_done(struct urb *urb)
        int status = urb->status;
 
        if (status)
-               dbg("ctrl_done, status %d, len %d.", status, urb->actual_length);
+               dev_dbg(&urb->dev->dev, "ctrl_done, status %d, len %d.\n",
+                       status, urb->actual_length);
 
        spin_lock_irqsave(&catc->ctrl_lock, flags);
 
@@ -667,7 +673,9 @@ static void catc_set_multicast_list(struct net_device *netdev)
                f5u011_mchash_async(catc, catc->multicast);
                if (catc->rxmode[0] != rx) {
                        catc->rxmode[0] = rx;
-                       dbg("Setting RX mode to %2.2X %2.2X", catc->rxmode[0], catc->rxmode[1]);
+                       netdev_dbg(catc->netdev,
+                                  "Setting RX mode to %2.2X %2.2X\n",
+                                  catc->rxmode[0], catc->rxmode[1]);
                        f5u011_rxmode_async(catc, catc->rxmode);
                }
        }
@@ -766,6 +774,7 @@ static const struct net_device_ops catc_netdev_ops = {
 
 static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id)
 {
+       struct device *dev = &intf->dev;
        struct usb_device *usbdev = interface_to_usbdev(intf);
        struct net_device *netdev;
        struct catc *catc;
@@ -774,7 +783,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
 
        if (usb_set_interface(usbdev,
                        intf->altsetting->desc.bInterfaceNumber, 1)) {
-                dev_err(&intf->dev, "Can't set altsetting 1.\n");
+               dev_err(dev, "Can't set altsetting 1.\n");
                return -EIO;
        }
 
@@ -817,7 +826,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
        if (le16_to_cpu(usbdev->descriptor.idVendor) == 0x0423 && 
            le16_to_cpu(usbdev->descriptor.idProduct) == 0xa &&
            le16_to_cpu(catc->usbdev->descriptor.bcdDevice) == 0x0130) {
-               dbg("Testing for f5u011");
+               dev_dbg(dev, "Testing for f5u011\n");
                catc->is_f5u011 = 1;            
                atomic_set(&catc->recq_sz, 0);
                pktsz = RX_PKT_SZ;
@@ -838,7 +847,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
                 catc->irq_buf, 2, catc_irq_done, catc, 1);
 
        if (!catc->is_f5u011) {
-               dbg("Checking memory size\n");
+               dev_dbg(dev, "Checking memory size\n");
 
                i = 0x12345678;
                catc_write_mem(catc, 0x7a80, &i, 4);
@@ -850,7 +859,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
                case 0x12345678:
                        catc_set_reg(catc, TxBufCount, 8);
                        catc_set_reg(catc, RxBufCount, 32);
-                       dbg("64k Memory\n");
+                       dev_dbg(dev, "64k Memory\n");
                        break;
                default:
                        dev_warn(&intf->dev,
@@ -858,49 +867,49 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
                case 0x87654321:
                        catc_set_reg(catc, TxBufCount, 4);
                        catc_set_reg(catc, RxBufCount, 16);
-                       dbg("32k Memory\n");
+                       dev_dbg(dev, "32k Memory\n");
                        break;
                }
          
-               dbg("Getting MAC from SEEROM.");
+               dev_dbg(dev, "Getting MAC from SEEROM.\n");
          
                catc_get_mac(catc, netdev->dev_addr);
                
-               dbg("Setting MAC into registers.");
+               dev_dbg(dev, "Setting MAC into registers.\n");
          
                for (i = 0; i < 6; i++)
                        catc_set_reg(catc, StationAddr0 - i, netdev->dev_addr[i]);
                
-               dbg("Filling the multicast list.");
+               dev_dbg(dev, "Filling the multicast list.\n");
          
                memset(broadcast, 0xff, 6);
                catc_multicast(broadcast, catc->multicast);
                catc_multicast(netdev->dev_addr, catc->multicast);
                catc_write_mem(catc, 0xfa80, catc->multicast, 64);
                
-               dbg("Clearing error counters.");
+               dev_dbg(dev, "Clearing error counters.\n");
                
                for (i = 0; i < 8; i++)
                        catc_set_reg(catc, EthStats + i, 0);
                catc->last_stats = jiffies;
                
-               dbg("Enabling.");
+               dev_dbg(dev, "Enabling.\n");
                
                catc_set_reg(catc, MaxBurst, RX_MAX_BURST);
                catc_set_reg(catc, OpModes, OpTxMerge | OpRxMerge | OpLenInclude | Op3MemWaits);
                catc_set_reg(catc, LEDCtrl, LEDLink);
                catc_set_reg(catc, RxUnit, RxEnable | RxPolarity | RxMultiCast);
        } else {
-               dbg("Performing reset\n");
+               dev_dbg(dev, "Performing reset\n");
                catc_reset(catc);
                catc_get_mac(catc, netdev->dev_addr);
                
-               dbg("Setting RX Mode");
+               dev_dbg(dev, "Setting RX Mode\n");
                catc->rxmode[0] = RxEnable | RxPolarity | RxMultiCast;
                catc->rxmode[1] = 0;
                f5u011_rxmode(catc, catc->rxmode);
        }
-       dbg("Init done.");
+       dev_dbg(dev, "Init done.\n");
        printk(KERN_INFO "%s: %s USB Ethernet at usb-%s-%s, %pM.\n",
               netdev->name, (catc->is_f5u011) ? "Belkin F5U011" : "CATC EL1210A NetMate",
               usbdev->bus->bus_name, usbdev->devpath, netdev->dev_addr);
index 49ab45e17fe8999c6da7281773c4be8c4da2cee3..1e207f086b759416a916cb9180af1be85e8a3fce 100644 (file)
@@ -302,18 +302,9 @@ static const struct driver_info    cx82310_info = {
        .tx_fixup       = cx82310_tx_fixup,
 };
 
-#define USB_DEVICE_CLASS(vend, prod, cl, sc, pr) \
-       .match_flags = USB_DEVICE_ID_MATCH_DEVICE | \
-                      USB_DEVICE_ID_MATCH_DEV_INFO, \
-       .idVendor = (vend), \
-       .idProduct = (prod), \
-       .bDeviceClass = (cl), \
-       .bDeviceSubClass = (sc), \
-       .bDeviceProtocol = (pr)
-
 static const struct usb_device_id products[] = {
        {
-               USB_DEVICE_CLASS(0x0572, 0xcb01, 0xff, 0, 0),
+               USB_DEVICE_AND_INTERFACE_INFO(0x0572, 0xcb01, 0xff, 0, 0),
                .driver_info = (unsigned long) &cx82310_info
        },
        { },
index db3c8021f2a3aedb2dd69cd093cd65023f8b6cdc..a7e3f4e55bf3651b3874710ffcf4cfcdeb37e299 100644 (file)
@@ -91,7 +91,9 @@ static int genelink_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
        // get the packet count of the received skb
        count = le32_to_cpu(header->packet_count);
        if (count > GL_MAX_TRANSMIT_PACKETS) {
-               dbg("genelink: invalid received packet count %u", count);
+               netdev_dbg(dev->net,
+                          "genelink: invalid received packet count %u\n",
+                          count);
                return 0;
        }
 
@@ -107,7 +109,8 @@ static int genelink_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
 
                // this may be a broken packet
                if (size > GL_MAX_PACKET_LEN) {
-                       dbg("genelink: invalid rx length %d", size);
+                       netdev_dbg(dev->net, "genelink: invalid rx length %d\n",
+                                  size);
                        return 0;
                }
 
@@ -133,7 +136,8 @@ static int genelink_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
        skb_pull(skb, 4);
 
        if (skb->len > GL_MAX_PACKET_LEN) {
-               dbg("genelink: invalid rx length %d", skb->len);
+               netdev_dbg(dev->net, "genelink: invalid rx length %d\n",
+                          skb->len);
                return 0;
        }
        return 1;
index c3d03490c97d04c4724b472bade7f1b6bc9bba0d..c75e11e1b385f3f5654605959b1a6db5ba44641a 100644 (file)
@@ -267,19 +267,16 @@ static int kaweth_control(struct kaweth_device *kaweth,
        struct usb_ctrlrequest *dr;
        int retval;
 
-       dbg("kaweth_control()");
+       netdev_dbg(kaweth->net, "kaweth_control()\n");
 
        if(in_interrupt()) {
-               dbg("in_interrupt()");
+               netdev_dbg(kaweth->net, "in_interrupt()\n");
                return -EBUSY;
        }
 
        dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC);
-
-       if (!dr) {
-               dbg("kmalloc() failed");
+       if (!dr)
                return -ENOMEM;
-       }
 
        dr->bRequestType = requesttype;
        dr->bRequest = request;
@@ -305,7 +302,7 @@ static int kaweth_read_configuration(struct kaweth_device *kaweth)
 {
        int retval;
 
-       dbg("Reading kaweth configuration");
+       netdev_dbg(kaweth->net, "Reading kaweth configuration\n");
 
        retval = kaweth_control(kaweth,
                                usb_rcvctrlpipe(kaweth->dev, 0),
@@ -327,7 +324,7 @@ static int kaweth_set_urb_size(struct kaweth_device *kaweth, __u16 urb_size)
 {
        int retval;
 
-       dbg("Setting URB size to %d", (unsigned)urb_size);
+       netdev_dbg(kaweth->net, "Setting URB size to %d\n", (unsigned)urb_size);
 
        retval = kaweth_control(kaweth,
                                usb_sndctrlpipe(kaweth->dev, 0),
@@ -349,7 +346,7 @@ static int kaweth_set_sofs_wait(struct kaweth_device *kaweth, __u16 sofs_wait)
 {
        int retval;
 
-       dbg("Set SOFS wait to %d", (unsigned)sofs_wait);
+       netdev_dbg(kaweth->net, "Set SOFS wait to %d\n", (unsigned)sofs_wait);
 
        retval = kaweth_control(kaweth,
                                usb_sndctrlpipe(kaweth->dev, 0),
@@ -372,7 +369,8 @@ static int kaweth_set_receive_filter(struct kaweth_device *kaweth,
 {
        int retval;
 
-       dbg("Set receive filter to %d", (unsigned)receive_filter);
+       netdev_dbg(kaweth->net, "Set receive filter to %d\n",
+                  (unsigned)receive_filter);
 
        retval = kaweth_control(kaweth,
                                usb_sndctrlpipe(kaweth->dev, 0),
@@ -421,12 +419,13 @@ static int kaweth_download_firmware(struct kaweth_device *kaweth,
        kaweth->firmware_buf[4] = type;
        kaweth->firmware_buf[5] = interrupt;
 
-       dbg("High: %i, Low:%i", kaweth->firmware_buf[3],
+       netdev_dbg(kaweth->net, "High: %i, Low:%i\n", kaweth->firmware_buf[3],
                   kaweth->firmware_buf[2]);
 
-       dbg("Downloading firmware at %p to kaweth device at %p",
-           fw->data, kaweth);
-       dbg("Firmware length: %d", data_len);
+       netdev_dbg(kaweth->net,
+                  "Downloading firmware at %p to kaweth device at %p\n",
+                  fw->data, kaweth);
+       netdev_dbg(kaweth->net, "Firmware length: %d\n", data_len);
 
        return kaweth_control(kaweth,
                              usb_sndctrlpipe(kaweth->dev, 0),
@@ -454,7 +453,7 @@ static int kaweth_trigger_firmware(struct kaweth_device *kaweth,
        kaweth->firmware_buf[6] = 0x00;
        kaweth->firmware_buf[7] = 0x00;
 
-       dbg("Triggering firmware");
+       netdev_dbg(kaweth->net, "Triggering firmware\n");
 
        return kaweth_control(kaweth,
                              usb_sndctrlpipe(kaweth->dev, 0),
@@ -474,11 +473,11 @@ static int kaweth_reset(struct kaweth_device *kaweth)
 {
        int result;
 
-       dbg("kaweth_reset(%p)", kaweth);
+       netdev_dbg(kaweth->net, "kaweth_reset(%p)\n", kaweth);
        result = usb_reset_configuration(kaweth->dev);
        mdelay(10);
 
-       dbg("kaweth_reset() returns %d.",result);
+       netdev_dbg(kaweth->net, "kaweth_reset() returns %d.\n", result);
 
        return result;
 }
@@ -595,6 +594,7 @@ static void kaweth_async_set_rx_mode(struct kaweth_device *kaweth);
  ****************************************************************/
 static void kaweth_usb_receive(struct urb *urb)
 {
+       struct device *dev = &urb->dev->dev;
        struct kaweth_device *kaweth = urb->context;
        struct net_device *net = kaweth->net;
        int status = urb->status;
@@ -610,25 +610,25 @@ static void kaweth_usb_receive(struct urb *urb)
                kaweth->stats.rx_errors++;
                kaweth->end = 1;
                wake_up(&kaweth->term_wait);
-               dbg("Status was -EPIPE.");
+               dev_dbg(dev, "Status was -EPIPE.\n");
                return;
        }
        if (unlikely(status == -ECONNRESET || status == -ESHUTDOWN)) {
                /* we are killed - set a flag and wake the disconnect handler */
                kaweth->end = 1;
                wake_up(&kaweth->term_wait);
-               dbg("Status was -ECONNRESET or -ESHUTDOWN.");
+               dev_dbg(dev, "Status was -ECONNRESET or -ESHUTDOWN.\n");
                return;
        }
        if (unlikely(status == -EPROTO || status == -ETIME ||
                     status == -EILSEQ)) {
                kaweth->stats.rx_errors++;
-               dbg("Status was -EPROTO, -ETIME, or -EILSEQ.");
+               dev_dbg(dev, "Status was -EPROTO, -ETIME, or -EILSEQ.\n");
                return;
        }
        if (unlikely(status == -EOVERFLOW)) {
                kaweth->stats.rx_errors++;
-               dbg("Status was -EOVERFLOW.");
+               dev_dbg(dev, "Status was -EOVERFLOW.\n");
        }
        spin_lock(&kaweth->device_lock);
        if (IS_BLOCKED(kaweth->status)) {
@@ -687,7 +687,7 @@ static int kaweth_open(struct net_device *net)
        struct kaweth_device *kaweth = netdev_priv(net);
        int res;
 
-       dbg("Opening network device.");
+       netdev_dbg(kaweth->net, "Opening network device.\n");
 
        res = usb_autopm_get_interface(kaweth->intf);
        if (res) {
@@ -787,7 +787,8 @@ static void kaweth_usb_transmit_complete(struct urb *urb)
 
        if (unlikely(status != 0))
                if (status != -ENOENT)
-                       dbg("%s: TX status %d.", kaweth->net->name, status);
+                       dev_dbg(&urb->dev->dev, "%s: TX status %d.\n",
+                               kaweth->net->name, status);
 
        netif_wake_queue(kaweth->net);
        dev_kfree_skb_irq(skb);
@@ -871,7 +872,7 @@ static void kaweth_set_rx_mode(struct net_device *net)
                                      KAWETH_PACKET_FILTER_BROADCAST |
                                     KAWETH_PACKET_FILTER_MULTICAST;
 
-       dbg("Setting Rx mode to %d", packet_filter_bitmap);
+       netdev_dbg(net, "Setting Rx mode to %d\n", packet_filter_bitmap);
 
        netif_stop_queue(net);
 
@@ -916,7 +917,8 @@ static void kaweth_async_set_rx_mode(struct kaweth_device *kaweth)
                        result);
        }
        else {
-               dbg("Set Rx mode to %d", packet_filter_bitmap);
+               netdev_dbg(kaweth->net, "Set Rx mode to %d\n",
+                          packet_filter_bitmap);
        }
 }
 
@@ -951,7 +953,7 @@ static int kaweth_suspend(struct usb_interface *intf, pm_message_t message)
        struct kaweth_device *kaweth = usb_get_intfdata(intf);
        unsigned long flags;
 
-       dbg("Suspending device");
+       dev_dbg(&intf->dev, "Suspending device\n");
        spin_lock_irqsave(&kaweth->device_lock, flags);
        kaweth->status |= KAWETH_STATUS_SUSPENDING;
        spin_unlock_irqrestore(&kaweth->device_lock, flags);
@@ -968,7 +970,7 @@ static int kaweth_resume(struct usb_interface *intf)
        struct kaweth_device *kaweth = usb_get_intfdata(intf);
        unsigned long flags;
 
-       dbg("Resuming device");
+       dev_dbg(&intf->dev, "Resuming device\n");
        spin_lock_irqsave(&kaweth->device_lock, flags);
        kaweth->status &= ~KAWETH_STATUS_SUSPENDING;
        spin_unlock_irqrestore(&kaweth->device_lock, flags);
@@ -1003,36 +1005,37 @@ static int kaweth_probe(
                const struct usb_device_id *id      /* from id_table */
        )
 {
-       struct usb_device *dev = interface_to_usbdev(intf);
+       struct device *dev = &intf->dev;
+       struct usb_device *udev = interface_to_usbdev(intf);
        struct kaweth_device *kaweth;
        struct net_device *netdev;
        const eth_addr_t bcast_addr = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
        int result = 0;
 
-       dbg("Kawasaki Device Probe (Device number:%d): 0x%4.4x:0x%4.4x:0x%4.4x",
-                dev->devnum,
-                le16_to_cpu(dev->descriptor.idVendor),
-                le16_to_cpu(dev->descriptor.idProduct),
-                le16_to_cpu(dev->descriptor.bcdDevice));
+       dev_dbg(dev,
+               "Kawasaki Device Probe (Device number:%d): 0x%4.4x:0x%4.4x:0x%4.4x\n",
+               udev->devnum, le16_to_cpu(udev->descriptor.idVendor),
+               le16_to_cpu(udev->descriptor.idProduct),
+               le16_to_cpu(udev->descriptor.bcdDevice));
 
-       dbg("Device at %p", dev);
+       dev_dbg(dev, "Device at %p\n", udev);
 
-       dbg("Descriptor length: %x type: %x",
-                (int)dev->descriptor.bLength,
-                (int)dev->descriptor.bDescriptorType);
+       dev_dbg(dev, "Descriptor length: %x type: %x\n",
+               (int)udev->descriptor.bLength,
+               (int)udev->descriptor.bDescriptorType);
 
        netdev = alloc_etherdev(sizeof(*kaweth));
        if (!netdev)
                return -ENOMEM;
 
        kaweth = netdev_priv(netdev);
-       kaweth->dev = dev;
+       kaweth->dev = udev;
        kaweth->net = netdev;
 
        spin_lock_init(&kaweth->device_lock);
        init_waitqueue_head(&kaweth->term_wait);
 
-       dbg("Resetting.");
+       dev_dbg(dev, "Resetting.\n");
 
        kaweth_reset(kaweth);
 
@@ -1041,17 +1044,17 @@ static int kaweth_probe(
         * downloaded. Don't try to do it again, or we'll hang the device.
         */
 
-       if (le16_to_cpu(dev->descriptor.bcdDevice) >> 8) {
-               dev_info(&intf->dev, "Firmware present in device.\n");
+       if (le16_to_cpu(udev->descriptor.bcdDevice) >> 8) {
+               dev_info(dev, "Firmware present in device.\n");
        } else {
                /* Download the firmware */
-               dev_info(&intf->dev, "Downloading firmware...\n");
+               dev_info(dev, "Downloading firmware...\n");
                kaweth->firmware_buf = (__u8 *)__get_free_page(GFP_KERNEL);
                if ((result = kaweth_download_firmware(kaweth,
                                                      "kaweth/new_code.bin",
                                                      100,
                                                      2)) < 0) {
-                       dev_err(&intf->dev, "Error downloading firmware (%d)\n",
+                       dev_err(dev, "Error downloading firmware (%d)\n",
                                result);
                        goto err_fw;
                }
@@ -1060,8 +1063,7 @@ static int kaweth_probe(
                                                      "kaweth/new_code_fix.bin",
                                                      100,
                                                      3)) < 0) {
-                       dev_err(&intf->dev,
-                               "Error downloading firmware fix (%d)\n",
+                       dev_err(dev, "Error downloading firmware fix (%d)\n",
                                result);
                        goto err_fw;
                }
@@ -1070,8 +1072,7 @@ static int kaweth_probe(
                                                      "kaweth/trigger_code.bin",
                                                      126,
                                                      2)) < 0) {
-                       dev_err(&intf->dev,
-                               "Error downloading trigger code (%d)\n",
+                       dev_err(dev, "Error downloading trigger code (%d)\n",
                                result);
                        goto err_fw;
 
@@ -1081,19 +1082,18 @@ static int kaweth_probe(
                                                      "kaweth/trigger_code_fix.bin",
                                                      126,
                                                      3)) < 0) {
-                       dev_err(&intf->dev, "Error downloading trigger code fix (%d)\n", result);
+                       dev_err(dev, "Error downloading trigger code fix (%d)\n", result);
                        goto err_fw;
                }
 
 
                if ((result = kaweth_trigger_firmware(kaweth, 126)) < 0) {
-                       dev_err(&intf->dev, "Error triggering firmware (%d)\n",
-                               result);
+                       dev_err(dev, "Error triggering firmware (%d)\n", result);
                        goto err_fw;
                }
 
                /* Device will now disappear for a moment...  */
-               dev_info(&intf->dev, "Firmware loaded.  I'll be back...\n");
+               dev_info(dev, "Firmware loaded.  I'll be back...\n");
 err_fw:
                free_page((unsigned long)kaweth->firmware_buf);
                free_netdev(netdev);
@@ -1103,29 +1103,29 @@ err_fw:
        result = kaweth_read_configuration(kaweth);
 
        if(result < 0) {
-               dev_err(&intf->dev, "Error reading configuration (%d), no net device created\n", result);
+               dev_err(dev, "Error reading configuration (%d), no net device created\n", result);
                goto err_free_netdev;
        }
 
-       dev_info(&intf->dev, "Statistics collection: %x\n", kaweth->configuration.statistics_mask);
-       dev_info(&intf->dev, "Multicast filter limit: %x\n", kaweth->configuration.max_multicast_filters & ((1 << 15) - 1));
-       dev_info(&intf->dev, "MTU: %d\n", le16_to_cpu(kaweth->configuration.segment_size));
-       dev_info(&intf->dev, "Read MAC address %pM\n", kaweth->configuration.hw_addr);
+       dev_info(dev, "Statistics collection: %x\n", kaweth->configuration.statistics_mask);
+       dev_info(dev, "Multicast filter limit: %x\n", kaweth->configuration.max_multicast_filters & ((1 << 15) - 1));
+       dev_info(dev, "MTU: %d\n", le16_to_cpu(kaweth->configuration.segment_size));
+       dev_info(dev, "Read MAC address %pM\n", kaweth->configuration.hw_addr);
 
        if(!memcmp(&kaweth->configuration.hw_addr,
                    &bcast_addr,
                   sizeof(bcast_addr))) {
-               dev_err(&intf->dev, "Firmware not functioning properly, no net device created\n");
+               dev_err(dev, "Firmware not functioning properly, no net device created\n");
                goto err_free_netdev;
        }
 
        if(kaweth_set_urb_size(kaweth, KAWETH_BUF_SIZE) < 0) {
-               dbg("Error setting URB size");
+               dev_dbg(dev, "Error setting URB size\n");
                goto err_free_netdev;
        }
 
        if(kaweth_set_sofs_wait(kaweth, KAWETH_SOFS_TO_WAIT) < 0) {
-               dev_err(&intf->dev, "Error setting SOFS wait\n");
+               dev_err(dev, "Error setting SOFS wait\n");
                goto err_free_netdev;
        }
 
@@ -1135,11 +1135,11 @@ err_fw:
                                            KAWETH_PACKET_FILTER_MULTICAST);
 
        if(result < 0) {
-               dev_err(&intf->dev, "Error setting receive filter\n");
+               dev_err(dev, "Error setting receive filter\n");
                goto err_free_netdev;
        }
 
-       dbg("Initializing net device.");
+       dev_dbg(dev, "Initializing net device.\n");
 
        kaweth->intf = intf;
 
@@ -1181,20 +1181,20 @@ err_fw:
 
 #if 0
 // dma_supported() is deeply broken on almost all architectures
-       if (dma_supported (&intf->dev, 0xffffffffffffffffULL))
+       if (dma_supported (dev, 0xffffffffffffffffULL))
                kaweth->net->features |= NETIF_F_HIGHDMA;
 #endif
 
-       SET_NETDEV_DEV(netdev, &intf->dev);
+       SET_NETDEV_DEV(netdev, dev);
        if (register_netdev(netdev) != 0) {
-               dev_err(&intf->dev, "Error registering netdev.\n");
+               dev_err(dev, "Error registering netdev.\n");
                goto err_intfdata;
        }
 
-       dev_info(&intf->dev, "kaweth interface created at %s\n",
+       dev_info(dev, "kaweth interface created at %s\n",
                 kaweth->net->name);
 
-       dbg("Kaweth probe returning.");
+       dev_dbg(dev, "Kaweth probe returning.\n");
 
        return 0;
 
@@ -1232,7 +1232,7 @@ static void kaweth_disconnect(struct usb_interface *intf)
        }
        netdev = kaweth->net;
 
-       dbg("Unregistering net device");
+       netdev_dbg(kaweth->net, "Unregistering net device\n");
        unregister_netdev(netdev);
 
        usb_free_urb(kaweth->rx_urb);
index 28c4d513ba850c9165a6e0376986fcff0bb9a0f5..c062a3e8295c12d5f69158026b3055522f0f5422 100644 (file)
@@ -155,12 +155,10 @@ static void nc_dump_registers(struct usbnet *dev)
        u8      reg;
        u16     *vp = kmalloc(sizeof (u16));
 
-       if (!vp) {
-               dbg("no memory?");
+       if (!vp)
                return;
-       }
 
-       dbg("%s registers:", dev->net->name);
+       netdev_dbg(dev->net, "registers:\n");
        for (reg = 0; reg < 0x20; reg++) {
                int retval;
 
@@ -172,11 +170,10 @@ static void nc_dump_registers(struct usbnet *dev)
 
                retval = nc_register_read(dev, reg, vp);
                if (retval < 0)
-                       dbg("%s reg [0x%x] ==> error %d",
-                               dev->net->name, reg, retval);
+                       netdev_dbg(dev->net, "reg [0x%x] ==> error %d\n",
+                                  reg, retval);
                else
-                       dbg("%s reg [0x%x] = 0x%x",
-                               dev->net->name, reg, *vp);
+                       netdev_dbg(dev->net, "reg [0x%x] = 0x%x\n", reg, *vp);
        }
        kfree(vp);
 }
@@ -300,15 +297,15 @@ static int net1080_reset(struct usbnet *dev)
        // nc_dump_registers(dev);
 
        if ((retval = nc_register_read(dev, REG_STATUS, vp)) < 0) {
-               dbg("can't read %s-%s status: %d",
-                       dev->udev->bus->bus_name, dev->udev->devpath, retval);
+               netdev_dbg(dev->net, "can't read %s-%s status: %d\n",
+                          dev->udev->bus->bus_name, dev->udev->devpath, retval);
                goto done;
        }
        status = *vp;
        nc_dump_status(dev, status);
 
        if ((retval = nc_register_read(dev, REG_USBCTL, vp)) < 0) {
-               dbg("can't read USBCTL, %d", retval);
+               netdev_dbg(dev->net, "can't read USBCTL, %d\n", retval);
                goto done;
        }
        usbctl = *vp;
@@ -318,7 +315,7 @@ static int net1080_reset(struct usbnet *dev)
                        USBCTL_FLUSH_THIS | USBCTL_FLUSH_OTHER);
 
        if ((retval = nc_register_read(dev, REG_TTL, vp)) < 0) {
-               dbg("can't read TTL, %d", retval);
+               netdev_dbg(dev->net, "can't read TTL, %d\n", retval);
                goto done;
        }
        ttl = *vp;
@@ -326,7 +323,7 @@ static int net1080_reset(struct usbnet *dev)
 
        nc_register_write(dev, REG_TTL,
                        MK_TTL(NC_READ_TTL_MS, TTL_OTHER(ttl)) );
-       dbg("%s: assigned TTL, %d ms", dev->net->name, NC_READ_TTL_MS);
+       netdev_dbg(dev->net, "assigned TTL, %d ms\n", NC_READ_TTL_MS);
 
        netif_info(dev, link, dev->net, "port %c, peer %sconnected\n",
                   (status & STATUS_PORT_A) ? 'A' : 'B',
@@ -350,7 +347,7 @@ static int net1080_check_connect(struct usbnet *dev)
        status = *vp;
        kfree(vp);
        if (retval != 0) {
-               dbg("%s net1080_check_conn read - %d", dev->net->name, retval);
+               netdev_dbg(dev->net, "net1080_check_conn read - %d\n", retval);
                return retval;
        }
        if ((status & STATUS_CONN_OTHER) != STATUS_CONN_OTHER)
@@ -420,11 +417,9 @@ static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
        u16                     hdr_len, packet_len;
 
        if (!(skb->len & 0x01)) {
-#ifdef DEBUG
-               struct net_device       *net = dev->net;
-               dbg("rx framesize %d range %d..%d mtu %d", skb->len,
-                       net->hard_header_len, dev->hard_mtu, net->mtu);
-#endif
+               netdev_dbg(dev->net, "rx framesize %d range %d..%d mtu %d\n",
+                          skb->len, dev->net->hard_header_len, dev->hard_mtu,
+                          dev->net->mtu);
                dev->net->stats.rx_frame_errors++;
                nc_ensure_sync(dev);
                return 0;
@@ -435,17 +430,17 @@ static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
        packet_len = le16_to_cpup(&header->packet_len);
        if (FRAMED_SIZE(packet_len) > NC_MAX_PACKET) {
                dev->net->stats.rx_frame_errors++;
-               dbg("packet too big, %d", packet_len);
+               netdev_dbg(dev->net, "packet too big, %d\n", packet_len);
                nc_ensure_sync(dev);
                return 0;
        } else if (hdr_len < MIN_HEADER) {
                dev->net->stats.rx_frame_errors++;
-               dbg("header too short, %d", hdr_len);
+               netdev_dbg(dev->net, "header too short, %d\n", hdr_len);
                nc_ensure_sync(dev);
                return 0;
        } else if (hdr_len > MIN_HEADER) {
                // out of band data for us?
-               dbg("header OOB, %d bytes", hdr_len - MIN_HEADER);
+               netdev_dbg(dev->net, "header OOB, %d bytes\n", hdr_len - MIN_HEADER);
                nc_ensure_sync(dev);
                // switch (vendor/product ids) { ... }
        }
@@ -458,23 +453,23 @@ static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
        if ((packet_len & 0x01) == 0) {
                if (skb->data [packet_len] != PAD_BYTE) {
                        dev->net->stats.rx_frame_errors++;
-                       dbg("bad pad");
+                       netdev_dbg(dev->net, "bad pad\n");
                        return 0;
                }
                skb_trim(skb, skb->len - 1);
        }
        if (skb->len != packet_len) {
                dev->net->stats.rx_frame_errors++;
-               dbg("bad packet len %d (expected %d)",
-                       skb->len, packet_len);
+               netdev_dbg(dev->net, "bad packet len %d (expected %d)\n",
+                          skb->len, packet_len);
                nc_ensure_sync(dev);
                return 0;
        }
        if (header->packet_id != get_unaligned(&trailer->packet_id)) {
                dev->net->stats.rx_fifo_errors++;
-               dbg("(2+ dropped) rx packet_id mismatch 0x%x 0x%x",
-                       le16_to_cpu(header->packet_id),
-                       le16_to_cpu(trailer->packet_id));
+               netdev_dbg(dev->net, "(2+ dropped) rx packet_id mismatch 0x%x 0x%x\n",
+                          le16_to_cpu(header->packet_id),
+                          le16_to_cpu(trailer->packet_id));
                return 0;
        }
 #if 0
index 3543c9e578247e6ee550ef7f18f680b680be3587..6883c371c59f5a2eb416466563cad5e1fbaa5b9a 100644 (file)
@@ -108,7 +108,7 @@ static int qmi_wwan_register_subdriver(struct usbnet *dev)
        atomic_set(&info->pmcount, 0);
 
        /* register subdriver */
-       subdriver = usb_cdc_wdm_register(info->control, &dev->status->desc, 512, &qmi_wwan_cdc_wdm_manage_power);
+       subdriver = usb_cdc_wdm_register(info->control, &dev->status->desc, 4096, &qmi_wwan_cdc_wdm_manage_power);
        if (IS_ERR(subdriver)) {
                dev_err(&info->control->dev, "subdriver registration failed\n");
                rv = PTR_ERR(subdriver);
@@ -139,10 +139,18 @@ static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf)
 
        BUILD_BUG_ON((sizeof(((struct usbnet *)0)->data) < sizeof(struct qmi_wwan_state)));
 
-       /* require a single interrupt status endpoint for subdriver */
+       /* control and data is shared? */
+       if (intf->cur_altsetting->desc.bNumEndpoints == 3) {
+               info->control = intf;
+               info->data = intf;
+               goto shared;
+       }
+
+       /* else require a single interrupt status endpoint on control intf */
        if (intf->cur_altsetting->desc.bNumEndpoints != 1)
                goto err;
 
+       /* and a number of CDC descriptors */
        while (len > 3) {
                struct usb_descriptor_header *h = (void *)buf;
 
@@ -231,8 +239,9 @@ next_desc:
        if (status < 0)
                goto err;
 
+shared:
        status = qmi_wwan_register_subdriver(dev);
-       if (status < 0) {
+       if (status < 0 && info->control != info->data) {
                usb_set_intfdata(info->data, NULL);
                usb_driver_release_interface(driver, info->data);
        }
@@ -241,20 +250,6 @@ err:
        return status;
 }
 
-/* Some devices combine the "control" and "data" functions into a
- * single interface with all three endpoints: interrupt + bulk in and
- * out
- */
-static int qmi_wwan_bind_shared(struct usbnet *dev, struct usb_interface *intf)
-{
-       struct qmi_wwan_state *info = (void *)&dev->data;
-
-       /*  control and data is shared */
-       info->control = intf;
-       info->data = intf;
-       return qmi_wwan_register_subdriver(dev);
-}
-
 static void qmi_wwan_unbind(struct usbnet *dev, struct usb_interface *intf)
 {
        struct qmi_wwan_state *info = (void *)&dev->data;
@@ -331,20 +326,12 @@ static const struct driver_info   qmi_wwan_info = {
        .manage_power   = qmi_wwan_manage_power,
 };
 
-static const struct driver_info        qmi_wwan_shared = {
-       .description    = "WWAN/QMI device",
-       .flags          = FLAG_WWAN,
-       .bind           = qmi_wwan_bind_shared,
-       .unbind         = qmi_wwan_unbind,
-       .manage_power   = qmi_wwan_manage_power,
-};
-
 #define HUAWEI_VENDOR_ID       0x12D1
 
 /* map QMI/wwan function by a fixed interface number */
 #define QMI_FIXED_INTF(vend, prod, num) \
        USB_DEVICE_INTERFACE_NUMBER(vend, prod, num), \
-       .driver_info = (unsigned long)&qmi_wwan_shared
+       .driver_info = (unsigned long)&qmi_wwan_info
 
 /* Gobi 1000 QMI/wwan interface number is 3 according to qcserial */
 #define QMI_GOBI1K_DEVICE(vend, prod) \
@@ -372,15 +359,15 @@ static const struct usb_device_id products[] = {
        },
        {       /* Huawei E392, E398 and possibly others in "Windows mode" */
                USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 17),
-               .driver_info        = (unsigned long)&qmi_wwan_shared,
+               .driver_info        = (unsigned long)&qmi_wwan_info,
        },
        {       /* Pantech UML290, P4200 and more */
                USB_VENDOR_AND_INTERFACE_INFO(0x106c, USB_CLASS_VENDOR_SPEC, 0xf0, 0xff),
-               .driver_info        = (unsigned long)&qmi_wwan_shared,
+               .driver_info        = (unsigned long)&qmi_wwan_info,
        },
        {       /* Pantech UML290 - newer firmware */
                USB_VENDOR_AND_INTERFACE_INFO(0x106c, USB_CLASS_VENDOR_SPEC, 0xf1, 0xff),
-               .driver_info        = (unsigned long)&qmi_wwan_shared,
+               .driver_info        = (unsigned long)&qmi_wwan_info,
        },
 
        /* 3. Combined interface devices matching on interface number */
@@ -467,7 +454,7 @@ static int qmi_wwan_probe(struct usb_interface *intf, const struct usb_device_id
         */
        if (!id->driver_info) {
                dev_dbg(&intf->dev, "setting defaults for dynamic device id\n");
-               id->driver_info = (unsigned long)&qmi_wwan_shared;
+               id->driver_info = (unsigned long)&qmi_wwan_info;
        }
 
        return usbnet_probe(intf, id);
index 0e2c92e0e5323e03b57b442e43db78f4f0739e8a..5f39a3b225ef8729f47707dbfa2e8dcc98b52b54 100644 (file)
@@ -275,7 +275,7 @@ static int rtl8150_set_mac_address(struct net_device *netdev, void *p)
                return -EBUSY;
 
        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
-       dbg("%s: Setting MAC address to %pM\n", netdev->name, netdev->dev_addr);
+       netdev_dbg(netdev, "Setting MAC address to %pM\n", netdev->dev_addr);
        /* Set the IDR registers. */
        set_registers(dev, IDR, netdev->addr_len, netdev->dev_addr);
 #ifdef EEPROM_WRITE
@@ -503,12 +503,12 @@ static void intr_callback(struct urb *urb)
        if ((d[INT_MSR] & MSR_LINK) == 0) {
                if (netif_carrier_ok(dev->netdev)) {
                        netif_carrier_off(dev->netdev);
-                       dbg("%s: LINK LOST\n", __func__);
+                       netdev_dbg(dev->netdev, "%s: LINK LOST\n", __func__);
                }
        } else {
                if (!netif_carrier_ok(dev->netdev)) {
                        netif_carrier_on(dev->netdev);
-                       dbg("%s: LINK CAME BACK\n", __func__);
+                       netdev_dbg(dev->netdev, "%s: LINK CAME BACK\n", __func__);
                }
        }
 
index 8e22417fa6c11b5d41845bda1ff4c84e9d369cda..c27d27701aee2e2ce0d7e22f077c579da2ee1b07 100644 (file)
@@ -68,9 +68,8 @@ static        atomic_t iface_counter = ATOMIC_INIT(0);
  */
 #define SIERRA_NET_USBCTL_BUF_LEN      1024
 
-struct sierra_net_info_data {
-       u16 rx_urb_size;
-};
+/* Overriding the default usbnet rx_urb_size */
+#define SIERRA_NET_RX_URB_SIZE         (8 * 1024)
 
 /* Private data structure */
 struct sierra_net_data {
@@ -560,7 +559,7 @@ static void sierra_net_defer_kevent(struct usbnet *dev, int work)
 /*
  * Sync Retransmit Timer Handler. On expiry, kick the work queue
  */
-void sierra_sync_timer(unsigned long syncdata)
+static void sierra_sync_timer(unsigned long syncdata)
 {
        struct usbnet *dev = (struct usbnet *)syncdata;
 
@@ -678,9 +677,6 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
        static const u8 shdwn_tmplate[sizeof(priv->shdwn_msg)] = {
                0x00, 0x00, SIERRA_NET_HIP_SHUTD_ID, 0x00};
 
-       struct sierra_net_info_data *data =
-                       (struct sierra_net_info_data *)dev->driver_info->data;
-
        dev_dbg(&dev->udev->dev, "%s", __func__);
 
        ifacenum = intf->cur_altsetting->desc.bInterfaceNumber;
@@ -725,9 +721,9 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
        sierra_net_set_ctx_index(priv, 0);
 
        /* decrease the rx_urb_size and max_tx_size to 4k on USB 1.1 */
-       dev->rx_urb_size  = data->rx_urb_size;
+       dev->rx_urb_size  = SIERRA_NET_RX_URB_SIZE;
        if (dev->udev->speed != USB_SPEED_HIGH)
-               dev->rx_urb_size  = min_t(size_t, 4096, data->rx_urb_size);
+               dev->rx_urb_size  = min_t(size_t, 4096, SIERRA_NET_RX_URB_SIZE);
 
        dev->net->hard_header_len += SIERRA_NET_HIP_EXT_HDR_LEN;
        dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
@@ -842,7 +838,7 @@ static int sierra_net_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
                                netdev_err(dev->net, "HIP/ETH: Invalid pkt\n");
 
                        dev->net->stats.rx_frame_errors++;
-                       /* dev->net->stats.rx_errors incremented by caller */;
+                       /* dev->net->stats.rx_errors incremented by caller */
                        return 0;
                }
 
@@ -866,8 +862,8 @@ static int sierra_net_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
 }
 
 /* ---------------------------- Transmit data path ----------------------*/
-struct sk_buff *sierra_net_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
-               gfp_t flags)
+static struct sk_buff *sierra_net_tx_fixup(struct usbnet *dev,
+                                          struct sk_buff *skb, gfp_t flags)
 {
        struct sierra_net_data *priv = sierra_net_get_private(dev);
        u16 len;
@@ -918,10 +914,6 @@ struct sk_buff *sierra_net_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
        return NULL;
 }
 
-static const struct sierra_net_info_data sierra_net_info_data_direct_ip = {
-       .rx_urb_size = 8 * 1024,
-};
-
 static const struct driver_info sierra_net_info_direct_ip = {
        .description = "Sierra Wireless USB-to-WWAN Modem",
        .flags = FLAG_WWAN | FLAG_SEND_ZLP,
@@ -930,7 +922,6 @@ static const struct driver_info sierra_net_info_direct_ip = {
        .status = sierra_net_status,
        .rx_fixup = sierra_net_rx_fixup,
        .tx_fixup = sierra_net_tx_fixup,
-       .data = (unsigned long)&sierra_net_info_data_direct_ip,
 };
 
 #define DIRECT_IP_DEVICE(vend, prod) \
index 376143e8a1aaf6f78ee44fb76888922064c46dd1..b77ae76f4aa8f96bbbcabb2d2c4eac5a837d3c29 100644 (file)
@@ -52,6 +52,7 @@
 #define USB_PRODUCT_ID_LAN7500         (0x7500)
 #define USB_PRODUCT_ID_LAN7505         (0x7505)
 #define RXW_PADDING                    2
+#define SUPPORTED_WAKE                 (WAKE_MAGIC)
 
 #define check_warn(ret, fmt, args...) \
        ({ if (ret < 0) netdev_warn(dev->net, fmt, ##args); })
@@ -65,6 +66,7 @@
 struct smsc75xx_priv {
        struct usbnet *dev;
        u32 rfe_ctl;
+       u32 wolopts;
        u32 multicast_hash_table[DP_SEL_VHF_HASH_LEN];
        struct mutex dataport_mutex;
        spinlock_t rfe_ctl_lock;
@@ -135,6 +137,30 @@ static int __must_check smsc75xx_write_reg(struct usbnet *dev, u32 index,
        return ret;
 }
 
+static int smsc75xx_set_feature(struct usbnet *dev, u32 feature)
+{
+       if (WARN_ON_ONCE(!dev))
+               return -EINVAL;
+
+       cpu_to_le32s(&feature);
+
+       return usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
+               USB_REQ_SET_FEATURE, USB_RECIP_DEVICE, feature, 0, NULL, 0,
+               USB_CTRL_SET_TIMEOUT);
+}
+
+static int smsc75xx_clear_feature(struct usbnet *dev, u32 feature)
+{
+       if (WARN_ON_ONCE(!dev))
+               return -EINVAL;
+
+       cpu_to_le32s(&feature);
+
+       return usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
+               USB_REQ_CLEAR_FEATURE, USB_RECIP_DEVICE, feature, 0, NULL, 0,
+               USB_CTRL_SET_TIMEOUT);
+}
+
 /* Loop until the read is completed with timeout
  * called with phy_mutex held */
 static int smsc75xx_phy_wait_not_busy(struct usbnet *dev)
@@ -578,6 +604,26 @@ static int smsc75xx_ethtool_set_eeprom(struct net_device *netdev,
        return smsc75xx_write_eeprom(dev, ee->offset, ee->len, data);
 }
 
+static void smsc75xx_ethtool_get_wol(struct net_device *net,
+                                    struct ethtool_wolinfo *wolinfo)
+{
+       struct usbnet *dev = netdev_priv(net);
+       struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
+
+       wolinfo->supported = SUPPORTED_WAKE;
+       wolinfo->wolopts = pdata->wolopts;
+}
+
+static int smsc75xx_ethtool_set_wol(struct net_device *net,
+                                   struct ethtool_wolinfo *wolinfo)
+{
+       struct usbnet *dev = netdev_priv(net);
+       struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
+
+       pdata->wolopts = wolinfo->wolopts & SUPPORTED_WAKE;
+       return 0;
+}
+
 static const struct ethtool_ops smsc75xx_ethtool_ops = {
        .get_link       = usbnet_get_link,
        .nway_reset     = usbnet_nway_reset,
@@ -589,6 +635,8 @@ static const struct ethtool_ops smsc75xx_ethtool_ops = {
        .get_eeprom_len = smsc75xx_ethtool_get_eeprom_len,
        .get_eeprom     = smsc75xx_ethtool_get_eeprom,
        .set_eeprom     = smsc75xx_ethtool_set_eeprom,
+       .get_wol        = smsc75xx_ethtool_get_wol,
+       .set_wol        = smsc75xx_ethtool_set_wol,
 };
 
 static int smsc75xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
@@ -756,6 +804,26 @@ static int smsc75xx_set_features(struct net_device *netdev,
        return 0;
 }
 
+static int smsc75xx_wait_ready(struct usbnet *dev)
+{
+       int timeout = 0;
+
+       do {
+               u32 buf;
+               int ret = smsc75xx_read_reg(dev, PMT_CTL, &buf);
+               check_warn_return(ret, "Failed to read PMT_CTL: %d", ret);
+
+               if (buf & PMT_CTL_DEV_RDY)
+                       return 0;
+
+               msleep(10);
+               timeout++;
+       } while (timeout < 100);
+
+       netdev_warn(dev->net, "timeout waiting for device ready");
+       return -EIO;
+}
+
 static int smsc75xx_reset(struct usbnet *dev)
 {
        struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
@@ -764,6 +832,9 @@ static int smsc75xx_reset(struct usbnet *dev)
 
        netif_dbg(dev, ifup, dev->net, "entering smsc75xx_reset");
 
+       ret = smsc75xx_wait_ready(dev);
+       check_warn_return(ret, "device not ready in smsc75xx_reset");
+
        ret = smsc75xx_read_reg(dev, HW_CFG, &buf);
        check_warn_return(ret, "Failed to read HW_CFG: %d", ret);
 
@@ -1083,6 +1154,169 @@ static void smsc75xx_unbind(struct usbnet *dev, struct usb_interface *intf)
        }
 }
 
+static int smsc75xx_suspend(struct usb_interface *intf, pm_message_t message)
+{
+       struct usbnet *dev = usb_get_intfdata(intf);
+       struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
+       int ret;
+       u32 val;
+
+       ret = usbnet_suspend(intf, message);
+       check_warn_return(ret, "usbnet_suspend error");
+
+       /* if no wol options set, enter lowest power SUSPEND2 mode */
+       if (!(pdata->wolopts & SUPPORTED_WAKE)) {
+               netdev_info(dev->net, "entering SUSPEND2 mode");
+
+               /* disable energy detect (link up) & wake up events */
+               ret = smsc75xx_read_reg(dev, WUCSR, &val);
+               check_warn_return(ret, "Error reading WUCSR");
+
+               val &= ~(WUCSR_MPEN | WUCSR_WUEN);
+
+               ret = smsc75xx_write_reg(dev, WUCSR, val);
+               check_warn_return(ret, "Error writing WUCSR");
+
+               ret = smsc75xx_read_reg(dev, PMT_CTL, &val);
+               check_warn_return(ret, "Error reading PMT_CTL");
+
+               val &= ~(PMT_CTL_ED_EN | PMT_CTL_WOL_EN);
+
+               ret = smsc75xx_write_reg(dev, PMT_CTL, val);
+               check_warn_return(ret, "Error writing PMT_CTL");
+
+               /* enter suspend2 mode */
+               ret = smsc75xx_read_reg(dev, PMT_CTL, &val);
+               check_warn_return(ret, "Error reading PMT_CTL");
+
+               val &= ~(PMT_CTL_SUS_MODE | PMT_CTL_WUPS | PMT_CTL_PHY_RST);
+               val |= PMT_CTL_SUS_MODE_2;
+
+               ret = smsc75xx_write_reg(dev, PMT_CTL, val);
+               check_warn_return(ret, "Error writing PMT_CTL");
+
+               return 0;
+       }
+
+       if (pdata->wolopts & WAKE_MAGIC) {
+               /* clear any pending magic packet status */
+               ret = smsc75xx_read_reg(dev, WUCSR, &val);
+               check_warn_return(ret, "Error reading WUCSR");
+
+               val |= WUCSR_MPR;
+
+               ret = smsc75xx_write_reg(dev, WUCSR, val);
+               check_warn_return(ret, "Error writing WUCSR");
+       }
+
+       /* enable/disable magic packet wake */
+       ret = smsc75xx_read_reg(dev, WUCSR, &val);
+       check_warn_return(ret, "Error reading WUCSR");
+
+       if (pdata->wolopts & WAKE_MAGIC) {
+               netdev_info(dev->net, "enabling magic packet wakeup");
+               val |= WUCSR_MPEN;
+       } else {
+               netdev_info(dev->net, "disabling magic packet wakeup");
+               val &= ~WUCSR_MPEN;
+       }
+
+       ret = smsc75xx_write_reg(dev, WUCSR, val);
+       check_warn_return(ret, "Error writing WUCSR");
+
+       /* enable wol wakeup source */
+       ret = smsc75xx_read_reg(dev, PMT_CTL, &val);
+       check_warn_return(ret, "Error reading PMT_CTL");
+
+       val |= PMT_CTL_WOL_EN;
+
+       ret = smsc75xx_write_reg(dev, PMT_CTL, val);
+       check_warn_return(ret, "Error writing PMT_CTL");
+
+       /* enable receiver */
+       ret = smsc75xx_read_reg(dev, MAC_RX, &val);
+       check_warn_return(ret, "Failed to read MAC_RX: %d", ret);
+
+       val |= MAC_RX_RXEN;
+
+       ret = smsc75xx_write_reg(dev, MAC_RX, val);
+       check_warn_return(ret, "Failed to write MAC_RX: %d", ret);
+
+       /* some wol options are enabled, so enter SUSPEND0 */
+       netdev_info(dev->net, "entering SUSPEND0 mode");
+
+       ret = smsc75xx_read_reg(dev, PMT_CTL, &val);
+       check_warn_return(ret, "Error reading PMT_CTL");
+
+       val &= (~(PMT_CTL_SUS_MODE | PMT_CTL_WUPS | PMT_CTL_PHY_RST));
+       val |= PMT_CTL_SUS_MODE_0;
+
+       ret = smsc75xx_write_reg(dev, PMT_CTL, val);
+       check_warn_return(ret, "Error writing PMT_CTL");
+
+       /* clear wol status */
+       val &= ~PMT_CTL_WUPS;
+       val |= PMT_CTL_WUPS_WOL;
+       ret = smsc75xx_write_reg(dev, PMT_CTL, val);
+       check_warn_return(ret, "Error writing PMT_CTL");
+
+       /* read back PMT_CTL */
+       ret = smsc75xx_read_reg(dev, PMT_CTL, &val);
+       check_warn_return(ret, "Error reading PMT_CTL");
+
+       smsc75xx_set_feature(dev, USB_DEVICE_REMOTE_WAKEUP);
+
+       return 0;
+}
+
+static int smsc75xx_resume(struct usb_interface *intf)
+{
+       struct usbnet *dev = usb_get_intfdata(intf);
+       struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
+       int ret;
+       u32 val;
+
+       if (pdata->wolopts & WAKE_MAGIC) {
+               netdev_info(dev->net, "resuming from SUSPEND0");
+
+               smsc75xx_clear_feature(dev, USB_DEVICE_REMOTE_WAKEUP);
+
+               /* Disable magic packet wake */
+               ret = smsc75xx_read_reg(dev, WUCSR, &val);
+               check_warn_return(ret, "Error reading WUCSR");
+
+               val &= ~WUCSR_MPEN;
+
+               ret = smsc75xx_write_reg(dev, WUCSR, val);
+               check_warn_return(ret, "Error writing WUCSR");
+
+               /* clear wake-up status */
+               ret = smsc75xx_read_reg(dev, PMT_CTL, &val);
+               check_warn_return(ret, "Error reading PMT_CTL");
+
+               val &= ~PMT_CTL_WOL_EN;
+               val |= PMT_CTL_WUPS;
+
+               ret = smsc75xx_write_reg(dev, PMT_CTL, val);
+               check_warn_return(ret, "Error writing PMT_CTL");
+       } else {
+               netdev_info(dev->net, "resuming from SUSPEND2");
+
+               ret = smsc75xx_read_reg(dev, PMT_CTL, &val);
+               check_warn_return(ret, "Error reading PMT_CTL");
+
+               val |= PMT_CTL_PHY_PWRUP;
+
+               ret = smsc75xx_write_reg(dev, PMT_CTL, val);
+               check_warn_return(ret, "Error writing PMT_CTL");
+       }
+
+       ret = smsc75xx_wait_ready(dev);
+       check_warn_return(ret, "device not ready in smsc75xx_resume");
+
+       return usbnet_resume(intf);
+}
+
 static void smsc75xx_rx_csum_offload(struct usbnet *dev, struct sk_buff *skb,
                                     u32 rx_cmd_a, u32 rx_cmd_b)
 {
@@ -1251,9 +1485,9 @@ static struct usb_driver smsc75xx_driver = {
        .name           = SMSC_CHIPNAME,
        .id_table       = products,
        .probe          = usbnet_probe,
-       .suspend        = usbnet_suspend,
-       .resume         = usbnet_resume,
-       .reset_resume   = usbnet_resume,
+       .suspend        = smsc75xx_suspend,
+       .resume         = smsc75xx_resume,
+       .reset_resume   = smsc75xx_resume,
        .disconnect     = usbnet_disconnect,
        .disable_hub_initiated_lpm = 1,
 };
index d45e539a84b79daa0b8b4f2f1f5c993326e2211d..7479a5761d0d6b35c9a275ae470e49dc959de366 100644 (file)
 #define SMSC95XX_INTERNAL_PHY_ID       (1)
 #define SMSC95XX_TX_OVERHEAD           (8)
 #define SMSC95XX_TX_OVERHEAD_CSUM      (12)
+#define SUPPORTED_WAKE                 (WAKE_MAGIC)
+
+#define check_warn(ret, fmt, args...) \
+       ({ if (ret < 0) netdev_warn(dev->net, fmt, ##args); })
+
+#define check_warn_return(ret, fmt, args...) \
+       ({ if (ret < 0) { netdev_warn(dev->net, fmt, ##args); return ret; } })
+
+#define check_warn_goto_done(ret, fmt, args...) \
+       ({ if (ret < 0) { netdev_warn(dev->net, fmt, ##args); goto done; } })
 
 struct smsc95xx_priv {
        u32 mac_cr;
        u32 hash_hi;
        u32 hash_lo;
+       u32 wolopts;
        spinlock_t mac_cr_lock;
 };
 
@@ -63,7 +74,8 @@ static bool turbo_mode = true;
 module_param(turbo_mode, bool, 0644);
 MODULE_PARM_DESC(turbo_mode, "Enable multiple frames per Rx transaction");
 
-static int smsc95xx_read_reg(struct usbnet *dev, u32 index, u32 *data)
+static int __must_check smsc95xx_read_reg(struct usbnet *dev, u32 index,
+                                         u32 *data)
 {
        u32 *buf = kmalloc(4, GFP_KERNEL);
        int ret;
@@ -88,7 +100,8 @@ static int smsc95xx_read_reg(struct usbnet *dev, u32 index, u32 *data)
        return ret;
 }
 
-static int smsc95xx_write_reg(struct usbnet *dev, u32 index, u32 data)
+static int __must_check smsc95xx_write_reg(struct usbnet *dev, u32 index,
+                                          u32 data)
 {
        u32 *buf = kmalloc(4, GFP_KERNEL);
        int ret;
@@ -114,15 +127,41 @@ static int smsc95xx_write_reg(struct usbnet *dev, u32 index, u32 data)
        return ret;
 }
 
+static int smsc95xx_set_feature(struct usbnet *dev, u32 feature)
+{
+       if (WARN_ON_ONCE(!dev))
+               return -EINVAL;
+
+       cpu_to_le32s(&feature);
+
+       return usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
+               USB_REQ_SET_FEATURE, USB_RECIP_DEVICE, feature, 0, NULL, 0,
+               USB_CTRL_SET_TIMEOUT);
+}
+
+static int smsc95xx_clear_feature(struct usbnet *dev, u32 feature)
+{
+       if (WARN_ON_ONCE(!dev))
+               return -EINVAL;
+
+       cpu_to_le32s(&feature);
+
+       return usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
+               USB_REQ_CLEAR_FEATURE, USB_RECIP_DEVICE, feature, 0, NULL, 0,
+               USB_CTRL_SET_TIMEOUT);
+}
+
 /* Loop until the read is completed with timeout
  * called with phy_mutex held */
-static int smsc95xx_phy_wait_not_busy(struct usbnet *dev)
+static int __must_check smsc95xx_phy_wait_not_busy(struct usbnet *dev)
 {
        unsigned long start_time = jiffies;
        u32 val;
+       int ret;
 
        do {
-               smsc95xx_read_reg(dev, MII_ADDR, &val);
+               ret = smsc95xx_read_reg(dev, MII_ADDR, &val);
+               check_warn_return(ret, "Error reading MII_ACCESS");
                if (!(val & MII_BUSY_))
                        return 0;
        } while (!time_after(jiffies, start_time + HZ));
@@ -134,33 +173,32 @@ static int smsc95xx_mdio_read(struct net_device *netdev, int phy_id, int idx)
 {
        struct usbnet *dev = netdev_priv(netdev);
        u32 val, addr;
+       int ret;
 
        mutex_lock(&dev->phy_mutex);
 
        /* confirm MII not busy */
-       if (smsc95xx_phy_wait_not_busy(dev)) {
-               netdev_warn(dev->net, "MII is busy in smsc95xx_mdio_read\n");
-               mutex_unlock(&dev->phy_mutex);
-               return -EIO;
-       }
+       ret = smsc95xx_phy_wait_not_busy(dev);
+       check_warn_goto_done(ret, "MII is busy in smsc95xx_mdio_read");
 
        /* set the address, index & direction (read from PHY) */
        phy_id &= dev->mii.phy_id_mask;
        idx &= dev->mii.reg_num_mask;
        addr = (phy_id << 11) | (idx << 6) | MII_READ_;
-       smsc95xx_write_reg(dev, MII_ADDR, addr);
+       ret = smsc95xx_write_reg(dev, MII_ADDR, addr);
+       check_warn_goto_done(ret, "Error writing MII_ADDR");
 
-       if (smsc95xx_phy_wait_not_busy(dev)) {
-               netdev_warn(dev->net, "Timed out reading MII reg %02X\n", idx);
-               mutex_unlock(&dev->phy_mutex);
-               return -EIO;
-       }
+       ret = smsc95xx_phy_wait_not_busy(dev);
+       check_warn_goto_done(ret, "Timed out reading MII reg %02X", idx);
 
-       smsc95xx_read_reg(dev, MII_DATA, &val);
+       ret = smsc95xx_read_reg(dev, MII_DATA, &val);
+       check_warn_goto_done(ret, "Error reading MII_DATA");
 
-       mutex_unlock(&dev->phy_mutex);
+       ret = (u16)(val & 0xFFFF);
 
-       return (u16)(val & 0xFFFF);
+done:
+       mutex_unlock(&dev->phy_mutex);
+       return ret;
 }
 
 static void smsc95xx_mdio_write(struct net_device *netdev, int phy_id, int idx,
@@ -168,38 +206,41 @@ static void smsc95xx_mdio_write(struct net_device *netdev, int phy_id, int idx,
 {
        struct usbnet *dev = netdev_priv(netdev);
        u32 val, addr;
+       int ret;
 
        mutex_lock(&dev->phy_mutex);
 
        /* confirm MII not busy */
-       if (smsc95xx_phy_wait_not_busy(dev)) {
-               netdev_warn(dev->net, "MII is busy in smsc95xx_mdio_write\n");
-               mutex_unlock(&dev->phy_mutex);
-               return;
-       }
+       ret = smsc95xx_phy_wait_not_busy(dev);
+       check_warn_goto_done(ret, "MII is busy in smsc95xx_mdio_write");
 
        val = regval;
-       smsc95xx_write_reg(dev, MII_DATA, val);
+       ret = smsc95xx_write_reg(dev, MII_DATA, val);
+       check_warn_goto_done(ret, "Error writing MII_DATA");
 
        /* set the address, index & direction (write to PHY) */
        phy_id &= dev->mii.phy_id_mask;
        idx &= dev->mii.reg_num_mask;
        addr = (phy_id << 11) | (idx << 6) | MII_WRITE_;
-       smsc95xx_write_reg(dev, MII_ADDR, addr);
+       ret = smsc95xx_write_reg(dev, MII_ADDR, addr);
+       check_warn_goto_done(ret, "Error writing MII_ADDR");
 
-       if (smsc95xx_phy_wait_not_busy(dev))
-               netdev_warn(dev->net, "Timed out writing MII reg %02X\n", idx);
+       ret = smsc95xx_phy_wait_not_busy(dev);
+       check_warn_goto_done(ret, "Timed out writing MII reg %02X", idx);
 
+done:
        mutex_unlock(&dev->phy_mutex);
 }
 
-static int smsc95xx_wait_eeprom(struct usbnet *dev)
+static int __must_check smsc95xx_wait_eeprom(struct usbnet *dev)
 {
        unsigned long start_time = jiffies;
        u32 val;
+       int ret;
 
        do {
-               smsc95xx_read_reg(dev, E2P_CMD, &val);
+               ret = smsc95xx_read_reg(dev, E2P_CMD, &val);
+               check_warn_return(ret, "Error reading E2P_CMD");
                if (!(val & E2P_CMD_BUSY_) || (val & E2P_CMD_TIMEOUT_))
                        break;
                udelay(40);
@@ -213,13 +254,15 @@ static int smsc95xx_wait_eeprom(struct usbnet *dev)
        return 0;
 }
 
-static int smsc95xx_eeprom_confirm_not_busy(struct usbnet *dev)
+static int __must_check smsc95xx_eeprom_confirm_not_busy(struct usbnet *dev)
 {
        unsigned long start_time = jiffies;
        u32 val;
+       int ret;
 
        do {
-               smsc95xx_read_reg(dev, E2P_CMD, &val);
+               ret = smsc95xx_read_reg(dev, E2P_CMD, &val);
+               check_warn_return(ret, "Error reading E2P_CMD");
 
                if (!(val & E2P_CMD_BUSY_))
                        return 0;
@@ -246,13 +289,15 @@ static int smsc95xx_read_eeprom(struct usbnet *dev, u32 offset, u32 length,
 
        for (i = 0; i < length; i++) {
                val = E2P_CMD_BUSY_ | E2P_CMD_READ_ | (offset & E2P_CMD_ADDR_);
-               smsc95xx_write_reg(dev, E2P_CMD, val);
+               ret = smsc95xx_write_reg(dev, E2P_CMD, val);
+               check_warn_return(ret, "Error writing E2P_CMD");
 
                ret = smsc95xx_wait_eeprom(dev);
                if (ret < 0)
                        return ret;
 
-               smsc95xx_read_reg(dev, E2P_DATA, &val);
+               ret = smsc95xx_read_reg(dev, E2P_DATA, &val);
+               check_warn_return(ret, "Error reading E2P_DATA");
 
                data[i] = val & 0xFF;
                offset++;
@@ -276,7 +321,8 @@ static int smsc95xx_write_eeprom(struct usbnet *dev, u32 offset, u32 length,
 
        /* Issue write/erase enable command */
        val = E2P_CMD_BUSY_ | E2P_CMD_EWEN_;
-       smsc95xx_write_reg(dev, E2P_CMD, val);
+       ret = smsc95xx_write_reg(dev, E2P_CMD, val);
+       check_warn_return(ret, "Error writing E2P_DATA");
 
        ret = smsc95xx_wait_eeprom(dev);
        if (ret < 0)
@@ -286,11 +332,13 @@ static int smsc95xx_write_eeprom(struct usbnet *dev, u32 offset, u32 length,
 
                /* Fill data register */
                val = data[i];
-               smsc95xx_write_reg(dev, E2P_DATA, val);
+               ret = smsc95xx_write_reg(dev, E2P_DATA, val);
+               check_warn_return(ret, "Error writing E2P_DATA");
 
                /* Send "write" command */
                val = E2P_CMD_BUSY_ | E2P_CMD_WRITE_ | (offset & E2P_CMD_ADDR_);
-               smsc95xx_write_reg(dev, E2P_CMD, val);
+               ret = smsc95xx_write_reg(dev, E2P_CMD, val);
+               check_warn_return(ret, "Error writing E2P_CMD");
 
                ret = smsc95xx_wait_eeprom(dev);
                if (ret < 0)
@@ -308,14 +356,14 @@ static void smsc95xx_async_cmd_callback(struct urb *urb)
        struct usbnet *dev = usb_context->dev;
        int status = urb->status;
 
-       if (status < 0)
-               netdev_warn(dev->net, "async callback failed with %d\n", status);
+       check_warn(status, "async callback failed with %d\n", status);
 
        kfree(usb_context);
        usb_free_urb(urb);
 }
 
-static int smsc95xx_write_reg_async(struct usbnet *dev, u16 index, u32 *data)
+static int __must_check smsc95xx_write_reg_async(struct usbnet *dev, u16 index,
+                                                u32 *data)
 {
        struct usb_context *usb_context;
        int status;
@@ -371,6 +419,7 @@ static void smsc95xx_set_multicast(struct net_device *netdev)
        struct usbnet *dev = netdev_priv(netdev);
        struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
        unsigned long flags;
+       int ret;
 
        pdata->hash_hi = 0;
        pdata->hash_lo = 0;
@@ -411,21 +460,23 @@ static void smsc95xx_set_multicast(struct net_device *netdev)
        spin_unlock_irqrestore(&pdata->mac_cr_lock, flags);
 
        /* Initiate async writes, as we can't wait for completion here */
-       smsc95xx_write_reg_async(dev, HASHH, &pdata->hash_hi);
-       smsc95xx_write_reg_async(dev, HASHL, &pdata->hash_lo);
-       smsc95xx_write_reg_async(dev, MAC_CR, &pdata->mac_cr);
+       ret = smsc95xx_write_reg_async(dev, HASHH, &pdata->hash_hi);
+       check_warn(ret, "failed to initiate async write to HASHH");
+
+       ret = smsc95xx_write_reg_async(dev, HASHL, &pdata->hash_lo);
+       check_warn(ret, "failed to initiate async write to HASHL");
+
+       ret = smsc95xx_write_reg_async(dev, MAC_CR, &pdata->mac_cr);
+       check_warn(ret, "failed to initiate async write to MAC_CR");
 }
 
-static void smsc95xx_phy_update_flowcontrol(struct usbnet *dev, u8 duplex,
-                                           u16 lcladv, u16 rmtadv)
+static int smsc95xx_phy_update_flowcontrol(struct usbnet *dev, u8 duplex,
+                                          u16 lcladv, u16 rmtadv)
 {
        u32 flow, afc_cfg = 0;
 
        int ret = smsc95xx_read_reg(dev, AFC_CFG, &afc_cfg);
-       if (ret < 0) {
-               netdev_warn(dev->net, "error reading AFC_CFG\n");
-               return;
-       }
+       check_warn_return(ret, "Error reading AFC_CFG");
 
        if (duplex == DUPLEX_FULL) {
                u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
@@ -449,8 +500,13 @@ static void smsc95xx_phy_update_flowcontrol(struct usbnet *dev, u8 duplex,
                afc_cfg |= 0xF;
        }
 
-       smsc95xx_write_reg(dev, FLOW, flow);
-       smsc95xx_write_reg(dev, AFC_CFG, afc_cfg);
+       ret = smsc95xx_write_reg(dev, FLOW, flow);
+       check_warn_return(ret, "Error writing FLOW");
+
+       ret = smsc95xx_write_reg(dev, AFC_CFG, afc_cfg);
+       check_warn_return(ret, "Error writing AFC_CFG");
+
+       return 0;
 }
 
 static int smsc95xx_link_reset(struct usbnet *dev)
@@ -460,12 +516,14 @@ static int smsc95xx_link_reset(struct usbnet *dev)
        struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
        unsigned long flags;
        u16 lcladv, rmtadv;
-       u32 intdata;
+       int ret;
 
        /* clear interrupt status */
-       smsc95xx_mdio_read(dev->net, mii->phy_id, PHY_INT_SRC);
-       intdata = 0xFFFFFFFF;
-       smsc95xx_write_reg(dev, INT_STS, intdata);
+       ret = smsc95xx_mdio_read(dev->net, mii->phy_id, PHY_INT_SRC);
+       check_warn_return(ret, "Error reading PHY_INT_SRC");
+
+       ret = smsc95xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
+       check_warn_return(ret, "Error writing INT_STS");
 
        mii_check_media(mii, 1, 1);
        mii_ethtool_gset(&dev->mii, &ecmd);
@@ -486,9 +544,11 @@ static int smsc95xx_link_reset(struct usbnet *dev)
        }
        spin_unlock_irqrestore(&pdata->mac_cr_lock, flags);
 
-       smsc95xx_write_reg(dev, MAC_CR, pdata->mac_cr);
+       ret = smsc95xx_write_reg(dev, MAC_CR, pdata->mac_cr);
+       check_warn_return(ret, "Error writing MAC_CR");
 
-       smsc95xx_phy_update_flowcontrol(dev, ecmd.duplex, lcladv, rmtadv);
+       ret = smsc95xx_phy_update_flowcontrol(dev, ecmd.duplex, lcladv, rmtadv);
+       check_warn_return(ret, "Error updating PHY flow control");
 
        return 0;
 }
@@ -524,10 +584,7 @@ static int smsc95xx_set_features(struct net_device *netdev,
        int ret;
 
        ret = smsc95xx_read_reg(dev, COE_CR, &read_buf);
-       if (ret < 0) {
-               netdev_warn(dev->net, "Failed to read COE_CR: %d\n", ret);
-               return ret;
-       }
+       check_warn_return(ret, "Failed to read COE_CR: %d\n", ret);
 
        if (features & NETIF_F_HW_CSUM)
                read_buf |= Tx_COE_EN_;
@@ -540,10 +597,7 @@ static int smsc95xx_set_features(struct net_device *netdev,
                read_buf &= ~Rx_COE_EN_;
 
        ret = smsc95xx_write_reg(dev, COE_CR, read_buf);
-       if (ret < 0) {
-               netdev_warn(dev->net, "Failed to write COE_CR: %d\n", ret);
-               return ret;
-       }
+       check_warn_return(ret, "Failed to write COE_CR: %d\n", ret);
 
        netif_dbg(dev, hw, dev->net, "COE_CR = 0x%08x\n", read_buf);
        return 0;
@@ -608,6 +662,26 @@ smsc95xx_ethtool_getregs(struct net_device *netdev, struct ethtool_regs *regs,
        }
 }
 
+static void smsc95xx_ethtool_get_wol(struct net_device *net,
+                                    struct ethtool_wolinfo *wolinfo)
+{
+       struct usbnet *dev = netdev_priv(net);
+       struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+
+       wolinfo->supported = SUPPORTED_WAKE;
+       wolinfo->wolopts = pdata->wolopts;
+}
+
+static int smsc95xx_ethtool_set_wol(struct net_device *net,
+                                   struct ethtool_wolinfo *wolinfo)
+{
+       struct usbnet *dev = netdev_priv(net);
+       struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+
+       pdata->wolopts = wolinfo->wolopts & SUPPORTED_WAKE;
+       return 0;
+}
+
 static const struct ethtool_ops smsc95xx_ethtool_ops = {
        .get_link       = usbnet_get_link,
        .nway_reset     = usbnet_nway_reset,
@@ -621,6 +695,8 @@ static const struct ethtool_ops smsc95xx_ethtool_ops = {
        .set_eeprom     = smsc95xx_ethtool_set_eeprom,
        .get_regs_len   = smsc95xx_ethtool_getregslen,
        .get_regs       = smsc95xx_ethtool_getregs,
+       .get_wol        = smsc95xx_ethtool_get_wol,
+       .set_wol        = smsc95xx_ethtool_set_wol,
 };
 
 static int smsc95xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
@@ -658,55 +734,56 @@ static int smsc95xx_set_mac_address(struct usbnet *dev)
        int ret;
 
        ret = smsc95xx_write_reg(dev, ADDRL, addr_lo);
-       if (ret < 0) {
-               netdev_warn(dev->net, "Failed to write ADDRL: %d\n", ret);
-               return ret;
-       }
+       check_warn_return(ret, "Failed to write ADDRL: %d\n", ret);
 
        ret = smsc95xx_write_reg(dev, ADDRH, addr_hi);
-       if (ret < 0) {
-               netdev_warn(dev->net, "Failed to write ADDRH: %d\n", ret);
-               return ret;
-       }
+       check_warn_return(ret, "Failed to write ADDRH: %d\n", ret);
 
        return 0;
 }
 
 /* starts the TX path */
-static void smsc95xx_start_tx_path(struct usbnet *dev)
+static int smsc95xx_start_tx_path(struct usbnet *dev)
 {
        struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
        unsigned long flags;
-       u32 reg_val;
+       int ret;
 
        /* Enable Tx at MAC */
        spin_lock_irqsave(&pdata->mac_cr_lock, flags);
        pdata->mac_cr |= MAC_CR_TXEN_;
        spin_unlock_irqrestore(&pdata->mac_cr_lock, flags);
 
-       smsc95xx_write_reg(dev, MAC_CR, pdata->mac_cr);
+       ret = smsc95xx_write_reg(dev, MAC_CR, pdata->mac_cr);
+       check_warn_return(ret, "Failed to write MAC_CR: %d\n", ret);
 
        /* Enable Tx at SCSRs */
-       reg_val = TX_CFG_ON_;
-       smsc95xx_write_reg(dev, TX_CFG, reg_val);
+       ret = smsc95xx_write_reg(dev, TX_CFG, TX_CFG_ON_);
+       check_warn_return(ret, "Failed to write TX_CFG: %d\n", ret);
+
+       return 0;
 }
 
 /* Starts the Receive path */
-static void smsc95xx_start_rx_path(struct usbnet *dev)
+static int smsc95xx_start_rx_path(struct usbnet *dev)
 {
        struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
        unsigned long flags;
+       int ret;
 
        spin_lock_irqsave(&pdata->mac_cr_lock, flags);
        pdata->mac_cr |= MAC_CR_RXEN_;
        spin_unlock_irqrestore(&pdata->mac_cr_lock, flags);
 
-       smsc95xx_write_reg(dev, MAC_CR, pdata->mac_cr);
+       ret = smsc95xx_write_reg(dev, MAC_CR, pdata->mac_cr);
+       check_warn_return(ret, "Failed to write MAC_CR: %d\n", ret);
+
+       return 0;
 }
 
 static int smsc95xx_phy_initialize(struct usbnet *dev)
 {
-       int bmcr, timeout = 0;
+       int bmcr, ret, timeout = 0;
 
        /* Initialize MII structure */
        dev->mii.dev = dev->net;
@@ -735,7 +812,8 @@ static int smsc95xx_phy_initialize(struct usbnet *dev)
                ADVERTISE_PAUSE_ASYM);
 
        /* read to clear */
-       smsc95xx_mdio_read(dev->net, dev->mii.phy_id, PHY_INT_SRC);
+       ret = smsc95xx_mdio_read(dev->net, dev->mii.phy_id, PHY_INT_SRC);
+       check_warn_return(ret, "Failed to read PHY_INT_SRC during init");
 
        smsc95xx_mdio_write(dev->net, dev->mii.phy_id, PHY_INT_MASK,
                PHY_INT_MASK_DEFAULT_);
@@ -753,22 +831,14 @@ static int smsc95xx_reset(struct usbnet *dev)
 
        netif_dbg(dev, ifup, dev->net, "entering smsc95xx_reset\n");
 
-       write_buf = HW_CFG_LRST_;
-       ret = smsc95xx_write_reg(dev, HW_CFG, write_buf);
-       if (ret < 0) {
-               netdev_warn(dev->net, "Failed to write HW_CFG_LRST_ bit in HW_CFG register, ret = %d\n",
-                           ret);
-               return ret;
-       }
+       ret = smsc95xx_write_reg(dev, HW_CFG, HW_CFG_LRST_);
+       check_warn_return(ret, "Failed to write HW_CFG_LRST_ bit in HW_CFG\n");
 
        timeout = 0;
        do {
-               ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf);
-               if (ret < 0) {
-                       netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret);
-                       return ret;
-               }
                msleep(10);
+               ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf);
+               check_warn_return(ret, "Failed to read HW_CFG: %d\n", ret);
                timeout++;
        } while ((read_buf & HW_CFG_LRST_) && (timeout < 100));
 
@@ -777,21 +847,14 @@ static int smsc95xx_reset(struct usbnet *dev)
                return ret;
        }
 
-       write_buf = PM_CTL_PHY_RST_;
-       ret = smsc95xx_write_reg(dev, PM_CTRL, write_buf);
-       if (ret < 0) {
-               netdev_warn(dev->net, "Failed to write PM_CTRL: %d\n", ret);
-               return ret;
-       }
+       ret = smsc95xx_write_reg(dev, PM_CTRL, PM_CTL_PHY_RST_);
+       check_warn_return(ret, "Failed to write PM_CTRL: %d\n", ret);
 
        timeout = 0;
        do {
-               ret = smsc95xx_read_reg(dev, PM_CTRL, &read_buf);
-               if (ret < 0) {
-                       netdev_warn(dev->net, "Failed to read PM_CTRL: %d\n", ret);
-                       return ret;
-               }
                msleep(10);
+               ret = smsc95xx_read_reg(dev, PM_CTRL, &read_buf);
+               check_warn_return(ret, "Failed to read PM_CTRL: %d\n", ret);
                timeout++;
        } while ((read_buf & PM_CTL_PHY_RST_) && (timeout < 100));
 
@@ -808,10 +871,7 @@ static int smsc95xx_reset(struct usbnet *dev)
                  "MAC Address: %pM\n", dev->net->dev_addr);
 
        ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf);
-       if (ret < 0) {
-               netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret);
-               return ret;
-       }
+       check_warn_return(ret, "Failed to read HW_CFG: %d\n", ret);
 
        netif_dbg(dev, ifup, dev->net,
                  "Read Value from HW_CFG : 0x%08x\n", read_buf);
@@ -819,17 +879,10 @@ static int smsc95xx_reset(struct usbnet *dev)
        read_buf |= HW_CFG_BIR_;
 
        ret = smsc95xx_write_reg(dev, HW_CFG, read_buf);
-       if (ret < 0) {
-               netdev_warn(dev->net, "Failed to write HW_CFG_BIR_ bit in HW_CFG register, ret = %d\n",
-                           ret);
-               return ret;
-       }
+       check_warn_return(ret, "Failed to write HW_CFG_BIR_ bit in HW_CFG\n");
 
        ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf);
-       if (ret < 0) {
-               netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret);
-               return ret;
-       }
+       check_warn_return(ret, "Failed to read HW_CFG: %d\n", ret);
        netif_dbg(dev, ifup, dev->net,
                  "Read Value from HW_CFG after writing HW_CFG_BIR_: 0x%08x\n",
                  read_buf);
@@ -849,41 +902,28 @@ static int smsc95xx_reset(struct usbnet *dev)
                  "rx_urb_size=%ld\n", (ulong)dev->rx_urb_size);
 
        ret = smsc95xx_write_reg(dev, BURST_CAP, burst_cap);
-       if (ret < 0) {
-               netdev_warn(dev->net, "Failed to write BURST_CAP: %d\n", ret);
-               return ret;
-       }
+       check_warn_return(ret, "Failed to write BURST_CAP: %d\n", ret);
 
        ret = smsc95xx_read_reg(dev, BURST_CAP, &read_buf);
-       if (ret < 0) {
-               netdev_warn(dev->net, "Failed to read BURST_CAP: %d\n", ret);
-               return ret;
-       }
+       check_warn_return(ret, "Failed to read BURST_CAP: %d\n", ret);
+
        netif_dbg(dev, ifup, dev->net,
                  "Read Value from BURST_CAP after writing: 0x%08x\n",
                  read_buf);
 
-       read_buf = DEFAULT_BULK_IN_DELAY;
-       ret = smsc95xx_write_reg(dev, BULK_IN_DLY, read_buf);
-       if (ret < 0) {
-               netdev_warn(dev->net, "ret = %d\n", ret);
-               return ret;
-       }
+       ret = smsc95xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);
+       check_warn_return(ret, "Failed to write BULK_IN_DLY: %d\n", ret);
 
        ret = smsc95xx_read_reg(dev, BULK_IN_DLY, &read_buf);
-       if (ret < 0) {
-               netdev_warn(dev->net, "Failed to read BULK_IN_DLY: %d\n", ret);
-               return ret;
-       }
+       check_warn_return(ret, "Failed to read BULK_IN_DLY: %d\n", ret);
+
        netif_dbg(dev, ifup, dev->net,
                  "Read Value from BULK_IN_DLY after writing: 0x%08x\n",
                  read_buf);
 
        ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf);
-       if (ret < 0) {
-               netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret);
-               return ret;
-       }
+       check_warn_return(ret, "Failed to read HW_CFG: %d\n", ret);
+
        netif_dbg(dev, ifup, dev->net,
                  "Read Value from HW_CFG: 0x%08x\n", read_buf);
 
@@ -896,101 +936,66 @@ static int smsc95xx_reset(struct usbnet *dev)
        read_buf |= NET_IP_ALIGN << 9;
 
        ret = smsc95xx_write_reg(dev, HW_CFG, read_buf);
-       if (ret < 0) {
-               netdev_warn(dev->net, "Failed to write HW_CFG register, ret=%d\n",
-                           ret);
-               return ret;
-       }
+       check_warn_return(ret, "Failed to write HW_CFG: %d\n", ret);
 
        ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf);
-       if (ret < 0) {
-               netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret);
-               return ret;
-       }
+       check_warn_return(ret, "Failed to read HW_CFG: %d\n", ret);
+
        netif_dbg(dev, ifup, dev->net,
                  "Read Value from HW_CFG after writing: 0x%08x\n", read_buf);
 
-       write_buf = 0xFFFFFFFF;
-       ret = smsc95xx_write_reg(dev, INT_STS, write_buf);
-       if (ret < 0) {
-               netdev_warn(dev->net, "Failed to write INT_STS register, ret=%d\n",
-                           ret);
-               return ret;
-       }
+       ret = smsc95xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
+       check_warn_return(ret, "Failed to write INT_STS: %d\n", ret);
 
        ret = smsc95xx_read_reg(dev, ID_REV, &read_buf);
-       if (ret < 0) {
-               netdev_warn(dev->net, "Failed to read ID_REV: %d\n", ret);
-               return ret;
-       }
+       check_warn_return(ret, "Failed to read ID_REV: %d\n", ret);
        netif_dbg(dev, ifup, dev->net, "ID_REV = 0x%08x\n", read_buf);
 
        /* Configure GPIO pins as LED outputs */
        write_buf = LED_GPIO_CFG_SPD_LED | LED_GPIO_CFG_LNK_LED |
                LED_GPIO_CFG_FDX_LED;
        ret = smsc95xx_write_reg(dev, LED_GPIO_CFG, write_buf);
-       if (ret < 0) {
-               netdev_warn(dev->net, "Failed to write LED_GPIO_CFG register, ret=%d\n",
-                           ret);
-               return ret;
-       }
+       check_warn_return(ret, "Failed to write LED_GPIO_CFG: %d\n", ret);
 
        /* Init Tx */
-       write_buf = 0;
-       ret = smsc95xx_write_reg(dev, FLOW, write_buf);
-       if (ret < 0) {
-               netdev_warn(dev->net, "Failed to write FLOW: %d\n", ret);
-               return ret;
-       }
+       ret = smsc95xx_write_reg(dev, FLOW, 0);
+       check_warn_return(ret, "Failed to write FLOW: %d\n", ret);
 
-       read_buf = AFC_CFG_DEFAULT;
-       ret = smsc95xx_write_reg(dev, AFC_CFG, read_buf);
-       if (ret < 0) {
-               netdev_warn(dev->net, "Failed to write AFC_CFG: %d\n", ret);
-               return ret;
-       }
+       ret = smsc95xx_write_reg(dev, AFC_CFG, AFC_CFG_DEFAULT);
+       check_warn_return(ret, "Failed to write AFC_CFG: %d\n", ret);
 
        /* Don't need mac_cr_lock during initialisation */
        ret = smsc95xx_read_reg(dev, MAC_CR, &pdata->mac_cr);
-       if (ret < 0) {
-               netdev_warn(dev->net, "Failed to read MAC_CR: %d\n", ret);
-               return ret;
-       }
+       check_warn_return(ret, "Failed to read MAC_CR: %d\n", ret);
 
        /* Init Rx */
        /* Set Vlan */
-       write_buf = (u32)ETH_P_8021Q;
-       ret = smsc95xx_write_reg(dev, VLAN1, write_buf);
-       if (ret < 0) {
-               netdev_warn(dev->net, "Failed to write VAN1: %d\n", ret);
-               return ret;
-       }
+       ret = smsc95xx_write_reg(dev, VLAN1, (u32)ETH_P_8021Q);
+       check_warn_return(ret, "Failed to write VLAN1: %d\n", ret);
 
        /* Enable or disable checksum offload engines */
-       smsc95xx_set_features(dev->net, dev->net->features);
+       ret = smsc95xx_set_features(dev->net, dev->net->features);
+       check_warn_return(ret, "Failed to set checksum offload features");
 
        smsc95xx_set_multicast(dev->net);
 
-       if (smsc95xx_phy_initialize(dev) < 0)
-               return -EIO;
+       ret = smsc95xx_phy_initialize(dev);
+       check_warn_return(ret, "Failed to init PHY");
 
        ret = smsc95xx_read_reg(dev, INT_EP_CTL, &read_buf);
-       if (ret < 0) {
-               netdev_warn(dev->net, "Failed to read INT_EP_CTL: %d\n", ret);
-               return ret;
-       }
+       check_warn_return(ret, "Failed to read INT_EP_CTL: %d\n", ret);
 
        /* enable PHY interrupts */
        read_buf |= INT_EP_CTL_PHY_INT_;
 
        ret = smsc95xx_write_reg(dev, INT_EP_CTL, read_buf);
-       if (ret < 0) {
-               netdev_warn(dev->net, "Failed to write INT_EP_CTL: %d\n", ret);
-               return ret;
-       }
+       check_warn_return(ret, "Failed to write INT_EP_CTL: %d\n", ret);
 
-       smsc95xx_start_tx_path(dev);
-       smsc95xx_start_rx_path(dev);
+       ret = smsc95xx_start_tx_path(dev);
+       check_warn_return(ret, "Failed to start TX path");
+
+       ret = smsc95xx_start_rx_path(dev);
+       check_warn_return(ret, "Failed to start RX path");
 
        netif_dbg(dev, ifup, dev->net, "smsc95xx_reset, return 0\n");
        return 0;
@@ -1017,10 +1022,7 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
        printk(KERN_INFO SMSC_CHIPNAME " v" SMSC_DRIVER_VERSION "\n");
 
        ret = usbnet_get_endpoints(dev, intf);
-       if (ret < 0) {
-               netdev_warn(dev->net, "usbnet_get_endpoints failed: %d\n", ret);
-               return ret;
-       }
+       check_warn_return(ret, "usbnet_get_endpoints failed: %d\n", ret);
 
        dev->data[0] = (unsigned long)kzalloc(sizeof(struct smsc95xx_priv),
                GFP_KERNEL);
@@ -1064,6 +1066,153 @@ static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf)
        }
 }
 
+static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message)
+{
+       struct usbnet *dev = usb_get_intfdata(intf);
+       struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+       int ret;
+       u32 val;
+
+       ret = usbnet_suspend(intf, message);
+       check_warn_return(ret, "usbnet_suspend error");
+
+       /* if no wol options set, enter lowest power SUSPEND2 mode */
+       if (!(pdata->wolopts & SUPPORTED_WAKE)) {
+               netdev_info(dev->net, "entering SUSPEND2 mode");
+
+               /* disable energy detect (link up) & wake up events */
+               ret = smsc95xx_read_reg(dev, WUCSR, &val);
+               check_warn_return(ret, "Error reading WUCSR");
+
+               val &= ~(WUCSR_MPEN_ | WUCSR_WAKE_EN_);
+
+               ret = smsc95xx_write_reg(dev, WUCSR, val);
+               check_warn_return(ret, "Error writing WUCSR");
+
+               ret = smsc95xx_read_reg(dev, PM_CTRL, &val);
+               check_warn_return(ret, "Error reading PM_CTRL");
+
+               val &= ~(PM_CTL_ED_EN_ | PM_CTL_WOL_EN_);
+
+               ret = smsc95xx_write_reg(dev, PM_CTRL, val);
+               check_warn_return(ret, "Error writing PM_CTRL");
+
+               /* enter suspend2 mode */
+               ret = smsc95xx_read_reg(dev, PM_CTRL, &val);
+               check_warn_return(ret, "Error reading PM_CTRL");
+
+               val &= ~(PM_CTL_SUS_MODE_ | PM_CTL_WUPS_ | PM_CTL_PHY_RST_);
+               val |= PM_CTL_SUS_MODE_2;
+
+               ret = smsc95xx_write_reg(dev, PM_CTRL, val);
+               check_warn_return(ret, "Error writing PM_CTRL");
+
+               return 0;
+       }
+
+       if (pdata->wolopts & WAKE_MAGIC) {
+               /* clear any pending magic packet status */
+               ret = smsc95xx_read_reg(dev, WUCSR, &val);
+               check_warn_return(ret, "Error reading WUCSR");
+
+               val |= WUCSR_MPR_;
+
+               ret = smsc95xx_write_reg(dev, WUCSR, val);
+               check_warn_return(ret, "Error writing WUCSR");
+       }
+
+       /* enable/disable magic packet wakeup */
+       ret = smsc95xx_read_reg(dev, WUCSR, &val);
+       check_warn_return(ret, "Error reading WUCSR");
+
+       if (pdata->wolopts & WAKE_MAGIC) {
+               netdev_info(dev->net, "enabling magic packet wakeup");
+               val |= WUCSR_MPEN_;
+       } else {
+               netdev_info(dev->net, "disabling magic packet wakeup");
+               val &= ~WUCSR_MPEN_;
+       }
+
+       ret = smsc95xx_write_reg(dev, WUCSR, val);
+       check_warn_return(ret, "Error writing WUCSR");
+
+       /* enable wol wakeup source */
+       ret = smsc95xx_read_reg(dev, PM_CTRL, &val);
+       check_warn_return(ret, "Error reading PM_CTRL");
+
+       val |= PM_CTL_WOL_EN_;
+
+       ret = smsc95xx_write_reg(dev, PM_CTRL, val);
+       check_warn_return(ret, "Error writing PM_CTRL");
+
+       /* enable receiver; NOTE(review): return value ignored — confirm OK */
+       smsc95xx_start_rx_path(dev);
+
+       /* some wol options are enabled, so enter SUSPEND0 */
+       netdev_info(dev->net, "entering SUSPEND0 mode");
+
+       ret = smsc95xx_read_reg(dev, PM_CTRL, &val);
+       check_warn_return(ret, "Error reading PM_CTRL");
+
+       val &= (~(PM_CTL_SUS_MODE_ | PM_CTL_WUPS_ | PM_CTL_PHY_RST_));
+       val |= PM_CTL_SUS_MODE_0;
+
+       ret = smsc95xx_write_reg(dev, PM_CTRL, val);
+       check_warn_return(ret, "Error writing PM_CTRL");
+
+       /* clear wol status */
+       val &= ~PM_CTL_WUPS_;
+       val |= PM_CTL_WUPS_WOL_;
+       ret = smsc95xx_write_reg(dev, PM_CTRL, val);
+       check_warn_return(ret, "Error writing PM_CTRL");
+
+       /* read back PM_CTRL */
+       ret = smsc95xx_read_reg(dev, PM_CTRL, &val);
+       check_warn_return(ret, "Error reading PM_CTRL");
+
+       smsc95xx_set_feature(dev, USB_DEVICE_REMOTE_WAKEUP);
+
+       return 0;
+}
+
+static int smsc95xx_resume(struct usb_interface *intf)
+{
+       struct usbnet *dev = usb_get_intfdata(intf);
+       struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+       int ret;
+       u32 val;
+
+       BUG_ON(!dev);
+
+       if (pdata->wolopts & WAKE_MAGIC) {
+               smsc95xx_clear_feature(dev, USB_DEVICE_REMOTE_WAKEUP);
+
+               /* Disable magic packet wakeup */
+               ret = smsc95xx_read_reg(dev, WUCSR, &val);
+               check_warn_return(ret, "Error reading WUCSR");
+
+               val &= ~WUCSR_MPEN_;
+
+               ret = smsc95xx_write_reg(dev, WUCSR, val);
+               check_warn_return(ret, "Error writing WUCSR");
+
+               /* clear wake-up status */
+               ret = smsc95xx_read_reg(dev, PM_CTRL, &val);
+               check_warn_return(ret, "Error reading PM_CTRL");
+
+               val &= ~PM_CTL_WOL_EN_;
+               val |= PM_CTL_WUPS_;
+
+               ret = smsc95xx_write_reg(dev, PM_CTRL, val);
+               check_warn_return(ret, "Error writing PM_CTRL");
+       }
+
+       ret = usbnet_resume(intf);
+       check_warn_return(ret, "usbnet_resume error");
+
+       return 0;
+}
+
 static void smsc95xx_rx_csum_offload(struct sk_buff *skb)
 {
        skb->csum = *(u16 *)(skb_tail_pointer(skb) - 2);
@@ -1326,8 +1475,9 @@ static struct usb_driver smsc95xx_driver = {
        .name           = "smsc95xx",
        .id_table       = products,
        .probe          = usbnet_probe,
-       .suspend        = usbnet_suspend,
-       .resume         = usbnet_resume,
+       .suspend        = smsc95xx_suspend,
+       .resume         = smsc95xx_resume,
+       .reset_resume   = smsc95xx_resume,
        .disconnect     = usbnet_disconnect,
        .disable_hub_initiated_lpm = 1,
 };
index 86bc44977fbd98372844b622cf171153a4c60d32..2ff9815aa27c5e9e098dd587e8291398f2dce25f 100644 (file)
@@ -63,6 +63,7 @@
 #define INT_STS_TDFO_                  (0x00001000)
 #define INT_STS_RXDF_                  (0x00000800)
 #define INT_STS_GPIOS_                 (0x000007FF)
+#define INT_STS_CLEAR_ALL_             (0xFFFFFFFF)
 
 #define RX_CFG                         (0x0C)
 #define RX_FIFO_FLUSH_                 (0x00000001)
 #define HW_CFG_BCE_                    (0x00000002)
 #define HW_CFG_SRST_                   (0x00000001)
 
+#define RX_FIFO_INF                    (0x18)
+
 #define PM_CTRL                                (0x20)
+#define PM_CTL_RES_CLR_WKP_STS         (0x00000200)
 #define PM_CTL_DEV_RDY_                        (0x00000080)
 #define PM_CTL_SUS_MODE_               (0x00000060)
 #define PM_CTL_SUS_MODE_0              (0x00000000)
 #define PM_CTL_SUS_MODE_1              (0x00000020)
-#define PM_CTL_SUS_MODE_2              (0x00000060)
+#define PM_CTL_SUS_MODE_2              (0x00000040)
+#define PM_CTL_SUS_MODE_3              (0x00000060)
 #define PM_CTL_PHY_RST_                        (0x00000010)
 #define PM_CTL_WOL_EN_                 (0x00000008)
 #define PM_CTL_ED_EN_                  (0x00000004)
 #define WUFF                           (0x128)
 
 #define WUCSR                          (0x12C)
+#define WUCSR_GUE_                     (0x00000200)
+#define WUCSR_WUFR_                    (0x00000040)
+#define WUCSR_MPR_                     (0x00000020)
+#define WUCSR_WAKE_EN_                 (0x00000004)
+#define WUCSR_MPEN_                    (0x00000002)
 
 #define COE_CR                         (0x130)
 #define Tx_COE_EN_                     (0x00010000)
index 5852361032c459735e915db5443ac08d560f2c7c..e522ff70444cd0d7e8f1ce34132055e438ded7ce 100644 (file)
@@ -348,6 +348,9 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
        if (tbp[IFLA_ADDRESS] == NULL)
                eth_hw_addr_random(peer);
 
+       if (ifmp && (dev->ifindex != 0))
+               peer->ifindex = ifmp->ifi_index;
+
        err = register_netdevice(peer);
        put_net(net);
        net = NULL;
index 83d2b0c34c5e63045eaeb63c3e40b14ab68e7875..cbf8b06253528e9e5557c31e844581d7334d7e21 100644 (file)
@@ -521,7 +521,7 @@ static void refill_work(struct work_struct *work)
        /* In theory, this can happen: if we don't get any buffers in
         * we will *never* try to fill again. */
        if (still_empty)
-               queue_delayed_work(system_nrt_wq, &vi->refill, HZ/2);
+               schedule_delayed_work(&vi->refill, HZ/2);
 }
 
 static int virtnet_poll(struct napi_struct *napi, int budget)
@@ -540,7 +540,7 @@ again:
 
        if (vi->num < vi->max / 2) {
                if (!try_fill_recv(vi, GFP_ATOMIC))
-                       queue_delayed_work(system_nrt_wq, &vi->refill, 0);
+                       schedule_delayed_work(&vi->refill, 0);
        }
 
        /* Out of packets? */
@@ -745,7 +745,7 @@ static int virtnet_open(struct net_device *dev)
 
        /* Make sure we have some buffers: if oom use wq. */
        if (!try_fill_recv(vi, GFP_KERNEL))
-               queue_delayed_work(system_nrt_wq, &vi->refill, 0);
+               schedule_delayed_work(&vi->refill, 0);
 
        virtnet_napi_enable(vi);
        return 0;
@@ -993,7 +993,7 @@ static void virtnet_config_changed_work(struct work_struct *work)
                goto done;
 
        if (v & VIRTIO_NET_S_ANNOUNCE) {
-               netif_notify_peers(vi->dev);
+               netdev_notify_peers(vi->dev);
                virtnet_ack_link_announce(vi);
        }
 
@@ -1020,7 +1020,7 @@ static void virtnet_config_changed(struct virtio_device *vdev)
 {
        struct virtnet_info *vi = vdev->priv;
 
-       queue_work(system_nrt_wq, &vi->config_work);
+       schedule_work(&vi->config_work);
 }
 
 static int init_vqs(struct virtnet_info *vi)
@@ -1152,7 +1152,7 @@ static int virtnet_probe(struct virtio_device *vdev)
           otherwise get link status from config. */
        if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
                netif_carrier_off(dev);
-               queue_work(system_nrt_wq, &vi->config_work);
+               schedule_work(&vi->config_work);
        } else {
                vi->status = VIRTIO_NET_S_LINK_UP;
                netif_carrier_on(dev);
@@ -1264,7 +1264,7 @@ static int virtnet_restore(struct virtio_device *vdev)
        netif_device_attach(vi->dev);
 
        if (!try_fill_recv(vi, GFP_KERNEL))
-               queue_delayed_work(system_nrt_wq, &vi->refill, 0);
+               schedule_delayed_work(&vi->refill, 0);
 
        mutex_lock(&vi->config_lock);
        vi->config_enable = true;
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
new file mode 100644 (file)
index 0000000..51de9ed
--- /dev/null
@@ -0,0 +1,1219 @@
+/*
+ * VXLAN: Virtual eXtensible Local Area Network
+ *
+ * Copyright (c) 2012 Vyatta Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * TODO
+ *  - use IANA UDP port number (when defined)
+ *  - IPv6 (not in RFC)
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/skbuff.h>
+#include <linux/rculist.h>
+#include <linux/netdevice.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <linux/igmp.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/version.h>
+#include <linux/hash.h>
+#include <net/ip.h>
+#include <net/icmp.h>
+#include <net/udp.h>
+#include <net/rtnetlink.h>
+#include <net/route.h>
+#include <net/dsfield.h>
+#include <net/inet_ecn.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
+
+#define VXLAN_VERSION  "0.1"
+
+#define VNI_HASH_BITS  10
+#define VNI_HASH_SIZE  (1<<VNI_HASH_BITS)
+#define FDB_HASH_BITS  8
+#define FDB_HASH_SIZE  (1<<FDB_HASH_BITS)
+#define FDB_AGE_DEFAULT 300 /* 5 min */
+#define FDB_AGE_INTERVAL (10 * HZ)     /* rescan interval */
+
+#define VXLAN_N_VID    (1u << 24)
+#define VXLAN_VID_MASK (VXLAN_N_VID - 1)
+/* VLAN + IP header + UDP + VXLAN */
+#define VXLAN_HEADROOM (4 + 20 + 8 + 8)
+
+#define VXLAN_FLAGS 0x08000000 /* struct vxlanhdr.vx_flags required value. */
+
+/* VXLAN protocol header */
+struct vxlanhdr {
+       __be32 vx_flags;
+       __be32 vx_vni;
+};
+
+/* UDP port for VXLAN traffic. */
+static unsigned int vxlan_port __read_mostly = 8472;
+module_param_named(udp_port, vxlan_port, uint, 0444);
+MODULE_PARM_DESC(udp_port, "Destination UDP port");
+
+static bool log_ecn_error = true;
+module_param(log_ecn_error, bool, 0644);
+MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
+
+/* per-net private data for this module */
+static unsigned int vxlan_net_id;
+struct vxlan_net {
+       struct socket     *sock;        /* UDP encap socket */
+       struct hlist_head vni_list[VNI_HASH_SIZE];
+};
+
+/* Forwarding table entry */
+struct vxlan_fdb {
+       struct hlist_node hlist;        /* linked list of entries */
+       struct rcu_head   rcu;
+       unsigned long     updated;      /* jiffies */
+       unsigned long     used;
+       __be32            remote_ip;
+       u16               state;        /* see ndm_state */
+       u8                eth_addr[ETH_ALEN];
+};
+
+/* Per-cpu network traffic stats */
+struct vxlan_stats {
+       u64                     rx_packets;
+       u64                     rx_bytes;
+       u64                     tx_packets;
+       u64                     tx_bytes;
+       struct u64_stats_sync   syncp;
+};
+
+/* Pseudo network device */
+struct vxlan_dev {
+       struct hlist_node hlist;
+       struct net_device *dev;
+       struct vxlan_stats __percpu *stats;
+       __u32             vni;          /* virtual network id */
+       __be32            gaddr;        /* multicast group */
+       __be32            saddr;        /* source address */
+       unsigned int      link;         /* link to multicast over */
+       __u8              tos;          /* TOS override */
+       __u8              ttl;
+       bool              learn;
+
+       unsigned long     age_interval;
+       struct timer_list age_timer;
+       spinlock_t        hash_lock;
+       unsigned int      addrcnt;
+       unsigned int      addrmax;
+       unsigned int      addrexceeded;
+
+       struct hlist_head fdb_head[FDB_HASH_SIZE];
+};
+
+/* salt for hash table */
+static u32 vxlan_salt __read_mostly;
+
+static inline struct hlist_head *vni_head(struct net *net, u32 id)
+{
+       struct vxlan_net *vn = net_generic(net, vxlan_net_id);
+
+       return &vn->vni_list[hash_32(id, VNI_HASH_BITS)];
+}
+
+/* Look up VNI in a per net namespace table */
+static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id)
+{
+       struct vxlan_dev *vxlan;
+       struct hlist_node *node;
+
+       hlist_for_each_entry_rcu(vxlan, node, vni_head(net, id), hlist) {
+               if (vxlan->vni == id)
+                       return vxlan;
+       }
+
+       return NULL;
+}
+
+/* Fill in neighbour message in skbuff. */
+static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
+                          const struct vxlan_fdb *fdb,
+                          u32 portid, u32 seq, int type, unsigned int flags)
+{
+       unsigned long now = jiffies;
+       struct nda_cacheinfo ci;
+       struct nlmsghdr *nlh;
+       struct ndmsg *ndm;
+
+       nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
+       if (nlh == NULL)
+               return -EMSGSIZE;
+
+       ndm = nlmsg_data(nlh);
+       memset(ndm, 0, sizeof(*ndm));
+       ndm->ndm_family = AF_BRIDGE;
+       ndm->ndm_state = fdb->state;
+       ndm->ndm_ifindex = vxlan->dev->ifindex;
+       ndm->ndm_flags = NTF_SELF;
+       ndm->ndm_type = NDA_DST;
+
+       if (nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))
+               goto nla_put_failure;
+
+       if (nla_put_be32(skb, NDA_DST, fdb->remote_ip))
+               goto nla_put_failure;
+
+       ci.ndm_used      = jiffies_to_clock_t(now - fdb->used);
+       ci.ndm_confirmed = 0;
+       ci.ndm_updated   = jiffies_to_clock_t(now - fdb->updated);
+       ci.ndm_refcnt    = 0;
+
+       if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
+               goto nla_put_failure;
+
+       return nlmsg_end(skb, nlh);
+
+nla_put_failure:
+       nlmsg_cancel(skb, nlh);
+       return -EMSGSIZE;
+}
+
+static inline size_t vxlan_nlmsg_size(void)
+{
+       return NLMSG_ALIGN(sizeof(struct ndmsg))
+               + nla_total_size(ETH_ALEN) /* NDA_LLADDR */
+               + nla_total_size(sizeof(__be32)) /* NDA_DST */
+               + nla_total_size(sizeof(struct nda_cacheinfo));
+}
+
+static void vxlan_fdb_notify(struct vxlan_dev *vxlan,
+                            const struct vxlan_fdb *fdb, int type)
+{
+       struct net *net = dev_net(vxlan->dev);
+       struct sk_buff *skb;
+       int err = -ENOBUFS;
+
+       skb = nlmsg_new(vxlan_nlmsg_size(), GFP_ATOMIC);
+       if (skb == NULL)
+               goto errout;
+
+       err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0);
+       if (err < 0) {
+               /* -EMSGSIZE implies BUG in vxlan_nlmsg_size() */
+               WARN_ON(err == -EMSGSIZE);
+               kfree_skb(skb);
+               goto errout;
+       }
+
+       rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
+       return;
+errout:
+       if (err < 0)
+               rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
+}
+
+/* Hash Ethernet address */
+static u32 eth_hash(const unsigned char *addr)
+{
+       u64 value = get_unaligned((u64 *)addr);
+
+       /* only want 6 bytes */
+#ifdef __BIG_ENDIAN
+       value <<= 16;
+#else
+       value >>= 16;
+#endif
+       return hash_64(value, FDB_HASH_BITS);
+}
+
+/* Hash chain to use given mac address */
+static inline struct hlist_head *vxlan_fdb_head(struct vxlan_dev *vxlan,
+                                               const u8 *mac)
+{
+       return &vxlan->fdb_head[eth_hash(mac)];
+}
+
+/* Look up Ethernet address in forwarding table */
+static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
+                                       const u8 *mac)
+
+{
+       struct hlist_head *head = vxlan_fdb_head(vxlan, mac);
+       struct vxlan_fdb *f;
+       struct hlist_node *node;
+
+       hlist_for_each_entry_rcu(f, node, head, hlist) {
+               if (compare_ether_addr(mac, f->eth_addr) == 0)
+                       return f;
+       }
+
+       return NULL;
+}
+
+/* Add new entry to forwarding table -- assumes lock held */
+static int vxlan_fdb_create(struct vxlan_dev *vxlan,
+                           const u8 *mac, __be32 ip,
+                           __u16 state, __u16 flags)
+{
+       struct vxlan_fdb *f;
+       int notify = 0;
+
+       f = vxlan_find_mac(vxlan, mac);
+       if (f) {
+               if (flags & NLM_F_EXCL) {
+                       netdev_dbg(vxlan->dev,
+                                  "lost race to create %pM\n", mac);
+                       return -EEXIST;
+               }
+               if (f->state != state) {
+                       f->state = state;
+                       f->updated = jiffies;
+                       notify = 1;
+               }
+       } else {
+               if (!(flags & NLM_F_CREATE))
+                       return -ENOENT;
+
+               if (vxlan->addrmax && vxlan->addrcnt >= vxlan->addrmax)
+                       return -ENOSPC;
+
+               netdev_dbg(vxlan->dev, "add %pM -> %pI4\n", mac, &ip);
+               f = kmalloc(sizeof(*f), GFP_ATOMIC);
+               if (!f)
+                       return -ENOMEM;
+
+               notify = 1;
+               f->remote_ip = ip;
+               f->state = state;
+               f->updated = f->used = jiffies;
+               memcpy(f->eth_addr, mac, ETH_ALEN);
+
+               ++vxlan->addrcnt;
+               hlist_add_head_rcu(&f->hlist,
+                                  vxlan_fdb_head(vxlan, mac));
+       }
+
+       if (notify)
+               vxlan_fdb_notify(vxlan, f, RTM_NEWNEIGH);
+
+       return 0;
+}
+
+static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
+{
+       netdev_dbg(vxlan->dev,
+                   "delete %pM\n", f->eth_addr);
+
+       --vxlan->addrcnt;
+       vxlan_fdb_notify(vxlan, f, RTM_DELNEIGH);
+
+       hlist_del_rcu(&f->hlist);
+       kfree_rcu(f, rcu);
+}
+
+/* Add static entry (via netlink) -- .ndo_fdb_add handler.
+ * Accepts only NUD_PERMANENT/NUD_REACHABLE states and an IPv4-sized
+ * NDA_DST attribute; the actual insert happens under hash_lock.
+ */
+static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+                        struct net_device *dev,
+                        const unsigned char *addr, u16 flags)
+{
+       struct vxlan_dev *vxlan = netdev_priv(dev);
+       __be32 ip;
+       int err;
+
+       if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_REACHABLE))) {
+               pr_info("RTM_NEWNEIGH with invalid state %#x\n",
+                       ndm->ndm_state);
+               return -EINVAL;
+       }
+
+       if (tb[NDA_DST] == NULL)
+               return -EINVAL;
+
+       /* IPv4 tunnel endpoints only */
+       if (nla_len(tb[NDA_DST]) != sizeof(__be32))
+               return -EAFNOSUPPORT;
+
+       ip = nla_get_be32(tb[NDA_DST]);
+
+       spin_lock_bh(&vxlan->hash_lock);
+       err = vxlan_fdb_create(vxlan, addr, ip, ndm->ndm_state, flags);
+       spin_unlock_bh(&vxlan->hash_lock);
+
+       return err;
+}
+
+/* Delete entry (via netlink) -- .ndo_fdb_del handler.
+ * Returns -ENOENT when the MAC is not in the table.
+ */
+static int vxlan_fdb_delete(struct ndmsg *ndm, struct net_device *dev,
+                           const unsigned char *addr)
+{
+       struct vxlan_dev *vxlan = netdev_priv(dev);
+       struct vxlan_fdb *f;
+       int err = -ENOENT;
+
+       spin_lock_bh(&vxlan->hash_lock);
+       f = vxlan_find_mac(vxlan, addr);
+       if (f) {
+               vxlan_fdb_destroy(vxlan, f);
+               err = 0;
+       }
+       spin_unlock_bh(&vxlan->hash_lock);
+
+       return err;
+}
+
+/* Dump forwarding table -- .ndo_fdb_dump handler.
+ *
+ * @idx counts entries across all buckets; entries below cb->args[0]
+ * were already dumped in a previous invocation and are skipped.
+ *
+ * Fix: on a fill error the original "break" only left the inner bucket
+ * loop, so the remaining buckets kept advancing @idx and their entries
+ * were silently skipped when the dump resumed.  Abort the whole walk
+ * instead, so the next invocation retries from the failed entry.
+ */
+static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
+                         struct net_device *dev, int idx)
+{
+       struct vxlan_dev *vxlan = netdev_priv(dev);
+       unsigned int h;
+
+       for (h = 0; h < FDB_HASH_SIZE; ++h) {
+               struct vxlan_fdb *f;
+               struct hlist_node *n;
+               int err;
+
+               hlist_for_each_entry_rcu(f, n, &vxlan->fdb_head[h], hlist) {
+                       if (idx < cb->args[0])
+                               goto skip;
+
+                       err = vxlan_fdb_info(skb, vxlan, f,
+                                            NETLINK_CB(cb->skb).portid,
+                                            cb->nlh->nlmsg_seq,
+                                            RTM_NEWNEIGH,
+                                            NLM_F_MULTI);
+                       if (err < 0)
+                               goto out;
+skip:
+                       ++idx;
+               }
+       }
+out:
+       return idx;
+}
+
+/* Watch incoming packets to learn mapping between Ethernet address
+ * and Tunnel endpoint.
+ *
+ * Refreshes the entry's use time, tracks endpoint migration, or learns
+ * a brand-new entry.  Learning is best-effort: failures of
+ * vxlan_fdb_create() (table full, -ENOMEM) are deliberately ignored,
+ * so the previously unused "err" local has been removed
+ * (-Wunused-but-set-variable).
+ */
+static void vxlan_snoop(struct net_device *dev,
+                       __be32 src_ip, const u8 *src_mac)
+{
+       struct vxlan_dev *vxlan = netdev_priv(dev);
+       struct vxlan_fdb *f;
+
+       f = vxlan_find_mac(vxlan, src_mac);
+       if (likely(f)) {
+               f->used = jiffies;
+               if (likely(f->remote_ip == src_ip))
+                       return;
+
+               if (net_ratelimit())
+                       netdev_info(dev,
+                                   "%pM migrated from %pI4 to %pI4\n",
+                                   src_mac, &f->remote_ip, &src_ip);
+
+               f->remote_ip = src_ip;
+               f->updated = jiffies;
+       } else {
+               /* learned new entry; creation errors are ignored */
+               spin_lock(&vxlan->hash_lock);
+               vxlan_fdb_create(vxlan, src_mac, src_ip,
+                                NUD_REACHABLE,
+                                NLM_F_EXCL|NLM_F_CREATE);
+               spin_unlock(&vxlan->hash_lock);
+       }
+}
+
+
+/* See if multicast group is already in use by other ID.
+ *
+ * Walks every VNI bucket looking for another *running* vxlan device
+ * bound to the same group address; used to decide whether a kernel
+ * IGMP join/leave is actually needed.
+ * NOTE(review): the walk takes no lock; presumably serialised by RTNL
+ * from the open/stop paths -- confirm.
+ */
+static bool vxlan_group_used(struct vxlan_net *vn,
+                            const struct vxlan_dev *this)
+{
+       const struct vxlan_dev *vxlan;
+       struct hlist_node *node;
+       unsigned h;
+
+       for (h = 0; h < VNI_HASH_SIZE; ++h)
+               hlist_for_each_entry(vxlan, node, &vn->vni_list[h], hlist) {
+                       if (vxlan == this)
+                               continue;
+
+                       if (!netif_running(vxlan->dev))
+                               continue;
+
+                       if (vxlan->gaddr == this->gaddr)
+                               return true;
+               }
+
+       return false;
+}
+
+/* kernel equivalent to IP_ADD_MEMBERSHIP
+ * Joins vxlan->gaddr on the per-namespace UDP socket, unless some
+ * other running vxlan device already holds the membership.
+ */
+static int vxlan_join_group(struct net_device *dev)
+{
+       struct vxlan_dev *vxlan = netdev_priv(dev);
+       struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
+       struct sock *sk = vn->sock->sk;
+       struct ip_mreqn mreq = {
+               .imr_multiaddr.s_addr = vxlan->gaddr,
+       };
+       int err;
+
+       /* Already a member of group */
+       if (vxlan_group_used(vn, vxlan))
+               return 0;
+
+       /* Need to drop RTNL to call multicast join
+        * NOTE(review): releasing RTNL inside ndo_open opens a window
+        * for concurrent device changes -- confirm callers tolerate it.
+        */
+       rtnl_unlock();
+       lock_sock(sk);
+       err = ip_mc_join_group(sk, &mreq);
+       release_sock(sk);
+       rtnl_lock();
+
+       return err;
+}
+
+
+/* kernel equivalent to IP_DROP_MEMBERSHIP
+ * Leaves vxlan->gaddr only when no other running vxlan device still
+ * uses the same group address.
+ */
+static int vxlan_leave_group(struct net_device *dev)
+{
+       struct vxlan_dev *vxlan = netdev_priv(dev);
+       struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
+       int err = 0;
+       struct sock *sk = vn->sock->sk;
+       struct ip_mreqn mreq = {
+               .imr_multiaddr.s_addr = vxlan->gaddr,
+       };
+
+       /* Only leave group when last vxlan is done. */
+       if (vxlan_group_used(vn, vxlan))
+               return 0;
+
+       /* Need to drop RTNL to call multicast leave */
+       rtnl_unlock();
+       lock_sock(sk);
+       err = ip_mc_leave_group(sk, &mreq);
+       release_sock(sk);
+       rtnl_lock();
+
+       return err;
+}
+
+/* Callback from net/ipv4/udp.c to receive packets.
+ *
+ * Returns 0 when the skb was consumed (delivered to the vxlan device
+ * or dropped), or 1 with the outer UDP header restored so the normal
+ * UDP stack processes the packet instead.
+ */
+static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
+{
+       struct iphdr *oip;
+       struct vxlanhdr *vxh;
+       struct vxlan_dev *vxlan;
+       struct vxlan_stats *stats;
+       __u32 vni;
+       int err;
+
+       /* pop off outer UDP header */
+       __skb_pull(skb, sizeof(struct udphdr));
+
+       /* Need Vxlan and inner Ethernet header to be present */
+       if (!pskb_may_pull(skb, sizeof(struct vxlanhdr)))
+               goto error;
+
+       /* Drop packets with reserved bits set
+        * (only the I flag may be set; low byte of vx_vni is reserved)
+        */
+       vxh = (struct vxlanhdr *) skb->data;
+       if (vxh->vx_flags != htonl(VXLAN_FLAGS) ||
+           (vxh->vx_vni & htonl(0xff))) {
+               netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n",
+                          ntohl(vxh->vx_flags), ntohl(vxh->vx_vni));
+               goto error;
+       }
+
+       __skb_pull(skb, sizeof(struct vxlanhdr));
+       skb_postpull_rcsum(skb, eth_hdr(skb), sizeof(struct vxlanhdr));
+
+       /* Is this VNI defined? */
+       vni = ntohl(vxh->vx_vni) >> 8;
+       vxlan = vxlan_find_vni(sock_net(sk), vni);
+       if (!vxlan) {
+               netdev_dbg(skb->dev, "unknown vni %d\n", vni);
+               goto drop;
+       }
+
+       if (!pskb_may_pull(skb, ETH_HLEN)) {
+               vxlan->dev->stats.rx_length_errors++;
+               vxlan->dev->stats.rx_errors++;
+               goto drop;
+       }
+
+       /* Re-examine inner Ethernet packet */
+       oip = ip_hdr(skb);
+       skb->protocol = eth_type_trans(skb, vxlan->dev);
+       skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
+
+       /* Ignore packet loops (and multicast echo) */
+       if (compare_ether_addr(eth_hdr(skb)->h_source,
+                              vxlan->dev->dev_addr) == 0)
+               goto drop;
+
+       /* learn source MAC -> outer source IP mapping */
+       if (vxlan->learn)
+               vxlan_snoop(skb->dev, oip->saddr, eth_hdr(skb)->h_source);
+
+       __skb_tunnel_rx(skb, vxlan->dev);
+       skb_reset_network_header(skb);
+
+       /* propagate outer ECN into the inner header; err > 1 means the
+        * frame must be dropped (non-ECT inner with CE outer)
+        */
+       err = IP_ECN_decapsulate(oip, skb);
+       if (unlikely(err)) {
+               if (log_ecn_error)
+                       net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
+                                            &oip->saddr, oip->tos);
+               if (err > 1) {
+                       ++vxlan->dev->stats.rx_frame_errors;
+                       ++vxlan->dev->stats.rx_errors;
+                       goto drop;
+               }
+       }
+
+       stats = this_cpu_ptr(vxlan->stats);
+       u64_stats_update_begin(&stats->syncp);
+       stats->rx_packets++;
+       stats->rx_bytes += skb->len;
+       u64_stats_update_end(&stats->syncp);
+
+       netif_rx(skb);
+
+       return 0;
+error:
+       /* Put UDP header back */
+       __skb_push(skb, sizeof(struct udphdr));
+
+       return 1;
+drop:
+       /* Consume bad packet */
+       kfree_skb(skb);
+       return 0;
+}
+
+/* Extract dsfield from inner protocol.
+ * @iph actually points at the inner L3 header and is reinterpreted as
+ * an ipv6hdr when the inner frame is IPv6; non-IP payloads yield 0.
+ */
+static inline u8 vxlan_get_dsfield(const struct iphdr *iph,
+                                  const struct sk_buff *skb)
+{
+       if (skb->protocol == htons(ETH_P_IP))
+               return iph->tos;
+       else if (skb->protocol == htons(ETH_P_IPV6))
+               return ipv6_get_dsfield((const struct ipv6hdr *)iph);
+       else
+               return 0;
+}
+
+/* Propagate ECN bits out: combine the configured outer @tos with the
+ * inner header's ECN field for the encapsulating IP header.
+ */
+static inline u8 vxlan_ecn_encap(u8 tos,
+                                const struct iphdr *iph,
+                                const struct sk_buff *skb)
+{
+       u8 inner = vxlan_get_dsfield(iph, skb);
+
+       return INET_ECN_encapsulate(tos, inner);
+}
+
+/* Transmit local packets over Vxlan
+ *
+ * Outer IP header inherits ECN and DF from inner header.
+ * Outer UDP destination is the VXLAN assigned port.
+ *           source port is based on hash of flow if available
+ *                       otherwise use a random value
+ */
+static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+       struct vxlan_dev *vxlan = netdev_priv(dev);
+       struct rtable *rt;
+       const struct ethhdr *eth;
+       const struct iphdr *old_iph;
+       struct iphdr *iph;
+       struct vxlanhdr *vxh;
+       struct udphdr *uh;
+       struct flowi4 fl4;
+       struct vxlan_fdb *f;
+       unsigned int pkt_len = skb->len;
+       u32 hash;
+       __be32 dst;
+       __be16 df = 0;
+       __u8 tos, ttl;
+       int err;
+
+       /* Need space for new headers (invalidates iph ptr) */
+       if (skb_cow_head(skb, VXLAN_HEADROOM))
+               goto drop;
+
+       eth = (void *)skb->data;
+       old_iph = ip_hdr(skb);
+
+       /* Known unicast goes to the learned endpoint, everything else
+        * to the multicast group (if one is configured).
+        */
+       if (!is_multicast_ether_addr(eth->h_dest) &&
+           (f = vxlan_find_mac(vxlan, eth->h_dest)))
+               dst = f->remote_ip;
+       else if (vxlan->gaddr) {
+               dst = vxlan->gaddr;
+       } else
+               goto drop;
+
+       ttl = vxlan->ttl;
+       if (!ttl && IN_MULTICAST(ntohl(dst)))
+               ttl = 1;
+
+       tos = vxlan->tos;
+       if (tos == 1)
+               tos = vxlan_get_dsfield(old_iph, skb);  /* "inherit" */
+
+       hash = skb_get_rxhash(skb);
+
+       rt = ip_route_output_gre(dev_net(dev), &fl4, dst,
+                                vxlan->saddr, vxlan->vni,
+                                RT_TOS(tos), vxlan->link);
+       if (IS_ERR(rt)) {
+               netdev_dbg(dev, "no route to %pI4\n", &dst);
+               dev->stats.tx_carrier_errors++;
+               goto tx_error;
+       }
+
+       if (rt->dst.dev == dev) {
+               netdev_dbg(dev, "circular route to %pI4\n", &dst);
+               ip_rt_put(rt);
+               dev->stats.collisions++;
+               goto tx_error;
+       }
+
+       memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
+       IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
+                             IPSKB_REROUTED);
+       skb_dst_drop(skb);
+       skb_dst_set(skb, &rt->dst);
+
+       vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
+       vxh->vx_flags = htonl(VXLAN_FLAGS);
+       vxh->vx_vni = htonl(vxlan->vni << 8);
+
+       __skb_push(skb, sizeof(*uh));
+       skb_reset_transport_header(skb);
+       uh = udp_hdr(skb);
+
+       uh->dest = htons(vxlan_port);
+       /* Fix: the source port was previously assigned as a raw
+        * host-order 32-bit value truncated into a __be16 (endian
+        * dependent, could yield the invalid port 0).  Fold the flow
+        * hash to 16 bits, avoid 0 and convert to network byte order.
+        */
+       if (!hash)
+               hash = random32();
+       uh->source = htons((hash % 0xffff) + 1);
+
+       uh->len = htons(skb->len);
+       uh->check = 0;
+
+       __skb_push(skb, sizeof(*iph));
+       skb_reset_network_header(skb);
+       iph             = ip_hdr(skb);
+       iph->version    = 4;
+       iph->ihl        = sizeof(struct iphdr) >> 2;
+       iph->frag_off   = df;
+       iph->protocol   = IPPROTO_UDP;
+       iph->tos        = vxlan_ecn_encap(tos, old_iph, skb);
+       iph->daddr      = fl4.daddr;
+       iph->saddr      = fl4.saddr;
+       iph->ttl        = ttl ? : ip4_dst_hoplimit(&rt->dst);
+
+       /* See __IPTUNNEL_XMIT */
+       skb->ip_summed = CHECKSUM_NONE;
+       ip_select_ident(iph, &rt->dst, NULL);
+
+       err = ip_local_out(skb);
+       if (likely(net_xmit_eval(err) == 0)) {
+               struct vxlan_stats *stats = this_cpu_ptr(vxlan->stats);
+
+               u64_stats_update_begin(&stats->syncp);
+               stats->tx_packets++;
+               stats->tx_bytes += pkt_len;
+               u64_stats_update_end(&stats->syncp);
+       } else {
+               dev->stats.tx_errors++;
+               dev->stats.tx_aborted_errors++;
+       }
+       return NETDEV_TX_OK;
+
+drop:
+       dev->stats.tx_dropped++;
+       goto tx_free;
+
+tx_error:
+       dev->stats.tx_errors++;
+tx_free:
+       dev_kfree_skb(skb);
+       return NETDEV_TX_OK;
+}
+
+/* Walk the forwarding table and purge stale entries.
+ * Timer callback: ages out non-permanent entries whose last use is
+ * older than age_interval, then re-arms itself for the earliest
+ * remaining expiry (or FDB_AGE_INTERVAL).
+ */
+static void vxlan_cleanup(unsigned long arg)
+{
+       struct vxlan_dev *vxlan = (struct vxlan_dev *) arg;
+       unsigned long next_timer = jiffies + FDB_AGE_INTERVAL;
+       unsigned int h;
+
+       if (!netif_running(vxlan->dev))
+               return;
+
+       spin_lock_bh(&vxlan->hash_lock);
+       for (h = 0; h < FDB_HASH_SIZE; ++h) {
+               struct hlist_node *p, *n;
+               hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
+                       struct vxlan_fdb *f
+                               = container_of(p, struct vxlan_fdb, hlist);
+                       unsigned long timeout;
+
+                       /* static entries never age out */
+                       if (f->state == NUD_PERMANENT)
+                               continue;
+
+                       timeout = f->used + vxlan->age_interval * HZ;
+                       if (time_before_eq(timeout, jiffies)) {
+                               netdev_dbg(vxlan->dev,
+                                          "garbage collect %pM\n",
+                                          f->eth_addr);
+                               f->state = NUD_STALE;
+                               vxlan_fdb_destroy(vxlan, f);
+                       } else if (time_before(timeout, next_timer))
+                               next_timer = timeout;
+               }
+       }
+       spin_unlock_bh(&vxlan->hash_lock);
+
+       mod_timer(&vxlan->age_timer, next_timer);
+}
+
+/* Setup stats when device is created -- .ndo_init handler.
+ * Allocates the per-cpu counters freed again in vxlan_free().
+ */
+static int vxlan_init(struct net_device *dev)
+{
+       struct vxlan_dev *vxlan = netdev_priv(dev);
+
+       vxlan->stats = alloc_percpu(struct vxlan_stats);
+       if (!vxlan->stats)
+               return -ENOMEM;
+
+       return 0;
+}
+
+/* Start ageing timer and join group when device is brought up
+ * (.ndo_open).  Fails if the multicast join fails.
+ */
+static int vxlan_open(struct net_device *dev)
+{
+       struct vxlan_dev *vxlan = netdev_priv(dev);
+       int err;
+
+       if (vxlan->gaddr) {
+               err = vxlan_join_group(dev);
+               if (err)
+                       return err;
+       }
+
+       /* age_interval == 0 disables ageing entirely */
+       if (vxlan->age_interval)
+               mod_timer(&vxlan->age_timer, jiffies + FDB_AGE_INTERVAL);
+
+       return 0;
+}
+
+/* Purge the forwarding table: destroy every entry, including
+ * permanent ones (used on device stop).
+ */
+static void vxlan_flush(struct vxlan_dev *vxlan)
+{
+       unsigned h;
+
+       spin_lock_bh(&vxlan->hash_lock);
+       for (h = 0; h < FDB_HASH_SIZE; ++h) {
+               struct hlist_node *p, *n;
+               hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
+                       struct vxlan_fdb *f
+                               = container_of(p, struct vxlan_fdb, hlist);
+                       vxlan_fdb_destroy(vxlan, f);
+               }
+       }
+       spin_unlock_bh(&vxlan->hash_lock);
+}
+
+/* Cleanup timer and forwarding table on shutdown (.ndo_stop). */
+static int vxlan_stop(struct net_device *dev)
+{
+       struct vxlan_dev *vxlan = netdev_priv(dev);
+
+       if (vxlan->gaddr)
+               vxlan_leave_group(dev);
+
+       del_timer_sync(&vxlan->age_timer);
+
+       vxlan_flush(vxlan);
+
+       return 0;
+}
+
+/* Merge per-cpu statistics -- .ndo_get_stats64 handler.
+ * Sums the per-cpu tx/rx counters (consistently, via the u64_stats
+ * seqcount) and copies the error counters kept in dev->stats.
+ */
+static struct rtnl_link_stats64 *vxlan_stats64(struct net_device *dev,
+                                              struct rtnl_link_stats64 *stats)
+{
+       struct vxlan_dev *vxlan = netdev_priv(dev);
+       struct vxlan_stats tmp, sum = { 0 };
+       unsigned int cpu;
+
+       for_each_possible_cpu(cpu) {
+               unsigned int start;
+               /* renamed from "stats": it shadowed the function
+                * parameter of the same name (-Wshadow)
+                */
+               const struct vxlan_stats *pcpu_stats
+                       = per_cpu_ptr(vxlan->stats, cpu);
+
+               do {
+                       start = u64_stats_fetch_begin_bh(&pcpu_stats->syncp);
+                       memcpy(&tmp, pcpu_stats, sizeof(tmp));
+               } while (u64_stats_fetch_retry_bh(&pcpu_stats->syncp, start));
+
+               sum.tx_bytes   += tmp.tx_bytes;
+               sum.tx_packets += tmp.tx_packets;
+               sum.rx_bytes   += tmp.rx_bytes;
+               sum.rx_packets += tmp.rx_packets;
+       }
+
+       stats->tx_bytes   = sum.tx_bytes;
+       stats->tx_packets = sum.tx_packets;
+       stats->rx_bytes   = sum.rx_bytes;
+       stats->rx_packets = sum.rx_packets;
+
+       stats->multicast = dev->stats.multicast;
+       stats->rx_length_errors = dev->stats.rx_length_errors;
+       stats->rx_frame_errors = dev->stats.rx_frame_errors;
+       stats->rx_errors = dev->stats.rx_errors;
+
+       stats->tx_dropped = dev->stats.tx_dropped;
+       stats->tx_carrier_errors  = dev->stats.tx_carrier_errors;
+       stats->tx_aborted_errors  = dev->stats.tx_aborted_errors;
+       stats->collisions  = dev->stats.collisions;
+       stats->tx_errors = dev->stats.tx_errors;
+
+       return stats;
+}
+
+/* Stub, nothing needs to be done: rx-mode is meaningless for a
+ * tunnel device but .ndo_set_rx_mode must exist.
+ */
+static void vxlan_set_multicast_list(struct net_device *dev)
+{
+}
+
+/* Net-device operations: standard Ethernet helpers plus the vxlan
+ * lifecycle, xmit and FDB (bridge fdb netlink) handlers above.
+ */
+static const struct net_device_ops vxlan_netdev_ops = {
+       .ndo_init               = vxlan_init,
+       .ndo_open               = vxlan_open,
+       .ndo_stop               = vxlan_stop,
+       .ndo_start_xmit         = vxlan_xmit,
+       .ndo_get_stats64        = vxlan_stats64,
+       .ndo_set_rx_mode        = vxlan_set_multicast_list,
+       .ndo_change_mtu         = eth_change_mtu,
+       .ndo_validate_addr      = eth_validate_addr,
+       .ndo_set_mac_address    = eth_mac_addr,
+       .ndo_fdb_add            = vxlan_fdb_add,
+       .ndo_fdb_del            = vxlan_fdb_delete,
+       .ndo_fdb_dump           = vxlan_fdb_dump,
+};
+
+/* Info for udev, that this is a virtual tunnel endpoint
+ * (exposed as DEVTYPE=vxlan in uevents).
+ */
+static struct device_type vxlan_type = {
+       .name = "vxlan",
+};
+
+/* Device destructor: releases the per-cpu stats allocated in
+ * vxlan_init() before freeing the netdev itself.
+ */
+static void vxlan_free(struct net_device *dev)
+{
+       struct vxlan_dev *vxlan = netdev_priv(dev);
+
+       free_percpu(vxlan->stats);
+       free_netdev(dev);
+}
+
+/* Initialize the device structure (rtnl_link_ops .setup).
+ * Ethernet-style device with a random MAC, lockless tx and
+ * namespace-local scope; also prepares the FDB and ageing timer.
+ */
+static void vxlan_setup(struct net_device *dev)
+{
+       struct vxlan_dev *vxlan = netdev_priv(dev);
+       unsigned h;
+
+       eth_hw_addr_random(dev);
+       ether_setup(dev);
+
+       dev->netdev_ops = &vxlan_netdev_ops;
+       dev->destructor = vxlan_free;
+       SET_NETDEV_DEVTYPE(dev, &vxlan_type);
+
+       dev->tx_queue_len = 0;
+       dev->features   |= NETIF_F_LLTX;
+       dev->features   |= NETIF_F_NETNS_LOCAL;
+       dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
+
+       spin_lock_init(&vxlan->hash_lock);
+
+       /* deferrable: ageing has no hard deadline */
+       init_timer_deferrable(&vxlan->age_timer);
+       vxlan->age_timer.function = vxlan_cleanup;
+       vxlan->age_timer.data = (unsigned long) vxlan;
+
+       vxlan->dev = dev;
+
+       for (h = 0; h < FDB_HASH_SIZE; ++h)
+               INIT_HLIST_HEAD(&vxlan->fdb_head[h]);
+}
+
+/* Netlink attribute validation policy for IFLA_VXLAN_* */
+static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
+       [IFLA_VXLAN_ID]         = { .type = NLA_U32 },
+       [IFLA_VXLAN_GROUP]      = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
+       [IFLA_VXLAN_LINK]       = { .type = NLA_U32 },
+       [IFLA_VXLAN_LOCAL]      = { .len = FIELD_SIZEOF(struct iphdr, saddr) },
+       [IFLA_VXLAN_TOS]        = { .type = NLA_U8 },
+       [IFLA_VXLAN_TTL]        = { .type = NLA_U8 },
+       [IFLA_VXLAN_LEARNING]   = { .type = NLA_U8 },
+       [IFLA_VXLAN_AGEING]     = { .type = NLA_U32 },
+       [IFLA_VXLAN_LIMIT]      = { .type = NLA_U32 },
+};
+
+/* rtnl_link_ops .validate: sanity-check link address, VNI range and
+ * group address before newlink runs.
+ */
+static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[])
+{
+       if (tb[IFLA_ADDRESS]) {
+               if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
+                       pr_debug("invalid link address (not ethernet)\n");
+                       return -EINVAL;
+               }
+
+               /* NOTE(review): is_valid_ether_addr() also rejects
+                * multicast addresses, not just all-zero ones -- the
+                * message text understates the check.
+                */
+               if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
+                       pr_debug("invalid all zero ethernet address\n");
+                       return -EADDRNOTAVAIL;
+               }
+       }
+
+       if (!data)
+               return -EINVAL;
+
+       if (data[IFLA_VXLAN_ID]) {
+               __u32 id = nla_get_u32(data[IFLA_VXLAN_ID]);
+               if (id >= VXLAN_VID_MASK)
+                       return -ERANGE;
+       }
+
+       if (data[IFLA_VXLAN_GROUP]) {
+               __be32 gaddr = nla_get_be32(data[IFLA_VXLAN_GROUP]);
+               if (!IN_MULTICAST(ntohl(gaddr))) {
+                       pr_debug("group address is not IPv4 multicast\n");
+                       return -EADDRNOTAVAIL;
+               }
+       }
+       return 0;
+}
+
+/* rtnl_link_ops .newlink: configure a new vxlan device from netlink
+ * attributes and register it.  The VNI must be unique per namespace.
+ */
+static int vxlan_newlink(struct net *net, struct net_device *dev,
+                        struct nlattr *tb[], struct nlattr *data[])
+{
+       struct vxlan_dev *vxlan = netdev_priv(dev);
+       __u32 vni;
+       int err;
+
+       if (!data[IFLA_VXLAN_ID])
+               return -EINVAL;
+
+       vni = nla_get_u32(data[IFLA_VXLAN_ID]);
+       if (vxlan_find_vni(net, vni)) {
+               pr_info("duplicate VNI %u\n", vni);
+               return -EEXIST;
+       }
+       vxlan->vni = vni;
+
+       if (data[IFLA_VXLAN_GROUP])
+               vxlan->gaddr = nla_get_be32(data[IFLA_VXLAN_GROUP]);
+
+       if (data[IFLA_VXLAN_LOCAL])
+               vxlan->saddr = nla_get_be32(data[IFLA_VXLAN_LOCAL]);
+
+       if (data[IFLA_VXLAN_LINK]) {
+               vxlan->link = nla_get_u32(data[IFLA_VXLAN_LINK]);
+
+               if (!tb[IFLA_MTU]) {
+                       struct net_device *lowerdev;
+
+                       /* Fix: an unknown ifindex previously caused a
+                        * NULL dereference on lowerdev->mtu below.
+                        */
+                       lowerdev = __dev_get_by_index(net, vxlan->link);
+                       if (!lowerdev) {
+                               pr_info("ifindex %d does not exist\n",
+                                       vxlan->link);
+                               return -ENODEV;
+                       }
+                       dev->mtu = lowerdev->mtu - VXLAN_HEADROOM;
+               }
+       }
+
+       if (data[IFLA_VXLAN_TOS])
+               vxlan->tos  = nla_get_u8(data[IFLA_VXLAN_TOS]);
+
+       /* learning defaults to on when the attribute is absent */
+       if (!data[IFLA_VXLAN_LEARNING] || nla_get_u8(data[IFLA_VXLAN_LEARNING]))
+               vxlan->learn = true;
+
+       if (data[IFLA_VXLAN_AGEING])
+               vxlan->age_interval = nla_get_u32(data[IFLA_VXLAN_AGEING]);
+       else
+               vxlan->age_interval = FDB_AGE_DEFAULT;
+
+       if (data[IFLA_VXLAN_LIMIT])
+               vxlan->addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]);
+
+       err = register_netdevice(dev);
+       if (!err)
+               hlist_add_head_rcu(&vxlan->hlist, vni_head(net, vxlan->vni));
+
+       return err;
+}
+
+/* rtnl_link_ops .dellink: remove from the VNI hash and queue the
+ * netdev for unregistration.
+ */
+static void vxlan_dellink(struct net_device *dev, struct list_head *head)
+{
+       struct vxlan_dev *vxlan = netdev_priv(dev);
+
+       hlist_del_rcu(&vxlan->hlist);
+
+       unregister_netdevice_queue(dev, head);
+}
+
+/* rtnl_link_ops .get_size: worst-case netlink attribute payload for
+ * vxlan_fill_info().
+ */
+static size_t vxlan_get_size(const struct net_device *dev)
+{
+
+       return nla_total_size(sizeof(__u32)) +  /* IFLA_VXLAN_ID */
+               nla_total_size(sizeof(__be32)) +/* IFLA_VXLAN_GROUP */
+               nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LINK */
+               nla_total_size(sizeof(__be32))+ /* IFLA_VXLAN_LOCAL */
+               nla_total_size(sizeof(__u8)) +  /* IFLA_VXLAN_TTL */
+               nla_total_size(sizeof(__u8)) +  /* IFLA_VXLAN_TOS */
+               nla_total_size(sizeof(__u8)) +  /* IFLA_VXLAN_LEARNING */
+               nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_AGEING */
+               nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LIMIT */
+               0;
+}
+
+/* rtnl_link_ops .fill_info: dump the device configuration.
+ * Optional attributes (group, link, local) are emitted only when set.
+ */
+static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
+{
+       const struct vxlan_dev *vxlan = netdev_priv(dev);
+
+       if (nla_put_u32(skb, IFLA_VXLAN_ID, vxlan->vni))
+               goto nla_put_failure;
+
+       /* Fix: gaddr/saddr are __be32 and are parsed with
+        * nla_get_be32() in vxlan_newlink(); dump them with
+        * nla_put_be32() so the annotations match (same bytes on the
+        * wire, but correct endianness typing).
+        */
+       if (vxlan->gaddr && nla_put_be32(skb, IFLA_VXLAN_GROUP, vxlan->gaddr))
+               goto nla_put_failure;
+
+       if (vxlan->link && nla_put_u32(skb, IFLA_VXLAN_LINK, vxlan->link))
+               goto nla_put_failure;
+
+       if (vxlan->saddr && nla_put_be32(skb, IFLA_VXLAN_LOCAL, vxlan->saddr))
+               goto nla_put_failure;
+
+       if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->ttl) ||
+           nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->tos) ||
+           nla_put_u8(skb, IFLA_VXLAN_LEARNING, vxlan->learn) ||
+           nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->age_interval) ||
+           nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->addrmax))
+               goto nla_put_failure;
+
+       return 0;
+
+nla_put_failure:
+       return -EMSGSIZE;
+}
+
+/* rtnl_link_ops for "ip link add ... type vxlan" */
+static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
+       .kind           = "vxlan",
+       .maxtype        = IFLA_VXLAN_MAX,
+       .policy         = vxlan_policy,
+       .priv_size      = sizeof(struct vxlan_dev),
+       .setup          = vxlan_setup,
+       .validate       = vxlan_validate,
+       .newlink        = vxlan_newlink,
+       .dellink        = vxlan_dellink,
+       .get_size       = vxlan_get_size,
+       .fill_info      = vxlan_fill_info,
+};
+
+/* Per-namespace init: create and bind the shared UDP encapsulation
+ * socket, hook it into the UDP encap path and set up the VNI hash.
+ */
+static __net_init int vxlan_init_net(struct net *net)
+{
+       struct vxlan_net *vn = net_generic(net, vxlan_net_id);
+       struct sock *sk;
+       struct sockaddr_in vxlan_addr = {
+               .sin_family = AF_INET,
+               .sin_addr.s_addr = htonl(INADDR_ANY),
+       };
+       int rc;
+       unsigned h;
+
+       /* Create UDP socket for encapsulation receive. */
+       rc = sock_create_kern(AF_INET, SOCK_DGRAM, IPPROTO_UDP, &vn->sock);
+       if (rc < 0) {
+               pr_debug("UDP socket create failed\n");
+               return rc;
+       }
+       /* Put in proper namespace */
+       sk = vn->sock->sk;
+       sk_change_net(sk, net);
+
+       vxlan_addr.sin_port = htons(vxlan_port);
+
+       rc = kernel_bind(vn->sock, (struct sockaddr *) &vxlan_addr,
+                        sizeof(vxlan_addr));
+       if (rc < 0) {
+               pr_debug("bind for UDP socket %pI4:%u (%d)\n",
+                        &vxlan_addr.sin_addr, ntohs(vxlan_addr.sin_port), rc);
+               sk_release_kernel(sk);
+               vn->sock = NULL;
+               return rc;
+       }
+
+       /* Disable multicast loopback */
+       inet_sk(sk)->mc_loop = 0;
+
+       /* Mark socket as an encapsulation socket. */
+       udp_sk(sk)->encap_type = 1;
+       udp_sk(sk)->encap_rcv = vxlan_udp_encap_recv;
+       udp_encap_enable();
+
+       for (h = 0; h < VNI_HASH_SIZE; ++h)
+               INIT_HLIST_HEAD(&vn->vni_list[h]);
+
+       return 0;
+}
+
+/* Per-namespace teardown: release the shared encapsulation socket
+ * (may be NULL if init failed after creation).
+ */
+static __net_exit void vxlan_exit_net(struct net *net)
+{
+       struct vxlan_net *vn = net_generic(net, vxlan_net_id);
+
+       if (vn->sock) {
+               sk_release_kernel(vn->sock->sk);
+               vn->sock = NULL;
+       }
+}
+
+/* Per-network-namespace state registration */
+static struct pernet_operations vxlan_net_ops = {
+       .init = vxlan_init_net,
+       .exit = vxlan_exit_net,
+       .id   = &vxlan_net_id,
+       .size = sizeof(struct vxlan_net),
+};
+
+/* Module init: seed the FDB hash salt, then register the pernet
+ * state and the rtnl link type, unwinding on failure.
+ */
+static int __init vxlan_init_module(void)
+{
+       int rc;
+
+       get_random_bytes(&vxlan_salt, sizeof(vxlan_salt));
+
+       rc = register_pernet_device(&vxlan_net_ops);
+       if (rc)
+               goto out1;
+
+       rc = rtnl_link_register(&vxlan_link_ops);
+       if (rc)
+               goto out2;
+
+       return 0;
+
+out2:
+       unregister_pernet_device(&vxlan_net_ops);
+out1:
+       return rc;
+}
+module_init(vxlan_init_module);
+
+/* Module exit: unregister in reverse order of init */
+static void __exit vxlan_cleanup_module(void)
+{
+       rtnl_link_unregister(&vxlan_link_ops);
+       unregister_pernet_device(&vxlan_net_ops);
+}
+module_exit(vxlan_cleanup_module);
+
+/* Module metadata */
+MODULE_LICENSE("GPL");
+MODULE_VERSION(VXLAN_VERSION);
+MODULE_AUTHOR("Stephen Hemminger <shemminger@vyatta.com>");
+MODULE_ALIAS_RTNL_LINK("vxlan");
index 025426132754a4f584f3cd7d5eda4362c176bab8..9c34d2fccfac61508705a4021f436e9a9024e936 100644 (file)
@@ -222,7 +222,6 @@ int i2400m_check_mac_addr(struct i2400m *i2400m)
        struct sk_buff *skb;
        const struct i2400m_tlv_detailed_device_info *ddi;
        struct net_device *net_dev = i2400m->wimax_dev.net_dev;
-       const unsigned char zeromac[ETH_ALEN] = { 0 };
 
        d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
        skb = i2400m_get_device_info(i2400m);
@@ -244,7 +243,7 @@ int i2400m_check_mac_addr(struct i2400m *i2400m)
                 "to that of boot mode's\n");
        dev_warn(dev, "device reports     %pM\n", ddi->mac_address);
        dev_warn(dev, "boot mode reported %pM\n", net_dev->perm_addr);
-       if (!memcmp(zeromac, ddi->mac_address, sizeof(zeromac)))
+       if (is_zero_ether_addr(ddi->mac_address))
                dev_err(dev, "device reports an invalid MAC address, "
                        "not updating\n");
        else {
index 689a71c1af71b049e5f9cfa9ecfec806a4f678f3..154a4965be4fd9922e62684d73467b87fa46194e 100644 (file)
@@ -1661,7 +1661,9 @@ static void adm8211_tx_raw(struct ieee80211_hw *dev, struct sk_buff *skb,
 }
 
 /* Put adm8211_tx_hdr on skb and transmit */
-static void adm8211_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
+static void adm8211_tx(struct ieee80211_hw *dev,
+                      struct ieee80211_tx_control *control,
+                      struct sk_buff *skb)
 {
        struct adm8211_tx_hdr *txhdr;
        size_t payload_len, hdrlen;
index f9f15bb3f03a8b0df468f954af8b234123141caa..3cd05a7173f6ce37945c04237d3dc493aa080647 100644 (file)
@@ -87,7 +87,6 @@ static struct pci_driver airo_driver = {
 /* Include Wireless Extension definition and check version - Jean II */
 #include <linux/wireless.h>
 #define WIRELESS_SPY           /* enable iwspy support */
-#include <net/iw_handler.h>    /* New driver API */
 
 #define CISCO_EXT              /* enable Cisco extensions */
 #ifdef CISCO_EXT
@@ -232,8 +231,10 @@ static int adhoc;
 
 static int probe = 1;
 
+static kuid_t proc_kuid;
 static int proc_uid /* = 0 */;
 
+static kgid_t proc_kgid;
 static int proc_gid /* = 0 */;
 
 static int airo_perm = 0555;
@@ -4499,78 +4500,79 @@ struct proc_data {
 static int setup_proc_entry( struct net_device *dev,
                             struct airo_info *apriv ) {
        struct proc_dir_entry *entry;
+
        /* First setup the device directory */
        strcpy(apriv->proc_name,dev->name);
        apriv->proc_entry = proc_mkdir_mode(apriv->proc_name, airo_perm,
                                            airo_entry);
        if (!apriv->proc_entry)
                goto fail;
-       apriv->proc_entry->uid = proc_uid;
-       apriv->proc_entry->gid = proc_gid;
+       apriv->proc_entry->uid = proc_kuid;
+       apriv->proc_entry->gid = proc_kgid;
 
        /* Setup the StatsDelta */
        entry = proc_create_data("StatsDelta", S_IRUGO & proc_perm,
                                 apriv->proc_entry, &proc_statsdelta_ops, dev);
        if (!entry)
                goto fail_stats_delta;
-       entry->uid = proc_uid;
-       entry->gid = proc_gid;
+       entry->uid = proc_kuid;
+       entry->gid = proc_kgid;
 
        /* Setup the Stats */
        entry = proc_create_data("Stats", S_IRUGO & proc_perm,
                                 apriv->proc_entry, &proc_stats_ops, dev);
        if (!entry)
                goto fail_stats;
-       entry->uid = proc_uid;
-       entry->gid = proc_gid;
+       entry->uid = proc_kuid;
+       entry->gid = proc_kgid;
 
        /* Setup the Status */
        entry = proc_create_data("Status", S_IRUGO & proc_perm,
                                 apriv->proc_entry, &proc_status_ops, dev);
        if (!entry)
                goto fail_status;
-       entry->uid = proc_uid;
-       entry->gid = proc_gid;
+       entry->uid = proc_kuid;
+       entry->gid = proc_kgid;
 
        /* Setup the Config */
        entry = proc_create_data("Config", proc_perm,
                                 apriv->proc_entry, &proc_config_ops, dev);
        if (!entry)
                goto fail_config;
-       entry->uid = proc_uid;
-       entry->gid = proc_gid;
+       entry->uid = proc_kuid;
+       entry->gid = proc_kgid;
 
        /* Setup the SSID */
        entry = proc_create_data("SSID", proc_perm,
                                 apriv->proc_entry, &proc_SSID_ops, dev);
        if (!entry)
                goto fail_ssid;
-       entry->uid = proc_uid;
-       entry->gid = proc_gid;
+       entry->uid = proc_kuid;
+       entry->gid = proc_kgid;
 
        /* Setup the APList */
        entry = proc_create_data("APList", proc_perm,
                                 apriv->proc_entry, &proc_APList_ops, dev);
        if (!entry)
                goto fail_aplist;
-       entry->uid = proc_uid;
-       entry->gid = proc_gid;
+       entry->uid = proc_kuid;
+       entry->gid = proc_kgid;
 
        /* Setup the BSSList */
        entry = proc_create_data("BSSList", proc_perm,
                                 apriv->proc_entry, &proc_BSSList_ops, dev);
        if (!entry)
                goto fail_bsslist;
-       entry->uid = proc_uid;
-       entry->gid = proc_gid;
+       entry->uid = proc_kuid;
+       entry->gid = proc_kgid;
 
        /* Setup the WepKey */
        entry = proc_create_data("WepKey", proc_perm,
                                 apriv->proc_entry, &proc_wepkey_ops, dev);
        if (!entry)
                goto fail_wepkey;
-       entry->uid = proc_uid;
-       entry->gid = proc_gid;
+       entry->uid = proc_kuid;
+       entry->gid = proc_kgid;
 
        return 0;
 
@@ -5697,11 +5699,16 @@ static int __init airo_init_module( void )
 {
        int i;
 
+       proc_kuid = make_kuid(&init_user_ns, proc_uid);
+       proc_kgid = make_kgid(&init_user_ns, proc_gid);
+       if (!uid_valid(proc_kuid) || !gid_valid(proc_kgid))
+               return -EINVAL;
+
        airo_entry = proc_mkdir_mode("driver/aironet", airo_perm, NULL);
 
        if (airo_entry) {
-               airo_entry->uid = proc_uid;
-               airo_entry->gid = proc_gid;
+               airo_entry->uid = proc_kuid;
+               airo_entry->gid = proc_kgid;
        }
 
        for (i = 0; i < 4 && io[i] && irq[i]; i++) {
@@ -5976,13 +5983,11 @@ static int airo_set_wap(struct net_device *dev,
        Cmd cmd;
        Resp rsp;
        APListRid APList_rid;
-       static const u8 any[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
-       static const u8 off[ETH_ALEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
 
        if (awrq->sa_family != ARPHRD_ETHER)
                return -EINVAL;
-       else if (!memcmp(any, awrq->sa_data, ETH_ALEN) ||
-                !memcmp(off, awrq->sa_data, ETH_ALEN)) {
+       else if (is_broadcast_ether_addr(awrq->sa_data) ||
+                is_zero_ether_addr(awrq->sa_data)) {
                memset(&cmd, 0, sizeof(cmd));
                cmd.cmd=CMD_LOSE_SYNC;
                if (down_interruptible(&local->sem))
index 88b8d64c90f1b49a302deaca3af07788ee4b27c1..99b9ddf21273b2244b2a8f9e03847a68a736bb61 100644 (file)
@@ -498,36 +498,6 @@ exit:
        return ret;
 }
 
-#define HEX2STR_BUFFERS 4
-#define HEX2STR_MAX_LEN 64
-
-/* Convert binary data into hex string */
-static char *hex2str(void *buf, size_t len)
-{
-       static atomic_t a = ATOMIC_INIT(0);
-       static char bufs[HEX2STR_BUFFERS][3 * HEX2STR_MAX_LEN + 1];
-       char *ret = bufs[atomic_inc_return(&a) & (HEX2STR_BUFFERS - 1)];
-       char *obuf = ret;
-       u8 *ibuf = buf;
-
-       if (len > HEX2STR_MAX_LEN)
-               len = HEX2STR_MAX_LEN;
-
-       if (len == 0)
-               goto exit;
-
-       while (len--) {
-               obuf = hex_byte_pack(obuf, *ibuf++);
-               *obuf++ = '-';
-       }
-       obuf--;
-
-exit:
-       *obuf = '\0';
-
-       return ret;
-}
-
 /* LED trigger */
 static int tx_activity;
 static void at76_ledtrig_tx_timerfunc(unsigned long data);
@@ -1004,9 +974,9 @@ static void at76_dump_mib_mac_wep(struct at76_priv *priv)
            WEP_SMALL_KEY_LEN : WEP_LARGE_KEY_LEN;
 
        for (i = 0; i < WEP_KEYS; i++)
-               at76_dbg(DBG_MIB, "%s: MIB MAC_WEP: key %d: %s",
+               at76_dbg(DBG_MIB, "%s: MIB MAC_WEP: key %d: %*phD",
                         wiphy_name(priv->hw->wiphy), i,
-                        hex2str(m->wep_default_keyvalue[i], key_len));
+                        key_len, m->wep_default_keyvalue[i]);
 exit:
        kfree(m);
 }
@@ -1031,7 +1001,7 @@ static void at76_dump_mib_mac_mgmt(struct at76_priv *priv)
        at76_dbg(DBG_MIB, "%s: MIB MAC_MGMT: beacon_period %d CFP_max_duration "
                 "%d medium_occupancy_limit %d station_id 0x%x ATIM_window %d "
                 "CFP_mode %d privacy_opt_impl %d DTIM_period %d CFP_period %d "
-                "current_bssid %pM current_essid %s current_bss_type %d "
+                "current_bssid %pM current_essid %*phD current_bss_type %d "
                 "pm_mode %d ibss_change %d res %d "
                 "multi_domain_capability_implemented %d "
                 "international_roaming %d country_string %.3s",
@@ -1041,7 +1011,7 @@ static void at76_dump_mib_mac_mgmt(struct at76_priv *priv)
                 le16_to_cpu(m->station_id), le16_to_cpu(m->ATIM_window),
                 m->CFP_mode, m->privacy_option_implemented, m->DTIM_period,
                 m->CFP_period, m->current_bssid,
-                hex2str(m->current_essid, IW_ESSID_MAX_SIZE),
+                IW_ESSID_MAX_SIZE, m->current_essid,
                 m->current_bss_type, m->power_mgmt_mode, m->ibss_change,
                 m->res, m->multi_domain_capability_implemented,
                 m->multi_domain_capability_enabled, m->country_string);
@@ -1069,7 +1039,7 @@ static void at76_dump_mib_mac(struct at76_priv *priv)
                 "cwmin %d cwmax %d short_retry_time %d long_retry_time %d "
                 "scan_type %d scan_channel %d probe_delay %u "
                 "min_channel_time %d max_channel_time %d listen_int %d "
-                "desired_ssid %s desired_bssid %pM desired_bsstype %d",
+                "desired_ssid %*phD desired_bssid %pM desired_bsstype %d",
                 wiphy_name(priv->hw->wiphy),
                 le32_to_cpu(m->max_tx_msdu_lifetime),
                 le32_to_cpu(m->max_rx_lifetime),
@@ -1080,7 +1050,7 @@ static void at76_dump_mib_mac(struct at76_priv *priv)
                 le16_to_cpu(m->min_channel_time),
                 le16_to_cpu(m->max_channel_time),
                 le16_to_cpu(m->listen_interval),
-                hex2str(m->desired_ssid, IW_ESSID_MAX_SIZE),
+                IW_ESSID_MAX_SIZE, m->desired_ssid,
                 m->desired_bssid, m->desired_bsstype);
 exit:
        kfree(m);
@@ -1160,13 +1130,13 @@ static void at76_dump_mib_mdomain(struct at76_priv *priv)
                goto exit;
        }
 
-       at76_dbg(DBG_MIB, "%s: MIB MDOMAIN: channel_list %s",
+       at76_dbg(DBG_MIB, "%s: MIB MDOMAIN: channel_list %*phD",
                 wiphy_name(priv->hw->wiphy),
-                hex2str(m->channel_list, sizeof(m->channel_list)));
+                (int)sizeof(m->channel_list), m->channel_list);
 
-       at76_dbg(DBG_MIB, "%s: MIB MDOMAIN: tx_powerlevel %s",
+       at76_dbg(DBG_MIB, "%s: MIB MDOMAIN: tx_powerlevel %*phD",
                 wiphy_name(priv->hw->wiphy),
-                hex2str(m->tx_powerlevel, sizeof(m->tx_powerlevel)));
+                (int)sizeof(m->tx_powerlevel), m->tx_powerlevel);
 exit:
        kfree(m);
 }
@@ -1369,9 +1339,9 @@ static int at76_startup_device(struct at76_priv *priv)
        int ret;
 
        at76_dbg(DBG_PARAMS,
-                "%s param: ssid %.*s (%s) mode %s ch %d wep %s key %d "
+                "%s param: ssid %.*s (%*phD) mode %s ch %d wep %s key %d "
                 "keylen %d", wiphy_name(priv->hw->wiphy), priv->essid_size,
-                priv->essid, hex2str(priv->essid, IW_ESSID_MAX_SIZE),
+                priv->essid, IW_ESSID_MAX_SIZE, priv->essid,
                 priv->iw_mode == IW_MODE_ADHOC ? "adhoc" : "infra",
                 priv->channel, priv->wep_enabled ? "enabled" : "disabled",
                 priv->wep_key_id, priv->wep_keys_len[priv->wep_key_id]);
@@ -1726,7 +1696,9 @@ static void at76_mac80211_tx_callback(struct urb *urb)
        ieee80211_wake_queues(priv->hw);
 }
 
-static void at76_mac80211_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void at76_mac80211_tx(struct ieee80211_hw *hw,
+                            struct ieee80211_tx_control *control,
+                            struct sk_buff *skb)
 {
        struct at76_priv *priv = hw->priv;
        struct at76_tx_buffer *tx_buffer = priv->bulk_out_buffer;
index 6169fbd23ed10a527e26973c2e00f2bfe6bd82af..4521342c62cc37654ee1889b9ee177e395706f27 100644 (file)
@@ -159,6 +159,7 @@ struct ath_common {
 
        bool btcoex_enabled;
        bool disable_ani;
+       bool antenna_diversity;
 };
 
 struct sk_buff *ath_rxbuf_alloc(struct ath_common *common,
index 64a453a6dfe442d22c533435787df147b86101fe..3150def17193b72652bb068c8fde9ca99e0a6e5f 100644 (file)
@@ -1331,7 +1331,6 @@ struct ath5k_hw {
        unsigned int            nexttbtt;       /* next beacon time in TU */
        struct ath5k_txq        *cabq;          /* content after beacon */
 
-       int                     power_level;    /* Requested tx power in dBm */
        bool                    assoc;          /* associate state */
        bool                    enable_beacon;  /* true if beacons are on */
 
@@ -1425,6 +1424,7 @@ struct ath5k_hw {
                /* Value in dB units */
                s16             txp_cck_ofdm_pwr_delta;
                bool            txp_setup;
+               int             txp_requested;  /* Requested tx power in dBm */
        } ah_txpower;
 
        struct ath5k_nfcal_hist ah_nfcal_hist;
index 2aab20ee9f387f8c89289ca409e6ead0fa83722c..9fd6d9a9942ec298b81be9a8e62697df1d13c037 100644 (file)
@@ -723,7 +723,7 @@ ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf,
        ret = ah->ah_setup_tx_desc(ah, ds, pktlen,
                ieee80211_get_hdrlen_from_skb(skb), padsize,
                get_hw_packet_type(skb),
-               (ah->power_level * 2),
+               (ah->ah_txpower.txp_requested * 2),
                hw_rate,
                info->control.rates[0].count, keyidx, ah->ah_tx_ant, flags,
                cts_rate, duration);
@@ -1778,7 +1778,8 @@ ath5k_beacon_setup(struct ath5k_hw *ah, struct ath5k_buf *bf)
        ds->ds_data = bf->skbaddr;
        ret = ah->ah_setup_tx_desc(ah, ds, skb->len,
                        ieee80211_get_hdrlen_from_skb(skb), padsize,
-                       AR5K_PKT_TYPE_BEACON, (ah->power_level * 2),
+                       AR5K_PKT_TYPE_BEACON,
+                       (ah->ah_txpower.txp_requested * 2),
                        ieee80211_get_tx_rate(ah->hw, info)->hw_value,
                        1, AR5K_TXKEYIX_INVALID,
                        antenna, flags, 0, 0);
@@ -2445,6 +2446,7 @@ ath5k_init_ah(struct ath5k_hw *ah, const struct ath_bus_ops *bus_ops)
        hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
                        IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
                        IEEE80211_HW_SIGNAL_DBM |
+                       IEEE80211_HW_MFP_CAPABLE |
                        IEEE80211_HW_REPORTS_TX_ACK_STATUS;
 
        hw->wiphy->interface_modes =
index d56453e43d7e353e0890d3962e264448d7185771..7a28538e6e05ba6c001e9b04aa913955a339ac44 100644 (file)
@@ -55,7 +55,8 @@
 \********************/
 
 static void
-ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+ath5k_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
+        struct sk_buff *skb)
 {
        struct ath5k_hw *ah = hw->priv;
        u16 qnum = skb_get_queue_mapping(skb);
@@ -207,8 +208,8 @@ ath5k_config(struct ieee80211_hw *hw, u32 changed)
        }
 
        if ((changed & IEEE80211_CONF_CHANGE_POWER) &&
-       (ah->power_level != conf->power_level)) {
-               ah->power_level = conf->power_level;
+       (ah->ah_txpower.txp_requested != conf->power_level)) {
+               ah->ah_txpower.txp_requested = conf->power_level;
 
                /* Half dB steps */
                ath5k_hw_set_txpower_limit(ah, (conf->power_level * 2));
@@ -488,6 +489,9 @@ ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
        if (ath5k_modparam_nohwcrypt)
                return -EOPNOTSUPP;
 
+       if (key->flags & IEEE80211_KEY_FLAG_RX_MGMT)
+               return -EOPNOTSUPP;
+
        if (vif->type == NL80211_IFTYPE_ADHOC &&
            (key->cipher == WLAN_CIPHER_SUITE_TKIP ||
             key->cipher == WLAN_CIPHER_SUITE_CCMP) &&
@@ -522,7 +526,7 @@ ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
                        if (key->cipher == WLAN_CIPHER_SUITE_TKIP)
                                key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
                        if (key->cipher == WLAN_CIPHER_SUITE_CCMP)
-                               key->flags |= IEEE80211_KEY_FLAG_SW_MGMT;
+                               key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
                        ret = 0;
                }
                break;
index 8b71a2d947e0c9348c1e1b402b4d6092e4d0b587..ab363f34b4df71c76f1fa9198c245e0bac28da7a 100644 (file)
@@ -1975,11 +1975,13 @@ ath5k_hw_set_spur_mitigation_filter(struct ath5k_hw *ah,
                        spur_delta_phase = (spur_offset << 18) / 25;
                        spur_freq_sigma_delta = (spur_delta_phase >> 10);
                        symbol_width = AR5K_SPUR_SYMBOL_WIDTH_BASE_100Hz / 2;
+                       break;
                case AR5K_BWMODE_5MHZ:
                        /* Both sample_freq and chip_freq are 10MHz (?) */
                        spur_delta_phase = (spur_offset << 19) / 25;
                        spur_freq_sigma_delta = (spur_delta_phase >> 10);
                        symbol_width = AR5K_SPUR_SYMBOL_WIDTH_BASE_100Hz / 4;
+                       break;
                default:
                        if (channel->band == IEEE80211_BAND_5GHZ) {
                                /* Both sample_freq and chip_freq are 40MHz */
@@ -3516,6 +3518,7 @@ ath5k_setup_rate_powertable(struct ath5k_hw *ah, u16 max_pwr,
 {
        unsigned int i;
        u16 *rates;
+       s16 rate_idx_scaled = 0;
 
        /* max_pwr is power level we got from driver/user in 0.5dB
         * units, switch to 0.25dB units so we can compare */
@@ -3562,20 +3565,32 @@ ath5k_setup_rate_powertable(struct ath5k_hw *ah, u16 max_pwr,
                for (i = 8; i <= 15; i++)
                        rates[i] -= ah->ah_txpower.txp_cck_ofdm_gainf_delta;
 
+       /* Save min/max and current tx power for this channel
+        * in 0.25dB units.
+        *
+        * Note: We use rates[0] for current tx power because
+        * it covers most of the rates, in most cases. It's our
+        * tx power limit and what the user expects to see. */
+       ah->ah_txpower.txp_min_pwr = 2 * rates[7];
+       ah->ah_txpower.txp_cur_pwr = 2 * rates[0];
+
+       /* Set max txpower for correct OFDM operation on all rates
+        * -that is the txpower for 54Mbit-, it's used for the PAPD
+        * gain probe and it's in 0.5dB units */
+       ah->ah_txpower.txp_ofdm = rates[7];
+
        /* Now that we have all rates setup use table offset to
         * match the power range set by user with the power indices
         * on PCDAC/PDADC table */
        for (i = 0; i < 16; i++) {
-               rates[i] += ah->ah_txpower.txp_offset;
+               rate_idx_scaled = rates[i] + ah->ah_txpower.txp_offset;
                /* Don't get out of bounds */
-               if (rates[i] > 63)
-                       rates[i] = 63;
+               if (rate_idx_scaled > 63)
+                       rate_idx_scaled = 63;
+               if (rate_idx_scaled < 0)
+                       rate_idx_scaled = 0;
+               rates[i] = rate_idx_scaled;
        }
-
-       /* Min/max in 0.25dB units */
-       ah->ah_txpower.txp_min_pwr = 2 * rates[7];
-       ah->ah_txpower.txp_cur_pwr = 2 * rates[0];
-       ah->ah_txpower.txp_ofdm = rates[7];
 }
 
 
@@ -3639,10 +3654,17 @@ ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
        if (!ah->ah_txpower.txp_setup ||
            (channel->hw_value != curr_channel->hw_value) ||
            (channel->center_freq != curr_channel->center_freq)) {
-               /* Reset TX power values */
+               /* Reset TX power values but preserve requested
+                * tx power from above */
+               int requested_txpower = ah->ah_txpower.txp_requested;
+
                memset(&ah->ah_txpower, 0, sizeof(ah->ah_txpower));
+
+               /* Restore TPC setting and requested tx power */
                ah->ah_txpower.txp_tpc = AR5K_TUNE_TPC_TXPOWER;
 
+               ah->ah_txpower.txp_requested = requested_txpower;
+
                /* Calculate the powertable */
                ret = ath5k_setup_channel_powertable(ah, channel,
                                                        ee_mode, type);
@@ -3789,8 +3811,9 @@ ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
         * RF buffer settings on 5211/5212+ so that we
         * properly set curve indices.
         */
-       ret = ath5k_hw_txpower(ah, channel, ah->ah_txpower.txp_cur_pwr ?
-                       ah->ah_txpower.txp_cur_pwr / 2 : AR5K_TUNE_MAX_TXPOWER);
+       ret = ath5k_hw_txpower(ah, channel, ah->ah_txpower.txp_requested ?
+                                       ah->ah_txpower.txp_requested * 2 :
+                                       AR5K_TUNE_MAX_TXPOWER);
        if (ret)
                return ret;
 
index 86aeef4b9d7ee9295fe04c5533d3959a19474c9c..7089f8160ad5bb7f2a7377bbc0adae68229274b2 100644 (file)
@@ -1488,7 +1488,7 @@ static int ath6kl_cfg80211_set_power_mgmt(struct wiphy *wiphy,
 }
 
 static struct wireless_dev *ath6kl_cfg80211_add_iface(struct wiphy *wiphy,
-                                                     char *name,
+                                                     const char *name,
                                                      enum nl80211_iftype type,
                                                      u32 *flags,
                                                      struct vif_params *params)
@@ -3477,7 +3477,7 @@ void ath6kl_cfg80211_vif_cleanup(struct ath6kl_vif *vif)
        ar->num_vif--;
 }
 
-struct wireless_dev *ath6kl_interface_add(struct ath6kl *ar, char *name,
+struct wireless_dev *ath6kl_interface_add(struct ath6kl *ar, const char *name,
                                          enum nl80211_iftype type,
                                          u8 fw_vif_idx, u8 nw_type)
 {
index 56b1ebe79812d0d90b2fc6292592809fae9637c1..780f77775a9152ca078922cbd2a7754c9a019d24 100644 (file)
@@ -25,7 +25,7 @@ enum ath6kl_cfg_suspend_mode {
        ATH6KL_CFG_SUSPEND_SCHED_SCAN,
 };
 
-struct wireless_dev *ath6kl_interface_add(struct ath6kl *ar, char *name,
+struct wireless_dev *ath6kl_interface_add(struct ath6kl *ar, const char *name,
                                          enum nl80211_iftype type,
                                          u8 fw_vif_idx, u8 nw_type);
 void ath6kl_cfg80211_ch_switch_notify(struct ath6kl_vif *vif, int freq,
index ff007f500feba8176794ca200e2bfd0523d657ff..e09ec40ce71ab6c25bd801a0661c61f446496aed 100644 (file)
@@ -237,7 +237,7 @@ static void ath9k_hw_set_cck_nil(struct ath_hw *ah, u_int8_t immunityLevel,
                                     entry_cck->fir_step_level);
 
        /* Skip MRC CCK for pre AR9003 families */
-       if (!AR_SREV_9300_20_OR_LATER(ah) || AR_SREV_9485(ah))
+       if (!AR_SREV_9300_20_OR_LATER(ah) || AR_SREV_9485(ah) || AR_SREV_9565(ah))
                return;
 
        if (aniState->mrcCCK != entry_cck->mrc_cck_on)
index bbcfeb3b2a60ac90d046110d59161dd118b15e4a..664844c5d3d51ae8752514bb3976fa2074ba0afe 100644 (file)
@@ -311,6 +311,9 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
                                          struct ath_ant_comb *antcomb,
                                          int alt_ratio)
 {
+       ant_conf->main_gaintb = 0;
+       ant_conf->alt_gaintb = 0;
+
        if (ant_conf->div_group == 0) {
                /* Adjust the fast_div_bias based on main and alt lna conf */
                switch ((ant_conf->main_lna_conf << 4) |
@@ -360,18 +363,12 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
                        ant_conf->alt_lna_conf) {
                case 0x01: /* A-B LNA2 */
                        ant_conf->fast_div_bias = 0x1;
-                       ant_conf->main_gaintb = 0;
-                       ant_conf->alt_gaintb = 0;
                        break;
                case 0x02: /* A-B LNA1 */
                        ant_conf->fast_div_bias = 0x1;
-                       ant_conf->main_gaintb = 0;
-                       ant_conf->alt_gaintb = 0;
                        break;
                case 0x03: /* A-B A+B */
                        ant_conf->fast_div_bias = 0x1;
-                       ant_conf->main_gaintb = 0;
-                       ant_conf->alt_gaintb = 0;
                        break;
                case 0x10: /* LNA2 A-B */
                        if (!(antcomb->scan) &&
@@ -379,13 +376,9 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
                                ant_conf->fast_div_bias = 0x3f;
                        else
                                ant_conf->fast_div_bias = 0x1;
-                       ant_conf->main_gaintb = 0;
-                       ant_conf->alt_gaintb = 0;
                        break;
                case 0x12: /* LNA2 LNA1 */
                        ant_conf->fast_div_bias = 0x1;
-                       ant_conf->main_gaintb = 0;
-                       ant_conf->alt_gaintb = 0;
                        break;
                case 0x13: /* LNA2 A+B */
                        if (!(antcomb->scan) &&
@@ -393,8 +386,6 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
                                ant_conf->fast_div_bias = 0x3f;
                        else
                                ant_conf->fast_div_bias = 0x1;
-                       ant_conf->main_gaintb = 0;
-                       ant_conf->alt_gaintb = 0;
                        break;
                case 0x20: /* LNA1 A-B */
                        if (!(antcomb->scan) &&
@@ -402,13 +393,9 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
                                ant_conf->fast_div_bias = 0x3f;
                        else
                                ant_conf->fast_div_bias = 0x1;
-                       ant_conf->main_gaintb = 0;
-                       ant_conf->alt_gaintb = 0;
                        break;
                case 0x21: /* LNA1 LNA2 */
                        ant_conf->fast_div_bias = 0x1;
-                       ant_conf->main_gaintb = 0;
-                       ant_conf->alt_gaintb = 0;
                        break;
                case 0x23: /* LNA1 A+B */
                        if (!(antcomb->scan) &&
@@ -416,23 +403,15 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
                                ant_conf->fast_div_bias = 0x3f;
                        else
                                ant_conf->fast_div_bias = 0x1;
-                       ant_conf->main_gaintb = 0;
-                       ant_conf->alt_gaintb = 0;
                        break;
                case 0x30: /* A+B A-B */
                        ant_conf->fast_div_bias = 0x1;
-                       ant_conf->main_gaintb = 0;
-                       ant_conf->alt_gaintb = 0;
                        break;
                case 0x31: /* A+B LNA2 */
                        ant_conf->fast_div_bias = 0x1;
-                       ant_conf->main_gaintb = 0;
-                       ant_conf->alt_gaintb = 0;
                        break;
                case 0x32: /* A+B LNA1 */
                        ant_conf->fast_div_bias = 0x1;
-                       ant_conf->main_gaintb = 0;
-                       ant_conf->alt_gaintb = 0;
                        break;
                default:
                        break;
@@ -443,18 +422,12 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
                                ant_conf->alt_lna_conf) {
                case 0x01: /* A-B LNA2 */
                        ant_conf->fast_div_bias = 0x1;
-                       ant_conf->main_gaintb = 0;
-                       ant_conf->alt_gaintb = 0;
                        break;
                case 0x02: /* A-B LNA1 */
                        ant_conf->fast_div_bias = 0x1;
-                       ant_conf->main_gaintb = 0;
-                       ant_conf->alt_gaintb = 0;
                        break;
                case 0x03: /* A-B A+B */
                        ant_conf->fast_div_bias = 0x1;
-                       ant_conf->main_gaintb = 0;
-                       ant_conf->alt_gaintb = 0;
                        break;
                case 0x10: /* LNA2 A-B */
                        if (!(antcomb->scan) &&
@@ -462,13 +435,9 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
                                ant_conf->fast_div_bias = 0x1;
                        else
                                ant_conf->fast_div_bias = 0x2;
-                       ant_conf->main_gaintb = 0;
-                       ant_conf->alt_gaintb = 0;
                        break;
                case 0x12: /* LNA2 LNA1 */
                        ant_conf->fast_div_bias = 0x1;
-                       ant_conf->main_gaintb = 0;
-                       ant_conf->alt_gaintb = 0;
                        break;
                case 0x13: /* LNA2 A+B */
                        if (!(antcomb->scan) &&
@@ -476,8 +445,6 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
                                ant_conf->fast_div_bias = 0x1;
                        else
                                ant_conf->fast_div_bias = 0x2;
-                       ant_conf->main_gaintb = 0;
-                       ant_conf->alt_gaintb = 0;
                        break;
                case 0x20: /* LNA1 A-B */
                        if (!(antcomb->scan) &&
@@ -485,13 +452,9 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
                                ant_conf->fast_div_bias = 0x1;
                        else
                                ant_conf->fast_div_bias = 0x2;
-                       ant_conf->main_gaintb = 0;
-                       ant_conf->alt_gaintb = 0;
                        break;
                case 0x21: /* LNA1 LNA2 */
                        ant_conf->fast_div_bias = 0x1;
-                       ant_conf->main_gaintb = 0;
-                       ant_conf->alt_gaintb = 0;
                        break;
                case 0x23: /* LNA1 A+B */
                        if (!(antcomb->scan) &&
@@ -499,23 +462,77 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
                                ant_conf->fast_div_bias = 0x1;
                        else
                                ant_conf->fast_div_bias = 0x2;
-                       ant_conf->main_gaintb = 0;
-                       ant_conf->alt_gaintb = 0;
                        break;
                case 0x30: /* A+B A-B */
                        ant_conf->fast_div_bias = 0x1;
-                       ant_conf->main_gaintb = 0;
-                       ant_conf->alt_gaintb = 0;
                        break;
                case 0x31: /* A+B LNA2 */
                        ant_conf->fast_div_bias = 0x1;
-                       ant_conf->main_gaintb = 0;
-                       ant_conf->alt_gaintb = 0;
                        break;
                case 0x32: /* A+B LNA1 */
                        ant_conf->fast_div_bias = 0x1;
-                       ant_conf->main_gaintb = 0;
-                       ant_conf->alt_gaintb = 0;
+                       break;
+               default:
+                       break;
+               }
+       } else if (ant_conf->div_group == 3) {
+               switch ((ant_conf->main_lna_conf << 4) |
+                       ant_conf->alt_lna_conf) {
+               case 0x01: /* A-B LNA2 */
+                       ant_conf->fast_div_bias = 0x1;
+                       break;
+               case 0x02: /* A-B LNA1 */
+                       ant_conf->fast_div_bias = 0x39;
+                       break;
+               case 0x03: /* A-B A+B */
+                       ant_conf->fast_div_bias = 0x1;
+                       break;
+               case 0x10: /* LNA2 A-B */
+                       if ((antcomb->scan == 0) &&
+                           (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)) {
+                               ant_conf->fast_div_bias = 0x3f;
+                       } else {
+                               ant_conf->fast_div_bias = 0x1;
+                       }
+                       break;
+               case 0x12: /* LNA2 LNA1 */
+                       ant_conf->fast_div_bias = 0x39;
+                       break;
+               case 0x13: /* LNA2 A+B */
+                       if ((antcomb->scan == 0) &&
+                           (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)) {
+                               ant_conf->fast_div_bias = 0x3f;
+                       } else {
+                               ant_conf->fast_div_bias = 0x1;
+                       }
+                       break;
+               case 0x20: /* LNA1 A-B */
+                       if ((antcomb->scan == 0) &&
+                           (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)) {
+                               ant_conf->fast_div_bias = 0x3f;
+                       } else {
+                               ant_conf->fast_div_bias = 0x4;
+                       }
+                       break;
+               case 0x21: /* LNA1 LNA2 */
+                       ant_conf->fast_div_bias = 0x6;
+                       break;
+               case 0x23: /* LNA1 A+B */
+                       if ((antcomb->scan == 0) &&
+                           (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)) {
+                               ant_conf->fast_div_bias = 0x3f;
+                       } else {
+                               ant_conf->fast_div_bias = 0x6;
+                       }
+                       break;
+               case 0x30: /* A+B A-B */
+                       ant_conf->fast_div_bias = 0x1;
+                       break;
+               case 0x31: /* A+B LNA2 */
+                       ant_conf->fast_div_bias = 0x6;
+                       break;
+               case 0x32: /* A+B LNA1 */
+                       ant_conf->fast_div_bias = 0x1;
                        break;
                default:
                        break;
@@ -759,6 +776,7 @@ div_comb_done:
 void ath_ant_comb_update(struct ath_softc *sc)
 {
        struct ath_hw *ah = sc->sc_ah;
+       struct ath_common *common = ath9k_hw_common(ah);
        struct ath_hw_antcomb_conf div_ant_conf;
        u8 lna_conf;
 
@@ -773,4 +791,7 @@ void ath_ant_comb_update(struct ath_softc *sc)
        div_ant_conf.alt_lna_conf = lna_conf;
 
        ath9k_hw_antdiv_comb_conf_set(ah, &div_ant_conf);
+
+       if (common->antenna_diversity)
+               ath9k_hw_antctrl_shared_chain_lnadiv(ah, true);
 }
index d066f2516e4753617aa55f1522427eb96f4a1115..5bbe5057ba18ae35408c2aa78dfbdf4d5cefdf0f 100644 (file)
@@ -138,7 +138,8 @@ static const struct ar9300_eeprom ar9300_default = {
         },
        .base_ext1 = {
                .ant_div_control = 0,
-               .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+               .future = {0, 0, 0},
+               .tempslopextension = {0, 0, 0, 0, 0, 0, 0, 0}
        },
        .calFreqPier2G = {
                FREQ2FBIN(2412, 1),
@@ -713,7 +714,8 @@ static const struct ar9300_eeprom ar9300_x113 = {
         },
         .base_ext1 = {
                .ant_div_control = 0,
-               .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+               .future = {0, 0, 0},
+               .tempslopextension = {0, 0, 0, 0, 0, 0, 0, 0}
         },
        .calFreqPier2G = {
                FREQ2FBIN(2412, 1),
@@ -1289,7 +1291,8 @@ static const struct ar9300_eeprom ar9300_h112 = {
        },
        .base_ext1 = {
                .ant_div_control = 0,
-               .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+               .future = {0, 0, 0},
+               .tempslopextension = {0, 0, 0, 0, 0, 0, 0, 0}
        },
        .calFreqPier2G = {
                FREQ2FBIN(2412, 1),
@@ -1865,7 +1868,8 @@ static const struct ar9300_eeprom ar9300_x112 = {
        },
        .base_ext1 = {
                .ant_div_control = 0,
-               .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+               .future = {0, 0, 0},
+               .tempslopextension = {0, 0, 0, 0, 0, 0, 0, 0}
        },
        .calFreqPier2G = {
                FREQ2FBIN(2412, 1),
@@ -2440,7 +2444,8 @@ static const struct ar9300_eeprom ar9300_h116 = {
         },
         .base_ext1 = {
                .ant_div_control = 0,
-               .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+               .future = {0, 0, 0},
+               .tempslopextension = {0, 0, 0, 0, 0, 0, 0, 0}
         },
        .calFreqPier2G = {
                FREQ2FBIN(2412, 1),
@@ -3524,7 +3529,7 @@ static void ar9003_hw_xpa_bias_level_apply(struct ath_hw *ah, bool is2ghz)
 
        if (AR_SREV_9485(ah) || AR_SREV_9330(ah) || AR_SREV_9340(ah))
                REG_RMW_FIELD(ah, AR_CH0_TOP2, AR_CH0_TOP2_XPABIASLVL, bias);
-       else if (AR_SREV_9462(ah) || AR_SREV_9550(ah))
+       else if (AR_SREV_9462(ah) || AR_SREV_9550(ah) || AR_SREV_9565(ah))
                REG_RMW_FIELD(ah, AR_CH0_TOP, AR_CH0_TOP_XPABIASLVL, bias);
        else {
                REG_RMW_FIELD(ah, AR_CH0_TOP, AR_CH0_TOP_XPABIASLVL, bias);
@@ -3561,9 +3566,9 @@ static u16 ar9003_hw_ant_ctrl_chain_get(struct ath_hw *ah, int chain,
 
 static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
 {
+       struct ath9k_hw_capabilities *pCap = &ah->caps;
        int chain;
        u32 regval;
-       u32 ant_div_ctl1;
        static const u32 switch_chain_reg[AR9300_MAX_CHAINS] = {
                        AR_PHY_SWITCH_CHAIN_0,
                        AR_PHY_SWITCH_CHAIN_1,
@@ -3572,7 +3577,7 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
 
        u32 value = ar9003_hw_ant_ctrl_common_get(ah, is2ghz);
 
-       if (AR_SREV_9462(ah)) {
+       if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
                REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM,
                                AR_SWITCH_TABLE_COM_AR9462_ALL, value);
        } else if (AR_SREV_9550(ah)) {
@@ -3616,7 +3621,7 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
                }
        }
 
-       if (AR_SREV_9330(ah) || AR_SREV_9485(ah)) {
+       if (AR_SREV_9330(ah) || AR_SREV_9485(ah) || AR_SREV_9565(ah)) {
                value = ath9k_hw_ar9300_get_eeprom(ah, EEP_ANT_DIV_CTL1);
                /*
                 * main_lnaconf, alt_lnaconf, main_tb, alt_tb
@@ -3626,41 +3631,44 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
                regval &= (~AR_ANT_DIV_CTRL_ALL);
                regval |= (value & 0x3f) << AR_ANT_DIV_CTRL_ALL_S;
                /* enable_lnadiv */
-               regval &= (~AR_PHY_9485_ANT_DIV_LNADIV);
-               regval |= ((value >> 6) & 0x1) <<
-                               AR_PHY_9485_ANT_DIV_LNADIV_S;
+               regval &= (~AR_PHY_ANT_DIV_LNADIV);
+               regval |= ((value >> 6) & 0x1) << AR_PHY_ANT_DIV_LNADIV_S;
+
+               if (AR_SREV_9565(ah)) {
+                       if (ah->shared_chain_lnadiv) {
+                               regval |= (1 << AR_PHY_ANT_SW_RX_PROT_S);
+                       } else {
+                               regval &= ~(1 << AR_PHY_ANT_DIV_LNADIV_S);
+                               regval &= ~(1 << AR_PHY_ANT_SW_RX_PROT_S);
+                       }
+               }
+
                REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);
 
                /*enable fast_div */
                regval = REG_READ(ah, AR_PHY_CCK_DETECT);
                regval &= (~AR_FAST_DIV_ENABLE);
-               regval |= ((value >> 7) & 0x1) <<
-                               AR_FAST_DIV_ENABLE_S;
+               regval |= ((value >> 7) & 0x1) << AR_FAST_DIV_ENABLE_S;
                REG_WRITE(ah, AR_PHY_CCK_DETECT, regval);
-               ant_div_ctl1 =
-                       ah->eep_ops->get_eeprom(ah, EEP_ANT_DIV_CTL1);
-               /* check whether antenna diversity is enabled */
-               if ((ant_div_ctl1 >> 0x6) == 0x3) {
+
+               if (pCap->hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) {
                        regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL);
                        /*
                         * clear bits 25-30 main_lnaconf, alt_lnaconf,
                         * main_tb, alt_tb
                         */
-                       regval &= (~(AR_PHY_9485_ANT_DIV_MAIN_LNACONF |
-                                       AR_PHY_9485_ANT_DIV_ALT_LNACONF |
-                                       AR_PHY_9485_ANT_DIV_ALT_GAINTB |
-                                       AR_PHY_9485_ANT_DIV_MAIN_GAINTB));
+                       regval &= (~(AR_PHY_ANT_DIV_MAIN_LNACONF |
+                                    AR_PHY_ANT_DIV_ALT_LNACONF |
+                                    AR_PHY_ANT_DIV_ALT_GAINTB |
+                                    AR_PHY_ANT_DIV_MAIN_GAINTB));
                        /* by default use LNA1 for the main antenna */
-                       regval |= (AR_PHY_9485_ANT_DIV_LNA1 <<
-                                       AR_PHY_9485_ANT_DIV_MAIN_LNACONF_S);
-                       regval |= (AR_PHY_9485_ANT_DIV_LNA2 <<
-                                       AR_PHY_9485_ANT_DIV_ALT_LNACONF_S);
+                       regval |= (AR_PHY_ANT_DIV_LNA1 <<
+                                  AR_PHY_ANT_DIV_MAIN_LNACONF_S);
+                       regval |= (AR_PHY_ANT_DIV_LNA2 <<
+                                  AR_PHY_ANT_DIV_ALT_LNACONF_S);
                        REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);
                }
-
-
        }
-
 }
 
 static void ar9003_hw_drive_strength_apply(struct ath_hw *ah)
@@ -3847,7 +3855,7 @@ void ar9003_hw_internal_regulator_apply(struct ath_hw *ah)
                        REG_WRITE(ah, AR_PHY_PMU2, reg_pmu_set);
                        if (!is_pmu_set(ah, AR_PHY_PMU2, reg_pmu_set))
                                return;
-               } else if (AR_SREV_9462(ah)) {
+               } else if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
                        reg_val = le32_to_cpu(pBase->swreg);
                        REG_WRITE(ah, AR_PHY_PMU1, reg_val);
                } else {
@@ -3878,7 +3886,7 @@ void ar9003_hw_internal_regulator_apply(struct ath_hw *ah)
                        while (!REG_READ_FIELD(ah, AR_PHY_PMU2,
                                                AR_PHY_PMU2_PGM))
                                udelay(10);
-               } else if (AR_SREV_9462(ah))
+               } else if (AR_SREV_9462(ah) || AR_SREV_9565(ah))
                        REG_RMW_FIELD(ah, AR_PHY_PMU1, AR_PHY_PMU1_PWD, 0x1);
                else {
                        reg_val = REG_READ(ah, AR_RTC_SLEEP_CLK) |
@@ -3981,6 +3989,62 @@ static void ar9003_hw_xlna_bias_strength_apply(struct ath_hw *ah, bool is2ghz)
                      bias & 0x3);
 }
 
+static int ar9003_hw_get_thermometer(struct ath_hw *ah)
+{
+       struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+       struct ar9300_base_eep_hdr *pBase = &eep->baseEepHeader;
+       int thermometer =  (pBase->miscConfiguration >> 1) & 0x3;
+
+       return --thermometer;
+}
+
+static void ar9003_hw_thermometer_apply(struct ath_hw *ah)
+{
+       int thermometer = ar9003_hw_get_thermometer(ah);
+       u8 therm_on = (thermometer < 0) ? 0 : 1;
+
+       REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_RXTX4,
+                     AR_PHY_65NM_CH0_RXTX4_THERM_ON_OVR, therm_on);
+       if (ah->caps.tx_chainmask & BIT(1))
+               REG_RMW_FIELD(ah, AR_PHY_65NM_CH1_RXTX4,
+                             AR_PHY_65NM_CH0_RXTX4_THERM_ON_OVR, therm_on);
+       if (ah->caps.tx_chainmask & BIT(2))
+               REG_RMW_FIELD(ah, AR_PHY_65NM_CH2_RXTX4,
+                             AR_PHY_65NM_CH0_RXTX4_THERM_ON_OVR, therm_on);
+
+       therm_on = (thermometer < 0) ? 0 : (thermometer == 0);
+       REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_RXTX4,
+                     AR_PHY_65NM_CH0_RXTX4_THERM_ON, therm_on);
+       if (ah->caps.tx_chainmask & BIT(1)) {
+               therm_on = (thermometer < 0) ? 0 : (thermometer == 1);
+               REG_RMW_FIELD(ah, AR_PHY_65NM_CH1_RXTX4,
+                             AR_PHY_65NM_CH0_RXTX4_THERM_ON, therm_on);
+       }
+       if (ah->caps.tx_chainmask & BIT(2)) {
+               therm_on = (thermometer < 0) ? 0 : (thermometer == 2);
+               REG_RMW_FIELD(ah, AR_PHY_65NM_CH2_RXTX4,
+                             AR_PHY_65NM_CH0_RXTX4_THERM_ON, therm_on);
+       }
+}
+
+static void ar9003_hw_thermo_cal_apply(struct ath_hw *ah)
+{
+       u32 data, ko, kg;
+
+       if (!AR_SREV_9462_20(ah))
+               return;
+       ar9300_otp_read_word(ah, 1, &data);
+       ko = data & 0xff;
+       kg = (data >> 8) & 0xff;
+       if (ko || kg) {
+               REG_RMW_FIELD(ah, AR_PHY_BB_THERM_ADC_3,
+                             AR_PHY_BB_THERM_ADC_3_THERM_ADC_OFFSET, ko);
+               REG_RMW_FIELD(ah, AR_PHY_BB_THERM_ADC_3,
+                             AR_PHY_BB_THERM_ADC_3_THERM_ADC_SCALE_GAIN,
+                             kg + 256);
+       }
+}
+
 static void ath9k_hw_ar9300_set_board_values(struct ath_hw *ah,
                                             struct ath9k_channel *chan)
 {
@@ -3996,6 +4060,8 @@ static void ath9k_hw_ar9300_set_board_values(struct ath_hw *ah,
                ar9003_hw_internal_regulator_apply(ah);
        ar9003_hw_apply_tuning_caps(ah);
        ar9003_hw_txend_to_xpa_off_apply(ah, is2ghz);
+       ar9003_hw_thermometer_apply(ah);
+       ar9003_hw_thermo_cal_apply(ah);
 }
 
 static void ath9k_hw_ar9300_set_addac(struct ath_hw *ah,
@@ -4532,7 +4598,7 @@ static int ar9003_hw_power_control_override(struct ath_hw *ah,
 {
        int tempSlope = 0;
        struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
-       int f[3], t[3];
+       int f[8], t[8], i;
 
        REG_RMW(ah, AR_PHY_TPC_11_B0,
                (correction[0] << AR_PHY_TPC_OLPC_GAIN_DELTA_S),
@@ -4565,7 +4631,14 @@ static int ar9003_hw_power_control_override(struct ath_hw *ah,
         */
        if (frequency < 4000)
                tempSlope = eep->modalHeader2G.tempSlope;
-       else if (eep->base_ext2.tempSlopeLow != 0) {
+       else if ((eep->baseEepHeader.miscConfiguration & 0x20) != 0) {
+               for (i = 0; i < 8; i++) {
+                       t[i] = eep->base_ext1.tempslopextension[i];
+                       f[i] = FBIN2FREQ(eep->calFreqPier5G[i], 0);
+               }
+               tempSlope = ar9003_hw_power_interpolate((s32) frequency,
+                                                       f, t, 8);
+       } else if (eep->base_ext2.tempSlopeLow != 0) {
                t[0] = eep->base_ext2.tempSlopeLow;
                f[0] = 5180;
                t[1] = eep->modalHeader5G.tempSlope;
@@ -4905,90 +4978,79 @@ static void ar9003_hw_set_power_per_rate_table(struct ath_hw *ah,
                                i, cfgCtl, pCtlMode[ctlMode], ctlIndex[i],
                                chan->channel);
 
-                               /*
-                                * compare test group from regulatory
-                                * channel list with test mode from pCtlMode
-                                * list
-                                */
-                               if ((((cfgCtl & ~CTL_MODE_M) |
-                                      (pCtlMode[ctlMode] & CTL_MODE_M)) ==
-                                       ctlIndex[i]) ||
-                                   (((cfgCtl & ~CTL_MODE_M) |
-                                      (pCtlMode[ctlMode] & CTL_MODE_M)) ==
-                                    ((ctlIndex[i] & CTL_MODE_M) |
-                                      SD_NO_CTL))) {
-                                       twiceMinEdgePower =
-                                         ar9003_hw_get_max_edge_power(pEepData,
-                                                                      freq, i,
-                                                                      is2ghz);
-
-                                       if ((cfgCtl & ~CTL_MODE_M) == SD_NO_CTL)
-                                               /*
-                                                * Find the minimum of all CTL
-                                                * edge powers that apply to
-                                                * this channel
-                                                */
-                                               twiceMaxEdgePower =
-                                                       min(twiceMaxEdgePower,
-                                                           twiceMinEdgePower);
-                                               else {
-                                                       /* specific */
-                                                       twiceMaxEdgePower =
-                                                         twiceMinEdgePower;
-                                                       break;
-                                               }
+                       /*
+                        * compare test group from regulatory
+                        * channel list with test mode from pCtlMode
+                        * list
+                        */
+                       if ((((cfgCtl & ~CTL_MODE_M) |
+                              (pCtlMode[ctlMode] & CTL_MODE_M)) ==
+                               ctlIndex[i]) ||
+                           (((cfgCtl & ~CTL_MODE_M) |
+                              (pCtlMode[ctlMode] & CTL_MODE_M)) ==
+                            ((ctlIndex[i] & CTL_MODE_M) |
+                              SD_NO_CTL))) {
+                               twiceMinEdgePower =
+                                 ar9003_hw_get_max_edge_power(pEepData,
+                                                              freq, i,
+                                                              is2ghz);
+
+                               if ((cfgCtl & ~CTL_MODE_M) == SD_NO_CTL)
+                                       /*
+                                        * Find the minimum of all CTL
+                                        * edge powers that apply to
+                                        * this channel
+                                        */
+                                       twiceMaxEdgePower =
+                                               min(twiceMaxEdgePower,
+                                                   twiceMinEdgePower);
+                               else {
+                                       /* specific */
+                                       twiceMaxEdgePower = twiceMinEdgePower;
+                                       break;
                                }
                        }
+               }
 
-                       minCtlPower = (u8)min(twiceMaxEdgePower, scaledPower);
+               minCtlPower = (u8)min(twiceMaxEdgePower, scaledPower);
 
-                       ath_dbg(common, REGULATORY,
-                               "SEL-Min ctlMode %d pCtlMode %d 2xMaxEdge %d sP %d minCtlPwr %d\n",
-                               ctlMode, pCtlMode[ctlMode], twiceMaxEdgePower,
-                               scaledPower, minCtlPower);
-
-                       /* Apply ctl mode to correct target power set */
-                       switch (pCtlMode[ctlMode]) {
-                       case CTL_11B:
-                               for (i = ALL_TARGET_LEGACY_1L_5L;
-                                    i <= ALL_TARGET_LEGACY_11S; i++)
-                                       pPwrArray[i] =
-                                         (u8)min((u16)pPwrArray[i],
-                                                 minCtlPower);
-                               break;
-                       case CTL_11A:
-                       case CTL_11G:
-                               for (i = ALL_TARGET_LEGACY_6_24;
-                                    i <= ALL_TARGET_LEGACY_54; i++)
-                                       pPwrArray[i] =
-                                         (u8)min((u16)pPwrArray[i],
-                                                 minCtlPower);
-                               break;
-                       case CTL_5GHT20:
-                       case CTL_2GHT20:
-                               for (i = ALL_TARGET_HT20_0_8_16;
-                                    i <= ALL_TARGET_HT20_21; i++)
-                                       pPwrArray[i] =
-                                         (u8)min((u16)pPwrArray[i],
-                                                 minCtlPower);
-                               pPwrArray[ALL_TARGET_HT20_22] =
-                                 (u8)min((u16)pPwrArray[ALL_TARGET_HT20_22],
-                                         minCtlPower);
-                               pPwrArray[ALL_TARGET_HT20_23] =
-                                 (u8)min((u16)pPwrArray[ALL_TARGET_HT20_23],
-                                          minCtlPower);
-                               break;
-                       case CTL_5GHT40:
-                       case CTL_2GHT40:
-                               for (i = ALL_TARGET_HT40_0_8_16;
-                                    i <= ALL_TARGET_HT40_23; i++)
-                                       pPwrArray[i] =
-                                         (u8)min((u16)pPwrArray[i],
-                                                 minCtlPower);
-                               break;
-                       default:
-                           break;
-                       }
+               ath_dbg(common, REGULATORY,
+                       "SEL-Min ctlMode %d pCtlMode %d 2xMaxEdge %d sP %d minCtlPwr %d\n",
+                       ctlMode, pCtlMode[ctlMode], twiceMaxEdgePower,
+                       scaledPower, minCtlPower);
+
+               /* Apply ctl mode to correct target power set */
+               switch (pCtlMode[ctlMode]) {
+               case CTL_11B:
+                       for (i = ALL_TARGET_LEGACY_1L_5L;
+                            i <= ALL_TARGET_LEGACY_11S; i++)
+                               pPwrArray[i] = (u8)min((u16)pPwrArray[i],
+                                                      minCtlPower);
+                       break;
+               case CTL_11A:
+               case CTL_11G:
+                       for (i = ALL_TARGET_LEGACY_6_24;
+                            i <= ALL_TARGET_LEGACY_54; i++)
+                               pPwrArray[i] = (u8)min((u16)pPwrArray[i],
+                                                      minCtlPower);
+                       break;
+               case CTL_5GHT20:
+               case CTL_2GHT20:
+                       for (i = ALL_TARGET_HT20_0_8_16;
+                            i <= ALL_TARGET_HT20_23; i++)
+                               pPwrArray[i] = (u8)min((u16)pPwrArray[i],
+                                                      minCtlPower);
+                       break;
+               case CTL_5GHT40:
+               case CTL_2GHT40:
+                       for (i = ALL_TARGET_HT40_0_8_16;
+                            i <= ALL_TARGET_HT40_23; i++)
+                               pPwrArray[i] = (u8)min((u16)pPwrArray[i],
+                                                      minCtlPower);
+                       break;
+               default:
+                       break;
+               }
        } /* end ctl mode checking */
 }
 
index 3a1ff55bceb9011eac0610e78e42c4e5e4193f6e..41b1a75e6bec7c120264f526ccd1426cca4828a3 100644 (file)
@@ -267,7 +267,8 @@ struct cal_ctl_data_5g {
 
 struct ar9300_BaseExtension_1 {
        u8 ant_div_control;
-       u8 future[11];
+       u8 future[3];
+       u8 tempslopextension[8];
        int8_t quick_drop_low;
        int8_t quick_drop_high;
 } __packed;
index 1e8a4da5952f5217866765c52892e4d0a03555d8..1a36fa26263966e34bc6b952ffc64a0426387b11 100644 (file)
@@ -24,6 +24,7 @@
 #include "ar955x_1p0_initvals.h"
 #include "ar9580_1p0_initvals.h"
 #include "ar9462_2p0_initvals.h"
+#include "ar9565_1p0_initvals.h"
 
 /* General hardware code for the AR9003 hadware family */
 
  */
 static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
 {
-#define PCIE_PLL_ON_CREQ_DIS_L1_2P0 \
-               ar9462_pciephy_pll_on_clkreq_disable_L1_2p0
-
 #define AR9462_BB_CTX_COEFJ(x) \
                ar9462_##x##_baseband_core_txfir_coeff_japan_2484
 
 #define AR9462_BBC_TXIFR_COEFFJ \
                ar9462_2p0_baseband_core_txfir_coeff_japan_2484
+
        if (AR_SREV_9330_11(ah)) {
                /* mac */
                INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
@@ -220,10 +219,10 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
 
                /* Awake -> Sleep Setting */
                INIT_INI_ARRAY(&ah->iniPcieSerdes,
-                               PCIE_PLL_ON_CREQ_DIS_L1_2P0);
+                              ar9462_pciephy_pll_on_clkreq_disable_L1_2p0);
                /* Sleep -> Awake Setting */
                INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
-                               PCIE_PLL_ON_CREQ_DIS_L1_2P0);
+                              ar9462_pciephy_pll_on_clkreq_disable_L1_2p0);
 
                /* Fast clock modal settings */
                INIT_INI_ARRAY(&ah->iniModesFastClock,
@@ -302,6 +301,39 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
 
                INIT_INI_ARRAY(&ah->iniModesFastClock,
                                ar9580_1p0_modes_fast_clock);
+       } else if (AR_SREV_9565(ah)) {
+               INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
+                              ar9565_1p0_mac_core);
+               INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
+                              ar9565_1p0_mac_postamble);
+
+               INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
+                              ar9565_1p0_baseband_core);
+               INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
+                              ar9565_1p0_baseband_postamble);
+
+               INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
+                              ar9565_1p0_radio_core);
+               INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST],
+                              ar9565_1p0_radio_postamble);
+
+               INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE],
+                              ar9565_1p0_soc_preamble);
+               INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST],
+                              ar9565_1p0_soc_postamble);
+
+               INIT_INI_ARRAY(&ah->iniModesRxGain,
+                              ar9565_1p0_Common_rx_gain_table);
+               INIT_INI_ARRAY(&ah->iniModesTxGain,
+                              ar9565_1p0_Modes_lowest_ob_db_tx_gain_table);
+
+               INIT_INI_ARRAY(&ah->iniPcieSerdes,
+                              ar9565_1p0_pciephy_pll_on_clkreq_disable_L1);
+               INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
+                              ar9565_1p0_pciephy_pll_on_clkreq_disable_L1);
+
+               INIT_INI_ARRAY(&ah->iniModesFastClock,
+                               ar9565_1p0_modes_fast_clock);
        } else {
                /* mac */
                INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
@@ -374,6 +406,9 @@ static void ar9003_tx_gain_table_mode0(struct ath_hw *ah)
        else if (AR_SREV_9462_20(ah))
                INIT_INI_ARRAY(&ah->iniModesTxGain,
                        ar9462_modes_low_ob_db_tx_gain_table_2p0);
+       else if (AR_SREV_9565(ah))
+               INIT_INI_ARRAY(&ah->iniModesTxGain,
+                              ar9565_1p0_modes_low_ob_db_tx_gain_table);
        else
                INIT_INI_ARRAY(&ah->iniModesTxGain,
                        ar9300Modes_lowest_ob_db_tx_gain_table_2p2);
@@ -402,6 +437,9 @@ static void ar9003_tx_gain_table_mode1(struct ath_hw *ah)
        else if (AR_SREV_9462_20(ah))
                INIT_INI_ARRAY(&ah->iniModesTxGain,
                        ar9462_modes_high_ob_db_tx_gain_table_2p0);
+       else if (AR_SREV_9565(ah))
+               INIT_INI_ARRAY(&ah->iniModesTxGain,
+                              ar9565_1p0_modes_high_ob_db_tx_gain_table);
        else
                INIT_INI_ARRAY(&ah->iniModesTxGain,
                        ar9300Modes_high_ob_db_tx_gain_table_2p2);
@@ -424,6 +462,9 @@ static void ar9003_tx_gain_table_mode2(struct ath_hw *ah)
        else if (AR_SREV_9580(ah))
                INIT_INI_ARRAY(&ah->iniModesTxGain,
                        ar9580_1p0_low_ob_db_tx_gain_table);
+       else if (AR_SREV_9565(ah))
+               INIT_INI_ARRAY(&ah->iniModesTxGain,
+                              ar9565_1p0_modes_low_ob_db_tx_gain_table);
        else
                INIT_INI_ARRAY(&ah->iniModesTxGain,
                        ar9300Modes_low_ob_db_tx_gain_table_2p2);
@@ -446,6 +487,9 @@ static void ar9003_tx_gain_table_mode3(struct ath_hw *ah)
        else if (AR_SREV_9580(ah))
                INIT_INI_ARRAY(&ah->iniModesTxGain,
                        ar9580_1p0_high_power_tx_gain_table);
+       else if (AR_SREV_9565(ah))
+               INIT_INI_ARRAY(&ah->iniModesTxGain,
+                              ar9565_1p0_modes_high_power_tx_gain_table);
        else
                INIT_INI_ARRAY(&ah->iniModesTxGain,
                        ar9300Modes_high_power_tx_gain_table_2p2);
@@ -538,6 +582,9 @@ static void ar9003_rx_gain_table_mode1(struct ath_hw *ah)
        } else if (AR_SREV_9580(ah))
                INIT_INI_ARRAY(&ah->iniModesRxGain,
                        ar9580_1p0_wo_xlna_rx_gain_table);
+       else if (AR_SREV_9565(ah))
+               INIT_INI_ARRAY(&ah->iniModesRxGain,
+                              ar9565_1p0_common_wo_xlna_rx_gain_table);
        else
                INIT_INI_ARRAY(&ah->iniModesRxGain,
                        ar9300Common_wo_xlna_rx_gain_table_2p2);
index 78816b8b2173cf8e238d56e1fcf981bf027ff009..301bf72c53bf5ce3de62825d87f025b0a5fb82e5 100644 (file)
@@ -31,7 +31,7 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
        u32 val, ctl12, ctl17;
        u8 desc_len;
 
-       desc_len = (AR_SREV_9462(ah) ? 0x18 : 0x17);
+       desc_len = ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x18 : 0x17);
 
        val = (ATHEROS_VENDOR_ID << AR_DescId_S) |
              (1 << AR_TxRxDesc_S) |
@@ -182,6 +182,7 @@ static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
        struct ath9k_hw_capabilities *pCap = &ah->caps;
        struct ath_common *common = ath9k_hw_common(ah);
        u32 sync_cause = 0, async_cause, async_mask = AR_INTR_MAC_IRQ;
+       bool fatal_int;
 
        if (ath9k_hw_mci_is_enabled(ah))
                async_mask |= AR_INTR_ASYNC_MASK_MCI;
@@ -310,6 +311,22 @@ static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
 
        if (sync_cause) {
                ath9k_debug_sync_cause(common, sync_cause);
+               fatal_int =
+                       (sync_cause &
+                        (AR_INTR_SYNC_HOST1_FATAL | AR_INTR_SYNC_HOST1_PERR))
+                       ? true : false;
+
+               if (fatal_int) {
+                       if (sync_cause & AR_INTR_SYNC_HOST1_FATAL) {
+                               ath_dbg(common, ANY,
+                                       "received PCI FATAL interrupt\n");
+                       }
+                       if (sync_cause & AR_INTR_SYNC_HOST1_PERR) {
+                               ath_dbg(common, ANY,
+                                       "received PCI PERR interrupt\n");
+                       }
+                       *masked |= ATH9K_INT_FATAL;
+               }
 
                if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT) {
                        REG_WRITE(ah, AR_RC, AR_RC_HOSTIF);
@@ -531,7 +548,7 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs,
                                rxs->rs_status |= ATH9K_RXERR_PHY;
                                rxs->rs_phyerr = phyerr;
                        }
-               };
+               }
        }
 
        if (rxsp->status11 & AR_KeyMiss)
index 9a34fcaae3ff59621d38f69bfafa58cdcb6a2d03..44c202ce6c66bb12544ddd6673abd6405962f5cc 100644 (file)
@@ -714,6 +714,7 @@ bool ar9003_mci_start_reset(struct ath_hw *ah, struct ath9k_channel *chan)
 
        return true;
 }
+EXPORT_SYMBOL(ar9003_mci_start_reset);
 
 int ar9003_mci_end_reset(struct ath_hw *ah, struct ath9k_channel *chan,
                         struct ath9k_hw_cal_data *caldata)
@@ -812,8 +813,8 @@ static void ar9003_mci_osla_setup(struct ath_hw *ah, bool enable)
                      AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN, 1);
 }
 
-void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
-                     bool is_full_sleep)
+int ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
+                    bool is_full_sleep)
 {
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
@@ -823,14 +824,13 @@ void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
                is_full_sleep, is_2g);
 
        if (!mci->gpm_addr && !mci->sched_addr) {
-               ath_dbg(common, MCI,
-                       "MCI GPM and schedule buffers are not allocated\n");
-               return;
+               ath_err(common, "MCI GPM and schedule buffers are not allocated\n");
+               return -ENOMEM;
        }
 
        if (REG_READ(ah, AR_BTCOEX_CTRL) == 0xdeadbeef) {
-               ath_dbg(common, MCI, "BTCOEX control register is dead\n");
-               return;
+               ath_err(common, "BTCOEX control register is dead\n");
+               return -EINVAL;
        }
 
        /* Program MCI DMA related registers */
@@ -912,6 +912,8 @@ void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
 
        if (en_int)
                ar9003_mci_enable_interrupt(ah);
+
+       return 0;
 }
 
 void ar9003_mci_stop_bt(struct ath_hw *ah, bool save_fullsleep)
@@ -1026,6 +1028,7 @@ void ar9003_mci_2g5g_switch(struct ath_hw *ah, bool force)
 
                if (!(mci->config & ATH_MCI_CONFIG_DISABLE_OSLA))
                        ar9003_mci_osla_setup(ah, true);
+               REG_WRITE(ah, AR_SELFGEN_MASK, 0x02);
        } else {
                ar9003_mci_send_lna_take(ah, true);
                udelay(5);
@@ -1142,8 +1145,8 @@ void ar9003_mci_init_cal_done(struct ath_hw *ah)
        ar9003_mci_send_message(ah, MCI_GPM, 0, pld, 16, true, false);
 }
 
-void ar9003_mci_setup(struct ath_hw *ah, u32 gpm_addr, void *gpm_buf,
-                     u16 len, u32 sched_addr)
+int ar9003_mci_setup(struct ath_hw *ah, u32 gpm_addr, void *gpm_buf,
+                    u16 len, u32 sched_addr)
 {
        struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
 
@@ -1152,7 +1155,7 @@ void ar9003_mci_setup(struct ath_hw *ah, u32 gpm_addr, void *gpm_buf,
        mci->gpm_len = len;
        mci->sched_addr = sched_addr;
 
-       ar9003_mci_reset(ah, true, true, true);
+       return ar9003_mci_reset(ah, true, true, true);
 }
 EXPORT_SYMBOL(ar9003_mci_setup);
 
@@ -1201,12 +1204,6 @@ u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type)
 
                ar9003_mci_2g5g_switch(ah, false);
                break;
-       case MCI_STATE_SET_BT_CAL_START:
-               mci->bt_state = MCI_BT_CAL_START;
-               break;
-       case MCI_STATE_SET_BT_CAL:
-               mci->bt_state = MCI_BT_CAL;
-               break;
        case MCI_STATE_RESET_REQ_WAKE:
                ar9003_mci_reset_req_wakeup(ah);
                mci->update_2g5g = true;
@@ -1240,6 +1237,10 @@ u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type)
        case MCI_STATE_NEED_FTP_STOMP:
                value = !(mci->config & ATH_MCI_CONFIG_DISABLE_FTP_STOMP);
                break;
+       case MCI_STATE_NEED_FLUSH_BT_INFO:
+               value = (!mci->unhalt_bt_gpm && mci->need_flush_btinfo) ? 1 : 0;
+               mci->need_flush_btinfo = false;
+               break;
        default:
                break;
        }
@@ -1289,7 +1290,7 @@ void ar9003_mci_set_power_awake(struct ath_hw *ah)
        }
        REG_WRITE(ah, AR_DIAG_SW, (diag_sw | BIT(27) | BIT(19) | BIT(18)));
        lna_ctrl = REG_READ(ah, AR_OBS_BUS_CTRL) & 0x3;
-       bt_sleep = REG_READ(ah, AR_MCI_RX_STATUS) & AR_MCI_RX_REMOTE_SLEEP;
+       bt_sleep = MS(REG_READ(ah, AR_MCI_RX_STATUS), AR_MCI_RX_REMOTE_SLEEP);
 
        REG_WRITE(ah, AR_BTCOEX_CTRL2, btcoex_ctrl2);
        REG_WRITE(ah, AR_DIAG_SW, diag_sw);
@@ -1327,6 +1328,10 @@ u32 ar9003_mci_get_next_gpm_offset(struct ath_hw *ah, bool first, u32 *more)
 
        if (first) {
                gpm_ptr = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR);
+
+               if (gpm_ptr >= mci->gpm_len)
+                       gpm_ptr = 0;
+
                mci->gpm_idx = gpm_ptr;
                return gpm_ptr;
        }
@@ -1371,6 +1376,10 @@ u32 ar9003_mci_get_next_gpm_offset(struct ath_hw *ah, bool first, u32 *more)
                        more_gpm = MCI_GPM_NOMORE;
 
                temp_index = mci->gpm_idx;
+
+               if (temp_index >= mci->gpm_len)
+                       temp_index = 0;
+
                mci->gpm_idx++;
 
                if (mci->gpm_idx >= mci->gpm_len)
index d33b8e1288554dd502dc290d68c8c45684d2e1a8..2a2d01889613a610a8cb6084e6f0705234867123 100644 (file)
@@ -190,8 +190,6 @@ enum mci_bt_state {
 enum mci_state_type {
        MCI_STATE_ENABLE,
        MCI_STATE_SET_BT_AWAKE,
-       MCI_STATE_SET_BT_CAL_START,
-       MCI_STATE_SET_BT_CAL,
        MCI_STATE_LAST_SCHD_MSG_OFFSET,
        MCI_STATE_REMOTE_SLEEP,
        MCI_STATE_RESET_REQ_WAKE,
@@ -202,6 +200,7 @@ enum mci_state_type {
        MCI_STATE_RECOVER_RX,
        MCI_STATE_NEED_FTP_STOMP,
        MCI_STATE_DEBUG,
+       MCI_STATE_NEED_FLUSH_BT_INFO,
        MCI_STATE_MAX
 };
 
@@ -213,7 +212,8 @@ enum mci_gpm_coex_opcode {
        MCI_GPM_COEX_WLAN_CHANNELS,
        MCI_GPM_COEX_BT_PROFILE_INFO,
        MCI_GPM_COEX_BT_STATUS_UPDATE,
-       MCI_GPM_COEX_BT_UPDATE_FLAGS
+       MCI_GPM_COEX_BT_UPDATE_FLAGS,
+       MCI_GPM_COEX_NOOP,
 };
 
 #define MCI_GPM_NOMORE  0
@@ -249,8 +249,8 @@ bool ar9003_mci_send_message(struct ath_hw *ah, u8 header, u32 flag,
                             u32 *payload, u8 len, bool wait_done,
                             bool check_bt);
 u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type);
-void ar9003_mci_setup(struct ath_hw *ah, u32 gpm_addr, void *gpm_buf,
-                     u16 len, u32 sched_addr);
+int ar9003_mci_setup(struct ath_hw *ah, u32 gpm_addr, void *gpm_buf,
+                    u16 len, u32 sched_addr);
 void ar9003_mci_cleanup(struct ath_hw *ah);
 void ar9003_mci_get_interrupt(struct ath_hw *ah, u32 *raw_intr,
                              u32 *rx_msg_intr);
@@ -272,8 +272,8 @@ void ar9003_mci_check_bt(struct ath_hw *ah);
 bool ar9003_mci_start_reset(struct ath_hw *ah, struct ath9k_channel *chan);
 int ar9003_mci_end_reset(struct ath_hw *ah, struct ath9k_channel *chan,
                         struct ath9k_hw_cal_data *caldata);
-void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
-                     bool is_full_sleep);
+int ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
+                    bool is_full_sleep);
 void ar9003_mci_get_isr(struct ath_hw *ah, enum ath9k_int *masked);
 void ar9003_mci_bt_gain_ctrl(struct ath_hw *ah);
 void ar9003_mci_set_power_awake(struct ath_hw *ah);
index e476f9f92ce3bed0992873283e99a4f75e78a5be..759f5f5a715469bb43c054b45d4d8ed5f6917302 100644 (file)
@@ -88,7 +88,7 @@ static int ar9003_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
                        channelSel = (freq * 4) / div;
                        chan_frac = (((freq * 4) % div) * 0x20000) / div;
                        channelSel = (channelSel << 17) | chan_frac;
-               } else if (AR_SREV_9485(ah)) {
+               } else if (AR_SREV_9485(ah) || AR_SREV_9565(ah)) {
                        u32 chan_frac;
 
                        /*
@@ -206,6 +206,7 @@ static void ar9003_hw_spur_mitigate_mrc_cck(struct ath_hw *ah,
        for (i = 0; i < max_spur_cnts; i++) {
                if (AR_SREV_9462(ah) && (i == 0 || i == 3))
                        continue;
+
                negative = 0;
                if (AR_SREV_9485(ah) || AR_SREV_9340(ah) || AR_SREV_9330(ah) ||
                    AR_SREV_9550(ah))
@@ -301,7 +302,9 @@ static void ar9003_hw_spur_ofdm(struct ath_hw *ah,
                                int freq_offset,
                                int spur_freq_sd,
                                int spur_delta_phase,
-                               int spur_subchannel_sd)
+                               int spur_subchannel_sd,
+                               int range,
+                               int synth_freq)
 {
        int mask_index = 0;
 
@@ -316,8 +319,11 @@ static void ar9003_hw_spur_ofdm(struct ath_hw *ah,
                      AR_PHY_SFCORR_EXT_SPUR_SUBCHANNEL_SD, spur_subchannel_sd);
        REG_RMW_FIELD(ah, AR_PHY_TIMING11,
                      AR_PHY_TIMING11_USE_SPUR_FILTER_IN_AGC, 0x1);
-       REG_RMW_FIELD(ah, AR_PHY_TIMING11,
-                     AR_PHY_TIMING11_USE_SPUR_FILTER_IN_SELFCOR, 0x1);
+
+       if (!(AR_SREV_9565(ah) && range == 10 && synth_freq == 2437))
+               REG_RMW_FIELD(ah, AR_PHY_TIMING11,
+                             AR_PHY_TIMING11_USE_SPUR_FILTER_IN_SELFCOR, 0x1);
+
        REG_RMW_FIELD(ah, AR_PHY_TIMING4,
                      AR_PHY_TIMING4_ENABLE_SPUR_RSSI, 0x1);
        REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
@@ -358,9 +364,44 @@ static void ar9003_hw_spur_ofdm(struct ath_hw *ah,
                      AR_PHY_SPUR_REG_MASK_RATE_CNTL, 0xff);
 }
 
+static void ar9003_hw_spur_ofdm_9565(struct ath_hw *ah,
+                                    int freq_offset)
+{
+       int mask_index = 0;
+
+       mask_index = (freq_offset << 4) / 5;
+       if (mask_index < 0)
+               mask_index = mask_index - 1;
+
+       mask_index = mask_index & 0x7f;
+
+       REG_RMW_FIELD(ah, AR_PHY_PILOT_SPUR_MASK,
+                     AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_B,
+                     mask_index);
+
+       /* A == B */
+       REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_B,
+                     AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_IDX_A,
+                     mask_index);
+
+       REG_RMW_FIELD(ah, AR_PHY_CHAN_SPUR_MASK,
+                     AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_B,
+                     mask_index);
+       REG_RMW_FIELD(ah, AR_PHY_PILOT_SPUR_MASK,
+                     AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_B, 0xe);
+       REG_RMW_FIELD(ah, AR_PHY_CHAN_SPUR_MASK,
+                     AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_B, 0xe);
+
+       /* A == B */
+       REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_B,
+                     AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_A, 0xa0);
+}
+
 static void ar9003_hw_spur_ofdm_work(struct ath_hw *ah,
                                     struct ath9k_channel *chan,
-                                    int freq_offset)
+                                    int freq_offset,
+                                    int range,
+                                    int synth_freq)
 {
        int spur_freq_sd = 0;
        int spur_subchannel_sd = 0;
@@ -402,7 +443,8 @@ static void ar9003_hw_spur_ofdm_work(struct ath_hw *ah,
                            freq_offset,
                            spur_freq_sd,
                            spur_delta_phase,
-                           spur_subchannel_sd);
+                           spur_subchannel_sd,
+                           range, synth_freq);
 }
 
 /* Spur mitigation for OFDM */
@@ -447,7 +489,17 @@ static void ar9003_hw_spur_mitigate_ofdm(struct ath_hw *ah,
                freq_offset = ath9k_hw_fbin2freq(spurChansPtr[i], mode);
                freq_offset -= synth_freq;
                if (abs(freq_offset) < range) {
-                       ar9003_hw_spur_ofdm_work(ah, chan, freq_offset);
+                       ar9003_hw_spur_ofdm_work(ah, chan, freq_offset,
+                                                range, synth_freq);
+
+                       if (AR_SREV_9565(ah) && (i < 4)) {
+                               freq_offset = ath9k_hw_fbin2freq(spurChansPtr[i + 1],
+                                                                mode);
+                               freq_offset -= synth_freq;
+                               if (abs(freq_offset) < range)
+                                       ar9003_hw_spur_ofdm_9565(ah, freq_offset);
+                       }
+
                        break;
                }
        }
@@ -456,7 +508,8 @@ static void ar9003_hw_spur_mitigate_ofdm(struct ath_hw *ah,
 static void ar9003_hw_spur_mitigate(struct ath_hw *ah,
                                    struct ath9k_channel *chan)
 {
-       ar9003_hw_spur_mitigate_mrc_cck(ah, chan);
+       if (!AR_SREV_9565(ah))
+               ar9003_hw_spur_mitigate_mrc_cck(ah, chan);
        ar9003_hw_spur_mitigate_ofdm(ah, chan);
 }
 
@@ -552,9 +605,6 @@ static void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx)
 
        if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) && (tx == 0x7))
                REG_WRITE(ah, AR_SELFGEN_MASK, 0x3);
-       else if (AR_SREV_9462(ah))
-               /* xxx only when MCI support is enabled */
-               REG_WRITE(ah, AR_SELFGEN_MASK, 0x3);
        else
                REG_WRITE(ah, AR_SELFGEN_MASK, tx);
 
@@ -736,7 +786,7 @@ static int ar9003_hw_process_ini(struct ath_hw *ah,
        if (chan->channel == 2484)
                ar9003_hw_prog_ini(ah, &ah->ini_japan2484, 1);
 
-       if (AR_SREV_9462(ah))
+       if (AR_SREV_9462(ah) || AR_SREV_9565(ah))
                REG_WRITE(ah, AR_GLB_SWREG_DISCONT_MODE,
                          AR_GLB_SWREG_DISCONT_EN_BT_WLAN);
 
@@ -746,9 +796,9 @@ static int ar9003_hw_process_ini(struct ath_hw *ah,
        ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask);
        ath9k_hw_apply_txpower(ah, chan, false);
 
-       if (AR_SREV_9462(ah)) {
+       if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
                if (REG_READ_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_0,
-                               AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL))
+                                  AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL))
                        ah->enabled_cals |= TX_IQ_CAL;
                else
                        ah->enabled_cals &= ~TX_IQ_CAL;
@@ -1111,7 +1161,7 @@ static void ar9003_hw_set_nf_limits(struct ath_hw *ah)
        if (AR_SREV_9330(ah))
                ah->nf_2g.nominal = AR_PHY_CCA_NOM_VAL_9330_2GHZ;
 
-       if (AR_SREV_9462(ah)) {
+       if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
                ah->nf_2g.min = AR_PHY_CCA_MIN_GOOD_VAL_9462_2GHZ;
                ah->nf_2g.nominal = AR_PHY_CCA_NOM_VAL_9462_2GHZ;
                ah->nf_5g.min = AR_PHY_CCA_MIN_GOOD_VAL_9462_5GHZ;
@@ -1223,17 +1273,17 @@ static void ar9003_hw_set_radar_conf(struct ath_hw *ah)
 }
 
 static void ar9003_hw_antdiv_comb_conf_get(struct ath_hw *ah,
-                                  struct ath_hw_antcomb_conf *antconf)
+                                          struct ath_hw_antcomb_conf *antconf)
 {
        u32 regval;
 
        regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL);
-       antconf->main_lna_conf = (regval & AR_PHY_9485_ANT_DIV_MAIN_LNACONF) >>
-                                 AR_PHY_9485_ANT_DIV_MAIN_LNACONF_S;
-       antconf->alt_lna_conf = (regval & AR_PHY_9485_ANT_DIV_ALT_LNACONF) >>
-                                AR_PHY_9485_ANT_DIV_ALT_LNACONF_S;
-       antconf->fast_div_bias = (regval & AR_PHY_9485_ANT_FAST_DIV_BIAS) >>
-                                 AR_PHY_9485_ANT_FAST_DIV_BIAS_S;
+       antconf->main_lna_conf = (regval & AR_PHY_ANT_DIV_MAIN_LNACONF) >>
+                                 AR_PHY_ANT_DIV_MAIN_LNACONF_S;
+       antconf->alt_lna_conf = (regval & AR_PHY_ANT_DIV_ALT_LNACONF) >>
+                                AR_PHY_ANT_DIV_ALT_LNACONF_S;
+       antconf->fast_div_bias = (regval & AR_PHY_ANT_FAST_DIV_BIAS) >>
+                                 AR_PHY_ANT_FAST_DIV_BIAS_S;
 
        if (AR_SREV_9330_11(ah)) {
                antconf->lna1_lna2_delta = -9;
@@ -1241,6 +1291,9 @@ static void ar9003_hw_antdiv_comb_conf_get(struct ath_hw *ah,
        } else if (AR_SREV_9485(ah)) {
                antconf->lna1_lna2_delta = -9;
                antconf->div_group = 2;
+       } else if (AR_SREV_9565(ah)) {
+               antconf->lna1_lna2_delta = -3;
+               antconf->div_group = 3;
        } else {
                antconf->lna1_lna2_delta = -3;
                antconf->div_group = 0;
@@ -1253,26 +1306,84 @@ static void ar9003_hw_antdiv_comb_conf_set(struct ath_hw *ah,
        u32 regval;
 
        regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL);
-       regval &= ~(AR_PHY_9485_ANT_DIV_MAIN_LNACONF |
-                   AR_PHY_9485_ANT_DIV_ALT_LNACONF |
-                   AR_PHY_9485_ANT_FAST_DIV_BIAS |
-                   AR_PHY_9485_ANT_DIV_MAIN_GAINTB |
-                   AR_PHY_9485_ANT_DIV_ALT_GAINTB);
-       regval |= ((antconf->main_lna_conf <<
-                                       AR_PHY_9485_ANT_DIV_MAIN_LNACONF_S)
-                  & AR_PHY_9485_ANT_DIV_MAIN_LNACONF);
-       regval |= ((antconf->alt_lna_conf << AR_PHY_9485_ANT_DIV_ALT_LNACONF_S)
-                  & AR_PHY_9485_ANT_DIV_ALT_LNACONF);
-       regval |= ((antconf->fast_div_bias << AR_PHY_9485_ANT_FAST_DIV_BIAS_S)
-                  & AR_PHY_9485_ANT_FAST_DIV_BIAS);
-       regval |= ((antconf->main_gaintb << AR_PHY_9485_ANT_DIV_MAIN_GAINTB_S)
-                  & AR_PHY_9485_ANT_DIV_MAIN_GAINTB);
-       regval |= ((antconf->alt_gaintb << AR_PHY_9485_ANT_DIV_ALT_GAINTB_S)
-                  & AR_PHY_9485_ANT_DIV_ALT_GAINTB);
+       regval &= ~(AR_PHY_ANT_DIV_MAIN_LNACONF |
+                   AR_PHY_ANT_DIV_ALT_LNACONF |
+                   AR_PHY_ANT_FAST_DIV_BIAS |
+                   AR_PHY_ANT_DIV_MAIN_GAINTB |
+                   AR_PHY_ANT_DIV_ALT_GAINTB);
+       regval |= ((antconf->main_lna_conf << AR_PHY_ANT_DIV_MAIN_LNACONF_S)
+                  & AR_PHY_ANT_DIV_MAIN_LNACONF);
+       regval |= ((antconf->alt_lna_conf << AR_PHY_ANT_DIV_ALT_LNACONF_S)
+                  & AR_PHY_ANT_DIV_ALT_LNACONF);
+       regval |= ((antconf->fast_div_bias << AR_PHY_ANT_FAST_DIV_BIAS_S)
+                  & AR_PHY_ANT_FAST_DIV_BIAS);
+       regval |= ((antconf->main_gaintb << AR_PHY_ANT_DIV_MAIN_GAINTB_S)
+                  & AR_PHY_ANT_DIV_MAIN_GAINTB);
+       regval |= ((antconf->alt_gaintb << AR_PHY_ANT_DIV_ALT_GAINTB_S)
+                  & AR_PHY_ANT_DIV_ALT_GAINTB);
 
        REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);
 }
 
+static void ar9003_hw_antctrl_shared_chain_lnadiv(struct ath_hw *ah,
+                                                 bool enable)
+{
+       u8 ant_div_ctl1;
+       u32 regval;
+
+       if (!AR_SREV_9565(ah))
+               return;
+
+       ah->shared_chain_lnadiv = enable;
+       ant_div_ctl1 = ah->eep_ops->get_eeprom(ah, EEP_ANT_DIV_CTL1);
+
+       regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL);
+       regval &= (~AR_ANT_DIV_CTRL_ALL);
+       regval |= (ant_div_ctl1 & 0x3f) << AR_ANT_DIV_CTRL_ALL_S;
+       regval &= ~AR_PHY_ANT_DIV_LNADIV;
+       regval |= ((ant_div_ctl1 >> 6) & 0x1) << AR_PHY_ANT_DIV_LNADIV_S;
+
+       if (enable)
+               regval |= AR_ANT_DIV_ENABLE;
+
+       REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);
+
+       regval = REG_READ(ah, AR_PHY_CCK_DETECT);
+       regval &= ~AR_FAST_DIV_ENABLE;
+       regval |= ((ant_div_ctl1 >> 7) & 0x1) << AR_FAST_DIV_ENABLE_S;
+
+       if (enable)
+               regval |= AR_FAST_DIV_ENABLE;
+
+       REG_WRITE(ah, AR_PHY_CCK_DETECT, regval);
+
+       if (enable) {
+               REG_SET_BIT(ah, AR_PHY_MC_GAIN_CTRL,
+                           (1 << AR_PHY_ANT_SW_RX_PROT_S));
+               if (ah->curchan && IS_CHAN_2GHZ(ah->curchan))
+                       REG_SET_BIT(ah, AR_PHY_RESTART,
+                                   AR_PHY_RESTART_ENABLE_DIV_M2FLAG);
+               REG_SET_BIT(ah, AR_BTCOEX_WL_LNADIV,
+                           AR_BTCOEX_WL_LNADIV_FORCE_ON);
+       } else {
+               REG_CLR_BIT(ah, AR_PHY_MC_GAIN_CTRL, AR_ANT_DIV_ENABLE);
+               REG_CLR_BIT(ah, AR_PHY_MC_GAIN_CTRL,
+                           (1 << AR_PHY_ANT_SW_RX_PROT_S));
+               REG_CLR_BIT(ah, AR_PHY_CCK_DETECT, AR_FAST_DIV_ENABLE);
+               REG_CLR_BIT(ah, AR_BTCOEX_WL_LNADIV,
+                           AR_BTCOEX_WL_LNADIV_FORCE_ON);
+
+               regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL);
+               regval &= ~(AR_PHY_ANT_DIV_MAIN_LNACONF |
+                       AR_PHY_ANT_DIV_ALT_LNACONF |
+                       AR_PHY_ANT_DIV_MAIN_GAINTB |
+                       AR_PHY_ANT_DIV_ALT_GAINTB);
+               regval |= (AR_PHY_ANT_DIV_LNA1 << AR_PHY_ANT_DIV_MAIN_LNACONF_S);
+               regval |= (AR_PHY_ANT_DIV_LNA2 << AR_PHY_ANT_DIV_ALT_LNACONF_S);
+               REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);
+       }
+}
+
 static int ar9003_hw_fast_chan_change(struct ath_hw *ah,
                                      struct ath9k_channel *chan,
                                      u8 *ini_reloaded)
@@ -1312,10 +1423,10 @@ static int ar9003_hw_fast_chan_change(struct ath_hw *ah,
        ar9003_hw_prog_ini(ah, &ah->iniMac[ATH_INI_POST], modesIndex);
        ar9003_hw_prog_ini(ah, &ah->iniBB[ATH_INI_POST], modesIndex);
        ar9003_hw_prog_ini(ah, &ah->iniRadio[ATH_INI_POST], modesIndex);
+
        if (AR_SREV_9462_20(ah))
-               ar9003_hw_prog_ini(ah,
-                               &ah->ini_radio_post_sys2ant,
-                               modesIndex);
+               ar9003_hw_prog_ini(ah, &ah->ini_radio_post_sys2ant,
+                                  modesIndex);
 
        REG_WRITE_ARRAY(&ah->iniModesTxGain, modesIndex, regWrites);
 
@@ -1326,6 +1437,9 @@ static int ar9003_hw_fast_chan_change(struct ath_hw *ah,
        if (IS_CHAN_A_FAST_CLOCK(ah, chan))
                REG_WRITE_ARRAY(&ah->iniModesFastClock, modesIndex, regWrites);
 
+       if (AR_SREV_9565(ah))
+               REG_WRITE_ARRAY(&ah->iniModesFastClock, 1, regWrites);
+
        REG_WRITE_ARRAY(&ah->iniAdditional, 1, regWrites);
 
        ah->modes_index = modesIndex;
@@ -1368,6 +1482,7 @@ void ar9003_hw_attach_phy_ops(struct ath_hw *ah)
 
        ops->antdiv_comb_conf_get = ar9003_hw_antdiv_comb_conf_get;
        ops->antdiv_comb_conf_set = ar9003_hw_antdiv_comb_conf_set;
+       ops->antctrl_shared_chain_lnadiv = ar9003_hw_antctrl_shared_chain_lnadiv;
 
        ar9003_hw_set_nf_limits(ah);
        ar9003_hw_set_radar_conf(ah);
index 84d3d49568616c5452692b1660253f6ae70468cf..9a48e3d2f231eadefcf15dc2604d891150b389fb 100644 (file)
 #define AR_PHY_ML_CNTL_2       (AR_MRC_BASE + 0x1c)
 #define AR_PHY_TST_ADC         (AR_MRC_BASE + 0x20)
 
-#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_A              0x00000FE0
+#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_A      0x00000FE0
 #define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_A_S    5
-#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_A                  0x1F
-#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_A_S                0
+#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_A          0x1F
+#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_A_S        0
+#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_B      0x00FE0000
+#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_B_S    17
+#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_B          0x0001F000
+#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_B_S        12
 
 #define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_A        0x00000FE0
 #define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_A_S      5
 #define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_A            0x1F
 #define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_A_S         0
+#define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_B       0x00FE0000
+#define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_B_S     17
+#define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_B           0x0001F000
+#define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_B_S         12
+
 
 /*
  * MRC Feild Definitions
 #define AR_ANT_DIV_ENABLE_S    24
 
 
-#define AR_PHY_9485_ANT_FAST_DIV_BIAS                  0x00007e00
-#define AR_PHY_9485_ANT_FAST_DIV_BIAS_S                  9
-#define AR_PHY_9485_ANT_DIV_LNADIV                     0x01000000
-#define AR_PHY_9485_ANT_DIV_LNADIV_S                   24
-#define AR_PHY_9485_ANT_DIV_ALT_LNACONF                        0x06000000
-#define AR_PHY_9485_ANT_DIV_ALT_LNACONF_S              25
-#define AR_PHY_9485_ANT_DIV_MAIN_LNACONF               0x18000000
-#define AR_PHY_9485_ANT_DIV_MAIN_LNACONF_S             27
-#define AR_PHY_9485_ANT_DIV_ALT_GAINTB                 0x20000000
-#define AR_PHY_9485_ANT_DIV_ALT_GAINTB_S               29
-#define AR_PHY_9485_ANT_DIV_MAIN_GAINTB                        0x40000000
-#define AR_PHY_9485_ANT_DIV_MAIN_GAINTB_S              30
-
-#define AR_PHY_9485_ANT_DIV_LNA1_MINUS_LNA2            0x0
-#define AR_PHY_9485_ANT_DIV_LNA2                       0x1
-#define AR_PHY_9485_ANT_DIV_LNA1                       0x2
-#define AR_PHY_9485_ANT_DIV_LNA1_PLUS_LNA2             0x3
+#define AR_PHY_ANT_FAST_DIV_BIAS                0x00007e00
+#define AR_PHY_ANT_FAST_DIV_BIAS_S              9
+#define AR_PHY_ANT_SW_RX_PROT                   0x00800000
+#define AR_PHY_ANT_SW_RX_PROT_S                 23
+#define AR_PHY_ANT_DIV_LNADIV                   0x01000000
+#define AR_PHY_ANT_DIV_LNADIV_S                 24
+#define AR_PHY_ANT_DIV_ALT_LNACONF              0x06000000
+#define AR_PHY_ANT_DIV_ALT_LNACONF_S            25
+#define AR_PHY_ANT_DIV_MAIN_LNACONF             0x18000000
+#define AR_PHY_ANT_DIV_MAIN_LNACONF_S           27
+#define AR_PHY_ANT_DIV_ALT_GAINTB               0x20000000
+#define AR_PHY_ANT_DIV_ALT_GAINTB_S             29
+#define AR_PHY_ANT_DIV_MAIN_GAINTB              0x40000000
+#define AR_PHY_ANT_DIV_MAIN_GAINTB_S            30
+
+#define AR_PHY_ANT_DIV_LNA1_MINUS_LNA2          0x0
+#define AR_PHY_ANT_DIV_LNA2                     0x1
+#define AR_PHY_ANT_DIV_LNA1                     0x2
+#define AR_PHY_ANT_DIV_LNA1_PLUS_LNA2           0x3
 
 #define AR_PHY_EXTCHN_PWRTHR1   (AR_AGC_BASE + 0x2c)
 #define AR_PHY_EXT_CHN_WIN      (AR_AGC_BASE + 0x30)
 #define AR_PHY_FIND_SIG_RELSTEP        0x1f
 #define AR_PHY_FIND_SIG_RELSTEP_S         0
 #define AR_PHY_FIND_SIG_RELSTEP_SIGN_BIT  5
+#define AR_PHY_RESTART_ENABLE_DIV_M2FLAG 0x00200000
+#define AR_PHY_RESTART_ENABLE_DIV_M2FLAG_S 21
 #define AR_PHY_RESTART_DIV_GC   0x001C0000
 #define AR_PHY_RESTART_DIV_GC_S 18
 #define AR_PHY_RESTART_ENA      0x01
 #define AR_PHY_BB_THERM_ADC_1_INIT_THERM               0x000000ff
 #define AR_PHY_BB_THERM_ADC_1_INIT_THERM_S             0
 
+#define AR_PHY_BB_THERM_ADC_3                          (AR_SM_BASE + 0x250)
+#define AR_PHY_BB_THERM_ADC_3_THERM_ADC_SCALE_GAIN     0x0001ff00
+#define AR_PHY_BB_THERM_ADC_3_THERM_ADC_SCALE_GAIN_S   8
+#define AR_PHY_BB_THERM_ADC_3_THERM_ADC_OFFSET         0x000000ff
+#define AR_PHY_BB_THERM_ADC_3_THERM_ADC_OFFSET_S       0
+
 #define AR_PHY_BB_THERM_ADC_4                          (AR_SM_BASE + 0x254)
 #define AR_PHY_BB_THERM_ADC_4_LATEST_THERM_VALUE       0x000000ff
 #define AR_PHY_BB_THERM_ADC_4_LATEST_THERM_VALUE_S     0
 #define AR_PHY_65NM_CH0_TXRF3_CAPDIV2G_S       1
 
 #define AR_PHY_65NM_CH0_SYNTH4      0x1608c
-#define AR_PHY_SYNTH4_LONG_SHIFT_SELECT   (AR_SREV_9462(ah) ? 0x00000001 : 0x00000002)
-#define AR_PHY_SYNTH4_LONG_SHIFT_SELECT_S (AR_SREV_9462(ah) ? 0 : 1)
+#define AR_PHY_SYNTH4_LONG_SHIFT_SELECT   ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x00000001 : 0x00000002)
+#define AR_PHY_SYNTH4_LONG_SHIFT_SELECT_S ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0 : 1)
 #define AR_PHY_65NM_CH0_SYNTH7      0x16098
 #define AR_PHY_65NM_CH0_BIAS1       0x160c0
 #define AR_PHY_65NM_CH0_BIAS2       0x160c4
 #define AR_PHY_65NM_CH2_RXTX4       0x1690c
 
 #define AR_CH0_TOP     (AR_SREV_9300(ah) ? 0x16288 : \
-                               ((AR_SREV_9462(ah) ? 0x1628c : 0x16280)))
+                        (((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x1628c : 0x16280)))
 #define AR_CH0_TOP_XPABIASLVL (AR_SREV_9550(ah) ? 0x3c0 : 0x300)
 #define AR_CH0_TOP_XPABIASLVL_S (AR_SREV_9550(ah) ? 6 : 8)
 
 #define AR_SWITCH_TABLE_ALL_S (0)
 
 #define AR_PHY_65NM_CH0_THERM       (AR_SREV_9300(ah) ? 0x16290 :\
-                                       (AR_SREV_9462(ah) ? 0x16294 : 0x1628c))
+                                    ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x16294 : 0x1628c))
 
 #define AR_PHY_65NM_CH0_THERM_LOCAL   0x80000000
 #define AR_PHY_65NM_CH0_THERM_LOCAL_S 31
 #define AR_CH0_TOP2_XPABIASLVL_S       12
 
 #define AR_CH0_XTAL            (AR_SREV_9300(ah) ? 0x16294 : \
-                                       (AR_SREV_9462(ah) ? 0x16298 : 0x16290))
+                                ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x16298 : 0x16290))
 #define AR_CH0_XTAL_CAPINDAC   0x7f000000
 #define AR_CH0_XTAL_CAPINDAC_S 24
 #define AR_CH0_XTAL_CAPOUTDAC  0x00fe0000
 #define AR_CH0_XTAL_CAPOUTDAC_S        17
 
-#define AR_PHY_PMU1            (AR_SREV_9462(ah) ? 0x16340 : 0x16c40)
+#define AR_PHY_PMU1            ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x16340 : 0x16c40)
 #define AR_PHY_PMU1_PWD                0x1
 #define AR_PHY_PMU1_PWD_S      0
 
-#define AR_PHY_PMU2            (AR_SREV_9462(ah) ? 0x16344 : 0x16c44)
+#define AR_PHY_PMU2            ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x16344 : 0x16c44)
 #define AR_PHY_PMU2_PGM                0x00200000
 #define AR_PHY_PMU2_PGM_S      21
 
 
 #define AR_PHY_65NM_CH0_RXTX4_THERM_ON          0x10000000
 #define AR_PHY_65NM_CH0_RXTX4_THERM_ON_S        28
+#define AR_PHY_65NM_CH0_RXTX4_THERM_ON_OVR      0x20000000
+#define AR_PHY_65NM_CH0_RXTX4_THERM_ON_OVR_S    29
 
 #define AR_PHY_65NM_RXTX4_XLNA_BIAS            0xC0000000
 #define AR_PHY_65NM_RXTX4_XLNA_BIAS_S          30
 #define AR_PHY_CL_TAB_CL_GAIN_MOD              0x1f
 #define AR_PHY_CL_TAB_CL_GAIN_MOD_S            0
 
+#define AR_BTCOEX_WL_LNADIV                                0x1a64
+#define AR_BTCOEX_WL_LNADIV_PREDICTED_PERIOD               0x00003FFF
+#define AR_BTCOEX_WL_LNADIV_PREDICTED_PERIOD_S             0
+#define AR_BTCOEX_WL_LNADIV_DPDT_IGNORE_PRIORITY           0x00004000
+#define AR_BTCOEX_WL_LNADIV_DPDT_IGNORE_PRIORITY_S         14
+#define AR_BTCOEX_WL_LNADIV_FORCE_ON                       0x00008000
+#define AR_BTCOEX_WL_LNADIV_FORCE_ON_S                     15
+#define AR_BTCOEX_WL_LNADIV_MODE_OPTION                    0x00030000
+#define AR_BTCOEX_WL_LNADIV_MODE_OPTION_S                  16
+#define AR_BTCOEX_WL_LNADIV_MODE                           0x007c0000
+#define AR_BTCOEX_WL_LNADIV_MODE_S                         18
+#define AR_BTCOEX_WL_LNADIV_ALLOWED_TX_ANTDIV_WL_TX_REQ    0x00800000
+#define AR_BTCOEX_WL_LNADIV_ALLOWED_TX_ANTDIV_WL_TX_REQ_S  23
+#define AR_BTCOEX_WL_LNADIV_DISABLE_TX_ANTDIV_ENABLE       0x01000000
+#define AR_BTCOEX_WL_LNADIV_DISABLE_TX_ANTDIV_ENABLE_S     24
+#define AR_BTCOEX_WL_LNADIV_CONTINUOUS_BT_ACTIVE_PROTECT   0x02000000
+#define AR_BTCOEX_WL_LNADIV_CONTINUOUS_BT_ACTIVE_PROTECT_S 25
+#define AR_BTCOEX_WL_LNADIV_BT_INACTIVE_THRESHOLD          0xFC000000
+#define AR_BTCOEX_WL_LNADIV_BT_INACTIVE_THRESHOLD_S        26
+
 #endif  /* AR9003_PHY_H */
index 4ef7dcccaa2f6bd114dfef8cd3741956ac63f328..58f30f65c6b62fa21acb46f2468f171cc7736125 100644 (file)
@@ -58,7 +58,7 @@ static const u32 ar9462_2p0_baseband_postamble[][5] = {
        {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
        {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
        {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
-       {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c282},
+       {0x00009e3c, 0xcf946222, 0xcf946222, 0xcfd5c782, 0xcfd5c282},
        {0x00009e44, 0x62321e27, 0x62321e27, 0xfe291e27, 0xfe291e27},
        {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
        {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
diff --git a/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h
new file mode 100644 (file)
index 0000000..843e79f
--- /dev/null
@@ -0,0 +1,1231 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef INITVALS_9565_1P0_H
+#define INITVALS_9565_1P0_H
+
+/* AR9565 1.0 */
+
+static const u32 ar9565_1p0_mac_core[][2] = {
+       /* Addr      allmodes  */
+       {0x00000008, 0x00000000},
+       {0x00000030, 0x000a0085},
+       {0x00000034, 0x00000005},
+       {0x00000040, 0x00000000},
+       {0x00000044, 0x00000000},
+       {0x00000048, 0x00000008},
+       {0x0000004c, 0x00000010},
+       {0x00000050, 0x00000000},
+       {0x00001040, 0x002ffc0f},
+       {0x00001044, 0x002ffc0f},
+       {0x00001048, 0x002ffc0f},
+       {0x0000104c, 0x002ffc0f},
+       {0x00001050, 0x002ffc0f},
+       {0x00001054, 0x002ffc0f},
+       {0x00001058, 0x002ffc0f},
+       {0x0000105c, 0x002ffc0f},
+       {0x00001060, 0x002ffc0f},
+       {0x00001064, 0x002ffc0f},
+       {0x000010f0, 0x00000100},
+       {0x00001270, 0x00000000},
+       {0x000012b0, 0x00000000},
+       {0x000012f0, 0x00000000},
+       {0x0000143c, 0x00000000},
+       {0x0000147c, 0x00000000},
+       {0x00001810, 0x0f000003},
+       {0x00008000, 0x00000000},
+       {0x00008004, 0x00000000},
+       {0x00008008, 0x00000000},
+       {0x0000800c, 0x00000000},
+       {0x00008018, 0x00000000},
+       {0x00008020, 0x00000000},
+       {0x00008038, 0x00000000},
+       {0x0000803c, 0x00000000},
+       {0x00008040, 0x00000000},
+       {0x00008044, 0x00000000},
+       {0x00008048, 0x00000000},
+       {0x00008054, 0x00000000},
+       {0x00008058, 0x00000000},
+       {0x0000805c, 0x000fc78f},
+       {0x00008060, 0x0000000f},
+       {0x00008064, 0x00000000},
+       {0x00008070, 0x00000310},
+       {0x00008074, 0x00000020},
+       {0x00008078, 0x00000000},
+       {0x0000809c, 0x0000000f},
+       {0x000080a0, 0x00000000},
+       {0x000080a4, 0x02ff0000},
+       {0x000080a8, 0x0e070605},
+       {0x000080ac, 0x0000000d},
+       {0x000080b0, 0x00000000},
+       {0x000080b4, 0x00000000},
+       {0x000080b8, 0x00000000},
+       {0x000080bc, 0x00000000},
+       {0x000080c0, 0x2a800000},
+       {0x000080c4, 0x06900168},
+       {0x000080c8, 0x13881c20},
+       {0x000080cc, 0x01f40000},
+       {0x000080d0, 0x00252500},
+       {0x000080d4, 0x00b00005},
+       {0x000080d8, 0x00400002},
+       {0x000080dc, 0x00000000},
+       {0x000080e0, 0xffffffff},
+       {0x000080e4, 0x0000ffff},
+       {0x000080e8, 0x3f3f3f3f},
+       {0x000080ec, 0x00000000},
+       {0x000080f0, 0x00000000},
+       {0x000080f4, 0x00000000},
+       {0x000080fc, 0x00020000},
+       {0x00008100, 0x00000000},
+       {0x00008108, 0x00000052},
+       {0x0000810c, 0x00000000},
+       {0x00008110, 0x00000000},
+       {0x00008114, 0x000007ff},
+       {0x00008118, 0x000000aa},
+       {0x0000811c, 0x00003210},
+       {0x00008124, 0x00000000},
+       {0x00008128, 0x00000000},
+       {0x0000812c, 0x00000000},
+       {0x00008130, 0x00000000},
+       {0x00008134, 0x00000000},
+       {0x00008138, 0x00000000},
+       {0x0000813c, 0x0000ffff},
+       {0x00008144, 0xffffffff},
+       {0x00008168, 0x00000000},
+       {0x0000816c, 0x00000000},
+       {0x00008170, 0x18486200},
+       {0x00008174, 0x33332210},
+       {0x00008178, 0x00000000},
+       {0x0000817c, 0x00020000},
+       {0x000081c4, 0x33332210},
+       {0x000081c8, 0x00000000},
+       {0x000081cc, 0x00000000},
+       {0x000081d4, 0x00000000},
+       {0x000081ec, 0x00000000},
+       {0x000081f0, 0x00000000},
+       {0x000081f4, 0x00000000},
+       {0x000081f8, 0x00000000},
+       {0x000081fc, 0x00000000},
+       {0x00008240, 0x00100000},
+       {0x00008244, 0x0010f424},
+       {0x00008248, 0x00000800},
+       {0x0000824c, 0x0001e848},
+       {0x00008250, 0x00000000},
+       {0x00008254, 0x00000000},
+       {0x00008258, 0x00000000},
+       {0x0000825c, 0x40000000},
+       {0x00008260, 0x00080922},
+       {0x00008264, 0x9d400010},
+       {0x00008268, 0xffffffff},
+       {0x0000826c, 0x0000ffff},
+       {0x00008270, 0x00000000},
+       {0x00008274, 0x40000000},
+       {0x00008278, 0x003e4180},
+       {0x0000827c, 0x00000004},
+       {0x00008284, 0x0000002c},
+       {0x00008288, 0x0000002c},
+       {0x0000828c, 0x000000ff},
+       {0x00008294, 0x00000000},
+       {0x00008298, 0x00000000},
+       {0x0000829c, 0x00000000},
+       {0x00008300, 0x00000140},
+       {0x00008314, 0x00000000},
+       {0x0000831c, 0x0000010d},
+       {0x00008328, 0x00000000},
+       {0x0000832c, 0x0000001f},
+       {0x00008330, 0x00000302},
+       {0x00008334, 0x00000700},
+       {0x00008338, 0xffff0000},
+       {0x0000833c, 0x02400000},
+       {0x00008340, 0x000107ff},
+       {0x00008344, 0xaa48105b},
+       {0x00008348, 0x008f0000},
+       {0x0000835c, 0x00000000},
+       {0x00008360, 0xffffffff},
+       {0x00008364, 0xffffffff},
+       {0x00008368, 0x00000000},
+       {0x00008370, 0x00000000},
+       {0x00008374, 0x000000ff},
+       {0x00008378, 0x00000000},
+       {0x0000837c, 0x00000000},
+       {0x00008380, 0xffffffff},
+       {0x00008384, 0xffffffff},
+       {0x00008390, 0xffffffff},
+       {0x00008394, 0xffffffff},
+       {0x00008398, 0x00000000},
+       {0x0000839c, 0x00000000},
+       {0x000083a4, 0x0000fa14},
+       {0x000083a8, 0x000f0c00},
+       {0x000083ac, 0x33332210},
+       {0x000083b0, 0x33332210},
+       {0x000083b4, 0x33332210},
+       {0x000083b8, 0x33332210},
+       {0x000083bc, 0x00000000},
+       {0x000083c0, 0x00000000},
+       {0x000083c4, 0x00000000},
+       {0x000083c8, 0x00000000},
+       {0x000083cc, 0x00000200},
+       {0x000083d0, 0x800301ff},
+};
+
+static const u32 ar9565_1p0_mac_postamble[][5] = {
+       /* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
+       {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
+       {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
+       {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
+       {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
+       {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
+       {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
+       {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
+       {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
+};
+
+static const u32 ar9565_1p0_baseband_core[][2] = {
+       /* Addr      allmodes  */
+       {0x00009800, 0xafe68e30},
+       {0x00009804, 0xfd14e000},
+       {0x00009808, 0x9c0a8f6b},
+       {0x0000980c, 0x04800000},
+       {0x00009814, 0x9280c00a},
+       {0x00009818, 0x00000000},
+       {0x0000981c, 0x00020028},
+       {0x00009834, 0x6400a290},
+       {0x00009838, 0x0108ecff},
+       {0x0000983c, 0x0d000600},
+       {0x00009880, 0x201fff00},
+       {0x00009884, 0x00001042},
+       {0x000098a4, 0x00200400},
+       {0x000098b0, 0x32840bbe},
+       {0x000098d0, 0x004b6a8e},
+       {0x000098d4, 0x00000820},
+       {0x000098dc, 0x00000000},
+       {0x000098e4, 0x01ffffff},
+       {0x000098e8, 0x01ffffff},
+       {0x000098ec, 0x01ffffff},
+       {0x000098f0, 0x00000000},
+       {0x000098f4, 0x00000000},
+       {0x00009bf0, 0x80000000},
+       {0x00009c04, 0xff55ff55},
+       {0x00009c08, 0x0320ff55},
+       {0x00009c0c, 0x00000000},
+       {0x00009c10, 0x00000000},
+       {0x00009c14, 0x00046384},
+       {0x00009c18, 0x05b6b440},
+       {0x00009c1c, 0x00b6b440},
+       {0x00009d00, 0xc080a333},
+       {0x00009d04, 0x40206c10},
+       {0x00009d08, 0x009c4060},
+       {0x00009d0c, 0x1883800a},
+       {0x00009d10, 0x01834061},
+       {0x00009d14, 0x00c00400},
+       {0x00009d18, 0x00000000},
+       {0x00009e08, 0x0078230c},
+       {0x00009e24, 0x990bb515},
+       {0x00009e28, 0x126f0000},
+       {0x00009e30, 0x06336f77},
+       {0x00009e34, 0x6af6532f},
+       {0x00009e38, 0x0cc80c00},
+       {0x00009e40, 0x0d261820},
+       {0x00009e4c, 0x00001004},
+       {0x00009e50, 0x00ff03f1},
+       {0x00009e54, 0xe4c355c7},
+       {0x00009e5c, 0xe9198724},
+       {0x00009fc0, 0x823e4fc8},
+       {0x00009fc4, 0x0001efb5},
+       {0x00009fcc, 0x40000014},
+       {0x0000a20c, 0x00000000},
+       {0x0000a220, 0x00000000},
+       {0x0000a224, 0x00000000},
+       {0x0000a228, 0x10002310},
+       {0x0000a23c, 0x00000000},
+       {0x0000a244, 0x0c000000},
+       {0x0000a2a0, 0x00000001},
+       {0x0000a2c0, 0x00000001},
+       {0x0000a2c8, 0x00000000},
+       {0x0000a2cc, 0x18c43433},
+       {0x0000a2d4, 0x00000000},
+       {0x0000a2ec, 0x00000000},
+       {0x0000a2f0, 0x00000000},
+       {0x0000a2f4, 0x00000000},
+       {0x0000a2f8, 0x00000000},
+       {0x0000a344, 0x00000000},
+       {0x0000a34c, 0x00000000},
+       {0x0000a350, 0x0000a000},
+       {0x0000a364, 0x00000000},
+       {0x0000a370, 0x00000000},
+       {0x0000a390, 0x00000001},
+       {0x0000a394, 0x00000444},
+       {0x0000a398, 0x001f0e0f},
+       {0x0000a39c, 0x0075393f},
+       {0x0000a3a0, 0xb79f6427},
+       {0x0000a3a4, 0x00000000},
+       {0x0000a3a8, 0xaaaaaaaa},
+       {0x0000a3ac, 0x3c466478},
+       {0x0000a3c0, 0x20202020},
+       {0x0000a3c4, 0x22222220},
+       {0x0000a3c8, 0x20200020},
+       {0x0000a3cc, 0x20202020},
+       {0x0000a3d0, 0x20202020},
+       {0x0000a3d4, 0x20202020},
+       {0x0000a3d8, 0x20202020},
+       {0x0000a3dc, 0x20202020},
+       {0x0000a3e0, 0x20202020},
+       {0x0000a3e4, 0x20202020},
+       {0x0000a3e8, 0x20202020},
+       {0x0000a3ec, 0x20202020},
+       {0x0000a3f0, 0x00000000},
+       {0x0000a3f4, 0x00000006},
+       {0x0000a3f8, 0x0c9bd380},
+       {0x0000a3fc, 0x000f0f01},
+       {0x0000a400, 0x8fa91f01},
+       {0x0000a404, 0x00000000},
+       {0x0000a408, 0x0e79e5c6},
+       {0x0000a40c, 0x00820820},
+       {0x0000a414, 0x1ce739ce},
+       {0x0000a418, 0x2d001dce},
+       {0x0000a41c, 0x1ce739ce},
+       {0x0000a420, 0x000001ce},
+       {0x0000a424, 0x1ce739ce},
+       {0x0000a428, 0x000001ce},
+       {0x0000a42c, 0x1ce739ce},
+       {0x0000a430, 0x1ce739ce},
+       {0x0000a434, 0x00000000},
+       {0x0000a438, 0x00001801},
+       {0x0000a43c, 0x00000000},
+       {0x0000a440, 0x00000000},
+       {0x0000a444, 0x00000000},
+       {0x0000a448, 0x05000096},
+       {0x0000a44c, 0x00000001},
+       {0x0000a450, 0x00010000},
+       {0x0000a454, 0x03000000},
+       {0x0000a458, 0x00000000},
+       {0x0000a644, 0xbfad9d74},
+       {0x0000a648, 0x0048060a},
+       {0x0000a64c, 0x00003c37},
+       {0x0000a670, 0x03020100},
+       {0x0000a674, 0x09080504},
+       {0x0000a678, 0x0d0c0b0a},
+       {0x0000a67c, 0x13121110},
+       {0x0000a680, 0x31301514},
+       {0x0000a684, 0x35343332},
+       {0x0000a688, 0x00000036},
+       {0x0000a690, 0x00000838},
+       {0x0000a6b4, 0x00512c01},
+       {0x0000a7c0, 0x00000000},
+       {0x0000a7c4, 0xfffffffc},
+       {0x0000a7c8, 0x00000000},
+       {0x0000a7cc, 0x00000000},
+       {0x0000a7d0, 0x00000000},
+       {0x0000a7d4, 0x00000004},
+       {0x0000a7dc, 0x00000001},
+       {0x0000a7f0, 0x80000000},
+};
+
+static const u32 ar9565_1p0_baseband_postamble[][5] = {
+       /* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
+       {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a800d},
+       {0x00009820, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a01ae},
+       {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x63c640da},
+       {0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x09143c81},
+       {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
+       {0x00009830, 0x0000059c, 0x0000059c, 0x0000059c, 0x0000059c},
+       {0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4},
+       {0x00009e00, 0x0372111a, 0x0372111a, 0x037216a0, 0x037216a0},
+       {0x00009e04, 0x00802020, 0x00802020, 0x00802020, 0x00802020},
+       {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000d8},
+       {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec86d2e},
+       {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3379605e, 0x33795d5e},
+       {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
+       {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
+       {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
+       {0x00009e3c, 0xcf946222, 0xcf946222, 0xcf946222, 0xcf946222},
+       {0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27},
+       {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
+       {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
+       {0x0000a204, 0x07318fc0, 0x07318fc4, 0x07318fc4, 0x07318fc0},
+       {0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
+       {0x0000a22c, 0x01026a2f, 0x01026a27, 0x01026a2f, 0x01026a2f},
+       {0x0000a230, 0x0000400a, 0x00004014, 0x00004016, 0x0000400b},
+       {0x0000a234, 0x00000fff, 0x10000fff, 0x10000fff, 0x00000fff},
+       {0x0000a238, 0xffb81018, 0xffb81018, 0xffb81018, 0xffb81018},
+       {0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
+       {0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
+       {0x0000a258, 0x02020002, 0x02020002, 0x02020002, 0x02020002},
+       {0x0000a25c, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e},
+       {0x0000a260, 0x0a021501, 0x0a021501, 0x3a021501, 0x3a021501},
+       {0x0000a264, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
+       {0x0000a280, 0x00000007, 0x00000007, 0x0000000b, 0x0000000b},
+       {0x0000a284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
+       {0x0000a288, 0x00100510, 0x00100510, 0x00100510, 0x00100510},
+       {0x0000a28c, 0x00021551, 0x00021551, 0x00021551, 0x00021551},
+       {0x0000a2c4, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18},
+       {0x0000a2d0, 0x00071982, 0x00071982, 0x00071982, 0x00071982},
+       {0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
+       {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000ae04, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
+       {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+};
+
+static const u32 ar9565_1p0_radio_core[][2] = {
+       /* Addr      allmodes  */
+       {0x00016000, 0x36db6db6},
+       {0x00016004, 0x6db6db40},
+       {0x00016008, 0x73f00000},
+       {0x0001600c, 0x00000000},
+       {0x00016010, 0x6d823601},
+       {0x00016040, 0x7f80fff8},
+       {0x0001604c, 0x1c99e04f},
+       {0x00016050, 0x6db6db6c},
+       {0x00016058, 0x6c200000},
+       {0x00016080, 0x000c0000},
+       {0x00016084, 0x9a68048c},
+       {0x00016088, 0x54214514},
+       {0x0001608c, 0x1203040b},
+       {0x00016090, 0x24926490},
+       {0x00016098, 0xd28b3330},
+       {0x000160a0, 0x0a108ffe},
+       {0x000160a4, 0x812fc491},
+       {0x000160a8, 0x423c8000},
+       {0x000160b4, 0x92000000},
+       {0x000160b8, 0x0285dddc},
+       {0x000160bc, 0x02908888},
+       {0x000160c0, 0x006db6d0},
+       {0x000160c4, 0x6dd6db60},
+       {0x000160c8, 0x6db6db6c},
+       {0x000160cc, 0x6de6c1b0},
+       {0x00016100, 0x3fffbe04},
+       {0x00016104, 0xfff80000},
+       {0x00016108, 0x00200400},
+       {0x00016110, 0x00000000},
+       {0x00016144, 0x02084080},
+       {0x00016148, 0x000080c0},
+       {0x00016280, 0x050a0001},
+       {0x00016284, 0x3d841440},
+       {0x00016288, 0x00000000},
+       {0x0001628c, 0xe3000000},
+       {0x00016290, 0xa1004080},
+       {0x00016294, 0x40000028},
+       {0x00016298, 0x55aa2900},
+       {0x00016340, 0x131c827a},
+       {0x00016344, 0x00300000},
+};
+
+static const u32 ar9565_1p0_radio_postamble[][5] = {
+       /* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
+       {0x0001609c, 0x0b8ee524, 0x0b8ee524, 0x0b8ee524, 0x0b8ee524},
+       {0x000160ac, 0xa4646c08, 0xa4646c08, 0xa4646c08, 0xa4646c08},
+       {0x000160b0, 0x01d67f70, 0x01d67f70, 0x01d67f70, 0x01d67f70},
+       {0x0001610c, 0x40000000, 0x40000000, 0x40000000, 0x40000000},
+       {0x00016140, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
+};
+
+static const u32 ar9565_1p0_soc_preamble[][2] = {
+       /* Addr      allmodes  */
+       {0x00004078, 0x00000002},
+       {0x000040a4, 0x00a0c9c9},
+       {0x00007020, 0x00000000},
+       {0x00007034, 0x00000002},
+       {0x00007038, 0x000004c2},
+};
+
+static const u32 ar9565_1p0_soc_postamble[][5] = {
+       /* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
+       {0x00007010, 0x00002233, 0x00002233, 0x00002233, 0x00002233},
+};
+
+static const u32 ar9565_1p0_Common_rx_gain_table[][2] = {
+       /* Addr      allmodes  */
+       {0x0000a000, 0x00010000},
+       {0x0000a004, 0x00030002},
+       {0x0000a008, 0x00050004},
+       {0x0000a00c, 0x00810080},
+       {0x0000a010, 0x00830082},
+       {0x0000a014, 0x01810180},
+       {0x0000a018, 0x01830182},
+       {0x0000a01c, 0x01850184},
+       {0x0000a020, 0x01890188},
+       {0x0000a024, 0x018b018a},
+       {0x0000a028, 0x018d018c},
+       {0x0000a02c, 0x01910190},
+       {0x0000a030, 0x01930192},
+       {0x0000a034, 0x01950194},
+       {0x0000a038, 0x038a0196},
+       {0x0000a03c, 0x038c038b},
+       {0x0000a040, 0x0390038d},
+       {0x0000a044, 0x03920391},
+       {0x0000a048, 0x03940393},
+       {0x0000a04c, 0x03960395},
+       {0x0000a050, 0x00000000},
+       {0x0000a054, 0x00000000},
+       {0x0000a058, 0x00000000},
+       {0x0000a05c, 0x00000000},
+       {0x0000a060, 0x00000000},
+       {0x0000a064, 0x00000000},
+       {0x0000a068, 0x00000000},
+       {0x0000a06c, 0x00000000},
+       {0x0000a070, 0x00000000},
+       {0x0000a074, 0x00000000},
+       {0x0000a078, 0x00000000},
+       {0x0000a07c, 0x00000000},
+       {0x0000a080, 0x22222229},
+       {0x0000a084, 0x1d1d1d1d},
+       {0x0000a088, 0x1d1d1d1d},
+       {0x0000a08c, 0x1d1d1d1d},
+       {0x0000a090, 0x171d1d1d},
+       {0x0000a094, 0x11111717},
+       {0x0000a098, 0x00030311},
+       {0x0000a09c, 0x00000000},
+       {0x0000a0a0, 0x00000000},
+       {0x0000a0a4, 0x00000000},
+       {0x0000a0a8, 0x00000000},
+       {0x0000a0ac, 0x00000000},
+       {0x0000a0b0, 0x00000000},
+       {0x0000a0b4, 0x00000000},
+       {0x0000a0b8, 0x00000000},
+       {0x0000a0bc, 0x00000000},
+       {0x0000a0c0, 0x001f0000},
+       {0x0000a0c4, 0x01000101},
+       {0x0000a0c8, 0x011e011f},
+       {0x0000a0cc, 0x011c011d},
+       {0x0000a0d0, 0x02030204},
+       {0x0000a0d4, 0x02010202},
+       {0x0000a0d8, 0x021f0200},
+       {0x0000a0dc, 0x0302021e},
+       {0x0000a0e0, 0x03000301},
+       {0x0000a0e4, 0x031e031f},
+       {0x0000a0e8, 0x0402031d},
+       {0x0000a0ec, 0x04000401},
+       {0x0000a0f0, 0x041e041f},
+       {0x0000a0f4, 0x0502041d},
+       {0x0000a0f8, 0x05000501},
+       {0x0000a0fc, 0x051e051f},
+       {0x0000a100, 0x06010602},
+       {0x0000a104, 0x061f0600},
+       {0x0000a108, 0x061d061e},
+       {0x0000a10c, 0x07020703},
+       {0x0000a110, 0x07000701},
+       {0x0000a114, 0x00000000},
+       {0x0000a118, 0x00000000},
+       {0x0000a11c, 0x00000000},
+       {0x0000a120, 0x00000000},
+       {0x0000a124, 0x00000000},
+       {0x0000a128, 0x00000000},
+       {0x0000a12c, 0x00000000},
+       {0x0000a130, 0x00000000},
+       {0x0000a134, 0x00000000},
+       {0x0000a138, 0x00000000},
+       {0x0000a13c, 0x00000000},
+       {0x0000a140, 0x001f0000},
+       {0x0000a144, 0x01000101},
+       {0x0000a148, 0x011e011f},
+       {0x0000a14c, 0x011c011d},
+       {0x0000a150, 0x02030204},
+       {0x0000a154, 0x02010202},
+       {0x0000a158, 0x021f0200},
+       {0x0000a15c, 0x0302021e},
+       {0x0000a160, 0x03000301},
+       {0x0000a164, 0x031e031f},
+       {0x0000a168, 0x0402031d},
+       {0x0000a16c, 0x04000401},
+       {0x0000a170, 0x041e041f},
+       {0x0000a174, 0x0502041d},
+       {0x0000a178, 0x05000501},
+       {0x0000a17c, 0x051e051f},
+       {0x0000a180, 0x06010602},
+       {0x0000a184, 0x061f0600},
+       {0x0000a188, 0x061d061e},
+       {0x0000a18c, 0x07020703},
+       {0x0000a190, 0x07000701},
+       {0x0000a194, 0x00000000},
+       {0x0000a198, 0x00000000},
+       {0x0000a19c, 0x00000000},
+       {0x0000a1a0, 0x00000000},
+       {0x0000a1a4, 0x00000000},
+       {0x0000a1a8, 0x00000000},
+       {0x0000a1ac, 0x00000000},
+       {0x0000a1b0, 0x00000000},
+       {0x0000a1b4, 0x00000000},
+       {0x0000a1b8, 0x00000000},
+       {0x0000a1bc, 0x00000000},
+       {0x0000a1c0, 0x00000000},
+       {0x0000a1c4, 0x00000000},
+       {0x0000a1c8, 0x00000000},
+       {0x0000a1cc, 0x00000000},
+       {0x0000a1d0, 0x00000000},
+       {0x0000a1d4, 0x00000000},
+       {0x0000a1d8, 0x00000000},
+       {0x0000a1dc, 0x00000000},
+       {0x0000a1e0, 0x00000000},
+       {0x0000a1e4, 0x00000000},
+       {0x0000a1e8, 0x00000000},
+       {0x0000a1ec, 0x00000000},
+       {0x0000a1f0, 0x00000396},
+       {0x0000a1f4, 0x00000396},
+       {0x0000a1f8, 0x00000396},
+       {0x0000a1fc, 0x00000196},
+       {0x0000b000, 0x00010000},
+       {0x0000b004, 0x00030002},
+       {0x0000b008, 0x00050004},
+       {0x0000b00c, 0x00810080},
+       {0x0000b010, 0x00830082},
+       {0x0000b014, 0x01810180},
+       {0x0000b018, 0x01830182},
+       {0x0000b01c, 0x01850184},
+       {0x0000b020, 0x02810280},
+       {0x0000b024, 0x02830282},
+       {0x0000b028, 0x02850284},
+       {0x0000b02c, 0x02890288},
+       {0x0000b030, 0x028b028a},
+       {0x0000b034, 0x0388028c},
+       {0x0000b038, 0x038a0389},
+       {0x0000b03c, 0x038c038b},
+       {0x0000b040, 0x0390038d},
+       {0x0000b044, 0x03920391},
+       {0x0000b048, 0x03940393},
+       {0x0000b04c, 0x03960395},
+       {0x0000b050, 0x00000000},
+       {0x0000b054, 0x00000000},
+       {0x0000b058, 0x00000000},
+       {0x0000b05c, 0x00000000},
+       {0x0000b060, 0x00000000},
+       {0x0000b064, 0x00000000},
+       {0x0000b068, 0x00000000},
+       {0x0000b06c, 0x00000000},
+       {0x0000b070, 0x00000000},
+       {0x0000b074, 0x00000000},
+       {0x0000b078, 0x00000000},
+       {0x0000b07c, 0x00000000},
+       {0x0000b080, 0x32323232},
+       {0x0000b084, 0x2f2f3232},
+       {0x0000b088, 0x23282a2d},
+       {0x0000b08c, 0x1c1e2123},
+       {0x0000b090, 0x14171919},
+       {0x0000b094, 0x0e0e1214},
+       {0x0000b098, 0x03050707},
+       {0x0000b09c, 0x00030303},
+       {0x0000b0a0, 0x00000000},
+       {0x0000b0a4, 0x00000000},
+       {0x0000b0a8, 0x00000000},
+       {0x0000b0ac, 0x00000000},
+       {0x0000b0b0, 0x00000000},
+       {0x0000b0b4, 0x00000000},
+       {0x0000b0b8, 0x00000000},
+       {0x0000b0bc, 0x00000000},
+       {0x0000b0c0, 0x003f0020},
+       {0x0000b0c4, 0x00400041},
+       {0x0000b0c8, 0x0140005f},
+       {0x0000b0cc, 0x0160015f},
+       {0x0000b0d0, 0x017e017f},
+       {0x0000b0d4, 0x02410242},
+       {0x0000b0d8, 0x025f0240},
+       {0x0000b0dc, 0x027f0260},
+       {0x0000b0e0, 0x0341027e},
+       {0x0000b0e4, 0x035f0340},
+       {0x0000b0e8, 0x037f0360},
+       {0x0000b0ec, 0x04400441},
+       {0x0000b0f0, 0x0460045f},
+       {0x0000b0f4, 0x0541047f},
+       {0x0000b0f8, 0x055f0540},
+       {0x0000b0fc, 0x057f0560},
+       {0x0000b100, 0x06400641},
+       {0x0000b104, 0x0660065f},
+       {0x0000b108, 0x067e067f},
+       {0x0000b10c, 0x07410742},
+       {0x0000b110, 0x075f0740},
+       {0x0000b114, 0x077f0760},
+       {0x0000b118, 0x07800781},
+       {0x0000b11c, 0x07a0079f},
+       {0x0000b120, 0x07c107bf},
+       {0x0000b124, 0x000007c0},
+       {0x0000b128, 0x00000000},
+       {0x0000b12c, 0x00000000},
+       {0x0000b130, 0x00000000},
+       {0x0000b134, 0x00000000},
+       {0x0000b138, 0x00000000},
+       {0x0000b13c, 0x00000000},
+       {0x0000b140, 0x003f0020},
+       {0x0000b144, 0x00400041},
+       {0x0000b148, 0x0140005f},
+       {0x0000b14c, 0x0160015f},
+       {0x0000b150, 0x017e017f},
+       {0x0000b154, 0x02410242},
+       {0x0000b158, 0x025f0240},
+       {0x0000b15c, 0x027f0260},
+       {0x0000b160, 0x0341027e},
+       {0x0000b164, 0x035f0340},
+       {0x0000b168, 0x037f0360},
+       {0x0000b16c, 0x04400441},
+       {0x0000b170, 0x0460045f},
+       {0x0000b174, 0x0541047f},
+       {0x0000b178, 0x055f0540},
+       {0x0000b17c, 0x057f0560},
+       {0x0000b180, 0x06400641},
+       {0x0000b184, 0x0660065f},
+       {0x0000b188, 0x067e067f},
+       {0x0000b18c, 0x07410742},
+       {0x0000b190, 0x075f0740},
+       {0x0000b194, 0x077f0760},
+       {0x0000b198, 0x07800781},
+       {0x0000b19c, 0x07a0079f},
+       {0x0000b1a0, 0x07c107bf},
+       {0x0000b1a4, 0x000007c0},
+       {0x0000b1a8, 0x00000000},
+       {0x0000b1ac, 0x00000000},
+       {0x0000b1b0, 0x00000000},
+       {0x0000b1b4, 0x00000000},
+       {0x0000b1b8, 0x00000000},
+       {0x0000b1bc, 0x00000000},
+       {0x0000b1c0, 0x00000000},
+       {0x0000b1c4, 0x00000000},
+       {0x0000b1c8, 0x00000000},
+       {0x0000b1cc, 0x00000000},
+       {0x0000b1d0, 0x00000000},
+       {0x0000b1d4, 0x00000000},
+       {0x0000b1d8, 0x00000000},
+       {0x0000b1dc, 0x00000000},
+       {0x0000b1e0, 0x00000000},
+       {0x0000b1e4, 0x00000000},
+       {0x0000b1e8, 0x00000000},
+       {0x0000b1ec, 0x00000000},
+       {0x0000b1f0, 0x00000396},
+       {0x0000b1f4, 0x00000396},
+       {0x0000b1f8, 0x00000396},
+       {0x0000b1fc, 0x00000196},
+};
+
+static const u32 ar9565_1p0_Modes_lowest_ob_db_tx_gain_table[][5] = {
+       /* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
+       {0x0000a2dc, 0xfc0a9380, 0xfc0a9380, 0xfdab5b52, 0xfdab5b52},
+       {0x0000a2e0, 0xffecec00, 0xffecec00, 0xfd339c84, 0xfd339c84},
+       {0x0000a2e4, 0xfc0f0000, 0xfc0f0000, 0xfec3e000, 0xfec3e000},
+       {0x0000a2e8, 0xfc100000, 0xfc100000, 0xfffc0000, 0xfffc0000},
+       {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+       {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
+       {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
+       {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
+       {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
+       {0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400},
+       {0x0000a518, 0x21020220, 0x21020220, 0x16000402, 0x16000402},
+       {0x0000a51c, 0x27020223, 0x27020223, 0x19000404, 0x19000404},
+       {0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603},
+       {0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02},
+       {0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04},
+       {0x0000a52c, 0x3a02222a, 0x3a02222a, 0x28000a20, 0x28000a20},
+       {0x0000a530, 0x3e02222c, 0x3e02222c, 0x2c000e20, 0x2c000e20},
+       {0x0000a534, 0x4202242a, 0x4202242a, 0x30000e22, 0x30000e22},
+       {0x0000a538, 0x4702244a, 0x4702244a, 0x34000e24, 0x34000e24},
+       {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x38001640, 0x38001640},
+       {0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660},
+       {0x0000a544, 0x5302266c, 0x5302266c, 0x3f001861, 0x3f001861},
+       {0x0000a548, 0x5702286c, 0x5702286c, 0x43001a81, 0x43001a81},
+       {0x0000a54c, 0x5c04286b, 0x5c04286b, 0x47001a83, 0x47001a83},
+       {0x0000a550, 0x61042a6c, 0x61042a6c, 0x4a001c84, 0x4a001c84},
+       {0x0000a554, 0x66062a6c, 0x66062a6c, 0x4e001ce3, 0x4e001ce3},
+       {0x0000a558, 0x6b062e6c, 0x6b062e6c, 0x52001ce5, 0x52001ce5},
+       {0x0000a55c, 0x7006308c, 0x7006308c, 0x56001ce9, 0x56001ce9},
+       {0x0000a560, 0x730a308a, 0x730a308a, 0x5a001ceb, 0x5a001ceb},
+       {0x0000a564, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+       {0x0000a568, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+       {0x0000a56c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+       {0x0000a570, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+       {0x0000a574, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+       {0x0000a578, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+       {0x0000a57c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+       {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a614, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a618, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a61c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a620, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a624, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a628, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a62c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a630, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a634, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a638, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a63c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x00016044, 0x012482d4, 0x012482d4, 0x012482d4, 0x012482d4},
+       {0x00016048, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x00016054, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+};
+
+static const u32 ar9565_1p0_pciephy_pll_on_clkreq_disable_L1[][2] = {
+       /* Addr      allmodes  */
+       {0x00018c00, 0x18212ede},
+       {0x00018c04, 0x000801d8},
+       {0x00018c08, 0x0003780c},
+};
+
+static const u32 ar9565_1p0_modes_fast_clock[][3] = {
+       /* Addr      5G_HT20     5G_HT40   */
+       {0x00001030, 0x00000268, 0x000004d0},
+       {0x00001070, 0x0000018c, 0x00000318},
+       {0x000010b0, 0x00000fd0, 0x00001fa0},
+       {0x00008014, 0x044c044c, 0x08980898},
+       {0x0000801c, 0x148ec02b, 0x148ec057},
+       {0x00008318, 0x000044c0, 0x00008980},
+       {0x00009e00, 0x03721821, 0x03721821},
+       {0x0000a230, 0x0000400b, 0x00004016},
+       {0x0000a254, 0x00000898, 0x00001130},
+};
+
+static const u32 ar9565_1p0_common_wo_xlna_rx_gain_table[][2] = {
+       /* Addr      allmodes  */
+       {0x0000a000, 0x00010000},
+       {0x0000a004, 0x00030002},
+       {0x0000a008, 0x00050004},
+       {0x0000a00c, 0x00810080},
+       {0x0000a010, 0x00830082},
+       {0x0000a014, 0x01810180},
+       {0x0000a018, 0x01830182},
+       {0x0000a01c, 0x01850184},
+       {0x0000a020, 0x01890188},
+       {0x0000a024, 0x018b018a},
+       {0x0000a028, 0x018d018c},
+       {0x0000a02c, 0x03820190},
+       {0x0000a030, 0x03840383},
+       {0x0000a034, 0x03880385},
+       {0x0000a038, 0x038a0389},
+       {0x0000a03c, 0x038c038b},
+       {0x0000a040, 0x0390038d},
+       {0x0000a044, 0x03920391},
+       {0x0000a048, 0x03940393},
+       {0x0000a04c, 0x03960395},
+       {0x0000a050, 0x00000000},
+       {0x0000a054, 0x00000000},
+       {0x0000a058, 0x00000000},
+       {0x0000a05c, 0x00000000},
+       {0x0000a060, 0x00000000},
+       {0x0000a064, 0x00000000},
+       {0x0000a068, 0x00000000},
+       {0x0000a06c, 0x00000000},
+       {0x0000a070, 0x00000000},
+       {0x0000a074, 0x00000000},
+       {0x0000a078, 0x00000000},
+       {0x0000a07c, 0x00000000},
+       {0x0000a080, 0x29292929},
+       {0x0000a084, 0x29292929},
+       {0x0000a088, 0x29292929},
+       {0x0000a08c, 0x29292929},
+       {0x0000a090, 0x22292929},
+       {0x0000a094, 0x1d1d2222},
+       {0x0000a098, 0x0c111117},
+       {0x0000a09c, 0x00030303},
+       {0x0000a0a0, 0x00000000},
+       {0x0000a0a4, 0x00000000},
+       {0x0000a0a8, 0x00000000},
+       {0x0000a0ac, 0x00000000},
+       {0x0000a0b0, 0x00000000},
+       {0x0000a0b4, 0x00000000},
+       {0x0000a0b8, 0x00000000},
+       {0x0000a0bc, 0x00000000},
+       {0x0000a0c0, 0x00bf00a0},
+       {0x0000a0c4, 0x11a011a1},
+       {0x0000a0c8, 0x11be11bf},
+       {0x0000a0cc, 0x11bc11bd},
+       {0x0000a0d0, 0x22632264},
+       {0x0000a0d4, 0x22612262},
+       {0x0000a0d8, 0x227f2260},
+       {0x0000a0dc, 0x4322227e},
+       {0x0000a0e0, 0x43204321},
+       {0x0000a0e4, 0x433e433f},
+       {0x0000a0e8, 0x4462433d},
+       {0x0000a0ec, 0x44604461},
+       {0x0000a0f0, 0x447e447f},
+       {0x0000a0f4, 0x5582447d},
+       {0x0000a0f8, 0x55805581},
+       {0x0000a0fc, 0x559e559f},
+       {0x0000a100, 0x66816682},
+       {0x0000a104, 0x669f6680},
+       {0x0000a108, 0x669d669e},
+       {0x0000a10c, 0x77627763},
+       {0x0000a110, 0x77607761},
+       {0x0000a114, 0x00000000},
+       {0x0000a118, 0x00000000},
+       {0x0000a11c, 0x00000000},
+       {0x0000a120, 0x00000000},
+       {0x0000a124, 0x00000000},
+       {0x0000a128, 0x00000000},
+       {0x0000a12c, 0x00000000},
+       {0x0000a130, 0x00000000},
+       {0x0000a134, 0x00000000},
+       {0x0000a138, 0x00000000},
+       {0x0000a13c, 0x00000000},
+       {0x0000a140, 0x00bf00a0},
+       {0x0000a144, 0x11a011a1},
+       {0x0000a148, 0x11be11bf},
+       {0x0000a14c, 0x11bc11bd},
+       {0x0000a150, 0x22632264},
+       {0x0000a154, 0x22612262},
+       {0x0000a158, 0x227f2260},
+       {0x0000a15c, 0x4322227e},
+       {0x0000a160, 0x43204321},
+       {0x0000a164, 0x433e433f},
+       {0x0000a168, 0x4462433d},
+       {0x0000a16c, 0x44604461},
+       {0x0000a170, 0x447e447f},
+       {0x0000a174, 0x5582447d},
+       {0x0000a178, 0x55805581},
+       {0x0000a17c, 0x559e559f},
+       {0x0000a180, 0x66816682},
+       {0x0000a184, 0x669f6680},
+       {0x0000a188, 0x669d669e},
+       {0x0000a18c, 0x77627763},
+       {0x0000a190, 0x77607761},
+       {0x0000a194, 0x00000000},
+       {0x0000a198, 0x00000000},
+       {0x0000a19c, 0x00000000},
+       {0x0000a1a0, 0x00000000},
+       {0x0000a1a4, 0x00000000},
+       {0x0000a1a8, 0x00000000},
+       {0x0000a1ac, 0x00000000},
+       {0x0000a1b0, 0x00000000},
+       {0x0000a1b4, 0x00000000},
+       {0x0000a1b8, 0x00000000},
+       {0x0000a1bc, 0x00000000},
+       {0x0000a1c0, 0x00000000},
+       {0x0000a1c4, 0x00000000},
+       {0x0000a1c8, 0x00000000},
+       {0x0000a1cc, 0x00000000},
+       {0x0000a1d0, 0x00000000},
+       {0x0000a1d4, 0x00000000},
+       {0x0000a1d8, 0x00000000},
+       {0x0000a1dc, 0x00000000},
+       {0x0000a1e0, 0x00000000},
+       {0x0000a1e4, 0x00000000},
+       {0x0000a1e8, 0x00000000},
+       {0x0000a1ec, 0x00000000},
+       {0x0000a1f0, 0x00000396},
+       {0x0000a1f4, 0x00000396},
+       {0x0000a1f8, 0x00000396},
+       {0x0000a1fc, 0x00000196},
+       {0x0000b000, 0x00010000},
+       {0x0000b004, 0x00030002},
+       {0x0000b008, 0x00050004},
+       {0x0000b00c, 0x00810080},
+       {0x0000b010, 0x00830082},
+       {0x0000b014, 0x01810180},
+       {0x0000b018, 0x01830182},
+       {0x0000b01c, 0x01850184},
+       {0x0000b020, 0x02810280},
+       {0x0000b024, 0x02830282},
+       {0x0000b028, 0x02850284},
+       {0x0000b02c, 0x02890288},
+       {0x0000b030, 0x028b028a},
+       {0x0000b034, 0x0388028c},
+       {0x0000b038, 0x038a0389},
+       {0x0000b03c, 0x038c038b},
+       {0x0000b040, 0x0390038d},
+       {0x0000b044, 0x03920391},
+       {0x0000b048, 0x03940393},
+       {0x0000b04c, 0x03960395},
+       {0x0000b050, 0x00000000},
+       {0x0000b054, 0x00000000},
+       {0x0000b058, 0x00000000},
+       {0x0000b05c, 0x00000000},
+       {0x0000b060, 0x00000000},
+       {0x0000b064, 0x00000000},
+       {0x0000b068, 0x00000000},
+       {0x0000b06c, 0x00000000},
+       {0x0000b070, 0x00000000},
+       {0x0000b074, 0x00000000},
+       {0x0000b078, 0x00000000},
+       {0x0000b07c, 0x00000000},
+       {0x0000b080, 0x32323232},
+       {0x0000b084, 0x2f2f3232},
+       {0x0000b088, 0x23282a2d},
+       {0x0000b08c, 0x1c1e2123},
+       {0x0000b090, 0x14171919},
+       {0x0000b094, 0x0e0e1214},
+       {0x0000b098, 0x03050707},
+       {0x0000b09c, 0x00030303},
+       {0x0000b0a0, 0x00000000},
+       {0x0000b0a4, 0x00000000},
+       {0x0000b0a8, 0x00000000},
+       {0x0000b0ac, 0x00000000},
+       {0x0000b0b0, 0x00000000},
+       {0x0000b0b4, 0x00000000},
+       {0x0000b0b8, 0x00000000},
+       {0x0000b0bc, 0x00000000},
+       {0x0000b0c0, 0x003f0020},
+       {0x0000b0c4, 0x00400041},
+       {0x0000b0c8, 0x0140005f},
+       {0x0000b0cc, 0x0160015f},
+       {0x0000b0d0, 0x017e017f},
+       {0x0000b0d4, 0x02410242},
+       {0x0000b0d8, 0x025f0240},
+       {0x0000b0dc, 0x027f0260},
+       {0x0000b0e0, 0x0341027e},
+       {0x0000b0e4, 0x035f0340},
+       {0x0000b0e8, 0x037f0360},
+       {0x0000b0ec, 0x04400441},
+       {0x0000b0f0, 0x0460045f},
+       {0x0000b0f4, 0x0541047f},
+       {0x0000b0f8, 0x055f0540},
+       {0x0000b0fc, 0x057f0560},
+       {0x0000b100, 0x06400641},
+       {0x0000b104, 0x0660065f},
+       {0x0000b108, 0x067e067f},
+       {0x0000b10c, 0x07410742},
+       {0x0000b110, 0x075f0740},
+       {0x0000b114, 0x077f0760},
+       {0x0000b118, 0x07800781},
+       {0x0000b11c, 0x07a0079f},
+       {0x0000b120, 0x07c107bf},
+       {0x0000b124, 0x000007c0},
+       {0x0000b128, 0x00000000},
+       {0x0000b12c, 0x00000000},
+       {0x0000b130, 0x00000000},
+       {0x0000b134, 0x00000000},
+       {0x0000b138, 0x00000000},
+       {0x0000b13c, 0x00000000},
+       {0x0000b140, 0x003f0020},
+       {0x0000b144, 0x00400041},
+       {0x0000b148, 0x0140005f},
+       {0x0000b14c, 0x0160015f},
+       {0x0000b150, 0x017e017f},
+       {0x0000b154, 0x02410242},
+       {0x0000b158, 0x025f0240},
+       {0x0000b15c, 0x027f0260},
+       {0x0000b160, 0x0341027e},
+       {0x0000b164, 0x035f0340},
+       {0x0000b168, 0x037f0360},
+       {0x0000b16c, 0x04400441},
+       {0x0000b170, 0x0460045f},
+       {0x0000b174, 0x0541047f},
+       {0x0000b178, 0x055f0540},
+       {0x0000b17c, 0x057f0560},
+       {0x0000b180, 0x06400641},
+       {0x0000b184, 0x0660065f},
+       {0x0000b188, 0x067e067f},
+       {0x0000b18c, 0x07410742},
+       {0x0000b190, 0x075f0740},
+       {0x0000b194, 0x077f0760},
+       {0x0000b198, 0x07800781},
+       {0x0000b19c, 0x07a0079f},
+       {0x0000b1a0, 0x07c107bf},
+       {0x0000b1a4, 0x000007c0},
+       {0x0000b1a8, 0x00000000},
+       {0x0000b1ac, 0x00000000},
+       {0x0000b1b0, 0x00000000},
+       {0x0000b1b4, 0x00000000},
+       {0x0000b1b8, 0x00000000},
+       {0x0000b1bc, 0x00000000},
+       {0x0000b1c0, 0x00000000},
+       {0x0000b1c4, 0x00000000},
+       {0x0000b1c8, 0x00000000},
+       {0x0000b1cc, 0x00000000},
+       {0x0000b1d0, 0x00000000},
+       {0x0000b1d4, 0x00000000},
+       {0x0000b1d8, 0x00000000},
+       {0x0000b1dc, 0x00000000},
+       {0x0000b1e0, 0x00000000},
+       {0x0000b1e4, 0x00000000},
+       {0x0000b1e8, 0x00000000},
+       {0x0000b1ec, 0x00000000},
+       {0x0000b1f0, 0x00000396},
+       {0x0000b1f4, 0x00000396},
+       {0x0000b1f8, 0x00000396},
+       {0x0000b1fc, 0x00000196},
+};
+
+static const u32 ar9565_1p0_modes_low_ob_db_tx_gain_table[][5] = {
+       /* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
+       {0x0000a2dc, 0xfc0a9380, 0xfc0a9380, 0xfdab5b52, 0xfdab5b52},
+       {0x0000a2e0, 0xffecec00, 0xffecec00, 0xfd339c84, 0xfd339c84},
+       {0x0000a2e4, 0xfc0f0000, 0xfc0f0000, 0xfec3e000, 0xfec3e000},
+       {0x0000a2e8, 0xfc100000, 0xfc100000, 0xfffc0000, 0xfffc0000},
+       {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+       {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
+       {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
+       {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
+       {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
+       {0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400},
+       {0x0000a518, 0x21020220, 0x21020220, 0x16000402, 0x16000402},
+       {0x0000a51c, 0x27020223, 0x27020223, 0x19000404, 0x19000404},
+       {0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603},
+       {0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02},
+       {0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04},
+       {0x0000a52c, 0x3a02222a, 0x3a02222a, 0x28000a20, 0x28000a20},
+       {0x0000a530, 0x3e02222c, 0x3e02222c, 0x2c000e20, 0x2c000e20},
+       {0x0000a534, 0x4202242a, 0x4202242a, 0x30000e22, 0x30000e22},
+       {0x0000a538, 0x4702244a, 0x4702244a, 0x34000e24, 0x34000e24},
+       {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x38001640, 0x38001640},
+       {0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660},
+       {0x0000a544, 0x5302266c, 0x5302266c, 0x3f001861, 0x3f001861},
+       {0x0000a548, 0x5702286c, 0x5702286c, 0x43001a81, 0x43001a81},
+       {0x0000a54c, 0x5c04286b, 0x5c04286b, 0x47001a83, 0x47001a83},
+       {0x0000a550, 0x61042a6c, 0x61042a6c, 0x4a001c84, 0x4a001c84},
+       {0x0000a554, 0x66062a6c, 0x66062a6c, 0x4e001ce3, 0x4e001ce3},
+       {0x0000a558, 0x6b062e6c, 0x6b062e6c, 0x52001ce5, 0x52001ce5},
+       {0x0000a55c, 0x7006308c, 0x7006308c, 0x56001ce9, 0x56001ce9},
+       {0x0000a560, 0x730a308a, 0x730a308a, 0x5a001ceb, 0x5a001ceb},
+       {0x0000a564, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+       {0x0000a568, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+       {0x0000a56c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+       {0x0000a570, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+       {0x0000a574, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+       {0x0000a578, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+       {0x0000a57c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+       {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a614, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a618, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a61c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a620, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a624, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a628, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a62c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a630, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a634, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a638, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a63c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x00016044, 0x012482d4, 0x012482d4, 0x012482d4, 0x012482d4},
+       {0x00016048, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x00016054, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+};
+
+static const u32 ar9565_1p0_modes_high_ob_db_tx_gain_table[][5] = {
+       /* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
+       {0x0000a2dc, 0xfc0a9380, 0xfc0a9380, 0xfdab5b52, 0xfdab5b52},
+       {0x0000a2e0, 0xffecec00, 0xffecec00, 0xfd339c84, 0xfd339c84},
+       {0x0000a2e4, 0xfc0f0000, 0xfc0f0000, 0xfec3e000, 0xfec3e000},
+       {0x0000a2e8, 0xfc100000, 0xfc100000, 0xfffc0000, 0xfffc0000},
+       {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+       {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
+       {0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002},
+       {0x0000a508, 0x0b022220, 0x0b022220, 0x08000004, 0x08000004},
+       {0x0000a50c, 0x10022223, 0x10022223, 0x0c000200, 0x0c000200},
+       {0x0000a510, 0x15022620, 0x15022620, 0x10000202, 0x10000202},
+       {0x0000a514, 0x19022622, 0x19022622, 0x13000400, 0x13000400},
+       {0x0000a518, 0x1c022822, 0x1c022822, 0x17000402, 0x17000402},
+       {0x0000a51c, 0x21022842, 0x21022842, 0x1b000404, 0x1b000404},
+       {0x0000a520, 0x24022c41, 0x24022c41, 0x1e000603, 0x1e000603},
+       {0x0000a524, 0x29023042, 0x29023042, 0x23000a02, 0x23000a02},
+       {0x0000a528, 0x2d023044, 0x2d023044, 0x27000a04, 0x27000a04},
+       {0x0000a52c, 0x31023644, 0x31023644, 0x2a000a20, 0x2a000a20},
+       {0x0000a530, 0x36025643, 0x36025643, 0x2e000e20, 0x2e000e20},
+       {0x0000a534, 0x3a025a44, 0x3a025a44, 0x32000e22, 0x32000e22},
+       {0x0000a538, 0x3d025e45, 0x3d025e45, 0x36000e24, 0x36000e24},
+       {0x0000a53c, 0x43025e4a, 0x43025e4a, 0x3a001640, 0x3a001640},
+       {0x0000a540, 0x4a025e6c, 0x4a025e6c, 0x3e001660, 0x3e001660},
+       {0x0000a544, 0x50025e8e, 0x50025e8e, 0x41001861, 0x41001861},
+       {0x0000a548, 0x56025eb2, 0x56025eb2, 0x45001a81, 0x45001a81},
+       {0x0000a54c, 0x5c025eb5, 0x5c025eb5, 0x49001a83, 0x49001a83},
+       {0x0000a550, 0x62025ef6, 0x62025ef6, 0x4c001c84, 0x4c001c84},
+       {0x0000a554, 0x65025f56, 0x65025f56, 0x4f001ce3, 0x4f001ce3},
+       {0x0000a558, 0x69027f56, 0x69027f56, 0x53001ce5, 0x53001ce5},
+       {0x0000a55c, 0x6d029f56, 0x6d029f56, 0x57001ce9, 0x57001ce9},
+       {0x0000a560, 0x73049f56, 0x73049f56, 0x5b001ceb, 0x5b001ceb},
+       {0x0000a564, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec},
+       {0x0000a568, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec},
+       {0x0000a56c, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec},
+       {0x0000a570, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec},
+       {0x0000a574, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec},
+       {0x0000a578, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec},
+       {0x0000a57c, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec},
+       {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a60c, 0x00804000, 0x00804000, 0x00000000, 0x00000000},
+       {0x0000a610, 0x00804201, 0x00804201, 0x00000000, 0x00000000},
+       {0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000},
+       {0x0000a618, 0x00804201, 0x00804201, 0x01404501, 0x01404501},
+       {0x0000a61c, 0x02008201, 0x02008201, 0x02008501, 0x02008501},
+       {0x0000a620, 0x02c10a03, 0x02c10a03, 0x0280ca03, 0x0280ca03},
+       {0x0000a624, 0x04815205, 0x04815205, 0x02c10b04, 0x02c10b04},
+       {0x0000a628, 0x0581d406, 0x0581d406, 0x03814b04, 0x03814b04},
+       {0x0000a62c, 0x0581d607, 0x0581d607, 0x05018e05, 0x05018e05},
+       {0x0000a630, 0x0581d607, 0x0581d607, 0x05019406, 0x05019406},
+       {0x0000a634, 0x0581d607, 0x0581d607, 0x05019406, 0x05019406},
+       {0x0000a638, 0x0581d607, 0x0581d607, 0x05019406, 0x05019406},
+       {0x0000a63c, 0x0581d607, 0x0581d607, 0x05019406, 0x05019406},
+       {0x00016044, 0x056d82e4, 0x056d82e4, 0x056d82e4, 0x056d82e4},
+       {0x00016048, 0x8db49060, 0x8db49060, 0x8db49060, 0x8db49060},
+       {0x00016054, 0x6db60000, 0x6db60000, 0x6db60000, 0x6db60000},
+};
+
+static const u32 ar9565_1p0_modes_high_power_tx_gain_table[][5] = {
+       /* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
+       {0x0000a2dc, 0xfc0a9380, 0xfc0a9380, 0xfdab5b52, 0xfdab5b52},
+       {0x0000a2e0, 0xffecec00, 0xffecec00, 0xfd339c84, 0xfd339c84},
+       {0x0000a2e4, 0xfc0f0000, 0xfc0f0000, 0xfec3e000, 0xfec3e000},
+       {0x0000a2e8, 0xfc100000, 0xfc100000, 0xfffc0000, 0xfffc0000},
+       {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+       {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
+       {0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002},
+       {0x0000a508, 0x0a022220, 0x0a022220, 0x08000004, 0x08000004},
+       {0x0000a50c, 0x0f022223, 0x0f022223, 0x0b000200, 0x0b000200},
+       {0x0000a510, 0x14022620, 0x14022620, 0x0f000202, 0x0f000202},
+       {0x0000a514, 0x18022622, 0x18022622, 0x11000400, 0x11000400},
+       {0x0000a518, 0x1b022822, 0x1b022822, 0x15000402, 0x15000402},
+       {0x0000a51c, 0x20022842, 0x20022842, 0x19000404, 0x19000404},
+       {0x0000a520, 0x22022c41, 0x22022c41, 0x1b000603, 0x1b000603},
+       {0x0000a524, 0x28023042, 0x28023042, 0x1f000a02, 0x1f000a02},
+       {0x0000a528, 0x2c023044, 0x2c023044, 0x23000a04, 0x23000a04},
+       {0x0000a52c, 0x2f023644, 0x2f023644, 0x26000a20, 0x26000a20},
+       {0x0000a530, 0x34025643, 0x34025643, 0x2a000e20, 0x2a000e20},
+       {0x0000a534, 0x38025a44, 0x38025a44, 0x2e000e22, 0x2e000e22},
+       {0x0000a538, 0x3b025e45, 0x3b025e45, 0x31000e24, 0x31000e24},
+       {0x0000a53c, 0x41025e4a, 0x41025e4a, 0x34001640, 0x34001640},
+       {0x0000a540, 0x48025e6c, 0x48025e6c, 0x38001660, 0x38001660},
+       {0x0000a544, 0x4e025e8e, 0x4e025e8e, 0x3b001861, 0x3b001861},
+       {0x0000a548, 0x53025eb2, 0x53025eb2, 0x3e001a81, 0x3e001a81},
+       {0x0000a54c, 0x59025eb5, 0x59025eb5, 0x42001a83, 0x42001a83},
+       {0x0000a550, 0x5f025ef6, 0x5f025ef6, 0x44001c84, 0x44001c84},
+       {0x0000a554, 0x62025f56, 0x62025f56, 0x48001ce3, 0x48001ce3},
+       {0x0000a558, 0x66027f56, 0x66027f56, 0x4c001ce5, 0x4c001ce5},
+       {0x0000a55c, 0x6a029f56, 0x6a029f56, 0x50001ce9, 0x50001ce9},
+       {0x0000a560, 0x70049f56, 0x70049f56, 0x54001ceb, 0x54001ceb},
+       {0x0000a564, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+       {0x0000a568, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+       {0x0000a56c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+       {0x0000a570, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+       {0x0000a574, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+       {0x0000a578, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+       {0x0000a57c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+       {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a614, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a618, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a61c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a620, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a624, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a628, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a62c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a630, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a634, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a638, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a63c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x00016044, 0x056d82e6, 0x056d82e6, 0x056d82e6, 0x056d82e6},
+       {0x00016048, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x00016054, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+};
+
+#endif /* INITVALS_9565_1P0_H */
index b09285c36c4aaaeaa27ffb1f3be1adadb263dddc..dfe6a4707fd22684a5994de3b6c4d9dcadd456f2 100644 (file)
@@ -173,6 +173,8 @@ void ath_descdma_cleanup(struct ath_softc *sc, struct ath_descdma *dd,
 
 #define ATH_AN_2_TID(_an, _tidno)  (&(_an)->tid[(_tidno)])
 
+#define IS_CCK_RATE(rate) ((rate >= 0x18) && (rate <= 0x1e))
+
 #define ATH_TX_COMPLETE_POLL_INT       1000
 
 enum ATH_AGGR_STATUS {
@@ -280,6 +282,7 @@ struct ath_tx_control {
        struct ath_txq *txq;
        struct ath_node *an;
        u8 paprd;
+       struct ieee80211_sta *sta;
 };
 
 #define ATH_TX_ERROR        0x01
@@ -422,7 +425,6 @@ void ath9k_beacon_assign_slot(struct ath_softc *sc, struct ieee80211_vif *vif);
 void ath9k_beacon_remove_slot(struct ath_softc *sc, struct ieee80211_vif *vif);
 void ath9k_set_tsfadjust(struct ath_softc *sc, struct ieee80211_vif *vif);
 void ath9k_set_beacon(struct ath_softc *sc);
-void ath9k_set_beaconing_status(struct ath_softc *sc, bool status);
 
 /*******************/
 /* Link Monitoring */
@@ -472,7 +474,7 @@ struct ath_btcoex {
        unsigned long op_flags;
        int bt_stomp_type; /* Types of BT stomping */
        u32 btcoex_no_stomp; /* in usec */
-       u32 btcoex_period; /* in usec */
+       u32 btcoex_period; /* in msec */
        u32 btscan_no_stomp; /* in usec */
        u32 duty_cycle;
        u32 bt_wait_time;
@@ -537,6 +539,7 @@ struct ath9k_wow_pattern {
 #ifdef CONFIG_MAC80211_LEDS
 void ath_init_leds(struct ath_softc *sc);
 void ath_deinit_leds(struct ath_softc *sc);
+void ath_fill_led_pin(struct ath_softc *sc);
 #else
 static inline void ath_init_leds(struct ath_softc *sc)
 {
@@ -545,6 +548,9 @@ static inline void ath_init_leds(struct ath_softc *sc)
 static inline void ath_deinit_leds(struct ath_softc *sc)
 {
 }
+static inline void ath_fill_led_pin(struct ath_softc *sc)
+{
+}
 #endif
 
 /*******************************/
@@ -596,8 +602,6 @@ struct ath_ant_comb {
        int main_conf;
        enum ath9k_ant_div_comb_lna_conf first_quick_scan_conf;
        enum ath9k_ant_div_comb_lna_conf second_quick_scan_conf;
-       int first_bias;
-       int second_bias;
        bool first_ratio;
        bool second_ratio;
        unsigned long scan_start_time;
index acd437384fe47840852aeb3f967524ac52265975..419e9a3f2feda6c20fc7a504120f26b0820bb9ca 100644 (file)
@@ -43,8 +43,8 @@ static const u32 ar9003_wlan_weights[ATH_BTCOEX_STOMP_MAX]
        { 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, /* STOMP_NONE */
 };
 
-static const u32 ar9462_wlan_weights[ATH_BTCOEX_STOMP_MAX]
-                                   [AR9300_NUM_WLAN_WEIGHTS] = {
+static const u32 mci_wlan_weights[ATH_BTCOEX_STOMP_MAX]
+                                [AR9300_NUM_WLAN_WEIGHTS] = {
        { 0x01017d01, 0x41414101, 0x41414101, 0x41414141 }, /* STOMP_ALL */
        { 0x01017d01, 0x3b3b3b01, 0x3b3b3b01, 0x3b3b3b3b }, /* STOMP_LOW */
        { 0x01017d01, 0x01010101, 0x01010101, 0x01010101 }, /* STOMP_NONE */
@@ -208,14 +208,37 @@ static void ath9k_hw_btcoex_enable_2wire(struct ath_hw *ah)
                            AR_GPIO_OUTPUT_MUX_AS_TX_FRAME);
 }
 
+/*
+ * For AR9002, bt_weight/wlan_weight are used.
+ * For AR9003 and above, stomp_type is used.
+ */
 void ath9k_hw_btcoex_set_weight(struct ath_hw *ah,
                                u32 bt_weight,
-                               u32 wlan_weight)
+                               u32 wlan_weight,
+                               enum ath_stomp_type stomp_type)
 {
        struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
 
-       btcoex_hw->bt_coex_weights = SM(bt_weight, AR_BTCOEX_BT_WGHT) |
-                                    SM(wlan_weight, AR_BTCOEX_WL_WGHT);
+       if (AR_SREV_9300_20_OR_LATER(ah)) {
+               const u32 *weight = ar9003_wlan_weights[stomp_type];
+               int i;
+
+               if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
+                       if ((stomp_type == ATH_BTCOEX_STOMP_LOW) &&
+                           btcoex_hw->mci.stomp_ftp)
+                               stomp_type = ATH_BTCOEX_STOMP_LOW_FTP;
+                       weight = mci_wlan_weights[stomp_type];
+               }
+
+               for (i = 0; i < AR9300_NUM_WLAN_WEIGHTS; i++) {
+                       btcoex_hw->bt_weight[i] = AR9300_BT_WGHT;
+                       btcoex_hw->wlan_weight[i] = weight[i];
+               }
+       } else {
+               btcoex_hw->bt_coex_weights =
+                       SM(bt_weight, AR_BTCOEX_BT_WGHT) |
+                       SM(wlan_weight, AR_BTCOEX_WL_WGHT);
+       }
 }
 EXPORT_SYMBOL(ath9k_hw_btcoex_set_weight);
 
@@ -282,7 +305,7 @@ void ath9k_hw_btcoex_enable(struct ath_hw *ah)
                ath9k_hw_btcoex_enable_2wire(ah);
                break;
        case ATH_BTCOEX_CFG_3WIRE:
-               if (AR_SREV_9462(ah)) {
+               if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
                        ath9k_hw_btcoex_enable_mci(ah);
                        return;
                }
@@ -304,7 +327,7 @@ void ath9k_hw_btcoex_disable(struct ath_hw *ah)
        int i;
 
        btcoex_hw->enabled = false;
-       if (AR_SREV_9462(ah)) {
+       if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
                ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_NONE);
                for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++)
                        REG_WRITE(ah, AR_MCI_COEX_WL_WEIGHTS(i),
@@ -332,26 +355,6 @@ void ath9k_hw_btcoex_disable(struct ath_hw *ah)
 }
 EXPORT_SYMBOL(ath9k_hw_btcoex_disable);
 
-static void ar9003_btcoex_bt_stomp(struct ath_hw *ah,
-                        enum ath_stomp_type stomp_type)
-{
-       struct ath_btcoex_hw *btcoex = &ah->btcoex_hw;
-       const u32 *weight = ar9003_wlan_weights[stomp_type];
-       int i;
-
-       if (AR_SREV_9462(ah)) {
-               if ((stomp_type == ATH_BTCOEX_STOMP_LOW) &&
-                   btcoex->mci.stomp_ftp)
-                       stomp_type = ATH_BTCOEX_STOMP_LOW_FTP;
-               weight = ar9462_wlan_weights[stomp_type];
-       }
-
-       for (i = 0; i < AR9300_NUM_WLAN_WEIGHTS; i++) {
-               btcoex->bt_weight[i] = AR9300_BT_WGHT;
-               btcoex->wlan_weight[i] = weight[i];
-       }
-}
-
 /*
  * Configures appropriate weight based on stomp type.
  */
@@ -359,22 +362,22 @@ void ath9k_hw_btcoex_bt_stomp(struct ath_hw *ah,
                              enum ath_stomp_type stomp_type)
 {
        if (AR_SREV_9300_20_OR_LATER(ah)) {
-               ar9003_btcoex_bt_stomp(ah, stomp_type);
+               ath9k_hw_btcoex_set_weight(ah, 0, 0, stomp_type);
                return;
        }
 
        switch (stomp_type) {
        case ATH_BTCOEX_STOMP_ALL:
                ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
-                               AR_STOMP_ALL_WLAN_WGHT);
+                                          AR_STOMP_ALL_WLAN_WGHT, 0);
                break;
        case ATH_BTCOEX_STOMP_LOW:
                ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
-                               AR_STOMP_LOW_WLAN_WGHT);
+                                          AR_STOMP_LOW_WLAN_WGHT, 0);
                break;
        case ATH_BTCOEX_STOMP_NONE:
                ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
-                               AR_STOMP_NONE_WLAN_WGHT);
+                                          AR_STOMP_NONE_WLAN_WGHT, 0);
                break;
        default:
                ath_dbg(ath9k_hw_common(ah), BTCOEX, "Invalid Stomptype\n");
index 20092f98658f84b3f0ad2429543f168de2d2866b..385197ad79b006f494c3659dc88efb343038c184 100644 (file)
@@ -107,7 +107,8 @@ void ath9k_hw_btcoex_init_mci(struct ath_hw *ah);
 void ath9k_hw_init_btcoex_hw(struct ath_hw *ah, int qnum);
 void ath9k_hw_btcoex_set_weight(struct ath_hw *ah,
                                u32 bt_weight,
-                               u32 wlan_weight);
+                               u32 wlan_weight,
+                               enum ath_stomp_type stomp_type);
 void ath9k_hw_btcoex_disable(struct ath_hw *ah);
 void ath9k_hw_btcoex_bt_stomp(struct ath_hw *ah,
                              enum ath_stomp_type stomp_type);
index c8ef30127adb53da590bf4d6350798fa261dffa6..6727b566d294a43073c2a4cf3cc532f0e633b34e 100644 (file)
@@ -222,6 +222,57 @@ static const struct file_operations fops_disable_ani = {
        .llseek = default_llseek,
 };
 
+static ssize_t read_file_ant_diversity(struct file *file, char __user *user_buf,
+                                      size_t count, loff_t *ppos)
+{
+       struct ath_softc *sc = file->private_data;
+       struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+       char buf[32];
+       unsigned int len;
+
+       len = sprintf(buf, "%d\n", common->antenna_diversity);
+       return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_ant_diversity(struct file *file,
+                                       const char __user *user_buf,
+                                       size_t count, loff_t *ppos)
+{
+       struct ath_softc *sc = file->private_data;
+       struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+       unsigned long antenna_diversity;
+       char buf[32];
+       ssize_t len;
+
+       len = min(count, sizeof(buf) - 1);
+       if (copy_from_user(buf, user_buf, len))
+               return -EFAULT;
+
+       if (!AR_SREV_9565(sc->sc_ah))
+               goto exit;
+
+       buf[len] = '\0';
+       if (strict_strtoul(buf, 0, &antenna_diversity))
+               return -EINVAL;
+
+       common->antenna_diversity = !!antenna_diversity;
+       ath9k_ps_wakeup(sc);
+       ath_ant_comb_update(sc);
+       ath_dbg(common, CONFIG, "Antenna diversity: %d\n",
+               common->antenna_diversity);
+       ath9k_ps_restore(sc);
+exit:
+       return count;
+}
+
+static const struct file_operations fops_ant_diversity = {
+       .read = read_file_ant_diversity,
+       .write = write_file_ant_diversity,
+       .open = simple_open,
+       .owner = THIS_MODULE,
+       .llseek = default_llseek,
+};
+
 static ssize_t read_file_dma(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
 {
@@ -373,6 +424,8 @@ void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status)
                sc->debug.stats.istats.tsfoor++;
        if (status & ATH9K_INT_MCI)
                sc->debug.stats.istats.mci++;
+       if (status & ATH9K_INT_GENTIMER)
+               sc->debug.stats.istats.gen_timer++;
 }
 
 static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
@@ -418,6 +471,7 @@ static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
        PR_IS("DTIM", dtim);
        PR_IS("TSFOOR", tsfoor);
        PR_IS("MCI", mci);
+       PR_IS("GENTIMER", gen_timer);
        PR_IS("TOTAL", total);
 
        len += snprintf(buf + len, mxlen - len,
@@ -1598,12 +1652,12 @@ int ath9k_init_debug(struct ath_hw *ah)
        debugfs_create_file("samples", S_IRUSR, sc->debug.debugfs_phy, sc,
                            &fops_samps);
 #endif
-
        debugfs_create_u32("gpio_mask", S_IRUSR | S_IWUSR,
                           sc->debug.debugfs_phy, &sc->sc_ah->gpio_mask);
-
        debugfs_create_u32("gpio_val", S_IRUSR | S_IWUSR,
                           sc->debug.debugfs_phy, &sc->sc_ah->gpio_val);
+       debugfs_create_file("diversity", S_IRUSR | S_IWUSR,
+                           sc->debug.debugfs_phy, sc, &fops_ant_diversity);
 
        return 0;
 }
index 8b9d080d89da7ae3b276589bd8398fc39d211bb8..2ed9785a38fa0467a8ab8be32d30c912a5194a87 100644 (file)
@@ -41,7 +41,6 @@ enum ath_reset_type {
        RESET_TYPE_PLL_HANG,
        RESET_TYPE_MAC_HANG,
        RESET_TYPE_BEACON_STUCK,
-       RESET_TYPE_MCI,
        __RESET_TYPE_MAX
 };
 
@@ -74,6 +73,8 @@ enum ath_reset_type {
  * from a beacon differs from the PCU's internal TSF by more than a
  * (programmable) threshold
  * @local_timeout: Internal bus timeout.
+ * @mci: MCI interrupt, specific to MCI based BTCOEX chipsets
+ * @gen_timer: Generic hardware timer interrupt
  */
 struct ath_interrupt_stats {
        u32 total;
@@ -100,6 +101,7 @@ struct ath_interrupt_stats {
        u32 bb_watchdog;
        u32 tsfoor;
        u32 mci;
+       u32 gen_timer;
 
        /* Sync-cause stats */
        u32 sync_cause_all;
index 484b313059061ac3b6fdc74f18787ef059e58933..319c651fa6c5298d66d6d4a44969b569aafcf5aa 100644 (file)
@@ -96,6 +96,7 @@
 
 #define ATH9K_POW_SM(_r, _s)   (((_r) & 0x3f) << (_s))
 #define FREQ2FBIN(x, y)                ((y) ? ((x) - 2300) : (((x) - 4800) / 5))
+#define FBIN2FREQ(x, y)                ((y) ? (2300 + x) : (4800 + 5 * x))
 #define ath9k_hw_use_flash(_ah)        (!(_ah->ah_flags & AH_USE_EEPROM))
 
 #define AR5416_VER_MASK (eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK)
 #define EEP_RFSILENT_ENABLED_S      0
 #define EEP_RFSILENT_POLARITY       0x0002
 #define EEP_RFSILENT_POLARITY_S     1
-#define EEP_RFSILENT_GPIO_SEL       (AR_SREV_9462(ah) ? 0x00fc : 0x001c)
+#define EEP_RFSILENT_GPIO_SEL       ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x00fc : 0x001c)
 #define EEP_RFSILENT_GPIO_SEL_S     2
 
 #define AR5416_OPFLAGS_11A           0x01
index 9f83f71742a5ecb774f95c3d563f2e0dc7d37ab7..d9ed141a053e6a885fcdf031866f9daa1e5918d2 100644 (file)
@@ -44,25 +44,6 @@ void ath_init_leds(struct ath_softc *sc)
        if (AR_SREV_9100(sc->sc_ah))
                return;
 
-       if (sc->sc_ah->led_pin < 0) {
-               if (AR_SREV_9287(sc->sc_ah))
-                       sc->sc_ah->led_pin = ATH_LED_PIN_9287;
-               else if (AR_SREV_9485(sc->sc_ah))
-                       sc->sc_ah->led_pin = ATH_LED_PIN_9485;
-               else if (AR_SREV_9300(sc->sc_ah))
-                       sc->sc_ah->led_pin = ATH_LED_PIN_9300;
-               else if (AR_SREV_9462(sc->sc_ah))
-                       sc->sc_ah->led_pin = ATH_LED_PIN_9462;
-               else
-                       sc->sc_ah->led_pin = ATH_LED_PIN_DEF;
-       }
-
-       /* Configure gpio 1 for output */
-       ath9k_hw_cfg_output(sc->sc_ah, sc->sc_ah->led_pin,
-                           AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
-       /* LED off, active low */
-       ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
-
        if (!led_blink)
                sc->led_cdev.default_trigger =
                        ieee80211_get_radio_led_name(sc->hw);
@@ -78,6 +59,31 @@ void ath_init_leds(struct ath_softc *sc)
 
        sc->led_registered = true;
 }
+
+void ath_fill_led_pin(struct ath_softc *sc)
+{
+       struct ath_hw *ah = sc->sc_ah;
+
+       if (AR_SREV_9100(ah) || (ah->led_pin >= 0))
+               return;
+
+       if (AR_SREV_9287(ah))
+               ah->led_pin = ATH_LED_PIN_9287;
+       else if (AR_SREV_9485(sc->sc_ah))
+               ah->led_pin = ATH_LED_PIN_9485;
+       else if (AR_SREV_9300(sc->sc_ah))
+               ah->led_pin = ATH_LED_PIN_9300;
+       else if (AR_SREV_9462(sc->sc_ah) || AR_SREV_9565(sc->sc_ah))
+               ah->led_pin = ATH_LED_PIN_9462;
+       else
+               ah->led_pin = ATH_LED_PIN_DEF;
+
+       /* Configure gpio 1 for output */
+       ath9k_hw_cfg_output(ah, ah->led_pin, AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
+
+       /* LED off, active low */
+       ath9k_hw_set_gpio(ah, ah->led_pin, 1);
+}
 #endif
 
 /*******************/
@@ -228,7 +234,12 @@ static void ath_btcoex_period_timer(unsigned long data)
        ath9k_hw_btcoex_enable(ah);
        spin_unlock_bh(&btcoex->btcoex_lock);
 
-       if (btcoex->btcoex_period != btcoex->btcoex_no_stomp) {
+       /*
+        * btcoex_period is in msec while (btcoex/btscan_)no_stomp are in usec,
+        * ensure that we properly convert btcoex_period to usec
+        * for any comparison with (btcoex/btscan_)no_stomp.
+        */
+       if (btcoex->btcoex_period * 1000 != btcoex->btcoex_no_stomp) {
                if (btcoex->hw_timer_enabled)
                        ath9k_gen_timer_stop(ah, btcoex->no_stomp_timer);
 
@@ -309,8 +320,10 @@ void ath9k_btcoex_timer_resume(struct ath_softc *sc)
        ath_dbg(ath9k_hw_common(ah), BTCOEX, "Starting btcoex timers\n");
 
        /* make sure duty cycle timer is also stopped when resuming */
-       if (btcoex->hw_timer_enabled)
+       if (btcoex->hw_timer_enabled) {
                ath9k_gen_timer_stop(sc->sc_ah, btcoex->no_stomp_timer);
+               btcoex->hw_timer_enabled = false;
+       }
 
        btcoex->bt_priority_cnt = 0;
        btcoex->bt_priority_time = jiffies;
@@ -331,18 +344,20 @@ void ath9k_btcoex_timer_pause(struct ath_softc *sc)
 
        del_timer_sync(&btcoex->period_timer);
 
-       if (btcoex->hw_timer_enabled)
+       if (btcoex->hw_timer_enabled) {
                ath9k_gen_timer_stop(ah, btcoex->no_stomp_timer);
-
-       btcoex->hw_timer_enabled = false;
+               btcoex->hw_timer_enabled = false;
+       }
 }
 
 void ath9k_btcoex_stop_gen_timer(struct ath_softc *sc)
 {
        struct ath_btcoex *btcoex = &sc->btcoex;
 
-       if (btcoex->hw_timer_enabled)
+       if (btcoex->hw_timer_enabled) {
                ath9k_gen_timer_stop(sc->sc_ah, btcoex->no_stomp_timer);
+               btcoex->hw_timer_enabled = false;
+       }
 }
 
 u16 ath9k_btcoex_aggr_limit(struct ath_softc *sc, u32 max_4ms_framelen)
@@ -380,7 +395,10 @@ void ath9k_start_btcoex(struct ath_softc *sc)
            !ah->btcoex_hw.enabled) {
                if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_MCI))
                        ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
-                                                  AR_STOMP_LOW_WLAN_WGHT);
+                                                  AR_STOMP_LOW_WLAN_WGHT, 0);
+               else
+                       ath9k_hw_btcoex_set_weight(ah, 0, 0,
+                                                  ATH_BTCOEX_STOMP_NONE);
                ath9k_hw_btcoex_enable(ah);
 
                if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE)
@@ -397,7 +415,7 @@ void ath9k_stop_btcoex(struct ath_softc *sc)
                if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE)
                        ath9k_btcoex_timer_pause(sc);
                ath9k_hw_btcoex_disable(ah);
-               if (AR_SREV_9462(ah))
+               if (AR_SREV_9462(ah) || AR_SREV_9565(ah))
                        ath_mci_flush_profile(&sc->btcoex.mci);
        }
 }
index aa327adcc3d8ffdf09261884580914ab41556ade..924c4616c3d990dc7f03e46ed299bdc94396d765 100644 (file)
@@ -973,8 +973,8 @@ static void ath9k_hif_usb_dealloc_urbs(struct hif_device_usb *hif_dev)
 static int ath9k_hif_usb_download_fw(struct hif_device_usb *hif_dev)
 {
        int transfer, err;
-       const void *data = hif_dev->firmware->data;
-       size_t len = hif_dev->firmware->size;
+       const void *data = hif_dev->fw_data;
+       size_t len = hif_dev->fw_size;
        u32 addr = AR9271_FIRMWARE;
        u8 *buf = kzalloc(4096, GFP_KERNEL);
        u32 firm_offset;
@@ -1017,7 +1017,7 @@ static int ath9k_hif_usb_download_fw(struct hif_device_usb *hif_dev)
                return -EIO;
 
        dev_info(&hif_dev->udev->dev, "ath9k_htc: Transferred FW: %s, size: %ld\n",
-                hif_dev->fw_name, (unsigned long) hif_dev->firmware->size);
+                hif_dev->fw_name, (unsigned long) hif_dev->fw_size);
 
        return 0;
 }
@@ -1072,14 +1072,15 @@ static void ath9k_hif_usb_dev_deinit(struct hif_device_usb *hif_dev)
  */
 static void ath9k_hif_usb_firmware_fail(struct hif_device_usb *hif_dev)
 {
-       struct device *parent = hif_dev->udev->dev.parent;
+       struct device *dev = &hif_dev->udev->dev;
+       struct device *parent = dev->parent;
 
        complete(&hif_dev->fw_done);
 
        if (parent)
                device_lock(parent);
 
-       device_release_driver(&hif_dev->udev->dev);
+       device_release_driver(dev);
 
        if (parent)
                device_unlock(parent);
@@ -1099,11 +1100,11 @@ static void ath9k_hif_usb_firmware_cb(const struct firmware *fw, void *context)
 
        hif_dev->htc_handle = ath9k_htc_hw_alloc(hif_dev, &hif_usb,
                                                 &hif_dev->udev->dev);
-       if (hif_dev->htc_handle == NULL) {
-               goto err_fw;
-       }
+       if (hif_dev->htc_handle == NULL)
+               goto err_dev_alloc;
 
-       hif_dev->firmware = fw;
+       hif_dev->fw_data = fw->data;
+       hif_dev->fw_size = fw->size;
 
        /* Proceed with initialization */
 
@@ -1121,6 +1122,8 @@ static void ath9k_hif_usb_firmware_cb(const struct firmware *fw, void *context)
                goto err_htc_hw_init;
        }
 
+       release_firmware(fw);
+       hif_dev->flags |= HIF_USB_READY;
        complete(&hif_dev->fw_done);
 
        return;
@@ -1129,8 +1132,8 @@ err_htc_hw_init:
        ath9k_hif_usb_dev_deinit(hif_dev);
 err_dev_init:
        ath9k_htc_hw_free(hif_dev->htc_handle);
+err_dev_alloc:
        release_firmware(fw);
-       hif_dev->firmware = NULL;
 err_fw:
        ath9k_hif_usb_firmware_fail(hif_dev);
 }
@@ -1277,11 +1280,10 @@ static void ath9k_hif_usb_disconnect(struct usb_interface *interface)
 
        wait_for_completion(&hif_dev->fw_done);
 
-       if (hif_dev->firmware) {
+       if (hif_dev->flags & HIF_USB_READY) {
                ath9k_htc_hw_deinit(hif_dev->htc_handle, unplugged);
                ath9k_htc_hw_free(hif_dev->htc_handle);
                ath9k_hif_usb_dev_deinit(hif_dev);
-               release_firmware(hif_dev->firmware);
        }
 
        usb_set_intfdata(interface, NULL);
@@ -1317,13 +1319,23 @@ static int ath9k_hif_usb_resume(struct usb_interface *interface)
        struct hif_device_usb *hif_dev = usb_get_intfdata(interface);
        struct htc_target *htc_handle = hif_dev->htc_handle;
        int ret;
+       const struct firmware *fw;
 
        ret = ath9k_hif_usb_alloc_urbs(hif_dev);
        if (ret)
                return ret;
 
-       if (hif_dev->firmware) {
+       if (hif_dev->flags & HIF_USB_READY) {
+               /* request cached firmware during suspend/resume cycle */
+               ret = request_firmware(&fw, hif_dev->fw_name,
+                                      &hif_dev->udev->dev);
+               if (ret)
+                       goto fail_resume;
+
+               hif_dev->fw_data = fw->data;
+               hif_dev->fw_size = fw->size;
                ret = ath9k_hif_usb_download_fw(hif_dev);
+               release_firmware(fw);
                if (ret)
                        goto fail_resume;
        } else {
index 487ff658b4c1890f597045c739d919ff6dd16689..51496e74b83eaf3521230421f3a07350c385e3d4 100644 (file)
@@ -85,12 +85,14 @@ struct cmd_buf {
 };
 
 #define HIF_USB_START BIT(0)
+#define HIF_USB_READY BIT(1)
 
 struct hif_device_usb {
        struct usb_device *udev;
        struct usb_interface *interface;
        const struct usb_device_id *usb_device_id;
-       const struct firmware *firmware;
+       const void *fw_data;
+       size_t fw_size;
        struct completion fw_done;
        struct htc_target *htc_handle;
        struct hif_usb_tx tx;
index 936e920fb88e7cc0dfc40766f27b0e7ab1fa7ab3..b30596fcf73a57ed5e63d81d2b465b7e4e4b7f48 100644 (file)
@@ -542,6 +542,7 @@ void ath9k_htc_stop_ani(struct ath9k_htc_priv *priv);
 
 int ath9k_tx_init(struct ath9k_htc_priv *priv);
 int ath9k_htc_tx_start(struct ath9k_htc_priv *priv,
+                      struct ieee80211_sta *sta,
                       struct sk_buff *skb, u8 slot, bool is_cab);
 void ath9k_tx_cleanup(struct ath9k_htc_priv *priv);
 bool ath9k_htc_txq_setup(struct ath9k_htc_priv *priv, int subtype);
index 77d541feb9102a9af2e8aabfea180da57fc5d317..f42d2eb6af99302f449ef44beac8f0120238e73d 100644 (file)
@@ -326,7 +326,7 @@ static void ath9k_htc_send_buffered(struct ath9k_htc_priv *priv,
                        goto next;
                }
 
-               ret = ath9k_htc_tx_start(priv, skb, tx_slot, true);
+               ret = ath9k_htc_tx_start(priv, NULL, skb, tx_slot, true);
                if (ret != 0) {
                        ath9k_htc_tx_clear_slot(priv, tx_slot);
                        dev_kfree_skb_any(skb);
index 07df279c8d467a0ee33bf4c44015a6b2d3f69dbf..0eacfc13c9155feb4af8cb7c4d1e4b1918c0cd39 100644 (file)
@@ -161,7 +161,7 @@ void ath9k_htc_start_btcoex(struct ath9k_htc_priv *priv)
 
        if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE) {
                ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
-                                          AR_STOMP_LOW_WLAN_WGHT);
+                                          AR_STOMP_LOW_WLAN_WGHT, 0);
                ath9k_hw_btcoex_enable(ah);
                ath_htc_resume_btcoex_work(priv);
        }
@@ -173,17 +173,26 @@ void ath9k_htc_stop_btcoex(struct ath9k_htc_priv *priv)
 
        if (ah->btcoex_hw.enabled &&
            ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE) {
-               ath9k_hw_btcoex_disable(ah);
                if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
                        ath_htc_cancel_btcoex_work(priv);
+               ath9k_hw_btcoex_disable(ah);
        }
 }
 
 void ath9k_htc_init_btcoex(struct ath9k_htc_priv *priv, char *product)
 {
        struct ath_hw *ah = priv->ah;
+       struct ath_common *common = ath9k_hw_common(ah);
        int qnum;
 
+       /*
+        * Check if BTCOEX is globally disabled.
+        */
+       if (!common->btcoex_enabled) {
+               ah->btcoex_hw.scheme = ATH_BTCOEX_CFG_NONE;
+               return;
+       }
+
        if (product && strncmp(product, ATH_HTC_BTCOEX_PRODUCT_ID, 5) == 0) {
                ah->btcoex_hw.scheme = ATH_BTCOEX_CFG_3WIRE;
        }
index a035a380d669b6dcd723e9e3d8aabef70feba79b..d98255eb1b9aa4f1809df6d7c9b1b2d6eb2b141b 100644 (file)
@@ -30,6 +30,10 @@ int htc_modparam_nohwcrypt;
 module_param_named(nohwcrypt, htc_modparam_nohwcrypt, int, 0444);
 MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");
 
+static int ath9k_htc_btcoex_enable;
+module_param_named(btcoex_enable, ath9k_htc_btcoex_enable, int, 0444);
+MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence");
+
 #define CHAN2G(_freq, _idx)  { \
        .center_freq = (_freq), \
        .hw_value = (_idx), \
@@ -635,6 +639,7 @@ static int ath9k_init_priv(struct ath9k_htc_priv *priv,
        common->hw = priv->hw;
        common->priv = priv;
        common->debug_mask = ath9k_debug;
+       common->btcoex_enabled = ath9k_htc_btcoex_enable == 1;
 
        spin_lock_init(&priv->beacon_lock);
        spin_lock_init(&priv->tx.tx_lock);
index c785129692ff028db0954cf01fe2296c0a4e46c8..ca78e33ca23ec1393dd6fa9b109a7443cf865cf8 100644 (file)
@@ -489,24 +489,20 @@ static int ath9k_htc_add_station(struct ath9k_htc_priv *priv,
                ista = (struct ath9k_htc_sta *) sta->drv_priv;
                memcpy(&tsta.macaddr, sta->addr, ETH_ALEN);
                memcpy(&tsta.bssid, common->curbssid, ETH_ALEN);
-               tsta.is_vif_sta = 0;
                ista->index = sta_idx;
+               tsta.is_vif_sta = 0;
+               maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
+                                sta->ht_cap.ampdu_factor);
+               tsta.maxampdu = cpu_to_be16(maxampdu);
        } else {
                memcpy(&tsta.macaddr, vif->addr, ETH_ALEN);
                tsta.is_vif_sta = 1;
+               tsta.maxampdu = cpu_to_be16(0xffff);
        }
 
        tsta.sta_index = sta_idx;
        tsta.vif_index = avp->index;
 
-       if (!sta) {
-               tsta.maxampdu = cpu_to_be16(0xffff);
-       } else {
-               maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
-                                sta->ht_cap.ampdu_factor);
-               tsta.maxampdu = cpu_to_be16(maxampdu);
-       }
-
        WMI_CMD_BUF(WMI_NODE_CREATE_CMDID, &tsta);
        if (ret) {
                if (sta)
@@ -856,7 +852,9 @@ set_timer:
 /* mac80211 Callbacks */
 /**********************/
 
-static void ath9k_htc_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void ath9k_htc_tx(struct ieee80211_hw *hw,
+                        struct ieee80211_tx_control *control,
+                        struct sk_buff *skb)
 {
        struct ieee80211_hdr *hdr;
        struct ath9k_htc_priv *priv = hw->priv;
@@ -883,7 +881,7 @@ static void ath9k_htc_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
                goto fail_tx;
        }
 
-       ret = ath9k_htc_tx_start(priv, skb, slot, false);
+       ret = ath9k_htc_tx_start(priv, control->sta, skb, slot, false);
        if (ret != 0) {
                ath_dbg(common, XMIT, "Tx failed\n");
                goto clear_slot;
@@ -1331,6 +1329,34 @@ static int ath9k_htc_sta_remove(struct ieee80211_hw *hw,
        return ret;
 }
 
+static void ath9k_htc_sta_rc_update(struct ieee80211_hw *hw,
+                                   struct ieee80211_vif *vif,
+                                   struct ieee80211_sta *sta, u32 changed)
+{
+       struct ath9k_htc_priv *priv = hw->priv;
+       struct ath_common *common = ath9k_hw_common(priv->ah);
+       struct ath9k_htc_target_rate trate;
+
+       mutex_lock(&priv->mutex);
+       ath9k_htc_ps_wakeup(priv);
+
+       if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
+               memset(&trate, 0, sizeof(struct ath9k_htc_target_rate));
+               ath9k_htc_setup_rate(priv, sta, &trate);
+               if (!ath9k_htc_send_rate_cmd(priv, &trate))
+                       ath_dbg(common, CONFIG,
+                               "Supported rates for sta: %pM updated, rate caps: 0x%X\n",
+                               sta->addr, be32_to_cpu(trate.capflags));
+               else
+                       ath_dbg(common, CONFIG,
+                               "Unable to update supported rates for sta: %pM\n",
+                               sta->addr);
+       }
+
+       ath9k_htc_ps_restore(priv);
+       mutex_unlock(&priv->mutex);
+}
+
 static int ath9k_htc_conf_tx(struct ieee80211_hw *hw,
                             struct ieee80211_vif *vif, u16 queue,
                             const struct ieee80211_tx_queue_params *params)
@@ -1419,7 +1445,7 @@ static int ath9k_htc_set_key(struct ieee80211_hw *hw,
                                key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
                        if (priv->ah->sw_mgmt_crypto &&
                            key->cipher == WLAN_CIPHER_SUITE_CCMP)
-                               key->flags |= IEEE80211_KEY_FLAG_SW_MGMT;
+                               key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
                        ret = 0;
                }
                break;
@@ -1758,6 +1784,7 @@ struct ieee80211_ops ath9k_htc_ops = {
        .sta_add            = ath9k_htc_sta_add,
        .sta_remove         = ath9k_htc_sta_remove,
        .conf_tx            = ath9k_htc_conf_tx,
+       .sta_rc_update      = ath9k_htc_sta_rc_update,
        .bss_info_changed   = ath9k_htc_bss_info_changed,
        .set_key            = ath9k_htc_set_key,
        .get_tsf            = ath9k_htc_get_tsf,
index 47e61d0da33bf1043b02dcc5e1422aa3c83e37cc..06cdcb772d786038b7f1e5219349a6a682b216c9 100644 (file)
@@ -333,12 +333,12 @@ static void ath9k_htc_tx_data(struct ath9k_htc_priv *priv,
 }
 
 int ath9k_htc_tx_start(struct ath9k_htc_priv *priv,
+                      struct ieee80211_sta *sta,
                       struct sk_buff *skb,
                       u8 slot, bool is_cab)
 {
        struct ieee80211_hdr *hdr;
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
-       struct ieee80211_sta *sta = tx_info->control.sta;
        struct ieee80211_vif *vif = tx_info->control.vif;
        struct ath9k_htc_sta *ista;
        struct ath9k_htc_vif *avp = NULL;
index 265bf77598a268c60a4392165db2a12fde95324c..0f2b97f6b7390e32a920e0d645449e820d9d195a 100644 (file)
@@ -78,6 +78,13 @@ static inline void ath9k_hw_antdiv_comb_conf_set(struct ath_hw *ah,
        ath9k_hw_ops(ah)->antdiv_comb_conf_set(ah, antconf);
 }
 
+static inline void ath9k_hw_antctrl_shared_chain_lnadiv(struct ath_hw *ah,
+                                                       bool enable)
+{
+       if (ath9k_hw_ops(ah)->antctrl_shared_chain_lnadiv)
+               ath9k_hw_ops(ah)->antctrl_shared_chain_lnadiv(ah, enable);
+}
+
 /* Private hardware call ops */
 
 /* PHY ops */
index 4faf0a3958765bd07ae683fdd6dceb879a5adbfd..f9a6ec5cf4704818a6da783338ada8e3e737c759 100644 (file)
@@ -24,6 +24,7 @@
 #include "rc.h"
 #include "ar9003_mac.h"
 #include "ar9003_mci.h"
+#include "ar9003_phy.h"
 #include "debug.h"
 #include "ath9k.h"
 
@@ -355,7 +356,7 @@ static void ath9k_hw_read_revisions(struct ath_hw *ah)
                        (val & AR_SREV_VERSION2) >> AR_SREV_TYPE2_S;
                ah->hw_version.macRev = MS(val, AR_SREV_REVISION2);
 
-               if (AR_SREV_9462(ah))
+               if (AR_SREV_9462(ah) || AR_SREV_9565(ah))
                        ah->is_pciexpress = true;
                else
                        ah->is_pciexpress = (val &
@@ -602,6 +603,11 @@ static int __ath9k_hw_init(struct ath_hw *ah)
        if (AR_SREV_9462(ah))
                ah->WARegVal &= ~AR_WA_D3_L1_DISABLE;
 
+       if (AR_SREV_9565(ah)) {
+               ah->WARegVal |= AR_WA_BIT22;
+               REG_WRITE(ah, AR_WA, ah->WARegVal);
+       }
+
        ath9k_hw_init_defaults(ah);
        ath9k_hw_init_config(ah);
 
@@ -647,6 +653,7 @@ static int __ath9k_hw_init(struct ath_hw *ah)
        case AR_SREV_VERSION_9340:
        case AR_SREV_VERSION_9462:
        case AR_SREV_VERSION_9550:
+       case AR_SREV_VERSION_9565:
                break;
        default:
                ath_err(common,
@@ -708,7 +715,7 @@ int ath9k_hw_init(struct ath_hw *ah)
        int ret;
        struct ath_common *common = ath9k_hw_common(ah);
 
-       /* These are all the AR5008/AR9001/AR9002 hardware family of chipsets */
+       /* These are all the AR5008/AR9001/AR9002/AR9003 hardware family of chipsets */
        switch (ah->hw_version.devid) {
        case AR5416_DEVID_PCI:
        case AR5416_DEVID_PCIE:
@@ -728,6 +735,7 @@ int ath9k_hw_init(struct ath_hw *ah)
        case AR9300_DEVID_AR9580:
        case AR9300_DEVID_AR9462:
        case AR9485_DEVID_AR1111:
+       case AR9300_DEVID_AR9565:
                break;
        default:
                if (common->bus_ops->ath_bus_type == ATH_USB)
@@ -800,8 +808,7 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
 {
        u32 pll;
 
-       if (AR_SREV_9485(ah)) {
-
+       if (AR_SREV_9485(ah) || AR_SREV_9565(ah)) {
                /* program BB PLL ki and kd value, ki=0x4, kd=0x40 */
                REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2,
                              AR_CH0_BB_DPLL2_PLL_PWD, 0x1);
@@ -912,7 +919,8 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
        }
 
        pll = ath9k_hw_compute_pll_control(ah, chan);
-
+       if (AR_SREV_9565(ah))
+               pll |= 0x40000;
        REG_WRITE(ah, AR_RTC_PLL_CONTROL, pll);
 
        if (AR_SREV_9485(ah) || AR_SREV_9340(ah) || AR_SREV_9330(ah) ||
@@ -1726,12 +1734,12 @@ static int ath9k_hw_do_fastcc(struct ath_hw *ah, struct ath9k_channel *chan)
        if (!ret)
                goto fail;
 
-       ath9k_hw_loadnf(ah, ah->curchan);
-       ath9k_hw_start_nfcal(ah, true);
-
        if (ath9k_hw_mci_is_enabled(ah))
                ar9003_mci_2g5g_switch(ah, false);
 
+       ath9k_hw_loadnf(ah, ah->curchan);
+       ath9k_hw_start_nfcal(ah, true);
+
        if (AR_SREV_9271(ah))
                ar9002_hw_load_ani_reg(ah, chan);
 
@@ -2018,6 +2026,9 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
 
        ath9k_hw_apply_gpio_override(ah);
 
+       if (AR_SREV_9565(ah) && ah->shared_chain_lnadiv)
+               REG_SET_BIT(ah, AR_BTCOEX_WL_LNADIV, AR_BTCOEX_WL_LNADIV_FORCE_ON);
+
        return 0;
 }
 EXPORT_SYMBOL(ath9k_hw_reset);
@@ -2034,7 +2045,7 @@ static void ath9k_set_power_sleep(struct ath_hw *ah)
 {
        REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
 
-       if (AR_SREV_9462(ah)) {
+       if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
                REG_CLR_BIT(ah, AR_TIMER_MODE, 0xff);
                REG_CLR_BIT(ah, AR_NDP2_TIMER_MODE, 0xff);
                REG_CLR_BIT(ah, AR_SLP32_INC, 0xfffff);
@@ -2401,7 +2412,10 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
        if (eeval & AR5416_OPFLAGS_11G)
                pCap->hw_caps |= ATH9K_HW_CAP_2GHZ;
 
-       if (AR_SREV_9485(ah) || AR_SREV_9285(ah) || AR_SREV_9330(ah))
+       if (AR_SREV_9485(ah) ||
+           AR_SREV_9285(ah) ||
+           AR_SREV_9330(ah) ||
+           AR_SREV_9565(ah))
                chip_chainmask = 1;
        else if (AR_SREV_9462(ah))
                chip_chainmask = 3;
@@ -2489,7 +2503,7 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
 
        if (AR_SREV_9300_20_OR_LATER(ah)) {
                pCap->hw_caps |= ATH9K_HW_CAP_EDMA | ATH9K_HW_CAP_FASTCLOCK;
-               if (!AR_SREV_9330(ah) && !AR_SREV_9485(ah))
+               if (!AR_SREV_9330(ah) && !AR_SREV_9485(ah) && !AR_SREV_9565(ah))
                        pCap->hw_caps |= ATH9K_HW_CAP_LDPC;
 
                pCap->rx_hp_qdepth = ATH9K_HW_RX_HP_QDEPTH;
@@ -2525,7 +2539,7 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
        }
 
 
-       if (AR_SREV_9330(ah) || AR_SREV_9485(ah)) {
+       if (AR_SREV_9330(ah) || AR_SREV_9485(ah) || AR_SREV_9565(ah)) {
                ant_div_ctl1 = ah->eep_ops->get_eeprom(ah, EEP_ANT_DIV_CTL1);
                /*
                 * enable the diversity-combining algorithm only when
@@ -2568,14 +2582,12 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
                        ah->enabled_cals |= TX_IQ_ON_AGC_CAL;
        }
 
-       if (AR_SREV_9462(ah)) {
-
+       if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
                if (!(ah->ent_mode & AR_ENT_OTP_49GHZ_DISABLE))
                        pCap->hw_caps |= ATH9K_HW_CAP_MCI;
 
                if (AR_SREV_9462_20(ah))
                        pCap->hw_caps |= ATH9K_HW_CAP_RTT;
-
        }
 
 
@@ -2741,7 +2753,7 @@ void ath9k_hw_setrxfilter(struct ath_hw *ah, u32 bits)
 
        ENABLE_REGWRITE_BUFFER(ah);
 
-       if (AR_SREV_9462(ah))
+       if (AR_SREV_9462(ah) || AR_SREV_9565(ah))
                bits |= ATH9K_RX_FILTER_CONTROL_WRAPPER;
 
        REG_WRITE(ah, AR_RX_FILTER, bits);
@@ -3038,7 +3050,7 @@ void ath9k_hw_gen_timer_start(struct ath_hw *ah,
        REG_SET_BIT(ah, gen_tmr_configuration[timer->index].mode_addr,
                    gen_tmr_configuration[timer->index].mode_mask);
 
-       if (AR_SREV_9462(ah)) {
+       if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
                /*
                 * Starting from AR9462, each generic timer can select which tsf
                 * to use. But we still follow the old rule, 0 - 7 use tsf and
@@ -3072,6 +3084,16 @@ void ath9k_hw_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer)
        REG_CLR_BIT(ah, gen_tmr_configuration[timer->index].mode_addr,
                        gen_tmr_configuration[timer->index].mode_mask);
 
+       if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
+               /*
+                * Need to switch back to TSF if it was using TSF2.
+                */
+               if ((timer->index >= AR_GEN_TIMER_BANK_1_LEN)) {
+                       REG_CLR_BIT(ah, AR_MAC_PCU_GEN_TIMER_TSF_SEL,
+                                   (1 << timer->index));
+               }
+       }
+
        /* Disable both trigger and thresh interrupt masks */
        REG_CLR_BIT(ah, AR_IMR_S5,
                (SM(AR_GENTMR_BIT(timer->index), AR_IMR_S5_GENTIMER_THRESH) |
@@ -3153,6 +3175,7 @@ static struct {
        { AR_SREV_VERSION_9485,         "9485" },
        { AR_SREV_VERSION_9462,         "9462" },
        { AR_SREV_VERSION_9550,         "9550" },
+       { AR_SREV_VERSION_9565,         "9565" },
 };
 
 /* For devices with external radios */
index de6968fc64f42920304fe52dd93a493c3ce679c6..566a4ce4f156e8200c29fa4e71da7376a7e1df5d 100644 (file)
@@ -50,6 +50,7 @@
 #define AR9300_DEVID_AR9330    0x0035
 #define AR9300_DEVID_QCA955X   0x0038
 #define AR9485_DEVID_AR1111    0x0037
+#define AR9300_DEVID_AR9565     0x0036
 
 #define AR5416_AR9100_DEVID    0x000b
 
@@ -685,7 +686,7 @@ struct ath_hw_ops {
                        struct ath_hw_antcomb_conf *antconf);
        void (*antdiv_comb_conf_set)(struct ath_hw *ah,
                        struct ath_hw_antcomb_conf *antconf);
-
+       void (*antctrl_shared_chain_lnadiv)(struct ath_hw *hw, bool enable);
 };
 
 struct ath_nf_limits {
@@ -729,6 +730,7 @@ struct ath_hw {
        bool aspm_enabled;
        bool is_monitoring;
        bool need_an_top2_fixup;
+       bool shared_chain_lnadiv;
        u16 tx_trig_level;
 
        u32 nf_regs[6];
index f33712140fa550aac98bcfac152c60366d597795..fad3ccd5cd91aa8ab5b96603701275303a3f6392 100644 (file)
@@ -46,6 +46,10 @@ static int ath9k_btcoex_enable;
 module_param_named(btcoex_enable, ath9k_btcoex_enable, int, 0444);
 MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence");
 
+static int ath9k_enable_diversity;
+module_param_named(enable_diversity, ath9k_enable_diversity, int, 0444);
+MODULE_PARM_DESC(enable_diversity, "Enable Antenna diversity for AR9565");
+
 bool is_ath9k_unloaded;
 /* We use the hw_value as an index into our private channel structure */
 
@@ -258,7 +262,7 @@ static void setup_ht_cap(struct ath_softc *sc,
        ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
        ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
 
-       if (AR_SREV_9330(ah) || AR_SREV_9485(ah))
+       if (AR_SREV_9330(ah) || AR_SREV_9485(ah) || AR_SREV_9565(ah))
                max_streams = 1;
        else if (AR_SREV_9462(ah))
                max_streams = 2;
@@ -546,6 +550,14 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
        common->debug_mask = ath9k_debug;
        common->btcoex_enabled = ath9k_btcoex_enable == 1;
        common->disable_ani = false;
+
+       /*
+        * Enable Antenna diversity only when BTCOEX is disabled
+        * and the user manually requests the feature.
+        */
+       if (!common->btcoex_enabled && ath9k_enable_diversity)
+               common->antenna_diversity = 1;
+
        spin_lock_init(&common->cc_lock);
 
        spin_lock_init(&sc->sc_serial_rw);
@@ -597,6 +609,7 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
 
        ath9k_cmn_init_crypto(sc->sc_ah);
        ath9k_init_misc(sc);
+       ath_fill_led_pin(sc);
 
        if (common->bus_ops->aspm_init)
                common->bus_ops->aspm_init(common);
index a22df749b8db3d8641b4ef8cb78ad99b9e34adbb..31ab82e3ba85fdee932f1c8b2037c262037b1f24 100644 (file)
@@ -696,7 +696,9 @@ mutex_unlock:
        return r;
 }
 
-static void ath9k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void ath9k_tx(struct ieee80211_hw *hw,
+                    struct ieee80211_tx_control *control,
+                    struct sk_buff *skb)
 {
        struct ath_softc *sc = hw->priv;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
@@ -756,6 +758,7 @@ static void ath9k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 
        memset(&txctl, 0, sizeof(struct ath_tx_control));
        txctl.txq = sc->tx.txq_map[skb_get_queue_mapping(skb)];
+       txctl.sta = control->sta;
 
        ath_dbg(common, XMIT, "transmitting packet, skb: %p\n", skb);
 
@@ -983,47 +986,21 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
        struct ath_softc *sc = hw->priv;
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
-       int ret = 0;
 
-       ath9k_ps_wakeup(sc);
        mutex_lock(&sc->mutex);
 
-       switch (vif->type) {
-       case NL80211_IFTYPE_STATION:
-       case NL80211_IFTYPE_WDS:
-       case NL80211_IFTYPE_ADHOC:
-       case NL80211_IFTYPE_AP:
-       case NL80211_IFTYPE_MESH_POINT:
-               break;
-       default:
-               ath_err(common, "Interface type %d not yet supported\n",
-                       vif->type);
-               ret = -EOPNOTSUPP;
-               goto out;
-       }
-
-       if (ath9k_uses_beacons(vif->type)) {
-               if (sc->nbcnvifs >= ATH_BCBUF) {
-                       ath_err(common, "Not enough beacon buffers when adding"
-                               " new interface of type: %i\n",
-                               vif->type);
-                       ret = -ENOBUFS;
-                       goto out;
-               }
-       }
-
        ath_dbg(common, CONFIG, "Attach a VIF of type: %d\n", vif->type);
-
        sc->nvifs++;
 
+       ath9k_ps_wakeup(sc);
        ath9k_calculate_summary_state(hw, vif);
+       ath9k_ps_restore(sc);
+
        if (ath9k_uses_beacons(vif->type))
                ath9k_beacon_assign_slot(sc, vif);
 
-out:
        mutex_unlock(&sc->mutex);
-       ath9k_ps_restore(sc);
-       return ret;
+       return 0;
 }
 
 static int ath9k_change_interface(struct ieee80211_hw *hw,
@@ -1033,21 +1010,9 @@ static int ath9k_change_interface(struct ieee80211_hw *hw,
 {
        struct ath_softc *sc = hw->priv;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
-       int ret = 0;
 
        ath_dbg(common, CONFIG, "Change Interface\n");
-
        mutex_lock(&sc->mutex);
-       ath9k_ps_wakeup(sc);
-
-       if (ath9k_uses_beacons(new_type) &&
-           !ath9k_uses_beacons(vif->type)) {
-               if (sc->nbcnvifs >= ATH_BCBUF) {
-                       ath_err(common, "No beacon slot available\n");
-                       ret = -ENOBUFS;
-                       goto out;
-               }
-       }
 
        if (ath9k_uses_beacons(vif->type))
                ath9k_beacon_remove_slot(sc, vif);
@@ -1055,14 +1020,15 @@ static int ath9k_change_interface(struct ieee80211_hw *hw,
        vif->type = new_type;
        vif->p2p = p2p;
 
+       ath9k_ps_wakeup(sc);
        ath9k_calculate_summary_state(hw, vif);
+       ath9k_ps_restore(sc);
+
        if (ath9k_uses_beacons(vif->type))
                ath9k_beacon_assign_slot(sc, vif);
 
-out:
-       ath9k_ps_restore(sc);
        mutex_unlock(&sc->mutex);
-       return ret;
+       return 0;
 }
 
 static void ath9k_remove_interface(struct ieee80211_hw *hw,
@@ -1073,7 +1039,6 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
 
        ath_dbg(common, CONFIG, "Detach Interface\n");
 
-       ath9k_ps_wakeup(sc);
        mutex_lock(&sc->mutex);
 
        sc->nvifs--;
@@ -1081,10 +1046,11 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
        if (ath9k_uses_beacons(vif->type))
                ath9k_beacon_remove_slot(sc, vif);
 
+       ath9k_ps_wakeup(sc);
        ath9k_calculate_summary_state(hw, NULL);
+       ath9k_ps_restore(sc);
 
        mutex_unlock(&sc->mutex);
-       ath9k_ps_restore(sc);
 }
 
 static void ath9k_enable_ps(struct ath_softc *sc)
@@ -1440,7 +1406,7 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
                                key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
                        if (sc->sc_ah->sw_mgmt_crypto &&
                            key->cipher == WLAN_CIPHER_SUITE_CCMP)
-                               key->flags |= IEEE80211_KEY_FLAG_SW_MGMT;
+                               key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
                        ret = 0;
                }
                break;
@@ -2257,7 +2223,7 @@ static int ath9k_suspend(struct ieee80211_hw *hw,
        mutex_lock(&sc->mutex);
 
        ath_cancel_work(sc);
-       del_timer_sync(&common->ani.timer);
+       ath_stop_ani(sc);
        del_timer_sync(&sc->rx_poll_timer);
 
        if (test_bit(SC_OP_INVALID, &sc->sc_flags)) {
index fb536e7e661b630a464700185b5ff845f4e83982..ec2d7c80756753f02e37f30ac3d42b45ec463259 100644 (file)
@@ -80,6 +80,7 @@ void ath_mci_flush_profile(struct ath_mci_profile *mci)
        struct ath_mci_profile_info *info, *tinfo;
 
        mci->aggr_limit = 0;
+       mci->num_mgmt = 0;
 
        if (list_empty(&mci->info))
                return;
@@ -120,7 +121,14 @@ static void ath_mci_update_scheme(struct ath_softc *sc)
        if (mci_hw->config & ATH_MCI_CONFIG_DISABLE_TUNING)
                goto skip_tuning;
 
+       mci->aggr_limit = 0;
        btcoex->duty_cycle = ath_mci_duty_cycle[num_profile];
+       btcoex->btcoex_period = ATH_MCI_DEF_BT_PERIOD;
+       if (NUM_PROF(mci))
+               btcoex->bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
+       else
+               btcoex->bt_stomp_type = mci->num_mgmt ? ATH_BTCOEX_STOMP_ALL :
+                                                       ATH_BTCOEX_STOMP_LOW;
 
        if (num_profile == 1) {
                info = list_first_entry(&mci->info,
@@ -132,7 +140,8 @@ static void ath_mci_update_scheme(struct ath_softc *sc)
                        else if (info->T == 6) {
                                mci->aggr_limit = 6;
                                btcoex->duty_cycle = 30;
-                       }
+                       } else
+                               mci->aggr_limit = 6;
                        ath_dbg(common, MCI,
                                "Single SCO, aggregation limit %d 1/4 ms\n",
                                mci->aggr_limit);
@@ -191,6 +200,23 @@ skip_tuning:
        ath9k_btcoex_timer_resume(sc);
 }
 
+static void ath_mci_wait_btcal_done(struct ath_softc *sc)
+{
+       struct ath_hw *ah = sc->sc_ah;
+
+       /* Stop tx & rx */
+       ieee80211_stop_queues(sc->hw);
+       ath_stoprecv(sc);
+       ath_drain_all_txq(sc, false);
+
+       /* Wait for cal done */
+       ar9003_mci_start_reset(ah, ah->curchan);
+
+       /* Resume tx & rx */
+       ath_startrecv(sc);
+       ieee80211_wake_queues(sc->hw);
+}
+
 static void ath_mci_cal_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
 {
        struct ath_hw *ah = sc->sc_ah;
@@ -201,8 +227,8 @@ static void ath_mci_cal_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
        switch (opcode) {
        case MCI_GPM_BT_CAL_REQ:
                if (mci_hw->bt_state == MCI_BT_AWAKE) {
-                       ar9003_mci_state(ah, MCI_STATE_SET_BT_CAL_START);
-                       ath9k_queue_reset(sc, RESET_TYPE_MCI);
+                       mci_hw->bt_state = MCI_BT_CAL_START;
+                       ath_mci_wait_btcal_done(sc);
                }
                ath_dbg(common, MCI, "MCI State : %d\n", mci_hw->bt_state);
                break;
@@ -224,8 +250,8 @@ static void ath9k_mci_work(struct work_struct *work)
        ath_mci_update_scheme(sc);
 }
 
-static void ath_mci_process_profile(struct ath_softc *sc,
-                                   struct ath_mci_profile_info *info)
+static u8 ath_mci_process_profile(struct ath_softc *sc,
+                                 struct ath_mci_profile_info *info)
 {
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ath_btcoex *btcoex = &sc->btcoex;
@@ -251,25 +277,15 @@ static void ath_mci_process_profile(struct ath_softc *sc,
 
        if (info->start) {
                if (!entry && !ath_mci_add_profile(common, mci, info))
-                       return;
+                       return 0;
        } else
                ath_mci_del_profile(common, mci, entry);
 
-       btcoex->btcoex_period = ATH_MCI_DEF_BT_PERIOD;
-       mci->aggr_limit = mci->num_sco ? 6 : 0;
-
-       btcoex->duty_cycle = ath_mci_duty_cycle[NUM_PROF(mci)];
-       if (NUM_PROF(mci))
-               btcoex->bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
-       else
-               btcoex->bt_stomp_type = mci->num_mgmt ? ATH_BTCOEX_STOMP_ALL :
-                                                       ATH_BTCOEX_STOMP_LOW;
-
-       ieee80211_queue_work(sc->hw, &sc->mci_work);
+       return 1;
 }
 
-static void ath_mci_process_status(struct ath_softc *sc,
-                                  struct ath_mci_profile_status *status)
+static u8 ath_mci_process_status(struct ath_softc *sc,
+                                struct ath_mci_profile_status *status)
 {
        struct ath_btcoex *btcoex = &sc->btcoex;
        struct ath_mci_profile *mci = &btcoex->mci;
@@ -278,14 +294,14 @@ static void ath_mci_process_status(struct ath_softc *sc,
 
        /* Link status type are not handled */
        if (status->is_link)
-               return;
+               return 0;
 
        info.conn_handle = status->conn_handle;
        if (ath_mci_find_profile(mci, &info))
-               return;
+               return 0;
 
        if (status->conn_handle >= ATH_MCI_MAX_PROFILE)
-               return;
+               return 0;
 
        if (status->is_critical)
                __set_bit(status->conn_handle, mci->status);
@@ -299,7 +315,9 @@ static void ath_mci_process_status(struct ath_softc *sc,
        } while (++i < ATH_MCI_MAX_PROFILE);
 
        if (old_num_mgmt != mci->num_mgmt)
-               ieee80211_queue_work(sc->hw, &sc->mci_work);
+               return 1;
+
+       return 0;
 }
 
 static void ath_mci_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
@@ -308,9 +326,16 @@ static void ath_mci_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
        struct ath_mci_profile_info profile_info;
        struct ath_mci_profile_status profile_status;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
-       u8 major, minor;
+       u8 major, minor, update_scheme = 0;
        u32 seq_num;
 
+       if (ar9003_mci_state(ah, MCI_STATE_NEED_FLUSH_BT_INFO) &&
+           ar9003_mci_state(ah, MCI_STATE_ENABLE)) {
+               ath_dbg(common, MCI, "(MCI) Need to flush BT profiles\n");
+               ath_mci_flush_profile(&sc->btcoex.mci);
+               ar9003_mci_state(ah, MCI_STATE_SEND_STATUS_QUERY);
+       }
+
        switch (opcode) {
        case MCI_GPM_COEX_VERSION_QUERY:
                ar9003_mci_state(ah, MCI_STATE_SEND_WLAN_COEX_VERSION);
@@ -336,7 +361,7 @@ static void ath_mci_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
                        break;
                }
 
-               ath_mci_process_profile(sc, &profile_info);
+               update_scheme += ath_mci_process_profile(sc, &profile_info);
                break;
        case MCI_GPM_COEX_BT_STATUS_UPDATE:
                profile_status.is_link = *(rx_payload +
@@ -352,12 +377,14 @@ static void ath_mci_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
                        profile_status.is_link, profile_status.conn_handle,
                        profile_status.is_critical, seq_num);
 
-               ath_mci_process_status(sc, &profile_status);
+               update_scheme += ath_mci_process_status(sc, &profile_status);
                break;
        default:
                ath_dbg(common, MCI, "Unknown GPM COEX message = 0x%02x\n", opcode);
                break;
        }
+       if (update_scheme)
+               ieee80211_queue_work(sc->hw, &sc->mci_work);
 }
 
 int ath_mci_setup(struct ath_softc *sc)
@@ -365,6 +392,7 @@ int ath_mci_setup(struct ath_softc *sc)
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ath_mci_coex *mci = &sc->mci_coex;
        struct ath_mci_buf *buf = &mci->sched_buf;
+       int ret;
 
        buf->bf_addr = dma_alloc_coherent(sc->dev,
                                  ATH_MCI_SCHED_BUF_SIZE + ATH_MCI_GPM_BUF_SIZE,
@@ -384,9 +412,13 @@ int ath_mci_setup(struct ath_softc *sc)
        mci->gpm_buf.bf_addr = (u8 *)mci->sched_buf.bf_addr + mci->sched_buf.bf_len;
        mci->gpm_buf.bf_paddr = mci->sched_buf.bf_paddr + mci->sched_buf.bf_len;
 
-       ar9003_mci_setup(sc->sc_ah, mci->gpm_buf.bf_paddr,
-                        mci->gpm_buf.bf_addr, (mci->gpm_buf.bf_len >> 4),
-                        mci->sched_buf.bf_paddr);
+       ret = ar9003_mci_setup(sc->sc_ah, mci->gpm_buf.bf_paddr,
+                              mci->gpm_buf.bf_addr, (mci->gpm_buf.bf_len >> 4),
+                              mci->sched_buf.bf_paddr);
+       if (ret) {
+               ath_err(common, "Failed to initialize MCI\n");
+               return ret;
+       }
 
        INIT_WORK(&sc->mci_work, ath9k_mci_work);
        ath_dbg(common, MCI, "MCI Initialized\n");
@@ -551,9 +583,11 @@ void ath_mci_intr(struct ath_softc *sc)
        }
 
        if ((mci_int & AR_MCI_INTERRUPT_RX_INVALID_HDR) ||
-           (mci_int & AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT))
+           (mci_int & AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT)) {
                mci_int &= ~(AR_MCI_INTERRUPT_RX_INVALID_HDR |
                             AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT);
+               ath_mci_msg(sc, MCI_GPM_COEX_NOOP, NULL);
+       }
 }
 
 void ath_mci_enable(struct ath_softc *sc)
index ef11dc639461383960fcfa865f7dde01313da1e2..0e630a99b68b8fa729af6007de32e97d16c7f6ff 100644 (file)
@@ -38,6 +38,7 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
        { PCI_VDEVICE(ATHEROS, 0x0033) }, /* PCI-E  AR9580 */
        { PCI_VDEVICE(ATHEROS, 0x0034) }, /* PCI-E  AR9462 */
        { PCI_VDEVICE(ATHEROS, 0x0037) }, /* PCI-E  AR1111/AR9485 */
+       { PCI_VDEVICE(ATHEROS, 0x0036) }, /* PCI-E  AR9565 */
        { 0 }
 };
 
@@ -122,7 +123,8 @@ static void ath_pci_aspm_init(struct ath_common *common)
        if (!parent)
                return;
 
-       if (ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE) {
+       if ((ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE) &&
+           (AR_SREV_9285(ah))) {
                /* Bluetooth coexistance requires disabling ASPM. */
                pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL,
                        PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
index e034add9cd5a478a2dd085f5133a18898913b932..27ed80b5488133175a0b348a3f9752ebe69ed016 100644 (file)
@@ -25,141 +25,141 @@ static const struct ath_rate_table ar5416_11na_ratetable = {
        8, /* MCS start */
        {
                [0] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 6000,
-                       5400, 0, 12, 0, 0, 0, 0 }, /* 6 Mb */
+                       5400, 0, 12 }, /* 6 Mb */
                [1] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 9000,
-                       7800,  1, 18, 0, 1, 1, 1 }, /* 9 Mb */
+                       7800,  1, 18 }, /* 9 Mb */
                [2] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 12000,
-                       10000, 2, 24, 2, 2, 2, 2 }, /* 12 Mb */
+                       10000, 2, 24 }, /* 12 Mb */
                [3] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 18000,
-                       13900, 3, 36, 2, 3, 3, 3 }, /* 18 Mb */
+                       13900, 3, 36 }, /* 18 Mb */
                [4] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 24000,
-                       17300, 4, 48, 4, 4, 4, 4 }, /* 24 Mb */
+                       17300, 4, 48 }, /* 24 Mb */
                [5] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 36000,
-                       23000, 5, 72, 4, 5, 5, 5 }, /* 36 Mb */
+                       23000, 5, 72 }, /* 36 Mb */
                [6] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 48000,
-                       27400, 6, 96, 4, 6, 6, 6 }, /* 48 Mb */
+                       27400, 6, 96 }, /* 48 Mb */
                [7] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 54000,
-                       29300, 7, 108, 4, 7, 7, 7 }, /* 54 Mb */
+                       29300, 7, 108 }, /* 54 Mb */
                [8] = { RC_HT_SDT_2040, WLAN_RC_PHY_HT_20_SS, 6500,
-                       6400, 0, 0, 0, 38, 8, 38 }, /* 6.5 Mb */
+                       6400, 0, 0 }, /* 6.5 Mb */
                [9] = { RC_HT_SDT_20, WLAN_RC_PHY_HT_20_SS, 13000,
-                       12700, 1, 1, 2, 39, 9, 39 }, /* 13 Mb */
+                       12700, 1, 1 }, /* 13 Mb */
                [10] = { RC_HT_SDT_20, WLAN_RC_PHY_HT_20_SS, 19500,
-                       18800, 2, 2, 2, 40, 10, 40 }, /* 19.5 Mb */
+                       18800, 2, 2 }, /* 19.5 Mb */
                [11] = { RC_HT_SD_20, WLAN_RC_PHY_HT_20_SS, 26000,
-                       25000, 3, 3, 4, 41, 11, 41 }, /* 26 Mb */
+                       25000, 3, 3 }, /* 26 Mb */
                [12] = { RC_HT_SD_20, WLAN_RC_PHY_HT_20_SS, 39000,
-                       36700, 4, 4, 4, 42, 12, 42 }, /* 39 Mb */
+                       36700, 4, 4 }, /* 39 Mb */
                [13] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 52000,
-                       48100, 5, 5, 4, 43, 13, 43 }, /* 52 Mb */
+                       48100, 5, 5 }, /* 52 Mb */
                [14] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 58500,
-                       53500, 6, 6, 4, 44, 14, 44 }, /* 58.5 Mb */
+                       53500, 6, 6 }, /* 58.5 Mb */
                [15] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 65000,
-                       59000, 7, 7, 4, 45, 16, 46 }, /* 65 Mb */
+                       59000, 7, 7 }, /* 65 Mb */
                [16] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS_HGI, 72200,
-                       65400, 7, 7, 4, 45, 16, 46 }, /* 75 Mb */
+                       65400, 7, 7 }, /* 75 Mb */
                [17] = { RC_INVALID, WLAN_RC_PHY_HT_20_DS, 13000,
-                       12700, 8, 8, 0, 47, 17, 47 }, /* 13 Mb */
+                       12700, 8, 8 }, /* 13 Mb */
                [18] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_DS, 26000,
-                       24800, 9, 9, 2, 48, 18, 48 }, /* 26 Mb */
+                       24800, 9, 9 }, /* 26 Mb */
                [19] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_DS, 39000,
-                       36600, 10, 10, 2, 49, 19, 49 }, /* 39 Mb */
+                       36600, 10, 10 }, /* 39 Mb */
                [20] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 52000,
-                       48100, 11, 11, 4, 50, 20, 50 }, /* 52 Mb */
+                       48100, 11, 11 }, /* 52 Mb */
                [21] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 78000,
-                       69500, 12, 12, 4, 51, 21, 51 }, /* 78 Mb */
+                       69500, 12, 12 }, /* 78 Mb */
                [22] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 104000,
-                       89500, 13, 13, 4, 52, 22, 52 }, /* 104 Mb */
+                       89500, 13, 13 }, /* 104 Mb */
                [23] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 117000,
-                       98900, 14, 14, 4, 53, 23, 53 }, /* 117 Mb */
+                       98900, 14, 14 }, /* 117 Mb */
                [24] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 130000,
-                       108300, 15, 15, 4, 54, 25, 55 }, /* 130 Mb */
+                       108300, 15, 15 }, /* 130 Mb */
                [25] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS_HGI, 144400,
-                       120000, 15, 15, 4, 54, 25, 55 }, /* 144.4 Mb */
+                       120000, 15, 15 }, /* 144.4 Mb */
                [26] = {  RC_INVALID, WLAN_RC_PHY_HT_20_TS, 19500,
-                       17400, 16, 16, 0, 56, 26, 56 }, /* 19.5 Mb */
+                       17400, 16, 16 }, /* 19.5 Mb */
                [27] = {  RC_INVALID, WLAN_RC_PHY_HT_20_TS, 39000,
-                       35100, 17, 17, 2, 57, 27, 57 }, /* 39 Mb */
+                       35100, 17, 17 }, /* 39 Mb */
                [28] = {  RC_INVALID, WLAN_RC_PHY_HT_20_TS, 58500,
-                       52600, 18, 18, 2, 58, 28, 58 }, /* 58.5 Mb */
+                       52600, 18, 18 }, /* 58.5 Mb */
                [29] = {  RC_INVALID, WLAN_RC_PHY_HT_20_TS, 78000,
-                       70400, 19, 19, 4, 59, 29, 59 }, /* 78 Mb */
+                       70400, 19, 19 }, /* 78 Mb */
                [30] = {  RC_INVALID, WLAN_RC_PHY_HT_20_TS, 117000,
-                       104900, 20, 20, 4, 60, 31, 61 }, /* 117 Mb */
+                       104900, 20, 20 }, /* 117 Mb */
                [31] = {  RC_INVALID, WLAN_RC_PHY_HT_20_TS_HGI, 130000,
-                       115800, 20, 20, 4, 60, 31, 61 }, /* 130 Mb*/
+                       115800, 20, 20 }, /* 130 Mb*/
                [32] = {  RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 156000,
-                       137200, 21, 21, 4, 62, 33, 63 }, /* 156 Mb */
+                       137200, 21, 21 }, /* 156 Mb */
                [33] = {  RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 173300,
-                       151100, 21, 21, 4, 62, 33, 63 }, /* 173.3 Mb */
+                       151100, 21, 21 }, /* 173.3 Mb */
                [34] = {  RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 175500,
-                       152800, 22, 22, 4, 64, 35, 65 }, /* 175.5 Mb */
+                       152800, 22, 22 }, /* 175.5 Mb */
                [35] = {  RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 195000,
-                       168400, 22, 22, 4, 64, 35, 65 }, /* 195 Mb*/
+                       168400, 22, 22 }, /* 195 Mb*/
                [36] = {  RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 195000,
-                       168400, 23, 23, 4, 66, 37, 67 }, /* 195 Mb */
+                       168400, 23, 23 }, /* 195 Mb */
                [37] = {  RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 216700,
-                       185000, 23, 23, 4, 66, 37, 67 }, /* 216.7 Mb */
+                       185000, 23, 23 }, /* 216.7 Mb */
                [38] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 13500,
-                       13200, 0, 0, 0, 38, 38, 38 }, /* 13.5 Mb*/
+                       13200, 0, 0 }, /* 13.5 Mb*/
                [39] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 27500,
-                       25900, 1, 1, 2, 39, 39, 39 }, /* 27.0 Mb*/
+                       25900, 1, 1 }, /* 27.0 Mb*/
                [40] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 40500,
-                       38600, 2, 2, 2, 40, 40, 40 }, /* 40.5 Mb*/
+                       38600, 2, 2 }, /* 40.5 Mb*/
                [41] = { RC_HT_SD_40, WLAN_RC_PHY_HT_40_SS, 54000,
-                       49800, 3, 3, 4, 41, 41, 41 }, /* 54 Mb */
+                       49800, 3, 3 }, /* 54 Mb */
                [42] = { RC_HT_SD_40, WLAN_RC_PHY_HT_40_SS, 81500,
-                       72200, 4, 4, 4, 42, 42, 42 }, /* 81 Mb */
+                       72200, 4, 4 }, /* 81 Mb */
                [43] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 108000,
-                       92900, 5, 5, 4, 43, 43, 43 }, /* 108 Mb */
+                       92900, 5, 5 }, /* 108 Mb */
                [44] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 121500,
-                       102700, 6, 6, 4, 44, 44, 44 }, /* 121.5 Mb*/
+                       102700, 6, 6 }, /* 121.5 Mb*/
                [45] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 135000,
-                       112000, 7, 7, 4, 45, 46, 46 }, /* 135 Mb */
+                       112000, 7, 7 }, /* 135 Mb */
                [46] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS_HGI, 150000,
-                       122000, 7, 7, 4, 45, 46, 46 }, /* 150 Mb */
+                       122000, 7, 7 }, /* 150 Mb */
                [47] = { RC_INVALID, WLAN_RC_PHY_HT_40_DS, 27000,
-                       25800, 8, 8, 0, 47, 47, 47 }, /* 27 Mb */
+                       25800, 8, 8 }, /* 27 Mb */
                [48] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_DS, 54000,
-                       49800, 9, 9, 2, 48, 48, 48 }, /* 54 Mb */
+                       49800, 9, 9 }, /* 54 Mb */
                [49] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_DS, 81000,
-                       71900, 10, 10, 2, 49, 49, 49 }, /* 81 Mb */
+                       71900, 10, 10 }, /* 81 Mb */
                [50] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 108000,
-                       92500, 11, 11, 4, 50, 50, 50 }, /* 108 Mb */
+                       92500, 11, 11 }, /* 108 Mb */
                [51] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 162000,
-                       130300, 12, 12, 4, 51, 51, 51 }, /* 162 Mb */
+                       130300, 12, 12 }, /* 162 Mb */
                [52] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 216000,
-                       162800, 13, 13, 4, 52, 52, 52 }, /* 216 Mb */
+                       162800, 13, 13 }, /* 216 Mb */
                [53] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 243000,
-                       178200, 14, 14, 4, 53, 53, 53 }, /* 243 Mb */
+                       178200, 14, 14 }, /* 243 Mb */
                [54] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 270000,
-                       192100, 15, 15, 4, 54, 55, 55 }, /* 270 Mb */
+                       192100, 15, 15 }, /* 270 Mb */
                [55] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS_HGI, 300000,
-                       207000, 15, 15, 4, 54, 55, 55 }, /* 300 Mb */
+                       207000, 15, 15 }, /* 300 Mb */
                [56] = {  RC_INVALID, WLAN_RC_PHY_HT_40_TS, 40500,
-                       36100, 16, 16, 0, 56, 56, 56 }, /* 40.5 Mb */
+                       36100, 16, 16 }, /* 40.5 Mb */
                [57] = {  RC_INVALID, WLAN_RC_PHY_HT_40_TS, 81000,
-                       72900, 17, 17, 2, 57, 57, 57 }, /* 81 Mb */
+                       72900, 17, 17 }, /* 81 Mb */
                [58] = {  RC_INVALID, WLAN_RC_PHY_HT_40_TS, 121500,
-                       108300, 18, 18, 2, 58, 58, 58 }, /* 121.5 Mb */
+                       108300, 18, 18 }, /* 121.5 Mb */
                [59] = {  RC_INVALID, WLAN_RC_PHY_HT_40_TS, 162000,
-                       142000, 19, 19, 4, 59, 59, 59 }, /*  162 Mb */
+                       142000, 19, 19 }, /*  162 Mb */
                [60] = {  RC_INVALID, WLAN_RC_PHY_HT_40_TS, 243000,
-                       205100, 20, 20, 4, 60, 61, 61 }, /*  243 Mb */
+                       205100, 20, 20 }, /*  243 Mb */
                [61] = {  RC_INVALID, WLAN_RC_PHY_HT_40_TS_HGI, 270000,
-                       224700, 20, 20, 4, 60, 61, 61 }, /*  270 Mb */
+                       224700, 20, 20 }, /*  270 Mb */
                [62] = {  RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 324000,
-                       263100, 21, 21, 4, 62, 63, 63 }, /*  324 Mb */
+                       263100, 21, 21 }, /*  324 Mb */
                [63] = {  RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 360000,
-                       288000, 21, 21, 4, 62, 63, 63 }, /*  360 Mb */
+                       288000, 21, 21 }, /*  360 Mb */
                [64] = {  RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 364500,
-                       290700, 22, 22, 4, 64, 65, 65 }, /* 364.5 Mb */
+                       290700, 22, 22 }, /* 364.5 Mb */
                [65] = {  RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 405000,
-                       317200, 22, 22, 4, 64, 65, 65 }, /* 405 Mb */
+                       317200, 22, 22 }, /* 405 Mb */
                [66] = {  RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 405000,
-                       317200, 23, 23, 4, 66, 67, 67 }, /* 405 Mb */
+                       317200, 23, 23 }, /* 405 Mb */
                [67] = {  RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 450000,
-                       346400, 23, 23, 4, 66, 67, 67 }, /* 450 Mb */
+                       346400, 23, 23 }, /* 450 Mb */
        },
        50,  /* probe interval */
        WLAN_RC_HT_FLAG,  /* Phy rates allowed initially */
@@ -173,149 +173,149 @@ static const struct ath_rate_table ar5416_11ng_ratetable = {
        12, /* MCS start */
        {
                [0] = { RC_ALL, WLAN_RC_PHY_CCK, 1000,
-                       900, 0, 2, 0, 0, 0, 0 }, /* 1 Mb */
+                       900, 0, 2 }, /* 1 Mb */
                [1] = { RC_ALL, WLAN_RC_PHY_CCK, 2000,
-                       1900, 1, 4, 1, 1, 1, 1 }, /* 2 Mb */
+                       1900, 1, 4 }, /* 2 Mb */
                [2] = { RC_ALL, WLAN_RC_PHY_CCK, 5500,
-                       4900, 2, 11, 2, 2, 2, 2 }, /* 5.5 Mb */
+                       4900, 2, 11 }, /* 5.5 Mb */
                [3] = { RC_ALL, WLAN_RC_PHY_CCK, 11000,
-                       8100, 3, 22, 3, 3, 3, 3 }, /* 11 Mb */
+                       8100, 3, 22 }, /* 11 Mb */
                [4] = { RC_INVALID, WLAN_RC_PHY_OFDM, 6000,
-                       5400, 4, 12, 4, 4, 4, 4 }, /* 6 Mb */
+                       5400, 4, 12 }, /* 6 Mb */
                [5] = { RC_INVALID, WLAN_RC_PHY_OFDM, 9000,
-                       7800, 5, 18, 4, 5, 5, 5 }, /* 9 Mb */
+                       7800, 5, 18 }, /* 9 Mb */
                [6] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 12000,
-                       10100, 6, 24, 6, 6, 6, 6 }, /* 12 Mb */
+                       10100, 6, 24 }, /* 12 Mb */
                [7] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 18000,
-                       14100, 7, 36, 6, 7, 7, 7 }, /* 18 Mb */
+                       14100, 7, 36 }, /* 18 Mb */
                [8] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 24000,
-                       17700, 8, 48, 8, 8, 8, 8 }, /* 24 Mb */
+                       17700, 8, 48 }, /* 24 Mb */
                [9] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 36000,
-                       23700, 9, 72, 8, 9, 9, 9 }, /* 36 Mb */
+                       23700, 9, 72 }, /* 36 Mb */
                [10] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 48000,
-                       27400, 10, 96, 8, 10, 10, 10 }, /* 48 Mb */
+                       27400, 10, 96 }, /* 48 Mb */
                [11] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 54000,
-                       30900, 11, 108, 8, 11, 11, 11 }, /* 54 Mb */
+                       30900, 11, 108 }, /* 54 Mb */
                [12] = { RC_INVALID, WLAN_RC_PHY_HT_20_SS, 6500,
-                       6400, 0, 0, 4, 42, 12, 42 }, /* 6.5 Mb */
+                       6400, 0, 0 }, /* 6.5 Mb */
                [13] = { RC_HT_SDT_20, WLAN_RC_PHY_HT_20_SS, 13000,
-                       12700, 1, 1, 6, 43, 13, 43 }, /* 13 Mb */
+                       12700, 1, 1 }, /* 13 Mb */
                [14] = { RC_HT_SDT_20, WLAN_RC_PHY_HT_20_SS, 19500,
-                       18800, 2, 2, 6, 44, 14, 44 }, /* 19.5 Mb*/
+                       18800, 2, 2 }, /* 19.5 Mb*/
                [15] = { RC_HT_SD_20, WLAN_RC_PHY_HT_20_SS, 26000,
-                       25000, 3, 3, 8, 45, 15, 45 }, /* 26 Mb */
+                       25000, 3, 3 }, /* 26 Mb */
                [16] = { RC_HT_SD_20, WLAN_RC_PHY_HT_20_SS, 39000,
-                       36700, 4, 4, 8, 46, 16, 46 }, /* 39 Mb */
+                       36700, 4, 4 }, /* 39 Mb */
                [17] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 52000,
-                       48100, 5, 5, 8, 47, 17, 47 }, /* 52 Mb */
+                       48100, 5, 5 }, /* 52 Mb */
                [18] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 58500,
-                       53500, 6, 6, 8, 48, 18, 48 }, /* 58.5 Mb */
+                       53500, 6, 6 }, /* 58.5 Mb */
                [19] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 65000,
-                       59000, 7, 7, 8, 49, 20, 50 }, /* 65 Mb */
+                       59000, 7, 7 }, /* 65 Mb */
                [20] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS_HGI, 72200,
-                       65400, 7, 7, 8, 49, 20, 50 }, /* 65 Mb*/
+                       65400, 7, 7 }, /* 65 Mb*/
                [21] = { RC_INVALID, WLAN_RC_PHY_HT_20_DS, 13000,
-                       12700, 8, 8, 4, 51, 21, 51 }, /* 13 Mb */
+                       12700, 8, 8 }, /* 13 Mb */
                [22] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_DS, 26000,
-                       24800, 9, 9, 6, 52, 22, 52 }, /* 26 Mb */
+                       24800, 9, 9 }, /* 26 Mb */
                [23] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_DS, 39000,
-                       36600, 10, 10, 6, 53, 23, 53 }, /* 39 Mb */
+                       36600, 10, 10 }, /* 39 Mb */
                [24] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 52000,
-                       48100, 11, 11, 8, 54, 24, 54 }, /* 52 Mb */
+                       48100, 11, 11 }, /* 52 Mb */
                [25] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 78000,
-                       69500, 12, 12, 8, 55, 25, 55 }, /* 78 Mb */
+                       69500, 12, 12 }, /* 78 Mb */
                [26] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 104000,
-                       89500, 13, 13, 8, 56, 26, 56 }, /* 104 Mb */
+                       89500, 13, 13 }, /* 104 Mb */
                [27] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 117000,
-                       98900, 14, 14, 8, 57, 27, 57 }, /* 117 Mb */
+                       98900, 14, 14 }, /* 117 Mb */
                [28] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 130000,
-                       108300, 15, 15, 8, 58, 29, 59 }, /* 130 Mb */
+                       108300, 15, 15 }, /* 130 Mb */
                [29] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS_HGI, 144400,
-                       120000, 15, 15, 8, 58, 29, 59 }, /* 144.4 Mb */
+                       120000, 15, 15 }, /* 144.4 Mb */
                [30] = {  RC_INVALID, WLAN_RC_PHY_HT_20_TS, 19500,
-                       17400, 16, 16, 4, 60, 30, 60 }, /* 19.5 Mb */
+                       17400, 16, 16 }, /* 19.5 Mb */
                [31] = {  RC_INVALID, WLAN_RC_PHY_HT_20_TS, 39000,
-                       35100, 17, 17, 6, 61, 31, 61 }, /* 39 Mb */
+                       35100, 17, 17 }, /* 39 Mb */
                [32] = {  RC_INVALID, WLAN_RC_PHY_HT_20_TS, 58500,
-                       52600, 18, 18, 6, 62, 32, 62 }, /* 58.5 Mb */
+                       52600, 18, 18 }, /* 58.5 Mb */
                [33] = {  RC_INVALID, WLAN_RC_PHY_HT_20_TS, 78000,
-                       70400, 19, 19, 8, 63, 33, 63 }, /* 78 Mb */
+                       70400, 19, 19 }, /* 78 Mb */
                [34] = {  RC_INVALID, WLAN_RC_PHY_HT_20_TS, 117000,
-                       104900, 20, 20, 8, 64, 35, 65 }, /* 117 Mb */
+                       104900, 20, 20 }, /* 117 Mb */
                [35] = {  RC_INVALID, WLAN_RC_PHY_HT_20_TS_HGI, 130000,
-                       115800, 20, 20, 8, 64, 35, 65 }, /* 130 Mb */
+                       115800, 20, 20 }, /* 130 Mb */
                [36] = {  RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 156000,
-                       137200, 21, 21, 8, 66, 37, 67 }, /* 156 Mb */
+                       137200, 21, 21 }, /* 156 Mb */
                [37] = {  RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 173300,
-                       151100, 21, 21, 8, 66, 37, 67 }, /* 173.3 Mb */
+                       151100, 21, 21 }, /* 173.3 Mb */
                [38] = {  RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 175500,
-                       152800, 22, 22, 8, 68, 39, 69 }, /* 175.5 Mb */
+                       152800, 22, 22 }, /* 175.5 Mb */
                [39] = {  RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 195000,
-                       168400, 22, 22, 8, 68, 39, 69 }, /* 195 Mb */
+                       168400, 22, 22 }, /* 195 Mb */
                [40] = {  RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 195000,
-                       168400, 23, 23, 8, 70, 41, 71 }, /* 195 Mb */
+                       168400, 23, 23 }, /* 195 Mb */
                [41] = {  RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 216700,
-                       185000, 23, 23, 8, 70, 41, 71 }, /* 216.7 Mb */
+                       185000, 23, 23 }, /* 216.7 Mb */
                [42] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 13500,
-                       13200, 0, 0, 8, 42, 42, 42 }, /* 13.5 Mb */
+                       13200, 0, 0 }, /* 13.5 Mb */
                [43] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 27500,
-                       25900, 1, 1, 8, 43, 43, 43 }, /* 27.0 Mb */
+                       25900, 1, 1 }, /* 27.0 Mb */
                [44] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 40500,
-                       38600, 2, 2, 8, 44, 44, 44 }, /* 40.5 Mb */
+                       38600, 2, 2 }, /* 40.5 Mb */
                [45] = { RC_HT_SD_40, WLAN_RC_PHY_HT_40_SS, 54000,
-                       49800, 3, 3, 8, 45, 45, 45 }, /* 54 Mb */
+                       49800, 3, 3 }, /* 54 Mb */
                [46] = { RC_HT_SD_40, WLAN_RC_PHY_HT_40_SS, 81500,
-                       72200, 4, 4, 8, 46, 46, 46 }, /* 81 Mb */
+                       72200, 4, 4 }, /* 81 Mb */
                [47] = { RC_HT_S_40 , WLAN_RC_PHY_HT_40_SS, 108000,
-                       92900, 5, 5, 8, 47, 47, 47 }, /* 108 Mb */
+                       92900, 5, 5 }, /* 108 Mb */
                [48] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 121500,
-                       102700, 6, 6, 8, 48, 48, 48 }, /* 121.5 Mb */
+                       102700, 6, 6 }, /* 121.5 Mb */
                [49] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 135000,
-                       112000, 7, 7, 8, 49, 50, 50 }, /* 135 Mb */
+                       112000, 7, 7 }, /* 135 Mb */
                [50] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS_HGI, 150000,
-                       122000, 7, 7, 8, 49, 50, 50 }, /* 150 Mb */
+                       122000, 7, 7 }, /* 150 Mb */
                [51] = { RC_INVALID, WLAN_RC_PHY_HT_40_DS, 27000,
-                       25800, 8, 8, 8, 51, 51, 51 }, /* 27 Mb */
+                       25800, 8, 8 }, /* 27 Mb */
                [52] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_DS, 54000,
-                       49800, 9, 9, 8, 52, 52, 52 }, /* 54 Mb */
+                       49800, 9, 9 }, /* 54 Mb */
                [53] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_DS, 81000,
-                       71900, 10, 10, 8, 53, 53, 53 }, /* 81 Mb */
+                       71900, 10, 10 }, /* 81 Mb */
                [54] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 108000,
-                       92500, 11, 11, 8, 54, 54, 54 }, /* 108 Mb */
+                       92500, 11, 11 }, /* 108 Mb */
                [55] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 162000,
-                       130300, 12, 12, 8, 55, 55, 55 }, /* 162 Mb */
+                       130300, 12, 12 }, /* 162 Mb */
                [56] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 216000,
-                       162800, 13, 13, 8, 56, 56, 56 }, /* 216 Mb */
+                       162800, 13, 13 }, /* 216 Mb */
                [57] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 243000,
-                       178200, 14, 14, 8, 57, 57, 57 }, /* 243 Mb */
+                       178200, 14, 14 }, /* 243 Mb */
                [58] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 270000,
-                       192100, 15, 15, 8, 58, 59, 59 }, /* 270 Mb */
+                       192100, 15, 15 }, /* 270 Mb */
                [59] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS_HGI, 300000,
-                       207000, 15, 15, 8, 58, 59, 59 }, /* 300 Mb */
+                       207000, 15, 15 }, /* 300 Mb */
                [60] = {  RC_INVALID, WLAN_RC_PHY_HT_40_TS, 40500,
-                       36100, 16, 16, 8, 60, 60, 60 }, /* 40.5 Mb */
+                       36100, 16, 16 }, /* 40.5 Mb */
                [61] = {  RC_INVALID, WLAN_RC_PHY_HT_40_TS, 81000,
-                       72900, 17, 17, 8, 61, 61, 61 }, /* 81 Mb */
+                       72900, 17, 17 }, /* 81 Mb */
                [62] = {  RC_INVALID, WLAN_RC_PHY_HT_40_TS, 121500,
-                       108300, 18, 18, 8, 62, 62, 62 }, /* 121.5 Mb */
+                       108300, 18, 18 }, /* 121.5 Mb */
                [63] = {  RC_INVALID, WLAN_RC_PHY_HT_40_TS, 162000,
-                       142000, 19, 19, 8, 63, 63, 63 }, /* 162 Mb */
+                       142000, 19, 19 }, /* 162 Mb */
                [64] = {  RC_INVALID, WLAN_RC_PHY_HT_40_TS, 243000,
-                       205100, 20, 20, 8, 64, 65, 65 }, /* 243 Mb */
+                       205100, 20, 20 }, /* 243 Mb */
                [65] = {  RC_INVALID, WLAN_RC_PHY_HT_40_TS_HGI, 270000,
-                       224700, 20, 20, 8, 64, 65, 65 }, /* 270 Mb */
+                       224700, 20, 20 }, /* 270 Mb */
                [66] = {  RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 324000,
-                       263100, 21, 21, 8, 66, 67, 67 }, /* 324 Mb */
+                       263100, 21, 21 }, /* 324 Mb */
                [67] = {  RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 360000,
-                       288000, 21, 21, 8, 66, 67, 67 }, /* 360 Mb */
+                       288000, 21, 21 }, /* 360 Mb */
                [68] = {  RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 364500,
-                       290700, 22, 22, 8, 68, 69, 69 }, /* 364.5 Mb */
+                       290700, 22, 22 }, /* 364.5 Mb */
                [69] = {  RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 405000,
-                       317200, 22, 22, 8, 68, 69, 69 }, /* 405 Mb */
+                       317200, 22, 22 }, /* 405 Mb */
                [70] = {  RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 405000,
-                       317200, 23, 23, 8, 70, 71, 71 }, /* 405 Mb */
+                       317200, 23, 23 }, /* 405 Mb */
                [71] = {  RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 450000,
-                       346400, 23, 23, 8, 70, 71, 71 }, /* 450 Mb */
+                       346400, 23, 23 }, /* 450 Mb */
        },
        50,  /* probe interval */
        WLAN_RC_HT_FLAG,  /* Phy rates allowed initially */
@@ -326,21 +326,21 @@ static const struct ath_rate_table ar5416_11a_ratetable = {
        0,
        {
                { RC_L_SDT, WLAN_RC_PHY_OFDM, 6000, /* 6 Mb */
-                       5400, 0, 12, 0},
+                       5400, 0, 12},
                { RC_L_SDT, WLAN_RC_PHY_OFDM, 9000, /* 9 Mb */
-                       7800,  1, 18, 0},
+                       7800,  1, 18},
                { RC_L_SDT, WLAN_RC_PHY_OFDM, 12000, /* 12 Mb */
-                       10000, 2, 24, 2},
+                       10000, 2, 24},
                { RC_L_SDT, WLAN_RC_PHY_OFDM, 18000, /* 18 Mb */
-                       13900, 3, 36, 2},
+                       13900, 3, 36},
                { RC_L_SDT, WLAN_RC_PHY_OFDM, 24000, /* 24 Mb */
-                       17300, 4, 48, 4},
+                       17300, 4, 48},
                { RC_L_SDT, WLAN_RC_PHY_OFDM, 36000, /* 36 Mb */
-                       23000, 5, 72, 4},
+                       23000, 5, 72},
                { RC_L_SDT, WLAN_RC_PHY_OFDM, 48000, /* 48 Mb */
-                       27400, 6, 96, 4},
+                       27400, 6, 96},
                { RC_L_SDT, WLAN_RC_PHY_OFDM, 54000, /* 54 Mb */
-                       29300, 7, 108, 4},
+                       29300, 7, 108},
        },
        50,  /* probe interval */
        0,   /* Phy rates allowed initially */
@@ -351,63 +351,62 @@ static const struct ath_rate_table ar5416_11g_ratetable = {
        0,
        {
                { RC_L_SDT, WLAN_RC_PHY_CCK, 1000, /* 1 Mb */
-                       900, 0, 2, 0},
+                       900, 0, 2},
                { RC_L_SDT, WLAN_RC_PHY_CCK, 2000, /* 2 Mb */
-                       1900, 1, 4, 1},
+                       1900, 1, 4},
                { RC_L_SDT, WLAN_RC_PHY_CCK, 5500, /* 5.5 Mb */
-                       4900, 2, 11, 2},
+                       4900, 2, 11},
                { RC_L_SDT, WLAN_RC_PHY_CCK, 11000, /* 11 Mb */
-                       8100, 3, 22, 3},
+                       8100, 3, 22},
                { RC_INVALID, WLAN_RC_PHY_OFDM, 6000, /* 6 Mb */
-                       5400, 4, 12, 4},
+                       5400, 4, 12},
                { RC_INVALID, WLAN_RC_PHY_OFDM, 9000, /* 9 Mb */
-                       7800, 5, 18, 4},
+                       7800, 5, 18},
                { RC_L_SDT, WLAN_RC_PHY_OFDM, 12000, /* 12 Mb */
-                       10000, 6, 24, 6},
+                       10000, 6, 24},
                { RC_L_SDT, WLAN_RC_PHY_OFDM, 18000, /* 18 Mb */
-                       13900, 7, 36, 6},
+                       13900, 7, 36},
                { RC_L_SDT, WLAN_RC_PHY_OFDM, 24000, /* 24 Mb */
-                       17300, 8, 48, 8},
+                       17300, 8, 48},
                { RC_L_SDT, WLAN_RC_PHY_OFDM, 36000, /* 36 Mb */
-                       23000, 9, 72, 8},
+                       23000, 9, 72},
                { RC_L_SDT, WLAN_RC_PHY_OFDM, 48000, /* 48 Mb */
-                       27400, 10, 96, 8},
+                       27400, 10, 96},
                { RC_L_SDT, WLAN_RC_PHY_OFDM, 54000, /* 54 Mb */
-                       29300, 11, 108, 8},
+                       29300, 11, 108},
        },
        50,  /* probe interval */
        0,   /* Phy rates allowed initially */
 };
 
-static int ath_rc_get_rateindex(const struct ath_rate_table *rate_table,
+static int ath_rc_get_rateindex(struct ath_rate_priv *ath_rc_priv,
                                struct ieee80211_tx_rate *rate)
 {
-       int rix = 0, i = 0;
-       static const int mcs_rix_off[] = { 7, 15, 20, 21, 22, 23 };
+       const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
+       int rix, i, idx = 0;
 
        if (!(rate->flags & IEEE80211_TX_RC_MCS))
                return rate->idx;
 
-       while (i < ARRAY_SIZE(mcs_rix_off) && rate->idx > mcs_rix_off[i]) {
-               rix++; i++;
+       for (i = 0; i < ath_rc_priv->max_valid_rate; i++) {
+               idx = ath_rc_priv->valid_rate_index[i];
+
+               if (WLAN_RC_PHY_HT(rate_table->info[idx].phy) &&
+                   rate_table->info[idx].ratecode == rate->idx)
+                       break;
        }
 
-       rix += rate->idx + rate_table->mcs_start;
+       rix = idx;
 
-       if ((rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) &&
-           (rate->flags & IEEE80211_TX_RC_SHORT_GI))
-               rix = rate_table->info[rix].ht_index;
-       else if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
-               rix = rate_table->info[rix].sgi_index;
-       else if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
-               rix = rate_table->info[rix].cw40index;
+       if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
+               rix++;
 
        return rix;
 }
 
-static void ath_rc_sort_validrates(const struct ath_rate_table *rate_table,
-                                  struct ath_rate_priv *ath_rc_priv)
+static void ath_rc_sort_validrates(struct ath_rate_priv *ath_rc_priv)
 {
+       const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
        u8 i, j, idx, idx_next;
 
        for (i = ath_rc_priv->max_valid_rate - 1; i > 0; i--) {
@@ -424,21 +423,6 @@ static void ath_rc_sort_validrates(const struct ath_rate_table *rate_table,
        }
 }
 
-static void ath_rc_init_valid_rate_idx(struct ath_rate_priv *ath_rc_priv)
-{
-       u8 i;
-
-       for (i = 0; i < ath_rc_priv->rate_table_size; i++)
-               ath_rc_priv->valid_rate_index[i] = 0;
-}
-
-static inline void ath_rc_set_valid_rate_idx(struct ath_rate_priv *ath_rc_priv,
-                                          u8 index, int valid_tx_rate)
-{
-       BUG_ON(index > ath_rc_priv->rate_table_size);
-       ath_rc_priv->valid_rate_index[index] = !!valid_tx_rate;
-}
-
 static inline
 int ath_rc_get_nextvalid_txrate(const struct ath_rate_table *rate_table,
                                struct ath_rate_priv *ath_rc_priv,
@@ -479,8 +463,7 @@ static int ath_rc_valid_phyrate(u32 phy, u32 capflag, int ignore_cw)
 }
 
 static inline int
-ath_rc_get_lower_rix(const struct ath_rate_table *rate_table,
-                    struct ath_rate_priv *ath_rc_priv,
+ath_rc_get_lower_rix(struct ath_rate_priv *ath_rc_priv,
                     u8 cur_valid_txrate, u8 *next_idx)
 {
        int8_t i;
@@ -495,10 +478,9 @@ ath_rc_get_lower_rix(const struct ath_rate_table *rate_table,
        return 0;
 }
 
-static u8 ath_rc_init_validrates(struct ath_rate_priv *ath_rc_priv,
-                                const struct ath_rate_table *rate_table,
-                                u32 capflag)
+static u8 ath_rc_init_validrates(struct ath_rate_priv *ath_rc_priv)
 {
+       const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
        u8 i, hi = 0;
 
        for (i = 0; i < rate_table->rate_cnt; i++) {
@@ -506,14 +488,14 @@ static u8 ath_rc_init_validrates(struct ath_rate_priv *ath_rc_priv,
                        u32 phy = rate_table->info[i].phy;
                        u8 valid_rate_count = 0;
 
-                       if (!ath_rc_valid_phyrate(phy, capflag, 0))
+                       if (!ath_rc_valid_phyrate(phy, ath_rc_priv->ht_cap, 0))
                                continue;
 
                        valid_rate_count = ath_rc_priv->valid_phy_ratecnt[phy];
 
                        ath_rc_priv->valid_phy_rateidx[phy][valid_rate_count] = i;
                        ath_rc_priv->valid_phy_ratecnt[phy] += 1;
-                       ath_rc_set_valid_rate_idx(ath_rc_priv, i, 1);
+                       ath_rc_priv->valid_rate_index[i] = true;
                        hi = i;
                }
        }
@@ -521,76 +503,73 @@ static u8 ath_rc_init_validrates(struct ath_rate_priv *ath_rc_priv,
        return hi;
 }
 
-static u8 ath_rc_setvalid_rates(struct ath_rate_priv *ath_rc_priv,
-                               const struct ath_rate_table *rate_table,
-                               struct ath_rateset *rateset,
-                               u32 capflag)
+static inline bool ath_rc_check_legacy(u8 rate, u8 dot11rate, u16 rate_flags,
+                                      u32 phy, u32 capflag)
 {
-       u8 i, j, hi = 0;
+       if (rate != dot11rate || WLAN_RC_PHY_HT(phy))
+               return false;
 
-       /* Use intersection of working rates and valid rates */
-       for (i = 0; i < rateset->rs_nrates; i++) {
-               for (j = 0; j < rate_table->rate_cnt; j++) {
-                       u32 phy = rate_table->info[j].phy;
-                       u16 rate_flags = rate_table->info[j].rate_flags;
-                       u8 rate = rateset->rs_rates[i];
-                       u8 dot11rate = rate_table->info[j].dot11rate;
-
-                       /* We allow a rate only if its valid and the
-                        * capflag matches one of the validity
-                        * (VALID/VALID_20/VALID_40) flags */
-
-                       if ((rate == dot11rate) &&
-                           (rate_flags & WLAN_RC_CAP_MODE(capflag)) ==
-                           WLAN_RC_CAP_MODE(capflag) &&
-                           (rate_flags & WLAN_RC_CAP_STREAM(capflag)) &&
-                           !WLAN_RC_PHY_HT(phy)) {
-                               u8 valid_rate_count = 0;
-
-                               if (!ath_rc_valid_phyrate(phy, capflag, 0))
-                                       continue;
-
-                               valid_rate_count =
-                                       ath_rc_priv->valid_phy_ratecnt[phy];
-
-                               ath_rc_priv->valid_phy_rateidx[phy]
-                                       [valid_rate_count] = j;
-                               ath_rc_priv->valid_phy_ratecnt[phy] += 1;
-                               ath_rc_set_valid_rate_idx(ath_rc_priv, j, 1);
-                               hi = max(hi, j);
-                       }
-               }
-       }
+       if ((rate_flags & WLAN_RC_CAP_MODE(capflag)) != WLAN_RC_CAP_MODE(capflag))
+               return false;
 
-       return hi;
+       if (!(rate_flags & WLAN_RC_CAP_STREAM(capflag)))
+               return false;
+
+       return true;
 }
 
-static u8 ath_rc_setvalid_htrates(struct ath_rate_priv *ath_rc_priv,
-                                 const struct ath_rate_table *rate_table,
-                                 struct ath_rateset *rateset, u32 capflag)
+static inline bool ath_rc_check_ht(u8 rate, u8 dot11rate, u16 rate_flags,
+                                  u32 phy, u32 capflag)
 {
-       u8 i, j, hi = 0;
+       if (rate != dot11rate || !WLAN_RC_PHY_HT(phy))
+               return false;
+
+       if (!WLAN_RC_PHY_HT_VALID(rate_flags, capflag))
+               return false;
+
+       if (!(rate_flags & WLAN_RC_CAP_STREAM(capflag)))
+               return false;
+
+       return true;
+}
+
+static u8 ath_rc_setvalid_rates(struct ath_rate_priv *ath_rc_priv, bool legacy)
+{
+       const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
+       struct ath_rateset *rateset;
+       u32 phy, capflag = ath_rc_priv->ht_cap;
+       u16 rate_flags;
+       u8 i, j, hi = 0, rate, dot11rate, valid_rate_count;
+
+       if (legacy)
+               rateset = &ath_rc_priv->neg_rates;
+       else
+               rateset = &ath_rc_priv->neg_ht_rates;
 
-       /* Use intersection of working rates and valid rates */
        for (i = 0; i < rateset->rs_nrates; i++) {
                for (j = 0; j < rate_table->rate_cnt; j++) {
-                       u32 phy = rate_table->info[j].phy;
-                       u16 rate_flags = rate_table->info[j].rate_flags;
-                       u8 rate = rateset->rs_rates[i];
-                       u8 dot11rate = rate_table->info[j].dot11rate;
-
-                       if ((rate != dot11rate) || !WLAN_RC_PHY_HT(phy) ||
-                           !(rate_flags & WLAN_RC_CAP_STREAM(capflag)) ||
-                           !WLAN_RC_PHY_HT_VALID(rate_flags, capflag))
+                       phy = rate_table->info[j].phy;
+                       rate_flags = rate_table->info[j].rate_flags;
+                       rate = rateset->rs_rates[i];
+                       dot11rate = rate_table->info[j].dot11rate;
+
+                       if (legacy &&
+                           !ath_rc_check_legacy(rate, dot11rate,
+                                                rate_flags, phy, capflag))
+                               continue;
+
+                       if (!legacy &&
+                           !ath_rc_check_ht(rate, dot11rate,
+                                            rate_flags, phy, capflag))
                                continue;
 
                        if (!ath_rc_valid_phyrate(phy, capflag, 0))
                                continue;
 
-                       ath_rc_priv->valid_phy_rateidx[phy]
-                               [ath_rc_priv->valid_phy_ratecnt[phy]] = j;
+                       valid_rate_count = ath_rc_priv->valid_phy_ratecnt[phy];
+                       ath_rc_priv->valid_phy_rateidx[phy][valid_rate_count] = j;
                        ath_rc_priv->valid_phy_ratecnt[phy] += 1;
-                       ath_rc_set_valid_rate_idx(ath_rc_priv, j, 1);
+                       ath_rc_priv->valid_rate_index[j] = true;
                        hi = max(hi, j);
                }
        }
@@ -598,13 +577,10 @@ static u8 ath_rc_setvalid_htrates(struct ath_rate_priv *ath_rc_priv,
        return hi;
 }
 
-/* Finds the highest rate index we can use */
-static u8 ath_rc_get_highest_rix(struct ath_softc *sc,
-                                struct ath_rate_priv *ath_rc_priv,
-                                const struct ath_rate_table *rate_table,
-                                int *is_probing,
-                                bool legacy)
+static u8 ath_rc_get_highest_rix(struct ath_rate_priv *ath_rc_priv,
+                                int *is_probing)
 {
+       const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
        u32 best_thruput, this_thruput, now_msec;
        u8 rate, next_rate, best_rate, maxindex, minindex;
        int8_t index = 0;
@@ -624,8 +600,6 @@ static u8 ath_rc_get_highest_rix(struct ath_softc *sc,
                u8 per_thres;
 
                rate = ath_rc_priv->valid_rate_index[index];
-               if (legacy && !(rate_table->info[rate].rate_flags & RC_LEGACY))
-                       continue;
                if (rate > ath_rc_priv->rate_max_phy)
                        continue;
 
@@ -707,8 +681,6 @@ static void ath_rc_rate_set_series(const struct ath_rate_table *rate_table,
        rate->count = tries;
        rate->idx = rate_table->info[rix].ratecode;
 
-       if (txrc->short_preamble)
-               rate->flags |= IEEE80211_TX_RC_USE_SHORT_PREAMBLE;
        if (txrc->rts || rtsctsenable)
                rate->flags |= IEEE80211_TX_RC_USE_RTS_CTS;
 
@@ -726,37 +698,25 @@ static void ath_rc_rate_set_rtscts(struct ath_softc *sc,
                                   const struct ath_rate_table *rate_table,
                                   struct ieee80211_tx_info *tx_info)
 {
-       struct ieee80211_tx_rate *rates = tx_info->control.rates;
-       int i = 0, rix = 0, cix, enable_g_protection = 0;
+       struct ieee80211_bss_conf *bss_conf;
 
-       /* get the cix for the lowest valid rix */
-       for (i = 3; i >= 0; i--) {
-               if (rates[i].count && (rates[i].idx >= 0)) {
-                       rix = ath_rc_get_rateindex(rate_table, &rates[i]);
-                       break;
-               }
-       }
-       cix = rate_table->info[rix].ctrl_rate;
+       if (!tx_info->control.vif)
+               return;
+       /*
+        * For legacy frames, mac80211 takes care of CTS protection.
+        */
+       if (!(tx_info->control.rates[0].flags & IEEE80211_TX_RC_MCS))
+               return;
 
-       /* All protection frames are transmited at 2Mb/s for 802.11g,
-        * otherwise we transmit them at 1Mb/s */
-       if (sc->hw->conf.channel->band == IEEE80211_BAND_2GHZ &&
-           !conf_is_ht(&sc->hw->conf))
-               enable_g_protection = 1;
+       bss_conf = &tx_info->control.vif->bss_conf;
+
+       if (!bss_conf->basic_rates)
+               return;
 
        /*
-        * If 802.11g protection is enabled, determine whether to use RTS/CTS or
-        * just CTS.  Note that this is only done for OFDM/HT unicast frames.
+        * For now, use the lowest allowed basic rate for HT frames.
         */
-       if ((tx_info->control.vif &&
-            tx_info->control.vif->bss_conf.use_cts_prot) &&
-           (rate_table->info[rix].phy == WLAN_RC_PHY_OFDM ||
-            WLAN_RC_PHY_HT(rate_table->info[rix].phy))) {
-               rates[0].flags |= IEEE80211_TX_RC_USE_CTS_PROTECT;
-               cix = rate_table->info[enable_g_protection].ctrl_rate;
-       }
-
-       tx_info->control.rts_cts_rate_idx = cix;
+       tx_info->control.rts_cts_rate_idx = __ffs(bss_conf->basic_rates);
 }
 
 static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
@@ -789,14 +749,8 @@ static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
        try_per_rate = 4;
 
        rate_table = ath_rc_priv->rate_table;
-       rix = ath_rc_get_highest_rix(sc, ath_rc_priv, rate_table,
-                                    &is_probe, false);
+       rix = ath_rc_get_highest_rix(ath_rc_priv, &is_probe);
 
-       /*
-        * If we're in HT mode and both us and our peer supports LDPC.
-        * We don't need to check our own device's capabilities as our own
-        * ht capabilities would have already been intersected with our peer's.
-        */
        if (conf_is_ht(&sc->hw->conf) &&
            (sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING))
                tx_info->flags |= IEEE80211_TX_CTL_LDPC;
@@ -806,52 +760,45 @@ static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
                tx_info->flags |= (1 << IEEE80211_TX_CTL_STBC_SHIFT);
 
        if (is_probe) {
-               /* set one try for probe rates. For the
-                * probes don't enable rts */
+               /*
+                * Set one try for probe rates. For the
+                * probes don't enable RTS.
+                */
                ath_rc_rate_set_series(rate_table, &rates[i++], txrc,
                                       1, rix, 0);
-
-               /* Get the next tried/allowed rate. No RTS for the next series
-                * after the probe rate
+               /*
+                * Get the next tried/allowed rate.
+                * No RTS for the next series after the probe rate.
                 */
-               ath_rc_get_lower_rix(rate_table, ath_rc_priv, rix, &rix);
+               ath_rc_get_lower_rix(ath_rc_priv, rix, &rix);
                ath_rc_rate_set_series(rate_table, &rates[i++], txrc,
                                       try_per_rate, rix, 0);
 
                tx_info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
        } else {
-               /* Set the chosen rate. No RTS for first series entry. */
+               /*
+                * Set the chosen rate. No RTS for first series entry.
+                */
                ath_rc_rate_set_series(rate_table, &rates[i++], txrc,
                                       try_per_rate, rix, 0);
        }
 
-       /* Fill in the other rates for multirate retry */
-       for ( ; i < 3; i++) {
+       for ( ; i < 4; i++) {
+               /*
+                * Use twice the number of tries for the last MRR segment.
+                */
+               if (i + 1 == 4)
+                       try_per_rate = 8;
+
+               ath_rc_get_lower_rix(ath_rc_priv, rix, &rix);
 
-               ath_rc_get_lower_rix(rate_table, ath_rc_priv, rix, &rix);
-               /* All other rates in the series have RTS enabled */
+               /*
+                * All other rates in the series have RTS enabled.
+                */
                ath_rc_rate_set_series(rate_table, &rates[i], txrc,
                                       try_per_rate, rix, 1);
        }
 
-       /* Use twice the number of tries for the last MRR segment. */
-       try_per_rate = 8;
-
-       /*
-        * If the last rate in the rate series is MCS and has
-        * more than 80% of per thresh, then use a legacy rate
-        * as last retry to ensure that the frame is tried in both
-        * MCS and legacy rate.
-        */
-       ath_rc_get_lower_rix(rate_table, ath_rc_priv, rix, &rix);
-       if (WLAN_RC_PHY_HT(rate_table->info[rix].phy) &&
-           (ath_rc_priv->per[rix] > 45))
-               rix = ath_rc_get_highest_rix(sc, ath_rc_priv, rate_table,
-                               &is_probe, true);
-
-       /* All other rates in the series have RTS enabled */
-       ath_rc_rate_set_series(rate_table, &rates[i], txrc,
-                              try_per_rate, rix, 1);
        /*
         * NB:Change rate series to enable aggregation when operating
         * at lower MCS rates. When first rate in series is MCS2
@@ -893,7 +840,6 @@ static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
                rates[0].count = ATH_TXMAXTRY;
        }
 
-       /* Setup RTS/CTS */
        ath_rc_rate_set_rtscts(sc, rate_table, tx_info);
 }
 
@@ -1046,9 +992,6 @@ static void ath_debug_stat_retries(struct ath_rate_priv *rc, int rix,
        stats->per = per;
 }
 
-/* Update PER, RSSI and whatever else that the code thinks it is doing.
-   If you can make sense of all this, you really need to go out more. */
-
 static void ath_rc_update_ht(struct ath_softc *sc,
                             struct ath_rate_priv *ath_rc_priv,
                             struct ieee80211_tx_info *tx_info,
@@ -1077,8 +1020,8 @@ static void ath_rc_update_ht(struct ath_softc *sc,
        if (ath_rc_priv->per[tx_rate] >= 55 && tx_rate > 0 &&
            rate_table->info[tx_rate].ratekbps <=
            rate_table->info[ath_rc_priv->rate_max_phy].ratekbps) {
-               ath_rc_get_lower_rix(rate_table, ath_rc_priv,
-                                    (u8)tx_rate, &ath_rc_priv->rate_max_phy);
+               ath_rc_get_lower_rix(ath_rc_priv, (u8)tx_rate,
+                                    &ath_rc_priv->rate_max_phy);
 
                /* Don't probe for a little while. */
                ath_rc_priv->probe_time = now_msec;
@@ -1122,25 +1065,42 @@ static void ath_rc_update_ht(struct ath_softc *sc,
 
 }
 
+static void ath_debug_stat_rc(struct ath_rate_priv *rc, int final_rate)
+{
+       struct ath_rc_stats *stats;
+
+       stats = &rc->rcstats[final_rate];
+       stats->success++;
+}
 
 static void ath_rc_tx_status(struct ath_softc *sc,
                             struct ath_rate_priv *ath_rc_priv,
-                            struct ieee80211_tx_info *tx_info,
-                            int final_ts_idx, int xretries, int long_retry)
+                            struct sk_buff *skb)
 {
-       const struct ath_rate_table *rate_table;
+       struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        struct ieee80211_tx_rate *rates = tx_info->status.rates;
+       struct ieee80211_tx_rate *rate;
+       int final_ts_idx = 0, xretries = 0, long_retry = 0;
        u8 flags;
        u32 i = 0, rix;
 
-       rate_table = ath_rc_priv->rate_table;
+       for (i = 0; i < sc->hw->max_rates; i++) {
+               rate = &tx_info->status.rates[i];
+               if (rate->idx < 0 || !rate->count)
+                       break;
+
+               final_ts_idx = i;
+               long_retry = rate->count - 1;
+       }
+
+       if (!(tx_info->flags & IEEE80211_TX_STAT_ACK))
+               xretries = 1;
 
        /*
         * If the first rate is not the final index, there
         * are intermediate rate failures to be processed.
         */
        if (final_ts_idx != 0) {
-               /* Process intermediate rates that failed.*/
                for (i = 0; i < final_ts_idx ; i++) {
                        if (rates[i].count != 0 && (rates[i].idx >= 0)) {
                                flags = rates[i].flags;
@@ -1152,32 +1112,24 @@ static void ath_rc_tx_status(struct ath_softc *sc,
                                    !(ath_rc_priv->ht_cap & WLAN_RC_40_FLAG))
                                        return;
 
-                               rix = ath_rc_get_rateindex(rate_table, &rates[i]);
+                               rix = ath_rc_get_rateindex(ath_rc_priv, &rates[i]);
                                ath_rc_update_ht(sc, ath_rc_priv, tx_info,
-                                               rix, xretries ? 1 : 2,
-                                               rates[i].count);
+                                                rix, xretries ? 1 : 2,
+                                                rates[i].count);
                        }
                }
-       } else {
-               /*
-                * Handle the special case of MIMO PS burst, where the second
-                * aggregate is sent out with only one rate and one try.
-                * Treating it as an excessive retry penalizes the rate
-                * inordinately.
-                */
-               if (rates[0].count == 1 && xretries == 1)
-                       xretries = 2;
        }
 
-       flags = rates[i].flags;
+       flags = rates[final_ts_idx].flags;
 
        /* If HT40 and we have switched mode from 40 to 20 => don't update */
        if ((flags & IEEE80211_TX_RC_40_MHZ_WIDTH) &&
            !(ath_rc_priv->ht_cap & WLAN_RC_40_FLAG))
                return;
 
-       rix = ath_rc_get_rateindex(rate_table, &rates[i]);
+       rix = ath_rc_get_rateindex(ath_rc_priv, &rates[final_ts_idx]);
        ath_rc_update_ht(sc, ath_rc_priv, tx_info, rix, xretries, long_retry);
+       ath_debug_stat_rc(ath_rc_priv, rix);
 }
 
 static const
@@ -1185,8 +1137,6 @@ struct ath_rate_table *ath_choose_rate_table(struct ath_softc *sc,
                                             enum ieee80211_band band,
                                             bool is_ht)
 {
-       struct ath_common *common = ath9k_hw_common(sc->sc_ah);
-
        switch(band) {
        case IEEE80211_BAND_2GHZ:
                if (is_ht)
@@ -1197,34 +1147,25 @@ struct ath_rate_table *ath_choose_rate_table(struct ath_softc *sc,
                        return &ar5416_11na_ratetable;
                return &ar5416_11a_ratetable;
        default:
-               ath_dbg(common, CONFIG, "Invalid band\n");
                return NULL;
        }
 }
 
 static void ath_rc_init(struct ath_softc *sc,
-                       struct ath_rate_priv *ath_rc_priv,
-                       struct ieee80211_supported_band *sband,
-                       struct ieee80211_sta *sta,
-                       const struct ath_rate_table *rate_table)
+                       struct ath_rate_priv *ath_rc_priv)
 {
+       const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
        struct ath_rateset *rateset = &ath_rc_priv->neg_rates;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
-       struct ath_rateset *ht_mcs = &ath_rc_priv->neg_ht_rates;
        u8 i, j, k, hi = 0, hthi = 0;
 
-       /* Initial rate table size. Will change depending
-        * on the working rate set */
        ath_rc_priv->rate_table_size = RATE_TABLE_SIZE;
 
-       /* Initialize thresholds according to the global rate table */
        for (i = 0 ; i < ath_rc_priv->rate_table_size; i++) {
                ath_rc_priv->per[i] = 0;
+               ath_rc_priv->valid_rate_index[i] = 0;
        }
 
-       /* Determine the valid rates */
-       ath_rc_init_valid_rate_idx(ath_rc_priv);
-
        for (i = 0; i < WLAN_RC_PHY_MAX; i++) {
                for (j = 0; j < RATE_TABLE_SIZE; j++)
                        ath_rc_priv->valid_phy_rateidx[i][j] = 0;
@@ -1232,25 +1173,19 @@ static void ath_rc_init(struct ath_softc *sc,
        }
 
        if (!rateset->rs_nrates) {
-               /* No working rate, just initialize valid rates */
-               hi = ath_rc_init_validrates(ath_rc_priv, rate_table,
-                                           ath_rc_priv->ht_cap);
+               hi = ath_rc_init_validrates(ath_rc_priv);
        } else {
-               /* Use intersection of working rates and valid rates */
-               hi = ath_rc_setvalid_rates(ath_rc_priv, rate_table,
-                                          rateset, ath_rc_priv->ht_cap);
-               if (ath_rc_priv->ht_cap & WLAN_RC_HT_FLAG) {
-                       hthi = ath_rc_setvalid_htrates(ath_rc_priv,
-                                                      rate_table,
-                                                      ht_mcs,
-                                                      ath_rc_priv->ht_cap);
-               }
+               hi = ath_rc_setvalid_rates(ath_rc_priv, true);
+
+               if (ath_rc_priv->ht_cap & WLAN_RC_HT_FLAG)
+                       hthi = ath_rc_setvalid_rates(ath_rc_priv, false);
+
                hi = max(hi, hthi);
        }
 
        ath_rc_priv->rate_table_size = hi + 1;
        ath_rc_priv->rate_max_phy = 0;
-       BUG_ON(ath_rc_priv->rate_table_size > RATE_TABLE_SIZE);
+       WARN_ON(ath_rc_priv->rate_table_size > RATE_TABLE_SIZE);
 
        for (i = 0, k = 0; i < WLAN_RC_PHY_MAX; i++) {
                for (j = 0; j < ath_rc_priv->valid_phy_ratecnt[i]; j++) {
@@ -1258,28 +1193,26 @@ static void ath_rc_init(struct ath_softc *sc,
                                ath_rc_priv->valid_phy_rateidx[i][j];
                }
 
-               if (!ath_rc_valid_phyrate(i, rate_table->initial_ratemax, 1)
-                   || !ath_rc_priv->valid_phy_ratecnt[i])
+               if (!ath_rc_valid_phyrate(i, rate_table->initial_ratemax, 1) ||
+                   !ath_rc_priv->valid_phy_ratecnt[i])
                        continue;
 
                ath_rc_priv->rate_max_phy = ath_rc_priv->valid_phy_rateidx[i][j-1];
        }
-       BUG_ON(ath_rc_priv->rate_table_size > RATE_TABLE_SIZE);
-       BUG_ON(k > RATE_TABLE_SIZE);
+       WARN_ON(ath_rc_priv->rate_table_size > RATE_TABLE_SIZE);
+       WARN_ON(k > RATE_TABLE_SIZE);
 
        ath_rc_priv->max_valid_rate = k;
-       ath_rc_sort_validrates(rate_table, ath_rc_priv);
+       ath_rc_sort_validrates(ath_rc_priv);
        ath_rc_priv->rate_max_phy = (k > 4) ?
-                                       ath_rc_priv->valid_rate_index[k-4] :
-                                       ath_rc_priv->valid_rate_index[k-1];
-       ath_rc_priv->rate_table = rate_table;
+               ath_rc_priv->valid_rate_index[k-4] :
+               ath_rc_priv->valid_rate_index[k-1];
 
        ath_dbg(common, CONFIG, "RC Initialized with capabilities: 0x%x\n",
                ath_rc_priv->ht_cap);
 }
 
-static u8 ath_rc_build_ht_caps(struct ath_softc *sc, struct ieee80211_sta *sta,
-                              bool is_cw40, bool is_sgi)
+static u8 ath_rc_build_ht_caps(struct ath_softc *sc, struct ieee80211_sta *sta)
 {
        u8 caps = 0;
 
@@ -1289,10 +1222,14 @@ static u8 ath_rc_build_ht_caps(struct ath_softc *sc, struct ieee80211_sta *sta,
                        caps |= WLAN_RC_TS_FLAG | WLAN_RC_DS_FLAG;
                else if (sta->ht_cap.mcs.rx_mask[1])
                        caps |= WLAN_RC_DS_FLAG;
-               if (is_cw40)
+               if (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) {
                        caps |= WLAN_RC_40_FLAG;
-               if (is_sgi)
-                       caps |= WLAN_RC_SGI_FLAG;
+                       if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
+                               caps |= WLAN_RC_SGI_FLAG;
+               } else {
+                       if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20)
+                               caps |= WLAN_RC_SGI_FLAG;
+               }
        }
 
        return caps;
@@ -1319,15 +1256,6 @@ static bool ath_tx_aggr_check(struct ath_softc *sc, struct ieee80211_sta *sta,
 /* mac80211 Rate Control callbacks */
 /***********************************/
 
-static void ath_debug_stat_rc(struct ath_rate_priv *rc, int final_rate)
-{
-       struct ath_rc_stats *stats;
-
-       stats = &rc->rcstats[final_rate];
-       stats->success++;
-}
-
-
 static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband,
                          struct ieee80211_sta *sta, void *priv_sta,
                          struct sk_buff *skb)
@@ -1335,22 +1263,8 @@ static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband,
        struct ath_softc *sc = priv;
        struct ath_rate_priv *ath_rc_priv = priv_sta;
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
-       struct ieee80211_hdr *hdr;
-       int final_ts_idx = 0, tx_status = 0;
-       int long_retry = 0;
-       __le16 fc;
-       int i;
-
-       hdr = (struct ieee80211_hdr *)skb->data;
-       fc = hdr->frame_control;
-       for (i = 0; i < sc->hw->max_rates; i++) {
-               struct ieee80211_tx_rate *rate = &tx_info->status.rates[i];
-               if (rate->idx < 0 || !rate->count)
-                       break;
-
-               final_ts_idx = i;
-               long_retry = rate->count - 1;
-       }
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+       __le16 fc = hdr->frame_control;
 
        if (!priv_sta || !ieee80211_is_data(fc))
                return;
@@ -1363,11 +1277,7 @@ static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband,
        if (tx_info->flags & IEEE80211_TX_STAT_TX_FILTERED)
                return;
 
-       if (!(tx_info->flags & IEEE80211_TX_STAT_ACK))
-               tx_status = 1;
-
-       ath_rc_tx_status(sc, ath_rc_priv, tx_info, final_ts_idx, tx_status,
-                        long_retry);
+       ath_rc_tx_status(sc, ath_rc_priv, skb);
 
        /* Check if aggregation has to be enabled for this tid */
        if (conf_is_ht(&sc->hw->conf) &&
@@ -1383,19 +1293,14 @@ static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband,
                                ieee80211_start_tx_ba_session(sta, tid, 0);
                }
        }
-
-       ath_debug_stat_rc(ath_rc_priv,
-               ath_rc_get_rateindex(ath_rc_priv->rate_table,
-                       &tx_info->status.rates[final_ts_idx]));
 }
 
 static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband,
                           struct ieee80211_sta *sta, void *priv_sta)
 {
        struct ath_softc *sc = priv;
+       struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ath_rate_priv *ath_rc_priv = priv_sta;
-       const struct ath_rate_table *rate_table;
-       bool is_cw40, is_sgi = false;
        int i, j = 0;
 
        for (i = 0; i < sband->n_bitrates; i++) {
@@ -1417,20 +1322,15 @@ static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband,
                ath_rc_priv->neg_ht_rates.rs_nrates = j;
        }
 
-       is_cw40 = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40);
-
-       if (is_cw40)
-               is_sgi = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40);
-       else if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20)
-               is_sgi = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20);
-
-       /* Choose rate table first */
-
-       rate_table = ath_choose_rate_table(sc, sband->band,
-                             sta->ht_cap.ht_supported);
+       ath_rc_priv->rate_table = ath_choose_rate_table(sc, sband->band,
+                                                       sta->ht_cap.ht_supported);
+       if (!ath_rc_priv->rate_table) {
+               ath_err(common, "No rate table chosen\n");
+               return;
+       }
 
-       ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc, sta, is_cw40, is_sgi);
-       ath_rc_init(sc, priv_sta, sband, sta, rate_table);
+       ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc, sta);
+       ath_rc_init(sc, priv_sta);
 }
 
 static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband,
@@ -1439,40 +1339,14 @@ static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband,
 {
        struct ath_softc *sc = priv;
        struct ath_rate_priv *ath_rc_priv = priv_sta;
-       const struct ath_rate_table *rate_table = NULL;
-       bool oper_cw40 = false, oper_sgi;
-       bool local_cw40 = !!(ath_rc_priv->ht_cap & WLAN_RC_40_FLAG);
-       bool local_sgi = !!(ath_rc_priv->ht_cap & WLAN_RC_SGI_FLAG);
-
-       /* FIXME: Handle AP mode later when we support CWM */
 
        if (changed & IEEE80211_RC_BW_CHANGED) {
-               if (sc->sc_ah->opmode != NL80211_IFTYPE_STATION)
-                       return;
-
-               if (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)
-                       oper_cw40 = true;
-
-               if (oper_cw40)
-                       oper_sgi = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
-                                  true : false;
-               else if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20)
-                       oper_sgi = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
-                                  true : false;
-               else
-                       oper_sgi = false;
-
-               if ((local_cw40 != oper_cw40) || (local_sgi != oper_sgi)) {
-                       rate_table = ath_choose_rate_table(sc, sband->band,
-                                                  sta->ht_cap.ht_supported);
-                       ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc, sta,
-                                                  oper_cw40, oper_sgi);
-                       ath_rc_init(sc, priv_sta, sband, sta, rate_table);
-
-                       ath_dbg(ath9k_hw_common(sc->sc_ah), CONFIG,
-                               "Operating HT Bandwidth changed to: %d\n",
-                               sc->hw->conf.channel_type);
-               }
+               ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc, sta);
+               ath_rc_init(sc, priv_sta);
+
+               ath_dbg(ath9k_hw_common(sc->sc_ah), CONFIG,
+                       "Operating HT Bandwidth changed to: %d\n",
+                       sc->hw->conf.channel_type);
        }
 }
 
@@ -1484,7 +1358,7 @@ static ssize_t read_file_rcstat(struct file *file, char __user *user_buf,
        struct ath_rate_priv *rc = file->private_data;
        char *buf;
        unsigned int len = 0, max;
-       int i = 0;
+       int rix;
        ssize_t retval;
 
        if (rc->rate_table == NULL)
@@ -1500,7 +1374,8 @@ static ssize_t read_file_rcstat(struct file *file, char __user *user_buf,
                       "HT", "MCS", "Rate",
                       "Success", "Retries", "XRetries", "PER");
 
-       for (i = 0; i < rc->rate_table_size; i++) {
+       for (rix = 0; rix < rc->max_valid_rate; rix++) {
+               u8 i = rc->valid_rate_index[rix];
                u32 ratekbps = rc->rate_table->info[i].ratekbps;
                struct ath_rc_stats *stats = &rc->rcstats[i];
                char mcs[5];
index 75f8e9b06b2859d2866f16653c3a22913ee6baf5..268e67dc5fb2d945a26a332081b32167d9e133dc 100644 (file)
@@ -160,10 +160,6 @@ struct ath_rate_table {
                u32 user_ratekbps;
                u8 ratecode;
                u8 dot11rate;
-               u8 ctrl_rate;
-               u8 cw40index;
-               u8 sgi_index;
-               u8 ht_index;
        } info[RATE_TABLE_SIZE];
        u32 probe_interval;
        u8 initial_ratemax;
index 4480c0cc655f6f6ffca73774b2178e748936155e..83d16e7ed27239bfa4734a2b1b66e98b2800dbf4 100644 (file)
@@ -424,8 +424,8 @@ u32 ath_calcrxfilter(struct ath_softc *sc)
                rfilt |= ATH9K_RX_FILTER_COMP_BAR;
 
        if (sc->nvifs > 1 || (sc->rx.rxfilter & FIF_OTHER_BSS)) {
-               /* The following may also be needed for other older chips */
-               if (sc->sc_ah->hw_version.macVersion == AR_SREV_VERSION_9160)
+               /* This is needed for older chips */
+               if (sc->sc_ah->hw_version.macVersion <= AR_SREV_VERSION_9160)
                        rfilt |= ATH9K_RX_FILTER_PROM;
                rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL;
        }
index 87cac8eb78349f1abb6321f4d1f552769726bb20..4e6760f8596d2dc07543ab36d419c1977c3c95ec 100644 (file)
 #define AR_SREV_REVISION_9580_10       4 /* AR9580 1.0 */
 #define AR_SREV_VERSION_9462           0x280
 #define AR_SREV_REVISION_9462_20       2
+#define AR_SREV_VERSION_9565            0x2C0
+#define AR_SREV_REVISION_9565_10        0
 #define AR_SREV_VERSION_9550           0x400
 
 #define AR_SREV_5416(_ah) \
        (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9462) && \
        ((_ah)->hw_version.macRev >= AR_SREV_REVISION_9462_20))
 
+#define AR_SREV_9565(_ah) \
+       (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9565))
+
+#define AR_SREV_9565_10(_ah) \
+       (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9565) && \
+        ((_ah)->hw_version.macRev == AR_SREV_REVISION_9565_10))
+
 #define AR_SREV_9550(_ah) \
        (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9550))
 
index 44a08eb53c62bc74baed99b25176d554571a5f58..a483d518758cfe4d57f1b38c8e8812b3ce3d8a74 100644 (file)
@@ -497,7 +497,7 @@ void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable)
 
        REG_RMW(ah, AR_PCIE_PM_CTRL, set, clr);
 
-       if (AR_SREV_9462(ah)) {
+       if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
                /*
                 * this is needed to prevent the chip waking up
                 * the host within 3-4 seconds with certain
index 0d4155aec48d72196d5c64eee5c2517766760632..36618e3a5e609831184b973e30b3a9aa739522f3 100644 (file)
@@ -568,7 +568,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
                if (!an->sleeping) {
                        ath_tx_queue_tid(txq, tid);
 
-                       if (ts->ts_status & ATH9K_TXERR_FILT)
+                       if (ts->ts_status & (ATH9K_TXERR_FILT | ATH9K_TXERR_XRETRY))
                                tid->ac->clear_ps_filter = true;
                }
        }
@@ -1773,11 +1773,12 @@ static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
        TX_STAT_INC(txq->axq_qnum, queued);
 }
 
-static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
+static void setup_frame_info(struct ieee80211_hw *hw,
+                            struct ieee80211_sta *sta,
+                            struct sk_buff *skb,
                             int framelen)
 {
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
-       struct ieee80211_sta *sta = tx_info->control.sta;
        struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        const struct ieee80211_rate *rate;
@@ -1819,10 +1820,14 @@ u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
 {
        struct ath_hw *ah = sc->sc_ah;
        struct ath9k_channel *curchan = ah->curchan;
+
        if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) &&
            (curchan->channelFlags & CHANNEL_5GHZ) &&
            (chainmask == 0x7) && (rate < 0x90))
                return 0x3;
+       else if (AR_SREV_9462(ah) && ath9k_hw_btcoex_is_enabled(ah) &&
+                IS_CCK_RATE(rate))
+               return 0x2;
        else
                return chainmask;
 }
@@ -1935,7 +1940,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
 {
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-       struct ieee80211_sta *sta = info->control.sta;
+       struct ieee80211_sta *sta = txctl->sta;
        struct ieee80211_vif *vif = info->control.vif;
        struct ath_softc *sc = hw->priv;
        struct ath_txq *txq = txctl->txq;
@@ -1979,7 +1984,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
            !ieee80211_is_data(hdr->frame_control))
                info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
 
-       setup_frame_info(hw, skb, frmlen);
+       setup_frame_info(hw, sta, skb, frmlen);
 
        /*
         * At this point, the vif, hw_key and sta pointers in the tx control
index 376be11161c0bdaaa310205523ebf3d61ba1bdcd..2aa4a59c72c87d4045de3f405d697e0067cf71da 100644 (file)
@@ -425,6 +425,7 @@ struct ar9170 {
        bool rx_has_plcp;
        struct sk_buff *rx_failover;
        int rx_failover_missing;
+       u32 ampdu_ref;
 
        /* FIFO for collecting outstanding BlockAckRequest */
        struct list_head bar_list[__AR9170_NUM_TXQ];
@@ -577,7 +578,9 @@ void carl9170_rx(struct ar9170 *ar, void *buf, unsigned int len);
 void carl9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len);
 
 /* TX */
-void carl9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
+void carl9170_op_tx(struct ieee80211_hw *hw,
+                   struct ieee80211_tx_control *control,
+                   struct sk_buff *skb);
 void carl9170_tx_janitor(struct work_struct *work);
 void carl9170_tx_process_status(struct ar9170 *ar,
                                const struct carl9170_rsp *cmd);
index c5ca6f1f5836c26f1360fde607e02decaca340d0..24ac2876a7337ad2a015f69a4f273165d3d8d7f0 100644 (file)
@@ -341,6 +341,7 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)
                if (SUPP(CARL9170FW_WLANTX_CAB)) {
                        if_comb_types |=
                                BIT(NL80211_IFTYPE_AP) |
+                               BIT(NL80211_IFTYPE_MESH_POINT) |
                                BIT(NL80211_IFTYPE_P2P_GO);
                }
        }
index 53415bfd8bef7e673749904369775520ff74beae..e3b1b6e87760ed0e70ca77dfcb23ae3e4b9df982 100644 (file)
@@ -304,7 +304,8 @@ int carl9170_set_operating_mode(struct ar9170 *ar)
        struct ath_common *common = &ar->common;
        u8 *mac_addr, *bssid;
        u32 cam_mode = AR9170_MAC_CAM_DEFAULTS;
-       u32 enc_mode = AR9170_MAC_ENCRYPTION_DEFAULTS;
+       u32 enc_mode = AR9170_MAC_ENCRYPTION_DEFAULTS |
+               AR9170_MAC_ENCRYPTION_MGMT_RX_SOFTWARE;
        u32 rx_ctrl = AR9170_MAC_RX_CTRL_DEAGG |
                      AR9170_MAC_RX_CTRL_SHORT_FILTER;
        u32 sniffer = AR9170_MAC_SNIFFER_DEFAULTS;
@@ -318,10 +319,10 @@ int carl9170_set_operating_mode(struct ar9170 *ar)
                bssid = common->curbssid;
 
                switch (vif->type) {
-               case NL80211_IFTYPE_MESH_POINT:
                case NL80211_IFTYPE_ADHOC:
                        cam_mode |= AR9170_MAC_CAM_IBSS;
                        break;
+               case NL80211_IFTYPE_MESH_POINT:
                case NL80211_IFTYPE_AP:
                        cam_mode |= AR9170_MAC_CAM_AP;
 
index 858e58dfc4dc67176f7d9693d7babb598ea6980e..67997b39aba79f0d14c47ffdbe4e85248bece20b 100644 (file)
@@ -616,10 +616,12 @@ static int carl9170_op_add_interface(struct ieee80211_hw *hw,
 
                        goto unlock;
 
+               case NL80211_IFTYPE_MESH_POINT:
                case NL80211_IFTYPE_AP:
                        if ((vif->type == NL80211_IFTYPE_STATION) ||
                            (vif->type == NL80211_IFTYPE_WDS) ||
-                           (vif->type == NL80211_IFTYPE_AP))
+                           (vif->type == NL80211_IFTYPE_AP) ||
+                           (vif->type == NL80211_IFTYPE_MESH_POINT))
                                break;
 
                        err = -EBUSY;
@@ -1147,6 +1149,7 @@ static int carl9170_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
                break;
        case WLAN_CIPHER_SUITE_CCMP:
                ktype = AR9170_ENC_ALG_AESCCMP;
+               key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
                break;
        default:
                return -EOPNOTSUPP;
@@ -1778,6 +1781,7 @@ void *carl9170_alloc(size_t priv_size)
        hw->wiphy->interface_modes = 0;
 
        hw->flags |= IEEE80211_HW_RX_INCLUDES_FCS |
+                    IEEE80211_HW_MFP_CAPABLE |
                     IEEE80211_HW_REPORTS_TX_ACK_STATUS |
                     IEEE80211_HW_SUPPORTS_PS |
                     IEEE80211_HW_PS_NULLFUNC_STACK |
index 6f6a34155667d0da84fdb5c9d63824ce7d1ac37b..a0b72307854799b81c36649d916da2940be90ad6 100644 (file)
@@ -206,6 +206,7 @@ void carl9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len)
 
                case NL80211_IFTYPE_AP:
                case NL80211_IFTYPE_ADHOC:
+               case NL80211_IFTYPE_MESH_POINT:
                        carl9170_update_beacon(ar, true);
                        break;
 
@@ -623,7 +624,8 @@ static void carl9170_ba_check(struct ar9170 *ar, void *data, unsigned int len)
 #undef TID_CHECK
 }
 
-static bool carl9170_ampdu_check(struct ar9170 *ar, u8 *buf, u8 ms)
+static bool carl9170_ampdu_check(struct ar9170 *ar, u8 *buf, u8 ms,
+                                struct ieee80211_rx_status *rx_status)
 {
        __le16 fc;
 
@@ -636,6 +638,9 @@ static bool carl9170_ampdu_check(struct ar9170 *ar, u8 *buf, u8 ms)
                return true;
        }
 
+       rx_status->flag |= RX_FLAG_AMPDU_DETAILS | RX_FLAG_AMPDU_LAST_KNOWN;
+       rx_status->ampdu_reference = ar->ampdu_ref;
+
        /*
         * "802.11n - 7.4a.3 A-MPDU contents" describes in which contexts
         * certain frame types can be part of an aMPDU.
@@ -684,12 +689,15 @@ static void carl9170_handle_mpdu(struct ar9170 *ar, u8 *buf, int len)
        if (unlikely(len < sizeof(*mac)))
                goto drop;
 
+       memset(&status, 0, sizeof(status));
+
        mpdu_len = len - sizeof(*mac);
 
        mac = (void *)(buf + mpdu_len);
        mac_status = mac->status;
        switch (mac_status & AR9170_RX_STATUS_MPDU) {
        case AR9170_RX_STATUS_MPDU_FIRST:
+               ar->ampdu_ref++;
                /* Aggregated MPDUs start with an PLCP header */
                if (likely(mpdu_len >= sizeof(struct ar9170_rx_head))) {
                        head = (void *) buf;
@@ -720,12 +728,13 @@ static void carl9170_handle_mpdu(struct ar9170 *ar, u8 *buf, int len)
                break;
 
        case AR9170_RX_STATUS_MPDU_LAST:
+               status.flag |= RX_FLAG_AMPDU_IS_LAST;
+
                /*
                 * The last frame of an A-MPDU has an extra tail
                 * which does contain the phy status of the whole
                 * aggregate.
                 */
-
                if (likely(mpdu_len >= sizeof(struct ar9170_rx_phystatus))) {
                        mpdu_len -= sizeof(struct ar9170_rx_phystatus);
                        phy = (void *)(buf + mpdu_len);
@@ -773,11 +782,10 @@ static void carl9170_handle_mpdu(struct ar9170 *ar, u8 *buf, int len)
        if (unlikely(mpdu_len < (2 + 2 + ETH_ALEN + FCS_LEN)))
                goto drop;
 
-       memset(&status, 0, sizeof(status));
        if (unlikely(carl9170_rx_mac_status(ar, head, mac, &status)))
                goto drop;
 
-       if (!carl9170_ampdu_check(ar, buf, mac_status))
+       if (!carl9170_ampdu_check(ar, buf, mac_status, &status))
                goto drop;
 
        if (phy)
index 6a8681407a1de93373072d5b66dbb39238683804..84377cf580e06a29a69d4ca469ed5736f4aa9936 100644 (file)
@@ -867,14 +867,15 @@ static bool carl9170_tx_cts_check(struct ar9170 *ar,
        return false;
 }
 
-static int carl9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb)
+static int carl9170_tx_prepare(struct ar9170 *ar,
+                              struct ieee80211_sta *sta,
+                              struct sk_buff *skb)
 {
        struct ieee80211_hdr *hdr;
        struct _carl9170_tx_superframe *txc;
        struct carl9170_vif_info *cvif;
        struct ieee80211_tx_info *info;
        struct ieee80211_tx_rate *txrate;
-       struct ieee80211_sta *sta;
        struct carl9170_tx_info *arinfo;
        unsigned int hw_queue;
        int i;
@@ -910,8 +911,6 @@ static int carl9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb)
        else
                cvif = NULL;
 
-       sta = info->control.sta;
-
        txc = (void *)skb_push(skb, sizeof(*txc));
        memset(txc, 0, sizeof(*txc));
 
@@ -1457,20 +1456,21 @@ err_unlock_rcu:
        return false;
 }
 
-void carl9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+void carl9170_op_tx(struct ieee80211_hw *hw,
+                   struct ieee80211_tx_control *control,
+                   struct sk_buff *skb)
 {
        struct ar9170 *ar = hw->priv;
        struct ieee80211_tx_info *info;
-       struct ieee80211_sta *sta;
+       struct ieee80211_sta *sta = control->sta;
        bool run;
 
        if (unlikely(!IS_STARTED(ar)))
                goto err_free;
 
        info = IEEE80211_SKB_CB(skb);
-       sta = info->control.sta;
 
-       if (unlikely(carl9170_tx_prepare(ar, skb)))
+       if (unlikely(carl9170_tx_prepare(ar, sta, skb)))
                goto err_free;
 
        carl9170_tx_accounting(ar, skb);
index 4648bbf76abcb617d97d5a40aa4a4255b1228c74..098fe9ee7096958a73093c2cbe18d7b37216e556 100644 (file)
@@ -4,6 +4,7 @@ b43-y                           += tables.o
 b43-$(CONFIG_B43_PHY_N)                += tables_nphy.o
 b43-$(CONFIG_B43_PHY_N)                += radio_2055.o
 b43-$(CONFIG_B43_PHY_N)                += radio_2056.o
+b43-$(CONFIG_B43_PHY_N)                += radio_2057.o
 b43-y                          += phy_common.o
 b43-y                          += phy_g.o
 b43-y                          += phy_a.o
index 7c899fc7ddd0ea1bced38d801b4eb18d0e1341f6..b298e5d68be2f0a58cf02d45d2ccd9a1e1e464bd 100644 (file)
@@ -241,16 +241,18 @@ enum {
 #define B43_SHM_SH_PHYVER              0x0050  /* PHY version */
 #define B43_SHM_SH_PHYTYPE             0x0052  /* PHY type */
 #define B43_SHM_SH_ANTSWAP             0x005C  /* Antenna swap threshold */
-#define B43_SHM_SH_HOSTFLO             0x005E  /* Hostflags for ucode options (low) */
-#define B43_SHM_SH_HOSTFMI             0x0060  /* Hostflags for ucode options (middle) */
-#define B43_SHM_SH_HOSTFHI             0x0062  /* Hostflags for ucode options (high) */
+#define B43_SHM_SH_HOSTF1              0x005E  /* Hostflags 1 for ucode options */
+#define B43_SHM_SH_HOSTF2              0x0060  /* Hostflags 2 for ucode options */
+#define B43_SHM_SH_HOSTF3              0x0062  /* Hostflags 3 for ucode options */
 #define B43_SHM_SH_RFATT               0x0064  /* Current radio attenuation value */
 #define B43_SHM_SH_RADAR               0x0066  /* Radar register */
 #define B43_SHM_SH_PHYTXNOI            0x006E  /* PHY noise directly after TX (lower 8bit only) */
 #define B43_SHM_SH_RFRXSP1             0x0072  /* RF RX SP Register 1 */
+#define B43_SHM_SH_HOSTF4              0x0078  /* Hostflags 4 for ucode options */
 #define B43_SHM_SH_CHAN                        0x00A0  /* Current channel (low 8bit only) */
 #define  B43_SHM_SH_CHAN_5GHZ          0x0100  /* Bit set, if 5 Ghz channel */
 #define  B43_SHM_SH_CHAN_40MHZ         0x0200  /* Bit set, if 40 Mhz channel width */
+#define B43_SHM_SH_HOSTF5              0x00D4  /* Hostflags 5 for ucode options */
 #define B43_SHM_SH_BCMCFIFOID          0x0108  /* Last posted cookie to the bcast/mcast FIFO */
 /* TSSI information */
 #define B43_SHM_SH_TSSI_CCK            0x0058  /* TSSI for last 4 CCK frames (32bit) */
@@ -415,6 +417,8 @@ enum {
 #define B43_PHYTYPE_HT                 0x07
 #define B43_PHYTYPE_LCN                        0x08
 #define B43_PHYTYPE_LCNXN              0x09
+#define B43_PHYTYPE_LCN40              0x0a
+#define B43_PHYTYPE_AC                 0x0b
 
 /* PHYRegisters */
 #define B43_PHY_ILT_A_CTRL             0x0072
index a140165dfee0515b70263cddf6e7902f4171de48..73730e94e0ac79fdbdf257f1cf969179e1c62a46 100644 (file)
@@ -533,11 +533,11 @@ u64 b43_hf_read(struct b43_wldev *dev)
 {
        u64 ret;
 
-       ret = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFHI);
+       ret = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTF3);
        ret <<= 16;
-       ret |= b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFMI);
+       ret |= b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTF2);
        ret <<= 16;
-       ret |= b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFLO);
+       ret |= b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTF1);
 
        return ret;
 }
@@ -550,9 +550,9 @@ void b43_hf_write(struct b43_wldev *dev, u64 value)
        lo = (value & 0x00000000FFFFULL);
        mi = (value & 0x0000FFFF0000ULL) >> 16;
        hi = (value & 0xFFFF00000000ULL) >> 32;
-       b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFLO, lo);
-       b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFMI, mi);
-       b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFHI, hi);
+       b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTF1, lo);
+       b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTF2, mi);
+       b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTF3, hi);
 }
 
 /* Read the firmware capabilities bitmask (Opensource firmware only) */
@@ -3412,7 +3412,8 @@ static void b43_tx_work(struct work_struct *work)
 }
 
 static void b43_op_tx(struct ieee80211_hw *hw,
-                    struct sk_buff *skb)
+                     struct ieee80211_tx_control *control,
+                     struct sk_buff *skb)
 {
        struct b43_wl *wl = hw_to_b43_wl(hw);
 
@@ -4282,6 +4283,35 @@ out:
        return err;
 }
 
+static char *b43_phy_name(struct b43_wldev *dev, u8 phy_type)
+{
+       switch (phy_type) {
+       case B43_PHYTYPE_A:
+               return "A";
+       case B43_PHYTYPE_B:
+               return "B";
+       case B43_PHYTYPE_G:
+               return "G";
+       case B43_PHYTYPE_N:
+               return "N";
+       case B43_PHYTYPE_LP:
+               return "LP";
+       case B43_PHYTYPE_SSLPN:
+               return "SSLPN";
+       case B43_PHYTYPE_HT:
+               return "HT";
+       case B43_PHYTYPE_LCN:
+               return "LCN";
+       case B43_PHYTYPE_LCNXN:
+               return "LCNXN";
+       case B43_PHYTYPE_LCN40:
+               return "LCN40";
+       case B43_PHYTYPE_AC:
+               return "AC";
+       }
+       return "UNKNOWN";
+}
+
 /* Get PHY and RADIO versioning numbers */
 static int b43_phy_versioning(struct b43_wldev *dev)
 {
@@ -4342,13 +4372,13 @@ static int b43_phy_versioning(struct b43_wldev *dev)
                unsupported = 1;
        }
        if (unsupported) {
-               b43err(dev->wl, "FOUND UNSUPPORTED PHY "
-                      "(Analog %u, Type %u, Revision %u)\n",
-                      analog_type, phy_type, phy_rev);
+               b43err(dev->wl, "FOUND UNSUPPORTED PHY (Analog %u, Type %d (%s), Revision %u)\n",
+                      analog_type, phy_type, b43_phy_name(dev, phy_type),
+                      phy_rev);
                return -EOPNOTSUPP;
        }
-       b43dbg(dev->wl, "Found PHY: Analog %u, Type %u, Revision %u\n",
-              analog_type, phy_type, phy_rev);
+       b43info(dev->wl, "Found PHY: Analog %u, Type %d (%s), Revision %u\n",
+               analog_type, phy_type, b43_phy_name(dev, phy_type), phy_rev);
 
        /* Get RADIO versioning */
        if (dev->dev->core_rev >= 24) {
index 3f8883b14d9cc98ca334890541320aa50836d36e..f01676ac481b25071e9f7aae9ad790dbd5836ff0 100644 (file)
@@ -240,6 +240,21 @@ void b43_radio_maskset(struct b43_wldev *dev, u16 offset, u16 mask, u16 set)
                          (b43_radio_read16(dev, offset) & mask) | set);
 }
 
+bool b43_radio_wait_value(struct b43_wldev *dev, u16 offset, u16 mask,
+                         u16 value, int delay, int timeout)
+{
+       u16 val;
+       int i;
+
+       for (i = 0; i < timeout; i += delay) {
+               val = b43_radio_read(dev, offset);
+               if ((val & mask) == value)
+                       return true;
+               udelay(delay);
+       }
+       return false;
+}
+
 u16 b43_phy_read(struct b43_wldev *dev, u16 reg)
 {
        assert_mac_suspended(dev);
@@ -428,7 +443,7 @@ int b43_phy_shm_tssi_read(struct b43_wldev *dev, u16 shm_offset)
        average = (a + b + c + d + 2) / 4;
        if (is_ofdm) {
                /* Adjust for CCK-boost */
-               if (b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFLO)
+               if (b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTF1)
                    & B43_HF_CCKBOOST)
                        average = (average >= 13) ? (average - 13) : 0;
        }
index 9233b13fc16d8a205eb474a3870f59bc3b6b7e8c..f1b999349876bbfc8cad799858434f5e64a14b37 100644 (file)
@@ -364,6 +364,12 @@ void b43_radio_set(struct b43_wldev *dev, u16 offset, u16 set);
  */
 void b43_radio_maskset(struct b43_wldev *dev, u16 offset, u16 mask, u16 set);
 
+/**
+ * b43_radio_wait_value - Waits for a given value in masked register read
+ */
+bool b43_radio_wait_value(struct b43_wldev *dev, u16 offset, u16 mask,
+                         u16 value, int delay, int timeout);
+
 /**
  * b43_radio_lock - Lock firmware radio register access
  */
index b92bb9c92ad1bb3a228f9e0fbf806da9686c6a5f..3c35382ee6c23ebfbaed8d1dcfdf33e804faec96 100644 (file)
@@ -32,6 +32,7 @@
 #include "tables_nphy.h"
 #include "radio_2055.h"
 #include "radio_2056.h"
+#include "radio_2057.h"
 #include "main.h"
 
 struct nphy_txgains {
@@ -126,6 +127,46 @@ ok:
        b43_phy_write(dev, B43_NPHY_RFSEQMODE, seq_mode);
 }
 
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlOverrideRev7 */
+static void b43_nphy_rf_control_override_rev7(struct b43_wldev *dev, u16 field,
+                                             u16 value, u8 core, bool off,
+                                             u8 override)
+{
+       const struct nphy_rf_control_override_rev7 *e;
+       u16 en_addrs[3][2] = {
+               { 0x0E7, 0x0EC }, { 0x342, 0x343 }, { 0x346, 0x347 }
+       };
+       u16 en_addr;
+       u16 en_mask = field;
+       u16 val_addr;
+       u8 i;
+
+       /* Remember: we can get NULL! */
+       e = b43_nphy_get_rf_ctl_over_rev7(dev, field, override);
+
+       for (i = 0; i < 2; i++) {
+               if (override >= ARRAY_SIZE(en_addrs)) {
+                       b43err(dev->wl, "Invalid override value %d\n", override);
+                       return;
+               }
+               en_addr = en_addrs[override][i];
+
+               val_addr = (i == 0) ? e->val_addr_core0 : e->val_addr_core1;
+
+               if (off) {
+                       b43_phy_mask(dev, en_addr, ~en_mask);
+                       if (e) /* Do it safer, better than wl */
+                               b43_phy_mask(dev, val_addr, ~e->val_mask);
+               } else {
+                       if (!core || (core & (1 << i))) {
+                               b43_phy_set(dev, en_addr, en_mask);
+                               if (e)
+                                       b43_phy_maskset(dev, val_addr, ~e->val_mask, (value << e->val_shift));
+                       }
+               }
+       }
+}
+
 /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlOverride */
 static void b43_nphy_rf_control_override(struct b43_wldev *dev, u16 field,
                                                u16 value, u8 core, bool off)
@@ -458,6 +499,137 @@ static void b43_nphy_set_rf_sequence(struct b43_wldev *dev, u8 cmd,
                b43_nphy_stay_in_carrier_search(dev, false);
 }
 
+/**************************************************
+ * Radio 0x2057
+ **************************************************/
+
+/* http://bcm-v4.sipsolutions.net/PHY/radio2057_rcal */
+static u8 b43_radio_2057_rcal(struct b43_wldev *dev)
+{
+       struct b43_phy *phy = &dev->phy;
+       u16 tmp;
+
+       if (phy->radio_rev == 5) {
+               b43_phy_mask(dev, 0x342, ~0x2);
+               udelay(10);
+               b43_radio_set(dev, R2057_IQTEST_SEL_PU, 0x1);
+               b43_radio_maskset(dev, 0x1ca, ~0x2, 0x1);
+       }
+
+       b43_radio_set(dev, R2057_RCAL_CONFIG, 0x1);
+       udelay(10);
+       b43_radio_set(dev, R2057_RCAL_CONFIG, 0x3);
+       if (!b43_radio_wait_value(dev, R2057_RCCAL_N1_1, 1, 1, 100, 1000000)) {
+               b43err(dev->wl, "Radio 0x2057 rcal timeout\n");
+               return 0;
+       }
+       b43_radio_mask(dev, R2057_RCAL_CONFIG, ~0x2);
+       tmp = b43_radio_read(dev, R2057_RCAL_STATUS) & 0x3E;
+       b43_radio_mask(dev, R2057_RCAL_CONFIG, ~0x1);
+
+       if (phy->radio_rev == 5) {
+               b43_radio_mask(dev, R2057_IPA2G_CASCONV_CORE0, ~0x1);
+               b43_radio_mask(dev, 0x1ca, ~0x2);
+       }
+       if (phy->radio_rev <= 4 || phy->radio_rev == 6) {
+               b43_radio_maskset(dev, R2057_TEMPSENSE_CONFIG, ~0x3C, tmp);
+               b43_radio_maskset(dev, R2057_BANDGAP_RCAL_TRIM, ~0xF0,
+                                 tmp << 2);
+       }
+
+       return tmp & 0x3e;
+}
+
+/* http://bcm-v4.sipsolutions.net/PHY/radio2057_rccal */
+static u16 b43_radio_2057_rccal(struct b43_wldev *dev)
+{
+       struct b43_phy *phy = &dev->phy;
+       bool special = (phy->radio_rev == 3 || phy->radio_rev == 4 ||
+                       phy->radio_rev == 6);
+       u16 tmp;
+
+       if (special) {
+               b43_radio_write(dev, R2057_RCCAL_MASTER, 0x61);
+               b43_radio_write(dev, R2057_RCCAL_TRC0, 0xC0);
+       } else {
+               b43_radio_write(dev, 0x1AE, 0x61);
+               b43_radio_write(dev, R2057_RCCAL_TRC0, 0xE1);
+       }
+       b43_radio_write(dev, R2057_RCCAL_X1, 0x6E);
+       b43_radio_write(dev, R2057_RCCAL_START_R1_Q1_P1, 0x55);
+       if (!b43_radio_wait_value(dev, R2057_RCCAL_DONE_OSCCAP, 1, 1, 500,
+                                 5000000))
+               b43dbg(dev->wl, "Radio 0x2057 rccal timeout\n");
+       b43_radio_write(dev, R2057_RCCAL_START_R1_Q1_P1, 0x15);
+       if (special) {
+               b43_radio_write(dev, R2057_RCCAL_MASTER, 0x69);
+               b43_radio_write(dev, R2057_RCCAL_TRC0, 0xB0);
+       } else {
+               b43_radio_write(dev, 0x1AE, 0x69);
+               b43_radio_write(dev, R2057_RCCAL_TRC0, 0xD5);
+       }
+       b43_radio_write(dev, R2057_RCCAL_X1, 0x6E);
+       b43_radio_write(dev, R2057_RCCAL_START_R1_Q1_P1, 0x55);
+       if (!b43_radio_wait_value(dev, R2057_RCCAL_DONE_OSCCAP, 1, 1, 500,
+                                 5000000))
+               b43dbg(dev->wl, "Radio 0x2057 rccal timeout\n");
+       b43_radio_write(dev, R2057_RCCAL_START_R1_Q1_P1, 0x15);
+       if (special) {
+               b43_radio_write(dev, R2057_RCCAL_MASTER, 0x73);
+               b43_radio_write(dev, R2057_RCCAL_X1, 0x28);
+               b43_radio_write(dev, R2057_RCCAL_TRC0, 0xB0);
+       } else {
+               b43_radio_write(dev, 0x1AE, 0x73);
+               b43_radio_write(dev, R2057_RCCAL_X1, 0x6E);
+               b43_radio_write(dev, R2057_RCCAL_TRC0, 0x99);
+       }
+       b43_radio_write(dev, R2057_RCCAL_START_R1_Q1_P1, 0x55);
+       if (!b43_radio_wait_value(dev, R2057_RCCAL_DONE_OSCCAP, 1, 1, 500,
+                                 5000000)) {
+               b43err(dev->wl, "Radio 0x2057 rcal timeout\n");
+               return 0;
+       }
+       tmp = b43_radio_read(dev, R2057_RCCAL_DONE_OSCCAP);
+       b43_radio_write(dev, R2057_RCCAL_START_R1_Q1_P1, 0x15);
+       return tmp;
+}
+
+static void b43_radio_2057_init_pre(struct b43_wldev *dev)
+{
+       b43_phy_mask(dev, B43_NPHY_RFCTL_CMD, ~B43_NPHY_RFCTL_CMD_CHIP0PU);
+       /* Maybe wl meant to reset and set (order?) RFCTL_CMD_OEPORFORCE? */
+       b43_phy_mask(dev, B43_NPHY_RFCTL_CMD, B43_NPHY_RFCTL_CMD_OEPORFORCE);
+       b43_phy_set(dev, B43_NPHY_RFCTL_CMD, ~B43_NPHY_RFCTL_CMD_OEPORFORCE);
+       b43_phy_set(dev, B43_NPHY_RFCTL_CMD, B43_NPHY_RFCTL_CMD_CHIP0PU);
+}
+
+static void b43_radio_2057_init_post(struct b43_wldev *dev)
+{
+       b43_radio_set(dev, R2057_XTALPUOVR_PINCTRL, 0x1);
+
+       b43_radio_set(dev, R2057_RFPLL_MISC_CAL_RESETN, 0x78);
+       b43_radio_set(dev, R2057_XTAL_CONFIG2, 0x80);
+       mdelay(2);
+       b43_radio_mask(dev, R2057_RFPLL_MISC_CAL_RESETN, ~0x78);
+       b43_radio_mask(dev, R2057_XTAL_CONFIG2, ~0x80);
+
+       if (dev->phy.n->init_por) {
+               b43_radio_2057_rcal(dev);
+               b43_radio_2057_rccal(dev);
+       }
+       b43_radio_mask(dev, R2057_RFPLL_MASTER, ~0x8);
+
+       dev->phy.n->init_por = false;
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/Radio/2057/Init */
+static void b43_radio_2057_init(struct b43_wldev *dev)
+{
+       b43_radio_2057_init_pre(dev);
+       r2057_upload_inittabs(dev);
+       b43_radio_2057_init_post(dev);
+}
+
 /**************************************************
  * Radio 0x2056
  **************************************************/
@@ -545,7 +717,9 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
        enum ieee80211_band band = b43_current_band(dev->wl);
        u16 offset;
        u8 i;
-       u16 bias, cbias, pag_boost, pgag_boost, mixg_boost, padg_boost;
+       u16 bias, cbias;
+       u16 pag_boost, padg_boost, pgag_boost, mixg_boost;
+       u16 paa_boost, pada_boost, pgaa_boost, mixa_boost;
 
        B43_WARN_ON(dev->phy.rev < 3);
 
@@ -630,7 +804,56 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
                        b43_radio_write(dev, offset | B2056_TX_PA_SPARE1, 0xee);
                }
        } else if (dev->phy.n->ipa5g_on && band == IEEE80211_BAND_5GHZ) {
-               /* TODO */
+               u16 freq = dev->phy.channel_freq;
+               if (freq < 5100) {
+                       paa_boost = 0xA;
+                       pada_boost = 0x77;
+                       pgaa_boost = 0xF;
+                       mixa_boost = 0xF;
+               } else if (freq < 5340) {
+                       paa_boost = 0x8;
+                       pada_boost = 0x77;
+                       pgaa_boost = 0xFB;
+                       mixa_boost = 0xF;
+               } else if (freq < 5650) {
+                       paa_boost = 0x0;
+                       pada_boost = 0x77;
+                       pgaa_boost = 0xB;
+                       mixa_boost = 0xF;
+               } else {
+                       paa_boost = 0x0;
+                       pada_boost = 0x77;
+                       if (freq != 5825)
+                               pgaa_boost = -(freq - 18) / 36 + 168;
+                       else
+                               pgaa_boost = 6;
+                       mixa_boost = 0xF;
+               }
+
+               for (i = 0; i < 2; i++) {
+                       offset = i ? B2056_TX1 : B2056_TX0;
+
+                       b43_radio_write(dev,
+                               offset | B2056_TX_INTPAA_BOOST_TUNE, paa_boost);
+                       b43_radio_write(dev,
+                               offset | B2056_TX_PADA_BOOST_TUNE, pada_boost);
+                       b43_radio_write(dev,
+                               offset | B2056_TX_PGAA_BOOST_TUNE, pgaa_boost);
+                       b43_radio_write(dev,
+                               offset | B2056_TX_MIXA_BOOST_TUNE, mixa_boost);
+                       b43_radio_write(dev,
+                               offset | B2056_TX_TXSPARE1, 0x30);
+                       b43_radio_write(dev,
+                               offset | B2056_TX_PA_SPARE2, 0xee);
+                       b43_radio_write(dev,
+                               offset | B2056_TX_PADA_CASCBIAS, 0x03);
+                       b43_radio_write(dev,
+                               offset | B2056_TX_INTPAA_IAUX_STAT, 0x50);
+                       b43_radio_write(dev,
+                               offset | B2056_TX_INTPAA_IMAIN_STAT, 0x50);
+                       b43_radio_write(dev,
+                               offset | B2056_TX_INTPAA_CASCBIAS, 0x30);
+               }
        }
 
        udelay(50);
@@ -643,6 +866,37 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
        udelay(300);
 }
 
+static u8 b43_radio_2056_rcal(struct b43_wldev *dev)
+{
+       struct b43_phy *phy = &dev->phy;
+       u16 mast2, tmp;
+
+       if (phy->rev != 3)
+               return 0;
+
+       mast2 = b43_radio_read(dev, B2056_SYN_PLL_MAST2);
+       b43_radio_write(dev, B2056_SYN_PLL_MAST2, mast2 | 0x7);
+
+       udelay(10);
+       b43_radio_write(dev, B2056_SYN_RCAL_MASTER, 0x01);
+       udelay(10);
+       b43_radio_write(dev, B2056_SYN_RCAL_MASTER, 0x09);
+
+       if (!b43_radio_wait_value(dev, B2056_SYN_RCAL_CODE_OUT, 0x80, 0x80, 100,
+                                 1000000)) {
+               b43err(dev->wl, "Radio recalibration timeout\n");
+               return 0;
+       }
+
+       b43_radio_write(dev, B2056_SYN_RCAL_MASTER, 0x01);
+       tmp = b43_radio_read(dev, B2056_SYN_RCAL_CODE_OUT);
+       b43_radio_write(dev, B2056_SYN_RCAL_MASTER, 0x00);
+
+       b43_radio_write(dev, B2056_SYN_PLL_MAST2, mast2);
+
+       return tmp & 0x1f;
+}
+
 static void b43_radio_init2056_pre(struct b43_wldev *dev)
 {
        b43_phy_mask(dev, B43_NPHY_RFCTL_CMD,
@@ -665,10 +919,8 @@ static void b43_radio_init2056_post(struct b43_wldev *dev)
        b43_radio_mask(dev, B2056_SYN_COM_RESET, ~0x2);
        b43_radio_mask(dev, B2056_SYN_PLL_MAST2, ~0xFC);
        b43_radio_mask(dev, B2056_SYN_RCCAL_CTRL0, ~0x1);
-       /*
-       if (nphy->init_por)
-               Call Radio 2056 Recalibrate
-       */
+       if (dev->phy.n->init_por)
+               b43_radio_2056_rcal(dev);
 }
 
 /*
@@ -680,6 +932,8 @@ static void b43_radio_init2056(struct b43_wldev *dev)
        b43_radio_init2056_pre(dev);
        b2056_upload_inittabs(dev, 0, 0);
        b43_radio_init2056_post(dev);
+
+       dev->phy.n->init_por = false;
 }
 
 /**************************************************
@@ -753,8 +1007,6 @@ static void b43_radio_init2055_post(struct b43_wldev *dev)
 {
        struct b43_phy_n *nphy = dev->phy.n;
        struct ssb_sprom *sprom = dev->dev->bus_sprom;
-       int i;
-       u16 val;
        bool workaround = false;
 
        if (sprom->revision < 4)
@@ -777,15 +1029,7 @@ static void b43_radio_init2055_post(struct b43_wldev *dev)
        b43_radio_set(dev, B2055_CAL_MISC, 0x1);
        msleep(1);
        b43_radio_set(dev, B2055_CAL_MISC, 0x40);
-       for (i = 0; i < 200; i++) {
-               val = b43_radio_read(dev, B2055_CAL_COUT2);
-               if (val & 0x80) {
-                       i = 0;
-                       break;
-               }
-               udelay(10);
-       }
-       if (i)
+       if (!b43_radio_wait_value(dev, B2055_CAL_COUT2, 0x80, 0x80, 10, 2000))
                b43err(dev->wl, "radio post init timeout\n");
        b43_radio_mask(dev, B2055_CAL_LPOCTL, 0xFF7F);
        b43_switch_channel(dev, dev->phy.channel);
@@ -1860,12 +2104,334 @@ static void b43_nphy_gain_ctl_workarounds_rev1_2(struct b43_wldev *dev)
 /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/WorkaroundsGainCtrl */
 static void b43_nphy_gain_ctl_workarounds(struct b43_wldev *dev)
 {
-       if (dev->phy.rev >= 3)
+       if (dev->phy.rev >= 7)
+               ; /* TODO */
+       else if (dev->phy.rev >= 3)
                b43_nphy_gain_ctl_workarounds_rev3plus(dev);
        else
                b43_nphy_gain_ctl_workarounds_rev1_2(dev);
 }
 
+/* http://bcm-v4.sipsolutions.net/PHY/N/Read_Lpf_Bw_Ctl */
+static u16 b43_nphy_read_lpf_ctl(struct b43_wldev *dev, u16 offset)
+{
+       if (!offset)
+               offset = (dev->phy.is_40mhz) ? 0x159 : 0x154; /* 0 = default entry for current bandwidth */
+       return b43_ntab_read(dev, B43_NTAB16(7, offset)) & 0x7; /* LPF BW ctl is the low 3 bits */
+}
+
+static void b43_nphy_workarounds_rev7plus(struct b43_wldev *dev)
+{
+       struct ssb_sprom *sprom = dev->dev->bus_sprom;
+       struct b43_phy *phy = &dev->phy;
+
+       u8 rx2tx_events_ipa[9] = { 0x0, 0x1, 0x2, 0x8, 0x5, 0x6, 0xF, 0x3,
+                                       0x1F };
+       u8 rx2tx_delays_ipa[9] = { 8, 6, 6, 4, 4, 16, 43, 1, 1 };
+
+       u16 ntab7_15e_16e[] = { 0x10f, 0x10f };
+       u8 ntab7_138_146[] = { 0x11, 0x11 };
+       u8 ntab7_133[] = { 0x77, 0x11, 0x11 };
+
+       u16 lpf_20, lpf_40, lpf_11b;
+       u16 bcap_val, bcap_val_11b, bcap_val_11n_20, bcap_val_11n_40;
+       u16 scap_val, scap_val_11b, scap_val_11n_20, scap_val_11n_40;
+       bool rccal_ovrd = false; /* set below when RC-cal cap/LPF values get overridden */
+
+       u16 rx2tx_lut_20_11b, rx2tx_lut_20_11n, rx2tx_lut_40_11n;
+       u16 bias, conv, filt;
+
+       u32 tmp32;
+       u8 core;
+
+       if (phy->rev == 7) { /* rev 7 only: FINERX2 gating + FREQGAIN0..7 values */
+               b43_phy_set(dev, B43_NPHY_FINERX2_CGC, 0x10);
+               b43_phy_maskset(dev, B43_NPHY_FREQGAIN0, 0xFF80, 0x0020);
+               b43_phy_maskset(dev, B43_NPHY_FREQGAIN0, 0x80FF, 0x2700);
+               b43_phy_maskset(dev, B43_NPHY_FREQGAIN1, 0xFF80, 0x002E);
+               b43_phy_maskset(dev, B43_NPHY_FREQGAIN1, 0x80FF, 0x3300);
+               b43_phy_maskset(dev, B43_NPHY_FREQGAIN2, 0xFF80, 0x0037);
+               b43_phy_maskset(dev, B43_NPHY_FREQGAIN2, 0x80FF, 0x3A00);
+               b43_phy_maskset(dev, B43_NPHY_FREQGAIN3, 0xFF80, 0x003C);
+               b43_phy_maskset(dev, B43_NPHY_FREQGAIN3, 0x80FF, 0x3E00);
+               b43_phy_maskset(dev, B43_NPHY_FREQGAIN4, 0xFF80, 0x003E);
+               b43_phy_maskset(dev, B43_NPHY_FREQGAIN4, 0x80FF, 0x3F00);
+               b43_phy_maskset(dev, B43_NPHY_FREQGAIN5, 0xFF80, 0x0040);
+               b43_phy_maskset(dev, B43_NPHY_FREQGAIN5, 0x80FF, 0x4000);
+               b43_phy_maskset(dev, B43_NPHY_FREQGAIN6, 0xFF80, 0x0040);
+               b43_phy_maskset(dev, B43_NPHY_FREQGAIN6, 0x80FF, 0x4000);
+               b43_phy_maskset(dev, B43_NPHY_FREQGAIN7, 0xFF80, 0x0040);
+               b43_phy_maskset(dev, B43_NPHY_FREQGAIN7, 0x80FF, 0x4000);
+       }
+       if (phy->rev <= 8) {
+               b43_phy_write(dev, 0x23F, 0x1B0);
+               b43_phy_write(dev, 0x240, 0x1B0);
+       }
+       if (phy->rev >= 8)
+               b43_phy_maskset(dev, B43_NPHY_TXTAILCNT, ~0xFF, 0x72);
+
+       b43_ntab_write(dev, B43_NTAB16(8, 0x00), 2);
+       b43_ntab_write(dev, B43_NTAB16(8, 0x10), 2);
+       tmp32 = b43_ntab_read(dev, B43_NTAB32(30, 0));
+       tmp32 &= 0xffffff; /* clear the top byte of table 30 entry 0 */
+       b43_ntab_write(dev, B43_NTAB32(30, 0), tmp32);
+       b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x15e), 2, ntab7_15e_16e);
+       b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x16e), 2, ntab7_15e_16e);
+
+       if (b43_nphy_ipa(dev))
+               b43_nphy_set_rf_sequence(dev, 0, rx2tx_events_ipa,
+                               rx2tx_delays_ipa, ARRAY_SIZE(rx2tx_events_ipa));
+
+       b43_phy_maskset(dev, 0x299, 0x3FFF, 0x4000);
+       b43_phy_maskset(dev, 0x29D, 0x3FFF, 0x4000);
+
+       lpf_20 = b43_nphy_read_lpf_ctl(dev, 0x154); /* 20 MHz entry */
+       lpf_40 = b43_nphy_read_lpf_ctl(dev, 0x159); /* 40 MHz entry */
+       lpf_11b = b43_nphy_read_lpf_ctl(dev, 0x152);
+       if (b43_nphy_ipa(dev)) {
+               if ((phy->radio_rev == 5 && phy->is_40mhz) ||
+                   phy->radio_rev == 7 || phy->radio_rev == 8) {
+                       bcap_val = b43_radio_read(dev, 0x16b);
+                       scap_val = b43_radio_read(dev, 0x16a);
+                       scap_val_11b = scap_val;
+                       bcap_val_11b = bcap_val;
+                       if (phy->radio_rev == 5 && phy->is_40mhz) {
+                               scap_val_11n_20 = scap_val;
+                               bcap_val_11n_20 = bcap_val;
+                               scap_val_11n_40 = bcap_val_11n_40 = 0xc;
+                               rccal_ovrd = true;
+                       } else { /* Rev 7/8 */
+                               lpf_20 = 4;
+                               lpf_11b = 1;
+                               if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+                                       scap_val_11n_20 = 0xc;
+                                       bcap_val_11n_20 = 0xc;
+                                       scap_val_11n_40 = 0xa;
+                                       bcap_val_11n_40 = 0xa;
+                               } else {
+                                       scap_val_11n_20 = 0x14;
+                                       bcap_val_11n_20 = 0x14;
+                                       scap_val_11n_40 = 0xf;
+                                       bcap_val_11n_40 = 0xf;
+                               }
+                               rccal_ovrd = true;
+                       }
+               }
+       } else {
+               if (phy->radio_rev == 5) {
+                       lpf_20 = 1;
+                       lpf_40 = 3;
+                       bcap_val = b43_radio_read(dev, 0x16b);
+                       scap_val = b43_radio_read(dev, 0x16a);
+                       scap_val_11b = scap_val;
+                       bcap_val_11b = bcap_val;
+                       scap_val_11n_20 = 0x11;
+                       scap_val_11n_40 = 0x11;
+                       bcap_val_11n_20 = 0x13;
+                       bcap_val_11n_40 = 0x13;
+                       rccal_ovrd = true;
+               }
+       }
+       if (rccal_ovrd) { /* LUT entry layout: bcap[12:8] | scap[7:3] | lpf[2:0] */
+               rx2tx_lut_20_11b = (bcap_val_11b << 8) |
+                                  (scap_val_11b << 3) |
+                                  lpf_11b;
+               rx2tx_lut_20_11n = (bcap_val_11n_20 << 8) |
+                                  (scap_val_11n_20 << 3) |
+                                  lpf_20;
+               rx2tx_lut_40_11n = (bcap_val_11n_40 << 8) |
+                                  (scap_val_11n_40 << 3) |
+                                  lpf_40;
+               for (core = 0; core < 2; core++) {
+                       b43_ntab_write(dev, B43_NTAB16(7, 0x152 + core * 16),
+                                      rx2tx_lut_20_11b);
+                       b43_ntab_write(dev, B43_NTAB16(7, 0x153 + core * 16),
+                                      rx2tx_lut_20_11n);
+                       b43_ntab_write(dev, B43_NTAB16(7, 0x154 + core * 16),
+                                      rx2tx_lut_20_11n);
+                       b43_ntab_write(dev, B43_NTAB16(7, 0x155 + core * 16),
+                                      rx2tx_lut_40_11n);
+                       b43_ntab_write(dev, B43_NTAB16(7, 0x156 + core * 16),
+                                      rx2tx_lut_40_11n);
+                       b43_ntab_write(dev, B43_NTAB16(7, 0x157 + core * 16),
+                                      rx2tx_lut_40_11n);
+                       b43_ntab_write(dev, B43_NTAB16(7, 0x158 + core * 16),
+                                      rx2tx_lut_40_11n);
+                       b43_ntab_write(dev, B43_NTAB16(7, 0x159 + core * 16),
+                                      rx2tx_lut_40_11n);
+               }
+               b43_nphy_rf_control_override_rev7(dev, 16, 1, 3, false, 2);
+       }
+       b43_phy_write(dev, 0x32F, 0x3);
+       if (phy->radio_rev == 4 || phy->radio_rev == 6)
+               b43_nphy_rf_control_override_rev7(dev, 4, 1, 3, false, 0);
+
+       if (phy->radio_rev == 3 || phy->radio_rev == 4 || phy->radio_rev == 6) {
+               if (sprom->revision &&
+                   sprom->boardflags2_hi & B43_BFH2_IPALVLSHIFT_3P3) {
+                       b43_radio_write(dev, 0x5, 0x05);
+                       b43_radio_write(dev, 0x6, 0x30);
+                       b43_radio_write(dev, 0x7, 0x00);
+                       b43_radio_set(dev, 0x4f, 0x1);
+                       b43_radio_set(dev, 0xd4, 0x1);
+                       bias = 0x1f;
+                       conv = 0x6f;
+                       filt = 0xaa;
+               } else {
+                       bias = 0x2b;
+                       conv = 0x7f;
+                       filt = 0xee;
+               }
+               if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+                       for (core = 0; core < 2; core++) {
+                               if (core == 0) {
+                                       b43_radio_write(dev, 0x5F, bias);
+                                       b43_radio_write(dev, 0x64, conv);
+                                       b43_radio_write(dev, 0x66, filt);
+                               } else {
+                                       b43_radio_write(dev, 0xE8, bias);
+                                       b43_radio_write(dev, 0xE9, conv);
+                                       b43_radio_write(dev, 0xEB, filt);
+                               }
+                       }
+               }
+       }
+
+       if (b43_nphy_ipa(dev)) {
+               if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+                       if (phy->radio_rev == 3 || phy->radio_rev == 4 ||
+                           phy->radio_rev == 6) {
+                               for (core = 0; core < 2; core++) {
+                                       if (core == 0)
+                                               b43_radio_write(dev, 0x51,
+                                                               0x7f);
+                                       else
+                                               b43_radio_write(dev, 0xd6,
+                                                               0x7f);
+                               }
+                       }
+                       if (phy->radio_rev == 3) {
+                               for (core = 0; core < 2; core++) {
+                                       if (core == 0) {
+                                               b43_radio_write(dev, 0x64,
+                                                               0x13);
+                                               b43_radio_write(dev, 0x5F,
+                                                               0x1F);
+                                               b43_radio_write(dev, 0x66,
+                                                               0xEE);
+                                               b43_radio_write(dev, 0x59,
+                                                               0x8A);
+                                               b43_radio_write(dev, 0x80,
+                                                               0x3E);
+                                       } else {
+                                               b43_radio_write(dev, 0x69,
+                                                               0x13);
+                                               b43_radio_write(dev, 0xE8,
+                                                               0x1F);
+                                               b43_radio_write(dev, 0xEB,
+                                                               0xEE);
+                                               b43_radio_write(dev, 0xDE,
+                                                               0x8A);
+                                               b43_radio_write(dev, 0x105,
+                                                               0x3E);
+                                       }
+                               }
+                       } else if (phy->radio_rev == 7 || phy->radio_rev == 8) {
+                               if (!phy->is_40mhz) {
+                                       b43_radio_write(dev, 0x5F, 0x14);
+                                       b43_radio_write(dev, 0xE8, 0x12);
+                               } else {
+                                       b43_radio_write(dev, 0x5F, 0x16);
+                                       b43_radio_write(dev, 0xE8, 0x16);
+                               }
+                       }
+               } else {
+                       u16 freq = phy->channel_freq;
+                       if ((freq >= 5180 && freq <= 5230) ||
+                           (freq >= 5745 && freq <= 5805)) {
+                               b43_radio_write(dev, 0x7D, 0xFF);
+                               b43_radio_write(dev, 0xFE, 0xFF);
+                       }
+               }
+       } else {
+               if (phy->radio_rev != 5) {
+                       for (core = 0; core < 2; core++) {
+                               if (core == 0) {
+                                       b43_radio_write(dev, 0x5c, 0x61);
+                                       b43_radio_write(dev, 0x51, 0x70);
+                               } else {
+                                       b43_radio_write(dev, 0xe1, 0x61);
+                                       b43_radio_write(dev, 0xd6, 0x70);
+                               }
+                       }
+               }
+       }
+
+       if (phy->radio_rev == 4) {
+               b43_ntab_write(dev, B43_NTAB16(8, 0x05), 0x20);
+               b43_ntab_write(dev, B43_NTAB16(8, 0x15), 0x20);
+               for (core = 0; core < 2; core++) {
+                       if (core == 0) {
+                               b43_radio_write(dev, 0x1a1, 0x00);
+                               b43_radio_write(dev, 0x1a2, 0x3f);
+                               b43_radio_write(dev, 0x1a6, 0x3f);
+                       } else {
+                               b43_radio_write(dev, 0x1a7, 0x00);
+                               b43_radio_write(dev, 0x1ab, 0x3f);
+                               b43_radio_write(dev, 0x1ac, 0x3f);
+                       }
+               }
+       } else {
+               b43_phy_set(dev, B43_NPHY_AFECTL_C1, 0x4);
+               b43_phy_set(dev, B43_NPHY_AFECTL_OVER1, 0x4);
+               b43_phy_set(dev, B43_NPHY_AFECTL_C2, 0x4);
+               b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x4);
+
+               b43_phy_mask(dev, B43_NPHY_AFECTL_C1, ~0x1);
+               b43_phy_set(dev, B43_NPHY_AFECTL_OVER1, 0x1);
+               b43_phy_mask(dev, B43_NPHY_AFECTL_C2, ~0x1);
+               b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x1);
+               b43_ntab_write(dev, B43_NTAB16(8, 0x05), 0x20);
+               b43_ntab_write(dev, B43_NTAB16(8, 0x15), 0x20);
+
+               b43_phy_mask(dev, B43_NPHY_AFECTL_C1, ~0x4);
+               b43_phy_mask(dev, B43_NPHY_AFECTL_OVER1, ~0x4);
+               b43_phy_mask(dev, B43_NPHY_AFECTL_C2, ~0x4);
+               b43_phy_mask(dev, B43_NPHY_AFECTL_OVER, ~0x4);
+       }
+
+       b43_phy_write(dev, B43_NPHY_ENDROP_TLEN, 0x2);
+
+       b43_ntab_write(dev, B43_NTAB32(16, 0x100), 20);
+       b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x138), 2, ntab7_138_146);
+       b43_ntab_write(dev, B43_NTAB16(7, 0x141), 0x77);
+       b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x133), 3, ntab7_133);
+       b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x146), 2, ntab7_138_146);
+       b43_ntab_write(dev, B43_NTAB16(7, 0x123), 0x77);
+       b43_ntab_write(dev, B43_NTAB16(7, 0x12A), 0x77);
+
+       if (!phy->is_40mhz) { /* 20 MHz: 0x18D, 40 MHz: 0x14D */
+               b43_ntab_write(dev, B43_NTAB32(16, 0x03), 0x18D);
+               b43_ntab_write(dev, B43_NTAB32(16, 0x7F), 0x18D);
+       } else {
+               b43_ntab_write(dev, B43_NTAB32(16, 0x03), 0x14D);
+               b43_ntab_write(dev, B43_NTAB32(16, 0x7F), 0x14D);
+       }
+
+       b43_nphy_gain_ctl_workarounds(dev);
+
+       /* TODO
+       b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x08), 4,
+                           aux_adc_vmid_rev7_core0);
+       b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x18), 4,
+                           aux_adc_vmid_rev7_core1);
+       b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x0C), 4,
+                           aux_adc_gain_rev7);
+       b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x1C), 4,
+                           aux_adc_gain_rev7);
+       */
+}
+
 static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
 {
        struct b43_phy_n *nphy = dev->phy.n;
@@ -1916,7 +2482,7 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
                        rx2tx_delays[6] = 1;
                        rx2tx_events[7] = 0x1F;
                }
-               b43_nphy_set_rf_sequence(dev, 1, rx2tx_events, rx2tx_delays,
+               b43_nphy_set_rf_sequence(dev, 0, rx2tx_events, rx2tx_delays,
                                         ARRAY_SIZE(rx2tx_events));
        }
 
@@ -1926,8 +2492,13 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
 
        b43_phy_maskset(dev, 0x294, 0xF0FF, 0x0700);
 
-       b43_ntab_write(dev, B43_NTAB32(16, 3), 0x18D);
-       b43_ntab_write(dev, B43_NTAB32(16, 127), 0x18D);
+       if (!dev->phy.is_40mhz) {
+               b43_ntab_write(dev, B43_NTAB32(16, 3), 0x18D);
+               b43_ntab_write(dev, B43_NTAB32(16, 127), 0x18D);
+       } else {
+               b43_ntab_write(dev, B43_NTAB32(16, 3), 0x14D);
+               b43_ntab_write(dev, B43_NTAB32(16, 127), 0x14D);
+       }
 
        b43_nphy_gain_ctl_workarounds(dev);
 
@@ -1963,13 +2534,14 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
        b43_ntab_write(dev, B43_NTAB32(30, 3), tmp32);
 
        if (dev->phy.rev == 4 &&
-               b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+           b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
                b43_radio_write(dev, B2056_TX0 | B2056_TX_GMBB_IDAC,
                                0x70);
                b43_radio_write(dev, B2056_TX1 | B2056_TX_GMBB_IDAC,
                                0x70);
        }
 
+       /* Dropped probably-always-true condition */
        b43_phy_write(dev, 0x224, 0x03eb);
        b43_phy_write(dev, 0x225, 0x03eb);
        b43_phy_write(dev, 0x226, 0x0341);
@@ -1982,6 +2554,9 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
        b43_phy_write(dev, 0x22d, 0x042b);
        b43_phy_write(dev, 0x22e, 0x0381);
        b43_phy_write(dev, 0x22f, 0x0381);
+
+       if (dev->phy.rev >= 6 && sprom->boardflags2_lo & B43_BFL2_SINGLEANT_CCK)
+               ; /* TODO: 0x0080000000000000 HF */
 }
 
 static void b43_nphy_workarounds_rev1_2(struct b43_wldev *dev)
@@ -1996,6 +2571,12 @@ static void b43_nphy_workarounds_rev1_2(struct b43_wldev *dev)
        u8 events2[7] = { 0x0, 0x3, 0x5, 0x4, 0x2, 0x1, 0x8 };
        u8 delays2[7] = { 0x8, 0x6, 0x2, 0x4, 0x4, 0x6, 0x1 };
 
+       if (sprom->boardflags2_lo & B43_BFL2_SKWRKFEM_BRD ||
+           dev->dev->board_type == 0x8B) {
+               delays1[0] = 0x1;
+               delays1[5] = 0x14;
+       }
+
        if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ &&
            nphy->band5g_pwrgain) {
                b43_radio_mask(dev, B2055_C1_TX_RF_SPARE, ~0x8);
@@ -2007,8 +2588,10 @@ static void b43_nphy_workarounds_rev1_2(struct b43_wldev *dev)
 
        b43_ntab_write(dev, B43_NTAB16(8, 0x00), 0x000A);
        b43_ntab_write(dev, B43_NTAB16(8, 0x10), 0x000A);
-       b43_ntab_write(dev, B43_NTAB16(8, 0x02), 0xCDAA);
-       b43_ntab_write(dev, B43_NTAB16(8, 0x12), 0xCDAA);
+       if (dev->phy.rev < 3) {
+               b43_ntab_write(dev, B43_NTAB16(8, 0x02), 0xCDAA);
+               b43_ntab_write(dev, B43_NTAB16(8, 0x12), 0xCDAA);
+       }
 
        if (dev->phy.rev < 2) {
                b43_ntab_write(dev, B43_NTAB16(8, 0x08), 0x0000);
@@ -2024,11 +2607,6 @@ static void b43_nphy_workarounds_rev1_2(struct b43_wldev *dev)
        b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO2, 0x2D8);
        b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2, 0x301);
 
-       if (sprom->boardflags2_lo & B43_BFL2_SKWRKFEM_BRD &&
-           dev->dev->board_type == 0x8B) {
-               delays1[0] = 0x1;
-               delays1[5] = 0x14;
-       }
        b43_nphy_set_rf_sequence(dev, 0, events1, delays1, 7);
        b43_nphy_set_rf_sequence(dev, 1, events2, delays2, 7);
 
@@ -2055,11 +2633,13 @@ static void b43_nphy_workarounds_rev1_2(struct b43_wldev *dev)
        b43_phy_write(dev, B43_NPHY_PHASETR_B1, 0xCD);
        b43_phy_write(dev, B43_NPHY_PHASETR_B2, 0x20);
 
-       b43_phy_mask(dev, B43_NPHY_PIL_DW1,
-                       ~B43_NPHY_PIL_DW_64QAM & 0xFFFF);
-       b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B1, 0xB5);
-       b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B2, 0xA4);
-       b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B3, 0x00);
+       if (dev->phy.rev < 3) {
+               b43_phy_mask(dev, B43_NPHY_PIL_DW1,
+                            ~B43_NPHY_PIL_DW_64QAM & 0xFFFF);
+               b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B1, 0xB5);
+               b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B2, 0xA4);
+               b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B3, 0x00);
+       }
 
        if (dev->phy.rev == 2)
                b43_phy_set(dev, B43_NPHY_FINERX2_CGC,
@@ -2083,7 +2663,9 @@ static void b43_nphy_workarounds(struct b43_wldev *dev)
        b43_phy_set(dev, B43_NPHY_IQFLIP,
                    B43_NPHY_IQFLIP_ADC1 | B43_NPHY_IQFLIP_ADC2);
 
-       if (dev->phy.rev >= 3)
+       if (dev->phy.rev >= 7)
+               b43_nphy_workarounds_rev7plus(dev);
+       else if (dev->phy.rev >= 3)
                b43_nphy_workarounds_rev3plus(dev);
        else
                b43_nphy_workarounds_rev1_2(dev);
@@ -2542,7 +3124,7 @@ static void b43_nphy_tx_power_ctl_idle_tssi(struct b43_wldev *dev)
                b43_nphy_ipa_internal_tssi_setup(dev);
 
        if (phy->rev >= 7)
-               ; /* TODO: Override Rev7 with 0x2000, 0, 3, 0, 0 as arguments */
+               b43_nphy_rf_control_override_rev7(dev, 0x2000, 0, 3, false, 0);
        else if (phy->rev >= 3)
                b43_nphy_rf_control_override(dev, 0x2000, 0, 3, false);
 
@@ -2554,7 +3136,7 @@ static void b43_nphy_tx_power_ctl_idle_tssi(struct b43_wldev *dev)
        b43_nphy_rssi_select(dev, 0, 0);
 
        if (phy->rev >= 7)
-               ; /* TODO: Override Rev7 with 0x2000, 0, 3, 1, 0 as arguments */
+               b43_nphy_rf_control_override_rev7(dev, 0x2000, 0, 3, true, 0);
        else if (phy->rev >= 3)
                b43_nphy_rf_control_override(dev, 0x2000, 0, 3, true);
 
@@ -4761,6 +5343,7 @@ static void b43_nphy_op_prepare_structs(struct b43_wldev *dev)
        nphy->hang_avoid = (phy->rev == 3 || phy->rev == 4);
        nphy->spur_avoid = (phy->rev >= 3) ?
                                B43_SPUR_AVOID_AUTO : B43_SPUR_AVOID_DISABLE;
+       nphy->init_por = true;
        nphy->gain_boost = true; /* this way we follow wl, assume it is true */
        nphy->txrx_chain = 2; /* sth different than 0 and 1 for now */
        nphy->phyrxchain = 3; /* to avoid b43_nphy_set_rx_core_state like wl */
@@ -4801,6 +5384,8 @@ static void b43_nphy_op_prepare_structs(struct b43_wldev *dev)
                nphy->ipa2g_on = sprom->fem.ghz2.extpa_gain == 2;
                nphy->ipa5g_on = sprom->fem.ghz5.extpa_gain == 2;
        }
+
+       nphy->init_por = true;
 }
 
 static void b43_nphy_op_free(struct b43_wldev *dev)
@@ -4887,7 +5472,9 @@ static void b43_nphy_op_software_rfkill(struct b43_wldev *dev,
        if (blocked) {
                b43_phy_mask(dev, B43_NPHY_RFCTL_CMD,
                                ~B43_NPHY_RFCTL_CMD_CHIP0PU);
-               if (dev->phy.rev >= 3) {
+               if (dev->phy.rev >= 7) {
+                       /* TODO */
+               } else if (dev->phy.rev >= 3) {
                        b43_radio_mask(dev, 0x09, ~0x2);
 
                        b43_radio_write(dev, 0x204D, 0);
@@ -4905,7 +5492,10 @@ static void b43_nphy_op_software_rfkill(struct b43_wldev *dev,
                        b43_radio_write(dev, 0x3064, 0);
                }
        } else {
-               if (dev->phy.rev >= 3) {
+               if (dev->phy.rev >= 7) {
+                       b43_radio_2057_init(dev);
+                       b43_switch_channel(dev, dev->phy.channel);
+               } else if (dev->phy.rev >= 3) {
                        b43_radio_init2056(dev);
                        b43_switch_channel(dev, dev->phy.channel);
                } else {
index fd12b386fea1cc5a1c0796589b650d12e6917fcf..092c0140c2490d777056399db075cfcbb24a2855 100644 (file)
@@ -785,6 +785,7 @@ struct b43_phy_n {
        u16 papd_epsilon_offset[2];
        s32 preamble_override;
        u32 bb_mult_save;
+       bool init_por;
 
        bool gain_boost;
        bool elna_gain_config;
diff --git a/drivers/net/wireless/b43/radio_2057.c b/drivers/net/wireless/b43/radio_2057.c
new file mode 100644 (file)
index 0000000..d61d683
--- /dev/null
@@ -0,0 +1,141 @@
+/*
+
+  Broadcom B43 wireless driver
+  IEEE 802.11n 2057 radio device data tables
+
+  Copyright (c) 2010 RafaÅ‚ MiÅ‚ecki <zajec5@gmail.com>
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 2 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; see the file COPYING.  If not, write to
+  the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor,
+  Boston, MA 02110-1301, USA.
+
+*/
+
+#include "b43.h"
+#include "radio_2057.h"
+#include "phy_common.h"
+
+static u16 r2057_rev4_init[42][2] = { /* {reg, value} pairs; selected for PHY rev 7 in r2057_upload_inittabs() */
+       { 0x0E, 0x20 }, { 0x31, 0x00 }, { 0x32, 0x00 }, { 0x33, 0x00 },
+       { 0x35, 0x26 }, { 0x3C, 0xff }, { 0x3D, 0xff }, { 0x3E, 0xff },
+       { 0x3F, 0xff }, { 0x62, 0x33 }, { 0x8A, 0xf0 }, { 0x8B, 0x10 },
+       { 0x8C, 0xf0 }, { 0x91, 0x3f }, { 0x92, 0x36 }, { 0xA4, 0x8c },
+       { 0xA8, 0x55 }, { 0xAF, 0x01 }, { 0x10F, 0xf0 }, { 0x110, 0x10 },
+       { 0x111, 0xf0 }, { 0x116, 0x3f }, { 0x117, 0x36 }, { 0x129, 0x8c },
+       { 0x12D, 0x55 }, { 0x134, 0x01 }, { 0x15E, 0x00 }, { 0x15F, 0x00 },
+       { 0x160, 0x00 }, { 0x161, 0x00 }, { 0x162, 0x00 }, { 0x163, 0x00 },
+       { 0x169, 0x02 }, { 0x16A, 0x00 }, { 0x16B, 0x00 }, { 0x16C, 0x00 },
+       { 0x1A4, 0x00 }, { 0x1A5, 0x00 }, { 0x1A6, 0x00 }, { 0x1AA, 0x00 },
+       { 0x1AB, 0x00 }, { 0x1AC, 0x00 },
+};
+
+static u16 r2057_rev5_init[44][2] = { /* {reg, value} pairs; radio rev 5, PHY rev 8 (see r2057_upload_inittabs) */
+       { 0x00, 0x00 }, { 0x01, 0x57 }, { 0x02, 0x20 }, { 0x23, 0x6 },
+       { 0x31, 0x00 }, { 0x32, 0x00 }, { 0x33, 0x00 }, { 0x51, 0x70 },
+       { 0x59, 0x88 }, { 0x5C, 0x20 }, { 0x62, 0x33 }, { 0x63, 0x0f },
+       { 0x64, 0x0f }, { 0x81, 0x01 }, { 0x91, 0x3f }, { 0x92, 0x36 },
+       { 0xA1, 0x20 }, { 0xD6, 0x70 }, { 0xDE, 0x88 }, { 0xE1, 0x20 },
+       { 0xE8, 0x0f }, { 0xE9, 0x0f }, { 0x106, 0x01 }, { 0x116, 0x3f },
+       { 0x117, 0x36 }, { 0x126, 0x20 }, { 0x15E, 0x00 }, { 0x15F, 0x00 },
+       { 0x160, 0x00 }, { 0x161, 0x00 }, { 0x162, 0x00 }, { 0x163, 0x00 },
+       { 0x16A, 0x00 }, { 0x16B, 0x00 }, { 0x16C, 0x00 }, { 0x1A4, 0x00 },
+       { 0x1A5, 0x00 }, { 0x1A6, 0x00 }, { 0x1AA, 0x00 }, { 0x1AB, 0x00 },
+       { 0x1AC, 0x00 }, { 0x1B7, 0x0c }, { 0x1C1, 0x01 }, { 0x1C2, 0x80 },
+};
+
+static u16 r2057_rev5a_init[45][2] = { /* {reg, value} pairs; radio rev 5 variant, PHY rev 9 */
+       { 0x00, 0x15 }, { 0x01, 0x57 }, { 0x02, 0x20 }, { 0x23, 0x6 },
+       { 0x31, 0x00 }, { 0x32, 0x00 }, { 0x33, 0x00 }, { 0x51, 0x70 },
+       { 0x59, 0x88 }, { 0x5C, 0x20 }, { 0x62, 0x33 }, { 0x63, 0x0f },
+       { 0x64, 0x0f }, { 0x81, 0x01 }, { 0x91, 0x3f }, { 0x92, 0x36 },
+       { 0xC9, 0x01 }, { 0xD6, 0x70 }, { 0xDE, 0x88 }, { 0xE1, 0x20 },
+       { 0xE8, 0x0f }, { 0xE9, 0x0f }, { 0x106, 0x01 }, { 0x116, 0x3f },
+       { 0x117, 0x36 }, { 0x126, 0x20 }, { 0x14E, 0x01 }, { 0x15E, 0x00 },
+       { 0x15F, 0x00 }, { 0x160, 0x00 }, { 0x161, 0x00 }, { 0x162, 0x00 },
+       { 0x163, 0x00 }, { 0x16A, 0x00 }, { 0x16B, 0x00 }, { 0x16C, 0x00 },
+       { 0x1A4, 0x00 }, { 0x1A5, 0x00 }, { 0x1A6, 0x00 }, { 0x1AA, 0x00 },
+       { 0x1AB, 0x00 }, { 0x1AC, 0x00 }, { 0x1B7, 0x0c }, { 0x1C1, 0x01 },
+       { 0x1C2, 0x80 },
+};
+
+static u16 r2057_rev7_init[54][2] = { /* {reg, value} pairs; selected for radio rev 7 */
+       { 0x00, 0x00 }, { 0x01, 0x57 }, { 0x02, 0x20 }, { 0x31, 0x00 },
+       { 0x32, 0x00 }, { 0x33, 0x00 }, { 0x51, 0x70 }, { 0x59, 0x88 },
+       { 0x5C, 0x20 }, { 0x62, 0x33 }, { 0x63, 0x0f }, { 0x64, 0x13 },
+       { 0x66, 0xee }, { 0x6E, 0x58 }, { 0x75, 0x13 }, { 0x7B, 0x13 },
+       { 0x7C, 0x14 }, { 0x7D, 0xee }, { 0x81, 0x01 }, { 0x91, 0x3f },
+       { 0x92, 0x36 }, { 0xA1, 0x20 }, { 0xD6, 0x70 }, { 0xDE, 0x88 },
+       { 0xE1, 0x20 }, { 0xE8, 0x0f }, { 0xE9, 0x13 }, { 0xEB, 0xee },
+       { 0xF3, 0x58 }, { 0xFA, 0x13 }, { 0x100, 0x13 }, { 0x101, 0x14 },
+       { 0x102, 0xee }, { 0x106, 0x01 }, { 0x116, 0x3f }, { 0x117, 0x36 },
+       { 0x126, 0x20 }, { 0x15E, 0x00 }, { 0x15F, 0x00 }, { 0x160, 0x00 },
+       { 0x161, 0x00 }, { 0x162, 0x00 }, { 0x163, 0x00 }, { 0x16A, 0x00 },
+       { 0x16B, 0x00 }, { 0x16C, 0x00 }, { 0x1A4, 0x00 }, { 0x1A5, 0x00 },
+       { 0x1A6, 0x00 }, { 0x1AA, 0x00 }, { 0x1AB, 0x00 }, { 0x1AC, 0x00 },
+       { 0x1B7, 0x05 }, { 0x1C2, 0xa0 },
+};
+
+static u16 r2057_rev8_init[54][2] = { /* {reg, value} pairs; picked for radio_rev 9 in r2057_upload_inittabs — NOTE(review): name/rev mismatch, confirm against specs */
+       { 0x00, 0x08 }, { 0x01, 0x57 }, { 0x02, 0x20 }, { 0x31, 0x00 },
+       { 0x32, 0x00 }, { 0x33, 0x00 }, { 0x51, 0x70 }, { 0x59, 0x88 },
+       { 0x5C, 0x20 }, { 0x62, 0x33 }, { 0x63, 0x0f }, { 0x64, 0x0f },
+       { 0x6E, 0x58 }, { 0x75, 0x13 }, { 0x7B, 0x13 }, { 0x7C, 0x0f },
+       { 0x7D, 0xee }, { 0x81, 0x01 }, { 0x91, 0x3f }, { 0x92, 0x36 },
+       { 0xA1, 0x20 }, { 0xC9, 0x01 }, { 0xD6, 0x70 }, { 0xDE, 0x88 },
+       { 0xE1, 0x20 }, { 0xE8, 0x0f }, { 0xE9, 0x0f }, { 0xF3, 0x58 },
+       { 0xFA, 0x13 }, { 0x100, 0x13 }, { 0x101, 0x0f }, { 0x102, 0xee },
+       { 0x106, 0x01 }, { 0x116, 0x3f }, { 0x117, 0x36 }, { 0x126, 0x20 },
+       { 0x14E, 0x01 }, { 0x15E, 0x00 }, { 0x15F, 0x00 }, { 0x160, 0x00 },
+       { 0x161, 0x00 }, { 0x162, 0x00 }, { 0x163, 0x00 }, { 0x16A, 0x00 },
+       { 0x16B, 0x00 }, { 0x16C, 0x00 }, { 0x1A4, 0x00 }, { 0x1A5, 0x00 },
+       { 0x1A6, 0x00 }, { 0x1AA, 0x00 }, { 0x1AB, 0x00 }, { 0x1AC, 0x00 },
+       { 0x1B7, 0x05 }, { 0x1C2, 0xa0 },
+};
+
+void r2057_upload_inittabs(struct b43_wldev *dev)
+{
+       struct b43_phy *phy = &dev->phy;
+       u16 *table = NULL; /* selected {reg, value} init table; NULL = unknown rev combo */
+       u16 size, i;
+
+       if (phy->rev == 7) {
+               table = r2057_rev4_init[0];
+               size = ARRAY_SIZE(r2057_rev4_init);
+       } else if (phy->rev == 8 || phy->rev == 9) {
+               if (phy->radio_rev == 5) {
+                       if (phy->rev == 8) { /* was radio_rev == 8: dead under radio_rev == 5, rev5 table was never picked */
+                               table = r2057_rev5_init[0];
+                               size = ARRAY_SIZE(r2057_rev5_init);
+                       } else {
+                               table = r2057_rev5a_init[0];
+                               size = ARRAY_SIZE(r2057_rev5a_init);
+                       }
+               } else if (phy->radio_rev == 7) {
+                       table = r2057_rev7_init[0];
+                       size = ARRAY_SIZE(r2057_rev7_init);
+               } else if (phy->radio_rev == 9) {
+                       table = r2057_rev8_init[0];
+                       size = ARRAY_SIZE(r2057_rev8_init);
+               }
+       }
+
+       if (table) {
+               for (i = 0; i < size; i++) { /* was hard-coded 10: skipped most entries and left size unused */
+                       /* NOTE(review): debug log only - nothing is written to the radio yet */
+                       pr_info("radio_write 0x%X 0x%X\n",
+                               table[0], table[1]);
+                       table += 2;
+               }
+       }
+}
diff --git a/drivers/net/wireless/b43/radio_2057.h b/drivers/net/wireless/b43/radio_2057.h
new file mode 100644 (file)
index 0000000..eeebd8f
--- /dev/null
@@ -0,0 +1,430 @@
+#ifndef B43_RADIO_2057_H_
+#define B43_RADIO_2057_H_
+
+#include <linux/types.h>
+
+#include "tables_nphy.h"
+
+#define R2057_DACBUF_VINCM_CORE0               0x000
+#define R2057_IDCODE                           0x001
+#define R2057_RCCAL_MASTER                     0x002
+#define R2057_RCCAL_CAP_SIZE                   0x003
+#define R2057_RCAL_CONFIG                      0x004
+#define R2057_GPAIO_CONFIG                     0x005
+#define R2057_GPAIO_SEL1                       0x006
+#define R2057_GPAIO_SEL0                       0x007
+#define R2057_CLPO_CONFIG                      0x008
+#define R2057_BANDGAP_CONFIG                   0x009
+#define R2057_BANDGAP_RCAL_TRIM                        0x00a
+#define R2057_AFEREG_CONFIG                    0x00b
+#define R2057_TEMPSENSE_CONFIG                 0x00c
+#define R2057_XTAL_CONFIG1                     0x00d
+#define R2057_XTAL_ICORE_SIZE                  0x00e
+#define R2057_XTAL_BUF_SIZE                    0x00f
+#define R2057_XTAL_PULLCAP_SIZE                        0x010
+#define R2057_RFPLL_MASTER                     0x011
+#define R2057_VCOMONITOR_VTH_L                 0x012
+#define R2057_VCOMONITOR_VTH_H                 0x013
+#define R2057_VCOCAL_BIASRESET_RFPLLREG_VOUT   0x014
+#define R2057_VCO_VARCSIZE_IDAC                        0x015
+#define R2057_VCOCAL_COUNTVAL0                 0x016
+#define R2057_VCOCAL_COUNTVAL1                 0x017
+#define R2057_VCOCAL_INTCLK_COUNT              0x018
+#define R2057_VCOCAL_MASTER                    0x019
+#define R2057_VCOCAL_NUMCAPCHANGE              0x01a
+#define R2057_VCOCAL_WINSIZE                   0x01b
+#define R2057_VCOCAL_DELAY_AFTER_REFRESH       0x01c
+#define R2057_VCOCAL_DELAY_AFTER_CLOSELOOP     0x01d
+#define R2057_VCOCAL_DELAY_AFTER_OPENLOOP      0x01e
+#define R2057_VCOCAL_DELAY_BEFORE_OPENLOOP     0x01f
+#define R2057_VCO_FORCECAPEN_FORCECAP1         0x020
+#define R2057_VCO_FORCECAP0                    0x021
+#define R2057_RFPLL_REFMASTER_SPAREXTALSIZE    0x022
+#define R2057_RFPLL_PFD_RESET_PW               0x023
+#define R2057_RFPLL_LOOPFILTER_R2              0x024
+#define R2057_RFPLL_LOOPFILTER_R1              0x025
+#define R2057_RFPLL_LOOPFILTER_C3              0x026
+#define R2057_RFPLL_LOOPFILTER_C2              0x027
+#define R2057_RFPLL_LOOPFILTER_C1              0x028
+#define R2057_CP_KPD_IDAC                      0x029
+#define R2057_RFPLL_IDACS                      0x02a
+#define R2057_RFPLL_MISC_EN                    0x02b
+#define R2057_RFPLL_MMD0                       0x02c
+#define R2057_RFPLL_MMD1                       0x02d
+#define R2057_RFPLL_MISC_CAL_RESETN            0x02e
+#define R2057_JTAGXTAL_SIZE_CPBIAS_FILTRES     0x02f
+#define R2057_VCO_ALCREF_BBPLLXTAL_SIZE                0x030
+#define R2057_VCOCAL_READCAP0                  0x031
+#define R2057_VCOCAL_READCAP1                  0x032
+#define R2057_VCOCAL_STATUS                    0x033
+#define R2057_LOGEN_PUS                                0x034
+#define R2057_LOGEN_PTAT_RESETS                        0x035
+#define R2057_VCOBUF_IDACS                     0x036
+#define R2057_VCOBUF_TUNE                      0x037
+#define R2057_CMOSBUF_TX2GQ_IDACS              0x038
+#define R2057_CMOSBUF_TX2GI_IDACS              0x039
+#define R2057_CMOSBUF_TX5GQ_IDACS              0x03a
+#define R2057_CMOSBUF_TX5GI_IDACS              0x03b
+#define R2057_CMOSBUF_RX2GQ_IDACS              0x03c
+#define R2057_CMOSBUF_RX2GI_IDACS              0x03d
+#define R2057_CMOSBUF_RX5GQ_IDACS              0x03e
+#define R2057_CMOSBUF_RX5GI_IDACS              0x03f
+#define R2057_LOGEN_MX2G_IDACS                 0x040
+#define R2057_LOGEN_MX2G_TUNE                  0x041
+#define R2057_LOGEN_MX5G_IDACS                 0x042
+#define R2057_LOGEN_MX5G_TUNE                  0x043
+#define R2057_LOGEN_MX5G_RCCR                  0x044
+#define R2057_LOGEN_INDBUF2G_IDAC              0x045
+#define R2057_LOGEN_INDBUF2G_IBOOST            0x046
+#define R2057_LOGEN_INDBUF2G_TUNE              0x047
+#define R2057_LOGEN_INDBUF5G_IDAC              0x048
+#define R2057_LOGEN_INDBUF5G_IBOOST            0x049
+#define R2057_LOGEN_INDBUF5G_TUNE              0x04a
+#define R2057_CMOSBUF_TX_RCCR                  0x04b
+#define R2057_CMOSBUF_RX_RCCR                  0x04c
+#define R2057_LOGEN_SEL_PKDET                  0x04d
+#define R2057_CMOSBUF_SHAREIQ_PTAT             0x04e
+#define R2057_RXTXBIAS_CONFIG_CORE0            0x04f
+#define R2057_TXGM_TXRF_PUS_CORE0              0x050
+#define R2057_TXGM_IDAC_BLEED_CORE0            0x051
+#define R2057_TXGM_GAIN_CORE0                  0x056
+#define R2057_TXGM2G_PKDET_PUS_CORE0           0x057
+#define R2057_PAD2G_PTATS_CORE0                        0x058
+#define R2057_PAD2G_IDACS_CORE0                        0x059
+#define R2057_PAD2G_BOOST_PU_CORE0             0x05a
+#define R2057_PAD2G_CASCV_GAIN_CORE0           0x05b
+#define R2057_TXMIX2G_TUNE_BOOST_PU_CORE0      0x05c
+#define R2057_TXMIX2G_LODC_CORE0               0x05d
+#define R2057_PAD2G_TUNE_PUS_CORE0             0x05e
+#define R2057_IPA2G_GAIN_CORE0                 0x05f
+#define R2057_TSSI2G_SPARE1_CORE0              0x060
+#define R2057_TSSI2G_SPARE2_CORE0              0x061
+#define R2057_IPA2G_TUNEV_CASCV_PTAT_CORE0     0x062
+#define R2057_IPA2G_IMAIN_CORE0                        0x063
+#define R2057_IPA2G_CASCONV_CORE0              0x064
+#define R2057_IPA2G_CASCOFFV_CORE0             0x065
+#define R2057_IPA2G_BIAS_FILTER_CORE0          0x066
+#define R2057_TX5G_PKDET_CORE0                 0x069
+#define R2057_PGA_PTAT_TXGM5G_PU_CORE0         0x06a
+#define R2057_PAD5G_PTATS1_CORE0               0x06b
+#define R2057_PAD5G_CLASS_PTATS2_CORE0         0x06c
+#define R2057_PGA_BOOSTPTAT_IMAIN_CORE0                0x06d
+#define R2057_PAD5G_CASCV_IMAIN_CORE0          0x06e
+#define R2057_TXMIX5G_IBOOST_PAD_IAUX_CORE0    0x06f
+#define R2057_PGA_BOOST_TUNE_CORE0             0x070
+#define R2057_PGA_GAIN_CORE0                   0x071
+#define R2057_PAD5G_CASCOFFV_GAIN_PUS_CORE0    0x072
+#define R2057_TXMIX5G_BOOST_TUNE_CORE0         0x073
+#define R2057_PAD5G_TUNE_MISC_PUS_CORE0                0x074
+#define R2057_IPA5G_IAUX_CORE0                 0x075
+#define R2057_IPA5G_GAIN_CORE0                 0x076
+#define R2057_TSSI5G_SPARE1_CORE0              0x077
+#define R2057_TSSI5G_SPARE2_CORE0              0x078
+#define R2057_IPA5G_CASCOFFV_PU_CORE0          0x079
+#define R2057_IPA5G_PTAT_CORE0                 0x07a
+#define R2057_IPA5G_IMAIN_CORE0                        0x07b
+#define R2057_IPA5G_CASCONV_CORE0              0x07c
+#define R2057_IPA5G_BIAS_FILTER_CORE0          0x07d
+#define R2057_PAD_BIAS_FILTER_BWS_CORE0                0x080
+#define R2057_TR2G_CONFIG1_CORE0_NU            0x081
+#define R2057_TR2G_CONFIG2_CORE0_NU            0x082
+#define R2057_LNA5G_RFEN_CORE0                 0x083
+#define R2057_TR5G_CONFIG2_CORE0_NU            0x084
+#define R2057_RXRFBIAS_IBOOST_PU_CORE0         0x085
+#define R2057_RXRF_IABAND_RXGM_IMAIN_PTAT_CORE0        0x086
+#define R2057_RXGM_CMFBITAIL_AUXPTAT_CORE0     0x087
+#define R2057_RXMIX_ICORE_RXGM_IAUX_CORE0      0x088
+#define R2057_RXMIX_CMFBITAIL_PU_CORE0         0x089
+#define R2057_LNA2_IMAIN_PTAT_PU_CORE0         0x08a
+#define R2057_LNA2_IAUX_PTAT_CORE0             0x08b
+#define R2057_LNA1_IMAIN_PTAT_PU_CORE0         0x08c
+#define R2057_LNA15G_INPUT_MATCH_TUNE_CORE0    0x08d
+#define R2057_RXRFBIAS_BANDSEL_CORE0           0x08e
+#define R2057_TIA_CONFIG_CORE0                 0x08f
+#define R2057_TIA_IQGAIN_CORE0                 0x090
+#define R2057_TIA_IBIAS2_CORE0                 0x091
+#define R2057_TIA_IBIAS1_CORE0                 0x092
+#define R2057_TIA_SPARE_Q_CORE0                        0x093
+#define R2057_TIA_SPARE_I_CORE0                        0x094
+#define R2057_RXMIX2G_PUS_CORE0                        0x095
+#define R2057_RXMIX2G_VCMREFS_CORE0            0x096
+#define R2057_RXMIX2G_LODC_QI_CORE0            0x097
+#define R2057_W12G_BW_LNA2G_PUS_CORE0          0x098
+#define R2057_LNA2G_GAIN_CORE0                 0x099
+#define R2057_LNA2G_TUNE_CORE0                 0x09a
+#define R2057_RXMIX5G_PUS_CORE0                        0x09b
+#define R2057_RXMIX5G_VCMREFS_CORE0            0x09c
+#define R2057_RXMIX5G_LODC_QI_CORE0            0x09d
+#define R2057_W15G_BW_LNA5G_PUS_CORE0          0x09e
+#define R2057_LNA5G_GAIN_CORE0                 0x09f
+#define R2057_LNA5G_TUNE_CORE0                 0x0a0
+#define R2057_LPFSEL_TXRX_RXBB_PUS_CORE0       0x0a1
+#define R2057_RXBB_BIAS_MASTER_CORE0           0x0a2
+#define R2057_RXBB_VGABUF_IDACS_CORE0          0x0a3
+#define R2057_LPF_VCMREF_TXBUF_VCMREF_CORE0    0x0a4
+#define R2057_TXBUF_VINCM_CORE0                        0x0a5
+#define R2057_TXBUF_IDACS_CORE0                        0x0a6
+#define R2057_LPF_RESP_RXBUF_BW_CORE0          0x0a7
+#define R2057_RXBB_CC_CORE0                    0x0a8
+#define R2057_RXBB_SPARE3_CORE0                        0x0a9
+#define R2057_RXBB_RCCAL_HPC_CORE0             0x0aa
+#define R2057_LPF_IDACS_CORE0                  0x0ab
+#define R2057_LPFBYP_DCLOOP_BYP_IDAC_CORE0     0x0ac
+#define R2057_TXBUF_GAIN_CORE0                 0x0ad
+#define R2057_AFELOOPBACK_AACI_RESP_CORE0      0x0ae
+#define R2057_RXBUF_DEGEN_CORE0                        0x0af
+#define R2057_RXBB_SPARE2_CORE0                        0x0b0
+#define R2057_RXBB_SPARE1_CORE0                        0x0b1
+#define R2057_RSSI_MASTER_CORE0                        0x0b2
+#define R2057_W2_MASTER_CORE0                  0x0b3
+#define R2057_NB_MASTER_CORE0                  0x0b4
+#define R2057_W2_IDACS0_Q_CORE0                        0x0b5
+#define R2057_W2_IDACS1_Q_CORE0                        0x0b6
+#define R2057_W2_IDACS0_I_CORE0                        0x0b7
+#define R2057_W2_IDACS1_I_CORE0                        0x0b8
+#define R2057_RSSI_GPAIOSEL_W1_IDACS_CORE0     0x0b9
+#define R2057_NB_IDACS_Q_CORE0                 0x0ba
+#define R2057_NB_IDACS_I_CORE0                 0x0bb
+#define R2057_BACKUP4_CORE0                    0x0c1
+#define R2057_BACKUP3_CORE0                    0x0c2
+#define R2057_BACKUP2_CORE0                    0x0c3
+#define R2057_BACKUP1_CORE0                    0x0c4
+#define R2057_SPARE16_CORE0                    0x0c5
+#define R2057_SPARE15_CORE0                    0x0c6
+#define R2057_SPARE14_CORE0                    0x0c7
+#define R2057_SPARE13_CORE0                    0x0c8
+#define R2057_SPARE12_CORE0                    0x0c9
+#define R2057_SPARE11_CORE0                    0x0ca
+#define R2057_TX2G_BIAS_RESETS_CORE0           0x0cb
+#define R2057_TX5G_BIAS_RESETS_CORE0           0x0cc
+#define R2057_IQTEST_SEL_PU                    0x0cd
+#define R2057_XTAL_CONFIG2                     0x0ce
+#define R2057_BUFS_MISC_LPFBW_CORE0            0x0cf
+#define R2057_TXLPF_RCCAL_CORE0                        0x0d0
+#define R2057_RXBB_GPAIOSEL_RXLPF_RCCAL_CORE0  0x0d1
+#define R2057_LPF_GAIN_CORE0                   0x0d2
+#define R2057_DACBUF_IDACS_BW_CORE0            0x0d3
+#define R2057_RXTXBIAS_CONFIG_CORE1            0x0d4
+#define R2057_TXGM_TXRF_PUS_CORE1              0x0d5
+#define R2057_TXGM_IDAC_BLEED_CORE1            0x0d6
+#define R2057_TXGM_GAIN_CORE1                  0x0db
+#define R2057_TXGM2G_PKDET_PUS_CORE1           0x0dc
+#define R2057_PAD2G_PTATS_CORE1                        0x0dd
+#define R2057_PAD2G_IDACS_CORE1                        0x0de
+#define R2057_PAD2G_BOOST_PU_CORE1             0x0df
+#define R2057_PAD2G_CASCV_GAIN_CORE1           0x0e0
+#define R2057_TXMIX2G_TUNE_BOOST_PU_CORE1      0x0e1
+#define R2057_TXMIX2G_LODC_CORE1               0x0e2
+#define R2057_PAD2G_TUNE_PUS_CORE1             0x0e3
+#define R2057_IPA2G_GAIN_CORE1                 0x0e4
+#define R2057_TSSI2G_SPARE1_CORE1              0x0e5
+#define R2057_TSSI2G_SPARE2_CORE1              0x0e6
+#define R2057_IPA2G_TUNEV_CASCV_PTAT_CORE1     0x0e7
+#define R2057_IPA2G_IMAIN_CORE1                        0x0e8
+#define R2057_IPA2G_CASCONV_CORE1              0x0e9
+#define R2057_IPA2G_CASCOFFV_CORE1             0x0ea
+#define R2057_IPA2G_BIAS_FILTER_CORE1          0x0eb
+#define R2057_TX5G_PKDET_CORE1                 0x0ee
+#define R2057_PGA_PTAT_TXGM5G_PU_CORE1         0x0ef
+#define R2057_PAD5G_PTATS1_CORE1               0x0f0
+#define R2057_PAD5G_CLASS_PTATS2_CORE1         0x0f1
+#define R2057_PGA_BOOSTPTAT_IMAIN_CORE1                0x0f2
+#define R2057_PAD5G_CASCV_IMAIN_CORE1          0x0f3
+#define R2057_TXMIX5G_IBOOST_PAD_IAUX_CORE1    0x0f4
+#define R2057_PGA_BOOST_TUNE_CORE1             0x0f5
+#define R2057_PGA_GAIN_CORE1                   0x0f6
+#define R2057_PAD5G_CASCOFFV_GAIN_PUS_CORE1    0x0f7
+#define R2057_TXMIX5G_BOOST_TUNE_CORE1         0x0f8
+#define R2057_PAD5G_TUNE_MISC_PUS_CORE1                0x0f9
+#define R2057_IPA5G_IAUX_CORE1                 0x0fa
+#define R2057_IPA5G_GAIN_CORE1                 0x0fb
+#define R2057_TSSI5G_SPARE1_CORE1              0x0fc
+#define R2057_TSSI5G_SPARE2_CORE1              0x0fd
+#define R2057_IPA5G_CASCOFFV_PU_CORE1          0x0fe
+#define R2057_IPA5G_PTAT_CORE1                 0x0ff
+#define R2057_IPA5G_IMAIN_CORE1                        0x100
+#define R2057_IPA5G_CASCONV_CORE1              0x101
+#define R2057_IPA5G_BIAS_FILTER_CORE1          0x102
+#define R2057_PAD_BIAS_FILTER_BWS_CORE1                0x105
+#define R2057_TR2G_CONFIG1_CORE1_NU            0x106
+#define R2057_TR2G_CONFIG2_CORE1_NU            0x107
+#define R2057_LNA5G_RFEN_CORE1                 0x108
+#define R2057_TR5G_CONFIG2_CORE1_NU            0x109
+#define R2057_RXRFBIAS_IBOOST_PU_CORE1         0x10a
+#define R2057_RXRF_IABAND_RXGM_IMAIN_PTAT_CORE1        0x10b
+#define R2057_RXGM_CMFBITAIL_AUXPTAT_CORE1     0x10c
+#define R2057_RXMIX_ICORE_RXGM_IAUX_CORE1      0x10d
+#define R2057_RXMIX_CMFBITAIL_PU_CORE1         0x10e
+#define R2057_LNA2_IMAIN_PTAT_PU_CORE1         0x10f
+#define R2057_LNA2_IAUX_PTAT_CORE1             0x110
+#define R2057_LNA1_IMAIN_PTAT_PU_CORE1         0x111
+#define R2057_LNA15G_INPUT_MATCH_TUNE_CORE1    0x112
+#define R2057_RXRFBIAS_BANDSEL_CORE1           0x113
+#define R2057_TIA_CONFIG_CORE1                 0x114
+#define R2057_TIA_IQGAIN_CORE1                 0x115
+#define R2057_TIA_IBIAS2_CORE1                 0x116
+#define R2057_TIA_IBIAS1_CORE1                 0x117
+#define R2057_TIA_SPARE_Q_CORE1                        0x118
+#define R2057_TIA_SPARE_I_CORE1                        0x119
+#define R2057_RXMIX2G_PUS_CORE1                        0x11a
+#define R2057_RXMIX2G_VCMREFS_CORE1            0x11b
+#define R2057_RXMIX2G_LODC_QI_CORE1            0x11c
+#define R2057_W12G_BW_LNA2G_PUS_CORE1          0x11d
+#define R2057_LNA2G_GAIN_CORE1                 0x11e
+#define R2057_LNA2G_TUNE_CORE1                 0x11f
+#define R2057_RXMIX5G_PUS_CORE1                        0x120
+#define R2057_RXMIX5G_VCMREFS_CORE1            0x121
+#define R2057_RXMIX5G_LODC_QI_CORE1            0x122
+#define R2057_W15G_BW_LNA5G_PUS_CORE1          0x123
+#define R2057_LNA5G_GAIN_CORE1                 0x124
+#define R2057_LNA5G_TUNE_CORE1                 0x125
+#define R2057_LPFSEL_TXRX_RXBB_PUS_CORE1       0x126
+#define R2057_RXBB_BIAS_MASTER_CORE1           0x127
+#define R2057_RXBB_VGABUF_IDACS_CORE1          0x128
+#define R2057_LPF_VCMREF_TXBUF_VCMREF_CORE1    0x129
+#define R2057_TXBUF_VINCM_CORE1                        0x12a
+#define R2057_TXBUF_IDACS_CORE1                        0x12b
+#define R2057_LPF_RESP_RXBUF_BW_CORE1          0x12c
+#define R2057_RXBB_CC_CORE1                    0x12d
+#define R2057_RXBB_SPARE3_CORE1                        0x12e
+#define R2057_RXBB_RCCAL_HPC_CORE1             0x12f
+#define R2057_LPF_IDACS_CORE1                  0x130
+#define R2057_LPFBYP_DCLOOP_BYP_IDAC_CORE1     0x131
+#define R2057_TXBUF_GAIN_CORE1                 0x132
+#define R2057_AFELOOPBACK_AACI_RESP_CORE1      0x133
+#define R2057_RXBUF_DEGEN_CORE1                        0x134
+#define R2057_RXBB_SPARE2_CORE1                        0x135
+#define R2057_RXBB_SPARE1_CORE1                        0x136
+#define R2057_RSSI_MASTER_CORE1                        0x137
+#define R2057_W2_MASTER_CORE1                  0x138
+#define R2057_NB_MASTER_CORE1                  0x139
+#define R2057_W2_IDACS0_Q_CORE1                        0x13a
+#define R2057_W2_IDACS1_Q_CORE1                        0x13b
+#define R2057_W2_IDACS0_I_CORE1                        0x13c
+#define R2057_W2_IDACS1_I_CORE1                        0x13d
+#define R2057_RSSI_GPAIOSEL_W1_IDACS_CORE1     0x13e
+#define R2057_NB_IDACS_Q_CORE1                 0x13f
+#define R2057_NB_IDACS_I_CORE1                 0x140
+#define R2057_BACKUP4_CORE1                    0x146
+#define R2057_BACKUP3_CORE1                    0x147
+#define R2057_BACKUP2_CORE1                    0x148
+#define R2057_BACKUP1_CORE1                    0x149
+#define R2057_SPARE16_CORE1                    0x14a
+#define R2057_SPARE15_CORE1                    0x14b
+#define R2057_SPARE14_CORE1                    0x14c
+#define R2057_SPARE13_CORE1                    0x14d
+#define R2057_SPARE12_CORE1                    0x14e
+#define R2057_SPARE11_CORE1                    0x14f
+#define R2057_TX2G_BIAS_RESETS_CORE1           0x150
+#define R2057_TX5G_BIAS_RESETS_CORE1           0x151
+#define R2057_SPARE8_CORE1                     0x152
+#define R2057_SPARE7_CORE1                     0x153
+#define R2057_BUFS_MISC_LPFBW_CORE1            0x154
+#define R2057_TXLPF_RCCAL_CORE1                        0x155
+#define R2057_RXBB_GPAIOSEL_RXLPF_RCCAL_CORE1  0x156
+#define R2057_LPF_GAIN_CORE1                   0x157
+#define R2057_DACBUF_IDACS_BW_CORE1            0x158
+#define R2057_DACBUF_VINCM_CORE1               0x159
+#define R2057_RCCAL_START_R1_Q1_P1             0x15a
+#define R2057_RCCAL_X1                         0x15b
+#define R2057_RCCAL_TRC0                       0x15c
+#define R2057_RCCAL_TRC1                       0x15d
+#define R2057_RCCAL_DONE_OSCCAP                        0x15e
+#define R2057_RCCAL_N0_0                       0x15f
+#define R2057_RCCAL_N0_1                       0x160
+#define R2057_RCCAL_N1_0                       0x161
+#define R2057_RCCAL_N1_1                       0x162
+#define R2057_RCAL_STATUS                      0x163
+#define R2057_XTALPUOVR_PINCTRL                        0x164
+#define R2057_OVR_REG0                         0x165
+#define R2057_OVR_REG1                         0x166
+#define R2057_OVR_REG2                         0x167
+#define R2057_OVR_REG3                         0x168
+#define R2057_OVR_REG4                         0x169
+#define R2057_RCCAL_SCAP_VAL                   0x16a
+#define R2057_RCCAL_BCAP_VAL                   0x16b
+#define R2057_RCCAL_HPC_VAL                    0x16c
+#define R2057_RCCAL_OVERRIDES                  0x16d
+#define R2057_TX0_IQCAL_GAIN_BW                        0x170
+#define R2057_TX0_LOFT_FINE_I                  0x171
+#define R2057_TX0_LOFT_FINE_Q                  0x172
+#define R2057_TX0_LOFT_COARSE_I                        0x173
+#define R2057_TX0_LOFT_COARSE_Q                        0x174
+#define R2057_TX0_TX_SSI_MASTER                        0x175
+#define R2057_TX0_IQCAL_VCM_HG                 0x176
+#define R2057_TX0_IQCAL_IDAC                   0x177
+#define R2057_TX0_TSSI_VCM                     0x178
+#define R2057_TX0_TX_SSI_MUX                   0x179
+#define R2057_TX0_TSSIA                                0x17a
+#define R2057_TX0_TSSIG                                0x17b
+#define R2057_TX0_TSSI_MISC1                   0x17c
+#define R2057_TX0_TXRXCOUPLE_2G_ATTEN          0x17d
+#define R2057_TX0_TXRXCOUPLE_2G_PWRUP          0x17e
+#define R2057_TX0_TXRXCOUPLE_5G_ATTEN          0x17f
+#define R2057_TX0_TXRXCOUPLE_5G_PWRUP          0x180
+#define R2057_TX1_IQCAL_GAIN_BW                        0x190
+#define R2057_TX1_LOFT_FINE_I                  0x191
+#define R2057_TX1_LOFT_FINE_Q                  0x192
+#define R2057_TX1_LOFT_COARSE_I                        0x193
+#define R2057_TX1_LOFT_COARSE_Q                        0x194
+#define R2057_TX1_TX_SSI_MASTER                        0x195
+#define R2057_TX1_IQCAL_VCM_HG                 0x196
+#define R2057_TX1_IQCAL_IDAC                   0x197
+#define R2057_TX1_TSSI_VCM                     0x198
+#define R2057_TX1_TX_SSI_MUX                   0x199
+#define R2057_TX1_TSSIA                                0x19a
+#define R2057_TX1_TSSIG                                0x19b
+#define R2057_TX1_TSSI_MISC1                   0x19c
+#define R2057_TX1_TXRXCOUPLE_2G_ATTEN          0x19d
+#define R2057_TX1_TXRXCOUPLE_2G_PWRUP          0x19e
+#define R2057_TX1_TXRXCOUPLE_5G_ATTEN          0x19f
+#define R2057_TX1_TXRXCOUPLE_5G_PWRUP          0x1a0
+#define R2057_AFE_VCM_CAL_MASTER_CORE0         0x1a1
+#define R2057_AFE_SET_VCM_I_CORE0              0x1a2
+#define R2057_AFE_SET_VCM_Q_CORE0              0x1a3
+#define R2057_AFE_STATUS_VCM_IQADC_CORE0       0x1a4
+#define R2057_AFE_STATUS_VCM_I_CORE0           0x1a5
+#define R2057_AFE_STATUS_VCM_Q_CORE0           0x1a6
+#define R2057_AFE_VCM_CAL_MASTER_CORE1         0x1a7
+#define R2057_AFE_SET_VCM_I_CORE1              0x1a8
+#define R2057_AFE_SET_VCM_Q_CORE1              0x1a9
+#define R2057_AFE_STATUS_VCM_IQADC_CORE1       0x1aa
+#define R2057_AFE_STATUS_VCM_I_CORE1           0x1ab
+#define R2057_AFE_STATUS_VCM_Q_CORE1           0x1ac
+
+#define R2057v7_DACBUF_VINCM_CORE0             0x1ad
+#define R2057v7_RCCAL_MASTER                   0x1ae
+#define R2057v7_TR2G_CONFIG3_CORE0_NU          0x1af
+#define R2057v7_TR2G_CONFIG3_CORE1_NU          0x1b0
+#define R2057v7_LOGEN_PUS1                     0x1b1
+#define R2057v7_OVR_REG5                       0x1b2
+#define R2057v7_OVR_REG6                       0x1b3
+#define R2057v7_OVR_REG7                       0x1b4
+#define R2057v7_OVR_REG8                       0x1b5
+#define R2057v7_OVR_REG9                       0x1b6
+#define R2057v7_OVR_REG10                      0x1b7
+#define R2057v7_OVR_REG11                      0x1b8
+#define R2057v7_OVR_REG12                      0x1b9
+#define R2057v7_OVR_REG13                      0x1ba
+#define R2057v7_OVR_REG14                      0x1bb
+#define R2057v7_OVR_REG15                      0x1bc
+#define R2057v7_OVR_REG16                      0x1bd
+#define R2057v7_OVR_REG17                      0x1be
+#define R2057v7_OVR_REG18                      0x1bf
+#define R2057v7_OVR_REG19                      0x1c0
+#define R2057v7_OVR_REG20                      0x1c1
+#define R2057v7_OVR_REG21                      0x1c2
+#define R2057v7_OVR_REG22                      0x1c3
+#define R2057v7_OVR_REG23                      0x1c4
+#define R2057v7_OVR_REG24                      0x1c5
+#define R2057v7_OVR_REG25                      0x1c6
+#define R2057v7_OVR_REG26                      0x1c7
+#define R2057v7_OVR_REG27                      0x1c8
+#define R2057v7_OVR_REG28                      0x1c9
+#define R2057v7_IQTEST_SEL_PU2                 0x1ca
+
+#define R2057_VCM_MASK                         0x7
+
+void r2057_upload_inittabs(struct b43_wldev *dev);
+
+#endif /* B43_RADIO_2057_H_ */
index f0d8377429c695dc6d5cbe4342d49a4bc513ef5d..97d4e27bf36f3c3f14b336086efe770dbf671f77 100644 (file)
@@ -2757,6 +2757,49 @@ const struct nphy_rf_control_override_rev3 tbl_rf_control_override_rev3[] = {
        { 0x00C0,  6, 0xE7, 0xF9, 0xEC, 0xFB }  /* field == 0x4000 (fls 15) */
 };
 
+/* field, val_addr_core0, val_addr_core1, val_mask, val_shift */
+static const struct nphy_rf_control_override_rev7
+                       tbl_rf_control_override_rev7_over0[] = {
+       { 0x0004, 0x07A, 0x07D, 0x0002, 1 },
+       { 0x0008, 0x07A, 0x07D, 0x0004, 2 },
+       { 0x0010, 0x07A, 0x07D, 0x0010, 4 },
+       { 0x0020, 0x07A, 0x07D, 0x0020, 5 },
+       { 0x0040, 0x07A, 0x07D, 0x0040, 6 },
+       { 0x0080, 0x0F8, 0x0FA, 0x0080, 7 },
+       { 0x0400, 0x0F8, 0x0FA, 0x0070, 4 },
+       { 0x0800, 0x07B, 0x07E, 0xFFFF, 0 },
+       { 0x1000, 0x07C, 0x07F, 0xFFFF, 0 },
+       { 0x6000, 0x348, 0x349, 0xFFFF, 0 },
+       { 0x2000, 0x348, 0x349, 0x000F, 0 },
+};
+
+/* field, val_addr_core0, val_addr_core1, val_mask, val_shift */
+static const struct nphy_rf_control_override_rev7
+                       tbl_rf_control_override_rev7_over1[] = {
+       { 0x0002, 0x340, 0x341, 0x0002, 1 },
+       { 0x0008, 0x340, 0x341, 0x0008, 3 },
+       { 0x0020, 0x340, 0x341, 0x0020, 5 },
+       { 0x0010, 0x340, 0x341, 0x0010, 4 },
+       { 0x0004, 0x340, 0x341, 0x0004, 2 },
+       { 0x0080, 0x340, 0x341, 0x0700, 8 },
+       { 0x0800, 0x340, 0x341, 0x4000, 14 },
+       { 0x0400, 0x340, 0x341, 0x2000, 13 },
+       { 0x0200, 0x340, 0x341, 0x0800, 12 },
+       { 0x0100, 0x340, 0x341, 0x0100, 11 },
+       { 0x0040, 0x340, 0x341, 0x0040, 6 },
+       { 0x0001, 0x340, 0x341, 0x0001, 0 },
+};
+
+/* field, val_addr_core0, val_addr_core1, val_mask, val_shift */
+static const struct nphy_rf_control_override_rev7
+                       tbl_rf_control_override_rev7_over2[] = {
+       { 0x0008, 0x344, 0x345, 0x0008, 3 },
+       { 0x0002, 0x344, 0x345, 0x0002, 1 },
+       { 0x0001, 0x344, 0x345, 0x0001, 0 },
+       { 0x0004, 0x344, 0x345, 0x0004, 2 },
+       { 0x0010, 0x344, 0x345, 0x0010, 4 },
+};
+
 struct nphy_gain_ctl_workaround_entry nphy_gain_ctl_wa_phy6_radio11_ghz2 = {
        { 10, 14, 19, 27 },
        { -5, 6, 10, 15 },
@@ -3248,3 +3291,35 @@ struct nphy_gain_ctl_workaround_entry *b43_nphy_get_gain_ctl_workaround_ent(
 
        return e;
 }
+
+const struct nphy_rf_control_override_rev7 *b43_nphy_get_rf_ctl_over_rev7(
+       struct b43_wldev *dev, u16 field, u8 override)
+{
+       const struct nphy_rf_control_override_rev7 *e;
+       u8 size, i;
+
+       switch (override) {
+       case 0:
+               e = tbl_rf_control_override_rev7_over0;
+               size = ARRAY_SIZE(tbl_rf_control_override_rev7_over0);
+               break;
+       case 1:
+               e = tbl_rf_control_override_rev7_over1;
+               size = ARRAY_SIZE(tbl_rf_control_override_rev7_over1);
+               break;
+       case 2:
+               e = tbl_rf_control_override_rev7_over2;
+               size = ARRAY_SIZE(tbl_rf_control_override_rev7_over2);
+               break;
+       default:
+               b43err(dev->wl, "Invalid override value %d\n", override);
+               return NULL;
+       }
+
+       for (i = 0; i < size; i++) {
+               if (e[i].field == field)
+                       return &e[i];
+       }
+
+       return NULL;
+}
index f348953c02308b5c29f4048f5e4e87ebff8a37cb..c600700ceedc05ae9cae24e0f221d927b5310fa7 100644 (file)
@@ -35,6 +35,14 @@ struct nphy_rf_control_override_rev3 {
        u8 val_addr1;
 };
 
+struct nphy_rf_control_override_rev7 {
+       u16 field;
+       u16 val_addr_core0;
+       u16 val_addr_core1;
+       u16 val_mask;
+       u8 val_shift;
+};
+
 struct nphy_gain_ctl_workaround_entry {
        s8 lna1_gain[4];
        s8 lna2_gain[4];
@@ -202,5 +210,7 @@ extern const struct nphy_rf_control_override_rev2
        tbl_rf_control_override_rev2[];
 extern const struct nphy_rf_control_override_rev3
        tbl_rf_control_override_rev3[];
+const struct nphy_rf_control_override_rev7 *b43_nphy_get_rf_ctl_over_rev7(
+       struct b43_wldev *dev, u16 field, u8 override);
 
 #endif /* B43_TABLES_NPHY_H_ */
index 8156135a0590775311baa7936f723f6f28242ff1..18e208e3eca1c4be2ec3e7a8bbf02056d71132ff 100644 (file)
@@ -1920,7 +1920,7 @@ static int b43legacy_gpio_init(struct b43legacy_wldev *dev)
                return 0;
        ssb_write32(gpiodev, B43legacy_GPIO_CONTROL,
                    (ssb_read32(gpiodev, B43legacy_GPIO_CONTROL)
-                    & mask) | set);
+                    & ~mask) | set);
 
        return 0;
 }
@@ -2492,6 +2492,7 @@ static void b43legacy_tx_work(struct work_struct *work)
 }
 
 static void b43legacy_op_tx(struct ieee80211_hw *hw,
+                           struct ieee80211_tx_control *control,
                            struct sk_buff *skb)
 {
        struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw);
@@ -3894,6 +3895,8 @@ static void b43legacy_remove(struct ssb_device *dev)
        cancel_work_sync(&wl->firmware_load);
 
        B43legacy_WARN_ON(!wl);
+       if (!wldev->fw.ucode)
+               return;                 /* NULL if fw never loaded */
        if (wl->current_dev == wldev)
                ieee80211_unregister_hw(wl->hw);
 
index b480088b3dbe2e0834a28228341a66a74c79bee0..c9d811eb6556bfa4494155391398c2bb2b1df815 100644 (file)
@@ -55,6 +55,14 @@ config BRCMFMAC_USB
          IEEE802.11n embedded FullMAC WLAN driver. Say Y if you want to
          use the driver for an USB wireless card.
 
+config BRCMISCAN
+       bool "Broadcom I-Scan (OBSOLETE)"
+       depends on BRCMFMAC
+       ---help---
+         This option enables the I-Scan method. By default fullmac uses the
+         new E-Scan method which uses less memory in firmware and gives no
+         limitation on the number of scan results.
+
 config BRCMDBG
        bool "Broadcom driver debug functions"
        depends on BRCMSMAC || BRCMFMAC
index 8e7e6928c93699bf9b7df35f9efc4c9b26928481..3b2c4c20e7fcfcccaa2a6706108473f805c0f13b 100644 (file)
@@ -185,7 +185,7 @@ brcmf_sdcard_set_sbaddr_window(struct brcmf_sdio_dev *sdiodev, u32 address)
        return err;
 }
 
-static int
+int
 brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
                        void *data, bool write)
 {
@@ -249,7 +249,9 @@ u8 brcmf_sdio_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
        int retval;
 
        brcmf_dbg(INFO, "addr:0x%08x\n", addr);
+       sdio_claim_host(sdiodev->func[1]);
        retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, false);
+       sdio_release_host(sdiodev->func[1]);
        brcmf_dbg(INFO, "data:0x%02x\n", data);
 
        if (ret)
@@ -264,7 +266,9 @@ u32 brcmf_sdio_regrl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
        int retval;
 
        brcmf_dbg(INFO, "addr:0x%08x\n", addr);
+       sdio_claim_host(sdiodev->func[1]);
        retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, false);
+       sdio_release_host(sdiodev->func[1]);
        brcmf_dbg(INFO, "data:0x%08x\n", data);
 
        if (ret)
@@ -279,7 +283,9 @@ void brcmf_sdio_regwb(struct brcmf_sdio_dev *sdiodev, u32 addr,
        int retval;
 
        brcmf_dbg(INFO, "addr:0x%08x, data:0x%02x\n", addr, data);
+       sdio_claim_host(sdiodev->func[1]);
        retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, true);
+       sdio_release_host(sdiodev->func[1]);
 
        if (ret)
                *ret = retval;
@@ -291,7 +297,9 @@ void brcmf_sdio_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr,
        int retval;
 
        brcmf_dbg(INFO, "addr:0x%08x, data:0x%08x\n", addr, data);
+       sdio_claim_host(sdiodev->func[1]);
        retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, true);
+       sdio_release_host(sdiodev->func[1]);
 
        if (ret)
                *ret = retval;
@@ -356,15 +364,20 @@ brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
        brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n",
                  fn, addr, pkt->len);
 
+       sdio_claim_host(sdiodev->func[1]);
+
        width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
        err = brcmf_sdcard_recv_prepare(sdiodev, fn, flags, width, &addr);
        if (err)
-               return err;
+               goto done;
 
        incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC;
        err = brcmf_sdioh_request_buffer(sdiodev, incr_fix, SDIOH_READ,
                                         fn, addr, pkt);
 
+done:
+       sdio_release_host(sdiodev->func[1]);
+
        return err;
 }
 
@@ -378,15 +391,20 @@ int brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
        brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n",
                  fn, addr, pktq->qlen);
 
+       sdio_claim_host(sdiodev->func[1]);
+
        width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
        err = brcmf_sdcard_recv_prepare(sdiodev, fn, flags, width, &addr);
        if (err)
-               return err;
+               goto done;
 
        incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC;
        err = brcmf_sdioh_request_chain(sdiodev, incr_fix, SDIOH_READ, fn, addr,
                                        pktq);
 
+done:
+       sdio_release_host(sdiodev->func[1]);
+
        return err;
 }
 
@@ -428,10 +446,12 @@ brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
        if (flags & SDIO_REQ_ASYNC)
                return -ENOTSUPP;
 
+       sdio_claim_host(sdiodev->func[1]);
+
        if (bar0 != sdiodev->sbwad) {
                err = brcmf_sdcard_set_sbaddr_window(sdiodev, bar0);
                if (err)
-                       return err;
+                       goto done;
 
                sdiodev->sbwad = bar0;
        }
@@ -443,8 +463,13 @@ brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
        if (width == 4)
                addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
 
-       return brcmf_sdioh_request_buffer(sdiodev, incr_fix, SDIOH_WRITE, fn,
-                                         addr, pkt);
+       err = brcmf_sdioh_request_buffer(sdiodev, incr_fix, SDIOH_WRITE, fn,
+                                        addr, pkt);
+
+done:
+       sdio_release_host(sdiodev->func[1]);
+
+       return err;
 }
 
 int brcmf_sdcard_rwdata(struct brcmf_sdio_dev *sdiodev, uint rw, u32 addr,
@@ -485,8 +510,10 @@ int brcmf_sdcard_abort(struct brcmf_sdio_dev *sdiodev, uint fn)
        brcmf_dbg(TRACE, "Enter\n");
 
        /* issue abort cmd52 command through F0 */
+       sdio_claim_host(sdiodev->func[1]);
        brcmf_sdioh_request_byte(sdiodev, SDIOH_WRITE, SDIO_FUNC_0,
                                 SDIO_CCCR_ABORT, &t_func);
+       sdio_release_host(sdiodev->func[1]);
 
        brcmf_dbg(TRACE, "Exit\n");
        return 0;
index 7c4ee72f9d56006cb7e069a9472a0524bbebb9c9..c3247d5b3c222bfcc6ab8a05236515be073f55b6 100644 (file)
@@ -42,6 +42,7 @@
 
 #define DMA_ALIGN_MASK 0x03
 
+#define SDIO_DEVICE_ID_BROADCOM_43241  0x4324
 #define SDIO_DEVICE_ID_BROADCOM_4329   0x4329
 #define SDIO_DEVICE_ID_BROADCOM_4330   0x4330
 #define SDIO_DEVICE_ID_BROADCOM_4334   0x4334
@@ -51,6 +52,7 @@
 
 /* devices we support, null terminated */
 static const struct sdio_device_id brcmf_sdmmc_ids[] = {
+       {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_43241)},
        {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4329)},
        {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4330)},
        {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4334)},
@@ -101,7 +103,6 @@ static inline int brcmf_sdioh_f0_write_byte(struct brcmf_sdio_dev *sdiodev,
        if (regaddr == SDIO_CCCR_IOEx) {
                sdfunc = sdiodev->func[2];
                if (sdfunc) {
-                       sdio_claim_host(sdfunc);
                        if (*byte & SDIO_FUNC_ENABLE_2) {
                                /* Enable Function 2 */
                                err_ret = sdio_enable_func(sdfunc);
@@ -117,7 +118,6 @@ static inline int brcmf_sdioh_f0_write_byte(struct brcmf_sdio_dev *sdiodev,
                                                  "Disable F2 failed:%d\n",
                                                  err_ret);
                        }
-                       sdio_release_host(sdfunc);
                }
        } else if ((regaddr == SDIO_CCCR_ABORT) ||
                   (regaddr == SDIO_CCCR_IENx)) {
@@ -126,17 +126,13 @@ static inline int brcmf_sdioh_f0_write_byte(struct brcmf_sdio_dev *sdiodev,
                if (!sdfunc)
                        return -ENOMEM;
                sdfunc->num = 0;
-               sdio_claim_host(sdfunc);
                sdio_writeb(sdfunc, *byte, regaddr, &err_ret);
-               sdio_release_host(sdfunc);
                kfree(sdfunc);
        } else if (regaddr < 0xF0) {
                brcmf_dbg(ERROR, "F0 Wr:0x%02x: write disallowed\n", regaddr);
                err_ret = -EPERM;
        } else {
-               sdio_claim_host(sdfunc);
                sdio_f0_writeb(sdfunc, *byte, regaddr, &err_ret);
-               sdio_release_host(sdfunc);
        }
 
        return err_ret;
@@ -157,7 +153,6 @@ int brcmf_sdioh_request_byte(struct brcmf_sdio_dev *sdiodev, uint rw, uint func,
                /* handle F0 separately */
                err_ret = brcmf_sdioh_f0_write_byte(sdiodev, regaddr, byte);
        } else {
-               sdio_claim_host(sdiodev->func[func]);
                if (rw) /* CMD52 Write */
                        sdio_writeb(sdiodev->func[func], *byte, regaddr,
                                    &err_ret);
@@ -168,7 +163,6 @@ int brcmf_sdioh_request_byte(struct brcmf_sdio_dev *sdiodev, uint rw, uint func,
                        *byte = sdio_readb(sdiodev->func[func], regaddr,
                                           &err_ret);
                }
-               sdio_release_host(sdiodev->func[func]);
        }
 
        if (err_ret)
@@ -195,8 +189,6 @@ int brcmf_sdioh_request_word(struct brcmf_sdio_dev *sdiodev,
        brcmf_pm_resume_wait(sdiodev, &sdiodev->request_word_wait);
        if (brcmf_pm_resume_error(sdiodev))
                return -EIO;
-       /* Claim host controller */
-       sdio_claim_host(sdiodev->func[func]);
 
        if (rw) {               /* CMD52 Write */
                if (nbytes == 4)
@@ -217,9 +209,6 @@ int brcmf_sdioh_request_word(struct brcmf_sdio_dev *sdiodev,
                        brcmf_dbg(ERROR, "Invalid nbytes: %d\n", nbytes);
        }
 
-       /* Release host controller */
-       sdio_release_host(sdiodev->func[func]);
-
        if (err_ret)
                brcmf_dbg(ERROR, "Failed to %s word, Err: 0x%08x\n",
                          rw ? "write" : "read", err_ret);
@@ -273,9 +262,6 @@ brcmf_sdioh_request_chain(struct brcmf_sdio_dev *sdiodev, uint fix_inc,
        if (brcmf_pm_resume_error(sdiodev))
                return -EIO;
 
-       /* Claim host controller */
-       sdio_claim_host(sdiodev->func[func]);
-
        skb_queue_walk(pktq, pkt) {
                uint pkt_len = pkt->len;
                pkt_len += 3;
@@ -298,9 +284,6 @@ brcmf_sdioh_request_chain(struct brcmf_sdio_dev *sdiodev, uint fix_inc,
                SGCount++;
        }
 
-       /* Release host controller */
-       sdio_release_host(sdiodev->func[func]);
-
        brcmf_dbg(TRACE, "Exit\n");
        return err_ret;
 }
@@ -326,9 +309,6 @@ int brcmf_sdioh_request_buffer(struct brcmf_sdio_dev *sdiodev,
        if (brcmf_pm_resume_error(sdiodev))
                return -EIO;
 
-       /* Claim host controller */
-       sdio_claim_host(sdiodev->func[func]);
-
        pkt_len += 3;
        pkt_len &= (uint)~3;
 
@@ -342,9 +322,6 @@ int brcmf_sdioh_request_buffer(struct brcmf_sdio_dev *sdiodev,
                          write ? "TX" : "RX", pkt, addr, pkt_len);
        }
 
-       /* Release host controller */
-       sdio_release_host(sdiodev->func[func]);
-
        return status;
 }
 
index a11fe54f595091dbec82f57066a2be6bc29bf26c..17e7ae73e0089600780a54a35ab0e7c75dc08a87 100644 (file)
@@ -27,6 +27,7 @@
  * IO codes that are interpreted by dongle firmware
  ******************************************************************************/
 #define BRCMF_C_UP                             2
+#define BRCMF_C_DOWN                           3
 #define BRCMF_C_SET_PROMISC                    10
 #define BRCMF_C_GET_RATE                       12
 #define BRCMF_C_GET_INFRA                      19
 #define BRCMF_C_REASSOC                                53
 #define BRCMF_C_SET_ROAM_TRIGGER               55
 #define BRCMF_C_SET_ROAM_DELTA                 57
+#define BRCMF_C_GET_BCNPRD                     75
+#define BRCMF_C_SET_BCNPRD                     76
 #define BRCMF_C_GET_DTIMPRD                    77
+#define BRCMF_C_SET_DTIMPRD                    78
 #define BRCMF_C_SET_COUNTRY                    84
 #define BRCMF_C_GET_PM                         85
 #define BRCMF_C_SET_PM                         86
 #define BRCMF_EVENT_MSG_FLUSHTXQ       0x02
 #define BRCMF_EVENT_MSG_GROUP          0x04
 
+#define BRCMF_ESCAN_REQ_VERSION 1
+
+#define WLC_BSS_RSSI_ON_CHANNEL                0x0002
+
+#define BRCMF_MAXRATES_IN_SET          16      /* max # of rates in rateset */
+#define BRCMF_STA_ASSOC                        0x10            /* Associated */
+
 struct brcmf_event_msg {
        __be16 version;
        __be16 flags;
@@ -140,6 +151,8 @@ struct brcmf_event_msg {
        __be32 datalen;
        u8 addr[ETH_ALEN];
        char ifname[IFNAMSIZ];
+       u8 ifidx;
+       u8 bsscfgidx;
 } __packed;
 
 struct brcm_ethhdr {
@@ -454,6 +467,24 @@ struct brcmf_scan_results_le {
        __le32 count;
 };
 
+struct brcmf_escan_params_le {
+       __le32 version;
+       __le16 action;
+       __le16 sync_id;
+       struct brcmf_scan_params_le params_le;
+};
+
+struct brcmf_escan_result_le {
+       __le32 buflen;
+       __le32 version;
+       __le16 sync_id;
+       __le16 bss_count;
+       struct brcmf_bss_info_le bss_info_le;
+};
+
+#define WL_ESCAN_RESULTS_FIXED_SIZE (sizeof(struct brcmf_escan_result_le) - \
+       sizeof(struct brcmf_bss_info_le))
+
 /* used for association with a specific BSSID and chanspec list */
 struct brcmf_assoc_params_le {
        /* 00:00:00:00:00:00: broadcast scan */
@@ -542,6 +573,28 @@ struct brcmf_channel_info_le {
        __le32 scan_channel;
 };
 
+struct brcmf_sta_info_le {
+       __le16  ver;            /* version of this struct */
+       __le16  len;            /* length in bytes of this structure */
+       __le16  cap;            /* sta's advertised capabilities */
+       __le32  flags;          /* flags defined below */
+       __le32  idle;           /* time since data pkt rx'd from sta */
+       u8      ea[ETH_ALEN];           /* Station address */
+       __le32  count;                  /* # rates in this set */
+       u8      rates[BRCMF_MAXRATES_IN_SET];   /* rates in 500kbps units */
+                                               /* w/hi bit set if basic */
+       __le32  in;             /* seconds elapsed since associated */
+       __le32  listen_interval_inms; /* Min Listen interval in ms for STA */
+       __le32  tx_pkts;        /* # of packets transmitted */
+       __le32  tx_failures;    /* # of packets failed */
+       __le32  rx_ucast_pkts;  /* # of unicast packets received */
+       __le32  rx_mcast_pkts;  /* # of multicast packets received */
+       __le32  tx_rate;        /* Rate of last successful tx frame */
+       __le32  rx_rate;        /* Rate of last successful rx frame */
+       __le32  rx_decrypt_succeeds;    /* # of packet decrypted successfully */
+       __le32  rx_decrypt_failures;    /* # of packet decrypted failed */
+};
+
 /* Bus independent dongle command */
 struct brcmf_dcmd {
        uint cmd;               /* common dongle cmd definition */
@@ -561,7 +614,7 @@ struct brcmf_pub {
        /* Linkage ponters */
        struct brcmf_bus *bus_if;
        struct brcmf_proto *prot;
-       struct brcmf_cfg80211_dev *config;
+       struct brcmf_cfg80211_info *config;
        struct device *dev;             /* fullmac dongle device pointer */
 
        /* Internal brcmf items */
@@ -634,10 +687,13 @@ extern const struct bcmevent_name bcmevent_names[];
 
 extern uint brcmf_c_mkiovar(char *name, char *data, uint datalen,
                          char *buf, uint len);
+extern uint brcmf_c_mkiovar_bsscfg(char *name, char *data, uint datalen,
+                                  char *buf, uint buflen, s32 bssidx);
 
 extern int brcmf_netdev_wait_pend8021x(struct net_device *ndev);
 
 extern s32 brcmf_exec_dcmd(struct net_device *dev, u32 cmd, void *arg, u32 len);
+extern int brcmf_netlink_dcmd(struct net_device *ndev, struct brcmf_dcmd *dcmd);
 
 /* Return pointer to interface name */
 extern char *brcmf_ifname(struct brcmf_pub *drvr, int idx);
@@ -657,10 +713,6 @@ extern int brcmf_c_host_event(struct brcmf_pub *drvr, int *idx,
 
 extern void brcmf_del_if(struct brcmf_pub *drvr, int ifidx);
 
-/* Send packet to dongle via data channel */
-extern int brcmf_sendpkt(struct brcmf_pub *drvr, int ifidx,\
-                        struct sk_buff *pkt);
-
 extern void brcmf_c_pktfilter_offload_set(struct brcmf_pub *drvr, char *arg);
 extern void brcmf_c_pktfilter_offload_enable(struct brcmf_pub *drvr, char *arg,
                                             int enable, int master_mode);
index 537f499cc5d26f1174747443e863f798177b3377..9b8ee19ea55d12ccf45a644e139490dd5d2b4843 100644 (file)
@@ -103,7 +103,7 @@ extern int brcmf_attach(uint bus_hdrlen, struct device *dev);
 extern void brcmf_detach(struct device *dev);
 
 /* Indication from bus module to change flow-control state */
-extern void brcmf_txflowcontrol(struct device *dev, int ifidx, bool on);
+extern void brcmf_txflowblock(struct device *dev, bool state);
 
 /* Notify tx completion */
 extern void brcmf_txcomplete(struct device *dev, struct sk_buff *txp,
index 6f70953f0bade06ef046a845829a887611c0a8b8..15c5db5752d199d2ec4c78913c44a5713dfb78a8 100644 (file)
@@ -80,12 +80,60 @@ brcmf_c_mkiovar(char *name, char *data, uint datalen, char *buf, uint buflen)
        strncpy(buf, name, buflen);
 
        /* append data onto the end of the name string */
-       memcpy(&buf[len], data, datalen);
-       len += datalen;
+       if (data && datalen) {
+               memcpy(&buf[len], data, datalen);
+               len += datalen;
+       }
 
        return len;
 }
 
+uint
+brcmf_c_mkiovar_bsscfg(char *name, char *data, uint datalen,
+                      char *buf, uint buflen, s32 bssidx)
+{
+       const s8 *prefix = "bsscfg:";
+       s8 *p;
+       u32 prefixlen;
+       u32 namelen;
+       u32 iolen;
+       __le32 bssidx_le;
+
+       if (bssidx == 0)
+               return brcmf_c_mkiovar(name, data, datalen, buf, buflen);
+
+       prefixlen = (u32) strlen(prefix); /* lengh of bsscfg prefix */
+       namelen = (u32) strlen(name) + 1; /* lengh of iovar  name + null */
+       iolen = prefixlen + namelen + sizeof(bssidx_le) + datalen;
+
+       if (buflen < 0 || iolen > (u32)buflen) {
+               brcmf_dbg(ERROR, "buffer is too short\n");
+               return 0;
+       }
+
+       p = buf;
+
+       /* copy prefix, no null */
+       memcpy(p, prefix, prefixlen);
+       p += prefixlen;
+
+       /* copy iovar name including null */
+       memcpy(p, name, namelen);
+       p += namelen;
+
+       /* bss config index as first data */
+       bssidx_le = cpu_to_le32(bssidx);
+       memcpy(p, &bssidx_le, sizeof(bssidx_le));
+       p += sizeof(bssidx_le);
+
+       /* parameter buffer follows */
+       if (datalen)
+               memcpy(p, data, datalen);
+
+       return iolen;
+
+}
+
 bool brcmf_c_prec_enq(struct device *dev, struct pktq *q,
                      struct sk_buff *pkt, int prec)
 {
@@ -205,7 +253,8 @@ brcmf_c_show_host_event(struct brcmf_event_msg *event, void *event_data)
                BRCMF_E_ACTION_FRAME_COMPLETE, "ACTION FRAME TX COMPLETE"}, {
                BRCMF_E_IF, "IF"}, {
                BRCMF_E_RSSI, "RSSI"}, {
-               BRCMF_E_PFN_SCAN_COMPLETE, "SCAN_COMPLETE"}
+               BRCMF_E_PFN_SCAN_COMPLETE, "SCAN_COMPLETE"}, {
+               BRCMF_E_ESCAN_RESULT, "ESCAN_RESULT"}
        };
        uint event_type, flags, auth_type, datalen;
        static u32 seqnum_prev;
@@ -350,6 +399,11 @@ brcmf_c_show_host_event(struct brcmf_event_msg *event, void *event_data)
                brcmf_dbg(EVENT, "MACEVENT: %s\n", event_name);
                break;
 
+       case BRCMF_E_ESCAN_RESULT:
+               brcmf_dbg(EVENT, "MACEVENT: %s\n", event_name);
+               datalen = 0;
+               break;
+
        case BRCMF_E_PFN_NET_FOUND:
        case BRCMF_E_PFN_NET_LOST:
        case BRCMF_E_PFN_SCAN_COMPLETE:
@@ -425,13 +479,7 @@ brcmf_c_show_host_event(struct brcmf_event_msg *event, void *event_data)
        }
 
        /* show any appended data */
-       if (datalen) {
-               buf = (unsigned char *) event_data;
-               brcmf_dbg(EVENT, " data (%d) : ", datalen);
-               for (i = 0; i < datalen; i++)
-                       brcmf_dbg(EVENT, " 0x%02x ", *buf++);
-               brcmf_dbg(EVENT, "\n");
-       }
+       brcmf_dbg_hex_dump(datalen, event_data, datalen, "Received data");
 }
 #endif                         /* DEBUG */
 
@@ -522,8 +570,9 @@ brcmf_c_host_event(struct brcmf_pub *drvr, int *ifidx, void *pktdata,
        }
 
 #ifdef DEBUG
-       brcmf_c_show_host_event(event, event_data);
-#endif                         /* DEBUG */
+       if (BRCMF_EVENT_ON())
+               brcmf_c_show_host_event(event, event_data);
+#endif /* DEBUG */
 
        return 0;
 }
index b784920532d31b3ae9deb1ca0453244efb010b39..fb508c2256ddc8a7244b61ad1e6b3fd13b41bf30 100644 (file)
@@ -55,6 +55,7 @@ do {                                                                  \
 #define BRCMF_HDRS_ON()                (brcmf_msg_level & BRCMF_HDRS_VAL)
 #define BRCMF_BYTES_ON()       (brcmf_msg_level & BRCMF_BYTES_VAL)
 #define BRCMF_GLOM_ON()                (brcmf_msg_level & BRCMF_GLOM_VAL)
+#define BRCMF_EVENT_ON()       (brcmf_msg_level & BRCMF_EVENT_VAL)
 
 #else  /* (defined DEBUG) || (defined DEBUG) */
 
@@ -65,6 +66,7 @@ do {                                                                  \
 #define BRCMF_HDRS_ON()                0
 #define BRCMF_BYTES_ON()       0
 #define BRCMF_GLOM_ON()                0
+#define BRCMF_EVENT_ON()       0
 
 #endif                         /* defined(DEBUG) */
 
index 9ab24528f9b9c0e54d40854ae6e43e8ffd9f654c..d7c76ce9d8cb3d74dccb79c863e5db1efd9a7446 100644 (file)
@@ -272,30 +272,6 @@ static void brcmf_netdev_set_multicast_list(struct net_device *ndev)
        schedule_work(&drvr->multicast_work);
 }
 
-int brcmf_sendpkt(struct brcmf_pub *drvr, int ifidx, struct sk_buff *pktbuf)
-{
-       /* Reject if down */
-       if (!drvr->bus_if->drvr_up || (drvr->bus_if->state == BRCMF_BUS_DOWN))
-               return -ENODEV;
-
-       /* Update multicast statistic */
-       if (pktbuf->len >= ETH_ALEN) {
-               u8 *pktdata = (u8 *) (pktbuf->data);
-               struct ethhdr *eh = (struct ethhdr *)pktdata;
-
-               if (is_multicast_ether_addr(eh->h_dest))
-                       drvr->tx_multicast++;
-               if (ntohs(eh->h_proto) == ETH_P_PAE)
-                       atomic_inc(&drvr->pend_8021x_cnt);
-       }
-
-       /* If the protocol uses a data header, apply it */
-       brcmf_proto_hdrpush(drvr, ifidx, pktbuf);
-
-       /* Use bus module to send data frame */
-       return drvr->bus_if->brcmf_bus_txdata(drvr->dev, pktbuf);
-}
-
 static int brcmf_netdev_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 {
        int ret;
@@ -338,7 +314,22 @@ static int brcmf_netdev_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                }
        }
 
-       ret = brcmf_sendpkt(drvr, ifp->idx, skb);
+       /* Update multicast statistic */
+       if (skb->len >= ETH_ALEN) {
+               u8 *pktdata = (u8 *)(skb->data);
+               struct ethhdr *eh = (struct ethhdr *)pktdata;
+
+               if (is_multicast_ether_addr(eh->h_dest))
+                       drvr->tx_multicast++;
+               if (ntohs(eh->h_proto) == ETH_P_PAE)
+                       atomic_inc(&drvr->pend_8021x_cnt);
+       }
+
+       /* If the protocol uses a data header, apply it */
+       brcmf_proto_hdrpush(drvr, ifp->idx, skb);
+
+       /* Use bus module to send data frame */
+       ret =  drvr->bus_if->brcmf_bus_txdata(drvr->dev, skb);
 
 done:
        if (ret)
@@ -350,19 +341,23 @@ done:
        return 0;
 }
 
-void brcmf_txflowcontrol(struct device *dev, int ifidx, bool state)
+void brcmf_txflowblock(struct device *dev, bool state)
 {
        struct net_device *ndev;
        struct brcmf_bus *bus_if = dev_get_drvdata(dev);
        struct brcmf_pub *drvr = bus_if->drvr;
+       int i;
 
        brcmf_dbg(TRACE, "Enter\n");
 
-       ndev = drvr->iflist[ifidx]->ndev;
-       if (state == ON)
-               netif_stop_queue(ndev);
-       else
-               netif_wake_queue(ndev);
+       for (i = 0; i < BRCMF_MAX_IFS; i++)
+               if (drvr->iflist[i]) {
+                       ndev = drvr->iflist[i]->ndev;
+                       if (state)
+                               netif_stop_queue(ndev);
+                       else
+                               netif_wake_queue(ndev);
+               }
 }
 
 static int brcmf_host_event(struct brcmf_pub *drvr, int *ifidx,
@@ -775,6 +770,14 @@ done:
        return err;
 }
 
+int brcmf_netlink_dcmd(struct net_device *ndev, struct brcmf_dcmd *dcmd)
+{
+       brcmf_dbg(TRACE, "enter: cmd %x buf %p len %d\n",
+                 dcmd->cmd, dcmd->buf, dcmd->len);
+
+       return brcmf_exec_dcmd(ndev, dcmd->cmd, dcmd->buf, dcmd->len);
+}
+
 static int brcmf_netdev_stop(struct net_device *ndev)
 {
        struct brcmf_if *ifp = netdev_priv(ndev);
index 472f2ef5c65237b9bb52f577723a3fed734a5041..3564686add9a1099aa048c5ba4526f5052288abd 100644 (file)
@@ -482,6 +482,15 @@ struct sdpcm_shared_le {
        __le32 brpt_addr;
 };
 
+/* SDIO read frame info */
+struct brcmf_sdio_read {
+       u8 seq_num;
+       u8 channel;
+       u16 len;
+       u16 len_left;
+       u16 len_nxtfrm;
+       u8 dat_offset;
+};
 
 /* misc chip info needed by some of the routines */
 /* Private data for SDIO bus interaction */
@@ -494,9 +503,8 @@ struct brcmf_sdio {
        u32 ramsize;            /* Size of RAM in SOCRAM (bytes) */
 
        u32 hostintmask;        /* Copy of Host Interrupt Mask */
-       u32 intstatus;  /* Intstatus bits (events) pending */
-       bool dpc_sched;         /* Indicates DPC schedule (intrpt rcvd) */
-       bool fcstate;           /* State of dongle flow-control */
+       atomic_t intstatus;     /* Intstatus bits (events) pending */
+       atomic_t fcstate;       /* State of dongle flow-control */
 
        uint blocksize;         /* Block size of SDIO transfers */
        uint roundup;           /* Max roundup limit */
@@ -508,9 +516,11 @@ struct brcmf_sdio {
 
        u8 hdrbuf[MAX_HDR_READ + BRCMF_SDALIGN];
        u8 *rxhdr;              /* Header of current rx frame (in hdrbuf) */
-       u16 nextlen;            /* Next Read Len from last header */
        u8 rx_seq;              /* Receive sequence number (expected) */
+       struct brcmf_sdio_read cur_read;
+                               /* info of current read frame */
        bool rxskip;            /* Skip receive (awaiting NAK ACK) */
+       bool rxpending;         /* Data frame pending in dongle */
 
        uint rxbound;           /* Rx frames to read before resched */
        uint txbound;           /* Tx frames to send before resched */
@@ -531,7 +541,7 @@ struct brcmf_sdio {
 
        bool intr;              /* Use interrupts */
        bool poll;              /* Use polling */
-       bool ipend;             /* Device interrupt is pending */
+       atomic_t ipend;         /* Device interrupt is pending */
        uint spurious;          /* Count of spurious interrupts */
        uint pollrate;          /* Ticks between device polls */
        uint polltick;          /* Tick counter */
@@ -549,12 +559,9 @@ struct brcmf_sdio {
        s32 idleclock;  /* How to set bus driver when idle */
        s32 sd_rxchain;
        bool use_rxchain;       /* If brcmf should use PKT chains */
-       bool sleeping;          /* Is SDIO bus sleeping? */
        bool rxflow_mode;       /* Rx flow control mode */
        bool rxflow;            /* Is rx flow control on */
        bool alp_only;          /* Don't use HT clock (ALP only) */
-/* Field to decide if rx of control frames happen in rxbuf or lb-pool */
-       bool usebufpool;
 
        u8 *ctrl_frame_buf;
        u32 ctrl_frame_len;
@@ -570,8 +577,8 @@ struct brcmf_sdio {
        bool wd_timer_valid;
        uint save_ms;
 
-       struct task_struct *dpc_tsk;
-       struct completion dpc_wait;
+       struct workqueue_struct *brcmf_wq;
+       struct work_struct datawork;
        struct list_head dpc_tsklst;
        spinlock_t dpc_tl_lock;
 
@@ -657,15 +664,6 @@ w_sdreg32(struct brcmf_sdio *bus, u32 regval, u32 reg_offset)
 
 #define HOSTINTMASK            (I_HMB_SW_MASK | I_CHIPACTIVE)
 
-/* Packet free applicable unconditionally for sdio and sdspi.
- * Conditional if bufpool was present for gspi bus.
- */
-static void brcmf_sdbrcm_pktfree2(struct brcmf_sdio *bus, struct sk_buff *pkt)
-{
-       if (bus->usebufpool)
-               brcmu_pkt_buf_free_skb(pkt);
-}
-
 /* Turn backplane clock on or off */
 static int brcmf_sdbrcm_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
 {
@@ -853,81 +851,6 @@ static int brcmf_sdbrcm_clkctl(struct brcmf_sdio *bus, uint target, bool pendok)
        return 0;
 }
 
-static int brcmf_sdbrcm_bussleep(struct brcmf_sdio *bus, bool sleep)
-{
-       int ret;
-
-       brcmf_dbg(INFO, "request %s (currently %s)\n",
-                 sleep ? "SLEEP" : "WAKE",
-                 bus->sleeping ? "SLEEP" : "WAKE");
-
-       /* Done if we're already in the requested state */
-       if (sleep == bus->sleeping)
-               return 0;
-
-       /* Going to sleep: set the alarm and turn off the lights... */
-       if (sleep) {
-               /* Don't sleep if something is pending */
-               if (bus->dpc_sched || bus->rxskip || pktq_len(&bus->txq))
-                       return -EBUSY;
-
-               /* Make sure the controller has the bus up */
-               brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
-
-               /* Tell device to start using OOB wakeup */
-               ret = w_sdreg32(bus, SMB_USE_OOB,
-                               offsetof(struct sdpcmd_regs, tosbmailbox));
-               if (ret != 0)
-                       brcmf_dbg(ERROR, "CANNOT SIGNAL CHIP, WILL NOT WAKE UP!!\n");
-
-               /* Turn off our contribution to the HT clock request */
-               brcmf_sdbrcm_clkctl(bus, CLK_SDONLY, false);
-
-               brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
-                                SBSDIO_FORCE_HW_CLKREQ_OFF, NULL);
-
-               /* Isolate the bus */
-               brcmf_sdio_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
-                                SBSDIO_DEVCTL_PADS_ISO, NULL);
-
-               /* Change state */
-               bus->sleeping = true;
-
-       } else {
-               /* Waking up: bus power up is ok, set local state */
-
-               brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
-                                0, NULL);
-
-               /* Make sure the controller has the bus up */
-               brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
-
-               /* Send misc interrupt to indicate OOB not needed */
-               ret = w_sdreg32(bus, 0,
-                               offsetof(struct sdpcmd_regs, tosbmailboxdata));
-               if (ret == 0)
-                       ret = w_sdreg32(bus, SMB_DEV_INT,
-                               offsetof(struct sdpcmd_regs, tosbmailbox));
-
-               if (ret != 0)
-                       brcmf_dbg(ERROR, "CANNOT SIGNAL CHIP TO CLEAR OOB!!\n");
-
-               /* Make sure we have SD bus access */
-               brcmf_sdbrcm_clkctl(bus, CLK_SDONLY, false);
-
-               /* Change state */
-               bus->sleeping = false;
-       }
-
-       return 0;
-}
-
-static void bus_wake(struct brcmf_sdio *bus)
-{
-       if (bus->sleeping)
-               brcmf_sdbrcm_bussleep(bus, false);
-}
-
 static u32 brcmf_sdbrcm_hostmail(struct brcmf_sdio *bus)
 {
        u32 intstatus = 0;
@@ -1056,7 +979,7 @@ static void brcmf_sdbrcm_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
        }
 
        /* Clear partial in any case */
-       bus->nextlen = 0;
+       bus->cur_read.len = 0;
 
        /* If we can't reach the device, signal failure */
        if (err)
@@ -1108,6 +1031,96 @@ static void brcmf_sdbrcm_free_glom(struct brcmf_sdio *bus)
        }
 }
 
+static bool brcmf_sdio_hdparser(struct brcmf_sdio *bus, u8 *header,
+                               struct brcmf_sdio_read *rd)
+{
+       u16 len, checksum;
+       u8 rx_seq, fc, tx_seq_max;
+
+       /*
+        * 4 bytes hardware header (frame tag)
+        * Byte 0~1: Frame length
+        * Byte 2~3: Checksum, bit-wise inverse of frame length
+        */
+       len = get_unaligned_le16(header);
+       checksum = get_unaligned_le16(header + sizeof(u16));
+       /* All zero means no more to read */
+       if (!(len | checksum)) {
+               bus->rxpending = false;
+               return false;
+       }
+       if ((u16)(~(len ^ checksum))) {
+               brcmf_dbg(ERROR, "HW header checksum error\n");
+               bus->sdcnt.rx_badhdr++;
+               brcmf_sdbrcm_rxfail(bus, false, false);
+               return false;
+       }
+       if (len < SDPCM_HDRLEN) {
+               brcmf_dbg(ERROR, "HW header length error\n");
+               return false;
+       }
+       rd->len = len;
+
+       /*
+        * 8 bytes hardware header
+        * Byte 0: Rx sequence number
+        * Byte 1: 4 MSB Channel number, 4 LSB arbitrary flag
+        * Byte 2: Length of next data frame
+        * Byte 3: Data offset
+        * Byte 4: Flow control bits
+        * Byte 5: Maximum Sequence number allow for Tx
+        * Byte 6~7: Reserved
+        */
+       rx_seq = SDPCM_PACKET_SEQUENCE(&header[SDPCM_FRAMETAG_LEN]);
+       rd->channel = SDPCM_PACKET_CHANNEL(&header[SDPCM_FRAMETAG_LEN]);
+       if (len > MAX_RX_DATASZ && rd->channel != SDPCM_CONTROL_CHANNEL) {
+               brcmf_dbg(ERROR, "HW header length too long\n");
+               bus->sdiodev->bus_if->dstats.rx_errors++;
+               bus->sdcnt.rx_toolong++;
+               brcmf_sdbrcm_rxfail(bus, false, false);
+               rd->len = 0;
+               return false;
+       }
+       rd->dat_offset = SDPCM_DOFFSET_VALUE(&header[SDPCM_FRAMETAG_LEN]);
+       if (rd->dat_offset < SDPCM_HDRLEN || rd->dat_offset > rd->len) {
+               brcmf_dbg(ERROR, "seq %d: bad data offset\n", rx_seq);
+               bus->sdcnt.rx_badhdr++;
+               brcmf_sdbrcm_rxfail(bus, false, false);
+               rd->len = 0;
+               return false;
+       }
+       if (rd->seq_num != rx_seq) {
+               brcmf_dbg(ERROR, "seq %d: sequence number error, expect %d\n",
+                         rx_seq, rd->seq_num);
+               bus->sdcnt.rx_badseq++;
+               rd->seq_num = rx_seq;
+       }
+       rd->len_nxtfrm = header[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET];
+       if (rd->len_nxtfrm << 4 > MAX_RX_DATASZ) {
+               /* only warm for NON glom packet */
+               if (rd->channel != SDPCM_GLOM_CHANNEL)
+                       brcmf_dbg(ERROR, "seq %d: next length error\n", rx_seq);
+               rd->len_nxtfrm = 0;
+       }
+       fc = SDPCM_FCMASK_VALUE(&header[SDPCM_FRAMETAG_LEN]);
+       if (bus->flowcontrol != fc) {
+               if (~bus->flowcontrol & fc)
+                       bus->sdcnt.fc_xoff++;
+               if (bus->flowcontrol & ~fc)
+                       bus->sdcnt.fc_xon++;
+               bus->sdcnt.fc_rcvd++;
+               bus->flowcontrol = fc;
+       }
+       tx_seq_max = SDPCM_WINDOW_VALUE(&header[SDPCM_FRAMETAG_LEN]);
+       if ((u8)(tx_seq_max - bus->tx_seq) > 0x40) {
+               brcmf_dbg(ERROR, "seq %d: max tx seq number error\n", rx_seq);
+               tx_seq_max = bus->tx_seq + 2;
+       }
+       bus->tx_max = tx_seq_max;
+
+       return true;
+}
+
 static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
 {
        u16 dlen, totlen;
@@ -1122,6 +1135,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
 
        int ifidx = 0;
        bool usechain = bus->use_rxchain;
+       u16 next_len;
 
        /* If packets, issue read(s) and send up packet chain */
        /* Return sequence numbers consumed? */
@@ -1185,10 +1199,10 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
                if (pnext) {
                        brcmf_dbg(GLOM, "allocated %d-byte packet chain for %d subframes\n",
                                  totlen, num);
-                       if (BRCMF_GLOM_ON() && bus->nextlen &&
-                           totlen != bus->nextlen) {
+                       if (BRCMF_GLOM_ON() && bus->cur_read.len &&
+                           totlen != bus->cur_read.len) {
                                brcmf_dbg(GLOM, "glomdesc mismatch: nextlen %d glomdesc %d rxseq %d\n",
-                                         bus->nextlen, totlen, rxseq);
+                                         bus->cur_read.len, totlen, rxseq);
                        }
                        pfirst = pnext = NULL;
                } else {
@@ -1199,7 +1213,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
                /* Done with descriptor packet */
                brcmu_pkt_buf_free_skb(bus->glomd);
                bus->glomd = NULL;
-               bus->nextlen = 0;
+               bus->cur_read.len = 0;
        }
 
        /* Ok -- either we just generated a packet chain,
@@ -1272,12 +1286,13 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
 
                chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]);
                seq = SDPCM_PACKET_SEQUENCE(&dptr[SDPCM_FRAMETAG_LEN]);
-               bus->nextlen = dptr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET];
-               if ((bus->nextlen << 4) > MAX_RX_DATASZ) {
+               next_len = dptr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET];
+               if ((next_len << 4) > MAX_RX_DATASZ) {
                        brcmf_dbg(INFO, "nextlen too large (%d) seq %d\n",
-                                 bus->nextlen, seq);
-                       bus->nextlen = 0;
+                                 next_len, seq);
+                       next_len = 0;
                }
+               bus->cur_read.len = next_len << 4;
                doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
                txmax = SDPCM_WINDOW_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
 
@@ -1378,7 +1393,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
                                bus->sdcnt.rxglomfail++;
                                brcmf_sdbrcm_free_glom(bus);
                        }
-                       bus->nextlen = 0;
+                       bus->cur_read.len = 0;
                        return 0;
                }
 
@@ -1573,422 +1588,166 @@ static void brcmf_pad(struct brcmf_sdio *bus, u16 *pad, u16 *rdlen)
        }
 }
 
-static void
-brcmf_alloc_pkt_and_read(struct brcmf_sdio *bus, u16 rdlen,
-                        struct sk_buff **pkt, u8 **rxbuf)
+static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
 {
-       int sdret;              /* Return code from calls */
-
-       *pkt = brcmu_pkt_buf_get_skb(rdlen + BRCMF_SDALIGN);
-       if (*pkt == NULL)
-               return;
-
-       pkt_align(*pkt, rdlen, BRCMF_SDALIGN);
-       *rxbuf = (u8 *) ((*pkt)->data);
-       /* Read the entire frame */
-       sdret = brcmf_sdcard_recv_pkt(bus->sdiodev, bus->sdiodev->sbwad,
-                                     SDIO_FUNC_2, F2SYNC, *pkt);
-       bus->sdcnt.f2rxdata++;
-
-       if (sdret < 0) {
-               brcmf_dbg(ERROR, "(nextlen): read %d bytes failed: %d\n",
-                         rdlen, sdret);
-               brcmu_pkt_buf_free_skb(*pkt);
-               bus->sdiodev->bus_if->dstats.rx_errors++;
-               /* Force retry w/normal header read.
-                * Don't attempt NAK for
-                * gSPI
-                */
-               brcmf_sdbrcm_rxfail(bus, true, true);
-               *pkt = NULL;
-       }
-}
-
-/* Checks the header */
-static int
-brcmf_check_rxbuf(struct brcmf_sdio *bus, struct sk_buff *pkt, u8 *rxbuf,
-                 u8 rxseq, u16 nextlen, u16 *len)
-{
-       u16 check;
-       bool len_consistent;    /* Result of comparing readahead len and
-                                  len from hw-hdr */
-
-       memcpy(bus->rxhdr, rxbuf, SDPCM_HDRLEN);
-
-       /* Extract hardware header fields */
-       *len = get_unaligned_le16(bus->rxhdr);
-       check = get_unaligned_le16(bus->rxhdr + sizeof(u16));
-
-       /* All zeros means readahead info was bad */
-       if (!(*len | check)) {
-               brcmf_dbg(INFO, "(nextlen): read zeros in HW header???\n");
-               goto fail;
-       }
-
-       /* Validate check bytes */
-       if ((u16)~(*len ^ check)) {
-               brcmf_dbg(ERROR, "(nextlen): HW hdr error: nextlen/len/check 0x%04x/0x%04x/0x%04x\n",
-                         nextlen, *len, check);
-               bus->sdcnt.rx_badhdr++;
-               brcmf_sdbrcm_rxfail(bus, false, false);
-               goto fail;
-       }
-
-       /* Validate frame length */
-       if (*len < SDPCM_HDRLEN) {
-               brcmf_dbg(ERROR, "(nextlen): HW hdr length invalid: %d\n",
-                         *len);
-               goto fail;
-       }
-
-       /* Check for consistency with readahead info */
-       len_consistent = (nextlen != (roundup(*len, 16) >> 4));
-       if (len_consistent) {
-               /* Mismatch, force retry w/normal
-                       header (may be >4K) */
-               brcmf_dbg(ERROR, "(nextlen): mismatch, nextlen %d len %d rnd %d; expected rxseq %d\n",
-                         nextlen, *len, roundup(*len, 16),
-                         rxseq);
-               brcmf_sdbrcm_rxfail(bus, true, true);
-               goto fail;
-       }
-
-       return 0;
-
-fail:
-       brcmf_sdbrcm_pktfree2(bus, pkt);
-       return -EINVAL;
-}
-
-/* Return true if there may be more frames to read */
-static uint
-brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished)
-{
-       u16 len, check; /* Extracted hardware header fields */
-       u8 chan, seq, doff;     /* Extracted software header fields */
-       u8 fcbits;              /* Extracted fcbits from software header */
-
        struct sk_buff *pkt;            /* Packet for event or data frames */
        u16 pad;                /* Number of pad bytes to read */
-       u16 rdlen;              /* Total number of bytes to read */
-       u8 rxseq;               /* Next sequence number to expect */
        uint rxleft = 0;        /* Remaining number of frames allowed */
        int sdret;              /* Return code from calls */
-       u8 txmax;               /* Maximum tx sequence offered */
-       u8 *rxbuf;
        int ifidx = 0;
        uint rxcount = 0;       /* Total frames read */
+       struct brcmf_sdio_read *rd = &bus->cur_read, rd_new;
+       u8 head_read = 0;
 
        brcmf_dbg(TRACE, "Enter\n");
 
        /* Not finished unless we encounter no more frames indication */
-       *finished = false;
+       bus->rxpending = true;
 
-       for (rxseq = bus->rx_seq, rxleft = maxframes;
+       for (rd->seq_num = bus->rx_seq, rxleft = maxframes;
             !bus->rxskip && rxleft &&
             bus->sdiodev->bus_if->state != BRCMF_BUS_DOWN;
-            rxseq++, rxleft--) {
+            rd->seq_num++, rxleft--) {
 
                /* Handle glomming separately */
                if (bus->glomd || !skb_queue_empty(&bus->glom)) {
                        u8 cnt;
                        brcmf_dbg(GLOM, "calling rxglom: glomd %p, glom %p\n",
                                  bus->glomd, skb_peek(&bus->glom));
-                       cnt = brcmf_sdbrcm_rxglom(bus, rxseq);
+                       cnt = brcmf_sdbrcm_rxglom(bus, rd->seq_num);
                        brcmf_dbg(GLOM, "rxglom returned %d\n", cnt);
-                       rxseq += cnt - 1;
+                       rd->seq_num += cnt - 1;
                        rxleft = (rxleft > cnt) ? (rxleft - cnt) : 1;
                        continue;
                }
 
-               /* Try doing single read if we can */
-               if (bus->nextlen) {
-                       u16 nextlen = bus->nextlen;
-                       bus->nextlen = 0;
-
-                       rdlen = len = nextlen << 4;
-                       brcmf_pad(bus, &pad, &rdlen);
-
-                       /*
-                        * After the frame is received we have to
-                        * distinguish whether it is data
-                        * or non-data frame.
-                        */
-                       brcmf_alloc_pkt_and_read(bus, rdlen, &pkt, &rxbuf);
-                       if (pkt == NULL) {
-                               /* Give up on data, request rtx of events */
-                               brcmf_dbg(ERROR, "(nextlen): brcmf_alloc_pkt_and_read failed: len %d rdlen %d expected rxseq %d\n",
-                                         len, rdlen, rxseq);
-                               continue;
-                       }
-
-                       if (brcmf_check_rxbuf(bus, pkt, rxbuf, rxseq, nextlen,
-                                             &len) < 0)
+               rd->len_left = rd->len;
+               /* read header first for unknown frame length */
+               if (!rd->len) {
+                       sdret = brcmf_sdcard_recv_buf(bus->sdiodev,
+                                                     bus->sdiodev->sbwad,
+                                                     SDIO_FUNC_2, F2SYNC,
+                                                     bus->rxhdr,
+                                                     BRCMF_FIRSTREAD);
+                       bus->sdcnt.f2rxhdrs++;
+                       if (sdret < 0) {
+                               brcmf_dbg(ERROR, "RXHEADER FAILED: %d\n",
+                                         sdret);
+                               bus->sdcnt.rx_hdrfail++;
+                               brcmf_sdbrcm_rxfail(bus, true, true);
                                continue;
-
-                       /* Extract software header fields */
-                       chan = SDPCM_PACKET_CHANNEL(
-                                       &bus->rxhdr[SDPCM_FRAMETAG_LEN]);
-                       seq = SDPCM_PACKET_SEQUENCE(
-                                       &bus->rxhdr[SDPCM_FRAMETAG_LEN]);
-                       doff = SDPCM_DOFFSET_VALUE(
-                                       &bus->rxhdr[SDPCM_FRAMETAG_LEN]);
-                       txmax = SDPCM_WINDOW_VALUE(
-                                       &bus->rxhdr[SDPCM_FRAMETAG_LEN]);
-
-                       bus->nextlen =
-                           bus->rxhdr[SDPCM_FRAMETAG_LEN +
-                                      SDPCM_NEXTLEN_OFFSET];
-                       if ((bus->nextlen << 4) > MAX_RX_DATASZ) {
-                               brcmf_dbg(INFO, "(nextlen): got frame w/nextlen too large (%d), seq %d\n",
-                                         bus->nextlen, seq);
-                               bus->nextlen = 0;
                        }
 
-                       bus->sdcnt.rx_readahead_cnt++;
-
-                       /* Handle Flow Control */
-                       fcbits = SDPCM_FCMASK_VALUE(
-                                       &bus->rxhdr[SDPCM_FRAMETAG_LEN]);
-
-                       if (bus->flowcontrol != fcbits) {
-                               if (~bus->flowcontrol & fcbits)
-                                       bus->sdcnt.fc_xoff++;
-
-                               if (bus->flowcontrol & ~fcbits)
-                                       bus->sdcnt.fc_xon++;
-
-                               bus->sdcnt.fc_rcvd++;
-                               bus->flowcontrol = fcbits;
-                       }
-
-                       /* Check and update sequence number */
-                       if (rxseq != seq) {
-                               brcmf_dbg(INFO, "(nextlen): rx_seq %d, expected %d\n",
-                                         seq, rxseq);
-                               bus->sdcnt.rx_badseq++;
-                               rxseq = seq;
-                       }
-
-                       /* Check window for sanity */
-                       if ((u8) (txmax - bus->tx_seq) > 0x40) {
-                               brcmf_dbg(ERROR, "got unlikely tx max %d with tx_seq %d\n",
-                                         txmax, bus->tx_seq);
-                               txmax = bus->tx_seq + 2;
-                       }
-                       bus->tx_max = txmax;
-
-                       brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_DATA_ON(),
-                                          rxbuf, len, "Rx Data:\n");
-                       brcmf_dbg_hex_dump(!(BRCMF_BYTES_ON() &&
-                                            BRCMF_DATA_ON()) &&
-                                          BRCMF_HDRS_ON(),
+                       brcmf_dbg_hex_dump(BRCMF_BYTES_ON() || BRCMF_HDRS_ON(),
                                           bus->rxhdr, SDPCM_HDRLEN,
                                           "RxHdr:\n");
 
-                       if (chan == SDPCM_CONTROL_CHANNEL) {
-                               brcmf_dbg(ERROR, "(nextlen): readahead on control packet %d?\n",
-                                         seq);
-                               /* Force retry w/normal header read */
-                               bus->nextlen = 0;
-                               brcmf_sdbrcm_rxfail(bus, false, true);
-                               brcmf_sdbrcm_pktfree2(bus, pkt);
-                               continue;
+                       if (!brcmf_sdio_hdparser(bus, bus->rxhdr, rd)) {
+                               if (!bus->rxpending)
+                                       break;
+                               else
+                                       continue;
                        }
 
-                       /* Validate data offset */
-                       if ((doff < SDPCM_HDRLEN) || (doff > len)) {
-                               brcmf_dbg(ERROR, "(nextlen): bad data offset %d: HW len %d min %d\n",
-                                         doff, len, SDPCM_HDRLEN);
-                               brcmf_sdbrcm_rxfail(bus, false, false);
-                               brcmf_sdbrcm_pktfree2(bus, pkt);
+                       if (rd->channel == SDPCM_CONTROL_CHANNEL) {
+                               brcmf_sdbrcm_read_control(bus, bus->rxhdr,
+                                                         rd->len,
+                                                         rd->dat_offset);
+                               /* prepare the descriptor for the next read */
+                               rd->len = rd->len_nxtfrm << 4;
+                               rd->len_nxtfrm = 0;
+                               /* treat all packets as events if we don't know */
+                               rd->channel = SDPCM_EVENT_CHANNEL;
                                continue;
                        }
-
-                       /* All done with this one -- now deliver the packet */
-                       goto deliver;
-               }
-
-               /* Read frame header (hardware and software) */
-               sdret = brcmf_sdcard_recv_buf(bus->sdiodev, bus->sdiodev->sbwad,
-                                             SDIO_FUNC_2, F2SYNC, bus->rxhdr,
-                                             BRCMF_FIRSTREAD);
-               bus->sdcnt.f2rxhdrs++;
-
-               if (sdret < 0) {
-                       brcmf_dbg(ERROR, "RXHEADER FAILED: %d\n", sdret);
-                       bus->sdcnt.rx_hdrfail++;
-                       brcmf_sdbrcm_rxfail(bus, true, true);
-                       continue;
-               }
-               brcmf_dbg_hex_dump(BRCMF_BYTES_ON() || BRCMF_HDRS_ON(),
-                                  bus->rxhdr, SDPCM_HDRLEN, "RxHdr:\n");
-
-
-               /* Extract hardware header fields */
-               len = get_unaligned_le16(bus->rxhdr);
-               check = get_unaligned_le16(bus->rxhdr + sizeof(u16));
-
-               /* All zeros means no more frames */
-               if (!(len | check)) {
-                       *finished = true;
-                       break;
-               }
-
-               /* Validate check bytes */
-               if ((u16) ~(len ^ check)) {
-                       brcmf_dbg(ERROR, "HW hdr err: len/check 0x%04x/0x%04x\n",
-                                 len, check);
-                       bus->sdcnt.rx_badhdr++;
-                       brcmf_sdbrcm_rxfail(bus, false, false);
-                       continue;
-               }
-
-               /* Validate frame length */
-               if (len < SDPCM_HDRLEN) {
-                       brcmf_dbg(ERROR, "HW hdr length invalid: %d\n", len);
-                       continue;
-               }
-
-               /* Extract software header fields */
-               chan = SDPCM_PACKET_CHANNEL(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
-               seq = SDPCM_PACKET_SEQUENCE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
-               doff = SDPCM_DOFFSET_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
-               txmax = SDPCM_WINDOW_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
-
-               /* Validate data offset */
-               if ((doff < SDPCM_HDRLEN) || (doff > len)) {
-                       brcmf_dbg(ERROR, "Bad data offset %d: HW len %d, min %d seq %d\n",
-                                 doff, len, SDPCM_HDRLEN, seq);
-                       bus->sdcnt.rx_badhdr++;
-                       brcmf_sdbrcm_rxfail(bus, false, false);
-                       continue;
-               }
-
-               /* Save the readahead length if there is one */
-               bus->nextlen =
-                   bus->rxhdr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET];
-               if ((bus->nextlen << 4) > MAX_RX_DATASZ) {
-                       brcmf_dbg(INFO, "(nextlen): got frame w/nextlen too large (%d), seq %d\n",
-                                 bus->nextlen, seq);
-                       bus->nextlen = 0;
-               }
-
-               /* Handle Flow Control */
-               fcbits = SDPCM_FCMASK_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
-
-               if (bus->flowcontrol != fcbits) {
-                       if (~bus->flowcontrol & fcbits)
-                               bus->sdcnt.fc_xoff++;
-
-                       if (bus->flowcontrol & ~fcbits)
-                               bus->sdcnt.fc_xon++;
-
-                       bus->sdcnt.fc_rcvd++;
-                       bus->flowcontrol = fcbits;
-               }
-
-               /* Check and update sequence number */
-               if (rxseq != seq) {
-                       brcmf_dbg(INFO, "rx_seq %d, expected %d\n", seq, rxseq);
-                       bus->sdcnt.rx_badseq++;
-                       rxseq = seq;
-               }
-
-               /* Check window for sanity */
-               if ((u8) (txmax - bus->tx_seq) > 0x40) {
-                       brcmf_dbg(ERROR, "unlikely tx max %d with tx_seq %d\n",
-                                 txmax, bus->tx_seq);
-                       txmax = bus->tx_seq + 2;
-               }
-               bus->tx_max = txmax;
-
-               /* Call a separate function for control frames */
-               if (chan == SDPCM_CONTROL_CHANNEL) {
-                       brcmf_sdbrcm_read_control(bus, bus->rxhdr, len, doff);
-                       continue;
-               }
-
-               /* precondition: chan is either SDPCM_DATA_CHANNEL,
-                  SDPCM_EVENT_CHANNEL, SDPCM_TEST_CHANNEL or
-                  SDPCM_GLOM_CHANNEL */
-
-               /* Length to read */
-               rdlen = (len > BRCMF_FIRSTREAD) ? (len - BRCMF_FIRSTREAD) : 0;
-
-               /* May pad read to blocksize for efficiency */
-               if (bus->roundup && bus->blocksize &&
-                       (rdlen > bus->blocksize)) {
-                       pad = bus->blocksize - (rdlen % bus->blocksize);
-                       if ((pad <= bus->roundup) && (pad < bus->blocksize) &&
-                           ((rdlen + pad + BRCMF_FIRSTREAD) < MAX_RX_DATASZ))
-                               rdlen += pad;
-               } else if (rdlen % BRCMF_SDALIGN) {
-                       rdlen += BRCMF_SDALIGN - (rdlen % BRCMF_SDALIGN);
+                       rd->len_left = rd->len > BRCMF_FIRSTREAD ?
+                                      rd->len - BRCMF_FIRSTREAD : 0;
+                       head_read = BRCMF_FIRSTREAD;
                }
 
-               /* Satisfy length-alignment requirements */
-               if (rdlen & (ALIGNMENT - 1))
-                       rdlen = roundup(rdlen, ALIGNMENT);
-
-               if ((rdlen + BRCMF_FIRSTREAD) > MAX_RX_DATASZ) {
-                       /* Too long -- skip this frame */
-                       brcmf_dbg(ERROR, "too long: len %d rdlen %d\n",
-                                 len, rdlen);
-                       bus->sdiodev->bus_if->dstats.rx_errors++;
-                       bus->sdcnt.rx_toolong++;
-                       brcmf_sdbrcm_rxfail(bus, false, false);
-                       continue;
-               }
+               brcmf_pad(bus, &pad, &rd->len_left);
 
-               pkt = brcmu_pkt_buf_get_skb(rdlen +
-                                           BRCMF_FIRSTREAD + BRCMF_SDALIGN);
+               pkt = brcmu_pkt_buf_get_skb(rd->len_left + head_read +
+                                           BRCMF_SDALIGN);
                if (!pkt) {
                        /* Give up on data, request rtx of events */
-                       brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: rdlen %d chan %d\n",
-                                 rdlen, chan);
+                       brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed\n");
                        bus->sdiodev->bus_if->dstats.rx_dropped++;
-                       brcmf_sdbrcm_rxfail(bus, false, RETRYCHAN(chan));
+                       brcmf_sdbrcm_rxfail(bus, false,
+                                           RETRYCHAN(rd->channel));
                        continue;
                }
+               skb_pull(pkt, head_read);
+               pkt_align(pkt, rd->len_left, BRCMF_SDALIGN);
 
-               /* Leave room for what we already read, and align remainder */
-               skb_pull(pkt, BRCMF_FIRSTREAD);
-               pkt_align(pkt, rdlen, BRCMF_SDALIGN);
-
-               /* Read the remaining frame data */
                sdret = brcmf_sdcard_recv_pkt(bus->sdiodev, bus->sdiodev->sbwad,
                                              SDIO_FUNC_2, F2SYNC, pkt);
                bus->sdcnt.f2rxdata++;
 
                if (sdret < 0) {
-                       brcmf_dbg(ERROR, "read %d %s bytes failed: %d\n", rdlen,
-                                 ((chan == SDPCM_EVENT_CHANNEL) ? "event"
-                                  : ((chan == SDPCM_DATA_CHANNEL) ? "data"
-                                     : "test")), sdret);
+                       brcmf_dbg(ERROR, "read %d bytes from channel %d failed: %d\n",
+                                 rd->len, rd->channel, sdret);
                        brcmu_pkt_buf_free_skb(pkt);
                        bus->sdiodev->bus_if->dstats.rx_errors++;
-                       brcmf_sdbrcm_rxfail(bus, true, RETRYCHAN(chan));
+                       brcmf_sdbrcm_rxfail(bus, true,
+                                           RETRYCHAN(rd->channel));
                        continue;
                }
 
-               /* Copy the already-read portion */
-               skb_push(pkt, BRCMF_FIRSTREAD);
-               memcpy(pkt->data, bus->rxhdr, BRCMF_FIRSTREAD);
+               if (head_read) {
+                       skb_push(pkt, head_read);
+                       memcpy(pkt->data, bus->rxhdr, head_read);
+                       head_read = 0;
+               } else {
+                       memcpy(bus->rxhdr, pkt->data, SDPCM_HDRLEN);
+                       rd_new.seq_num = rd->seq_num;
+                       if (!brcmf_sdio_hdparser(bus, bus->rxhdr, &rd_new)) {
+                               rd->len = 0;
+                               brcmu_pkt_buf_free_skb(pkt);
+                       }
+                       bus->sdcnt.rx_readahead_cnt++;
+                       if (rd->len != roundup(rd_new.len, 16)) {
+                               brcmf_dbg(ERROR, "frame length mismatch:read %d, should be %d\n",
+                                         rd->len,
+                                         roundup(rd_new.len, 16) >> 4);
+                               rd->len = 0;
+                               brcmf_sdbrcm_rxfail(bus, true, true);
+                               brcmu_pkt_buf_free_skb(pkt);
+                               continue;
+                       }
+                       rd->len_nxtfrm = rd_new.len_nxtfrm;
+                       rd->channel = rd_new.channel;
+                       rd->dat_offset = rd_new.dat_offset;
+
+                       brcmf_dbg_hex_dump(!(BRCMF_BYTES_ON() &&
+                                            BRCMF_DATA_ON()) &&
+                                          BRCMF_HDRS_ON(),
+                                          bus->rxhdr, SDPCM_HDRLEN,
+                                          "RxHdr:\n");
+
+                       if (rd_new.channel == SDPCM_CONTROL_CHANNEL) {
+                               brcmf_dbg(ERROR, "readahead on control packet %d?\n",
+                                         rd_new.seq_num);
+                               /* Force retry w/normal header read */
+                               rd->len = 0;
+                               brcmf_sdbrcm_rxfail(bus, false, true);
+                               brcmu_pkt_buf_free_skb(pkt);
+                               continue;
+                       }
+               }
 
                brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_DATA_ON(),
-                                  pkt->data, len, "Rx Data:\n");
+                                  pkt->data, rd->len, "Rx Data:\n");
 
-deliver:
                /* Save superframe descriptor and allocate packet frame */
-               if (chan == SDPCM_GLOM_CHANNEL) {
+               if (rd->channel == SDPCM_GLOM_CHANNEL) {
                        if (SDPCM_GLOMDESC(&bus->rxhdr[SDPCM_FRAMETAG_LEN])) {
                                brcmf_dbg(GLOM, "glom descriptor, %d bytes:\n",
-                                         len);
+                                         rd->len);
                                brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
-                                                  pkt->data, len,
+                                                  pkt->data, rd->len,
                                                   "Glom Data:\n");
-                               __skb_trim(pkt, len);
+                               __skb_trim(pkt, rd->len);
                                skb_pull(pkt, SDPCM_HDRLEN);
                                bus->glomd = pkt;
                        } else {
@@ -1996,12 +1755,23 @@ deliver:
                                          "descriptor!\n", __func__);
                                brcmf_sdbrcm_rxfail(bus, false, false);
                        }
+                       /* prepare the descriptor for the next read */
+                       rd->len = rd->len_nxtfrm << 4;
+                       rd->len_nxtfrm = 0;
+                       /* treat all packets as events if we don't know */
+                       rd->channel = SDPCM_EVENT_CHANNEL;
                        continue;
                }
 
                /* Fill in packet len and prio, deliver upward */
-               __skb_trim(pkt, len);
-               skb_pull(pkt, doff);
+               __skb_trim(pkt, rd->len);
+               skb_pull(pkt, rd->dat_offset);
+
+               /* prepare the descriptor for the next read */
+               rd->len = rd->len_nxtfrm << 4;
+               rd->len_nxtfrm = 0;
+               /* treat all packets as events if we don't know */
+               rd->channel = SDPCM_EVENT_CHANNEL;
 
                if (pkt->len == 0) {
                        brcmu_pkt_buf_free_skb(pkt);
@@ -2019,17 +1789,17 @@ deliver:
                brcmf_rx_packet(bus->sdiodev->dev, ifidx, pkt);
                down(&bus->sdsem);
        }
+
        rxcount = maxframes - rxleft;
        /* Message if we hit the limit */
        if (!rxleft)
-               brcmf_dbg(DATA, "hit rx limit of %d frames\n",
-                         maxframes);
+               brcmf_dbg(DATA, "hit rx limit of %d frames\n", maxframes);
        else
                brcmf_dbg(DATA, "processed %d frames\n", rxcount);
        /* Back off rxseq if awaiting rtx, update rx_seq */
        if (bus->rxskip)
-               rxseq--;
-       bus->rx_seq = rxseq;
+               rd->seq_num--;
+       bus->rx_seq = rd->seq_num;
 
        return rxcount;
 }
@@ -2227,7 +1997,7 @@ static uint brcmf_sdbrcm_sendfromq(struct brcmf_sdio *bus, uint maxframes)
                        if (ret != 0)
                                break;
                        if (intstatus & bus->hostintmask)
-                               bus->ipend = true;
+                               atomic_set(&bus->ipend, 1);
                }
        }
 
@@ -2235,8 +2005,8 @@ static uint brcmf_sdbrcm_sendfromq(struct brcmf_sdio *bus, uint maxframes)
        if (bus->sdiodev->bus_if->drvr_up &&
            (bus->sdiodev->bus_if->state == BRCMF_BUS_DATA) &&
            bus->txoff && (pktq_len(&bus->txq) < TXLOW)) {
-               bus->txoff = OFF;
-               brcmf_txflowcontrol(bus->sdiodev->dev, 0, OFF);
+               bus->txoff = false;
+               brcmf_txflowblock(bus->sdiodev->dev, false);
        }
 
        return cnt;
@@ -2259,16 +2029,8 @@ static void brcmf_sdbrcm_bus_stop(struct device *dev)
                bus->watchdog_tsk = NULL;
        }
 
-       if (bus->dpc_tsk && bus->dpc_tsk != current) {
-               send_sig(SIGTERM, bus->dpc_tsk, 1);
-               kthread_stop(bus->dpc_tsk);
-               bus->dpc_tsk = NULL;
-       }
-
        down(&bus->sdsem);
 
-       bus_wake(bus);
-
        /* Enable clock for device interrupts */
        brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
 
@@ -2327,7 +2089,7 @@ static inline void brcmf_sdbrcm_clrintr(struct brcmf_sdio *bus)
        unsigned long flags;
 
        spin_lock_irqsave(&bus->sdiodev->irq_en_lock, flags);
-       if (!bus->sdiodev->irq_en && !bus->ipend) {
+       if (!bus->sdiodev->irq_en && !atomic_read(&bus->ipend)) {
                enable_irq(bus->sdiodev->irq);
                bus->sdiodev->irq_en = true;
        }
@@ -2339,21 +2101,69 @@ static inline void brcmf_sdbrcm_clrintr(struct brcmf_sdio *bus)
 }
 #endif         /* CONFIG_BRCMFMAC_SDIO_OOB */
 
-static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
+static inline void brcmf_sdbrcm_adddpctsk(struct brcmf_sdio *bus)
 {
-       u32 intstatus, newstatus = 0;
+       struct list_head *new_hd;
+       unsigned long flags;
+
+       if (in_interrupt())
+               new_hd = kzalloc(sizeof(struct list_head), GFP_ATOMIC);
+       else
+               new_hd = kzalloc(sizeof(struct list_head), GFP_KERNEL);
+       if (new_hd == NULL)
+               return;
+
+       spin_lock_irqsave(&bus->dpc_tl_lock, flags);
+       list_add_tail(new_hd, &bus->dpc_tsklst);
+       spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
+}
+
+static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
+{
+       u8 idx;
+       u32 addr;
+       unsigned long val;
+       int n, ret;
+
+       idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
+       addr = bus->ci->c_inf[idx].base +
+              offsetof(struct sdpcmd_regs, intstatus);
+
+       ret = brcmf_sdio_regrw_helper(bus->sdiodev, addr, &val, false);
+       bus->sdcnt.f1regdata++;
+       if (ret != 0)
+               val = 0;
+
+       val &= bus->hostintmask;
+       atomic_set(&bus->fcstate, !!(val & I_HMB_FC_STATE));
+
+       /* Clear interrupts */
+       if (val) {
+               ret = brcmf_sdio_regrw_helper(bus->sdiodev, addr, &val, true);
+               bus->sdcnt.f1regdata++;
+       }
+
+       if (ret) {
+               atomic_set(&bus->intstatus, 0);
+       } else if (val) {
+               for_each_set_bit(n, &val, 32)
+                       set_bit(n, (unsigned long *)&bus->intstatus.counter);
+       }
+
+       return ret;
+}
+
+static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
+{
+       u32 newstatus = 0;
+       unsigned long intstatus;
        uint rxlimit = bus->rxbound;    /* Rx frames to read before resched */
        uint txlimit = bus->txbound;    /* Tx frames to send before resched */
        uint framecnt = 0;      /* Temporary counter of tx/rx frames */
-       bool rxdone = true;     /* Flag for no more read data */
-       bool resched = false;   /* Flag indicating resched wanted */
-       int err;
+       int err = 0, n;
 
        brcmf_dbg(TRACE, "Enter\n");
 
-       /* Start with leftover status bits */
-       intstatus = bus->intstatus;
-
        down(&bus->sdsem);
 
        /* If waiting for HTAVAIL, check status */
@@ -2399,39 +2209,22 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
                                bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
                        }
                        bus->clkstate = CLK_AVAIL;
-               } else {
-                       goto clkwait;
                }
        }
 
-       bus_wake(bus);
-
        /* Make sure backplane clock is on */
        brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, true);
-       if (bus->clkstate == CLK_PENDING)
-               goto clkwait;
 
        /* Pending interrupt indicates new device status */
-       if (bus->ipend) {
-               bus->ipend = false;
-               err = r_sdreg32(bus, &newstatus,
-                               offsetof(struct sdpcmd_regs, intstatus));
-               bus->sdcnt.f1regdata++;
-               if (err != 0)
-                       newstatus = 0;
-               newstatus &= bus->hostintmask;
-               bus->fcstate = !!(newstatus & I_HMB_FC_STATE);
-               if (newstatus) {
-                       err = w_sdreg32(bus, newstatus,
-                                       offsetof(struct sdpcmd_regs,
-                                                intstatus));
-                       bus->sdcnt.f1regdata++;
-               }
+       if (atomic_read(&bus->ipend) > 0) {
+               atomic_set(&bus->ipend, 0);
+               sdio_claim_host(bus->sdiodev->func[1]);
+               err = brcmf_sdio_intr_rstatus(bus);
+               sdio_release_host(bus->sdiodev->func[1]);
        }
 
-       /* Merge new bits with previous */
-       intstatus |= newstatus;
-       bus->intstatus = 0;
+       /* Start with leftover status bits */
+       intstatus = atomic_xchg(&bus->intstatus, 0);
 
        /* Handle flow-control change: read new state in case our ack
         * crossed another change interrupt.  If change still set, assume
@@ -2445,8 +2238,8 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
                err = r_sdreg32(bus, &newstatus,
                                offsetof(struct sdpcmd_regs, intstatus));
                bus->sdcnt.f1regdata += 2;
-               bus->fcstate =
-                   !!(newstatus & (I_HMB_FC_STATE | I_HMB_FC_CHANGE));
+               atomic_set(&bus->fcstate,
+                          !!(newstatus & (I_HMB_FC_STATE | I_HMB_FC_CHANGE)));
                intstatus |= (newstatus & bus->hostintmask);
        }
 
@@ -2483,32 +2276,34 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
                intstatus &= ~I_HMB_FRAME_IND;
 
        /* On frame indication, read available frames */
-       if (PKT_AVAILABLE()) {
-               framecnt = brcmf_sdbrcm_readframes(bus, rxlimit, &rxdone);
-               if (rxdone || bus->rxskip)
+       if (PKT_AVAILABLE() && bus->clkstate == CLK_AVAIL) {
+               framecnt = brcmf_sdio_readframes(bus, rxlimit);
+               if (!bus->rxpending)
                        intstatus &= ~I_HMB_FRAME_IND;
                rxlimit -= min(framecnt, rxlimit);
        }
 
        /* Keep still-pending events for next scheduling */
-       bus->intstatus = intstatus;
+       if (intstatus) {
+               for_each_set_bit(n, &intstatus, 32)
+                       set_bit(n, (unsigned long *)&bus->intstatus.counter);
+       }
 
-clkwait:
        brcmf_sdbrcm_clrintr(bus);
 
        if (data_ok(bus) && bus->ctrl_frame_stat &&
                (bus->clkstate == CLK_AVAIL)) {
-               int ret, i;
+               int i;
 
-               ret = brcmf_sdcard_send_buf(bus->sdiodev, bus->sdiodev->sbwad,
+               err = brcmf_sdcard_send_buf(bus->sdiodev, bus->sdiodev->sbwad,
                        SDIO_FUNC_2, F2SYNC, bus->ctrl_frame_buf,
                        (u32) bus->ctrl_frame_len);
 
-               if (ret < 0) {
+               if (err < 0) {
                        /* On failure, abort the command and
                                terminate the frame */
                        brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n",
-                                 ret);
+                                 err);
                        bus->sdcnt.tx_sderrs++;
 
                        brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2);
@@ -2530,42 +2325,34 @@ clkwait:
                                        break;
                        }
 
-               }
-               if (ret == 0)
+               } else {
                        bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;
-
-               brcmf_dbg(INFO, "Return_dpc value is : %d\n", ret);
+               }
                bus->ctrl_frame_stat = false;
                brcmf_sdbrcm_wait_event_wakeup(bus);
        }
        /* Send queued frames (limit 1 if rx may still be pending) */
-       else if ((bus->clkstate == CLK_AVAIL) && !bus->fcstate &&
+       else if ((bus->clkstate == CLK_AVAIL) && !atomic_read(&bus->fcstate) &&
                 brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) && txlimit
                 && data_ok(bus)) {
-               framecnt = rxdone ? txlimit : min(txlimit, bus->txminmax);
+               framecnt = bus->rxpending ? min(txlimit, bus->txminmax) :
+                                           txlimit;
                framecnt = brcmf_sdbrcm_sendfromq(bus, framecnt);
                txlimit -= framecnt;
        }
 
-       /* Resched if events or tx frames are pending,
-                else await next interrupt */
-       /* On failed register access, all bets are off:
-                no resched or interrupts */
        if ((bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN) || (err != 0)) {
                brcmf_dbg(ERROR, "failed backplane access over SDIO, halting operation\n");
                bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
-               bus->intstatus = 0;
-       } else if (bus->clkstate == CLK_PENDING) {
-               brcmf_dbg(INFO, "rescheduled due to CLK_PENDING awaiting I_CHIPACTIVE interrupt\n");
-               resched = true;
-       } else if (bus->intstatus || bus->ipend ||
-               (!bus->fcstate && brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol)
-                && data_ok(bus)) || PKT_AVAILABLE()) {
-               resched = true;
+               atomic_set(&bus->intstatus, 0);
+       } else if (atomic_read(&bus->intstatus) ||
+                  atomic_read(&bus->ipend) > 0 ||
+                  (!atomic_read(&bus->fcstate) &&
+                   brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) &&
+                   data_ok(bus)) || PKT_AVAILABLE()) {
+               brcmf_sdbrcm_adddpctsk(bus);
        }
 
-       bus->dpc_sched = resched;
-
        /* If we're done for now, turn off clock request. */
        if ((bus->clkstate != CLK_PENDING)
            && bus->idletime == BRCMF_IDLE_IMMEDIATE) {
@@ -2574,65 +2361,6 @@ clkwait:
        }
 
        up(&bus->sdsem);
-
-       return resched;
-}
-
-static inline void brcmf_sdbrcm_adddpctsk(struct brcmf_sdio *bus)
-{
-       struct list_head *new_hd;
-       unsigned long flags;
-
-       if (in_interrupt())
-               new_hd = kzalloc(sizeof(struct list_head), GFP_ATOMIC);
-       else
-               new_hd = kzalloc(sizeof(struct list_head), GFP_KERNEL);
-       if (new_hd == NULL)
-               return;
-
-       spin_lock_irqsave(&bus->dpc_tl_lock, flags);
-       list_add_tail(new_hd, &bus->dpc_tsklst);
-       spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
-}
-
-static int brcmf_sdbrcm_dpc_thread(void *data)
-{
-       struct brcmf_sdio *bus = (struct brcmf_sdio *) data;
-       struct list_head *cur_hd, *tmp_hd;
-       unsigned long flags;
-
-       allow_signal(SIGTERM);
-       /* Run until signal received */
-       while (1) {
-               if (kthread_should_stop())
-                       break;
-
-               if (list_empty(&bus->dpc_tsklst))
-                       if (wait_for_completion_interruptible(&bus->dpc_wait))
-                               break;
-
-               spin_lock_irqsave(&bus->dpc_tl_lock, flags);
-               list_for_each_safe(cur_hd, tmp_hd, &bus->dpc_tsklst) {
-                       spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
-
-                       if (bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN) {
-                               /* after stopping the bus, exit thread */
-                               brcmf_sdbrcm_bus_stop(bus->sdiodev->dev);
-                               bus->dpc_tsk = NULL;
-                               spin_lock_irqsave(&bus->dpc_tl_lock, flags);
-                               break;
-                       }
-
-                       if (brcmf_sdbrcm_dpc(bus))
-                               brcmf_sdbrcm_adddpctsk(bus);
-
-                       spin_lock_irqsave(&bus->dpc_tl_lock, flags);
-                       list_del(cur_hd);
-                       kfree(cur_hd);
-               }
-               spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
-       }
-       return 0;
 }
 
 static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
@@ -2642,6 +2370,7 @@ static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
        struct brcmf_bus *bus_if = dev_get_drvdata(dev);
        struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
        struct brcmf_sdio *bus = sdiodev->bus;
+       unsigned long flags;
 
        brcmf_dbg(TRACE, "Enter\n");
 
@@ -2672,21 +2401,23 @@ static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
        spin_unlock_bh(&bus->txqlock);
 
        if (pktq_len(&bus->txq) >= TXHI) {
-               bus->txoff = ON;
-               brcmf_txflowcontrol(bus->sdiodev->dev, 0, ON);
+               bus->txoff = true;
+               brcmf_txflowblock(bus->sdiodev->dev, true);
        }
 
 #ifdef DEBUG
        if (pktq_plen(&bus->txq, prec) > qcount[prec])
                qcount[prec] = pktq_plen(&bus->txq, prec);
 #endif
-       /* Schedule DPC if needed to send queued packet(s) */
-       if (!bus->dpc_sched) {
-               bus->dpc_sched = true;
-               if (bus->dpc_tsk) {
-                       brcmf_sdbrcm_adddpctsk(bus);
-                       complete(&bus->dpc_wait);
-               }
+
+       spin_lock_irqsave(&bus->dpc_tl_lock, flags);
+       if (list_empty(&bus->dpc_tsklst)) {
+               spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
+
+               brcmf_sdbrcm_adddpctsk(bus);
+               queue_work(bus->brcmf_wq, &bus->datawork);
+       } else {
+               spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
        }
 
        return ret;
@@ -2707,6 +2438,8 @@ brcmf_sdbrcm_membytes(struct brcmf_sdio *bus, bool write, u32 address, u8 *data,
        else
                dsize = size;
 
+       sdio_claim_host(bus->sdiodev->func[1]);
+
        /* Set the backplane window to include the start address */
        bcmerror = brcmf_sdcard_set_sbaddr_window(bus->sdiodev, address);
        if (bcmerror) {
@@ -2748,6 +2481,8 @@ xfer_done:
                brcmf_dbg(ERROR, "FAILED to set window back to 0x%x\n",
                          bus->sdiodev->sbwad);
 
+       sdio_release_host(bus->sdiodev->func[1]);
+
        return bcmerror;
 }
 
@@ -2882,6 +2617,7 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
        struct brcmf_bus *bus_if = dev_get_drvdata(dev);
        struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
        struct brcmf_sdio *bus = sdiodev->bus;
+       unsigned long flags;
 
        brcmf_dbg(TRACE, "Enter\n");
 
@@ -2918,8 +2654,6 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
        /* Need to lock here to protect txseq and SDIO tx calls */
        down(&bus->sdsem);
 
-       bus_wake(bus);
-
        /* Make sure backplane clock is on */
        brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
 
@@ -2967,9 +2701,15 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
                } while (ret < 0 && retries++ < TXRETRIES);
        }
 
-       if ((bus->idletime == BRCMF_IDLE_IMMEDIATE) && !bus->dpc_sched) {
+       spin_lock_irqsave(&bus->dpc_tl_lock, flags);
+       if ((bus->idletime == BRCMF_IDLE_IMMEDIATE) &&
+           list_empty(&bus->dpc_tsklst)) {
+               spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
+
                bus->activity = false;
                brcmf_sdbrcm_clkctl(bus, CLK_NONE, true);
+       } else {
+               spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
        }
 
        up(&bus->sdsem);
@@ -3774,23 +3514,20 @@ void brcmf_sdbrcm_isr(void *arg)
        }
        /* Count the interrupt call */
        bus->sdcnt.intrcount++;
-       bus->ipend = true;
-
-       /* Shouldn't get this interrupt if we're sleeping? */
-       if (bus->sleeping) {
-               brcmf_dbg(ERROR, "INTERRUPT WHILE SLEEPING??\n");
-               return;
-       }
+       if (in_interrupt())
+               atomic_set(&bus->ipend, 1);
+       else
+               if (brcmf_sdio_intr_rstatus(bus)) {
+                       brcmf_dbg(ERROR, "failed backplane access\n");
+                       bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
+               }
 
        /* Disable additional interrupts (is this needed now)? */
        if (!bus->intr)
                brcmf_dbg(ERROR, "isr w/o interrupt configured!\n");
 
-       bus->dpc_sched = true;
-       if (bus->dpc_tsk) {
-               brcmf_sdbrcm_adddpctsk(bus);
-               complete(&bus->dpc_wait);
-       }
+       brcmf_sdbrcm_adddpctsk(bus);
+       queue_work(bus->brcmf_wq, &bus->datawork);
 }
 
 static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
@@ -3798,13 +3535,10 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
 #ifdef DEBUG
        struct brcmf_bus *bus_if = dev_get_drvdata(bus->sdiodev->dev);
 #endif /* DEBUG */
+       unsigned long flags;
 
        brcmf_dbg(TIMER, "Enter\n");
 
-       /* Ignore the timer if simulating bus down */
-       if (bus->sleeping)
-               return false;
-
        down(&bus->sdsem);
 
        /* Poll period: check device if appropriate. */
@@ -3818,27 +3552,30 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
                if (!bus->intr ||
                    (bus->sdcnt.intrcount == bus->sdcnt.lastintrs)) {
 
-                       if (!bus->dpc_sched) {
+                       spin_lock_irqsave(&bus->dpc_tl_lock, flags);
+                       if (list_empty(&bus->dpc_tsklst)) {
                                u8 devpend;
+                               spin_unlock_irqrestore(&bus->dpc_tl_lock,
+                                                      flags);
                                devpend = brcmf_sdio_regrb(bus->sdiodev,
                                                           SDIO_CCCR_INTx,
                                                           NULL);
                                intstatus =
                                    devpend & (INTR_STATUS_FUNC1 |
                                               INTR_STATUS_FUNC2);
+                       } else {
+                               spin_unlock_irqrestore(&bus->dpc_tl_lock,
+                                                      flags);
                        }
 
                        /* If there is something, make like the ISR and
                                 schedule the DPC */
                        if (intstatus) {
                                bus->sdcnt.pollcnt++;
-                               bus->ipend = true;
+                               atomic_set(&bus->ipend, 1);
 
-                               bus->dpc_sched = true;
-                               if (bus->dpc_tsk) {
-                                       brcmf_sdbrcm_adddpctsk(bus);
-                                       complete(&bus->dpc_wait);
-                               }
+                               brcmf_sdbrcm_adddpctsk(bus);
+                               queue_work(bus->brcmf_wq, &bus->datawork);
                        }
                }
 
@@ -3876,11 +3613,13 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
 
        up(&bus->sdsem);
 
-       return bus->ipend;
+       return (atomic_read(&bus->ipend) > 0);
 }
 
 static bool brcmf_sdbrcm_chipmatch(u16 chipid)
 {
+       if (chipid == BCM43241_CHIP_ID)
+               return true;
        if (chipid == BCM4329_CHIP_ID)
                return true;
        if (chipid == BCM4330_CHIP_ID)
@@ -3890,6 +3629,26 @@ static bool brcmf_sdbrcm_chipmatch(u16 chipid)
        return false;
 }
 
+static void brcmf_sdio_dataworker(struct work_struct *work)
+{
+       struct brcmf_sdio *bus = container_of(work, struct brcmf_sdio,
+                                             datawork);
+       struct list_head *cur_hd, *tmp_hd;
+       unsigned long flags;
+
+       spin_lock_irqsave(&bus->dpc_tl_lock, flags);
+       list_for_each_safe(cur_hd, tmp_hd, &bus->dpc_tsklst) {
+               spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
+
+               brcmf_sdbrcm_dpc(bus);
+
+               spin_lock_irqsave(&bus->dpc_tl_lock, flags);
+               list_del(cur_hd);
+               kfree(cur_hd);
+       }
+       spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
+}
+
 static void brcmf_sdbrcm_release_malloc(struct brcmf_sdio *bus)
 {
        brcmf_dbg(TRACE, "Enter\n");
@@ -4022,7 +3781,6 @@ static bool brcmf_sdbrcm_probe_init(struct brcmf_sdio *bus)
                         SDIO_FUNC_ENABLE_1, NULL);
 
        bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
-       bus->sleeping = false;
        bus->rxflow = false;
 
        /* Done with backplane-dependent accesses, can drop clock... */
@@ -4103,6 +3861,9 @@ static void brcmf_sdbrcm_release(struct brcmf_sdio *bus)
                /* De-register interrupt handler */
                brcmf_sdio_intr_unregister(bus->sdiodev);
 
+               cancel_work_sync(&bus->datawork);
+               destroy_workqueue(bus->brcmf_wq);
+
                if (bus->sdiodev->bus_if->drvr) {
                        brcmf_detach(bus->sdiodev->dev);
                        brcmf_sdbrcm_release_dongle(bus);
@@ -4142,8 +3903,6 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
        bus->rxbound = BRCMF_RXBOUND;
        bus->txminmax = BRCMF_TXMINMAX;
        bus->tx_seq = SDPCM_SEQUENCE_WRAP - 1;
-       bus->usebufpool = false;        /* Use bufpool if allocated,
-                                        else use locally malloced rxbuf */
 
        /* attempt to attach to the dongle */
        if (!(brcmf_sdbrcm_probe_attach(bus, regsva))) {
@@ -4155,6 +3914,13 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
        init_waitqueue_head(&bus->ctrl_wait);
        init_waitqueue_head(&bus->dcmd_resp_wait);
 
+       bus->brcmf_wq = create_singlethread_workqueue("brcmf_wq");
+       if (bus->brcmf_wq == NULL) {
+               brcmf_dbg(ERROR, "insufficient memory to create txworkqueue\n");
+               goto fail;
+       }
+       INIT_WORK(&bus->datawork, brcmf_sdio_dataworker);
+
        /* Set up the watchdog timer */
        init_timer(&bus->timer);
        bus->timer.data = (unsigned long)bus;
@@ -4172,15 +3938,8 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
                bus->watchdog_tsk = NULL;
        }
        /* Initialize DPC thread */
-       init_completion(&bus->dpc_wait);
        INIT_LIST_HEAD(&bus->dpc_tsklst);
        spin_lock_init(&bus->dpc_tl_lock);
-       bus->dpc_tsk = kthread_run(brcmf_sdbrcm_dpc_thread,
-                                  bus, "brcmf_dpc");
-       if (IS_ERR(bus->dpc_tsk)) {
-               pr_warn("brcmf_dpc thread failed to start\n");
-               bus->dpc_tsk = NULL;
-       }
 
        /* Assign bus interface call back */
        bus->sdiodev->bus_if->brcmf_bus_stop = brcmf_sdbrcm_bus_stop;
index 58155e23d220fecc5f985be109901bbfce42070b..9434440bbc6536054b592588f9ef4d1bce73bd22 100644 (file)
@@ -377,6 +377,23 @@ static int brcmf_sdio_chip_recognition(struct brcmf_sdio_dev *sdiodev,
 
        /* Address of cores for new chips should be added here */
        switch (ci->chip) {
+       case BCM43241_CHIP_ID:
+               ci->c_inf[0].wrapbase = 0x18100000;
+               ci->c_inf[0].cib = 0x2a084411;
+               ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
+               ci->c_inf[1].base = 0x18002000;
+               ci->c_inf[1].wrapbase = 0x18102000;
+               ci->c_inf[1].cib = 0x0e004211;
+               ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
+               ci->c_inf[2].base = 0x18004000;
+               ci->c_inf[2].wrapbase = 0x18104000;
+               ci->c_inf[2].cib = 0x14080401;
+               ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
+               ci->c_inf[3].base = 0x18003000;
+               ci->c_inf[3].wrapbase = 0x18103000;
+               ci->c_inf[3].cib = 0x07004211;
+               ci->ramsize = 0x90000;
+               break;
        case BCM4329_CHIP_ID:
                ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
                ci->c_inf[1].base = BCM4329_CORE_BUS_BASE;
index 29bf78d264e096d9b81cb2efd3ac527a34d80d61..0d30afd8c672affe82ec26cac18ba5e0138d6ac2 100644 (file)
@@ -174,6 +174,8 @@ extern void brcmf_sdio_regwb(struct brcmf_sdio_dev *sdiodev, u32 addr,
                             u8 data, int *ret);
 extern void brcmf_sdio_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr,
                             u32 data, int *ret);
+extern int brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
+                                  void *data, bool write);
 
 /* Buffer transfer to/from device (client) core via cmd53.
  *   fn:       function number
index 58f89fa9c9f8a218ed29cfb93ad253c41471c648..a2b4b1e71017230b6d7ab262f52b45c4fa353bd7 100644 (file)
@@ -66,7 +66,9 @@
 #define BRCMF_USB_CBCTL_READ   1
 #define BRCMF_USB_MAX_PKT_SIZE 1600
 
+#define BRCMF_USB_43143_FW_NAME        "brcm/brcmfmac43143.bin"
 #define BRCMF_USB_43236_FW_NAME        "brcm/brcmfmac43236b.bin"
+#define BRCMF_USB_43242_FW_NAME        "brcm/brcmfmac43242a.bin"
 
 enum usbdev_suspend_state {
        USBOS_SUSPEND_STATE_DEVICE_ACTIVE = 0, /* Device is busy, won't allow
@@ -78,25 +80,13 @@ enum usbdev_suspend_state {
        USBOS_SUSPEND_STATE_SUSPENDED   /* Device suspended */
 };
 
-struct brcmf_usb_probe_info {
-       void *usbdev_info;
-       struct usb_device *usb; /* USB device pointer from OS */
-       uint rx_pipe, tx_pipe, intr_pipe, rx_pipe2;
-       int intr_size; /* Size of interrupt message */
-       int interval;  /* Interrupt polling interval */
-       int vid;
-       int pid;
-       enum usb_device_speed device_speed;
-       enum usbdev_suspend_state suspend_state;
-       struct usb_interface *intf;
-};
-static struct brcmf_usb_probe_info usbdev_probe_info;
-
 struct brcmf_usb_image {
-       void *data;
-       u32 len;
+       struct list_head list;
+       s8 *fwname;
+       u8 *image;
+       int image_len;
 };
-static struct brcmf_usb_image g_image = { NULL, 0 };
+static struct list_head fw_image_list;
 
 struct intr_transfer_buf {
        u32 notification;
@@ -117,9 +107,8 @@ struct brcmf_usbdev_info {
        int rx_low_watermark;
        int tx_low_watermark;
        int tx_high_watermark;
-       bool txoff;
-       bool rxoff;
-       bool txoverride;
+       int tx_freecount;
+       bool tx_flowblock;
 
        struct brcmf_usbreq *tx_reqs;
        struct brcmf_usbreq *rx_reqs;
@@ -133,7 +122,6 @@ struct brcmf_usbdev_info {
 
        struct usb_device *usbdev;
        struct device *dev;
-       enum usb_device_speed  device_speed;
 
        int ctl_in_pipe, ctl_out_pipe;
        struct urb *ctl_urb; /* URB for control endpoint */
@@ -146,16 +134,11 @@ struct brcmf_usbdev_info {
        wait_queue_head_t ctrl_wait;
        ulong ctl_op;
 
-       bool rxctl_deferrespok;
-
        struct urb *bulk_urb; /* used for FW download */
        struct urb *intr_urb; /* URB for interrupt endpoint */
        int intr_size;          /* Size of interrupt message */
        int interval;           /* Interrupt polling interval */
        struct intr_transfer_buf intr; /* Data buffer for interrupt endpoint */
-
-       struct brcmf_usb_probe_info probe_info;
-
 };
 
 static void brcmf_usb_rx_refill(struct brcmf_usbdev_info *devinfo,
@@ -177,48 +160,17 @@ static struct brcmf_usbdev_info *brcmf_usb_get_businfo(struct device *dev)
        return brcmf_usb_get_buspub(dev)->devinfo;
 }
 
-#if 0
-static void
-brcmf_usb_txflowcontrol(struct brcmf_usbdev_info *devinfo, bool onoff)
+static int brcmf_usb_ioctl_resp_wait(struct brcmf_usbdev_info *devinfo)
 {
-       dhd_txflowcontrol(devinfo->bus_pub.netdev, 0, onoff);
+       return wait_event_timeout(devinfo->ioctl_resp_wait,
+                                 devinfo->ctl_completed,
+                                 msecs_to_jiffies(IOCTL_RESP_TIMEOUT));
 }
-#endif
 
-static int brcmf_usb_ioctl_resp_wait(struct brcmf_usbdev_info *devinfo,
-        uint *condition, bool *pending)
-{
-       DECLARE_WAITQUEUE(wait, current);
-       int timeout = IOCTL_RESP_TIMEOUT;
-
-       /* Convert timeout in millsecond to jiffies */
-       timeout = msecs_to_jiffies(timeout);
-       /* Wait until control frame is available */
-       add_wait_queue(&devinfo->ioctl_resp_wait, &wait);
-       set_current_state(TASK_INTERRUPTIBLE);
-
-       smp_mb();
-       while (!(*condition) && (!signal_pending(current) && timeout)) {
-               timeout = schedule_timeout(timeout);
-               /* Wait until control frame is available */
-               smp_mb();
-       }
-
-       if (signal_pending(current))
-               *pending = true;
-
-       set_current_state(TASK_RUNNING);
-       remove_wait_queue(&devinfo->ioctl_resp_wait, &wait);
-
-       return timeout;
-}
-
-static int brcmf_usb_ioctl_resp_wake(struct brcmf_usbdev_info *devinfo)
+static void brcmf_usb_ioctl_resp_wake(struct brcmf_usbdev_info *devinfo)
 {
        if (waitqueue_active(&devinfo->ioctl_resp_wait))
-               wake_up_interruptible(&devinfo->ioctl_resp_wait);
-
-       return 0;
+               wake_up(&devinfo->ioctl_resp_wait);
 }
 
 static void
@@ -324,17 +276,9 @@ brcmf_usb_recv_ctl(struct brcmf_usbdev_info *devinfo, u8 *buf, int len)
        devinfo->ctl_read.wLength = cpu_to_le16p(&size);
        devinfo->ctl_urb->transfer_buffer_length = size;
 
-       if (devinfo->rxctl_deferrespok) {
-               /* BMAC model */
-               devinfo->ctl_read.bRequestType = USB_DIR_IN
-                       | USB_TYPE_VENDOR | USB_RECIP_INTERFACE;
-               devinfo->ctl_read.bRequest = DL_DEFER_RESP_OK;
-       } else {
-               /* full dongle model */
-               devinfo->ctl_read.bRequestType = USB_DIR_IN
-                       | USB_TYPE_CLASS | USB_RECIP_INTERFACE;
-               devinfo->ctl_read.bRequest = 1;
-       }
+       devinfo->ctl_read.bRequestType = USB_DIR_IN
+               | USB_TYPE_CLASS | USB_RECIP_INTERFACE;
+       devinfo->ctl_read.bRequest = 1;
 
        usb_fill_control_urb(devinfo->ctl_urb,
                devinfo->usbdev,
@@ -355,7 +299,6 @@ static int brcmf_usb_tx_ctlpkt(struct device *dev, u8 *buf, u32 len)
 {
        int err = 0;
        int timeout = 0;
-       bool pending;
        struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev);
 
        if (devinfo->bus_pub.state != BCMFMAC_USB_STATE_UP) {
@@ -366,15 +309,14 @@ static int brcmf_usb_tx_ctlpkt(struct device *dev, u8 *buf, u32 len)
        if (test_and_set_bit(0, &devinfo->ctl_op))
                return -EIO;
 
+       devinfo->ctl_completed = false;
        err = brcmf_usb_send_ctl(devinfo, buf, len);
        if (err) {
                brcmf_dbg(ERROR, "fail %d bytes: %d\n", err, len);
+               clear_bit(0, &devinfo->ctl_op);
                return err;
        }
-
-       devinfo->ctl_completed = false;
-       timeout = brcmf_usb_ioctl_resp_wait(devinfo, &devinfo->ctl_completed,
-                                           &pending);
+       timeout = brcmf_usb_ioctl_resp_wait(devinfo);
        clear_bit(0, &devinfo->ctl_op);
        if (!timeout) {
                brcmf_dbg(ERROR, "Txctl wait timed out\n");
@@ -387,7 +329,6 @@ static int brcmf_usb_rx_ctlpkt(struct device *dev, u8 *buf, u32 len)
 {
        int err = 0;
        int timeout = 0;
-       bool pending;
        struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev);
 
        if (devinfo->bus_pub.state != BCMFMAC_USB_STATE_UP) {
@@ -397,14 +338,14 @@ static int brcmf_usb_rx_ctlpkt(struct device *dev, u8 *buf, u32 len)
        if (test_and_set_bit(0, &devinfo->ctl_op))
                return -EIO;
 
+       devinfo->ctl_completed = false;
        err = brcmf_usb_recv_ctl(devinfo, buf, len);
        if (err) {
                brcmf_dbg(ERROR, "fail %d bytes: %d\n", err, len);
+               clear_bit(0, &devinfo->ctl_op);
                return err;
        }
-       devinfo->ctl_completed = false;
-       timeout = brcmf_usb_ioctl_resp_wait(devinfo, &devinfo->ctl_completed,
-                                           &pending);
+       timeout = brcmf_usb_ioctl_resp_wait(devinfo);
        err = devinfo->ctl_urb_status;
        clear_bit(0, &devinfo->ctl_op);
        if (!timeout) {
@@ -418,7 +359,7 @@ static int brcmf_usb_rx_ctlpkt(struct device *dev, u8 *buf, u32 len)
 }
 
 static struct brcmf_usbreq *brcmf_usb_deq(struct brcmf_usbdev_info *devinfo,
-                                         struct list_head *q)
+                                         struct list_head *q, int *counter)
 {
        unsigned long flags;
        struct brcmf_usbreq  *req;
@@ -429,17 +370,22 @@ static struct brcmf_usbreq *brcmf_usb_deq(struct brcmf_usbdev_info *devinfo,
        }
        req = list_entry(q->next, struct brcmf_usbreq, list);
        list_del_init(q->next);
+       if (counter)
+               (*counter)--;
        spin_unlock_irqrestore(&devinfo->qlock, flags);
        return req;
 
 }
 
 static void brcmf_usb_enq(struct brcmf_usbdev_info *devinfo,
-                         struct list_head *q, struct brcmf_usbreq *req)
+                         struct list_head *q, struct brcmf_usbreq *req,
+                         int *counter)
 {
        unsigned long flags;
        spin_lock_irqsave(&devinfo->qlock, flags);
        list_add_tail(&req->list, q);
+       if (counter)
+               (*counter)++;
        spin_unlock_irqrestore(&devinfo->qlock, flags);
 }
 
@@ -519,10 +465,16 @@ static void brcmf_usb_tx_complete(struct urb *urb)
        else
                devinfo->bus_pub.bus->dstats.tx_errors++;
 
+       brcmf_txcomplete(devinfo->dev, req->skb, urb->status == 0);
+
        brcmu_pkt_buf_free_skb(req->skb);
        req->skb = NULL;
-       brcmf_usb_enq(devinfo, &devinfo->tx_freeq, req);
-
+       brcmf_usb_enq(devinfo, &devinfo->tx_freeq, req, &devinfo->tx_freecount);
+       if (devinfo->tx_freecount > devinfo->tx_high_watermark &&
+               devinfo->tx_flowblock) {
+               brcmf_txflowblock(devinfo->dev, false);
+               devinfo->tx_flowblock = false;
+       }
 }
 
 static void brcmf_usb_rx_complete(struct urb *urb)
@@ -541,7 +493,7 @@ static void brcmf_usb_rx_complete(struct urb *urb)
        } else {
                devinfo->bus_pub.bus->dstats.rx_errors++;
                brcmu_pkt_buf_free_skb(skb);
-               brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req);
+               brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req, NULL);
                return;
        }
 
@@ -550,15 +502,13 @@ static void brcmf_usb_rx_complete(struct urb *urb)
                if (brcmf_proto_hdrpull(devinfo->dev, &ifidx, skb) != 0) {
                        brcmf_dbg(ERROR, "rx protocol error\n");
                        brcmu_pkt_buf_free_skb(skb);
-                       brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req);
                        devinfo->bus_pub.bus->dstats.rx_errors++;
-               } else {
+               } else
                        brcmf_rx_packet(devinfo->dev, ifidx, skb);
-                       brcmf_usb_rx_refill(devinfo, req);
-               }
+               brcmf_usb_rx_refill(devinfo, req);
        } else {
                brcmu_pkt_buf_free_skb(skb);
-               brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req);
+               brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req, NULL);
        }
        return;
 
@@ -575,7 +525,7 @@ static void brcmf_usb_rx_refill(struct brcmf_usbdev_info *devinfo,
 
        skb = dev_alloc_skb(devinfo->bus_pub.bus_mtu);
        if (!skb) {
-               brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req);
+               brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req, NULL);
                return;
        }
        req->skb = skb;
@@ -584,14 +534,14 @@ static void brcmf_usb_rx_refill(struct brcmf_usbdev_info *devinfo,
                          skb->data, skb_tailroom(skb), brcmf_usb_rx_complete,
                          req);
        req->devinfo = devinfo;
-       brcmf_usb_enq(devinfo, &devinfo->rx_postq, req);
+       brcmf_usb_enq(devinfo, &devinfo->rx_postq, req, NULL);
 
        ret = usb_submit_urb(req->urb, GFP_ATOMIC);
        if (ret) {
                brcmf_usb_del_fromq(devinfo, req);
                brcmu_pkt_buf_free_skb(req->skb);
                req->skb = NULL;
-               brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req);
+               brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req, NULL);
        }
        return;
 }
@@ -604,7 +554,7 @@ static void brcmf_usb_rx_fill_all(struct brcmf_usbdev_info *devinfo)
                brcmf_dbg(ERROR, "bus is not up\n");
                return;
        }
-       while ((req = brcmf_usb_deq(devinfo, &devinfo->rx_freeq)) != NULL)
+       while ((req = brcmf_usb_deq(devinfo, &devinfo->rx_freeq, NULL)) != NULL)
                brcmf_usb_rx_refill(devinfo, req);
 }
 
@@ -682,7 +632,8 @@ static int brcmf_usb_tx(struct device *dev, struct sk_buff *skb)
                return -EIO;
        }
 
-       req = brcmf_usb_deq(devinfo, &devinfo->tx_freeq);
+       req = brcmf_usb_deq(devinfo, &devinfo->tx_freeq,
+                                       &devinfo->tx_freecount);
        if (!req) {
                brcmu_pkt_buf_free_skb(skb);
                brcmf_dbg(ERROR, "no req to send\n");
@@ -694,14 +645,21 @@ static int brcmf_usb_tx(struct device *dev, struct sk_buff *skb)
        usb_fill_bulk_urb(req->urb, devinfo->usbdev, devinfo->tx_pipe,
                          skb->data, skb->len, brcmf_usb_tx_complete, req);
        req->urb->transfer_flags |= URB_ZERO_PACKET;
-       brcmf_usb_enq(devinfo, &devinfo->tx_postq, req);
+       brcmf_usb_enq(devinfo, &devinfo->tx_postq, req, NULL);
        ret = usb_submit_urb(req->urb, GFP_ATOMIC);
        if (ret) {
                brcmf_dbg(ERROR, "brcmf_usb_tx usb_submit_urb FAILED\n");
                brcmf_usb_del_fromq(devinfo, req);
                brcmu_pkt_buf_free_skb(req->skb);
                req->skb = NULL;
-               brcmf_usb_enq(devinfo, &devinfo->tx_freeq, req);
+               brcmf_usb_enq(devinfo, &devinfo->tx_freeq, req,
+                                               &devinfo->tx_freecount);
+       } else {
+               if (devinfo->tx_freecount < devinfo->tx_low_watermark &&
+                       !devinfo->tx_flowblock) {
+                       brcmf_txflowblock(dev, true);
+                       devinfo->tx_flowblock = true;
+               }
        }
 
        return ret;
@@ -1112,10 +1070,14 @@ static int brcmf_usb_dlrun(struct brcmf_usbdev_info *devinfo)
 static bool brcmf_usb_chip_support(int chipid, int chiprev)
 {
        switch(chipid) {
+       case 43143:
+               return true;
        case 43235:
        case 43236:
        case 43238:
                return (chiprev == 3);
+       case 43242:
+               return true;
        default:
                break;
        }
@@ -1154,17 +1116,10 @@ brcmf_usb_fw_download(struct brcmf_usbdev_info *devinfo)
 }
 
 
-static void brcmf_usb_detach(const struct brcmf_usbdev *bus_pub)
+static void brcmf_usb_detach(struct brcmf_usbdev_info *devinfo)
 {
-       struct brcmf_usbdev_info *devinfo =
-               (struct brcmf_usbdev_info *)bus_pub;
-
        brcmf_dbg(TRACE, "devinfo %p\n", devinfo);
 
-       /* store the image globally */
-       g_image.data = devinfo->image;
-       g_image.len = devinfo->image_len;
-
        /* free the URBS */
        brcmf_usb_free_q(&devinfo->rx_freeq, false);
        brcmf_usb_free_q(&devinfo->tx_freeq, false);
@@ -1175,7 +1130,6 @@ static void brcmf_usb_detach(const struct brcmf_usbdev *bus_pub)
 
        kfree(devinfo->tx_reqs);
        kfree(devinfo->rx_reqs);
-       kfree(devinfo);
 }
 
 #define TRX_MAGIC       0x30524448      /* "HDR0" */
@@ -1217,19 +1171,34 @@ static int brcmf_usb_get_fw(struct brcmf_usbdev_info *devinfo)
 {
        s8 *fwname;
        const struct firmware *fw;
+       struct brcmf_usb_image *fw_image;
        int err;
 
-       devinfo->image = g_image.data;
-       devinfo->image_len = g_image.len;
-
-       /*
-        * if we have an image we can leave here.
-        */
-       if (devinfo->image)
-               return 0;
-
-       fwname = BRCMF_USB_43236_FW_NAME;
+       switch (devinfo->bus_pub.devid) {
+       case 43143:
+               fwname = BRCMF_USB_43143_FW_NAME;
+               break;
+       case 43235:
+       case 43236:
+       case 43238:
+               fwname = BRCMF_USB_43236_FW_NAME;
+               break;
+       case 43242:
+               fwname = BRCMF_USB_43242_FW_NAME;
+               break;
+       default:
+               return -EINVAL;
+               break;
+       }
 
+       list_for_each_entry(fw_image, &fw_image_list, list) {
+               if (fw_image->fwname == fwname) {
+                       devinfo->image = fw_image->image;
+                       devinfo->image_len = fw_image->image_len;
+                       return 0;
+               }
+       }
+       /* fw image not yet loaded. Load it now and add to list */
        err = request_firmware(&fw, fwname, devinfo->dev);
        if (!fw) {
                brcmf_dbg(ERROR, "fail to request firmware %s\n", fwname);
@@ -1240,27 +1209,32 @@ static int brcmf_usb_get_fw(struct brcmf_usbdev_info *devinfo)
                return -EINVAL;
        }
 
-       devinfo->image = vmalloc(fw->size); /* plus nvram */
-       if (!devinfo->image)
+       fw_image = kzalloc(sizeof(*fw_image), GFP_ATOMIC);
+       if (!fw_image)
+               return -ENOMEM;
+       INIT_LIST_HEAD(&fw_image->list);
+       list_add_tail(&fw_image->list, &fw_image_list);
+       fw_image->fwname = fwname;
+       fw_image->image = vmalloc(fw->size);
+       if (!fw_image->image)
                return -ENOMEM;
 
-       memcpy(devinfo->image, fw->data, fw->size);
-       devinfo->image_len = fw->size;
+       memcpy(fw_image->image, fw->data, fw->size);
+       fw_image->image_len = fw->size;
 
        release_firmware(fw);
+
+       devinfo->image = fw_image->image;
+       devinfo->image_len = fw_image->image_len;
+
        return 0;
 }
 
 
 static
-struct brcmf_usbdev *brcmf_usb_attach(int nrxq, int ntxq, struct device *dev)
+struct brcmf_usbdev *brcmf_usb_attach(struct brcmf_usbdev_info *devinfo,
+                                     int nrxq, int ntxq)
 {
-       struct brcmf_usbdev_info *devinfo;
-
-       devinfo = kzalloc(sizeof(struct brcmf_usbdev_info), GFP_ATOMIC);
-       if (devinfo == NULL)
-               return NULL;
-
        devinfo->bus_pub.nrxq = nrxq;
        devinfo->rx_low_watermark = nrxq / 2;
        devinfo->bus_pub.devinfo = devinfo;
@@ -1269,18 +1243,6 @@ struct brcmf_usbdev *brcmf_usb_attach(int nrxq, int ntxq, struct device *dev)
        /* flow control when too many tx urbs posted */
        devinfo->tx_low_watermark = ntxq / 4;
        devinfo->tx_high_watermark = devinfo->tx_low_watermark * 3;
-       devinfo->dev = dev;
-       devinfo->usbdev = usbdev_probe_info.usb;
-       devinfo->tx_pipe = usbdev_probe_info.tx_pipe;
-       devinfo->rx_pipe = usbdev_probe_info.rx_pipe;
-       devinfo->rx_pipe2 = usbdev_probe_info.rx_pipe2;
-       devinfo->intr_pipe = usbdev_probe_info.intr_pipe;
-
-       devinfo->interval = usbdev_probe_info.interval;
-       devinfo->intr_size = usbdev_probe_info.intr_size;
-
-       memcpy(&devinfo->probe_info, &usbdev_probe_info,
-               sizeof(struct brcmf_usb_probe_info));
        devinfo->bus_pub.bus_mtu = BRCMF_USB_MAX_PKT_SIZE;
 
        /* Initialize other structure content */
@@ -1295,6 +1257,8 @@ struct brcmf_usbdev *brcmf_usb_attach(int nrxq, int ntxq, struct device *dev)
        INIT_LIST_HEAD(&devinfo->tx_freeq);
        INIT_LIST_HEAD(&devinfo->tx_postq);
 
+       devinfo->tx_flowblock = false;
+
        devinfo->rx_reqs = brcmf_usbdev_qinit(&devinfo->rx_freeq, nrxq);
        if (!devinfo->rx_reqs)
                goto error;
@@ -1302,6 +1266,7 @@ struct brcmf_usbdev *brcmf_usb_attach(int nrxq, int ntxq, struct device *dev)
        devinfo->tx_reqs = brcmf_usbdev_qinit(&devinfo->tx_freeq, ntxq);
        if (!devinfo->tx_reqs)
                goto error;
+       devinfo->tx_freecount = ntxq;
 
        devinfo->intr_urb = usb_alloc_urb(0, GFP_ATOMIC);
        if (!devinfo->intr_urb) {
@@ -1313,8 +1278,6 @@ struct brcmf_usbdev *brcmf_usb_attach(int nrxq, int ntxq, struct device *dev)
                brcmf_dbg(ERROR, "usb_alloc_urb (ctl) failed\n");
                goto error;
        }
-       devinfo->rxctl_deferrespok = 0;
-
        devinfo->bulk_urb = usb_alloc_urb(0, GFP_ATOMIC);
        if (!devinfo->bulk_urb) {
                brcmf_dbg(ERROR, "usb_alloc_urb (bulk) failed\n");
@@ -1336,23 +1299,21 @@ struct brcmf_usbdev *brcmf_usb_attach(int nrxq, int ntxq, struct device *dev)
 
 error:
        brcmf_dbg(ERROR, "failed!\n");
-       brcmf_usb_detach(&devinfo->bus_pub);
+       brcmf_usb_detach(devinfo);
        return NULL;
 }
 
-static int brcmf_usb_probe_cb(struct device *dev, const char *desc,
-                               u32 bustype, u32 hdrlen)
+static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo,
+                             const char *desc, u32 bustype, u32 hdrlen)
 {
        struct brcmf_bus *bus = NULL;
        struct brcmf_usbdev *bus_pub = NULL;
        int ret;
+       struct device *dev = devinfo->dev;
 
-
-       bus_pub = brcmf_usb_attach(BRCMF_USB_NRXQ, BRCMF_USB_NTXQ, dev);
-       if (!bus_pub) {
-               ret = -ENODEV;
-               goto fail;
-       }
+       bus_pub = brcmf_usb_attach(devinfo, BRCMF_USB_NRXQ, BRCMF_USB_NTXQ);
+       if (!bus_pub)
+               return -ENODEV;
 
        bus = kzalloc(sizeof(struct brcmf_bus), GFP_ATOMIC);
        if (!bus) {
@@ -1387,23 +1348,21 @@ static int brcmf_usb_probe_cb(struct device *dev, const char *desc,
        return 0;
 fail:
        /* Release resources in reverse order */
-       if (bus_pub)
-               brcmf_usb_detach(bus_pub);
        kfree(bus);
+       brcmf_usb_detach(devinfo);
        return ret;
 }
 
 static void
-brcmf_usb_disconnect_cb(struct brcmf_usbdev *bus_pub)
+brcmf_usb_disconnect_cb(struct brcmf_usbdev_info *devinfo)
 {
-       if (!bus_pub)
+       if (!devinfo)
                return;
-       brcmf_dbg(TRACE, "enter: bus_pub %p\n", bus_pub);
-
-       brcmf_detach(bus_pub->devinfo->dev);
-       kfree(bus_pub->bus);
-       brcmf_usb_detach(bus_pub);
+       brcmf_dbg(TRACE, "enter: bus_pub %p\n", devinfo);
 
+       brcmf_detach(devinfo->dev);
+       kfree(devinfo->bus_pub.bus);
+       brcmf_usb_detach(devinfo);
 }
 
 static int
@@ -1415,18 +1374,18 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
        struct usb_device *usb = interface_to_usbdev(intf);
        int num_of_eps;
        u8 endpoint_num;
+       struct brcmf_usbdev_info *devinfo;
 
        brcmf_dbg(TRACE, "enter\n");
 
-       usbdev_probe_info.usb = usb;
-       usbdev_probe_info.intf = intf;
+       devinfo = kzalloc(sizeof(*devinfo), GFP_ATOMIC);
+       if (devinfo == NULL)
+               return -ENOMEM;
 
-       if (id != NULL) {
-               usbdev_probe_info.vid = id->idVendor;
-               usbdev_probe_info.pid = id->idProduct;
-       }
+       devinfo->usbdev = usb;
+       devinfo->dev = &usb->dev;
 
-       usb_set_intfdata(intf, &usbdev_probe_info);
+       usb_set_intfdata(intf, devinfo);
 
        /* Check that the device supports only one configuration */
        if (usb->descriptor.bNumConfigurations != 1) {
@@ -1475,11 +1434,11 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
        }
 
        endpoint_num = endpoint->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
-       usbdev_probe_info.intr_pipe = usb_rcvintpipe(usb, endpoint_num);
+       devinfo->intr_pipe = usb_rcvintpipe(usb, endpoint_num);
 
-       usbdev_probe_info.rx_pipe = 0;
-       usbdev_probe_info.rx_pipe2 = 0;
-       usbdev_probe_info.tx_pipe = 0;
+       devinfo->rx_pipe = 0;
+       devinfo->rx_pipe2 = 0;
+       devinfo->tx_pipe = 0;
        num_of_eps = IFDESC(usb, BULK_IF).bNumEndpoints - 1;
 
        /* Check data endpoints and get pipes */
@@ -1496,35 +1455,33 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
                               USB_ENDPOINT_NUMBER_MASK;
                if ((endpoint->bEndpointAddress & USB_ENDPOINT_DIR_MASK)
                        == USB_DIR_IN) {
-                       if (!usbdev_probe_info.rx_pipe) {
-                               usbdev_probe_info.rx_pipe =
+                       if (!devinfo->rx_pipe) {
+                               devinfo->rx_pipe =
                                        usb_rcvbulkpipe(usb, endpoint_num);
                        } else {
-                               usbdev_probe_info.rx_pipe2 =
+                               devinfo->rx_pipe2 =
                                        usb_rcvbulkpipe(usb, endpoint_num);
                        }
                } else {
-                       usbdev_probe_info.tx_pipe =
-                                       usb_sndbulkpipe(usb, endpoint_num);
+                       devinfo->tx_pipe = usb_sndbulkpipe(usb, endpoint_num);
                }
        }
 
        /* Allocate interrupt URB and data buffer */
        /* RNDIS says 8-byte intr, our old drivers used 4-byte */
        if (IFEPDESC(usb, CONTROL_IF, 0).wMaxPacketSize == cpu_to_le16(16))
-               usbdev_probe_info.intr_size = 8;
+               devinfo->intr_size = 8;
        else
-               usbdev_probe_info.intr_size = 4;
+               devinfo->intr_size = 4;
 
-       usbdev_probe_info.interval = IFEPDESC(usb, CONTROL_IF, 0).bInterval;
+       devinfo->interval = IFEPDESC(usb, CONTROL_IF, 0).bInterval;
 
-       usbdev_probe_info.device_speed = usb->speed;
        if (usb->speed == USB_SPEED_HIGH)
                brcmf_dbg(INFO, "Broadcom high speed USB wireless device detected\n");
        else
                brcmf_dbg(INFO, "Broadcom full speed USB wireless device detected\n");
 
-       ret = brcmf_usb_probe_cb(&usb->dev, "", USB_BUS, 0);
+       ret = brcmf_usb_probe_cb(devinfo, "", USB_BUS, 0);
        if (ret)
                goto fail;
 
@@ -1533,6 +1490,7 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
 
 fail:
        brcmf_dbg(ERROR, "failed with errno %d\n", ret);
+       kfree(devinfo);
        usb_set_intfdata(intf, NULL);
        return ret;
 
@@ -1541,11 +1499,12 @@ fail:
 static void
 brcmf_usb_disconnect(struct usb_interface *intf)
 {
-       struct usb_device *usb = interface_to_usbdev(intf);
+       struct brcmf_usbdev_info *devinfo;
 
        brcmf_dbg(TRACE, "enter\n");
-       brcmf_usb_disconnect_cb(brcmf_usb_get_buspub(&usb->dev));
-       usb_set_intfdata(intf, NULL);
+       devinfo = (struct brcmf_usbdev_info *)usb_get_intfdata(intf);
+       brcmf_usb_disconnect_cb(devinfo);
+       kfree(devinfo);
 }
 
 /*
@@ -1577,17 +1536,23 @@ static int brcmf_usb_resume(struct usb_interface *intf)
 }
 
 #define BRCMF_USB_VENDOR_ID_BROADCOM   0x0a5c
+#define BRCMF_USB_DEVICE_ID_43143      0xbd1e
 #define BRCMF_USB_DEVICE_ID_43236      0xbd17
+#define BRCMF_USB_DEVICE_ID_43242      0xbd1f
 #define BRCMF_USB_DEVICE_ID_BCMFW      0x0bdc
 
 static struct usb_device_id brcmf_usb_devid_table[] = {
+       { USB_DEVICE(BRCMF_USB_VENDOR_ID_BROADCOM, BRCMF_USB_DEVICE_ID_43143) },
        { USB_DEVICE(BRCMF_USB_VENDOR_ID_BROADCOM, BRCMF_USB_DEVICE_ID_43236) },
+       { USB_DEVICE(BRCMF_USB_VENDOR_ID_BROADCOM, BRCMF_USB_DEVICE_ID_43242) },
        /* special entry for device with firmware loaded and running */
        { USB_DEVICE(BRCMF_USB_VENDOR_ID_BROADCOM, BRCMF_USB_DEVICE_ID_BCMFW) },
        { }
 };
 MODULE_DEVICE_TABLE(usb, brcmf_usb_devid_table);
+MODULE_FIRMWARE(BRCMF_USB_43143_FW_NAME);
 MODULE_FIRMWARE(BRCMF_USB_43236_FW_NAME);
+MODULE_FIRMWARE(BRCMF_USB_43242_FW_NAME);
 
 /* TODO: suspend and resume entries */
 static struct usb_driver brcmf_usbdrvr = {
@@ -1601,15 +1566,25 @@ static struct usb_driver brcmf_usbdrvr = {
        .disable_hub_initiated_lpm = 1,
 };
 
+static void brcmf_release_fw(struct list_head *q)
+{
+       struct brcmf_usb_image *fw_image, *next;
+
+       list_for_each_entry_safe(fw_image, next, q, list) {
+               vfree(fw_image->image);
+               list_del_init(&fw_image->list);
+       }
+}
+
+
 void brcmf_usb_exit(void)
 {
        usb_deregister(&brcmf_usbdrvr);
-       vfree(g_image.data);
-       g_image.data = NULL;
-       g_image.len = 0;
+       brcmf_release_fw(&fw_image_list);
 }
 
 void brcmf_usb_init(void)
 {
+       INIT_LIST_HEAD(&fw_image_list);
        usb_register(&brcmf_usbdrvr);
 }
index 50b5553b6964b95dffecf40f4d28fdb91796efc8..c1abaa6db59ec97fc9da4a0419ba81ea922cfb42 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/ieee80211.h>
 #include <linux/uaccess.h>
 #include <net/cfg80211.h>
+#include <net/netlink.h>
 
 #include <brcmu_utils.h>
 #include <defs.h>
 #include "dhd.h"
 #include "wl_cfg80211.h"
 
+#define BRCMF_SCAN_IE_LEN_MAX          2048
+#define BRCMF_PNO_VERSION              2
+#define BRCMF_PNO_TIME                 30
+#define BRCMF_PNO_REPEAT               4
+#define BRCMF_PNO_FREQ_EXPO_MAX                3
+#define BRCMF_PNO_MAX_PFN_COUNT                16
+#define BRCMF_PNO_ENABLE_ADAPTSCAN_BIT 6
+#define BRCMF_PNO_HIDDEN_BIT           2
+#define BRCMF_PNO_WPA_AUTH_ANY         0xFFFFFFFF
+#define BRCMF_PNO_SCAN_COMPLETE                1
+#define BRCMF_PNO_SCAN_INCOMPLETE      0
+
+#define TLV_LEN_OFF                    1       /* length offset */
+#define TLV_HDR_LEN                    2       /* header length */
+#define TLV_BODY_OFF                   2       /* body offset */
+#define TLV_OUI_LEN                    3       /* oui id length */
+#define WPA_OUI                                "\x00\x50\xF2"  /* WPA OUI */
+#define WPA_OUI_TYPE                   1
+#define RSN_OUI                                "\x00\x0F\xAC"  /* RSN OUI */
+#define        WME_OUI_TYPE                    2
+
+#define VS_IE_FIXED_HDR_LEN            6
+#define WPA_IE_VERSION_LEN             2
+#define WPA_IE_MIN_OUI_LEN             4
+#define WPA_IE_SUITE_COUNT_LEN         2
+
+#define WPA_CIPHER_NONE                        0       /* None */
+#define WPA_CIPHER_WEP_40              1       /* WEP (40-bit) */
+#define WPA_CIPHER_TKIP                        2       /* TKIP: default for WPA */
+#define WPA_CIPHER_AES_CCM             4       /* AES (CCM) */
+#define WPA_CIPHER_WEP_104             5       /* WEP (104-bit) */
+
+#define RSN_AKM_NONE                   0       /* None (IBSS) */
+#define RSN_AKM_UNSPECIFIED            1       /* Over 802.1x */
+#define RSN_AKM_PSK                    2       /* Pre-shared Key */
+#define RSN_CAP_LEN                    2       /* Length of RSN capabilities */
+#define RSN_CAP_PTK_REPLAY_CNTR_MASK   0x000C
+
+#define VNDR_IE_CMD_LEN                        4       /* length of the set command
+                                                * string :"add", "del" (+ NUL)
+                                                */
+#define VNDR_IE_COUNT_OFFSET           4
+#define VNDR_IE_PKTFLAG_OFFSET         8
+#define VNDR_IE_VSIE_OFFSET            12
+#define VNDR_IE_HDR_SIZE               12
+#define VNDR_IE_BEACON_FLAG            0x1
+#define VNDR_IE_PRBRSP_FLAG            0x2
+#define MAX_VNDR_IE_NUMBER             5
+
+#define        DOT11_MGMT_HDR_LEN              24      /* d11 management header len */
+#define        DOT11_BCN_PRB_FIXED_LEN         12      /* beacon/probe fixed length */
+
 #define BRCMF_ASSOC_PARAMS_FIXED_SIZE \
        (sizeof(struct brcmf_assoc_params_le) - sizeof(u16))
 
@@ -42,33 +95,12 @@ static const u8 ether_bcast[ETH_ALEN] = {255, 255, 255, 255, 255, 255};
 
 static u32 brcmf_dbg_level = WL_DBG_ERR;
 
-static void brcmf_set_drvdata(struct brcmf_cfg80211_dev *dev, void *data)
-{
-       dev->driver_data = data;
-}
-
-static void *brcmf_get_drvdata(struct brcmf_cfg80211_dev *dev)
-{
-       void *data = NULL;
-
-       if (dev)
-               data = dev->driver_data;
-       return data;
-}
-
-static
-struct brcmf_cfg80211_priv *brcmf_priv_get(struct brcmf_cfg80211_dev *cfg_dev)
-{
-       struct brcmf_cfg80211_iface *ci = brcmf_get_drvdata(cfg_dev);
-       return ci->cfg_priv;
-}
-
 static bool check_sys_up(struct wiphy *wiphy)
 {
-       struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
-       if (!test_bit(WL_STATUS_READY, &cfg_priv->status)) {
+       struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+       if (!test_bit(WL_STATUS_READY, &cfg->status)) {
                WL_INFO("device is not ready : status (%d)\n",
-                       (int)cfg_priv->status);
+                       (int)cfg->status);
                return false;
        }
        return true;
@@ -256,6 +288,25 @@ struct brcmf_tlv {
        u8 data[1];
 };
 
+/* Vendor specific ie. id = 221, oui and type defines exact ie */
+struct brcmf_vs_tlv {
+       u8 id;
+       u8 len;
+       u8 oui[3];
+       u8 oui_type;
+};
+
+struct parsed_vndr_ie_info {
+       u8 *ie_ptr;
+       u32 ie_len;     /* total length including id & length field */
+       struct brcmf_vs_tlv vndrie;
+};
+
+struct parsed_vndr_ies {
+       u32 count;
+       struct parsed_vndr_ie_info ie_info[MAX_VNDR_IE_NUMBER];
+};
+
 /* Quarter dBm units to mW
  * Table starts at QDBM_OFFSET, so the first entry is mW for qdBm=153
  * Table is offset so the last entry is largest mW value that fits in
@@ -353,6 +404,44 @@ brcmf_exec_dcmd_u32(struct net_device *ndev, u32 cmd, u32 *par)
        return err;
 }
 
+static s32
+brcmf_dev_iovar_setbuf_bsscfg(struct net_device *ndev, s8 *name,
+                             void *param, s32 paramlen,
+                             void *buf, s32 buflen, s32 bssidx)
+{
+       s32 err = -ENOMEM;
+       u32 len;
+
+       len = brcmf_c_mkiovar_bsscfg(name, param, paramlen,
+                                    buf, buflen, bssidx);
+       BUG_ON(!len);
+       if (len > 0)
+               err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_VAR, buf, len);
+       if (err)
+               WL_ERR("error (%d)\n", err);
+
+       return err;
+}
+
+static s32
+brcmf_dev_iovar_getbuf_bsscfg(struct net_device *ndev, s8 *name,
+                             void *param, s32 paramlen,
+                             void *buf, s32 buflen, s32 bssidx)
+{
+       s32 err = -ENOMEM;
+       u32 len;
+
+       len = brcmf_c_mkiovar_bsscfg(name, param, paramlen,
+                                    buf, buflen, bssidx);
+       BUG_ON(!len);
+       if (len > 0)
+               err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_VAR, buf, len);
+       if (err)
+               WL_ERR("error (%d)\n", err);
+
+       return err;
+}
+
 static void convert_key_from_CPU(struct brcmf_wsec_key *key,
                                 struct brcmf_wsec_key_le *key_le)
 {
@@ -367,16 +456,22 @@ static void convert_key_from_CPU(struct brcmf_wsec_key *key,
        memcpy(key_le->ea, key->ea, sizeof(key->ea));
 }
 
-static int send_key_to_dongle(struct net_device *ndev,
-                             struct brcmf_wsec_key *key)
+static int
+send_key_to_dongle(struct brcmf_cfg80211_info *cfg, s32 bssidx,
+                  struct net_device *ndev, struct brcmf_wsec_key *key)
 {
        int err;
        struct brcmf_wsec_key_le key_le;
 
        convert_key_from_CPU(key, &key_le);
-       err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_KEY, &key_le, sizeof(key_le));
+
+       err  = brcmf_dev_iovar_setbuf_bsscfg(ndev, "wsec_key", &key_le,
+                                            sizeof(key_le),
+                                            cfg->extra_buf,
+                                            WL_EXTRA_BUF_MAX, bssidx);
+
        if (err)
-               WL_ERR("WLC_SET_KEY error (%d)\n", err);
+               WL_ERR("wsec_key error (%d)\n", err);
        return err;
 }
 
@@ -385,14 +480,12 @@ brcmf_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev,
                         enum nl80211_iftype type, u32 *flags,
                         struct vif_params *params)
 {
-       struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
-       struct wireless_dev *wdev;
+       struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
        s32 infra = 0;
+       s32 ap = 0;
        s32 err = 0;
 
-       WL_TRACE("Enter\n");
-       if (!check_sys_up(wiphy))
-               return -EIO;
+       WL_TRACE("Enter, ndev=%p, type=%d\n", ndev, type);
 
        switch (type) {
        case NL80211_IFTYPE_MONITOR:
@@ -401,29 +494,44 @@ brcmf_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev,
                       type);
                return -EOPNOTSUPP;
        case NL80211_IFTYPE_ADHOC:
-               cfg_priv->conf->mode = WL_MODE_IBSS;
+               cfg->conf->mode = WL_MODE_IBSS;
                infra = 0;
                break;
        case NL80211_IFTYPE_STATION:
-               cfg_priv->conf->mode = WL_MODE_BSS;
+               cfg->conf->mode = WL_MODE_BSS;
                infra = 1;
                break;
+       case NL80211_IFTYPE_AP:
+               cfg->conf->mode = WL_MODE_AP;
+               ap = 1;
+               break;
        default:
                err = -EINVAL;
                goto done;
        }
 
-       err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_INFRA, &infra);
-       if (err) {
-               WL_ERR("WLC_SET_INFRA error (%d)\n", err);
-               err = -EAGAIN;
+       if (ap) {
+               set_bit(WL_STATUS_AP_CREATING, &cfg->status);
+               if (!cfg->ap_info)
+                       cfg->ap_info = kzalloc(sizeof(*cfg->ap_info),
+                                              GFP_KERNEL);
+               if (!cfg->ap_info) {
+                       err = -ENOMEM;
+                       goto done;
+               }
+               WL_INFO("IF Type = AP\n");
        } else {
-               wdev = ndev->ieee80211_ptr;
-               wdev->iftype = type;
+               err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_INFRA, &infra);
+               if (err) {
+                       WL_ERR("WLC_SET_INFRA error (%d)\n", err);
+                       err = -EAGAIN;
+                       goto done;
+               }
+               WL_INFO("IF Type = %s\n",
+                       (cfg->conf->mode == WL_MODE_IBSS) ?
+                       "Adhoc" : "Infra");
        }
-
-       WL_INFO("IF Type = %s\n",
-               (cfg_priv->conf->mode == WL_MODE_IBSS) ? "Adhoc" : "Infra");
+       ndev->ieee80211_ptr->iftype = type;
 
 done:
        WL_TRACE("Exit\n");
@@ -474,12 +582,55 @@ brcmf_dev_intvar_get(struct net_device *ndev, s8 *name, s32 *retval)
        return err;
 }
 
+static s32
+brcmf_dev_intvar_set_bsscfg(struct net_device *ndev, s8 *name, u32 val,
+                           s32 bssidx)
+{
+       s8 buf[BRCMF_DCMD_SMLEN];
+       __le32 val_le;
+
+       val_le = cpu_to_le32(val);
+
+       return brcmf_dev_iovar_setbuf_bsscfg(ndev, name, &val_le,
+                                            sizeof(val_le), buf, sizeof(buf),
+                                            bssidx);
+}
+
+static s32
+brcmf_dev_intvar_get_bsscfg(struct net_device *ndev, s8 *name, s32 *val,
+                           s32 bssidx)
+{
+       s8 buf[BRCMF_DCMD_SMLEN];
+       s32 err;
+       __le32 val_le;
+
+       memset(buf, 0, sizeof(buf));
+       err = brcmf_dev_iovar_getbuf_bsscfg(ndev, name, val, sizeof(*val), buf,
+                                           sizeof(buf), bssidx);
+       if (err == 0) {
+               memcpy(&val_le, buf, sizeof(val_le));
+               *val = le32_to_cpu(val_le);
+       }
+       return err;
+}
+
+
+/*
+ * For now brcmf_find_bssidx will return 0. Once p2p gets implemented this
+ * should return the ndev matching bssidx.
+ */
+static s32
+brcmf_find_bssidx(struct brcmf_cfg80211_info *cfg, struct net_device *ndev)
+{
+       return 0;
+}
+
 static void brcmf_set_mpc(struct net_device *ndev, int mpc)
 {
        s32 err = 0;
-       struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev);
+       struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
 
-       if (test_bit(WL_STATUS_READY, &cfg_priv->status)) {
+       if (test_bit(WL_STATUS_READY, &cfg->status)) {
                err = brcmf_dev_intvar_set(ndev, "mpc", mpc);
                if (err) {
                        WL_ERR("fail to set mpc\n");
@@ -489,8 +640,8 @@ static void brcmf_set_mpc(struct net_device *ndev, int mpc)
        }
 }
 
-static void wl_iscan_prep(struct brcmf_scan_params_le *params_le,
-                         struct brcmf_ssid *ssid)
+static void brcmf_iscan_prep(struct brcmf_scan_params_le *params_le,
+                            struct brcmf_ssid *ssid)
 {
        memcpy(params_le->bssid, ether_bcast, ETH_ALEN);
        params_le->bss_type = DOT11_BSSTYPE_ANY;
@@ -546,7 +697,7 @@ brcmf_run_iscan(struct brcmf_cfg80211_iscan_ctrl *iscan,
                return -ENOMEM;
        BUG_ON(params_size >= BRCMF_DCMD_SMLEN);
 
-       wl_iscan_prep(&params->params_le, ssid);
+       brcmf_iscan_prep(&params->params_le, ssid);
 
        params->version = cpu_to_le32(BRCMF_ISCAN_REQ_VERSION);
        params->action = cpu_to_le16(action);
@@ -565,10 +716,10 @@ brcmf_run_iscan(struct brcmf_cfg80211_iscan_ctrl *iscan,
        return err;
 }
 
-static s32 brcmf_do_iscan(struct brcmf_cfg80211_priv *cfg_priv)
+static s32 brcmf_do_iscan(struct brcmf_cfg80211_info *cfg)
 {
-       struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_to_iscan(cfg_priv);
-       struct net_device *ndev = cfg_to_ndev(cfg_priv);
+       struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_to_iscan(cfg);
+       struct net_device *ndev = cfg_to_ndev(cfg);
        struct brcmf_ssid ssid;
        __le32 passive_scan;
        s32 err = 0;
@@ -578,19 +729,19 @@ static s32 brcmf_do_iscan(struct brcmf_cfg80211_priv *cfg_priv)
 
        iscan->state = WL_ISCAN_STATE_SCANING;
 
-       passive_scan = cfg_priv->active_scan ? 0 : cpu_to_le32(1);
-       err = brcmf_exec_dcmd(cfg_to_ndev(cfg_priv), BRCMF_C_SET_PASSIVE_SCAN,
+       passive_scan = cfg->active_scan ? 0 : cpu_to_le32(1);
+       err = brcmf_exec_dcmd(cfg_to_ndev(cfg), BRCMF_C_SET_PASSIVE_SCAN,
                        &passive_scan, sizeof(passive_scan));
        if (err) {
                WL_ERR("error (%d)\n", err);
                return err;
        }
        brcmf_set_mpc(ndev, 0);
-       cfg_priv->iscan_kickstart = true;
+       cfg->iscan_kickstart = true;
        err = brcmf_run_iscan(iscan, &ssid, BRCMF_SCAN_ACTION_START);
        if (err) {
                brcmf_set_mpc(ndev, 1);
-               cfg_priv->iscan_kickstart = false;
+               cfg->iscan_kickstart = false;
                return err;
        }
        mod_timer(&iscan->timer, jiffies + iscan->timer_ms * HZ / 1000);
@@ -599,31 +750,31 @@ static s32 brcmf_do_iscan(struct brcmf_cfg80211_priv *cfg_priv)
 }
 
 static s32
-__brcmf_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
-                  struct cfg80211_scan_request *request,
-                  struct cfg80211_ssid *this_ssid)
+brcmf_cfg80211_iscan(struct wiphy *wiphy, struct net_device *ndev,
+                    struct cfg80211_scan_request *request,
+                    struct cfg80211_ssid *this_ssid)
 {
-       struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev);
+       struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
        struct cfg80211_ssid *ssids;
-       struct brcmf_cfg80211_scan_req *sr = cfg_priv->scan_req_int;
+       struct brcmf_cfg80211_scan_req *sr = cfg->scan_req_int;
        __le32 passive_scan;
        bool iscan_req;
        bool spec_scan;
        s32 err = 0;
        u32 SSID_len;
 
-       if (test_bit(WL_STATUS_SCANNING, &cfg_priv->status)) {
-               WL_ERR("Scanning already : status (%lu)\n", cfg_priv->status);
+       if (test_bit(WL_STATUS_SCANNING, &cfg->status)) {
+               WL_ERR("Scanning already : status (%lu)\n", cfg->status);
                return -EAGAIN;
        }
-       if (test_bit(WL_STATUS_SCAN_ABORTING, &cfg_priv->status)) {
+       if (test_bit(WL_STATUS_SCAN_ABORTING, &cfg->status)) {
                WL_ERR("Scanning being aborted : status (%lu)\n",
-                      cfg_priv->status);
+                      cfg->status);
                return -EAGAIN;
        }
-       if (test_bit(WL_STATUS_CONNECTING, &cfg_priv->status)) {
+       if (test_bit(WL_STATUS_CONNECTING, &cfg->status)) {
                WL_ERR("Connecting : status (%lu)\n",
-                      cfg_priv->status);
+                      cfg->status);
                return -EAGAIN;
        }
 
@@ -632,7 +783,7 @@ __brcmf_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
        if (request) {
                /* scan bss */
                ssids = request->ssids;
-               if (cfg_priv->iscan_on && (!ssids || !ssids->ssid_len))
+               if (cfg->iscan_on && (!ssids || !ssids->ssid_len))
                        iscan_req = true;
        } else {
                /* scan in ibss */
@@ -640,10 +791,10 @@ __brcmf_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
                ssids = this_ssid;
        }
 
-       cfg_priv->scan_request = request;
-       set_bit(WL_STATUS_SCANNING, &cfg_priv->status);
+       cfg->scan_request = request;
+       set_bit(WL_STATUS_SCANNING, &cfg->status);
        if (iscan_req) {
-               err = brcmf_do_iscan(cfg_priv);
+               err = brcmf_do_iscan(cfg);
                if (!err)
                        return err;
                else
@@ -662,7 +813,7 @@ __brcmf_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
                        WL_SCAN("Broadcast scan\n");
                }
 
-               passive_scan = cfg_priv->active_scan ? 0 : cpu_to_le32(1);
+               passive_scan = cfg->active_scan ? 0 : cpu_to_le32(1);
                err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_PASSIVE_SCAN,
                                &passive_scan, sizeof(passive_scan));
                if (err) {
@@ -687,8 +838,346 @@ __brcmf_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
        return 0;
 
 scan_out:
-       clear_bit(WL_STATUS_SCANNING, &cfg_priv->status);
-       cfg_priv->scan_request = NULL;
+       clear_bit(WL_STATUS_SCANNING, &cfg->status);
+       cfg->scan_request = NULL;
+       return err;
+}
+
+static void brcmf_escan_prep(struct brcmf_scan_params_le *params_le,
+                            struct cfg80211_scan_request *request)
+{
+       u32 n_ssids;
+       u32 n_channels;
+       s32 i;
+       s32 offset;
+       u16 chanspec;
+       u16 channel;
+       struct ieee80211_channel *req_channel;
+       char *ptr;
+       struct brcmf_ssid_le ssid_le;
+
+       memcpy(params_le->bssid, ether_bcast, ETH_ALEN);
+       params_le->bss_type = DOT11_BSSTYPE_ANY;
+       params_le->scan_type = 0;
+       params_le->channel_num = 0;
+       params_le->nprobes = cpu_to_le32(-1);
+       params_le->active_time = cpu_to_le32(-1);
+       params_le->passive_time = cpu_to_le32(-1);
+       params_le->home_time = cpu_to_le32(-1);
+       memset(&params_le->ssid_le, 0, sizeof(params_le->ssid_le));
+
+       /* if request is null exit so it will be all channel broadcast scan */
+       if (!request)
+               return;
+
+       n_ssids = request->n_ssids;
+       n_channels = request->n_channels;
+       /* Copy channel array if applicable */
+       WL_SCAN("### List of channelspecs to scan ### %d\n", n_channels);
+       if (n_channels > 0) {
+               for (i = 0; i < n_channels; i++) {
+                       chanspec = 0;
+                       req_channel = request->channels[i];
+                       channel = ieee80211_frequency_to_channel(
+                                       req_channel->center_freq);
+                       if (req_channel->band == IEEE80211_BAND_2GHZ)
+                               chanspec |= WL_CHANSPEC_BAND_2G;
+                       else
+                               chanspec |= WL_CHANSPEC_BAND_5G;
+
+                       if (req_channel->flags & IEEE80211_CHAN_NO_HT40) {
+                               chanspec |= WL_CHANSPEC_BW_20;
+                               chanspec |= WL_CHANSPEC_CTL_SB_NONE;
+                       } else {
+                               chanspec |= WL_CHANSPEC_BW_40;
+                               if (req_channel->flags &
+                                               IEEE80211_CHAN_NO_HT40PLUS)
+                                       chanspec |= WL_CHANSPEC_CTL_SB_LOWER;
+                               else
+                                       chanspec |= WL_CHANSPEC_CTL_SB_UPPER;
+                       }
+
+                       chanspec |= (channel & WL_CHANSPEC_CHAN_MASK);
+                       WL_SCAN("Chan : %d, Channel spec: %x\n",
+                               channel, chanspec);
+                       params_le->channel_list[i] = cpu_to_le16(chanspec);
+               }
+       } else {
+               WL_SCAN("Scanning all channels\n");
+       }
+       /* Copy ssid array if applicable */
+       WL_SCAN("### List of SSIDs to scan ### %d\n", n_ssids);
+       if (n_ssids > 0) {
+               offset = offsetof(struct brcmf_scan_params_le, channel_list) +
+                               n_channels * sizeof(u16);
+               offset = roundup(offset, sizeof(u32));
+               ptr = (char *)params_le + offset;
+               for (i = 0; i < n_ssids; i++) {
+                       memset(&ssid_le, 0, sizeof(ssid_le));
+                       ssid_le.SSID_len =
+                                       cpu_to_le32(request->ssids[i].ssid_len);
+                       memcpy(ssid_le.SSID, request->ssids[i].ssid,
+                              request->ssids[i].ssid_len);
+                       if (!ssid_le.SSID_len)
+                               WL_SCAN("%d: Broadcast scan\n", i);
+                       else
+                               WL_SCAN("%d: scan for  %s size =%d\n", i,
+                                       ssid_le.SSID, ssid_le.SSID_len);
+                       memcpy(ptr, &ssid_le, sizeof(ssid_le));
+                       ptr += sizeof(ssid_le);
+               }
+       } else {
+               WL_SCAN("Broadcast scan %p\n", request->ssids);
+               if ((request->ssids) && request->ssids->ssid_len) {
+                       WL_SCAN("SSID %s len=%d\n", params_le->ssid_le.SSID,
+                               request->ssids->ssid_len);
+                       params_le->ssid_le.SSID_len =
+                               cpu_to_le32(request->ssids->ssid_len);
+                       memcpy(&params_le->ssid_le.SSID, request->ssids->ssid,
+                               request->ssids->ssid_len);
+               }
+       }
+       /* Adding mask to channel numbers */
+       params_le->channel_num =
+               cpu_to_le32((n_ssids << BRCMF_SCAN_PARAMS_NSSID_SHIFT) |
+                       (n_channels & BRCMF_SCAN_PARAMS_COUNT_MASK));
+}
+
+static s32
+brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
+                           struct net_device *ndev,
+                           bool aborted, bool fw_abort)
+{
+       struct brcmf_scan_params_le params_le;
+       struct cfg80211_scan_request *scan_request;
+       s32 err = 0;
+
+       WL_SCAN("Enter\n");
+
+       /* clear scan request, because the FW abort can cause a second call */
+       /* to this function and might cause a double cfg80211_scan_done     */
+       scan_request = cfg->scan_request;
+       cfg->scan_request = NULL;
+
+       if (timer_pending(&cfg->escan_timeout))
+               del_timer_sync(&cfg->escan_timeout);
+
+       if (fw_abort) {
+               /* Do a scan abort to stop the driver's scan engine */
+               WL_SCAN("ABORT scan in firmware\n");
+               memset(&params_le, 0, sizeof(params_le));
+               memcpy(params_le.bssid, ether_bcast, ETH_ALEN);
+               params_le.bss_type = DOT11_BSSTYPE_ANY;
+               params_le.scan_type = 0;
+               params_le.channel_num = cpu_to_le32(1);
+               params_le.nprobes = cpu_to_le32(1);
+               params_le.active_time = cpu_to_le32(-1);
+               params_le.passive_time = cpu_to_le32(-1);
+               params_le.home_time = cpu_to_le32(-1);
+               /* Scan is aborted by setting channel_list[0] to -1 */
+               params_le.channel_list[0] = cpu_to_le16(-1);
+               /* E-Scan (or any other type) can be aborted by SCAN */
+               err = brcmf_exec_dcmd(ndev, BRCMF_C_SCAN, &params_le,
+                       sizeof(params_le));
+               if (err)
+                       WL_ERR("Scan abort  failed\n");
+       }
+       /*
+        * e-scan can be initiated by scheduled scan
+        * which takes precedence.
+        */
+       if (cfg->sched_escan) {
+               WL_SCAN("scheduled scan completed\n");
+               cfg->sched_escan = false;
+               if (!aborted)
+                       cfg80211_sched_scan_results(cfg_to_wiphy(cfg));
+               brcmf_set_mpc(ndev, 1);
+       } else if (scan_request) {
+               WL_SCAN("ESCAN Completed scan: %s\n",
+                               aborted ? "Aborted" : "Done");
+               cfg80211_scan_done(scan_request, aborted);
+               brcmf_set_mpc(ndev, 1);
+       }
+       if (!test_and_clear_bit(WL_STATUS_SCANNING, &cfg->status)) {
+               WL_ERR("Scan complete while device not scanning\n");
+               return -EPERM;
+       }
+
+       return err;
+}
+
+static s32
+brcmf_run_escan(struct brcmf_cfg80211_info *cfg, struct net_device *ndev,
+               struct cfg80211_scan_request *request, u16 action)
+{
+       s32 params_size = BRCMF_SCAN_PARAMS_FIXED_SIZE +
+                         offsetof(struct brcmf_escan_params_le, params_le);
+       struct brcmf_escan_params_le *params;
+       s32 err = 0;
+
+       WL_SCAN("E-SCAN START\n");
+
+       if (request != NULL) {
+               /* Allocate space for populating channels in struct */
+               params_size += sizeof(u32) * ((request->n_channels + 1) / 2);
+
+               /* Allocate space for populating ssids in struct */
+               params_size += sizeof(struct brcmf_ssid) * request->n_ssids;
+       }
+
+       params = kzalloc(params_size, GFP_KERNEL);
+       if (!params) {
+               err = -ENOMEM;
+               goto exit;
+       }
+       BUG_ON(params_size + sizeof("escan") >= BRCMF_DCMD_MEDLEN);
+       brcmf_escan_prep(&params->params_le, request);
+       params->version = cpu_to_le32(BRCMF_ESCAN_REQ_VERSION);
+       params->action = cpu_to_le16(action);
+       params->sync_id = cpu_to_le16(0x1234);
+
+       err = brcmf_dev_iovar_setbuf(ndev, "escan", params, params_size,
+                       cfg->escan_ioctl_buf, BRCMF_DCMD_MEDLEN);
+       if (err) {
+               if (err == -EBUSY)
+                       WL_INFO("system busy : escan canceled\n");
+               else
+                       WL_ERR("error (%d)\n", err);
+       }
+
+       kfree(params);
+exit:
+       return err;
+}
+
+static s32
+brcmf_do_escan(struct brcmf_cfg80211_info *cfg, struct wiphy *wiphy,
+              struct net_device *ndev, struct cfg80211_scan_request *request)
+{
+       s32 err;
+       __le32 passive_scan;
+       struct brcmf_scan_results *results;
+
+       WL_SCAN("Enter\n");
+       cfg->escan_info.ndev = ndev;
+       cfg->escan_info.wiphy = wiphy;
+       cfg->escan_info.escan_state = WL_ESCAN_STATE_SCANNING;
+       passive_scan = cfg->active_scan ? 0 : cpu_to_le32(1);
+       err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_PASSIVE_SCAN,
+                       &passive_scan, sizeof(passive_scan));
+       if (err) {
+               WL_ERR("error (%d)\n", err);
+               return err;
+       }
+       brcmf_set_mpc(ndev, 0);
+       results = (struct brcmf_scan_results *)cfg->escan_info.escan_buf;
+       results->version = 0;
+       results->count = 0;
+       results->buflen = WL_ESCAN_RESULTS_FIXED_SIZE;
+
+       err = brcmf_run_escan(cfg, ndev, request, WL_ESCAN_ACTION_START);
+       if (err)
+               brcmf_set_mpc(ndev, 1);
+       return err;
+}
+
+static s32
+brcmf_cfg80211_escan(struct wiphy *wiphy, struct net_device *ndev,
+                    struct cfg80211_scan_request *request,
+                    struct cfg80211_ssid *this_ssid)
+{
+       struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
+       struct cfg80211_ssid *ssids;
+       struct brcmf_cfg80211_scan_req *sr = cfg->scan_req_int;
+       __le32 passive_scan;
+       bool escan_req;
+       bool spec_scan;
+       s32 err;
+       u32 SSID_len;
+
+       WL_SCAN("START ESCAN\n");
+
+       if (test_bit(WL_STATUS_SCANNING, &cfg->status)) {
+               WL_ERR("Scanning already : status (%lu)\n", cfg->status);
+               return -EAGAIN;
+       }
+       if (test_bit(WL_STATUS_SCAN_ABORTING, &cfg->status)) {
+               WL_ERR("Scanning being aborted : status (%lu)\n",
+                      cfg->status);
+               return -EAGAIN;
+       }
+       if (test_bit(WL_STATUS_CONNECTING, &cfg->status)) {
+               WL_ERR("Connecting : status (%lu)\n",
+                      cfg->status);
+               return -EAGAIN;
+       }
+
+       /* Arm scan timeout timer */
+       mod_timer(&cfg->escan_timeout, jiffies +
+                       WL_ESCAN_TIMER_INTERVAL_MS * HZ / 1000);
+
+       escan_req = false;
+       if (request) {
+               /* scan bss */
+               ssids = request->ssids;
+               escan_req = true;
+       } else {
+               /* scan in ibss */
+               /* we don't do escan in ibss */
+               ssids = this_ssid;
+       }
+
+       cfg->scan_request = request;
+       set_bit(WL_STATUS_SCANNING, &cfg->status);
+       if (escan_req) {
+               err = brcmf_do_escan(cfg, wiphy, ndev, request);
+               if (!err)
+                       return err;
+               else
+                       goto scan_out;
+       } else {
+               WL_SCAN("ssid \"%s\", ssid_len (%d)\n",
+                      ssids->ssid, ssids->ssid_len);
+               memset(&sr->ssid_le, 0, sizeof(sr->ssid_le));
+               SSID_len = min_t(u8, sizeof(sr->ssid_le.SSID), ssids->ssid_len);
+               sr->ssid_le.SSID_len = cpu_to_le32(0);
+               spec_scan = false;
+               if (SSID_len) {
+                       memcpy(sr->ssid_le.SSID, ssids->ssid, SSID_len);
+                       sr->ssid_le.SSID_len = cpu_to_le32(SSID_len);
+                       spec_scan = true;
+               } else
+                       WL_SCAN("Broadcast scan\n");
+
+               passive_scan = cfg->active_scan ? 0 : cpu_to_le32(1);
+               err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_PASSIVE_SCAN,
+                               &passive_scan, sizeof(passive_scan));
+               if (err) {
+                       WL_ERR("WLC_SET_PASSIVE_SCAN error (%d)\n", err);
+                       goto scan_out;
+               }
+               brcmf_set_mpc(ndev, 0);
+               err = brcmf_exec_dcmd(ndev, BRCMF_C_SCAN, &sr->ssid_le,
+                                     sizeof(sr->ssid_le));
+               if (err) {
+                       if (err == -EBUSY)
+                               WL_INFO("BUSY: scan for \"%s\" canceled\n",
+                                       sr->ssid_le.SSID);
+                       else
+                               WL_ERR("WLC_SCAN error (%d)\n", err);
+
+                       brcmf_set_mpc(ndev, 1);
+                       goto scan_out;
+               }
+       }
+
+       return 0;
+
+scan_out:
+       clear_bit(WL_STATUS_SCANNING, &cfg->status);
+       if (timer_pending(&cfg->escan_timeout))
+               del_timer_sync(&cfg->escan_timeout);
+       cfg->scan_request = NULL;
        return err;
 }
 
@@ -697,6 +1186,7 @@ brcmf_cfg80211_scan(struct wiphy *wiphy,
                 struct cfg80211_scan_request *request)
 {
        struct net_device *ndev = request->wdev->netdev;
+       struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
        s32 err = 0;
 
        WL_TRACE("Enter\n");
@@ -704,7 +1194,11 @@ brcmf_cfg80211_scan(struct wiphy *wiphy,
        if (!check_sys_up(wiphy))
                return -EIO;
 
-       err = __brcmf_cfg80211_scan(wiphy, ndev, request, NULL);
+       if (cfg->iscan_on)
+               err = brcmf_cfg80211_iscan(wiphy, ndev, request, NULL);
+       else if (cfg->escan_on)
+               err = brcmf_cfg80211_escan(wiphy, ndev, request, NULL);
+
        if (err)
                WL_ERR("scan error (%d)\n", err);
 
@@ -749,8 +1243,8 @@ static s32 brcmf_set_retry(struct net_device *ndev, u32 retry, bool l)
 
 static s32 brcmf_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
 {
-       struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
-       struct net_device *ndev = cfg_to_ndev(cfg_priv);
+       struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+       struct net_device *ndev = cfg_to_ndev(cfg);
        s32 err = 0;
 
        WL_TRACE("Enter\n");
@@ -758,30 +1252,30 @@ static s32 brcmf_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
                return -EIO;
 
        if (changed & WIPHY_PARAM_RTS_THRESHOLD &&
-           (cfg_priv->conf->rts_threshold != wiphy->rts_threshold)) {
-               cfg_priv->conf->rts_threshold = wiphy->rts_threshold;
-               err = brcmf_set_rts(ndev, cfg_priv->conf->rts_threshold);
+           (cfg->conf->rts_threshold != wiphy->rts_threshold)) {
+               cfg->conf->rts_threshold = wiphy->rts_threshold;
+               err = brcmf_set_rts(ndev, cfg->conf->rts_threshold);
                if (!err)
                        goto done;
        }
        if (changed & WIPHY_PARAM_FRAG_THRESHOLD &&
-           (cfg_priv->conf->frag_threshold != wiphy->frag_threshold)) {
-               cfg_priv->conf->frag_threshold = wiphy->frag_threshold;
-               err = brcmf_set_frag(ndev, cfg_priv->conf->frag_threshold);
+           (cfg->conf->frag_threshold != wiphy->frag_threshold)) {
+               cfg->conf->frag_threshold = wiphy->frag_threshold;
+               err = brcmf_set_frag(ndev, cfg->conf->frag_threshold);
                if (!err)
                        goto done;
        }
        if (changed & WIPHY_PARAM_RETRY_LONG
-           && (cfg_priv->conf->retry_long != wiphy->retry_long)) {
-               cfg_priv->conf->retry_long = wiphy->retry_long;
-               err = brcmf_set_retry(ndev, cfg_priv->conf->retry_long, true);
+           && (cfg->conf->retry_long != wiphy->retry_long)) {
+               cfg->conf->retry_long = wiphy->retry_long;
+               err = brcmf_set_retry(ndev, cfg->conf->retry_long, true);
                if (!err)
                        goto done;
        }
        if (changed & WIPHY_PARAM_RETRY_SHORT
-           && (cfg_priv->conf->retry_short != wiphy->retry_short)) {
-               cfg_priv->conf->retry_short = wiphy->retry_short;
-               err = brcmf_set_retry(ndev, cfg_priv->conf->retry_short, false);
+           && (cfg->conf->retry_short != wiphy->retry_short)) {
+               cfg->conf->retry_short = wiphy->retry_short;
+               err = brcmf_set_retry(ndev, cfg->conf->retry_short, false);
                if (!err)
                        goto done;
        }
@@ -791,74 +1285,19 @@ done:
        return err;
 }
 
-static void *brcmf_read_prof(struct brcmf_cfg80211_priv *cfg_priv, s32 item)
+static void brcmf_init_prof(struct brcmf_cfg80211_profile *prof)
 {
-       switch (item) {
-       case WL_PROF_SEC:
-               return &cfg_priv->profile->sec;
-       case WL_PROF_BSSID:
-               return &cfg_priv->profile->bssid;
-       case WL_PROF_SSID:
-               return &cfg_priv->profile->ssid;
-       }
-       WL_ERR("invalid item (%d)\n", item);
-       return NULL;
+       memset(prof, 0, sizeof(*prof));
 }
 
-static s32
-brcmf_update_prof(struct brcmf_cfg80211_priv *cfg_priv,
-                 const struct brcmf_event_msg *e, void *data, s32 item)
+static void brcmf_ch_to_chanspec(int ch, struct brcmf_join_params *join_params,
+       size_t *join_params_size)
 {
-       s32 err = 0;
-       struct brcmf_ssid *ssid;
+       u16 chanspec = 0;
 
-       switch (item) {
-       case WL_PROF_SSID:
-               ssid = (struct brcmf_ssid *) data;
-               memset(cfg_priv->profile->ssid.SSID, 0,
-                      sizeof(cfg_priv->profile->ssid.SSID));
-               memcpy(cfg_priv->profile->ssid.SSID,
-                      ssid->SSID, ssid->SSID_len);
-               cfg_priv->profile->ssid.SSID_len = ssid->SSID_len;
-               break;
-       case WL_PROF_BSSID:
-               if (data)
-                       memcpy(cfg_priv->profile->bssid, data, ETH_ALEN);
-               else
-                       memset(cfg_priv->profile->bssid, 0, ETH_ALEN);
-               break;
-       case WL_PROF_SEC:
-               memcpy(&cfg_priv->profile->sec, data,
-                      sizeof(cfg_priv->profile->sec));
-               break;
-       case WL_PROF_BEACONINT:
-               cfg_priv->profile->beacon_interval = *(u16 *)data;
-               break;
-       case WL_PROF_DTIMPERIOD:
-               cfg_priv->profile->dtim_period = *(u8 *)data;
-               break;
-       default:
-               WL_ERR("unsupported item (%d)\n", item);
-               err = -EOPNOTSUPP;
-               break;
-       }
-
-       return err;
-}
-
-static void brcmf_init_prof(struct brcmf_cfg80211_profile *prof)
-{
-       memset(prof, 0, sizeof(*prof));
-}
-
-static void brcmf_ch_to_chanspec(int ch, struct brcmf_join_params *join_params,
-       size_t *join_params_size)
-{
-       u16 chanspec = 0;
-
-       if (ch != 0) {
-               if (ch <= CH_MAX_2G_CHANNEL)
-                       chanspec |= WL_CHANSPEC_BAND_2G;
+       if (ch != 0) {
+               if (ch <= CH_MAX_2G_CHANNEL)
+                       chanspec |= WL_CHANSPEC_BAND_2G;
                else
                        chanspec |= WL_CHANSPEC_BAND_5G;
 
@@ -878,20 +1317,20 @@ static void brcmf_ch_to_chanspec(int ch, struct brcmf_join_params *join_params,
        }
 }
 
-static void brcmf_link_down(struct brcmf_cfg80211_priv *cfg_priv)
+static void brcmf_link_down(struct brcmf_cfg80211_info *cfg)
 {
        struct net_device *ndev = NULL;
        s32 err = 0;
 
        WL_TRACE("Enter\n");
 
-       if (cfg_priv->link_up) {
-               ndev = cfg_to_ndev(cfg_priv);
+       if (cfg->link_up) {
+               ndev = cfg_to_ndev(cfg);
                WL_INFO("Call WLC_DISASSOC to stop excess roaming\n ");
                err = brcmf_exec_dcmd(ndev, BRCMF_C_DISASSOC, NULL, 0);
                if (err)
                        WL_ERR("WLC_DISASSOC failed (%d)\n", err);
-               cfg_priv->link_up = false;
+               cfg->link_up = false;
        }
        WL_TRACE("Exit\n");
 }
@@ -900,13 +1339,13 @@ static s32
 brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
                      struct cfg80211_ibss_params *params)
 {
-       struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
+       struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+       struct brcmf_cfg80211_profile *profile = cfg->profile;
        struct brcmf_join_params join_params;
        size_t join_params_size = 0;
        s32 err = 0;
        s32 wsec = 0;
        s32 bcnprd;
-       struct brcmf_ssid ssid;
 
        WL_TRACE("Enter\n");
        if (!check_sys_up(wiphy))
@@ -919,7 +1358,7 @@ brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
                return -EOPNOTSUPP;
        }
 
-       set_bit(WL_STATUS_CONNECTING, &cfg_priv->status);
+       set_bit(WL_STATUS_CONNECTING, &cfg->status);
 
        if (params->bssid)
                WL_CONN("BSSID: %pM\n", params->bssid);
@@ -982,40 +1421,38 @@ brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
        memset(&join_params, 0, sizeof(struct brcmf_join_params));
 
        /* SSID */
-       ssid.SSID_len = min_t(u32, params->ssid_len, 32);
-       memcpy(ssid.SSID, params->ssid, ssid.SSID_len);
-       memcpy(join_params.ssid_le.SSID, params->ssid, ssid.SSID_len);
-       join_params.ssid_le.SSID_len = cpu_to_le32(ssid.SSID_len);
+       profile->ssid.SSID_len = min_t(u32, params->ssid_len, 32);
+       memcpy(profile->ssid.SSID, params->ssid, profile->ssid.SSID_len);
+       memcpy(join_params.ssid_le.SSID, params->ssid, profile->ssid.SSID_len);
+       join_params.ssid_le.SSID_len = cpu_to_le32(profile->ssid.SSID_len);
        join_params_size = sizeof(join_params.ssid_le);
-       brcmf_update_prof(cfg_priv, NULL, &ssid, WL_PROF_SSID);
 
        /* BSSID */
        if (params->bssid) {
                memcpy(join_params.params_le.bssid, params->bssid, ETH_ALEN);
                join_params_size = sizeof(join_params.ssid_le) +
                                   BRCMF_ASSOC_PARAMS_FIXED_SIZE;
+               memcpy(profile->bssid, params->bssid, ETH_ALEN);
        } else {
                memcpy(join_params.params_le.bssid, ether_bcast, ETH_ALEN);
+               memset(profile->bssid, 0, ETH_ALEN);
        }
 
-       brcmf_update_prof(cfg_priv, NULL,
-                         &join_params.params_le.bssid, WL_PROF_BSSID);
-
        /* Channel */
        if (params->channel) {
                u32 target_channel;
 
-               cfg_priv->channel =
+               cfg->channel =
                        ieee80211_frequency_to_channel(
                                params->channel->center_freq);
                if (params->channel_fixed) {
                        /* adding chanspec */
-                       brcmf_ch_to_chanspec(cfg_priv->channel,
+                       brcmf_ch_to_chanspec(cfg->channel,
                                &join_params, &join_params_size);
                }
 
                /* set channel for starter */
-               target_channel = cfg_priv->channel;
+               target_channel = cfg->channel;
                err = brcmf_exec_dcmd_u32(ndev, BRCM_SET_CHANNEL,
                                          &target_channel);
                if (err) {
@@ -1023,9 +1460,9 @@ brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
                        goto done;
                }
        } else
-               cfg_priv->channel = 0;
+               cfg->channel = 0;
 
-       cfg_priv->ibss_starter = false;
+       cfg->ibss_starter = false;
 
 
        err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_SSID,
@@ -1037,7 +1474,7 @@ brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
 
 done:
        if (err)
-               clear_bit(WL_STATUS_CONNECTING, &cfg_priv->status);
+               clear_bit(WL_STATUS_CONNECTING, &cfg->status);
        WL_TRACE("Exit\n");
        return err;
 }
@@ -1045,14 +1482,14 @@ done:
 static s32
 brcmf_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *ndev)
 {
-       struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
+       struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
        s32 err = 0;
 
        WL_TRACE("Enter\n");
        if (!check_sys_up(wiphy))
                return -EIO;
 
-       brcmf_link_down(cfg_priv);
+       brcmf_link_down(cfg);
 
        WL_TRACE("Exit\n");
 
@@ -1062,7 +1499,8 @@ brcmf_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *ndev)
 static s32 brcmf_set_wpa_version(struct net_device *ndev,
                                 struct cfg80211_connect_params *sme)
 {
-       struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev);
+       struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
+       struct brcmf_cfg80211_profile *profile = cfg->profile;
        struct brcmf_cfg80211_security *sec;
        s32 val = 0;
        s32 err = 0;
@@ -1079,7 +1517,7 @@ static s32 brcmf_set_wpa_version(struct net_device *ndev,
                WL_ERR("set wpa_auth failed (%d)\n", err);
                return err;
        }
-       sec = brcmf_read_prof(cfg_priv, WL_PROF_SEC);
+       sec = &profile->sec;
        sec->wpa_versions = sme->crypto.wpa_versions;
        return err;
 }
@@ -1087,7 +1525,8 @@ static s32 brcmf_set_wpa_version(struct net_device *ndev,
 static s32 brcmf_set_auth_type(struct net_device *ndev,
                               struct cfg80211_connect_params *sme)
 {
-       struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev);
+       struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
+       struct brcmf_cfg80211_profile *profile = cfg->profile;
        struct brcmf_cfg80211_security *sec;
        s32 val = 0;
        s32 err = 0;
@@ -1118,7 +1557,7 @@ static s32 brcmf_set_auth_type(struct net_device *ndev,
                WL_ERR("set auth failed (%d)\n", err);
                return err;
        }
-       sec = brcmf_read_prof(cfg_priv, WL_PROF_SEC);
+       sec = &profile->sec;
        sec->auth_type = sme->auth_type;
        return err;
 }
@@ -1127,7 +1566,8 @@ static s32
 brcmf_set_set_cipher(struct net_device *ndev,
                     struct cfg80211_connect_params *sme)
 {
-       struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev);
+       struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
+       struct brcmf_cfg80211_profile *profile = cfg->profile;
        struct brcmf_cfg80211_security *sec;
        s32 pval = 0;
        s32 gval = 0;
@@ -1183,7 +1623,7 @@ brcmf_set_set_cipher(struct net_device *ndev,
                return err;
        }
 
-       sec = brcmf_read_prof(cfg_priv, WL_PROF_SEC);
+       sec = &profile->sec;
        sec->cipher_pairwise = sme->crypto.ciphers_pairwise[0];
        sec->cipher_group = sme->crypto.cipher_group;
 
@@ -1193,7 +1633,8 @@ brcmf_set_set_cipher(struct net_device *ndev,
 static s32
 brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme)
 {
-       struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev);
+       struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
+       struct brcmf_cfg80211_profile *profile = cfg->profile;
        struct brcmf_cfg80211_security *sec;
        s32 val = 0;
        s32 err = 0;
@@ -1239,74 +1680,76 @@ brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme)
                        return err;
                }
        }
-       sec = brcmf_read_prof(cfg_priv, WL_PROF_SEC);
+       sec = &profile->sec;
        sec->wpa_auth = sme->crypto.akm_suites[0];
 
        return err;
 }
 
 static s32
-brcmf_set_wep_sharedkey(struct net_device *ndev,
-                    struct cfg80211_connect_params *sme)
+brcmf_set_sharedkey(struct net_device *ndev,
+                   struct cfg80211_connect_params *sme)
 {
-       struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev);
+       struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
+       struct brcmf_cfg80211_profile *profile = cfg->profile;
        struct brcmf_cfg80211_security *sec;
        struct brcmf_wsec_key key;
        s32 val;
        s32 err = 0;
+       s32 bssidx;
 
        WL_CONN("key len (%d)\n", sme->key_len);
 
        if (sme->key_len == 0)
                return 0;
 
-       sec = brcmf_read_prof(cfg_priv, WL_PROF_SEC);
+       sec = &profile->sec;
        WL_CONN("wpa_versions 0x%x cipher_pairwise 0x%x\n",
                sec->wpa_versions, sec->cipher_pairwise);
 
        if (sec->wpa_versions & (NL80211_WPA_VERSION_1 | NL80211_WPA_VERSION_2))
                return 0;
 
-       if (sec->cipher_pairwise &
-           (WLAN_CIPHER_SUITE_WEP40 | WLAN_CIPHER_SUITE_WEP104)) {
-               memset(&key, 0, sizeof(key));
-               key.len = (u32) sme->key_len;
-               key.index = (u32) sme->key_idx;
-               if (key.len > sizeof(key.data)) {
-                       WL_ERR("Too long key length (%u)\n", key.len);
-                       return -EINVAL;
-               }
-               memcpy(key.data, sme->key, key.len);
-               key.flags = BRCMF_PRIMARY_KEY;
-               switch (sec->cipher_pairwise) {
-               case WLAN_CIPHER_SUITE_WEP40:
-                       key.algo = CRYPTO_ALGO_WEP1;
-                       break;
-               case WLAN_CIPHER_SUITE_WEP104:
-                       key.algo = CRYPTO_ALGO_WEP128;
-                       break;
-               default:
-                       WL_ERR("Invalid algorithm (%d)\n",
-                              sme->crypto.ciphers_pairwise[0]);
-                       return -EINVAL;
-               }
-               /* Set the new key/index */
-               WL_CONN("key length (%d) key index (%d) algo (%d)\n",
-                       key.len, key.index, key.algo);
-               WL_CONN("key \"%s\"\n", key.data);
-               err = send_key_to_dongle(ndev, &key);
-               if (err)
-                       return err;
+       if (!(sec->cipher_pairwise &
+           (WLAN_CIPHER_SUITE_WEP40 | WLAN_CIPHER_SUITE_WEP104)))
+               return 0;
 
-               if (sec->auth_type == NL80211_AUTHTYPE_OPEN_SYSTEM) {
-                       WL_CONN("set auth_type to shared key\n");
-                       val = 1;        /* shared key */
-                       err = brcmf_dev_intvar_set(ndev, "auth", val);
-                       if (err) {
-                               WL_ERR("set auth failed (%d)\n", err);
-                               return err;
-                       }
-               }
+       memset(&key, 0, sizeof(key));
+       key.len = (u32) sme->key_len;
+       key.index = (u32) sme->key_idx;
+       if (key.len > sizeof(key.data)) {
+               WL_ERR("Too long key length (%u)\n", key.len);
+               return -EINVAL;
+       }
+       memcpy(key.data, sme->key, key.len);
+       key.flags = BRCMF_PRIMARY_KEY;
+       switch (sec->cipher_pairwise) {
+       case WLAN_CIPHER_SUITE_WEP40:
+               key.algo = CRYPTO_ALGO_WEP1;
+               break;
+       case WLAN_CIPHER_SUITE_WEP104:
+               key.algo = CRYPTO_ALGO_WEP128;
+               break;
+       default:
+               WL_ERR("Invalid algorithm (%d)\n",
+                      sme->crypto.ciphers_pairwise[0]);
+               return -EINVAL;
+       }
+       /* Set the new key/index */
+       WL_CONN("key length (%d) key index (%d) algo (%d)\n",
+               key.len, key.index, key.algo);
+       WL_CONN("key \"%s\"\n", key.data);
+       bssidx = brcmf_find_bssidx(cfg, ndev);
+       err = send_key_to_dongle(cfg, bssidx, ndev, &key);
+       if (err)
+               return err;
+
+       if (sec->auth_type == NL80211_AUTHTYPE_SHARED_KEY) {
+               WL_CONN("set auth_type to shared key\n");
+               val = WL_AUTH_SHARED_KEY;       /* shared key */
+               err = brcmf_dev_intvar_set_bsscfg(ndev, "auth", val, bssidx);
+               if (err)
+                       WL_ERR("set auth failed (%d)\n", err);
        }
        return err;
 }
@@ -1315,7 +1758,8 @@ static s32
 brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
                    struct cfg80211_connect_params *sme)
 {
-       struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
+       struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+       struct brcmf_cfg80211_profile *profile = cfg->profile;
        struct ieee80211_channel *chan = sme->channel;
        struct brcmf_join_params join_params;
        size_t join_params_size;
@@ -1332,15 +1776,15 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
                return -EOPNOTSUPP;
        }
 
-       set_bit(WL_STATUS_CONNECTING, &cfg_priv->status);
+       set_bit(WL_STATUS_CONNECTING, &cfg->status);
 
        if (chan) {
-               cfg_priv->channel =
+               cfg->channel =
                        ieee80211_frequency_to_channel(chan->center_freq);
                WL_CONN("channel (%d), center_req (%d)\n",
-                               cfg_priv->channel, chan->center_freq);
+                               cfg->channel, chan->center_freq);
        } else
-               cfg_priv->channel = 0;
+               cfg->channel = 0;
 
        WL_INFO("ie (%p), ie_len (%zd)\n", sme->ie, sme->ie_len);
 
@@ -1368,20 +1812,20 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
                goto done;
        }
 
-       err = brcmf_set_wep_sharedkey(ndev, sme);
+       err = brcmf_set_sharedkey(ndev, sme);
        if (err) {
-               WL_ERR("brcmf_set_wep_sharedkey failed (%d)\n", err);
+               WL_ERR("brcmf_set_sharedkey failed (%d)\n", err);
                goto done;
        }
 
        memset(&join_params, 0, sizeof(join_params));
        join_params_size = sizeof(join_params.ssid_le);
 
-       ssid.SSID_len = min_t(u32, sizeof(ssid.SSID), (u32)sme->ssid_len);
-       memcpy(&join_params.ssid_le.SSID, sme->ssid, ssid.SSID_len);
-       memcpy(&ssid.SSID, sme->ssid, ssid.SSID_len);
-       join_params.ssid_le.SSID_len = cpu_to_le32(ssid.SSID_len);
-       brcmf_update_prof(cfg_priv, NULL, &ssid, WL_PROF_SSID);
+       profile->ssid.SSID_len = min_t(u32,
+                                      sizeof(ssid.SSID), (u32)sme->ssid_len);
+       memcpy(&join_params.ssid_le.SSID, sme->ssid, profile->ssid.SSID_len);
+       memcpy(&profile->ssid.SSID, sme->ssid, profile->ssid.SSID_len);
+       join_params.ssid_le.SSID_len = cpu_to_le32(profile->ssid.SSID_len);
 
        memcpy(join_params.params_le.bssid, ether_bcast, ETH_ALEN);
 
@@ -1389,7 +1833,7 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
                WL_CONN("ssid \"%s\", len (%d)\n",
                       ssid.SSID, ssid.SSID_len);
 
-       brcmf_ch_to_chanspec(cfg_priv->channel,
+       brcmf_ch_to_chanspec(cfg->channel,
                             &join_params, &join_params_size);
        err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_SSID,
                           &join_params, join_params_size);
@@ -1398,7 +1842,7 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
 
 done:
        if (err)
-               clear_bit(WL_STATUS_CONNECTING, &cfg_priv->status);
+               clear_bit(WL_STATUS_CONNECTING, &cfg->status);
        WL_TRACE("Exit\n");
        return err;
 }
@@ -1407,7 +1851,8 @@ static s32
 brcmf_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *ndev,
                       u16 reason_code)
 {
-       struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
+       struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+       struct brcmf_cfg80211_profile *profile = cfg->profile;
        struct brcmf_scb_val_le scbval;
        s32 err = 0;
 
@@ -1415,16 +1860,16 @@ brcmf_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *ndev,
        if (!check_sys_up(wiphy))
                return -EIO;
 
-       clear_bit(WL_STATUS_CONNECTED, &cfg_priv->status);
+       clear_bit(WL_STATUS_CONNECTED, &cfg->status);
 
-       memcpy(&scbval.ea, brcmf_read_prof(cfg_priv, WL_PROF_BSSID), ETH_ALEN);
+       memcpy(&scbval.ea, &profile->bssid, ETH_ALEN);
        scbval.val = cpu_to_le32(reason_code);
        err = brcmf_exec_dcmd(ndev, BRCMF_C_DISASSOC, &scbval,
                              sizeof(struct brcmf_scb_val_le));
        if (err)
                WL_ERR("error (%d)\n", err);
 
-       cfg_priv->link_up = false;
+       cfg->link_up = false;
 
        WL_TRACE("Exit\n");
        return err;
@@ -1435,8 +1880,8 @@ brcmf_cfg80211_set_tx_power(struct wiphy *wiphy,
                            enum nl80211_tx_power_setting type, s32 mbm)
 {
 
-       struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
-       struct net_device *ndev = cfg_to_ndev(cfg_priv);
+       struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+       struct net_device *ndev = cfg_to_ndev(cfg);
        u16 txpwrmw;
        s32 err = 0;
        s32 disable = 0;
@@ -1472,7 +1917,7 @@ brcmf_cfg80211_set_tx_power(struct wiphy *wiphy,
                        (s32) (brcmf_mw_to_qdbm(txpwrmw)));
        if (err)
                WL_ERR("qtxpower error (%d)\n", err);
-       cfg_priv->conf->tx_power = dbm;
+       cfg->conf->tx_power = dbm;
 
 done:
        WL_TRACE("Exit\n");
@@ -1481,8 +1926,8 @@ done:
 
 static s32 brcmf_cfg80211_get_tx_power(struct wiphy *wiphy, s32 *dbm)
 {
-       struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
-       struct net_device *ndev = cfg_to_ndev(cfg_priv);
+       struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+       struct net_device *ndev = cfg_to_ndev(cfg);
        s32 txpwrdbm;
        u8 result;
        s32 err = 0;
@@ -1509,16 +1954,19 @@ static s32
 brcmf_cfg80211_config_default_key(struct wiphy *wiphy, struct net_device *ndev,
                               u8 key_idx, bool unicast, bool multicast)
 {
+       struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
        u32 index;
        u32 wsec;
        s32 err = 0;
+       s32 bssidx;
 
        WL_TRACE("Enter\n");
        WL_CONN("key index (%d)\n", key_idx);
        if (!check_sys_up(wiphy))
                return -EIO;
 
-       err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_GET_WSEC, &wsec);
+       bssidx = brcmf_find_bssidx(cfg, ndev);
+       err = brcmf_dev_intvar_get_bsscfg(ndev, "wsec", &wsec, bssidx);
        if (err) {
                WL_ERR("WLC_GET_WSEC error (%d)\n", err);
                goto done;
@@ -1541,9 +1989,11 @@ static s32
 brcmf_add_keyext(struct wiphy *wiphy, struct net_device *ndev,
              u8 key_idx, const u8 *mac_addr, struct key_params *params)
 {
+       struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
        struct brcmf_wsec_key key;
        struct brcmf_wsec_key_le key_le;
        s32 err = 0;
+       s32 bssidx;
 
        memset(&key, 0, sizeof(key));
        key.index = (u32) key_idx;
@@ -1552,12 +2002,13 @@ brcmf_add_keyext(struct wiphy *wiphy, struct net_device *ndev,
        if (!is_multicast_ether_addr(mac_addr))
                memcpy((char *)&key.ea, (void *)mac_addr, ETH_ALEN);
        key.len = (u32) params->key_len;
+       bssidx = brcmf_find_bssidx(cfg, ndev);
        /* check for key index change */
        if (key.len == 0) {
                /* key delete */
-               err = send_key_to_dongle(ndev, &key);
+               err = send_key_to_dongle(cfg, bssidx, ndev, &key);
                if (err)
-                       return err;
+                       WL_ERR("key delete error (%d)\n", err);
        } else {
                if (key.len > sizeof(key.data)) {
                        WL_ERR("Invalid key length (%d)\n", key.len);
@@ -1613,12 +2064,12 @@ brcmf_add_keyext(struct wiphy *wiphy, struct net_device *ndev,
                convert_key_from_CPU(&key, &key_le);
 
                brcmf_netdev_wait_pend8021x(ndev);
-               err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_KEY, &key_le,
-                                     sizeof(key_le));
-               if (err) {
-                       WL_ERR("WLC_SET_KEY error (%d)\n", err);
-                       return err;
-               }
+               err  = brcmf_dev_iovar_setbuf_bsscfg(ndev, "wsec_key", &key_le,
+                                                    sizeof(key_le),
+                                                    cfg->extra_buf,
+                                                    WL_EXTRA_BUF_MAX, bssidx);
+               if (err)
+                       WL_ERR("wsec_key error (%d)\n", err);
        }
        return err;
 }
@@ -1628,11 +2079,13 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
                    u8 key_idx, bool pairwise, const u8 *mac_addr,
                    struct key_params *params)
 {
+       struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
        struct brcmf_wsec_key key;
        s32 val;
        s32 wsec;
        s32 err = 0;
        u8 keybuf[8];
+       s32 bssidx;
 
        WL_TRACE("Enter\n");
        WL_CONN("key index (%d)\n", key_idx);
@@ -1659,25 +2112,33 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
        switch (params->cipher) {
        case WLAN_CIPHER_SUITE_WEP40:
                key.algo = CRYPTO_ALGO_WEP1;
+               val = WEP_ENABLED;
                WL_CONN("WLAN_CIPHER_SUITE_WEP40\n");
                break;
        case WLAN_CIPHER_SUITE_WEP104:
                key.algo = CRYPTO_ALGO_WEP128;
+               val = WEP_ENABLED;
                WL_CONN("WLAN_CIPHER_SUITE_WEP104\n");
                break;
        case WLAN_CIPHER_SUITE_TKIP:
-               memcpy(keybuf, &key.data[24], sizeof(keybuf));
-               memcpy(&key.data[24], &key.data[16], sizeof(keybuf));
-               memcpy(&key.data[16], keybuf, sizeof(keybuf));
+               if (cfg->conf->mode != WL_MODE_AP) {
+                       WL_CONN("Swapping key\n");
+                       memcpy(keybuf, &key.data[24], sizeof(keybuf));
+                       memcpy(&key.data[24], &key.data[16], sizeof(keybuf));
+                       memcpy(&key.data[16], keybuf, sizeof(keybuf));
+               }
                key.algo = CRYPTO_ALGO_TKIP;
+               val = TKIP_ENABLED;
                WL_CONN("WLAN_CIPHER_SUITE_TKIP\n");
                break;
        case WLAN_CIPHER_SUITE_AES_CMAC:
                key.algo = CRYPTO_ALGO_AES_CCM;
+               val = AES_ENABLED;
                WL_CONN("WLAN_CIPHER_SUITE_AES_CMAC\n");
                break;
        case WLAN_CIPHER_SUITE_CCMP:
                key.algo = CRYPTO_ALGO_AES_CCM;
+               val = AES_ENABLED;
                WL_CONN("WLAN_CIPHER_SUITE_CCMP\n");
                break;
        default:
@@ -1686,28 +2147,23 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
                goto done;
        }
 
-       err = send_key_to_dongle(ndev, &key); /* Set the new key/index */
+       bssidx = brcmf_find_bssidx(cfg, ndev);
+       err = send_key_to_dongle(cfg, bssidx, ndev, &key);
        if (err)
                goto done;
 
-       val = WEP_ENABLED;
-       err = brcmf_dev_intvar_get(ndev, "wsec", &wsec);
+       err = brcmf_dev_intvar_get_bsscfg(ndev, "wsec", &wsec, bssidx);
        if (err) {
                WL_ERR("get wsec error (%d)\n", err);
                goto done;
        }
-       wsec &= ~(WEP_ENABLED);
        wsec |= val;
-       err = brcmf_dev_intvar_set(ndev, "wsec", wsec);
+       err = brcmf_dev_intvar_set_bsscfg(ndev, "wsec", wsec, bssidx);
        if (err) {
                WL_ERR("set wsec error (%d)\n", err);
                goto done;
        }
 
-       val = 1;                /* assume shared key. otherwise 0 */
-       err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_AUTH, &val);
-       if (err)
-               WL_ERR("WLC_SET_AUTH error (%d)\n", err);
 done:
        WL_TRACE("Exit\n");
        return err;
@@ -1717,10 +2173,10 @@ static s32
 brcmf_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
                    u8 key_idx, bool pairwise, const u8 *mac_addr)
 {
+       struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
        struct brcmf_wsec_key key;
        s32 err = 0;
-       s32 val;
-       s32 wsec;
+       s32 bssidx;
 
        WL_TRACE("Enter\n");
        if (!check_sys_up(wiphy))
@@ -1735,7 +2191,8 @@ brcmf_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
        WL_CONN("key index (%d)\n", key_idx);
 
        /* Set the new key/index */
-       err = send_key_to_dongle(ndev, &key);
+       bssidx = brcmf_find_bssidx(cfg, ndev);
+       err = send_key_to_dongle(cfg, bssidx, ndev, &key);
        if (err) {
                if (err == -EINVAL) {
                        if (key.index >= DOT11_MAX_DEFAULT_KEYS)
@@ -1744,35 +2201,8 @@ brcmf_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
                }
                /* Ignore this error, may happen during DISASSOC */
                err = -EAGAIN;
-               goto done;
-       }
-
-       val = 0;
-       err = brcmf_dev_intvar_get(ndev, "wsec", &wsec);
-       if (err) {
-               WL_ERR("get wsec error (%d)\n", err);
-               /* Ignore this error, may happen during DISASSOC */
-               err = -EAGAIN;
-               goto done;
-       }
-       wsec &= ~(WEP_ENABLED);
-       wsec |= val;
-       err = brcmf_dev_intvar_set(ndev, "wsec", wsec);
-       if (err) {
-               WL_ERR("set wsec error (%d)\n", err);
-               /* Ignore this error, may happen during DISASSOC */
-               err = -EAGAIN;
-               goto done;
        }
 
-       val = 0;                /* assume open key. otherwise 1 */
-       err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_AUTH, &val);
-       if (err) {
-               WL_ERR("WLC_SET_AUTH error (%d)\n", err);
-               /* Ignore this error, may happen during DISASSOC */
-               err = -EAGAIN;
-       }
-done:
        WL_TRACE("Exit\n");
        return err;
 }
@@ -1783,10 +2213,12 @@ brcmf_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
                    void (*callback) (void *cookie, struct key_params * params))
 {
        struct key_params params;
-       struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
+       struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+       struct brcmf_cfg80211_profile *profile = cfg->profile;
        struct brcmf_cfg80211_security *sec;
        s32 wsec;
        s32 err = 0;
+       s32 bssidx;
 
        WL_TRACE("Enter\n");
        WL_CONN("key index (%d)\n", key_idx);
@@ -1795,16 +2227,17 @@ brcmf_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
 
        memset(&params, 0, sizeof(params));
 
-       err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_GET_WSEC, &wsec);
+       bssidx = brcmf_find_bssidx(cfg, ndev);
+       err = brcmf_dev_intvar_get_bsscfg(ndev, "wsec", &wsec, bssidx);
        if (err) {
                WL_ERR("WLC_GET_WSEC error (%d)\n", err);
                /* Ignore this error, may happen during DISASSOC */
                err = -EAGAIN;
                goto done;
        }
-       switch (wsec) {
+       switch (wsec & ~SES_OW_ENABLED) {
        case WEP_ENABLED:
-               sec = brcmf_read_prof(cfg_priv, WL_PROF_SEC);
+               sec = &profile->sec;
                if (sec->cipher_pairwise & WLAN_CIPHER_SUITE_WEP40) {
                        params.cipher = WLAN_CIPHER_SUITE_WEP40;
                        WL_CONN("WLAN_CIPHER_SUITE_WEP40\n");
@@ -1844,53 +2277,73 @@ brcmf_cfg80211_config_default_mgmt_key(struct wiphy *wiphy,
 
 static s32
 brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
-                       u8 *mac, struct station_info *sinfo)
+                          u8 *mac, struct station_info *sinfo)
 {
-       struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
+       struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+       struct brcmf_cfg80211_profile *profile = cfg->profile;
        struct brcmf_scb_val_le scb_val;
        int rssi;
        s32 rate;
        s32 err = 0;
-       u8 *bssid = brcmf_read_prof(cfg_priv, WL_PROF_BSSID);
+       u8 *bssid = profile->bssid;
+       struct brcmf_sta_info_le *sta_info_le;
 
-       WL_TRACE("Enter\n");
+       WL_TRACE("Enter, MAC %pM\n", mac);
        if (!check_sys_up(wiphy))
                return -EIO;
 
-       if (memcmp(mac, bssid, ETH_ALEN)) {
-               WL_ERR("Wrong Mac address cfg_mac-%X:%X:%X:%X:%X:%X"
-                       "wl_bssid-%X:%X:%X:%X:%X:%X\n",
-                       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
-                       bssid[0], bssid[1], bssid[2], bssid[3],
-                       bssid[4], bssid[5]);
-               err = -ENOENT;
-               goto done;
-       }
-
-       /* Report the current tx rate */
-       err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_GET_RATE, &rate);
-       if (err) {
-               WL_ERR("Could not get rate (%d)\n", err);
-       } else {
-               sinfo->filled |= STATION_INFO_TX_BITRATE;
-               sinfo->txrate.legacy = rate * 5;
-               WL_CONN("Rate %d Mbps\n", rate / 2);
-       }
+       if (cfg->conf->mode == WL_MODE_AP) {
+               err = brcmf_dev_iovar_getbuf(ndev, "sta_info", mac, ETH_ALEN,
+                                            cfg->dcmd_buf,
+                                            WL_DCMD_LEN_MAX);
+               if (err < 0) {
+                       WL_ERR("GET STA INFO failed, %d\n", err);
+                       goto done;
+               }
+               sta_info_le = (struct brcmf_sta_info_le *)cfg->dcmd_buf;
 
-       if (test_bit(WL_STATUS_CONNECTED, &cfg_priv->status)) {
-               memset(&scb_val, 0, sizeof(scb_val));
-               err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_RSSI, &scb_val,
-                                     sizeof(struct brcmf_scb_val_le));
+               sinfo->filled = STATION_INFO_INACTIVE_TIME;
+               sinfo->inactive_time = le32_to_cpu(sta_info_le->idle) * 1000;
+               if (le32_to_cpu(sta_info_le->flags) & BRCMF_STA_ASSOC) {
+                       sinfo->filled |= STATION_INFO_CONNECTED_TIME;
+                       sinfo->connected_time = le32_to_cpu(sta_info_le->in);
+               }
+               WL_TRACE("STA idle time : %d ms, connected time :%d sec\n",
+                        sinfo->inactive_time, sinfo->connected_time);
+       } else if (cfg->conf->mode == WL_MODE_BSS) {
+               if (memcmp(mac, bssid, ETH_ALEN)) {
+                       WL_ERR("Wrong Mac address cfg_mac-%pM wl_bssid-%pM\n",
+                              mac, bssid);
+                       err = -ENOENT;
+                       goto done;
+               }
+               /* Report the current tx rate */
+               err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_GET_RATE, &rate);
                if (err) {
-                       WL_ERR("Could not get rssi (%d)\n", err);
+                       WL_ERR("Could not get rate (%d)\n", err);
+                       goto done;
                } else {
-                       rssi = le32_to_cpu(scb_val.val);
-                       sinfo->filled |= STATION_INFO_SIGNAL;
-                       sinfo->signal = rssi;
-                       WL_CONN("RSSI %d dBm\n", rssi);
+                       sinfo->filled |= STATION_INFO_TX_BITRATE;
+                       sinfo->txrate.legacy = rate * 5;
+                       WL_CONN("Rate %d Mbps\n", rate / 2);
                }
-       }
 
+               if (test_bit(WL_STATUS_CONNECTED, &cfg->status)) {
+                       memset(&scb_val, 0, sizeof(scb_val));
+                       err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_RSSI, &scb_val,
+                                             sizeof(scb_val));
+                       if (err) {
+                               WL_ERR("Could not get rssi (%d)\n", err);
+                               goto done;
+                       } else {
+                               rssi = le32_to_cpu(scb_val.val);
+                               sinfo->filled |= STATION_INFO_SIGNAL;
+                               sinfo->signal = rssi;
+                               WL_CONN("RSSI %d dBm\n", rssi);
+                       }
+               }
+       } else
+               err = -EPERM;
 done:
        WL_TRACE("Exit\n");
        return err;
@@ -1902,7 +2355,7 @@ brcmf_cfg80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *ndev,
 {
        s32 pm;
        s32 err = 0;
-       struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
+       struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
 
        WL_TRACE("Enter\n");
 
@@ -1910,14 +2363,13 @@ brcmf_cfg80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *ndev,
         * Powersave enable/disable request is coming from the
         * cfg80211 even before the interface is up. In that
         * scenario, driver will be storing the power save
-        * preference in cfg_priv struct to apply this to
+        * preference in cfg struct to apply this to
         * FW later while initializing the dongle
         */
-       cfg_priv->pwr_save = enabled;
-       if (!test_bit(WL_STATUS_READY, &cfg_priv->status)) {
+       cfg->pwr_save = enabled;
+       if (!test_bit(WL_STATUS_READY, &cfg->status)) {
 
-               WL_INFO("Device is not ready,"
-                       "storing the value in cfg_priv struct\n");
+               WL_INFO("Device is not ready, storing the value in cfg_info struct\n");
                goto done;
        }
 
@@ -1995,10 +2447,10 @@ done:
        return err;
 }
 
-static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_priv *cfg_priv,
+static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_info *cfg,
                                   struct brcmf_bss_info_le *bi)
 {
-       struct wiphy *wiphy = cfg_to_wiphy(cfg_priv);
+       struct wiphy *wiphy = cfg_to_wiphy(cfg);
        struct ieee80211_channel *notify_channel;
        struct cfg80211_bss *bss;
        struct ieee80211_supported_band *band;
@@ -2062,14 +2514,14 @@ next_bss_le(struct brcmf_scan_results *list, struct brcmf_bss_info_le *bss)
                                            le32_to_cpu(bss->length));
 }
 
-static s32 brcmf_inform_bss(struct brcmf_cfg80211_priv *cfg_priv)
+static s32 brcmf_inform_bss(struct brcmf_cfg80211_info *cfg)
 {
        struct brcmf_scan_results *bss_list;
        struct brcmf_bss_info_le *bi = NULL;    /* must be initialized */
        s32 err = 0;
        int i;
 
-       bss_list = cfg_priv->bss_list;
+       bss_list = cfg->bss_list;
        if (bss_list->version != BRCMF_BSS_INFO_VERSION) {
                WL_ERR("Version %d != WL_BSS_INFO_VERSION\n",
                       bss_list->version);
@@ -2078,17 +2530,17 @@ static s32 brcmf_inform_bss(struct brcmf_cfg80211_priv *cfg_priv)
        WL_SCAN("scanned AP count (%d)\n", bss_list->count);
        for (i = 0; i < bss_list->count && i < WL_AP_MAX; i++) {
                bi = next_bss_le(bss_list, bi);
-               err = brcmf_inform_single_bss(cfg_priv, bi);
+               err = brcmf_inform_single_bss(cfg, bi);
                if (err)
                        break;
        }
        return err;
 }
 
-static s32 wl_inform_ibss(struct brcmf_cfg80211_priv *cfg_priv,
+static s32 wl_inform_ibss(struct brcmf_cfg80211_info *cfg,
                          struct net_device *ndev, const u8 *bssid)
 {
-       struct wiphy *wiphy = cfg_to_wiphy(cfg_priv);
+       struct wiphy *wiphy = cfg_to_wiphy(cfg);
        struct ieee80211_channel *notify_channel;
        struct brcmf_bss_info_le *bi = NULL;
        struct ieee80211_supported_band *band;
@@ -2163,9 +2615,9 @@ CleanUp:
        return err;
 }
 
-static bool brcmf_is_ibssmode(struct brcmf_cfg80211_priv *cfg_priv)
+static bool brcmf_is_ibssmode(struct brcmf_cfg80211_info *cfg)
 {
-       return cfg_priv->conf->mode == WL_MODE_IBSS;
+       return cfg->conf->mode == WL_MODE_IBSS;
 }
 
 /*
@@ -2182,22 +2634,62 @@ static struct brcmf_tlv *brcmf_parse_tlvs(void *buf, int buflen, uint key)
        totlen = buflen;
 
        /* find tagged parameter */
-       while (totlen >= 2) {
+       while (totlen >= TLV_HDR_LEN) {
                int len = elt->len;
 
                /* validate remaining totlen */
-               if ((elt->id == key) && (totlen >= (len + 2)))
+               if ((elt->id == key) && (totlen >= (len + TLV_HDR_LEN)))
                        return elt;
 
-               elt = (struct brcmf_tlv *) ((u8 *) elt + (len + 2));
-               totlen -= (len + 2);
+               elt = (struct brcmf_tlv *) ((u8 *) elt + (len + TLV_HDR_LEN));
+               totlen -= (len + TLV_HDR_LEN);
+       }
+
+       return NULL;
+}
+
+/* Is any of the tlvs the expected entry? If
+ * not update the tlvs buffer pointer/length.
+ */
+static bool
+brcmf_tlv_has_ie(u8 *ie, u8 **tlvs, u32 *tlvs_len,
+                u8 *oui, u32 oui_len, u8 type)
+{
+       /* If the contents match the OUI and the type */
+       if (ie[TLV_LEN_OFF] >= oui_len + 1 &&
+           !memcmp(&ie[TLV_BODY_OFF], oui, oui_len) &&
+           type == ie[TLV_BODY_OFF + oui_len]) {
+               return true;
        }
 
+       if (tlvs == NULL)
+               return false;
+       /* point to the next ie */
+       ie += ie[TLV_LEN_OFF] + TLV_HDR_LEN;
+       /* calculate the length of the rest of the buffer */
+       *tlvs_len -= (int)(ie - *tlvs);
+       /* update the pointer to the start of the buffer */
+       *tlvs = ie;
+
+       return false;
+}
+
+struct brcmf_vs_tlv *
+brcmf_find_wpaie(u8 *parse, u32 len)
+{
+       struct brcmf_tlv *ie;
+
+       while ((ie = brcmf_parse_tlvs(parse, len, WLAN_EID_WPA))) {
+               if (brcmf_tlv_has_ie((u8 *)ie, &parse, &len,
+                                    WPA_OUI, TLV_OUI_LEN, WPA_OUI_TYPE))
+                       return (struct brcmf_vs_tlv *)ie;
+       }
        return NULL;
 }
 
-static s32 brcmf_update_bss_info(struct brcmf_cfg80211_priv *cfg_priv)
+static s32 brcmf_update_bss_info(struct brcmf_cfg80211_info *cfg)
 {
+       struct brcmf_cfg80211_profile *profile = cfg->profile;
        struct brcmf_bss_info_le *bi;
        struct brcmf_ssid *ssid;
        struct brcmf_tlv *tim;
@@ -2208,21 +2700,21 @@ static s32 brcmf_update_bss_info(struct brcmf_cfg80211_priv *cfg_priv)
        s32 err = 0;
 
        WL_TRACE("Enter\n");
-       if (brcmf_is_ibssmode(cfg_priv))
+       if (brcmf_is_ibssmode(cfg))
                return err;
 
-       ssid = (struct brcmf_ssid *)brcmf_read_prof(cfg_priv, WL_PROF_SSID);
+       ssid = &profile->ssid;
 
-       *(__le32 *)cfg_priv->extra_buf = cpu_to_le32(WL_EXTRA_BUF_MAX);
-       err = brcmf_exec_dcmd(cfg_to_ndev(cfg_priv), BRCMF_C_GET_BSS_INFO,
-                       cfg_priv->extra_buf, WL_EXTRA_BUF_MAX);
+       *(__le32 *)cfg->extra_buf = cpu_to_le32(WL_EXTRA_BUF_MAX);
+       err = brcmf_exec_dcmd(cfg_to_ndev(cfg), BRCMF_C_GET_BSS_INFO,
+                       cfg->extra_buf, WL_EXTRA_BUF_MAX);
        if (err) {
                WL_ERR("Could not get bss info %d\n", err);
                goto update_bss_info_out;
        }
 
-       bi = (struct brcmf_bss_info_le *)(cfg_priv->extra_buf + 4);
-       err = brcmf_inform_single_bss(cfg_priv, bi);
+       bi = (struct brcmf_bss_info_le *)(cfg->extra_buf + 4);
+       err = brcmf_inform_single_bss(cfg, bi);
        if (err)
                goto update_bss_info_out;
 
@@ -2240,7 +2732,7 @@ static s32 brcmf_update_bss_info(struct brcmf_cfg80211_priv *cfg_priv)
                * so we specifically query dtim information from the dongle.
                */
                u32 var;
-               err = brcmf_dev_intvar_get(cfg_to_ndev(cfg_priv),
+               err = brcmf_dev_intvar_get(cfg_to_ndev(cfg),
                                           "dtim_assoc", &var);
                if (err) {
                        WL_ERR("wl dtim_assoc failed (%d)\n", err);
@@ -2249,20 +2741,22 @@ static s32 brcmf_update_bss_info(struct brcmf_cfg80211_priv *cfg_priv)
                dtim_period = (u8)var;
        }
 
-       brcmf_update_prof(cfg_priv, NULL, &beacon_interval, WL_PROF_BEACONINT);
-       brcmf_update_prof(cfg_priv, NULL, &dtim_period, WL_PROF_DTIMPERIOD);
+       profile->beacon_interval = beacon_interval;
+       profile->dtim_period = dtim_period;
 
 update_bss_info_out:
        WL_TRACE("Exit");
        return err;
 }
 
-static void brcmf_term_iscan(struct brcmf_cfg80211_priv *cfg_priv)
+static void brcmf_abort_scanning(struct brcmf_cfg80211_info *cfg)
 {
-       struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_to_iscan(cfg_priv);
+       struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_to_iscan(cfg);
+       struct escan_info *escan = &cfg->escan_info;
        struct brcmf_ssid ssid;
 
-       if (cfg_priv->iscan_on) {
+       set_bit(WL_STATUS_SCAN_ABORTING, &cfg->status);
+       if (cfg->iscan_on) {
                iscan->state = WL_ISCAN_STATE_IDLE;
 
                if (iscan->timer_on) {
@@ -2275,27 +2769,40 @@ static void brcmf_term_iscan(struct brcmf_cfg80211_priv *cfg_priv)
                /* Abort iscan running in FW */
                memset(&ssid, 0, sizeof(ssid));
                brcmf_run_iscan(iscan, &ssid, WL_SCAN_ACTION_ABORT);
+
+               if (cfg->scan_request) {
+                       /* Indicate scan abort to cfg80211 layer */
+                       WL_INFO("Terminating scan in progress\n");
+                       cfg80211_scan_done(cfg->scan_request, true);
+                       cfg->scan_request = NULL;
+               }
        }
+       if (cfg->escan_on && cfg->scan_request) {
+               escan->escan_state = WL_ESCAN_STATE_IDLE;
+               brcmf_notify_escan_complete(cfg, escan->ndev, true, true);
+       }
+       clear_bit(WL_STATUS_SCANNING, &cfg->status);
+       clear_bit(WL_STATUS_SCAN_ABORTING, &cfg->status);
 }
 
 static void brcmf_notify_iscan_complete(struct brcmf_cfg80211_iscan_ctrl *iscan,
                                        bool aborted)
 {
-       struct brcmf_cfg80211_priv *cfg_priv = iscan_to_cfg(iscan);
-       struct net_device *ndev = cfg_to_ndev(cfg_priv);
+       struct brcmf_cfg80211_info *cfg = iscan_to_cfg(iscan);
+       struct net_device *ndev = cfg_to_ndev(cfg);
 
-       if (!test_and_clear_bit(WL_STATUS_SCANNING, &cfg_priv->status)) {
+       if (!test_and_clear_bit(WL_STATUS_SCANNING, &cfg->status)) {
                WL_ERR("Scan complete while device not scanning\n");
                return;
        }
-       if (cfg_priv->scan_request) {
+       if (cfg->scan_request) {
                WL_SCAN("ISCAN Completed scan: %s\n",
                                aborted ? "Aborted" : "Done");
-               cfg80211_scan_done(cfg_priv->scan_request, aborted);
+               cfg80211_scan_done(cfg->scan_request, aborted);
                brcmf_set_mpc(ndev, 1);
-               cfg_priv->scan_request = NULL;
+               cfg->scan_request = NULL;
        }
-       cfg_priv->iscan_kickstart = false;
+       cfg->iscan_kickstart = false;
 }
 
 static s32 brcmf_wakeup_iscan(struct brcmf_cfg80211_iscan_ctrl *iscan)
@@ -2348,21 +2855,21 @@ brcmf_get_iscan_results(struct brcmf_cfg80211_iscan_ctrl *iscan, u32 *status,
        return err;
 }
 
-static s32 brcmf_iscan_done(struct brcmf_cfg80211_priv *cfg_priv)
+static s32 brcmf_iscan_done(struct brcmf_cfg80211_info *cfg)
 {
-       struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_priv->iscan;
+       struct brcmf_cfg80211_iscan_ctrl *iscan = cfg->iscan;
        s32 err = 0;
 
        iscan->state = WL_ISCAN_STATE_IDLE;
-       brcmf_inform_bss(cfg_priv);
+       brcmf_inform_bss(cfg);
        brcmf_notify_iscan_complete(iscan, false);
 
        return err;
 }
 
-static s32 brcmf_iscan_pending(struct brcmf_cfg80211_priv *cfg_priv)
+static s32 brcmf_iscan_pending(struct brcmf_cfg80211_info *cfg)
 {
-       struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_priv->iscan;
+       struct brcmf_cfg80211_iscan_ctrl *iscan = cfg->iscan;
        s32 err = 0;
 
        /* Reschedule the timer */
@@ -2372,12 +2879,12 @@ static s32 brcmf_iscan_pending(struct brcmf_cfg80211_priv *cfg_priv)
        return err;
 }
 
-static s32 brcmf_iscan_inprogress(struct brcmf_cfg80211_priv *cfg_priv)
+static s32 brcmf_iscan_inprogress(struct brcmf_cfg80211_info *cfg)
 {
-       struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_priv->iscan;
+       struct brcmf_cfg80211_iscan_ctrl *iscan = cfg->iscan;
        s32 err = 0;
 
-       brcmf_inform_bss(cfg_priv);
+       brcmf_inform_bss(cfg);
        brcmf_run_iscan(iscan, NULL, BRCMF_SCAN_ACTION_CONTINUE);
        /* Reschedule the timer */
        mod_timer(&iscan->timer, jiffies + iscan->timer_ms * HZ / 1000);
@@ -2386,9 +2893,9 @@ static s32 brcmf_iscan_inprogress(struct brcmf_cfg80211_priv *cfg_priv)
        return err;
 }
 
-static s32 brcmf_iscan_aborted(struct brcmf_cfg80211_priv *cfg_priv)
+static s32 brcmf_iscan_aborted(struct brcmf_cfg80211_info *cfg)
 {
-       struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_priv->iscan;
+       struct brcmf_cfg80211_iscan_ctrl *iscan = cfg->iscan;
        s32 err = 0;
 
        iscan->state = WL_ISCAN_STATE_IDLE;
@@ -2402,7 +2909,7 @@ static void brcmf_cfg80211_iscan_handler(struct work_struct *work)
        struct brcmf_cfg80211_iscan_ctrl *iscan =
                        container_of(work, struct brcmf_cfg80211_iscan_ctrl,
                                     work);
-       struct brcmf_cfg80211_priv *cfg_priv = iscan_to_cfg(iscan);
+       struct brcmf_cfg80211_info *cfg = iscan_to_cfg(iscan);
        struct brcmf_cfg80211_iscan_eloop *el = &iscan->el;
        u32 status = BRCMF_SCAN_RESULTS_PARTIAL;
 
@@ -2411,12 +2918,12 @@ static void brcmf_cfg80211_iscan_handler(struct work_struct *work)
                iscan->timer_on = 0;
        }
 
-       if (brcmf_get_iscan_results(iscan, &status, &cfg_priv->bss_list)) {
+       if (brcmf_get_iscan_results(iscan, &status, &cfg->bss_list)) {
                status = BRCMF_SCAN_RESULTS_ABORTED;
                WL_ERR("Abort iscan\n");
        }
 
-       el->handler[status](cfg_priv);
+       el->handler[status](cfg);
 }
 
 static void brcmf_iscan_timer(unsigned long data)
@@ -2431,11 +2938,11 @@ static void brcmf_iscan_timer(unsigned long data)
        }
 }
 
-static s32 brcmf_invoke_iscan(struct brcmf_cfg80211_priv *cfg_priv)
+static s32 brcmf_invoke_iscan(struct brcmf_cfg80211_info *cfg)
 {
-       struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_to_iscan(cfg_priv);
+       struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_to_iscan(cfg);
 
-       if (cfg_priv->iscan_on) {
+       if (cfg->iscan_on) {
                iscan->state = WL_ISCAN_STATE_IDLE;
                INIT_WORK(&iscan->work, brcmf_cfg80211_iscan_handler);
        }
@@ -2453,26 +2960,192 @@ static void brcmf_init_iscan_eloop(struct brcmf_cfg80211_iscan_eloop *el)
        el->handler[BRCMF_SCAN_RESULTS_NO_MEM] = brcmf_iscan_aborted;
 }
 
-static s32 brcmf_init_iscan(struct brcmf_cfg80211_priv *cfg_priv)
+static s32 brcmf_init_iscan(struct brcmf_cfg80211_info *cfg)
 {
-       struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_to_iscan(cfg_priv);
+       struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_to_iscan(cfg);
        int err = 0;
 
-       if (cfg_priv->iscan_on) {
-               iscan->ndev = cfg_to_ndev(cfg_priv);
+       if (cfg->iscan_on) {
+               iscan->ndev = cfg_to_ndev(cfg);
                brcmf_init_iscan_eloop(&iscan->el);
                iscan->timer_ms = WL_ISCAN_TIMER_INTERVAL_MS;
                init_timer(&iscan->timer);
                iscan->timer.data = (unsigned long) iscan;
                iscan->timer.function = brcmf_iscan_timer;
-               err = brcmf_invoke_iscan(cfg_priv);
+               err = brcmf_invoke_iscan(cfg);
                if (!err)
-                       iscan->data = cfg_priv;
+                       iscan->data = cfg;
+       }
+
+       return err;
+}
+
+static void brcmf_cfg80211_escan_timeout_worker(struct work_struct *work)
+{
+       struct brcmf_cfg80211_info *cfg =
+                       container_of(work, struct brcmf_cfg80211_info,
+                                    escan_timeout_work);
+
+       brcmf_notify_escan_complete(cfg,
+               cfg->escan_info.ndev, true, true);
+}
+
+static void brcmf_escan_timeout(unsigned long data)
+{
+       struct brcmf_cfg80211_info *cfg =
+                       (struct brcmf_cfg80211_info *)data;
+
+       if (cfg->scan_request) {
+               WL_ERR("timer expired\n");
+               if (cfg->escan_on)
+                       schedule_work(&cfg->escan_timeout_work);
+       }
+}
+
+static s32
+brcmf_compare_update_same_bss(struct brcmf_bss_info_le *bss,
+                             struct brcmf_bss_info_le *bss_info_le)
+{
+       if (!memcmp(&bss_info_le->BSSID, &bss->BSSID, ETH_ALEN) &&
+               (CHSPEC_BAND(le16_to_cpu(bss_info_le->chanspec)) ==
+               CHSPEC_BAND(le16_to_cpu(bss->chanspec))) &&
+               bss_info_le->SSID_len == bss->SSID_len &&
+               !memcmp(bss_info_le->SSID, bss->SSID, bss_info_le->SSID_len)) {
+               if ((bss->flags & WLC_BSS_RSSI_ON_CHANNEL) ==
+                       (bss_info_le->flags & WLC_BSS_RSSI_ON_CHANNEL)) {
+                       s16 bss_rssi = le16_to_cpu(bss->RSSI);
+                       s16 bss_info_rssi = le16_to_cpu(bss_info_le->RSSI);
+
+                       /* preserve max RSSI if the measurements are
+                       * both on-channel or both off-channel
+                       */
+                       if (bss_info_rssi > bss_rssi)
+                               bss->RSSI = bss_info_le->RSSI;
+               } else if ((bss->flags & WLC_BSS_RSSI_ON_CHANNEL) &&
+                       (bss_info_le->flags & WLC_BSS_RSSI_ON_CHANNEL) == 0) {
+                       /* preserve the on-channel rssi measurement
+                       * if the new measurement is off channel
+                       */
+                       bss->RSSI = bss_info_le->RSSI;
+                       bss->flags |= WLC_BSS_RSSI_ON_CHANNEL;
+               }
+               return 1;
        }
+       return 0;
+}
+
+static s32
+brcmf_cfg80211_escan_handler(struct brcmf_cfg80211_info *cfg,
+                            struct net_device *ndev,
+                            const struct brcmf_event_msg *e, void *data)
+{
+       s32 status;
+       s32 err = 0;
+       struct brcmf_escan_result_le *escan_result_le;
+       struct brcmf_bss_info_le *bss_info_le;
+       struct brcmf_bss_info_le *bss = NULL;
+       u32 bi_length;
+       struct brcmf_scan_results *list;
+       u32 i;
+       bool aborted;
+
+       status = be32_to_cpu(e->status);
+
+       if (!ndev || !cfg->escan_on ||
+                       !test_bit(WL_STATUS_SCANNING, &cfg->status)) {
+               WL_ERR("scan not ready ndev %p wl->escan_on %d drv_status %x\n",
+                       ndev, cfg->escan_on,
+                       !test_bit(WL_STATUS_SCANNING, &cfg->status));
+               return -EPERM;
+       }
+
+       if (status == BRCMF_E_STATUS_PARTIAL) {
+               WL_SCAN("ESCAN Partial result\n");
+               escan_result_le = (struct brcmf_escan_result_le *) data;
+               if (!escan_result_le) {
+                       WL_ERR("Invalid escan result (NULL pointer)\n");
+                       goto exit;
+               }
+               if (!cfg->scan_request) {
+                       WL_SCAN("result without cfg80211 request\n");
+                       goto exit;
+               }
+
+               if (le16_to_cpu(escan_result_le->bss_count) != 1) {
+                       WL_ERR("Invalid bss_count %d: ignoring\n",
+                               escan_result_le->bss_count);
+                       goto exit;
+               }
+               bss_info_le = &escan_result_le->bss_info_le;
+
+               bi_length = le32_to_cpu(bss_info_le->length);
+               if (bi_length != (le32_to_cpu(escan_result_le->buflen) -
+                                       WL_ESCAN_RESULTS_FIXED_SIZE)) {
+                       WL_ERR("Invalid bss_info length %d: ignoring\n",
+                               bi_length);
+                       goto exit;
+               }
+
+               if (!(cfg_to_wiphy(cfg)->interface_modes &
+                                       BIT(NL80211_IFTYPE_ADHOC))) {
+                       if (le16_to_cpu(bss_info_le->capability) &
+                                               WLAN_CAPABILITY_IBSS) {
+                               WL_ERR("Ignoring IBSS result\n");
+                               goto exit;
+                       }
+               }
 
+               list = (struct brcmf_scan_results *)
+                               cfg->escan_info.escan_buf;
+               if (bi_length > WL_ESCAN_BUF_SIZE - list->buflen) {
+                       WL_ERR("Buffer is too small: ignoring\n");
+                       goto exit;
+               }
+
+               for (i = 0; i < list->count; i++) {
+                       bss = bss ? (struct brcmf_bss_info_le *)
+                               ((unsigned char *)bss +
+                               le32_to_cpu(bss->length)) : list->bss_info_le;
+                       if (brcmf_compare_update_same_bss(bss, bss_info_le))
+                               goto exit;
+               }
+               memcpy(&(cfg->escan_info.escan_buf[list->buflen]),
+                       bss_info_le, bi_length);
+               list->version = le32_to_cpu(bss_info_le->version);
+               list->buflen += bi_length;
+               list->count++;
+       } else {
+               cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
+               if (cfg->scan_request) {
+                       cfg->bss_list = (struct brcmf_scan_results *)
+                               cfg->escan_info.escan_buf;
+                       brcmf_inform_bss(cfg);
+                       aborted = status != BRCMF_E_STATUS_SUCCESS;
+                       brcmf_notify_escan_complete(cfg, ndev, aborted,
+                                                   false);
+               } else
+                       WL_ERR("Unexpected scan result 0x%x\n", status);
+       }
+exit:
        return err;
 }
 
+static void brcmf_init_escan(struct brcmf_cfg80211_info *cfg)
+{
+
+       if (cfg->escan_on) {
+               cfg->el.handler[BRCMF_E_ESCAN_RESULT] =
+                       brcmf_cfg80211_escan_handler;
+               cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
+               /* Init scan_timeout timer */
+               init_timer(&cfg->escan_timeout);
+               cfg->escan_timeout.data = (unsigned long) cfg;
+               cfg->escan_timeout.function = brcmf_escan_timeout;
+               INIT_WORK(&cfg->escan_timeout_work,
+                       brcmf_cfg80211_escan_timeout_worker);
+       }
+}
+
 static __always_inline void brcmf_delay(u32 ms)
 {
        if (ms < 1000 / HZ) {
@@ -2481,249 +3154,1197 @@ static __always_inline void brcmf_delay(u32 ms)
        } else {
                msleep(ms);
        }
-}
+}
+
+static s32 brcmf_cfg80211_resume(struct wiphy *wiphy)
+{
+       struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+
+       /*
+        * Check for WL_STATUS_READY before any function call which
+        * could result in bus access. Don't block the resume for
+        * any driver error conditions
+        */
+       WL_TRACE("Enter\n");
+
+       if (test_bit(WL_STATUS_READY, &cfg->status))
+               brcmf_invoke_iscan(wiphy_to_cfg(wiphy));
+
+       WL_TRACE("Exit\n");
+       return 0;
+}
+
+static s32 brcmf_cfg80211_suspend(struct wiphy *wiphy,
+                                 struct cfg80211_wowlan *wow)
+{
+       struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+       struct net_device *ndev = cfg_to_ndev(cfg);
+
+       WL_TRACE("Enter\n");
+
+       /*
+        * Check for WL_STATUS_READY before any function call which
+        * could result in bus access. Don't block the suspend for
+        * any driver error conditions
+        */
+
+       /*
+        * While going to suspend if associated with AP disassociate
+        * from AP to save power while system is in suspended state
+        */
+       if ((test_bit(WL_STATUS_CONNECTED, &cfg->status) ||
+            test_bit(WL_STATUS_CONNECTING, &cfg->status)) &&
+            test_bit(WL_STATUS_READY, &cfg->status)) {
+               WL_INFO("Disassociating from AP"
+                       " while entering suspend state\n");
+               brcmf_link_down(cfg);
+
+               /*
+                * Make sure WPA_Supplicant receives all the event
+                * generated due to DISASSOC call to the fw to keep
+                * the state fw and WPA_Supplicant state consistent
+                */
+               brcmf_delay(500);
+       }
+
+       if (test_bit(WL_STATUS_READY, &cfg->status))
+               brcmf_abort_scanning(cfg);
+       else
+               clear_bit(WL_STATUS_SCANNING, &cfg->status);
+
+       /* Turn off watchdog timer */
+       if (test_bit(WL_STATUS_READY, &cfg->status))
+               brcmf_set_mpc(ndev, 1);
+
+       WL_TRACE("Exit\n");
+
+       return 0;
+}
+
+static __used s32
+brcmf_dev_bufvar_set(struct net_device *ndev, s8 *name, s8 *buf, s32 len)
+{
+       struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
+       u32 buflen;
+
+       buflen = brcmf_c_mkiovar(name, buf, len, cfg->dcmd_buf,
+                              WL_DCMD_LEN_MAX);
+       BUG_ON(!buflen);
+
+       return brcmf_exec_dcmd(ndev, BRCMF_C_SET_VAR, cfg->dcmd_buf,
+                              buflen);
+}
+
+static s32
+brcmf_dev_bufvar_get(struct net_device *ndev, s8 *name, s8 *buf,
+                 s32 buf_len)
+{
+       struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
+       u32 len;
+       s32 err = 0;
+
+       len = brcmf_c_mkiovar(name, NULL, 0, cfg->dcmd_buf,
+                           WL_DCMD_LEN_MAX);
+       BUG_ON(!len);
+       err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_VAR, cfg->dcmd_buf,
+                             WL_DCMD_LEN_MAX);
+       if (err) {
+               WL_ERR("error (%d)\n", err);
+               return err;
+       }
+       memcpy(buf, cfg->dcmd_buf, buf_len);
+
+       return err;
+}
+
+static __used s32
+brcmf_update_pmklist(struct net_device *ndev,
+                    struct brcmf_cfg80211_pmk_list *pmk_list, s32 err)
+{
+       int i, j;
+       int pmkid_len;
+
+       pmkid_len = le32_to_cpu(pmk_list->pmkids.npmkid);
+
+       WL_CONN("No of elements %d\n", pmkid_len);
+       for (i = 0; i < pmkid_len; i++) {
+               WL_CONN("PMKID[%d]: %pM =\n", i,
+                       &pmk_list->pmkids.pmkid[i].BSSID);
+               for (j = 0; j < WLAN_PMKID_LEN; j++)
+                       WL_CONN("%02x\n", pmk_list->pmkids.pmkid[i].PMKID[j]);
+       }
+
+       if (!err)
+               brcmf_dev_bufvar_set(ndev, "pmkid_info", (char *)pmk_list,
+                                       sizeof(*pmk_list));
+
+       return err;
+}
+
+static s32
+brcmf_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *ndev,
+                        struct cfg80211_pmksa *pmksa)
+{
+       struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+       struct pmkid_list *pmkids = &cfg->pmk_list->pmkids;
+       s32 err = 0;
+       int i;
+       int pmkid_len;
+
+       WL_TRACE("Enter\n");
+       if (!check_sys_up(wiphy))
+               return -EIO;
+
+       pmkid_len = le32_to_cpu(pmkids->npmkid);
+       for (i = 0; i < pmkid_len; i++)
+               if (!memcmp(pmksa->bssid, pmkids->pmkid[i].BSSID, ETH_ALEN))
+                       break;
+       if (i < WL_NUM_PMKIDS_MAX) {
+               memcpy(pmkids->pmkid[i].BSSID, pmksa->bssid, ETH_ALEN);
+               memcpy(pmkids->pmkid[i].PMKID, pmksa->pmkid, WLAN_PMKID_LEN);
+               if (i == pmkid_len) {
+                       pmkid_len++;
+                       pmkids->npmkid = cpu_to_le32(pmkid_len);
+               }
+       } else
+               err = -EINVAL;
+
+       WL_CONN("set_pmksa,IW_PMKSA_ADD - PMKID: %pM =\n",
+               pmkids->pmkid[pmkid_len].BSSID);
+       for (i = 0; i < WLAN_PMKID_LEN; i++)
+               WL_CONN("%02x\n", pmkids->pmkid[pmkid_len].PMKID[i]);
+
+       err = brcmf_update_pmklist(ndev, cfg->pmk_list, err);
+
+       WL_TRACE("Exit\n");
+       return err;
+}
+
+static s32
+brcmf_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *ndev,
+                     struct cfg80211_pmksa *pmksa)
+{
+       struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+       struct pmkid_list pmkid;
+       s32 err = 0;
+       int i, pmkid_len;
+
+       WL_TRACE("Enter\n");
+       if (!check_sys_up(wiphy))
+               return -EIO;
+
+       memcpy(&pmkid.pmkid[0].BSSID, pmksa->bssid, ETH_ALEN);
+       memcpy(&pmkid.pmkid[0].PMKID, pmksa->pmkid, WLAN_PMKID_LEN);
+
+       WL_CONN("del_pmksa,IW_PMKSA_REMOVE - PMKID: %pM =\n",
+              &pmkid.pmkid[0].BSSID);
+       for (i = 0; i < WLAN_PMKID_LEN; i++)
+               WL_CONN("%02x\n", pmkid.pmkid[0].PMKID[i]);
+
+       pmkid_len = le32_to_cpu(cfg->pmk_list->pmkids.npmkid);
+       for (i = 0; i < pmkid_len; i++)
+               if (!memcmp
+                   (pmksa->bssid, &cfg->pmk_list->pmkids.pmkid[i].BSSID,
+                    ETH_ALEN))
+                       break;
+
+       if ((pmkid_len > 0)
+           && (i < pmkid_len)) {
+               memset(&cfg->pmk_list->pmkids.pmkid[i], 0,
+                      sizeof(struct pmkid));
+               for (; i < (pmkid_len - 1); i++) {
+                       memcpy(&cfg->pmk_list->pmkids.pmkid[i].BSSID,
+                              &cfg->pmk_list->pmkids.pmkid[i + 1].BSSID,
+                              ETH_ALEN);
+                       memcpy(&cfg->pmk_list->pmkids.pmkid[i].PMKID,
+                              &cfg->pmk_list->pmkids.pmkid[i + 1].PMKID,
+                              WLAN_PMKID_LEN);
+               }
+               cfg->pmk_list->pmkids.npmkid = cpu_to_le32(pmkid_len - 1);
+       } else
+               err = -EINVAL;
+
+       err = brcmf_update_pmklist(ndev, cfg->pmk_list, err);
+
+       WL_TRACE("Exit\n");
+       return err;
+
+}
+
+static s32
+brcmf_cfg80211_flush_pmksa(struct wiphy *wiphy, struct net_device *ndev)
+{
+       struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+       s32 err = 0;
+
+       WL_TRACE("Enter\n");
+       if (!check_sys_up(wiphy))
+               return -EIO;
+
+       memset(cfg->pmk_list, 0, sizeof(*cfg->pmk_list));
+       err = brcmf_update_pmklist(ndev, cfg->pmk_list, err);
+
+       WL_TRACE("Exit\n");
+       return err;
+
+}
+
+/*
+ * PFN result doesn't have all the info which is
+ * required by the supplicant
+ * (e.g. IEs). Do a targeted escan so that sched scan results are reported
+ * via wl_inform_single_bss in the required format. Escan does require the
+ * scan request in the form of cfg80211_scan_request. For the time being,
+ * create a cfg80211_scan_request out of the received PNO event.
+ */
+static s32
+brcmf_notify_sched_scan_results(struct brcmf_cfg80211_info *cfg,
+                               struct net_device *ndev,
+                               const struct brcmf_event_msg *e, void *data)
+{
+       struct brcmf_pno_net_info_le *netinfo, *netinfo_start;
+       struct cfg80211_scan_request *request = NULL;
+       struct cfg80211_ssid *ssid = NULL;
+       struct ieee80211_channel *channel = NULL;
+       struct wiphy *wiphy = cfg_to_wiphy(cfg);
+       int err = 0;
+       int channel_req = 0;
+       int band = 0;
+       struct brcmf_pno_scanresults_le *pfn_result;
+       u32 result_count;
+       u32 status;
+
+       WL_SCAN("Enter\n");
+
+       if (e->event_type == cpu_to_be32(BRCMF_E_PFN_NET_LOST)) {
+               WL_SCAN("PFN NET LOST event. Do Nothing\n");
+               return 0;
+       }
+
+       pfn_result = (struct brcmf_pno_scanresults_le *)data;
+       result_count = le32_to_cpu(pfn_result->count);
+       status = le32_to_cpu(pfn_result->status);
+
+       /*
+        * PFN event is limited to fit 512 bytes so we may get
+        * multiple NET_FOUND events. For now place a warning here.
+        */
+       WARN_ON(status != BRCMF_PNO_SCAN_COMPLETE);
+       WL_SCAN("PFN NET FOUND event. count: %d\n", result_count);
+       if (result_count > 0) {
+               int i;
+
+               request = kzalloc(sizeof(*request), GFP_KERNEL);
+               ssid = kcalloc(result_count, sizeof(*ssid), GFP_KERNEL);
+               channel = kcalloc(result_count, sizeof(*channel), GFP_KERNEL);
+               if (!request || !ssid || !channel) {
+                       err = -ENOMEM;
+                       goto out_err;
+               }
+
+               request->wiphy = wiphy;
+               data += sizeof(struct brcmf_pno_scanresults_le);
+               netinfo_start = (struct brcmf_pno_net_info_le *)data;
+
+               for (i = 0; i < result_count; i++) {
+                       netinfo = &netinfo_start[i];
+                       if (!netinfo) {
+                               WL_ERR("Invalid netinfo ptr. index: %d\n", i);
+                               err = -EINVAL;
+                               goto out_err;
+                       }
+
+                       WL_SCAN("SSID:%s Channel:%d\n",
+                       netinfo->SSID, netinfo->channel);
+                       memcpy(ssid[i].ssid, netinfo->SSID, netinfo->SSID_len);
+                       ssid[i].ssid_len = netinfo->SSID_len;
+                       request->n_ssids++;
+
+                       channel_req = netinfo->channel;
+                       if (channel_req <= CH_MAX_2G_CHANNEL)
+                               band = NL80211_BAND_2GHZ;
+                       else
+                               band = NL80211_BAND_5GHZ;
+                       channel[i].center_freq =
+                               ieee80211_channel_to_frequency(channel_req,
+                                                              band);
+                       channel[i].band = band;
+                       channel[i].flags |= IEEE80211_CHAN_NO_HT40;
+                       request->channels[i] = &channel[i];
+                       request->n_channels++;
+               }
+
+               /* assign parsed ssid array */
+               if (request->n_ssids)
+                       request->ssids = &ssid[0];
+
+               if (test_bit(WL_STATUS_SCANNING, &cfg->status)) {
+                       /* Abort any on-going scan */
+                       brcmf_abort_scanning(cfg);
+               }
+
+               set_bit(WL_STATUS_SCANNING, &cfg->status);
+               err = brcmf_do_escan(cfg, wiphy, ndev, request);
+               if (err) {
+                       clear_bit(WL_STATUS_SCANNING, &cfg->status);
+                       goto out_err;
+               }
+               cfg->sched_escan = true;
+               cfg->scan_request = request;
+       } else {
+               WL_ERR("FALSE PNO Event. (pfn_count == 0)\n");
+               goto out_err;
+       }
+
+       kfree(ssid);
+       kfree(channel);
+       kfree(request);
+       return 0;
+
+out_err:
+       kfree(ssid);
+       kfree(channel);
+       kfree(request);
+       cfg80211_sched_scan_stopped(wiphy);
+       return err;
+}
+
+#ifndef CONFIG_BRCMISCAN
+static int brcmf_dev_pno_clean(struct net_device *ndev)
+{
+       char iovbuf[128];
+       int ret;
+
+       /* Disable pfn */
+       ret = brcmf_dev_intvar_set(ndev, "pfn", 0);
+       if (ret == 0) {
+               /* clear pfn */
+               ret = brcmf_dev_iovar_setbuf(ndev, "pfnclear", NULL, 0,
+                                            iovbuf, sizeof(iovbuf));
+       }
+       if (ret < 0)
+               WL_ERR("failed code %d\n", ret);
+
+       return ret;
+}
+
+static int brcmf_dev_pno_config(struct net_device *ndev)
+{
+       struct brcmf_pno_param_le pfn_param;
+       char iovbuf[128];
+
+       memset(&pfn_param, 0, sizeof(pfn_param));
+       pfn_param.version = cpu_to_le32(BRCMF_PNO_VERSION);
+
+       /* set extra pno params */
+       pfn_param.flags = cpu_to_le16(1 << BRCMF_PNO_ENABLE_ADAPTSCAN_BIT);
+       pfn_param.repeat = BRCMF_PNO_REPEAT;
+       pfn_param.exp = BRCMF_PNO_FREQ_EXPO_MAX;
+
+       /* set up pno scan fr */
+       pfn_param.scan_freq = cpu_to_le32(BRCMF_PNO_TIME);
+
+       return brcmf_dev_iovar_setbuf(ndev, "pfn_set",
+                                     &pfn_param, sizeof(pfn_param),
+                                     iovbuf, sizeof(iovbuf));
+}
+
+static int
+brcmf_cfg80211_sched_scan_start(struct wiphy *wiphy,
+                               struct net_device *ndev,
+                               struct cfg80211_sched_scan_request *request)
+{
+       char iovbuf[128];
+       struct brcmf_cfg80211_info *cfg = wiphy_priv(wiphy);
+       struct brcmf_pno_net_param_le pfn;
+       int i;
+       int ret = 0;
+
+       WL_SCAN("Enter n_match_sets:%d   n_ssids:%d\n",
+               request->n_match_sets, request->n_ssids);
+       if (test_bit(WL_STATUS_SCANNING, &cfg->status)) {
+               WL_ERR("Scanning already : status (%lu)\n", cfg->status);
+               return -EAGAIN;
+       }
+
+       if (!request || !request->n_ssids || !request->n_match_sets) {
+               WL_ERR("Invalid sched scan req!! n_ssids:%d\n",
+                      request->n_ssids);
+               return -EINVAL;
+       }
+
+       if (request->n_ssids > 0) {
+               for (i = 0; i < request->n_ssids; i++) {
+                       /* Active scan req for ssids */
+                       WL_SCAN(">>> Active scan req for ssid (%s)\n",
+                               request->ssids[i].ssid);
+
+                       /*
+                        * match_set ssids is a superset of the n_ssid list,
+                        * so we need not add these sets separately.
+                        */
+               }
+       }
+
+       if (request->n_match_sets > 0) {
+               /* clean up everything */
+               ret = brcmf_dev_pno_clean(ndev);
+               if  (ret < 0) {
+                       WL_ERR("failed error=%d\n", ret);
+                       return ret;
+               }
+
+               /* configure pno */
+               ret = brcmf_dev_pno_config(ndev);
+               if (ret < 0) {
+                       WL_ERR("PNO setup failed!! ret=%d\n", ret);
+                       return -EINVAL;
+               }
+
+               /* configure each match set */
+               for (i = 0; i < request->n_match_sets; i++) {
+                       struct cfg80211_ssid *ssid;
+                       u32 ssid_len;
+
+                       ssid = &request->match_sets[i].ssid;
+                       ssid_len = ssid->ssid_len;
+
+                       if (!ssid_len) {
+                               WL_ERR("skip broadcast ssid\n");
+                               continue;
+                       }
+                       pfn.auth = cpu_to_le32(WLAN_AUTH_OPEN);
+                       pfn.wpa_auth = cpu_to_le32(BRCMF_PNO_WPA_AUTH_ANY);
+                       pfn.wsec = cpu_to_le32(0);
+                       pfn.infra = cpu_to_le32(1);
+                       pfn.flags = cpu_to_le32(1 << BRCMF_PNO_HIDDEN_BIT);
+                       pfn.ssid.SSID_len = cpu_to_le32(ssid_len);
+                       memcpy(pfn.ssid.SSID, ssid->ssid, ssid_len);
+                       ret = brcmf_dev_iovar_setbuf(ndev, "pfn_add",
+                                                    &pfn, sizeof(pfn),
+                                                    iovbuf, sizeof(iovbuf));
+                       WL_SCAN(">>> PNO filter %s for ssid (%s)\n",
+                               ret == 0 ? "set" : "failed",
+                               ssid->ssid);
+               }
+               /* Enable the PNO */
+               if (brcmf_dev_intvar_set(ndev, "pfn", 1) < 0) {
+                       WL_ERR("PNO enable failed!! ret=%d\n", ret);
+                       return -EINVAL;
+               }
+       } else {
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+/* cfg80211 sched_scan stop handler: remove all PNO state from the
+ * firmware and, if an escan had been scheduled, report it complete
+ * (aborted) to cfg80211.  Always returns 0.
+ */
+static int brcmf_cfg80211_sched_scan_stop(struct wiphy *wiphy,
+                                         struct net_device *ndev)
+{
+       struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+
+       WL_SCAN("enter\n");
+       /* clear firmware-side PNO configuration unconditionally */
+       brcmf_dev_pno_clean(ndev);
+       if (cfg->sched_escan)
+               brcmf_notify_escan_complete(cfg, ndev, true, true);
+       return 0;
+}
+#endif /* CONFIG_BRCMISCAN */
+
+#ifdef CONFIG_NL80211_TESTMODE
+/* NL80211 testmode handler: forward the user-supplied dcmd to the
+ * dongle and return the (possibly updated) dcmd structure back to
+ * userspace as testmode reply data.
+ */
+static int brcmf_cfg80211_testmode(struct wiphy *wiphy, void *data, int len)
+{
+       struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+       struct net_device *ndev = cfg->wdev->netdev;
+       struct brcmf_dcmd *dcmd = data;
+       struct sk_buff *reply;
+       int ret;
+
+       ret = brcmf_netlink_dcmd(ndev, dcmd);
+       if (ret == 0) {
+               /* Reply skb allocation can fail and nla_put() can run out
+                * of tailroom; both were previously ignored, risking a
+                * NULL dereference or a silently truncated reply.
+                */
+               reply = cfg80211_testmode_alloc_reply_skb(wiphy,
+                                                         sizeof(*dcmd));
+               if (!reply)
+                       return -ENOMEM;
+               if (nla_put(reply, NL80211_ATTR_TESTDATA,
+                           sizeof(*dcmd), dcmd)) {
+                       kfree_skb(reply);
+                       return -EMSGSIZE;
+               }
+               ret = cfg80211_testmode_reply(reply);
+       }
+       return ret;
+}
+#endif
+
+/* Program open (no security) settings on the given BSS index: open
+ * system 802.11 authentication ("auth" 0), no wireless security
+ * ("wsec" 0) and no upper-layer authentication ("wpa_auth" NONE).
+ * Returns 0 on success or the firmware error code.
+ */
+static s32 brcmf_configure_opensecurity(struct net_device *ndev, s32 bssidx)
+{
+       s32 err;
+
+       /* set auth */
+       err = brcmf_dev_intvar_set_bsscfg(ndev, "auth", 0, bssidx);
+       if (err < 0) {
+               WL_ERR("auth error %d\n", err);
+               return err;
+       }
+       /* set wsec */
+       err = brcmf_dev_intvar_set_bsscfg(ndev, "wsec", 0, bssidx);
+       if (err < 0) {
+               WL_ERR("wsec error %d\n", err);
+               return err;
+       }
+       /* set upper-layer auth */
+       err = brcmf_dev_intvar_set_bsscfg(ndev, "wpa_auth",
+                                         WPA_AUTH_NONE, bssidx);
+       if (err < 0) {
+               WL_ERR("wpa_auth error %d\n", err);
+               return err;
+       }
+
+       return 0;
+}
+
+/* Check whether @oui matches the OUI expected for the IE type:
+ * the RSN OUI when @is_rsn_ie is set, the WPA OUI otherwise.
+ */
+static bool brcmf_valid_wpa_oui(u8 *oui, bool is_rsn_ie)
+{
+       const u8 *expected = is_rsn_ie ? (const u8 *)RSN_OUI :
+                                        (const u8 *)WPA_OUI;
+
+       return memcmp(oui, expected, TLV_OUI_LEN) == 0;
+}
+
+/* Parse a WPA or RSN vendor IE supplied via the AP configuration and
+ * program the matching firmware security settings ("auth", "wsec",
+ * "wpa_auth" and, for RSN, "wme_bss_disable") on @bssidx.
+ * Returns 0 on success or a negative error code.
+ */
+static s32
+brcmf_configure_wpaie(struct net_device *ndev, struct brcmf_vs_tlv *wpa_ie,
+                    bool is_rsn_ie, s32 bssidx)
+{
+       u32 auth = 0; /* d11 open authentication */
+       u16 count;
+       s32 err = 0;
+       s32 len = 0;
+       u32 i;
+       u32 wsec;
+       u32 pval = 0;
+       u32 gval = 0;
+       u32 wpa_auth = 0;
+       u32 offset;
+       u8 *data;
+       u16 rsn_cap;
+       u32 wme_bss_disable;
+
+       WL_TRACE("Enter\n");
+       if (wpa_ie == NULL)
+               goto exit;
+
+       /* total IE length; offset walks the body, skipping the vendor
+        * header only for WPA (RSN IEs have no vendor-specific header) */
+       len = wpa_ie->len + TLV_HDR_LEN;
+       data = (u8 *)wpa_ie;
+       offset = 0;
+       if (!is_rsn_ie)
+               offset += VS_IE_FIXED_HDR_LEN;
+       offset += WPA_IE_VERSION_LEN;
+
+       /* check for multicast cipher suite */
+       if (offset + WPA_IE_MIN_OUI_LEN > len) {
+               err = -EINVAL;
+               WL_ERR("no multicast cipher suite\n");
+               goto exit;
+       }
+
+       if (!brcmf_valid_wpa_oui(&data[offset], is_rsn_ie)) {
+               err = -EINVAL;
+               WL_ERR("ivalid OUI\n");
+               goto exit;
+       }
+       offset += TLV_OUI_LEN;
+
+       /* pick up multicast cipher */
+       switch (data[offset]) {
+       case WPA_CIPHER_NONE:
+               gval = 0;
+               break;
+       case WPA_CIPHER_WEP_40:
+       case WPA_CIPHER_WEP_104:
+               gval = WEP_ENABLED;
+               break;
+       case WPA_CIPHER_TKIP:
+               gval = TKIP_ENABLED;
+               break;
+       case WPA_CIPHER_AES_CCM:
+               gval = AES_ENABLED;
+               break;
+       default:
+               err = -EINVAL;
+               WL_ERR("Invalid multi cast cipher info\n");
+               goto exit;
+       }
+
+       offset++;
+       /* walk thru unicast cipher list and pick up what we recognize */
+       /* NOTE(review): the two little-endian count bytes themselves are
+        * read without checking offset + 2 <= len (same for the AKM list
+        * below) - TODO confirm callers always pass a well-formed IE
+        */
+       count = data[offset] + (data[offset + 1] << 8);
+       offset += WPA_IE_SUITE_COUNT_LEN;
+       /* Check for unicast suite(s) */
+       if (offset + (WPA_IE_MIN_OUI_LEN * count) > len) {
+               err = -EINVAL;
+               WL_ERR("no unicast cipher suite\n");
+               goto exit;
+       }
+       for (i = 0; i < count; i++) {
+               if (!brcmf_valid_wpa_oui(&data[offset], is_rsn_ie)) {
+                       err = -EINVAL;
+                       WL_ERR("ivalid OUI\n");
+                       goto exit;
+               }
+               offset += TLV_OUI_LEN;
+               switch (data[offset]) {
+               case WPA_CIPHER_NONE:
+                       break;
+               case WPA_CIPHER_WEP_40:
+               case WPA_CIPHER_WEP_104:
+                       pval |= WEP_ENABLED;
+                       break;
+               case WPA_CIPHER_TKIP:
+                       pval |= TKIP_ENABLED;
+                       break;
+               case WPA_CIPHER_AES_CCM:
+                       pval |= AES_ENABLED;
+                       break;
+               default:
+                       WL_ERR("Ivalid unicast security info\n");
+               }
+               offset++;
+       }
+       /* walk thru auth management suite list and pick up what we recognize */
+       count = data[offset] + (data[offset + 1] << 8);
+       offset += WPA_IE_SUITE_COUNT_LEN;
+       /* Check for auth key management suite(s) */
+       if (offset + (WPA_IE_MIN_OUI_LEN * count) > len) {
+               err = -EINVAL;
+               WL_ERR("no auth key mgmt suite\n");
+               goto exit;
+       }
+       for (i = 0; i < count; i++) {
+               if (!brcmf_valid_wpa_oui(&data[offset], is_rsn_ie)) {
+                       err = -EINVAL;
+                       WL_ERR("ivalid OUI\n");
+                       goto exit;
+               }
+               offset += TLV_OUI_LEN;
+               switch (data[offset]) {
+               case RSN_AKM_NONE:
+                       WL_TRACE("RSN_AKM_NONE\n");
+                       wpa_auth |= WPA_AUTH_NONE;
+                       break;
+               case RSN_AKM_UNSPECIFIED:
+                       WL_TRACE("RSN_AKM_UNSPECIFIED\n");
+                       is_rsn_ie ? (wpa_auth |= WPA2_AUTH_UNSPECIFIED) :
+                                   (wpa_auth |= WPA_AUTH_UNSPECIFIED);
+                       break;
+               case RSN_AKM_PSK:
+                       WL_TRACE("RSN_AKM_PSK\n");
+                       is_rsn_ie ? (wpa_auth |= WPA2_AUTH_PSK) :
+                                   (wpa_auth |= WPA_AUTH_PSK);
+                       break;
+               default:
+                       WL_ERR("Ivalid key mgmt info\n");
+               }
+               offset++;
+       }
 
-static s32 brcmf_cfg80211_resume(struct wiphy *wiphy)
-{
-       struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
-
-       /*
-        * Check for WL_STATUS_READY before any function call which
-        * could result is bus access. Don't block the resume for
-        * any driver error conditions
-        */
-       WL_TRACE("Enter\n");
+       if (is_rsn_ie) {
+               wme_bss_disable = 1;
+               if ((offset + RSN_CAP_LEN) <= len) {
+                       rsn_cap = data[offset] + (data[offset + 1] << 8);
+                       if (rsn_cap & RSN_CAP_PTK_REPLAY_CNTR_MASK)
+                               wme_bss_disable = 0;
+               }
+               /* set wme_bss_disable to sync RSN Capabilities */
+               err = brcmf_dev_intvar_set_bsscfg(ndev, "wme_bss_disable",
+                                                 wme_bss_disable, bssidx);
+               if (err < 0) {
+                       WL_ERR("wme_bss_disable error %d\n", err);
+                       goto exit;
+               }
+       }
+       /* FOR WPS , set SES_OW_ENABLED */
+       wsec = (pval | gval | SES_OW_ENABLED);
 
-       if (test_bit(WL_STATUS_READY, &cfg_priv->status))
-               brcmf_invoke_iscan(wiphy_to_cfg(wiphy));
+       /* set auth */
+       err = brcmf_dev_intvar_set_bsscfg(ndev, "auth", auth, bssidx);
+       if (err < 0) {
+               WL_ERR("auth error %d\n", err);
+               goto exit;
+       }
+       /* set wsec */
+       err = brcmf_dev_intvar_set_bsscfg(ndev, "wsec", wsec, bssidx);
+       if (err < 0) {
+               WL_ERR("wsec error %d\n", err);
+               goto exit;
+       }
+       /* set upper-layer auth */
+       err = brcmf_dev_intvar_set_bsscfg(ndev, "wpa_auth", wpa_auth, bssidx);
+       if (err < 0) {
+               WL_ERR("wpa_auth error %d\n", err);
+               goto exit;
+       }
 
-       WL_TRACE("Exit\n");
-       return 0;
+exit:
+       return err;
 }
 
-static s32 brcmf_cfg80211_suspend(struct wiphy *wiphy,
-                                 struct cfg80211_wowlan *wow)
+/* Walk @vndr_ie_buf and collect every vendor-specific IE (except the
+ * WPA/WME ones, which the firmware manages itself) into @vndr_ies.
+ * Always returns 0; malformed entries are skipped with an error log.
+ */
+static s32
+brcmf_parse_vndr_ies(u8 *vndr_ie_buf, u32 vndr_ie_len,
+                    struct parsed_vndr_ies *vndr_ies)
 {
-       struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
-       struct net_device *ndev = cfg_to_ndev(cfg_priv);
-
-       WL_TRACE("Enter\n");
+       s32 err = 0;
+       struct brcmf_vs_tlv *vndrie;
+       struct brcmf_tlv *ie;
+       struct parsed_vndr_ie_info *parsed_info;
+       s32 remaining_len;
+
+       remaining_len = (s32)vndr_ie_len;
+       memset(vndr_ies, 0, sizeof(*vndr_ies));
+
+       ie = (struct brcmf_tlv *)vndr_ie_buf;
+       while (ie) {
+               if (ie->id != WLAN_EID_VENDOR_SPECIFIC)
+                       goto next;
+               vndrie = (struct brcmf_vs_tlv *)ie;
+               /* len should be bigger than OUI length + one */
+               if (vndrie->len < (VS_IE_FIXED_HDR_LEN - TLV_HDR_LEN + 1)) {
+                       WL_ERR("invalid vndr ie. length is too small %d\n",
+                               vndrie->len);
+                       goto next;
+               }
+               /* if wpa or wme ie, do not add ie */
+               if (!memcmp(vndrie->oui, (u8 *)WPA_OUI, TLV_OUI_LEN) &&
+                   ((vndrie->oui_type == WPA_OUI_TYPE) ||
+                   (vndrie->oui_type == WME_OUI_TYPE))) {
+                       WL_TRACE("Found WPA/WME oui. Do not add it\n");
+                       goto next;
+               }
 
-       /*
-        * Check for WL_STATUS_READY before any function call which
-        * could result is bus access. Don't block the suspend for
-        * any driver error conditions
-        */
+               parsed_info = &vndr_ies->ie_info[vndr_ies->count];
 
-       /*
-        * While going to suspend if associated with AP disassociate
-        * from AP to save power while system is in suspended state
-        */
-       if ((test_bit(WL_STATUS_CONNECTED, &cfg_priv->status) ||
-            test_bit(WL_STATUS_CONNECTING, &cfg_priv->status)) &&
-            test_bit(WL_STATUS_READY, &cfg_priv->status)) {
-               WL_INFO("Disassociating from AP"
-                       " while entering suspend state\n");
-               brcmf_link_down(cfg_priv);
+               /* save vndr ie information */
+               parsed_info->ie_ptr = (char *)vndrie;
+               parsed_info->ie_len = vndrie->len + TLV_HDR_LEN;
+               memcpy(&parsed_info->vndrie, vndrie, sizeof(*vndrie));
 
-               /*
-                * Make sure WPA_Supplicant receives all the event
-                * generated due to DISASSOC call to the fw to keep
-                * the state fw and WPA_Supplicant state consistent
-                */
-               brcmf_delay(500);
-       }
+               vndr_ies->count++;
 
-       set_bit(WL_STATUS_SCAN_ABORTING, &cfg_priv->status);
-       if (test_bit(WL_STATUS_READY, &cfg_priv->status))
-               brcmf_term_iscan(cfg_priv);
+               WL_TRACE("** OUI %02x %02x %02x, type 0x%02x\n",
+                        parsed_info->vndrie.oui[0],
+                        parsed_info->vndrie.oui[1],
+                        parsed_info->vndrie.oui[2],
+                        parsed_info->vndrie.oui_type);
 
-       if (cfg_priv->scan_request) {
-               /* Indidate scan abort to cfg80211 layer */
-               WL_INFO("Terminating scan in progress\n");
-               cfg80211_scan_done(cfg_priv->scan_request, true);
-               cfg_priv->scan_request = NULL;
+               if (vndr_ies->count >= MAX_VNDR_IE_NUMBER)
+                       break;
+next:
+               /* Advance over the full TLV: payload plus the 2-byte
+                * id/len header.  The previous code stepped by ie->len
+                * only, landing TLV_HDR_LEN bytes short and misparsing
+                * every subsequent IE.
+                */
+               remaining_len -= (ie->len + TLV_HDR_LEN);
+               if (remaining_len <= TLV_HDR_LEN)
+                       ie = NULL;
+               else
+                       ie = (struct brcmf_tlv *)(((u8 *)ie) + ie->len +
+                               TLV_HDR_LEN);
        }
-       clear_bit(WL_STATUS_SCANNING, &cfg_priv->status);
-       clear_bit(WL_STATUS_SCAN_ABORTING, &cfg_priv->status);
+       return err;
+}
 
-       /* Turn off watchdog timer */
-       if (test_bit(WL_STATUS_READY, &cfg_priv->status)) {
-               WL_INFO("Enable MPC\n");
-               brcmf_set_mpc(ndev, 1);
-       }
+/* Build one "vndr_ie" firmware command record in @iebuf: the add/del
+ * command string, an IE count of 1, the packet flag and the raw IE
+ * bytes.  Returns the number of bytes written (IE length plus the
+ * fixed vndr_ie command header).  Caller guarantees @iebuf has room.
+ */
+static u32
+brcmf_vndr_ie(u8 *iebuf, s32 pktflag, u8 *ie_ptr, u32 ie_len, s8 *add_del_cmd)
+{
 
-       WL_TRACE("Exit\n");
+       __le32 iecount_le;
+       __le32 pktflag_le;
 
-       return 0;
-}
+       /* strncpy() does not guarantee termination, so NUL-terminate
+        * the command string explicitly */
+       strncpy(iebuf, add_del_cmd, VNDR_IE_CMD_LEN - 1);
+       iebuf[VNDR_IE_CMD_LEN - 1] = '\0';
 
-static __used s32
-brcmf_dev_bufvar_set(struct net_device *ndev, s8 *name, s8 *buf, s32 len)
-{
-       struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev);
-       u32 buflen;
+       iecount_le = cpu_to_le32(1);
+       memcpy(&iebuf[VNDR_IE_COUNT_OFFSET], &iecount_le, sizeof(iecount_le));
 
-       buflen = brcmf_c_mkiovar(name, buf, len, cfg_priv->dcmd_buf,
-                              WL_DCMD_LEN_MAX);
-       BUG_ON(!buflen);
+       pktflag_le = cpu_to_le32(pktflag);
+       memcpy(&iebuf[VNDR_IE_PKTFLAG_OFFSET], &pktflag_le, sizeof(pktflag_le));
 
-       return brcmf_exec_dcmd(ndev, BRCMF_C_SET_VAR, cfg_priv->dcmd_buf,
-                              buflen);
+       memcpy(&iebuf[VNDR_IE_VSIE_OFFSET], ie_ptr, ie_len);
+
+       return ie_len + VNDR_IE_HDR_SIZE;
 }
 
-static s32
-brcmf_dev_bufvar_get(struct net_device *ndev, s8 *name, s8 *buf,
-                 s32 buf_len)
+/* Synchronize the vendor IEs for one management frame type (beacon or
+ * probe response) with the firmware: delete the previously applied IEs,
+ * add the new set via the "vndr_ie" iovar and cache the applied bytes
+ * in cfg->ap_info so a later identical request is a no-op.
+ * Returns 0 on success or a negative error code.
+ */
+s32
+brcmf_set_management_ie(struct brcmf_cfg80211_info *cfg,
+                       struct net_device *ndev, s32 bssidx, s32 pktflag,
+                       u8 *vndr_ie_buf, u32 vndr_ie_len)
 {
-       struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev);
-       u32 len;
        s32 err = 0;
+       u8  *iovar_ie_buf;
+       u8  *curr_ie_buf;
+       u8  *mgmt_ie_buf = NULL;
+       u32 mgmt_ie_buf_len = 0;
+       u32 *mgmt_ie_len = NULL;
+       u32 del_add_ie_buf_len = 0;
+       u32 total_ie_buf_len = 0;
+       u32 parsed_ie_buf_len = 0;
+       struct parsed_vndr_ies old_vndr_ies;
+       struct parsed_vndr_ies new_vndr_ies;
+       struct parsed_vndr_ie_info *vndrie_info;
+       s32 i;
+       u8 *ptr;
+       u32 remained_buf_len;
+
+       WL_TRACE("bssidx %d, pktflag : 0x%02X\n", bssidx, pktflag);
+       iovar_ie_buf = kzalloc(WL_EXTRA_BUF_MAX, GFP_KERNEL);
+       if (!iovar_ie_buf)
+               return -ENOMEM;
+       curr_ie_buf = iovar_ie_buf;
+       if (test_bit(WL_STATUS_AP_CREATING, &cfg->status) ||
+           test_bit(WL_STATUS_AP_CREATED, &cfg->status)) {
+               switch (pktflag) {
+               case VNDR_IE_PRBRSP_FLAG:
+                       mgmt_ie_buf = cfg->ap_info->probe_res_ie;
+                       mgmt_ie_len = &cfg->ap_info->probe_res_ie_len;
+                       mgmt_ie_buf_len =
+                               sizeof(cfg->ap_info->probe_res_ie);
+                       break;
+               case VNDR_IE_BEACON_FLAG:
+                       mgmt_ie_buf = cfg->ap_info->beacon_ie;
+                       mgmt_ie_len = &cfg->ap_info->beacon_ie_len;
+                       mgmt_ie_buf_len = sizeof(cfg->ap_info->beacon_ie);
+                       break;
+               default:
+                       err = -EPERM;
+                       WL_ERR("not suitable type\n");
+                       goto exit;
+               }
+               bssidx = 0;
+       } else {
+               err = -EPERM;
+               WL_ERR("not suitable type\n");
+               goto exit;
+       }
 
-       len = brcmf_c_mkiovar(name, NULL, 0, cfg_priv->dcmd_buf,
-                           WL_DCMD_LEN_MAX);
-       BUG_ON(!len);
-       err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_VAR, cfg_priv->dcmd_buf,
-                             WL_DCMD_LEN_MAX);
-       if (err) {
-               WL_ERR("error (%d)\n", err);
-               return err;
+       if (vndr_ie_len > mgmt_ie_buf_len) {
+               err = -ENOMEM;
+               WL_ERR("extra IE size too big\n");
+               goto exit;
+       }
+
+       /* parse and save new vndr_ie in curr_ie_buff before comparing it */
+       if (vndr_ie_buf && vndr_ie_len && curr_ie_buf) {
+               ptr = curr_ie_buf;
+               brcmf_parse_vndr_ies(vndr_ie_buf, vndr_ie_len, &new_vndr_ies);
+               for (i = 0; i < new_vndr_ies.count; i++) {
+                       vndrie_info = &new_vndr_ies.ie_info[i];
+                       memcpy(ptr + parsed_ie_buf_len, vndrie_info->ie_ptr,
+                              vndrie_info->ie_len);
+                       parsed_ie_buf_len += vndrie_info->ie_len;
+               }
        }
-       memcpy(buf, cfg_priv->dcmd_buf, buf_len);
 
-       return err;
-}
+       if (mgmt_ie_buf != NULL) {
+               if (parsed_ie_buf_len && (parsed_ie_buf_len == *mgmt_ie_len) &&
+                   (memcmp(mgmt_ie_buf, curr_ie_buf,
+                           parsed_ie_buf_len) == 0)) {
+                       WL_TRACE("Previous mgmt IE is equals to current IE");
+                       goto exit;
+               }
 
-static __used s32
-brcmf_update_pmklist(struct net_device *ndev,
-                    struct brcmf_cfg80211_pmk_list *pmk_list, s32 err)
-{
-       int i, j;
-       int pmkid_len;
+               /* parse old vndr_ie */
+               brcmf_parse_vndr_ies(mgmt_ie_buf, *mgmt_ie_len, &old_vndr_ies);
+
+               /* make a command to delete old ie */
+               for (i = 0; i < old_vndr_ies.count; i++) {
+                       vndrie_info = &old_vndr_ies.ie_info[i];
+
+                       WL_TRACE("DEL ID : %d, Len: %d , OUI:%02x:%02x:%02x\n",
+                                vndrie_info->vndrie.id,
+                                vndrie_info->vndrie.len,
+                                vndrie_info->vndrie.oui[0],
+                                vndrie_info->vndrie.oui[1],
+                                vndrie_info->vndrie.oui[2]);
+
+                       del_add_ie_buf_len = brcmf_vndr_ie(curr_ie_buf, pktflag,
+                                                          vndrie_info->ie_ptr,
+                                                          vndrie_info->ie_len,
+                                                          "del");
+                       curr_ie_buf += del_add_ie_buf_len;
+                       total_ie_buf_len += del_add_ie_buf_len;
+               }
+       }
 
-       pmkid_len = le32_to_cpu(pmk_list->pmkids.npmkid);
+       *mgmt_ie_len = 0;
+       /* Add if there is any extra IE */
+       if (mgmt_ie_buf && parsed_ie_buf_len) {
+               ptr = mgmt_ie_buf;
+
+               remained_buf_len = mgmt_ie_buf_len;
+
+               /* make a command to add new ie */
+               for (i = 0; i < new_vndr_ies.count; i++) {
+                       vndrie_info = &new_vndr_ies.ie_info[i];
+
+                       WL_TRACE("ADDED ID : %d, Len: %d, OUI:%02x:%02x:%02x\n",
+                                vndrie_info->vndrie.id,
+                                vndrie_info->vndrie.len,
+                                vndrie_info->vndrie.oui[0],
+                                vndrie_info->vndrie.oui[1],
+                                vndrie_info->vndrie.oui[2]);
+
+                       del_add_ie_buf_len = brcmf_vndr_ie(curr_ie_buf, pktflag,
+                                                          vndrie_info->ie_ptr,
+                                                          vndrie_info->ie_len,
+                                                          "add");
+                       /* verify remained buf size before copy data; the
+                        * counter is unsigned, so test before subtracting
+                        * (the old post-subtraction "< 0" check could
+                        * never fire and allowed a buffer overrun)
+                        */
+                       if (remained_buf_len < vndrie_info->ie_len) {
+                               WL_ERR("no space in mgmt_ie_buf: len left %d",
+                                       remained_buf_len);
+                               break;
+                       }
+                       remained_buf_len -= vndrie_info->ie_len;
 
-       WL_CONN("No of elements %d\n", pmkid_len);
-       for (i = 0; i < pmkid_len; i++) {
-               WL_CONN("PMKID[%d]: %pM =\n", i,
-                       &pmk_list->pmkids.pmkid[i].BSSID);
-               for (j = 0; j < WLAN_PMKID_LEN; j++)
-                       WL_CONN("%02x\n", pmk_list->pmkids.pmkid[i].PMKID[j]);
-       }
+                       /* save the parsed IE in wl struct */
+                       memcpy(ptr + (*mgmt_ie_len), vndrie_info->ie_ptr,
+                              vndrie_info->ie_len);
+                       *mgmt_ie_len += vndrie_info->ie_len;
 
-       if (!err)
-               brcmf_dev_bufvar_set(ndev, "pmkid_info", (char *)pmk_list,
-                                       sizeof(*pmk_list));
+                       curr_ie_buf += del_add_ie_buf_len;
+                       total_ie_buf_len += del_add_ie_buf_len;
+               }
+       }
+       if (total_ie_buf_len) {
+               err  = brcmf_dev_iovar_setbuf_bsscfg(ndev, "vndr_ie",
+                                                    iovar_ie_buf,
+                                                    total_ie_buf_len,
+                                                    cfg->extra_buf,
+                                                    WL_EXTRA_BUF_MAX, bssidx);
+               if (err)
+                       WL_ERR("vndr ie set error : %d\n", err);
+       }
 
+exit:
+       kfree(iovar_ie_buf);
        return err;
 }
 
 static s32
-brcmf_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *ndev,
-                        struct cfg80211_pmksa *pmksa)
+brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
+                       struct cfg80211_ap_settings *settings)
 {
-       struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
-       struct pmkid_list *pmkids = &cfg_priv->pmk_list->pmkids;
-       s32 err = 0;
-       int i;
-       int pmkid_len;
+       s32 ie_offset;
+       struct brcmf_tlv *ssid_ie;
+       struct brcmf_ssid_le ssid_le;
+       s32 ioctl_value;
+       s32 err = -EPERM;
+       struct brcmf_tlv *rsn_ie;
+       struct brcmf_vs_tlv *wpa_ie;
+       struct brcmf_join_params join_params;
+       struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+       s32 bssidx = 0;
+
+       WL_TRACE("channel_type=%d, beacon_interval=%d, dtim_period=%d,\n",
+                settings->channel_type, settings->beacon_interval,
+                settings->dtim_period);
+       WL_TRACE("ssid=%s(%d), auth_type=%d, inactivity_timeout=%d\n",
+                settings->ssid, settings->ssid_len, settings->auth_type,
+                settings->inactivity_timeout);
+
+       if (!test_bit(WL_STATUS_AP_CREATING, &cfg->status)) {
+               WL_ERR("Not in AP creation mode\n");
+               return -EPERM;
+       }
+
+       memset(&ssid_le, 0, sizeof(ssid_le));
+       if (settings->ssid == NULL || settings->ssid_len == 0) {
+               ie_offset = DOT11_MGMT_HDR_LEN + DOT11_BCN_PRB_FIXED_LEN;
+               ssid_ie = brcmf_parse_tlvs(
+                               (u8 *)&settings->beacon.head[ie_offset],
+                               settings->beacon.head_len - ie_offset,
+                               WLAN_EID_SSID);
+               if (!ssid_ie)
+                       return -EINVAL;
 
-       WL_TRACE("Enter\n");
-       if (!check_sys_up(wiphy))
-               return -EIO;
+               memcpy(ssid_le.SSID, ssid_ie->data, ssid_ie->len);
+               ssid_le.SSID_len = cpu_to_le32(ssid_ie->len);
+               WL_TRACE("SSID is (%s) in Head\n", ssid_le.SSID);
+       } else {
+               memcpy(ssid_le.SSID, settings->ssid, settings->ssid_len);
+               ssid_le.SSID_len = cpu_to_le32((u32)settings->ssid_len);
+       }
 
-       pmkid_len = le32_to_cpu(pmkids->npmkid);
-       for (i = 0; i < pmkid_len; i++)
-               if (!memcmp(pmksa->bssid, pmkids->pmkid[i].BSSID, ETH_ALEN))
-                       break;
-       if (i < WL_NUM_PMKIDS_MAX) {
-               memcpy(pmkids->pmkid[i].BSSID, pmksa->bssid, ETH_ALEN);
-               memcpy(pmkids->pmkid[i].PMKID, pmksa->pmkid, WLAN_PMKID_LEN);
-               if (i == pmkid_len) {
-                       pmkid_len++;
-                       pmkids->npmkid = cpu_to_le32(pmkid_len);
+       brcmf_set_mpc(ndev, 0);
+       ioctl_value = 1;
+       err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_DOWN, &ioctl_value);
+       if (err < 0) {
+               WL_ERR("BRCMF_C_DOWN error %d\n", err);
+               goto exit;
+       }
+       ioctl_value = 1;
+       err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_INFRA, &ioctl_value);
+       if (err < 0) {
+               WL_ERR("SET INFRA error %d\n", err);
+               goto exit;
+       }
+       ioctl_value = 1;
+       err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_AP, &ioctl_value);
+       if (err < 0) {
+               WL_ERR("setting AP mode failed %d\n", err);
+               goto exit;
+       }
+
+       /* find the RSN_IE */
+       rsn_ie = brcmf_parse_tlvs((u8 *)settings->beacon.tail,
+                                 settings->beacon.tail_len, WLAN_EID_RSN);
+
+       /* find the WPA_IE */
+       wpa_ie = brcmf_find_wpaie((u8 *)settings->beacon.tail,
+                                 settings->beacon.tail_len);
+
+       kfree(cfg->ap_info->rsn_ie);
+       cfg->ap_info->rsn_ie = NULL;
+       kfree(cfg->ap_info->wpa_ie);
+       cfg->ap_info->wpa_ie = NULL;
+
+       if ((wpa_ie != NULL || rsn_ie != NULL)) {
+               WL_TRACE("WPA(2) IE is found\n");
+               if (wpa_ie != NULL) {
+                       /* WPA IE */
+                       err = brcmf_configure_wpaie(ndev, wpa_ie, false,
+                                                   bssidx);
+                       if (err < 0)
+                               goto exit;
+                       cfg->ap_info->wpa_ie = kmemdup(wpa_ie,
+                                                           wpa_ie->len +
+                                                           TLV_HDR_LEN,
+                                                           GFP_KERNEL);
+               } else {
+                       /* RSN IE */
+                       err = brcmf_configure_wpaie(ndev,
+                               (struct brcmf_vs_tlv *)rsn_ie, true, bssidx);
+                       if (err < 0)
+                               goto exit;
+                       cfg->ap_info->rsn_ie = kmemdup(rsn_ie,
+                                                           rsn_ie->len +
+                                                           TLV_HDR_LEN,
+                                                           GFP_KERNEL);
                }
-       } else
-               err = -EINVAL;
+               cfg->ap_info->security_mode = true;
+       } else {
+               WL_TRACE("No WPA(2) IEs found\n");
+               brcmf_configure_opensecurity(ndev, bssidx);
+               cfg->ap_info->security_mode = false;
+       }
+       /* Set Beacon IEs to FW */
+       err = brcmf_set_management_ie(cfg, ndev, bssidx,
+                                     VNDR_IE_BEACON_FLAG,
+                                     (u8 *)settings->beacon.tail,
+                                     settings->beacon.tail_len);
+       if (err)
+               WL_ERR("Set Beacon IE Failed\n");
+       else
+               WL_TRACE("Applied Vndr IEs for Beacon\n");
 
-       WL_CONN("set_pmksa,IW_PMKSA_ADD - PMKID: %pM =\n",
-               pmkids->pmkid[pmkid_len].BSSID);
-       for (i = 0; i < WLAN_PMKID_LEN; i++)
-               WL_CONN("%02x\n", pmkids->pmkid[pmkid_len].PMKID[i]);
+       /* Set Probe Response IEs to FW */
+       err = brcmf_set_management_ie(cfg, ndev, bssidx,
+                                     VNDR_IE_PRBRSP_FLAG,
+                                     (u8 *)settings->beacon.proberesp_ies,
+                                     settings->beacon.proberesp_ies_len);
+       if (err)
+               WL_ERR("Set Probe Resp IE Failed\n");
+       else
+               WL_TRACE("Applied Vndr IEs for Probe Resp\n");
+
+       if (settings->beacon_interval) {
+               ioctl_value = settings->beacon_interval;
+               err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_BCNPRD,
+                                         &ioctl_value);
+               if (err < 0) {
+                       WL_ERR("Beacon Interval Set Error, %d\n", err);
+                       goto exit;
+               }
+       }
+       if (settings->dtim_period) {
+               ioctl_value = settings->dtim_period;
+               err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_DTIMPRD,
+                                         &ioctl_value);
+               if (err < 0) {
+                       WL_ERR("DTIM Interval Set Error, %d\n", err);
+                       goto exit;
+               }
+       }
+       ioctl_value = 1;
+       err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_UP, &ioctl_value);
+       if (err < 0) {
+               WL_ERR("BRCMF_C_UP error (%d)\n", err);
+               goto exit;
+       }
 
-       err = brcmf_update_pmklist(ndev, cfg_priv->pmk_list, err);
+       memset(&join_params, 0, sizeof(join_params));
+       /* join parameters starts with ssid */
+       memcpy(&join_params.ssid_le, &ssid_le, sizeof(ssid_le));
+       /* create softap */
+       err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_SSID, &join_params,
+                             sizeof(join_params));
+       if (err < 0) {
+               WL_ERR("SET SSID error (%d)\n", err);
+               goto exit;
+       }
+       clear_bit(WL_STATUS_AP_CREATING, &cfg->status);
+       set_bit(WL_STATUS_AP_CREATED, &cfg->status);
 
-       WL_TRACE("Exit\n");
+exit:
+       if (err)
+               brcmf_set_mpc(ndev, 1);
        return err;
 }
 
-static s32
-brcmf_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *ndev,
-                     struct cfg80211_pmksa *pmksa)
+static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
 {
-       struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
-       struct pmkid_list pmkid;
-       s32 err = 0;
-       int i, pmkid_len;
+       struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+       s32 ioctl_value;
+       s32 err = -EPERM;
 
        WL_TRACE("Enter\n");
-       if (!check_sys_up(wiphy))
-               return -EIO;
-
-       memcpy(&pmkid.pmkid[0].BSSID, pmksa->bssid, ETH_ALEN);
-       memcpy(&pmkid.pmkid[0].PMKID, pmksa->pmkid, WLAN_PMKID_LEN);
-
-       WL_CONN("del_pmksa,IW_PMKSA_REMOVE - PMKID: %pM =\n",
-              &pmkid.pmkid[0].BSSID);
-       for (i = 0; i < WLAN_PMKID_LEN; i++)
-               WL_CONN("%02x\n", pmkid.pmkid[0].PMKID[i]);
-
-       pmkid_len = le32_to_cpu(cfg_priv->pmk_list->pmkids.npmkid);
-       for (i = 0; i < pmkid_len; i++)
-               if (!memcmp
-                   (pmksa->bssid, &cfg_priv->pmk_list->pmkids.pmkid[i].BSSID,
-                    ETH_ALEN))
-                       break;
 
-       if ((pmkid_len > 0)
-           && (i < pmkid_len)) {
-               memset(&cfg_priv->pmk_list->pmkids.pmkid[i], 0,
-                      sizeof(struct pmkid));
-               for (; i < (pmkid_len - 1); i++) {
-                       memcpy(&cfg_priv->pmk_list->pmkids.pmkid[i].BSSID,
-                              &cfg_priv->pmk_list->pmkids.pmkid[i + 1].BSSID,
-                              ETH_ALEN);
-                       memcpy(&cfg_priv->pmk_list->pmkids.pmkid[i].PMKID,
-                              &cfg_priv->pmk_list->pmkids.pmkid[i + 1].PMKID,
-                              WLAN_PMKID_LEN);
+       if (cfg->conf->mode == WL_MODE_AP) {
+               /* Due to most likely deauths outstanding we sleep */
+               /* first to make sure they get processed by fw. */
+               msleep(400);
+               ioctl_value = 0;
+               err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_AP, &ioctl_value);
+               if (err < 0) {
+                       WL_ERR("setting AP mode failed %d\n", err);
+                       goto exit;
                }
-               cfg_priv->pmk_list->pmkids.npmkid = cpu_to_le32(pmkid_len - 1);
-       } else
-               err = -EINVAL;
-
-       err = brcmf_update_pmklist(ndev, cfg_priv->pmk_list, err);
-
-       WL_TRACE("Exit\n");
+               ioctl_value = 0;
+               err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_UP, &ioctl_value);
+               if (err < 0) {
+                       WL_ERR("BRCMF_C_UP error %d\n", err);
+                       goto exit;
+               }
+               brcmf_set_mpc(ndev, 1);
+               clear_bit(WL_STATUS_AP_CREATING, &cfg->status);
+               clear_bit(WL_STATUS_AP_CREATED, &cfg->status);
+       }
+exit:
        return err;
-
 }
 
-static s32
-brcmf_cfg80211_flush_pmksa(struct wiphy *wiphy, struct net_device *ndev)
+static int
+brcmf_cfg80211_del_station(struct wiphy *wiphy, struct net_device *ndev,
+                          u8 *mac)
 {
-       struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
-       s32 err = 0;
+       struct brcmf_scb_val_le scbval;
+       s32 err;
+
+       if (!mac)
+               return -EFAULT;
+
+       WL_TRACE("Enter %pM\n", mac);
 
-       WL_TRACE("Enter\n");
        if (!check_sys_up(wiphy))
                return -EIO;
 
-       memset(cfg_priv->pmk_list, 0, sizeof(*cfg_priv->pmk_list));
-       err = brcmf_update_pmklist(ndev, cfg_priv->pmk_list, err);
+       memcpy(&scbval.ea, mac, ETH_ALEN);
+       scbval.val = cpu_to_le32(WLAN_REASON_DEAUTH_LEAVING);
+       err = brcmf_exec_dcmd(ndev, BRCMF_C_SCB_DEAUTHENTICATE_FOR_REASON,
+                             &scbval, sizeof(scbval));
+       if (err)
+               WL_ERR("SCB_DEAUTHENTICATE_FOR_REASON failed %d\n", err);
 
        WL_TRACE("Exit\n");
        return err;
-
 }
 
 static struct cfg80211_ops wl_cfg80211_ops = {
@@ -2748,7 +4369,18 @@ static struct cfg80211_ops wl_cfg80211_ops = {
        .resume = brcmf_cfg80211_resume,
        .set_pmksa = brcmf_cfg80211_set_pmksa,
        .del_pmksa = brcmf_cfg80211_del_pmksa,
-       .flush_pmksa = brcmf_cfg80211_flush_pmksa
+       .flush_pmksa = brcmf_cfg80211_flush_pmksa,
+       .start_ap = brcmf_cfg80211_start_ap,
+       .stop_ap = brcmf_cfg80211_stop_ap,
+       .del_station = brcmf_cfg80211_del_station,
+#ifndef CONFIG_BRCMISCAN
+       /* scheduled scan need e-scan, which is mutual exclusive with i-scan */
+       .sched_scan_start = brcmf_cfg80211_sched_scan_start,
+       .sched_scan_stop = brcmf_cfg80211_sched_scan_stop,
+#endif
+#ifdef CONFIG_NL80211_TESTMODE
+       .testmode_cmd = brcmf_cfg80211_testmode
+#endif
 };
 
 static s32 brcmf_mode_to_nl80211_iftype(s32 mode)
@@ -2767,8 +4399,18 @@ static s32 brcmf_mode_to_nl80211_iftype(s32 mode)
        return err;
 }
 
-static struct wireless_dev *brcmf_alloc_wdev(s32 sizeof_iface,
-                                         struct device *ndev)
+static void brcmf_wiphy_pno_params(struct wiphy *wiphy)
+{
+#ifndef CONFIG_BRCMFISCAN
+       /* scheduled scan settings */
+       wiphy->max_sched_scan_ssids = BRCMF_PNO_MAX_PFN_COUNT;
+       wiphy->max_match_sets = BRCMF_PNO_MAX_PFN_COUNT;
+       wiphy->max_sched_scan_ie_len = BRCMF_SCAN_IE_LEN_MAX;
+       wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
+#endif
+}
+
+static struct wireless_dev *brcmf_alloc_wdev(struct device *ndev)
 {
        struct wireless_dev *wdev;
        s32 err = 0;
@@ -2777,9 +4419,8 @@ static struct wireless_dev *brcmf_alloc_wdev(s32 sizeof_iface,
        if (!wdev)
                return ERR_PTR(-ENOMEM);
 
-       wdev->wiphy =
-           wiphy_new(&wl_cfg80211_ops,
-                     sizeof(struct brcmf_cfg80211_priv) + sizeof_iface);
+       wdev->wiphy = wiphy_new(&wl_cfg80211_ops,
+                               sizeof(struct brcmf_cfg80211_info));
        if (!wdev->wiphy) {
                WL_ERR("Could not allocate wiphy device\n");
                err = -ENOMEM;
@@ -2788,8 +4429,9 @@ static struct wireless_dev *brcmf_alloc_wdev(s32 sizeof_iface,
        set_wiphy_dev(wdev->wiphy, ndev);
        wdev->wiphy->max_scan_ssids = WL_NUM_SCAN_MAX;
        wdev->wiphy->max_num_pmkids = WL_NUM_PMKIDS_MAX;
-       wdev->wiphy->interface_modes =
-           BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC);
+       wdev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+                                      BIT(NL80211_IFTYPE_ADHOC) |
+                                      BIT(NL80211_IFTYPE_AP);
        wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &__wl_band_2ghz;
        wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = &__wl_band_5ghz_a;    /* Set
                                                * it as 11a by default.
@@ -2805,6 +4447,7 @@ static struct wireless_dev *brcmf_alloc_wdev(s32 sizeof_iface,
                                                                 * save mode
                                                                 * by default
                                                                 */
+       brcmf_wiphy_pno_params(wdev->wiphy);
        err = wiphy_register(wdev->wiphy);
        if (err < 0) {
                WL_ERR("Could not register wiphy device (%d)\n", err);
@@ -2821,9 +4464,9 @@ wiphy_new_out:
        return ERR_PTR(err);
 }
 
-static void brcmf_free_wdev(struct brcmf_cfg80211_priv *cfg_priv)
+static void brcmf_free_wdev(struct brcmf_cfg80211_info *cfg)
 {
-       struct wireless_dev *wdev = cfg_priv->wdev;
+       struct wireless_dev *wdev = cfg->wdev;
 
        if (!wdev) {
                WL_ERR("wdev is invalid\n");
@@ -2832,10 +4475,10 @@ static void brcmf_free_wdev(struct brcmf_cfg80211_priv *cfg_priv)
        wiphy_unregister(wdev->wiphy);
        wiphy_free(wdev->wiphy);
        kfree(wdev);
-       cfg_priv->wdev = NULL;
+       cfg->wdev = NULL;
 }
 
-static bool brcmf_is_linkup(struct brcmf_cfg80211_priv *cfg_priv,
+static bool brcmf_is_linkup(struct brcmf_cfg80211_info *cfg,
                            const struct brcmf_event_msg *e)
 {
        u32 event = be32_to_cpu(e->event_type);
@@ -2843,14 +4486,14 @@ static bool brcmf_is_linkup(struct brcmf_cfg80211_priv *cfg_priv,
 
        if (event == BRCMF_E_SET_SSID && status == BRCMF_E_STATUS_SUCCESS) {
                WL_CONN("Processing set ssid\n");
-               cfg_priv->link_up = true;
+               cfg->link_up = true;
                return true;
        }
 
        return false;
 }
 
-static bool brcmf_is_linkdown(struct brcmf_cfg80211_priv *cfg_priv,
+static bool brcmf_is_linkdown(struct brcmf_cfg80211_info *cfg,
                              const struct brcmf_event_msg *e)
 {
        u32 event = be32_to_cpu(e->event_type);
@@ -2863,7 +4506,7 @@ static bool brcmf_is_linkdown(struct brcmf_cfg80211_priv *cfg_priv,
        return false;
 }
 
-static bool brcmf_is_nonetwork(struct brcmf_cfg80211_priv *cfg_priv,
+static bool brcmf_is_nonetwork(struct brcmf_cfg80211_info *cfg,
                               const struct brcmf_event_msg *e)
 {
        u32 event = be32_to_cpu(e->event_type);
@@ -2884,9 +4527,9 @@ static bool brcmf_is_nonetwork(struct brcmf_cfg80211_priv *cfg_priv,
        return false;
 }
 
-static void brcmf_clear_assoc_ies(struct brcmf_cfg80211_priv *cfg_priv)
+static void brcmf_clear_assoc_ies(struct brcmf_cfg80211_info *cfg)
 {
-       struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg_priv);
+       struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg);
 
        kfree(conn_info->req_ie);
        conn_info->req_ie = NULL;
@@ -2896,30 +4539,30 @@ static void brcmf_clear_assoc_ies(struct brcmf_cfg80211_priv *cfg_priv)
        conn_info->resp_ie_len = 0;
 }
 
-static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_priv *cfg_priv)
+static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_info *cfg)
 {
-       struct net_device *ndev = cfg_to_ndev(cfg_priv);
+       struct net_device *ndev = cfg_to_ndev(cfg);
        struct brcmf_cfg80211_assoc_ielen_le *assoc_info;
-       struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg_priv);
+       struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg);
        u32 req_len;
        u32 resp_len;
        s32 err = 0;
 
-       brcmf_clear_assoc_ies(cfg_priv);
+       brcmf_clear_assoc_ies(cfg);
 
-       err = brcmf_dev_bufvar_get(ndev, "assoc_info", cfg_priv->extra_buf,
+       err = brcmf_dev_bufvar_get(ndev, "assoc_info", cfg->extra_buf,
                                WL_ASSOC_INFO_MAX);
        if (err) {
                WL_ERR("could not get assoc info (%d)\n", err);
                return err;
        }
        assoc_info =
-               (struct brcmf_cfg80211_assoc_ielen_le *)cfg_priv->extra_buf;
+               (struct brcmf_cfg80211_assoc_ielen_le *)cfg->extra_buf;
        req_len = le32_to_cpu(assoc_info->req_len);
        resp_len = le32_to_cpu(assoc_info->resp_len);
        if (req_len) {
                err = brcmf_dev_bufvar_get(ndev, "assoc_req_ies",
-                                          cfg_priv->extra_buf,
+                                          cfg->extra_buf,
                                           WL_ASSOC_INFO_MAX);
                if (err) {
                        WL_ERR("could not get assoc req (%d)\n", err);
@@ -2927,7 +4570,7 @@ static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_priv *cfg_priv)
                }
                conn_info->req_ie_len = req_len;
                conn_info->req_ie =
-                   kmemdup(cfg_priv->extra_buf, conn_info->req_ie_len,
+                   kmemdup(cfg->extra_buf, conn_info->req_ie_len,
                            GFP_KERNEL);
        } else {
                conn_info->req_ie_len = 0;
@@ -2935,7 +4578,7 @@ static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_priv *cfg_priv)
        }
        if (resp_len) {
                err = brcmf_dev_bufvar_get(ndev, "assoc_resp_ies",
-                                          cfg_priv->extra_buf,
+                                          cfg->extra_buf,
                                           WL_ASSOC_INFO_MAX);
                if (err) {
                        WL_ERR("could not get assoc resp (%d)\n", err);
@@ -2943,7 +4586,7 @@ static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_priv *cfg_priv)
                }
                conn_info->resp_ie_len = resp_len;
                conn_info->resp_ie =
-                   kmemdup(cfg_priv->extra_buf, conn_info->resp_ie_len,
+                   kmemdup(cfg->extra_buf, conn_info->resp_ie_len,
                            GFP_KERNEL);
        } else {
                conn_info->resp_ie_len = 0;
@@ -2956,12 +4599,13 @@ static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_priv *cfg_priv)
 }
 
 static s32
-brcmf_bss_roaming_done(struct brcmf_cfg80211_priv *cfg_priv,
+brcmf_bss_roaming_done(struct brcmf_cfg80211_info *cfg,
                       struct net_device *ndev,
                       const struct brcmf_event_msg *e)
 {
-       struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg_priv);
-       struct wiphy *wiphy = cfg_to_wiphy(cfg_priv);
+       struct brcmf_cfg80211_profile *profile = cfg->profile;
+       struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg);
+       struct wiphy *wiphy = cfg_to_wiphy(cfg);
        struct brcmf_channel_info_le channel_le;
        struct ieee80211_channel *notify_channel;
        struct ieee80211_supported_band *band;
@@ -2971,9 +4615,9 @@ brcmf_bss_roaming_done(struct brcmf_cfg80211_priv *cfg_priv,
 
        WL_TRACE("Enter\n");
 
-       brcmf_get_assoc_ies(cfg_priv);
-       brcmf_update_prof(cfg_priv, NULL, &e->addr, WL_PROF_BSSID);
-       brcmf_update_bss_info(cfg_priv);
+       brcmf_get_assoc_ies(cfg);
+       memcpy(profile->bssid, e->addr, ETH_ALEN);
+       brcmf_update_bss_info(cfg);
 
        brcmf_exec_dcmd(ndev, BRCMF_C_GET_CHANNEL, &channel_le,
                        sizeof(channel_le));
@@ -2989,37 +4633,35 @@ brcmf_bss_roaming_done(struct brcmf_cfg80211_priv *cfg_priv,
        freq = ieee80211_channel_to_frequency(target_channel, band->band);
        notify_channel = ieee80211_get_channel(wiphy, freq);
 
-       cfg80211_roamed(ndev, notify_channel,
-                       (u8 *)brcmf_read_prof(cfg_priv, WL_PROF_BSSID),
+       cfg80211_roamed(ndev, notify_channel, (u8 *)profile->bssid,
                        conn_info->req_ie, conn_info->req_ie_len,
                        conn_info->resp_ie, conn_info->resp_ie_len, GFP_KERNEL);
        WL_CONN("Report roaming result\n");
 
-       set_bit(WL_STATUS_CONNECTED, &cfg_priv->status);
+       set_bit(WL_STATUS_CONNECTED, &cfg->status);
        WL_TRACE("Exit\n");
        return err;
 }
 
 static s32
-brcmf_bss_connect_done(struct brcmf_cfg80211_priv *cfg_priv,
+brcmf_bss_connect_done(struct brcmf_cfg80211_info *cfg,
                       struct net_device *ndev, const struct brcmf_event_msg *e,
                       bool completed)
 {
-       struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg_priv);
+       struct brcmf_cfg80211_profile *profile = cfg->profile;
+       struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg);
        s32 err = 0;
 
        WL_TRACE("Enter\n");
 
-       if (test_and_clear_bit(WL_STATUS_CONNECTING, &cfg_priv->status)) {
+       if (test_and_clear_bit(WL_STATUS_CONNECTING, &cfg->status)) {
                if (completed) {
-                       brcmf_get_assoc_ies(cfg_priv);
-                       brcmf_update_prof(cfg_priv, NULL, &e->addr,
-                                         WL_PROF_BSSID);
-                       brcmf_update_bss_info(cfg_priv);
+                       brcmf_get_assoc_ies(cfg);
+                       memcpy(profile->bssid, e->addr, ETH_ALEN);
+                       brcmf_update_bss_info(cfg);
                }
                cfg80211_connect_result(ndev,
-                                       (u8 *)brcmf_read_prof(cfg_priv,
-                                                             WL_PROF_BSSID),
+                                       (u8 *)profile->bssid,
                                        conn_info->req_ie,
                                        conn_info->req_ie_len,
                                        conn_info->resp_ie,
@@ -3028,7 +4670,7 @@ brcmf_bss_connect_done(struct brcmf_cfg80211_priv *cfg_priv,
                                                    WLAN_STATUS_AUTH_TIMEOUT,
                                        GFP_KERNEL);
                if (completed)
-                       set_bit(WL_STATUS_CONNECTED, &cfg_priv->status);
+                       set_bit(WL_STATUS_CONNECTED, &cfg->status);
                WL_CONN("Report connect result - connection %s\n",
                                completed ? "succeeded" : "failed");
        }
@@ -3037,52 +4679,93 @@ brcmf_bss_connect_done(struct brcmf_cfg80211_priv *cfg_priv,
 }
 
 static s32
-brcmf_notify_connect_status(struct brcmf_cfg80211_priv *cfg_priv,
+brcmf_notify_connect_status_ap(struct brcmf_cfg80211_info *cfg,
+                              struct net_device *ndev,
+                              const struct brcmf_event_msg *e, void *data)
+{
+       s32 err = 0;
+       u32 event = be32_to_cpu(e->event_type);
+       u32 reason = be32_to_cpu(e->reason);
+       u32 len = be32_to_cpu(e->datalen);
+       static int generation;
+
+       struct station_info sinfo;
+
+       WL_CONN("event %d, reason %d\n", event, reason);
+       memset(&sinfo, 0, sizeof(sinfo));
+
+       sinfo.filled = 0;
+       if (((event == BRCMF_E_ASSOC_IND) || (event == BRCMF_E_REASSOC_IND)) &&
+           reason == BRCMF_E_STATUS_SUCCESS) {
+               sinfo.filled = STATION_INFO_ASSOC_REQ_IES;
+               if (!data) {
+                       WL_ERR("No IEs present in ASSOC/REASSOC_IND");
+                       return -EINVAL;
+               }
+               sinfo.assoc_req_ies = data;
+               sinfo.assoc_req_ies_len = len;
+               generation++;
+               sinfo.generation = generation;
+               cfg80211_new_sta(ndev, e->addr, &sinfo, GFP_ATOMIC);
+       } else if ((event == BRCMF_E_DISASSOC_IND) ||
+                  (event == BRCMF_E_DEAUTH_IND) ||
+                  (event == BRCMF_E_DEAUTH)) {
+               generation++;
+               sinfo.generation = generation;
+               cfg80211_del_sta(ndev, e->addr, GFP_ATOMIC);
+       }
+       return err;
+}
+
+static s32
+brcmf_notify_connect_status(struct brcmf_cfg80211_info *cfg,
                            struct net_device *ndev,
                            const struct brcmf_event_msg *e, void *data)
 {
+       struct brcmf_cfg80211_profile *profile = cfg->profile;
        s32 err = 0;
 
-       if (brcmf_is_linkup(cfg_priv, e)) {
+       if (cfg->conf->mode == WL_MODE_AP) {
+               err = brcmf_notify_connect_status_ap(cfg, ndev, e, data);
+       } else if (brcmf_is_linkup(cfg, e)) {
                WL_CONN("Linkup\n");
-               if (brcmf_is_ibssmode(cfg_priv)) {
-                       brcmf_update_prof(cfg_priv, NULL, (void *)e->addr,
-                               WL_PROF_BSSID);
-                       wl_inform_ibss(cfg_priv, ndev, e->addr);
+               if (brcmf_is_ibssmode(cfg)) {
+                       memcpy(profile->bssid, e->addr, ETH_ALEN);
+                       wl_inform_ibss(cfg, ndev, e->addr);
                        cfg80211_ibss_joined(ndev, e->addr, GFP_KERNEL);
-                       clear_bit(WL_STATUS_CONNECTING, &cfg_priv->status);
-                       set_bit(WL_STATUS_CONNECTED, &cfg_priv->status);
+                       clear_bit(WL_STATUS_CONNECTING, &cfg->status);
+                       set_bit(WL_STATUS_CONNECTED, &cfg->status);
                } else
-                       brcmf_bss_connect_done(cfg_priv, ndev, e, true);
-       } else if (brcmf_is_linkdown(cfg_priv, e)) {
+                       brcmf_bss_connect_done(cfg, ndev, e, true);
+       } else if (brcmf_is_linkdown(cfg, e)) {
                WL_CONN("Linkdown\n");
-               if (brcmf_is_ibssmode(cfg_priv)) {
-                       clear_bit(WL_STATUS_CONNECTING, &cfg_priv->status);
+               if (brcmf_is_ibssmode(cfg)) {
+                       clear_bit(WL_STATUS_CONNECTING, &cfg->status);
                        if (test_and_clear_bit(WL_STATUS_CONNECTED,
-                               &cfg_priv->status))
-                               brcmf_link_down(cfg_priv);
+                               &cfg->status))
+                               brcmf_link_down(cfg);
                } else {
-                       brcmf_bss_connect_done(cfg_priv, ndev, e, false);
+                       brcmf_bss_connect_done(cfg, ndev, e, false);
                        if (test_and_clear_bit(WL_STATUS_CONNECTED,
-                               &cfg_priv->status)) {
+                               &cfg->status)) {
                                cfg80211_disconnected(ndev, 0, NULL, 0,
                                        GFP_KERNEL);
-                               brcmf_link_down(cfg_priv);
+                               brcmf_link_down(cfg);
                        }
                }
-               brcmf_init_prof(cfg_priv->profile);
-       } else if (brcmf_is_nonetwork(cfg_priv, e)) {
-               if (brcmf_is_ibssmode(cfg_priv))
-                       clear_bit(WL_STATUS_CONNECTING, &cfg_priv->status);
+               brcmf_init_prof(cfg->profile);
+       } else if (brcmf_is_nonetwork(cfg, e)) {
+               if (brcmf_is_ibssmode(cfg))
+                       clear_bit(WL_STATUS_CONNECTING, &cfg->status);
                else
-                       brcmf_bss_connect_done(cfg_priv, ndev, e, false);
+                       brcmf_bss_connect_done(cfg, ndev, e, false);
        }
 
        return err;
 }
 
 static s32
-brcmf_notify_roaming_status(struct brcmf_cfg80211_priv *cfg_priv,
+brcmf_notify_roaming_status(struct brcmf_cfg80211_info *cfg,
                            struct net_device *ndev,
                            const struct brcmf_event_msg *e, void *data)
 {
@@ -3091,17 +4774,17 @@ brcmf_notify_roaming_status(struct brcmf_cfg80211_priv *cfg_priv,
        u32 status = be32_to_cpu(e->status);
 
        if (event == BRCMF_E_ROAM && status == BRCMF_E_STATUS_SUCCESS) {
-               if (test_bit(WL_STATUS_CONNECTED, &cfg_priv->status))
-                       brcmf_bss_roaming_done(cfg_priv, ndev, e);
+               if (test_bit(WL_STATUS_CONNECTED, &cfg->status))
+                       brcmf_bss_roaming_done(cfg, ndev, e);
                else
-                       brcmf_bss_connect_done(cfg_priv, ndev, e, true);
+                       brcmf_bss_connect_done(cfg, ndev, e, true);
        }
 
        return err;
 }
 
 static s32
-brcmf_notify_mic_status(struct brcmf_cfg80211_priv *cfg_priv,
+brcmf_notify_mic_status(struct brcmf_cfg80211_info *cfg,
                        struct net_device *ndev,
                        const struct brcmf_event_msg *e, void *data)
 {
@@ -3120,7 +4803,7 @@ brcmf_notify_mic_status(struct brcmf_cfg80211_priv *cfg_priv,
 }
 
 static s32
-brcmf_notify_scan_status(struct brcmf_cfg80211_priv *cfg_priv,
+brcmf_notify_scan_status(struct brcmf_cfg80211_info *cfg,
                         struct net_device *ndev,
                         const struct brcmf_event_msg *e, void *data)
 {
@@ -3133,12 +4816,12 @@ brcmf_notify_scan_status(struct brcmf_cfg80211_priv *cfg_priv,
 
        WL_TRACE("Enter\n");
 
-       if (cfg_priv->iscan_on && cfg_priv->iscan_kickstart) {
+       if (cfg->iscan_on && cfg->iscan_kickstart) {
                WL_TRACE("Exit\n");
-               return brcmf_wakeup_iscan(cfg_to_iscan(cfg_priv));
+               return brcmf_wakeup_iscan(cfg_to_iscan(cfg));
        }
 
-       if (!test_and_clear_bit(WL_STATUS_SCANNING, &cfg_priv->status)) {
+       if (!test_and_clear_bit(WL_STATUS_SCANNING, &cfg->status)) {
                WL_ERR("Scan complete while device not scanning\n");
                scan_abort = true;
                err = -EINVAL;
@@ -3155,35 +4838,33 @@ brcmf_notify_scan_status(struct brcmf_cfg80211_priv *cfg_priv,
        scan_channel = le32_to_cpu(channel_inform_le.scan_channel);
        if (scan_channel)
                WL_CONN("channel_inform.scan_channel (%d)\n", scan_channel);
-       cfg_priv->bss_list = cfg_priv->scan_results;
-       bss_list_le = (struct brcmf_scan_results_le *) cfg_priv->bss_list;
+       cfg->bss_list = cfg->scan_results;
+       bss_list_le = (struct brcmf_scan_results_le *) cfg->bss_list;
 
-       memset(cfg_priv->scan_results, 0, len);
+       memset(cfg->scan_results, 0, len);
        bss_list_le->buflen = cpu_to_le32(len);
        err = brcmf_exec_dcmd(ndev, BRCMF_C_SCAN_RESULTS,
-                             cfg_priv->scan_results, len);
+                             cfg->scan_results, len);
        if (err) {
                WL_ERR("%s Scan_results error (%d)\n", ndev->name, err);
                err = -EINVAL;
                scan_abort = true;
                goto scan_done_out;
        }
-       cfg_priv->scan_results->buflen = le32_to_cpu(bss_list_le->buflen);
-       cfg_priv->scan_results->version = le32_to_cpu(bss_list_le->version);
-       cfg_priv->scan_results->count = le32_to_cpu(bss_list_le->count);
+       cfg->scan_results->buflen = le32_to_cpu(bss_list_le->buflen);
+       cfg->scan_results->version = le32_to_cpu(bss_list_le->version);
+       cfg->scan_results->count = le32_to_cpu(bss_list_le->count);
 
-       err = brcmf_inform_bss(cfg_priv);
-       if (err) {
+       err = brcmf_inform_bss(cfg);
+       if (err)
                scan_abort = true;
-               goto scan_done_out;
-       }
 
 scan_done_out:
-       if (cfg_priv->scan_request) {
+       if (cfg->scan_request) {
                WL_SCAN("calling cfg80211_scan_done\n");
-               cfg80211_scan_done(cfg_priv->scan_request, scan_abort);
+               cfg80211_scan_done(cfg->scan_request, scan_abort);
                brcmf_set_mpc(ndev, 1);
-               cfg_priv->scan_request = NULL;
+               cfg->scan_request = NULL;
        }
 
        WL_TRACE("Exit\n");
@@ -3206,68 +4887,85 @@ static void brcmf_init_eloop_handler(struct brcmf_cfg80211_event_loop *el)
        memset(el, 0, sizeof(*el));
        el->handler[BRCMF_E_SCAN_COMPLETE] = brcmf_notify_scan_status;
        el->handler[BRCMF_E_LINK] = brcmf_notify_connect_status;
+       el->handler[BRCMF_E_DEAUTH_IND] = brcmf_notify_connect_status;
+       el->handler[BRCMF_E_DEAUTH] = brcmf_notify_connect_status;
+       el->handler[BRCMF_E_DISASSOC_IND] = brcmf_notify_connect_status;
+       el->handler[BRCMF_E_ASSOC_IND] = brcmf_notify_connect_status;
+       el->handler[BRCMF_E_REASSOC_IND] = brcmf_notify_connect_status;
        el->handler[BRCMF_E_ROAM] = brcmf_notify_roaming_status;
        el->handler[BRCMF_E_MIC_ERROR] = brcmf_notify_mic_status;
        el->handler[BRCMF_E_SET_SSID] = brcmf_notify_connect_status;
+       el->handler[BRCMF_E_PFN_NET_FOUND] = brcmf_notify_sched_scan_results;
+}
+
+static void brcmf_deinit_priv_mem(struct brcmf_cfg80211_info *cfg)
+{
+       kfree(cfg->scan_results);
+       cfg->scan_results = NULL;
+       kfree(cfg->bss_info);
+       cfg->bss_info = NULL;
+       kfree(cfg->conf);
+       cfg->conf = NULL;
+       kfree(cfg->profile);
+       cfg->profile = NULL;
+       kfree(cfg->scan_req_int);
+       cfg->scan_req_int = NULL;
+       kfree(cfg->escan_ioctl_buf);
+       cfg->escan_ioctl_buf = NULL;
+       kfree(cfg->dcmd_buf);
+       cfg->dcmd_buf = NULL;
+       kfree(cfg->extra_buf);
+       cfg->extra_buf = NULL;
+       kfree(cfg->iscan);
+       cfg->iscan = NULL;
+       kfree(cfg->pmk_list);
+       cfg->pmk_list = NULL;
+       if (cfg->ap_info) {
+               kfree(cfg->ap_info->wpa_ie);
+               kfree(cfg->ap_info->rsn_ie);
+               kfree(cfg->ap_info);
+               cfg->ap_info = NULL;
+       }
 }
 
-static void brcmf_deinit_priv_mem(struct brcmf_cfg80211_priv *cfg_priv)
-{
-       kfree(cfg_priv->scan_results);
-       cfg_priv->scan_results = NULL;
-       kfree(cfg_priv->bss_info);
-       cfg_priv->bss_info = NULL;
-       kfree(cfg_priv->conf);
-       cfg_priv->conf = NULL;
-       kfree(cfg_priv->profile);
-       cfg_priv->profile = NULL;
-       kfree(cfg_priv->scan_req_int);
-       cfg_priv->scan_req_int = NULL;
-       kfree(cfg_priv->dcmd_buf);
-       cfg_priv->dcmd_buf = NULL;
-       kfree(cfg_priv->extra_buf);
-       cfg_priv->extra_buf = NULL;
-       kfree(cfg_priv->iscan);
-       cfg_priv->iscan = NULL;
-       kfree(cfg_priv->pmk_list);
-       cfg_priv->pmk_list = NULL;
-}
-
-static s32 brcmf_init_priv_mem(struct brcmf_cfg80211_priv *cfg_priv)
-{
-       cfg_priv->scan_results = kzalloc(WL_SCAN_BUF_MAX, GFP_KERNEL);
-       if (!cfg_priv->scan_results)
+static s32 brcmf_init_priv_mem(struct brcmf_cfg80211_info *cfg)
+{
+       cfg->scan_results = kzalloc(WL_SCAN_BUF_MAX, GFP_KERNEL);
+       if (!cfg->scan_results)
                goto init_priv_mem_out;
-       cfg_priv->conf = kzalloc(sizeof(*cfg_priv->conf), GFP_KERNEL);
-       if (!cfg_priv->conf)
+       cfg->conf = kzalloc(sizeof(*cfg->conf), GFP_KERNEL);
+       if (!cfg->conf)
                goto init_priv_mem_out;
-       cfg_priv->profile = kzalloc(sizeof(*cfg_priv->profile), GFP_KERNEL);
-       if (!cfg_priv->profile)
+       cfg->profile = kzalloc(sizeof(*cfg->profile), GFP_KERNEL);
+       if (!cfg->profile)
                goto init_priv_mem_out;
-       cfg_priv->bss_info = kzalloc(WL_BSS_INFO_MAX, GFP_KERNEL);
-       if (!cfg_priv->bss_info)
+       cfg->bss_info = kzalloc(WL_BSS_INFO_MAX, GFP_KERNEL);
+       if (!cfg->bss_info)
                goto init_priv_mem_out;
-       cfg_priv->scan_req_int = kzalloc(sizeof(*cfg_priv->scan_req_int),
+       cfg->scan_req_int = kzalloc(sizeof(*cfg->scan_req_int),
                                         GFP_KERNEL);
-       if (!cfg_priv->scan_req_int)
+       if (!cfg->scan_req_int)
+               goto init_priv_mem_out;
+       cfg->escan_ioctl_buf = kzalloc(BRCMF_DCMD_MEDLEN, GFP_KERNEL);
+       if (!cfg->escan_ioctl_buf)
                goto init_priv_mem_out;
-       cfg_priv->dcmd_buf = kzalloc(WL_DCMD_LEN_MAX, GFP_KERNEL);
-       if (!cfg_priv->dcmd_buf)
+       cfg->dcmd_buf = kzalloc(WL_DCMD_LEN_MAX, GFP_KERNEL);
+       if (!cfg->dcmd_buf)
                goto init_priv_mem_out;
-       cfg_priv->extra_buf = kzalloc(WL_EXTRA_BUF_MAX, GFP_KERNEL);
-       if (!cfg_priv->extra_buf)
+       cfg->extra_buf = kzalloc(WL_EXTRA_BUF_MAX, GFP_KERNEL);
+       if (!cfg->extra_buf)
                goto init_priv_mem_out;
-       cfg_priv->iscan = kzalloc(sizeof(*cfg_priv->iscan), GFP_KERNEL);
-       if (!cfg_priv->iscan)
+       cfg->iscan = kzalloc(sizeof(*cfg->iscan), GFP_KERNEL);
+       if (!cfg->iscan)
                goto init_priv_mem_out;
-       cfg_priv->pmk_list = kzalloc(sizeof(*cfg_priv->pmk_list), GFP_KERNEL);
-       if (!cfg_priv->pmk_list)
+       cfg->pmk_list = kzalloc(sizeof(*cfg->pmk_list), GFP_KERNEL);
+       if (!cfg->pmk_list)
                goto init_priv_mem_out;
 
        return 0;
 
 init_priv_mem_out:
-       brcmf_deinit_priv_mem(cfg_priv);
+       brcmf_deinit_priv_mem(cfg);
 
        return -ENOMEM;
 }
@@ -3277,17 +4975,17 @@ init_priv_mem_out:
 */
 
 static struct brcmf_cfg80211_event_q *brcmf_deq_event(
-       struct brcmf_cfg80211_priv *cfg_priv)
+       struct brcmf_cfg80211_info *cfg)
 {
        struct brcmf_cfg80211_event_q *e = NULL;
 
-       spin_lock_irq(&cfg_priv->evt_q_lock);
-       if (!list_empty(&cfg_priv->evt_q_list)) {
-               e = list_first_entry(&cfg_priv->evt_q_list,
+       spin_lock_irq(&cfg->evt_q_lock);
+       if (!list_empty(&cfg->evt_q_list)) {
+               e = list_first_entry(&cfg->evt_q_list,
                                     struct brcmf_cfg80211_event_q, evt_q_list);
                list_del(&e->evt_q_list);
        }
-       spin_unlock_irq(&cfg_priv->evt_q_lock);
+       spin_unlock_irq(&cfg->evt_q_lock);
 
        return e;
 }
@@ -3299,23 +4997,33 @@ static struct brcmf_cfg80211_event_q *brcmf_deq_event(
 */
 
 static s32
-brcmf_enq_event(struct brcmf_cfg80211_priv *cfg_priv, u32 event,
-               const struct brcmf_event_msg *msg)
+brcmf_enq_event(struct brcmf_cfg80211_info *cfg, u32 event,
+               const struct brcmf_event_msg *msg, void *data)
 {
        struct brcmf_cfg80211_event_q *e;
        s32 err = 0;
        ulong flags;
+       u32 data_len;
+       u32 total_len;
 
-       e = kzalloc(sizeof(struct brcmf_cfg80211_event_q), GFP_ATOMIC);
+       total_len = sizeof(struct brcmf_cfg80211_event_q);
+       if (data)
+               data_len = be32_to_cpu(msg->datalen);
+       else
+               data_len = 0;
+       total_len += data_len;
+       e = kzalloc(total_len, GFP_ATOMIC);
        if (!e)
                return -ENOMEM;
 
        e->etype = event;
        memcpy(&e->emsg, msg, sizeof(struct brcmf_event_msg));
+       if (data)
+               memcpy(&e->edata, data, data_len);
 
-       spin_lock_irqsave(&cfg_priv->evt_q_lock, flags);
-       list_add_tail(&e->evt_q_list, &cfg_priv->evt_q_list);
-       spin_unlock_irqrestore(&cfg_priv->evt_q_lock, flags);
+       spin_lock_irqsave(&cfg->evt_q_lock, flags);
+       list_add_tail(&e->evt_q_list, &cfg->evt_q_list);
+       spin_unlock_irqrestore(&cfg->evt_q_lock, flags);
 
        return err;
 }
@@ -3327,12 +5035,12 @@ static void brcmf_put_event(struct brcmf_cfg80211_event_q *e)
 
 static void brcmf_cfg80211_event_handler(struct work_struct *work)
 {
-       struct brcmf_cfg80211_priv *cfg_priv =
-                       container_of(work, struct brcmf_cfg80211_priv,
+       struct brcmf_cfg80211_info *cfg =
+                       container_of(work, struct brcmf_cfg80211_info,
                                     event_work);
        struct brcmf_cfg80211_event_q *e;
 
-       e = brcmf_deq_event(cfg_priv);
+       e = brcmf_deq_event(cfg);
        if (unlikely(!e)) {
                WL_ERR("event queue empty...\n");
                return;
@@ -3340,137 +5048,131 @@ static void brcmf_cfg80211_event_handler(struct work_struct *work)
 
        do {
                WL_INFO("event type (%d)\n", e->etype);
-               if (cfg_priv->el.handler[e->etype])
-                       cfg_priv->el.handler[e->etype](cfg_priv,
-                                                      cfg_to_ndev(cfg_priv),
+               if (cfg->el.handler[e->etype])
+                       cfg->el.handler[e->etype](cfg,
+                                                      cfg_to_ndev(cfg),
                                                       &e->emsg, e->edata);
                else
                        WL_INFO("Unknown Event (%d): ignoring\n", e->etype);
                brcmf_put_event(e);
-       } while ((e = brcmf_deq_event(cfg_priv)));
+       } while ((e = brcmf_deq_event(cfg)));
 
 }
 
-static void brcmf_init_eq(struct brcmf_cfg80211_priv *cfg_priv)
+static void brcmf_init_eq(struct brcmf_cfg80211_info *cfg)
 {
-       spin_lock_init(&cfg_priv->evt_q_lock);
-       INIT_LIST_HEAD(&cfg_priv->evt_q_list);
+       spin_lock_init(&cfg->evt_q_lock);
+       INIT_LIST_HEAD(&cfg->evt_q_list);
 }
 
-static void brcmf_flush_eq(struct brcmf_cfg80211_priv *cfg_priv)
+static void brcmf_flush_eq(struct brcmf_cfg80211_info *cfg)
 {
        struct brcmf_cfg80211_event_q *e;
 
-       spin_lock_irq(&cfg_priv->evt_q_lock);
-       while (!list_empty(&cfg_priv->evt_q_list)) {
-               e = list_first_entry(&cfg_priv->evt_q_list,
+       spin_lock_irq(&cfg->evt_q_lock);
+       while (!list_empty(&cfg->evt_q_list)) {
+               e = list_first_entry(&cfg->evt_q_list,
                                     struct brcmf_cfg80211_event_q, evt_q_list);
                list_del(&e->evt_q_list);
                kfree(e);
        }
-       spin_unlock_irq(&cfg_priv->evt_q_lock);
+       spin_unlock_irq(&cfg->evt_q_lock);
 }
 
-static s32 wl_init_priv(struct brcmf_cfg80211_priv *cfg_priv)
+static s32 wl_init_priv(struct brcmf_cfg80211_info *cfg)
 {
        s32 err = 0;
 
-       cfg_priv->scan_request = NULL;
-       cfg_priv->pwr_save = true;
-       cfg_priv->iscan_on = true;      /* iscan on & off switch.
+       cfg->scan_request = NULL;
+       cfg->pwr_save = true;
+#ifdef CONFIG_BRCMISCAN
+       cfg->iscan_on = true;   /* iscan on & off switch.
                                 we enable iscan per default */
-       cfg_priv->roam_on = true;       /* roam on & off switch.
+       cfg->escan_on = false;  /* escan on & off switch.
+                                we disable escan per default */
+#else
+       cfg->iscan_on = false;  /* iscan on & off switch.
+                                we disable iscan per default */
+       cfg->escan_on = true;   /* escan on & off switch.
+                                we enable escan per default */
+#endif
+       cfg->roam_on = true;    /* roam on & off switch.
                                 we enable roam per default */
 
-       cfg_priv->iscan_kickstart = false;
-       cfg_priv->active_scan = true;   /* we do active scan for
+       cfg->iscan_kickstart = false;
+       cfg->active_scan = true;        /* we do active scan for
                                 specific scan per default */
-       cfg_priv->dongle_up = false;    /* dongle is not up yet */
-       brcmf_init_eq(cfg_priv);
-       err = brcmf_init_priv_mem(cfg_priv);
+       cfg->dongle_up = false; /* dongle is not up yet */
+       brcmf_init_eq(cfg);
+       err = brcmf_init_priv_mem(cfg);
        if (err)
                return err;
-       INIT_WORK(&cfg_priv->event_work, brcmf_cfg80211_event_handler);
-       brcmf_init_eloop_handler(&cfg_priv->el);
-       mutex_init(&cfg_priv->usr_sync);
-       err = brcmf_init_iscan(cfg_priv);
+       INIT_WORK(&cfg->event_work, brcmf_cfg80211_event_handler);
+       brcmf_init_eloop_handler(&cfg->el);
+       mutex_init(&cfg->usr_sync);
+       err = brcmf_init_iscan(cfg);
        if (err)
                return err;
-       brcmf_init_conf(cfg_priv->conf);
-       brcmf_init_prof(cfg_priv->profile);
-       brcmf_link_down(cfg_priv);
+       brcmf_init_escan(cfg);
+       brcmf_init_conf(cfg->conf);
+       brcmf_init_prof(cfg->profile);
+       brcmf_link_down(cfg);
 
        return err;
 }
 
-static void wl_deinit_priv(struct brcmf_cfg80211_priv *cfg_priv)
+static void wl_deinit_priv(struct brcmf_cfg80211_info *cfg)
 {
-       cancel_work_sync(&cfg_priv->event_work);
-       cfg_priv->dongle_up = false;    /* dongle down */
-       brcmf_flush_eq(cfg_priv);
-       brcmf_link_down(cfg_priv);
-       brcmf_term_iscan(cfg_priv);
-       brcmf_deinit_priv_mem(cfg_priv);
+       cancel_work_sync(&cfg->event_work);
+       cfg->dongle_up = false; /* dongle down */
+       brcmf_flush_eq(cfg);
+       brcmf_link_down(cfg);
+       brcmf_abort_scanning(cfg);
+       brcmf_deinit_priv_mem(cfg);
 }
 
-struct brcmf_cfg80211_dev *brcmf_cfg80211_attach(struct net_device *ndev,
-                                                struct device *busdev,
-                                                void *data)
+struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct net_device *ndev,
+                                                 struct device *busdev,
+                                                 struct brcmf_pub *drvr)
 {
        struct wireless_dev *wdev;
-       struct brcmf_cfg80211_priv *cfg_priv;
-       struct brcmf_cfg80211_iface *ci;
-       struct brcmf_cfg80211_dev *cfg_dev;
+       struct brcmf_cfg80211_info *cfg;
        s32 err = 0;
 
        if (!ndev) {
                WL_ERR("ndev is invalid\n");
                return NULL;
        }
-       cfg_dev = kzalloc(sizeof(struct brcmf_cfg80211_dev), GFP_KERNEL);
-       if (!cfg_dev)
-               return NULL;
 
-       wdev = brcmf_alloc_wdev(sizeof(struct brcmf_cfg80211_iface), busdev);
+       wdev = brcmf_alloc_wdev(busdev);
        if (IS_ERR(wdev)) {
-               kfree(cfg_dev);
                return NULL;
        }
 
        wdev->iftype = brcmf_mode_to_nl80211_iftype(WL_MODE_BSS);
-       cfg_priv = wdev_to_cfg(wdev);
-       cfg_priv->wdev = wdev;
-       cfg_priv->pub = data;
-       ci = (struct brcmf_cfg80211_iface *)&cfg_priv->ci;
-       ci->cfg_priv = cfg_priv;
+       cfg = wdev_to_cfg(wdev);
+       cfg->wdev = wdev;
+       cfg->pub = drvr;
        ndev->ieee80211_ptr = wdev;
        SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy));
        wdev->netdev = ndev;
-       err = wl_init_priv(cfg_priv);
+       err = wl_init_priv(cfg);
        if (err) {
                WL_ERR("Failed to init iwm_priv (%d)\n", err);
                goto cfg80211_attach_out;
        }
-       brcmf_set_drvdata(cfg_dev, ci);
 
-       return cfg_dev;
+       return cfg;
 
 cfg80211_attach_out:
-       brcmf_free_wdev(cfg_priv);
-       kfree(cfg_dev);
+       brcmf_free_wdev(cfg);
        return NULL;
 }
 
-void brcmf_cfg80211_detach(struct brcmf_cfg80211_dev *cfg_dev)
+void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg)
 {
-       struct brcmf_cfg80211_priv *cfg_priv;
-
-       cfg_priv = brcmf_priv_get(cfg_dev);
-
-       wl_deinit_priv(cfg_priv);
-       brcmf_free_wdev(cfg_priv);
-       brcmf_set_drvdata(cfg_dev, NULL);
-       kfree(cfg_dev);
+       wl_deinit_priv(cfg);
+       brcmf_free_wdev(cfg);
 }
 
 void
@@ -3478,10 +5180,10 @@ brcmf_cfg80211_event(struct net_device *ndev,
                  const struct brcmf_event_msg *e, void *data)
 {
        u32 event_type = be32_to_cpu(e->event_type);
-       struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev);
+       struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
 
-       if (!brcmf_enq_event(cfg_priv, event_type, e))
-               schedule_work(&cfg_priv->event_work);
+       if (!brcmf_enq_event(cfg, event_type, e, data))
+               schedule_work(&cfg->event_work);
 }
 
 static s32 brcmf_dongle_mode(struct net_device *ndev, s32 iftype)
@@ -3502,6 +5204,9 @@ static s32 brcmf_dongle_mode(struct net_device *ndev, s32 iftype)
        case NL80211_IFTYPE_STATION:
                infra = 1;
                break;
+       case NL80211_IFTYPE_AP:
+               infra = 1;
+               break;
        default:
                err = -EINVAL;
                WL_ERR("invalid type (%d)\n", iftype);
@@ -3554,6 +5259,8 @@ static s32 brcmf_dongle_eventmsg(struct net_device *ndev)
        setbit(eventmask, BRCMF_E_TXFAIL);
        setbit(eventmask, BRCMF_E_JOIN_START);
        setbit(eventmask, BRCMF_E_SCAN_COMPLETE);
+       setbit(eventmask, BRCMF_E_ESCAN_RESULT);
+       setbit(eventmask, BRCMF_E_PFN_NET_FOUND);
 
        brcmf_c_mkiovar("event_msgs", eventmask, BRCMF_EVENTING_MASK_LEN,
                        iovbuf, sizeof(iovbuf));
@@ -3672,46 +5379,46 @@ dongle_scantime_out:
        return err;
 }
 
-static s32 wl_update_wiphybands(struct brcmf_cfg80211_priv *cfg_priv)
+static s32 wl_update_wiphybands(struct brcmf_cfg80211_info *cfg)
 {
        struct wiphy *wiphy;
        s32 phy_list;
        s8 phy;
        s32 err = 0;
 
-       err = brcmf_exec_dcmd(cfg_to_ndev(cfg_priv), BRCM_GET_PHYLIST,
+       err = brcmf_exec_dcmd(cfg_to_ndev(cfg), BRCM_GET_PHYLIST,
                              &phy_list, sizeof(phy_list));
        if (err) {
                WL_ERR("error (%d)\n", err);
                return err;
        }
 
-       phy = ((char *)&phy_list)[1];
+       phy = ((char *)&phy_list)[0];
        WL_INFO("%c phy\n", phy);
        if (phy == 'n' || phy == 'a') {
-               wiphy = cfg_to_wiphy(cfg_priv);
+               wiphy = cfg_to_wiphy(cfg);
                wiphy->bands[IEEE80211_BAND_5GHZ] = &__wl_band_5ghz_n;
        }
 
        return err;
 }
 
-static s32 brcmf_dongle_probecap(struct brcmf_cfg80211_priv *cfg_priv)
+static s32 brcmf_dongle_probecap(struct brcmf_cfg80211_info *cfg)
 {
-       return wl_update_wiphybands(cfg_priv);
+       return wl_update_wiphybands(cfg);
 }
 
-static s32 brcmf_config_dongle(struct brcmf_cfg80211_priv *cfg_priv)
+static s32 brcmf_config_dongle(struct brcmf_cfg80211_info *cfg)
 {
        struct net_device *ndev;
        struct wireless_dev *wdev;
        s32 power_mode;
        s32 err = 0;
 
-       if (cfg_priv->dongle_up)
+       if (cfg->dongle_up)
                return err;
 
-       ndev = cfg_to_ndev(cfg_priv);
+       ndev = cfg_to_ndev(cfg);
        wdev = ndev->ieee80211_ptr;
 
        brcmf_dongle_scantime(ndev, WL_SCAN_CHANNEL_TIME,
@@ -3721,21 +5428,21 @@ static s32 brcmf_config_dongle(struct brcmf_cfg80211_priv *cfg_priv)
        if (err)
                goto default_conf_out;
 
-       power_mode = cfg_priv->pwr_save ? PM_FAST : PM_OFF;
+       power_mode = cfg->pwr_save ? PM_FAST : PM_OFF;
        err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_PM, &power_mode);
        if (err)
                goto default_conf_out;
        WL_INFO("power save set to %s\n",
                (power_mode ? "enabled" : "disabled"));
 
-       err = brcmf_dongle_roam(ndev, (cfg_priv->roam_on ? 0 : 1),
+       err = brcmf_dongle_roam(ndev, (cfg->roam_on ? 0 : 1),
                                WL_BEACON_TIMEOUT);
        if (err)
                goto default_conf_out;
        err = brcmf_dongle_mode(ndev, wdev->iftype);
        if (err && err != -EINPROGRESS)
                goto default_conf_out;
-       err = brcmf_dongle_probecap(cfg_priv);
+       err = brcmf_dongle_probecap(cfg);
        if (err)
                goto default_conf_out;
 
@@ -3743,31 +5450,31 @@ static s32 brcmf_config_dongle(struct brcmf_cfg80211_priv *cfg_priv)
 
 default_conf_out:
 
-       cfg_priv->dongle_up = true;
+       cfg->dongle_up = true;
 
        return err;
 
 }
 
-static int brcmf_debugfs_add_netdev_params(struct brcmf_cfg80211_priv *cfg_priv)
+static int brcmf_debugfs_add_netdev_params(struct brcmf_cfg80211_info *cfg)
 {
        char buf[10+IFNAMSIZ];
        struct dentry *fd;
        s32 err = 0;
 
-       sprintf(buf, "netdev:%s", cfg_to_ndev(cfg_priv)->name);
-       cfg_priv->debugfsdir = debugfs_create_dir(buf,
-                                       cfg_to_wiphy(cfg_priv)->debugfsdir);
+       sprintf(buf, "netdev:%s", cfg_to_ndev(cfg)->name);
+       cfg->debugfsdir = debugfs_create_dir(buf,
+                                       cfg_to_wiphy(cfg)->debugfsdir);
 
-       fd = debugfs_create_u16("beacon_int", S_IRUGO, cfg_priv->debugfsdir,
-               (u16 *)&cfg_priv->profile->beacon_interval);
+       fd = debugfs_create_u16("beacon_int", S_IRUGO, cfg->debugfsdir,
+               (u16 *)&cfg->profile->beacon_interval);
        if (!fd) {
                err = -ENOMEM;
                goto err_out;
        }
 
-       fd = debugfs_create_u8("dtim_period", S_IRUGO, cfg_priv->debugfsdir,
-               (u8 *)&cfg_priv->profile->dtim_period);
+       fd = debugfs_create_u8("dtim_period", S_IRUGO, cfg->debugfsdir,
+               (u8 *)&cfg->profile->dtim_period);
        if (!fd) {
                err = -ENOMEM;
                goto err_out;
@@ -3777,40 +5484,40 @@ err_out:
        return err;
 }
 
-static void brcmf_debugfs_remove_netdev(struct brcmf_cfg80211_priv *cfg_priv)
+static void brcmf_debugfs_remove_netdev(struct brcmf_cfg80211_info *cfg)
 {
-       debugfs_remove_recursive(cfg_priv->debugfsdir);
-       cfg_priv->debugfsdir = NULL;
+       debugfs_remove_recursive(cfg->debugfsdir);
+       cfg->debugfsdir = NULL;
 }
 
-static s32 __brcmf_cfg80211_up(struct brcmf_cfg80211_priv *cfg_priv)
+static s32 __brcmf_cfg80211_up(struct brcmf_cfg80211_info *cfg)
 {
        s32 err = 0;
 
-       set_bit(WL_STATUS_READY, &cfg_priv->status);
+       set_bit(WL_STATUS_READY, &cfg->status);
 
-       brcmf_debugfs_add_netdev_params(cfg_priv);
+       brcmf_debugfs_add_netdev_params(cfg);
 
-       err = brcmf_config_dongle(cfg_priv);
+       err = brcmf_config_dongle(cfg);
        if (err)
                return err;
 
-       brcmf_invoke_iscan(cfg_priv);
+       brcmf_invoke_iscan(cfg);
 
        return err;
 }
 
-static s32 __brcmf_cfg80211_down(struct brcmf_cfg80211_priv *cfg_priv)
+static s32 __brcmf_cfg80211_down(struct brcmf_cfg80211_info *cfg)
 {
        /*
         * While going down, if associated with AP disassociate
         * from AP to save power
         */
-       if ((test_bit(WL_STATUS_CONNECTED, &cfg_priv->status) ||
-            test_bit(WL_STATUS_CONNECTING, &cfg_priv->status)) &&
-            test_bit(WL_STATUS_READY, &cfg_priv->status)) {
+       if ((test_bit(WL_STATUS_CONNECTED, &cfg->status) ||
+            test_bit(WL_STATUS_CONNECTING, &cfg->status)) &&
+            test_bit(WL_STATUS_READY, &cfg->status)) {
                WL_INFO("Disassociating from AP");
-               brcmf_link_down(cfg_priv);
+               brcmf_link_down(cfg);
 
                /* Make sure WPA_Supplicant receives all the event
                   generated due to DISASSOC call to the fw to keep
@@ -3819,63 +5526,33 @@ static s32 __brcmf_cfg80211_down(struct brcmf_cfg80211_priv *cfg_priv)
                brcmf_delay(500);
        }
 
-       set_bit(WL_STATUS_SCAN_ABORTING, &cfg_priv->status);
-       brcmf_term_iscan(cfg_priv);
-       if (cfg_priv->scan_request) {
-               cfg80211_scan_done(cfg_priv->scan_request, true);
-               /* May need to perform this to cover rmmod */
-               /* wl_set_mpc(cfg_to_ndev(wl), 1); */
-               cfg_priv->scan_request = NULL;
-       }
-       clear_bit(WL_STATUS_READY, &cfg_priv->status);
-       clear_bit(WL_STATUS_SCANNING, &cfg_priv->status);
-       clear_bit(WL_STATUS_SCAN_ABORTING, &cfg_priv->status);
+       brcmf_abort_scanning(cfg);
+       clear_bit(WL_STATUS_READY, &cfg->status);
 
-       brcmf_debugfs_remove_netdev(cfg_priv);
+       brcmf_debugfs_remove_netdev(cfg);
 
        return 0;
 }
 
-s32 brcmf_cfg80211_up(struct brcmf_cfg80211_dev *cfg_dev)
+s32 brcmf_cfg80211_up(struct brcmf_cfg80211_info *cfg)
 {
-       struct brcmf_cfg80211_priv *cfg_priv;
        s32 err = 0;
 
-       cfg_priv = brcmf_priv_get(cfg_dev);
-       mutex_lock(&cfg_priv->usr_sync);
-       err = __brcmf_cfg80211_up(cfg_priv);
-       mutex_unlock(&cfg_priv->usr_sync);
+       mutex_lock(&cfg->usr_sync);
+       err = __brcmf_cfg80211_up(cfg);
+       mutex_unlock(&cfg->usr_sync);
 
        return err;
 }
 
-s32 brcmf_cfg80211_down(struct brcmf_cfg80211_dev *cfg_dev)
+s32 brcmf_cfg80211_down(struct brcmf_cfg80211_info *cfg)
 {
-       struct brcmf_cfg80211_priv *cfg_priv;
        s32 err = 0;
 
-       cfg_priv = brcmf_priv_get(cfg_dev);
-       mutex_lock(&cfg_priv->usr_sync);
-       err = __brcmf_cfg80211_down(cfg_priv);
-       mutex_unlock(&cfg_priv->usr_sync);
+       mutex_lock(&cfg->usr_sync);
+       err = __brcmf_cfg80211_down(cfg);
+       mutex_unlock(&cfg->usr_sync);
 
        return err;
 }
 
-static __used s32 brcmf_add_ie(struct brcmf_cfg80211_priv *cfg_priv,
-                              u8 t, u8 l, u8 *v)
-{
-       struct brcmf_cfg80211_ie *ie = &cfg_priv->ie;
-       s32 err = 0;
-
-       if (ie->offset + l + 2 > WL_TLV_INFO_MAX) {
-               WL_ERR("ei crosses buffer boundary\n");
-               return -ENOSPC;
-       }
-       ie->buf[ie->offset] = t;
-       ie->buf[ie->offset + 1] = l;
-       memcpy(&ie->buf[ie->offset + 2], v, l);
-       ie->offset += l + 2;
-
-       return err;
-}
index b5d9b36df3d0556bb54a34efbeabc5512b10057b..71ced174748a335a72f2c6b68712d9d3e40731d5 100644 (file)
 #ifndef _wl_cfg80211_h_
 #define _wl_cfg80211_h_
 
-struct brcmf_cfg80211_conf;
-struct brcmf_cfg80211_iface;
-struct brcmf_cfg80211_priv;
-struct brcmf_cfg80211_security;
-struct brcmf_cfg80211_ibss;
-
 #define WL_DBG_NONE            0
 #define WL_DBG_CONN            (1 << 5)
 #define WL_DBG_SCAN            (1 << 4)
@@ -123,13 +117,25 @@ do {                                                              \
 #define WL_SCAN_UNASSOC_TIME           40
 #define WL_SCAN_PASSIVE_TIME           120
 
+#define WL_ESCAN_BUF_SIZE              (1024 * 64)
+#define WL_ESCAN_TIMER_INTERVAL_MS     8000 /* E-Scan timeout */
+
+#define WL_ESCAN_ACTION_START          1
+#define WL_ESCAN_ACTION_CONTINUE       2
+#define WL_ESCAN_ACTION_ABORT          3
+
+#define WL_AUTH_SHARED_KEY             1       /* d11 shared authentication */
+#define IE_MAX_LEN                     512
+
 /* dongle status */
 enum wl_status {
        WL_STATUS_READY,
        WL_STATUS_SCANNING,
        WL_STATUS_SCAN_ABORTING,
        WL_STATUS_CONNECTING,
-       WL_STATUS_CONNECTED
+       WL_STATUS_CONNECTED,
+       WL_STATUS_AP_CREATING,
+       WL_STATUS_AP_CREATED
 };
 
 /* wi-fi mode */
@@ -169,23 +175,17 @@ struct brcmf_cfg80211_conf {
        struct ieee80211_channel channel;
 };
 
+/* forward declaration */
+struct brcmf_cfg80211_info;
+
 /* cfg80211 main event loop */
 struct brcmf_cfg80211_event_loop {
-       s32(*handler[BRCMF_E_LAST]) (struct brcmf_cfg80211_priv *cfg_priv,
+       s32(*handler[BRCMF_E_LAST]) (struct brcmf_cfg80211_info *cfg,
                                     struct net_device *ndev,
                                     const struct brcmf_event_msg *e,
                                     void *data);
 };
 
-/* representing interface of cfg80211 plane */
-struct brcmf_cfg80211_iface {
-       struct brcmf_cfg80211_priv *cfg_priv;
-};
-
-struct brcmf_cfg80211_dev {
-       void *driver_data;      /* to store cfg80211 object information */
-};
-
 /* basic structure of scan request */
 struct brcmf_cfg80211_scan_req {
        struct brcmf_ssid_le ssid_le;
@@ -238,7 +238,7 @@ struct brcmf_cfg80211_profile {
 /* dongle iscan event loop */
 struct brcmf_cfg80211_iscan_eloop {
        s32 (*handler[WL_SCAN_ERSULTS_LAST])
-               (struct brcmf_cfg80211_priv *cfg_priv);
+               (struct brcmf_cfg80211_info *cfg);
 };
 
 /* dongle iscan controller */
@@ -275,92 +275,240 @@ struct brcmf_cfg80211_pmk_list {
        struct pmkid foo[MAXPMKID - 1];
 };
 
-/* dongle private data of cfg80211 interface */
-struct brcmf_cfg80211_priv {
-       struct wireless_dev *wdev;      /* representing wl cfg80211 device */
-       struct brcmf_cfg80211_conf *conf;       /* dongle configuration */
-       struct cfg80211_scan_request *scan_request;     /* scan request
-                                                        object */
-       struct brcmf_cfg80211_event_loop el;    /* main event loop */
-       struct list_head evt_q_list;    /* used for event queue */
-       spinlock_t       evt_q_lock;    /* for event queue synchronization */
-       struct mutex usr_sync;  /* maily for dongle up/down synchronization */
-       struct brcmf_scan_results *bss_list;    /* bss_list holding scanned
-                                                ap information */
+/* dongle escan state */
+enum wl_escan_state {
+       WL_ESCAN_STATE_IDLE,
+       WL_ESCAN_STATE_SCANNING
+};
+
+struct escan_info {
+       u32 escan_state;
+       u8 escan_buf[WL_ESCAN_BUF_SIZE];
+       struct wiphy *wiphy;
+       struct net_device *ndev;
+};
+
+/* Structure to hold WPS, WPA IEs for a AP */
+struct ap_info {
+       u8 probe_res_ie[IE_MAX_LEN];
+       u8 beacon_ie[IE_MAX_LEN];
+       u32 probe_res_ie_len;
+       u32 beacon_ie_len;
+       u8 *wpa_ie;
+       u8 *rsn_ie;
+       bool security_mode;
+};
+
+/**
+ * struct brcmf_pno_param_le - PNO scan configuration parameters
+ *
+ * @version: PNO parameters version.
+ * @scan_freq: scan frequency.
+ * @lost_network_timeout: #sec. to declare discovered network as lost.
+ * @flags: Bit field to control features of PFN such as sort criteria auto
+ *     enable switch and background scan.
+ * @rssi_margin: Margin to avoid jitter for choosing a PFN based on RSSI sort
+ *     criteria.
+ * @bestn: number of best networks in each scan.
+ * @mscan: number of scans recorded.
+ * @repeat: minimum number of scan intervals before scan frequency changes
+ *     in adaptive scan.
+ * @exp: exponent of 2 for maximum scan interval.
+ * @slow_freq: slow scan period.
+ */
+struct brcmf_pno_param_le {
+       __le32 version;
+       __le32 scan_freq;
+       __le32 lost_network_timeout;
+       __le16 flags;
+       __le16 rssi_margin;
+       u8 bestn;
+       u8 mscan;
+       u8 repeat;
+       u8 exp;
+       __le32 slow_freq;
+};
+
+/**
+ * struct brcmf_pno_net_param_le - scan parameters per preferred network.
+ *
+ * @ssid: ssid name and its length.
+ * @flags: bit2: hidden.
+ * @infra: BSS vs IBSS.
+ * @auth: Open vs Closed.
+ * @wpa_auth: WPA type.
+ * @wsec: wsec value.
+ */
+struct brcmf_pno_net_param_le {
+       struct brcmf_ssid_le ssid;
+       __le32 flags;
+       __le32 infra;
+       __le32 auth;
+       __le32 wpa_auth;
+       __le32 wsec;
+};
+
+/**
+ * struct brcmf_pno_net_info_le - information per found network.
+ *
+ * @bssid: BSS network identifier.
+ * @channel: channel number only.
+ * @SSID_len: length of ssid.
+ * @SSID: ssid characters.
+ * @RSSI: receive signal strength (in dBm).
+ * @timestamp: age in seconds.
+ */
+struct brcmf_pno_net_info_le {
+       u8 bssid[ETH_ALEN];
+       u8 channel;
+       u8 SSID_len;
+       u8 SSID[32];
+       __le16  RSSI;
+       __le16  timestamp;
+};
+
+/**
+ * struct brcmf_pno_scanresults_le - result returned in PNO NET FOUND event.
+ *
+ * @version: PNO version identifier.
+ * @status: indicates completion status of PNO scan.
+ * @count: amount of brcmf_pno_net_info_le entries appended.
+ */
+struct brcmf_pno_scanresults_le {
+       __le32 version;
+       __le32 status;
+       __le32 count;
+};
+
+/**
+ * struct brcmf_cfg80211_info - dongle private data of cfg80211 interface
+ *
+ * @wdev: representing wl cfg80211 device.
+ * @conf: dongle configuration.
+ * @scan_request: cfg80211 scan request object.
+ * @el: main event loop.
+ * @evt_q_list: used for event queue.
+ * @evt_q_lock: for event queue synchronization.
+ * @usr_sync: mainly for dongle up/down synchronization.
+ * @bss_list: bss_list holding scanned ap information.
+ * @scan_results: results of the last scan.
+ * @scan_req_int: internal scan request object.
+ * @bss_info: bss information for cfg80211 layer.
+ * @ie: information element object for internal purpose.
+ * @profile: holding dongle profile.
+ * @iscan: iscan controller information.
+ * @conn_info: association info.
+ * @pmk_list: wpa2 pmk list.
+ * @event_work: event handler work struct.
+ * @status: current dongle status.
+ * @pub: common driver information.
+ * @channel: current channel.
+ * @iscan_on: iscan on/off switch.
+ * @iscan_kickstart: indicate iscan already started.
+ * @active_scan: current scan mode.
+ * @sched_escan: e-scan for scheduled scan support running.
+ * @ibss_starter: indicates this sta is ibss starter.
+ * @link_up: link/connection up flag.
+ * @pwr_save: indicate whether dongle to support power save mode.
+ * @dongle_up: indicate whether dongle up or not.
+ * @roam_on: on/off switch for dongle self-roaming.
+ * @scan_tried: indicates if first scan attempted.
+ * @dcmd_buf: dcmd buffer.
+ * @extra_buf: mainly to grab assoc information.
+ * @debugfsdir: debugfs folder for this device.
+ * @escan_on: escan on/off switch.
+ * @escan_info: escan information.
+ * @escan_timeout: Timer to catch scan timeout.
+ * @escan_timeout_work: scan timeout worker.
+ * @escan_ioctl_buf: dongle command buffer for escan commands.
+ * @ap_info: host ap information.
+ *
+ */
+struct brcmf_cfg80211_info {
+       struct wireless_dev *wdev;
+       struct brcmf_cfg80211_conf *conf;
+       struct cfg80211_scan_request *scan_request;
+       struct brcmf_cfg80211_event_loop el;
+       struct list_head evt_q_list;
+       spinlock_t       evt_q_lock;
+       struct mutex usr_sync;
+       struct brcmf_scan_results *bss_list;
        struct brcmf_scan_results *scan_results;
-       struct brcmf_cfg80211_scan_req *scan_req_int;   /* scan request object
-                                                for internal purpose */
-       struct wl_cfg80211_bss_info *bss_info;  /* bss information for
-                                                cfg80211 layer */
-       struct brcmf_cfg80211_ie ie;    /* information element object for
-                                        internal purpose */
-       struct brcmf_cfg80211_profile *profile; /* holding dongle profile */
-       struct brcmf_cfg80211_iscan_ctrl *iscan;        /* iscan controller */
-       struct brcmf_cfg80211_connect_info conn_info; /* association info */
-       struct brcmf_cfg80211_pmk_list *pmk_list;       /* wpa2 pmk list */
-       struct work_struct event_work;  /* event handler work struct */
-       unsigned long status;           /* current dongle status */
-       void *pub;
-       u32 channel;            /* current channel */
-       bool iscan_on;          /* iscan on/off switch */
-       bool iscan_kickstart;   /* indicate iscan already started */
-       bool active_scan;       /* current scan mode */
-       bool ibss_starter;      /* indicates this sta is ibss starter */
-       bool link_up;           /* link/connection up flag */
-       bool pwr_save;          /* indicate whether dongle to support
-                                        power save mode */
-       bool dongle_up;         /* indicate whether dongle up or not */
-       bool roam_on;           /* on/off switch for dongle self-roaming */
-       bool scan_tried;        /* indicates if first scan attempted */
-       u8 *dcmd_buf;           /* dcmd buffer */
-       u8 *extra_buf;          /* maily to grab assoc information */
+       struct brcmf_cfg80211_scan_req *scan_req_int;
+       struct wl_cfg80211_bss_info *bss_info;
+       struct brcmf_cfg80211_ie ie;
+       struct brcmf_cfg80211_profile *profile;
+       struct brcmf_cfg80211_iscan_ctrl *iscan;
+       struct brcmf_cfg80211_connect_info conn_info;
+       struct brcmf_cfg80211_pmk_list *pmk_list;
+       struct work_struct event_work;
+       unsigned long status;
+       struct brcmf_pub *pub;
+       u32 channel;
+       bool iscan_on;
+       bool iscan_kickstart;
+       bool active_scan;
+       bool sched_escan;
+       bool ibss_starter;
+       bool link_up;
+       bool pwr_save;
+       bool dongle_up;
+       bool roam_on;
+       bool scan_tried;
+       u8 *dcmd_buf;
+       u8 *extra_buf;
        struct dentry *debugfsdir;
-       u8 ci[0] __aligned(NETDEV_ALIGN);
+       bool escan_on;
+       struct escan_info escan_info;
+       struct timer_list escan_timeout;
+       struct work_struct escan_timeout_work;
+       u8 *escan_ioctl_buf;
+       struct ap_info *ap_info;
 };
 
-static inline struct wiphy *cfg_to_wiphy(struct brcmf_cfg80211_priv *w)
+static inline struct wiphy *cfg_to_wiphy(struct brcmf_cfg80211_info *w)
 {
        return w->wdev->wiphy;
 }
 
-static inline struct brcmf_cfg80211_priv *wiphy_to_cfg(struct wiphy *w)
+static inline struct brcmf_cfg80211_info *wiphy_to_cfg(struct wiphy *w)
 {
-       return (struct brcmf_cfg80211_priv *)(wiphy_priv(w));
+       return (struct brcmf_cfg80211_info *)(wiphy_priv(w));
 }
 
-static inline struct brcmf_cfg80211_priv *wdev_to_cfg(struct wireless_dev *wd)
+static inline struct brcmf_cfg80211_info *wdev_to_cfg(struct wireless_dev *wd)
 {
-       return (struct brcmf_cfg80211_priv *)(wdev_priv(wd));
+       return (struct brcmf_cfg80211_info *)(wdev_priv(wd));
 }
 
-static inline struct net_device *cfg_to_ndev(struct brcmf_cfg80211_priv *cfg)
+static inline struct net_device *cfg_to_ndev(struct brcmf_cfg80211_info *cfg)
 {
        return cfg->wdev->netdev;
 }
 
-static inline struct brcmf_cfg80211_priv *ndev_to_cfg(struct net_device *ndev)
+static inline struct brcmf_cfg80211_info *ndev_to_cfg(struct net_device *ndev)
 {
        return wdev_to_cfg(ndev->ieee80211_ptr);
 }
 
-#define iscan_to_cfg(i) ((struct brcmf_cfg80211_priv *)(i->data))
+#define iscan_to_cfg(i) ((struct brcmf_cfg80211_info *)(i->data))
 #define cfg_to_iscan(w) (w->iscan)
 
 static inline struct
-brcmf_cfg80211_connect_info *cfg_to_conn(struct brcmf_cfg80211_priv *cfg)
+brcmf_cfg80211_connect_info *cfg_to_conn(struct brcmf_cfg80211_info *cfg)
 {
        return &cfg->conn_info;
 }
 
-extern struct brcmf_cfg80211_dev *brcmf_cfg80211_attach(struct net_device *ndev,
-                                                       struct device *busdev,
-                                                       void *data);
-extern void brcmf_cfg80211_detach(struct brcmf_cfg80211_dev *cfg);
+struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct net_device *ndev,
+                                                 struct device *busdev,
+                                                 struct brcmf_pub *drvr);
+void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg);
 
 /* event handler from dongle */
-extern void brcmf_cfg80211_event(struct net_device *ndev,
-                                const struct brcmf_event_msg *e, void *data);
-extern s32 brcmf_cfg80211_up(struct brcmf_cfg80211_dev *cfg_dev);
-extern s32 brcmf_cfg80211_down(struct brcmf_cfg80211_dev *cfg_dev);
+void brcmf_cfg80211_event(struct net_device *ndev,
+                         const struct brcmf_event_msg *e, void *data);
+s32 brcmf_cfg80211_up(struct brcmf_cfg80211_info *cfg);
+s32 brcmf_cfg80211_down(struct brcmf_cfg80211_info *cfg);
 
 #endif                         /* _wl_cfg80211_h_ */
index 8c9345dd37d270fcf8abf03d08714ff176df5529..b89f1272b93f506f24f8cf9e262f9b7c489c6180 100644 (file)
@@ -535,9 +535,6 @@ void ai_detach(struct si_pub *sih)
 {
        struct si_info *sii;
 
-       struct si_pub *si_local = NULL;
-       memcpy(&si_local, &sih, sizeof(struct si_pub **));
-
        sii = container_of(sih, struct si_info, pub);
 
        if (sii == NULL)
index a5edebeb0b4f7f748155551df76396602885078e..a744ea5a95599797023a0040401ab1816855f77f 100644 (file)
@@ -86,7 +86,9 @@ MODULE_AUTHOR("Broadcom Corporation");
 MODULE_DESCRIPTION("Broadcom 802.11n wireless LAN driver.");
 MODULE_SUPPORTED_DEVICE("Broadcom 802.11n WLAN cards");
 MODULE_LICENSE("Dual BSD/GPL");
-
+/* This needs to be adjusted when brcms_firmwares changes */
+MODULE_FIRMWARE("brcm/bcm43xx-0.fw");
+MODULE_FIRMWARE("brcm/bcm43xx_hdr-0.fw");
 
 /* recognized BCMA Core IDs */
 static struct bcma_device_id brcms_coreid_table[] = {
@@ -265,7 +267,9 @@ static void brcms_set_basic_rate(struct brcm_rateset *rs, u16 rate, bool is_br)
        }
 }
 
-static void brcms_ops_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void brcms_ops_tx(struct ieee80211_hw *hw,
+                        struct ieee80211_tx_control *control,
+                        struct sk_buff *skb)
 {
        struct brcms_info *wl = hw->priv;
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
@@ -277,7 +281,7 @@ static void brcms_ops_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
                goto done;
        }
        brcms_c_sendpkt_mac80211(wl->wlc, skb, hw);
-       tx_info->rate_driver_data[0] = tx_info->control.sta;
+       tx_info->rate_driver_data[0] = control->sta;
  done:
        spin_unlock_bh(&wl->lock);
 }
@@ -300,7 +304,10 @@ static int brcms_ops_start(struct ieee80211_hw *hw)
        wl->mute_tx = true;
 
        if (!wl->pub->up)
-               err = brcms_up(wl);
+               if (!blocked)
+                       err = brcms_up(wl);
+               else
+                       err = -ERFKILL;
        else
                err = -ENODEV;
        spin_unlock_bh(&wl->lock);
index 03ca65324845f39f1e8e48b2669a243bddf7ce5c..75086b37c817b747fe5482bb097a66a859c14214 100644 (file)
@@ -7512,15 +7512,10 @@ prep_mac80211_status(struct brcms_c_info *wlc, struct d11rxhdr *rxh,
 
        channel = BRCMS_CHAN_CHANNEL(rxh->RxChan);
 
-       if (channel > 14) {
-               rx_status->band = IEEE80211_BAND_5GHZ;
-               rx_status->freq = ieee80211_ofdm_chan_to_freq(
-                                       WF_CHAN_FACTOR_5_G/2, channel);
-
-       } else {
-               rx_status->band = IEEE80211_BAND_2GHZ;
-               rx_status->freq = ieee80211_dsss_chan_to_freq(channel);
-       }
+       rx_status->band =
+               channel > 14 ? IEEE80211_BAND_5GHZ : IEEE80211_BAND_2GHZ;
+       rx_status->freq =
+               ieee80211_channel_to_frequency(channel, rx_status->band);
 
        rx_status->signal = wlc_phy_rssi_compute(wlc->hw->band->pi, rxh);
 
index bcc79b4e3267ba1434c3c0bfdec1b2de6f50d18b..e8682855b73a5189936576bc69367fb765d1bf7f 100644 (file)
@@ -34,6 +34,7 @@
 #define BCM43235_CHIP_ID       43235
 #define BCM43236_CHIP_ID       43236
 #define BCM43238_CHIP_ID       43238
+#define BCM43241_CHIP_ID       0x4324
 #define BCM4329_CHIP_ID                0x4329
 #define BCM4330_CHIP_ID                0x4330
 #define BCM4331_CHIP_ID                0x4331
index f10d30274c23ade434cc8149ebcaebaadedf8584..c11a290a1edf6c07e38cfd1035f783ddb9918ac2 100644 (file)
 #define WL_CHANSPEC_BAND_2G            0x2000
 #define INVCHANSPEC                    255
 
-/* used to calculate the chan_freq = chan_factor * 500Mhz + 5 * chan_number */
-#define WF_CHAN_FACTOR_2_4_G           4814    /* 2.4 GHz band, 2407 MHz */
-#define WF_CHAN_FACTOR_5_G             10000   /* 5   GHz band, 5000 MHz */
-#define WF_CHAN_FACTOR_4_G             8000    /* 4.9 GHz band for Japan */
-
 #define CHSPEC_CHANNEL(chspec) ((u8)((chspec) & WL_CHANSPEC_CHAN_MASK))
 #define CHSPEC_BAND(chspec)    ((chspec) & WL_CHANSPEC_BAND_MASK)
 
index e1f4102772426032f9ccd5c060f39118d9bb25c3..c6ea995750db80c9664351c079be86be81c70262 100644 (file)
@@ -860,10 +860,10 @@ void hostap_free_data(struct ap_data *ap)
                return;
        }
 
-       flush_work_sync(&ap->add_sta_proc_queue);
+       flush_work(&ap->add_sta_proc_queue);
 
 #ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
-       flush_work_sync(&ap->wds_oper_queue);
+       flush_work(&ap->wds_oper_queue);
        if (ap->crypt)
                ap->crypt->deinit(ap->crypt_priv);
        ap->crypt = ap->crypt_priv = NULL;
index 50f87b60b0bd01400590dcd7e661f7bbe5423cd9..8e7000fd4414fa4c52ff8218a7c61963d6c215bb 100644 (file)
@@ -3311,13 +3311,13 @@ static void prism2_free_local_data(struct net_device *dev)
 
        unregister_netdev(local->dev);
 
-       flush_work_sync(&local->reset_queue);
-       flush_work_sync(&local->set_multicast_list_queue);
-       flush_work_sync(&local->set_tim_queue);
+       flush_work(&local->reset_queue);
+       flush_work(&local->set_multicast_list_queue);
+       flush_work(&local->set_tim_queue);
 #ifndef PRISM2_NO_STATION_MODES
-       flush_work_sync(&local->info_queue);
+       flush_work(&local->info_queue);
 #endif
-       flush_work_sync(&local->comms_qual_update);
+       flush_work(&local->comms_qual_update);
 
        lib80211_crypt_info_free(&local->crypt_info);
 
index 47932b28aac101217ee56d1db2dc736e7d8d1d5b..970a48baaf804a38ff1883702bbd9460d7d5b8c8 100644 (file)
@@ -4,6 +4,7 @@
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/export.h>
+#include <linux/etherdevice.h>
 #include "hostap_wlan.h"
 #include "hostap.h"
 #include "hostap_ap.h"
@@ -463,8 +464,7 @@ static void handle_info_queue_scanresults(local_info_t *local)
                prism2_host_roaming(local);
 
        if (local->host_roaming == 2 && local->iw_mode == IW_MODE_INFRA &&
-           memcmp(local->preferred_ap, "\x00\x00\x00\x00\x00\x00",
-                  ETH_ALEN) != 0) {
+           !is_zero_ether_addr(local->preferred_ap)) {
                /*
                 * Firmware seems to be getting into odd state in host_roaming
                 * mode 2 when hostscan is used without join command, so try
index 18054d9c66887363cc7d028279c04e89732481ba..ac074731335a5ed1b7ef393dfd15ae6c87299d03 100644 (file)
@@ -6,6 +6,7 @@
 #include <linux/ethtool.h>
 #include <linux/if_arp.h>
 #include <linux/module.h>
+#include <linux/etherdevice.h>
 #include <net/lib80211.h>
 
 #include "hostap_wlan.h"
@@ -3221,8 +3222,7 @@ static int prism2_ioctl_siwencodeext(struct net_device *dev,
                return -EINVAL;
 
        addr = ext->addr.sa_data;
-       if (addr[0] == 0xff && addr[1] == 0xff && addr[2] == 0xff &&
-           addr[3] == 0xff && addr[4] == 0xff && addr[5] == 0xff) {
+       if (is_broadcast_ether_addr(addr)) {
                sta_ptr = NULL;
                crypt = &local->crypt_info.crypt[i];
        } else {
@@ -3394,8 +3394,7 @@ static int prism2_ioctl_giwencodeext(struct net_device *dev,
                i--;
 
        addr = ext->addr.sa_data;
-       if (addr[0] == 0xff && addr[1] == 0xff && addr[2] == 0xff &&
-           addr[3] == 0xff && addr[4] == 0xff && addr[5] == 0xff) {
+       if (is_broadcast_ether_addr(addr)) {
                sta_ptr = NULL;
                crypt = &local->crypt_info.crypt[i];
        } else {
@@ -3458,9 +3457,7 @@ static int prism2_ioctl_set_encryption(local_info_t *local,
            param->u.crypt.key_len)
                return -EINVAL;
 
-       if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
-           param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
-           param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff) {
+       if (is_broadcast_ether_addr(param->sta_addr)) {
                if (param->u.crypt.idx >= WEP_KEYS)
                        return -EINVAL;
                sta_ptr = NULL;
@@ -3593,9 +3590,7 @@ static int prism2_ioctl_get_encryption(local_info_t *local,
        if (max_key_len < 0)
                return -EINVAL;
 
-       if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
-           param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
-           param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff) {
+       if (is_broadcast_ether_addr(param->sta_addr)) {
                sta_ptr = NULL;
                if (param->u.crypt.idx >= WEP_KEYS)
                        param->u.crypt.idx = local->crypt_info.tx_keyidx;
index 627bc12074c729a37426199e52cb119dce01fdda..15f0fad39add227550e3d0a49fedd24737da6aba 100644 (file)
@@ -1084,7 +1084,7 @@ int prism2_sta_deauth(local_info_t *local, u16 reason)
        __le16 val = cpu_to_le16(reason);
 
        if (local->iw_mode != IW_MODE_INFRA ||
-           memcmp(local->bssid, "\x00\x00\x00\x00\x00\x00", ETH_ALEN) == 0 ||
+           is_zero_ether_addr(local->bssid) ||
            memcmp(local->bssid, "\x44\x44\x44\x44\x44\x44", ETH_ALEN) == 0)
                return 0;
 
index 83324b3216527ec72195b35da104a4ce6c5ad6e1..29b8fa1adefde125d62608f3b9513e53afba247a 100644 (file)
@@ -2181,8 +2181,7 @@ static void isr_indicate_rf_kill(struct ipw2100_priv *priv, u32 status)
 
        /* Make sure the RF Kill check timer is running */
        priv->stop_rf_kill = 0;
-       cancel_delayed_work(&priv->rf_kill);
-       schedule_delayed_work(&priv->rf_kill, round_jiffies_relative(HZ));
+       mod_delayed_work(system_wq, &priv->rf_kill, round_jiffies_relative(HZ));
 }
 
 static void send_scan_event(void *data)
@@ -4322,9 +4321,8 @@ static int ipw_radio_kill_sw(struct ipw2100_priv *priv, int disable_radio)
                                          "disabled by HW switch\n");
                        /* Make sure the RF_KILL check timer is running */
                        priv->stop_rf_kill = 0;
-                       cancel_delayed_work(&priv->rf_kill);
-                       schedule_delayed_work(&priv->rf_kill,
-                                             round_jiffies_relative(HZ));
+                       mod_delayed_work(system_wq, &priv->rf_kill,
+                                        round_jiffies_relative(HZ));
                } else
                        schedule_reset(priv);
        }
@@ -6964,13 +6962,6 @@ static int ipw2100_wx_set_wap(struct net_device *dev,
        struct ipw2100_priv *priv = libipw_priv(dev);
        int err = 0;
 
-       static const unsigned char any[] = {
-               0xff, 0xff, 0xff, 0xff, 0xff, 0xff
-       };
-       static const unsigned char off[] = {
-               0x00, 0x00, 0x00, 0x00, 0x00, 0x00
-       };
-
        // sanity checks
        if (wrqu->ap_addr.sa_family != ARPHRD_ETHER)
                return -EINVAL;
@@ -6981,8 +6972,8 @@ static int ipw2100_wx_set_wap(struct net_device *dev,
                goto done;
        }
 
-       if (!memcmp(any, wrqu->ap_addr.sa_data, ETH_ALEN) ||
-           !memcmp(off, wrqu->ap_addr.sa_data, ETH_ALEN)) {
+       if (is_broadcast_ether_addr(wrqu->ap_addr.sa_data) ||
+           is_zero_ether_addr(wrqu->ap_addr.sa_data)) {
                /* we disable mandatory BSSID association */
                IPW_DEBUG_WX("exit - disable mandatory BSSID\n");
                priv->config &= ~CFG_STATIC_BSSID;
index 0df45914739489f5f7555423f9f647d8fdb7dff5..935120fc8c9397daef4cb822b3d00176e0ca0cb8 100644 (file)
@@ -9037,18 +9037,11 @@ static int ipw_wx_set_wap(struct net_device *dev,
 {
        struct ipw_priv *priv = libipw_priv(dev);
 
-       static const unsigned char any[] = {
-               0xff, 0xff, 0xff, 0xff, 0xff, 0xff
-       };
-       static const unsigned char off[] = {
-               0x00, 0x00, 0x00, 0x00, 0x00, 0x00
-       };
-
        if (wrqu->ap_addr.sa_family != ARPHRD_ETHER)
                return -EINVAL;
        mutex_lock(&priv->mutex);
-       if (!memcmp(any, wrqu->ap_addr.sa_data, ETH_ALEN) ||
-           !memcmp(off, wrqu->ap_addr.sa_data, ETH_ALEN)) {
+       if (is_broadcast_ether_addr(wrqu->ap_addr.sa_data) ||
+           is_zero_ether_addr(wrqu->ap_addr.sa_data)) {
                /* we disable mandatory BSSID association */
                IPW_DEBUG_WX("Setting AP BSSID to ANY\n");
                priv->config &= ~CFG_STATIC_BSSID;
index 1571505b1a38ba4903f6664cb0a25ada4e9a6e04..54aba474443867f463714e117f14a605ba251803 100644 (file)
@@ -675,7 +675,7 @@ int libipw_wx_set_encodeext(struct libipw_device *ieee,
        }
       done:
        if (ieee->set_security)
-               ieee->set_security(ieee->dev, &sec);
+               ieee->set_security(dev, &sec);
 
        return ret;
 }
index faec404672081d5f72d03da9f44abac0de3c27d1..e252acb9c86239aa0b031fad77d465beb4efb86d 100644 (file)
@@ -460,7 +460,9 @@ il3945_build_tx_cmd_basic(struct il_priv *il, struct il_device_cmd *cmd,
  * start C_TX command process
  */
 static int
-il3945_tx_skb(struct il_priv *il, struct sk_buff *skb)
+il3945_tx_skb(struct il_priv *il,
+             struct ieee80211_sta *sta,
+             struct sk_buff *skb)
 {
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -512,7 +514,7 @@ il3945_tx_skb(struct il_priv *il, struct sk_buff *skb)
        hdr_len = ieee80211_hdrlen(fc);
 
        /* Find idx into station table for destination station */
-       sta_id = il_sta_id_or_broadcast(il, info->control.sta);
+       sta_id = il_sta_id_or_broadcast(il, sta);
        if (sta_id == IL_INVALID_STATION) {
                D_DROP("Dropping - INVALID STATION: %pM\n", hdr->addr1);
                goto drop;
@@ -2859,7 +2861,9 @@ il3945_mac_stop(struct ieee80211_hw *hw)
 }
 
 static void
-il3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+il3945_mac_tx(struct ieee80211_hw *hw,
+              struct ieee80211_tx_control *control,
+              struct sk_buff *skb)
 {
        struct il_priv *il = hw->priv;
 
@@ -2868,7 +2872,7 @@ il3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
        D_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
             ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
 
-       if (il3945_tx_skb(il, skb))
+       if (il3945_tx_skb(il, control->sta, skb))
                dev_kfree_skb_any(skb);
 
        D_MAC80211("leave\n");
index 34f61a0581a22cf78054063ced014aa05cd72f38..eac4dc8bc879ffabeacf558b576f6a6fe8b5d9ed 100644 (file)
@@ -1526,8 +1526,11 @@ il4965_tx_cmd_build_basic(struct il_priv *il, struct sk_buff *skb,
 }
 
 static void
-il4965_tx_cmd_build_rate(struct il_priv *il, struct il_tx_cmd *tx_cmd,
-                        struct ieee80211_tx_info *info, __le16 fc)
+il4965_tx_cmd_build_rate(struct il_priv *il,
+                        struct il_tx_cmd *tx_cmd,
+                        struct ieee80211_tx_info *info,
+                        struct ieee80211_sta *sta,
+                        __le16 fc)
 {
        const u8 rts_retry_limit = 60;
        u32 rate_flags;
@@ -1561,9 +1564,7 @@ il4965_tx_cmd_build_rate(struct il_priv *il, struct il_tx_cmd *tx_cmd,
        rate_idx = info->control.rates[0].idx;
        if ((info->control.rates[0].flags & IEEE80211_TX_RC_MCS) || rate_idx < 0
            || rate_idx > RATE_COUNT_LEGACY)
-               rate_idx =
-                   rate_lowest_index(&il->bands[info->band],
-                                     info->control.sta);
+               rate_idx = rate_lowest_index(&il->bands[info->band], sta);
        /* For 5 GHZ band, remap mac80211 rate indices into driver indices */
        if (info->band == IEEE80211_BAND_5GHZ)
                rate_idx += IL_FIRST_OFDM_RATE;
@@ -1630,11 +1631,12 @@ il4965_tx_cmd_build_hwcrypto(struct il_priv *il, struct ieee80211_tx_info *info,
  * start C_TX command process
  */
 int
-il4965_tx_skb(struct il_priv *il, struct sk_buff *skb)
+il4965_tx_skb(struct il_priv *il,
+             struct ieee80211_sta *sta,
+             struct sk_buff *skb)
 {
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-       struct ieee80211_sta *sta = info->control.sta;
        struct il_station_priv *sta_priv = NULL;
        struct il_tx_queue *txq;
        struct il_queue *q;
@@ -1680,7 +1682,7 @@ il4965_tx_skb(struct il_priv *il, struct sk_buff *skb)
                sta_id = il->hw_params.bcast_id;
        else {
                /* Find idx into station table for destination station */
-               sta_id = il_sta_id_or_broadcast(il, info->control.sta);
+               sta_id = il_sta_id_or_broadcast(il, sta);
 
                if (sta_id == IL_INVALID_STATION) {
                        D_DROP("Dropping - INVALID STATION: %pM\n", hdr->addr1);
@@ -1786,7 +1788,7 @@ il4965_tx_skb(struct il_priv *il, struct sk_buff *skb)
        /* TODO need this for burst mode later on */
        il4965_tx_cmd_build_basic(il, skb, tx_cmd, info, hdr, sta_id);
 
-       il4965_tx_cmd_build_rate(il, tx_cmd, info, fc);
+       il4965_tx_cmd_build_rate(il, tx_cmd, info, sta, fc);
 
        il_update_stats(il, true, fc, len);
        /*
@@ -5828,7 +5830,9 @@ il4965_mac_stop(struct ieee80211_hw *hw)
 }
 
 void
-il4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+il4965_mac_tx(struct ieee80211_hw *hw,
+             struct ieee80211_tx_control *control,
+             struct sk_buff *skb)
 {
        struct il_priv *il = hw->priv;
 
@@ -5837,7 +5841,7 @@ il4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
        D_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
             ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
 
-       if (il4965_tx_skb(il, skb))
+       if (il4965_tx_skb(il, control->sta, skb))
                dev_kfree_skb_any(skb);
 
        D_MACDUMP("leave\n");
index 1db677689cfe36b18940c406e302519c51591bef..2d092f328547d8ca37dfef233a4619c23f7b5aa4 100644 (file)
@@ -78,7 +78,9 @@ int il4965_hw_txq_attach_buf_to_tfd(struct il_priv *il, struct il_tx_queue *txq,
 int il4965_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq);
 void il4965_hwrate_to_tx_control(struct il_priv *il, u32 rate_n_flags,
                                 struct ieee80211_tx_info *info);
-int il4965_tx_skb(struct il_priv *il, struct sk_buff *skb);
+int il4965_tx_skb(struct il_priv *il,
+                 struct ieee80211_sta *sta,
+                 struct sk_buff *skb);
 int il4965_tx_agg_start(struct il_priv *il, struct ieee80211_vif *vif,
                        struct ieee80211_sta *sta, u16 tid, u16 * ssn);
 int il4965_tx_agg_stop(struct il_priv *il, struct ieee80211_vif *vif,
@@ -163,7 +165,9 @@ void il4965_eeprom_release_semaphore(struct il_priv *il);
 int il4965_eeprom_check_version(struct il_priv *il);
 
 /* mac80211 handlers (for 4965) */
-void il4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
+void il4965_mac_tx(struct ieee80211_hw *hw,
+                  struct ieee80211_tx_control *control,
+                  struct sk_buff *skb);
 int il4965_mac_start(struct ieee80211_hw *hw);
 void il4965_mac_stop(struct ieee80211_hw *hw);
 void il4965_configure_filter(struct ieee80211_hw *hw,
index 0370403fd0bd5d2345704f8fc766c5ad2260a7dc..318ed3c9fe7499899d08fc3de1de917438690109 100644 (file)
@@ -1586,9 +1586,9 @@ il_fill_probe_req(struct il_priv *il, struct ieee80211_mgmt *frame,
                return 0;
 
        frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
-       memcpy(frame->da, il_bcast_addr, ETH_ALEN);
+       eth_broadcast_addr(frame->da);
        memcpy(frame->sa, ta, ETH_ALEN);
-       memcpy(frame->bssid, il_bcast_addr, ETH_ALEN);
+       eth_broadcast_addr(frame->bssid);
        frame->seq_ctrl = 0;
 
        len += 24;
@@ -4860,7 +4860,7 @@ EXPORT_SYMBOL(il_add_beacon_time);
 
 #ifdef CONFIG_PM
 
-int
+static int
 il_pci_suspend(struct device *device)
 {
        struct pci_dev *pdev = to_pci_dev(device);
@@ -4877,9 +4877,8 @@ il_pci_suspend(struct device *device)
 
        return 0;
 }
-EXPORT_SYMBOL(il_pci_suspend);
 
-int
+static int
 il_pci_resume(struct device *device)
 {
        struct pci_dev *pdev = to_pci_dev(device);
@@ -4906,16 +4905,8 @@ il_pci_resume(struct device *device)
 
        return 0;
 }
-EXPORT_SYMBOL(il_pci_resume);
 
-const struct dev_pm_ops il_pm_ops = {
-       .suspend = il_pci_suspend,
-       .resume = il_pci_resume,
-       .freeze = il_pci_suspend,
-       .thaw = il_pci_resume,
-       .poweroff = il_pci_suspend,
-       .restore = il_pci_resume,
-};
+SIMPLE_DEV_PM_OPS(il_pm_ops, il_pci_suspend, il_pci_resume);
 EXPORT_SYMBOL(il_pm_ops);
 
 #endif /* CONFIG_PM */
index 72468266906019972fbca813ff5b0e019fc47e29..b4bb813362bdbeb44afab3dd7b73d74dd556feb3 100644 (file)
@@ -1843,8 +1843,6 @@ __le32 il_add_beacon_time(struct il_priv *il, u32 base, u32 addon,
                          u32 beacon_interval);
 
 #ifdef CONFIG_PM
-int il_pci_suspend(struct device *device);
-int il_pci_resume(struct device *device);
 extern const struct dev_pm_ops il_pm_ops;
 
 #define IL_LEGACY_PM_OPS       (&il_pm_ops)
index 9bb16bdf6d26118ccc4bb6708934400c59b21c44..75e12f29d9eb7dc0e4a16bb92aa5cd6aa587b062 100644 (file)
@@ -201,7 +201,9 @@ void iwl_chswitch_done(struct iwl_priv *priv, bool is_success);
 
 
 /* tx */
-int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb);
+int iwlagn_tx_skb(struct iwl_priv *priv,
+                 struct ieee80211_sta *sta,
+                 struct sk_buff *skb);
 int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
                        struct ieee80211_sta *sta, u16 tid, u16 *ssn);
 int iwlagn_tx_agg_oper(struct iwl_priv *priv, struct ieee80211_vif *vif,
@@ -485,16 +487,13 @@ static inline void iwl_dvm_set_pmi(struct iwl_priv *priv, bool state)
 }
 
 #ifdef CONFIG_IWLWIFI_DEBUGFS
-int iwl_dbgfs_register(struct iwl_priv *priv, const char *name);
-void iwl_dbgfs_unregister(struct iwl_priv *priv);
+int iwl_dbgfs_register(struct iwl_priv *priv, struct dentry *dbgfs_dir);
 #else
-static inline int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
+static inline int iwl_dbgfs_register(struct iwl_priv *priv,
+                                    struct dentry *dbgfs_dir)
 {
        return 0;
 }
-static inline void iwl_dbgfs_unregister(struct iwl_priv *priv)
-{
-}
 #endif /* CONFIG_IWLWIFI_DEBUGFS */
 
 #ifdef CONFIG_IWLWIFI_DEBUG
index 4a361c55c543f5fdf491c265509f89f50903d815..01128c96b5d8c309e800bd08b01f378ceb382613 100644 (file)
@@ -1055,8 +1055,9 @@ struct iwl_wep_cmd {
 #define RX_RES_PHY_FLAGS_MOD_CCK_MSK           cpu_to_le16(1 << 1)
 #define RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK    cpu_to_le16(1 << 2)
 #define RX_RES_PHY_FLAGS_NARROW_BAND_MSK       cpu_to_le16(1 << 3)
-#define RX_RES_PHY_FLAGS_ANTENNA_MSK           0xf0
+#define RX_RES_PHY_FLAGS_ANTENNA_MSK           0x70
 #define RX_RES_PHY_FLAGS_ANTENNA_POS           4
+#define RX_RES_PHY_FLAGS_AGG_MSK               cpu_to_le16(1 << 7)
 
 #define RX_RES_STATUS_SEC_TYPE_MSK     (0x7 << 8)
 #define RX_RES_STATUS_SEC_TYPE_NONE    (0x0 << 8)
index a47b306b522cd3a49fcbb1622083baacb245d621..1a98fa3ab06df6fbf9ae8ec3197680adef182883 100644 (file)
@@ -2352,24 +2352,19 @@ DEBUGFS_READ_WRITE_FILE_OPS(calib_disabled);
  * Create the debugfs files and directories
  *
  */
-int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
+int iwl_dbgfs_register(struct iwl_priv *priv, struct dentry *dbgfs_dir)
 {
-       struct dentry *phyd = priv->hw->wiphy->debugfsdir;
-       struct dentry *dir_drv, *dir_data, *dir_rf, *dir_debug;
+       struct dentry *dir_data, *dir_rf, *dir_debug;
 
-       dir_drv = debugfs_create_dir(name, phyd);
-       if (!dir_drv)
-               return -ENOMEM;
-
-       priv->debugfs_dir = dir_drv;
+       priv->debugfs_dir = dbgfs_dir;
 
-       dir_data = debugfs_create_dir("data", dir_drv);
+       dir_data = debugfs_create_dir("data", dbgfs_dir);
        if (!dir_data)
                goto err;
-       dir_rf = debugfs_create_dir("rf", dir_drv);
+       dir_rf = debugfs_create_dir("rf", dbgfs_dir);
        if (!dir_rf)
                goto err;
-       dir_debug = debugfs_create_dir("debug", dir_drv);
+       dir_debug = debugfs_create_dir("debug", dbgfs_dir);
        if (!dir_debug)
                goto err;
 
@@ -2415,25 +2410,30 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
        /* Calibrations disabled/enabled status*/
        DEBUGFS_ADD_FILE(calib_disabled, dir_rf, S_IWUSR | S_IRUSR);
 
-       if (iwl_trans_dbgfs_register(priv->trans, dir_debug))
-               goto err;
+       /*
+        * Create a symlink with mac80211. This is not very robust, as it does
+        * not remove the symlink created. The implicit assumption is that
+        * when the opmode exits, mac80211 will also exit, and will remove
+        * this symlink as part of its cleanup.
+        */
+       if (priv->mac80211_registered) {
+               char buf[100];
+               struct dentry *mac80211_dir, *dev_dir, *root_dir;
+
+               dev_dir = dbgfs_dir->d_parent;
+               root_dir = dev_dir->d_parent;
+               mac80211_dir = priv->hw->wiphy->debugfsdir;
+
+               snprintf(buf, 100, "../../%s/%s", root_dir->d_name.name,
+                        dev_dir->d_name.name);
+
+               if (!debugfs_create_symlink("iwlwifi", mac80211_dir, buf))
+                       goto err;
+       }
+
        return 0;
 
 err:
-       IWL_ERR(priv, "Can't create the debugfs directory\n");
-       iwl_dbgfs_unregister(priv);
+       IWL_ERR(priv, "failed to create the dvm debugfs entries\n");
        return -ENOMEM;
 }
-
-/**
- * Remove the debugfs files and directories
- *
- */
-void iwl_dbgfs_unregister(struct iwl_priv *priv)
-{
-       if (!priv->debugfs_dir)
-               return;
-
-       debugfs_remove_recursive(priv->debugfs_dir);
-       priv->debugfs_dir = NULL;
-}
index 054f728f6266fb6f4f4425e7824f62a0e910730c..8141f91c3725bd3c8f26d9830e61574dd3b4586f 100644 (file)
@@ -771,6 +771,7 @@ struct iwl_priv {
        u8 agg_tids_count;
 
        struct iwl_rx_phy_res last_phy_res;
+       u32 ampdu_ref;
        bool last_phy_res_valid;
 
        /*
index a5f7bce96325819f6c14471bec1b4232af533a3e..ff8162d4c4543d3d2976c964ed2f790a8f7cb6a1 100644 (file)
@@ -195,7 +195,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
                        ARRAY_SIZE(iwlagn_iface_combinations_dualmode);
        }
 
-       hw->wiphy->max_remain_on_channel_duration = 1000;
+       hw->wiphy->max_remain_on_channel_duration = 500;
 
        hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
                            WIPHY_FLAG_DISABLE_BEACON_HINTS |
@@ -511,14 +511,16 @@ static void iwlagn_mac_set_wakeup(struct ieee80211_hw *hw, bool enabled)
 }
 #endif
 
-static void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void iwlagn_mac_tx(struct ieee80211_hw *hw,
+                         struct ieee80211_tx_control *control,
+                         struct sk_buff *skb)
 {
        struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
 
        IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
                     ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
 
-       if (iwlagn_tx_skb(priv, skb))
+       if (iwlagn_tx_skb(priv, control->sta, skb))
                dev_kfree_skb_any(skb);
 }
 
index 84d3db5aa506c113c572f46cae1d58b4525c3b0a..7ff3f14306784169f886e5c7ca570d8b217a7309 100644 (file)
@@ -862,7 +862,8 @@ void iwl_down(struct iwl_priv *priv)
         * No race since we hold the mutex here and a new one
         * can't come in at this time.
         */
-       ieee80211_remain_on_channel_expired(priv->hw);
+       if (priv->ucode_loaded && priv->cur_ucode != IWL_UCODE_INIT)
+               ieee80211_remain_on_channel_expired(priv->hw);
 
        exit_pending =
                test_and_set_bit(STATUS_EXIT_PENDING, &priv->status);
@@ -994,7 +995,11 @@ static void iwl_bg_restart(struct work_struct *data)
                iwlagn_prepare_restart(priv);
                mutex_unlock(&priv->mutex);
                iwl_cancel_deferred_work(priv);
-               ieee80211_restart_hw(priv->hw);
+               if (priv->mac80211_registered)
+                       ieee80211_restart_hw(priv->hw);
+               else
+                       IWL_ERR(priv,
+                               "Cannot request restart before registrating with mac80211");
        } else {
                WARN_ON(1);
        }
@@ -1222,7 +1227,8 @@ static int iwl_eeprom_init_hw_params(struct iwl_priv *priv)
 
 static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
                                                 const struct iwl_cfg *cfg,
-                                                const struct iwl_fw *fw)
+                                                const struct iwl_fw *fw,
+                                                struct dentry *dbgfs_dir)
 {
        struct iwl_priv *priv;
        struct ieee80211_hw *hw;
@@ -1466,13 +1472,17 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
        if (iwlagn_mac_setup_register(priv, &fw->ucode_capa))
                goto out_destroy_workqueue;
 
-       if (iwl_dbgfs_register(priv, DRV_NAME))
-               IWL_ERR(priv,
-                       "failed to create debugfs files. Ignoring error\n");
+       if (iwl_dbgfs_register(priv, dbgfs_dir))
+               goto out_mac80211_unregister;
 
        return op_mode;
 
+out_mac80211_unregister:
+       iwlagn_mac_unregister(priv);
 out_destroy_workqueue:
+       iwl_tt_exit(priv);
+       iwl_testmode_free(priv);
+       iwl_cancel_deferred_work(priv);
        destroy_workqueue(priv->workqueue);
        priv->workqueue = NULL;
        iwl_uninit_drv(priv);
@@ -1493,8 +1503,6 @@ static void iwl_op_mode_dvm_stop(struct iwl_op_mode *op_mode)
 
        IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n");
 
-       iwl_dbgfs_unregister(priv);
-
        iwl_testmode_free(priv);
        iwlagn_mac_unregister(priv);
 
index fee5cffa166998c30437ac37b7490e0b85972dc1..5a9c325804f6dcdc8d47d44faf15dec1051d4471 100644 (file)
@@ -667,6 +667,7 @@ static int iwlagn_rx_reply_rx_phy(struct iwl_priv *priv,
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
 
        priv->last_phy_res_valid = true;
+       priv->ampdu_ref++;
        memcpy(&priv->last_phy_res, pkt->data,
               sizeof(struct iwl_rx_phy_res));
        return 0;
@@ -981,6 +982,16 @@ static int iwlagn_rx_reply_rx(struct iwl_priv *priv,
        if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
                rx_status.flag |= RX_FLAG_SHORTPRE;
 
+       if (phy_res->phy_flags & RX_RES_PHY_FLAGS_AGG_MSK) {
+               /*
+                * We know which subframes of an A-MPDU belong
+                * together since we get a single PHY response
+                * from the firmware for all of them
+                */
+               rx_status.flag |= RX_FLAG_AMPDU_DETAILS;
+               rx_status.ampdu_reference = priv->ampdu_ref;
+       }
+
        /* Set up the HT phy flags */
        if (rate_n_flags & RATE_MCS_HT_MSK)
                rx_status.flag |= RX_FLAG_HT;
index e3467fa868996264f88e681edd8b5f6114fa5fe4..bb9f6252d28fad2d25340845f8f3a324e5d0fc56 100644 (file)
@@ -612,9 +612,9 @@ static u16 iwl_fill_probe_req(struct ieee80211_mgmt *frame, const u8 *ta,
                return 0;
 
        frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
-       memcpy(frame->da, iwl_bcast_addr, ETH_ALEN);
+       eth_broadcast_addr(frame->da);
        memcpy(frame->sa, ta, ETH_ALEN);
-       memcpy(frame->bssid, iwl_bcast_addr, ETH_ALEN);
+       eth_broadcast_addr(frame->bssid);
        frame->seq_ctrl = 0;
 
        len += 24;
index b29b798f7550ad41b55efa7db5ab94e9d146a355..cd9b6de4273e8c035a8bcdd39bf715316825db90 100644 (file)
@@ -128,10 +128,11 @@ int iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
                               struct iwl_device_cmd *cmd)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
-       struct iwl_addsta_cmd *addsta =
-               (struct iwl_addsta_cmd *) cmd->payload;
 
-       return iwl_process_add_sta_resp(priv, addsta, pkt);
+       if (!cmd)
+               return 0;
+
+       return iwl_process_add_sta_resp(priv, (void *)cmd->payload, pkt);
 }
 
 int iwl_send_add_sta(struct iwl_priv *priv,
@@ -150,7 +151,7 @@ int iwl_send_add_sta(struct iwl_priv *priv,
                       sta_id, sta->sta.addr, flags & CMD_ASYNC ?  "a" : "");
 
        if (!(flags & CMD_ASYNC)) {
-               cmd.flags |= CMD_WANT_SKB;
+               cmd.flags |= CMD_WANT_SKB | CMD_WANT_HCMD;
                might_sleep();
        }
 
index 5971a23aa47d1218317460404ef5401f38d43547..f5ca73a89870727a71d6a354abebaeb01221d6f5 100644 (file)
@@ -127,6 +127,7 @@ static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
 static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
                                     struct iwl_tx_cmd *tx_cmd,
                                     struct ieee80211_tx_info *info,
+                                    struct ieee80211_sta *sta,
                                     __le16 fc)
 {
        u32 rate_flags;
@@ -187,8 +188,7 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
        if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
                        (rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY))
                rate_idx = rate_lowest_index(
-                               &priv->eeprom_data->bands[info->band],
-                               info->control.sta);
+                               &priv->eeprom_data->bands[info->band], sta);
        /* For 5 GHZ band, remap mac80211 rate indices into driver indices */
        if (info->band == IEEE80211_BAND_5GHZ)
                rate_idx += IWL_FIRST_OFDM_RATE;
@@ -291,7 +291,9 @@ static int iwl_sta_id_or_broadcast(struct iwl_rxon_context *context,
 /*
  * start REPLY_TX command process
  */
-int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
+int iwlagn_tx_skb(struct iwl_priv *priv,
+                 struct ieee80211_sta *sta,
+                 struct sk_buff *skb)
 {
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -345,7 +347,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
                sta_id = ctx->bcast_sta_id;
        else {
                /* Find index into station table for destination station */
-               sta_id = iwl_sta_id_or_broadcast(ctx, info->control.sta);
+               sta_id = iwl_sta_id_or_broadcast(ctx, sta);
                if (sta_id == IWL_INVALID_STATION) {
                        IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
                                       hdr->addr1);
@@ -355,8 +357,8 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 
        IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);
 
-       if (info->control.sta)
-               sta_priv = (void *)info->control.sta->drv_priv;
+       if (sta)
+               sta_priv = (void *)sta->drv_priv;
 
        if (sta_priv && sta_priv->asleep &&
            (info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER)) {
@@ -397,7 +399,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
        /* TODO need this for burst mode later on */
        iwlagn_tx_cmd_build_basic(priv, skb, tx_cmd, info, hdr, sta_id);
 
-       iwlagn_tx_cmd_build_rate(priv, tx_cmd, info, fc);
+       iwlagn_tx_cmd_build_rate(priv, tx_cmd, info, sta, fc);
 
        memset(&info->status, 0, sizeof(info->status));
 
@@ -431,7 +433,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
                 * only. Check this here.
                 */
                if (WARN_ONCE(tid_data->agg.state != IWL_AGG_ON &&
-                   tid_data->agg.state != IWL_AGG_OFF,
+                             tid_data->agg.state != IWL_AGG_OFF,
                    "Tx while agg.state = %d", tid_data->agg.state))
                        goto drop_unlock_sta;
 
index 6d8d6dd7943fc3cd2e58e872071078be8f4bfc0b..2cb1efbc5ed1f3d80127f678e4c82e42046eb7c1 100644 (file)
@@ -295,7 +295,7 @@ static int iwl_alive_notify(struct iwl_priv *priv)
 static int iwl_verify_sec_sparse(struct iwl_priv *priv,
                                  const struct fw_desc *fw_desc)
 {
-       __le32 *image = (__le32 *)fw_desc->v_addr;
+       __le32 *image = (__le32 *)fw_desc->data;
        u32 len = fw_desc->len;
        u32 val;
        u32 i;
@@ -319,7 +319,7 @@ static int iwl_verify_sec_sparse(struct iwl_priv *priv,
 static void iwl_print_mismatch_sec(struct iwl_priv *priv,
                                    const struct fw_desc *fw_desc)
 {
-       __le32 *image = (__le32 *)fw_desc->v_addr;
+       __le32 *image = (__le32 *)fw_desc->data;
        u32 len = fw_desc->len;
        u32 val;
        u32 offs;
index 06ca505bb2cc68cb84168f55107e52324639ff4a..59a5f78402fce35319014267c33514b7e4b7ef65 100644 (file)
@@ -29,6 +29,7 @@
 
 #include <linux/tracepoint.h>
 #include <linux/device.h>
+#include "iwl-trans.h"
 
 
 #if !defined(CONFIG_IWLWIFI_DEVICE_TRACING) || defined(__CHECKER__)
@@ -237,27 +238,34 @@ TRACE_EVENT(iwlwifi_dbg,
 #define TRACE_SYSTEM iwlwifi
 
 TRACE_EVENT(iwlwifi_dev_hcmd,
-       TP_PROTO(const struct device *dev, u32 flags,
-                const void *hcmd0, size_t len0,
-                const void *hcmd1, size_t len1,
-                const void *hcmd2, size_t len2),
-       TP_ARGS(dev, flags, hcmd0, len0, hcmd1, len1, hcmd2, len2),
+       TP_PROTO(const struct device *dev,
+                struct iwl_host_cmd *cmd, u16 total_size,
+                const void *hdr, size_t hdr_len),
+       TP_ARGS(dev, cmd, total_size, hdr, hdr_len),
        TP_STRUCT__entry(
                DEV_ENTRY
-               __dynamic_array(u8, hcmd0, len0)
-               __dynamic_array(u8, hcmd1, len1)
-               __dynamic_array(u8, hcmd2, len2)
+               __dynamic_array(u8, hcmd, total_size)
                __field(u32, flags)
        ),
        TP_fast_assign(
+               int i, offset = hdr_len;
+
                DEV_ASSIGN;
-               memcpy(__get_dynamic_array(hcmd0), hcmd0, len0);
-               memcpy(__get_dynamic_array(hcmd1), hcmd1, len1);
-               memcpy(__get_dynamic_array(hcmd2), hcmd2, len2);
-               __entry->flags = flags;
+               __entry->flags = cmd->flags;
+               memcpy(__get_dynamic_array(hcmd), hdr, hdr_len);
+
+               for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
+                       if (!cmd->len[i])
+                               continue;
+                       if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
+                               continue;
+                       memcpy((u8 *)__get_dynamic_array(hcmd) + offset,
+                              cmd->data[i], cmd->len[i]);
+                       offset += cmd->len[i];
+               }
        ),
        TP_printk("[%s] hcmd %#.2x (%ssync)",
-                 __get_str(dev), ((u8 *)__get_dynamic_array(hcmd0))[0],
+                 __get_str(dev), ((u8 *)__get_dynamic_array(hcmd))[0],
                  __entry->flags & CMD_ASYNC ? "a" : "")
 );
 
index cc41cfaedfbde4afaef1e54f23f215e658490dca..198634b75ed0e0cb5cb732525b6c9049f1a4b422 100644 (file)
@@ -64,6 +64,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/firmware.h>
 #include <linux/module.h>
+#include <linux/vmalloc.h>
 
 #include "iwl-drv.h"
 #include "iwl-debug.h"
@@ -101,6 +102,10 @@ MODULE_VERSION(DRV_VERSION);
 MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
 MODULE_LICENSE("GPL");
 
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+static struct dentry *iwl_dbgfs_root;
+#endif
+
 /**
  * struct iwl_drv - drv common data
  * @list: list of drv structures using this opmode
@@ -126,6 +131,12 @@ struct iwl_drv {
        char firmware_name[25];         /* name of firmware file to load */
 
        struct completion request_firmware_complete;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+       struct dentry *dbgfs_drv;
+       struct dentry *dbgfs_trans;
+       struct dentry *dbgfs_op_mode;
+#endif
 };
 
 #define DVM_OP_MODE    0
@@ -154,10 +165,8 @@ struct fw_sec {
 
 static void iwl_free_fw_desc(struct iwl_drv *drv, struct fw_desc *desc)
 {
-       if (desc->v_addr)
-               dma_free_coherent(drv->trans->dev, desc->len,
-                                 desc->v_addr, desc->p_addr);
-       desc->v_addr = NULL;
+       vfree(desc->data);
+       desc->data = NULL;
        desc->len = 0;
 }
 
@@ -176,25 +185,29 @@ static void iwl_dealloc_ucode(struct iwl_drv *drv)
 }
 
 static int iwl_alloc_fw_desc(struct iwl_drv *drv, struct fw_desc *desc,
-                     struct fw_sec *sec)
+                            struct fw_sec *sec)
 {
-       if (!sec || !sec->size) {
-               desc->v_addr = NULL;
+       void *data;
+
+       desc->data = NULL;
+
+       if (!sec || !sec->size)
                return -EINVAL;
-       }
 
-       desc->v_addr = dma_alloc_coherent(drv->trans->dev, sec->size,
-                                         &desc->p_addr, GFP_KERNEL);
-       if (!desc->v_addr)
+       data = vmalloc(sec->size);
+       if (!data)
                return -ENOMEM;
 
        desc->len = sec->size;
        desc->offset = sec->offset;
-       memcpy(desc->v_addr, sec->data, sec->size);
+       memcpy(data, sec->data, desc->len);
+       desc->data = data;
+
        return 0;
 }
 
-static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context);
+static void iwl_req_fw_callback(const struct firmware *ucode_raw,
+                               void *context);
 
 #define UCODE_EXPERIMENTAL_INDEX       100
 #define UCODE_EXPERIMENTAL_TAG         "exp"
@@ -231,7 +244,7 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
 
        return request_firmware_nowait(THIS_MODULE, 1, drv->firmware_name,
                                       drv->trans->dev,
-                                      GFP_KERNEL, drv, iwl_ucode_callback);
+                                      GFP_KERNEL, drv, iwl_req_fw_callback);
 }
 
 struct fw_img_parsing {
@@ -759,13 +772,57 @@ static int validate_sec_sizes(struct iwl_drv *drv,
        return 0;
 }
 
+static struct iwl_op_mode *
+_iwl_op_mode_start(struct iwl_drv *drv, struct iwlwifi_opmode_table *op)
+{
+       const struct iwl_op_mode_ops *ops = op->ops;
+       struct dentry *dbgfs_dir = NULL;
+       struct iwl_op_mode *op_mode = NULL;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+       drv->dbgfs_op_mode = debugfs_create_dir(op->name,
+                                               drv->dbgfs_drv);
+       if (!drv->dbgfs_op_mode) {
+               IWL_ERR(drv,
+                       "failed to create opmode debugfs directory\n");
+               return op_mode;
+       }
+       dbgfs_dir = drv->dbgfs_op_mode;
+#endif
+
+       op_mode = ops->start(drv->trans, drv->cfg, &drv->fw, dbgfs_dir);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+       if (!op_mode) {
+               debugfs_remove_recursive(drv->dbgfs_op_mode);
+               drv->dbgfs_op_mode = NULL;
+       }
+#endif
+
+       return op_mode;
+}
+
+static void _iwl_op_mode_stop(struct iwl_drv *drv)
+{
+       /* op_mode can be NULL if its start failed */
+       if (drv->op_mode) {
+               iwl_op_mode_stop(drv->op_mode);
+               drv->op_mode = NULL;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+               debugfs_remove_recursive(drv->dbgfs_op_mode);
+               drv->dbgfs_op_mode = NULL;
+#endif
+       }
+}
+
 /**
- * iwl_ucode_callback - callback when firmware was loaded
+ * iwl_req_fw_callback - callback when firmware was loaded
  *
  * If loaded successfully, copies the firmware into buffers
  * for the card to fetch (via DMA).
  */
-static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
+static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
 {
        struct iwl_drv *drv = context;
        struct iwl_fw *fw = &drv->fw;
@@ -908,8 +965,7 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
        list_add_tail(&drv->list, &op->drv);
 
        if (op->ops) {
-               const struct iwl_op_mode_ops *ops = op->ops;
-               drv->op_mode = ops->start(drv->trans, drv->cfg, &drv->fw);
+               drv->op_mode = _iwl_op_mode_start(drv, op);
 
                if (!drv->op_mode) {
                        mutex_unlock(&iwlwifi_opmode_table_mtx);
@@ -969,14 +1025,43 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans,
        init_completion(&drv->request_firmware_complete);
        INIT_LIST_HEAD(&drv->list);
 
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+       /* Create the device debugfs entries. */
+       drv->dbgfs_drv = debugfs_create_dir(dev_name(trans->dev),
+                                           iwl_dbgfs_root);
+
+       if (!drv->dbgfs_drv) {
+               IWL_ERR(drv, "failed to create debugfs directory\n");
+               goto err_free_drv;
+       }
+
+       /* Create transport layer debugfs dir */
+       drv->trans->dbgfs_dir = debugfs_create_dir("trans", drv->dbgfs_drv);
+
+       if (!drv->trans->dbgfs_dir) {
+               IWL_ERR(drv, "failed to create transport debugfs directory\n");
+               goto err_free_dbgfs;
+       }
+#endif
+
        ret = iwl_request_firmware(drv, true);
 
        if (ret) {
                IWL_ERR(trans, "Couldn't request the fw\n");
-               kfree(drv);
-               drv = NULL;
+               goto err_fw;
        }
 
+       return drv;
+
+err_fw:
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+err_free_dbgfs:
+       debugfs_remove_recursive(drv->dbgfs_drv);
+err_free_drv:
+#endif
+       kfree(drv);
+       drv = NULL;
+
        return drv;
 }
 
@@ -984,9 +1069,7 @@ void iwl_drv_stop(struct iwl_drv *drv)
 {
        wait_for_completion(&drv->request_firmware_complete);
 
-       /* op_mode can be NULL if its start failed */
-       if (drv->op_mode)
-               iwl_op_mode_stop(drv->op_mode);
+       _iwl_op_mode_stop(drv);
 
        iwl_dealloc_ucode(drv);
 
@@ -1000,6 +1083,10 @@ void iwl_drv_stop(struct iwl_drv *drv)
                list_del(&drv->list);
        mutex_unlock(&iwlwifi_opmode_table_mtx);
 
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+       debugfs_remove_recursive(drv->dbgfs_drv);
+#endif
+
        kfree(drv);
 }
 
@@ -1022,15 +1109,18 @@ int iwl_opmode_register(const char *name, const struct iwl_op_mode_ops *ops)
 {
        int i;
        struct iwl_drv *drv;
+       struct iwlwifi_opmode_table *op;
 
        mutex_lock(&iwlwifi_opmode_table_mtx);
        for (i = 0; i < ARRAY_SIZE(iwlwifi_opmode_table); i++) {
-               if (strcmp(iwlwifi_opmode_table[i].name, name))
+               op = &iwlwifi_opmode_table[i];
+               if (strcmp(op->name, name))
                        continue;
-               iwlwifi_opmode_table[i].ops = ops;
-               list_for_each_entry(drv, &iwlwifi_opmode_table[i].drv, list)
-                       drv->op_mode = ops->start(drv->trans, drv->cfg,
-                                                 &drv->fw);
+               op->ops = ops;
+               /* TODO: need to handle exceptional case */
+               list_for_each_entry(drv, &op->drv, list)
+                       drv->op_mode = _iwl_op_mode_start(drv, op);
+
                mutex_unlock(&iwlwifi_opmode_table_mtx);
                return 0;
        }
@@ -1051,12 +1141,9 @@ void iwl_opmode_deregister(const char *name)
                iwlwifi_opmode_table[i].ops = NULL;
 
                /* call the stop routine for all devices */
-               list_for_each_entry(drv, &iwlwifi_opmode_table[i].drv, list) {
-                       if (drv->op_mode) {
-                               iwl_op_mode_stop(drv->op_mode);
-                               drv->op_mode = NULL;
-                       }
-               }
+               list_for_each_entry(drv, &iwlwifi_opmode_table[i].drv, list)
+                       _iwl_op_mode_stop(drv);
+
                mutex_unlock(&iwlwifi_opmode_table_mtx);
                return;
        }
@@ -1076,6 +1163,14 @@ static int __init iwl_drv_init(void)
        pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
        pr_info(DRV_COPYRIGHT "\n");
 
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+       /* Create the root of iwlwifi debugfs subsystem. */
+       iwl_dbgfs_root = debugfs_create_dir(DRV_NAME, NULL);
+
+       if (!iwl_dbgfs_root)
+               return -EFAULT;
+#endif
+
        return iwl_pci_register_driver();
 }
 module_init(iwl_drv_init);
@@ -1083,6 +1178,10 @@ module_init(iwl_drv_init);
 static void __exit iwl_drv_exit(void)
 {
        iwl_pci_unregister_driver();
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+       debugfs_remove_recursive(iwl_dbgfs_root);
+#endif
 }
 module_exit(iwl_drv_exit);
 
index 2cbf137b25bf7b55ddddf1d4d9792b6d1c03fba9..285de5f68c051e39c9244188f234639dd8115328 100644 (file)
@@ -90,9 +90,9 @@
  * 4) The bus specific component configures the bus
  * 5) The bus specific component calls to the drv bus agnostic part
  *    (iwl_drv_start)
- * 6) iwl_drv_start fetches the fw ASYNC, iwl_ucode_callback
- * 7) iwl_ucode_callback parses the fw file
- * 8) iwl_ucode_callback starts the wifi implementation to matches the fw
+ * 6) iwl_drv_start fetches the fw ASYNC, iwl_req_fw_callback
+ * 7) iwl_req_fw_callback parses the fw file
+ * 8) iwl_req_fw_callback starts the wifi implementation to matches the fw
  */
 
 struct iwl_drv;
index 9c07c670a1ce7edf617dd88ea403041dcf481e31..a5e425718f56a052e365dc0e2f11657b2af88bd8 100644 (file)
@@ -85,8 +85,6 @@ struct iwl_eeprom_data {
        int n_hw_addrs;
        u8 hw_addr[ETH_ALEN];
 
-       u16 radio_config;
-
        u8 calib_version;
        __le16 calib_voltage;
 
index 2153e4cc5572ede3f15c1bb8a7eb5c3f279b24a1..d1a86b66bc51ee6fab8e797e8aa458b9db3812ea 100644 (file)
@@ -124,8 +124,7 @@ struct iwl_ucode_capabilities {
 
 /* one for each uCode image (inst/data, init/runtime/wowlan) */
 struct fw_desc {
-       dma_addr_t p_addr;      /* hardware address */
-       void *v_addr;           /* software address */
+       const void *data;       /* vmalloc'ed data */
        u32 len;                /* size in bytes */
        u32 offset;             /* offset in the device */
 };
index 64886f95664f996b370f621c97c14998b0c27171..c8d9b951746827b6ebd821eb0e72e2aef0c13a9c 100644 (file)
@@ -134,7 +134,8 @@ struct iwl_cfg;
 struct iwl_op_mode_ops {
        struct iwl_op_mode *(*start)(struct iwl_trans *trans,
                                     const struct iwl_cfg *cfg,
-                                    const struct iwl_fw *fw);
+                                    const struct iwl_fw *fw,
+                                    struct dentry *dbgfs_dir);
        void (*stop)(struct iwl_op_mode *op_mode);
        int (*rx)(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb,
                  struct iwl_device_cmd *cmd);
index 92576a3e84ef1c8cf04e6881b5b2691924a89b09..ff1154232885da82add146706ffe67b325d3e9b2 100644 (file)
@@ -184,14 +184,20 @@ struct iwl_rx_packet {
  * @CMD_SYNC: The caller will be stalled until the fw responds to the command
  * @CMD_ASYNC: Return right away and don't want for the response
  * @CMD_WANT_SKB: valid only with CMD_SYNC. The caller needs the buffer of the
- *     response.
+ *     response. The caller needs to call iwl_free_resp when done.
+ * @CMD_WANT_HCMD: The caller needs to get the HCMD that was sent in the
+ *     response handler. Chunks flagged by %IWL_HCMD_DFL_NOCOPY won't be
+ *     copied. The pointer passed to the response handler is in the transport
+ *     ownership and don't need to be freed by the op_mode. This also means
+ *     that the pointer is invalidated after the op_mode's handler returns.
  * @CMD_ON_DEMAND: This command is sent by the test mode pipe.
  */
 enum CMD_MODE {
        CMD_SYNC = 0,
        CMD_ASYNC = BIT(0),
        CMD_WANT_SKB = BIT(1),
-       CMD_ON_DEMAND = BIT(2),
+       CMD_WANT_HCMD = BIT(2),
+       CMD_ON_DEMAND = BIT(3),
 };
 
 #define DEF_CMD_PAYLOAD_SIZE 320
@@ -460,6 +466,8 @@ struct iwl_trans {
        size_t dev_cmd_headroom;
        char dev_cmd_pool_name[50];
 
+       struct dentry *dbgfs_dir;
+
        /* pointer to trans specific struct */
        /*Ensure that this pointer will always be aligned to sizeof pointer */
        char trans_specific[0] __aligned(sizeof(void *));
index f4c3500b68c682ced4ecec9f21c89c1d97cf7732..2a4675396707474fc0a0ced64009eddb967b4bec 100644 (file)
@@ -263,8 +263,6 @@ MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
 /* PCI registers */
 #define PCI_CFG_RETRY_TIMEOUT  0x041
 
-#ifndef CONFIG_IWLWIFI_IDI
-
 static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
        const struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
@@ -282,8 +280,14 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (!trans_pcie->drv)
                goto out_free_trans;
 
+       /* register transport layer debugfs here */
+       if (iwl_trans_dbgfs_register(iwl_trans, iwl_trans->dbgfs_dir))
+               goto out_free_drv;
+
        return 0;
 
+out_free_drv:
+       iwl_drv_stop(trans_pcie->drv);
 out_free_trans:
        iwl_trans_pcie_free(iwl_trans);
        pci_set_drvdata(pdev, NULL);
@@ -301,8 +305,6 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev)
        pci_set_drvdata(pdev, NULL);
 }
 
-#endif /* CONFIG_IWLWIFI_IDI */
-
 #ifdef CONFIG_PM_SLEEP
 
 static int iwl_pci_suspend(struct device *device)
@@ -347,15 +349,6 @@ static SIMPLE_DEV_PM_OPS(iwl_dev_pm_ops, iwl_pci_suspend, iwl_pci_resume);
 
 #endif
 
-#ifdef CONFIG_IWLWIFI_IDI
-/*
- * Defined externally in iwl-idi.c
- */
-int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
-void __devexit iwl_pci_remove(struct pci_dev *pdev);
-
-#endif /* CONFIG_IWLWIFI_IDI */
-
 static struct pci_driver iwl_pci_driver = {
        .name = DRV_NAME,
        .id_table = iwl_hw_card_ids,
index 4ffc18dc3a5761cc6b63c5aa60691727459e5e27..401178f44a3b130a9de6832bf10bd3e9b6e4b529 100644 (file)
@@ -184,6 +184,7 @@ struct iwl_queue {
 
 struct iwl_pcie_tx_queue_entry {
        struct iwl_device_cmd *cmd;
+       struct iwl_device_cmd *copy_cmd;
        struct sk_buff *skb;
        struct iwl_cmd_meta meta;
 };
@@ -310,7 +311,7 @@ void iwl_trans_pcie_free(struct iwl_trans *trans);
 ******************************************************/
 void iwl_bg_rx_replenish(struct work_struct *data);
 void iwl_irq_tasklet(struct iwl_trans *trans);
-void iwlagn_rx_replenish(struct iwl_trans *trans);
+void iwl_rx_replenish(struct iwl_trans *trans);
 void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans,
                                   struct iwl_rx_queue *q);
 
index d1a61ba6247ab68a13e4cd8943959c92d0fa3452..17c8e5d82681022383d657da550924d48d0d9289 100644 (file)
 #include "internal.h"
 #include "iwl-op-mode.h"
 
-#ifdef CONFIG_IWLWIFI_IDI
-#include "iwl-amfh.h"
-#endif
-
 /******************************************************************************
  *
  * RX path functions
@@ -181,15 +177,15 @@ void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans,
 }
 
 /**
- * iwlagn_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
+ * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
  */
-static inline __le32 iwlagn_dma_addr2rbd_ptr(dma_addr_t dma_addr)
+static inline __le32 iwl_dma_addr2rbd_ptr(dma_addr_t dma_addr)
 {
        return cpu_to_le32((u32)(dma_addr >> 8));
 }
 
 /**
- * iwlagn_rx_queue_restock - refill RX queue from pre-allocated pool
+ * iwl_rx_queue_restock - refill RX queue from pre-allocated pool
  *
  * If there are slots in the RX queue that need to be restocked,
  * and we have free pre-allocated buffers, fill the ranks as much
@@ -199,7 +195,7 @@ static inline __le32 iwlagn_dma_addr2rbd_ptr(dma_addr_t dma_addr)
  * also updates the memory address in the firmware to reference the new
  * target buffer.
  */
-static void iwlagn_rx_queue_restock(struct iwl_trans *trans)
+static void iwl_rx_queue_restock(struct iwl_trans *trans)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rx_queue *rxq = &trans_pcie->rxq;
@@ -207,6 +203,17 @@ static void iwlagn_rx_queue_restock(struct iwl_trans *trans)
        struct iwl_rx_mem_buffer *rxb;
        unsigned long flags;
 
+       /*
+        * If the device isn't enabled - not need to try to add buffers...
+        * This can happen when we stop the device and still have an interrupt
+        * pending. We stop the APM before we sync the interrupts / tasklets
+        * because we have to (see comment there). On the other hand, since
+        * the APM is stopped, we cannot access the HW (in particular not prph).
+        * So don't try to restock if the APM has been already stopped.
+        */
+       if (!test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status))
+               return;
+
        spin_lock_irqsave(&rxq->lock, flags);
        while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
                /* The overwritten rxb must be a used one */
@@ -219,7 +226,7 @@ static void iwlagn_rx_queue_restock(struct iwl_trans *trans)
                list_del(element);
 
                /* Point to Rx buffer via next RBD in circular buffer */
-               rxq->bd[rxq->write] = iwlagn_dma_addr2rbd_ptr(rxb->page_dma);
+               rxq->bd[rxq->write] = iwl_dma_addr2rbd_ptr(rxb->page_dma);
                rxq->queue[rxq->write] = rxb;
                rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
                rxq->free_count--;
@@ -230,7 +237,6 @@ static void iwlagn_rx_queue_restock(struct iwl_trans *trans)
        if (rxq->free_count <= RX_LOW_WATERMARK)
                schedule_work(&trans_pcie->rx_replenish);
 
-
        /* If we've added more space for the firmware to place data, tell it.
         * Increment device's write pointer in multiples of 8. */
        if (rxq->write_actual != (rxq->write & ~0x7)) {
@@ -241,15 +247,16 @@ static void iwlagn_rx_queue_restock(struct iwl_trans *trans)
        }
 }
 
-/**
- * iwlagn_rx_replenish - Move all used packet from rx_used to rx_free
- *
- * When moving to rx_free an SKB is allocated for the slot.
+/*
+ * iwl_rx_allocate - allocate a page for each used RBD
  *
- * Also restock the Rx queue via iwl_rx_queue_restock.
- * This is called as a scheduled work item (except for during initialization)
+ * A used RBD is an Rx buffer that has been given to the stack. To use it again
+ * a page must be allocated and the RBD must point to the page. This function
+ * doesn't change the HW pointer but handles the list of pages that is used by
+ * iwl_rx_queue_restock. The latter function will update the HW to use the newly
+ * allocated buffers.
  */
-static void iwlagn_rx_allocate(struct iwl_trans *trans, gfp_t priority)
+static void iwl_rx_allocate(struct iwl_trans *trans, gfp_t priority)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rx_queue *rxq = &trans_pcie->rxq;
@@ -328,23 +335,31 @@ static void iwlagn_rx_allocate(struct iwl_trans *trans, gfp_t priority)
        }
 }
 
-void iwlagn_rx_replenish(struct iwl_trans *trans)
+/*
+ * iwl_rx_replenish - Move all used buffers from rx_used to rx_free
+ *
+ * When moving to rx_free an page is allocated for the slot.
+ *
+ * Also restock the Rx queue via iwl_rx_queue_restock.
+ * This is called as a scheduled work item (except for during initialization)
+ */
+void iwl_rx_replenish(struct iwl_trans *trans)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        unsigned long flags;
 
-       iwlagn_rx_allocate(trans, GFP_KERNEL);
+       iwl_rx_allocate(trans, GFP_KERNEL);
 
        spin_lock_irqsave(&trans_pcie->irq_lock, flags);
-       iwlagn_rx_queue_restock(trans);
+       iwl_rx_queue_restock(trans);
        spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
 }
 
-static void iwlagn_rx_replenish_now(struct iwl_trans *trans)
+static void iwl_rx_replenish_now(struct iwl_trans *trans)
 {
-       iwlagn_rx_allocate(trans, GFP_ATOMIC);
+       iwl_rx_allocate(trans, GFP_ATOMIC);
 
-       iwlagn_rx_queue_restock(trans);
+       iwl_rx_queue_restock(trans);
 }
 
 void iwl_bg_rx_replenish(struct work_struct *data)
@@ -352,7 +367,7 @@ void iwl_bg_rx_replenish(struct work_struct *data)
        struct iwl_trans_pcie *trans_pcie =
            container_of(data, struct iwl_trans_pcie, rx_replenish);
 
-       iwlagn_rx_replenish(trans_pcie->trans);
+       iwl_rx_replenish(trans_pcie->trans);
 }
 
 static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
@@ -421,13 +436,23 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
                index = SEQ_TO_INDEX(sequence);
                cmd_index = get_cmd_index(&txq->q, index);
 
-               if (reclaim)
-                       cmd = txq->entries[cmd_index].cmd;
-               else
+               if (reclaim) {
+                       struct iwl_pcie_tx_queue_entry *ent;
+                       ent = &txq->entries[cmd_index];
+                       cmd = ent->copy_cmd;
+                       WARN_ON_ONCE(!cmd && ent->meta.flags & CMD_WANT_HCMD);
+               } else {
                        cmd = NULL;
+               }
 
                err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd);
 
+               if (reclaim) {
+                       /* The original command isn't needed any more */
+                       kfree(txq->entries[cmd_index].copy_cmd);
+                       txq->entries[cmd_index].copy_cmd = NULL;
+               }
+
                /*
                 * After here, we should always check rxcb._page_stolen,
                 * if it is true then one of the handlers took the page.
@@ -520,7 +545,7 @@ static void iwl_rx_handle(struct iwl_trans *trans)
                        count++;
                        if (count >= 8) {
                                rxq->read = i;
-                               iwlagn_rx_replenish_now(trans);
+                               iwl_rx_replenish_now(trans);
                                count = 0;
                        }
                }
@@ -529,9 +554,9 @@ static void iwl_rx_handle(struct iwl_trans *trans)
        /* Backtrack one entry */
        rxq->read = i;
        if (fill_rx)
-               iwlagn_rx_replenish_now(trans);
+               iwl_rx_replenish_now(trans);
        else
-               iwlagn_rx_queue_restock(trans);
+               iwl_rx_queue_restock(trans);
 }
 
 /**
@@ -713,11 +738,9 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
                /* Disable periodic interrupt; we use it as just a one-shot. */
                iwl_write8(trans, CSR_INT_PERIODIC_REG,
                            CSR_INT_PERIODIC_DIS);
-#ifdef CONFIG_IWLWIFI_IDI
-               iwl_amfh_rx_handler();
-#else
+
                iwl_rx_handle(trans);
-#endif
+
                /*
                 * Enable periodic interrupt in 8 msec only if we received
                 * real RX interrupt (instead of just periodic int), to catch
index 063ecaff5b569962e3dc2bf3114368385a575629..fe0fffd043048f48adca5c110031f537d363d73e 100644 (file)
@@ -216,7 +216,7 @@ static int iwl_rx_init(struct iwl_trans *trans)
        rxq->free_count = 0;
        spin_unlock_irqrestore(&rxq->lock, flags);
 
-       iwlagn_rx_replenish(trans);
+       iwl_rx_replenish(trans);
 
        iwl_trans_rx_hw_init(trans, rxq);
 
@@ -492,10 +492,11 @@ static void iwl_tx_queue_free(struct iwl_trans *trans, int txq_id)
        iwl_tx_queue_unmap(trans, txq_id);
 
        /* De-alloc array of command/tx buffers */
-
        if (txq_id == trans_pcie->cmd_queue)
-               for (i = 0; i < txq->q.n_window; i++)
+               for (i = 0; i < txq->q.n_window; i++) {
                        kfree(txq->entries[i].cmd);
+                       kfree(txq->entries[i].copy_cmd);
+               }
 
        /* De-alloc circular buffer of TFDs */
        if (txq->q.n_bd) {
@@ -851,10 +852,8 @@ static int iwl_nic_init(struct iwl_trans *trans)
 
        iwl_op_mode_nic_config(trans->op_mode);
 
-#ifndef CONFIG_IWLWIFI_IDI
        /* Allocate the RX queue, or reset if it is already allocated */
        iwl_rx_init(trans);
-#endif
 
        /* Allocate or reset and init all Tx and Command queues */
        if (iwl_tx_init(trans))
@@ -893,6 +892,7 @@ static int iwl_set_hw_ready(struct iwl_trans *trans)
 static int iwl_prepare_card_hw(struct iwl_trans *trans)
 {
        int ret;
+       int t = 0;
 
        IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");
 
@@ -905,30 +905,25 @@ static int iwl_prepare_card_hw(struct iwl_trans *trans)
        iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
                    CSR_HW_IF_CONFIG_REG_PREPARE);
 
-       ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
-                          ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
-                          CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);
+       do {
+               ret = iwl_set_hw_ready(trans);
+               if (ret >= 0)
+                       return 0;
 
-       if (ret < 0)
-               return ret;
+               usleep_range(200, 1000);
+               t += 200;
+       } while (t < 150000);
 
-       /* HW should be ready by now, check again. */
-       ret = iwl_set_hw_ready(trans);
-       if (ret >= 0)
-               return 0;
        return ret;
 }
 
 /*
  * ucode
  */
-static int iwl_load_section(struct iwl_trans *trans, u8 section_num,
-                           const struct fw_desc *section)
+static int iwl_load_firmware_chunk(struct iwl_trans *trans, u32 dst_addr,
+                                  dma_addr_t phy_addr, u32 byte_cnt)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       dma_addr_t phy_addr = section->p_addr;
-       u32 byte_cnt = section->len;
-       u32 dst_addr = section->offset;
        int ret;
 
        trans_pcie->ucode_write_complete = false;
@@ -942,8 +937,8 @@ static int iwl_load_section(struct iwl_trans *trans, u8 section_num,
                           dst_addr);
 
        iwl_write_direct32(trans,
-               FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
-               phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
+                          FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
+                          phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
 
        iwl_write_direct32(trans,
                           FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
@@ -962,33 +957,64 @@ static int iwl_load_section(struct iwl_trans *trans, u8 section_num,
                           FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
                           FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
 
-       IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
-                    section_num);
        ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
                                 trans_pcie->ucode_write_complete, 5 * HZ);
        if (!ret) {
-               IWL_ERR(trans, "Could not load the [%d] uCode section\n",
-                       section_num);
+               IWL_ERR(trans, "Failed to load firmware chunk!\n");
                return -ETIMEDOUT;
        }
 
        return 0;
 }
 
-static int iwl_load_given_ucode(struct iwl_trans *trans,
-                               const struct fw_img *image)
+static int iwl_load_section(struct iwl_trans *trans, u8 section_num,
+                           const struct fw_desc *section)
 {
+       u8 *v_addr;
+       dma_addr_t p_addr;
+       u32 offset;
        int ret = 0;
-               int i;
 
-               for (i = 0; i < IWL_UCODE_SECTION_MAX; i++) {
-                       if (!image->sec[i].p_addr)
-                               break;
+       IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
+                    section_num);
+
+       v_addr = dma_alloc_coherent(trans->dev, PAGE_SIZE, &p_addr, GFP_KERNEL);
+       if (!v_addr)
+               return -ENOMEM;
+
+       for (offset = 0; offset < section->len; offset += PAGE_SIZE) {
+               u32 copy_size;
+
+               copy_size = min_t(u32, PAGE_SIZE, section->len - offset);
 
-                       ret = iwl_load_section(trans, i, &image->sec[i]);
-                       if (ret)
-                               return ret;
+               memcpy(v_addr, (u8 *)section->data + offset, copy_size);
+               ret = iwl_load_firmware_chunk(trans, section->offset + offset,
+                                             p_addr, copy_size);
+               if (ret) {
+                       IWL_ERR(trans,
+                               "Could not load the [%d] uCode section\n",
+                               section_num);
+                       break;
                }
+       }
+
+       dma_free_coherent(trans->dev, PAGE_SIZE, v_addr, p_addr);
+       return ret;
+}
+
+static int iwl_load_given_ucode(struct iwl_trans *trans,
+                               const struct fw_img *image)
+{
+       int i, ret = 0;
+
+       for (i = 0; i < IWL_UCODE_SECTION_MAX; i++) {
+               if (!image->sec[i].data)
+                       break;
+
+               ret = iwl_load_section(trans, i, &image->sec[i]);
+               if (ret)
+                       return ret;
+       }
 
        /* Remove all resets to allow NIC to operate */
        iwl_write32(trans, CSR_RESET, 0);
@@ -1181,9 +1207,8 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
         */
        if (test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status)) {
                iwl_trans_tx_stop(trans);
-#ifndef CONFIG_IWLWIFI_IDI
                iwl_trans_rx_stop(trans);
-#endif
+
                /* Power-down device's busmaster DMA clocks */
                iwl_write_prph(trans, APMG_CLK_DIS_REG,
                               APMG_CLK_VAL_DMA_CLK_RQT);
@@ -1454,14 +1479,16 @@ static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans,
        bool hw_rfkill;
        unsigned long flags;
 
+       spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+       iwl_disable_interrupts(trans);
+       spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+
        iwl_apm_stop(trans);
 
        spin_lock_irqsave(&trans_pcie->irq_lock, flags);
        iwl_disable_interrupts(trans);
        spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
 
-       iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
-
        if (!op_mode_leaving) {
                /*
                 * Even if we stop the HW, we still want the RF kill
@@ -1549,9 +1576,8 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
        iwl_trans_pcie_tx_free(trans);
-#ifndef CONFIG_IWLWIFI_IDI
        iwl_trans_pcie_rx_free(trans);
-#endif
+
        if (trans_pcie->irq_requested == true) {
                free_irq(trans_pcie->irq, trans);
                iwl_free_isr_ict(trans);
@@ -1769,7 +1795,7 @@ void iwl_dump_csr(struct iwl_trans *trans)
 #define DEBUGFS_ADD_FILE(name, parent, mode) do {                      \
        if (!debugfs_create_file(#name, mode, parent, trans,            \
                                 &iwl_dbgfs_##name##_ops))              \
-               return -ENOMEM;                                         \
+               goto err;                                               \
 } while (0)
 
 /* file operation */
@@ -2033,6 +2059,10 @@ static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
        DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR);
        DEBUGFS_ADD_FILE(fw_restart, dir, S_IWUSR);
        return 0;
+
+err:
+       IWL_ERR(trans, "failed to create the trans debugfs entry\n");
+       return -ENOMEM;
 }
 #else
 static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
index 6baf8deef5190abef3fa85a7854b8396530cd97f..105e3af3c621b0b9e335fbe42d1ea1e444bfa579 100644 (file)
@@ -521,12 +521,7 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
        u16 copy_size, cmd_size;
        bool had_nocopy = false;
        int i;
-       u8 *cmd_dest;
-#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
-       const void *trace_bufs[IWL_MAX_CMD_TFDS + 1] = {};
-       int trace_lens[IWL_MAX_CMD_TFDS + 1] = {};
-       int trace_idx;
-#endif
+       u32 cmd_pos;
 
        copy_size = sizeof(out_cmd->hdr);
        cmd_size = sizeof(out_cmd->hdr);
@@ -584,15 +579,31 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
                                         INDEX_TO_SEQ(q->write_ptr));
 
        /* and copy the data that needs to be copied */
-
-       cmd_dest = out_cmd->payload;
+       cmd_pos = offsetof(struct iwl_device_cmd, payload);
        for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
                if (!cmd->len[i])
                        continue;
                if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY)
                        break;
-               memcpy(cmd_dest, cmd->data[i], cmd->len[i]);
-               cmd_dest += cmd->len[i];
+               memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], cmd->len[i]);
+               cmd_pos += cmd->len[i];
+       }
+
+       WARN_ON_ONCE(txq->entries[idx].copy_cmd);
+
+       /*
+        * since out_cmd will be the source address of the FH, it will write
+        * the retry count there. So when the user needs to receivce the HCMD
+        * that corresponds to the response in the response handler, it needs
+        * to set CMD_WANT_HCMD.
+        */
+       if (cmd->flags & CMD_WANT_HCMD) {
+               txq->entries[idx].copy_cmd =
+                       kmemdup(out_cmd, cmd_pos, GFP_ATOMIC);
+               if (unlikely(!txq->entries[idx].copy_cmd)) {
+                       idx = -ENOMEM;
+                       goto out;
+               }
        }
 
        IWL_DEBUG_HC(trans,
@@ -612,11 +623,6 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
        dma_unmap_len_set(out_meta, len, copy_size);
 
        iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr, copy_size, 1);
-#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
-       trace_bufs[0] = &out_cmd->hdr;
-       trace_lens[0] = copy_size;
-       trace_idx = 1;
-#endif
 
        for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
                if (!cmd->len[i])
@@ -635,25 +641,14 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
 
                iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr,
                                             cmd->len[i], 0);
-#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
-               trace_bufs[trace_idx] = cmd->data[i];
-               trace_lens[trace_idx] = cmd->len[i];
-               trace_idx++;
-#endif
        }
 
        out_meta->flags = cmd->flags;
 
        txq->need_update = 1;
 
-       /* check that tracing gets all possible blocks */
-       BUILD_BUG_ON(IWL_MAX_CMD_TFDS + 1 != 3);
-#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
-       trace_iwlwifi_dev_hcmd(trans->dev, cmd->flags,
-                              trace_bufs[0], trace_lens[0],
-                              trace_bufs[1], trace_lens[1],
-                              trace_bufs[2], trace_lens[2]);
-#endif
+       trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size,
+                              &out_cmd->hdr, copy_size);
 
        /* start timer if queue currently empty */
        if (q->read_ptr == q->write_ptr && trans_pcie->wd_timeout)
index 26e68326710b04bce2a9e4e08f42ddf1ba645ce2..aaa297315c47102df73b6fd3948e4b8fd4cbba25 100644 (file)
@@ -1159,6 +1159,22 @@ void lbs_set_mac_control(struct lbs_private *priv)
        lbs_deb_leave(LBS_DEB_CMD);
 }
 
+int lbs_set_mac_control_sync(struct lbs_private *priv)
+{
+       struct cmd_ds_mac_control cmd;
+       int ret = 0;
+
+       lbs_deb_enter(LBS_DEB_CMD);
+
+       cmd.hdr.size = cpu_to_le16(sizeof(cmd));
+       cmd.action = cpu_to_le16(priv->mac_control);
+       cmd.reserved = 0;
+       ret = lbs_cmd_with_response(priv, CMD_MAC_CONTROL, &cmd);
+
+       lbs_deb_leave(LBS_DEB_CMD);
+       return ret;
+}
+
 /**
  *  lbs_allocate_cmd_buffer - allocates the command buffer and links
  *  it to command free queue
index ab07608e13d07031bda8f3a7d6f712b1a01b73ac..4279e8ab95f2aa4545cef71daa5649b1314fdf3b 100644 (file)
@@ -96,6 +96,7 @@ void lbs_ps_confirm_sleep(struct lbs_private *priv);
 int lbs_set_radio(struct lbs_private *priv, u8 preamble, u8 radio_on);
 
 void lbs_set_mac_control(struct lbs_private *priv);
+int lbs_set_mac_control_sync(struct lbs_private *priv);
 
 int lbs_get_tx_power(struct lbs_private *priv, s16 *curlevel, s16 *minlevel,
                     s16 *maxlevel);
index fe1ea43c5149ef03e9ed0e2809695773fa2a576d..0c02f0483d1fd65e8b2a61b55ab57cbcc95ad5f2 100644 (file)
@@ -682,8 +682,10 @@ static int lbs_setup_firmware(struct lbs_private *priv)
 
        /* Send cmd to FW to enable 11D function */
        ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_11D_ENABLE, 1);
+       if (ret)
+               goto done;
 
-       lbs_set_mac_control(priv);
+       ret = lbs_set_mac_control_sync(priv);
 done:
        lbs_deb_leave_args(LBS_DEB_FW, "ret %d", ret);
        return ret;
index a03457292c88c1fd5f96eb4ad5a60df671227d91..7001856241e60354e9ff5cbed505ba6fc8bac415 100644 (file)
@@ -227,7 +227,9 @@ static void lbtf_free_adapter(struct lbtf_private *priv)
        lbtf_deb_leave(LBTF_DEB_MAIN);
 }
 
-static void lbtf_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void lbtf_op_tx(struct ieee80211_hw *hw,
+                      struct ieee80211_tx_control *control,
+                      struct sk_buff *skb)
 {
        struct lbtf_private *priv = hw->priv;
 
index 00838395778cb4c98854c93de0e39e2d392bbadc..429ca3215fdbf3e0d48f82e3c1eb69381938fd46 100644 (file)
@@ -38,7 +38,7 @@ MODULE_AUTHOR("Jouni Malinen");
 MODULE_DESCRIPTION("Software simulator of 802.11 radio(s) for mac80211");
 MODULE_LICENSE("GPL");
 
-static u32 wmediumd_pid;
+static u32 wmediumd_portid;
 
 static int radios = 2;
 module_param(radios, int, 0444);
@@ -545,7 +545,7 @@ static bool mac80211_hwsim_addr_match(struct mac80211_hwsim_data *data,
 
 static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
                                       struct sk_buff *my_skb,
-                                      int dst_pid)
+                                      int dst_portid)
 {
        struct sk_buff *skb;
        struct mac80211_hwsim_data *data = hw->priv;
@@ -619,7 +619,7 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
                goto nla_put_failure;
 
        genlmsg_end(skb, msg_head);
-       genlmsg_unicast(&init_net, skb, dst_pid);
+       genlmsg_unicast(&init_net, skb, dst_portid);
 
        /* Enqueue the packet */
        skb_queue_tail(&data->pending, my_skb);
@@ -709,11 +709,13 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw,
        return ack;
 }
 
-static void mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
+                             struct ieee80211_tx_control *control,
+                             struct sk_buff *skb)
 {
        bool ack;
        struct ieee80211_tx_info *txi;
-       u32 _pid;
+       u32 _portid;
 
        mac80211_hwsim_monitor_rx(hw, skb);
 
@@ -724,10 +726,10 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
        }
 
        /* wmediumd mode check */
-       _pid = ACCESS_ONCE(wmediumd_pid);
+       _portid = ACCESS_ONCE(wmediumd_portid);
 
-       if (_pid)
-               return mac80211_hwsim_tx_frame_nl(hw, skb, _pid);
+       if (_portid)
+               return mac80211_hwsim_tx_frame_nl(hw, skb, _portid);
 
        /* NO wmediumd detected, perfect medium simulation */
        ack = mac80211_hwsim_tx_frame_no_nl(hw, skb);
@@ -812,7 +814,7 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
        struct ieee80211_hw *hw = arg;
        struct sk_buff *skb;
        struct ieee80211_tx_info *info;
-       u32 _pid;
+       u32 _portid;
 
        hwsim_check_magic(vif);
 
@@ -829,10 +831,10 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
        mac80211_hwsim_monitor_rx(hw, skb);
 
        /* wmediumd mode check */
-       _pid = ACCESS_ONCE(wmediumd_pid);
+       _portid = ACCESS_ONCE(wmediumd_portid);
 
-       if (_pid)
-               return mac80211_hwsim_tx_frame_nl(hw, skb, _pid);
+       if (_portid)
+               return mac80211_hwsim_tx_frame_nl(hw, skb, _portid);
 
        mac80211_hwsim_tx_frame_no_nl(hw, skb);
        dev_kfree_skb(skb);
@@ -1313,7 +1315,7 @@ static void hwsim_send_ps_poll(void *dat, u8 *mac, struct ieee80211_vif *vif)
        struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
        struct sk_buff *skb;
        struct ieee80211_pspoll *pspoll;
-       u32 _pid;
+       u32 _portid;
 
        if (!vp->assoc)
                return;
@@ -1334,10 +1336,10 @@ static void hwsim_send_ps_poll(void *dat, u8 *mac, struct ieee80211_vif *vif)
        memcpy(pspoll->ta, mac, ETH_ALEN);
 
        /* wmediumd mode check */
-       _pid = ACCESS_ONCE(wmediumd_pid);
+       _portid = ACCESS_ONCE(wmediumd_portid);
 
-       if (_pid)
-               return mac80211_hwsim_tx_frame_nl(data->hw, skb, _pid);
+       if (_portid)
+               return mac80211_hwsim_tx_frame_nl(data->hw, skb, _portid);
 
        if (!mac80211_hwsim_tx_frame_no_nl(data->hw, skb))
                printk(KERN_DEBUG "%s: PS-poll frame not ack'ed\n", __func__);
@@ -1351,7 +1353,7 @@ static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac,
        struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
        struct sk_buff *skb;
        struct ieee80211_hdr *hdr;
-       u32 _pid;
+       u32 _portid;
 
        if (!vp->assoc)
                return;
@@ -1373,10 +1375,10 @@ static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac,
        memcpy(hdr->addr3, vp->bssid, ETH_ALEN);
 
        /* wmediumd mode check */
-       _pid = ACCESS_ONCE(wmediumd_pid);
+       _portid = ACCESS_ONCE(wmediumd_portid);
 
-       if (_pid)
-               return mac80211_hwsim_tx_frame_nl(data->hw, skb, _pid);
+       if (_portid)
+               return mac80211_hwsim_tx_frame_nl(data->hw, skb, _portid);
 
        if (!mac80211_hwsim_tx_frame_no_nl(data->hw, skb))
                printk(KERN_DEBUG "%s: nullfunc frame not ack'ed\n", __func__);
@@ -1630,10 +1632,10 @@ static int hwsim_register_received_nl(struct sk_buff *skb_2,
        if (info == NULL)
                goto out;
 
-       wmediumd_pid = info->snd_pid;
+       wmediumd_portid = info->snd_portid;
 
        printk(KERN_DEBUG "mac80211_hwsim: received a REGISTER, "
-              "switching to wmediumd mode with pid %d\n", info->snd_pid);
+              "switching to wmediumd mode with pid %d\n", info->snd_portid);
 
        return 0;
 out:
@@ -1670,10 +1672,10 @@ static int mac80211_hwsim_netlink_notify(struct notifier_block *nb,
        if (state != NETLINK_URELEASE)
                return NOTIFY_DONE;
 
-       if (notify->pid == wmediumd_pid) {
+       if (notify->portid == wmediumd_portid) {
                printk(KERN_INFO "mac80211_hwsim: wmediumd released netlink"
                       " socket, switching to perfect channel medium\n");
-               wmediumd_pid = 0;
+               wmediumd_portid = 0;
        }
        return NOTIFY_DONE;
 
@@ -1727,6 +1729,7 @@ static const struct ieee80211_iface_limit hwsim_if_limits[] = {
 #endif
                                 BIT(NL80211_IFTYPE_AP) |
                                 BIT(NL80211_IFTYPE_P2P_GO) },
+       { .max = 1, .types = BIT(NL80211_IFTYPE_P2P_DEVICE) },
 };
 
 static const struct ieee80211_iface_combination hwsim_if_comb = {
@@ -1813,7 +1816,8 @@ static int __init init_mac80211_hwsim(void)
                        BIT(NL80211_IFTYPE_P2P_CLIENT) |
                        BIT(NL80211_IFTYPE_P2P_GO) |
                        BIT(NL80211_IFTYPE_ADHOC) |
-                       BIT(NL80211_IFTYPE_MESH_POINT);
+                       BIT(NL80211_IFTYPE_MESH_POINT) |
+                       BIT(NL80211_IFTYPE_P2P_DEVICE);
 
                hw->flags = IEEE80211_HW_MFP_CAPABLE |
                            IEEE80211_HW_SIGNAL_DBM |
@@ -2052,7 +2056,7 @@ failed:
        mac80211_hwsim_free();
        return err;
 }
-
+module_init(init_mac80211_hwsim);
 
 static void __exit exit_mac80211_hwsim(void)
 {
@@ -2063,7 +2067,4 @@ static void __exit exit_mac80211_hwsim(void)
        mac80211_hwsim_free();
        unregister_netdev(hwsim_mon);
 }
-
-
-module_init(init_mac80211_hwsim);
 module_exit(exit_mac80211_hwsim);
index e535c937628b4575d87ebb8435c7362367634c73..245a371f1a43a4746bda49912a4a2d656d5cf75e 100644 (file)
@@ -175,23 +175,6 @@ int mwifiex_ret_11n_addba_req(struct mwifiex_private *priv,
        return 0;
 }
 
-/*
- * This function handles the command response of 11n configuration request.
- *
- * Handling includes changing the header fields into CPU format.
- */
-int mwifiex_ret_11n_cfg(struct host_cmd_ds_command *resp,
-                       struct mwifiex_ds_11n_tx_cfg *tx_cfg)
-{
-       struct host_cmd_ds_11n_cfg *htcfg = &resp->params.htcfg;
-
-       if (tx_cfg) {
-               tx_cfg->tx_htcap = le16_to_cpu(htcfg->ht_tx_cap);
-               tx_cfg->tx_htinfo = le16_to_cpu(htcfg->ht_tx_info);
-       }
-       return 0;
-}
-
 /*
  * This function prepares command of reconfigure Tx buffer.
  *
@@ -257,27 +240,6 @@ int mwifiex_cmd_amsdu_aggr_ctrl(struct host_cmd_ds_command *cmd,
        return 0;
 }
 
-/*
- * This function handles the command response of AMSDU aggregation
- * control request.
- *
- * Handling includes changing the header fields into CPU format.
- */
-int mwifiex_ret_amsdu_aggr_ctrl(struct host_cmd_ds_command *resp,
-                               struct mwifiex_ds_11n_amsdu_aggr_ctrl
-                               *amsdu_aggr_ctrl)
-{
-       struct host_cmd_ds_amsdu_aggr_ctrl *amsdu_ctrl =
-               &resp->params.amsdu_aggr_ctrl;
-
-       if (amsdu_aggr_ctrl) {
-               amsdu_aggr_ctrl->enable = le16_to_cpu(amsdu_ctrl->enable);
-               amsdu_aggr_ctrl->curr_buf_size =
-                       le16_to_cpu(amsdu_ctrl->curr_buf_size);
-       }
-       return 0;
-}
-
 /*
  * This function prepares 11n configuration command.
  *
@@ -726,3 +688,29 @@ int mwifiex_get_tx_ba_stream_tbl(struct mwifiex_private *priv,
 
        return count;
 }
+
+/*
+ * This function retrieves the entry for specific tx BA stream table by RA and
+ * deletes it.
+ */
+void mwifiex_del_tx_ba_stream_tbl_by_ra(struct mwifiex_private *priv, u8 *ra)
+{
+       struct mwifiex_tx_ba_stream_tbl *tbl, *tmp;
+       unsigned long flags;
+
+       if (!ra)
+               return;
+
+       spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
+       list_for_each_entry_safe(tbl, tmp, &priv->tx_ba_stream_tbl_ptr, list) {
+               if (!memcmp(tbl->ra, ra, ETH_ALEN)) {
+                       spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock,
+                                              flags);
+                       mwifiex_11n_delete_tx_ba_stream_tbl_entry(priv, tbl);
+                       spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
+               }
+       }
+       spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock, flags);
+
+       return;
+}
index 28366e9211fbb8773f1eae98441401aea6473ca1..46006a54a6566ee1c744220926bc198159aac0dd 100644 (file)
@@ -28,8 +28,6 @@ int mwifiex_ret_11n_delba(struct mwifiex_private *priv,
                          struct host_cmd_ds_command *resp);
 int mwifiex_ret_11n_addba_req(struct mwifiex_private *priv,
                              struct host_cmd_ds_command *resp);
-int mwifiex_ret_11n_cfg(struct host_cmd_ds_command *resp,
-                       struct mwifiex_ds_11n_tx_cfg *tx_cfg);
 int mwifiex_cmd_11n_cfg(struct host_cmd_ds_command *cmd, u16 cmd_action,
                        struct mwifiex_ds_11n_tx_cfg *txcfg);
 
@@ -60,15 +58,13 @@ int mwifiex_get_rx_reorder_tbl(struct mwifiex_private *priv,
                              struct mwifiex_ds_rx_reorder_tbl *buf);
 int mwifiex_get_tx_ba_stream_tbl(struct mwifiex_private *priv,
                               struct mwifiex_ds_tx_ba_stream_tbl *buf);
-int mwifiex_ret_amsdu_aggr_ctrl(struct host_cmd_ds_command *resp,
-                               struct mwifiex_ds_11n_amsdu_aggr_ctrl
-                               *amsdu_aggr_ctrl);
 int mwifiex_cmd_recfg_tx_buf(struct mwifiex_private *priv,
                             struct host_cmd_ds_command *cmd,
                             int cmd_action, u16 *buf_size);
 int mwifiex_cmd_amsdu_aggr_ctrl(struct host_cmd_ds_command *cmd,
                                int cmd_action,
                                struct mwifiex_ds_11n_amsdu_aggr_ctrl *aa_ctrl);
+void mwifiex_del_tx_ba_stream_tbl_by_ra(struct mwifiex_private *priv, u8 *ra);
 
 /*
  * This function checks whether AMPDU is allowed or not for a particular TID.
@@ -157,4 +153,18 @@ mwifiex_is_ba_stream_setup(struct mwifiex_private *priv,
 
        return false;
 }
+
+/*
+ * This function checks whether associated station is 11n enabled
+ */
+static inline int mwifiex_is_sta_11n_enabled(struct mwifiex_private *priv,
+                                            struct mwifiex_sta_node *node)
+{
+
+       if (!node || (priv->bss_role != MWIFIEX_BSS_ROLE_UAP) ||
+           !priv->ap_11n_enabled)
+               return 0;
+
+       return node->is_11n_enabled;
+}
 #endif /* !_MWIFIEX_11N_H_ */
index ab84eb94374905c166d6bdcd6c745db1992574fe..395f1bfd41027f788901b62b5ef4621ac019956d 100644 (file)
@@ -62,9 +62,7 @@ mwifiex_11n_form_amsdu_pkt(struct sk_buff *skb_aggr,
        };
        struct tx_packet_hdr *tx_header;
 
-       skb_put(skb_aggr, sizeof(*tx_header));
-
-       tx_header = (struct tx_packet_hdr *) skb_aggr->data;
+       tx_header = (void *)skb_put(skb_aggr, sizeof(*tx_header));
 
        /* Copy DA and SA */
        dt_offset = 2 * ETH_ALEN;
@@ -82,12 +80,10 @@ mwifiex_11n_form_amsdu_pkt(struct sk_buff *skb_aggr,
        tx_header->eth803_hdr.h_proto = htons(skb_src->len + LLC_SNAP_LEN);
 
        /* Add payload */
-       skb_put(skb_aggr, skb_src->len);
-       memcpy(skb_aggr->data + sizeof(*tx_header), skb_src->data,
-              skb_src->len);
-       *pad = (((skb_src->len + LLC_SNAP_LEN) & 3)) ? (4 - (((skb_src->len +
-                                                     LLC_SNAP_LEN)) & 3)) : 0;
-       skb_put(skb_aggr, *pad);
+       memcpy(skb_put(skb_aggr, skb_src->len), skb_src->data, skb_src->len);
+
+       /* Add padding for new MSDU to start from 4 byte boundary */
+       *pad = (4 - ((unsigned long)skb_aggr->tail & 0x3)) % 4;
 
        return skb_aggr->len + *pad;
 }
index 591ccd33f83c5482340c6667cf76660e24ac00d7..9402b93b9a363b350545fb24aad6f67f06afc172 100644 (file)
@@ -54,8 +54,13 @@ mwifiex_11n_dispatch_pkt(struct mwifiex_private *priv,
                        tbl->rx_reorder_ptr[i] = NULL;
                }
                spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
-               if (rx_tmp_ptr)
-                       mwifiex_process_rx_packet(priv->adapter, rx_tmp_ptr);
+               if (rx_tmp_ptr) {
+                       if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
+                               mwifiex_handle_uap_rx_forward(priv, rx_tmp_ptr);
+                       else
+                               mwifiex_process_rx_packet(priv->adapter,
+                                                         rx_tmp_ptr);
+               }
        }
 
        spin_lock_irqsave(&priv->rx_pkt_lock, flags);
@@ -97,7 +102,11 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
                rx_tmp_ptr = tbl->rx_reorder_ptr[i];
                tbl->rx_reorder_ptr[i] = NULL;
                spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
-               mwifiex_process_rx_packet(priv->adapter, rx_tmp_ptr);
+
+               if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
+                       mwifiex_handle_uap_rx_forward(priv, rx_tmp_ptr);
+               else
+                       mwifiex_process_rx_packet(priv->adapter, rx_tmp_ptr);
        }
 
        spin_lock_irqsave(&priv->rx_pkt_lock, flags);
@@ -148,7 +157,7 @@ mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv,
  * This function returns the pointer to an entry in Rx reordering
  * table which matches the given TA/TID pair.
  */
-static struct mwifiex_rx_reorder_tbl *
+struct mwifiex_rx_reorder_tbl *
 mwifiex_11n_get_rx_reorder_tbl(struct mwifiex_private *priv, int tid, u8 *ta)
 {
        struct mwifiex_rx_reorder_tbl *tbl;
@@ -167,6 +176,31 @@ mwifiex_11n_get_rx_reorder_tbl(struct mwifiex_private *priv, int tid, u8 *ta)
        return NULL;
 }
 
+/* This function retrieves the pointer to an entry in Rx reordering
+ * table which matches the given TA and deletes it.
+ */
+void mwifiex_11n_del_rx_reorder_tbl_by_ta(struct mwifiex_private *priv, u8 *ta)
+{
+       struct mwifiex_rx_reorder_tbl *tbl, *tmp;
+       unsigned long flags;
+
+       if (!ta)
+               return;
+
+       spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+       list_for_each_entry_safe(tbl, tmp, &priv->rx_reorder_tbl_ptr, list) {
+               if (!memcmp(tbl->ta, ta, ETH_ALEN)) {
+                       spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
+                                              flags);
+                       mwifiex_del_rx_reorder_entry(priv, tbl);
+                       spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+               }
+       }
+       spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+
+       return;
+}
+
 /*
  * This function finds the last sequence number used in the packets
  * buffered in Rx reordering table.
@@ -226,6 +260,7 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
        struct mwifiex_rx_reorder_tbl *tbl, *new_node;
        u16 last_seq = 0;
        unsigned long flags;
+       struct mwifiex_sta_node *node;
 
        /*
         * If we get a TID, ta pair which is already present dispatch all the
@@ -248,19 +283,26 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
        new_node->tid = tid;
        memcpy(new_node->ta, ta, ETH_ALEN);
        new_node->start_win = seq_num;
-       if (mwifiex_queuing_ra_based(priv))
-               /* TODO for adhoc */
+
+       if (mwifiex_queuing_ra_based(priv)) {
                dev_dbg(priv->adapter->dev,
-                       "info: ADHOC:last_seq=%d start_win=%d\n",
+                       "info: AP/ADHOC:last_seq=%d start_win=%d\n",
                        last_seq, new_node->start_win);
-       else
+               if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP) {
+                       node = mwifiex_get_sta_entry(priv, ta);
+                       if (node)
+                               last_seq = node->rx_seq[tid];
+               }
+       } else {
                last_seq = priv->rx_seq[tid];
+       }
 
        if (last_seq != MWIFIEX_DEF_11N_RX_SEQ_NUM &&
            last_seq >= new_node->start_win)
                new_node->start_win = last_seq + 1;
 
        new_node->win_size = win_size;
+       new_node->flags = 0;
 
        new_node->rx_reorder_ptr = kzalloc(sizeof(void *) * win_size,
                                        GFP_KERNEL);
@@ -396,8 +438,13 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
 
        tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
        if (!tbl) {
-               if (pkt_type != PKT_TYPE_BAR)
-                       mwifiex_process_rx_packet(priv->adapter, payload);
+               if (pkt_type != PKT_TYPE_BAR) {
+                       if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
+                               mwifiex_handle_uap_rx_forward(priv, payload);
+                       else
+                               mwifiex_process_rx_packet(priv->adapter,
+                                                         payload);
+               }
                return 0;
        }
        start_win = tbl->start_win;
@@ -411,13 +458,20 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
         * If seq_num is less then starting win then ignore and drop the
         * packet
         */
-       if ((start_win + TWOPOW11) > (MAX_TID_VALUE - 1)) {/* Wrap */
-               if (seq_num >= ((start_win + TWOPOW11) &
-                               (MAX_TID_VALUE - 1)) && (seq_num < start_win))
+       if (tbl->flags & RXREOR_FORCE_NO_DROP) {
+               dev_dbg(priv->adapter->dev,
+                       "RXREOR_FORCE_NO_DROP when HS is activated\n");
+               tbl->flags &= ~RXREOR_FORCE_NO_DROP;
+       } else {
+               if ((start_win + TWOPOW11) > (MAX_TID_VALUE - 1)) {
+                       if (seq_num >= ((start_win + TWOPOW11) &
+                                       (MAX_TID_VALUE - 1)) &&
+                           seq_num < start_win)
+                               return -1;
+               } else if ((seq_num < start_win) ||
+                          (seq_num > (start_win + TWOPOW11))) {
                        return -1;
-       } else if ((seq_num < start_win) ||
-                  (seq_num > (start_win + TWOPOW11))) {
-               return -1;
+               }
        }
 
        /*
@@ -428,8 +482,7 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
                seq_num = ((seq_num + win_size) - 1) & (MAX_TID_VALUE - 1);
 
        if (((end_win < start_win) &&
-            (seq_num < (TWOPOW11 - (MAX_TID_VALUE - start_win))) &&
-            (seq_num > end_win)) ||
+            (seq_num < start_win) && (seq_num > end_win)) ||
            ((end_win > start_win) && ((seq_num > end_win) ||
                                       (seq_num < start_win)))) {
                end_win = seq_num;
@@ -591,3 +644,29 @@ void mwifiex_11n_cleanup_reorder_tbl(struct mwifiex_private *priv)
        INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr);
        mwifiex_reset_11n_rx_seq_num(priv);
 }
+
+/*
+ * This function updates all rx_reorder_tbl's flags.
+ */
+void mwifiex_update_rxreor_flags(struct mwifiex_adapter *adapter, u8 flags)
+{
+       struct mwifiex_private *priv;
+       struct mwifiex_rx_reorder_tbl *tbl;
+       unsigned long lock_flags;
+       int i;
+
+       for (i = 0; i < adapter->priv_num; i++) {
+               priv = adapter->priv[i];
+               if (!priv)
+                       continue;
+               if (list_empty(&priv->rx_reorder_tbl_ptr))
+                       continue;
+
+               spin_lock_irqsave(&priv->rx_reorder_tbl_lock, lock_flags);
+               list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list)
+                       tbl->flags = flags;
+               spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, lock_flags);
+       }
+
+       return;
+}
index 6c9815a0f5d8b0d7aebcb5d6a9953a24819ad45a..4064041ac852737602b8970eff8c40f4070c9117 100644 (file)
 #define ADDBA_RSP_STATUS_ACCEPT 0
 
 #define MWIFIEX_DEF_11N_RX_SEQ_NUM     0xffff
+#define BA_SETUP_MAX_PACKET_THRESHOLD  16
+#define BA_SETUP_PACKET_OFFSET         16
+
+enum mwifiex_rxreor_flags {
+       RXREOR_FORCE_NO_DROP    = 1<<0,
+};
 
 static inline void mwifiex_reset_11n_rx_seq_num(struct mwifiex_private *priv)
 {
@@ -68,5 +74,9 @@ struct mwifiex_rx_reorder_tbl *mwifiex_11n_get_rxreorder_tbl(struct
                                                           mwifiex_private
                                                           *priv, int tid,
                                                           u8 *ta);
+struct mwifiex_rx_reorder_tbl *
+mwifiex_11n_get_rx_reorder_tbl(struct mwifiex_private *priv, int tid, u8 *ta);
+void mwifiex_11n_del_rx_reorder_tbl_by_ta(struct mwifiex_private *priv, u8 *ta);
+void mwifiex_update_rxreor_flags(struct mwifiex_adapter *adapter, u8 flags);
 
 #endif /* _MWIFIEX_11N_RXREORDER_H_ */
index 3f66ebb0a630813d3bc836c4412233a3f8883f7b..dd0410d2d465d8e279ec1104ea5ee51831fed97c 100644 (file)
@@ -33,8 +33,10 @@ mwifiex-y += uap_cmd.o
 mwifiex-y += ie.o
 mwifiex-y += sta_cmdresp.o
 mwifiex-y += sta_event.o
+mwifiex-y += uap_event.o
 mwifiex-y += sta_tx.o
 mwifiex-y += sta_rx.o
+mwifiex-y += uap_txrx.o
 mwifiex-y += cfg80211.o
 mwifiex-$(CONFIG_DEBUG_FS) += debugfs.o
 obj-$(CONFIG_MWIFIEX) += mwifiex.o
index fe42137384da0bbae54ee0e656bf51a50cb58e68..2691620393eae14f771f89641124ce9ff5b354c4 100644 (file)
@@ -22,7 +22,7 @@
 
 static const struct ieee80211_iface_limit mwifiex_ap_sta_limits[] = {
        {
-               .max = 1, .types = BIT(NL80211_IFTYPE_STATION),
+               .max = 2, .types = BIT(NL80211_IFTYPE_STATION),
        },
        {
                .max = 1, .types = BIT(NL80211_IFTYPE_AP),
@@ -37,6 +37,36 @@ static const struct ieee80211_iface_combination mwifiex_iface_comb_ap_sta = {
        .beacon_int_infra_match = true,
 };
 
+static const struct ieee80211_regdomain mwifiex_world_regdom_custom = {
+       .n_reg_rules = 7,
+       .alpha2 =  "99",
+       .reg_rules = {
+               /* Channel 1 - 11 */
+               REG_RULE(2412-10, 2462+10, 40, 3, 20, 0),
+               /* Channel 12 - 13 */
+               REG_RULE(2467-10, 2472+10, 20, 3, 20,
+                        NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS),
+               /* Channel 14 */
+               REG_RULE(2484-10, 2484+10, 20, 3, 20,
+                        NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS |
+                        NL80211_RRF_NO_OFDM),
+               /* Channel 36 - 48 */
+               REG_RULE(5180-10, 5240+10, 40, 3, 20,
+                        NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS),
+               /* Channel 149 - 165 */
+               REG_RULE(5745-10, 5825+10, 40, 3, 20,
+                        NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS),
+               /* Channel 52 - 64 */
+               REG_RULE(5260-10, 5320+10, 40, 3, 30,
+                        NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS |
+                        NL80211_RRF_DFS),
+               /* Channel 100 - 140 */
+               REG_RULE(5500-10, 5700+10, 40, 3, 30,
+                        NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS |
+                        NL80211_RRF_DFS),
+       }
+};
+
 /*
  * This function maps the nl802.11 channel type into driver channel type.
  *
@@ -47,8 +77,7 @@ static const struct ieee80211_iface_combination mwifiex_iface_comb_ap_sta = {
  *      NL80211_CHAN_HT40MINUS -> IEEE80211_HT_PARAM_CHA_SEC_BELOW
  *      Others                 -> IEEE80211_HT_PARAM_CHA_SEC_NONE
  */
-static u8
-mwifiex_chan_type_to_sec_chan_offset(enum nl80211_channel_type chan_type)
+u8 mwifiex_chan_type_to_sec_chan_offset(enum nl80211_channel_type chan_type)
 {
        switch (chan_type) {
        case NL80211_CHAN_NO_HT:
@@ -99,7 +128,7 @@ mwifiex_cfg80211_del_key(struct wiphy *wiphy, struct net_device *netdev,
        const u8 bc_mac[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
        const u8 *peer_mac = pairwise ? mac_addr : bc_mac;
 
-       if (mwifiex_set_encode(priv, NULL, 0, key_index, peer_mac, 1)) {
+       if (mwifiex_set_encode(priv, NULL, NULL, 0, key_index, peer_mac, 1)) {
                wiphy_err(wiphy, "deleting the crypto keys\n");
                return -EFAULT;
        }
@@ -108,6 +137,188 @@ mwifiex_cfg80211_del_key(struct wiphy *wiphy, struct net_device *netdev,
        return 0;
 }
 
+/*
+ * This function forms an skb for management frame.
+ */
+static int
+mwifiex_form_mgmt_frame(struct sk_buff *skb, const u8 *buf, size_t len)
+{
+       u8 addr[ETH_ALEN] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
+       u16 pkt_len;
+       u32 tx_control = 0, pkt_type = PKT_TYPE_MGMT;
+       struct timeval tv;
+
+       pkt_len = len + ETH_ALEN;
+
+       skb_reserve(skb, MWIFIEX_MIN_DATA_HEADER_LEN +
+                   MWIFIEX_MGMT_FRAME_HEADER_SIZE + sizeof(pkt_len));
+       memcpy(skb_push(skb, sizeof(pkt_len)), &pkt_len, sizeof(pkt_len));
+
+       memcpy(skb_push(skb, sizeof(tx_control)),
+              &tx_control, sizeof(tx_control));
+
+       memcpy(skb_push(skb, sizeof(pkt_type)), &pkt_type, sizeof(pkt_type));
+
+       /* Add packet data and address4 */
+       memcpy(skb_put(skb, sizeof(struct ieee80211_hdr_3addr)), buf,
+              sizeof(struct ieee80211_hdr_3addr));
+       memcpy(skb_put(skb, ETH_ALEN), addr, ETH_ALEN);
+       memcpy(skb_put(skb, len - sizeof(struct ieee80211_hdr_3addr)),
+              buf + sizeof(struct ieee80211_hdr_3addr),
+              len - sizeof(struct ieee80211_hdr_3addr));
+
+       skb->priority = LOW_PRIO_TID;
+       do_gettimeofday(&tv);
+       skb->tstamp = timeval_to_ktime(tv);
+
+       return 0;
+}
+
+/*
+ * CFG802.11 operation handler to transmit a management frame.
+ */
+static int
+mwifiex_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
+                        struct ieee80211_channel *chan, bool offchan,
+                        enum nl80211_channel_type channel_type,
+                        bool channel_type_valid, unsigned int wait,
+                        const u8 *buf, size_t len, bool no_cck,
+                        bool dont_wait_for_ack, u64 *cookie)
+{
+       struct sk_buff *skb;
+       u16 pkt_len;
+       const struct ieee80211_mgmt *mgmt;
+       struct mwifiex_private *priv = mwifiex_netdev_get_priv(wdev->netdev);
+
+       if (!buf || !len) {
+               wiphy_err(wiphy, "invalid buffer and length\n");
+               return -EFAULT;
+       }
+
+       mgmt = (const struct ieee80211_mgmt *)buf;
+       if (GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_STA &&
+           ieee80211_is_probe_resp(mgmt->frame_control)) {
+               /* Since we support offload probe resp, we need to skip probe
+                * resp in AP or GO mode */
+               wiphy_dbg(wiphy,
+                         "info: skip to send probe resp in AP or GO mode\n");
+               return 0;
+       }
+
+       pkt_len = len + ETH_ALEN;
+       skb = dev_alloc_skb(MWIFIEX_MIN_DATA_HEADER_LEN +
+                           MWIFIEX_MGMT_FRAME_HEADER_SIZE +
+                           pkt_len + sizeof(pkt_len));
+
+       if (!skb) {
+               wiphy_err(wiphy, "allocate skb failed for management frame\n");
+               return -ENOMEM;
+       }
+
+       mwifiex_form_mgmt_frame(skb, buf, len);
+       mwifiex_queue_tx_pkt(priv, skb);
+
+       *cookie = random32() | 1;
+       cfg80211_mgmt_tx_status(wdev, *cookie, buf, len, true, GFP_ATOMIC);
+
+       wiphy_dbg(wiphy, "info: management frame transmitted\n");
+       return 0;
+}
+
+/*
+ * CFG802.11 operation handler to register a mgmt frame.
+ */
+static void
+mwifiex_cfg80211_mgmt_frame_register(struct wiphy *wiphy,
+                                    struct wireless_dev *wdev,
+                                    u16 frame_type, bool reg)
+{
+       struct mwifiex_private *priv = mwifiex_netdev_get_priv(wdev->netdev);
+
+       if (reg)
+               priv->mgmt_frame_mask |= BIT(frame_type >> 4);
+       else
+               priv->mgmt_frame_mask &= ~BIT(frame_type >> 4);
+
+       mwifiex_send_cmd_async(priv, HostCmd_CMD_MGMT_FRAME_REG,
+                              HostCmd_ACT_GEN_SET, 0, &priv->mgmt_frame_mask);
+
+       wiphy_dbg(wiphy, "info: mgmt frame registered\n");
+}
+
+/*
+ * CFG802.11 operation handler to remain on channel.
+ */
+static int
+mwifiex_cfg80211_remain_on_channel(struct wiphy *wiphy,
+                                  struct wireless_dev *wdev,
+                                  struct ieee80211_channel *chan,
+                                  enum nl80211_channel_type channel_type,
+                                  unsigned int duration, u64 *cookie)
+{
+       struct mwifiex_private *priv = mwifiex_netdev_get_priv(wdev->netdev);
+       int ret;
+
+       if (!chan || !cookie) {
+               wiphy_err(wiphy, "Invalid parameter for ROC\n");
+               return -EINVAL;
+       }
+
+       if (priv->roc_cfg.cookie) {
+               wiphy_dbg(wiphy, "info: ongoing ROC, cookie = 0x%llu\n",
+                         priv->roc_cfg.cookie);
+               return -EBUSY;
+       }
+
+       ret = mwifiex_remain_on_chan_cfg(priv, HostCmd_ACT_GEN_SET, chan,
+                                        &channel_type, duration);
+
+       if (!ret) {
+               *cookie = random32() | 1;
+               priv->roc_cfg.cookie = *cookie;
+               priv->roc_cfg.chan = *chan;
+               priv->roc_cfg.chan_type = channel_type;
+
+               cfg80211_ready_on_channel(wdev, *cookie, chan, channel_type,
+                                         duration, GFP_ATOMIC);
+
+               wiphy_dbg(wiphy, "info: ROC, cookie = 0x%llx\n", *cookie);
+       }
+
+       return ret;
+}
+
+/*
+ * CFG802.11 operation handler to cancel remain on channel.
+ */
+static int
+mwifiex_cfg80211_cancel_remain_on_channel(struct wiphy *wiphy,
+                                         struct wireless_dev *wdev, u64 cookie)
+{
+       struct mwifiex_private *priv = mwifiex_netdev_get_priv(wdev->netdev);
+       int ret;
+
+       if (cookie != priv->roc_cfg.cookie)
+               return -ENOENT;
+
+       ret = mwifiex_remain_on_chan_cfg(priv, HostCmd_ACT_GEN_REMOVE,
+                                        &priv->roc_cfg.chan,
+                                        &priv->roc_cfg.chan_type, 0);
+
+       if (!ret) {
+               cfg80211_remain_on_channel_expired(wdev, cookie,
+                                                  &priv->roc_cfg.chan,
+                                                  priv->roc_cfg.chan_type,
+                                                  GFP_ATOMIC);
+
+               memset(&priv->roc_cfg, 0, sizeof(struct mwifiex_roc_cfg));
+
+               wiphy_dbg(wiphy, "info: cancel ROC, cookie = 0x%llx\n", cookie);
+       }
+
+       return ret;
+}
+
 /*
  * CFG802.11 operation handler to set Tx power.
  */
@@ -171,7 +382,8 @@ mwifiex_cfg80211_set_default_key(struct wiphy *wiphy, struct net_device *netdev,
 
        if (priv->bss_type == MWIFIEX_BSS_TYPE_UAP) {
                priv->wep_key_curr_index = key_index;
-       } else if (mwifiex_set_encode(priv, NULL, 0, key_index, NULL, 0)) {
+       } else if (mwifiex_set_encode(priv, NULL, NULL, 0, key_index,
+                                     NULL, 0)) {
                wiphy_err(wiphy, "set default Tx key index\n");
                return -EFAULT;
        }
@@ -207,7 +419,7 @@ mwifiex_cfg80211_add_key(struct wiphy *wiphy, struct net_device *netdev,
                return 0;
        }
 
-       if (mwifiex_set_encode(priv, params->key, params->key_len,
+       if (mwifiex_set_encode(priv, params, params->key, params->key_len,
                               key_index, peer_mac, 0)) {
                wiphy_err(wiphy, "crypto keys added\n");
                return -EFAULT;
@@ -462,6 +674,76 @@ mwifiex_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
        return 0;
 }
 
+static int
+mwifiex_cfg80211_deinit_p2p(struct mwifiex_private *priv)
+{
+       u16 mode = P2P_MODE_DISABLE;
+
+       if (GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_STA)
+               mwifiex_set_bss_role(priv, MWIFIEX_BSS_ROLE_STA);
+
+       if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_P2P_MODE_CFG,
+                                 HostCmd_ACT_GEN_SET, 0, &mode))
+               return -1;
+
+       return 0;
+}
+
+/*
+ * This function initializes the functionalities for P2P client.
+ * The P2P client initialization sequence is:
+ * disable -> device -> client
+ */
+static int
+mwifiex_cfg80211_init_p2p_client(struct mwifiex_private *priv)
+{
+       u16 mode;
+
+       if (mwifiex_cfg80211_deinit_p2p(priv))
+               return -1;
+
+       mode = P2P_MODE_DEVICE;
+       if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_P2P_MODE_CFG,
+                                 HostCmd_ACT_GEN_SET, 0, &mode))
+               return -1;
+
+       mode = P2P_MODE_CLIENT;
+       if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_P2P_MODE_CFG,
+                                 HostCmd_ACT_GEN_SET, 0, &mode))
+               return -1;
+
+       return 0;
+}
+
+/*
+ * This function initializes the functionalities for P2P GO.
+ * The P2P GO initialization sequence is:
+ * disable -> device -> GO
+ */
+static int
+mwifiex_cfg80211_init_p2p_go(struct mwifiex_private *priv)
+{
+       u16 mode;
+
+       if (mwifiex_cfg80211_deinit_p2p(priv))
+               return -1;
+
+       mode = P2P_MODE_DEVICE;
+       if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_P2P_MODE_CFG,
+                                 HostCmd_ACT_GEN_SET, 0, &mode))
+               return -1;
+
+       mode = P2P_MODE_GO;
+       if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_P2P_MODE_CFG,
+                                 HostCmd_ACT_GEN_SET, 0, &mode))
+               return -1;
+
+       if (GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_UAP)
+               mwifiex_set_bss_role(priv, MWIFIEX_BSS_ROLE_UAP);
+
+       return 0;
+}
+
 /*
  * CFG802.11 operation handler to change interface type.
  */
@@ -494,6 +776,16 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
                switch (type) {
                case NL80211_IFTYPE_ADHOC:
                        break;
+               case NL80211_IFTYPE_P2P_CLIENT:
+                       if (mwifiex_cfg80211_init_p2p_client(priv))
+                               return -EFAULT;
+                       dev->ieee80211_ptr->iftype = type;
+                       return 0;
+               case NL80211_IFTYPE_P2P_GO:
+                       if (mwifiex_cfg80211_init_p2p_go(priv))
+                               return -EFAULT;
+                       dev->ieee80211_ptr->iftype = type;
+                       return 0;
                case NL80211_IFTYPE_UNSPECIFIED:
                        wiphy_warn(wiphy, "%s: kept type as STA\n", dev->name);
                case NL80211_IFTYPE_STATION:    /* This shouldn't happen */
@@ -519,6 +811,18 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
                        return -EOPNOTSUPP;
                }
                break;
+       case NL80211_IFTYPE_P2P_CLIENT:
+       case NL80211_IFTYPE_P2P_GO:
+               switch (type) {
+               case NL80211_IFTYPE_STATION:
+                       if (mwifiex_cfg80211_deinit_p2p(priv))
+                               return -EFAULT;
+                       dev->ieee80211_ptr->iftype = type;
+                       return 0;
+               default:
+                       return -EOPNOTSUPP;
+               }
+               break;
        default:
                wiphy_err(wiphy, "%s: unknown iftype: %d\n",
                          dev->name, dev->ieee80211_ptr->iftype);
@@ -657,7 +961,6 @@ mwifiex_cfg80211_dump_station(struct wiphy *wiphy, struct net_device *dev,
 }
 
 /* Supported rates to be advertised to the cfg80211 */
-
 static struct ieee80211_rate mwifiex_rates[] = {
        {.bitrate = 10, .hw_value = 2, },
        {.bitrate = 20, .hw_value = 4, },
@@ -674,7 +977,6 @@ static struct ieee80211_rate mwifiex_rates[] = {
 };
 
 /* Channel definitions to be advertised to cfg80211 */
-
 static struct ieee80211_channel mwifiex_channels_2ghz[] = {
        {.center_freq = 2412, .hw_value = 1, },
        {.center_freq = 2417, .hw_value = 2, },
@@ -742,12 +1044,41 @@ static struct ieee80211_supported_band mwifiex_band_5ghz = {
 
 
 /* Supported crypto cipher suits to be advertised to cfg80211 */
-
 static const u32 mwifiex_cipher_suites[] = {
        WLAN_CIPHER_SUITE_WEP40,
        WLAN_CIPHER_SUITE_WEP104,
        WLAN_CIPHER_SUITE_TKIP,
        WLAN_CIPHER_SUITE_CCMP,
+       WLAN_CIPHER_SUITE_AES_CMAC,
+};
+
+/* Supported mgmt frame types to be advertised to cfg80211 */
+static const struct ieee80211_txrx_stypes
+mwifiex_mgmt_stypes[NUM_NL80211_IFTYPES] = {
+       [NL80211_IFTYPE_STATION] = {
+               .tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+                     BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
+               .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+                     BIT(IEEE80211_STYPE_PROBE_REQ >> 4),
+       },
+       [NL80211_IFTYPE_AP] = {
+               .tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+                     BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
+               .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+                     BIT(IEEE80211_STYPE_PROBE_REQ >> 4),
+       },
+       [NL80211_IFTYPE_P2P_CLIENT] = {
+               .tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+                     BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
+               .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+                     BIT(IEEE80211_STYPE_PROBE_REQ >> 4),
+       },
+       [NL80211_IFTYPE_P2P_GO] = {
+               .tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+                     BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
+               .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+                     BIT(IEEE80211_STYPE_PROBE_REQ >> 4),
+       },
 };
 
 /*
@@ -842,7 +1173,7 @@ static int mwifiex_cfg80211_change_beacon(struct wiphy *wiphy,
 {
        struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
 
-       if (priv->bss_type != MWIFIEX_BSS_TYPE_UAP) {
+       if (GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_UAP) {
                wiphy_err(wiphy, "%s: bss_type mismatched\n", __func__);
                return -EINVAL;
        }
@@ -906,6 +1237,8 @@ static int mwifiex_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
        if (mwifiex_del_mgmt_ies(priv))
                wiphy_err(wiphy, "Failed to delete mgmt IEs!\n");
 
+       priv->ap_11n_enabled = 0;
+
        if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_UAP_BSS_STOP,
                                  HostCmd_ACT_GEN_SET, 0, NULL)) {
                wiphy_err(wiphy, "Failed to stop the BSS\n");
@@ -928,7 +1261,7 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
        struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
        u8 config_bands = 0;
 
-       if (priv->bss_type != MWIFIEX_BSS_TYPE_UAP)
+       if (GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_UAP)
                return -1;
        if (mwifiex_set_mgmt_ies(priv, &params->beacon))
                return -1;
@@ -965,15 +1298,18 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
 
        bss_cfg->channel =
            (u8)ieee80211_frequency_to_channel(params->channel->center_freq);
-       bss_cfg->band_cfg = BAND_CONFIG_MANUAL;
 
        /* Set appropriate bands */
        if (params->channel->band == IEEE80211_BAND_2GHZ) {
+               bss_cfg->band_cfg = BAND_CONFIG_BG;
+
                if (params->channel_type == NL80211_CHAN_NO_HT)
                        config_bands = BAND_B | BAND_G;
                else
                        config_bands = BAND_B | BAND_G | BAND_GN;
        } else {
+               bss_cfg->band_cfg = BAND_CONFIG_A;
+
                if (params->channel_type == NL80211_CHAN_NO_HT)
                        config_bands = BAND_A;
                else
@@ -984,6 +1320,7 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
              ~priv->adapter->fw_bands))
                priv->adapter->config_bands = config_bands;
 
+       mwifiex_set_uap_rates(bss_cfg, params);
        mwifiex_send_domain_info_cmd_fw(wiphy);
 
        if (mwifiex_set_secure_params(priv, bss_cfg, params)) {
@@ -994,6 +1331,12 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
 
        mwifiex_set_ht_params(priv, bss_cfg, params);
 
+       if (params->inactivity_timeout > 0) {
+               /* sta_ao_timer/ps_sta_ao_timer is in unit of 100ms */
+               bss_cfg->sta_ao_timer = 10 * params->inactivity_timeout;
+               bss_cfg->ps_sta_ao_timer = 10 * params->inactivity_timeout;
+       }
+
        if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_UAP_BSS_STOP,
                                  HostCmd_ACT_GEN_SET, 0, NULL)) {
                wiphy_err(wiphy, "Failed to stop the BSS\n");
@@ -1149,7 +1492,6 @@ mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len, u8 *ssid,
                              ~priv->adapter->fw_bands))
                                priv->adapter->config_bands = config_bands;
                }
-               mwifiex_send_domain_info_cmd_fw(priv->wdev->wiphy);
        }
 
        /* As this is new association, clear locally stored
@@ -1159,7 +1501,7 @@ mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len, u8 *ssid,
        priv->wep_key_curr_index = 0;
        priv->sec_info.encryption_mode = 0;
        priv->sec_info.is_authtype_auto = 0;
-       ret = mwifiex_set_encode(priv, NULL, 0, 0, NULL, 1);
+       ret = mwifiex_set_encode(priv, NULL, NULL, 0, 0, NULL, 1);
 
        if (mode == NL80211_IFTYPE_ADHOC) {
                /* "privacy" is set only for ad-hoc mode */
@@ -1206,8 +1548,9 @@ mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len, u8 *ssid,
                                "info: setting wep encryption"
                                " with key len %d\n", sme->key_len);
                        priv->wep_key_curr_index = sme->key_idx;
-                       ret = mwifiex_set_encode(priv, sme->key, sme->key_len,
-                                                sme->key_idx, NULL, 0);
+                       ret = mwifiex_set_encode(priv, NULL, sme->key,
+                                                sme->key_len, sme->key_idx,
+                                                NULL, 0);
                }
        }
 done:
@@ -1459,11 +1802,18 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy,
 {
        struct net_device *dev = request->wdev->netdev;
        struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
-       int i;
+       int i, offset;
        struct ieee80211_channel *chan;
+       struct ieee_types_header *ie;
 
        wiphy_dbg(wiphy, "info: received scan request on %s\n", dev->name);
 
+       if (atomic_read(&priv->wmm.tx_pkts_queued) >=
+           MWIFIEX_MIN_TX_PENDING_TO_CANCEL_SCAN) {
+               dev_dbg(priv->adapter->dev, "scan rejected due to traffic\n");
+               return -EBUSY;
+       }
+
        priv->scan_request = request;
 
        priv->user_scan_cfg = kzalloc(sizeof(struct mwifiex_user_scan_cfg),
@@ -1477,13 +1827,17 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy,
        priv->user_scan_cfg->ssid_list = request->ssids;
 
        if (request->ie && request->ie_len) {
+               offset = 0;
                for (i = 0; i < MWIFIEX_MAX_VSIE_NUM; i++) {
                        if (priv->vs_ie[i].mask != MWIFIEX_VSIE_MASK_CLEAR)
                                continue;
                        priv->vs_ie[i].mask = MWIFIEX_VSIE_MASK_SCAN;
-                       memcpy(&priv->vs_ie[i].ie, request->ie,
-                              request->ie_len);
-                       break;
+                       ie = (struct ieee_types_header *)(request->ie + offset);
+                       memcpy(&priv->vs_ie[i].ie, ie, sizeof(*ie) + ie->len);
+                       offset += sizeof(*ie) + ie->len;
+
+                       if (offset >= request->ie_len)
+                               break;
                }
        }
 
@@ -1592,7 +1946,7 @@ mwifiex_setup_ht_caps(struct ieee80211_sta_ht_cap *ht_info,
  *  create a new virtual interface with the given name
  */
 struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
-                                             char *name,
+                                             const char *name,
                                              enum nl80211_iftype type,
                                              u32 *flags,
                                              struct vif_params *params)
@@ -1632,7 +1986,7 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
 
                priv->bss_type = MWIFIEX_BSS_TYPE_STA;
                priv->frame_type = MWIFIEX_DATA_FRAME_TYPE_ETH_II;
-               priv->bss_priority = MWIFIEX_BSS_ROLE_STA;
+               priv->bss_priority = 0;
                priv->bss_role = MWIFIEX_BSS_ROLE_STA;
                priv->bss_num = 0;
 
@@ -1655,12 +2009,47 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
 
                priv->bss_type = MWIFIEX_BSS_TYPE_UAP;
                priv->frame_type = MWIFIEX_DATA_FRAME_TYPE_ETH_II;
-               priv->bss_priority = MWIFIEX_BSS_ROLE_UAP;
+               priv->bss_priority = 0;
                priv->bss_role = MWIFIEX_BSS_ROLE_UAP;
                priv->bss_started = 0;
                priv->bss_num = 0;
                priv->bss_mode = type;
 
+               break;
+       case NL80211_IFTYPE_P2P_CLIENT:
+               priv = adapter->priv[MWIFIEX_BSS_TYPE_P2P];
+
+               if (priv->bss_mode) {
+                       wiphy_err(wiphy, "Can't create multiple P2P ifaces");
+                       return ERR_PTR(-EINVAL);
+               }
+
+               wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL);
+               if (!wdev)
+                       return ERR_PTR(-ENOMEM);
+
+               priv->wdev = wdev;
+               wdev->wiphy = wiphy;
+
+               /* At start-up, wpa_supplicant tries to change the interface
+                * to NL80211_IFTYPE_STATION if it is not managed mode.
+                * So, we initialize it to STA mode.
+                */
+               wdev->iftype = NL80211_IFTYPE_STATION;
+               priv->bss_mode = NL80211_IFTYPE_STATION;
+
+               /* Setting bss_type to P2P tells firmware that this interface
+                * is receiving P2P peers found during find phase and doing
+                * action frame handshake.
+                */
+               priv->bss_type = MWIFIEX_BSS_TYPE_P2P;
+
+               priv->frame_type = MWIFIEX_DATA_FRAME_TYPE_ETH_II;
+               priv->bss_priority = MWIFIEX_BSS_ROLE_STA;
+               priv->bss_role = MWIFIEX_BSS_ROLE_STA;
+               priv->bss_started = 0;
+               priv->bss_num = 0;
+
                break;
        default:
                wiphy_err(wiphy, "type not supported\n");
@@ -1769,6 +2158,10 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = {
        .leave_ibss = mwifiex_cfg80211_leave_ibss,
        .add_key = mwifiex_cfg80211_add_key,
        .del_key = mwifiex_cfg80211_del_key,
+       .mgmt_tx = mwifiex_cfg80211_mgmt_tx,
+       .mgmt_frame_register = mwifiex_cfg80211_mgmt_frame_register,
+       .remain_on_channel = mwifiex_cfg80211_remain_on_channel,
+       .cancel_remain_on_channel = mwifiex_cfg80211_cancel_remain_on_channel,
        .set_default_key = mwifiex_cfg80211_set_default_key,
        .set_power_mgmt = mwifiex_cfg80211_set_power_mgmt,
        .set_tx_power = mwifiex_cfg80211_set_tx_power,
@@ -1805,8 +2198,12 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
        }
        wiphy->max_scan_ssids = MWIFIEX_MAX_SSID_LIST_LENGTH;
        wiphy->max_scan_ie_len = MWIFIEX_MAX_VSIE_LEN;
+       wiphy->mgmt_stypes = mwifiex_mgmt_stypes;
+       wiphy->max_remain_on_channel_duration = 5000;
        wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
                                 BIT(NL80211_IFTYPE_ADHOC) |
+                                BIT(NL80211_IFTYPE_P2P_CLIENT) |
+                                BIT(NL80211_IFTYPE_P2P_GO) |
                                 BIT(NL80211_IFTYPE_AP);
 
        wiphy->bands[IEEE80211_BAND_2GHZ] = &mwifiex_band_2ghz;
@@ -1825,15 +2222,21 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
        memcpy(wiphy->perm_addr, priv->curr_addr, ETH_ALEN);
        wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
        wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME |
-                       WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
+                       WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD |
+                       WIPHY_FLAG_CUSTOM_REGULATORY |
+                       WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
+
+       wiphy_apply_custom_regulatory(wiphy, &mwifiex_world_regdom_custom);
 
        wiphy->probe_resp_offload = NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
-                                   NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2;
+                                   NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
+                                   NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
 
        wiphy->available_antennas_tx = BIT(adapter->number_of_antenna) - 1;
        wiphy->available_antennas_rx = BIT(adapter->number_of_antenna) - 1;
 
-       wiphy->features = NL80211_FEATURE_HT_IBSS;
+       wiphy->features = NL80211_FEATURE_HT_IBSS |
+                         NL80211_FEATURE_INACTIVITY_TIMER;
 
        /* Reserve space for mwifiex specific private data for BSS */
        wiphy->bss_priv_size = sizeof(struct mwifiex_bss_priv);
@@ -1854,8 +2257,9 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
                return ret;
        }
        country_code = mwifiex_11d_code_2_region(priv->adapter->region_code);
-       if (country_code && regulatory_hint(wiphy, country_code))
-               dev_err(adapter->dev, "regulatory_hint() failed\n");
+       if (country_code)
+               dev_info(adapter->dev,
+                        "ignoring F/W country code %2.2s\n", country_code);
 
        adapter->wiphy = wiphy;
        return ret;
index 565527aee0ea3f73caa832f336c0ded06a3b22d9..8d465107f52b2c5073acad20d8d0acbe0485be5e 100644 (file)
@@ -460,7 +460,10 @@ int mwifiex_process_event(struct mwifiex_adapter *adapter)
                        priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
        }
 
-       ret = mwifiex_process_sta_event(priv);
+       if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
+               ret = mwifiex_process_uap_event(priv);
+       else
+               ret = mwifiex_process_sta_event(priv);
 
        adapter->event_cause = 0;
        adapter->event_skb = NULL;
@@ -1085,6 +1088,8 @@ mwifiex_hs_activated_event(struct mwifiex_private *priv, u8 activated)
        if (activated) {
                if (priv->adapter->is_hs_configured) {
                        priv->adapter->hs_activated = true;
+                       mwifiex_update_rxreor_flags(priv->adapter,
+                                                   RXREOR_FORCE_NO_DROP);
                        dev_dbg(priv->adapter->dev, "event: hs_activated\n");
                        priv->adapter->hs_activate_wait_q_woken = true;
                        wake_up_interruptible(
index 070ef25f51867a1bb0d3baf3279d0eb02b2ba3d6..e9357d87d3279f7ba1d19c8246347f44ca46a269 100644 (file)
 #include <linux/ieee80211.h>
 
 
-#define MWIFIEX_MAX_BSS_NUM         (2)
+#define MWIFIEX_MAX_BSS_NUM         (3)
 
 #define MWIFIEX_MIN_DATA_HEADER_LEN 36 /* sizeof(mwifiex_txpd)
                                         *   + 4 byte alignment
                                         */
+#define MWIFIEX_MGMT_FRAME_HEADER_SIZE 8       /* sizeof(pkt_type)
+                                                *   + sizeof(tx_control)
+                                                */
 
 #define MWIFIEX_MAX_TX_BASTREAM_SUPPORTED      2
 #define MWIFIEX_MAX_RX_BASTREAM_SUPPORTED      16
 #define MWIFIEX_SDIO_BLOCK_SIZE            256
 
 #define MWIFIEX_BUF_FLAG_REQUEUED_PKT      BIT(0)
+#define MWIFIEX_BUF_FLAG_BRIDGED_PKT      BIT(1)
+
+#define MWIFIEX_BRIDGED_PKTS_THRESHOLD     1024
 
 enum mwifiex_bss_type {
        MWIFIEX_BSS_TYPE_STA = 0,
        MWIFIEX_BSS_TYPE_UAP = 1,
+       MWIFIEX_BSS_TYPE_P2P = 2,
        MWIFIEX_BSS_TYPE_ANY = 0xff,
 };
 
index e831b440a24a3f2c654e8ad04e17701fb0e1b85f..dda588b3557063e95bfc92746422e3d3942a1719 100644 (file)
@@ -65,10 +65,12 @@ enum KEY_TYPE_ID {
        KEY_TYPE_ID_TKIP,
        KEY_TYPE_ID_AES,
        KEY_TYPE_ID_WAPI,
+       KEY_TYPE_ID_AES_CMAC,
 };
 #define KEY_MCAST      BIT(0)
 #define KEY_UNICAST    BIT(1)
 #define KEY_ENABLED    BIT(2)
+#define KEY_IGTK       BIT(10)
 
 #define WAPI_KEY_LEN                   50
 
@@ -92,6 +94,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
 };
 
 #define CAL_SNR(RSSI, NF)              ((s16)((s16)(RSSI)-(s16)(NF)))
+#define CAL_RSSI(SNR, NF)              ((s16)((s16)(SNR)+(s16)(NF)))
 
 #define UAP_BSS_PARAMS_I                       0
 #define UAP_CUSTOM_IE_I                                1
@@ -106,6 +109,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
 #define MGMT_MASK_BEACON                       0x100
 
 #define TLV_TYPE_UAP_SSID                      0x0000
+#define TLV_TYPE_UAP_RATES                     0x0001
 
 #define PROPRIETARY_TLV_BASE_ID                 0x0100
 #define TLV_TYPE_KEY_MATERIAL       (PROPRIETARY_TLV_BASE_ID + 0)
@@ -124,6 +128,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
 #define TLV_TYPE_UAP_DTIM_PERIOD    (PROPRIETARY_TLV_BASE_ID + 45)
 #define TLV_TYPE_UAP_BCAST_SSID     (PROPRIETARY_TLV_BASE_ID + 48)
 #define TLV_TYPE_UAP_RTS_THRESHOLD  (PROPRIETARY_TLV_BASE_ID + 51)
+#define TLV_TYPE_UAP_AO_TIMER       (PROPRIETARY_TLV_BASE_ID + 57)
 #define TLV_TYPE_UAP_WEP_KEY        (PROPRIETARY_TLV_BASE_ID + 59)
 #define TLV_TYPE_UAP_WPA_PASSPHRASE (PROPRIETARY_TLV_BASE_ID + 60)
 #define TLV_TYPE_UAP_ENCRY_PROTOCOL (PROPRIETARY_TLV_BASE_ID + 64)
@@ -138,6 +143,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
 #define TLV_TYPE_MGMT_IE            (PROPRIETARY_TLV_BASE_ID + 105)
 #define TLV_TYPE_AUTO_DS_PARAM      (PROPRIETARY_TLV_BASE_ID + 113)
 #define TLV_TYPE_PS_PARAM           (PROPRIETARY_TLV_BASE_ID + 114)
+#define TLV_TYPE_UAP_PS_AO_TIMER    (PROPRIETARY_TLV_BASE_ID + 123)
 #define TLV_TYPE_PWK_CIPHER         (PROPRIETARY_TLV_BASE_ID + 145)
 #define TLV_TYPE_GWK_CIPHER         (PROPRIETARY_TLV_BASE_ID + 146)
 
@@ -257,9 +263,12 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
 #define HostCmd_CMD_TX_RATE_CFG                       0x00d6
 #define HostCmd_CMD_802_11_PS_MODE_ENH                0x00e4
 #define HostCmd_CMD_802_11_HS_CFG_ENH                 0x00e5
+#define HostCmd_CMD_P2P_MODE_CFG                      0x00eb
 #define HostCmd_CMD_CAU_REG_ACCESS                    0x00ed
 #define HostCmd_CMD_SET_BSS_MODE                      0x00f7
 #define HostCmd_CMD_PCIE_DESC_DETAILS                 0x00fa
+#define HostCmd_CMD_MGMT_FRAME_REG                    0x010c
+#define HostCmd_CMD_REMAIN_ON_CHAN                    0x010d
 
 #define PROTOCOL_NO_SECURITY        0x01
 #define PROTOCOL_STATIC_WEP         0x02
@@ -285,9 +294,17 @@ enum ENH_PS_MODES {
        DIS_AUTO_PS = 0xfe,
 };
 
+enum P2P_MODES {
+       P2P_MODE_DISABLE = 0,
+       P2P_MODE_DEVICE = 1,
+       P2P_MODE_GO = 2,
+       P2P_MODE_CLIENT = 3,
+};
+
 #define HostCmd_RET_BIT                       0x8000
 #define HostCmd_ACT_GEN_GET                   0x0000
 #define HostCmd_ACT_GEN_SET                   0x0001
+#define HostCmd_ACT_GEN_REMOVE                0x0004
 #define HostCmd_ACT_BITWISE_SET               0x0002
 #define HostCmd_ACT_BITWISE_CLR               0x0003
 #define HostCmd_RESULT_OK                     0x0000
@@ -307,7 +324,7 @@ enum ENH_PS_MODES {
 #define HostCmd_SCAN_RADIO_TYPE_A           1
 
 #define HOST_SLEEP_CFG_CANCEL          0xffffffff
-#define HOST_SLEEP_CFG_COND_DEF                0x0000000f
+#define HOST_SLEEP_CFG_COND_DEF                0x00000000
 #define HOST_SLEEP_CFG_GPIO_DEF                0xff
 #define HOST_SLEEP_CFG_GAP_DEF         0
 
@@ -385,6 +402,7 @@ enum ENH_PS_MODES {
 #define EVENT_BW_CHANGE                 0x00000048
 #define EVENT_UAP_MIC_COUNTERMEASURES   0x0000004c
 #define EVENT_HOSTWAKE_STAIE           0x0000004d
+#define EVENT_REMAIN_ON_CHAN_EXPIRED    0x0000005f
 
 #define EVENT_ID_MASK                   0xffff
 #define BSS_NUM_MASK                    0xf
@@ -424,10 +442,10 @@ struct txpd {
 struct rxpd {
        u8 bss_type;
        u8 bss_num;
-       u16 rx_pkt_length;
-       u16 rx_pkt_offset;
-       u16 rx_pkt_type;
-       u16 seq_num;
+       __le16 rx_pkt_length;
+       __le16 rx_pkt_offset;
+       __le16 rx_pkt_type;
+       __le16 seq_num;
        u8 priority;
        u8 rx_rate;
        s8 snr;
@@ -439,6 +457,31 @@ struct rxpd {
        u8 reserved;
 } __packed;
 
+struct uap_txpd {
+	u8 bss_type;
+	u8 bss_num;
+	__le16 tx_pkt_length;
+	__le16 tx_pkt_offset;
+	__le16 tx_pkt_type;
+	__le32 tx_control;
+	u8 priority;
+	u8 flags;
+	u8 pkt_delay_2ms;
+	u8 reserved1;
+	__le32 reserved2;
+} __packed;
+
+struct uap_rxpd {
+	u8 bss_type;
+	u8 bss_num;
+	__le16 rx_pkt_length;
+	__le16 rx_pkt_offset;
+	__le16 rx_pkt_type;
+	__le16 seq_num;
+	u8 priority;
+	u8 reserved1;
+} __packed;
+
 enum mwifiex_chan_scan_mode_bitmasks {
        MWIFIEX_PASSIVE_SCAN = BIT(0),
        MWIFIEX_DISABLE_CHAN_FILT = BIT(1),
@@ -558,6 +601,13 @@ struct mwifiex_ie_type_key_param_set {
        u8 key[50];
 } __packed;
 
+#define IGTK_PN_LEN            8
+
+struct mwifiex_cmac_param {
+       u8 ipn[IGTK_PN_LEN];
+       u8 key[WLAN_KEY_LEN_AES_CMAC];
+} __packed;
+
 struct host_cmd_ds_802_11_key_material {
        __le16 action;
        struct mwifiex_ie_type_key_param_set key_param_set;
@@ -1250,6 +1300,11 @@ struct host_cmd_tlv_ssid {
        u8 ssid[0];
 } __packed;
 
+struct host_cmd_tlv_rates {
+       struct host_cmd_tlv tlv;
+       u8 rates[0];
+} __packed;
+
 struct host_cmd_tlv_bcast_ssid {
        struct host_cmd_tlv tlv;
        u8 bcast_ctl;
@@ -1291,11 +1346,35 @@ struct host_cmd_tlv_channel_band {
        u8 channel;
 } __packed;
 
+struct host_cmd_tlv_ageout_timer {
+       struct host_cmd_tlv tlv;
+       __le32 sta_ao_timer;
+} __packed;
+
 struct host_cmd_ds_version_ext {
        u8 version_str_sel;
        char version_str[128];
 } __packed;
 
+struct host_cmd_ds_mgmt_frame_reg {
+       __le16 action;
+       __le32 mask;
+} __packed;
+
+struct host_cmd_ds_p2p_mode_cfg {
+       __le16 action;
+       __le16 mode;
+} __packed;
+
+struct host_cmd_ds_remain_on_chan {
+       __le16 action;
+       u8 status;
+       u8 reserved;
+       u8 band_cfg;
+       u8 channel;
+       __le32 duration;
+} __packed;
+
 struct host_cmd_ds_802_11_ibss_status {
        __le16 action;
        __le16 enable;
@@ -1307,6 +1386,7 @@ struct host_cmd_ds_802_11_ibss_status {
 
 #define CONNECTION_TYPE_INFRA   0
 #define CONNECTION_TYPE_ADHOC   1
+#define CONNECTION_TYPE_AP      2
 
 struct host_cmd_ds_set_bss_mode {
        u8 con_type;
@@ -1404,6 +1484,9 @@ struct host_cmd_ds_command {
                struct host_cmd_ds_wmm_get_status get_wmm_status;
                struct host_cmd_ds_802_11_key_material key_material;
                struct host_cmd_ds_version_ext verext;
+               struct host_cmd_ds_mgmt_frame_reg reg_mask;
+               struct host_cmd_ds_remain_on_chan roc_cfg;
+               struct host_cmd_ds_p2p_mode_cfg mode_cfg;
                struct host_cmd_ds_802_11_ibss_status ibss_coalescing;
                struct host_cmd_ds_mac_reg_access mac_reg;
                struct host_cmd_ds_bbp_reg_access bbp_reg;
index 1d8dd003e39617124ff87d92d2d3437391b7ba84..e38342f86c515e6e574fbae572e7c51dbe7c8fa8 100644 (file)
@@ -114,9 +114,6 @@ mwifiex_update_autoindex_ies(struct mwifiex_private *priv,
                                                        cpu_to_le16(mask);
 
                        ie->ie_index = cpu_to_le16(index);
-                       ie->ie_length = priv->mgmt_ie[index].ie_length;
-                       memcpy(&ie->ie_buffer, &priv->mgmt_ie[index].ie_buffer,
-                              le16_to_cpu(priv->mgmt_ie[index].ie_length));
                } else {
                        if (mask != MWIFIEX_DELETE_MASK)
                                return -1;
@@ -160,7 +157,7 @@ mwifiex_update_uap_custom_ie(struct mwifiex_private *priv,
        u16 len;
        int ret;
 
-       ap_custom_ie = kzalloc(sizeof(struct mwifiex_ie), GFP_KERNEL);
+       ap_custom_ie = kzalloc(sizeof(*ap_custom_ie), GFP_KERNEL);
        if (!ap_custom_ie)
                return -ENOMEM;
 
@@ -214,30 +211,35 @@ mwifiex_update_uap_custom_ie(struct mwifiex_private *priv,
        return ret;
 }
 
-/* This function checks if WPS IE is present in passed buffer and copies it to
- * mwifiex_ie structure.
+/* This function checks if the vendor specified IE is present in passed buffer
+ * and copies it to mwifiex_ie structure.
  * Function takes pointer to struct mwifiex_ie pointer as argument.
- * If WPS IE is present memory is allocated for mwifiex_ie pointer and filled
- * in with WPS IE. Caller should take care of freeing this memory.
+ * If the vendor specified IE is present then memory is allocated for
+ * mwifiex_ie pointer and filled in with IE. Caller should take care of freeing
+ * this memory.
  */
-static int mwifiex_update_wps_ie(const u8 *ies, int ies_len,
-                                struct mwifiex_ie **ie_ptr, u16 mask)
+static int mwifiex_update_vs_ie(const u8 *ies, int ies_len,
+                               struct mwifiex_ie **ie_ptr, u16 mask,
+                               unsigned int oui, u8 oui_type)
 {
-       struct ieee_types_header *wps_ie;
-       struct mwifiex_ie *ie = NULL;
+       struct ieee_types_header *vs_ie;
+       struct mwifiex_ie *ie = *ie_ptr;
        const u8 *vendor_ie;
 
-       vendor_ie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
-                                           WLAN_OUI_TYPE_MICROSOFT_WPS,
-                                           ies, ies_len);
+       vendor_ie = cfg80211_find_vendor_ie(oui, oui_type, ies, ies_len);
        if (vendor_ie) {
-               ie = kmalloc(sizeof(struct mwifiex_ie), GFP_KERNEL);
-               if (!ie)
-                       return -ENOMEM;
+               if (!*ie_ptr) {
+                       *ie_ptr = kzalloc(sizeof(struct mwifiex_ie),
+                                         GFP_KERNEL);
+                       if (!*ie_ptr)
+                               return -ENOMEM;
+                       ie = *ie_ptr;
+               }
 
-               wps_ie = (struct ieee_types_header *)vendor_ie;
-               memcpy(ie->ie_buffer, wps_ie, wps_ie->len + 2);
-               ie->ie_length = cpu_to_le16(wps_ie->len + 2);
+               vs_ie = (struct ieee_types_header *)vendor_ie;
+               memcpy(ie->ie_buffer + le16_to_cpu(ie->ie_length),
+                      vs_ie, vs_ie->len + 2);
+               le16_add_cpu(&ie->ie_length, vs_ie->len + 2);
                ie->mgmt_subtype_mask = cpu_to_le16(mask);
                ie->ie_index = cpu_to_le16(MWIFIEX_AUTO_IDX_MASK);
        }
@@ -257,20 +259,40 @@ static int mwifiex_set_mgmt_beacon_data_ies(struct mwifiex_private *priv,
        u16 ar_idx = MWIFIEX_AUTO_IDX_MASK;
        int ret = 0;
 
-       if (data->beacon_ies && data->beacon_ies_len)
-               mwifiex_update_wps_ie(data->beacon_ies, data->beacon_ies_len,
-                                     &beacon_ie, MGMT_MASK_BEACON);
+       if (data->beacon_ies && data->beacon_ies_len) {
+               mwifiex_update_vs_ie(data->beacon_ies, data->beacon_ies_len,
+                                    &beacon_ie, MGMT_MASK_BEACON,
+                                    WLAN_OUI_MICROSOFT,
+                                    WLAN_OUI_TYPE_MICROSOFT_WPS);
+               mwifiex_update_vs_ie(data->beacon_ies, data->beacon_ies_len,
+                                    &beacon_ie, MGMT_MASK_BEACON,
+                                    WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P);
+       }
 
-       if (data->proberesp_ies && data->proberesp_ies_len)
-               mwifiex_update_wps_ie(data->proberesp_ies,
-                                     data->proberesp_ies_len, &pr_ie,
-                                     MGMT_MASK_PROBE_RESP);
+       if (data->proberesp_ies && data->proberesp_ies_len) {
+               mwifiex_update_vs_ie(data->proberesp_ies,
+                                    data->proberesp_ies_len, &pr_ie,
+                                    MGMT_MASK_PROBE_RESP, WLAN_OUI_MICROSOFT,
+                                    WLAN_OUI_TYPE_MICROSOFT_WPS);
+               mwifiex_update_vs_ie(data->proberesp_ies,
+                                    data->proberesp_ies_len, &pr_ie,
+                                    MGMT_MASK_PROBE_RESP,
+                                    WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P);
+       }
 
-       if (data->assocresp_ies && data->assocresp_ies_len)
-               mwifiex_update_wps_ie(data->assocresp_ies,
-                                     data->assocresp_ies_len, &ar_ie,
-                                     MGMT_MASK_ASSOC_RESP |
-                                     MGMT_MASK_REASSOC_RESP);
+       if (data->assocresp_ies && data->assocresp_ies_len) {
+               mwifiex_update_vs_ie(data->assocresp_ies,
+                                    data->assocresp_ies_len, &ar_ie,
+                                    MGMT_MASK_ASSOC_RESP |
+                                    MGMT_MASK_REASSOC_RESP,
+                                    WLAN_OUI_MICROSOFT,
+                                    WLAN_OUI_TYPE_MICROSOFT_WPS);
+               mwifiex_update_vs_ie(data->assocresp_ies,
+                                    data->assocresp_ies_len, &ar_ie,
+                                    MGMT_MASK_ASSOC_RESP |
+                                    MGMT_MASK_REASSOC_RESP, WLAN_OUI_WFA,
+                                    WLAN_OUI_TYPE_WFA_P2P);
+       }
 
        if (beacon_ie || pr_ie || ar_ie) {
                ret = mwifiex_update_uap_custom_ie(priv, beacon_ie,
index 21fdc6c02775b4f9b119afbab1de01420062c28f..b5d37a8caa09a4429504fd804d8a6a5b1542b1af 100644 (file)
@@ -64,60 +64,77 @@ static void scan_delay_timer_fn(unsigned long data)
        struct cmd_ctrl_node *cmd_node, *tmp_node;
        unsigned long flags;
 
-       if (!mwifiex_wmm_lists_empty(adapter)) {
-               if (adapter->scan_delay_cnt == MWIFIEX_MAX_SCAN_DELAY_CNT) {
+       if (adapter->scan_delay_cnt == MWIFIEX_MAX_SCAN_DELAY_CNT) {
+               /*
+                * Abort scan operation by cancelling all pending scan
+                * commands
+                */
+               spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
+               list_for_each_entry_safe(cmd_node, tmp_node,
+                                        &adapter->scan_pending_q, list) {
+                       list_del(&cmd_node->list);
+                       mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
+               }
+               spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
+
+               spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
+               adapter->scan_processing = false;
+               adapter->scan_delay_cnt = 0;
+               adapter->empty_tx_q_cnt = 0;
+               spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
+
+               if (priv->user_scan_cfg) {
+                       dev_dbg(priv->adapter->dev,
+                               "info: %s: scan aborted\n", __func__);
+                       cfg80211_scan_done(priv->scan_request, 1);
+                       priv->scan_request = NULL;
+                       kfree(priv->user_scan_cfg);
+                       priv->user_scan_cfg = NULL;
+               }
+
+               if (priv->scan_pending_on_block) {
+                       priv->scan_pending_on_block = false;
+                       up(&priv->async_sem);
+               }
+               goto done;
+       }
+
+       if (!atomic_read(&priv->adapter->is_tx_received)) {
+               adapter->empty_tx_q_cnt++;
+               if (adapter->empty_tx_q_cnt == MWIFIEX_MAX_EMPTY_TX_Q_CNT) {
                        /*
-                        * Abort scan operation by cancelling all pending scan
-                        * command
+                        * No Tx traffic for 200msec. Get scan command from
+                        * scan pending queue and put to cmd pending queue to
+                        * resume scan operation
                         */
+                       adapter->scan_delay_cnt = 0;
+                       adapter->empty_tx_q_cnt = 0;
                        spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
-                       list_for_each_entry_safe(cmd_node, tmp_node,
-                                                &adapter->scan_pending_q,
-                                                list) {
-                               list_del(&cmd_node->list);
-                               cmd_node->wait_q_enabled = false;
-                               mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
-                       }
+                       cmd_node = list_first_entry(&adapter->scan_pending_q,
+                                                   struct cmd_ctrl_node, list);
+                       list_del(&cmd_node->list);
                        spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
                                               flags);
 
-                       spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
-                       adapter->scan_processing = false;
-                       spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock,
-                                              flags);
-
-                       if (priv->user_scan_cfg) {
-                               dev_dbg(priv->adapter->dev,
-                                       "info: %s: scan aborted\n", __func__);
-                               cfg80211_scan_done(priv->scan_request, 1);
-                               priv->scan_request = NULL;
-                               kfree(priv->user_scan_cfg);
-                               priv->user_scan_cfg = NULL;
-                       }
-               } else {
-                       /*
-                        * Tx data queue is still not empty, delay scan
-                        * operation further by 20msec.
-                        */
-                       mod_timer(&priv->scan_delay_timer, jiffies +
-                                 msecs_to_jiffies(MWIFIEX_SCAN_DELAY_MSEC));
-                       adapter->scan_delay_cnt++;
+                       mwifiex_insert_cmd_to_pending_q(adapter, cmd_node,
+                                                       true);
+                       queue_work(adapter->workqueue, &adapter->main_work);
+                       goto done;
                }
-               queue_work(priv->adapter->workqueue, &priv->adapter->main_work);
        } else {
-               /*
-                * Tx data queue is empty. Get scan command from scan_pending_q
-                * and put to cmd_pending_q to resume scan operation
-                */
-               adapter->scan_delay_cnt = 0;
-               spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
-               cmd_node = list_first_entry(&adapter->scan_pending_q,
-                                           struct cmd_ctrl_node, list);
-               list_del(&cmd_node->list);
-               spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
-
-               mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, true);
+               adapter->empty_tx_q_cnt = 0;
        }
+
+       /* Delay scan operation further by 20msec */
+       mod_timer(&priv->scan_delay_timer, jiffies +
+                 msecs_to_jiffies(MWIFIEX_SCAN_DELAY_MSEC));
+       adapter->scan_delay_cnt++;
+
+done:
+       if (atomic_read(&priv->adapter->is_tx_received))
+               atomic_set(&priv->adapter->is_tx_received, false);
+
+       return;
 }
 
 /*
@@ -127,7 +144,7 @@ static void scan_delay_timer_fn(unsigned long data)
  * Additionally, it also initializes all the locks and sets up all the
  * lists.
  */
-static int mwifiex_init_priv(struct mwifiex_private *priv)
+int mwifiex_init_priv(struct mwifiex_private *priv)
 {
        u32 i;
 
@@ -196,6 +213,8 @@ static int mwifiex_init_priv(struct mwifiex_private *priv)
        priv->curr_bcn_size = 0;
        priv->wps_ie = NULL;
        priv->wps_ie_len = 0;
+       priv->ap_11n_enabled = 0;
+       memset(&priv->roc_cfg, 0, sizeof(priv->roc_cfg));
 
        priv->scan_block = false;
 
@@ -345,6 +364,7 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
        memset(&adapter->arp_filter, 0, sizeof(adapter->arp_filter));
        adapter->arp_filter_size = 0;
        adapter->max_mgmt_ie_index = MAX_MGMT_IE_INDEX;
+       adapter->empty_tx_q_cnt = 0;
 }
 
 /*
@@ -410,6 +430,7 @@ static void mwifiex_free_lock_list(struct mwifiex_adapter *adapter)
                                list_del(&priv->wmm.tid_tbl_ptr[j].ra_list);
                        list_del(&priv->tx_ba_stream_tbl_ptr);
                        list_del(&priv->rx_reorder_tbl_ptr);
+                       list_del(&priv->sta_list);
                }
        }
 }
@@ -472,6 +493,7 @@ int mwifiex_init_lock_list(struct mwifiex_adapter *adapter)
                        spin_lock_init(&priv->rx_pkt_lock);
                        spin_lock_init(&priv->wmm.ra_list_spinlock);
                        spin_lock_init(&priv->curr_bcn_buf_lock);
+                       spin_lock_init(&priv->sta_list_spinlock);
                }
        }
 
@@ -504,6 +526,7 @@ int mwifiex_init_lock_list(struct mwifiex_adapter *adapter)
                }
                INIT_LIST_HEAD(&priv->tx_ba_stream_tbl_ptr);
                INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr);
+               INIT_LIST_HEAD(&priv->sta_list);
 
                spin_lock_init(&priv->tx_ba_stream_tbl_lock);
                spin_lock_init(&priv->rx_reorder_tbl_lock);
@@ -625,6 +648,17 @@ static void mwifiex_delete_bss_prio_tbl(struct mwifiex_private *priv)
        }
 }
 
+/*
+ * This function frees the private structure, including cleans
+ * up the TX and RX queues and frees the BSS priority tables.
+ */
+void mwifiex_free_priv(struct mwifiex_private *priv)
+{
+       mwifiex_clean_txrx(priv);
+       mwifiex_delete_bss_prio_tbl(priv);
+       mwifiex_free_curr_bcn(priv);
+}
+
 /*
  * This function is used to shutdown the driver.
  *
index 50191539bb322ed206bc46f11cfa6154cbda2ad4..4e31c6013ebe5d73db79e427ee3296c5daa940c5 100644 (file)
@@ -81,7 +81,11 @@ struct wep_key {
 
 #define KEY_MGMT_ON_HOST        0x03
 #define MWIFIEX_AUTH_MODE_AUTO  0xFF
-#define BAND_CONFIG_MANUAL      0x00
+#define BAND_CONFIG_BG          0x00
+#define BAND_CONFIG_A           0x01
+#define MWIFIEX_SUPPORTED_RATES                 14
+#define MWIFIEX_SUPPORTED_RATES_EXT             32
+
 struct mwifiex_uap_bss_param {
        u8 channel;
        u8 band_cfg;
@@ -100,6 +104,9 @@ struct mwifiex_uap_bss_param {
        struct wpa_param wpa_cfg;
        struct wep_key wep_cfg[NUM_WEP_KEYS];
        struct ieee80211_ht_cap ht_cap;
+       u8 rates[MWIFIEX_SUPPORTED_RATES];
+       u32 sta_ao_timer;
+       u32 ps_sta_ao_timer;
 };
 
 enum {
@@ -213,7 +220,7 @@ struct mwifiex_debug_info {
 };
 
 #define MWIFIEX_KEY_INDEX_UNICAST      0x40000000
-#define WAPI_RXPN_LEN                  16
+#define PN_LEN                         16
 
 struct mwifiex_ds_encrypt_key {
        u32 key_disable;
@@ -222,7 +229,8 @@ struct mwifiex_ds_encrypt_key {
        u8 key_material[WLAN_MAX_KEY_LEN];
        u8 mac_addr[ETH_ALEN];
        u32 is_wapi_key;
-       u8 wapi_rxpn[WAPI_RXPN_LEN];
+       u8 pn[PN_LEN];          /* packet number */
+       u8 is_igtk_key;
 };
 
 struct mwifiex_power_cfg {
index 46803621d01511dad87b91b59ef2b6fb8a38a9ef..eb22dd248d5491e0644a7b60225216faeae7ad1a 100644 (file)
@@ -72,7 +72,6 @@ static int mwifiex_register(void *card, struct mwifiex_if_ops *if_ops,
                        goto error;
 
                adapter->priv[i]->adapter = adapter;
-               adapter->priv[i]->bss_priority = i;
                adapter->priv_num++;
        }
        mwifiex_init_lock_list(adapter);
@@ -370,6 +369,13 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
                dev_err(adapter->dev, "cannot create default AP interface\n");
                goto err_add_intf;
        }
+
+       /* Create P2P interface by default */
+       if (!mwifiex_add_virtual_intf(adapter->wiphy, "p2p%d",
+                                     NL80211_IFTYPE_P2P_CLIENT, NULL, NULL)) {
+               dev_err(adapter->dev, "cannot create default P2P interface\n");
+               goto err_add_intf;
+       }
        rtnl_unlock();
 
        mwifiex_drv_get_driver_version(adapter, fmt, sizeof(fmt) - 1);
@@ -469,6 +475,27 @@ mwifiex_close(struct net_device *dev)
        return 0;
 }
 
+/*
+ * Add buffer into wmm tx queue and queue work to transmit it.
+ */
+int mwifiex_queue_tx_pkt(struct mwifiex_private *priv, struct sk_buff *skb)
+{
+       mwifiex_wmm_add_buf_txqueue(priv, skb);
+       atomic_inc(&priv->adapter->tx_pending);
+
+       if (priv->adapter->scan_delay_cnt)
+               atomic_set(&priv->adapter->is_tx_received, true);
+
+       if (atomic_read(&priv->adapter->tx_pending) >= MAX_TX_PENDING) {
+               mwifiex_set_trans_start(priv->netdev);
+               mwifiex_stop_net_dev_queue(priv->netdev, priv->adapter);
+       }
+
+       queue_work(priv->adapter->workqueue, &priv->adapter->main_work);
+
+       return 0;
+}
+
 /*
  * CFG802.11 network device handler for data transmission.
  */
@@ -517,15 +544,7 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
        tx_info->bss_type = priv->bss_type;
        mwifiex_fill_buffer(skb);
 
-       mwifiex_wmm_add_buf_txqueue(priv, skb);
-       atomic_inc(&priv->adapter->tx_pending);
-
-       if (atomic_read(&priv->adapter->tx_pending) >= MAX_TX_PENDING) {
-               mwifiex_set_trans_start(dev);
-               mwifiex_stop_net_dev_queue(priv->netdev, priv->adapter);
-       }
-
-       queue_work(priv->adapter->workqueue, &priv->adapter->main_work);
+       mwifiex_queue_tx_pkt(priv, skb);
 
        return 0;
 }
index e7c2a82fd6106481d63d7beb9df0388d1a0798c2..bfb3fa69805c8d34d5d25586ec014c7d71728c9a 100644 (file)
@@ -88,13 +88,18 @@ enum {
 #define MWIFIEX_MAX_TOTAL_SCAN_TIME    (MWIFIEX_TIMER_10S - MWIFIEX_TIMER_1S)
 
 #define MWIFIEX_MAX_SCAN_DELAY_CNT                     50
+#define MWIFIEX_MAX_EMPTY_TX_Q_CNT                     10
 #define MWIFIEX_SCAN_DELAY_MSEC                                20
 
+#define MWIFIEX_MIN_TX_PENDING_TO_CANCEL_SCAN          2
+
 #define RSN_GTK_OUI_OFFSET                             2
 
 #define MWIFIEX_OUI_NOT_PRESENT                        0
 #define MWIFIEX_OUI_PRESENT                            1
 
+#define PKT_TYPE_MGMT  0xE5
+
 /*
  * Do not check for data_received for USB, as data_received
  * is handled in mwifiex_usb_recv for USB
@@ -115,6 +120,7 @@ enum {
 #define MAX_BITMAP_RATES_SIZE                  10
 
 #define MAX_CHANNEL_BAND_BG     14
+#define MAX_CHANNEL_BAND_A      165
 
 #define MAX_FREQUENCY_BAND_BG   2484
 
@@ -199,6 +205,9 @@ struct mwifiex_ra_list_tbl {
        u8 ra[ETH_ALEN];
        u32 total_pkts_size;
        u32 is_11n_enabled;
+       u16 max_amsdu;
+       u16 pkt_count;
+       u8 ba_packet_thr;
 };
 
 struct mwifiex_tid_tbl {
@@ -245,10 +254,6 @@ struct ieee_types_header {
        u8 len;
 } __packed;
 
-#define MWIFIEX_SUPPORTED_RATES                 14
-
-#define MWIFIEX_SUPPORTED_RATES_EXT             32
-
 struct ieee_types_vendor_specific {
        struct ieee_types_vendor_header vend_hdr;
        u8 data[IEEE_MAX_IE_SIZE - sizeof(struct ieee_types_vendor_header)];
@@ -365,6 +370,12 @@ struct wps {
        u8 session_enable;
 };
 
+struct mwifiex_roc_cfg {
+       u64 cookie;
+       struct ieee80211_channel chan;
+       enum nl80211_channel_type chan_type;
+};
+
 struct mwifiex_adapter;
 struct mwifiex_private;
 
@@ -431,6 +442,9 @@ struct mwifiex_private {
        u8 wmm_enabled;
        u8 wmm_qosinfo;
        struct mwifiex_wmm_desc wmm;
+       struct list_head sta_list;
+       /* spin lock for associated station list */
+       spinlock_t sta_list_spinlock;
        struct list_head tx_ba_stream_tbl_ptr;
        /* spin lock for tx_ba_stream_tbl_ptr queue */
        spinlock_t tx_ba_stream_tbl_lock;
@@ -480,12 +494,16 @@ struct mwifiex_private {
        s32 cqm_rssi_thold;
        u32 cqm_rssi_hyst;
        u8 subsc_evt_rssi_state;
+       struct mwifiex_ds_misc_subsc_evt async_subsc_evt_storage;
        struct mwifiex_ie mgmt_ie[MAX_MGMT_IE_INDEX];
        u16 beacon_idx;
        u16 proberesp_idx;
        u16 assocresp_idx;
        u16 rsn_idx;
        struct timer_list scan_delay_timer;
+       u8 ap_11n_enabled;
+       u32 mgmt_frame_mask;
+       struct mwifiex_roc_cfg roc_cfg;
 };
 
 enum mwifiex_ba_status {
@@ -517,6 +535,7 @@ struct mwifiex_rx_reorder_tbl {
        int win_size;
        void **rx_reorder_ptr;
        struct reorder_tmr_cnxt timer_context;
+       u8 flags;
 };
 
 struct mwifiex_bss_prio_node {
@@ -550,6 +569,19 @@ struct mwifiex_bss_priv {
        u64 fw_tsf;
 };
 
+/* This is AP specific structure which stores information
+ * about associated STA
+ */
+struct mwifiex_sta_node {
+       struct list_head list;
+       u8 mac_addr[ETH_ALEN];
+       u8 is_wmm_enabled;
+       u8 is_11n_enabled;
+       u8 ampdu_sta[MAX_NUM_TID];
+       u16 rx_seq[MAX_NUM_TID];
+       u16 max_amsdu;
+};
+
 struct mwifiex_if_ops {
        int (*init_if) (struct mwifiex_adapter *);
        void (*cleanup_if) (struct mwifiex_adapter *);
@@ -690,6 +722,9 @@ struct mwifiex_adapter {
        u8 country_code[IEEE80211_COUNTRY_STRING_LEN];
        u16 max_mgmt_ie_index;
        u8 scan_delay_cnt;
+       u8 empty_tx_q_cnt;
+       atomic_t is_tx_received;
+       atomic_t pending_bridged_pkts;
 };
 
 int mwifiex_init_lock_list(struct mwifiex_adapter *adapter);
@@ -702,6 +737,9 @@ void mwifiex_stop_net_dev_queue(struct net_device *netdev,
 void mwifiex_wake_up_net_dev_queue(struct net_device *netdev,
                struct mwifiex_adapter *adapter);
 
+int mwifiex_init_priv(struct mwifiex_private *priv);
+void mwifiex_free_priv(struct mwifiex_private *priv);
+
 int mwifiex_init_fw(struct mwifiex_adapter *adapter);
 
 int mwifiex_init_fw_complete(struct mwifiex_adapter *adapter);
@@ -714,6 +752,9 @@ int mwifiex_dnld_fw(struct mwifiex_adapter *, struct mwifiex_fw_image *);
 
 int mwifiex_recv_packet(struct mwifiex_adapter *, struct sk_buff *skb);
 
+int mwifiex_process_mgmt_packet(struct mwifiex_adapter *adapter,
+                               struct sk_buff *skb);
+
 int mwifiex_process_event(struct mwifiex_adapter *adapter);
 
 int mwifiex_complete_cmd(struct mwifiex_adapter *adapter,
@@ -780,8 +821,17 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *, u16 cmdresp_no,
                                struct host_cmd_ds_command *resp);
 int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *,
                                  struct sk_buff *skb);
+int mwifiex_process_uap_rx_packet(struct mwifiex_adapter *adapter,
+                                 struct sk_buff *skb);
+int mwifiex_handle_uap_rx_forward(struct mwifiex_private *priv,
+                                 struct sk_buff *skb);
 int mwifiex_process_sta_event(struct mwifiex_private *);
+int mwifiex_process_uap_event(struct mwifiex_private *);
+struct mwifiex_sta_node *
+mwifiex_get_sta_entry(struct mwifiex_private *priv, u8 *mac);
+void mwifiex_delete_all_station_list(struct mwifiex_private *priv);
 void *mwifiex_process_sta_txpd(struct mwifiex_private *, struct sk_buff *skb);
+void *mwifiex_process_uap_txpd(struct mwifiex_private *, struct sk_buff *skb);
 int mwifiex_sta_init_cmd(struct mwifiex_private *, u8 first_sta);
 int mwifiex_cmd_802_11_scan(struct host_cmd_ds_command *cmd,
                            struct mwifiex_scan_cmd_config *scan_cfg);
@@ -840,6 +890,8 @@ int mwifiex_set_secure_params(struct mwifiex_private *priv,
 void mwifiex_set_ht_params(struct mwifiex_private *priv,
                           struct mwifiex_uap_bss_param *bss_cfg,
                           struct cfg80211_ap_settings *params);
+void mwifiex_set_uap_rates(struct mwifiex_uap_bss_param *bss_cfg,
+                          struct cfg80211_ap_settings *params);
 
 /*
  * This function checks if the queuing is RA based or not.
@@ -925,6 +977,14 @@ mwifiex_netdev_get_priv(struct net_device *dev)
        return (struct mwifiex_private *) (*(unsigned long *) netdev_priv(dev));
 }
 
+/*
+ * This function checks if a skb holds a management frame.
+ */
+static inline bool mwifiex_is_skb_mgmt_frame(struct sk_buff *skb)
+{
+       return (*(u32 *)skb->data == PKT_TYPE_MGMT);
+}
+
 int mwifiex_init_shutdown_fw(struct mwifiex_private *priv,
                             u32 func_init_shutdown);
 int mwifiex_add_card(void *, struct semaphore *, struct mwifiex_if_ops *, u8);
@@ -949,14 +1009,21 @@ int mwifiex_scan_networks(struct mwifiex_private *priv,
                          const struct mwifiex_user_scan_cfg *user_scan_in);
 int mwifiex_set_radio(struct mwifiex_private *priv, u8 option);
 
-int mwifiex_set_encode(struct mwifiex_private *priv, const u8 *key,
-                      int key_len, u8 key_index, const u8 *mac_addr,
-                      int disable);
+int mwifiex_set_encode(struct mwifiex_private *priv, struct key_params *kp,
+                      const u8 *key, int key_len, u8 key_index,
+                      const u8 *mac_addr, int disable);
 
 int mwifiex_set_gen_ie(struct mwifiex_private *priv, u8 *ie, int ie_len);
 
 int mwifiex_get_ver_ext(struct mwifiex_private *priv);
 
+int mwifiex_remain_on_chan_cfg(struct mwifiex_private *priv, u16 action,
+                              struct ieee80211_channel *chan,
+                              enum nl80211_channel_type *channel_type,
+                              unsigned int duration);
+
+int mwifiex_set_bss_role(struct mwifiex_private *priv, u8 bss_role);
+
 int mwifiex_get_stats_info(struct mwifiex_private *priv,
                           struct mwifiex_ds_get_stats *log);
 
@@ -987,6 +1054,8 @@ int mwifiex_set_tx_power(struct mwifiex_private *priv,
 
 int mwifiex_main_process(struct mwifiex_adapter *);
 
+int mwifiex_queue_tx_pkt(struct mwifiex_private *priv, struct sk_buff *skb);
+
 int mwifiex_get_bss_info(struct mwifiex_private *,
                         struct mwifiex_bss_info *);
 int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv,
@@ -997,8 +1066,10 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
 int mwifiex_check_network_compatibility(struct mwifiex_private *priv,
                                        struct mwifiex_bssdescriptor *bss_desc);
 
+u8 mwifiex_chan_type_to_sec_chan_offset(enum nl80211_channel_type chan_type);
+
 struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
-                                             char *name,
+                                             const char *name,
                                              enum nl80211_iftype type,
                                              u32 *flags,
                                              struct vif_params *params);
index 04dc7ca4ac221a3b2c54d644f2b04fed8c180852..e36a75988f877600978c083595c4844d951860c5 100644 (file)
@@ -614,9 +614,8 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv,
 
                        /* Increment the TLV header length by the size
                           appended */
-                       chan_tlv_out->header.len =
-                       cpu_to_le16(le16_to_cpu(chan_tlv_out->header.len) +
-                       (sizeof(chan_tlv_out->chan_scan_param)));
+                       le16_add_cpu(&chan_tlv_out->header.len,
+                                    sizeof(chan_tlv_out->chan_scan_param));
 
                        /*
                         * The tlv buffer length is set to the number of bytes
@@ -726,7 +725,6 @@ mwifiex_config_scan(struct mwifiex_private *priv,
        struct mwifiex_ie_types_num_probes *num_probes_tlv;
        struct mwifiex_ie_types_wildcard_ssid_params *wildcard_ssid_tlv;
        struct mwifiex_ie_types_rates_param_set *rates_tlv;
-       const u8 zero_mac[ETH_ALEN] = { 0, 0, 0, 0, 0, 0 };
        u8 *tlv_pos;
        u32 num_probes;
        u32 ssid_len;
@@ -840,8 +838,7 @@ mwifiex_config_scan(struct mwifiex_private *priv,
                 *  or BSSID filter applied to the scan results in the firmware.
                 */
                if ((i && ssid_filter) ||
-                   memcmp(scan_cfg_out->specific_bssid, &zero_mac,
-                          sizeof(zero_mac)))
+                   !is_zero_ether_addr(scan_cfg_out->specific_bssid))
                        *filtered_scan = true;
        } else {
                scan_cfg_out->bss_mode = (u8) adapter->scan_mode;
@@ -989,6 +986,8 @@ mwifiex_config_scan(struct mwifiex_private *priv,
                        *max_chan_per_scan = 2;
                else if (chan_num < MWIFIEX_LIMIT_3_CHANNELS_PER_SCAN_CMD)
                        *max_chan_per_scan = 3;
+               else
+                       *max_chan_per_scan = 4;
        }
 }
 
@@ -1433,9 +1432,9 @@ int mwifiex_check_network_compatibility(struct mwifiex_private *priv,
                        if (ret)
                                dev_err(priv->adapter->dev, "cannot find ssid "
                                        "%s\n", bss_desc->ssid.ssid);
-                               break;
+                       break;
                default:
-                               ret = 0;
+                       ret = 0;
                }
        }
 
index df3a33c530cf1a30f9a4058b2bb01d3b24777d0a..5d87195390f863a9492aadaa9ea1d51d6ba8ae8e 100644 (file)
@@ -551,7 +551,6 @@ mwifiex_cmd_802_11_key_material(struct mwifiex_private *priv,
        struct host_cmd_tlv_mac_addr *tlv_mac;
        u16 key_param_len = 0, cmd_size;
        int ret = 0;
-       const u8 bc_mac[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
 
        cmd->command = cpu_to_le16(HostCmd_CMD_802_11_KEY_MATERIAL);
        key_material->action = cpu_to_le16(cmd_action);
@@ -593,7 +592,7 @@ mwifiex_cmd_802_11_key_material(struct mwifiex_private *priv,
                        /* set 0 when re-key */
                        key_material->key_param_set.key[1] = 0;
 
-               if (0 != memcmp(enc_key->mac_addr, bc_mac, sizeof(bc_mac))) {
+               if (!is_broadcast_ether_addr(enc_key->mac_addr)) {
                        /* WAPI pairwise key: unicast */
                        key_material->key_param_set.key_info |=
                                cpu_to_le16(KEY_UNICAST);
@@ -610,7 +609,7 @@ mwifiex_cmd_802_11_key_material(struct mwifiex_private *priv,
                memcpy(&key_material->key_param_set.key[2],
                       enc_key->key_material, enc_key->key_len);
                memcpy(&key_material->key_param_set.key[2 + enc_key->key_len],
-                      enc_key->wapi_rxpn, WAPI_RXPN_LEN);
+                      enc_key->pn, PN_LEN);
                key_material->key_param_set.length =
                        cpu_to_le16(WAPI_KEY_LEN + KEYPARAMSET_FIXED_LEN);
 
@@ -621,23 +620,38 @@ mwifiex_cmd_802_11_key_material(struct mwifiex_private *priv,
                return ret;
        }
        if (enc_key->key_len == WLAN_KEY_LEN_CCMP) {
-               dev_dbg(priv->adapter->dev, "cmd: WPA_AES\n");
-               key_material->key_param_set.key_type_id =
+               if (enc_key->is_igtk_key) {
+                       dev_dbg(priv->adapter->dev, "cmd: CMAC_AES\n");
+                       key_material->key_param_set.key_type_id =
+                                       cpu_to_le16(KEY_TYPE_ID_AES_CMAC);
+                       if (cmd_oid == KEY_INFO_ENABLED)
+                               key_material->key_param_set.key_info =
+                                               cpu_to_le16(KEY_ENABLED);
+                       else
+                               key_material->key_param_set.key_info =
+                                               cpu_to_le16(!KEY_ENABLED);
+
+                       key_material->key_param_set.key_info |=
+                                                       cpu_to_le16(KEY_IGTK);
+               } else {
+                       dev_dbg(priv->adapter->dev, "cmd: WPA_AES\n");
+                       key_material->key_param_set.key_type_id =
                                                cpu_to_le16(KEY_TYPE_ID_AES);
-               if (cmd_oid == KEY_INFO_ENABLED)
-                       key_material->key_param_set.key_info =
+                       if (cmd_oid == KEY_INFO_ENABLED)
+                               key_material->key_param_set.key_info =
                                                cpu_to_le16(KEY_ENABLED);
-               else
-                       key_material->key_param_set.key_info =
+                       else
+                               key_material->key_param_set.key_info =
                                                cpu_to_le16(!KEY_ENABLED);
 
-               if (enc_key->key_index & MWIFIEX_KEY_INDEX_UNICAST)
+                       if (enc_key->key_index & MWIFIEX_KEY_INDEX_UNICAST)
                                /* AES pairwise key: unicast */
-                       key_material->key_param_set.key_info |=
+                               key_material->key_param_set.key_info |=
                                                cpu_to_le16(KEY_UNICAST);
-               else            /* AES group key: multicast */
-                       key_material->key_param_set.key_info |=
+                       else    /* AES group key: multicast */
+                               key_material->key_param_set.key_info |=
                                                        cpu_to_le16(KEY_MCAST);
+               }
        } else if (enc_key->key_len == WLAN_KEY_LEN_TKIP) {
                dev_dbg(priv->adapter->dev, "cmd: WPA_TKIP\n");
                key_material->key_param_set.key_type_id =
@@ -668,6 +682,24 @@ mwifiex_cmd_802_11_key_material(struct mwifiex_private *priv,
                key_param_len = (u16)(enc_key->key_len + KEYPARAMSET_FIXED_LEN)
                                + sizeof(struct mwifiex_ie_types_header);
 
+               if (le16_to_cpu(key_material->key_param_set.key_type_id) ==
+                                                       KEY_TYPE_ID_AES_CMAC) {
+                       struct mwifiex_cmac_param *param =
+                                       (void *)key_material->key_param_set.key;
+
+                       memcpy(param->ipn, enc_key->pn, IGTK_PN_LEN);
+                       memcpy(param->key, enc_key->key_material,
+                              WLAN_KEY_LEN_AES_CMAC);
+
+                       key_param_len = sizeof(struct mwifiex_cmac_param);
+                       key_material->key_param_set.key_len =
+                                               cpu_to_le16(key_param_len);
+                       key_param_len += KEYPARAMSET_FIXED_LEN;
+                       key_material->key_param_set.length =
+                                               cpu_to_le16(key_param_len);
+                       key_param_len += sizeof(struct mwifiex_ie_types_header);
+               }
+
                cmd->size = cpu_to_le16(sizeof(key_material->action) + S_DS_GEN
                                        + key_param_len);
 
@@ -1135,6 +1167,31 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
                                    S_DS_GEN);
                ret = 0;
                break;
+       case HostCmd_CMD_MGMT_FRAME_REG:
+               cmd_ptr->command = cpu_to_le16(cmd_no);
+               cmd_ptr->params.reg_mask.action = cpu_to_le16(cmd_action);
+               cmd_ptr->params.reg_mask.mask = cpu_to_le32(*(u32 *)data_buf);
+               cmd_ptr->size =
+                       cpu_to_le16(sizeof(struct host_cmd_ds_mgmt_frame_reg) +
+                                   S_DS_GEN);
+               ret = 0;
+               break;
+       case HostCmd_CMD_REMAIN_ON_CHAN:
+               cmd_ptr->command = cpu_to_le16(cmd_no);
+               memcpy(&cmd_ptr->params, data_buf,
+                      sizeof(struct host_cmd_ds_remain_on_chan));
+               cmd_ptr->size =
+                     cpu_to_le16(sizeof(struct host_cmd_ds_remain_on_chan) +
+                                 S_DS_GEN);
+               break;
+       case HostCmd_CMD_P2P_MODE_CFG:
+               cmd_ptr->command = cpu_to_le16(cmd_no);
+               cmd_ptr->params.mode_cfg.action = cpu_to_le16(cmd_action);
+               cmd_ptr->params.mode_cfg.mode = cpu_to_le16(*(u16 *)data_buf);
+               cmd_ptr->size =
+                       cpu_to_le16(sizeof(struct host_cmd_ds_p2p_mode_cfg) +
+                                   S_DS_GEN);
+               break;
        case HostCmd_CMD_FUNC_INIT:
                if (priv->adapter->hw_status == MWIFIEX_HW_STATUS_RESET)
                        priv->adapter->hw_status = MWIFIEX_HW_STATUS_READY;
@@ -1204,6 +1261,8 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
                else if (priv->bss_mode == NL80211_IFTYPE_STATION)
                        cmd_ptr->params.bss_mode.con_type =
                                CONNECTION_TYPE_INFRA;
+               else if (priv->bss_mode == NL80211_IFTYPE_AP)
+                       cmd_ptr->params.bss_mode.con_type = CONNECTION_TYPE_AP;
                cmd_ptr->size = cpu_to_le16(sizeof(struct
                                host_cmd_ds_set_bss_mode) + S_DS_GEN);
                ret = 0;
@@ -1253,35 +1312,35 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
 
        if (first_sta) {
                if (priv->adapter->iface_type == MWIFIEX_PCIE) {
-                       ret = mwifiex_send_cmd_async(priv,
+                       ret = mwifiex_send_cmd_sync(priv,
                                                HostCmd_CMD_PCIE_DESC_DETAILS,
                                                HostCmd_ACT_GEN_SET, 0, NULL);
                        if (ret)
                                return -1;
                }
 
-               ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_FUNC_INIT,
-                                            HostCmd_ACT_GEN_SET, 0, NULL);
+               ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_FUNC_INIT,
+                                           HostCmd_ACT_GEN_SET, 0, NULL);
                if (ret)
                        return -1;
                /* Read MAC address from HW */
-               ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_GET_HW_SPEC,
-                                            HostCmd_ACT_GEN_GET, 0, NULL);
+               ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_GET_HW_SPEC,
+                                           HostCmd_ACT_GEN_GET, 0, NULL);
                if (ret)
                        return -1;
 
                /* Reconfigure tx buf size */
-               ret = mwifiex_send_cmd_async(priv,
-                                            HostCmd_CMD_RECONFIGURE_TX_BUFF,
-                                            HostCmd_ACT_GEN_SET, 0,
-                                            &priv->adapter->tx_buf_size);
+               ret = mwifiex_send_cmd_sync(priv,
+                                           HostCmd_CMD_RECONFIGURE_TX_BUFF,
+                                           HostCmd_ACT_GEN_SET, 0,
+                                           &priv->adapter->tx_buf_size);
                if (ret)
                        return -1;
 
                if (priv->bss_type != MWIFIEX_BSS_TYPE_UAP) {
                        /* Enable IEEE PS by default */
                        priv->adapter->ps_mode = MWIFIEX_802_11_POWER_MODE_PSP;
-                       ret = mwifiex_send_cmd_async(
+                       ret = mwifiex_send_cmd_sync(
                                        priv, HostCmd_CMD_802_11_PS_MODE_ENH,
                                        EN_AUTO_PS, BITMAP_STA_PS, NULL);
                        if (ret)
@@ -1290,21 +1349,21 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
        }
 
        /* get tx rate */
-       ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_TX_RATE_CFG,
-                                    HostCmd_ACT_GEN_GET, 0, NULL);
+       ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_TX_RATE_CFG,
+                                   HostCmd_ACT_GEN_GET, 0, NULL);
        if (ret)
                return -1;
        priv->data_rate = 0;
 
        /* get tx power */
-       ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_RF_TX_PWR,
-                                    HostCmd_ACT_GEN_GET, 0, NULL);
+       ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_RF_TX_PWR,
+                                   HostCmd_ACT_GEN_GET, 0, NULL);
        if (ret)
                return -1;
 
        if (priv->bss_type == MWIFIEX_BSS_TYPE_STA) {
                /* set ibss coalescing_status */
-               ret = mwifiex_send_cmd_async(
+               ret = mwifiex_send_cmd_sync(
                                priv, HostCmd_CMD_802_11_IBSS_COALESCING_STATUS,
                                HostCmd_ACT_GEN_SET, 0, &enable);
                if (ret)
@@ -1314,16 +1373,16 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
        memset(&amsdu_aggr_ctrl, 0, sizeof(amsdu_aggr_ctrl));
        amsdu_aggr_ctrl.enable = true;
        /* Send request to firmware */
-       ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_AMSDU_AGGR_CTRL,
-                                    HostCmd_ACT_GEN_SET, 0,
-                                    &amsdu_aggr_ctrl);
+       ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_AMSDU_AGGR_CTRL,
+                                   HostCmd_ACT_GEN_SET, 0,
+                                   &amsdu_aggr_ctrl);
        if (ret)
                return -1;
        /* MAC Control must be the last command in init_fw */
        /* set MAC Control */
-       ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_MAC_CONTROL,
-                                    HostCmd_ACT_GEN_SET, 0,
-                                    &priv->curr_pkt_filter);
+       ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_MAC_CONTROL,
+                                   HostCmd_ACT_GEN_SET, 0,
+                                   &priv->curr_pkt_filter);
        if (ret)
                return -1;
 
@@ -1332,10 +1391,10 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
                /* Enable auto deep sleep */
                auto_ds.auto_ds = DEEP_SLEEP_ON;
                auto_ds.idle_time = DEEP_SLEEP_IDLE_TIME;
-               ret = mwifiex_send_cmd_async(priv,
-                                            HostCmd_CMD_802_11_PS_MODE_ENH,
-                                            EN_AUTO_PS, BITMAP_AUTO_DS,
-                                            &auto_ds);
+               ret = mwifiex_send_cmd_sync(priv,
+                                           HostCmd_CMD_802_11_PS_MODE_ENH,
+                                           EN_AUTO_PS, BITMAP_AUTO_DS,
+                                           &auto_ds);
                if (ret)
                        return -1;
        }
@@ -1343,23 +1402,24 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
        if (priv->bss_type != MWIFIEX_BSS_TYPE_UAP) {
                /* Send cmd to FW to enable/disable 11D function */
                state_11d = ENABLE_11D;
-               ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_802_11_SNMP_MIB,
-                                            HostCmd_ACT_GEN_SET, DOT11D_I,
-                                            &state_11d);
+               ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_SNMP_MIB,
+                                           HostCmd_ACT_GEN_SET, DOT11D_I,
+                                           &state_11d);
                if (ret)
                        dev_err(priv->adapter->dev,
                                "11D: failed to enable 11D\n");
        }
 
+       /* set last_init_cmd before sending the command */
+       priv->adapter->last_init_cmd = HostCmd_CMD_11N_CFG;
+
        /* Send cmd to FW to configure 11n specific configuration
         * (Short GI, Channel BW, Green field support etc.) for transmit
         */
        tx_cfg.tx_htcap = MWIFIEX_FW_DEF_HTTXCFG;
-       ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_11N_CFG,
-                                    HostCmd_ACT_GEN_SET, 0, &tx_cfg);
+       ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_11N_CFG,
+                                   HostCmd_ACT_GEN_SET, 0, &tx_cfg);
 
-       /* set last_init_cmd */
-       priv->adapter->last_init_cmd = HostCmd_CMD_11N_CFG;
        ret = -EINPROGRESS;
 
        return ret;
index 0b09004ebb25a3eebf6f5fe8f85bfd9c196f351c..e380171c4c5dd08918669863d4a756894e46a2e9 100644 (file)
@@ -123,7 +123,8 @@ static int mwifiex_ret_802_11_rssi_info(struct mwifiex_private *priv,
 {
        struct host_cmd_ds_802_11_rssi_info_rsp *rssi_info_rsp =
                                                &resp->params.rssi_info_rsp;
-       struct mwifiex_ds_misc_subsc_evt subsc_evt;
+       struct mwifiex_ds_misc_subsc_evt *subsc_evt =
+                                               &priv->async_subsc_evt_storage;
 
        priv->data_rssi_last = le16_to_cpu(rssi_info_rsp->data_rssi_last);
        priv->data_nf_last = le16_to_cpu(rssi_info_rsp->data_nf_last);
@@ -140,26 +141,27 @@ static int mwifiex_ret_802_11_rssi_info(struct mwifiex_private *priv,
        if (priv->subsc_evt_rssi_state == EVENT_HANDLED)
                return 0;
 
+       memset(subsc_evt, 0x00, sizeof(struct mwifiex_ds_misc_subsc_evt));
+
        /* Resubscribe low and high rssi events with new thresholds */
-       memset(&subsc_evt, 0x00, sizeof(struct mwifiex_ds_misc_subsc_evt));
-       subsc_evt.events = BITMASK_BCN_RSSI_LOW | BITMASK_BCN_RSSI_HIGH;
-       subsc_evt.action = HostCmd_ACT_BITWISE_SET;
+       subsc_evt->events = BITMASK_BCN_RSSI_LOW | BITMASK_BCN_RSSI_HIGH;
+       subsc_evt->action = HostCmd_ACT_BITWISE_SET;
        if (priv->subsc_evt_rssi_state == RSSI_LOW_RECVD) {
-               subsc_evt.bcn_l_rssi_cfg.abs_value = abs(priv->bcn_rssi_avg -
+               subsc_evt->bcn_l_rssi_cfg.abs_value = abs(priv->bcn_rssi_avg -
                                priv->cqm_rssi_hyst);
-               subsc_evt.bcn_h_rssi_cfg.abs_value = abs(priv->cqm_rssi_thold);
+               subsc_evt->bcn_h_rssi_cfg.abs_value = abs(priv->cqm_rssi_thold);
        } else if (priv->subsc_evt_rssi_state == RSSI_HIGH_RECVD) {
-               subsc_evt.bcn_l_rssi_cfg.abs_value = abs(priv->cqm_rssi_thold);
-               subsc_evt.bcn_h_rssi_cfg.abs_value = abs(priv->bcn_rssi_avg +
+               subsc_evt->bcn_l_rssi_cfg.abs_value = abs(priv->cqm_rssi_thold);
+               subsc_evt->bcn_h_rssi_cfg.abs_value = abs(priv->bcn_rssi_avg +
                                priv->cqm_rssi_hyst);
        }
-       subsc_evt.bcn_l_rssi_cfg.evt_freq = 1;
-       subsc_evt.bcn_h_rssi_cfg.evt_freq = 1;
+       subsc_evt->bcn_l_rssi_cfg.evt_freq = 1;
+       subsc_evt->bcn_h_rssi_cfg.evt_freq = 1;
 
        priv->subsc_evt_rssi_state = EVENT_HANDLED;
 
        mwifiex_send_cmd_async(priv, HostCmd_CMD_802_11_SUBSCRIBE_EVENT,
-                              0, 0, &subsc_evt);
+                              0, 0, subsc_evt);
 
        return 0;
 }
@@ -651,6 +653,38 @@ static int mwifiex_ret_ver_ext(struct mwifiex_private *priv,
        return 0;
 }
 
+/*
+ * This function handles the command response of remain on channel.
+ */
+static int
+mwifiex_ret_remain_on_chan(struct mwifiex_private *priv,
+                          struct host_cmd_ds_command *resp,
+                          struct host_cmd_ds_remain_on_chan *roc_cfg)
+{
+       struct host_cmd_ds_remain_on_chan *resp_cfg = &resp->params.roc_cfg;
+
+       if (roc_cfg)
+               memcpy(roc_cfg, resp_cfg, sizeof(*roc_cfg));
+
+       return 0;
+}
+
+/*
+ * This function handles the command response of P2P mode cfg.
+ */
+static int
+mwifiex_ret_p2p_mode_cfg(struct mwifiex_private *priv,
+                        struct host_cmd_ds_command *resp,
+                        void *data_buf)
+{
+       struct host_cmd_ds_p2p_mode_cfg *mode_cfg = &resp->params.mode_cfg;
+
+       if (data_buf)
+               *((u16 *)data_buf) = le16_to_cpu(mode_cfg->mode);
+
+       return 0;
+}
+
 /*
  * This function handles the command response of register access.
  *
@@ -736,7 +770,6 @@ static int mwifiex_ret_ibss_coalescing_status(struct mwifiex_private *priv,
 {
        struct host_cmd_ds_802_11_ibss_status *ibss_coal_resp =
                                        &(resp->params.ibss_coalescing);
-       u8 zero_mac[ETH_ALEN] = { 0, 0, 0, 0, 0, 0 };
 
        if (le16_to_cpu(ibss_coal_resp->action) == HostCmd_ACT_GEN_SET)
                return 0;
@@ -745,7 +778,7 @@ static int mwifiex_ret_ibss_coalescing_status(struct mwifiex_private *priv,
                "info: new BSSID %pM\n", ibss_coal_resp->bssid);
 
        /* If rsp has NULL BSSID, Just return..... No Action */
-       if (!memcmp(ibss_coal_resp->bssid, zero_mac, ETH_ALEN)) {
+       if (is_zero_ether_addr(ibss_coal_resp->bssid)) {
                dev_warn(priv->adapter->dev, "new BSSID is NULL\n");
                return 0;
        }
@@ -775,8 +808,7 @@ static int mwifiex_ret_ibss_coalescing_status(struct mwifiex_private *priv,
  * This function handles the command response for subscribe event command.
  */
 static int mwifiex_ret_subsc_evt(struct mwifiex_private *priv,
-                                struct host_cmd_ds_command *resp,
-                                struct mwifiex_ds_misc_subsc_evt *sub_event)
+                                struct host_cmd_ds_command *resp)
 {
        struct host_cmd_ds_802_11_subsc_evt *cmd_sub_event =
                &resp->params.subsc_evt;
@@ -786,10 +818,6 @@ static int mwifiex_ret_subsc_evt(struct mwifiex_private *priv,
        dev_dbg(priv->adapter->dev, "Bitmap of currently subscribed events: %16x\n",
                le16_to_cpu(cmd_sub_event->events));
 
-       /*Return the subscribed event info for a Get request*/
-       if (sub_event)
-               sub_event->events = le16_to_cpu(cmd_sub_event->events);
-
        return 0;
 }
 
@@ -879,6 +907,13 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
        case HostCmd_CMD_VERSION_EXT:
                ret = mwifiex_ret_ver_ext(priv, resp, data_buf);
                break;
+       case HostCmd_CMD_REMAIN_ON_CHAN:
+               ret = mwifiex_ret_remain_on_chan(priv, resp, data_buf);
+               break;
+       case HostCmd_CMD_P2P_MODE_CFG:
+               ret = mwifiex_ret_p2p_mode_cfg(priv, resp, data_buf);
+               break;
+       case HostCmd_CMD_MGMT_FRAME_REG:
        case HostCmd_CMD_FUNC_INIT:
        case HostCmd_CMD_FUNC_SHUTDOWN:
                break;
@@ -913,7 +948,6 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
                                le16_to_cpu(resp->params.tx_buf.mp_end_port));
                break;
        case HostCmd_CMD_AMSDU_AGGR_CTRL:
-               ret = mwifiex_ret_amsdu_aggr_ctrl(resp, data_buf);
                break;
        case HostCmd_CMD_WMM_GET_STATUS:
                ret = mwifiex_ret_wmm_get_status(priv, resp);
@@ -932,12 +966,11 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
        case HostCmd_CMD_SET_BSS_MODE:
                break;
        case HostCmd_CMD_11N_CFG:
-               ret = mwifiex_ret_11n_cfg(resp, data_buf);
                break;
        case HostCmd_CMD_PCIE_DESC_DETAILS:
                break;
        case HostCmd_CMD_802_11_SUBSCRIBE_EVENT:
-               ret = mwifiex_ret_subsc_evt(priv, resp, data_buf);
+               ret = mwifiex_ret_subsc_evt(priv, resp);
                break;
        case HostCmd_CMD_UAP_SYS_CONFIG:
                break;
index b8614a82546072a25099994768ade4bcad28e02a..aafde30e714aa97e40ecbbedc86a39fb08b26866 100644 (file)
@@ -184,10 +184,9 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv)
 int mwifiex_process_sta_event(struct mwifiex_private *priv)
 {
        struct mwifiex_adapter *adapter = priv->adapter;
-       int len, ret = 0;
+       int ret = 0;
        u32 eventcause = adapter->event_cause;
-       struct station_info sinfo;
-       struct mwifiex_assoc_event *event;
+       u16 ctrl;
 
        switch (eventcause) {
        case EVENT_DUMMY_HOST_WAKEUP_SIGNAL:
@@ -279,10 +278,16 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
 
        case EVENT_MIC_ERR_UNICAST:
                dev_dbg(adapter->dev, "event: UNICAST MIC ERROR\n");
+               cfg80211_michael_mic_failure(priv->netdev, priv->cfg_bssid,
+                                            NL80211_KEYTYPE_PAIRWISE,
+                                            -1, NULL, GFP_KERNEL);
                break;
 
        case EVENT_MIC_ERR_MULTICAST:
                dev_dbg(adapter->dev, "event: MULTICAST MIC ERROR\n");
+               cfg80211_michael_mic_failure(priv->netdev, priv->cfg_bssid,
+                                            NL80211_KEYTYPE_GROUP,
+                                            -1, NULL, GFP_KERNEL);
                break;
        case EVENT_MIB_CHANGED:
        case EVENT_INIT_DONE:
@@ -384,11 +389,11 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
                                              adapter->event_body);
                break;
        case EVENT_AMSDU_AGGR_CTRL:
-               dev_dbg(adapter->dev, "event:  AMSDU_AGGR_CTRL %d\n",
-                       *(u16 *) adapter->event_body);
+               ctrl = le16_to_cpu(*(__le16 *)adapter->event_body);
+               dev_dbg(adapter->dev, "event: AMSDU_AGGR_CTRL %d\n", ctrl);
+
                adapter->tx_buf_size =
-                       min(adapter->curr_tx_buf_size,
-                           le16_to_cpu(*(__le16 *) adapter->event_body));
+                               min_t(u16, adapter->curr_tx_buf_size, ctrl);
                dev_dbg(adapter->dev, "event: tx_buf_size %d\n",
                        adapter->tx_buf_size);
                break;
@@ -405,51 +410,18 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
                dev_dbg(adapter->dev, "event: HOSTWAKE_STAIE %d\n", eventcause);
                break;
 
-       case EVENT_UAP_STA_ASSOC:
-               memset(&sinfo, 0, sizeof(sinfo));
-               event = (struct mwifiex_assoc_event *)
-                       (adapter->event_body + MWIFIEX_UAP_EVENT_EXTRA_HEADER);
-               if (le16_to_cpu(event->type) == TLV_TYPE_UAP_MGMT_FRAME) {
-                       len = -1;
-
-                       if (ieee80211_is_assoc_req(event->frame_control))
-                               len = 0;
-                       else if (ieee80211_is_reassoc_req(event->frame_control))
-                               /* There will be ETH_ALEN bytes of
-                                * current_ap_addr before the re-assoc ies.
-                                */
-                               len = ETH_ALEN;
-
-                       if (len != -1) {
-                               sinfo.filled = STATION_INFO_ASSOC_REQ_IES;
-                               sinfo.assoc_req_ies = &event->data[len];
-                               len = (u8 *)sinfo.assoc_req_ies -
-                                     (u8 *)&event->frame_control;
-                               sinfo.assoc_req_ies_len =
-                                       le16_to_cpu(event->len) - (u16)len;
-                       }
-               }
-               cfg80211_new_sta(priv->netdev, event->sta_addr, &sinfo,
-                                GFP_KERNEL);
-               break;
-       case EVENT_UAP_STA_DEAUTH:
-               cfg80211_del_sta(priv->netdev, adapter->event_body +
-                                MWIFIEX_UAP_EVENT_EXTRA_HEADER, GFP_KERNEL);
-               break;
-       case EVENT_UAP_BSS_IDLE:
-               priv->media_connected = false;
-               break;
-       case EVENT_UAP_BSS_ACTIVE:
-               priv->media_connected = true;
-               break;
-       case EVENT_UAP_BSS_START:
-               dev_dbg(adapter->dev, "AP EVENT: event id: %#x\n", eventcause);
-               memcpy(priv->netdev->dev_addr, adapter->event_body+2, ETH_ALEN);
-               break;
-       case EVENT_UAP_MIC_COUNTERMEASURES:
-               /* For future development */
-               dev_dbg(adapter->dev, "AP EVENT: event id: %#x\n", eventcause);
+       case EVENT_REMAIN_ON_CHAN_EXPIRED:
+               dev_dbg(adapter->dev, "event: Remain on channel expired\n");
+               cfg80211_remain_on_channel_expired(priv->wdev,
+                                                  priv->roc_cfg.cookie,
+                                                  &priv->roc_cfg.chan,
+                                                  priv->roc_cfg.chan_type,
+                                                  GFP_ATOMIC);
+
+               memset(&priv->roc_cfg, 0x00, sizeof(struct mwifiex_roc_cfg));
+
                break;
+
        default:
                dev_dbg(adapter->dev, "event: unknown event id: %#x\n",
                        eventcause);
index fb2136089a2241318a0a697461dc02e0ec240dd6..0c9f70b2cbe61de8b77084fb71135bc7fda36f00 100644 (file)
@@ -26,6 +26,9 @@
 #include "11n.h"
 #include "cfg80211.h"
 
+static int disconnect_on_suspend = 1;
+module_param(disconnect_on_suspend, int, 0644);
+
 /*
  * Copies the multicast address list from device to driver.
  *
@@ -192,6 +195,44 @@ int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv,
        return ret;
 }
 
+/* Parse the country IE of the given BSS and push the resulting 802.11d
+ * domain information to firmware.
+ *
+ * Returns 0 when no (usable) country IE is present or the command is
+ * issued successfully, -1 when the async firmware command fails.
+ */
+static int mwifiex_process_country_ie(struct mwifiex_private *priv,
+                                     struct cfg80211_bss *bss)
+{
+       u8 *country_ie, country_ie_len;
+       struct mwifiex_802_11d_domain_reg *domain_info =
+                                       &priv->adapter->domain_reg;
+
+       country_ie = (u8 *)ieee80211_bss_get_ie(bss, WLAN_EID_COUNTRY);
+
+       if (!country_ie)
+               return 0;
+
+       /* country_ie[1] is the IE payload length (bytes after the
+        * two-byte type/length header).
+        */
+       country_ie_len = country_ie[1];
+       if (country_ie_len < IEEE80211_COUNTRY_IE_MIN_LEN)
+               return 0;
+
+       /* Country string is two ASCII letters plus an environment byte;
+        * the environment byte is replaced with a space for firmware.
+        */
+       domain_info->country_code[0] = country_ie[2];
+       domain_info->country_code[1] = country_ie[3];
+       domain_info->country_code[2] = ' ';
+
+       country_ie_len -= IEEE80211_COUNTRY_STRING_LEN;
+
+       domain_info->no_of_triplet =
+               country_ie_len / sizeof(struct ieee80211_country_ie_triplet);
+
+       /* NOTE(review): there is no upper-bound check on country_ie_len
+        * before this copy; a peer-supplied IE longer than the triplet
+        * array inside mwifiex_802_11d_domain_reg would overflow it —
+        * confirm the array size and clamp if needed.
+        */
+       memcpy((u8 *)domain_info->triplet,
+              &country_ie[2] + IEEE80211_COUNTRY_STRING_LEN, country_ie_len);
+
+       if (mwifiex_send_cmd_async(priv, HostCmd_CMD_802_11D_DOMAIN_INFO,
+                                  HostCmd_ACT_GEN_SET, 0, NULL)) {
+               wiphy_err(priv->adapter->wiphy,
+                         "11D: setting domain info in FW\n");
+               return -1;
+       }
+
+       return 0;
+}
+
 /*
  * In Ad-Hoc mode, the IBSS is created if not found in scan list.
  * In both Ad-Hoc and infra mode, an deauthentication is performed
@@ -207,6 +248,8 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
        priv->scan_block = false;
 
        if (bss) {
+               mwifiex_process_country_ie(priv, bss);
+
                /* Allocate and fill new bss descriptor */
                bss_desc = kzalloc(sizeof(struct mwifiex_bssdescriptor),
                                GFP_KERNEL);
@@ -408,6 +451,16 @@ EXPORT_SYMBOL_GPL(mwifiex_cancel_hs);
 int mwifiex_enable_hs(struct mwifiex_adapter *adapter)
 {
        struct mwifiex_ds_hs_cfg hscfg;
+       struct mwifiex_private *priv;
+       int i;
+
+       if (disconnect_on_suspend) {
+               for (i = 0; i < adapter->priv_num; i++) {
+                       priv = adapter->priv[i];
+                       if (priv)
+                               mwifiex_deauthenticate(priv, NULL);
+               }
+       }
 
        if (adapter->hs_activated) {
                dev_dbg(adapter->dev, "cmd: HS Already actived\n");
@@ -942,20 +995,26 @@ mwifiex_drv_get_driver_version(struct mwifiex_adapter *adapter, char *version,
  * This function allocates the IOCTL request buffer, fills it
  * with requisite parameters and calls the IOCTL handler.
  */
-int mwifiex_set_encode(struct mwifiex_private *priv, const u8 *key,
-                       int key_len, u8 key_index,
-                       const u8 *mac_addr, int disable)
+int mwifiex_set_encode(struct mwifiex_private *priv, struct key_params *kp,
+                      const u8 *key, int key_len, u8 key_index,
+                      const u8 *mac_addr, int disable)
 {
        struct mwifiex_ds_encrypt_key encrypt_key;
 
        memset(&encrypt_key, 0, sizeof(struct mwifiex_ds_encrypt_key));
        encrypt_key.key_len = key_len;
+
+       if (kp && kp->cipher == WLAN_CIPHER_SUITE_AES_CMAC)
+               encrypt_key.is_igtk_key = true;
+
        if (!disable) {
                encrypt_key.key_index = key_index;
                if (key_len)
                        memcpy(encrypt_key.key_material, key, key_len);
                if (mac_addr)
                        memcpy(encrypt_key.mac_addr, mac_addr, ETH_ALEN);
+               if (kp && kp->seq && kp->seq_len)
+                       memcpy(encrypt_key.pn, kp->seq, kp->seq_len);
        } else {
                encrypt_key.key_disable = true;
                if (mac_addr)
@@ -984,6 +1043,65 @@ mwifiex_get_ver_ext(struct mwifiex_private *priv)
        return 0;
 }
 
+/* Configure (or cancel) a firmware remain-on-channel period.
+ *
+ * @action:   HostCmd_ACT_GEN_SET to program channel/duration; other
+ *            actions send the command with only the action field set.
+ * @chan/@ct: channel and channel type, used only for the SET action.
+ * @duration: dwell time, SET action only.
+ *
+ * Returns the status byte from roc_cfg on success, -1 if the
+ * synchronous command fails.  (The response is presumably copied back
+ * into roc_cfg by the command path — confirm before relying on fields
+ * other than status.)
+ */
+int
+mwifiex_remain_on_chan_cfg(struct mwifiex_private *priv, u16 action,
+                          struct ieee80211_channel *chan,
+                          enum nl80211_channel_type *ct,
+                          unsigned int duration)
+{
+       struct host_cmd_ds_remain_on_chan roc_cfg;
+       u8 sc;
+
+       memset(&roc_cfg, 0, sizeof(roc_cfg));
+       roc_cfg.action = cpu_to_le16(action);
+       if (action == HostCmd_ACT_GEN_SET) {
+               /* band in the low bits, secondary channel offset packed
+                * from bit 2 upward
+                */
+               roc_cfg.band_cfg = chan->band;
+               sc = mwifiex_chan_type_to_sec_chan_offset(*ct);
+               roc_cfg.band_cfg |= (sc << 2);
+
+               roc_cfg.channel =
+                       ieee80211_frequency_to_channel(chan->center_freq);
+               roc_cfg.duration = cpu_to_le32(duration);
+       }
+       if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_REMAIN_ON_CHAN,
+                                 action, 0, &roc_cfg)) {
+               dev_err(priv->adapter->dev, "failed to remain on channel\n");
+               return -1;
+       }
+
+       return roc_cfg.status;
+}
+
+/* Switch a private interface between STA and uAP roles.
+ *
+ * The priv state is freed and re-initialised, so any existing
+ * connection state on this interface is lost.  Returns 0 if the
+ * interface is already in the requested role, otherwise the result of
+ * re-running the station init command sequence.
+ */
+int
+mwifiex_set_bss_role(struct mwifiex_private *priv, u8 bss_role)
+{
+       if (GET_BSS_ROLE(priv) == bss_role) {
+               dev_dbg(priv->adapter->dev,
+                       "info: already in the desired role.\n");
+               return 0;
+       }
+
+       mwifiex_free_priv(priv);
+       mwifiex_init_priv(priv);
+
+       priv->bss_role = bss_role;
+       switch (bss_role) {
+       case MWIFIEX_BSS_ROLE_UAP:
+               priv->bss_mode = NL80211_IFTYPE_AP;
+               break;
+       case MWIFIEX_BSS_ROLE_STA:
+       case MWIFIEX_BSS_ROLE_ANY:
+       default:
+               priv->bss_mode = NL80211_IFTYPE_STATION;
+               break;
+       }
+
+       /* NOTE(review): the SET_BSS_MODE command result is ignored here. */
+       mwifiex_send_cmd_sync(priv, HostCmd_CMD_SET_BSS_MODE,
+                             HostCmd_ACT_GEN_SET, 0, NULL);
+
+       return mwifiex_sta_init_cmd(priv, false);
+}
+
 /*
  * Sends IOCTL request to get statistics information.
  *
index 02ce3b77d3e772c4e4cde5615e806be500c4ef43..07d32b73783ea2cdd0859d80849eed1eb4a94804 100644 (file)
@@ -54,8 +54,8 @@ int mwifiex_process_rx_packet(struct mwifiex_adapter *adapter,
 
        local_rx_pd = (struct rxpd *) (skb->data);
 
-       rx_pkt_hdr = (struct rx_packet_hdr *) ((u8 *) local_rx_pd +
-                               local_rx_pd->rx_pkt_offset);
+       rx_pkt_hdr = (void *)local_rx_pd +
+                    le16_to_cpu(local_rx_pd->rx_pkt_offset);
 
        if (!memcmp(&rx_pkt_hdr->rfc1042_hdr,
                    rfc1042_eth_hdr, sizeof(rfc1042_eth_hdr))) {
@@ -125,7 +125,7 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *adapter,
        struct mwifiex_rxinfo *rx_info = MWIFIEX_SKB_RXCB(skb);
        struct rx_packet_hdr *rx_pkt_hdr;
        u8 ta[ETH_ALEN];
-       u16 rx_pkt_type;
+       u16 rx_pkt_type, rx_pkt_offset, rx_pkt_length, seq_num;
        struct mwifiex_private *priv =
                        mwifiex_get_priv_by_id(adapter, rx_info->bss_num,
                                               rx_info->bss_type);
@@ -134,16 +134,17 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *adapter,
                return -1;
 
        local_rx_pd = (struct rxpd *) (skb->data);
-       rx_pkt_type = local_rx_pd->rx_pkt_type;
+       rx_pkt_type = le16_to_cpu(local_rx_pd->rx_pkt_type);
+       rx_pkt_offset = le16_to_cpu(local_rx_pd->rx_pkt_offset);
+       rx_pkt_length = le16_to_cpu(local_rx_pd->rx_pkt_length);
+       seq_num = le16_to_cpu(local_rx_pd->seq_num);
 
-       rx_pkt_hdr = (struct rx_packet_hdr *) ((u8 *) local_rx_pd +
-                                       local_rx_pd->rx_pkt_offset);
+       rx_pkt_hdr = (void *)local_rx_pd + rx_pkt_offset;
 
-       if ((local_rx_pd->rx_pkt_offset + local_rx_pd->rx_pkt_length) >
-           (u16) skb->len) {
-               dev_err(adapter->dev, "wrong rx packet: len=%d,"
-                       " rx_pkt_offset=%d, rx_pkt_length=%d\n", skb->len,
-                      local_rx_pd->rx_pkt_offset, local_rx_pd->rx_pkt_length);
+       if ((rx_pkt_offset + rx_pkt_length) > (u16) skb->len) {
+               dev_err(adapter->dev,
+                       "wrong rx packet: len=%d, rx_pkt_offset=%d, rx_pkt_length=%d\n",
+                       skb->len, rx_pkt_offset, rx_pkt_length);
                priv->stats.rx_dropped++;
 
                if (adapter->if_ops.data_complete)
@@ -154,14 +155,14 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *adapter,
                return ret;
        }
 
-       if (local_rx_pd->rx_pkt_type == PKT_TYPE_AMSDU) {
+       if (rx_pkt_type == PKT_TYPE_AMSDU) {
                struct sk_buff_head list;
                struct sk_buff *rx_skb;
 
                __skb_queue_head_init(&list);
 
-               skb_pull(skb, local_rx_pd->rx_pkt_offset);
-               skb_trim(skb, local_rx_pd->rx_pkt_length);
+               skb_pull(skb, rx_pkt_offset);
+               skb_trim(skb, rx_pkt_length);
 
                ieee80211_amsdu_to_8023s(skb, &list, priv->curr_addr,
                                         priv->wdev->iftype, 0, false);
@@ -173,6 +174,12 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *adapter,
                                dev_err(adapter->dev, "Rx of A-MSDU failed");
                }
                return 0;
+       } else if (rx_pkt_type == PKT_TYPE_MGMT) {
+               ret = mwifiex_process_mgmt_packet(adapter, skb);
+               if (ret)
+                       dev_err(adapter->dev, "Rx of mgmt packet failed");
+               dev_kfree_skb_any(skb);
+               return ret;
        }
 
        /*
@@ -189,17 +196,14 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *adapter,
                memcpy(ta, rx_pkt_hdr->eth803_hdr.h_source, ETH_ALEN);
        } else {
                if (rx_pkt_type != PKT_TYPE_BAR)
-                       priv->rx_seq[local_rx_pd->priority] =
-                                               local_rx_pd->seq_num;
+                       priv->rx_seq[local_rx_pd->priority] = seq_num;
                memcpy(ta, priv->curr_bss_params.bss_descriptor.mac_address,
                       ETH_ALEN);
        }
 
        /* Reorder and send to OS */
-       ret = mwifiex_11n_rx_reorder_pkt(priv, local_rx_pd->seq_num,
-                                            local_rx_pd->priority, ta,
-                                            (u8) local_rx_pd->rx_pkt_type,
-                                            skb);
+       ret = mwifiex_11n_rx_reorder_pkt(priv, seq_num, local_rx_pd->priority,
+                                        ta, (u8) rx_pkt_type, skb);
 
        if (ret || (rx_pkt_type == PKT_TYPE_BAR)) {
                if (adapter->if_ops.data_complete)
index 0a046d3a0c16d62effecfe17b9633369c7c6c3c8..7b581af24f5f6479ac185430be2bb35306e53c0e 100644 (file)
@@ -48,6 +48,7 @@ void *mwifiex_process_sta_txpd(struct mwifiex_private *priv,
        struct txpd *local_tx_pd;
        struct mwifiex_txinfo *tx_info = MWIFIEX_SKB_TXCB(skb);
        u8 pad;
+       u16 pkt_type, pkt_offset;
 
        if (!skb->len) {
                dev_err(adapter->dev, "Tx: bad packet length: %d\n", skb->len);
@@ -55,6 +56,8 @@ void *mwifiex_process_sta_txpd(struct mwifiex_private *priv,
                return skb->data;
        }
 
+       pkt_type = mwifiex_is_skb_mgmt_frame(skb) ? PKT_TYPE_MGMT : 0;
+
        /* If skb->data is not aligned; add padding */
        pad = (4 - (((void *)skb->data - NULL) & 0x3)) % 4;
 
@@ -93,7 +96,14 @@ void *mwifiex_process_sta_txpd(struct mwifiex_private *priv,
        }
 
        /* Offset of actual data */
-       local_tx_pd->tx_pkt_offset = cpu_to_le16(sizeof(struct txpd) + pad);
+       pkt_offset = sizeof(struct txpd) + pad;
+       if (pkt_type == PKT_TYPE_MGMT) {
+               /* Set the packet type and add header for management frame */
+               local_tx_pd->tx_pkt_type = cpu_to_le16(pkt_type);
+               pkt_offset += MWIFIEX_MGMT_FRAME_HEADER_SIZE;
+       }
+
+       local_tx_pd->tx_pkt_offset = cpu_to_le16(pkt_offset);
 
        /* make space for INTF_HEADER_LEN */
        skb_push(skb, INTF_HEADER_LEN);
index cecb27283196150afdcecc56c451fa396456891f..2af263992e83a23ff30bb6558f85204105bb4180 100644 (file)
@@ -51,6 +51,9 @@ int mwifiex_handle_rx_packet(struct mwifiex_adapter *adapter,
        rx_info->bss_num = priv->bss_num;
        rx_info->bss_type = priv->bss_type;
 
+       if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
+               return mwifiex_process_uap_rx_packet(adapter, skb);
+
        return mwifiex_process_sta_rx_packet(adapter, skb);
 }
 EXPORT_SYMBOL_GPL(mwifiex_handle_rx_packet);
@@ -72,7 +75,11 @@ int mwifiex_process_tx(struct mwifiex_private *priv, struct sk_buff *skb,
        u8 *head_ptr;
        struct txpd *local_tx_pd = NULL;
 
-       head_ptr = mwifiex_process_sta_txpd(priv, skb);
+       if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
+               head_ptr = mwifiex_process_uap_txpd(priv, skb);
+       else
+               head_ptr = mwifiex_process_sta_txpd(priv, skb);
+
        if (head_ptr) {
                if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA)
                        local_tx_pd =
@@ -157,6 +164,8 @@ int mwifiex_write_data_complete(struct mwifiex_adapter *adapter,
                priv->stats.tx_errors++;
        }
 
+       if (tx_info->flags & MWIFIEX_BUF_FLAG_BRIDGED_PKT)
+               atomic_dec_return(&adapter->pending_bridged_pkts);
        if (atomic_dec_return(&adapter->tx_pending) >= LOW_TX_PENDING)
                goto done;
 
index f40e93fe894aca64702219b8223c464a526c92bf..d95a2d558fcfbb2d84950650344c1656db173351 100644 (file)
@@ -167,6 +167,7 @@ mwifiex_set_ht_params(struct mwifiex_private *priv,
        if (ht_ie) {
                memcpy(&bss_cfg->ht_cap, ht_ie + 2,
                       sizeof(struct ieee80211_ht_cap));
+               priv->ap_11n_enabled = 1;
        } else {
                memset(&bss_cfg->ht_cap , 0, sizeof(struct ieee80211_ht_cap));
                bss_cfg->ht_cap.cap_info = cpu_to_le16(MWIFIEX_DEF_HT_CAP);
@@ -176,6 +177,25 @@ mwifiex_set_ht_params(struct mwifiex_private *priv,
        return;
 }
 
+/* This function finds supported rates IE from beacon parameter and sets
+ * these rates into bss_config structure.
+ *
+ * The rates IE length is supplied by the beacon head handed in from
+ * cfg80211; it is clamped to the capacity used elsewhere in this file
+ * (MWIFIEX_SUPPORTED_RATES) so a malformed or oversized IE cannot
+ * overflow bss_cfg->rates.
+ */
+void
+mwifiex_set_uap_rates(struct mwifiex_uap_bss_param *bss_cfg,
+                     struct cfg80211_ap_settings *params)
+{
+       struct ieee_types_header *rate_ie;
+       int var_offset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
+       const u8 *var_pos = params->beacon.head + var_offset;
+       int len = params->beacon.head_len - var_offset;
+
+       rate_ie = (void *)cfg80211_find_ie(WLAN_EID_SUPP_RATES, var_pos, len);
+       if (rate_ie) {
+               /* Refuse IEs longer than the rates buffer rather than
+                * copying past the end of bss_cfg->rates.
+                */
+               if (rate_ie->len > MWIFIEX_SUPPORTED_RATES)
+                       return;
+               memcpy(bss_cfg->rates, rate_ie + 1, rate_ie->len);
+       }
+
+       return;
+}
+
 /* This function initializes some of mwifiex_uap_bss_param variables.
  * This helps FW in ignoring invalid values. These values may or may not
  * be get updated to valid ones at later stage.
@@ -322,8 +342,11 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
        struct host_cmd_tlv_retry_limit *retry_limit;
        struct host_cmd_tlv_encrypt_protocol *encrypt_protocol;
        struct host_cmd_tlv_auth_type *auth_type;
+       struct host_cmd_tlv_rates *tlv_rates;
+       struct host_cmd_tlv_ageout_timer *ao_timer, *ps_ao_timer;
        struct mwifiex_ie_types_htcap *htcap;
        struct mwifiex_uap_bss_param *bss_cfg = cmd_buf;
+       int i;
        u16 cmd_size = *param_size;
 
        if (bss_cfg->ssid.ssid_len) {
@@ -343,7 +366,23 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
                cmd_size += sizeof(struct host_cmd_tlv_bcast_ssid);
                tlv += sizeof(struct host_cmd_tlv_bcast_ssid);
        }
-       if (bss_cfg->channel && bss_cfg->channel <= MAX_CHANNEL_BAND_BG) {
+       if (bss_cfg->rates[0]) {
+               tlv_rates = (struct host_cmd_tlv_rates *)tlv;
+               tlv_rates->tlv.type = cpu_to_le16(TLV_TYPE_UAP_RATES);
+
+               for (i = 0; i < MWIFIEX_SUPPORTED_RATES && bss_cfg->rates[i];
+                    i++)
+                       tlv_rates->rates[i] = bss_cfg->rates[i];
+
+               tlv_rates->tlv.len = cpu_to_le16(i);
+               cmd_size += sizeof(struct host_cmd_tlv_rates) + i;
+               tlv += sizeof(struct host_cmd_tlv_rates) + i;
+       }
+       if (bss_cfg->channel &&
+           ((bss_cfg->band_cfg == BAND_CONFIG_BG &&
+             bss_cfg->channel <= MAX_CHANNEL_BAND_BG) ||
+           (bss_cfg->band_cfg == BAND_CONFIG_A &&
+            bss_cfg->channel <= MAX_CHANNEL_BAND_A))) {
                chan_band = (struct host_cmd_tlv_channel_band *)tlv;
                chan_band->tlv.type = cpu_to_le16(TLV_TYPE_CHANNELBANDLIST);
                chan_band->tlv.len =
@@ -459,6 +498,27 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
                tlv += sizeof(struct mwifiex_ie_types_htcap);
        }
 
+       if (bss_cfg->sta_ao_timer) {
+               ao_timer = (struct host_cmd_tlv_ageout_timer *)tlv;
+               ao_timer->tlv.type = cpu_to_le16(TLV_TYPE_UAP_AO_TIMER);
+               ao_timer->tlv.len = cpu_to_le16(sizeof(*ao_timer) -
+                                               sizeof(struct host_cmd_tlv));
+               ao_timer->sta_ao_timer = cpu_to_le32(bss_cfg->sta_ao_timer);
+               cmd_size += sizeof(*ao_timer);
+               tlv += sizeof(*ao_timer);
+       }
+
+       if (bss_cfg->ps_sta_ao_timer) {
+               ps_ao_timer = (struct host_cmd_tlv_ageout_timer *)tlv;
+               ps_ao_timer->tlv.type = cpu_to_le16(TLV_TYPE_UAP_PS_AO_TIMER);
+               ps_ao_timer->tlv.len = cpu_to_le16(sizeof(*ps_ao_timer) -
+                                                  sizeof(struct host_cmd_tlv));
+               ps_ao_timer->sta_ao_timer =
+                                       cpu_to_le32(bss_cfg->ps_sta_ao_timer);
+               cmd_size += sizeof(*ps_ao_timer);
+               tlv += sizeof(*ps_ao_timer);
+       }
+
        *param_size = cmd_size;
 
        return 0;
diff --git a/drivers/net/wireless/mwifiex/uap_event.c b/drivers/net/wireless/mwifiex/uap_event.c
new file mode 100644 (file)
index 0000000..a33fa39
--- /dev/null
@@ -0,0 +1,290 @@
+/*
+ * Marvell Wireless LAN device driver: AP event handling
+ *
+ * Copyright (C) 2012, Marvell International Ltd.
+ *
+ * This software file (the "File") is distributed by Marvell International
+ * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * (the "License").  You may use, redistribute and/or modify this File in
+ * accordance with the terms and conditions of the License, a copy of which
+ * is available by writing to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
+ * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
+ *
+ * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+ * ARE EXPRESSLY DISCLAIMED.  The License provides additional details about
+ * this warranty disclaimer.
+ */
+
+#include "decl.h"
+#include "main.h"
+#include "11n.h"
+
+/*
+ * This function will return the pointer to station entry in station list
+ * table which matches specified mac address.
+ * Callers must hold the station list lock across the walk; the callers
+ * in this file take priv->sta_list_spinlock before calling in.
+ * NULL is returned if station entry is not found in associated STA list.
+ */
+struct mwifiex_sta_node *
+mwifiex_get_sta_entry(struct mwifiex_private *priv, u8 *mac)
+{
+       struct mwifiex_sta_node *node;
+
+       /* A NULL mac can never match; bail out early. */
+       if (!mac)
+               return NULL;
+
+       list_for_each_entry(node, &priv->sta_list, list) {
+               if (!memcmp(node->mac_addr, mac, ETH_ALEN))
+                       return node;
+       }
+
+       return NULL;
+}
+
+/*
+ * This function will add a sta_node entry to associated station list
+ * table with the given mac address.
+ * If entry exist already, existing entry is returned.
+ * If received mac address is NULL, or the node allocation fails,
+ * NULL is returned.
+ */
+static struct mwifiex_sta_node *
+mwifiex_add_sta_entry(struct mwifiex_private *priv, u8 *mac)
+{
+       struct mwifiex_sta_node *node;
+       unsigned long flags;
+
+       if (!mac)
+               return NULL;
+
+       spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+       node = mwifiex_get_sta_entry(priv, mac);
+       if (node)
+               goto done;
+
+       /* GFP_ATOMIC: allocated while holding sta_list_spinlock, so we
+        * must not sleep here.
+        */
+       node = kzalloc(sizeof(struct mwifiex_sta_node), GFP_ATOMIC);
+       if (!node)
+               goto done;
+
+       memcpy(node->mac_addr, mac, ETH_ALEN);
+       list_add_tail(&node->list, &priv->sta_list);
+
+done:
+       spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+       return node;
+}
+
+/*
+ * This function will search for HT IE in association request IEs
+ * and set station HT parameters accordingly.
+ * When no HT capability IE is present the peer is marked non-11n.
+ */
+static void
+mwifiex_set_sta_ht_cap(struct mwifiex_private *priv, const u8 *ies,
+                      int ies_len, struct mwifiex_sta_node *node)
+{
+       const struct ieee80211_ht_cap *ht_cap;
+
+       if (!ies)
+               return;
+
+       ht_cap = (void *)cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, ies, ies_len);
+       if (ht_cap) {
+               node->is_11n_enabled = 1;
+               /* Max A-MSDU capability bit selects the 8K vs 4K
+                * buffer size for this peer.
+                */
+               node->max_amsdu = le16_to_cpu(ht_cap->cap_info) &
+                                 IEEE80211_HT_CAP_MAX_AMSDU ?
+                                 MWIFIEX_TX_DATA_BUF_SIZE_8K :
+                                 MWIFIEX_TX_DATA_BUF_SIZE_4K;
+       } else {
+               node->is_11n_enabled = 0;
+       }
+
+       return;
+}
+
+/*
+ * This function will delete a station entry from station list.
+ *
+ * Only the entry matching @mac is unlinked and freed; other associated
+ * stations remain in the list.  Runs under sta_list_spinlock.
+ */
+static void mwifiex_del_sta_entry(struct mwifiex_private *priv, u8 *mac)
+{
+       struct mwifiex_sta_node *node;
+       unsigned long flags;
+
+       spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+
+       node = mwifiex_get_sta_entry(priv, mac);
+       if (node) {
+               /* Remove just this node.  The previous code iterated
+                * list_for_each_entry_safe over the whole sta_list here
+                * and freed every entry, so a single station's deauth
+                * wiped out all station state.
+                */
+               list_del(&node->list);
+               kfree(node);
+       }
+
+       spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+       return;
+}
+
+/*
+ * This function will delete all stations from associated station list.
+ * Runs under sta_list_spinlock; the list head is re-initialised once
+ * every node has been unlinked and freed.
+ */
+static void mwifiex_del_all_sta_list(struct mwifiex_private *priv)
+{
+       struct mwifiex_sta_node *node, *tmp;
+       unsigned long flags;
+
+       spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+
+       list_for_each_entry_safe(node, tmp, &priv->sta_list, list) {
+               list_del(&node->list);
+               kfree(node);
+       }
+
+       INIT_LIST_HEAD(&priv->sta_list);
+       spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+       return;
+}
+
+/*
+ * This function handles AP interface specific events generated by firmware.
+ *
+ * Event specific routines are called by this function based
+ * upon the generated event cause.
+ *
+ * Events supported for AP -
+ *      - EVENT_UAP_STA_ASSOC
+ *      - EVENT_UAP_STA_DEAUTH
+ *      - EVENT_UAP_BSS_ACTIVE
+ *      - EVENT_UAP_BSS_START
+ *      - EVENT_UAP_BSS_IDLE
+ *      - EVENT_UAP_MIC_COUNTERMEASURES
+ *      - EVENT_AMSDU_AGGR_CTRL
+ *      - EVENT_ADDBA / EVENT_DELBA / EVENT_BA_STREAM_TIEMOUT
+ *
+ * Returns -1 only when a station entry cannot be allocated on
+ * association; all other paths return 0.
+ */
+int mwifiex_process_uap_event(struct mwifiex_private *priv)
+{
+       struct mwifiex_adapter *adapter = priv->adapter;
+       int len, i;
+       u32 eventcause = adapter->event_cause;
+       struct station_info sinfo;
+       struct mwifiex_assoc_event *event;
+       struct mwifiex_sta_node *node;
+       u8 *deauth_mac;
+       struct host_cmd_ds_11n_batimeout *ba_timeout;
+       u16 ctrl;
+
+       switch (eventcause) {
+       case EVENT_UAP_STA_ASSOC:
+               memset(&sinfo, 0, sizeof(sinfo));
+               event = (struct mwifiex_assoc_event *)
+                       (adapter->event_body + MWIFIEX_UAP_EVENT_EXTRA_HEADER);
+               if (le16_to_cpu(event->type) == TLV_TYPE_UAP_MGMT_FRAME) {
+                       /* len is the offset of the assoc-request IEs within
+                        * event->data; -1 means "frame type not recognised".
+                        */
+                       len = -1;
+
+                       if (ieee80211_is_assoc_req(event->frame_control))
+                               len = 0;
+                       else if (ieee80211_is_reassoc_req(event->frame_control))
+                               /* There will be ETH_ALEN bytes of
+                                * current_ap_addr before the re-assoc ies.
+                                */
+                               len = ETH_ALEN;
+
+                       if (len != -1) {
+                               sinfo.filled = STATION_INFO_ASSOC_REQ_IES;
+                               sinfo.assoc_req_ies = &event->data[len];
+                               len = (u8 *)sinfo.assoc_req_ies -
+                                     (u8 *)&event->frame_control;
+                               sinfo.assoc_req_ies_len =
+                                       le16_to_cpu(event->len) - (u16)len;
+                       }
+               }
+               cfg80211_new_sta(priv->netdev, event->sta_addr, &sinfo,
+                                GFP_KERNEL);
+
+               node = mwifiex_add_sta_entry(priv, event->sta_addr);
+               if (!node) {
+                       dev_warn(adapter->dev,
+                                "could not create station entry!\n");
+                       return -1;
+               }
+
+               if (!priv->ap_11n_enabled)
+                       break;
+
+               mwifiex_set_sta_ht_cap(priv, sinfo.assoc_req_ies,
+                                      sinfo.assoc_req_ies_len, node);
+
+               /* 11n peers inherit the AP's per-TID A-MPDU permission
+                * table; legacy peers are denied BA streams entirely.
+                */
+               for (i = 0; i < MAX_NUM_TID; i++) {
+                       if (node->is_11n_enabled)
+                               node->ampdu_sta[i] =
+                                             priv->aggr_prio_tbl[i].ampdu_user;
+                       else
+                               node->ampdu_sta[i] = BA_STREAM_NOT_ALLOWED;
+               }
+               memset(node->rx_seq, 0xff, sizeof(node->rx_seq));
+               break;
+       case EVENT_UAP_STA_DEAUTH:
+               deauth_mac = adapter->event_body +
+                            MWIFIEX_UAP_EVENT_EXTRA_HEADER;
+               cfg80211_del_sta(priv->netdev, deauth_mac, GFP_KERNEL);
+
+               /* Tear down rx-reorder and tx BA state for this peer. */
+               if (priv->ap_11n_enabled) {
+                       mwifiex_11n_del_rx_reorder_tbl_by_ta(priv, deauth_mac);
+                       mwifiex_del_tx_ba_stream_tbl_by_ra(priv, deauth_mac);
+               }
+               mwifiex_del_sta_entry(priv, deauth_mac);
+               break;
+       case EVENT_UAP_BSS_IDLE:
+               priv->media_connected = false;
+               mwifiex_clean_txrx(priv);
+               mwifiex_del_all_sta_list(priv);
+               break;
+       case EVENT_UAP_BSS_ACTIVE:
+               priv->media_connected = true;
+               break;
+       case EVENT_UAP_BSS_START:
+               dev_dbg(adapter->dev, "AP EVENT: event id: %#x\n", eventcause);
+               /* Firmware reports the BSS MAC address after a 2-byte
+                * header in the event body.
+                */
+               memcpy(priv->netdev->dev_addr, adapter->event_body + 2,
+                      ETH_ALEN);
+               break;
+       case EVENT_UAP_MIC_COUNTERMEASURES:
+               /* For future development */
+               dev_dbg(adapter->dev, "AP EVENT: event id: %#x\n", eventcause);
+               break;
+       case EVENT_AMSDU_AGGR_CTRL:
+               ctrl = le16_to_cpu(*(__le16 *)adapter->event_body);
+               dev_dbg(adapter->dev, "event: AMSDU_AGGR_CTRL %d\n", ctrl);
+
+               if (priv->media_connected) {
+                       adapter->tx_buf_size =
+                               min_t(u16, adapter->curr_tx_buf_size, ctrl);
+                       dev_dbg(adapter->dev, "event: tx_buf_size %d\n",
+                               adapter->tx_buf_size);
+               }
+               break;
+       case EVENT_ADDBA:
+               dev_dbg(adapter->dev, "event: ADDBA Request\n");
+               if (priv->media_connected)
+                       mwifiex_send_cmd_async(priv, HostCmd_CMD_11N_ADDBA_RSP,
+                                              HostCmd_ACT_GEN_SET, 0,
+                                              adapter->event_body);
+               break;
+       case EVENT_DELBA:
+               dev_dbg(adapter->dev, "event: DELBA Request\n");
+               if (priv->media_connected)
+                       mwifiex_11n_delete_ba_stream(priv, adapter->event_body);
+               break;
+       /* "TIEMOUT" is presumably spelled this way in the firmware event
+        * header; the case label must match that definition, so the typo
+        * is not fixed here.
+        */
+       case EVENT_BA_STREAM_TIEMOUT:
+               dev_dbg(adapter->dev, "event:  BA Stream timeout\n");
+               if (priv->media_connected) {
+                       ba_timeout = (void *)adapter->event_body;
+                       mwifiex_11n_ba_stream_timeout(priv, ba_timeout);
+               }
+               break;
+       default:
+               dev_dbg(adapter->dev, "event: unknown event id: %#x\n",
+                       eventcause);
+               break;
+       }
+
+       return 0;
+}
diff --git a/drivers/net/wireless/mwifiex/uap_txrx.c b/drivers/net/wireless/mwifiex/uap_txrx.c
new file mode 100644 (file)
index 0000000..0966ac2
--- /dev/null
@@ -0,0 +1,340 @@
+/*
+ * Marvell Wireless LAN device driver: AP TX and RX data handling
+ *
+ * Copyright (C) 2012, Marvell International Ltd.
+ *
+ * This software file (the "File") is distributed by Marvell International
+ * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * (the "License").  You may use, redistribute and/or modify this File in
+ * accordance with the terms and conditions of the License, a copy of which
+ * is available by writing to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
+ * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
+ *
+ * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+ * ARE EXPRESSLY DISCLAIMED.  The License provides additional details about
+ * this warranty disclaimer.
+ */
+
+#include "decl.h"
+#include "ioctl.h"
+#include "main.h"
+#include "wmm.h"
+#include "11n_aggr.h"
+#include "11n_rxreorder.h"
+
+static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv,
+                                        struct sk_buff *skb)
+{
+       struct mwifiex_adapter *adapter = priv->adapter;
+       struct uap_rxpd *uap_rx_pd;
+       struct rx_packet_hdr *rx_pkt_hdr;
+       struct sk_buff *new_skb;
+       struct mwifiex_txinfo *tx_info;
+       int hdr_chop;
+       struct timeval tv;
+       u8 rfc1042_eth_hdr[ETH_ALEN] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
+
+       uap_rx_pd = (struct uap_rxpd *)(skb->data);
+       rx_pkt_hdr = (void *)uap_rx_pd + le16_to_cpu(uap_rx_pd->rx_pkt_offset);
+
+       if ((atomic_read(&adapter->pending_bridged_pkts) >=
+                                            MWIFIEX_BRIDGED_PKTS_THRESHOLD)) {
+               dev_err(priv->adapter->dev,
+                       "Tx: Bridge packet limit reached. Drop packet!\n");
+               kfree_skb(skb);
+               return;
+       }
+
+       if (!memcmp(&rx_pkt_hdr->rfc1042_hdr,
+                   rfc1042_eth_hdr, sizeof(rfc1042_eth_hdr)))
+               /* Chop off the rxpd + the excess memory from
+                * 802.2/llc/snap header that was removed.
+                */
+               hdr_chop = (u8 *)eth_hdr - (u8 *)uap_rx_pd;
+       else
+               /* Chop off the rxpd */
+               hdr_chop = (u8 *)&rx_pkt_hdr->eth803_hdr - (u8 *)uap_rx_pd;
+
+       /* Chop off the leading header bytes so that it points
+        * to the start of either the reconstructed EthII frame
+        * or the 802.2/llc/snap frame.
+        */
+       skb_pull(skb, hdr_chop);
+
+       if (skb_headroom(skb) < MWIFIEX_MIN_DATA_HEADER_LEN) {
+               dev_dbg(priv->adapter->dev,
+                       "data: Tx: insufficient skb headroom %d\n",
+                       skb_headroom(skb));
+               /* Insufficient skb headroom - allocate a new skb */
+               new_skb =
+                       skb_realloc_headroom(skb, MWIFIEX_MIN_DATA_HEADER_LEN);
+               if (unlikely(!new_skb)) {
+                       dev_err(priv->adapter->dev,
+                               "Tx: cannot allocate new_skb\n");
+                       kfree_skb(skb);
+                       priv->stats.tx_dropped++;
+                       return;
+               }
+
+               kfree_skb(skb);
+               skb = new_skb;
+               dev_dbg(priv->adapter->dev, "info: new skb headroom %d\n",
+                       skb_headroom(skb));
+       }
+
+       tx_info = MWIFIEX_SKB_TXCB(skb);
+       tx_info->bss_num = priv->bss_num;
+       tx_info->bss_type = priv->bss_type;
+       tx_info->flags |= MWIFIEX_BUF_FLAG_BRIDGED_PKT;
+
+       do_gettimeofday(&tv);
+       skb->tstamp = timeval_to_ktime(tv);
+       mwifiex_wmm_add_buf_txqueue(priv, skb);
+       atomic_inc(&adapter->tx_pending);
+       atomic_inc(&adapter->pending_bridged_pkts);
+
+       if ((atomic_read(&adapter->tx_pending) >= MAX_TX_PENDING)) {
+               mwifiex_set_trans_start(priv->netdev);
+               mwifiex_stop_net_dev_queue(priv->netdev, priv->adapter);
+       }
+       return;
+}
+
+/*
+ * This function contains logic for AP packet forwarding.
+ *
+ * If a packet is multicast/broadcast, it is sent to kernel/upper layer
+ * as well as queued back to AP TX queue so that it can be sent to other
+ * associated stations.
+ * If a packet is unicast and RA is present in associated station list,
+ * it is again requeued into AP TX queue.
+ * If a packet is unicast and RA is not in associated station list,
+ * packet is forwarded to kernel to handle routing logic.
+ */
+int mwifiex_handle_uap_rx_forward(struct mwifiex_private *priv,
+                                 struct sk_buff *skb)
+{
+       struct mwifiex_adapter *adapter = priv->adapter;
+       struct uap_rxpd *uap_rx_pd;
+       struct rx_packet_hdr *rx_pkt_hdr;
+       u8 ra[ETH_ALEN];
+       struct sk_buff *skb_uap;
+
+       uap_rx_pd = (struct uap_rxpd *)(skb->data);
+       rx_pkt_hdr = (void *)uap_rx_pd + le16_to_cpu(uap_rx_pd->rx_pkt_offset);
+
+       /* don't do packet forwarding in disconnected state */
+       if (!priv->media_connected) {
+               dev_err(adapter->dev, "drop packet in disconnected state.\n");
+               dev_kfree_skb_any(skb);
+               return 0;
+       }
+
+       memcpy(ra, rx_pkt_hdr->eth803_hdr.h_dest, ETH_ALEN);
+
+       if (is_multicast_ether_addr(ra)) {
+               skb_uap = skb_copy(skb, GFP_ATOMIC);
+               mwifiex_uap_queue_bridged_pkt(priv, skb_uap);
+       } else {
+               if (mwifiex_get_sta_entry(priv, ra)) {
+                       /* Requeue Intra-BSS packet */
+                       mwifiex_uap_queue_bridged_pkt(priv, skb);
+                       return 0;
+               }
+       }
+
+       /* Forward unicast/Inter-BSS packets to kernel. */
+       return mwifiex_process_rx_packet(adapter, skb);
+}
+
+/*
+ * This function processes the packet received on AP interface.
+ *
+ * The function looks into the RxPD and performs sanity tests on the
+ * received buffer to ensure it's a valid packet before processing it
+ * further. If the packet is determined to be aggregated, it is
+ * de-aggregated accordingly. Then skb is passed to AP packet forwarding logic.
+ *
+ * The completion callback is called after processing is complete.
+ */
+int mwifiex_process_uap_rx_packet(struct mwifiex_adapter *adapter,
+                                 struct sk_buff *skb)
+{
+       int ret;
+       struct uap_rxpd *uap_rx_pd;
+       struct mwifiex_rxinfo *rx_info = MWIFIEX_SKB_RXCB(skb);
+       struct rx_packet_hdr *rx_pkt_hdr;
+       u16 rx_pkt_type;
+       u8 ta[ETH_ALEN], pkt_type;
+       struct mwifiex_sta_node *node;
+
+       struct mwifiex_private *priv =
+                       mwifiex_get_priv_by_id(adapter, rx_info->bss_num,
+                                              rx_info->bss_type);
+
+       if (!priv)
+               return -1;
+
+       uap_rx_pd = (struct uap_rxpd *)(skb->data);
+       rx_pkt_type = le16_to_cpu(uap_rx_pd->rx_pkt_type);
+       rx_pkt_hdr = (void *)uap_rx_pd + le16_to_cpu(uap_rx_pd->rx_pkt_offset);
+
+       if ((le16_to_cpu(uap_rx_pd->rx_pkt_offset) +
+            le16_to_cpu(uap_rx_pd->rx_pkt_length)) > (u16) skb->len) {
+               dev_err(adapter->dev,
+                       "wrong rx packet: len=%d, offset=%d, length=%d\n",
+                       skb->len, le16_to_cpu(uap_rx_pd->rx_pkt_offset),
+                       le16_to_cpu(uap_rx_pd->rx_pkt_length));
+               priv->stats.rx_dropped++;
+
+               if (adapter->if_ops.data_complete)
+                       adapter->if_ops.data_complete(adapter, skb);
+               else
+                       dev_kfree_skb_any(skb);
+
+               return 0;
+       }
+
+       if (le16_to_cpu(uap_rx_pd->rx_pkt_type) == PKT_TYPE_AMSDU) {
+               struct sk_buff_head list;
+               struct sk_buff *rx_skb;
+
+               __skb_queue_head_init(&list);
+               skb_pull(skb, le16_to_cpu(uap_rx_pd->rx_pkt_offset));
+               skb_trim(skb, le16_to_cpu(uap_rx_pd->rx_pkt_length));
+
+               ieee80211_amsdu_to_8023s(skb, &list, priv->curr_addr,
+                                        priv->wdev->iftype, 0, false);
+
+               while (!skb_queue_empty(&list)) {
+                       rx_skb = __skb_dequeue(&list);
+                       ret = mwifiex_recv_packet(adapter, rx_skb);
+                       if (ret)
+                               dev_err(adapter->dev,
+                                       "AP:Rx A-MSDU failed");
+               }
+
+               return 0;
+       } else if (rx_pkt_type == PKT_TYPE_MGMT) {
+               ret = mwifiex_process_mgmt_packet(adapter, skb);
+               if (ret)
+                       dev_err(adapter->dev, "Rx of mgmt packet failed");
+               dev_kfree_skb_any(skb);
+               return ret;
+       }
+
+       memcpy(ta, rx_pkt_hdr->eth803_hdr.h_source, ETH_ALEN);
+
+       if (rx_pkt_type != PKT_TYPE_BAR && uap_rx_pd->priority < MAX_NUM_TID) {
+               node = mwifiex_get_sta_entry(priv, ta);
+               if (node)
+                       node->rx_seq[uap_rx_pd->priority] =
+                                               le16_to_cpu(uap_rx_pd->seq_num);
+       }
+
+       if (!priv->ap_11n_enabled ||
+           (!mwifiex_11n_get_rx_reorder_tbl(priv, uap_rx_pd->priority, ta) &&
+           (le16_to_cpu(uap_rx_pd->rx_pkt_type) != PKT_TYPE_AMSDU))) {
+               ret = mwifiex_handle_uap_rx_forward(priv, skb);
+               return ret;
+       }
+
+       /* Reorder and send to kernel */
+       pkt_type = (u8)le16_to_cpu(uap_rx_pd->rx_pkt_type);
+       ret = mwifiex_11n_rx_reorder_pkt(priv, le16_to_cpu(uap_rx_pd->seq_num),
+                                        uap_rx_pd->priority, ta, pkt_type,
+                                        skb);
+
+       if (ret || (rx_pkt_type == PKT_TYPE_BAR)) {
+               if (adapter->if_ops.data_complete)
+                       adapter->if_ops.data_complete(adapter, skb);
+               else
+                       dev_kfree_skb_any(skb);
+       }
+
+       if (ret)
+               priv->stats.rx_dropped++;
+
+       return ret;
+}
+
+/*
+ * This function fills the TxPD for AP tx packets.
+ *
+ * The Tx buffer received by this function should already have the
+ * header space allocated for TxPD.
+ *
+ * This function inserts the TxPD in between interface header and actual
+ * data and adjusts the buffer pointers accordingly.
+ *
+ * The following TxPD fields are set by this function, as required -
+ *      - BSS number
+ *      - Tx packet length and offset
+ *      - Priority
+ *      - Packet delay
+ *      - Priority specific Tx control
+ *      - Flags
+ */
+void *mwifiex_process_uap_txpd(struct mwifiex_private *priv,
+                              struct sk_buff *skb)
+{
+       struct mwifiex_adapter *adapter = priv->adapter;
+       struct uap_txpd *txpd;
+       struct mwifiex_txinfo *tx_info = MWIFIEX_SKB_TXCB(skb);
+       int pad, len;
+       u16 pkt_type;
+
+       if (!skb->len) {
+               dev_err(adapter->dev, "Tx: bad packet length: %d\n", skb->len);
+               tx_info->status_code = -1;
+               return skb->data;
+       }
+
+       pkt_type = mwifiex_is_skb_mgmt_frame(skb) ? PKT_TYPE_MGMT : 0;
+
+       /* If skb->data is not aligned, add padding */
+       pad = (4 - (((void *)skb->data - NULL) & 0x3)) % 4;
+
+       len = sizeof(*txpd) + pad;
+
+       BUG_ON(skb_headroom(skb) < len + INTF_HEADER_LEN);
+
+       skb_push(skb, len);
+
+       txpd = (struct uap_txpd *)skb->data;
+       memset(txpd, 0, sizeof(*txpd));
+       txpd->bss_num = priv->bss_num;
+       txpd->bss_type = priv->bss_type;
+       txpd->tx_pkt_length = cpu_to_le16((u16)(skb->len - len));
+
+       txpd->priority = (u8)skb->priority;
+       txpd->pkt_delay_2ms = mwifiex_wmm_compute_drv_pkt_delay(priv, skb);
+
+       if (txpd->priority < ARRAY_SIZE(priv->wmm.user_pri_pkt_tx_ctrl))
+               /*
+                * Set the priority specific tx_control field, setting of 0 will
+                * cause the default value to be used later in this function.
+                */
+               txpd->tx_control =
+                   cpu_to_le32(priv->wmm.user_pri_pkt_tx_ctrl[txpd->priority]);
+
+       /* Offset of actual data */
+       if (pkt_type == PKT_TYPE_MGMT) {
+               /* Set the packet type and add header for management frame */
+               txpd->tx_pkt_type = cpu_to_le16(pkt_type);
+               len += MWIFIEX_MGMT_FRAME_HEADER_SIZE;
+       }
+
+       txpd->tx_pkt_offset = cpu_to_le16(len);
+
+       /* make space for INTF_HEADER_LEN */
+       skb_push(skb, INTF_HEADER_LEN);
+
+       if (!txpd->tx_control)
+               /* TxCtrl set by user or default */
+               txpd->tx_control = cpu_to_le32(priv->pkt_tx_ctrl);
+
+       return skb->data;
+}
index 2864c74bdb6fc8b264815a57c039564fe9cbf0a0..ae88f80cf86b966238dff0544fa97ff7ca83e168 100644 (file)
@@ -141,6 +141,46 @@ int mwifiex_get_debug_info(struct mwifiex_private *priv,
        return 0;
 }
 
+/*
+ * This function processes the received management packet and sends it
+ * to the kernel.
+ */
+int
+mwifiex_process_mgmt_packet(struct mwifiex_adapter *adapter,
+                           struct sk_buff *skb)
+{
+       struct rxpd *rx_pd;
+       struct mwifiex_private *priv;
+       u16 pkt_len;
+
+       if (!skb)
+               return -1;
+
+       rx_pd = (struct rxpd *)skb->data;
+       priv = mwifiex_get_priv_by_id(adapter, rx_pd->bss_num, rx_pd->bss_type);
+       if (!priv)
+               return -1;
+
+       skb_pull(skb, le16_to_cpu(rx_pd->rx_pkt_offset));
+       skb_pull(skb, sizeof(pkt_len));
+
+       pkt_len = le16_to_cpu(rx_pd->rx_pkt_length);
+
+       /* Remove address4 */
+       memmove(skb->data + sizeof(struct ieee80211_hdr_3addr),
+               skb->data + sizeof(struct ieee80211_hdr),
+               pkt_len - sizeof(struct ieee80211_hdr));
+
+       pkt_len -= ETH_ALEN + sizeof(pkt_len);
+       rx_pd->rx_pkt_length = cpu_to_le16(pkt_len);
+
+       cfg80211_rx_mgmt(priv->wdev, priv->roc_cfg.chan.center_freq,
+                        CAL_RSSI(rx_pd->snr, rx_pd->nf),
+                        skb->data, pkt_len, GFP_ATOMIC);
+
+       return 0;
+}
+
 /*
  * This function processes the received packet before sending it to the
  * kernel.
index 3fa4d417699381225e853a56238e0d8506a2f99b..600d8194610e3b0c3c7d6c87a0400a5b290514a4 100644 (file)
@@ -127,6 +127,29 @@ mwifiex_wmm_allocate_ralist_node(struct mwifiex_adapter *adapter, u8 *ra)
        return ra_list;
 }
 
+/* This function returns a random number between 16 and 32 to be used as the
+ * threshold for the number of packets after which BA setup is initiated.
+ */
+static u8 mwifiex_get_random_ba_threshold(void)
+{
+       u32 sec, usec;
+       struct timeval ba_tstamp;
+       u8 ba_threshold;
+
+       /* setup ba_packet_threshold here random number between
+        * [BA_SETUP_PACKET_OFFSET,
+        * BA_SETUP_PACKET_OFFSET+BA_SETUP_MAX_PACKET_THRESHOLD-1]
+        */
+
+       do_gettimeofday(&ba_tstamp);
+       sec = (ba_tstamp.tv_sec & 0xFFFF) + (ba_tstamp.tv_sec >> 16);
+       usec = (ba_tstamp.tv_usec & 0xFFFF) + (ba_tstamp.tv_usec >> 16);
+       ba_threshold = (((sec << 16) + usec) % BA_SETUP_MAX_PACKET_THRESHOLD)
+                                                     + BA_SETUP_PACKET_OFFSET;
+
+       return ba_threshold;
+}
+
 /*
  * This function allocates and adds a RA list for all TIDs
  * with the given RA.
@@ -137,6 +160,12 @@ mwifiex_ralist_add(struct mwifiex_private *priv, u8 *ra)
        int i;
        struct mwifiex_ra_list_tbl *ra_list;
        struct mwifiex_adapter *adapter = priv->adapter;
+       struct mwifiex_sta_node *node;
+       unsigned long flags;
+
+       spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+       node = mwifiex_get_sta_entry(priv, ra);
+       spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
 
        for (i = 0; i < MAX_NUM_TID; ++i) {
                ra_list = mwifiex_wmm_allocate_ralist_node(adapter, ra);
@@ -145,14 +174,24 @@ mwifiex_ralist_add(struct mwifiex_private *priv, u8 *ra)
                if (!ra_list)
                        break;
 
-               if (!mwifiex_queuing_ra_based(priv))
+               ra_list->is_11n_enabled = 0;
+               if (!mwifiex_queuing_ra_based(priv)) {
                        ra_list->is_11n_enabled = IS_11N_ENABLED(priv);
-               else
-                       ra_list->is_11n_enabled = false;
+               } else {
+                       ra_list->is_11n_enabled =
+                                     mwifiex_is_sta_11n_enabled(priv, node);
+                       if (ra_list->is_11n_enabled)
+                               ra_list->max_amsdu = node->max_amsdu;
+               }
 
                dev_dbg(adapter->dev, "data: ralist %p: is_11n_enabled=%d\n",
                        ra_list, ra_list->is_11n_enabled);
 
+               if (ra_list->is_11n_enabled) {
+                       ra_list->pkt_count = 0;
+                       ra_list->ba_packet_thr =
+                                             mwifiex_get_random_ba_threshold();
+               }
                list_add_tail(&ra_list->list,
                              &priv->wmm.tid_tbl_ptr[i].ra_list);
 
@@ -423,7 +462,7 @@ mwifiex_wmm_lists_empty(struct mwifiex_adapter *adapter)
        for (i = 0; i < adapter->priv_num; ++i) {
                priv = adapter->priv[i];
                if (priv && atomic_read(&priv->wmm.tx_pkts_queued))
-                               return false;
+                       return false;
        }
 
        return true;
@@ -609,7 +648,7 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
        u8 ra[ETH_ALEN], tid_down;
        unsigned long flags;
 
-       if (!priv->media_connected) {
+       if (!priv->media_connected && !mwifiex_is_skb_mgmt_frame(skb)) {
                dev_dbg(adapter->dev, "data: drop packet in disconnect\n");
                mwifiex_write_data_complete(adapter, skb, -1);
                return;
@@ -624,7 +663,8 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
        /* In case of infra as we have already created the list during
           association we just don't have to call get_queue_raptr, we will
           have only 1 raptr for a tid in case of infra */
-       if (!mwifiex_queuing_ra_based(priv)) {
+       if (!mwifiex_queuing_ra_based(priv) &&
+           !mwifiex_is_skb_mgmt_frame(skb)) {
                if (!list_empty(&priv->wmm.tid_tbl_ptr[tid_down].ra_list))
                        ra_list = list_first_entry(
                                &priv->wmm.tid_tbl_ptr[tid_down].ra_list,
@@ -633,7 +673,7 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
                        ra_list = NULL;
        } else {
                memcpy(ra, skb->data, ETH_ALEN);
-               if (ra[0] & 0x01)
+               if (ra[0] & 0x01 || mwifiex_is_skb_mgmt_frame(skb))
                        memset(ra, 0xff, ETH_ALEN);
                ra_list = mwifiex_wmm_get_queue_raptr(priv, tid_down, ra);
        }
@@ -647,6 +687,7 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
        skb_queue_tail(&ra_list->skb_head, skb);
 
        ra_list->total_pkts_size += skb->len;
+       ra_list->pkt_count++;
 
        atomic_inc(&priv->wmm.tx_pkts_queued);
 
@@ -867,17 +908,16 @@ mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter,
                if (adapter->bss_prio_tbl[j].bss_prio_cur ==
                    (struct mwifiex_bss_prio_node *)
                    &adapter->bss_prio_tbl[j].bss_prio_head) {
-                       bssprio_node =
+                       adapter->bss_prio_tbl[j].bss_prio_cur =
                                list_first_entry(&adapter->bss_prio_tbl[j]
                                                 .bss_prio_head,
                                                 struct mwifiex_bss_prio_node,
                                                 list);
-                       bssprio_head = bssprio_node;
-               } else {
-                       bssprio_node = adapter->bss_prio_tbl[j].bss_prio_cur;
-                       bssprio_head = bssprio_node;
                }
 
+               bssprio_node = adapter->bss_prio_tbl[j].bss_prio_cur;
+               bssprio_head = bssprio_node;
+
                do {
                        priv_tmp = bssprio_node->priv;
                        hqp = &priv_tmp->wmm.highest_queued_prio;
@@ -986,10 +1026,17 @@ mwifiex_is_11n_aggragation_possible(struct mwifiex_private *priv,
 {
        int count = 0, total_size = 0;
        struct sk_buff *skb, *tmp;
+       int max_amsdu_size;
+
+       if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP && priv->ap_11n_enabled &&
+           ptr->is_11n_enabled)
+               max_amsdu_size = min_t(int, ptr->max_amsdu, max_buf_size);
+       else
+               max_amsdu_size = max_buf_size;
 
        skb_queue_walk_safe(&ptr->skb_head, skb, tmp) {
                total_size += skb->len;
-               if (total_size >= max_buf_size)
+               if (total_size >= max_amsdu_size)
                        break;
                if (++count >= MIN_NUM_AMSDU)
                        return true;
@@ -1050,6 +1097,7 @@ mwifiex_send_single_packet(struct mwifiex_private *priv,
                skb_queue_tail(&ptr->skb_head, skb);
 
                ptr->total_pkts_size += skb->len;
+               ptr->pkt_count++;
                tx_info->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
                spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
                                       ra_list_flags);
@@ -1231,7 +1279,8 @@ mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
                /* ra_list_spinlock has been freed in
                   mwifiex_send_single_packet() */
        } else {
-               if (mwifiex_is_ampdu_allowed(priv, tid)) {
+               if (mwifiex_is_ampdu_allowed(priv, tid) &&
+                   ptr->pkt_count > ptr->ba_packet_thr) {
                        if (mwifiex_space_avail_for_new_ba_stream(adapter)) {
                                mwifiex_create_ba_tbl(priv, ptr->ra, tid,
                                                      BA_SETUP_INPROGRESS);
index 224e03ade145cbbcfbd597095948c70d2a1e9beb..5099e5375cb39ed8e22a7382f6a42bc939c47fb7 100644 (file)
@@ -1830,12 +1830,14 @@ static inline void mwl8k_tx_count_packet(struct ieee80211_sta *sta, u8 tid)
 }
 
 static void
-mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
+mwl8k_txq_xmit(struct ieee80211_hw *hw,
+              int index,
+              struct ieee80211_sta *sta,
+              struct sk_buff *skb)
 {
        struct mwl8k_priv *priv = hw->priv;
        struct ieee80211_tx_info *tx_info;
        struct mwl8k_vif *mwl8k_vif;
-       struct ieee80211_sta *sta;
        struct ieee80211_hdr *wh;
        struct mwl8k_tx_queue *txq;
        struct mwl8k_tx_desc *tx;
@@ -1867,7 +1869,6 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
        wh = &((struct mwl8k_dma_data *)skb->data)->wh;
 
        tx_info = IEEE80211_SKB_CB(skb);
-       sta = tx_info->control.sta;
        mwl8k_vif = MWL8K_VIF(tx_info->control.vif);
 
        if (tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
@@ -2019,8 +2020,8 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
        tx->pkt_phys_addr = cpu_to_le32(dma);
        tx->pkt_len = cpu_to_le16(skb->len);
        tx->rate_info = 0;
-       if (!priv->ap_fw && tx_info->control.sta != NULL)
-               tx->peer_id = MWL8K_STA(tx_info->control.sta)->peer_id;
+       if (!priv->ap_fw && sta != NULL)
+               tx->peer_id = MWL8K_STA(sta)->peer_id;
        else
                tx->peer_id = 0;
 
@@ -4364,7 +4365,9 @@ static void mwl8k_rx_poll(unsigned long data)
 /*
  * Core driver operations.
  */
-static void mwl8k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void mwl8k_tx(struct ieee80211_hw *hw,
+                    struct ieee80211_tx_control *control,
+                    struct sk_buff *skb)
 {
        struct mwl8k_priv *priv = hw->priv;
        int index = skb_get_queue_mapping(skb);
@@ -4376,7 +4379,7 @@ static void mwl8k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
                return;
        }
 
-       mwl8k_txq_xmit(hw, index, skb);
+       mwl8k_txq_xmit(hw, index, control->sta, skb);
 }
 
 static int mwl8k_start(struct ieee80211_hw *hw)
index 33747e131a968e19f409de68e4edc92b6b2063e5..3b5508f982e80b8376d6bcb20c63dae6fd5586e0 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/if_arp.h>
 #include <linux/wireless.h>
 #include <linux/ieee80211.h>
+#include <linux/etherdevice.h>
 #include <net/iw_handler.h>
 #include <net/cfg80211.h>
 #include <net/cfg80211-wext.h>
@@ -159,15 +160,13 @@ static int orinoco_ioctl_setwap(struct net_device *dev,
        struct orinoco_private *priv = ndev_priv(dev);
        int err = -EINPROGRESS;         /* Call commit handler */
        unsigned long flags;
-       static const u8 off_addr[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
-       static const u8 any_addr[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
 
        if (orinoco_lock(priv, &flags) != 0)
                return -EBUSY;
 
        /* Enable automatic roaming - no sanity checks are needed */
-       if (memcmp(&ap_addr->sa_data, off_addr, ETH_ALEN) == 0 ||
-           memcmp(&ap_addr->sa_data, any_addr, ETH_ALEN) == 0) {
+       if (is_zero_ether_addr(ap_addr->sa_data) ||
+           is_broadcast_ether_addr(ap_addr->sa_data)) {
                priv->bssid_fixed = 0;
                memset(priv->desired_bssid, 0, ETH_ALEN);
 
index 14037092ba89da99ddf5481a56f730ac6866aa25..1ef1bfe6a9d7845822cef3413e680de03c2774c9 100644 (file)
@@ -76,6 +76,7 @@ struct p54_channel_entry {
        u16 freq;
        u16 data;
        int index;
+       int max_power;
        enum ieee80211_band band;
 };
 
@@ -173,6 +174,7 @@ static int p54_generate_band(struct ieee80211_hw *dev,
        for (i = 0, j = 0; (j < list->band_channel_num[band]) &&
                           (i < list->entries); i++) {
                struct p54_channel_entry *chan = &list->channels[i];
+               struct ieee80211_channel *dest = &tmp->channels[j];
 
                if (chan->band != band)
                        continue;
@@ -190,14 +192,15 @@ static int p54_generate_band(struct ieee80211_hw *dev,
                        continue;
                }
 
-               tmp->channels[j].band = chan->band;
-               tmp->channels[j].center_freq = chan->freq;
+               dest->band = chan->band;
+               dest->center_freq = chan->freq;
+               dest->max_power = chan->max_power;
                priv->survey[*chan_num].channel = &tmp->channels[j];
                priv->survey[*chan_num].filled = SURVEY_INFO_NOISE_DBM |
                        SURVEY_INFO_CHANNEL_TIME |
                        SURVEY_INFO_CHANNEL_TIME_BUSY |
                        SURVEY_INFO_CHANNEL_TIME_TX;
-               tmp->channels[j].hw_value = (*chan_num);
+               dest->hw_value = (*chan_num);
                j++;
                (*chan_num)++;
        }
@@ -229,10 +232,11 @@ err_out:
        return ret;
 }
 
-static void p54_update_channel_param(struct p54_channel_list *list,
-                                    u16 freq, u16 data)
+static struct p54_channel_entry *p54_update_channel_param(struct p54_channel_list *list,
+                                                         u16 freq, u16 data)
 {
-       int band, i;
+       int i;
+       struct p54_channel_entry *entry = NULL;
 
        /*
         * usually all lists in the eeprom are mostly sorted.
@@ -241,30 +245,78 @@ static void p54_update_channel_param(struct p54_channel_list *list,
         */
        for (i = list->entries; i >= 0; i--) {
                if (freq == list->channels[i].freq) {
-                       list->channels[i].data |= data;
+                       entry = &list->channels[i];
                        break;
                }
        }
 
        if ((i < 0) && (list->entries < list->max_entries)) {
                /* entry does not exist yet. Initialize a new one. */
-               band = p54_get_band_from_freq(freq);
+               int band = p54_get_band_from_freq(freq);
 
                /*
                 * filter out frequencies which don't belong into
                 * any supported band.
                 */
-               if (band < 0)
-                       return ;
+               if (band >= 0) {
+                       i = list->entries++;
+                       list->band_channel_num[band]++;
+
+                       entry = &list->channels[i];
+                       entry->freq = freq;
+                       entry->band = band;
+                       entry->index = ieee80211_frequency_to_channel(freq);
+                       entry->max_power = 0;
+                       entry->data = 0;
+               }
+       }
 
-               i = list->entries++;
-               list->band_channel_num[band]++;
+       if (entry)
+               entry->data |= data;
 
-               list->channels[i].freq = freq;
-               list->channels[i].data = data;
-               list->channels[i].band = band;
-               list->channels[i].index = ieee80211_frequency_to_channel(freq);
-               /* TODO: parse output_limit and fill max_power */
+       return entry;
+}
+
+static int p54_get_maxpower(struct p54_common *priv, void *data)
+{
+       switch (priv->rxhw & PDR_SYNTH_FRONTEND_MASK) {
+       case PDR_SYNTH_FRONTEND_LONGBOW: {
+               struct pda_channel_output_limit_longbow *pda = data;
+               int j;
+               u16 rawpower = 0;
+               pda = data;
+               for (j = 0; j < ARRAY_SIZE(pda->point); j++) {
+                       struct pda_channel_output_limit_point_longbow *point =
+                               &pda->point[j];
+                       rawpower = max_t(u16,
+                               rawpower, le16_to_cpu(point->val_qpsk));
+                       rawpower = max_t(u16,
+                               rawpower, le16_to_cpu(point->val_bpsk));
+                       rawpower = max_t(u16,
+                               rawpower, le16_to_cpu(point->val_16qam));
+                       rawpower = max_t(u16,
+                               rawpower, le16_to_cpu(point->val_64qam));
+               }
+               /* longbow seems to use 1/16 dBm units */
+               return rawpower / 16;
+               }
+
+       case PDR_SYNTH_FRONTEND_DUETTE3:
+       case PDR_SYNTH_FRONTEND_DUETTE2:
+       case PDR_SYNTH_FRONTEND_FRISBEE:
+       case PDR_SYNTH_FRONTEND_XBOW: {
+               struct pda_channel_output_limit *pda = data;
+               u8 rawpower = 0;
+               rawpower = max(rawpower, pda->val_qpsk);
+               rawpower = max(rawpower, pda->val_bpsk);
+               rawpower = max(rawpower, pda->val_16qam);
+               rawpower = max(rawpower, pda->val_64qam);
+               /* raw values are in 1/4 dBm units */
+               return rawpower / 4;
+               }
+
+       default:
+               return 20;
        }
 }
 
@@ -315,12 +367,19 @@ static int p54_generate_channel_lists(struct ieee80211_hw *dev)
                }
 
                if (i < priv->output_limit->entries) {
-                       freq = le16_to_cpup((__le16 *) (i *
-                                           priv->output_limit->entry_size +
-                                           priv->output_limit->offset +
-                                           priv->output_limit->data));
-
-                       p54_update_channel_param(list, freq, CHAN_HAS_LIMIT);
+                       struct p54_channel_entry *tmp;
+
+                       void *data = (void *) ((unsigned long) i *
+                               priv->output_limit->entry_size +
+                               priv->output_limit->offset +
+                               priv->output_limit->data);
+
+                       freq = le16_to_cpup((__le16 *) data);
+                       tmp = p54_update_channel_param(list, freq,
+                                                      CHAN_HAS_LIMIT);
+                       if (tmp) {
+                               tmp->max_power = p54_get_maxpower(priv, data);
+                       }
                }
 
                if (i < priv->curve_data->entries) {
@@ -834,11 +893,12 @@ good_eeprom:
                goto err;
        }
 
+       priv->rxhw = synth & PDR_SYNTH_FRONTEND_MASK;
+
        err = p54_generate_channel_lists(dev);
        if (err)
                goto err;
 
-       priv->rxhw = synth & PDR_SYNTH_FRONTEND_MASK;
        if (priv->rxhw == PDR_SYNTH_FRONTEND_XBOW)
                p54_init_xbow_synth(priv);
        if (!(synth & PDR_SYNTH_24_GHZ_DISABLED))
index afde72b8460652dfa1fa3a475b1d35d9f50a04c4..20ebe39a3f4e8714d49cf8ff58478f87f6e4dbde 100644 (file)
@@ -57,6 +57,18 @@ struct pda_channel_output_limit {
        u8 rate_set_size;
 } __packed;
 
+struct pda_channel_output_limit_point_longbow {
+       __le16 val_bpsk;
+       __le16 val_qpsk;
+       __le16 val_16qam;
+       __le16 val_64qam;
+} __packed;
+
+struct pda_channel_output_limit_longbow {
+       __le16 freq;
+       struct pda_channel_output_limit_point_longbow point[3];
+} __packed;
+
 struct pda_pa_curve_data_sample_rev0 {
        u8 rf_power;
        u8 pa_detector;
index 3d8d622bec55d394543cf1a563b2163573e3b4f8..de1d46bf97dffc50836e813d4dc4edfc36f6a27a 100644 (file)
@@ -526,7 +526,9 @@ int p54_init_leds(struct p54_common *priv);
 void p54_unregister_leds(struct p54_common *priv);
 
 /* xmit functions */
-void p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb);
+void p54_tx_80211(struct ieee80211_hw *dev,
+                 struct ieee80211_tx_control *control,
+                 struct sk_buff *skb);
 int p54_tx_cancel(struct p54_common *priv, __le32 req_id);
 void p54_tx(struct p54_common *priv, struct sk_buff *skb);
 
index 7cffea795ad27d0044e777a0a04e02e741793ea9..aadda99989c007838faf7eb5ca816d18904ea91c 100644 (file)
@@ -139,6 +139,7 @@ static int p54_beacon_format_ie_tim(struct sk_buff *skb)
 static int p54_beacon_update(struct p54_common *priv,
                        struct ieee80211_vif *vif)
 {
+       struct ieee80211_tx_control control = { };
        struct sk_buff *beacon;
        int ret;
 
@@ -158,7 +159,7 @@ static int p54_beacon_update(struct p54_common *priv,
         * to cancel the old beacon template by hand, instead the firmware
         * will release the previous one through the feedback mechanism.
         */
-       p54_tx_80211(priv->hw, beacon);
+       p54_tx_80211(priv->hw, &control, beacon);
        priv->tsf_high32 = 0;
        priv->tsf_low32 = 0;
 
@@ -514,6 +515,17 @@ static int p54_set_key(struct ieee80211_hw *dev, enum set_key_cmd cmd,
        if (modparam_nohwcrypt)
                return -EOPNOTSUPP;
 
+       if (key->flags & IEEE80211_KEY_FLAG_RX_MGMT) {
+               /*
+                * Unfortunately most/all firmwares are trying to decrypt
+                * incoming management frames if a suitable key can be found.
+                * However, in doing so the data in these frames gets
+                * corrupted. So, we can't have firmware supported crypto
+                * offload in this case.
+                */
+               return -EOPNOTSUPP;
+       }
+
        mutex_lock(&priv->conf_mutex);
        if (cmd == SET_KEY) {
                switch (key->cipher) {
@@ -737,6 +749,7 @@ struct ieee80211_hw *p54_init_common(size_t priv_data_len)
                     IEEE80211_HW_SIGNAL_DBM |
                     IEEE80211_HW_SUPPORTS_PS |
                     IEEE80211_HW_PS_NULLFUNC_STACK |
+                    IEEE80211_HW_MFP_CAPABLE |
                     IEEE80211_HW_REPORTS_TX_ACK_STATUS;
 
        dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
index 89318adc8c7f0427ec8e95caed78a36312a934b8..b4390797d78c1c6917d4ea81e0ddf2f269fbc46f 100644 (file)
@@ -488,6 +488,58 @@ static int p54p_open(struct ieee80211_hw *dev)
        return 0;
 }
 
+static void p54p_firmware_step2(const struct firmware *fw,
+                               void *context)
+{
+       struct p54p_priv *priv = context;
+       struct ieee80211_hw *dev = priv->common.hw;
+       struct pci_dev *pdev = priv->pdev;
+       int err;
+
+       if (!fw) {
+               dev_err(&pdev->dev, "Cannot find firmware (isl3886pci)\n");
+               err = -ENOENT;
+               goto out;
+       }
+
+       priv->firmware = fw;
+
+       err = p54p_open(dev);
+       if (err)
+               goto out;
+       err = p54_read_eeprom(dev);
+       p54p_stop(dev);
+       if (err)
+               goto out;
+
+       err = p54_register_common(dev, &pdev->dev);
+       if (err)
+               goto out;
+
+out:
+
+       complete(&priv->fw_loaded);
+
+       if (err) {
+               struct device *parent = pdev->dev.parent;
+
+               if (parent)
+                       device_lock(parent);
+
+               /*
+                * This will indirectly result in a call to p54p_remove.
+                * Hence, we don't need to bother with freeing any
+                * allocated resources at all.
+                */
+               device_release_driver(&pdev->dev);
+
+               if (parent)
+                       device_unlock(parent);
+       }
+
+       pci_dev_put(pdev);
+}
+
 static int __devinit p54p_probe(struct pci_dev *pdev,
                                const struct pci_device_id *id)
 {
@@ -496,6 +548,7 @@ static int __devinit p54p_probe(struct pci_dev *pdev,
        unsigned long mem_addr, mem_len;
        int err;
 
+       pci_dev_get(pdev);
        err = pci_enable_device(pdev);
        if (err) {
                dev_err(&pdev->dev, "Cannot enable new PCI device\n");
@@ -537,6 +590,7 @@ static int __devinit p54p_probe(struct pci_dev *pdev,
        priv = dev->priv;
        priv->pdev = pdev;
 
+       init_completion(&priv->fw_loaded);
        SET_IEEE80211_DEV(dev, &pdev->dev);
        pci_set_drvdata(pdev, dev);
 
@@ -561,32 +615,12 @@ static int __devinit p54p_probe(struct pci_dev *pdev,
        spin_lock_init(&priv->lock);
        tasklet_init(&priv->tasklet, p54p_tasklet, (unsigned long)dev);
 
-       err = request_firmware(&priv->firmware, "isl3886pci",
-                              &priv->pdev->dev);
-       if (err) {
-               dev_err(&pdev->dev, "Cannot find firmware (isl3886pci)\n");
-               err = request_firmware(&priv->firmware, "isl3886",
-                                      &priv->pdev->dev);
-               if (err)
-                       goto err_free_common;
-       }
-
-       err = p54p_open(dev);
-       if (err)
-               goto err_free_common;
-       err = p54_read_eeprom(dev);
-       p54p_stop(dev);
-       if (err)
-               goto err_free_common;
-
-       err = p54_register_common(dev, &pdev->dev);
-       if (err)
-               goto err_free_common;
-
-       return 0;
+       err = request_firmware_nowait(THIS_MODULE, 1, "isl3886pci",
+                                     &priv->pdev->dev, GFP_KERNEL,
+                                     priv, p54p_firmware_step2);
+       if (!err)
+               return 0;
 
- err_free_common:
-       release_firmware(priv->firmware);
        pci_free_consistent(pdev, sizeof(*priv->ring_control),
                            priv->ring_control, priv->ring_control_dma);
 
@@ -601,6 +635,7 @@ static int __devinit p54p_probe(struct pci_dev *pdev,
        pci_release_regions(pdev);
  err_disable_dev:
        pci_disable_device(pdev);
+       pci_dev_put(pdev);
        return err;
 }
 
@@ -612,8 +647,9 @@ static void __devexit p54p_remove(struct pci_dev *pdev)
        if (!dev)
                return;
 
-       p54_unregister_common(dev);
        priv = dev->priv;
+       wait_for_completion(&priv->fw_loaded);
+       p54_unregister_common(dev);
        release_firmware(priv->firmware);
        pci_free_consistent(pdev, sizeof(*priv->ring_control),
                            priv->ring_control, priv->ring_control_dma);
index 7aa509f7e387c052c31ca56d9a159b0dbc8d1743..68405c142f973d356a8847c72cafb5d3fdc6f6fe 100644 (file)
@@ -105,6 +105,7 @@ struct p54p_priv {
        struct sk_buff *tx_buf_data[32];
        struct sk_buff *tx_buf_mgmt[4];
        struct completion boot_comp;
+       struct completion fw_loaded;
 };
 
 #endif /* P54USB_H */
index f38786e02623e04c5ca67f20c3ee43b009aa98d1..5861e13a6fd8d5f9aa83c24892acfc11a210b02e 100644 (file)
@@ -676,8 +676,9 @@ int p54_rx(struct ieee80211_hw *dev, struct sk_buff *skb)
 EXPORT_SYMBOL_GPL(p54_rx);
 
 static void p54_tx_80211_header(struct p54_common *priv, struct sk_buff *skb,
-                               struct ieee80211_tx_info *info, u8 *queue,
-                               u32 *extra_len, u16 *flags, u16 *aid,
+                               struct ieee80211_tx_info *info,
+                               struct ieee80211_sta *sta,
+                               u8 *queue, u32 *extra_len, u16 *flags, u16 *aid,
                                bool *burst_possible)
 {
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
@@ -746,8 +747,8 @@ static void p54_tx_80211_header(struct p54_common *priv, struct sk_buff *skb,
                        }
                }
 
-               if (info->control.sta)
-                       *aid = info->control.sta->aid;
+               if (sta)
+                       *aid = sta->aid;
                break;
        }
 }
@@ -767,7 +768,9 @@ static u8 p54_convert_algo(u32 cipher)
        }
 }
 
-void p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb)
+void p54_tx_80211(struct ieee80211_hw *dev,
+                 struct ieee80211_tx_control *control,
+                 struct sk_buff *skb)
 {
        struct p54_common *priv = dev->priv;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -784,7 +787,7 @@ void p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb)
        u8 nrates = 0, nremaining = 8;
        bool burst_allowed = false;
 
-       p54_tx_80211_header(priv, skb, info, &queue, &extra_len,
+       p54_tx_80211_header(priv, skb, info, control->sta, &queue, &extra_len,
                            &hdr_flags, &aid, &burst_allowed);
 
        if (p54_tx_qos_accounting_alloc(priv, skb, queue)) {
index 7a4ae9ee1c63057b78d582fd3a94964a6fdea264..bd1f0cb56085ef94eb5e634a7467f1c12e3a3ffb 100644 (file)
@@ -1959,9 +1959,6 @@ static int rndis_scan(struct wiphy *wiphy,
         */
        rndis_check_bssid_list(usbdev, NULL, NULL);
 
-       if (!request)
-               return -EINVAL;
-
        if (priv->scan_request && priv->scan_request != request)
                return -EBUSY;
 
index 64328af496f598bb3280784b6d2adfd25ec5cc70..e3a2d9070cf655acdfb9758a525c5cc7a71575a9 100644 (file)
@@ -205,7 +205,7 @@ static int rt2400pci_rfkill_poll(struct rt2x00_dev *rt2x00dev)
        u32 reg;
 
        rt2x00pci_register_read(rt2x00dev, GPIOCSR, &reg);
-       return rt2x00_get_field32(reg, GPIOCSR_BIT0);
+       return rt2x00_get_field32(reg, GPIOCSR_VAL0);
 }
 
 #ifdef CONFIG_RT2X00_LIB_LEDS
@@ -1629,7 +1629,7 @@ static int rt2400pci_probe_hw(struct rt2x00_dev *rt2x00dev)
         * rfkill switch GPIO pin correctly.
         */
        rt2x00pci_register_read(rt2x00dev, GPIOCSR, &reg);
-       rt2x00_set_field32(&reg, GPIOCSR_BIT8, 1);
+       rt2x00_set_field32(&reg, GPIOCSR_DIR0, 1);
        rt2x00pci_register_write(rt2x00dev, GPIOCSR, reg);
 
        /*
@@ -1789,7 +1789,6 @@ static const struct data_queue_desc rt2400pci_queue_atim = {
 
 static const struct rt2x00_ops rt2400pci_ops = {
        .name                   = KBUILD_MODNAME,
-       .max_sta_intf           = 1,
        .max_ap_intf            = 1,
        .eeprom_size            = EEPROM_SIZE,
        .rf_size                = RF_SIZE,
index 7564ae992b735179b15e24a3d616c5a71acb1aeb..e4b07f0aa3cc0bfd955c1cebfcc1e1443065a4f9 100644 (file)
 
 /*
  * GPIOCSR: GPIO control register.
+ *     GPIOCSR_VALx: Actual GPIO pin x value
+ *     GPIOCSR_DIRx: GPIO direction: 0 = output; 1 = input
  */
 #define GPIOCSR                                0x0120
-#define GPIOCSR_BIT0                   FIELD32(0x00000001)
-#define GPIOCSR_BIT1                   FIELD32(0x00000002)
-#define GPIOCSR_BIT2                   FIELD32(0x00000004)
-#define GPIOCSR_BIT3                   FIELD32(0x00000008)
-#define GPIOCSR_BIT4                   FIELD32(0x00000010)
-#define GPIOCSR_BIT5                   FIELD32(0x00000020)
-#define GPIOCSR_BIT6                   FIELD32(0x00000040)
-#define GPIOCSR_BIT7                   FIELD32(0x00000080)
-#define GPIOCSR_BIT8                   FIELD32(0x00000100)
+#define GPIOCSR_VAL0                   FIELD32(0x00000001)
+#define GPIOCSR_VAL1                   FIELD32(0x00000002)
+#define GPIOCSR_VAL2                   FIELD32(0x00000004)
+#define GPIOCSR_VAL3                   FIELD32(0x00000008)
+#define GPIOCSR_VAL4                   FIELD32(0x00000010)
+#define GPIOCSR_VAL5                   FIELD32(0x00000020)
+#define GPIOCSR_VAL6                   FIELD32(0x00000040)
+#define GPIOCSR_VAL7                   FIELD32(0x00000080)
+#define GPIOCSR_DIR0                   FIELD32(0x00000100)
+#define GPIOCSR_DIR1                   FIELD32(0x00000200)
+#define GPIOCSR_DIR2                   FIELD32(0x00000400)
+#define GPIOCSR_DIR3                   FIELD32(0x00000800)
+#define GPIOCSR_DIR4                   FIELD32(0x00001000)
+#define GPIOCSR_DIR5                   FIELD32(0x00002000)
+#define GPIOCSR_DIR6                   FIELD32(0x00004000)
+#define GPIOCSR_DIR7                   FIELD32(0x00008000)
 
 /*
  * BBPPCSR: BBP Pin control register.
index 3de0406735f6b7347b46cdf2305e413aaa17256d..479d756e275b388fc5d17b77bde4303e27117e48 100644 (file)
@@ -205,7 +205,7 @@ static int rt2500pci_rfkill_poll(struct rt2x00_dev *rt2x00dev)
        u32 reg;
 
        rt2x00pci_register_read(rt2x00dev, GPIOCSR, &reg);
-       return rt2x00_get_field32(reg, GPIOCSR_BIT0);
+       return rt2x00_get_field32(reg, GPIOCSR_VAL0);
 }
 
 #ifdef CONFIG_RT2X00_LIB_LEDS
@@ -2081,7 +2081,6 @@ static const struct data_queue_desc rt2500pci_queue_atim = {
 
 static const struct rt2x00_ops rt2500pci_ops = {
        .name                   = KBUILD_MODNAME,
-       .max_sta_intf           = 1,
        .max_ap_intf            = 1,
        .eeprom_size            = EEPROM_SIZE,
        .rf_size                = RF_SIZE,
index 2aad7ba8a10083547c8e39d4cba5dc77dcbc42d7..9c10068e4987a384019301542f664bd44f1b130a 100644 (file)
 
 /*
  * GPIOCSR: GPIO control register.
+ *     GPIOCSR_VALx: GPIO value
+ *     GPIOCSR_DIRx: GPIO direction: 0 = output; 1 = input
  */
 #define GPIOCSR                                0x0120
-#define GPIOCSR_BIT0                   FIELD32(0x00000001)
-#define GPIOCSR_BIT1                   FIELD32(0x00000002)
-#define GPIOCSR_BIT2                   FIELD32(0x00000004)
-#define GPIOCSR_BIT3                   FIELD32(0x00000008)
-#define GPIOCSR_BIT4                   FIELD32(0x00000010)
-#define GPIOCSR_BIT5                   FIELD32(0x00000020)
-#define GPIOCSR_BIT6                   FIELD32(0x00000040)
-#define GPIOCSR_BIT7                   FIELD32(0x00000080)
+#define GPIOCSR_VAL0                   FIELD32(0x00000001)
+#define GPIOCSR_VAL1                   FIELD32(0x00000002)
+#define GPIOCSR_VAL2                   FIELD32(0x00000004)
+#define GPIOCSR_VAL3                   FIELD32(0x00000008)
+#define GPIOCSR_VAL4                   FIELD32(0x00000010)
+#define GPIOCSR_VAL5                   FIELD32(0x00000020)
+#define GPIOCSR_VAL6                   FIELD32(0x00000040)
+#define GPIOCSR_VAL7                   FIELD32(0x00000080)
 #define GPIOCSR_DIR0                   FIELD32(0x00000100)
 #define GPIOCSR_DIR1                   FIELD32(0x00000200)
 #define GPIOCSR_DIR2                   FIELD32(0x00000400)
index 89fee311d8fda5ad07ae5ecd50fae567232aa35d..a12e84f892be1d9b13d1762a0369a7841af32645 100644 (file)
@@ -283,7 +283,7 @@ static int rt2500usb_rfkill_poll(struct rt2x00_dev *rt2x00dev)
        u16 reg;
 
        rt2500usb_register_read(rt2x00dev, MAC_CSR19, &reg);
-       return rt2x00_get_field16(reg, MAC_CSR19_BIT7);
+       return rt2x00_get_field16(reg, MAC_CSR19_VAL7);
 }
 
 #ifdef CONFIG_RT2X00_LIB_LEDS
@@ -1786,7 +1786,7 @@ static int rt2500usb_probe_hw(struct rt2x00_dev *rt2x00dev)
         * rfkill switch GPIO pin correctly.
         */
        rt2500usb_register_read(rt2x00dev, MAC_CSR19, &reg);
-       rt2x00_set_field16(&reg, MAC_CSR19_BIT8, 0);
+       rt2x00_set_field16(&reg, MAC_CSR19_DIR0, 0);
        rt2500usb_register_write(rt2x00dev, MAC_CSR19, reg);
 
        /*
@@ -1896,7 +1896,6 @@ static const struct data_queue_desc rt2500usb_queue_atim = {
 
 static const struct rt2x00_ops rt2500usb_ops = {
        .name                   = KBUILD_MODNAME,
-       .max_sta_intf           = 1,
        .max_ap_intf            = 1,
        .eeprom_size            = EEPROM_SIZE,
        .rf_size                = RF_SIZE,
index 196bd5103e4f5450483ce1e60449021bf6eafd2c..1b91a4cef9652c22fe336c2c889ec300d04d685f 100644 (file)
 
 /*
  * MAC_CSR19: GPIO control register.
+ *     MAC_CSR19_VALx: GPIO value
+ *     MAC_CSR19_DIRx: GPIO direction: 0 = input; 1 = output
  */
 #define MAC_CSR19                      0x0426
-#define MAC_CSR19_BIT0                 FIELD16(0x0001)
-#define MAC_CSR19_BIT1                 FIELD16(0x0002)
-#define MAC_CSR19_BIT2                 FIELD16(0x0004)
-#define MAC_CSR19_BIT3                 FIELD16(0x0008)
-#define MAC_CSR19_BIT4                 FIELD16(0x0010)
-#define MAC_CSR19_BIT5                 FIELD16(0x0020)
-#define MAC_CSR19_BIT6                 FIELD16(0x0040)
-#define MAC_CSR19_BIT7                 FIELD16(0x0080)
-#define MAC_CSR19_BIT8                 FIELD16(0x0100)
+#define MAC_CSR19_VAL0                 FIELD16(0x0001)
+#define MAC_CSR19_VAL1                 FIELD16(0x0002)
+#define MAC_CSR19_VAL2                 FIELD16(0x0004)
+#define MAC_CSR19_VAL3                 FIELD16(0x0008)
+#define MAC_CSR19_VAL4                 FIELD16(0x0010)
+#define MAC_CSR19_VAL5                 FIELD16(0x0020)
+#define MAC_CSR19_VAL6                 FIELD16(0x0040)
+#define MAC_CSR19_VAL7                 FIELD16(0x0080)
+#define MAC_CSR19_DIR0                 FIELD16(0x0100)
+#define MAC_CSR19_DIR1                 FIELD16(0x0200)
+#define MAC_CSR19_DIR2                 FIELD16(0x0400)
+#define MAC_CSR19_DIR3                 FIELD16(0x0800)
+#define MAC_CSR19_DIR4                 FIELD16(0x1000)
+#define MAC_CSR19_DIR5                 FIELD16(0x2000)
+#define MAC_CSR19_DIR6                 FIELD16(0x4000)
+#define MAC_CSR19_DIR7                 FIELD16(0x8000)
 
 /*
  * MAC_CSR20: LED control register.
index e252e9bafd0e2776075a8159d74e4f9180b87e83..6d67c3ede6513fd4e1be870bbf117cb2d69138dc 100644 (file)
 #define WMM_TXOP1_CFG_AC3TXOP          FIELD32(0xffff0000)
 
 /*
- * GPIO_CTRL_CFG:
- * GPIOD: GPIO direction, 0: Output, 1: Input
- */
-#define GPIO_CTRL_CFG                  0x0228
-#define GPIO_CTRL_CFG_BIT0             FIELD32(0x00000001)
-#define GPIO_CTRL_CFG_BIT1             FIELD32(0x00000002)
-#define GPIO_CTRL_CFG_BIT2             FIELD32(0x00000004)
-#define GPIO_CTRL_CFG_BIT3             FIELD32(0x00000008)
-#define GPIO_CTRL_CFG_BIT4             FIELD32(0x00000010)
-#define GPIO_CTRL_CFG_BIT5             FIELD32(0x00000020)
-#define GPIO_CTRL_CFG_BIT6             FIELD32(0x00000040)
-#define GPIO_CTRL_CFG_BIT7             FIELD32(0x00000080)
-#define GPIO_CTRL_CFG_GPIOD_BIT0       FIELD32(0x00000100)
-#define GPIO_CTRL_CFG_GPIOD_BIT1       FIELD32(0x00000200)
-#define GPIO_CTRL_CFG_GPIOD_BIT2       FIELD32(0x00000400)
-#define GPIO_CTRL_CFG_GPIOD_BIT3       FIELD32(0x00000800)
-#define GPIO_CTRL_CFG_GPIOD_BIT4       FIELD32(0x00001000)
-#define GPIO_CTRL_CFG_GPIOD_BIT5       FIELD32(0x00002000)
-#define GPIO_CTRL_CFG_GPIOD_BIT6       FIELD32(0x00004000)
-#define GPIO_CTRL_CFG_GPIOD_BIT7       FIELD32(0x00008000)
+ * GPIO_CTRL:
+ *     GPIO_CTRL_VALx: GPIO value
+ *     GPIO_CTRL_DIRx: GPIO direction: 0 = output; 1 = input
+ */
+#define GPIO_CTRL                      0x0228
+#define GPIO_CTRL_VAL0                 FIELD32(0x00000001)
+#define GPIO_CTRL_VAL1                 FIELD32(0x00000002)
+#define GPIO_CTRL_VAL2                 FIELD32(0x00000004)
+#define GPIO_CTRL_VAL3                 FIELD32(0x00000008)
+#define GPIO_CTRL_VAL4                 FIELD32(0x00000010)
+#define GPIO_CTRL_VAL5                 FIELD32(0x00000020)
+#define GPIO_CTRL_VAL6                 FIELD32(0x00000040)
+#define GPIO_CTRL_VAL7                 FIELD32(0x00000080)
+#define GPIO_CTRL_DIR0                 FIELD32(0x00000100)
+#define GPIO_CTRL_DIR1                 FIELD32(0x00000200)
+#define GPIO_CTRL_DIR2                 FIELD32(0x00000400)
+#define GPIO_CTRL_DIR3                 FIELD32(0x00000800)
+#define GPIO_CTRL_DIR4                 FIELD32(0x00001000)
+#define GPIO_CTRL_DIR5                 FIELD32(0x00002000)
+#define GPIO_CTRL_DIR6                 FIELD32(0x00004000)
+#define GPIO_CTRL_DIR7                 FIELD32(0x00008000)
+#define GPIO_CTRL_VAL8                 FIELD32(0x00010000)
+#define GPIO_CTRL_VAL9                 FIELD32(0x00020000)
+#define GPIO_CTRL_VAL10                        FIELD32(0x00040000)
+#define GPIO_CTRL_DIR8                 FIELD32(0x01000000)
+#define GPIO_CTRL_DIR9                 FIELD32(0x02000000)
+#define GPIO_CTRL_DIR10                        FIELD32(0x04000000)
 
 /*
  * MCU_CMD_CFG
@@ -1935,6 +1942,11 @@ struct mac_iveiv_entry {
 #define BBP47_TSSI_TSSI_MODE           FIELD8(0x18)
 #define BBP47_TSSI_ADC6                        FIELD8(0x80)
 
+/*
+ * BBP 49
+ */
+#define BBP49_UPDATE_FLAG              FIELD8(0x01)
+
 /*
  * BBP 109
  */
index b93516d832fb5603e4bb3d287a4770c0c8de06ad..540c94f8505a9b734b9b09ef6ab814ca04b17bd0 100644 (file)
@@ -923,8 +923,8 @@ int rt2800_rfkill_poll(struct rt2x00_dev *rt2x00dev)
                rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, &reg);
                return rt2x00_get_field32(reg, WLAN_GPIO_IN_BIT0);
        } else {
-               rt2800_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg);
-               return rt2x00_get_field32(reg, GPIO_CTRL_CFG_BIT2);
+               rt2800_register_read(rt2x00dev, GPIO_CTRL, &reg);
+               return rt2x00_get_field32(reg, GPIO_CTRL_VAL2);
        }
 }
 EXPORT_SYMBOL_GPL(rt2800_rfkill_poll);
@@ -1570,10 +1570,10 @@ static void rt2800_set_ant_diversity(struct rt2x00_dev *rt2x00dev,
                rt2800_mcu_request(rt2x00dev, MCU_ANT_SELECT, 0xff,
                                   eesk_pin, 0);
 
-       rt2800_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg);
-       rt2x00_set_field32(&reg, GPIO_CTRL_CFG_GPIOD_BIT3, 0);
-       rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT3, gpio_bit3);
-       rt2800_register_write(rt2x00dev, GPIO_CTRL_CFG, reg);
+       rt2800_register_read(rt2x00dev, GPIO_CTRL, &reg);
+       rt2x00_set_field32(&reg, GPIO_CTRL_DIR3, 0);
+       rt2x00_set_field32(&reg, GPIO_CTRL_VAL3, gpio_bit3);
+       rt2800_register_write(rt2x00dev, GPIO_CTRL, reg);
 }
 
 void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant)
@@ -1615,6 +1615,7 @@ void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant)
        case 1:
                if (rt2x00_rt(rt2x00dev, RT3070) ||
                    rt2x00_rt(rt2x00dev, RT3090) ||
+                   rt2x00_rt(rt2x00dev, RT3352) ||
                    rt2x00_rt(rt2x00dev, RT3390)) {
                        rt2x00_eeprom_read(rt2x00dev,
                                           EEPROM_NIC_CONF1, &eeprom);
@@ -1762,36 +1763,15 @@ static void rt2800_config_channel_rf3xxx(struct rt2x00_dev *rt2x00dev,
 
        rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr);
        rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 0);
+       rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD,
+                         rt2x00dev->default_ant.rx_chain_num <= 1);
+       rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD,
+                         rt2x00dev->default_ant.rx_chain_num <= 2);
        rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 0);
-       if (rt2x00_rt(rt2x00dev, RT3390)) {
-               rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD,
-                                 rt2x00dev->default_ant.rx_chain_num == 1);
-               rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD,
-                                 rt2x00dev->default_ant.tx_chain_num == 1);
-       } else {
-               rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 0);
-               rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 0);
-               rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD, 0);
-               rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD, 0);
-
-               switch (rt2x00dev->default_ant.tx_chain_num) {
-               case 1:
-                       rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 1);
-                       /* fall through */
-               case 2:
-                       rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD, 1);
-                       break;
-               }
-
-               switch (rt2x00dev->default_ant.rx_chain_num) {
-               case 1:
-                       rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 1);
-                       /* fall through */
-               case 2:
-                       rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD, 1);
-                       break;
-               }
-       }
+       rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD,
+                         rt2x00dev->default_ant.tx_chain_num <= 1);
+       rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD,
+                         rt2x00dev->default_ant.tx_chain_num <= 2);
        rt2800_rfcsr_write(rt2x00dev, 1, rfcsr);
 
        rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
@@ -1995,13 +1975,13 @@ static void rt2800_config_channel_rf3052(struct rt2x00_dev *rt2x00dev,
                rt2800_rfcsr_write(rt2x00dev, 29, 0x9f);
        }
 
-       rt2800_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg);
-       rt2x00_set_field32(&reg, GPIO_CTRL_CFG_GPIOD_BIT7, 0);
+       rt2800_register_read(rt2x00dev, GPIO_CTRL, &reg);
+       rt2x00_set_field32(&reg, GPIO_CTRL_DIR7, 0);
        if (rf->channel <= 14)
-               rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT7, 1);
+               rt2x00_set_field32(&reg, GPIO_CTRL_VAL7, 1);
        else
-               rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT7, 0);
-       rt2800_register_write(rt2x00dev, GPIO_CTRL_CFG, reg);
+               rt2x00_set_field32(&reg, GPIO_CTRL_VAL7, 0);
+       rt2800_register_write(rt2x00dev, GPIO_CTRL, reg);
 
        rt2800_rfcsr_read(rt2x00dev, 7, &rfcsr);
        rt2x00_set_field8(&rfcsr, RFCSR7_RF_TUNING, 1);
@@ -2053,6 +2033,60 @@ static void rt2800_config_channel_rf3290(struct rt2x00_dev *rt2x00dev,
        }
 }
 
+static void rt2800_config_channel_rf3322(struct rt2x00_dev *rt2x00dev,
+                                        struct ieee80211_conf *conf,
+                                        struct rf_channel *rf,
+                                        struct channel_info *info)
+{
+       u8 rfcsr;
+
+       rt2800_rfcsr_write(rt2x00dev, 8, rf->rf1);
+       rt2800_rfcsr_write(rt2x00dev, 9, rf->rf3);
+
+       rt2800_rfcsr_write(rt2x00dev, 11, 0x42);
+       rt2800_rfcsr_write(rt2x00dev, 12, 0x1c);
+       rt2800_rfcsr_write(rt2x00dev, 13, 0x00);
+
+       if (info->default_power1 > POWER_BOUND)
+               rt2800_rfcsr_write(rt2x00dev, 47, POWER_BOUND);
+       else
+               rt2800_rfcsr_write(rt2x00dev, 47, info->default_power1);
+
+       if (info->default_power2 > POWER_BOUND)
+               rt2800_rfcsr_write(rt2x00dev, 48, POWER_BOUND);
+       else
+               rt2800_rfcsr_write(rt2x00dev, 48, info->default_power2);
+
+       rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr);
+       if (rt2x00dev->freq_offset > FREQ_OFFSET_BOUND)
+               rt2x00_set_field8(&rfcsr, RFCSR17_CODE, FREQ_OFFSET_BOUND);
+       else
+               rt2x00_set_field8(&rfcsr, RFCSR17_CODE, rt2x00dev->freq_offset);
+
+       rt2800_rfcsr_write(rt2x00dev, 17, rfcsr);
+
+       rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr);
+       rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 1);
+       rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 1);
+
+       if ( rt2x00dev->default_ant.tx_chain_num == 2 )
+               rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 1);
+       else
+               rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 0);
+
+       if ( rt2x00dev->default_ant.rx_chain_num == 2 )
+               rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 1);
+       else
+               rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 0);
+
+       rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD, 0);
+       rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD, 0);
+
+       rt2800_rfcsr_write(rt2x00dev, 1, rfcsr);
+
+       rt2800_rfcsr_write(rt2x00dev, 31, 80);
+}
+
 static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev,
                                         struct ieee80211_conf *conf,
                                         struct rf_channel *rf,
@@ -2182,6 +2216,9 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
        case RF3290:
                rt2800_config_channel_rf3290(rt2x00dev, conf, rf, info);
                break;
+       case RF3322:
+               rt2800_config_channel_rf3322(rt2x00dev, conf, rf, info);
+               break;
        case RF5360:
        case RF5370:
        case RF5372:
@@ -2194,6 +2231,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
        }
 
        if (rt2x00_rf(rt2x00dev, RF3290) ||
+           rt2x00_rf(rt2x00dev, RF3322) ||
            rt2x00_rf(rt2x00dev, RF5360) ||
            rt2x00_rf(rt2x00dev, RF5370) ||
            rt2x00_rf(rt2x00dev, RF5372) ||
@@ -2212,10 +2250,17 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
        /*
         * Change BBP settings
         */
-       rt2800_bbp_write(rt2x00dev, 62, 0x37 - rt2x00dev->lna_gain);
-       rt2800_bbp_write(rt2x00dev, 63, 0x37 - rt2x00dev->lna_gain);
-       rt2800_bbp_write(rt2x00dev, 64, 0x37 - rt2x00dev->lna_gain);
-       rt2800_bbp_write(rt2x00dev, 86, 0);
+       if (rt2x00_rt(rt2x00dev, RT3352)) {
+               rt2800_bbp_write(rt2x00dev, 27, 0x0);
+               rt2800_bbp_write(rt2x00dev, 62, 0x26 + rt2x00dev->lna_gain);
+               rt2800_bbp_write(rt2x00dev, 27, 0x20);
+               rt2800_bbp_write(rt2x00dev, 62, 0x26 + rt2x00dev->lna_gain);
+       } else {
+               rt2800_bbp_write(rt2x00dev, 62, 0x37 - rt2x00dev->lna_gain);
+               rt2800_bbp_write(rt2x00dev, 63, 0x37 - rt2x00dev->lna_gain);
+               rt2800_bbp_write(rt2x00dev, 64, 0x37 - rt2x00dev->lna_gain);
+               rt2800_bbp_write(rt2x00dev, 86, 0);
+       }
 
        if (rf->channel <= 14) {
                if (!rt2x00_rt(rt2x00dev, RT5390) &&
@@ -2310,6 +2355,15 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
        rt2800_register_read(rt2x00dev, CH_IDLE_STA, &reg);
        rt2800_register_read(rt2x00dev, CH_BUSY_STA, &reg);
        rt2800_register_read(rt2x00dev, CH_BUSY_STA_SEC, &reg);
+
+       /*
+        * Clear update flag
+        */
+       if (rt2x00_rt(rt2x00dev, RT3352)) {
+               rt2800_bbp_read(rt2x00dev, 49, &bbp);
+               rt2x00_set_field8(&bbp, BBP49_UPDATE_FLAG, 0);
+               rt2800_bbp_write(rt2x00dev, 49, bbp);
+       }
 }
 
 static int rt2800_get_gain_calibration_delta(struct rt2x00_dev *rt2x00dev)
@@ -2821,23 +2875,32 @@ EXPORT_SYMBOL_GPL(rt2800_link_stats);
 
 static u8 rt2800_get_default_vgc(struct rt2x00_dev *rt2x00dev)
 {
+       u8 vgc;
+
        if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ) {
                if (rt2x00_rt(rt2x00dev, RT3070) ||
                    rt2x00_rt(rt2x00dev, RT3071) ||
                    rt2x00_rt(rt2x00dev, RT3090) ||
                    rt2x00_rt(rt2x00dev, RT3290) ||
                    rt2x00_rt(rt2x00dev, RT3390) ||
+                   rt2x00_rt(rt2x00dev, RT3572) ||
                    rt2x00_rt(rt2x00dev, RT5390) ||
                    rt2x00_rt(rt2x00dev, RT5392))
-                       return 0x1c + (2 * rt2x00dev->lna_gain);
+                       vgc = 0x1c + (2 * rt2x00dev->lna_gain);
                else
-                       return 0x2e + rt2x00dev->lna_gain;
+                       vgc = 0x2e + rt2x00dev->lna_gain;
+       } else { /* 5GHZ band */
+               if (rt2x00_rt(rt2x00dev, RT3572))
+                       vgc = 0x22 + (rt2x00dev->lna_gain * 5) / 3;
+               else {
+                       if (!test_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags))
+                               vgc = 0x32 + (rt2x00dev->lna_gain * 5) / 3;
+                       else
+                               vgc = 0x3a + (rt2x00dev->lna_gain * 5) / 3;
+               }
        }
 
-       if (!test_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags))
-               return 0x32 + (rt2x00dev->lna_gain * 5) / 3;
-       else
-               return 0x3a + (rt2x00dev->lna_gain * 5) / 3;
+       return vgc;
 }
 
 static inline void rt2800_set_vgc(struct rt2x00_dev *rt2x00dev,
@@ -2998,11 +3061,15 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
                rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400);
                rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000);
                rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000030);
+       } else if (rt2x00_rt(rt2x00dev, RT3352)) {
+               rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000402);
+               rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
+               rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
        } else if (rt2x00_rt(rt2x00dev, RT3572)) {
                rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400);
                rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
        } else if (rt2x00_rt(rt2x00dev, RT5390) ||
-                          rt2x00_rt(rt2x00dev, RT5392)) {
+                  rt2x00_rt(rt2x00dev, RT5392)) {
                rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000404);
                rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
                rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
@@ -3378,6 +3445,11 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
                     rt2800_wait_bbp_ready(rt2x00dev)))
                return -EACCES;
 
+       if (rt2x00_rt(rt2x00dev, RT3352)) {
+               rt2800_bbp_write(rt2x00dev, 3, 0x00);
+               rt2800_bbp_write(rt2x00dev, 4, 0x50);
+       }
+
        if (rt2x00_rt(rt2x00dev, RT3290) ||
            rt2x00_rt(rt2x00dev, RT5390) ||
            rt2x00_rt(rt2x00dev, RT5392)) {
@@ -3388,15 +3460,20 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
 
        if (rt2800_is_305x_soc(rt2x00dev) ||
            rt2x00_rt(rt2x00dev, RT3290) ||
+           rt2x00_rt(rt2x00dev, RT3352) ||
            rt2x00_rt(rt2x00dev, RT3572) ||
            rt2x00_rt(rt2x00dev, RT5390) ||
            rt2x00_rt(rt2x00dev, RT5392))
                rt2800_bbp_write(rt2x00dev, 31, 0x08);
 
+       if (rt2x00_rt(rt2x00dev, RT3352))
+               rt2800_bbp_write(rt2x00dev, 47, 0x48);
+
        rt2800_bbp_write(rt2x00dev, 65, 0x2c);
        rt2800_bbp_write(rt2x00dev, 66, 0x38);
 
        if (rt2x00_rt(rt2x00dev, RT3290) ||
+           rt2x00_rt(rt2x00dev, RT3352) ||
            rt2x00_rt(rt2x00dev, RT5390) ||
            rt2x00_rt(rt2x00dev, RT5392))
                rt2800_bbp_write(rt2x00dev, 68, 0x0b);
@@ -3405,6 +3482,7 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
                rt2800_bbp_write(rt2x00dev, 69, 0x16);
                rt2800_bbp_write(rt2x00dev, 73, 0x12);
        } else if (rt2x00_rt(rt2x00dev, RT3290) ||
+                  rt2x00_rt(rt2x00dev, RT3352) ||
                   rt2x00_rt(rt2x00dev, RT5390) ||
                   rt2x00_rt(rt2x00dev, RT5392)) {
                rt2800_bbp_write(rt2x00dev, 69, 0x12);
@@ -3436,15 +3514,17 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
        } else if (rt2800_is_305x_soc(rt2x00dev)) {
                rt2800_bbp_write(rt2x00dev, 78, 0x0e);
                rt2800_bbp_write(rt2x00dev, 80, 0x08);
-       } else {
-               rt2800_bbp_write(rt2x00dev, 81, 0x37);
-       }
-
-       if (rt2x00_rt(rt2x00dev, RT3290)) {
+       } else if (rt2x00_rt(rt2x00dev, RT3290)) {
                rt2800_bbp_write(rt2x00dev, 74, 0x0b);
                rt2800_bbp_write(rt2x00dev, 79, 0x18);
                rt2800_bbp_write(rt2x00dev, 80, 0x09);
                rt2800_bbp_write(rt2x00dev, 81, 0x33);
+       } else if (rt2x00_rt(rt2x00dev, RT3352)) {
+               rt2800_bbp_write(rt2x00dev, 78, 0x0e);
+               rt2800_bbp_write(rt2x00dev, 80, 0x08);
+               rt2800_bbp_write(rt2x00dev, 81, 0x37);
+       } else {
+               rt2800_bbp_write(rt2x00dev, 81, 0x37);
        }
 
        rt2800_bbp_write(rt2x00dev, 82, 0x62);
@@ -3465,18 +3545,21 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
                rt2800_bbp_write(rt2x00dev, 84, 0x99);
 
        if (rt2x00_rt(rt2x00dev, RT3290) ||
+           rt2x00_rt(rt2x00dev, RT3352) ||
            rt2x00_rt(rt2x00dev, RT5390) ||
            rt2x00_rt(rt2x00dev, RT5392))
                rt2800_bbp_write(rt2x00dev, 86, 0x38);
        else
                rt2800_bbp_write(rt2x00dev, 86, 0x00);
 
-       if (rt2x00_rt(rt2x00dev, RT5392))
+       if (rt2x00_rt(rt2x00dev, RT3352) ||
+           rt2x00_rt(rt2x00dev, RT5392))
                rt2800_bbp_write(rt2x00dev, 88, 0x90);
 
        rt2800_bbp_write(rt2x00dev, 91, 0x04);
 
        if (rt2x00_rt(rt2x00dev, RT3290) ||
+           rt2x00_rt(rt2x00dev, RT3352) ||
            rt2x00_rt(rt2x00dev, RT5390) ||
            rt2x00_rt(rt2x00dev, RT5392))
                rt2800_bbp_write(rt2x00dev, 92, 0x02);
@@ -3493,6 +3576,7 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
            rt2x00_rt_rev_gte(rt2x00dev, RT3090, REV_RT3090E) ||
            rt2x00_rt_rev_gte(rt2x00dev, RT3390, REV_RT3390E) ||
            rt2x00_rt(rt2x00dev, RT3290) ||
+           rt2x00_rt(rt2x00dev, RT3352) ||
            rt2x00_rt(rt2x00dev, RT3572) ||
            rt2x00_rt(rt2x00dev, RT5390) ||
            rt2x00_rt(rt2x00dev, RT5392) ||
@@ -3502,6 +3586,7 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
                rt2800_bbp_write(rt2x00dev, 103, 0x00);
 
        if (rt2x00_rt(rt2x00dev, RT3290) ||
+           rt2x00_rt(rt2x00dev, RT3352) ||
            rt2x00_rt(rt2x00dev, RT5390) ||
            rt2x00_rt(rt2x00dev, RT5392))
                rt2800_bbp_write(rt2x00dev, 104, 0x92);
@@ -3510,6 +3595,8 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
                rt2800_bbp_write(rt2x00dev, 105, 0x01);
        else if (rt2x00_rt(rt2x00dev, RT3290))
                rt2800_bbp_write(rt2x00dev, 105, 0x1c);
+       else if (rt2x00_rt(rt2x00dev, RT3352))
+               rt2800_bbp_write(rt2x00dev, 105, 0x34);
        else if (rt2x00_rt(rt2x00dev, RT5390) ||
                         rt2x00_rt(rt2x00dev, RT5392))
                rt2800_bbp_write(rt2x00dev, 105, 0x3c);
@@ -3519,11 +3606,16 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
        if (rt2x00_rt(rt2x00dev, RT3290) ||
            rt2x00_rt(rt2x00dev, RT5390))
                rt2800_bbp_write(rt2x00dev, 106, 0x03);
+       else if (rt2x00_rt(rt2x00dev, RT3352))
+               rt2800_bbp_write(rt2x00dev, 106, 0x05);
        else if (rt2x00_rt(rt2x00dev, RT5392))
                rt2800_bbp_write(rt2x00dev, 106, 0x12);
        else
                rt2800_bbp_write(rt2x00dev, 106, 0x35);
 
+       if (rt2x00_rt(rt2x00dev, RT3352))
+               rt2800_bbp_write(rt2x00dev, 120, 0x50);
+
        if (rt2x00_rt(rt2x00dev, RT3290) ||
            rt2x00_rt(rt2x00dev, RT5390) ||
            rt2x00_rt(rt2x00dev, RT5392))
@@ -3534,6 +3626,9 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
                rt2800_bbp_write(rt2x00dev, 135, 0xf6);
        }
 
+       if (rt2x00_rt(rt2x00dev, RT3352))
+               rt2800_bbp_write(rt2x00dev, 137, 0x0f);
+
        if (rt2x00_rt(rt2x00dev, RT3071) ||
            rt2x00_rt(rt2x00dev, RT3090) ||
            rt2x00_rt(rt2x00dev, RT3390) ||
@@ -3574,6 +3669,28 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
                rt2800_bbp_write(rt2x00dev, 3, value);
        }
 
+       if (rt2x00_rt(rt2x00dev, RT3352)) {
+               rt2800_bbp_write(rt2x00dev, 163, 0xbd);
+               /* Set ITxBF timeout to 0x9c40=1000msec */
+               rt2800_bbp_write(rt2x00dev, 179, 0x02);
+               rt2800_bbp_write(rt2x00dev, 180, 0x00);
+               rt2800_bbp_write(rt2x00dev, 182, 0x40);
+               rt2800_bbp_write(rt2x00dev, 180, 0x01);
+               rt2800_bbp_write(rt2x00dev, 182, 0x9c);
+               rt2800_bbp_write(rt2x00dev, 179, 0x00);
+               /* Reprogram the inband interface to put right values in RXWI */
+               rt2800_bbp_write(rt2x00dev, 142, 0x04);
+               rt2800_bbp_write(rt2x00dev, 143, 0x3b);
+               rt2800_bbp_write(rt2x00dev, 142, 0x06);
+               rt2800_bbp_write(rt2x00dev, 143, 0xa0);
+               rt2800_bbp_write(rt2x00dev, 142, 0x07);
+               rt2800_bbp_write(rt2x00dev, 143, 0xa1);
+               rt2800_bbp_write(rt2x00dev, 142, 0x08);
+               rt2800_bbp_write(rt2x00dev, 143, 0xa2);
+
+               rt2800_bbp_write(rt2x00dev, 148, 0xc8);
+       }
+
        if (rt2x00_rt(rt2x00dev, RT5390) ||
                rt2x00_rt(rt2x00dev, RT5392)) {
                int ant, div_mode;
@@ -3587,16 +3704,16 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
                if (test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags)) {
                        u32 reg;
 
-                       rt2800_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg);
-                       rt2x00_set_field32(&reg, GPIO_CTRL_CFG_GPIOD_BIT3, 0);
-                       rt2x00_set_field32(&reg, GPIO_CTRL_CFG_GPIOD_BIT6, 0);
-                       rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT3, 0);
-                       rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT6, 0);
+                       rt2800_register_read(rt2x00dev, GPIO_CTRL, &reg);
+                       rt2x00_set_field32(&reg, GPIO_CTRL_DIR3, 0);
+                       rt2x00_set_field32(&reg, GPIO_CTRL_DIR6, 0);
+                       rt2x00_set_field32(&reg, GPIO_CTRL_VAL3, 0);
+                       rt2x00_set_field32(&reg, GPIO_CTRL_VAL6, 0);
                        if (ant == 0)
-                               rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT3, 1);
+                               rt2x00_set_field32(&reg, GPIO_CTRL_VAL3, 1);
                        else if (ant == 1)
-                               rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT6, 1);
-                       rt2800_register_write(rt2x00dev, GPIO_CTRL_CFG, reg);
+                               rt2x00_set_field32(&reg, GPIO_CTRL_VAL6, 1);
+                       rt2800_register_write(rt2x00dev, GPIO_CTRL, reg);
                }
 
                /* This chip has hardware antenna diversity*/
@@ -3707,6 +3824,7 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
            !rt2x00_rt(rt2x00dev, RT3071) &&
            !rt2x00_rt(rt2x00dev, RT3090) &&
            !rt2x00_rt(rt2x00dev, RT3290) &&
+           !rt2x00_rt(rt2x00dev, RT3352) &&
            !rt2x00_rt(rt2x00dev, RT3390) &&
            !rt2x00_rt(rt2x00dev, RT3572) &&
            !rt2x00_rt(rt2x00dev, RT5390) &&
@@ -3903,6 +4021,70 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
                rt2800_rfcsr_write(rt2x00dev, 30, 0x00);
                rt2800_rfcsr_write(rt2x00dev, 31, 0x00);
                return 0;
+       } else if (rt2x00_rt(rt2x00dev, RT3352)) {
+               rt2800_rfcsr_write(rt2x00dev, 0, 0xf0);
+               rt2800_rfcsr_write(rt2x00dev, 1, 0x23);
+               rt2800_rfcsr_write(rt2x00dev, 2, 0x50);
+               rt2800_rfcsr_write(rt2x00dev, 3, 0x18);
+               rt2800_rfcsr_write(rt2x00dev, 4, 0x00);
+               rt2800_rfcsr_write(rt2x00dev, 5, 0x00);
+               rt2800_rfcsr_write(rt2x00dev, 6, 0x33);
+               rt2800_rfcsr_write(rt2x00dev, 7, 0x00);
+               rt2800_rfcsr_write(rt2x00dev, 8, 0xf1);
+               rt2800_rfcsr_write(rt2x00dev, 9, 0x02);
+               rt2800_rfcsr_write(rt2x00dev, 10, 0xd2);
+               rt2800_rfcsr_write(rt2x00dev, 11, 0x42);
+               rt2800_rfcsr_write(rt2x00dev, 12, 0x1c);
+               rt2800_rfcsr_write(rt2x00dev, 13, 0x00);
+               rt2800_rfcsr_write(rt2x00dev, 14, 0x5a);
+               rt2800_rfcsr_write(rt2x00dev, 15, 0x00);
+               rt2800_rfcsr_write(rt2x00dev, 16, 0x01);
+               rt2800_rfcsr_write(rt2x00dev, 18, 0x45);
+               rt2800_rfcsr_write(rt2x00dev, 19, 0x02);
+               rt2800_rfcsr_write(rt2x00dev, 20, 0x00);
+               rt2800_rfcsr_write(rt2x00dev, 21, 0x00);
+               rt2800_rfcsr_write(rt2x00dev, 22, 0x00);
+               rt2800_rfcsr_write(rt2x00dev, 23, 0x00);
+               rt2800_rfcsr_write(rt2x00dev, 24, 0x00);
+               rt2800_rfcsr_write(rt2x00dev, 25, 0x80);
+               rt2800_rfcsr_write(rt2x00dev, 26, 0x00);
+               rt2800_rfcsr_write(rt2x00dev, 27, 0x03);
+               rt2800_rfcsr_write(rt2x00dev, 28, 0x03);
+               rt2800_rfcsr_write(rt2x00dev, 29, 0x00);
+               rt2800_rfcsr_write(rt2x00dev, 30, 0x10);
+               rt2800_rfcsr_write(rt2x00dev, 31, 0x80);
+               rt2800_rfcsr_write(rt2x00dev, 32, 0x80);
+               rt2800_rfcsr_write(rt2x00dev, 33, 0x00);
+               rt2800_rfcsr_write(rt2x00dev, 34, 0x01);
+               rt2800_rfcsr_write(rt2x00dev, 35, 0x03);
+               rt2800_rfcsr_write(rt2x00dev, 36, 0xbd);
+               rt2800_rfcsr_write(rt2x00dev, 37, 0x3c);
+               rt2800_rfcsr_write(rt2x00dev, 38, 0x5f);
+               rt2800_rfcsr_write(rt2x00dev, 39, 0xc5);
+               rt2800_rfcsr_write(rt2x00dev, 40, 0x33);
+               rt2800_rfcsr_write(rt2x00dev, 41, 0x5b);
+               rt2800_rfcsr_write(rt2x00dev, 42, 0x5b);
+               rt2800_rfcsr_write(rt2x00dev, 43, 0xdb);
+               rt2800_rfcsr_write(rt2x00dev, 44, 0xdb);
+               rt2800_rfcsr_write(rt2x00dev, 45, 0xdb);
+               rt2800_rfcsr_write(rt2x00dev, 46, 0xdd);
+               rt2800_rfcsr_write(rt2x00dev, 47, 0x0d);
+               rt2800_rfcsr_write(rt2x00dev, 48, 0x14);
+               rt2800_rfcsr_write(rt2x00dev, 49, 0x00);
+               rt2800_rfcsr_write(rt2x00dev, 50, 0x2d);
+               rt2800_rfcsr_write(rt2x00dev, 51, 0x7f);
+               rt2800_rfcsr_write(rt2x00dev, 52, 0x00);
+               rt2800_rfcsr_write(rt2x00dev, 53, 0x52);
+               rt2800_rfcsr_write(rt2x00dev, 54, 0x1b);
+               rt2800_rfcsr_write(rt2x00dev, 55, 0x7f);
+               rt2800_rfcsr_write(rt2x00dev, 56, 0x00);
+               rt2800_rfcsr_write(rt2x00dev, 57, 0x52);
+               rt2800_rfcsr_write(rt2x00dev, 58, 0x1b);
+               rt2800_rfcsr_write(rt2x00dev, 59, 0x00);
+               rt2800_rfcsr_write(rt2x00dev, 60, 0x00);
+               rt2800_rfcsr_write(rt2x00dev, 61, 0x00);
+               rt2800_rfcsr_write(rt2x00dev, 62, 0x00);
+               rt2800_rfcsr_write(rt2x00dev, 63, 0x00);
        } else if (rt2x00_rt(rt2x00dev, RT5390)) {
                rt2800_rfcsr_write(rt2x00dev, 1, 0x0f);
                rt2800_rfcsr_write(rt2x00dev, 2, 0x80);
@@ -4104,6 +4286,7 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
                        rt2800_init_rx_filter(rt2x00dev, true, 0x27, 0x19);
        } else if (rt2x00_rt(rt2x00dev, RT3071) ||
                   rt2x00_rt(rt2x00dev, RT3090) ||
+                  rt2x00_rt(rt2x00dev, RT3352) ||
                   rt2x00_rt(rt2x00dev, RT3390) ||
                   rt2x00_rt(rt2x00dev, RT3572)) {
                drv_data->calibration_bw20 =
@@ -4392,13 +4575,18 @@ void rt2800_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev)
 }
 EXPORT_SYMBOL_GPL(rt2800_read_eeprom_efuse);
 
-int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
+static int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
 {
        struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
        u16 word;
        u8 *mac;
        u8 default_lna_gain;
 
+       /*
+        * Read the EEPROM.
+        */
+       rt2800_read_eeprom(rt2x00dev);
+
        /*
         * Start validation of the data that has been read.
         */
@@ -4521,9 +4709,8 @@ int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(rt2800_validate_eeprom);
 
-int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
+static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
 {
        u32 reg;
        u16 value;
@@ -4562,6 +4749,7 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
        case RT3071:
        case RT3090:
        case RT3290:
+       case RT3352:
        case RT3390:
        case RT3572:
        case RT5390:
@@ -4584,6 +4772,7 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
        case RF3052:
        case RF3290:
        case RF3320:
+       case RF3322:
        case RF5360:
        case RF5370:
        case RF5372:
@@ -4608,6 +4797,7 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
 
        if (rt2x00_rt(rt2x00dev, RT3070) ||
            rt2x00_rt(rt2x00dev, RT3090) ||
+           rt2x00_rt(rt2x00dev, RT3352) ||
            rt2x00_rt(rt2x00dev, RT3390)) {
                value = rt2x00_get_field16(eeprom,
                                EEPROM_NIC_CONF1_ANT_DIVERSITY);
@@ -4681,7 +4871,6 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(rt2800_init_eeprom);
 
 /*
  * RF value list for rt28xx
@@ -4824,7 +5013,7 @@ static const struct rf_channel rf_vals_3x[] = {
        {173, 0x61, 0, 9},
 };
 
-int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
+static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
 {
        struct hw_mode_spec *spec = &rt2x00dev->spec;
        struct channel_info *info;
@@ -4901,6 +5090,7 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
                   rt2x00_rf(rt2x00dev, RF3022) ||
                   rt2x00_rf(rt2x00dev, RF3290) ||
                   rt2x00_rf(rt2x00dev, RF3320) ||
+                  rt2x00_rf(rt2x00dev, RF3322) ||
                   rt2x00_rf(rt2x00dev, RF5360) ||
                   rt2x00_rf(rt2x00dev, RF5370) ||
                   rt2x00_rf(rt2x00dev, RF5372) ||
@@ -5000,7 +5190,72 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(rt2800_probe_hw_mode);
+
+int rt2800_probe_hw(struct rt2x00_dev *rt2x00dev)
+{
+       int retval;
+       u32 reg;
+
+       /*
+        * Allocate eeprom data.
+        */
+       retval = rt2800_validate_eeprom(rt2x00dev);
+       if (retval)
+               return retval;
+
+       retval = rt2800_init_eeprom(rt2x00dev);
+       if (retval)
+               return retval;
+
+       /*
+        * Enable rfkill polling by setting GPIO direction of the
+        * rfkill switch GPIO pin correctly.
+        */
+       rt2800_register_read(rt2x00dev, GPIO_CTRL, &reg);
+       rt2x00_set_field32(&reg, GPIO_CTRL_DIR2, 1);
+       rt2800_register_write(rt2x00dev, GPIO_CTRL, reg);
+
+       /*
+        * Initialize hw specifications.
+        */
+       retval = rt2800_probe_hw_mode(rt2x00dev);
+       if (retval)
+               return retval;
+
+       /*
+        * Set device capabilities.
+        */
+       __set_bit(CAPABILITY_CONTROL_FILTERS, &rt2x00dev->cap_flags);
+       __set_bit(CAPABILITY_CONTROL_FILTER_PSPOLL, &rt2x00dev->cap_flags);
+       if (!rt2x00_is_usb(rt2x00dev))
+               __set_bit(CAPABILITY_PRE_TBTT_INTERRUPT, &rt2x00dev->cap_flags);
+
+       /*
+        * Set device requirements.
+        */
+       if (!rt2x00_is_soc(rt2x00dev))
+               __set_bit(REQUIRE_FIRMWARE, &rt2x00dev->cap_flags);
+       __set_bit(REQUIRE_L2PAD, &rt2x00dev->cap_flags);
+       __set_bit(REQUIRE_TXSTATUS_FIFO, &rt2x00dev->cap_flags);
+       if (!rt2800_hwcrypt_disabled(rt2x00dev))
+               __set_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags);
+       __set_bit(CAPABILITY_LINK_TUNING, &rt2x00dev->cap_flags);
+       __set_bit(REQUIRE_HT_TX_DESC, &rt2x00dev->cap_flags);
+       if (rt2x00_is_usb(rt2x00dev))
+               __set_bit(REQUIRE_PS_AUTOWAKE, &rt2x00dev->cap_flags);
+       else {
+               __set_bit(REQUIRE_DMA, &rt2x00dev->cap_flags);
+               __set_bit(REQUIRE_TASKLET_CONTEXT, &rt2x00dev->cap_flags);
+       }
+
+       /*
+        * Set the rssi offset.
+        */
+       rt2x00dev->rssi_offset = DEFAULT_RSSI_OFFSET;
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(rt2800_probe_hw);
 
 /*
  * IEEE80211 stack callback functions.
index 18a0b67b4c68daa23f6759a5ac407e04209bfc2c..a128ceadcb3e733620c37fa03e49666cf80b2e09 100644 (file)
@@ -43,6 +43,9 @@ struct rt2800_ops {
                            const unsigned int offset,
                            const struct rt2x00_field32 field, u32 *reg);
 
+       void (*read_eeprom)(struct rt2x00_dev *rt2x00dev);
+       bool (*hwcrypt_disabled)(struct rt2x00_dev *rt2x00dev);
+
        int (*drv_write_firmware)(struct rt2x00_dev *rt2x00dev,
                                  const u8 *data, const size_t len);
        int (*drv_init_registers)(struct rt2x00_dev *rt2x00dev);
@@ -114,6 +117,20 @@ static inline int rt2800_regbusy_read(struct rt2x00_dev *rt2x00dev,
        return rt2800ops->regbusy_read(rt2x00dev, offset, field, reg);
 }
 
+static inline void rt2800_read_eeprom(struct rt2x00_dev *rt2x00dev)
+{
+       const struct rt2800_ops *rt2800ops = rt2x00dev->ops->drv;
+
+       rt2800ops->read_eeprom(rt2x00dev);
+}
+
+static inline bool rt2800_hwcrypt_disabled(struct rt2x00_dev *rt2x00dev)
+{
+       const struct rt2800_ops *rt2800ops = rt2x00dev->ops->drv;
+
+       return rt2800ops->hwcrypt_disabled(rt2x00dev);
+}
+
 static inline int rt2800_drv_write_firmware(struct rt2x00_dev *rt2x00dev,
                                            const u8 *data, const size_t len)
 {
@@ -191,9 +208,8 @@ void rt2800_disable_radio(struct rt2x00_dev *rt2x00dev);
 
 int rt2800_efuse_detect(struct rt2x00_dev *rt2x00dev);
 void rt2800_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev);
-int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev);
-int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev);
-int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev);
+
+int rt2800_probe_hw(struct rt2x00_dev *rt2x00dev);
 
 void rt2800_get_tkip_seq(struct ieee80211_hw *hw, u8 hw_key_idx, u32 *iv32,
                         u16 *iv16);
index 4765bbd654cdcfeea617c84f9c755db05409600d..27829e1e2e38964b2085dc84e30f618b3f72f97e 100644 (file)
@@ -54,6 +54,11 @@ static bool modparam_nohwcrypt = false;
 module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
 MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
 
+static bool rt2800pci_hwcrypt_disabled(struct rt2x00_dev *rt2x00dev)
+{
+       return modparam_nohwcrypt;
+}
+
 static void rt2800pci_mcu_status(struct rt2x00_dev *rt2x00dev, const u8 token)
 {
        unsigned int i;
@@ -965,85 +970,14 @@ static irqreturn_t rt2800pci_interrupt(int irq, void *dev_instance)
 /*
  * Device probe functions.
  */
-static int rt2800pci_validate_eeprom(struct rt2x00_dev *rt2x00dev)
+static void rt2800pci_read_eeprom(struct rt2x00_dev *rt2x00dev)
 {
-       /*
-        * Read EEPROM into buffer
-        */
        if (rt2x00_is_soc(rt2x00dev))
                rt2800pci_read_eeprom_soc(rt2x00dev);
        else if (rt2800pci_efuse_detect(rt2x00dev))
                rt2800pci_read_eeprom_efuse(rt2x00dev);
        else
                rt2800pci_read_eeprom_pci(rt2x00dev);
-
-       return rt2800_validate_eeprom(rt2x00dev);
-}
-
-static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev)
-{
-       int retval;
-       u32 reg;
-
-       /*
-        * Allocate eeprom data.
-        */
-       retval = rt2800pci_validate_eeprom(rt2x00dev);
-       if (retval)
-               return retval;
-
-       retval = rt2800_init_eeprom(rt2x00dev);
-       if (retval)
-               return retval;
-
-       /*
-        * Enable rfkill polling by setting GPIO direction of the
-        * rfkill switch GPIO pin correctly.
-        */
-       rt2x00pci_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg);
-       rt2x00_set_field32(&reg, GPIO_CTRL_CFG_GPIOD_BIT2, 1);
-       rt2x00pci_register_write(rt2x00dev, GPIO_CTRL_CFG, reg);
-
-       /*
-        * Initialize hw specifications.
-        */
-       retval = rt2800_probe_hw_mode(rt2x00dev);
-       if (retval)
-               return retval;
-
-       /*
-        * This device has multiple filters for control frames
-        * and has a separate filter for PS Poll frames.
-        */
-       __set_bit(CAPABILITY_CONTROL_FILTERS, &rt2x00dev->cap_flags);
-       __set_bit(CAPABILITY_CONTROL_FILTER_PSPOLL, &rt2x00dev->cap_flags);
-
-       /*
-        * This device has a pre tbtt interrupt and thus fetches
-        * a new beacon directly prior to transmission.
-        */
-       __set_bit(CAPABILITY_PRE_TBTT_INTERRUPT, &rt2x00dev->cap_flags);
-
-       /*
-        * This device requires firmware.
-        */
-       if (!rt2x00_is_soc(rt2x00dev))
-               __set_bit(REQUIRE_FIRMWARE, &rt2x00dev->cap_flags);
-       __set_bit(REQUIRE_DMA, &rt2x00dev->cap_flags);
-       __set_bit(REQUIRE_L2PAD, &rt2x00dev->cap_flags);
-       __set_bit(REQUIRE_TXSTATUS_FIFO, &rt2x00dev->cap_flags);
-       __set_bit(REQUIRE_TASKLET_CONTEXT, &rt2x00dev->cap_flags);
-       if (!modparam_nohwcrypt)
-               __set_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags);
-       __set_bit(CAPABILITY_LINK_TUNING, &rt2x00dev->cap_flags);
-       __set_bit(REQUIRE_HT_TX_DESC, &rt2x00dev->cap_flags);
-
-       /*
-        * Set the rssi offset.
-        */
-       rt2x00dev->rssi_offset = DEFAULT_RSSI_OFFSET;
-
-       return 0;
 }
 
 static const struct ieee80211_ops rt2800pci_mac80211_ops = {
@@ -1081,6 +1015,8 @@ static const struct rt2800_ops rt2800pci_rt2800_ops = {
        .register_multiread     = rt2x00pci_register_multiread,
        .register_multiwrite    = rt2x00pci_register_multiwrite,
        .regbusy_read           = rt2x00pci_regbusy_read,
+       .read_eeprom            = rt2800pci_read_eeprom,
+       .hwcrypt_disabled       = rt2800pci_hwcrypt_disabled,
        .drv_write_firmware     = rt2800pci_write_firmware,
        .drv_init_registers     = rt2800pci_init_registers,
        .drv_get_txwi           = rt2800pci_get_txwi,
@@ -1093,7 +1029,7 @@ static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = {
        .tbtt_tasklet           = rt2800pci_tbtt_tasklet,
        .rxdone_tasklet         = rt2800pci_rxdone_tasklet,
        .autowake_tasklet       = rt2800pci_autowake_tasklet,
-       .probe_hw               = rt2800pci_probe_hw,
+       .probe_hw               = rt2800_probe_hw,
        .get_firmware_name      = rt2800pci_get_firmware_name,
        .check_firmware         = rt2800_check_firmware,
        .load_firmware          = rt2800_load_firmware,
@@ -1152,7 +1088,6 @@ static const struct data_queue_desc rt2800pci_queue_bcn = {
 static const struct rt2x00_ops rt2800pci_ops = {
        .name                   = KBUILD_MODNAME,
        .drv_data_size          = sizeof(struct rt2800_drv_data),
-       .max_sta_intf           = 1,
        .max_ap_intf            = 8,
        .eeprom_size            = EEPROM_SIZE,
        .rf_size                = RF_SIZE,
index 6b4226b716187ea037d2a1c84e012806649e8816..c9e9370eb789c04ec91fd97a2a85d8d21977a144 100644 (file)
@@ -49,6 +49,11 @@ static bool modparam_nohwcrypt;
 module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
 MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
 
+static bool rt2800usb_hwcrypt_disabled(struct rt2x00_dev *rt2x00dev)
+{
+       return modparam_nohwcrypt;
+}
+
 /*
  * Queue handlers.
  */
@@ -730,73 +735,27 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry,
 /*
  * Device probe functions.
  */
-static int rt2800usb_validate_eeprom(struct rt2x00_dev *rt2x00dev)
+static void rt2800usb_read_eeprom(struct rt2x00_dev *rt2x00dev)
 {
        if (rt2800_efuse_detect(rt2x00dev))
                rt2800_read_eeprom_efuse(rt2x00dev);
        else
                rt2x00usb_eeprom_read(rt2x00dev, rt2x00dev->eeprom,
                                      EEPROM_SIZE);
-
-       return rt2800_validate_eeprom(rt2x00dev);
 }
 
 static int rt2800usb_probe_hw(struct rt2x00_dev *rt2x00dev)
 {
        int retval;
-       u32 reg;
 
-       /*
-        * Allocate eeprom data.
-        */
-       retval = rt2800usb_validate_eeprom(rt2x00dev);
+       retval = rt2800_probe_hw(rt2x00dev);
        if (retval)
                return retval;
 
-       retval = rt2800_init_eeprom(rt2x00dev);
-       if (retval)
-               return retval;
-
-       /*
-        * Enable rfkill polling by setting GPIO direction of the
-        * rfkill switch GPIO pin correctly.
-        */
-       rt2x00usb_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg);
-       rt2x00_set_field32(&reg, GPIO_CTRL_CFG_GPIOD_BIT2, 1);
-       rt2x00usb_register_write(rt2x00dev, GPIO_CTRL_CFG, reg);
-
-       /*
-        * Initialize hw specifications.
-        */
-       retval = rt2800_probe_hw_mode(rt2x00dev);
-       if (retval)
-               return retval;
-
-       /*
-        * This device has multiple filters for control frames
-        * and has a separate filter for PS Poll frames.
-        */
-       __set_bit(CAPABILITY_CONTROL_FILTERS, &rt2x00dev->cap_flags);
-       __set_bit(CAPABILITY_CONTROL_FILTER_PSPOLL, &rt2x00dev->cap_flags);
-
-       /*
-        * This device requires firmware.
-        */
-       __set_bit(REQUIRE_FIRMWARE, &rt2x00dev->cap_flags);
-       __set_bit(REQUIRE_L2PAD, &rt2x00dev->cap_flags);
-       if (!modparam_nohwcrypt)
-               __set_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags);
-       __set_bit(CAPABILITY_LINK_TUNING, &rt2x00dev->cap_flags);
-       __set_bit(REQUIRE_HT_TX_DESC, &rt2x00dev->cap_flags);
-       __set_bit(REQUIRE_TXSTATUS_FIFO, &rt2x00dev->cap_flags);
-       __set_bit(REQUIRE_PS_AUTOWAKE, &rt2x00dev->cap_flags);
-
-       rt2x00dev->txstatus_timer.function = rt2800usb_tx_sta_fifo_timeout,
-
        /*
-        * Set the rssi offset.
+        * Set txstatus timer function.
         */
-       rt2x00dev->rssi_offset = DEFAULT_RSSI_OFFSET;
+       rt2x00dev->txstatus_timer.function = rt2800usb_tx_sta_fifo_timeout;
 
        /*
         * Overwrite TX done handler
@@ -842,6 +801,8 @@ static const struct rt2800_ops rt2800usb_rt2800_ops = {
        .register_multiread     = rt2x00usb_register_multiread,
        .register_multiwrite    = rt2x00usb_register_multiwrite,
        .regbusy_read           = rt2x00usb_regbusy_read,
+       .read_eeprom            = rt2800usb_read_eeprom,
+       .hwcrypt_disabled       = rt2800usb_hwcrypt_disabled,
        .drv_write_firmware     = rt2800usb_write_firmware,
        .drv_init_registers     = rt2800usb_init_registers,
        .drv_get_txwi           = rt2800usb_get_txwi,
@@ -909,7 +870,6 @@ static const struct data_queue_desc rt2800usb_queue_bcn = {
 static const struct rt2x00_ops rt2800usb_ops = {
        .name                   = KBUILD_MODNAME,
        .drv_data_size          = sizeof(struct rt2800_drv_data),
-       .max_sta_intf           = 1,
        .max_ap_intf            = 8,
        .eeprom_size            = EEPROM_SIZE,
        .rf_size                = RF_SIZE,
index 8afb546c2b2d3b1741be167969e7c04dcefb0239..0751b35ef6dcd536ba51c3554dc4b58e359a9628 100644 (file)
@@ -188,6 +188,7 @@ struct rt2x00_chip {
 #define RT3071         0x3071
 #define RT3090         0x3090  /* 2.4GHz PCIe */
 #define RT3290         0x3290
+#define RT3352         0x3352  /* WSOC */
 #define RT3390         0x3390
 #define RT3572         0x3572
 #define RT3593         0x3593
@@ -655,7 +656,6 @@ struct rt2x00lib_ops {
 struct rt2x00_ops {
        const char *name;
        const unsigned int drv_data_size;
-       const unsigned int max_sta_intf;
        const unsigned int max_ap_intf;
        const unsigned int eeprom_size;
        const unsigned int rf_size;
@@ -740,6 +740,14 @@ enum rt2x00_capability_flags {
        CAPABILITY_VCO_RECALIBRATION,
 };
 
+/*
+ * Interface combinations
+ */
+enum {
+       IF_COMB_AP = 0,
+       NUM_IF_COMB,
+};
+
 /*
  * rt2x00 device structure.
  */
@@ -866,6 +874,12 @@ struct rt2x00_dev {
        unsigned int intf_associated;
        unsigned int intf_beaconing;
 
+       /*
+        * Interface combinations
+        */
+       struct ieee80211_iface_limit if_limits_ap;
+       struct ieee80211_iface_combination if_combinations[NUM_IF_COMB];
+
        /*
         * Link quality
         */
@@ -1287,7 +1301,9 @@ void rt2x00lib_rxdone(struct queue_entry *entry, gfp_t gfp);
 /*
  * mac80211 handlers.
  */
-void rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
+void rt2x00mac_tx(struct ieee80211_hw *hw,
+                 struct ieee80211_tx_control *control,
+                 struct sk_buff *skb);
 int rt2x00mac_start(struct ieee80211_hw *hw);
 void rt2x00mac_stop(struct ieee80211_hw *hw);
 int rt2x00mac_add_interface(struct ieee80211_hw *hw,
index 3f07e36f462b384565884580170faf7c3be2f25f..69097d1faeb676d97ddd27c7ba7dc3f575cd2f6f 100644 (file)
@@ -194,7 +194,7 @@ static void rt2x00lib_bc_buffer_iter(void *data, u8 *mac,
         */
        skb = ieee80211_get_buffered_bc(rt2x00dev->hw, vif);
        while (skb) {
-               rt2x00mac_tx(rt2x00dev->hw, skb);
+               rt2x00mac_tx(rt2x00dev->hw, NULL, skb);
                skb = ieee80211_get_buffered_bc(rt2x00dev->hw, vif);
        }
 }
@@ -1118,6 +1118,34 @@ void rt2x00lib_stop(struct rt2x00_dev *rt2x00dev)
        rt2x00dev->intf_associated = 0;
 }
 
+static inline void rt2x00lib_set_if_combinations(struct rt2x00_dev *rt2x00dev)
+{
+       struct ieee80211_iface_limit *if_limit;
+       struct ieee80211_iface_combination *if_combination;
+
+       /*
+        * Build up AP interface limits structure.
+        */
+       if_limit = &rt2x00dev->if_limits_ap;
+       if_limit->max = rt2x00dev->ops->max_ap_intf;
+       if_limit->types = BIT(NL80211_IFTYPE_AP);
+
+       /*
+        * Build up AP interface combinations structure.
+        */
+       if_combination = &rt2x00dev->if_combinations[IF_COMB_AP];
+       if_combination->limits = if_limit;
+       if_combination->n_limits = 1;
+       if_combination->max_interfaces = if_limit->max;
+       if_combination->num_different_channels = 1;
+
+       /*
+        * Finally, specify the possible combinations to mac80211.
+        */
+       rt2x00dev->hw->wiphy->iface_combinations = rt2x00dev->if_combinations;
+       rt2x00dev->hw->wiphy->n_iface_combinations = 1;
+}
+
 /*
  * driver allocation handlers.
  */
@@ -1125,6 +1153,11 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
 {
        int retval = -ENOMEM;
 
+       /*
+        * Set possible interface combinations.
+        */
+       rt2x00lib_set_if_combinations(rt2x00dev);
+
        /*
         * Allocate the driver data memory, if necessary.
         */
index 4ff26c2159bf4b25178fbb66a0cd9794651ac185..98a9e48f8e4a38e852c54e8dbdef0c2369a753a4 100644 (file)
@@ -99,7 +99,9 @@ static int rt2x00mac_tx_rts_cts(struct rt2x00_dev *rt2x00dev,
        return retval;
 }
 
-void rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+void rt2x00mac_tx(struct ieee80211_hw *hw,
+                 struct ieee80211_tx_control *control,
+                 struct sk_buff *skb)
 {
        struct rt2x00_dev *rt2x00dev = hw->priv;
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
@@ -212,46 +214,6 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
            !test_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags))
                return -ENODEV;
 
-       switch (vif->type) {
-       case NL80211_IFTYPE_AP:
-               /*
-                * We don't support mixed combinations of
-                * sta and ap interfaces.
-                */
-               if (rt2x00dev->intf_sta_count)
-                       return -ENOBUFS;
-
-               /*
-                * Check if we exceeded the maximum amount
-                * of supported interfaces.
-                */
-               if (rt2x00dev->intf_ap_count >= rt2x00dev->ops->max_ap_intf)
-                       return -ENOBUFS;
-
-               break;
-       case NL80211_IFTYPE_STATION:
-       case NL80211_IFTYPE_ADHOC:
-       case NL80211_IFTYPE_MESH_POINT:
-       case NL80211_IFTYPE_WDS:
-               /*
-                * We don't support mixed combinations of
-                * sta and ap interfaces.
-                */
-               if (rt2x00dev->intf_ap_count)
-                       return -ENOBUFS;
-
-               /*
-                * Check if we exceeded the maximum amount
-                * of supported interfaces.
-                */
-               if (rt2x00dev->intf_sta_count >= rt2x00dev->ops->max_sta_intf)
-                       return -ENOBUFS;
-
-               break;
-       default:
-               return -EINVAL;
-       }
-
        /*
         * Loop through all beacon queues to find a free
         * entry. Since there are as much beacon entries
index f7e74a0a775911abab23dab3b0a512fb0524d0a4..e488b944a0340834ed96c02c91df59e9b3f5e142 100644 (file)
@@ -315,6 +315,7 @@ static void rt2x00queue_create_tx_descriptor_plcp(struct rt2x00_dev *rt2x00dev,
 static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
                                                struct sk_buff *skb,
                                                struct txentry_desc *txdesc,
+                                               struct ieee80211_sta *sta,
                                                const struct rt2x00_rate *hwrate)
 {
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
@@ -322,11 +323,11 @@ static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        struct rt2x00_sta *sta_priv = NULL;
 
-       if (tx_info->control.sta) {
+       if (sta) {
                txdesc->u.ht.mpdu_density =
-                   tx_info->control.sta->ht_cap.ampdu_density;
+                   sta->ht_cap.ampdu_density;
 
-               sta_priv = sta_to_rt2x00_sta(tx_info->control.sta);
+               sta_priv = sta_to_rt2x00_sta(sta);
                txdesc->u.ht.wcid = sta_priv->wcid;
        }
 
@@ -341,8 +342,8 @@ static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
                 * MIMO PS should be set to 1 for STA's using dynamic SM PS
                 * when using more then one tx stream (>MCS7).
                 */
-               if (tx_info->control.sta && txdesc->u.ht.mcs > 7 &&
-                   ((tx_info->control.sta->ht_cap.cap &
+               if (sta && txdesc->u.ht.mcs > 7 &&
+                   ((sta->ht_cap.cap &
                      IEEE80211_HT_CAP_SM_PS) >>
                     IEEE80211_HT_CAP_SM_PS_SHIFT) ==
                    WLAN_HT_CAP_SM_PS_DYNAMIC)
@@ -409,7 +410,8 @@ static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
 
 static void rt2x00queue_create_tx_descriptor(struct rt2x00_dev *rt2x00dev,
                                             struct sk_buff *skb,
-                                            struct txentry_desc *txdesc)
+                                            struct txentry_desc *txdesc,
+                                            struct ieee80211_sta *sta)
 {
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
@@ -503,7 +505,7 @@ static void rt2x00queue_create_tx_descriptor(struct rt2x00_dev *rt2x00dev,
 
        if (test_bit(REQUIRE_HT_TX_DESC, &rt2x00dev->cap_flags))
                rt2x00queue_create_tx_descriptor_ht(rt2x00dev, skb, txdesc,
-                                                   hwrate);
+                                                  sta, hwrate);
        else
                rt2x00queue_create_tx_descriptor_plcp(rt2x00dev, skb, txdesc,
                                                      hwrate);
@@ -595,7 +597,7 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
         * after that we are free to use the skb->cb array
         * for our information.
         */
-       rt2x00queue_create_tx_descriptor(queue->rt2x00dev, skb, &txdesc);
+       rt2x00queue_create_tx_descriptor(queue->rt2x00dev, skb, &txdesc, NULL);
 
        /*
         * All information is retrieved from the skb->cb array,
@@ -740,7 +742,7 @@ int rt2x00queue_update_beacon_locked(struct rt2x00_dev *rt2x00dev,
         * after that we are free to use the skb->cb array
         * for our information.
         */
-       rt2x00queue_create_tx_descriptor(rt2x00dev, intf->beacon->skb, &txdesc);
+       rt2x00queue_create_tx_descriptor(rt2x00dev, intf->beacon->skb, &txdesc, NULL);
 
        /*
         * Fill in skb descriptor
index b8ec96163922a11711a3d6800b9556052c1386fc..d6582a2fa3534879614a23dd29435e747a0cca0c 100644 (file)
@@ -243,7 +243,7 @@ static int rt61pci_rfkill_poll(struct rt2x00_dev *rt2x00dev)
        u32 reg;
 
        rt2x00pci_register_read(rt2x00dev, MAC_CSR13, &reg);
-       return rt2x00_get_field32(reg, MAC_CSR13_BIT5);
+       return rt2x00_get_field32(reg, MAC_CSR13_VAL5);
 }
 
 #ifdef CONFIG_RT2X00_LIB_LEDS
@@ -715,11 +715,11 @@ static void rt61pci_config_antenna_2529_rx(struct rt2x00_dev *rt2x00dev,
 
        rt2x00pci_register_read(rt2x00dev, MAC_CSR13, &reg);
 
-       rt2x00_set_field32(&reg, MAC_CSR13_BIT4, p1);
-       rt2x00_set_field32(&reg, MAC_CSR13_BIT12, 0);
+       rt2x00_set_field32(&reg, MAC_CSR13_DIR4, 0);
+       rt2x00_set_field32(&reg, MAC_CSR13_VAL4, p1);
 
-       rt2x00_set_field32(&reg, MAC_CSR13_BIT3, !p2);
-       rt2x00_set_field32(&reg, MAC_CSR13_BIT11, 0);
+       rt2x00_set_field32(&reg, MAC_CSR13_DIR3, 0);
+       rt2x00_set_field32(&reg, MAC_CSR13_VAL3, !p2);
 
        rt2x00pci_register_write(rt2x00dev, MAC_CSR13, reg);
 }
@@ -2855,7 +2855,7 @@ static int rt61pci_probe_hw(struct rt2x00_dev *rt2x00dev)
         * rfkill switch GPIO pin correctly.
         */
        rt2x00pci_register_read(rt2x00dev, MAC_CSR13, &reg);
-       rt2x00_set_field32(&reg, MAC_CSR13_BIT13, 1);
+       rt2x00_set_field32(&reg, MAC_CSR13_DIR5, 1);
        rt2x00pci_register_write(rt2x00dev, MAC_CSR13, reg);
 
        /*
@@ -3045,7 +3045,6 @@ static const struct data_queue_desc rt61pci_queue_bcn = {
 
 static const struct rt2x00_ops rt61pci_ops = {
        .name                   = KBUILD_MODNAME,
-       .max_sta_intf           = 1,
        .max_ap_intf            = 4,
        .eeprom_size            = EEPROM_SIZE,
        .rf_size                = RF_SIZE,
index 8f3da5a56766f4c3293825c2d649078f4cb5455f..9bc6b6044e34189e6a069a4168a7eaec53565a00 100644 (file)
@@ -357,22 +357,22 @@ struct hw_pairwise_ta_entry {
 
 /*
  * MAC_CSR13: GPIO.
+ *     MAC_CSR13_VALx: GPIO value
+ *     MAC_CSR13_DIRx: GPIO direction: 0 = output; 1 = input
  */
 #define MAC_CSR13                      0x3034
-#define MAC_CSR13_BIT0                 FIELD32(0x00000001)
-#define MAC_CSR13_BIT1                 FIELD32(0x00000002)
-#define MAC_CSR13_BIT2                 FIELD32(0x00000004)
-#define MAC_CSR13_BIT3                 FIELD32(0x00000008)
-#define MAC_CSR13_BIT4                 FIELD32(0x00000010)
-#define MAC_CSR13_BIT5                 FIELD32(0x00000020)
-#define MAC_CSR13_BIT6                 FIELD32(0x00000040)
-#define MAC_CSR13_BIT7                 FIELD32(0x00000080)
-#define MAC_CSR13_BIT8                 FIELD32(0x00000100)
-#define MAC_CSR13_BIT9                 FIELD32(0x00000200)
-#define MAC_CSR13_BIT10                        FIELD32(0x00000400)
-#define MAC_CSR13_BIT11                        FIELD32(0x00000800)
-#define MAC_CSR13_BIT12                        FIELD32(0x00001000)
-#define MAC_CSR13_BIT13                        FIELD32(0x00002000)
+#define MAC_CSR13_VAL0                 FIELD32(0x00000001)
+#define MAC_CSR13_VAL1                 FIELD32(0x00000002)
+#define MAC_CSR13_VAL2                 FIELD32(0x00000004)
+#define MAC_CSR13_VAL3                 FIELD32(0x00000008)
+#define MAC_CSR13_VAL4                 FIELD32(0x00000010)
+#define MAC_CSR13_VAL5                 FIELD32(0x00000020)
+#define MAC_CSR13_DIR0                 FIELD32(0x00000100)
+#define MAC_CSR13_DIR1                 FIELD32(0x00000200)
+#define MAC_CSR13_DIR2                 FIELD32(0x00000400)
+#define MAC_CSR13_DIR3                 FIELD32(0x00000800)
+#define MAC_CSR13_DIR4                 FIELD32(0x00001000)
+#define MAC_CSR13_DIR5                 FIELD32(0x00002000)
 
 /*
  * MAC_CSR14: LED control register.
index 248436c13ce04ae1f79312c6cbb1e16d8a4b5fc9..e5eb43b3eee74f94db85349436f0ba1e15925059 100644 (file)
@@ -189,7 +189,7 @@ static int rt73usb_rfkill_poll(struct rt2x00_dev *rt2x00dev)
        u32 reg;
 
        rt2x00usb_register_read(rt2x00dev, MAC_CSR13, &reg);
-       return rt2x00_get_field32(reg, MAC_CSR13_BIT7);
+       return rt2x00_get_field32(reg, MAC_CSR13_VAL7);
 }
 
 #ifdef CONFIG_RT2X00_LIB_LEDS
@@ -2195,7 +2195,7 @@ static int rt73usb_probe_hw(struct rt2x00_dev *rt2x00dev)
         * rfkill switch GPIO pin correctly.
         */
        rt2x00usb_register_read(rt2x00dev, MAC_CSR13, &reg);
-       rt2x00_set_field32(&reg, MAC_CSR13_BIT15, 0);
+       rt2x00_set_field32(&reg, MAC_CSR13_DIR7, 0);
        rt2x00usb_register_write(rt2x00dev, MAC_CSR13, reg);
 
        /*
@@ -2382,7 +2382,6 @@ static const struct data_queue_desc rt73usb_queue_bcn = {
 
 static const struct rt2x00_ops rt73usb_ops = {
        .name                   = KBUILD_MODNAME,
-       .max_sta_intf           = 1,
        .max_ap_intf            = 4,
        .eeprom_size            = EEPROM_SIZE,
        .rf_size                = RF_SIZE,
index df1cc116b83be891ee2ff20702260f5949d3d983..7577e0ba3877363cb63480b42c4e118f599de7ef 100644 (file)
@@ -267,24 +267,26 @@ struct hw_pairwise_ta_entry {
 
 /*
  * MAC_CSR13: GPIO.
+ *     MAC_CSR13_VALx: GPIO value
+ *     MAC_CSR13_DIRx: GPIO direction: 0 = input; 1 = output
  */
 #define MAC_CSR13                      0x3034
-#define MAC_CSR13_BIT0                 FIELD32(0x00000001)
-#define MAC_CSR13_BIT1                 FIELD32(0x00000002)
-#define MAC_CSR13_BIT2                 FIELD32(0x00000004)
-#define MAC_CSR13_BIT3                 FIELD32(0x00000008)
-#define MAC_CSR13_BIT4                 FIELD32(0x00000010)
-#define MAC_CSR13_BIT5                 FIELD32(0x00000020)
-#define MAC_CSR13_BIT6                 FIELD32(0x00000040)
-#define MAC_CSR13_BIT7                 FIELD32(0x00000080)
-#define MAC_CSR13_BIT8                 FIELD32(0x00000100)
-#define MAC_CSR13_BIT9                 FIELD32(0x00000200)
-#define MAC_CSR13_BIT10                        FIELD32(0x00000400)
-#define MAC_CSR13_BIT11                        FIELD32(0x00000800)
-#define MAC_CSR13_BIT12                        FIELD32(0x00001000)
-#define MAC_CSR13_BIT13                        FIELD32(0x00002000)
-#define MAC_CSR13_BIT14                        FIELD32(0x00004000)
-#define MAC_CSR13_BIT15                        FIELD32(0x00008000)
+#define MAC_CSR13_VAL0                 FIELD32(0x00000001)
+#define MAC_CSR13_VAL1                 FIELD32(0x00000002)
+#define MAC_CSR13_VAL2                 FIELD32(0x00000004)
+#define MAC_CSR13_VAL3                 FIELD32(0x00000008)
+#define MAC_CSR13_VAL4                 FIELD32(0x00000010)
+#define MAC_CSR13_VAL5                 FIELD32(0x00000020)
+#define MAC_CSR13_VAL6                 FIELD32(0x00000040)
+#define MAC_CSR13_VAL7                 FIELD32(0x00000080)
+#define MAC_CSR13_DIR0                 FIELD32(0x00000100)
+#define MAC_CSR13_DIR1                 FIELD32(0x00000200)
+#define MAC_CSR13_DIR2                 FIELD32(0x00000400)
+#define MAC_CSR13_DIR3                 FIELD32(0x00000800)
+#define MAC_CSR13_DIR4                 FIELD32(0x00001000)
+#define MAC_CSR13_DIR5                 FIELD32(0x00002000)
+#define MAC_CSR13_DIR6                 FIELD32(0x00004000)
+#define MAC_CSR13_DIR7                 FIELD32(0x00008000)
 
 /*
  * MAC_CSR14: LED control register.
index aceaf689f73704d5eba60e2d17478ee620999392..021d83e1b1d3367d0ff19954324c4306dc6b8f42 100644 (file)
@@ -244,7 +244,9 @@ static irqreturn_t rtl8180_interrupt(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
-static void rtl8180_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
+static void rtl8180_tx(struct ieee80211_hw *dev,
+                      struct ieee80211_tx_control *control,
+                      struct sk_buff *skb)
 {
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
@@ -710,7 +712,7 @@ static void rtl8180_beacon_work(struct work_struct *work)
        /* TODO: use actual beacon queue */
        skb_set_queue_mapping(skb, 0);
 
-       rtl8180_tx(dev, skb);
+       rtl8180_tx(dev, NULL, skb);
 
 resched:
        /*
index 533024095c43ad48871868d8522c6953399eb86a..7811b6315973cd466e19d2fd2c139532daf04610 100644 (file)
@@ -228,7 +228,9 @@ static void rtl8187_tx_cb(struct urb *urb)
        }
 }
 
-static void rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
+static void rtl8187_tx(struct ieee80211_hw *dev,
+                      struct ieee80211_tx_control *control,
+                      struct sk_buff *skb)
 {
        struct rtl8187_priv *priv = dev->priv;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -1076,7 +1078,7 @@ static void rtl8187_beacon_work(struct work_struct *work)
        /* TODO: use actual beacon queue */
        skb_set_queue_mapping(skb, 0);
 
-       rtl8187_tx(dev, skb);
+       rtl8187_tx(dev, NULL, skb);
 
 resched:
        /*
index cefac6a43601e17ca6ca9ef6db36589e7d055daf..6b28e92d1d215c0f598354326b9c1e9780376268 100644 (file)
@@ -1,6 +1,6 @@
 config RTL8192CE
        tristate "Realtek RTL8192CE/RTL8188CE Wireless Network Adapter"
-       depends on MAC80211 && PCI && EXPERIMENTAL
+       depends on MAC80211 && PCI
        select FW_LOADER
        select RTLWIFI
        select RTL8192C_COMMON
@@ -12,7 +12,7 @@ config RTL8192CE
 
 config RTL8192SE
        tristate "Realtek RTL8192SE/RTL8191SE PCIe Wireless Network Adapter"
-       depends on MAC80211 && EXPERIMENTAL && PCI
+       depends on MAC80211 && PCI
        select FW_LOADER
        select RTLWIFI
        ---help---
@@ -23,7 +23,7 @@ config RTL8192SE
 
 config RTL8192DE
        tristate "Realtek RTL8192DE/RTL8188DE PCIe Wireless Network Adapter"
-       depends on MAC80211 && EXPERIMENTAL && PCI
+       depends on MAC80211 && PCI
        select FW_LOADER
        select RTLWIFI
        ---help---
@@ -34,7 +34,7 @@ config RTL8192DE
 
 config RTL8192CU
        tristate "Realtek RTL8192CU/RTL8188CU USB Wireless Network Adapter"
-       depends on MAC80211 && USB && EXPERIMENTAL
+       depends on MAC80211 && USB
        select FW_LOADER
        select RTLWIFI
        select RTL8192C_COMMON
index 942e56b77b6030856ee6c6c19055512c8c3552de..59381fe8ed064064dcddaf697ba26aa345b2c0de 100644 (file)
@@ -1341,9 +1341,8 @@ int rtl_send_smps_action(struct ieee80211_hw *hw,
                rtlpriv->cfg->ops->update_rate_tbl(hw, sta, 0);
 
                info->control.rates[0].idx = 0;
-               info->control.sta = sta;
                info->band = hw->conf.channel->band;
-               rtlpriv->intf_ops->adapter_tx(hw, skb, &tcb_desc);
+               rtlpriv->intf_ops->adapter_tx(hw, sta, skb, &tcb_desc);
        }
 err_free:
        return 0;
index a18ad2a989381bc363aa782b514c913f1f8c5b1a..a7c0e52869ba3c708cf39685c59489122353b4a0 100644 (file)
@@ -124,7 +124,9 @@ static void rtl_op_stop(struct ieee80211_hw *hw)
        mutex_unlock(&rtlpriv->locks.conf_mutex);
 }
 
-static void rtl_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void rtl_op_tx(struct ieee80211_hw *hw,
+                     struct ieee80211_tx_control *control,
+                     struct sk_buff *skb)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
@@ -138,8 +140,8 @@ static void rtl_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
        if (!test_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status))
                goto err_free;
 
-       if (!rtlpriv->intf_ops->waitq_insert(hw, skb))
-               rtlpriv->intf_ops->adapter_tx(hw, skb, &tcb_desc);
+       if (!rtlpriv->intf_ops->waitq_insert(hw, control->sta, skb))
+               rtlpriv->intf_ops->adapter_tx(hw, control->sta, skb, &tcb_desc);
 
        return;
 
index 5983631a1b1a750c631a688f896e17043eb14fd1..abc306b502ac0348b97dd40eae26c01ebf6947d9 100644 (file)
@@ -502,7 +502,7 @@ static void _rtl_pci_tx_chk_waitq(struct ieee80211_hw *hw)
                                _rtl_update_earlymode_info(hw, skb,
                                                           &tcb_desc, tid);
 
-                       rtlpriv->intf_ops->adapter_tx(hw, skb, &tcb_desc);
+                       rtlpriv->intf_ops->adapter_tx(hw, NULL, skb, &tcb_desc);
                }
        }
 }
@@ -927,7 +927,7 @@ static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
        info = IEEE80211_SKB_CB(pskb);
        pdesc = &ring->desc[0];
        rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *) pdesc,
-               info, pskb, BEACON_QUEUE, &tcb_desc);
+               info, NULL, pskb, BEACON_QUEUE, &tcb_desc);
 
        __skb_queue_tail(&ring->queue, pskb);
 
@@ -1303,11 +1303,10 @@ int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw)
 }
 
 static bool rtl_pci_tx_chk_waitq_insert(struct ieee80211_hw *hw,
+                                       struct ieee80211_sta *sta,
                                        struct sk_buff *skb)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-       struct ieee80211_sta *sta = info->control.sta;
        struct rtl_sta_info *sta_entry = NULL;
        u8 tid = rtl_get_tid(skb);
 
@@ -1335,13 +1334,14 @@ static bool rtl_pci_tx_chk_waitq_insert(struct ieee80211_hw *hw,
        return true;
 }
 
-static int rtl_pci_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
-               struct rtl_tcb_desc *ptcb_desc)
+static int rtl_pci_tx(struct ieee80211_hw *hw,
+                     struct ieee80211_sta *sta,
+                     struct sk_buff *skb,
+                     struct rtl_tcb_desc *ptcb_desc)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_sta_info *sta_entry = NULL;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-       struct ieee80211_sta *sta = info->control.sta;
        struct rtl8192_tx_ring *ring;
        struct rtl_tx_desc *pdesc;
        u8 idx;
@@ -1416,7 +1416,7 @@ static int rtl_pci_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
                rtlpriv->cfg->ops->led_control(hw, LED_CTL_TX);
 
        rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *)pdesc,
-                       info, skb, hw_queue, ptcb_desc);
+                       info, sta, skb, hw_queue, ptcb_desc);
 
        __skb_queue_tail(&ring->queue, skb);
 
index a45afda8259c1fbf4f550b08d9af6904932e3675..1ca4e25c143b83026c43fcccc0ddc6723f900605 100644 (file)
@@ -167,7 +167,7 @@ static void rtl92c_dm_diginit(struct ieee80211_hw *hw)
        dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
        dm_digtable->cur_igvalue = 0x20;
        dm_digtable->pre_igvalue = 0x0;
-       dm_digtable->cursta_connectctate = DIG_STA_DISCONNECT;
+       dm_digtable->cursta_connectstate = DIG_STA_DISCONNECT;
        dm_digtable->presta_connectstate = DIG_STA_DISCONNECT;
        dm_digtable->curmultista_connectstate = DIG_MULTISTA_DISCONNECT;
        dm_digtable->rssi_lowthresh = DM_DIG_THRESH_LOW;
@@ -190,7 +190,7 @@ static u8 rtl92c_dm_initial_gain_min_pwdb(struct ieee80211_hw *hw)
        long rssi_val_min = 0;
 
        if ((dm_digtable->curmultista_connectstate == DIG_MULTISTA_CONNECT) &&
-           (dm_digtable->cursta_connectctate == DIG_STA_CONNECT)) {
+           (dm_digtable->cursta_connectstate == DIG_STA_CONNECT)) {
                if (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb != 0)
                        rssi_val_min =
                            (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb >
@@ -199,8 +199,8 @@ static u8 rtl92c_dm_initial_gain_min_pwdb(struct ieee80211_hw *hw)
                            rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
                else
                        rssi_val_min = rtlpriv->dm.undecorated_smoothed_pwdb;
-       } else if (dm_digtable->cursta_connectctate == DIG_STA_CONNECT ||
-                  dm_digtable->cursta_connectctate == DIG_STA_BEFORE_CONNECT) {
+       } else if (dm_digtable->cursta_connectstate == DIG_STA_CONNECT ||
+                  dm_digtable->cursta_connectstate == DIG_STA_BEFORE_CONNECT) {
                rssi_val_min = rtlpriv->dm.undecorated_smoothed_pwdb;
        } else if (dm_digtable->curmultista_connectstate ==
                   DIG_MULTISTA_CONNECT) {
@@ -334,7 +334,7 @@ static void rtl92c_dm_initial_gain_multi_sta(struct ieee80211_hw *hw)
                multi_sta = true;
 
        if (!multi_sta ||
-           dm_digtable->cursta_connectctate != DIG_STA_DISCONNECT) {
+           dm_digtable->cursta_connectstate != DIG_STA_DISCONNECT) {
                initialized = false;
                dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
                return;
@@ -378,15 +378,15 @@ static void rtl92c_dm_initial_gain_sta(struct ieee80211_hw *hw)
        struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
 
        RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
-                "presta_connectstate = %x, cursta_connectctate = %x\n",
+                "presta_connectstate = %x, cursta_connectstate = %x\n",
                 dm_digtable->presta_connectstate,
-                dm_digtable->cursta_connectctate);
+                dm_digtable->cursta_connectstate);
 
-       if (dm_digtable->presta_connectstate == dm_digtable->cursta_connectctate
-           || dm_digtable->cursta_connectctate == DIG_STA_BEFORE_CONNECT
-           || dm_digtable->cursta_connectctate == DIG_STA_CONNECT) {
+       if (dm_digtable->presta_connectstate == dm_digtable->cursta_connectstate
+           || dm_digtable->cursta_connectstate == DIG_STA_BEFORE_CONNECT
+           || dm_digtable->cursta_connectstate == DIG_STA_CONNECT) {
 
-               if (dm_digtable->cursta_connectctate != DIG_STA_DISCONNECT) {
+               if (dm_digtable->cursta_connectstate != DIG_STA_DISCONNECT) {
                        dm_digtable->rssi_val_min =
                            rtl92c_dm_initial_gain_min_pwdb(hw);
                        rtl92c_dm_ctrl_initgain_by_rssi(hw);
@@ -407,7 +407,7 @@ static void rtl92c_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
        struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
        struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
 
-       if (dm_digtable->cursta_connectctate == DIG_STA_CONNECT) {
+       if (dm_digtable->cursta_connectstate == DIG_STA_CONNECT) {
                dm_digtable->rssi_val_min = rtl92c_dm_initial_gain_min_pwdb(hw);
 
                if (dm_digtable->pre_cck_pd_state == CCK_PD_STAGE_LowRssi) {
@@ -484,15 +484,15 @@ static void rtl92c_dm_ctrl_initgain_by_twoport(struct ieee80211_hw *hw)
                return;
 
        if (mac->link_state >= MAC80211_LINKED)
-               dm_digtable->cursta_connectctate = DIG_STA_CONNECT;
+               dm_digtable->cursta_connectstate = DIG_STA_CONNECT;
        else
-               dm_digtable->cursta_connectctate = DIG_STA_DISCONNECT;
+               dm_digtable->cursta_connectstate = DIG_STA_DISCONNECT;
 
        rtl92c_dm_initial_gain_sta(hw);
        rtl92c_dm_initial_gain_multi_sta(hw);
        rtl92c_dm_cck_packet_detection_thresh(hw);
 
-       dm_digtable->presta_connectstate = dm_digtable->cursta_connectctate;
+       dm_digtable->presta_connectstate = dm_digtable->cursta_connectstate;
 
 }
 
@@ -1214,18 +1214,13 @@ static void rtl92c_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
                                 "PreState = %d, CurState = %d\n",
                                 p_ra->pre_ratr_state, p_ra->ratr_state);
 
-                       /* Only the PCI card uses sta in the update rate table
-                        * callback routine */
-                       if (rtlhal->interface == INTF_PCI) {
-                               rcu_read_lock();
-                               sta = ieee80211_find_sta(mac->vif, mac->bssid);
-                       }
+                       rcu_read_lock();
+                       sta = ieee80211_find_sta(mac->vif, mac->bssid);
                        rtlpriv->cfg->ops->update_rate_tbl(hw, sta,
                                        p_ra->ratr_state);
 
                        p_ra->pre_ratr_state = p_ra->ratr_state;
-                       if (rtlhal->interface == INTF_PCI)
-                               rcu_read_unlock();
+                       rcu_read_unlock();
                }
        }
 }
index 8a7b864faca30cc7bfe7cd7462b959be285a609e..883f23ae95194f66fc416853a2a027a4a8a6f9a1 100644 (file)
@@ -577,8 +577,7 @@ static bool _rtl92c_cmd_send_packet(struct ieee80211_hw *hw,
        ring = &rtlpci->tx_ring[BEACON_QUEUE];
 
        pskb = __skb_dequeue(&ring->queue);
-       if (pskb)
-               kfree_skb(pskb);
+       kfree_skb(pskb);
 
        spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
 
index dd4bb0950a575ddd37c31489f10d2bacdaaab940..86d73b32d9956c81f9b43886805bc23cb4adaa10 100644 (file)
@@ -1914,8 +1914,8 @@ static void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw,
        }
        RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
                 "ratr_bitmap :%x\n", ratr_bitmap);
-       *(u32 *)&rate_mask = EF4BYTE((ratr_bitmap & 0x0fffffff) |
-                                    (ratr_index << 28));
+       *(u32 *)&rate_mask = (ratr_bitmap & 0x0fffffff) |
+                                    (ratr_index << 28);
        rate_mask[4] = macid | (shortgi ? 0x20 : 0x00) | 0x80;
        RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
                 "Rate_index:%x, ratr_val:%x, %x:%x:%x:%x:%x\n",
index 7d8f96405f42068bafa9c08d7ec6a382f2e2c7f0..ea2e1bd847c847d83b000f808b7cc1d60c99b7d6 100644 (file)
@@ -344,7 +344,7 @@ static struct rtl_hal_cfg rtl92ce_hal_cfg = {
        .maps[RTL_RC_HT_RATEMCS15] = DESC92_RATEMCS15,
 };
 
-DEFINE_PCI_DEVICE_TABLE(rtl92ce_pci_ids) = {
+static DEFINE_PCI_DEVICE_TABLE(rtl92ce_pci_ids) = {
        {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8191, rtl92ce_hal_cfg)},
        {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8178, rtl92ce_hal_cfg)},
        {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8177, rtl92ce_hal_cfg)},
index 52166640f1679897480bb02ebebf33251002e051..390d6d4fcaa027654e82ceea0892933751383be6 100644 (file)
@@ -596,7 +596,9 @@ bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw,
 
 void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
                          struct ieee80211_hdr *hdr, u8 *pdesc_tx,
-                         struct ieee80211_tx_info *info, struct sk_buff *skb,
+                         struct ieee80211_tx_info *info,
+                         struct ieee80211_sta *sta,
+                         struct sk_buff *skb,
                          u8 hw_queue, struct rtl_tcb_desc *tcb_desc)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -604,7 +606,6 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
        struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
        struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
        bool defaultadapter = true;
-       struct ieee80211_sta *sta;
        u8 *pdesc = pdesc_tx;
        u16 seq_number;
        __le16 fc = hdr->frame_control;
index c4adb97773659b5353b8d1d724e6f3cfa3ff0e8f..a7cdd514cb2e2bfd6a86aa629a997a3d1c5f5db8 100644 (file)
@@ -713,6 +713,7 @@ struct rx_desc_92c {
 void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
                          struct ieee80211_hdr *hdr,
                          u8 *pdesc, struct ieee80211_tx_info *info,
+                         struct ieee80211_sta *sta,
                          struct sk_buff *skb, u8 hw_queue,
                          struct rtl_tcb_desc *ptcb_desc);
 bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw,
index 2e6eb356a93ed3151ece64055875d1e292d9e372..6e66f04c363fb43ad25557e5149cab6a78a6d933 100644 (file)
@@ -491,12 +491,14 @@ static void _rtl_tx_desc_checksum(u8 *txdesc)
        SET_TX_DESC_TX_DESC_CHECKSUM(txdesc, 0);
        for (index = 0; index < 16; index++)
                checksum = checksum ^ (*(ptr + index));
-       SET_TX_DESC_TX_DESC_CHECKSUM(txdesc, cpu_to_le16(checksum));
+       SET_TX_DESC_TX_DESC_CHECKSUM(txdesc, checksum);
 }
 
 void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
                          struct ieee80211_hdr *hdr, u8 *pdesc_tx,
-                         struct ieee80211_tx_info *info, struct sk_buff *skb,
+                         struct ieee80211_tx_info *info,
+                         struct ieee80211_sta *sta,
+                         struct sk_buff *skb,
                          u8 queue_index,
                          struct rtl_tcb_desc *tcb_desc)
 {
@@ -504,7 +506,6 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
        struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
        struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
        bool defaultadapter = true;
-       struct ieee80211_sta *sta = info->control.sta = info->control.sta;
        u8 *qc = ieee80211_get_qos_ctl(hdr);
        u8 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
        u16 seq_number;
index 332b06e78b00d8e9fb849e03709fcb0b412849b0..725c53accc5839bbd30af97e753ab6b8d040ee48 100644 (file)
@@ -420,7 +420,9 @@ struct sk_buff *rtl8192c_tx_aggregate_hdl(struct ieee80211_hw *,
                                           struct sk_buff_head *);
 void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
                          struct ieee80211_hdr *hdr, u8 *pdesc_tx,
-                         struct ieee80211_tx_info *info, struct sk_buff *skb,
+                         struct ieee80211_tx_info *info,
+                         struct ieee80211_sta *sta,
+                         struct sk_buff *skb,
                          u8 queue_index,
                          struct rtl_tcb_desc *tcb_desc);
 void rtl92cu_fill_fake_txdesc(struct ieee80211_hw *hw, u8 * pDesc,
index c0201ed69dd75737b2be2e3c65717bdec90232ae..ed868c396c257d5a5b4a22affcc4c12e401b0d0c 100644 (file)
@@ -164,7 +164,7 @@ static void rtl92d_dm_diginit(struct ieee80211_hw *hw)
        de_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
        de_digtable->cur_igvalue = 0x20;
        de_digtable->pre_igvalue = 0x0;
-       de_digtable->cursta_connectctate = DIG_STA_DISCONNECT;
+       de_digtable->cursta_connectstate = DIG_STA_DISCONNECT;
        de_digtable->presta_connectstate = DIG_STA_DISCONNECT;
        de_digtable->curmultista_connectstate = DIG_MULTISTA_DISCONNECT;
        de_digtable->rssi_lowthresh = DM_DIG_THRESH_LOW;
@@ -310,7 +310,7 @@ static void rtl92d_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
        struct dig_t *de_digtable = &rtlpriv->dm_digtable;
        unsigned long flag = 0;
 
-       if (de_digtable->cursta_connectctate == DIG_STA_CONNECT) {
+       if (de_digtable->cursta_connectstate == DIG_STA_CONNECT) {
                if (de_digtable->pre_cck_pd_state == CCK_PD_STAGE_LOWRSSI) {
                        if (de_digtable->min_undecorated_pwdb_for_dm <= 25)
                                de_digtable->cur_cck_pd_state =
@@ -342,7 +342,7 @@ static void rtl92d_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
                de_digtable->pre_cck_pd_state = de_digtable->cur_cck_pd_state;
        }
        RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "CurSTAConnectState=%s\n",
-                de_digtable->cursta_connectctate == DIG_STA_CONNECT ?
+                de_digtable->cursta_connectstate == DIG_STA_CONNECT ?
                 "DIG_STA_CONNECT " : "DIG_STA_DISCONNECT");
        RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "CCKPDStage=%s\n",
                 de_digtable->cur_cck_pd_state == CCK_PD_STAGE_LOWRSSI ?
@@ -428,9 +428,9 @@ static void rtl92d_dm_dig(struct ieee80211_hw *hw)
        RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "progress\n");
        /* Decide the current status and if modify initial gain or not */
        if (rtlpriv->mac80211.link_state >= MAC80211_LINKED)
-               de_digtable->cursta_connectctate = DIG_STA_CONNECT;
+               de_digtable->cursta_connectstate = DIG_STA_CONNECT;
        else
-               de_digtable->cursta_connectctate = DIG_STA_DISCONNECT;
+               de_digtable->cursta_connectstate = DIG_STA_DISCONNECT;
 
        /* adjust initial gain according to false alarm counter */
        if (falsealm_cnt->cnt_all < DM_DIG_FA_TH0)
index eb22dccc418bb3ac50c4d9513cf671517ce8970f..23177076b97f9795b7ba2c57581f49924823fc3e 100644 (file)
@@ -570,8 +570,7 @@ static bool _rtl92d_cmd_send_packet(struct ieee80211_hw *hw,
 
        ring = &rtlpci->tx_ring[BEACON_QUEUE];
        pskb = __skb_dequeue(&ring->queue);
-       if (pskb)
-               kfree_skb(pskb);
+       kfree_skb(pskb);
        spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
        pdesc = &ring->desc[idx];
        /* discard output from call below */
index 442031256bceeda3df1bc56a45fbdcb9edd6aff3..db0086062d0574f59018739a8ac3f198c3a4aa34 100644 (file)
@@ -1314,7 +1314,7 @@ static void _rtl92d_phy_restore_rf_env(struct ieee80211_hw *hw, u8 rfpath,
        struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
 
        RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD, "=====>\n");
-       /*----Restore RFENV control type----*/ ;
+       /*----Restore RFENV control type----*/
        switch (rfpath) {
        case RF90_PATH_A:
        case RF90_PATH_C:
index f80690d82c117ab430b25caac22170be63a32e9a..4686f340b9d6095698c0e8b2f3a16f3bd2ae0fdb 100644 (file)
@@ -551,7 +551,9 @@ static void _rtl92de_insert_emcontent(struct rtl_tcb_desc *ptcb_desc,
 
 void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
                          struct ieee80211_hdr *hdr, u8 *pdesc_tx,
-                         struct ieee80211_tx_info *info, struct sk_buff *skb,
+                         struct ieee80211_tx_info *info,
+                         struct ieee80211_sta *sta,
+                         struct sk_buff *skb,
                          u8 hw_queue, struct rtl_tcb_desc *ptcb_desc)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -559,7 +561,6 @@ void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
        struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
        struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
        struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
-       struct ieee80211_sta *sta = info->control.sta;
        u8 *pdesc = pdesc_tx;
        u16 seq_number;
        __le16 fc = hdr->frame_control;
index 057a52431b0036a9b953b9628397a851cb913995..c1b5dfb79d53ce2d2ebf843db65c14999d92196c 100644 (file)
@@ -730,6 +730,7 @@ struct rx_desc_92d {
 void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
                          struct ieee80211_hdr *hdr,
                          u8 *pdesc, struct ieee80211_tx_info *info,
+                         struct ieee80211_sta *sta,
                          struct sk_buff *skb, u8 hw_queue,
                          struct rtl_tcb_desc *ptcb_desc);
 bool rtl92de_rx_query_desc(struct ieee80211_hw *hw,
index 36d1cb3aef8a7d5a76e019ab4671f140c38b6255..e3cf4c02122a9baaad488c4e02aaf23fcb33f21f 100644 (file)
@@ -591,14 +591,15 @@ bool rtl92se_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
 
 void rtl92se_tx_fill_desc(struct ieee80211_hw *hw,
                struct ieee80211_hdr *hdr, u8 *pdesc_tx,
-               struct ieee80211_tx_info *info, struct sk_buff *skb,
+               struct ieee80211_tx_info *info,
+               struct ieee80211_sta *sta,
+               struct sk_buff *skb,
                u8 hw_queue, struct rtl_tcb_desc *ptcb_desc)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
        struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
        struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-       struct ieee80211_sta *sta = info->control.sta;
        u8 *pdesc = pdesc_tx;
        u16 seq_number;
        __le16 fc = hdr->frame_control;
@@ -755,7 +756,7 @@ void rtl92se_tx_fill_desc(struct ieee80211_hw *hw,
        SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16) skb->len);
 
        /* DOWRD 8 */
-       SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, cpu_to_le32(mapping));
+       SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping);
 
        RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "\n");
 }
@@ -785,7 +786,7 @@ void rtl92se_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
                /* 92SE need not to set TX packet size when firmware download */
                SET_TX_DESC_PKT_SIZE(pdesc, (u16)(skb->len));
                SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16)(skb->len));
-               SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, cpu_to_le32(mapping));
+               SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping);
 
                wmb();
                SET_TX_DESC_OWN(pdesc, 1);
@@ -804,7 +805,7 @@ void rtl92se_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
                SET_BITS_TO_LE_4BYTE(skb->data, 24, 7, rtlhal->h2c_txcmd_seq);
 
                SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16)(skb->len));
-               SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, cpu_to_le32(mapping));
+               SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping);
 
                wmb();
                SET_TX_DESC_OWN(pdesc, 1);
index 011e7b0695f24f0a00bdd322a82e961cb69faa28..64dd66f287c182a25949126d303925124a24bd71 100644 (file)
@@ -31,6 +31,7 @@
 
 void rtl92se_tx_fill_desc(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr,
                          u8 *pdesc, struct ieee80211_tx_info *info,
+                         struct ieee80211_sta *sta,
                          struct sk_buff *skb, u8 hw_queue,
                          struct rtl_tcb_desc *ptcb_desc);
 void rtl92se_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, bool firstseg,
index aa970fc18a2176e736758d7467a93e577b835b92..030beb45d8b0b2b30669e945eb1ccc2e2cbd5fd7 100644 (file)
@@ -120,7 +120,7 @@ static int _usbctrl_vendorreq_sync_read(struct usb_device *udev, u8 request,
 
        if (status < 0 && count++ < 4)
                pr_err("reg 0x%x, usbctrl_vendorreq TimeOut! status:0x%x value=0x%x\n",
-                      value, status, le32_to_cpu(*(u32 *)pdata));
+                      value, status, *(u32 *)pdata);
        return status;
 }
 
@@ -848,8 +848,10 @@ static void _rtl_usb_transmit(struct ieee80211_hw *hw, struct sk_buff *skb,
        _rtl_submit_tx_urb(hw, _urb);
 }
 
-static void _rtl_usb_tx_preprocess(struct ieee80211_hw *hw, struct sk_buff *skb,
-                           u16 hw_queue)
+static void _rtl_usb_tx_preprocess(struct ieee80211_hw *hw,
+                                  struct ieee80211_sta *sta,
+                                  struct sk_buff *skb,
+                                  u16 hw_queue)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
@@ -891,7 +893,7 @@ static void _rtl_usb_tx_preprocess(struct ieee80211_hw *hw, struct sk_buff *skb,
                seq_number += 1;
                seq_number <<= 4;
        }
-       rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *)pdesc, info, skb,
+       rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *)pdesc, info, sta, skb,
                                        hw_queue, &tcb_desc);
        if (!ieee80211_has_morefrags(hdr->frame_control)) {
                if (qc)
@@ -901,7 +903,9 @@ static void _rtl_usb_tx_preprocess(struct ieee80211_hw *hw, struct sk_buff *skb,
                rtlpriv->cfg->ops->led_control(hw, LED_CTL_TX);
 }
 
-static int rtl_usb_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
+static int rtl_usb_tx(struct ieee80211_hw *hw,
+                     struct ieee80211_sta *sta,
+                     struct sk_buff *skb,
                      struct rtl_tcb_desc *dummy)
 {
        struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
@@ -913,7 +917,7 @@ static int rtl_usb_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
        if (unlikely(is_hal_stop(rtlhal)))
                goto err_free;
        hw_queue = rtlusb->usb_mq_to_hwq(fc, skb_get_queue_mapping(skb));
-       _rtl_usb_tx_preprocess(hw, skb, hw_queue);
+       _rtl_usb_tx_preprocess(hw, sta, skb, hw_queue);
        _rtl_usb_transmit(hw, skb, hw_queue);
        return NETDEV_TX_OK;
 
@@ -923,6 +927,7 @@ err_free:
 }
 
 static bool rtl_usb_tx_chk_waitq_insert(struct ieee80211_hw *hw,
+                                       struct ieee80211_sta *sta,
                                        struct sk_buff *skb)
 {
        return false;
index cdaa21f297108fe1ac4306d001843965029ac514..f1b6bc693b0a28ddddfd89ef50ffa898f3ce24f7 100644 (file)
@@ -122,7 +122,7 @@ enum rt_eeprom_type {
        EEPROM_BOOT_EFUSE,
 };
 
-enum rtl_status {
+enum ttl_status {
        RTL_STATUS_INTERFACE_START = 0,
 };
 
@@ -135,7 +135,7 @@ enum hardware_type {
        HARDWARE_TYPE_RTL8192CU,
        HARDWARE_TYPE_RTL8192DE,
        HARDWARE_TYPE_RTL8192DU,
-       HARDWARE_TYPE_RTL8723E,
+       HARDWARE_TYPE_RTL8723AE,
        HARDWARE_TYPE_RTL8723U,
 
        /* keep it last */
@@ -389,6 +389,7 @@ enum rt_enc_alg {
        RSERVED_ENCRYPTION = 3,
        AESCCMP_ENCRYPTION = 4,
        WEP104_ENCRYPTION = 5,
+       AESCMAC_ENCRYPTION = 6, /*IEEE802.11w */
 };
 
 enum rtl_hal_state {
@@ -873,6 +874,7 @@ struct rtl_phy {
        u32 adda_backup[16];
        u32 iqk_mac_backup[IQK_MAC_REG_NUM];
        u32 iqk_bb_backup[10];
+       bool iqk_initialized;
 
        /* Dual mac */
        bool need_iqk;
@@ -910,6 +912,8 @@ struct rtl_phy {
 #define RTL_AGG_OPERATIONAL                    3
 #define RTL_AGG_OFF                            0
 #define RTL_AGG_ON                             1
+#define RTL_RX_AGG_START                       1
+#define RTL_RX_AGG_STOP                                0
 #define RTL_AGG_EMPTYING_HW_QUEUE_ADDBA                2
 #define RTL_AGG_EMPTYING_HW_QUEUE_DELBA                3
 
@@ -920,6 +924,7 @@ struct rtl_ht_agg {
        u64 bitmap;
        u32 rate_n_flags;
        u8 agg_state;
+       u8 rx_agg_state;
 };
 
 struct rtl_tid_data {
@@ -927,11 +932,19 @@ struct rtl_tid_data {
        struct rtl_ht_agg agg;
 };
 
+struct rssi_sta {
+       long undecorated_smoothed_pwdb;
+};
+
 struct rtl_sta_info {
+       struct list_head list;
        u8 ratr_index;
        u8 wireless_mode;
        u8 mimo_ps;
        struct rtl_tid_data tids[MAX_TID_COUNT];
+
+       /* just used for ap adhoc or mesh*/
+       struct rssi_sta rssi_stat;
 } __packed;
 
 struct rtl_priv;
@@ -1034,6 +1047,11 @@ struct rtl_mac {
 struct rtl_hal {
        struct ieee80211_hw *hw;
 
+       bool up_first_time;
+       bool first_init;
+       bool being_init_adapter;
+       bool bbrf_ready;
+
        enum intf_type interface;
        u16 hw_type;            /*92c or 92d or 92s and so on */
        u8 ic_class;
@@ -1048,6 +1066,7 @@ struct rtl_hal {
        u16 fw_subversion;
        bool h2c_setinprogress;
        u8 last_hmeboxnum;
+       bool fw_ready;
        /*Reserve page start offset except beacon in TxQ. */
        u8 fw_rsvdpage_startoffset;
        u8 h2c_txcmd_seq;
@@ -1083,6 +1102,8 @@ struct rtl_hal {
        bool load_imrandiqk_setting_for2g;
 
        bool disable_amsdu_8k;
+       bool master_of_dmsp;
+       bool slave_of_dmsp;
 };
 
 struct rtl_security {
@@ -1144,6 +1165,9 @@ struct rtl_dm {
        bool disable_tx_int;
        char ofdm_index[2];
        char cck_index;
+
+       /* DMSP */
+       bool supp_phymode_switch;
 };
 
 #define        EFUSE_MAX_LOGICAL_SIZE                  256
@@ -1337,6 +1361,10 @@ struct rtl_stats {
 };
 
 struct rt_link_detect {
+       /* count for roaming */
+       u32 bcn_rx_inperiod;
+       u32 roam_times;
+
        u32 num_tx_in4period[4];
        u32 num_rx_in4period[4];
 
@@ -1344,6 +1372,8 @@ struct rt_link_detect {
        u32 num_rx_inperiod;
 
        bool busytraffic;
+       bool tx_busy_traffic;
+       bool rx_busy_traffic;
        bool higher_busytraffic;
        bool higher_busyrxtraffic;
 
@@ -1418,6 +1448,7 @@ struct rtl_hal_ops {
        void (*fill_tx_desc) (struct ieee80211_hw *hw,
                              struct ieee80211_hdr *hdr, u8 *pdesc_tx,
                              struct ieee80211_tx_info *info,
+                             struct ieee80211_sta *sta,
                              struct sk_buff *skb, u8 hw_queue,
                              struct rtl_tcb_desc *ptcb_desc);
        void (*fill_fake_txdesc) (struct ieee80211_hw *hw, u8 *pDesc,
@@ -1454,7 +1485,12 @@ struct rtl_hal_ops {
                          u32 regaddr, u32 bitmask);
        void (*set_rfreg) (struct ieee80211_hw *hw, enum radio_path rfpath,
                           u32 regaddr, u32 bitmask, u32 data);
+       void (*allow_all_destaddr)(struct ieee80211_hw *hw,
+               bool allow_all_da, bool write_into_reg);
        void (*linked_set_reg) (struct ieee80211_hw *hw);
+       void (*check_switch_to_dmdp) (struct ieee80211_hw *hw);
+       void (*dualmac_easy_concurrent) (struct ieee80211_hw *hw);
+       void (*dualmac_switch_to_dmdp) (struct ieee80211_hw *hw);
        bool (*phy_rf6052_config) (struct ieee80211_hw *hw);
        void (*phy_rf6052_set_cck_txpower) (struct ieee80211_hw *hw,
                                            u8 *powerlevel);
@@ -1474,12 +1510,18 @@ struct rtl_intf_ops {
        void (*read_efuse_byte)(struct ieee80211_hw *hw, u16 _offset, u8 *pbuf);
        int (*adapter_start) (struct ieee80211_hw *hw);
        void (*adapter_stop) (struct ieee80211_hw *hw);
+       bool (*check_buddy_priv)(struct ieee80211_hw *hw,
+                                struct rtl_priv **buddy_priv);
 
-       int (*adapter_tx) (struct ieee80211_hw *hw, struct sk_buff *skb,
-                       struct rtl_tcb_desc *ptcb_desc);
+       int (*adapter_tx) (struct ieee80211_hw *hw,
+                          struct ieee80211_sta *sta,
+                          struct sk_buff *skb,
+                          struct rtl_tcb_desc *ptcb_desc);
        void (*flush)(struct ieee80211_hw *hw, bool drop);
        int (*reset_trx_ring) (struct ieee80211_hw *hw);
-       bool (*waitq_insert) (struct ieee80211_hw *hw, struct sk_buff *skb);
+       bool (*waitq_insert) (struct ieee80211_hw *hw,
+                             struct ieee80211_sta *sta,
+                             struct sk_buff *skb);
 
        /*pci */
        void (*disable_aspm) (struct ieee80211_hw *hw);
@@ -1554,11 +1596,16 @@ struct rtl_locks {
        spinlock_t h2c_lock;
        spinlock_t rf_ps_lock;
        spinlock_t rf_lock;
+       spinlock_t lps_lock;
        spinlock_t waitq_lock;
+       spinlock_t entry_list_lock;
        spinlock_t usb_lock;
 
        /*Dual mac*/
        spinlock_t cck_and_rw_pagea_lock;
+
+       /*Easy concurrent*/
+       spinlock_t check_sendpkt_lock;
 };
 
 struct rtl_works {
@@ -1566,6 +1613,7 @@ struct rtl_works {
 
        /*timer */
        struct timer_list watchdog_timer;
+       struct timer_list dualmac_easyconcurrent_retrytimer;
 
        /*task */
        struct tasklet_struct irq_tasklet;
@@ -1593,6 +1641,31 @@ struct rtl_debug {
        char proc_name[20];
 };
 
+#define MIMO_PS_STATIC                 0
+#define MIMO_PS_DYNAMIC                        1
+#define MIMO_PS_NOLIMIT                        3
+
+struct rtl_dualmac_easy_concurrent_ctl {
+       enum band_type currentbandtype_backfordmdp;
+       bool close_bbandrf_for_dmsp;
+       bool change_to_dmdp;
+       bool change_to_dmsp;
+       bool switch_in_process;
+};
+
+struct rtl_dmsp_ctl {
+       bool activescan_for_slaveofdmsp;
+       bool scan_for_anothermac_fordmsp;
+       bool scan_for_itself_fordmsp;
+       bool writedig_for_anothermacofdmsp;
+       u32 curdigvalue_for_anothermacofdmsp;
+       bool changecckpdstate_for_anothermacofdmsp;
+       u8 curcckpdstate_for_anothermacofdmsp;
+       bool changetxhighpowerlvl_for_anothermacofdmsp;
+       u8 curtxhighlvl_for_anothermacofdmsp;
+       long rssivalmin_for_anothermacofdmsp;
+};
+
 struct ps_t {
        u8 pre_ccastate;
        u8 cur_ccasate;
@@ -1619,7 +1692,7 @@ struct dig_t {
        u8 dig_twoport_algorithm;
        u8 dig_dbgmode;
        u8 dig_slgorithm_switch;
-       u8 cursta_connectctate;
+       u8 cursta_connectstate;
        u8 presta_connectstate;
        u8 curmultista_connectstate;
        char backoff_val;
@@ -1652,8 +1725,20 @@ struct dig_t {
        char backoffval_range_min;
 };
 
+struct rtl_global_var {
+       /* from this list we can get
+        * other adapter's rtl_priv */
+       struct list_head glb_priv_list;
+       spinlock_t glb_list_lock;
+};
+
 struct rtl_priv {
        struct completion firmware_loading_complete;
+       struct list_head list;
+       struct rtl_priv *buddy_priv;
+       struct rtl_global_var *glb_var;
+       struct rtl_dualmac_easy_concurrent_ctl easy_concurrent_ctl;
+       struct rtl_dmsp_ctl dmsp_ctl;
        struct rtl_locks locks;
        struct rtl_works works;
        struct rtl_mac mac80211;
@@ -1674,6 +1759,9 @@ struct rtl_priv {
 
        struct rtl_rate_priv *rate_priv;
 
+       /* sta entry list for ap adhoc or mesh */
+       struct list_head entry_list;
+
        struct rtl_debug dbg;
        int max_fw_size;
 
@@ -1815,9 +1903,9 @@ struct bt_coexist_info {
        EF1BYTE(*((u8 *)(_ptr)))
 /* Read le16 data from memory and convert to host ordering */
 #define READEF2BYTE(_ptr)      \
-       EF2BYTE(*((u16 *)(_ptr)))
+       EF2BYTE(*(_ptr))
 #define READEF4BYTE(_ptr)      \
-       EF4BYTE(*((u32 *)(_ptr)))
+       EF4BYTE(*(_ptr))
 
 /* Write data to memory */
 #define WRITEEF1BYTE(_ptr, _val)       \
@@ -1826,7 +1914,7 @@ struct bt_coexist_info {
 #define WRITEEF2BYTE(_ptr, _val)       \
        (*((u16 *)(_ptr))) = EF2BYTE(_val)
 #define WRITEEF4BYTE(_ptr, _val)       \
-       (*((u16 *)(_ptr))) = EF2BYTE(_val)
+       (*((u32 *)(_ptr))) = EF2BYTE(_val)
 
 /* Create a bit mask
  * Examples:
@@ -1859,9 +1947,9 @@ struct bt_coexist_info {
  * 4-byte pointer in little-endian system.
  */
 #define LE_P4BYTE_TO_HOST_4BYTE(__pstart) \
-       (EF4BYTE(*((u32 *)(__pstart))))
+       (EF4BYTE(*((__le32 *)(__pstart))))
 #define LE_P2BYTE_TO_HOST_2BYTE(__pstart) \
-       (EF2BYTE(*((u16 *)(__pstart))))
+       (EF2BYTE(*((__le16 *)(__pstart))))
 #define LE_P1BYTE_TO_HOST_1BYTE(__pstart) \
        (EF1BYTE(*((u8 *)(__pstart))))
 
@@ -1908,13 +1996,13 @@ value to host byte ordering.*/
  * Set subfield of little-endian 4-byte value to specified value.
  */
 #define SET_BITS_TO_LE_4BYTE(__pstart, __bitoffset, __bitlen, __val) \
-       *((u32 *)(__pstart)) = EF4BYTE \
+       *((u32 *)(__pstart)) = \
        ( \
                LE_BITS_CLEARED_TO_4BYTE(__pstart, __bitoffset, __bitlen) | \
                ((((u32)__val) & BIT_LEN_MASK_32(__bitlen)) << (__bitoffset)) \
        );
 #define SET_BITS_TO_LE_2BYTE(__pstart, __bitoffset, __bitlen, __val) \
-       *((u16 *)(__pstart)) = EF2BYTE \
+       *((u16 *)(__pstart)) = \
        ( \
                LE_BITS_CLEARED_TO_2BYTE(__pstart, __bitoffset, __bitlen) | \
                ((((u16)__val) & BIT_LEN_MASK_16(__bitlen)) << (__bitoffset)) \
@@ -2100,4 +2188,11 @@ static inline struct ieee80211_sta *get_sta(struct ieee80211_hw *hw,
        return ieee80211_find_sta(vif, bssid);
 }
 
+static inline struct ieee80211_sta *rtl_find_sta(struct ieee80211_hw *hw,
+               u8 *mac_addr)
+{
+       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+       return ieee80211_find_sta(mac->vif, mac_addr);
+}
+
 #endif
index 3118c425bcf17dcbdf85f274a568e25a80cac9e2..441cbccbd38162bb8b1e83c11fe5fe266897a9c8 100644 (file)
@@ -354,7 +354,9 @@ out:
        return ret;
 }
 
-static void wl1251_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void wl1251_op_tx(struct ieee80211_hw *hw,
+                        struct ieee80211_tx_control *control,
+                        struct sk_buff *skb)
 {
        struct wl1251 *wl = hw->priv;
        unsigned long flags;
index f429fc110cb0ac1a09e887564310f04dfe0c72dd..dadf1dbb002a3ac7bfb904f47e694eb5b5b37c19 100644 (file)
@@ -32,7 +32,6 @@
 #include "../wlcore/acx.h"
 #include "../wlcore/tx.h"
 #include "../wlcore/rx.h"
-#include "../wlcore/io.h"
 #include "../wlcore/boot.h"
 
 #include "wl12xx.h"
@@ -1185,9 +1184,16 @@ static int wl12xx_enable_interrupts(struct wl1271 *wl)
        ret = wlcore_write_reg(wl, REG_INTERRUPT_MASK,
                               WL1271_ACX_INTR_ALL & ~(WL12XX_INTR_MASK));
        if (ret < 0)
-               goto out;
+               goto disable_interrupts;
 
        ret = wlcore_write32(wl, WL12XX_HI_CFG, HI_CFG_DEF_VAL);
+       if (ret < 0)
+               goto disable_interrupts;
+
+       return ret;
+
+disable_interrupts:
+       wlcore_disable_interrupts(wl);
 
 out:
        return ret;
@@ -1583,7 +1589,10 @@ static int wl12xx_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
        return wlcore_set_key(wl, cmd, vif, sta, key_conf);
 }
 
+static int wl12xx_setup(struct wl1271 *wl);
+
 static struct wlcore_ops wl12xx_ops = {
+       .setup                  = wl12xx_setup,
        .identify_chip          = wl12xx_identify_chip,
        .identify_fw            = wl12xx_identify_fw,
        .boot                   = wl12xx_boot,
@@ -1624,26 +1633,15 @@ static struct ieee80211_sta_ht_cap wl12xx_ht_cap = {
                },
 };
 
-static int __devinit wl12xx_probe(struct platform_device *pdev)
+static int wl12xx_setup(struct wl1271 *wl)
 {
-       struct wl12xx_platform_data *pdata = pdev->dev.platform_data;
-       struct wl1271 *wl;
-       struct ieee80211_hw *hw;
-       struct wl12xx_priv *priv;
-
-       hw = wlcore_alloc_hw(sizeof(*priv));
-       if (IS_ERR(hw)) {
-               wl1271_error("can't allocate hw");
-               return PTR_ERR(hw);
-       }
+       struct wl12xx_priv *priv = wl->priv;
+       struct wl12xx_platform_data *pdata = wl->pdev->dev.platform_data;
 
-       wl = hw->priv;
-       priv = wl->priv;
-       wl->ops = &wl12xx_ops;
-       wl->ptable = wl12xx_ptable;
        wl->rtable = wl12xx_rtable;
-       wl->num_tx_desc = 16;
-       wl->num_rx_desc = 8;
+       wl->num_tx_desc = WL12XX_NUM_TX_DESCRIPTORS;
+       wl->num_rx_desc = WL12XX_NUM_RX_DESCRIPTORS;
+       wl->num_mac_addr = WL12XX_NUM_MAC_ADDRESSES;
        wl->band_rate_to_idx = wl12xx_band_rate_to_idx;
        wl->hw_tx_rate_tbl_size = WL12XX_CONF_HW_RXTX_RATE_MAX;
        wl->hw_min_ht_rate = WL12XX_CONF_HW_RXTX_RATE_MCS0;
@@ -1695,7 +1693,36 @@ static int __devinit wl12xx_probe(struct platform_device *pdev)
                        wl1271_error("Invalid tcxo parameter %s", tcxo_param);
        }
 
-       return wlcore_probe(wl, pdev);
+       return 0;
+}
+
+static int __devinit wl12xx_probe(struct platform_device *pdev)
+{
+       struct wl1271 *wl;
+       struct ieee80211_hw *hw;
+       int ret;
+
+       hw = wlcore_alloc_hw(sizeof(struct wl12xx_priv),
+                            WL12XX_AGGR_BUFFER_SIZE);
+       if (IS_ERR(hw)) {
+               wl1271_error("can't allocate hw");
+               ret = PTR_ERR(hw);
+               goto out;
+       }
+
+       wl = hw->priv;
+       wl->ops = &wl12xx_ops;
+       wl->ptable = wl12xx_ptable;
+       ret = wlcore_probe(wl, pdev);
+       if (ret)
+               goto out_free;
+
+       return ret;
+
+out_free:
+       wlcore_free_hw(wl);
+out:
+       return ret;
 }
 
 static const struct platform_device_id wl12xx_id_table[] __devinitconst = {
@@ -1714,17 +1741,7 @@ static struct platform_driver wl12xx_driver = {
        }
 };
 
-static int __init wl12xx_init(void)
-{
-       return platform_driver_register(&wl12xx_driver);
-}
-module_init(wl12xx_init);
-
-static void __exit wl12xx_exit(void)
-{
-       platform_driver_unregister(&wl12xx_driver);
-}
-module_exit(wl12xx_exit);
+module_platform_driver(wl12xx_driver);
 
 module_param_named(fref, fref_param, charp, 0);
 MODULE_PARM_DESC(fref, "FREF clock: 19.2, 26, 26x, 38.4, 38.4x, 52");
index 26990fb4edeade102151fe8e818c215bc51b25d0..7182bbf6625daac99ddcf00b3581c32ee39c2ef6 100644 (file)
 #define WL128X_SUBTYPE_VER     2
 #define WL128X_MINOR_VER       115
 
+#define WL12XX_AGGR_BUFFER_SIZE        (4 * PAGE_SIZE)
+
+#define WL12XX_NUM_TX_DESCRIPTORS 16
+#define WL12XX_NUM_RX_DESCRIPTORS 8
+
+#define WL12XX_NUM_MAC_ADDRESSES 2
+
 struct wl127x_rx_mem_pool_addr {
        u32 addr;
        u32 addr_extra;
index 3ce6f1039af3f33593663d8d5c8b0639f803f346..7f1669cdea090ed6895b8eaabf0ab7f74054d9c4 100644 (file)
@@ -220,7 +220,7 @@ static ssize_t clear_fw_stats_write(struct file *file,
 
        mutex_lock(&wl->mutex);
 
-       if (wl->state == WL1271_STATE_OFF)
+       if (unlikely(wl->state != WLCORE_STATE_ON))
                goto out;
 
        ret = wl18xx_acx_clear_statistics(wl);
index 69042bb9a0975637be043e2d89d7fdc16b3a9b34..a39682a7c25f333cefb9bcfc67b632426afece00 100644 (file)
@@ -30,7 +30,6 @@
 #include "../wlcore/acx.h"
 #include "../wlcore/tx.h"
 #include "../wlcore/rx.h"
-#include "../wlcore/io.h"
 #include "../wlcore/boot.h"
 
 #include "reg.h"
@@ -46,7 +45,6 @@
 static char *ht_mode_param = NULL;
 static char *board_type_param = NULL;
 static bool checksum_param = false;
-static bool enable_11a_param = true;
 static int num_rx_desc_param = -1;
 
 /* phy paramters */
@@ -416,7 +414,7 @@ static struct wlcore_conf wl18xx_conf = {
                .snr_threshold                  = 0,
        },
        .ht = {
-               .rx_ba_win_size = 10,
+               .rx_ba_win_size = 32,
                .tx_ba_win_size = 64,
                .inactivity_timeout = 10000,
                .tx_ba_tid_bitmap = CONF_TX_BA_ENABLED_TID_BITMAP,
@@ -506,8 +504,8 @@ static struct wl18xx_priv_conf wl18xx_default_priv_conf = {
                .rdl                            = 0x01,
                .auto_detect                    = 0x00,
                .dedicated_fem                  = FEM_NONE,
-               .low_band_component             = COMPONENT_2_WAY_SWITCH,
-               .low_band_component_type        = 0x06,
+               .low_band_component             = COMPONENT_3_WAY_SWITCH,
+               .low_band_component_type        = 0x04,
                .high_band_component            = COMPONENT_2_WAY_SWITCH,
                .high_band_component_type       = 0x09,
                .tcxo_ldo_voltage               = 0x00,
@@ -813,6 +811,13 @@ static int wl18xx_enable_interrupts(struct wl1271 *wl)
 
        ret = wlcore_write_reg(wl, REG_INTERRUPT_MASK,
                               WL1271_ACX_INTR_ALL & ~intr_mask);
+       if (ret < 0)
+               goto disable_interrupts;
+
+       return ret;
+
+disable_interrupts:
+       wlcore_disable_interrupts(wl);
 
 out:
        return ret;
@@ -1203,6 +1208,12 @@ static int wl18xx_handle_static_data(struct wl1271 *wl,
        struct wl18xx_static_data_priv *static_data_priv =
                (struct wl18xx_static_data_priv *) static_data->priv;
 
+       strncpy(wl->chip.phy_fw_ver_str, static_data_priv->phy_version,
+               sizeof(wl->chip.phy_fw_ver_str));
+
+       /* make sure the string is NULL-terminated */
+       wl->chip.phy_fw_ver_str[sizeof(wl->chip.phy_fw_ver_str) - 1] = '\0';
+
        wl1271_info("PHY firmware version: %s", static_data_priv->phy_version);
 
        return 0;
@@ -1241,13 +1252,6 @@ static int wl18xx_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
        if (!change_spare)
                return wlcore_set_key(wl, cmd, vif, sta, key_conf);
 
-       /*
-        * stop the queues and flush to ensure the next packets are
-        * in sync with FW spare block accounting
-        */
-       wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
-       wl1271_tx_flush(wl);
-
        ret = wlcore_set_key(wl, cmd, vif, sta, key_conf);
        if (ret < 0)
                goto out;
@@ -1270,7 +1274,6 @@ static int wl18xx_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
        }
 
 out:
-       wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
        return ret;
 }
 
@@ -1293,7 +1296,10 @@ static u32 wl18xx_pre_pkt_send(struct wl1271 *wl,
        return buf_offset;
 }
 
+static int wl18xx_setup(struct wl1271 *wl);
+
 static struct wlcore_ops wl18xx_ops = {
+       .setup          = wl18xx_setup,
        .identify_chip  = wl18xx_identify_chip,
        .boot           = wl18xx_boot,
        .plt_init       = wl18xx_plt_init,
@@ -1374,27 +1380,15 @@ static struct ieee80211_sta_ht_cap wl18xx_mimo_ht_cap_2ghz = {
                },
 };
 
-static int __devinit wl18xx_probe(struct platform_device *pdev)
+static int wl18xx_setup(struct wl1271 *wl)
 {
-       struct wl1271 *wl;
-       struct ieee80211_hw *hw;
-       struct wl18xx_priv *priv;
+       struct wl18xx_priv *priv = wl->priv;
        int ret;
 
-       hw = wlcore_alloc_hw(sizeof(*priv));
-       if (IS_ERR(hw)) {
-               wl1271_error("can't allocate hw");
-               ret = PTR_ERR(hw);
-               goto out;
-       }
-
-       wl = hw->priv;
-       priv = wl->priv;
-       wl->ops = &wl18xx_ops;
-       wl->ptable = wl18xx_ptable;
        wl->rtable = wl18xx_rtable;
-       wl->num_tx_desc = 32;
-       wl->num_rx_desc = 32;
+       wl->num_tx_desc = WL18XX_NUM_TX_DESCRIPTORS;
+       wl->num_rx_desc = WL18XX_NUM_TX_DESCRIPTORS;
+       wl->num_mac_addr = WL18XX_NUM_MAC_ADDRESSES;
        wl->band_rate_to_idx = wl18xx_band_rate_to_idx;
        wl->hw_tx_rate_tbl_size = WL18XX_CONF_HW_RXTX_RATE_MAX;
        wl->hw_min_ht_rate = WL18XX_CONF_HW_RXTX_RATE_MCS0;
@@ -1405,9 +1399,9 @@ static int __devinit wl18xx_probe(struct platform_device *pdev)
        if (num_rx_desc_param != -1)
                wl->num_rx_desc = num_rx_desc_param;
 
-       ret = wl18xx_conf_init(wl, &pdev->dev);
+       ret = wl18xx_conf_init(wl, wl->dev);
        if (ret < 0)
-               goto out_free;
+               return ret;
 
        /* If the module param is set, update it in conf */
        if (board_type_param) {
@@ -1424,27 +1418,14 @@ static int __devinit wl18xx_probe(struct platform_device *pdev)
                } else {
                        wl1271_error("invalid board type '%s'",
                                board_type_param);
-                       ret = -EINVAL;
-                       goto out_free;
+                       return -EINVAL;
                }
        }
 
-       /* HACK! Just for now we hardcode COM8 and HDK to 0x06 */
-       switch (priv->conf.phy.board_type) {
-       case BOARD_TYPE_HDK_18XX:
-       case BOARD_TYPE_COM8_18XX:
-               priv->conf.phy.low_band_component_type = 0x06;
-               break;
-       case BOARD_TYPE_FPGA_18XX:
-       case BOARD_TYPE_DVP_18XX:
-       case BOARD_TYPE_EVB_18XX:
-               priv->conf.phy.low_band_component_type = 0x05;
-               break;
-       default:
+       if (priv->conf.phy.board_type >= NUM_BOARD_TYPES) {
                wl1271_error("invalid board type '%d'",
                        priv->conf.phy.board_type);
-               ret = -EINVAL;
-               goto out_free;
+               return -EINVAL;
        }
 
        if (low_band_component_param != -1)
@@ -1476,22 +1457,21 @@ static int __devinit wl18xx_probe(struct platform_device *pdev)
                        priv->conf.ht.mode = HT_MODE_SISO20;
                else {
                        wl1271_error("invalid ht_mode '%s'", ht_mode_param);
-                       ret = -EINVAL;
-                       goto out_free;
+                       return -EINVAL;
                }
        }
 
        if (priv->conf.ht.mode == HT_MODE_DEFAULT) {
                /*
                 * Only support mimo with multiple antennas. Fall back to
-                * siso20.
+                * siso40.
                 */
                if (wl18xx_is_mimo_supported(wl))
                        wlcore_set_ht_cap(wl, IEEE80211_BAND_2GHZ,
                                          &wl18xx_mimo_ht_cap_2ghz);
                else
                        wlcore_set_ht_cap(wl, IEEE80211_BAND_2GHZ,
-                                         &wl18xx_siso20_ht_cap);
+                                         &wl18xx_siso40_ht_cap_2ghz);
 
                /* 5Ghz is always wide */
                wlcore_set_ht_cap(wl, IEEE80211_BAND_5GHZ,
@@ -1513,9 +1493,34 @@ static int __devinit wl18xx_probe(struct platform_device *pdev)
                wl18xx_ops.init_vif = NULL;
        }
 
-       wl->enable_11a = enable_11a_param;
+       /* Enable 11a Band only if we have 5G antennas */
+       wl->enable_11a = (priv->conf.phy.number_of_assembled_ant5 != 0);
+
+       return 0;
+}
+
+static int __devinit wl18xx_probe(struct platform_device *pdev)
+{
+       struct wl1271 *wl;
+       struct ieee80211_hw *hw;
+       int ret;
+
+       hw = wlcore_alloc_hw(sizeof(struct wl18xx_priv),
+                            WL18XX_AGGR_BUFFER_SIZE);
+       if (IS_ERR(hw)) {
+               wl1271_error("can't allocate hw");
+               ret = PTR_ERR(hw);
+               goto out;
+       }
+
+       wl = hw->priv;
+       wl->ops = &wl18xx_ops;
+       wl->ptable = wl18xx_ptable;
+       ret = wlcore_probe(wl, pdev);
+       if (ret)
+               goto out_free;
 
-       return wlcore_probe(wl, pdev);
+       return ret;
 
 out_free:
        wlcore_free_hw(wl);
@@ -1539,18 +1544,7 @@ static struct platform_driver wl18xx_driver = {
        }
 };
 
-static int __init wl18xx_init(void)
-{
-       return platform_driver_register(&wl18xx_driver);
-}
-module_init(wl18xx_init);
-
-static void __exit wl18xx_exit(void)
-{
-       platform_driver_unregister(&wl18xx_driver);
-}
-module_exit(wl18xx_exit);
-
+module_platform_driver(wl18xx_driver);
 module_param_named(ht_mode, ht_mode_param, charp, S_IRUSR);
 MODULE_PARM_DESC(ht_mode, "Force HT mode: wide or siso20");
 
@@ -1561,9 +1555,6 @@ MODULE_PARM_DESC(board_type, "Board type: fpga, hdk (default), evb, com8 or "
 module_param_named(checksum, checksum_param, bool, S_IRUSR);
 MODULE_PARM_DESC(checksum, "Enable TCP checksum: boolean (defaults to false)");
 
-module_param_named(enable_11a, enable_11a_param, bool, S_IRUSR);
-MODULE_PARM_DESC(enable_11a, "Enable 11a (5GHz): boolean (defaults to true)");
-
 module_param_named(dc2dc, dc2dc_param, int, S_IRUSR);
 MODULE_PARM_DESC(dc2dc, "External DC2DC: u8 (defaults to 0)");
 
index 6452396fa1d411d4e37b9c82e37046589dc26044..96a1e438d677fd1f14ea125338c824c25e34b228 100644 (file)
 
 #define WL18XX_CMD_MAX_SIZE          740
 
+#define WL18XX_AGGR_BUFFER_SIZE                (13 * PAGE_SIZE)
+
+#define WL18XX_NUM_TX_DESCRIPTORS 32
+#define WL18XX_NUM_RX_DESCRIPTORS 32
+
+#define WL18XX_NUM_MAC_ADDRESSES 3
+
 struct wl18xx_priv {
        /* buffer for sending commands to FW */
        u8 cmd_buf[WL18XX_CMD_MAX_SIZE];
index 20e1bd9238321e6d2eab682784f8e3cd9b6f8528..eaef3f41b2524b9885084ce33c77afb2a034ceb6 100644 (file)
@@ -59,6 +59,9 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
        u16 status;
        u16 poll_count = 0;
 
+       if (WARN_ON(unlikely(wl->state == WLCORE_STATE_RESTARTING)))
+               return -EIO;
+
        cmd = buf;
        cmd->id = cpu_to_le16(id);
        cmd->status = 0;
@@ -990,7 +993,7 @@ int wl12xx_cmd_build_klv_null_data(struct wl1271 *wl,
 
        ret = wl1271_cmd_template_set(wl, wlvif->role_id, CMD_TEMPL_KLV,
                                      skb->data, skb->len,
-                                     CMD_TEMPL_KLV_IDX_NULL_DATA,
+                                     wlvif->sta.klv_template_id,
                                      wlvif->basic_rate);
 
 out:
@@ -1785,10 +1788,17 @@ int wl12xx_start_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif)
                      wlvif->bss_type == BSS_TYPE_IBSS)))
                return -EINVAL;
 
-       ret = wl12xx_cmd_role_start_dev(wl, wlvif);
+       ret = wl12xx_cmd_role_enable(wl,
+                                    wl12xx_wlvif_to_vif(wlvif)->addr,
+                                    WL1271_ROLE_DEVICE,
+                                    &wlvif->dev_role_id);
        if (ret < 0)
                goto out;
 
+       ret = wl12xx_cmd_role_start_dev(wl, wlvif);
+       if (ret < 0)
+               goto out_disable;
+
        ret = wl12xx_roc(wl, wlvif, wlvif->dev_role_id);
        if (ret < 0)
                goto out_stop;
@@ -1797,6 +1807,8 @@ int wl12xx_start_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 
 out_stop:
        wl12xx_cmd_role_stop_dev(wl, wlvif);
+out_disable:
+       wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
 out:
        return ret;
 }
@@ -1824,6 +1836,11 @@ int wl12xx_stop_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif)
        ret = wl12xx_cmd_role_stop_dev(wl, wlvif);
        if (ret < 0)
                goto out;
+
+       ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
+       if (ret < 0)
+               goto out;
+
 out:
        return ret;
 }
index 4ef0b095f0d61cbb2d85d7f9986cf8f25f58fa81..2409f3d71f63ddd457cc571b7a76798234d16e0b 100644 (file)
@@ -157,11 +157,6 @@ enum wl1271_commands {
 
 #define MAX_CMD_PARAMS 572
 
-enum {
-       CMD_TEMPL_KLV_IDX_NULL_DATA = 0,
-       CMD_TEMPL_KLV_IDX_MAX = 4
-};
-
 enum cmd_templ {
        CMD_TEMPL_NULL_DATA = 0,
        CMD_TEMPL_BEACON,
index d77224f2ac6bcbccfa6377f85c49a620a8fd2234..9e40760bafe17b43121585d63bacfaa1925611b3 100644 (file)
@@ -412,8 +412,7 @@ struct conf_rx_settings {
 #define CONF_TX_RATE_RETRY_LIMIT       10
 
 /* basic rates for p2p operations (probe req/resp, etc.) */
-#define CONF_TX_RATE_MASK_BASIC_P2P    (CONF_HW_BIT_RATE_6MBPS | \
-       CONF_HW_BIT_RATE_12MBPS | CONF_HW_BIT_RATE_24MBPS)
+#define CONF_TX_RATE_MASK_BASIC_P2P    CONF_HW_BIT_RATE_6MBPS
 
 /*
  * Rates supported for data packets when operating as AP. Note the absence
index 6b800b3cbea59fdeb7be9fbe395aea2988fbd682..db4bf5a68ce208c1fa3e98fbe848221051ce0ec5 100644 (file)
@@ -28,7 +28,7 @@
 #include <linux/bitops.h>
 #include <linux/printk.h>
 
-#define DRIVER_NAME "wl12xx"
+#define DRIVER_NAME "wlcore"
 #define DRIVER_PREFIX DRIVER_NAME ": "
 
 enum {
@@ -73,11 +73,21 @@ extern u32 wl12xx_debug_level;
 #define wl1271_info(fmt, arg...) \
        pr_info(DRIVER_PREFIX fmt "\n", ##arg)
 
+/* define the debug macro differently if dynamic debug is supported */
+#if defined(CONFIG_DYNAMIC_DEBUG)
 #define wl1271_debug(level, fmt, arg...) \
        do { \
-               if (level & wl12xx_debug_level) \
-                       pr_debug(DRIVER_PREFIX fmt "\n", ##arg); \
+               if (unlikely(level & wl12xx_debug_level)) \
+                       dynamic_pr_debug(DRIVER_PREFIX fmt "\n", ##arg); \
+       } while (0)
+#else
+#define wl1271_debug(level, fmt, arg...) \
+       do { \
+               if (unlikely(level & wl12xx_debug_level)) \
+                       printk(KERN_DEBUG pr_fmt(DRIVER_PREFIX fmt "\n"), \
+                              ##arg); \
        } while (0)
+#endif
 
 /* TODO: use pr_debug_hex_dump when it becomes available */
 #define wl1271_dump(level, prefix, buf, len)   \
index 80dbc5304facdc5e69f93b55b6fbe442f4a25052..c86bb00c24884d355e93af45917b30defe86f220 100644 (file)
@@ -62,11 +62,14 @@ void wl1271_debugfs_update_stats(struct wl1271 *wl)
 
        mutex_lock(&wl->mutex);
 
+       if (unlikely(wl->state != WLCORE_STATE_ON))
+               goto out;
+
        ret = wl1271_ps_elp_wakeup(wl);
        if (ret < 0)
                goto out;
 
-       if (wl->state == WL1271_STATE_ON && !wl->plt &&
+       if (!wl->plt &&
            time_after(jiffies, wl->stats.fw_stats_update +
                       msecs_to_jiffies(WL1271_DEBUGFS_STATS_LIFETIME))) {
                wl1271_acx_statistics(wl, wl->stats.fw_stats);
@@ -286,7 +289,7 @@ static ssize_t dynamic_ps_timeout_write(struct file *file,
 
        wl->conf.conn.dynamic_ps_timeout = value;
 
-       if (wl->state == WL1271_STATE_OFF)
+       if (unlikely(wl->state != WLCORE_STATE_ON))
                goto out;
 
        ret = wl1271_ps_elp_wakeup(wl);
@@ -353,7 +356,7 @@ static ssize_t forced_ps_write(struct file *file,
 
        wl->conf.conn.forced_ps = value;
 
-       if (wl->state == WL1271_STATE_OFF)
+       if (unlikely(wl->state != WLCORE_STATE_ON))
                goto out;
 
        ret = wl1271_ps_elp_wakeup(wl);
@@ -486,6 +489,7 @@ static ssize_t driver_state_read(struct file *file, char __user *user_buf,
        DRIVER_STATE_PRINT_HEX(platform_quirks);
        DRIVER_STATE_PRINT_HEX(chip.id);
        DRIVER_STATE_PRINT_STR(chip.fw_ver_str);
+       DRIVER_STATE_PRINT_STR(chip.phy_fw_ver_str);
        DRIVER_STATE_PRINT_INT(sched_scanning);
 
 #undef DRIVER_STATE_PRINT_INT
@@ -999,7 +1003,7 @@ static ssize_t sleep_auth_write(struct file *file,
 
        wl->conf.conn.sta_sleep_auth = value;
 
-       if (wl->state == WL1271_STATE_OFF) {
+       if (unlikely(wl->state != WLCORE_STATE_ON)) {
                /* this will show up on "read" in case we are off */
                wl->sleep_auth = value;
                goto out;
@@ -1060,14 +1064,16 @@ static ssize_t dev_mem_read(struct file *file,
 
        mutex_lock(&wl->mutex);
 
-       if (wl->state == WL1271_STATE_OFF) {
+       if (unlikely(wl->state == WLCORE_STATE_OFF)) {
                ret = -EFAULT;
                goto skip_read;
        }
 
-       ret = wl1271_ps_elp_wakeup(wl);
-       if (ret < 0)
-               goto skip_read;
+       /*
+        * Don't fail if elp_wakeup returns an error, so the device's memory
+        * could be read even if the FW crashed
+        */
+       wl1271_ps_elp_wakeup(wl);
 
        /* store current partition and switch partition */
        memcpy(&old_part, &wl->curr_part, sizeof(old_part));
@@ -1145,14 +1151,16 @@ static ssize_t dev_mem_write(struct file *file, const char __user *user_buf,
 
        mutex_lock(&wl->mutex);
 
-       if (wl->state == WL1271_STATE_OFF) {
+       if (unlikely(wl->state == WLCORE_STATE_OFF)) {
                ret = -EFAULT;
                goto skip_write;
        }
 
-       ret = wl1271_ps_elp_wakeup(wl);
-       if (ret < 0)
-               goto skip_write;
+       /*
+        * Don't fail if elp_wakeup returns an error, so the device's memory
+        * could be read even if the FW crashed
+        */
+       wl1271_ps_elp_wakeup(wl);
 
        /* store current partition and switch partition */
        memcpy(&old_part, &wl->curr_part, sizeof(old_part));
index a3c867786df80fcc9458461b804dd8fcdeb0610c..32d157f62f3116f32dfc2b562aad770c0e9da907 100644 (file)
@@ -141,7 +141,7 @@ int wl1271_init_templates_config(struct wl1271 *wl)
        if (ret < 0)
                return ret;
 
-       for (i = 0; i < CMD_TEMPL_KLV_IDX_MAX; i++) {
+       for (i = 0; i < WLCORE_MAX_KLV_TEMPLATES; i++) {
                ret = wl1271_cmd_template_set(wl, WL12XX_INVALID_ROLE_ID,
                                              CMD_TEMPL_KLV, NULL,
                                              sizeof(struct ieee80211_qos_hdr),
@@ -371,15 +371,7 @@ static int wl1271_sta_hw_init_post_mem(struct wl1271 *wl,
                                       struct ieee80211_vif *vif)
 {
        struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
-       int ret, i;
-
-       /* disable all keep-alive templates */
-       for (i = 0; i < CMD_TEMPL_KLV_IDX_MAX; i++) {
-               ret = wl1271_acx_keep_alive_config(wl, wlvif, i,
-                                                  ACX_KEEP_ALIVE_TPL_INVALID);
-               if (ret < 0)
-                       return ret;
-       }
+       int ret;
 
        /* disable the keep-alive feature */
        ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
index 259149f36faec0b7108500302f30e66e323cc1aa..f48530fec14fb3ba9563e1763c2c38fa46b9488d 100644 (file)
@@ -64,7 +64,7 @@ static inline int __must_check wlcore_raw_write(struct wl1271 *wl, int addr,
                return -EIO;
 
        ret = wl->if_ops->write(wl->dev, addr, buf, len, fixed);
-       if (ret && wl->state != WL1271_STATE_OFF)
+       if (ret && wl->state != WLCORE_STATE_OFF)
                set_bit(WL1271_FLAG_IO_FAILED, &wl->flags);
 
        return ret;
@@ -80,7 +80,7 @@ static inline int __must_check wlcore_raw_read(struct wl1271 *wl, int addr,
                return -EIO;
 
        ret = wl->if_ops->read(wl->dev, addr, buf, len, fixed);
-       if (ret && wl->state != WL1271_STATE_OFF)
+       if (ret && wl->state != WLCORE_STATE_OFF)
                set_bit(WL1271_FLAG_IO_FAILED, &wl->flags);
 
        return ret;
index 72548609f71122b469991615c3dda3e2aabe598c..25530c8760cb0a07234f753efe66e31fbf107c2d 100644 (file)
@@ -248,7 +248,7 @@ static void wl12xx_tx_watchdog_work(struct work_struct *work)
 
        mutex_lock(&wl->mutex);
 
-       if (unlikely(wl->state == WL1271_STATE_OFF))
+       if (unlikely(wl->state != WLCORE_STATE_ON))
                goto out;
 
        /* Tx went out in the meantime - everything is ok */
@@ -512,7 +512,7 @@ static int wlcore_irq_locked(struct wl1271 *wl)
 
        wl1271_debug(DEBUG_IRQ, "IRQ work");
 
-       if (unlikely(wl->state == WL1271_STATE_OFF))
+       if (unlikely(wl->state != WLCORE_STATE_ON))
                goto out;
 
        ret = wl1271_ps_elp_wakeup(wl);
@@ -696,7 +696,7 @@ static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
                 * we can't call wl12xx_get_vif_count() here because
                 * wl->mutex is taken, so use the cached last_vif_count value
                 */
-               if (wl->last_vif_count > 1) {
+               if (wl->last_vif_count > 1 && wl->mr_fw_name) {
                        fw_type = WL12XX_FW_TYPE_MULTI;
                        fw_name = wl->mr_fw_name;
                } else {
@@ -744,38 +744,14 @@ out:
        return ret;
 }
 
-static void wl1271_fetch_nvs(struct wl1271 *wl)
-{
-       const struct firmware *fw;
-       int ret;
-
-       ret = request_firmware(&fw, WL12XX_NVS_NAME, wl->dev);
-
-       if (ret < 0) {
-               wl1271_debug(DEBUG_BOOT, "could not get nvs file %s: %d",
-                            WL12XX_NVS_NAME, ret);
-               return;
-       }
-
-       wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
-
-       if (!wl->nvs) {
-               wl1271_error("could not allocate memory for the nvs file");
-               goto out;
-       }
-
-       wl->nvs_len = fw->size;
-
-out:
-       release_firmware(fw);
-}
-
 void wl12xx_queue_recovery_work(struct wl1271 *wl)
 {
        WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
 
        /* Avoid a recursive recovery */
-       if (!test_and_set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
+       if (wl->state == WLCORE_STATE_ON) {
+               wl->state = WLCORE_STATE_RESTARTING;
+               set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
                wlcore_disable_interrupts_nosync(wl);
                ieee80211_queue_work(wl->hw, &wl->recovery_work);
        }
@@ -913,7 +889,7 @@ static void wl1271_recovery_work(struct work_struct *work)
 
        mutex_lock(&wl->mutex);
 
-       if (wl->state != WL1271_STATE_ON || wl->plt)
+       if (wl->state == WLCORE_STATE_OFF || wl->plt)
                goto out_unlock;
 
        if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
@@ -1081,7 +1057,7 @@ int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
 
        wl1271_notice("power up");
 
-       if (wl->state != WL1271_STATE_OFF) {
+       if (wl->state != WLCORE_STATE_OFF) {
                wl1271_error("cannot go into PLT state because not "
                             "in off state: %d", wl->state);
                ret = -EBUSY;
@@ -1102,7 +1078,7 @@ int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
                if (ret < 0)
                        goto power_off;
 
-               wl->state = WL1271_STATE_ON;
+               wl->state = WLCORE_STATE_ON;
                wl1271_notice("firmware booted in PLT mode %s (%s)",
                              PLT_MODE[plt_mode],
                              wl->chip.fw_ver_str);
@@ -1171,7 +1147,7 @@ int wl1271_plt_stop(struct wl1271 *wl)
        wl1271_power_off(wl);
        wl->flags = 0;
        wl->sleep_auth = WL1271_PSM_ILLEGAL;
-       wl->state = WL1271_STATE_OFF;
+       wl->state = WLCORE_STATE_OFF;
        wl->plt = false;
        wl->plt_mode = PLT_OFF;
        wl->rx_counter = 0;
@@ -1181,7 +1157,9 @@ out:
        return ret;
 }
 
-static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void wl1271_op_tx(struct ieee80211_hw *hw,
+                        struct ieee80211_tx_control *control,
+                        struct sk_buff *skb)
 {
        struct wl1271 *wl = hw->priv;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -1197,7 +1175,7 @@ static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
        mapping = skb_get_queue_mapping(skb);
        q = wl1271_tx_get_queue(mapping);
 
-       hlid = wl12xx_tx_get_hlid(wl, wlvif, skb);
+       hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);
 
        spin_lock_irqsave(&wl->wl_lock, flags);
 
@@ -1600,12 +1578,6 @@ static int wl1271_configure_suspend_sta(struct wl1271 *wl,
        if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
                goto out;
 
-       if ((wl->conf.conn.suspend_wake_up_event ==
-            wl->conf.conn.wake_up_event) &&
-           (wl->conf.conn.suspend_listen_interval ==
-            wl->conf.conn.listen_interval))
-               goto out;
-
        ret = wl1271_ps_elp_wakeup(wl);
        if (ret < 0)
                goto out;
@@ -1614,6 +1586,12 @@ static int wl1271_configure_suspend_sta(struct wl1271 *wl,
        if (ret < 0)
                goto out_sleep;
 
+       if ((wl->conf.conn.suspend_wake_up_event ==
+            wl->conf.conn.wake_up_event) &&
+           (wl->conf.conn.suspend_listen_interval ==
+            wl->conf.conn.listen_interval))
+               goto out_sleep;
+
        ret = wl1271_acx_wake_up_conditions(wl, wlvif,
                                    wl->conf.conn.suspend_wake_up_event,
                                    wl->conf.conn.suspend_listen_interval);
@@ -1669,11 +1647,7 @@ static void wl1271_configure_resume(struct wl1271 *wl,
        if ((!is_ap) && (!is_sta))
                return;
 
-       if (is_sta &&
-           ((wl->conf.conn.suspend_wake_up_event ==
-             wl->conf.conn.wake_up_event) &&
-            (wl->conf.conn.suspend_listen_interval ==
-             wl->conf.conn.listen_interval)))
+       if (is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
                return;
 
        ret = wl1271_ps_elp_wakeup(wl);
@@ -1683,6 +1657,12 @@ static void wl1271_configure_resume(struct wl1271 *wl,
        if (is_sta) {
                wl1271_configure_wowlan(wl, NULL);
 
+               if ((wl->conf.conn.suspend_wake_up_event ==
+                    wl->conf.conn.wake_up_event) &&
+                   (wl->conf.conn.suspend_listen_interval ==
+                    wl->conf.conn.listen_interval))
+                       goto out_sleep;
+
                ret = wl1271_acx_wake_up_conditions(wl, wlvif,
                                    wl->conf.conn.wake_up_event,
                                    wl->conf.conn.listen_interval);
@@ -1695,6 +1675,7 @@ static void wl1271_configure_resume(struct wl1271 *wl,
                ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
        }
 
+out_sleep:
        wl1271_ps_elp_sleep(wl);
 }
 
@@ -1831,7 +1812,7 @@ static void wlcore_op_stop_locked(struct wl1271 *wl)
 {
        int i;
 
-       if (wl->state == WL1271_STATE_OFF) {
+       if (wl->state == WLCORE_STATE_OFF) {
                if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
                                        &wl->flags))
                        wlcore_enable_interrupts(wl);
@@ -1843,7 +1824,7 @@ static void wlcore_op_stop_locked(struct wl1271 *wl)
         * this must be before the cancel_work calls below, so that the work
         * functions don't perform further work.
         */
-       wl->state = WL1271_STATE_OFF;
+       wl->state = WLCORE_STATE_OFF;
 
        /*
         * Use the nosync variant to disable interrupts, so the mutex could be
@@ -1854,6 +1835,8 @@ static void wlcore_op_stop_locked(struct wl1271 *wl)
        mutex_unlock(&wl->mutex);
 
        wlcore_synchronize_interrupts(wl);
+       if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
+               cancel_work_sync(&wl->recovery_work);
        wl1271_flush_deferred_work(wl);
        cancel_delayed_work_sync(&wl->scan_complete_work);
        cancel_work_sync(&wl->netstack_work);
@@ -1956,6 +1939,27 @@ static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
        *idx = WL12XX_MAX_RATE_POLICIES;
 }
 
+static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
+{
+       u8 policy = find_first_zero_bit(wl->klv_templates_map,
+                                       WLCORE_MAX_KLV_TEMPLATES);
+       if (policy >= WLCORE_MAX_KLV_TEMPLATES)
+               return -EBUSY;
+
+       __set_bit(policy, wl->klv_templates_map);
+       *idx = policy;
+       return 0;
+}
+
+static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
+{
+       if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES))
+               return;
+
+       __clear_bit(*idx, wl->klv_templates_map);
+       *idx = WLCORE_MAX_KLV_TEMPLATES;
+}
+
 static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
        switch (wlvif->bss_type) {
@@ -2020,6 +2024,7 @@ static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
                wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
                wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
                wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
+               wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
                wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
                wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
                wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
@@ -2096,7 +2101,7 @@ irq_disable:
                /* Unlocking the mutex in the middle of handling is
                   inherently unsafe. In this case we deem it safe to do,
                   because we need to let any possibly pending IRQ out of
-                  the system (and while we are WL1271_STATE_OFF the IRQ
+                  the system (and while we are WLCORE_STATE_OFF the IRQ
                   work function will not do anything.) Also, any other
                   possible concurrent operations will fail due to the
                   current state, hence the wl1271 struct should be safe. */
@@ -2131,7 +2136,7 @@ power_off:
        wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
                     wl->enable_11a ? "" : "not ");
 
-       wl->state = WL1271_STATE_ON;
+       wl->state = WLCORE_STATE_ON;
 out:
        return booted;
 }
@@ -2165,7 +2170,11 @@ static bool wl12xx_need_fw_change(struct wl1271 *wl,
        wl->last_vif_count = vif_count;
 
        /* no need for fw change if the device is OFF */
-       if (wl->state == WL1271_STATE_OFF)
+       if (wl->state == WLCORE_STATE_OFF)
+               return false;
+
+       /* no need for fw change if a single fw is used */
+       if (!wl->mr_fw_name)
                return false;
 
        if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
@@ -2247,7 +2256,7 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
         * TODO: after the nvs issue will be solved, move this block
         * to start(), and make sure here the driver is ON.
         */
-       if (wl->state == WL1271_STATE_OFF) {
+       if (wl->state == WLCORE_STATE_OFF) {
                /*
                 * we still need this in order to configure the fw
                 * while uploading the nvs
@@ -2261,21 +2270,6 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
                }
        }
 
-       if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
-           wlvif->bss_type == BSS_TYPE_IBSS) {
-               /*
-                * The device role is a special role used for
-                * rx and tx frames prior to association (as
-                * the STA role can get packets only from
-                * its associated bssid)
-                */
-               ret = wl12xx_cmd_role_enable(wl, vif->addr,
-                                                WL1271_ROLE_DEVICE,
-                                                &wlvif->dev_role_id);
-               if (ret < 0)
-                       goto out;
-       }
-
        ret = wl12xx_cmd_role_enable(wl, vif->addr,
                                     role_type, &wlvif->role_id);
        if (ret < 0)
@@ -2314,7 +2308,7 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl,
                return;
 
        /* because of hardware recovery, we may get here twice */
-       if (wl->state != WL1271_STATE_ON)
+       if (wl->state == WLCORE_STATE_OFF)
                return;
 
        wl1271_info("down");
@@ -2344,10 +2338,6 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl,
                    wlvif->bss_type == BSS_TYPE_IBSS) {
                        if (wl12xx_dev_role_started(wlvif))
                                wl12xx_stop_dev(wl, wlvif);
-
-                       ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
-                       if (ret < 0)
-                               goto deinit;
                }
 
                ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
@@ -2366,6 +2356,7 @@ deinit:
                wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
                wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
                wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
+               wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
        } else {
                wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
                wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
@@ -2430,12 +2421,11 @@ static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
        struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
        struct wl12xx_vif *iter;
        struct vif_counter_data vif_count;
-       bool cancel_recovery = true;
 
        wl12xx_get_vif_count(hw, vif, &vif_count);
        mutex_lock(&wl->mutex);
 
-       if (wl->state == WL1271_STATE_OFF ||
+       if (wl->state == WLCORE_STATE_OFF ||
            !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
                goto out;
 
@@ -2455,12 +2445,9 @@ static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
                wl12xx_force_active_psm(wl);
                set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
                wl12xx_queue_recovery_work(wl);
-               cancel_recovery = false;
        }
 out:
        mutex_unlock(&wl->mutex);
-       if (cancel_recovery)
-               cancel_work_sync(&wl->recovery_work);
 }
 
 static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
@@ -2534,7 +2521,7 @@ static int wl1271_join(struct wl1271 *wl, struct wl12xx_vif *wlvif,
                goto out;
 
        ret = wl1271_acx_keep_alive_config(wl, wlvif,
-                                          CMD_TEMPL_KLV_IDX_NULL_DATA,
+                                          wlvif->sta.klv_template_id,
                                           ACX_KEEP_ALIVE_TPL_VALID);
        if (ret < 0)
                goto out;
@@ -2554,6 +2541,11 @@ static int wl1271_unjoin(struct wl1271 *wl, struct wl12xx_vif *wlvif)
                ieee80211_chswitch_done(vif, false);
        }
 
+       /* invalidate keep-alive template */
+       wl1271_acx_keep_alive_config(wl, wlvif,
+                                    wlvif->sta.klv_template_id,
+                                    ACX_KEEP_ALIVE_TPL_INVALID);
+
        /* to stop listening to a channel, we disconnect */
        ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
        if (ret < 0)
@@ -2592,11 +2584,6 @@ static int wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
                wlvif->rate_set =
                        wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
                ret = wl1271_acx_sta_rate_policies(wl, wlvif);
-               if (ret < 0)
-                       goto out;
-               ret = wl1271_acx_keep_alive_config(
-                       wl, wlvif, CMD_TEMPL_KLV_IDX_NULL_DATA,
-                       ACX_KEEP_ALIVE_TPL_INVALID);
                if (ret < 0)
                        goto out;
                clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
@@ -2770,7 +2757,7 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
        if (changed & IEEE80211_CONF_CHANGE_POWER)
                wl->power_level = conf->power_level;
 
-       if (unlikely(wl->state == WL1271_STATE_OFF))
+       if (unlikely(wl->state != WLCORE_STATE_ON))
                goto out;
 
        ret = wl1271_ps_elp_wakeup(wl);
@@ -2804,10 +2791,6 @@ static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
 {
        struct wl1271_filter_params *fp;
        struct netdev_hw_addr *ha;
-       struct wl1271 *wl = hw->priv;
-
-       if (unlikely(wl->state == WL1271_STATE_OFF))
-               return 0;
 
        fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
        if (!fp) {
@@ -2856,7 +2839,7 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
        *total &= WL1271_SUPPORTED_FILTERS;
        changed &= WL1271_SUPPORTED_FILTERS;
 
-       if (unlikely(wl->state == WL1271_STATE_OFF))
+       if (unlikely(wl->state != WLCORE_STATE_ON))
                goto out;
 
        ret = wl1271_ps_elp_wakeup(wl);
@@ -3080,8 +3063,45 @@ static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
                             struct ieee80211_key_conf *key_conf)
 {
        struct wl1271 *wl = hw->priv;
+       int ret;
+       bool might_change_spare =
+               key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
+               key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;
 
-       return wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
+       if (might_change_spare) {
+               /*
+                * stop the queues and flush to ensure the next packets are
+                * in sync with FW spare block accounting
+                */
+               mutex_lock(&wl->mutex);
+               wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
+               mutex_unlock(&wl->mutex);
+
+               wl1271_tx_flush(wl);
+       }
+
+       mutex_lock(&wl->mutex);
+
+       if (unlikely(wl->state != WLCORE_STATE_ON)) {
+               ret = -EAGAIN;
+               goto out_wake_queues;
+       }
+
+       ret = wl1271_ps_elp_wakeup(wl);
+       if (ret < 0)
+               goto out_wake_queues;
+
+       ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
+
+       wl1271_ps_elp_sleep(wl);
+
+out_wake_queues:
+       if (might_change_spare)
+               wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
+
+       mutex_unlock(&wl->mutex);
+
+       return ret;
 }
 
 int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
@@ -3103,17 +3123,6 @@ int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
                     key_conf->keylen, key_conf->flags);
        wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
 
-       mutex_lock(&wl->mutex);
-
-       if (unlikely(wl->state == WL1271_STATE_OFF)) {
-               ret = -EAGAIN;
-               goto out_unlock;
-       }
-
-       ret = wl1271_ps_elp_wakeup(wl);
-       if (ret < 0)
-               goto out_unlock;
-
        switch (key_conf->cipher) {
        case WLAN_CIPHER_SUITE_WEP40:
        case WLAN_CIPHER_SUITE_WEP104:
@@ -3143,8 +3152,7 @@ int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
        default:
                wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
 
-               ret = -EOPNOTSUPP;
-               goto out_sleep;
+               return -EOPNOTSUPP;
        }
 
        switch (cmd) {
@@ -3155,7 +3163,7 @@ int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
                                 tx_seq_32, tx_seq_16, sta);
                if (ret < 0) {
                        wl1271_error("Could not add or replace key");
-                       goto out_sleep;
+                       return ret;
                }
 
                /*
@@ -3169,7 +3177,7 @@ int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
                        ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
                        if (ret < 0) {
                                wl1271_warning("build arp rsp failed: %d", ret);
-                               goto out_sleep;
+                               return ret;
                        }
                }
                break;
@@ -3181,22 +3189,15 @@ int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
                                     0, 0, sta);
                if (ret < 0) {
                        wl1271_error("Could not remove key");
-                       goto out_sleep;
+                       return ret;
                }
                break;
 
        default:
                wl1271_error("Unsupported key cmd 0x%x", cmd);
-               ret = -EOPNOTSUPP;
-               break;
+               return -EOPNOTSUPP;
        }
 
-out_sleep:
-       wl1271_ps_elp_sleep(wl);
-
-out_unlock:
-       mutex_unlock(&wl->mutex);
-
        return ret;
 }
 EXPORT_SYMBOL_GPL(wlcore_set_key);
@@ -3219,7 +3220,7 @@ static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
 
        mutex_lock(&wl->mutex);
 
-       if (wl->state == WL1271_STATE_OFF) {
+       if (unlikely(wl->state != WLCORE_STATE_ON)) {
                /*
                 * We cannot return -EBUSY here because cfg80211 will expect
                 * a call to ieee80211_scan_completed if we do - in this case
@@ -3259,7 +3260,7 @@ static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
 
        mutex_lock(&wl->mutex);
 
-       if (wl->state == WL1271_STATE_OFF)
+       if (unlikely(wl->state != WLCORE_STATE_ON))
                goto out;
 
        if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
@@ -3308,7 +3309,7 @@ static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
 
        mutex_lock(&wl->mutex);
 
-       if (wl->state == WL1271_STATE_OFF) {
+       if (unlikely(wl->state != WLCORE_STATE_ON)) {
                ret = -EAGAIN;
                goto out;
        }
@@ -3345,7 +3346,7 @@ static void wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
 
        mutex_lock(&wl->mutex);
 
-       if (wl->state == WL1271_STATE_OFF)
+       if (unlikely(wl->state != WLCORE_STATE_ON))
                goto out;
 
        ret = wl1271_ps_elp_wakeup(wl);
@@ -3366,7 +3367,7 @@ static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
 
        mutex_lock(&wl->mutex);
 
-       if (unlikely(wl->state == WL1271_STATE_OFF)) {
+       if (unlikely(wl->state != WLCORE_STATE_ON)) {
                ret = -EAGAIN;
                goto out;
        }
@@ -3395,7 +3396,7 @@ static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
 
        mutex_lock(&wl->mutex);
 
-       if (unlikely(wl->state == WL1271_STATE_OFF)) {
+       if (unlikely(wl->state != WLCORE_STATE_ON)) {
                ret = -EAGAIN;
                goto out;
        }
@@ -4171,7 +4172,7 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
 
        mutex_lock(&wl->mutex);
 
-       if (unlikely(wl->state == WL1271_STATE_OFF))
+       if (unlikely(wl->state != WLCORE_STATE_ON))
                goto out;
 
        if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
@@ -4255,7 +4256,7 @@ static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
 
        mutex_lock(&wl->mutex);
 
-       if (unlikely(wl->state == WL1271_STATE_OFF))
+       if (unlikely(wl->state != WLCORE_STATE_ON))
                goto out;
 
        ret = wl1271_ps_elp_wakeup(wl);
@@ -4454,7 +4455,7 @@ static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
 
        mutex_lock(&wl->mutex);
 
-       if (unlikely(wl->state == WL1271_STATE_OFF)) {
+       if (unlikely(wl->state != WLCORE_STATE_ON)) {
                ret = -EBUSY;
                goto out;
        }
@@ -4493,7 +4494,7 @@ static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
 
        mutex_lock(&wl->mutex);
 
-       if (unlikely(wl->state == WL1271_STATE_OFF)) {
+       if (unlikely(wl->state != WLCORE_STATE_ON)) {
                ret = -EAGAIN;
                goto out;
        }
@@ -4611,7 +4612,7 @@ static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
                                                    mask->control[i].legacy,
                                                    i);
 
-       if (unlikely(wl->state == WL1271_STATE_OFF))
+       if (unlikely(wl->state != WLCORE_STATE_ON))
                goto out;
 
        if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
@@ -4647,12 +4648,14 @@ static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
 
        mutex_lock(&wl->mutex);
 
-       if (unlikely(wl->state == WL1271_STATE_OFF)) {
+       if (unlikely(wl->state == WLCORE_STATE_OFF)) {
                wl12xx_for_each_wlvif_sta(wl, wlvif) {
                        struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
                        ieee80211_chswitch_done(vif, false);
                }
                goto out;
+       } else if (unlikely(wl->state != WLCORE_STATE_ON)) {
+               goto out;
        }
 
        ret = wl1271_ps_elp_wakeup(wl);
@@ -4687,7 +4690,7 @@ static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
 
        mutex_lock(&wl->mutex);
 
-       if (unlikely(wl->state == WL1271_STATE_OFF))
+       if (unlikely(wl->state != WLCORE_STATE_ON))
                goto out;
 
        /* packets are considered pending if in the TX queue or the FW */
@@ -4936,7 +4939,7 @@ static ssize_t wl1271_sysfs_store_bt_coex_state(struct device *dev,
 
        wl->sg_enabled = res;
 
-       if (wl->state == WL1271_STATE_OFF)
+       if (unlikely(wl->state != WLCORE_STATE_ON))
                goto out;
 
        ret = wl1271_ps_elp_wakeup(wl);
@@ -5054,7 +5057,7 @@ static void wl1271_connection_loss_work(struct work_struct *work)
 
        mutex_lock(&wl->mutex);
 
-       if (unlikely(wl->state == WL1271_STATE_OFF))
+       if (unlikely(wl->state != WLCORE_STATE_ON))
                goto out;
 
        /* Call mac80211 connection loss */
@@ -5068,18 +5071,17 @@ out:
        mutex_unlock(&wl->mutex);
 }
 
-static void wl12xx_derive_mac_addresses(struct wl1271 *wl,
-                                       u32 oui, u32 nic, int n)
+static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
 {
        int i;
 
-       wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x, n %d",
-                    oui, nic, n);
+       wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x",
+                    oui, nic);
 
-       if (nic + n - 1 > 0xffffff)
+       if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff)
                wl1271_warning("NIC part of the MAC address wraps around!");
 
-       for (i = 0; i < n; i++) {
+       for (i = 0; i < wl->num_mac_addr; i++) {
                wl->addresses[i].addr[0] = (u8)(oui >> 16);
                wl->addresses[i].addr[1] = (u8)(oui >> 8);
                wl->addresses[i].addr[2] = (u8) oui;
@@ -5089,7 +5091,22 @@ static void wl12xx_derive_mac_addresses(struct wl1271 *wl,
                nic++;
        }
 
-       wl->hw->wiphy->n_addresses = n;
+       /* we may be one address short at the most */
+       WARN_ON(wl->num_mac_addr + 1 < WLCORE_NUM_MAC_ADDRESSES);
+
+       /*
+        * turn on the LAA bit in the first address and use it as
+        * the last address.
+        */
+       if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) {
+               int idx = WLCORE_NUM_MAC_ADDRESSES - 1;
+               memcpy(&wl->addresses[idx], &wl->addresses[0],
+                      sizeof(wl->addresses[0]));
+               /* LAA bit */
+               wl->addresses[idx].addr[2] |= BIT(1);
+       }
+
+       wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES;
        wl->hw->wiphy->addresses = wl->addresses;
 }
 
@@ -5128,8 +5145,7 @@ static int wl1271_register_hw(struct wl1271 *wl)
        if (wl->mac80211_registered)
                return 0;
 
-       wl1271_fetch_nvs(wl);
-       if (wl->nvs != NULL) {
+       if (wl->nvs_len >= 12) {
                /* NOTE: The wl->nvs->nvs element must be first, in
                 * order to simplify the casting, we assume it is at
                 * the beginning of the wl->nvs structure.
@@ -5149,7 +5165,7 @@ static int wl1271_register_hw(struct wl1271 *wl)
                nic_addr = wl->fuse_nic_addr + 1;
        }
 
-       wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr, 2);
+       wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr);
 
        ret = ieee80211_register_hw(wl->hw);
        if (ret < 0) {
@@ -5179,7 +5195,7 @@ static void wl1271_unregister_hw(struct wl1271 *wl)
 
 static const struct ieee80211_iface_limit wlcore_iface_limits[] = {
        {
-               .max = 2,
+               .max = 3,
                .types = BIT(NL80211_IFTYPE_STATION),
        },
        {
@@ -5194,7 +5210,7 @@ static const struct ieee80211_iface_combination
 wlcore_iface_combinations[] = {
        {
          .num_different_channels = 1,
-         .max_interfaces = 2,
+         .max_interfaces = 3,
          .limits = wlcore_iface_limits,
          .n_limits = ARRAY_SIZE(wlcore_iface_limits),
        },
@@ -5310,7 +5326,7 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
 
 #define WL1271_DEFAULT_CHANNEL 0
 
-struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size)
+struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size)
 {
        struct ieee80211_hw *hw;
        struct wl1271 *wl;
@@ -5390,17 +5406,19 @@ struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size)
 
        spin_lock_init(&wl->wl_lock);
 
-       wl->state = WL1271_STATE_OFF;
+       wl->state = WLCORE_STATE_OFF;
        wl->fw_type = WL12XX_FW_TYPE_NONE;
        mutex_init(&wl->mutex);
        mutex_init(&wl->flush_mutex);
+       init_completion(&wl->nvs_loading_complete);
 
-       order = get_order(WL1271_AGGR_BUFFER_SIZE);
+       order = get_order(aggr_buf_size);
        wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
        if (!wl->aggr_buf) {
                ret = -ENOMEM;
                goto err_wq;
        }
+       wl->aggr_buf_size = aggr_buf_size;
 
        wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
        if (!wl->dummy_packet) {
@@ -5463,8 +5481,7 @@ int wlcore_free_hw(struct wl1271 *wl)
        device_remove_file(wl->dev, &dev_attr_bt_coex_state);
        free_page((unsigned long)wl->fwlog);
        dev_kfree_skb(wl->dummy_packet);
-       free_pages((unsigned long)wl->aggr_buf,
-                       get_order(WL1271_AGGR_BUFFER_SIZE));
+       free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));
 
        wl1271_debugfs_exit(wl);
 
@@ -5514,17 +5531,32 @@ static irqreturn_t wl12xx_hardirq(int irq, void *cookie)
        return IRQ_WAKE_THREAD;
 }
 
-int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
+static void wlcore_nvs_cb(const struct firmware *fw, void *context)
 {
+       struct wl1271 *wl = context;
+       struct platform_device *pdev = wl->pdev;
        struct wl12xx_platform_data *pdata = pdev->dev.platform_data;
        unsigned long irqflags;
        int ret;
 
-       if (!wl->ops || !wl->ptable) {
-               ret = -EINVAL;
-               goto out_free_hw;
+       if (fw) {
+               wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
+               if (!wl->nvs) {
+                       wl1271_error("Could not allocate nvs data");
+                       goto out;
+               }
+               wl->nvs_len = fw->size;
+       } else {
+               wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
+                            WL12XX_NVS_NAME);
+               wl->nvs = NULL;
+               wl->nvs_len = 0;
        }
 
+       ret = wl->ops->setup(wl);
+       if (ret < 0)
+               goto out_free_nvs;
+
        BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
 
        /* adjust some runtime configuration parameters */
@@ -5533,11 +5565,8 @@ int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
        wl->irq = platform_get_irq(pdev, 0);
        wl->platform_quirks = pdata->platform_quirks;
        wl->set_power = pdata->set_power;
-       wl->dev = &pdev->dev;
        wl->if_ops = pdata->ops;
 
-       platform_set_drvdata(pdev, wl);
-
        if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
                irqflags = IRQF_TRIGGER_RISING;
        else
@@ -5548,7 +5577,7 @@ int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
                                   pdev->name, wl);
        if (ret < 0) {
                wl1271_error("request_irq() failed: %d", ret);
-               goto out_free_hw;
+               goto out_free_nvs;
        }
 
 #ifdef CONFIG_PM
@@ -5607,6 +5636,7 @@ int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
                goto out_hw_pg_ver;
        }
 
+       wl->initialized = true;
        goto out;
 
 out_hw_pg_ver:
@@ -5621,10 +5651,33 @@ out_unreg:
 out_irq:
        free_irq(wl->irq, wl);
 
-out_free_hw:
-       wlcore_free_hw(wl);
+out_free_nvs:
+       kfree(wl->nvs);
 
 out:
+       release_firmware(fw);
+       complete_all(&wl->nvs_loading_complete);
+}
+
+int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
+{
+       int ret;
+
+       if (!wl->ops || !wl->ptable)
+               return -EINVAL;
+
+       wl->dev = &pdev->dev;
+       wl->pdev = pdev;
+       platform_set_drvdata(pdev, wl);
+
+       ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
+                                     WL12XX_NVS_NAME, &pdev->dev, GFP_KERNEL,
+                                     wl, wlcore_nvs_cb);
+       if (ret < 0) {
+               wl1271_error("request_firmware_nowait failed: %d", ret);
+               complete_all(&wl->nvs_loading_complete);
+       }
+
        return ret;
 }
 EXPORT_SYMBOL_GPL(wlcore_probe);
@@ -5633,6 +5686,10 @@ int __devexit wlcore_remove(struct platform_device *pdev)
 {
        struct wl1271 *wl = platform_get_drvdata(pdev);
 
+       wait_for_completion(&wl->nvs_loading_complete);
+       if (!wl->initialized)
+               return 0;
+
        if (wl->irq_wake_enabled) {
                device_init_wakeup(wl->dev, 0);
                disable_irq_wake(wl->irq);
@@ -5663,3 +5720,4 @@ MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
+MODULE_FIRMWARE(WL12XX_NVS_NAME);
index 46d36fd30eba54e306cbf4cdeade28b61e337ea7..4d1414a673fb6025f42d20e4b7da16fcaf2f6265 100644 (file)
@@ -28,7 +28,7 @@
 
 #define WL1271_WAKEUP_TIMEOUT 500
 
-#define ELP_ENTRY_DELAY  5
+#define ELP_ENTRY_DELAY  30
 
 void wl1271_elp_work(struct work_struct *work)
 {
@@ -44,7 +44,7 @@ void wl1271_elp_work(struct work_struct *work)
 
        mutex_lock(&wl->mutex);
 
-       if (unlikely(wl->state == WL1271_STATE_OFF))
+       if (unlikely(wl->state != WLCORE_STATE_ON))
                goto out;
 
        /* our work might have been already cancelled */
@@ -98,11 +98,7 @@ void wl1271_ps_elp_sleep(struct wl1271 *wl)
                        return;
        }
 
-       if (wl->conf.conn.forced_ps)
-               timeout = ELP_ENTRY_DELAY;
-       else
-               timeout = wl->conf.conn.dynamic_ps_timeout;
-
+       timeout = ELP_ENTRY_DELAY;
        ieee80211_queue_delayed_work(wl->hw, &wl->elp_work,
                                     msecs_to_jiffies(timeout));
 }
index f55e2f9e7ac56c4212c01cdf0e96b32cf3a7e465..9ee0ec6fd1db3d666769747e2f52c42bcdbe53e1 100644 (file)
@@ -221,7 +221,7 @@ int wlcore_rx(struct wl1271 *wl, struct wl_fw_status_1 *status)
                        pkt_len = wlcore_rx_get_buf_size(wl, des);
                        align_pkt_len = wlcore_rx_get_align_buf_size(wl,
                                                                     pkt_len);
-                       if (buf_size + align_pkt_len > WL1271_AGGR_BUFFER_SIZE)
+                       if (buf_size + align_pkt_len > wl->aggr_buf_size)
                                break;
                        buf_size += align_pkt_len;
                        rx_counter++;
index dbeca1bfbb2cc40814baa73b16d7a9a33c7607ad..d00501493dfec06d9aa314c675e1834f12bb98d2 100644 (file)
@@ -46,7 +46,7 @@ void wl1271_scan_complete_work(struct work_struct *work)
 
        mutex_lock(&wl->mutex);
 
-       if (wl->state == WL1271_STATE_OFF)
+       if (unlikely(wl->state != WLCORE_STATE_ON))
                goto out;
 
        if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
@@ -184,11 +184,7 @@ static int wl1271_scan_send(struct wl1271 *wl, struct ieee80211_vif *vif,
        if (passive)
                scan_options |= WL1271_SCAN_OPT_PASSIVE;
 
-       if (wlvif->bss_type == BSS_TYPE_AP_BSS ||
-           test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
-               cmd->params.role_id = wlvif->role_id;
-       else
-               cmd->params.role_id = wlvif->dev_role_id;
+       cmd->params.role_id = wlvif->role_id;
 
        if (WARN_ON(cmd->params.role_id == WL12XX_INVALID_ROLE_ID)) {
                ret = -EINVAL;
@@ -593,7 +589,7 @@ wl12xx_scan_sched_scan_ssid_list(struct wl1271 *wl,
                goto out;
        }
 
-       cmd->role_id = wlvif->dev_role_id;
+       cmd->role_id = wlvif->role_id;
        if (!n_match_ssids) {
                /* No filter, with ssids */
                type = SCAN_SSID_FILTER_DISABLED;
@@ -683,7 +679,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
        if (!cfg)
                return -ENOMEM;
 
-       cfg->role_id = wlvif->dev_role_id;
+       cfg->role_id = wlvif->role_id;
        cfg->rssi_threshold = c->rssi_threshold;
        cfg->snr_threshold  = c->snr_threshold;
        cfg->n_probe_reqs = c->num_probe_reqs;
@@ -718,7 +714,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
        if (!force_passive && cfg->active[0]) {
                u8 band = IEEE80211_BAND_2GHZ;
                ret = wl12xx_cmd_build_probe_req(wl, wlvif,
-                                                wlvif->dev_role_id, band,
+                                                wlvif->role_id, band,
                                                 req->ssids[0].ssid,
                                                 req->ssids[0].ssid_len,
                                                 ies->ie[band],
@@ -732,7 +728,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
        if (!force_passive && cfg->active[1]) {
                u8 band = IEEE80211_BAND_5GHZ;
                ret = wl12xx_cmd_build_probe_req(wl, wlvif,
-                                                wlvif->dev_role_id, band,
+                                                wlvif->role_id, band,
                                                 req->ssids[0].ssid,
                                                 req->ssids[0].ssid_len,
                                                 ies->ie[band],
@@ -774,7 +770,7 @@ int wl1271_scan_sched_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif)
        if (!start)
                return -ENOMEM;
 
-       start->role_id = wlvif->dev_role_id;
+       start->role_id = wlvif->role_id;
        start->tag = WL1271_SCAN_DEFAULT_TAG;
 
        ret = wl1271_cmd_send(wl, CMD_START_PERIODIC_SCAN, start,
@@ -810,7 +806,7 @@ void wl1271_scan_sched_scan_stop(struct wl1271 *wl,  struct wl12xx_vif *wlvif)
                return;
        }
 
-       stop->role_id = wlvif->dev_role_id;
+       stop->role_id = wlvif->role_id;
        stop->tag = WL1271_SCAN_DEFAULT_TAG;
 
        ret = wl1271_cmd_send(wl, CMD_STOP_PERIODIC_SCAN, stop,
index 8da4ed243ebcd7ca5b33f7b78d140b21adfb404a..a519bc3adec1d6ade15542ba852918114f5cdb32 100644 (file)
 /* HW limitation: maximum possible chunk size is 4095 bytes */
 #define WSPI_MAX_CHUNK_SIZE    4092
 
-#define WSPI_MAX_NUM_OF_CHUNKS (WL1271_AGGR_BUFFER_SIZE / WSPI_MAX_CHUNK_SIZE)
+/*
+ * only support SPI for 12xx - this code should be reworked when 18xx
+ * support is introduced
+ */
+#define SPI_AGGR_BUFFER_SIZE (4 * PAGE_SIZE)
+
+#define WSPI_MAX_NUM_OF_CHUNKS (SPI_AGGR_BUFFER_SIZE / WSPI_MAX_CHUNK_SIZE)
 
 struct wl12xx_spi_glue {
        struct device *dev;
@@ -271,7 +277,7 @@ static int __must_check wl12xx_spi_raw_write(struct device *child, int addr,
        u32 chunk_len;
        int i;
 
-       WARN_ON(len > WL1271_AGGR_BUFFER_SIZE);
+       WARN_ON(len > SPI_AGGR_BUFFER_SIZE);
 
        spi_message_init(&m);
        memset(t, 0, sizeof(t));
index 49e5ee1525c999f04655244e0d8a3bb81a324907..f3442762d884b04def7e2bb2e8b8bee603b3f0ae 100644 (file)
@@ -92,7 +92,7 @@ static int wl1271_tm_cmd_test(struct wl1271 *wl, struct nlattr *tb[])
 
        mutex_lock(&wl->mutex);
 
-       if (wl->state == WL1271_STATE_OFF) {
+       if (unlikely(wl->state != WLCORE_STATE_ON)) {
                ret = -EINVAL;
                goto out;
        }
@@ -164,7 +164,7 @@ static int wl1271_tm_cmd_interrogate(struct wl1271 *wl, struct nlattr *tb[])
 
        mutex_lock(&wl->mutex);
 
-       if (wl->state == WL1271_STATE_OFF) {
+       if (unlikely(wl->state != WLCORE_STATE_ON)) {
                ret = -EINVAL;
                goto out;
        }
index f0081f746482d8060810d27486c58e50d79df928..a90d3cd094089c82fe60db12dca55bedaa33af0f 100644 (file)
@@ -130,16 +130,13 @@ bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb)
 }
 EXPORT_SYMBOL(wl12xx_is_dummy_packet);
 
-u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
-                        struct sk_buff *skb)
+static u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                               struct sk_buff *skb, struct ieee80211_sta *sta)
 {
-       struct ieee80211_tx_info *control = IEEE80211_SKB_CB(skb);
-
-       if (control->control.sta) {
+       if (sta) {
                struct wl1271_station *wl_sta;
 
-               wl_sta = (struct wl1271_station *)
-                               control->control.sta->drv_priv;
+               wl_sta = (struct wl1271_station *)sta->drv_priv;
                return wl_sta->hlid;
        } else {
                struct ieee80211_hdr *hdr;
@@ -156,7 +153,7 @@ u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
 }
 
 u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
-                     struct sk_buff *skb)
+                     struct sk_buff *skb, struct ieee80211_sta *sta)
 {
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 
@@ -164,7 +161,7 @@ u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
                return wl->system_hlid;
 
        if (wlvif->bss_type == BSS_TYPE_AP_BSS)
-               return wl12xx_tx_get_hlid_ap(wl, wlvif, skb);
+               return wl12xx_tx_get_hlid_ap(wl, wlvif, skb, sta);
 
        if ((test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
             test_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags)) &&
@@ -196,7 +193,7 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif,
        int id, ret = -EBUSY, ac;
        u32 spare_blocks;
 
-       if (buf_offset + total_len > WL1271_AGGR_BUFFER_SIZE)
+       if (buf_offset + total_len > wl->aggr_buf_size)
                return -EAGAIN;
 
        spare_blocks = wlcore_hw_get_spare_blocks(wl, is_gem);
@@ -322,8 +319,12 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
                if (hlid == wlvif->ap.global_hlid)
                        rate_idx = wlvif->ap.mgmt_rate_idx;
                else if (hlid == wlvif->ap.bcast_hlid ||
-                        skb->protocol == cpu_to_be16(ETH_P_PAE))
-                       /* send AP bcast and EAPOLs using the min basic rate */
+                        skb->protocol == cpu_to_be16(ETH_P_PAE) ||
+                        !ieee80211_is_data(frame_control))
+                       /*
+                        * send non-data, bcast and EAPOLs using the
+                        * min basic rate
+                        */
                        rate_idx = wlvif->ap.bcast_rate_idx;
                else
                        rate_idx = wlvif->ap.ucast_rate_idx[ac];
@@ -344,13 +345,12 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
 
 /* caller must hold wl->mutex */
 static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
-                                  struct sk_buff *skb, u32 buf_offset)
+                                  struct sk_buff *skb, u32 buf_offset, u8 hlid)
 {
        struct ieee80211_tx_info *info;
        u32 extra = 0;
        int ret = 0;
        u32 total_len;
-       u8 hlid;
        bool is_dummy;
        bool is_gem = false;
 
@@ -359,9 +359,13 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
                return -EINVAL;
        }
 
+       if (hlid == WL12XX_INVALID_LINK_ID) {
+               wl1271_error("invalid hlid. dropping skb 0x%p", skb);
+               return -EINVAL;
+       }
+
        info = IEEE80211_SKB_CB(skb);
 
-       /* TODO: handle dummy packets on multi-vifs */
        is_dummy = wl12xx_is_dummy_packet(wl, skb);
 
        if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) &&
@@ -386,11 +390,6 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
 
                is_gem = (cipher == WL1271_CIPHER_SUITE_GEM);
        }
-       hlid = wl12xx_tx_get_hlid(wl, wlvif, skb);
-       if (hlid == WL12XX_INVALID_LINK_ID) {
-               wl1271_error("invalid hlid. dropping skb 0x%p", skb);
-               return -EINVAL;
-       }
 
        ret = wl1271_tx_allocate(wl, wlvif, skb, extra, buf_offset, hlid,
                                 is_gem);
@@ -517,7 +516,8 @@ static struct sk_buff *wl12xx_lnk_skb_dequeue(struct wl1271 *wl,
 }
 
 static struct sk_buff *wl12xx_vif_skb_dequeue(struct wl1271 *wl,
-                                             struct wl12xx_vif *wlvif)
+                                             struct wl12xx_vif *wlvif,
+                                             u8 *hlid)
 {
        struct sk_buff *skb = NULL;
        int i, h, start_hlid;
@@ -544,10 +544,11 @@ static struct sk_buff *wl12xx_vif_skb_dequeue(struct wl1271 *wl,
        if (!skb)
                wlvif->last_tx_hlid = 0;
 
+       *hlid = wlvif->last_tx_hlid;
        return skb;
 }
 
-static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
+static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl, u8 *hlid)
 {
        unsigned long flags;
        struct wl12xx_vif *wlvif = wl->last_wlvif;
@@ -556,7 +557,7 @@ static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
        /* continue from last wlvif (round robin) */
        if (wlvif) {
                wl12xx_for_each_wlvif_continue(wl, wlvif) {
-                       skb = wl12xx_vif_skb_dequeue(wl, wlvif);
+                       skb = wl12xx_vif_skb_dequeue(wl, wlvif, hlid);
                        if (skb) {
                                wl->last_wlvif = wlvif;
                                break;
@@ -565,13 +566,15 @@ static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
        }
 
        /* dequeue from the system HLID before the restarting wlvif list */
-       if (!skb)
+       if (!skb) {
                skb = wl12xx_lnk_skb_dequeue(wl, &wl->links[wl->system_hlid]);
+               *hlid = wl->system_hlid;
+       }
 
        /* do a new pass over the wlvif list */
        if (!skb) {
                wl12xx_for_each_wlvif(wl, wlvif) {
-                       skb = wl12xx_vif_skb_dequeue(wl, wlvif);
+                       skb = wl12xx_vif_skb_dequeue(wl, wlvif, hlid);
                        if (skb) {
                                wl->last_wlvif = wlvif;
                                break;
@@ -591,6 +594,7 @@ static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
                int q;
 
                skb = wl->dummy_packet;
+               *hlid = wl->system_hlid;
                q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
                spin_lock_irqsave(&wl->wl_lock, flags);
                WARN_ON_ONCE(wl->tx_queue_count[q] <= 0);
@@ -602,7 +606,7 @@ static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
 }
 
 static void wl1271_skb_queue_head(struct wl1271 *wl, struct wl12xx_vif *wlvif,
-                                 struct sk_buff *skb)
+                                 struct sk_buff *skb, u8 hlid)
 {
        unsigned long flags;
        int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
@@ -610,7 +614,6 @@ static void wl1271_skb_queue_head(struct wl1271 *wl, struct wl12xx_vif *wlvif,
        if (wl12xx_is_dummy_packet(wl, skb)) {
                set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
        } else {
-               u8 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb);
                skb_queue_head(&wl->links[hlid].tx_queue[q], skb);
 
                /* make sure we dequeue the same packet next time */
@@ -686,26 +689,30 @@ int wlcore_tx_work_locked(struct wl1271 *wl)
        unsigned long active_hlids[BITS_TO_LONGS(WL12XX_MAX_LINKS)] = {0};
        int ret = 0;
        int bus_ret = 0;
+       u8 hlid;
 
-       if (unlikely(wl->state == WL1271_STATE_OFF))
+       if (unlikely(wl->state != WLCORE_STATE_ON))
                return 0;
 
-       while ((skb = wl1271_skb_dequeue(wl))) {
+       while ((skb = wl1271_skb_dequeue(wl, &hlid))) {
                struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
                bool has_data = false;
 
                wlvif = NULL;
                if (!wl12xx_is_dummy_packet(wl, skb) && info->control.vif)
                        wlvif = wl12xx_vif_to_data(info->control.vif);
+               else
+                       hlid = wl->system_hlid;
 
                has_data = wlvif && wl1271_tx_is_data_present(skb);
-               ret = wl1271_prepare_tx_frame(wl, wlvif, skb, buf_offset);
+               ret = wl1271_prepare_tx_frame(wl, wlvif, skb, buf_offset,
+                                             hlid);
                if (ret == -EAGAIN) {
                        /*
                         * Aggregation buffer is full.
                         * Flush buffer and try again.
                         */
-                       wl1271_skb_queue_head(wl, wlvif, skb);
+                       wl1271_skb_queue_head(wl, wlvif, skb, hlid);
 
                        buf_offset = wlcore_hw_pre_pkt_send(wl, buf_offset,
                                                            last_len);
@@ -722,7 +729,7 @@ int wlcore_tx_work_locked(struct wl1271 *wl)
                         * Firmware buffer is full.
                         * Queue back last skb, and stop aggregating.
                         */
-                       wl1271_skb_queue_head(wl, wlvif, skb);
+                       wl1271_skb_queue_head(wl, wlvif, skb, hlid);
                        /* No work left, avoid scheduling redundant tx work */
                        set_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
                        goto out_ack;
@@ -732,7 +739,7 @@ int wlcore_tx_work_locked(struct wl1271 *wl)
                                 * fw still expects dummy packet,
                                 * so re-enqueue it
                                 */
-                               wl1271_skb_queue_head(wl, wlvif, skb);
+                               wl1271_skb_queue_head(wl, wlvif, skb, hlid);
                        else
                                ieee80211_free_txskb(wl->hw, skb);
                        goto out_ack;
@@ -1069,39 +1076,54 @@ void wl12xx_tx_reset(struct wl1271 *wl)
 /* caller must *NOT* hold wl->mutex */
 void wl1271_tx_flush(struct wl1271 *wl)
 {
-       unsigned long timeout;
+       unsigned long timeout, start_time;
        int i;
-       timeout = jiffies + usecs_to_jiffies(WL1271_TX_FLUSH_TIMEOUT);
+       start_time = jiffies;
+       timeout = start_time + usecs_to_jiffies(WL1271_TX_FLUSH_TIMEOUT);
 
        /* only one flush should be in progress, for consistent queue state */
        mutex_lock(&wl->flush_mutex);
 
+       mutex_lock(&wl->mutex);
+       if (wl->tx_frames_cnt == 0 && wl1271_tx_total_queue_count(wl) == 0) {
+               mutex_unlock(&wl->mutex);
+               goto out;
+       }
+
        wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FLUSH);
 
        while (!time_after(jiffies, timeout)) {
-               mutex_lock(&wl->mutex);
-               wl1271_debug(DEBUG_TX, "flushing tx buffer: %d %d",
+               wl1271_debug(DEBUG_MAC80211, "flushing tx buffer: %d %d",
                             wl->tx_frames_cnt,
                             wl1271_tx_total_queue_count(wl));
+
+               /* force Tx and give the driver some time to flush data */
+               mutex_unlock(&wl->mutex);
+               if (wl1271_tx_total_queue_count(wl))
+                       wl1271_tx_work(&wl->tx_work);
+               msleep(20);
+               mutex_lock(&wl->mutex);
+
                if ((wl->tx_frames_cnt == 0) &&
                    (wl1271_tx_total_queue_count(wl) == 0)) {
-                       mutex_unlock(&wl->mutex);
-                       goto out;
+                       wl1271_debug(DEBUG_MAC80211, "tx flush took %d ms",
+                                    jiffies_to_msecs(jiffies - start_time));
+                       goto out_wake;
                }
-               mutex_unlock(&wl->mutex);
-               msleep(1);
        }
 
-       wl1271_warning("Unable to flush all TX buffers, timed out.");
+       wl1271_warning("Unable to flush all TX buffers, "
+                      "timed out (timeout %d ms",
+                      WL1271_TX_FLUSH_TIMEOUT / 1000);
 
        /* forcibly flush all Tx buffers on our queues */
-       mutex_lock(&wl->mutex);
        for (i = 0; i < WL12XX_MAX_LINKS; i++)
                wl1271_tx_reset_link_queues(wl, i);
-       mutex_unlock(&wl->mutex);
 
-out:
+out_wake:
        wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FLUSH);
+       mutex_unlock(&wl->mutex);
+out:
        mutex_unlock(&wl->flush_mutex);
 }
 EXPORT_SYMBOL_GPL(wl1271_tx_flush);
index 1e939b016155c57a45a5a5582b0823f1cbbbea9b..349520d8b7240686b2e7ffa266c4f9019c5ce481 100644 (file)
@@ -243,10 +243,8 @@ u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band);
 u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set,
                                enum ieee80211_band rate_band);
 u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set);
-u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
-                        struct sk_buff *skb);
 u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
-                     struct sk_buff *skb);
+                     struct sk_buff *skb, struct ieee80211_sta *sta);
 void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid);
 void wl1271_handle_tx_low_watermark(struct wl1271 *wl);
 bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb);
index 0ce7a8ebbd46a8be119ed4b84eb146751177b6cf..68584aa0f2b0f731e86b7e1a0c0ea973b3a89ebf 100644 (file)
 /* The maximum number of Tx descriptors in all chip families */
 #define WLCORE_MAX_TX_DESCRIPTORS 32
 
+/*
+ * We always allocate this number of mac addresses. If we don't
+ * have enough allocated addresses, the LAA bit is used
+ */
+#define WLCORE_NUM_MAC_ADDRESSES 3
+
 /* forward declaration */
 struct wl1271_tx_hw_descr;
 enum wl_rx_buf_align;
 struct wl1271_rx_descriptor;
 
 struct wlcore_ops {
+       int (*setup)(struct wl1271 *wl);
        int (*identify_chip)(struct wl1271 *wl);
        int (*identify_fw)(struct wl1271 *wl);
        int (*boot)(struct wl1271 *wl);
@@ -139,10 +146,12 @@ struct wl1271_stats {
 };
 
 struct wl1271 {
+       bool initialized;
        struct ieee80211_hw *hw;
        bool mac80211_registered;
 
        struct device *dev;
+       struct platform_device *pdev;
 
        void *if_priv;
 
@@ -153,7 +162,7 @@ struct wl1271 {
 
        spinlock_t wl_lock;
 
-       enum wl1271_state state;
+       enum wlcore_state state;
        enum wl12xx_fw_type fw_type;
        bool plt;
        enum plt_mode plt_mode;
@@ -181,7 +190,7 @@ struct wl1271 {
        u32 fuse_nic_addr;
 
        /* we have up to 2 MAC addresses */
-       struct mac_address addresses[2];
+       struct mac_address addresses[WLCORE_NUM_MAC_ADDRESSES];
        int channel;
        u8 system_hlid;
 
@@ -190,6 +199,8 @@ struct wl1271 {
        unsigned long roc_map[BITS_TO_LONGS(WL12XX_MAX_ROLES)];
        unsigned long rate_policies_map[
                        BITS_TO_LONGS(WL12XX_MAX_RATE_POLICIES)];
+       unsigned long klv_templates_map[
+                       BITS_TO_LONGS(WLCORE_MAX_KLV_TEMPLATES)];
 
        struct list_head wlvif_list;
 
@@ -237,6 +248,7 @@ struct wl1271 {
 
        /* Intermediate buffer, used for packet aggregation */
        u8 *aggr_buf;
+       u32 aggr_buf_size;
 
        /* Reusable dummy packet template */
        struct sk_buff *dummy_packet;
@@ -393,13 +405,18 @@ struct wl1271 {
        /* sleep auth value currently configured to FW */
        int sleep_auth;
 
+       /* the number of allocated MAC addresses in this chip */
+       int num_mac_addr;
+
        /* the minimum FW version required for the driver to work */
        unsigned int min_fw_ver[NUM_FW_VER];
+
+       struct completion nvs_loading_complete;
 };
 
 int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev);
 int __devexit wlcore_remove(struct platform_device *pdev);
-struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size);
+struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size);
 int wlcore_free_hw(struct wl1271 *wl);
 int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
                   struct ieee80211_vif *vif,
index c0505635bb00dc42652e363c6e56dd199a2b01f8..6678d4b18611556be7617b6ff79dab0dad851d14 100644 (file)
@@ -66,6 +66,7 @@
 #define WLCORE_NUM_BANDS           2
 
 #define WL12XX_MAX_RATE_POLICIES 16
+#define WLCORE_MAX_KLV_TEMPLATES 4
 
 /* Defined by FW as 0. Will not be freed or allocated. */
 #define WL12XX_SYSTEM_HLID         0
 #define WL1271_AP_BSS_INDEX        0
 #define WL1271_AP_DEF_BEACON_EXP   20
 
-#define WL1271_AGGR_BUFFER_SIZE (5 * PAGE_SIZE)
-
-enum wl1271_state {
-       WL1271_STATE_OFF,
-       WL1271_STATE_ON,
+enum wlcore_state {
+       WLCORE_STATE_OFF,
+       WLCORE_STATE_RESTARTING,
+       WLCORE_STATE_ON,
 };
 
 enum wl12xx_fw_type {
@@ -124,6 +124,7 @@ struct wl1271_chip {
        u32 id;
        char fw_ver_str[ETHTOOL_BUSINFO_LEN];
        unsigned int fw_ver[NUM_FW_VER];
+       char phy_fw_ver_str[ETHTOOL_BUSINFO_LEN];
 };
 
 #define NUM_TX_QUEUES              4
@@ -337,6 +338,8 @@ struct wl12xx_vif {
                        u8 ap_rate_idx;
                        u8 p2p_rate_idx;
 
+                       u8 klv_template_id;
+
                        bool qos;
                } sta;
                struct {
index 00f6e69c1dcd6f47e3f1e916cf26f923b9f0e1ea..730186d0449b281b08242a1116c2f85115248f04 100644 (file)
@@ -1520,13 +1520,12 @@ static int wl3501_set_wap(struct net_device *dev, struct iw_request_info *info,
                          union iwreq_data *wrqu, char *extra)
 {
        struct wl3501_card *this = netdev_priv(dev);
-       static const u8 bcast[ETH_ALEN] = { 255, 255, 255, 255, 255, 255 };
        int rc = -EINVAL;
 
        /* FIXME: we support other ARPHRDs...*/
        if (wrqu->ap_addr.sa_family != ARPHRD_ETHER)
                goto out;
-       if (!memcmp(bcast, wrqu->ap_addr.sa_data, ETH_ALEN)) {
+       if (is_broadcast_ether_addr(wrqu->ap_addr.sa_data)) {
                /* FIXME: rescan? */
        } else
                memcpy(this->bssid, wrqu->ap_addr.sa_data, ETH_ALEN);
index c9e2660e12638d156818367c19cc646064e4aeb3..114364b5d46638f3597e66761c5caff45903cc80 100644 (file)
@@ -937,7 +937,9 @@ static int fill_ctrlset(struct zd_mac *mac,
  * control block of the skbuff will be initialized. If necessary the incoming
  * mac80211 queues will be stopped.
  */
-static void zd_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void zd_op_tx(struct ieee80211_hw *hw,
+                    struct ieee80211_tx_control *control,
+                    struct sk_buff *skb)
 {
        struct zd_mac *mac = zd_hw_mac(hw);
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -1176,7 +1178,7 @@ static void zd_beacon_done(struct zd_mac *mac)
                skb = ieee80211_get_buffered_bc(mac->hw, mac->vif);
                if (!skb)
                        break;
-               zd_op_tx(mac->hw, skb);
+               zd_op_tx(mac->hw, NULL, skb);
        }
 
        /*
@@ -1399,7 +1401,8 @@ struct ieee80211_hw *zd_mac_alloc_hw(struct usb_interface *intf)
 
        hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
                    IEEE80211_HW_SIGNAL_UNSPEC |
-                   IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING;
+                   IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
+                   IEEE80211_HW_MFP_CAPABLE;
 
        hw->wiphy->interface_modes =
                BIT(NL80211_IFTYPE_MESH_POINT) |
index af83c43bcdb1ebe7d08c27ec6f6d85e13f4cf954..ef2b171e351479f5d9268a00e360d4e3ff525d1b 100644 (file)
@@ -1164,8 +1164,7 @@ void zd_usb_reset_rx_idle_timer(struct zd_usb *usb)
 {
        struct zd_usb_rx *rx = &usb->rx;
 
-       cancel_delayed_work(&rx->idle_work);
-       queue_delayed_work(zd_workqueue, &rx->idle_work, ZD_RX_IDLE_INTERVAL);
+       mod_delayed_work(zd_workqueue, &rx->idle_work, ZD_RX_IDLE_INTERVAL);
 }
 
 static inline void init_usb_interrupt(struct zd_usb *usb)
index 682633bfe00ff7fc35c7be97c427a6cf5d9dde45..05593d8820233b72681af9f76510cdc9a5eefb52 100644 (file)
@@ -635,9 +635,7 @@ static void xen_netbk_rx_action(struct xen_netbk *netbk)
                return;
 
        BUG_ON(npo.copy_prod > ARRAY_SIZE(netbk->grant_copy_op));
-       ret = HYPERVISOR_grant_table_op(GNTTABOP_copy, &netbk->grant_copy_op,
-                                       npo.copy_prod);
-       BUG_ON(ret != 0);
+       gnttab_batch_copy(netbk->grant_copy_op, npo.copy_prod);
 
        while ((skb = __skb_dequeue(&rxq)) != NULL) {
                sco = (struct skb_cb_overlay *)skb->cb;
@@ -1460,18 +1458,15 @@ static void xen_netbk_tx_submit(struct xen_netbk *netbk)
 static void xen_netbk_tx_action(struct xen_netbk *netbk)
 {
        unsigned nr_gops;
-       int ret;
 
        nr_gops = xen_netbk_tx_build_gops(netbk);
 
        if (nr_gops == 0)
                return;
-       ret = HYPERVISOR_grant_table_op(GNTTABOP_copy,
-                                       netbk->tx_copy_ops, nr_gops);
-       BUG_ON(ret);
 
-       xen_netbk_tx_submit(netbk);
+       gnttab_batch_copy(netbk->tx_copy_ops, nr_gops);
 
+       xen_netbk_tx_submit(netbk);
 }
 
 static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx)
index 650f79a1f2bd4a89cd96326d63c85982b24db0aa..c934fe8583f5f17c33a577993a934205db2d3609 100644 (file)
@@ -1712,7 +1712,7 @@ static void netback_changed(struct xenbus_device *dev,
                break;
 
        case XenbusStateConnected:
-               netif_notify_peers(netdev);
+               netdev_notify_peers(netdev);
                break;
 
        case XenbusStateClosing:
index 3b20b73ee649bf46d62cc27f81c7a6f78366a148..ec857676c39ffaffcb6d55c25917ec9beb1192ea 100644 (file)
@@ -5,21 +5,9 @@
 menu "Near Field Communication (NFC) devices"
        depends on NFC
 
-config PN544_NFC
-       tristate "PN544 NFC driver"
-       depends on I2C
-       select CRC_CCITT
-       default n
-       ---help---
-         Say yes if you want PN544 Near Field Communication driver.
-         This is for i2c connected version. If unsure, say N here.
-
-         To compile this driver as a module, choose m here. The module will
-         be called pn544.
-
 config PN544_HCI_NFC
        tristate "HCI PN544 NFC driver"
-       depends on I2C && NFC_SHDLC
+       depends on I2C && NFC_HCI && NFC_SHDLC
        select CRC_CCITT
        default n
        ---help---
index 473e44cef6122fdc4530fbdf9d796e29afc3f85a..bf05831fdf091d372f154c8f7b3ea9e023cdba64 100644 (file)
@@ -2,7 +2,6 @@
 # Makefile for nfc devices
 #
 
-obj-$(CONFIG_PN544_NFC)                += pn544.o
 obj-$(CONFIG_PN544_HCI_NFC)    += pn544_hci.o
 obj-$(CONFIG_NFC_PN533)                += pn533.o
 obj-$(CONFIG_NFC_WILINK)       += nfcwilink.o
index e7fd4938f9bc2e36191de06f38fa30fd807eb307..50b1ee41afc60e2a3789f1a4e26ab12ff811ce4e 100644 (file)
@@ -352,8 +352,6 @@ static long nfcwilink_receive(void *priv_data, struct sk_buff *skb)
        struct nfcwilink *drv = priv_data;
        int rc;
 
-       nfc_dev_dbg(&drv->pdev->dev, "receive entry, len %d", skb->len);
-
        if (!skb)
                return -EFAULT;
 
@@ -362,6 +360,8 @@ static long nfcwilink_receive(void *priv_data, struct sk_buff *skb)
                return -EFAULT;
        }
 
+       nfc_dev_dbg(&drv->pdev->dev, "receive entry, len %d", skb->len);
+
        /* strip the ST header
        (apart for the chnl byte, which is not received in the hdr) */
        skb_pull(skb, (NFCWILINK_HDR_LEN-1));
@@ -604,21 +604,7 @@ static struct platform_driver nfcwilink_driver = {
        },
 };
 
-/* ------- Module Init/Exit interfaces ------ */
-static int __init nfcwilink_init(void)
-{
-       printk(KERN_INFO "NFC Driver for TI WiLink");
-
-       return platform_driver_register(&nfcwilink_driver);
-}
-
-static void __exit nfcwilink_exit(void)
-{
-       platform_driver_unregister(&nfcwilink_driver);
-}
-
-module_init(nfcwilink_init);
-module_exit(nfcwilink_exit);
+module_platform_driver(nfcwilink_driver);
 
 /* ------ Module Info ------ */
 
index d606f52fec842d5a613cff3f63719a97827178b4..97c440a8cd615798a1e61250628e0030ede37694 100644 (file)
@@ -356,6 +356,7 @@ struct pn533 {
 
        struct workqueue_struct *wq;
        struct work_struct cmd_work;
+       struct work_struct cmd_complete_work;
        struct work_struct poll_work;
        struct work_struct mi_work;
        struct work_struct tg_work;
@@ -383,6 +384,19 @@ struct pn533 {
        u8 tgt_mode;
 
        u32 device_type;
+
+       struct list_head cmd_queue;
+       u8 cmd_pending;
+};
+
+struct pn533_cmd {
+       struct list_head queue;
+       struct pn533_frame *out_frame;
+       struct pn533_frame *in_frame;
+       int in_frame_len;
+       pn533_cmd_complete_t cmd_complete;
+       void *arg;
+       gfp_t flags;
 };
 
 struct pn533_frame {
@@ -487,7 +501,7 @@ static bool pn533_rx_frame_is_cmd_response(struct pn533_frame *frame, u8 cmd)
 
 static void pn533_wq_cmd_complete(struct work_struct *work)
 {
-       struct pn533 *dev = container_of(work, struct pn533, cmd_work);
+       struct pn533 *dev = container_of(work, struct pn533, cmd_complete_work);
        struct pn533_frame *in_frame;
        int rc;
 
@@ -502,7 +516,7 @@ static void pn533_wq_cmd_complete(struct work_struct *work)
                                        PN533_FRAME_CMD_PARAMS_LEN(in_frame));
 
        if (rc != -EINPROGRESS)
-               mutex_unlock(&dev->cmd_lock);
+               queue_work(dev->wq, &dev->cmd_work);
 }
 
 static void pn533_recv_response(struct urb *urb)
@@ -550,7 +564,7 @@ static void pn533_recv_response(struct urb *urb)
        dev->wq_in_frame = in_frame;
 
 sched_wq:
-       queue_work(dev->wq, &dev->cmd_work);
+       queue_work(dev->wq, &dev->cmd_complete_work);
 }
 
 static int pn533_submit_urb_for_response(struct pn533 *dev, gfp_t flags)
@@ -606,7 +620,7 @@ static void pn533_recv_ack(struct urb *urb)
 
 sched_wq:
        dev->wq_in_frame = NULL;
-       queue_work(dev->wq, &dev->cmd_work);
+       queue_work(dev->wq, &dev->cmd_complete_work);
 }
 
 static int pn533_submit_urb_for_ack(struct pn533 *dev, gfp_t flags)
@@ -669,6 +683,31 @@ error:
        return rc;
 }
 
+static void pn533_wq_cmd(struct work_struct *work)
+{
+       struct pn533 *dev = container_of(work, struct pn533, cmd_work);
+       struct pn533_cmd *cmd;
+
+       mutex_lock(&dev->cmd_lock);
+
+       if (list_empty(&dev->cmd_queue)) {
+               dev->cmd_pending = 0;
+               mutex_unlock(&dev->cmd_lock);
+               return;
+       }
+
+       cmd = list_first_entry(&dev->cmd_queue, struct pn533_cmd, queue);
+
+       mutex_unlock(&dev->cmd_lock);
+
+       __pn533_send_cmd_frame_async(dev, cmd->out_frame, cmd->in_frame,
+                                    cmd->in_frame_len, cmd->cmd_complete,
+                                    cmd->arg, cmd->flags);
+
+       list_del(&cmd->queue);
+       kfree(cmd);
+}
+
 static int pn533_send_cmd_frame_async(struct pn533 *dev,
                                        struct pn533_frame *out_frame,
                                        struct pn533_frame *in_frame,
@@ -676,21 +715,44 @@ static int pn533_send_cmd_frame_async(struct pn533 *dev,
                                        pn533_cmd_complete_t cmd_complete,
                                        void *arg, gfp_t flags)
 {
-       int rc;
+       struct pn533_cmd *cmd;
+       int rc = 0;
 
        nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
 
-       if (!mutex_trylock(&dev->cmd_lock))
-               return -EBUSY;
+       mutex_lock(&dev->cmd_lock);
 
-       rc = __pn533_send_cmd_frame_async(dev, out_frame, in_frame,
-                                       in_frame_len, cmd_complete, arg, flags);
-       if (rc)
-               goto error;
+       if (!dev->cmd_pending) {
+               rc = __pn533_send_cmd_frame_async(dev, out_frame, in_frame,
+                                                 in_frame_len, cmd_complete,
+                                                 arg, flags);
+               if (!rc)
+                       dev->cmd_pending = 1;
 
-       return 0;
-error:
+               goto unlock;
+       }
+
+       nfc_dev_dbg(&dev->interface->dev, "%s Queueing command", __func__);
+
+       cmd = kzalloc(sizeof(struct pn533_cmd), flags);
+       if (!cmd) {
+               rc = -ENOMEM;
+               goto unlock;
+       }
+
+       INIT_LIST_HEAD(&cmd->queue);
+       cmd->out_frame = out_frame;
+       cmd->in_frame = in_frame;
+       cmd->in_frame_len = in_frame_len;
+       cmd->cmd_complete = cmd_complete;
+       cmd->arg = arg;
+       cmd->flags = flags;
+
+       list_add_tail(&cmd->queue, &dev->cmd_queue);
+
+unlock:
        mutex_unlock(&dev->cmd_lock);
+
        return rc;
 }
 
@@ -1305,8 +1367,6 @@ static void pn533_listen_mode_timer(unsigned long data)
 
        dev->cancel_listen = 1;
 
-       mutex_unlock(&dev->cmd_lock);
-
        pn533_poll_next_mod(dev);
 
        queue_work(dev->wq, &dev->poll_work);
@@ -2131,7 +2191,7 @@ error_cmd:
 
        kfree(arg);
 
-       mutex_unlock(&dev->cmd_lock);
+       queue_work(dev->wq, &dev->cmd_work);
 }
 
 static int pn533_set_configuration(struct pn533 *dev, u8 cfgitem, u8 *cfgdata,
@@ -2330,13 +2390,12 @@ static int pn533_probe(struct usb_interface *interface,
                        NULL, 0,
                        pn533_send_complete, dev);
 
-       INIT_WORK(&dev->cmd_work, pn533_wq_cmd_complete);
+       INIT_WORK(&dev->cmd_work, pn533_wq_cmd);
+       INIT_WORK(&dev->cmd_complete_work, pn533_wq_cmd_complete);
        INIT_WORK(&dev->mi_work, pn533_wq_mi_recv);
        INIT_WORK(&dev->tg_work, pn533_wq_tg_get_data);
        INIT_WORK(&dev->poll_work, pn533_wq_poll);
-       dev->wq = alloc_workqueue("pn533",
-                                 WQ_NON_REENTRANT | WQ_UNBOUND | WQ_MEM_RECLAIM,
-                                 1);
+       dev->wq = alloc_ordered_workqueue("pn533", 0);
        if (dev->wq == NULL)
                goto error;
 
@@ -2346,6 +2405,8 @@ static int pn533_probe(struct usb_interface *interface,
 
        skb_queue_head_init(&dev->resp_q);
 
+       INIT_LIST_HEAD(&dev->cmd_queue);
+
        usb_set_intfdata(interface, dev);
 
        pn533_tx_frame_init(dev->out_frame, PN533_CMD_GET_FIRMWARE_VERSION);
@@ -2417,6 +2478,7 @@ error:
 static void pn533_disconnect(struct usb_interface *interface)
 {
        struct pn533 *dev;
+       struct pn533_cmd *cmd, *n;
 
        dev = usb_get_intfdata(interface);
        usb_set_intfdata(interface, NULL);
@@ -2433,6 +2495,11 @@ static void pn533_disconnect(struct usb_interface *interface)
 
        del_timer(&dev->listen_timer);
 
+       list_for_each_entry_safe(cmd, n, &dev->cmd_queue, queue) {
+               list_del(&cmd->queue);
+               kfree(cmd);
+       }
+
        kfree(dev->in_frame);
        usb_free_urb(dev->in_urb);
        kfree(dev->out_frame);
diff --git a/drivers/nfc/pn544.c b/drivers/nfc/pn544.c
deleted file mode 100644 (file)
index 724f65d..0000000
+++ /dev/null
@@ -1,893 +0,0 @@
-/*
- * Driver for the PN544 NFC chip.
- *
- * Copyright (C) Nokia Corporation
- *
- * Author: Jari Vanhala <ext-jari.vanhala@nokia.com>
- * Contact: Matti Aaltonen <matti.j.aaltonen@nokia.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.        See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include <linux/completion.h>
-#include <linux/crc-ccitt.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/miscdevice.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/nfc/pn544.h>
-#include <linux/poll.h>
-#include <linux/regulator/consumer.h>
-#include <linux/serial_core.h> /* for TCGETS */
-#include <linux/slab.h>
-
-#define DRIVER_CARD    "PN544 NFC"
-#define DRIVER_DESC    "NFC driver for PN544"
-
-static struct i2c_device_id pn544_id_table[] = {
-       { PN544_DRIVER_NAME, 0 },
-       { }
-};
-MODULE_DEVICE_TABLE(i2c, pn544_id_table);
-
-#define HCI_MODE       0
-#define FW_MODE                1
-
-enum pn544_state {
-       PN544_ST_COLD,
-       PN544_ST_FW_READY,
-       PN544_ST_READY,
-};
-
-enum pn544_irq {
-       PN544_NONE,
-       PN544_INT,
-};
-
-struct pn544_info {
-       struct miscdevice miscdev;
-       struct i2c_client *i2c_dev;
-       struct regulator_bulk_data regs[3];
-
-       enum pn544_state state;
-       wait_queue_head_t read_wait;
-       loff_t read_offset;
-       enum pn544_irq read_irq;
-       struct mutex read_mutex; /* Serialize read_irq access */
-       struct mutex mutex; /* Serialize info struct access */
-       u8 *buf;
-       size_t buflen;
-};
-
-static const char reg_vdd_io[] = "Vdd_IO";
-static const char reg_vbat[]   = "VBat";
-static const char reg_vsim[]   = "VSim";
-
-/* sysfs interface */
-static ssize_t pn544_test(struct device *dev,
-                         struct device_attribute *attr, char *buf)
-{
-       struct pn544_info *info = dev_get_drvdata(dev);
-       struct i2c_client *client = info->i2c_dev;
-       struct pn544_nfc_platform_data *pdata = client->dev.platform_data;
-
-       return snprintf(buf, PAGE_SIZE, "%d\n", pdata->test());
-}
-
-static int pn544_enable(struct pn544_info *info, int mode)
-{
-       struct pn544_nfc_platform_data *pdata;
-       struct i2c_client *client = info->i2c_dev;
-
-       int r;
-
-       r = regulator_bulk_enable(ARRAY_SIZE(info->regs), info->regs);
-       if (r < 0)
-               return r;
-
-       pdata = client->dev.platform_data;
-       info->read_irq = PN544_NONE;
-       if (pdata->enable)
-               pdata->enable(mode);
-
-       if (mode) {
-               info->state = PN544_ST_FW_READY;
-               dev_dbg(&client->dev, "now in FW-mode\n");
-       } else {
-               info->state = PN544_ST_READY;
-               dev_dbg(&client->dev, "now in HCI-mode\n");
-       }
-
-       usleep_range(10000, 15000);
-
-       return 0;
-}
-
-static void pn544_disable(struct pn544_info *info)
-{
-       struct pn544_nfc_platform_data *pdata;
-       struct i2c_client *client = info->i2c_dev;
-
-       pdata = client->dev.platform_data;
-       if (pdata->disable)
-               pdata->disable();
-
-       info->state = PN544_ST_COLD;
-
-       dev_dbg(&client->dev, "Now in OFF-mode\n");
-
-       msleep(PN544_RESETVEN_TIME);
-
-       info->read_irq = PN544_NONE;
-       regulator_bulk_disable(ARRAY_SIZE(info->regs), info->regs);
-}
-
-static int check_crc(u8 *buf, int buflen)
-{
-       u8 len;
-       u16 crc;
-
-       len = buf[0] + 1;
-       if (len < 4 || len != buflen || len > PN544_MSG_MAX_SIZE) {
-               pr_err(PN544_DRIVER_NAME
-                      ": CRC; corrupt packet len %u (%d)\n", len, buflen);
-               print_hex_dump(KERN_DEBUG, "crc: ", DUMP_PREFIX_NONE,
-                              16, 2, buf, buflen, false);
-               return -EPERM;
-       }
-       crc = crc_ccitt(0xffff, buf, len - 2);
-       crc = ~crc;
-
-       if (buf[len-2] != (crc & 0xff) || buf[len-1] != (crc >> 8)) {
-               pr_err(PN544_DRIVER_NAME ": CRC error 0x%x != 0x%x 0x%x\n",
-                      crc, buf[len-1], buf[len-2]);
-
-               print_hex_dump(KERN_DEBUG, "crc: ", DUMP_PREFIX_NONE,
-                              16, 2, buf, buflen, false);
-               return -EPERM;
-       }
-       return 0;
-}
-
-static int pn544_i2c_write(struct i2c_client *client, u8 *buf, int len)
-{
-       int r;
-
-       if (len < 4 || len != (buf[0] + 1)) {
-               dev_err(&client->dev, "%s: Illegal message length: %d\n",
-                       __func__, len);
-               return -EINVAL;
-       }
-
-       if (check_crc(buf, len))
-               return -EINVAL;
-
-       usleep_range(3000, 6000);
-
-       r = i2c_master_send(client, buf, len);
-       dev_dbg(&client->dev, "send: %d\n", r);
-
-       if (r == -EREMOTEIO) { /* Retry, chip was in standby */
-               usleep_range(6000, 10000);
-               r = i2c_master_send(client, buf, len);
-               dev_dbg(&client->dev, "send2: %d\n", r);
-       }
-
-       if (r != len)
-               return -EREMOTEIO;
-
-       return r;
-}
-
-static int pn544_i2c_read(struct i2c_client *client, u8 *buf, int buflen)
-{
-       int r;
-       u8 len;
-
-       /*
-        * You could read a packet in one go, but then you'd need to read
-        * max size and rest would be 0xff fill, so we do split reads.
-        */
-       r = i2c_master_recv(client, &len, 1);
-       dev_dbg(&client->dev, "recv1: %d\n", r);
-
-       if (r != 1)
-               return -EREMOTEIO;
-
-       if (len < PN544_LLC_HCI_OVERHEAD)
-               len = PN544_LLC_HCI_OVERHEAD;
-       else if (len > (PN544_MSG_MAX_SIZE - 1))
-               len = PN544_MSG_MAX_SIZE - 1;
-
-       if (1 + len > buflen) /* len+(data+crc16) */
-               return -EMSGSIZE;
-
-       buf[0] = len;
-
-       r = i2c_master_recv(client, buf + 1, len);
-       dev_dbg(&client->dev, "recv2: %d\n", r);
-
-       if (r != len)
-               return -EREMOTEIO;
-
-       usleep_range(3000, 6000);
-
-       return r + 1;
-}
-
-static int pn544_fw_write(struct i2c_client *client, u8 *buf, int len)
-{
-       int r;
-
-       dev_dbg(&client->dev, "%s\n", __func__);
-
-       if (len < PN544_FW_HEADER_SIZE ||
-           (PN544_FW_HEADER_SIZE + (buf[1] << 8) + buf[2]) != len)
-               return -EINVAL;
-
-       r = i2c_master_send(client, buf, len);
-       dev_dbg(&client->dev, "fw send: %d\n", r);
-
-       if (r == -EREMOTEIO) { /* Retry, chip was in standby */
-               usleep_range(6000, 10000);
-               r = i2c_master_send(client, buf, len);
-               dev_dbg(&client->dev, "fw send2: %d\n", r);
-       }
-
-       if (r != len)
-               return -EREMOTEIO;
-
-       return r;
-}
-
-static int pn544_fw_read(struct i2c_client *client, u8 *buf, int buflen)
-{
-       int r, len;
-
-       if (buflen < PN544_FW_HEADER_SIZE)
-               return -EINVAL;
-
-       r = i2c_master_recv(client, buf, PN544_FW_HEADER_SIZE);
-       dev_dbg(&client->dev, "FW recv1: %d\n", r);
-
-       if (r < 0)
-               return r;
-
-       if (r < PN544_FW_HEADER_SIZE)
-               return -EINVAL;
-
-       len = (buf[1] << 8) + buf[2];
-       if (len == 0) /* just header, no additional data */
-               return r;
-
-       if (len > buflen - PN544_FW_HEADER_SIZE)
-               return -EMSGSIZE;
-
-       r = i2c_master_recv(client, buf + PN544_FW_HEADER_SIZE, len);
-       dev_dbg(&client->dev, "fw recv2: %d\n", r);
-
-       if (r != len)
-               return -EINVAL;
-
-       return r + PN544_FW_HEADER_SIZE;
-}
-
-static irqreturn_t pn544_irq_thread_fn(int irq, void *dev_id)
-{
-       struct pn544_info *info = dev_id;
-       struct i2c_client *client = info->i2c_dev;
-
-       BUG_ON(!info);
-       BUG_ON(irq != info->i2c_dev->irq);
-
-       dev_dbg(&client->dev, "IRQ\n");
-
-       mutex_lock(&info->read_mutex);
-       info->read_irq = PN544_INT;
-       mutex_unlock(&info->read_mutex);
-
-       wake_up_interruptible(&info->read_wait);
-
-       return IRQ_HANDLED;
-}
-
-static enum pn544_irq pn544_irq_state(struct pn544_info *info)
-{
-       enum pn544_irq irq;
-
-       mutex_lock(&info->read_mutex);
-       irq = info->read_irq;
-       mutex_unlock(&info->read_mutex);
-       /*
-        * XXX: should we check GPIO-line status directly?
-        * return pdata->irq_status() ? PN544_INT : PN544_NONE;
-        */
-
-       return irq;
-}
-
-static ssize_t pn544_read(struct file *file, char __user *buf,
-                         size_t count, loff_t *offset)
-{
-       struct pn544_info *info = container_of(file->private_data,
-                                              struct pn544_info, miscdev);
-       struct i2c_client *client = info->i2c_dev;
-       enum pn544_irq irq;
-       size_t len;
-       int r = 0;
-
-       dev_dbg(&client->dev, "%s: info: %p, count: %zu\n", __func__,
-               info, count);
-
-       mutex_lock(&info->mutex);
-
-       if (info->state == PN544_ST_COLD) {
-               r = -ENODEV;
-               goto out;
-       }
-
-       irq = pn544_irq_state(info);
-       if (irq == PN544_NONE) {
-               if (file->f_flags & O_NONBLOCK) {
-                       r = -EAGAIN;
-                       goto out;
-               }
-
-               if (wait_event_interruptible(info->read_wait,
-                                            (info->read_irq == PN544_INT))) {
-                       r = -ERESTARTSYS;
-                       goto out;
-               }
-       }
-
-       if (info->state == PN544_ST_FW_READY) {
-               len = min(count, info->buflen);
-
-               mutex_lock(&info->read_mutex);
-               r = pn544_fw_read(info->i2c_dev, info->buf, len);
-               info->read_irq = PN544_NONE;
-               mutex_unlock(&info->read_mutex);
-
-               if (r < 0) {
-                       dev_err(&info->i2c_dev->dev, "FW read failed: %d\n", r);
-                       goto out;
-               }
-
-               print_hex_dump(KERN_DEBUG, "FW read: ", DUMP_PREFIX_NONE,
-                              16, 2, info->buf, r, false);
-
-               *offset += r;
-               if (copy_to_user(buf, info->buf, r)) {
-                       r = -EFAULT;
-                       goto out;
-               }
-       } else {
-               len = min(count, info->buflen);
-
-               mutex_lock(&info->read_mutex);
-               r = pn544_i2c_read(info->i2c_dev, info->buf, len);
-               info->read_irq = PN544_NONE;
-               mutex_unlock(&info->read_mutex);
-
-               if (r < 0) {
-                       dev_err(&info->i2c_dev->dev, "read failed (%d)\n", r);
-                       goto out;
-               }
-               print_hex_dump(KERN_DEBUG, "read: ", DUMP_PREFIX_NONE,
-                              16, 2, info->buf, r, false);
-
-               *offset += r;
-               if (copy_to_user(buf, info->buf, r)) {
-                       r = -EFAULT;
-                       goto out;
-               }
-       }
-
-out:
-       mutex_unlock(&info->mutex);
-
-       return r;
-}
-
-static unsigned int pn544_poll(struct file *file, poll_table *wait)
-{
-       struct pn544_info *info = container_of(file->private_data,
-                                              struct pn544_info, miscdev);
-       struct i2c_client *client = info->i2c_dev;
-       int r = 0;
-
-       dev_dbg(&client->dev, "%s: info: %p\n", __func__, info);
-
-       mutex_lock(&info->mutex);
-
-       if (info->state == PN544_ST_COLD) {
-               r = -ENODEV;
-               goto out;
-       }
-
-       poll_wait(file, &info->read_wait, wait);
-
-       if (pn544_irq_state(info) == PN544_INT) {
-               r = POLLIN | POLLRDNORM;
-               goto out;
-       }
-out:
-       mutex_unlock(&info->mutex);
-
-       return r;
-}
-
-static ssize_t pn544_write(struct file *file, const char __user *buf,
-                          size_t count, loff_t *ppos)
-{
-       struct pn544_info *info = container_of(file->private_data,
-                                              struct pn544_info, miscdev);
-       struct i2c_client *client = info->i2c_dev;
-       ssize_t len;
-       int r;
-
-       dev_dbg(&client->dev, "%s: info: %p, count %zu\n", __func__,
-               info, count);
-
-       mutex_lock(&info->mutex);
-
-       if (info->state == PN544_ST_COLD) {
-               r = -ENODEV;
-               goto out;
-       }
-
-       /*
-        * XXX: should we detect rset-writes and clean possible
-        * read_irq state
-        */
-       if (info->state == PN544_ST_FW_READY) {
-               size_t fw_len;
-
-               if (count < PN544_FW_HEADER_SIZE) {
-                       r = -EINVAL;
-                       goto out;
-               }
-
-               len = min(count, info->buflen);
-               if (copy_from_user(info->buf, buf, len)) {
-                       r = -EFAULT;
-                       goto out;
-               }
-
-               print_hex_dump(KERN_DEBUG, "FW write: ", DUMP_PREFIX_NONE,
-                              16, 2, info->buf, len, false);
-
-               fw_len = PN544_FW_HEADER_SIZE + (info->buf[1] << 8) +
-                       info->buf[2];
-
-               if (len > fw_len) /* 1 msg at a time */
-                       len = fw_len;
-
-               r = pn544_fw_write(info->i2c_dev, info->buf, len);
-       } else {
-               if (count < PN544_LLC_MIN_SIZE) {
-                       r = -EINVAL;
-                       goto out;
-               }
-
-               len = min(count, info->buflen);
-               if (copy_from_user(info->buf, buf, len)) {
-                       r = -EFAULT;
-                       goto out;
-               }
-
-               print_hex_dump(KERN_DEBUG, "write: ", DUMP_PREFIX_NONE,
-                              16, 2, info->buf, len, false);
-
-               if (len > (info->buf[0] + 1)) /* 1 msg at a time */
-                       len  = info->buf[0] + 1;
-
-               r = pn544_i2c_write(info->i2c_dev, info->buf, len);
-       }
-out:
-       mutex_unlock(&info->mutex);
-
-       return r;
-
-}
-
-static long pn544_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
-       struct pn544_info *info = container_of(file->private_data,
-                                              struct pn544_info, miscdev);
-       struct i2c_client *client = info->i2c_dev;
-       struct pn544_nfc_platform_data *pdata;
-       unsigned int val;
-       int r = 0;
-
-       dev_dbg(&client->dev, "%s: info: %p, cmd: 0x%x\n", __func__, info, cmd);
-
-       mutex_lock(&info->mutex);
-
-       if (info->state == PN544_ST_COLD) {
-               r = -ENODEV;
-               goto out;
-       }
-
-       pdata = info->i2c_dev->dev.platform_data;
-       switch (cmd) {
-       case PN544_GET_FW_MODE:
-               dev_dbg(&client->dev, "%s:  PN544_GET_FW_MODE\n", __func__);
-
-               val = (info->state == PN544_ST_FW_READY);
-               if (copy_to_user((void __user *)arg, &val, sizeof(val))) {
-                       r = -EFAULT;
-                       goto out;
-               }
-
-               break;
-
-       case PN544_SET_FW_MODE:
-               dev_dbg(&client->dev, "%s:  PN544_SET_FW_MODE\n", __func__);
-
-               if (copy_from_user(&val, (void __user *)arg, sizeof(val))) {
-                       r = -EFAULT;
-                       goto out;
-               }
-
-               if (val) {
-                       if (info->state == PN544_ST_FW_READY)
-                               break;
-
-                       pn544_disable(info);
-                       r = pn544_enable(info, FW_MODE);
-                       if (r < 0)
-                               goto out;
-               } else {
-                       if (info->state == PN544_ST_READY)
-                               break;
-                       pn544_disable(info);
-                       r = pn544_enable(info, HCI_MODE);
-                       if (r < 0)
-                               goto out;
-               }
-               file->f_pos = info->read_offset;
-               break;
-
-       case TCGETS:
-               dev_dbg(&client->dev, "%s:  TCGETS\n", __func__);
-
-               r = -ENOIOCTLCMD;
-               break;
-
-       default:
-               dev_err(&client->dev, "Unknown ioctl 0x%x\n", cmd);
-               r = -ENOIOCTLCMD;
-               break;
-       }
-
-out:
-       mutex_unlock(&info->mutex);
-
-       return r;
-}
-
-static int pn544_open(struct inode *inode, struct file *file)
-{
-       struct pn544_info *info = container_of(file->private_data,
-                                              struct pn544_info, miscdev);
-       struct i2c_client *client = info->i2c_dev;
-       int r = 0;
-
-       dev_dbg(&client->dev, "%s: info: %p, client %p\n", __func__,
-               info, info->i2c_dev);
-
-       mutex_lock(&info->mutex);
-
-       /*
-        * Only 1 at a time.
-        * XXX: maybe user (counter) would work better
-        */
-       if (info->state != PN544_ST_COLD) {
-               r = -EBUSY;
-               goto out;
-       }
-
-       file->f_pos = info->read_offset;
-       r = pn544_enable(info, HCI_MODE);
-
-out:
-       mutex_unlock(&info->mutex);
-       return r;
-}
-
-static int pn544_close(struct inode *inode, struct file *file)
-{
-       struct pn544_info *info = container_of(file->private_data,
-                                              struct pn544_info, miscdev);
-       struct i2c_client *client = info->i2c_dev;
-
-       dev_dbg(&client->dev, "%s: info: %p, client %p\n",
-               __func__, info, info->i2c_dev);
-
-       mutex_lock(&info->mutex);
-       pn544_disable(info);
-       mutex_unlock(&info->mutex);
-
-       return 0;
-}
-
-static const struct file_operations pn544_fops = {
-       .owner          = THIS_MODULE,
-       .llseek         = no_llseek,
-       .read           = pn544_read,
-       .write          = pn544_write,
-       .poll           = pn544_poll,
-       .open           = pn544_open,
-       .release        = pn544_close,
-       .unlocked_ioctl = pn544_ioctl,
-};
-
-#ifdef CONFIG_PM
-static int pn544_suspend(struct device *dev)
-{
-       struct i2c_client *client = to_i2c_client(dev);
-       struct pn544_info *info;
-       int r = 0;
-
-       dev_info(&client->dev, "***\n%s: client %p\n***\n", __func__, client);
-
-       info = i2c_get_clientdata(client);
-       dev_info(&client->dev, "%s: info: %p, client %p\n", __func__,
-                info, client);
-
-       mutex_lock(&info->mutex);
-
-       switch (info->state) {
-       case PN544_ST_FW_READY:
-               /* Do not suspend while upgrading FW, please! */
-               r = -EPERM;
-               break;
-
-       case PN544_ST_READY:
-               /*
-                * CHECK: Device should be in standby-mode. No way to check?
-                * Allowing low power mode for the regulator is potentially
-                * dangerous if pn544 does not go to suspension.
-                */
-               break;
-
-       case PN544_ST_COLD:
-               break;
-       };
-
-       mutex_unlock(&info->mutex);
-       return r;
-}
-
-static int pn544_resume(struct device *dev)
-{
-       struct i2c_client *client = to_i2c_client(dev);
-       struct pn544_info *info = i2c_get_clientdata(client);
-       int r = 0;
-
-       dev_dbg(&client->dev, "%s: info: %p, client %p\n", __func__,
-               info, client);
-
-       mutex_lock(&info->mutex);
-
-       switch (info->state) {
-       case PN544_ST_READY:
-               /*
-                * CHECK: If regulator low power mode is allowed in
-                * pn544_suspend, we should go back to normal mode
-                * here.
-                */
-               break;
-
-       case PN544_ST_COLD:
-               break;
-
-       case PN544_ST_FW_READY:
-               break;
-       };
-
-       mutex_unlock(&info->mutex);
-
-       return r;
-}
-
-static SIMPLE_DEV_PM_OPS(pn544_pm_ops, pn544_suspend, pn544_resume);
-#endif
-
-static struct device_attribute pn544_attr =
-       __ATTR(nfc_test, S_IRUGO, pn544_test, NULL);
-
-static int __devinit pn544_probe(struct i2c_client *client,
-                                const struct i2c_device_id *id)
-{
-       struct pn544_info *info;
-       struct pn544_nfc_platform_data *pdata;
-       int r = 0;
-
-       dev_dbg(&client->dev, "%s\n", __func__);
-       dev_dbg(&client->dev, "IRQ: %d\n", client->irq);
-
-       /* private data allocation */
-       info = kzalloc(sizeof(struct pn544_info), GFP_KERNEL);
-       if (!info) {
-               dev_err(&client->dev,
-                       "Cannot allocate memory for pn544_info.\n");
-               r = -ENOMEM;
-               goto err_info_alloc;
-       }
-
-       info->buflen = max(PN544_MSG_MAX_SIZE, PN544_MAX_I2C_TRANSFER);
-       info->buf = kzalloc(info->buflen, GFP_KERNEL);
-       if (!info->buf) {
-               dev_err(&client->dev,
-                       "Cannot allocate memory for pn544_info->buf.\n");
-               r = -ENOMEM;
-               goto err_buf_alloc;
-       }
-
-       info->regs[0].supply = reg_vdd_io;
-       info->regs[1].supply = reg_vbat;
-       info->regs[2].supply = reg_vsim;
-       r = regulator_bulk_get(&client->dev, ARRAY_SIZE(info->regs),
-                                info->regs);
-       if (r < 0)
-               goto err_kmalloc;
-
-       info->i2c_dev = client;
-       info->state = PN544_ST_COLD;
-       info->read_irq = PN544_NONE;
-       mutex_init(&info->read_mutex);
-       mutex_init(&info->mutex);
-       init_waitqueue_head(&info->read_wait);
-       i2c_set_clientdata(client, info);
-       pdata = client->dev.platform_data;
-       if (!pdata) {
-               dev_err(&client->dev, "No platform data\n");
-               r = -EINVAL;
-               goto err_reg;
-       }
-
-       if (!pdata->request_resources) {
-               dev_err(&client->dev, "request_resources() missing\n");
-               r = -EINVAL;
-               goto err_reg;
-       }
-
-       r = pdata->request_resources(client);
-       if (r) {
-               dev_err(&client->dev, "Cannot get platform resources\n");
-               goto err_reg;
-       }
-
-       r = request_threaded_irq(client->irq, NULL, pn544_irq_thread_fn,
-                                IRQF_TRIGGER_RISING, PN544_DRIVER_NAME,
-                                info);
-       if (r < 0) {
-               dev_err(&client->dev, "Unable to register IRQ handler\n");
-               goto err_res;
-       }
-
-       /* If we don't have the test we don't need the sysfs file */
-       if (pdata->test) {
-               r = device_create_file(&client->dev, &pn544_attr);
-               if (r) {
-                       dev_err(&client->dev,
-                               "sysfs registration failed, error %d\n", r);
-                       goto err_irq;
-               }
-       }
-
-       info->miscdev.minor = MISC_DYNAMIC_MINOR;
-       info->miscdev.name = PN544_DRIVER_NAME;
-       info->miscdev.fops = &pn544_fops;
-       info->miscdev.parent = &client->dev;
-       r = misc_register(&info->miscdev);
-       if (r < 0) {
-               dev_err(&client->dev, "Device registration failed\n");
-               goto err_sysfs;
-       }
-
-       dev_dbg(&client->dev, "%s: info: %p, pdata %p, client %p\n",
-               __func__, info, pdata, client);
-
-       return 0;
-
-err_sysfs:
-       if (pdata->test)
-               device_remove_file(&client->dev, &pn544_attr);
-err_irq:
-       free_irq(client->irq, info);
-err_res:
-       if (pdata->free_resources)
-               pdata->free_resources();
-err_reg:
-       regulator_bulk_free(ARRAY_SIZE(info->regs), info->regs);
-err_kmalloc:
-       kfree(info->buf);
-err_buf_alloc:
-       kfree(info);
-err_info_alloc:
-       return r;
-}
-
-static __devexit int pn544_remove(struct i2c_client *client)
-{
-       struct pn544_info *info = i2c_get_clientdata(client);
-       struct pn544_nfc_platform_data *pdata = client->dev.platform_data;
-
-       dev_dbg(&client->dev, "%s\n", __func__);
-
-       misc_deregister(&info->miscdev);
-       if (pdata->test)
-               device_remove_file(&client->dev, &pn544_attr);
-
-       if (info->state != PN544_ST_COLD) {
-               if (pdata->disable)
-                       pdata->disable();
-
-               info->read_irq = PN544_NONE;
-       }
-
-       free_irq(client->irq, info);
-       if (pdata->free_resources)
-               pdata->free_resources();
-
-       regulator_bulk_free(ARRAY_SIZE(info->regs), info->regs);
-       kfree(info->buf);
-       kfree(info);
-
-       return 0;
-}
-
-static struct i2c_driver pn544_driver = {
-       .driver = {
-               .name = PN544_DRIVER_NAME,
-#ifdef CONFIG_PM
-               .pm = &pn544_pm_ops,
-#endif
-       },
-       .probe = pn544_probe,
-       .id_table = pn544_id_table,
-       .remove = __devexit_p(pn544_remove),
-};
-
-static int __init pn544_init(void)
-{
-       int r;
-
-       pr_debug(DRIVER_DESC ": %s\n", __func__);
-
-       r = i2c_add_driver(&pn544_driver);
-       if (r) {
-               pr_err(PN544_DRIVER_NAME ": driver registration failed\n");
-               return r;
-       }
-
-       return 0;
-}
-
-static void __exit pn544_exit(void)
-{
-       i2c_del_driver(&pn544_driver);
-       pr_info(DRIVER_DESC ", Exiting.\n");
-}
-
-module_init(pn544_init);
-module_exit(pn544_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION(DRIVER_DESC);
index aa71807189ba3235a0720fc1660049507bf19039..c9c8570273ab5a58c782e9fe953339e091042d2a 100644 (file)
@@ -29,7 +29,7 @@
 
 #include <linux/nfc.h>
 #include <net/nfc/hci.h>
-#include <net/nfc/shdlc.h>
+#include <net/nfc/llc.h>
 
 #include <linux/nfc/pn544.h>
 
@@ -128,10 +128,12 @@ static struct nfc_hci_gate pn544_gates[] = {
 
 /* Largest headroom needed for outgoing custom commands */
 #define PN544_CMDS_HEADROOM    2
+#define PN544_FRAME_HEADROOM 1
+#define PN544_FRAME_TAILROOM 2
 
 struct pn544_hci_info {
        struct i2c_client *i2c_dev;
-       struct nfc_shdlc *shdlc;
+       struct nfc_hci_dev *hdev;
 
        enum pn544_state state;
 
@@ -146,6 +148,9 @@ struct pn544_hci_info {
                                 * < 0 if hardware error occured (e.g. i2c err)
                                 * and prevents normal operation.
                                 */
+       int async_cb_type;
+       data_exchange_cb_t async_cb;
+       void *async_cb_context;
 };
 
 static void pn544_hci_platform_init(struct pn544_hci_info *info)
@@ -230,8 +235,12 @@ static int pn544_hci_i2c_write(struct i2c_client *client, u8 *buf, int len)
                r = i2c_master_send(client, buf, len);
        }
 
-       if (r >= 0 && r != len)
-               r = -EREMOTEIO;
+       if (r >= 0) {
+               if (r != len)
+                       return -EREMOTEIO;
+               else
+                       return 0;
+       }
 
        return r;
 }
@@ -341,13 +350,16 @@ flush:
 static irqreturn_t pn544_hci_irq_thread_fn(int irq, void *dev_id)
 {
        struct pn544_hci_info *info = dev_id;
-       struct i2c_client *client = info->i2c_dev;
+       struct i2c_client *client;
        struct sk_buff *skb = NULL;
        int r;
 
-       BUG_ON(!info);
-       BUG_ON(irq != info->i2c_dev->irq);
+       if (!info || irq != info->i2c_dev->irq) {
+               WARN_ON_ONCE(1);
+               return IRQ_NONE;
+       }
 
+       client = info->i2c_dev;
        dev_dbg(&client->dev, "IRQ\n");
 
        if (info->hard_fault != 0)
@@ -357,21 +369,21 @@ static irqreturn_t pn544_hci_irq_thread_fn(int irq, void *dev_id)
        if (r == -EREMOTEIO) {
                info->hard_fault = r;
 
-               nfc_shdlc_recv_frame(info->shdlc, NULL);
+               nfc_hci_recv_frame(info->hdev, NULL);
 
                return IRQ_HANDLED;
        } else if ((r == -ENOMEM) || (r == -EBADMSG)) {
                return IRQ_HANDLED;
        }
 
-       nfc_shdlc_recv_frame(info->shdlc, skb);
+       nfc_hci_recv_frame(info->hdev, skb);
 
        return IRQ_HANDLED;
 }
 
-static int pn544_hci_open(struct nfc_shdlc *shdlc)
+static int pn544_hci_open(struct nfc_hci_dev *hdev)
 {
-       struct pn544_hci_info *info = nfc_shdlc_get_clientdata(shdlc);
+       struct pn544_hci_info *info = nfc_hci_get_clientdata(hdev);
        int r = 0;
 
        mutex_lock(&info->info_lock);
@@ -391,9 +403,9 @@ out:
        return r;
 }
 
-static void pn544_hci_close(struct nfc_shdlc *shdlc)
+static void pn544_hci_close(struct nfc_hci_dev *hdev)
 {
-       struct pn544_hci_info *info = nfc_shdlc_get_clientdata(shdlc);
+       struct pn544_hci_info *info = nfc_hci_get_clientdata(hdev);
 
        mutex_lock(&info->info_lock);
 
@@ -408,9 +420,8 @@ out:
        mutex_unlock(&info->info_lock);
 }
 
-static int pn544_hci_ready(struct nfc_shdlc *shdlc)
+static int pn544_hci_ready(struct nfc_hci_dev *hdev)
 {
-       struct nfc_hci_dev *hdev = nfc_shdlc_get_hci_dev(shdlc);
        struct sk_buff *skb;
        static struct hw_config {
                u8 adr[2];
@@ -576,21 +587,45 @@ static int pn544_hci_ready(struct nfc_shdlc *shdlc)
        return 0;
 }
 
-static int pn544_hci_xmit(struct nfc_shdlc *shdlc, struct sk_buff *skb)
+static void pn544_hci_add_len_crc(struct sk_buff *skb)
 {
-       struct pn544_hci_info *info = nfc_shdlc_get_clientdata(shdlc);
+       u16 crc;
+       int len;
+
+       len = skb->len + 2;
+       *skb_push(skb, 1) = len;
+
+       crc = crc_ccitt(0xffff, skb->data, skb->len);
+       crc = ~crc;
+       *skb_put(skb, 1) = crc & 0xff;
+       *skb_put(skb, 1) = crc >> 8;
+}
+
+static void pn544_hci_remove_len_crc(struct sk_buff *skb)
+{
+       skb_pull(skb, PN544_FRAME_HEADROOM);
+       skb_trim(skb, PN544_FRAME_TAILROOM);
+}
+
+static int pn544_hci_xmit(struct nfc_hci_dev *hdev, struct sk_buff *skb)
+{
+       struct pn544_hci_info *info = nfc_hci_get_clientdata(hdev);
        struct i2c_client *client = info->i2c_dev;
+       int r;
 
        if (info->hard_fault != 0)
                return info->hard_fault;
 
-       return pn544_hci_i2c_write(client, skb->data, skb->len);
+       pn544_hci_add_len_crc(skb);
+       r = pn544_hci_i2c_write(client, skb->data, skb->len);
+       pn544_hci_remove_len_crc(skb);
+
+       return r;
 }
 
-static int pn544_hci_start_poll(struct nfc_shdlc *shdlc,
+static int pn544_hci_start_poll(struct nfc_hci_dev *hdev,
                                u32 im_protocols, u32 tm_protocols)
 {
-       struct nfc_hci_dev *hdev = nfc_shdlc_get_hci_dev(shdlc);
        u8 phases = 0;
        int r;
        u8 duration[2];
@@ -641,7 +676,7 @@ static int pn544_hci_start_poll(struct nfc_shdlc *shdlc,
        return r;
 }
 
-static int pn544_hci_target_from_gate(struct nfc_shdlc *shdlc, u8 gate,
+static int pn544_hci_target_from_gate(struct nfc_hci_dev *hdev, u8 gate,
                                      struct nfc_target *target)
 {
        switch (gate) {
@@ -659,11 +694,10 @@ static int pn544_hci_target_from_gate(struct nfc_shdlc *shdlc, u8 gate,
        return 0;
 }
 
-static int pn544_hci_complete_target_discovered(struct nfc_shdlc *shdlc,
+static int pn544_hci_complete_target_discovered(struct nfc_hci_dev *hdev,
                                                u8 gate,
                                                struct nfc_target *target)
 {
-       struct nfc_hci_dev *hdev = nfc_shdlc_get_hci_dev(shdlc);
        struct sk_buff *uid_skb;
        int r = 0;
 
@@ -704,6 +738,26 @@ static int pn544_hci_complete_target_discovered(struct nfc_shdlc *shdlc,
        return r;
 }
 
+#define PN544_CB_TYPE_READER_F 1
+
+static void pn544_hci_data_exchange_cb(void *context, struct sk_buff *skb,
+                                      int err)
+{
+       struct pn544_hci_info *info = context;
+
+       switch (info->async_cb_type) {
+       case PN544_CB_TYPE_READER_F:
+               if (err == 0)
+                       skb_pull(skb, 1);
+               info->async_cb(info->async_cb_context, skb, err);
+               break;
+       default:
+               if (err == 0)
+                       kfree_skb(skb);
+               break;
+       }
+}
+
 #define MIFARE_CMD_AUTH_KEY_A  0x60
 #define MIFARE_CMD_AUTH_KEY_B  0x61
 #define MIFARE_CMD_HEADER      2
@@ -715,13 +769,12 @@ static int pn544_hci_complete_target_discovered(struct nfc_shdlc *shdlc,
  * <= 0: driver handled the data exchange
  *    1: driver doesn't especially handle, please do standard processing
  */
-static int pn544_hci_data_exchange(struct nfc_shdlc *shdlc,
+static int pn544_hci_data_exchange(struct nfc_hci_dev *hdev,
                                   struct nfc_target *target,
-                                  struct sk_buff *skb,
-                                  struct sk_buff **res_skb)
+                                  struct sk_buff *skb, data_exchange_cb_t cb,
+                                  void *cb_context)
 {
-       struct nfc_hci_dev *hdev = nfc_shdlc_get_hci_dev(shdlc);
-       int r;
+       struct pn544_hci_info *info = nfc_hci_get_clientdata(hdev);
 
        pr_info(DRIVER_DESC ": %s for gate=%d\n", __func__,
                target->hci_reader_gate);
@@ -746,41 +799,43 @@ static int pn544_hci_data_exchange(struct nfc_shdlc *shdlc,
                                memcpy(data, uid, MIFARE_UID_LEN);
                        }
 
-                       return nfc_hci_send_cmd(hdev, target->hci_reader_gate,
-                                               PN544_MIFARE_CMD,
-                                               skb->data, skb->len, res_skb);
+                       return nfc_hci_send_cmd_async(hdev,
+                                                     target->hci_reader_gate,
+                                                     PN544_MIFARE_CMD,
+                                                     skb->data, skb->len,
+                                                     cb, cb_context);
                } else
                        return 1;
        case PN544_RF_READER_F_GATE:
                *skb_push(skb, 1) = 0;
                *skb_push(skb, 1) = 0;
 
-               r = nfc_hci_send_cmd(hdev, target->hci_reader_gate,
-                                    PN544_FELICA_RAW,
-                                    skb->data, skb->len, res_skb);
-               if (r == 0)
-                       skb_pull(*res_skb, 1);
-               return r;
+               info->async_cb_type = PN544_CB_TYPE_READER_F;
+               info->async_cb = cb;
+               info->async_cb_context = cb_context;
+
+               return nfc_hci_send_cmd_async(hdev, target->hci_reader_gate,
+                                             PN544_FELICA_RAW, skb->data,
+                                             skb->len,
+                                             pn544_hci_data_exchange_cb, info);
        case PN544_RF_READER_JEWEL_GATE:
-               return nfc_hci_send_cmd(hdev, target->hci_reader_gate,
-                                       PN544_JEWEL_RAW_CMD,
-                                       skb->data, skb->len, res_skb);
+               return nfc_hci_send_cmd_async(hdev, target->hci_reader_gate,
+                                             PN544_JEWEL_RAW_CMD, skb->data,
+                                             skb->len, cb, cb_context);
        default:
                return 1;
        }
 }
 
-static int pn544_hci_check_presence(struct nfc_shdlc *shdlc,
+static int pn544_hci_check_presence(struct nfc_hci_dev *hdev,
                                   struct nfc_target *target)
 {
-       struct nfc_hci_dev *hdev = nfc_shdlc_get_hci_dev(shdlc);
-
        return nfc_hci_send_cmd(hdev, target->hci_reader_gate,
                                PN544_RF_READER_CMD_PRESENCE_CHECK,
                                NULL, 0, NULL);
 }
 
-static struct nfc_shdlc_ops pn544_shdlc_ops = {
+static struct nfc_hci_ops pn544_hci_ops = {
        .open = pn544_hci_open,
        .close = pn544_hci_close,
        .hci_ready = pn544_hci_ready,
@@ -848,8 +903,8 @@ static int __devinit pn544_hci_probe(struct i2c_client *client,
        pn544_hci_platform_init(info);
 
        r = request_threaded_irq(client->irq, NULL, pn544_hci_irq_thread_fn,
-                                IRQF_TRIGGER_RISING, PN544_HCI_DRIVER_NAME,
-                                info);
+                                IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+                                PN544_HCI_DRIVER_NAME, info);
        if (r < 0) {
                dev_err(&client->dev, "Unable to register IRQ handler\n");
                goto err_rti;
@@ -872,22 +927,30 @@ static int __devinit pn544_hci_probe(struct i2c_client *client,
                    NFC_PROTO_ISO14443_B_MASK |
                    NFC_PROTO_NFC_DEP_MASK;
 
-       info->shdlc = nfc_shdlc_allocate(&pn544_shdlc_ops,
-                                        &init_data, protocols,
-                                        PN544_CMDS_HEADROOM, 0,
-                                        PN544_HCI_LLC_MAX_PAYLOAD,
-                                        dev_name(&client->dev));
-       if (!info->shdlc) {
-               dev_err(&client->dev, "Cannot allocate nfc shdlc.\n");
+       info->hdev = nfc_hci_allocate_device(&pn544_hci_ops, &init_data,
+                                            protocols, LLC_SHDLC_NAME,
+                                            PN544_FRAME_HEADROOM +
+                                            PN544_CMDS_HEADROOM,
+                                            PN544_FRAME_TAILROOM,
+                                            PN544_HCI_LLC_MAX_PAYLOAD);
+       if (!info->hdev) {
+               dev_err(&client->dev, "Cannot allocate nfc hdev.\n");
                r = -ENOMEM;
-               goto err_allocshdlc;
+               goto err_alloc_hdev;
        }
 
-       nfc_shdlc_set_clientdata(info->shdlc, info);
+       nfc_hci_set_clientdata(info->hdev, info);
+
+       r = nfc_hci_register_device(info->hdev);
+       if (r)
+               goto err_regdev;
 
        return 0;
 
-err_allocshdlc:
+err_regdev:
+       nfc_hci_free_device(info->hdev);
+
+err_alloc_hdev:
        free_irq(client->irq, info);
 
 err_rti:
@@ -908,7 +971,7 @@ static __devexit int pn544_hci_remove(struct i2c_client *client)
 
        dev_dbg(&client->dev, "%s\n", __func__);
 
-       nfc_shdlc_free(info->shdlc);
+       nfc_hci_free_device(info->hdev);
 
        if (info->state != PN544_ST_COLD) {
                if (pdata->disable)
index 7e262a6124c5919a495545bb1f7402ae4350295a..72e496f1e9b082d576f439c442a07721330f1928 100644 (file)
@@ -9,8 +9,8 @@
 
 /* Max address size we deal with */
 #define OF_MAX_ADDR_CELLS      4
-#define OF_CHECK_COUNTS(na, ns)        ((na) > 0 && (na) <= OF_MAX_ADDR_CELLS && \
-                       (ns) > 0)
+#define OF_CHECK_ADDR_COUNT(na)        ((na) > 0 && (na) <= OF_MAX_ADDR_CELLS)
+#define OF_CHECK_COUNTS(na, ns)        (OF_CHECK_ADDR_COUNT(na) && (ns) > 0)
 
 static struct of_bus *of_match_bus(struct device_node *np);
 static int __of_address_to_resource(struct device_node *dev,
@@ -69,6 +69,14 @@ static u64 of_bus_default_map(u32 *addr, const __be32 *range,
                 (unsigned long long)cp, (unsigned long long)s,
                 (unsigned long long)da);
 
+       /*
+        * If the number of address cells is larger than 2 we assume the
+        * mapping doesn't specify a physical address. Rather, the address
+        * specifies an identifier that must match exactly.
+        */
+       if (na > 2 && memcmp(range, addr, na * 4) != 0)
+               return OF_BAD_ADDR;
+
        if (da < cp || da >= (cp + s))
                return OF_BAD_ADDR;
        return da - cp;
@@ -182,7 +190,7 @@ const __be32 *of_get_pci_address(struct device_node *dev, int bar_no, u64 *size,
        }
        bus->count_cells(dev, &na, &ns);
        of_node_put(parent);
-       if (!OF_CHECK_COUNTS(na, ns))
+       if (!OF_CHECK_ADDR_COUNT(na))
                return NULL;
 
        /* Get "reg" or "assigned-addresses" property */
@@ -490,6 +498,25 @@ u64 of_translate_dma_address(struct device_node *dev, const __be32 *in_addr)
 }
 EXPORT_SYMBOL(of_translate_dma_address);
 
+bool of_can_translate_address(struct device_node *dev)
+{
+       struct device_node *parent;
+       struct of_bus *bus;
+       int na, ns;
+
+       parent = of_get_parent(dev);
+       if (parent == NULL)
+               return false;
+
+       bus = of_match_bus(parent);
+       bus->count_cells(dev, &na, &ns);
+
+       of_node_put(parent);
+
+       return OF_CHECK_COUNTS(na, ns);
+}
+EXPORT_SYMBOL(of_can_translate_address);
+
 const __be32 *of_get_address(struct device_node *dev, int index, u64 *size,
                    unsigned int *flags)
 {
@@ -506,7 +533,7 @@ const __be32 *of_get_address(struct device_node *dev, int index, u64 *size,
        bus = of_match_bus(parent);
        bus->count_cells(dev, &na, &ns);
        of_node_put(parent);
-       if (!OF_CHECK_COUNTS(na, ns))
+       if (!OF_CHECK_ADDR_COUNT(na))
                return NULL;
 
        /* Get "reg" or "assigned-addresses" property */
index d4a1c9a043e12d3572faefe9a9fd11a1683cdd97..af3b22ac762755b532286ac505fb85ef936ca64b 100644 (file)
@@ -390,6 +390,29 @@ struct device_node *of_get_next_available_child(const struct device_node *node,
 }
 EXPORT_SYMBOL(of_get_next_available_child);
 
+/**
+ *     of_get_child_by_name - Find the child node by name for a given parent
+ *     @node:  parent node
+ *     @name:  child name to look for.
+ *
+ *      This function looks for child node for given matching name
+ *
+ *     Returns a node pointer if found, with refcount incremented, use
+ *     of_node_put() on it when done.
+ *     Returns NULL if node is not found.
+ */
+struct device_node *of_get_child_by_name(const struct device_node *node,
+                               const char *name)
+{
+       struct device_node *child;
+
+       for_each_child_of_node(node, child)
+               if (child->name && (of_node_cmp(child->name, name) == 0))
+                       break;
+       return child;
+}
+EXPORT_SYMBOL(of_get_child_by_name);
+
 /**
  *     of_find_node_by_path - Find a node matching a full OF path
  *     @path:  The full path to match
index ff8ab7b27373bd58e31070a366ed0e48217ecceb..a23ec7779997b553269ad7af6e36a2580f98375c 100644 (file)
@@ -392,6 +392,7 @@ int of_irq_to_resource_table(struct device_node *dev, struct resource *res,
 
        return i;
 }
+EXPORT_SYMBOL_GPL(of_irq_to_resource_table);
 
 struct intc_desc {
        struct list_head        list;
index 1e173f3576743a702bef6125d2fc5d70bf612d24..3550f3bf4f92738cc992eee88b25a19829f9dcb5 100644 (file)
@@ -61,6 +61,9 @@ void of_i2c_register_devices(struct i2c_adapter *adap)
                info.of_node = of_node_get(node);
                info.archdata = &dev_ad;
 
+               if (of_get_property(node, "wakeup-source", NULL))
+                       info.flags |= I2C_CLIENT_WAKE;
+
                request_module("%s%s", I2C_MODULE_PREFIX, info.type);
 
                result = i2c_new_device(adap, &info);
index e44f8c2d239d253afc045164834f0476b94cf932..9bdeaf30b17dd5a197f373f9baa18401338ef3d8 100644 (file)
@@ -78,6 +78,7 @@ void of_device_make_bus_id(struct device *dev)
        struct device_node *node = dev->of_node;
        const u32 *reg;
        u64 addr;
+       const __be32 *addrp;
        int magic;
 
 #ifdef CONFIG_PPC_DCR
@@ -105,7 +106,15 @@ void of_device_make_bus_id(struct device *dev)
         */
        reg = of_get_property(node, "reg", NULL);
        if (reg) {
-               addr = of_translate_address(node, reg);
+               if (of_can_translate_address(node)) {
+                       addr = of_translate_address(node, reg);
+               } else {
+                       addrp = of_get_address(node, 0, NULL, NULL);
+                       if (addrp)
+                               addr = of_read_number(addrp, 1);
+                       else
+                               addr = OF_BAD_ADDR;
+               }
                if (addr != OF_BAD_ADDR) {
                        dev_set_name(dev, "%llx.%s",
                                     (unsigned long long)addr, node->name);
@@ -140,8 +149,9 @@ struct platform_device *of_device_alloc(struct device_node *np,
                return NULL;
 
        /* count the io and irq resources */
-       while (of_address_to_resource(np, num_reg, &temp_res) == 0)
-               num_reg++;
+       if (of_can_translate_address(np))
+               while (of_address_to_resource(np, num_reg, &temp_res) == 0)
+                       num_reg++;
        num_irq = of_irq_count(np);
 
        /* Populate the resource table */
index 9e1d2959e22682004c7f3f975c7991ad2b561060..94c6e2aa03d658defb5d30b0f2fbb1f8383cf9d0 100644 (file)
@@ -606,21 +606,6 @@ static int pci_pm_prepare(struct device *dev)
        struct device_driver *drv = dev->driver;
        int error = 0;
 
-       /*
-        * If a PCI device configured to wake up the system from sleep states
-        * has been suspended at run time and there's a resume request pending
-        * for it, this is equivalent to the device signaling wakeup, so the
-        * system suspend operation should be aborted.
-        */
-       pm_runtime_get_noresume(dev);
-       if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
-               pm_wakeup_event(dev, 0);
-
-       if (pm_wakeup_pending()) {
-               pm_runtime_put_sync(dev);
-               return -EBUSY;
-       }
-
        /*
         * PCI devices suspended at run time need to be resumed at this
         * point, because in general it is necessary to reconfigure them for
@@ -644,8 +629,6 @@ static void pci_pm_complete(struct device *dev)
 
        if (drv && drv->pm && drv->pm->complete)
                drv->pm->complete(dev);
-
-       pm_runtime_put_sync(dev);
 }
 
 #else /* !CONFIG_PM_SLEEP */
index def8d0b5620c01bf4b8fd372a0ee0f68f57dabab..0aab85a51559c1f46ce4567cd3de1ca27a0d7a07 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/bitops.h>
 #include <linux/time.h>
 
+#include <asm/xen/swiotlb-xen.h>
 #define INVALID_GRANT_REF (0)
 #define INVALID_EVTCHN    (-1)
 
@@ -236,7 +237,7 @@ static int pcifront_bus_write(struct pci_bus *bus, unsigned int devfn,
        return errno_to_pcibios_err(do_pci_op(pdev, &op));
 }
 
-struct pci_ops pcifront_bus_ops = {
+static struct pci_ops pcifront_bus_ops = {
        .read = pcifront_bus_read,
        .write = pcifront_bus_write,
 };
@@ -668,7 +669,7 @@ static irqreturn_t pcifront_handler_aer(int irq, void *dev)
        schedule_pcifront_aer_op(pdev);
        return IRQ_HANDLED;
 }
-static int pcifront_connect(struct pcifront_device *pdev)
+static int pcifront_connect_and_init_dma(struct pcifront_device *pdev)
 {
        int err = 0;
 
@@ -681,9 +682,13 @@ static int pcifront_connect(struct pcifront_device *pdev)
                dev_err(&pdev->xdev->dev, "PCI frontend already installed!\n");
                err = -EEXIST;
        }
-
        spin_unlock(&pcifront_dev_lock);
 
+       if (!err && !swiotlb_nr_tbl()) {
+               err = pci_xen_swiotlb_init_late();
+               if (err)
+                       dev_err(&pdev->xdev->dev, "Could not setup SWIOTLB!\n");
+       }
        return err;
 }
 
@@ -842,10 +847,10 @@ static int __devinit pcifront_try_connect(struct pcifront_device *pdev)
            XenbusStateInitialised)
                goto out;
 
-       err = pcifront_connect(pdev);
+       err = pcifront_connect_and_init_dma(pdev);
        if (err) {
                xenbus_dev_fatal(pdev->xdev, err,
-                                "Error connecting PCI Frontend");
+                                "Error setting up PCI Frontend");
                goto out;
        }
 
index 34e94c7f68caabc10181ea999327a8e88c657cfc..1ef6e1e8c6c6dac902c486414c164446cc97b290 100644 (file)
@@ -26,11 +26,24 @@ config DEBUG_PINCTRL
        help
          Say Y here to add some extra checks and diagnostics to PINCTRL calls.
 
+config PINCTRL_BCM2835
+       bool
+       select PINMUX
+       select PINCONF
+
 config PINCTRL_IMX
        bool
        select PINMUX
        select PINCONF
 
+config PINCTRL_IMX35
+       bool "IMX35 pinctrl driver"
+       depends on OF
+       depends on SOC_IMX35
+       select PINCTRL_IMX
+       help
+         Say Y here to enable the imx35 pinctrl driver
+
 config PINCTRL_IMX51
        bool "IMX51 pinctrl driver"
        depends on OF
@@ -86,10 +99,18 @@ config PINCTRL_NOMADIK
        select PINMUX
        select PINCONF
 
+config PINCTRL_STN8815
+       bool "STN8815 pin controller driver"
+       depends on PINCTRL_NOMADIK && ARCH_NOMADIK
+
 config PINCTRL_DB8500
        bool "DB8500 pin controller driver"
        depends on PINCTRL_NOMADIK && ARCH_U8500
 
+config PINCTRL_DB8540
+       bool "DB8540 pin controller driver"
+       depends on PINCTRL_NOMADIK && ARCH_U8500
+
 config PINCTRL_PXA168
        bool "PXA168 pin controller driver"
        depends on ARCH_MMP
index 6a88113e11d9dff2a7fefd066ace97be9baff840..698527dce29dba381062a3425b04dc006b296682 100644 (file)
@@ -9,7 +9,9 @@ ifeq ($(CONFIG_OF),y)
 obj-$(CONFIG_PINCTRL)          += devicetree.o
 endif
 obj-$(CONFIG_GENERIC_PINCONF)  += pinconf-generic.o
+obj-$(CONFIG_PINCTRL_BCM2835)  += pinctrl-bcm2835.o
 obj-$(CONFIG_PINCTRL_IMX)      += pinctrl-imx.o
+obj-$(CONFIG_PINCTRL_IMX35)    += pinctrl-imx35.o
 obj-$(CONFIG_PINCTRL_IMX51)    += pinctrl-imx51.o
 obj-$(CONFIG_PINCTRL_IMX53)    += pinctrl-imx53.o
 obj-$(CONFIG_PINCTRL_IMX6Q)    += pinctrl-imx6q.o
@@ -19,7 +21,9 @@ obj-$(CONFIG_PINCTRL_MXS)     += pinctrl-mxs.o
 obj-$(CONFIG_PINCTRL_IMX23)    += pinctrl-imx23.o
 obj-$(CONFIG_PINCTRL_IMX28)    += pinctrl-imx28.o
 obj-$(CONFIG_PINCTRL_NOMADIK)  += pinctrl-nomadik.o
+obj-$(CONFIG_PINCTRL_STN8815)  += pinctrl-nomadik-stn8815.o
 obj-$(CONFIG_PINCTRL_DB8500)   += pinctrl-nomadik-db8500.o
+obj-$(CONFIG_PINCTRL_DB8540)   += pinctrl-nomadik-db8540.o
 obj-$(CONFIG_PINCTRL_PXA168)   += pinctrl-pxa168.o
 obj-$(CONFIG_PINCTRL_PXA910)   += pinctrl-pxa910.o
 obj-$(CONFIG_PINCTRL_SINGLE)   += pinctrl-single.o
index dc5c126e398a0ac84fc13fa5f564d832ade0831e..0f1ec9e8ff14b02448877cb4f4872f45baeeccd3 100644 (file)
@@ -230,8 +230,10 @@ static int pinctrl_register_one_pin(struct pinctrl_dev *pctldev,
                pindesc->name = name;
        } else {
                pindesc->name = kasprintf(GFP_KERNEL, "PIN%u", number);
-               if (pindesc->name == NULL)
+               if (pindesc->name == NULL) {
+                       kfree(pindesc);
                        return -ENOMEM;
+               }
                pindesc->dynamic_name = true;
        }
 
diff --git a/drivers/pinctrl/pinctrl-bcm2835.c b/drivers/pinctrl/pinctrl-bcm2835.c
new file mode 100644 (file)
index 0000000..a4adee6
--- /dev/null
@@ -0,0 +1,1075 @@
+/*
+ * Driver for Broadcom BCM2835 GPIO unit (pinctrl + GPIO)
+ *
+ * Copyright (C) 2012 Chris Boot, Simon Arlott, Stephen Warren
+ *
+ * This driver is inspired by:
+ * pinctrl-nomadik.c, please see original file for copyright information
+ * pinctrl-tegra.c, please see original file for copyright information
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitmap.h>
+#include <linux/bug.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqdesc.h>
+#include <linux/irqdomain.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/pinctrl/machine.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/platform_device.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#define MODULE_NAME "pinctrl-bcm2835"
+#define BCM2835_NUM_GPIOS 54
+#define BCM2835_NUM_BANKS 2
+
+#define BCM2835_PIN_BITMAP_SZ \
+       DIV_ROUND_UP(BCM2835_NUM_GPIOS, sizeof(unsigned long) * 8)
+
+/* GPIO register offsets */
+#define GPFSEL0                0x0     /* Function Select */
+#define GPSET0         0x1c    /* Pin Output Set */
+#define GPCLR0         0x28    /* Pin Output Clear */
+#define GPLEV0         0x34    /* Pin Level */
+#define GPEDS0         0x40    /* Pin Event Detect Status */
+#define GPREN0         0x4c    /* Pin Rising Edge Detect Enable */
+#define GPFEN0         0x58    /* Pin Falling Edge Detect Enable */
+#define GPHEN0         0x64    /* Pin High Detect Enable */
+#define GPLEN0         0x70    /* Pin Low Detect Enable */
+#define GPAREN0                0x7c    /* Pin Async Rising Edge Detect */
+#define GPAFEN0                0x88    /* Pin Async Falling Edge Detect */
+#define GPPUD          0x94    /* Pin Pull-up/down Enable */
+#define GPPUDCLK0      0x98    /* Pin Pull-up/down Enable Clock */
+
+#define FSEL_REG(p)            (GPFSEL0 + (((p) / 10) * 4))
+#define FSEL_SHIFT(p)          (((p) % 10) * 3)
+#define GPIO_REG_OFFSET(p)     ((p) / 32)
+#define GPIO_REG_SHIFT(p)      ((p) % 32)
+
+enum bcm2835_pinconf_param {
+       /* argument: bcm2835_pinconf_pull */
+       BCM2835_PINCONF_PARAM_PULL,
+};
+
+enum bcm2835_pinconf_pull {
+       BCM2835_PINCONFIG_PULL_NONE,
+       BCM2835_PINCONFIG_PULL_DOWN,
+       BCM2835_PINCONFIG_PULL_UP,
+};
+
+#define BCM2835_PINCONF_PACK(_param_, _arg_) ((_param_) << 16 | (_arg_))
+#define BCM2835_PINCONF_UNPACK_PARAM(_conf_) ((_conf_) >> 16)
+#define BCM2835_PINCONF_UNPACK_ARG(_conf_) ((_conf_) & 0xffff)
+
+struct bcm2835_gpio_irqdata {
+       struct bcm2835_pinctrl *pc;
+       int bank;
+};
+
+struct bcm2835_pinctrl {
+       struct device *dev;
+       void __iomem *base;
+       int irq[BCM2835_NUM_BANKS];
+
+       /* note: locking assumes each bank will have its own unsigned long */
+       unsigned long enabled_irq_map[BCM2835_NUM_BANKS];
+       unsigned int irq_type[BCM2835_NUM_GPIOS];
+
+       struct pinctrl_dev *pctl_dev;
+       struct irq_domain *irq_domain;
+       struct gpio_chip gpio_chip;
+       struct pinctrl_gpio_range gpio_range;
+
+       struct bcm2835_gpio_irqdata irq_data[BCM2835_NUM_BANKS];
+       spinlock_t irq_lock[BCM2835_NUM_BANKS];
+};
+
+static struct lock_class_key gpio_lock_class;
+
+/* pins are just named GPIO0..GPIO53 */
+#define BCM2835_GPIO_PIN(a) PINCTRL_PIN(a, "gpio" #a)
+struct pinctrl_pin_desc bcm2835_gpio_pins[] = {
+       BCM2835_GPIO_PIN(0),
+       BCM2835_GPIO_PIN(1),
+       BCM2835_GPIO_PIN(2),
+       BCM2835_GPIO_PIN(3),
+       BCM2835_GPIO_PIN(4),
+       BCM2835_GPIO_PIN(5),
+       BCM2835_GPIO_PIN(6),
+       BCM2835_GPIO_PIN(7),
+       BCM2835_GPIO_PIN(8),
+       BCM2835_GPIO_PIN(9),
+       BCM2835_GPIO_PIN(10),
+       BCM2835_GPIO_PIN(11),
+       BCM2835_GPIO_PIN(12),
+       BCM2835_GPIO_PIN(13),
+       BCM2835_GPIO_PIN(14),
+       BCM2835_GPIO_PIN(15),
+       BCM2835_GPIO_PIN(16),
+       BCM2835_GPIO_PIN(17),
+       BCM2835_GPIO_PIN(18),
+       BCM2835_GPIO_PIN(19),
+       BCM2835_GPIO_PIN(20),
+       BCM2835_GPIO_PIN(21),
+       BCM2835_GPIO_PIN(22),
+       BCM2835_GPIO_PIN(23),
+       BCM2835_GPIO_PIN(24),
+       BCM2835_GPIO_PIN(25),
+       BCM2835_GPIO_PIN(26),
+       BCM2835_GPIO_PIN(27),
+       BCM2835_GPIO_PIN(28),
+       BCM2835_GPIO_PIN(29),
+       BCM2835_GPIO_PIN(30),
+       BCM2835_GPIO_PIN(31),
+       BCM2835_GPIO_PIN(32),
+       BCM2835_GPIO_PIN(33),
+       BCM2835_GPIO_PIN(34),
+       BCM2835_GPIO_PIN(35),
+       BCM2835_GPIO_PIN(36),
+       BCM2835_GPIO_PIN(37),
+       BCM2835_GPIO_PIN(38),
+       BCM2835_GPIO_PIN(39),
+       BCM2835_GPIO_PIN(40),
+       BCM2835_GPIO_PIN(41),
+       BCM2835_GPIO_PIN(42),
+       BCM2835_GPIO_PIN(43),
+       BCM2835_GPIO_PIN(44),
+       BCM2835_GPIO_PIN(45),
+       BCM2835_GPIO_PIN(46),
+       BCM2835_GPIO_PIN(47),
+       BCM2835_GPIO_PIN(48),
+       BCM2835_GPIO_PIN(49),
+       BCM2835_GPIO_PIN(50),
+       BCM2835_GPIO_PIN(51),
+       BCM2835_GPIO_PIN(52),
+       BCM2835_GPIO_PIN(53),
+};
+
+/* one pin per group */
+static const char * const bcm2835_gpio_groups[] = {
+       "gpio0",
+       "gpio1",
+       "gpio2",
+       "gpio3",
+       "gpio4",
+       "gpio5",
+       "gpio6",
+       "gpio7",
+       "gpio8",
+       "gpio9",
+       "gpio10",
+       "gpio11",
+       "gpio12",
+       "gpio13",
+       "gpio14",
+       "gpio15",
+       "gpio16",
+       "gpio17",
+       "gpio18",
+       "gpio19",
+       "gpio20",
+       "gpio21",
+       "gpio22",
+       "gpio23",
+       "gpio24",
+       "gpio25",
+       "gpio26",
+       "gpio27",
+       "gpio28",
+       "gpio29",
+       "gpio30",
+       "gpio31",
+       "gpio32",
+       "gpio33",
+       "gpio34",
+       "gpio35",
+       "gpio36",
+       "gpio37",
+       "gpio38",
+       "gpio39",
+       "gpio40",
+       "gpio41",
+       "gpio42",
+       "gpio43",
+       "gpio44",
+       "gpio45",
+       "gpio46",
+       "gpio47",
+       "gpio48",
+       "gpio49",
+       "gpio50",
+       "gpio51",
+       "gpio52",
+       "gpio53",
+};
+
+enum bcm2835_fsel {
+       BCM2835_FSEL_GPIO_IN = 0,
+       BCM2835_FSEL_GPIO_OUT = 1,
+       BCM2835_FSEL_ALT0 = 4,
+       BCM2835_FSEL_ALT1 = 5,
+       BCM2835_FSEL_ALT2 = 6,
+       BCM2835_FSEL_ALT3 = 7,
+       BCM2835_FSEL_ALT4 = 3,
+       BCM2835_FSEL_ALT5 = 2,
+       BCM2835_FSEL_COUNT = 8,
+       BCM2835_FSEL_MASK = 0x7,
+};
+
+static const char * const bcm2835_functions[BCM2835_FSEL_COUNT] = {
+       [BCM2835_FSEL_GPIO_IN] = "gpio_in",
+       [BCM2835_FSEL_GPIO_OUT] = "gpio_out",
+       [BCM2835_FSEL_ALT0] = "alt0",
+       [BCM2835_FSEL_ALT1] = "alt1",
+       [BCM2835_FSEL_ALT2] = "alt2",
+       [BCM2835_FSEL_ALT3] = "alt3",
+       [BCM2835_FSEL_ALT4] = "alt4",
+       [BCM2835_FSEL_ALT5] = "alt5",
+};
+
+static const char * const irq_type_names[] = {
+       [IRQ_TYPE_NONE] = "none",
+       [IRQ_TYPE_EDGE_RISING] = "edge-rising",
+       [IRQ_TYPE_EDGE_FALLING] = "edge-falling",
+       [IRQ_TYPE_EDGE_BOTH] = "edge-both",
+       [IRQ_TYPE_LEVEL_HIGH] = "level-high",
+       [IRQ_TYPE_LEVEL_LOW] = "level-low",
+};
+
+static inline u32 bcm2835_gpio_rd(struct bcm2835_pinctrl *pc, unsigned reg)
+{
+       return readl(pc->base + reg);
+}
+
+static inline void bcm2835_gpio_wr(struct bcm2835_pinctrl *pc, unsigned reg,
+               u32 val)
+{
+       writel(val, pc->base + reg);
+}
+
+static inline int bcm2835_gpio_get_bit(struct bcm2835_pinctrl *pc, unsigned reg,
+               unsigned bit)
+{
+       reg += GPIO_REG_OFFSET(bit) * 4;
+       return (bcm2835_gpio_rd(pc, reg) >> GPIO_REG_SHIFT(bit)) & 1;
+}
+
+/* note NOT a read/modify/write cycle */
+static inline void bcm2835_gpio_set_bit(struct bcm2835_pinctrl *pc,
+               unsigned reg, unsigned bit)
+{
+       reg += GPIO_REG_OFFSET(bit) * 4;
+       bcm2835_gpio_wr(pc, reg, BIT(GPIO_REG_SHIFT(bit)));
+}
+
+static inline enum bcm2835_fsel bcm2835_pinctrl_fsel_get(
+               struct bcm2835_pinctrl *pc, unsigned pin)
+{
+       u32 val = bcm2835_gpio_rd(pc, FSEL_REG(pin));
+       enum bcm2835_fsel status = (val >> FSEL_SHIFT(pin)) & BCM2835_FSEL_MASK;
+
+       dev_dbg(pc->dev, "get %08x (%u => %s)\n", val, pin,
+                       bcm2835_functions[status]);
+
+       return status;
+}
+
+static inline void bcm2835_pinctrl_fsel_set(
+               struct bcm2835_pinctrl *pc, unsigned pin,
+               enum bcm2835_fsel fsel)
+{
+       u32 val = bcm2835_gpio_rd(pc, FSEL_REG(pin));
+       enum bcm2835_fsel cur = (val >> FSEL_SHIFT(pin)) & BCM2835_FSEL_MASK;
+
+       dev_dbg(pc->dev, "read %08x (%u => %s)\n", val, pin,
+                       bcm2835_functions[cur]);
+
+       if (cur == fsel)
+               return;
+
+       if (cur != BCM2835_FSEL_GPIO_IN && fsel != BCM2835_FSEL_GPIO_IN) {
+               /* always transition through GPIO_IN */
+               val &= ~(BCM2835_FSEL_MASK << FSEL_SHIFT(pin));
+               val |= BCM2835_FSEL_GPIO_IN << FSEL_SHIFT(pin);
+
+               dev_dbg(pc->dev, "trans %08x (%u <= %s)\n", val, pin,
+                               bcm2835_functions[BCM2835_FSEL_GPIO_IN]);
+               bcm2835_gpio_wr(pc, FSEL_REG(pin), val);
+       }
+
+       val &= ~(BCM2835_FSEL_MASK << FSEL_SHIFT(pin));
+       val |= fsel << FSEL_SHIFT(pin);
+
+       dev_dbg(pc->dev, "write %08x (%u <= %s)\n", val, pin,
+                       bcm2835_functions[fsel]);
+       bcm2835_gpio_wr(pc, FSEL_REG(pin), val);
+}
+
+static int bcm2835_gpio_request(struct gpio_chip *chip, unsigned offset)
+{
+       return pinctrl_request_gpio(chip->base + offset);
+}
+
+static void bcm2835_gpio_free(struct gpio_chip *chip, unsigned offset)
+{
+       pinctrl_free_gpio(chip->base + offset);
+}
+
+static int bcm2835_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+       return pinctrl_gpio_direction_input(chip->base + offset);
+}
+
+static int bcm2835_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+       struct bcm2835_pinctrl *pc = dev_get_drvdata(chip->dev);
+
+       return bcm2835_gpio_get_bit(pc, GPLEV0, offset);
+}
+
+static int bcm2835_gpio_direction_output(struct gpio_chip *chip,
+               unsigned offset, int value)
+{
+       return pinctrl_gpio_direction_output(chip->base + offset);
+}
+
+static void bcm2835_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+       struct bcm2835_pinctrl *pc = dev_get_drvdata(chip->dev);
+
+       bcm2835_gpio_set_bit(pc, value ? GPSET0 : GPCLR0, offset);
+}
+
+static int bcm2835_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+       struct bcm2835_pinctrl *pc = dev_get_drvdata(chip->dev);
+
+       return irq_linear_revmap(pc->irq_domain, offset);
+}
+
+static struct gpio_chip bcm2835_gpio_chip __devinitconst = {
+       .label = MODULE_NAME,
+       .owner = THIS_MODULE,
+       .request = bcm2835_gpio_request,
+       .free = bcm2835_gpio_free,
+       .direction_input = bcm2835_gpio_direction_input,
+       .direction_output = bcm2835_gpio_direction_output,
+       .get = bcm2835_gpio_get,
+       .set = bcm2835_gpio_set,
+       .to_irq = bcm2835_gpio_to_irq,
+       .base = -1,
+       .ngpio = BCM2835_NUM_GPIOS,
+       .can_sleep = 0,
+};
+
+/*
+ * Per-bank GPIO interrupt handler (dev_id is the bank's irqdata).
+ * Demultiplexes the bank's pending-event bits into per-pin virtual IRQs.
+ */
+static irqreturn_t bcm2835_gpio_irq_handler(int irq, void *dev_id)
+{
+       struct bcm2835_gpio_irqdata *irqdata = dev_id;
+       struct bcm2835_pinctrl *pc = irqdata->pc;
+       int bank = irqdata->bank;
+       unsigned long events;
+       unsigned offset;
+       unsigned gpio;
+       unsigned int type;
+
+       /* only service pins whose IRQ is currently enabled in this bank */
+       events = bcm2835_gpio_rd(pc, GPEDS0 + bank * 4);
+       events &= pc->enabled_irq_map[bank];
+       for_each_set_bit(offset, &events, 32) {
+               gpio = (32 * bank) + offset;
+               type = pc->irq_type[gpio];
+
+               /* ack edge triggered IRQs immediately */
+               if (!(type & IRQ_TYPE_LEVEL_MASK))
+                       bcm2835_gpio_set_bit(pc, GPEDS0, gpio);
+
+               generic_handle_irq(irq_linear_revmap(pc->irq_domain, gpio));
+
+               /* ack level triggered IRQ after handling them */
+               if (type & IRQ_TYPE_LEVEL_MASK)
+                       bcm2835_gpio_set_bit(pc, GPEDS0, gpio);
+       }
+       /* line is requested IRQF_SHARED; report whether we handled anything */
+       return events ? IRQ_HANDLED : IRQ_NONE;
+}
+
+/* Set or clear one pin's bit in a banked event-detect register. */
+static inline void __bcm2835_gpio_irq_config(struct bcm2835_pinctrl *pc,
+       unsigned reg, unsigned offset, bool enable)
+{
+       u32 bit = BIT(GPIO_REG_SHIFT(offset));
+       u32 val;
+
+       reg += GPIO_REG_OFFSET(offset) * 4;
+       val = bcm2835_gpio_rd(pc, reg);
+       val = enable ? (val | bit) : (val & ~bit);
+       bcm2835_gpio_wr(pc, reg, val);
+}
+
+/*
+ * Fast path for the IRQ enable/disable handlers: program the detect
+ * register(s) matching the pin's currently recorded trigger type.
+ * EDGE_BOTH arms both the rising and falling detect registers.
+ */
+static void bcm2835_gpio_irq_config(struct bcm2835_pinctrl *pc,
+       unsigned offset, bool enable)
+{
+       switch (pc->irq_type[offset]) {
+       case IRQ_TYPE_EDGE_RISING:
+               __bcm2835_gpio_irq_config(pc, GPREN0, offset, enable);
+               break;
+
+       case IRQ_TYPE_EDGE_FALLING:
+               __bcm2835_gpio_irq_config(pc, GPFEN0, offset, enable);
+               break;
+
+       case IRQ_TYPE_EDGE_BOTH:
+               __bcm2835_gpio_irq_config(pc, GPREN0, offset, enable);
+               __bcm2835_gpio_irq_config(pc, GPFEN0, offset, enable);
+               break;
+
+       case IRQ_TYPE_LEVEL_HIGH:
+               __bcm2835_gpio_irq_config(pc, GPHEN0, offset, enable);
+               break;
+
+       case IRQ_TYPE_LEVEL_LOW:
+               __bcm2835_gpio_irq_config(pc, GPLEN0, offset, enable);
+               break;
+       }
+}
+
+/* irq_chip enable: arm detect hw and mark the pin in the enabled bitmap. */
+static void bcm2835_gpio_irq_enable(struct irq_data *data)
+{
+       struct bcm2835_pinctrl *pc = irq_data_get_irq_chip_data(data);
+       unsigned gpio = irqd_to_hwirq(data);
+       unsigned offset = GPIO_REG_SHIFT(gpio);
+       unsigned bank = GPIO_REG_OFFSET(gpio);
+       unsigned long flags;
+
+       spin_lock_irqsave(&pc->irq_lock[bank], flags);
+       /* set the bitmap bit before arming hw so the handler sees the event */
+       set_bit(offset, &pc->enabled_irq_map[bank]);
+       bcm2835_gpio_irq_config(pc, gpio, true);
+       spin_unlock_irqrestore(&pc->irq_lock[bank], flags);
+}
+
+/* irq_chip disable: mirror of enable, in the reverse order. */
+static void bcm2835_gpio_irq_disable(struct irq_data *data)
+{
+       struct bcm2835_pinctrl *pc = irq_data_get_irq_chip_data(data);
+       unsigned gpio = irqd_to_hwirq(data);
+       unsigned offset = GPIO_REG_SHIFT(gpio);
+       unsigned bank = GPIO_REG_OFFSET(gpio);
+       unsigned long flags;
+
+       spin_lock_irqsave(&pc->irq_lock[bank], flags);
+       /* disarm hw first, then clear the bitmap bit */
+       bcm2835_gpio_irq_config(pc, gpio, false);
+       clear_bit(offset, &pc->enabled_irq_map[bank]);
+       spin_unlock_irqrestore(&pc->irq_lock[bank], flags);
+}
+
+/*
+ * Record a new trigger type for a currently-disabled IRQ.  No hardware
+ * access is needed; the type is programmed when the IRQ is enabled.
+ */
+static int __bcm2835_gpio_irq_set_type_disabled(struct bcm2835_pinctrl *pc,
+       unsigned offset, unsigned int type)
+{
+       switch (type) {
+       case IRQ_TYPE_NONE:
+       case IRQ_TYPE_EDGE_RISING:
+       case IRQ_TYPE_EDGE_FALLING:
+       case IRQ_TYPE_EDGE_BOTH:
+       case IRQ_TYPE_LEVEL_HIGH:
+       case IRQ_TYPE_LEVEL_LOW:
+               break;
+
+       default:
+               return -EINVAL;
+       }
+
+       pc->irq_type[offset] = type;
+       return 0;
+}
+
+/*
+ * Slower path for reconfiguring the trigger type while the IRQ is
+ * enabled: the detect registers must be reprogrammed to match.
+ *
+ * Note the temporary writes to pc->irq_type[] below: because
+ * bcm2835_gpio_irq_config() acts on the *recorded* type, the record is
+ * momentarily set so that only the one edge register that must change
+ * is touched (e.g. BOTH -> RISING only disarms the falling detect).
+ * Caller holds the bank's irq_lock, so the intermediate state is not
+ * observable.
+ */
+static int __bcm2835_gpio_irq_set_type_enabled(struct bcm2835_pinctrl *pc,
+       unsigned offset, unsigned int type)
+{
+       switch (type) {
+       case IRQ_TYPE_NONE:
+               if (pc->irq_type[offset] != type) {
+                       bcm2835_gpio_irq_config(pc, offset, false);
+                       pc->irq_type[offset] = type;
+               }
+               break;
+
+       case IRQ_TYPE_EDGE_RISING:
+               if (pc->irq_type[offset] == IRQ_TYPE_EDGE_BOTH) {
+                       /* RISING already enabled, disable FALLING */
+                       pc->irq_type[offset] = IRQ_TYPE_EDGE_FALLING;
+                       bcm2835_gpio_irq_config(pc, offset, false);
+                       pc->irq_type[offset] = type;
+               } else if (pc->irq_type[offset] != type) {
+                       bcm2835_gpio_irq_config(pc, offset, false);
+                       pc->irq_type[offset] = type;
+                       bcm2835_gpio_irq_config(pc, offset, true);
+               }
+               break;
+
+       case IRQ_TYPE_EDGE_FALLING:
+               if (pc->irq_type[offset] == IRQ_TYPE_EDGE_BOTH) {
+                       /* FALLING already enabled, disable RISING */
+                       pc->irq_type[offset] = IRQ_TYPE_EDGE_RISING;
+                       bcm2835_gpio_irq_config(pc, offset, false);
+                       pc->irq_type[offset] = type;
+               } else if (pc->irq_type[offset] != type) {
+                       bcm2835_gpio_irq_config(pc, offset, false);
+                       pc->irq_type[offset] = type;
+                       bcm2835_gpio_irq_config(pc, offset, true);
+               }
+               break;
+
+       case IRQ_TYPE_EDGE_BOTH:
+               if (pc->irq_type[offset] == IRQ_TYPE_EDGE_RISING) {
+                       /* RISING already enabled, enable FALLING too */
+                       pc->irq_type[offset] = IRQ_TYPE_EDGE_FALLING;
+                       bcm2835_gpio_irq_config(pc, offset, true);
+                       pc->irq_type[offset] = type;
+               } else if (pc->irq_type[offset] == IRQ_TYPE_EDGE_FALLING) {
+                       /* FALLING already enabled, enable RISING too */
+                       pc->irq_type[offset] = IRQ_TYPE_EDGE_RISING;
+                       bcm2835_gpio_irq_config(pc, offset, true);
+                       pc->irq_type[offset] = type;
+               } else if (pc->irq_type[offset] != type) {
+                       bcm2835_gpio_irq_config(pc, offset, false);
+                       pc->irq_type[offset] = type;
+                       bcm2835_gpio_irq_config(pc, offset, true);
+               }
+               break;
+
+       case IRQ_TYPE_LEVEL_HIGH:
+       case IRQ_TYPE_LEVEL_LOW:
+               if (pc->irq_type[offset] != type) {
+                       bcm2835_gpio_irq_config(pc, offset, false);
+                       pc->irq_type[offset] = type;
+                       bcm2835_gpio_irq_config(pc, offset, true);
+               }
+               break;
+
+       default:
+               return -EINVAL;
+       }
+       return 0;
+}
+
+/*
+ * irq_chip set_type: dispatch to the enabled/disabled variant under the
+ * bank lock.  Note the helpers take the full hwirq (gpio) because
+ * pc->irq_type[] is indexed per-gpio, while the enabled bitmap is
+ * indexed by the offset within the bank.
+ */
+static int bcm2835_gpio_irq_set_type(struct irq_data *data, unsigned int type)
+{
+       struct bcm2835_pinctrl *pc = irq_data_get_irq_chip_data(data);
+       unsigned gpio = irqd_to_hwirq(data);
+       unsigned offset = GPIO_REG_SHIFT(gpio);
+       unsigned bank = GPIO_REG_OFFSET(gpio);
+       unsigned long flags;
+       int ret;
+
+       spin_lock_irqsave(&pc->irq_lock[bank], flags);
+
+       if (test_bit(offset, &pc->enabled_irq_map[bank]))
+               ret = __bcm2835_gpio_irq_set_type_enabled(pc, gpio, type);
+       else
+               ret = __bcm2835_gpio_irq_set_type_disabled(pc, gpio, type);
+
+       spin_unlock_irqrestore(&pc->irq_lock[bank], flags);
+
+       return ret;
+}
+
+/* irq_chip for the per-pin GPIO interrupts exposed via the irq domain. */
+static struct irq_chip bcm2835_gpio_irq_chip = {
+       .name = MODULE_NAME,
+       .irq_enable = bcm2835_gpio_irq_enable,
+       .irq_disable = bcm2835_gpio_irq_disable,
+       .irq_set_type = bcm2835_gpio_irq_set_type,
+};
+
+/* Every pin is its own group, so the group count equals the pin count. */
+static int bcm2835_pctl_get_groups_count(struct pinctrl_dev *pctldev)
+{
+       return ARRAY_SIZE(bcm2835_gpio_groups);
+}
+
+/* Group names come straight from the per-pin group-name table. */
+static const char *bcm2835_pctl_get_group_name(struct pinctrl_dev *pctldev,
+               unsigned selector)
+{
+       return bcm2835_gpio_groups[selector];
+}
+
+/* Each group contains exactly one pin: the pin with the same selector. */
+static int bcm2835_pctl_get_group_pins(struct pinctrl_dev *pctldev,
+               unsigned selector,
+               const unsigned **pins,
+               unsigned *num_pins)
+{
+       *num_pins = 1;
+       *pins = &bcm2835_gpio_pins[selector].number;
+
+       return 0;
+}
+
+/* debugfs dump for one pin: function, current level, IRQ and its type. */
+static void bcm2835_pctl_pin_dbg_show(struct pinctrl_dev *pctldev,
+               struct seq_file *s,
+               unsigned offset)
+{
+       struct bcm2835_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+       enum bcm2835_fsel fsel = bcm2835_pinctrl_fsel_get(pc, offset);
+       const char *fname = bcm2835_functions[fsel];
+       int value = bcm2835_gpio_get_bit(pc, GPLEV0, offset);
+       int irq = irq_find_mapping(pc->irq_domain, offset);
+
+       /* "in %s" reports the input level read back from GPLEV */
+       seq_printf(s, "function %s in %s; irq %d (%s)",
+               fname, value ? "hi" : "lo",
+               irq, irq_type_names[pc->irq_type[offset]]);
+}
+
+/*
+ * Free a map array built by dt_node_to_map, including the per-entry
+ * configs arrays allocated for CONFIGS_PIN entries.
+ */
+static void bcm2835_pctl_dt_free_map(struct pinctrl_dev *pctldev,
+               struct pinctrl_map *maps, unsigned num_maps)
+{
+       unsigned i;
+
+       for (i = 0; i < num_maps; i++) {
+               if (maps[i].type == PIN_MAP_TYPE_CONFIGS_PIN)
+                       kfree(maps[i].data.configs.configs);
+       }
+
+       kfree(maps);
+}
+
+/* Emit one MUX_GROUP map entry for pin/function, advancing *maps. */
+static int bcm2835_pctl_dt_node_to_map_func(struct bcm2835_pinctrl *pc,
+               struct device_node *np, u32 pin, u32 fnum,
+               struct pinctrl_map **maps)
+{
+       struct pinctrl_map *map = *maps;
+
+       if (fnum >= ARRAY_SIZE(bcm2835_functions)) {
+               dev_err(pc->dev, "%s: invalid brcm,function %d\n",
+                       of_node_full_name(np), fnum);
+               return -EINVAL;
+       }
+
+       map->type = PIN_MAP_TYPE_MUX_GROUP;
+       map->data.mux.function = bcm2835_functions[fnum];
+       map->data.mux.group = bcm2835_gpio_groups[pin];
+       (*maps)++;
+
+       return 0;
+}
+
+/*
+ * Emit one CONFIGS_PIN map entry carrying the pull setting, advancing
+ * *maps.  The configs array is allocated here; ownership moves to the
+ * map entry and it is freed by bcm2835_pctl_dt_free_map().
+ * Valid pull values are 0..2 (none / down / up per the DT binding).
+ */
+static int bcm2835_pctl_dt_node_to_map_pull(struct bcm2835_pinctrl *pc,
+               struct device_node *np, u32 pin, u32 pull,
+               struct pinctrl_map **maps)
+{
+       struct pinctrl_map *map = *maps;
+       unsigned long *configs;
+
+       if (pull > 2) {
+               dev_err(pc->dev, "%s: invalid brcm,pull %d\n",
+                       of_node_full_name(np), pull);
+               return -EINVAL;
+       }
+
+       configs = kzalloc(sizeof(*configs), GFP_KERNEL);
+       if (!configs)
+               return -ENOMEM;
+       configs[0] = BCM2835_PINCONF_PACK(BCM2835_PINCONF_PARAM_PULL, pull);
+
+       map->type = PIN_MAP_TYPE_CONFIGS_PIN;
+       map->data.configs.group_or_pin = bcm2835_gpio_pins[pin].name;
+       map->data.configs.configs = configs;
+       map->data.configs.num_configs = 1;
+       (*maps)++;
+
+       return 0;
+}
+
+/* Fetch the i-th big-endian u32 cell from a DT property value. */
+static inline u32 prop_u32(struct property *p, int i)
+{
+       const __be32 *cells = p->value;
+
+       return be32_to_cpup(cells + i);
+}
+
+/*
+ * Build a pinctrl map array from a DT node.
+ *
+ * The node must have "brcm,pins" plus at least one of "brcm,function"
+ * and "brcm,pull"; function/pull lists must have either one entry
+ * (applied to all pins) or one entry per pin.  On success the caller
+ * owns *map (freed via bcm2835_pctl_dt_free_map); on error everything
+ * allocated so far is released here, including configs arrays already
+ * attached to map entries — NOTE(review): entries emitted by
+ * ..._map_pull before a later failure keep their configs allocation;
+ * verify they are reachable for freeing (TODO confirm against
+ * dt_free_map semantics).
+ */
+static int bcm2835_pctl_dt_node_to_map(struct pinctrl_dev *pctldev,
+               struct device_node *np,
+               struct pinctrl_map **map, unsigned *num_maps)
+{
+       struct bcm2835_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+       struct property *pins, *funcs, *pulls;
+       int num_pins, num_funcs, num_pulls, maps_per_pin;
+       struct pinctrl_map *maps, *cur_map;
+       int i, err;
+       u32 pin, func, pull;
+
+       pins = of_find_property(np, "brcm,pins", NULL);
+       if (!pins) {
+               dev_err(pc->dev, "%s: missing brcm,pins property\n",
+                               of_node_full_name(np));
+               return -EINVAL;
+       }
+
+       funcs = of_find_property(np, "brcm,function", NULL);
+       pulls = of_find_property(np, "brcm,pull", NULL);
+
+       if (!funcs && !pulls) {
+               dev_err(pc->dev,
+                       "%s: neither brcm,function nor brcm,pull specified\n",
+                       of_node_full_name(np));
+               return -EINVAL;
+       }
+
+       /* property lengths are in bytes; each entry is one 4-byte cell */
+       num_pins = pins->length / 4;
+       num_funcs = funcs ? (funcs->length / 4) : 0;
+       num_pulls = pulls ? (pulls->length / 4) : 0;
+
+       if (num_funcs > 1 && num_funcs != num_pins) {
+               dev_err(pc->dev,
+                       "%s: brcm,function must have 1 or %d entries\n",
+                       of_node_full_name(np), num_pins);
+               return -EINVAL;
+       }
+
+       if (num_pulls > 1 && num_pulls != num_pins) {
+               dev_err(pc->dev,
+                       "%s: brcm,pull must have 1 or %d entries\n",
+                       of_node_full_name(np), num_pins);
+               return -EINVAL;
+       }
+
+       maps_per_pin = 0;
+       if (num_funcs)
+               maps_per_pin++;
+       if (num_pulls)
+               maps_per_pin++;
+       cur_map = maps = kzalloc(num_pins * maps_per_pin * sizeof(*maps),
+                               GFP_KERNEL);
+       if (!maps)
+               return -ENOMEM;
+
+       for (i = 0; i < num_pins; i++) {
+               pin = prop_u32(pins, i);
+               if (pin >= ARRAY_SIZE(bcm2835_gpio_pins)) {
+                       dev_err(pc->dev, "%s: invalid brcm,pins value %d\n",
+                               of_node_full_name(np), pin);
+                       err = -EINVAL;
+                       goto out;
+               }
+
+               /* single-entry lists apply to every pin (index 0) */
+               if (num_funcs) {
+                       func = prop_u32(funcs, (num_funcs > 1) ? i : 0);
+                       err = bcm2835_pctl_dt_node_to_map_func(pc, np, pin,
+                                                       func, &cur_map);
+                       if (err)
+                               goto out;
+               }
+               if (num_pulls) {
+                       pull = prop_u32(pulls, (num_pulls > 1) ? i : 0);
+                       err = bcm2835_pctl_dt_node_to_map_pull(pc, np, pin,
+                                                       pull, &cur_map);
+                       if (err)
+                               goto out;
+               }
+       }
+
+       *map = maps;
+       *num_maps = num_pins * maps_per_pin;
+
+       return 0;
+
+out:
+       kfree(maps);
+       return err;
+}
+
+/* pinctrl core callbacks (group enumeration, debugfs, DT map parsing). */
+static struct pinctrl_ops bcm2835_pctl_ops = {
+       .get_groups_count = bcm2835_pctl_get_groups_count,
+       .get_group_name = bcm2835_pctl_get_group_name,
+       .get_group_pins = bcm2835_pctl_get_group_pins,
+       .pin_dbg_show = bcm2835_pctl_pin_dbg_show,
+       .dt_node_to_map = bcm2835_pctl_dt_node_to_map,
+       .dt_free_map = bcm2835_pctl_dt_free_map,
+};
+
+/* Number of selectable functions (the fsel alternatives). */
+static int bcm2835_pmx_get_functions_count(struct pinctrl_dev *pctldev)
+{
+       return BCM2835_FSEL_COUNT;
+}
+
+/* Function names mirror the fsel names table. */
+static const char *bcm2835_pmx_get_function_name(struct pinctrl_dev *pctldev,
+               unsigned selector)
+{
+       return bcm2835_functions[selector];
+}
+
+/* Report which groups a function can be selected on. */
+static int bcm2835_pmx_get_function_groups(struct pinctrl_dev *pctldev,
+               unsigned selector,
+               const char * const **groups,
+               unsigned * const num_groups)
+{
+       /* every pin can do every function */
+       *groups = bcm2835_gpio_groups;
+       *num_groups = ARRAY_SIZE(bcm2835_gpio_groups);
+
+       return 0;
+}
+
+/*
+ * Mux a function onto a group.  Groups are single pins here, so the
+ * group selector doubles as the pin number passed to fsel_set.
+ */
+static int bcm2835_pmx_enable(struct pinctrl_dev *pctldev,
+               unsigned func_selector,
+               unsigned group_selector)
+{
+       struct bcm2835_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+
+       bcm2835_pinctrl_fsel_set(pc, group_selector, func_selector);
+
+       return 0;
+}
+
+/* Un-mux a group by parking the pin as a GPIO input (safe default). */
+static void bcm2835_pmx_disable(struct pinctrl_dev *pctldev,
+               unsigned func_selector,
+               unsigned group_selector)
+{
+       struct bcm2835_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+
+       /* disable by setting to GPIO_IN */
+       bcm2835_pinctrl_fsel_set(pc, group_selector, BCM2835_FSEL_GPIO_IN);
+}
+
+/* When a GPIO is freed, park the pin as an input as well. */
+static void bcm2835_pmx_gpio_disable_free(struct pinctrl_dev *pctldev,
+               struct pinctrl_gpio_range *range,
+               unsigned offset)
+{
+       struct bcm2835_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+
+       /* disable by setting to GPIO_IN */
+       bcm2835_pinctrl_fsel_set(pc, offset, BCM2835_FSEL_GPIO_IN);
+}
+
+/* Program the pin's fsel for GPIO input or output. */
+static int bcm2835_pmx_gpio_set_direction(struct pinctrl_dev *pctldev,
+               struct pinctrl_gpio_range *range,
+               unsigned offset,
+               bool input)
+{
+       struct bcm2835_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+       enum bcm2835_fsel fsel;
+
+       if (input)
+               fsel = BCM2835_FSEL_GPIO_IN;
+       else
+               fsel = BCM2835_FSEL_GPIO_OUT;
+
+       bcm2835_pinctrl_fsel_set(pc, offset, fsel);
+
+       return 0;
+}
+
+/* pinmux callbacks: function enumeration and GPIO takeover hooks. */
+static struct pinmux_ops bcm2835_pmx_ops = {
+       .get_functions_count = bcm2835_pmx_get_functions_count,
+       .get_function_name = bcm2835_pmx_get_function_name,
+       .get_function_groups = bcm2835_pmx_get_function_groups,
+       .enable = bcm2835_pmx_enable,
+       .disable = bcm2835_pmx_disable,
+       .gpio_disable_free = bcm2835_pmx_gpio_disable_free,
+       .gpio_set_direction = bcm2835_pmx_gpio_set_direction,
+};
+
+/* Pin config readback is not possible on this hardware. */
+static int bcm2835_pinconf_get(struct pinctrl_dev *pctldev,
+                       unsigned pin, unsigned long *config)
+{
+       /* No way to read back config in HW */
+       return -ENOTSUPP;
+}
+
+/*
+ * Apply a packed pin config.  Only the pull parameter is supported;
+ * it is programmed with the GPPUD/GPPUDCLK handshake: latch the pull
+ * value, wait, pulse the clock bit for the target pin, wait, release.
+ */
+static int bcm2835_pinconf_set(struct pinctrl_dev *pctldev,
+                       unsigned pin, unsigned long config)
+{
+       struct bcm2835_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+       enum bcm2835_pinconf_param param = BCM2835_PINCONF_UNPACK_PARAM(config);
+       u16 arg = BCM2835_PINCONF_UNPACK_ARG(config);
+       u32 off, bit;
+
+       if (param != BCM2835_PINCONF_PARAM_PULL)
+               return -EINVAL;
+
+       off = GPIO_REG_OFFSET(pin);
+       bit = GPIO_REG_SHIFT(pin);
+
+       bcm2835_gpio_wr(pc, GPPUD, arg & 3);
+       /*
+        * Docs say to wait 150 cycles, but not of what. We assume a
+        * 1 MHz clock here, which is pretty slow...
+        */
+       udelay(150);
+       bcm2835_gpio_wr(pc, GPPUDCLK0 + (off * 4), BIT(bit));
+       udelay(150);
+       bcm2835_gpio_wr(pc, GPPUDCLK0 + (off * 4), 0);
+
+       return 0;
+}
+
+/*
+ * Pin-config callbacks; referenced only through bcm2835_pinctrl_desc in
+ * this file, so make it static like the sibling ops structures (the
+ * original leaked the symbol into the global namespace).
+ */
+static struct pinconf_ops bcm2835_pinconf_ops = {
+       .pin_config_get = bcm2835_pinconf_get,
+       .pin_config_set = bcm2835_pinconf_set,
+};
+
+/* Top-level pinctrl descriptor registered in probe. */
+static struct pinctrl_desc bcm2835_pinctrl_desc = {
+       .name = MODULE_NAME,
+       .pins = bcm2835_gpio_pins,
+       .npins = ARRAY_SIZE(bcm2835_gpio_pins),
+       .pctlops = &bcm2835_pctl_ops,
+       .pmxops = &bcm2835_pmx_ops,
+       .confops = &bcm2835_pinconf_ops,
+       .owner = THIS_MODULE,
+};
+
+/*
+ * Template GPIO range; probe copies it into pc->gpio_range and fills in
+ * base/gc after the gpio_chip is registered, hence __devinitconst.
+ */
+static struct pinctrl_gpio_range bcm2835_pinctrl_gpio_range __devinitconst = {
+       .name = MODULE_NAME,
+       .npins = BCM2835_NUM_GPIOS,
+};
+
+/*
+ * Probe: map registers, create the IRQ domain and per-pin mappings,
+ * quiesce/clear all bank event state, request the per-bank interrupts,
+ * then register the gpio_chip and the pinctrl device.
+ *
+ * Fix vs. original: pinctrl_register() returns NULL on failure (not an
+ * ERR_PTR), so the old "return PTR_ERR(pc->pctl_dev)" returned 0 —
+ * reporting success with a half-initialized device.  Return -EINVAL.
+ *
+ * NOTE(review): the irq domain and its mappings are not torn down on
+ * the later error paths — TODO confirm whether irq_domain_remove /
+ * irq_dispose_mapping cleanup is needed here.
+ */
+static int __devinit bcm2835_pinctrl_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct device_node *np = dev->of_node;
+       struct bcm2835_pinctrl *pc;
+       struct resource iomem;
+       int err, i;
+
+       BUILD_BUG_ON(ARRAY_SIZE(bcm2835_gpio_pins) != BCM2835_NUM_GPIOS);
+       BUILD_BUG_ON(ARRAY_SIZE(bcm2835_gpio_groups) != BCM2835_NUM_GPIOS);
+
+       pc = devm_kzalloc(dev, sizeof(*pc), GFP_KERNEL);
+       if (!pc)
+               return -ENOMEM;
+
+       platform_set_drvdata(pdev, pc);
+       pc->dev = dev;
+
+       err = of_address_to_resource(np, 0, &iomem);
+       if (err) {
+               dev_err(dev, "could not get IO memory\n");
+               return err;
+       }
+
+       pc->base = devm_request_and_ioremap(&pdev->dev, &iomem);
+       if (!pc->base)
+               return -EADDRNOTAVAIL;
+
+       /* instantiate the template chip for this device */
+       pc->gpio_chip = bcm2835_gpio_chip;
+       pc->gpio_chip.dev = dev;
+       pc->gpio_chip.of_node = np;
+
+       pc->irq_domain = irq_domain_add_linear(np, BCM2835_NUM_GPIOS,
+                       &irq_domain_simple_ops, NULL);
+       if (!pc->irq_domain) {
+               dev_err(dev, "could not create IRQ domain\n");
+               return -ENOMEM;
+       }
+
+       /* pre-create a virtual IRQ for every GPIO */
+       for (i = 0; i < BCM2835_NUM_GPIOS; i++) {
+               int irq = irq_create_mapping(pc->irq_domain, i);
+               irq_set_lockdep_class(irq, &gpio_lock_class);
+               irq_set_chip_and_handler(irq, &bcm2835_gpio_irq_chip,
+                               handle_simple_irq);
+               irq_set_chip_data(irq, pc);
+               set_irq_flags(irq, IRQF_VALID);
+       }
+
+       for (i = 0; i < BCM2835_NUM_BANKS; i++) {
+               unsigned long events;
+               unsigned offset;
+               int len;
+               char *name;
+
+               /* clear event detection flags */
+               bcm2835_gpio_wr(pc, GPREN0 + i * 4, 0);
+               bcm2835_gpio_wr(pc, GPFEN0 + i * 4, 0);
+               bcm2835_gpio_wr(pc, GPHEN0 + i * 4, 0);
+               bcm2835_gpio_wr(pc, GPLEN0 + i * 4, 0);
+               bcm2835_gpio_wr(pc, GPAREN0 + i * 4, 0);
+               bcm2835_gpio_wr(pc, GPAFEN0 + i * 4, 0);
+
+               /* clear all the events */
+               events = bcm2835_gpio_rd(pc, GPEDS0 + i * 4);
+               for_each_set_bit(offset, &events, 32)
+                       bcm2835_gpio_wr(pc, GPEDS0 + i * 4, BIT(offset));
+
+               pc->irq[i] = irq_of_parse_and_map(np, i);
+               pc->irq_data[i].pc = pc;
+               pc->irq_data[i].bank = i;
+               spin_lock_init(&pc->irq_lock[i]);
+
+               len = strlen(dev_name(pc->dev)) + 16;
+               name = devm_kzalloc(pc->dev, len, GFP_KERNEL);
+               if (!name)
+                       return -ENOMEM;
+               snprintf(name, len, "%s:bank%d", dev_name(pc->dev), i);
+
+               err = devm_request_irq(dev, pc->irq[i],
+                       bcm2835_gpio_irq_handler, IRQF_SHARED,
+                       name, &pc->irq_data[i]);
+               if (err) {
+                       dev_err(dev, "unable to request IRQ %d\n", pc->irq[i]);
+                       return err;
+               }
+       }
+
+       err = gpiochip_add(&pc->gpio_chip);
+       if (err) {
+               dev_err(dev, "could not add GPIO chip\n");
+               return err;
+       }
+
+       pc->pctl_dev = pinctrl_register(&bcm2835_pinctrl_desc, dev, pc);
+       if (!pc->pctl_dev) {
+               /*
+                * pinctrl_register() returns NULL on failure, so PTR_ERR()
+                * here would have returned 0 (success).
+                */
+               gpiochip_remove(&pc->gpio_chip);
+               return -EINVAL;
+       }
+
+       pc->gpio_range = bcm2835_pinctrl_gpio_range;
+       pc->gpio_range.base = pc->gpio_chip.base;
+       pc->gpio_range.gc = &pc->gpio_chip;
+       pinctrl_add_gpio_range(pc->pctl_dev, &pc->gpio_range);
+
+       return 0;
+}
+
+/*
+ * Remove: unregister in reverse order of probe.
+ * NOTE(review): the gpiochip_remove() return value is ignored — confirm
+ * whether failure (pins still requested) needs handling here.
+ */
+static int __devexit bcm2835_pinctrl_remove(struct platform_device *pdev)
+{
+       struct bcm2835_pinctrl *pc = platform_get_drvdata(pdev);
+
+       pinctrl_unregister(pc->pctl_dev);
+       gpiochip_remove(&pc->gpio_chip);
+
+       return 0;
+}
+
+/* Device-tree match table; also exported for module autoloading. */
+static struct of_device_id bcm2835_pinctrl_match[] __devinitconst = {
+       { .compatible = "brcm,bcm2835-gpio" },
+       {}
+};
+MODULE_DEVICE_TABLE(of, bcm2835_pinctrl_match);
+
+/* Platform driver glue; module_platform_driver generates init/exit. */
+static struct platform_driver bcm2835_pinctrl_driver = {
+       .probe = bcm2835_pinctrl_probe,
+       .remove = bcm2835_pinctrl_remove,
+       .driver = {
+               .name = MODULE_NAME,
+               .owner = THIS_MODULE,
+               .of_match_table = bcm2835_pinctrl_match,
+       },
+};
+module_platform_driver(bcm2835_pinctrl_driver);
+
+MODULE_AUTHOR("Chris Boot, Simon Arlott, Stephen Warren");
+MODULE_DESCRIPTION("BCM2835 Pin control driver");
+MODULE_LICENSE("GPL");
index 44e97265cd7dc59dd4658175ec394e9c0d0a2b23..63866d95357ddb9692ed7692f7e682424199b9e6 100644 (file)
@@ -432,7 +432,7 @@ static int __devinit imx_pinctrl_parse_groups(struct device_node *np,
 {
        unsigned int pin_func_id;
        int ret, size;
-       const const __be32 *list;
+       const __be32 *list;
        int i, j;
        u32 config;
 
diff --git a/drivers/pinctrl/pinctrl-imx35.c b/drivers/pinctrl/pinctrl-imx35.c
new file mode 100644 (file)
index 0000000..82f109e
--- /dev/null
@@ -0,0 +1,1595 @@
+/*
+ * imx35 pinctrl driver.
+ *
+ * This driver was mostly copied from the imx51 pinctrl driver which has:
+ *
+ * Copyright (C) 2012 Freescale Semiconductor, Inc.
+ * Copyright (C) 2012 Linaro, Inc.
+ *
+ * Author: Dong Aisheng <dong.aisheng@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-imx.h"
+
+/*
+ * Symbolic pad numbers for the imx35 pinmux tables.  The values are a
+ * dense 0..282 sequence; the explicit "= N" initializers document each
+ * pad's pin number, which the register tables below refer to.  Do not
+ * reorder or renumber.
+ */
+enum imx35_pads {
+       MX35_PAD_CAPTURE = 0,
+       MX35_PAD_COMPARE = 1,
+       MX35_PAD_WDOG_RST = 2,
+       MX35_PAD_GPIO1_0 = 3,
+       MX35_PAD_GPIO1_1 = 4,
+       MX35_PAD_GPIO2_0 = 5,
+       MX35_PAD_GPIO3_0 = 6,
+       MX35_PAD_RESET_IN_B = 7,
+       MX35_PAD_POR_B = 8,
+       MX35_PAD_CLKO = 9,
+       MX35_PAD_BOOT_MODE0 = 10,
+       MX35_PAD_BOOT_MODE1 = 11,
+       MX35_PAD_CLK_MODE0 = 12,
+       MX35_PAD_CLK_MODE1 = 13,
+       MX35_PAD_POWER_FAIL = 14,
+       MX35_PAD_VSTBY = 15,
+       MX35_PAD_A0 = 16,
+       MX35_PAD_A1 = 17,
+       MX35_PAD_A2 = 18,
+       MX35_PAD_A3 = 19,
+       MX35_PAD_A4 = 20,
+       MX35_PAD_A5 = 21,
+       MX35_PAD_A6 = 22,
+       MX35_PAD_A7 = 23,
+       MX35_PAD_A8 = 24,
+       MX35_PAD_A9 = 25,
+       MX35_PAD_A10 = 26,
+       MX35_PAD_MA10 = 27,
+       MX35_PAD_A11 = 28,
+       MX35_PAD_A12 = 29,
+       MX35_PAD_A13 = 30,
+       MX35_PAD_A14 = 31,
+       MX35_PAD_A15 = 32,
+       MX35_PAD_A16 = 33,
+       MX35_PAD_A17 = 34,
+       MX35_PAD_A18 = 35,
+       MX35_PAD_A19 = 36,
+       MX35_PAD_A20 = 37,
+       MX35_PAD_A21 = 38,
+       MX35_PAD_A22 = 39,
+       MX35_PAD_A23 = 40,
+       MX35_PAD_A24 = 41,
+       MX35_PAD_A25 = 42,
+       MX35_PAD_SDBA1 = 43,
+       MX35_PAD_SDBA0 = 44,
+       MX35_PAD_SD0 = 45,
+       MX35_PAD_SD1 = 46,
+       MX35_PAD_SD2 = 47,
+       MX35_PAD_SD3 = 48,
+       MX35_PAD_SD4 = 49,
+       MX35_PAD_SD5 = 50,
+       MX35_PAD_SD6 = 51,
+       MX35_PAD_SD7 = 52,
+       MX35_PAD_SD8 = 53,
+       MX35_PAD_SD9 = 54,
+       MX35_PAD_SD10 = 55,
+       MX35_PAD_SD11 = 56,
+       MX35_PAD_SD12 = 57,
+       MX35_PAD_SD13 = 58,
+       MX35_PAD_SD14 = 59,
+       MX35_PAD_SD15 = 60,
+       MX35_PAD_SD16 = 61,
+       MX35_PAD_SD17 = 62,
+       MX35_PAD_SD18 = 63,
+       MX35_PAD_SD19 = 64,
+       MX35_PAD_SD20 = 65,
+       MX35_PAD_SD21 = 66,
+       MX35_PAD_SD22 = 67,
+       MX35_PAD_SD23 = 68,
+       MX35_PAD_SD24 = 69,
+       MX35_PAD_SD25 = 70,
+       MX35_PAD_SD26 = 71,
+       MX35_PAD_SD27 = 72,
+       MX35_PAD_SD28 = 73,
+       MX35_PAD_SD29 = 74,
+       MX35_PAD_SD30 = 75,
+       MX35_PAD_SD31 = 76,
+       MX35_PAD_DQM0 = 77,
+       MX35_PAD_DQM1 = 78,
+       MX35_PAD_DQM2 = 79,
+       MX35_PAD_DQM3 = 80,
+       MX35_PAD_EB0 = 81,
+       MX35_PAD_EB1 = 82,
+       MX35_PAD_OE = 83,
+       MX35_PAD_CS0 = 84,
+       MX35_PAD_CS1 = 85,
+       MX35_PAD_CS2 = 86,
+       MX35_PAD_CS3 = 87,
+       MX35_PAD_CS4 = 88,
+       MX35_PAD_CS5 = 89,
+       MX35_PAD_NF_CE0 = 90,
+       MX35_PAD_ECB = 91,
+       MX35_PAD_LBA = 92,
+       MX35_PAD_BCLK = 93,
+       MX35_PAD_RW = 94,
+       MX35_PAD_RAS = 95,
+       MX35_PAD_CAS = 96,
+       MX35_PAD_SDWE = 97,
+       MX35_PAD_SDCKE0 = 98,
+       MX35_PAD_SDCKE1 = 99,
+       MX35_PAD_SDCLK = 100,
+       MX35_PAD_SDQS0 = 101,
+       MX35_PAD_SDQS1 = 102,
+       MX35_PAD_SDQS2 = 103,
+       MX35_PAD_SDQS3 = 104,
+       MX35_PAD_NFWE_B = 105,
+       MX35_PAD_NFRE_B = 106,
+       MX35_PAD_NFALE = 107,
+       MX35_PAD_NFCLE = 108,
+       MX35_PAD_NFWP_B = 109,
+       MX35_PAD_NFRB = 110,
+       MX35_PAD_D15 = 111,
+       MX35_PAD_D14 = 112,
+       MX35_PAD_D13 = 113,
+       MX35_PAD_D12 = 114,
+       MX35_PAD_D11 = 115,
+       MX35_PAD_D10 = 116,
+       MX35_PAD_D9 = 117,
+       MX35_PAD_D8 = 118,
+       MX35_PAD_D7 = 119,
+       MX35_PAD_D6 = 120,
+       MX35_PAD_D5 = 121,
+       MX35_PAD_D4 = 122,
+       MX35_PAD_D3 = 123,
+       MX35_PAD_D2 = 124,
+       MX35_PAD_D1 = 125,
+       MX35_PAD_D0 = 126,
+       MX35_PAD_CSI_D8 = 127,
+       MX35_PAD_CSI_D9 = 128,
+       MX35_PAD_CSI_D10 = 129,
+       MX35_PAD_CSI_D11 = 130,
+       MX35_PAD_CSI_D12 = 131,
+       MX35_PAD_CSI_D13 = 132,
+       MX35_PAD_CSI_D14 = 133,
+       MX35_PAD_CSI_D15 = 134,
+       MX35_PAD_CSI_MCLK = 135,
+       MX35_PAD_CSI_VSYNC = 136,
+       MX35_PAD_CSI_HSYNC = 137,
+       MX35_PAD_CSI_PIXCLK = 138,
+       MX35_PAD_I2C1_CLK = 139,
+       MX35_PAD_I2C1_DAT = 140,
+       MX35_PAD_I2C2_CLK = 141,
+       MX35_PAD_I2C2_DAT = 142,
+       MX35_PAD_STXD4 = 143,
+       MX35_PAD_SRXD4 = 144,
+       MX35_PAD_SCK4 = 145,
+       MX35_PAD_STXFS4 = 146,
+       MX35_PAD_STXD5 = 147,
+       MX35_PAD_SRXD5 = 148,
+       MX35_PAD_SCK5 = 149,
+       MX35_PAD_STXFS5 = 150,
+       MX35_PAD_SCKR = 151,
+       MX35_PAD_FSR = 152,
+       MX35_PAD_HCKR = 153,
+       MX35_PAD_SCKT = 154,
+       MX35_PAD_FST = 155,
+       MX35_PAD_HCKT = 156,
+       MX35_PAD_TX5_RX0 = 157,
+       MX35_PAD_TX4_RX1 = 158,
+       MX35_PAD_TX3_RX2 = 159,
+       MX35_PAD_TX2_RX3 = 160,
+       MX35_PAD_TX1 = 161,
+       MX35_PAD_TX0 = 162,
+       MX35_PAD_CSPI1_MOSI = 163,
+       MX35_PAD_CSPI1_MISO = 164,
+       MX35_PAD_CSPI1_SS0 = 165,
+       MX35_PAD_CSPI1_SS1 = 166,
+       MX35_PAD_CSPI1_SCLK = 167,
+       MX35_PAD_CSPI1_SPI_RDY = 168,
+       MX35_PAD_RXD1 = 169,
+       MX35_PAD_TXD1 = 170,
+       MX35_PAD_RTS1 = 171,
+       MX35_PAD_CTS1 = 172,
+       MX35_PAD_RXD2 = 173,
+       MX35_PAD_TXD2 = 174,
+       MX35_PAD_RTS2 = 175,
+       MX35_PAD_CTS2 = 176,
+       MX35_PAD_RTCK = 177,
+       MX35_PAD_TCK = 178,
+       MX35_PAD_TMS = 179,
+       MX35_PAD_TDI = 180,
+       MX35_PAD_TDO = 181,
+       MX35_PAD_TRSTB = 182,
+       MX35_PAD_DE_B = 183,
+       MX35_PAD_SJC_MOD = 184,
+       MX35_PAD_USBOTG_PWR = 185,
+       MX35_PAD_USBOTG_OC = 186,
+       MX35_PAD_LD0 = 187,
+       MX35_PAD_LD1 = 188,
+       MX35_PAD_LD2 = 189,
+       MX35_PAD_LD3 = 190,
+       MX35_PAD_LD4 = 191,
+       MX35_PAD_LD5 = 192,
+       MX35_PAD_LD6 = 193,
+       MX35_PAD_LD7 = 194,
+       MX35_PAD_LD8 = 195,
+       MX35_PAD_LD9 = 196,
+       MX35_PAD_LD10 = 197,
+       MX35_PAD_LD11 = 198,
+       MX35_PAD_LD12 = 199,
+       MX35_PAD_LD13 = 200,
+       MX35_PAD_LD14 = 201,
+       MX35_PAD_LD15 = 202,
+       MX35_PAD_LD16 = 203,
+       MX35_PAD_LD17 = 204,
+       MX35_PAD_LD18 = 205,
+       MX35_PAD_LD19 = 206,
+       MX35_PAD_LD20 = 207,
+       MX35_PAD_LD21 = 208,
+       MX35_PAD_LD22 = 209,
+       MX35_PAD_LD23 = 210,
+       MX35_PAD_D3_HSYNC = 211,
+       MX35_PAD_D3_FPSHIFT = 212,
+       MX35_PAD_D3_DRDY = 213,
+       MX35_PAD_CONTRAST = 214,
+       MX35_PAD_D3_VSYNC = 215,
+       MX35_PAD_D3_REV = 216,
+       MX35_PAD_D3_CLS = 217,
+       MX35_PAD_D3_SPL = 218,
+       MX35_PAD_SD1_CMD = 219,
+       MX35_PAD_SD1_CLK = 220,
+       MX35_PAD_SD1_DATA0 = 221,
+       MX35_PAD_SD1_DATA1 = 222,
+       MX35_PAD_SD1_DATA2 = 223,
+       MX35_PAD_SD1_DATA3 = 224,
+       MX35_PAD_SD2_CMD = 225,
+       MX35_PAD_SD2_CLK = 226,
+       MX35_PAD_SD2_DATA0 = 227,
+       MX35_PAD_SD2_DATA1 = 228,
+       MX35_PAD_SD2_DATA2 = 229,
+       MX35_PAD_SD2_DATA3 = 230,
+       MX35_PAD_ATA_CS0 = 231,
+       MX35_PAD_ATA_CS1 = 232,
+       MX35_PAD_ATA_DIOR = 233,
+       MX35_PAD_ATA_DIOW = 234,
+       MX35_PAD_ATA_DMACK = 235,
+       MX35_PAD_ATA_RESET_B = 236,
+       MX35_PAD_ATA_IORDY = 237,
+       MX35_PAD_ATA_DATA0 = 238,
+       MX35_PAD_ATA_DATA1 = 239,
+       MX35_PAD_ATA_DATA2 = 240,
+       MX35_PAD_ATA_DATA3 = 241,
+       MX35_PAD_ATA_DATA4 = 242,
+       MX35_PAD_ATA_DATA5 = 243,
+       MX35_PAD_ATA_DATA6 = 244,
+       MX35_PAD_ATA_DATA7 = 245,
+       MX35_PAD_ATA_DATA8 = 246,
+       MX35_PAD_ATA_DATA9 = 247,
+       MX35_PAD_ATA_DATA10 = 248,
+       MX35_PAD_ATA_DATA11 = 249,
+       MX35_PAD_ATA_DATA12 = 250,
+       MX35_PAD_ATA_DATA13 = 251,
+       MX35_PAD_ATA_DATA14 = 252,
+       MX35_PAD_ATA_DATA15 = 253,
+       MX35_PAD_ATA_INTRQ = 254,
+       MX35_PAD_ATA_BUFF_EN = 255,
+       MX35_PAD_ATA_DMARQ = 256,
+       MX35_PAD_ATA_DA0 = 257,
+       MX35_PAD_ATA_DA1 = 258,
+       MX35_PAD_ATA_DA2 = 259,
+       MX35_PAD_MLB_CLK = 260,
+       MX35_PAD_MLB_DAT = 261,
+       MX35_PAD_MLB_SIG = 262,
+       MX35_PAD_FEC_TX_CLK = 263,
+       MX35_PAD_FEC_RX_CLK = 264,
+       MX35_PAD_FEC_RX_DV = 265,
+       MX35_PAD_FEC_COL = 266,
+       MX35_PAD_FEC_RDATA0 = 267,
+       MX35_PAD_FEC_TDATA0 = 268,
+       MX35_PAD_FEC_TX_EN = 269,
+       MX35_PAD_FEC_MDC = 270,
+       MX35_PAD_FEC_MDIO = 271,
+       MX35_PAD_FEC_TX_ERR = 272,
+       MX35_PAD_FEC_RX_ERR = 273,
+       MX35_PAD_FEC_CRS = 274,
+       MX35_PAD_FEC_RDATA1 = 275,
+       MX35_PAD_FEC_TDATA1 = 276,
+       MX35_PAD_FEC_RDATA2 = 277,
+       MX35_PAD_FEC_TDATA2 = 278,
+       MX35_PAD_FEC_RDATA3 = 279,
+       MX35_PAD_FEC_TDATA3 = 280,
+       MX35_PAD_EXT_ARMCLK = 281,
+       MX35_PAD_TEST_MODE = 282,
+};
+
+/* imx35 register maps */
+static struct imx_pin_reg imx35_pin_regs[] = {
+       [0] = IMX_PIN_REG(MX35_PAD_CAPTURE, 0x328, 0x004, 0, 0x0, 0), /* MX35_PAD_CAPTURE__GPT_CAPIN1 */
+       [1] = IMX_PIN_REG(MX35_PAD_CAPTURE, 0x328, 0x004, 1, 0x0, 0), /* MX35_PAD_CAPTURE__GPT_CMPOUT2 */
+       [2] = IMX_PIN_REG(MX35_PAD_CAPTURE, 0x328, 0x004, 2, 0x7f4, 0), /* MX35_PAD_CAPTURE__CSPI2_SS1 */
+       [3] = IMX_PIN_REG(MX35_PAD_CAPTURE, 0x328, 0x004, 3, 0x0, 0), /* MX35_PAD_CAPTURE__EPIT1_EPITO */
+       [4] = IMX_PIN_REG(MX35_PAD_CAPTURE, 0x328, 0x004, 4, 0x7d0, 0), /* MX35_PAD_CAPTURE__CCM_CLK32K */
+       [5] = IMX_PIN_REG(MX35_PAD_CAPTURE, 0x328, 0x004, 5, 0x850, 0), /* MX35_PAD_CAPTURE__GPIO1_4 */
+       [6] = IMX_PIN_REG(MX35_PAD_COMPARE, 0x32c, 0x008, 0, 0x0, 0), /* MX35_PAD_COMPARE__GPT_CMPOUT1 */
+       [7] = IMX_PIN_REG(MX35_PAD_COMPARE, 0x32c, 0x008, 1, 0x0, 0), /* MX35_PAD_COMPARE__GPT_CAPIN2 */
+       [8] = IMX_PIN_REG(MX35_PAD_COMPARE, 0x32c, 0x008, 2, 0x0, 0), /* MX35_PAD_COMPARE__GPT_CMPOUT3 */
+       [9] = IMX_PIN_REG(MX35_PAD_COMPARE, 0x32c, 0x008, 3, 0x0, 0), /* MX35_PAD_COMPARE__EPIT2_EPITO */
+       [10] = IMX_PIN_REG(MX35_PAD_COMPARE, 0x32c, 0x008, 5, 0x854, 0), /* MX35_PAD_COMPARE__GPIO1_5 */
+       [11] = IMX_PIN_REG(MX35_PAD_COMPARE, 0x32c, 0x008, 7, 0x0, 0), /* MX35_PAD_COMPARE__SDMA_EXTDMA_2 */
+       [12] = IMX_PIN_REG(MX35_PAD_WDOG_RST, 0x330, 0x00c, 0, 0x0, 0), /* MX35_PAD_WDOG_RST__WDOG_WDOG_B */
+       [13] = IMX_PIN_REG(MX35_PAD_WDOG_RST, 0x330, 0x00c, 3, 0x0, 0), /* MX35_PAD_WDOG_RST__IPU_FLASH_STROBE */
+       [14] = IMX_PIN_REG(MX35_PAD_WDOG_RST, 0x330, 0x00c, 5, 0x858, 0), /* MX35_PAD_WDOG_RST__GPIO1_6 */
+       [15] = IMX_PIN_REG(MX35_PAD_GPIO1_0, 0x334, 0x010, 0, 0x82c, 0), /* MX35_PAD_GPIO1_0__GPIO1_0 */
+       [16] = IMX_PIN_REG(MX35_PAD_GPIO1_0, 0x334, 0x010, 1, 0x7d4, 0), /* MX35_PAD_GPIO1_0__CCM_PMIC_RDY */
+       [17] = IMX_PIN_REG(MX35_PAD_GPIO1_0, 0x334, 0x010, 2, 0x990, 0), /* MX35_PAD_GPIO1_0__OWIRE_LINE */
+       [18] = IMX_PIN_REG(MX35_PAD_GPIO1_0, 0x334, 0x010, 7, 0x0, 0), /* MX35_PAD_GPIO1_0__SDMA_EXTDMA_0 */
+       [19] = IMX_PIN_REG(MX35_PAD_GPIO1_1, 0x338, 0x014, 0, 0x838, 0), /* MX35_PAD_GPIO1_1__GPIO1_1 */
+       [20] = IMX_PIN_REG(MX35_PAD_GPIO1_1, 0x338, 0x014, 2, 0x0, 0), /* MX35_PAD_GPIO1_1__PWM_PWMO */
+       [21] = IMX_PIN_REG(MX35_PAD_GPIO1_1, 0x338, 0x014, 3, 0x7d8, 0), /* MX35_PAD_GPIO1_1__CSPI1_SS2 */
+       [22] = IMX_PIN_REG(MX35_PAD_GPIO1_1, 0x338, 0x014, 6, 0x0, 0), /* MX35_PAD_GPIO1_1__SCC_TAMPER_DETECT */
+       [23] = IMX_PIN_REG(MX35_PAD_GPIO1_1, 0x338, 0x014, 7, 0x0, 0), /* MX35_PAD_GPIO1_1__SDMA_EXTDMA_1 */
+       [24] = IMX_PIN_REG(MX35_PAD_GPIO2_0, 0x33c, 0x018, 0, 0x868, 0), /* MX35_PAD_GPIO2_0__GPIO2_0 */
+       [25] = IMX_PIN_REG(MX35_PAD_GPIO2_0, 0x33c, 0x018, 1, 0x0, 0), /* MX35_PAD_GPIO2_0__USB_TOP_USBOTG_CLK */
+       [26] = IMX_PIN_REG(MX35_PAD_GPIO3_0, 0x340, 0x01c, 0, 0x8e8, 0), /* MX35_PAD_GPIO3_0__GPIO3_0 */
+       [27] = IMX_PIN_REG(MX35_PAD_GPIO3_0, 0x340, 0x01c, 1, 0x0, 0), /* MX35_PAD_GPIO3_0__USB_TOP_USBH2_CLK */
+       [28] = IMX_PIN_REG(MX35_PAD_RESET_IN_B, 0x344, 0x0, 0, 0x0, 0), /* MX35_PAD_RESET_IN_B__CCM_RESET_IN_B */
+       [29] = IMX_PIN_REG(MX35_PAD_POR_B, 0x348, 0x0, 0, 0x0, 0), /* MX35_PAD_POR_B__CCM_POR_B */
+       [30] = IMX_PIN_REG(MX35_PAD_CLKO, 0x34c, 0x020, 0, 0x0, 0), /* MX35_PAD_CLKO__CCM_CLKO */
+       [31] = IMX_PIN_REG(MX35_PAD_CLKO, 0x34c, 0x020, 5, 0x860, 0), /* MX35_PAD_CLKO__GPIO1_8 */
+       [32] = IMX_PIN_REG(MX35_PAD_BOOT_MODE0, 0x350, 0x0, 0, 0x0, 0), /* MX35_PAD_BOOT_MODE0__CCM_BOOT_MODE_0 */
+       [33] = IMX_PIN_REG(MX35_PAD_BOOT_MODE1, 0x354, 0x0, 0, 0x0, 0), /* MX35_PAD_BOOT_MODE1__CCM_BOOT_MODE_1 */
+       [34] = IMX_PIN_REG(MX35_PAD_CLK_MODE0, 0x358, 0x0, 0, 0x0, 0), /* MX35_PAD_CLK_MODE0__CCM_CLK_MODE_0 */
+       [35] = IMX_PIN_REG(MX35_PAD_CLK_MODE1, 0x35c, 0x0, 0, 0x0, 0), /* MX35_PAD_CLK_MODE1__CCM_CLK_MODE_1 */
+       [36] = IMX_PIN_REG(MX35_PAD_POWER_FAIL, 0x360, 0x0, 0, 0x0, 0), /* MX35_PAD_POWER_FAIL__CCM_DSM_WAKEUP_INT_26 */
+       [37] = IMX_PIN_REG(MX35_PAD_VSTBY, 0x364, 0x024, 0, 0x0, 0), /* MX35_PAD_VSTBY__CCM_VSTBY */
+       [38] = IMX_PIN_REG(MX35_PAD_VSTBY, 0x364, 0x024, 5, 0x85c, 0), /* MX35_PAD_VSTBY__GPIO1_7 */
+       [39] = IMX_PIN_REG(MX35_PAD_A0, 0x368, 0x028, 0, 0x0, 0), /* MX35_PAD_A0__EMI_EIM_DA_L_0 */
+       [40] = IMX_PIN_REG(MX35_PAD_A1, 0x36c, 0x02c, 0, 0x0, 0), /* MX35_PAD_A1__EMI_EIM_DA_L_1 */
+       [41] = IMX_PIN_REG(MX35_PAD_A2, 0x370, 0x030, 0, 0x0, 0), /* MX35_PAD_A2__EMI_EIM_DA_L_2 */
+       [42] = IMX_PIN_REG(MX35_PAD_A3, 0x374, 0x034, 0, 0x0, 0), /* MX35_PAD_A3__EMI_EIM_DA_L_3 */
+       [43] = IMX_PIN_REG(MX35_PAD_A4, 0x378, 0x038, 0, 0x0, 0), /* MX35_PAD_A4__EMI_EIM_DA_L_4 */
+       [44] = IMX_PIN_REG(MX35_PAD_A5, 0x37c, 0x03c, 0, 0x0, 0), /* MX35_PAD_A5__EMI_EIM_DA_L_5 */
+       [45] = IMX_PIN_REG(MX35_PAD_A6, 0x380, 0x040, 0, 0x0, 0), /* MX35_PAD_A6__EMI_EIM_DA_L_6 */
+       [46] = IMX_PIN_REG(MX35_PAD_A7, 0x384, 0x044, 0, 0x0, 0), /* MX35_PAD_A7__EMI_EIM_DA_L_7 */
+       [47] = IMX_PIN_REG(MX35_PAD_A8, 0x388, 0x048, 0, 0x0, 0), /* MX35_PAD_A8__EMI_EIM_DA_H_8 */
+       [48] = IMX_PIN_REG(MX35_PAD_A9, 0x38c, 0x04c, 0, 0x0, 0), /* MX35_PAD_A9__EMI_EIM_DA_H_9 */
+       [49] = IMX_PIN_REG(MX35_PAD_A10, 0x390, 0x050, 0, 0x0, 0), /* MX35_PAD_A10__EMI_EIM_DA_H_10 */
+       [50] = IMX_PIN_REG(MX35_PAD_MA10, 0x394, 0x054, 0, 0x0, 0), /* MX35_PAD_MA10__EMI_MA10 */
+       [51] = IMX_PIN_REG(MX35_PAD_A11, 0x398, 0x058, 0, 0x0, 0), /* MX35_PAD_A11__EMI_EIM_DA_H_11 */
+       [52] = IMX_PIN_REG(MX35_PAD_A12, 0x39c, 0x05c, 0, 0x0, 0), /* MX35_PAD_A12__EMI_EIM_DA_H_12 */
+       [53] = IMX_PIN_REG(MX35_PAD_A13, 0x3a0, 0x060, 0, 0x0, 0), /* MX35_PAD_A13__EMI_EIM_DA_H_13 */
+       [54] = IMX_PIN_REG(MX35_PAD_A14, 0x3a4, 0x064, 0, 0x0, 0), /* MX35_PAD_A14__EMI_EIM_DA_H2_14 */
+       [55] = IMX_PIN_REG(MX35_PAD_A15, 0x3a8, 0x068, 0, 0x0, 0), /* MX35_PAD_A15__EMI_EIM_DA_H2_15 */
+       [56] = IMX_PIN_REG(MX35_PAD_A16, 0x3ac, 0x06c, 0, 0x0, 0), /* MX35_PAD_A16__EMI_EIM_A_16 */
+       [57] = IMX_PIN_REG(MX35_PAD_A17, 0x3b0, 0x070, 0, 0x0, 0), /* MX35_PAD_A17__EMI_EIM_A_17 */
+       [58] = IMX_PIN_REG(MX35_PAD_A18, 0x3b4, 0x074, 0, 0x0, 0), /* MX35_PAD_A18__EMI_EIM_A_18 */
+       [59] = IMX_PIN_REG(MX35_PAD_A19, 0x3b8, 0x078, 0, 0x0, 0), /* MX35_PAD_A19__EMI_EIM_A_19 */
+       [60] = IMX_PIN_REG(MX35_PAD_A20, 0x3bc, 0x07c, 0, 0x0, 0), /* MX35_PAD_A20__EMI_EIM_A_20 */
+       [61] = IMX_PIN_REG(MX35_PAD_A21, 0x3c0, 0x080, 0, 0x0, 0), /* MX35_PAD_A21__EMI_EIM_A_21 */
+       [62] = IMX_PIN_REG(MX35_PAD_A22, 0x3c4, 0x084, 0, 0x0, 0), /* MX35_PAD_A22__EMI_EIM_A_22 */
+       [63] = IMX_PIN_REG(MX35_PAD_A23, 0x3c8, 0x088, 0, 0x0, 0), /* MX35_PAD_A23__EMI_EIM_A_23 */
+       [64] = IMX_PIN_REG(MX35_PAD_A24, 0x3cc, 0x08c, 0, 0x0, 0), /* MX35_PAD_A24__EMI_EIM_A_24 */
+       [65] = IMX_PIN_REG(MX35_PAD_A25, 0x3d0, 0x090, 0, 0x0, 0), /* MX35_PAD_A25__EMI_EIM_A_25 */
+       [66] = IMX_PIN_REG(MX35_PAD_SDBA1, 0x3d4, 0x0, 0, 0x0, 0), /* MX35_PAD_SDBA1__EMI_EIM_SDBA1 */
+       [67] = IMX_PIN_REG(MX35_PAD_SDBA0, 0x3d8, 0x0, 0, 0x0, 0), /* MX35_PAD_SDBA0__EMI_EIM_SDBA0 */
+       [68] = IMX_PIN_REG(MX35_PAD_SD0, 0x3dc, 0x0, 0, 0x0, 0), /* MX35_PAD_SD0__EMI_DRAM_D_0 */
+       [69] = IMX_PIN_REG(MX35_PAD_SD1, 0x3e0, 0x0, 0, 0x0, 0), /* MX35_PAD_SD1__EMI_DRAM_D_1 */
+       [70] = IMX_PIN_REG(MX35_PAD_SD2, 0x3e4, 0x0, 0, 0x0, 0), /* MX35_PAD_SD2__EMI_DRAM_D_2 */
+       [71] = IMX_PIN_REG(MX35_PAD_SD3, 0x3e8, 0x0, 0, 0x0, 0), /* MX35_PAD_SD3__EMI_DRAM_D_3 */
+       [72] = IMX_PIN_REG(MX35_PAD_SD4, 0x3ec, 0x0, 0, 0x0, 0), /* MX35_PAD_SD4__EMI_DRAM_D_4 */
+       [73] = IMX_PIN_REG(MX35_PAD_SD5, 0x3f0, 0x0, 0, 0x0, 0), /* MX35_PAD_SD5__EMI_DRAM_D_5 */
+       [74] = IMX_PIN_REG(MX35_PAD_SD6, 0x3f4, 0x0, 0, 0x0, 0), /* MX35_PAD_SD6__EMI_DRAM_D_6 */
+       [75] = IMX_PIN_REG(MX35_PAD_SD7, 0x3f8, 0x0, 0, 0x0, 0), /* MX35_PAD_SD7__EMI_DRAM_D_7 */
+       [76] = IMX_PIN_REG(MX35_PAD_SD8, 0x3fc, 0x0, 0, 0x0, 0), /* MX35_PAD_SD8__EMI_DRAM_D_8 */
+       [77] = IMX_PIN_REG(MX35_PAD_SD9, 0x400, 0x0, 0, 0x0, 0), /* MX35_PAD_SD9__EMI_DRAM_D_9 */
+       [78] = IMX_PIN_REG(MX35_PAD_SD10, 0x404, 0x0, 0, 0x0, 0), /* MX35_PAD_SD10__EMI_DRAM_D_10 */
+       [79] = IMX_PIN_REG(MX35_PAD_SD11, 0x408, 0x0, 0, 0x0, 0), /* MX35_PAD_SD11__EMI_DRAM_D_11 */
+       [80] = IMX_PIN_REG(MX35_PAD_SD12, 0x40c, 0x0, 0, 0x0, 0), /* MX35_PAD_SD12__EMI_DRAM_D_12 */
+       [81] = IMX_PIN_REG(MX35_PAD_SD13, 0x410, 0x0, 0, 0x0, 0), /* MX35_PAD_SD13__EMI_DRAM_D_13 */
+       [82] = IMX_PIN_REG(MX35_PAD_SD14, 0x414, 0x0, 0, 0x0, 0), /* MX35_PAD_SD14__EMI_DRAM_D_14 */
+       [83] = IMX_PIN_REG(MX35_PAD_SD15, 0x418, 0x0, 0, 0x0, 0), /* MX35_PAD_SD15__EMI_DRAM_D_15 */
+       [84] = IMX_PIN_REG(MX35_PAD_SD16, 0x41c, 0x0, 0, 0x0, 0), /* MX35_PAD_SD16__EMI_DRAM_D_16 */
+       [85] = IMX_PIN_REG(MX35_PAD_SD17, 0x420, 0x0, 0, 0x0, 0), /* MX35_PAD_SD17__EMI_DRAM_D_17 */
+       [86] = IMX_PIN_REG(MX35_PAD_SD18, 0x424, 0x0, 0, 0x0, 0), /* MX35_PAD_SD18__EMI_DRAM_D_18 */
+       [87] = IMX_PIN_REG(MX35_PAD_SD19, 0x428, 0x0, 0, 0x0, 0), /* MX35_PAD_SD19__EMI_DRAM_D_19 */
+       [88] = IMX_PIN_REG(MX35_PAD_SD20, 0x42c, 0x0, 0, 0x0, 0), /* MX35_PAD_SD20__EMI_DRAM_D_20 */
+       [89] = IMX_PIN_REG(MX35_PAD_SD21, 0x430, 0x0, 0, 0x0, 0), /* MX35_PAD_SD21__EMI_DRAM_D_21 */
+       [90] = IMX_PIN_REG(MX35_PAD_SD22, 0x434, 0x0, 0, 0x0, 0), /* MX35_PAD_SD22__EMI_DRAM_D_22 */
+       [91] = IMX_PIN_REG(MX35_PAD_SD23, 0x438, 0x0, 0, 0x0, 0), /* MX35_PAD_SD23__EMI_DRAM_D_23 */
+       [92] = IMX_PIN_REG(MX35_PAD_SD24, 0x43c, 0x0, 0, 0x0, 0), /* MX35_PAD_SD24__EMI_DRAM_D_24 */
+       [93] = IMX_PIN_REG(MX35_PAD_SD25, 0x440, 0x0, 0, 0x0, 0), /* MX35_PAD_SD25__EMI_DRAM_D_25 */
+       [94] = IMX_PIN_REG(MX35_PAD_SD26, 0x444, 0x0, 0, 0x0, 0), /* MX35_PAD_SD26__EMI_DRAM_D_26 */
+       [95] = IMX_PIN_REG(MX35_PAD_SD27, 0x448, 0x0, 0, 0x0, 0), /* MX35_PAD_SD27__EMI_DRAM_D_27 */
+       [96] = IMX_PIN_REG(MX35_PAD_SD28, 0x44c, 0x0, 0, 0x0, 0), /* MX35_PAD_SD28__EMI_DRAM_D_28 */
+       [97] = IMX_PIN_REG(MX35_PAD_SD29, 0x450, 0x0, 0, 0x0, 0), /* MX35_PAD_SD29__EMI_DRAM_D_29 */
+       [98] = IMX_PIN_REG(MX35_PAD_SD30, 0x454, 0x0, 0, 0x0, 0), /* MX35_PAD_SD30__EMI_DRAM_D_30 */
+       [99] = IMX_PIN_REG(MX35_PAD_SD31, 0x458, 0x0, 0, 0x0, 0), /* MX35_PAD_SD31__EMI_DRAM_D_31 */
+       [100] = IMX_PIN_REG(MX35_PAD_DQM0, 0x45c, 0x0, 0, 0x0, 0), /* MX35_PAD_DQM0__EMI_DRAM_DQM_0 */
+       [101] = IMX_PIN_REG(MX35_PAD_DQM1, 0x460, 0x0, 0, 0x0, 0), /* MX35_PAD_DQM1__EMI_DRAM_DQM_1 */
+       [102] = IMX_PIN_REG(MX35_PAD_DQM2, 0x464, 0x0, 0, 0x0, 0), /* MX35_PAD_DQM2__EMI_DRAM_DQM_2 */
+       [103] = IMX_PIN_REG(MX35_PAD_DQM3, 0x468, 0x0, 0, 0x0, 0), /* MX35_PAD_DQM3__EMI_DRAM_DQM_3 */
+       [104] = IMX_PIN_REG(MX35_PAD_EB0, 0x46c, 0x094, 0, 0x0, 0), /* MX35_PAD_EB0__EMI_EIM_EB0_B */
+       [105] = IMX_PIN_REG(MX35_PAD_EB1, 0x470, 0x098, 0, 0x0, 0), /* MX35_PAD_EB1__EMI_EIM_EB1_B */
+       [106] = IMX_PIN_REG(MX35_PAD_OE, 0x474, 0x09c, 0, 0x0, 0), /* MX35_PAD_OE__EMI_EIM_OE */
+       [107] = IMX_PIN_REG(MX35_PAD_CS0, 0x478, 0x0a0, 0, 0x0, 0), /* MX35_PAD_CS0__EMI_EIM_CS0 */
+       [108] = IMX_PIN_REG(MX35_PAD_CS1, 0x47c, 0x0a4, 0, 0x0, 0), /* MX35_PAD_CS1__EMI_EIM_CS1 */
+       [109] = IMX_PIN_REG(MX35_PAD_CS1, 0x47c, 0x0a4, 3, 0x0, 0), /* MX35_PAD_CS1__EMI_NANDF_CE3 */
+       [110] = IMX_PIN_REG(MX35_PAD_CS2, 0x480, 0x0a8, 0, 0x0, 0), /* MX35_PAD_CS2__EMI_EIM_CS2 */
+       [111] = IMX_PIN_REG(MX35_PAD_CS3, 0x484, 0x0ac, 0, 0x0, 0), /* MX35_PAD_CS3__EMI_EIM_CS3 */
+       [112] = IMX_PIN_REG(MX35_PAD_CS4, 0x488, 0x0b0, 0, 0x0, 0), /* MX35_PAD_CS4__EMI_EIM_CS4 */
+       [113] = IMX_PIN_REG(MX35_PAD_CS4, 0x488, 0x0b0, 1, 0x800, 0), /* MX35_PAD_CS4__EMI_DTACK_B */
+       [114] = IMX_PIN_REG(MX35_PAD_CS4, 0x488, 0x0b0, 3, 0x0, 0), /* MX35_PAD_CS4__EMI_NANDF_CE1 */
+       [115] = IMX_PIN_REG(MX35_PAD_CS4, 0x488, 0x0b0, 5, 0x83c, 0), /* MX35_PAD_CS4__GPIO1_20 */
+       [116] = IMX_PIN_REG(MX35_PAD_CS5, 0x48c, 0x0b4, 0, 0x0, 0), /* MX35_PAD_CS5__EMI_EIM_CS5 */
+       [117] = IMX_PIN_REG(MX35_PAD_CS5, 0x48c, 0x0b4, 1, 0x7f8, 0), /* MX35_PAD_CS5__CSPI2_SS2 */
+       [118] = IMX_PIN_REG(MX35_PAD_CS5, 0x48c, 0x0b4, 2, 0x7d8, 1), /* MX35_PAD_CS5__CSPI1_SS2 */
+       [119] = IMX_PIN_REG(MX35_PAD_CS5, 0x48c, 0x0b4, 3, 0x0, 0), /* MX35_PAD_CS5__EMI_NANDF_CE2 */
+       [120] = IMX_PIN_REG(MX35_PAD_CS5, 0x48c, 0x0b4, 5, 0x840, 0), /* MX35_PAD_CS5__GPIO1_21 */
+       [121] = IMX_PIN_REG(MX35_PAD_NF_CE0, 0x490, 0x0b8, 0, 0x0, 0), /* MX35_PAD_NF_CE0__EMI_NANDF_CE0 */
+       [122] = IMX_PIN_REG(MX35_PAD_NF_CE0, 0x490, 0x0b8, 5, 0x844, 0), /* MX35_PAD_NF_CE0__GPIO1_22 */
+       [123] = IMX_PIN_REG(MX35_PAD_ECB, 0x494, 0x0, 0, 0x0, 0), /* MX35_PAD_ECB__EMI_EIM_ECB */
+       [124] = IMX_PIN_REG(MX35_PAD_LBA, 0x498, 0x0bc, 0, 0x0, 0), /* MX35_PAD_LBA__EMI_EIM_LBA */
+       [125] = IMX_PIN_REG(MX35_PAD_BCLK, 0x49c, 0x0c0, 0, 0x0, 0), /* MX35_PAD_BCLK__EMI_EIM_BCLK */
+       [126] = IMX_PIN_REG(MX35_PAD_RW, 0x4a0, 0x0c4, 0, 0x0, 0), /* MX35_PAD_RW__EMI_EIM_RW */
+       [127] = IMX_PIN_REG(MX35_PAD_RAS, 0x4a4, 0x0, 0, 0x0, 0), /* MX35_PAD_RAS__EMI_DRAM_RAS */
+       [128] = IMX_PIN_REG(MX35_PAD_CAS, 0x4a8, 0x0, 0, 0x0, 0), /* MX35_PAD_CAS__EMI_DRAM_CAS */
+       [129] = IMX_PIN_REG(MX35_PAD_SDWE, 0x4ac, 0x0, 0, 0x0, 0), /* MX35_PAD_SDWE__EMI_DRAM_SDWE */
+       [130] = IMX_PIN_REG(MX35_PAD_SDCKE0, 0x4b0, 0x0, 0, 0x0, 0), /* MX35_PAD_SDCKE0__EMI_DRAM_SDCKE_0 */
+       [131] = IMX_PIN_REG(MX35_PAD_SDCKE1, 0x4b4, 0x0, 0, 0x0, 0), /* MX35_PAD_SDCKE1__EMI_DRAM_SDCKE_1 */
+       [132] = IMX_PIN_REG(MX35_PAD_SDCLK, 0x4b8, 0x0, 0, 0x0, 0), /* MX35_PAD_SDCLK__EMI_DRAM_SDCLK */
+       [133] = IMX_PIN_REG(MX35_PAD_SDQS0, 0x4bc, 0x0, 0, 0x0, 0), /* MX35_PAD_SDQS0__EMI_DRAM_SDQS_0 */
+       [134] = IMX_PIN_REG(MX35_PAD_SDQS1, 0x4c0, 0x0, 0, 0x0, 0), /* MX35_PAD_SDQS1__EMI_DRAM_SDQS_1 */
+       [135] = IMX_PIN_REG(MX35_PAD_SDQS2, 0x4c4, 0x0, 0, 0x0, 0), /* MX35_PAD_SDQS2__EMI_DRAM_SDQS_2 */
+       [136] = IMX_PIN_REG(MX35_PAD_SDQS3, 0x4c8, 0x0, 0, 0x0, 0), /* MX35_PAD_SDQS3__EMI_DRAM_SDQS_3 */
+       [137] = IMX_PIN_REG(MX35_PAD_NFWE_B, 0x4cc, 0x0c8, 0, 0x0, 0), /* MX35_PAD_NFWE_B__EMI_NANDF_WE_B */
+       [138] = IMX_PIN_REG(MX35_PAD_NFWE_B, 0x4cc, 0x0c8, 1, 0x9d8, 0), /* MX35_PAD_NFWE_B__USB_TOP_USBH2_DATA_3 */
+       [139] = IMX_PIN_REG(MX35_PAD_NFWE_B, 0x4cc, 0x0c8, 2, 0x924, 0), /* MX35_PAD_NFWE_B__IPU_DISPB_D0_VSYNC */
+       [140] = IMX_PIN_REG(MX35_PAD_NFWE_B, 0x4cc, 0x0c8, 5, 0x88c, 0), /* MX35_PAD_NFWE_B__GPIO2_18 */
+       [141] = IMX_PIN_REG(MX35_PAD_NFWE_B, 0x4cc, 0x0c8, 7, 0x0, 0), /* MX35_PAD_NFWE_B__ARM11P_TOP_TRACE_0 */
+       [142] = IMX_PIN_REG(MX35_PAD_NFRE_B, 0x4d0, 0x0cc, 0, 0x0, 0), /* MX35_PAD_NFRE_B__EMI_NANDF_RE_B */
+       [143] = IMX_PIN_REG(MX35_PAD_NFRE_B, 0x4d0, 0x0cc, 1, 0x9ec, 0), /* MX35_PAD_NFRE_B__USB_TOP_USBH2_DIR */
+       [144] = IMX_PIN_REG(MX35_PAD_NFRE_B, 0x4d0, 0x0cc, 2, 0x0, 0), /* MX35_PAD_NFRE_B__IPU_DISPB_BCLK */
+       [145] = IMX_PIN_REG(MX35_PAD_NFRE_B, 0x4d0, 0x0cc, 5, 0x890, 0), /* MX35_PAD_NFRE_B__GPIO2_19 */
+       [146] = IMX_PIN_REG(MX35_PAD_NFRE_B, 0x4d0, 0x0cc, 7, 0x0, 0), /* MX35_PAD_NFRE_B__ARM11P_TOP_TRACE_1 */
+       [147] = IMX_PIN_REG(MX35_PAD_NFALE, 0x4d4, 0x0d0, 0, 0x0, 0), /* MX35_PAD_NFALE__EMI_NANDF_ALE */
+       [148] = IMX_PIN_REG(MX35_PAD_NFALE, 0x4d4, 0x0d0, 1, 0x0, 0), /* MX35_PAD_NFALE__USB_TOP_USBH2_STP */
+       [149] = IMX_PIN_REG(MX35_PAD_NFALE, 0x4d4, 0x0d0, 2, 0x0, 0), /* MX35_PAD_NFALE__IPU_DISPB_CS0 */
+       [150] = IMX_PIN_REG(MX35_PAD_NFALE, 0x4d4, 0x0d0, 5, 0x898, 0), /* MX35_PAD_NFALE__GPIO2_20 */
+       [151] = IMX_PIN_REG(MX35_PAD_NFALE, 0x4d4, 0x0d0, 7, 0x0, 0), /* MX35_PAD_NFALE__ARM11P_TOP_TRACE_2 */
+       [152] = IMX_PIN_REG(MX35_PAD_NFCLE, 0x4d8, 0x0d4, 0, 0x0, 0), /* MX35_PAD_NFCLE__EMI_NANDF_CLE */
+       [153] = IMX_PIN_REG(MX35_PAD_NFCLE, 0x4d8, 0x0d4, 1, 0x9f0, 0), /* MX35_PAD_NFCLE__USB_TOP_USBH2_NXT */
+       [154] = IMX_PIN_REG(MX35_PAD_NFCLE, 0x4d8, 0x0d4, 2, 0x0, 0), /* MX35_PAD_NFCLE__IPU_DISPB_PAR_RS */
+       [155] = IMX_PIN_REG(MX35_PAD_NFCLE, 0x4d8, 0x0d4, 5, 0x89c, 0), /* MX35_PAD_NFCLE__GPIO2_21 */
+       [156] = IMX_PIN_REG(MX35_PAD_NFCLE, 0x4d8, 0x0d4, 7, 0x0, 0), /* MX35_PAD_NFCLE__ARM11P_TOP_TRACE_3 */
+       [157] = IMX_PIN_REG(MX35_PAD_NFWP_B, 0x4dc, 0x0d8, 0, 0x0, 0), /* MX35_PAD_NFWP_B__EMI_NANDF_WP_B */
+       [158] = IMX_PIN_REG(MX35_PAD_NFWP_B, 0x4dc, 0x0d8, 1, 0x9e8, 0), /* MX35_PAD_NFWP_B__USB_TOP_USBH2_DATA_7 */
+       [159] = IMX_PIN_REG(MX35_PAD_NFWP_B, 0x4dc, 0x0d8, 2, 0x0, 0), /* MX35_PAD_NFWP_B__IPU_DISPB_WR */
+       [160] = IMX_PIN_REG(MX35_PAD_NFWP_B, 0x4dc, 0x0d8, 5, 0x8a0, 0), /* MX35_PAD_NFWP_B__GPIO2_22 */
+       [161] = IMX_PIN_REG(MX35_PAD_NFWP_B, 0x4dc, 0x0d8, 7, 0x0, 0), /* MX35_PAD_NFWP_B__ARM11P_TOP_TRCTL */
+       [162] = IMX_PIN_REG(MX35_PAD_NFRB, 0x4e0, 0x0dc, 0, 0x0, 0), /* MX35_PAD_NFRB__EMI_NANDF_RB */
+       [163] = IMX_PIN_REG(MX35_PAD_NFRB, 0x4e0, 0x0dc, 2, 0x0, 0), /* MX35_PAD_NFRB__IPU_DISPB_RD */
+       [164] = IMX_PIN_REG(MX35_PAD_NFRB, 0x4e0, 0x0dc, 5, 0x8a4, 0), /* MX35_PAD_NFRB__GPIO2_23 */
+       [165] = IMX_PIN_REG(MX35_PAD_NFRB, 0x4e0, 0x0dc, 7, 0x0, 0), /* MX35_PAD_NFRB__ARM11P_TOP_TRCLK */
+       [166] = IMX_PIN_REG(MX35_PAD_D15, 0x4e4, 0x0, 0, 0x0, 0), /* MX35_PAD_D15__EMI_EIM_D_15 */
+       [167] = IMX_PIN_REG(MX35_PAD_D14, 0x4e8, 0x0, 0, 0x0, 0), /* MX35_PAD_D14__EMI_EIM_D_14 */
+       [168] = IMX_PIN_REG(MX35_PAD_D13, 0x4ec, 0x0, 0, 0x0, 0), /* MX35_PAD_D13__EMI_EIM_D_13 */
+       [169] = IMX_PIN_REG(MX35_PAD_D12, 0x4f0, 0x0, 0, 0x0, 0), /* MX35_PAD_D12__EMI_EIM_D_12 */
+       [170] = IMX_PIN_REG(MX35_PAD_D11, 0x4f4, 0x0, 0, 0x0, 0), /* MX35_PAD_D11__EMI_EIM_D_11 */
+       [171] = IMX_PIN_REG(MX35_PAD_D10, 0x4f8, 0x0, 0, 0x0, 0), /* MX35_PAD_D10__EMI_EIM_D_10 */
+       [172] = IMX_PIN_REG(MX35_PAD_D9, 0x4fc, 0x0, 0, 0x0, 0), /* MX35_PAD_D9__EMI_EIM_D_9 */
+       [173] = IMX_PIN_REG(MX35_PAD_D8, 0x500, 0x0, 0, 0x0, 0), /* MX35_PAD_D8__EMI_EIM_D_8 */
+       [174] = IMX_PIN_REG(MX35_PAD_D7, 0x504, 0x0, 0, 0x0, 0), /* MX35_PAD_D7__EMI_EIM_D_7 */
+       [175] = IMX_PIN_REG(MX35_PAD_D6, 0x508, 0x0, 0, 0x0, 0), /* MX35_PAD_D6__EMI_EIM_D_6 */
+       [176] = IMX_PIN_REG(MX35_PAD_D5, 0x50c, 0x0, 0, 0x0, 0), /* MX35_PAD_D5__EMI_EIM_D_5 */
+       [177] = IMX_PIN_REG(MX35_PAD_D4, 0x510, 0x0, 0, 0x0, 0), /* MX35_PAD_D4__EMI_EIM_D_4 */
+       [178] = IMX_PIN_REG(MX35_PAD_D3, 0x514, 0x0, 0, 0x0, 0), /* MX35_PAD_D3__EMI_EIM_D_3 */
+       [179] = IMX_PIN_REG(MX35_PAD_D2, 0x518, 0x0, 0, 0x0, 0), /* MX35_PAD_D2__EMI_EIM_D_2 */
+       [180] = IMX_PIN_REG(MX35_PAD_D1, 0x51c, 0x0, 0, 0x0, 0), /* MX35_PAD_D1__EMI_EIM_D_1 */
+       [181] = IMX_PIN_REG(MX35_PAD_D0, 0x520, 0x0, 0, 0x0, 0), /* MX35_PAD_D0__EMI_EIM_D_0 */
+       [182] = IMX_PIN_REG(MX35_PAD_CSI_D8, 0x524, 0x0e0, 0, 0x0, 0), /* MX35_PAD_CSI_D8__IPU_CSI_D_8 */
+       [183] = IMX_PIN_REG(MX35_PAD_CSI_D8, 0x524, 0x0e0, 1, 0x950, 0), /* MX35_PAD_CSI_D8__KPP_COL_0 */
+       [184] = IMX_PIN_REG(MX35_PAD_CSI_D8, 0x524, 0x0e0, 5, 0x83c, 1), /* MX35_PAD_CSI_D8__GPIO1_20 */
+       [185] = IMX_PIN_REG(MX35_PAD_CSI_D8, 0x524, 0x0e0, 7, 0x0, 0), /* MX35_PAD_CSI_D8__ARM11P_TOP_EVNTBUS_13 */
+       [186] = IMX_PIN_REG(MX35_PAD_CSI_D9, 0x528, 0x0e4, 0, 0x0, 0), /* MX35_PAD_CSI_D9__IPU_CSI_D_9 */
+       [187] = IMX_PIN_REG(MX35_PAD_CSI_D9, 0x528, 0x0e4, 1, 0x954, 0), /* MX35_PAD_CSI_D9__KPP_COL_1 */
+       [188] = IMX_PIN_REG(MX35_PAD_CSI_D9, 0x528, 0x0e4, 5, 0x840, 1), /* MX35_PAD_CSI_D9__GPIO1_21 */
+       [189] = IMX_PIN_REG(MX35_PAD_CSI_D9, 0x528, 0x0e4, 7, 0x0, 0), /* MX35_PAD_CSI_D9__ARM11P_TOP_EVNTBUS_14 */
+       [190] = IMX_PIN_REG(MX35_PAD_CSI_D10, 0x52c, 0x0e8, 0, 0x0, 0), /* MX35_PAD_CSI_D10__IPU_CSI_D_10 */
+       [191] = IMX_PIN_REG(MX35_PAD_CSI_D10, 0x52c, 0x0e8, 1, 0x958, 0), /* MX35_PAD_CSI_D10__KPP_COL_2 */
+       [192] = IMX_PIN_REG(MX35_PAD_CSI_D10, 0x52c, 0x0e8, 5, 0x844, 1), /* MX35_PAD_CSI_D10__GPIO1_22 */
+       [193] = IMX_PIN_REG(MX35_PAD_CSI_D10, 0x52c, 0x0e8, 7, 0x0, 0), /* MX35_PAD_CSI_D10__ARM11P_TOP_EVNTBUS_15 */
+       [194] = IMX_PIN_REG(MX35_PAD_CSI_D11, 0x530, 0x0ec, 0, 0x0, 0), /* MX35_PAD_CSI_D11__IPU_CSI_D_11 */
+       [195] = IMX_PIN_REG(MX35_PAD_CSI_D11, 0x530, 0x0ec, 1, 0x95c, 0), /* MX35_PAD_CSI_D11__KPP_COL_3 */
+       [196] = IMX_PIN_REG(MX35_PAD_CSI_D11, 0x530, 0x0ec, 5, 0x0, 0), /* MX35_PAD_CSI_D11__GPIO1_23 */
+       [197] = IMX_PIN_REG(MX35_PAD_CSI_D12, 0x534, 0x0f0, 0, 0x0, 0), /* MX35_PAD_CSI_D12__IPU_CSI_D_12 */
+       [198] = IMX_PIN_REG(MX35_PAD_CSI_D12, 0x534, 0x0f0, 1, 0x970, 0), /* MX35_PAD_CSI_D12__KPP_ROW_0 */
+       [199] = IMX_PIN_REG(MX35_PAD_CSI_D12, 0x534, 0x0f0, 5, 0x0, 0), /* MX35_PAD_CSI_D12__GPIO1_24 */
+       [200] = IMX_PIN_REG(MX35_PAD_CSI_D13, 0x538, 0x0f4, 0, 0x0, 0), /* MX35_PAD_CSI_D13__IPU_CSI_D_13 */
+       [201] = IMX_PIN_REG(MX35_PAD_CSI_D13, 0x538, 0x0f4, 1, 0x974, 0), /* MX35_PAD_CSI_D13__KPP_ROW_1 */
+       [202] = IMX_PIN_REG(MX35_PAD_CSI_D13, 0x538, 0x0f4, 5, 0x0, 0), /* MX35_PAD_CSI_D13__GPIO1_25 */
+       [203] = IMX_PIN_REG(MX35_PAD_CSI_D14, 0x53c, 0x0f8, 0, 0x0, 0), /* MX35_PAD_CSI_D14__IPU_CSI_D_14 */
+       [204] = IMX_PIN_REG(MX35_PAD_CSI_D14, 0x53c, 0x0f8, 1, 0x978, 0), /* MX35_PAD_CSI_D14__KPP_ROW_2 */
+       [205] = IMX_PIN_REG(MX35_PAD_CSI_D14, 0x53c, 0x0f8, 5, 0x0, 0), /* MX35_PAD_CSI_D14__GPIO1_26 */
+       [206] = IMX_PIN_REG(MX35_PAD_CSI_D15, 0x540, 0x0fc, 0, 0x97c, 0), /* MX35_PAD_CSI_D15__IPU_CSI_D_15 */
+       [207] = IMX_PIN_REG(MX35_PAD_CSI_D15, 0x540, 0x0fc, 1, 0x0, 0), /* MX35_PAD_CSI_D15__KPP_ROW_3 */
+       [208] = IMX_PIN_REG(MX35_PAD_CSI_D15, 0x540, 0x0fc, 5, 0x0, 0), /* MX35_PAD_CSI_D15__GPIO1_27 */
+       [209] = IMX_PIN_REG(MX35_PAD_CSI_MCLK, 0x544, 0x100, 0, 0x0, 0), /* MX35_PAD_CSI_MCLK__IPU_CSI_MCLK */
+       [210] = IMX_PIN_REG(MX35_PAD_CSI_MCLK, 0x544, 0x100, 5, 0x0, 0), /* MX35_PAD_CSI_MCLK__GPIO1_28 */
+       [211] = IMX_PIN_REG(MX35_PAD_CSI_VSYNC, 0x548, 0x104, 0, 0x0, 0), /* MX35_PAD_CSI_VSYNC__IPU_CSI_VSYNC */
+       [212] = IMX_PIN_REG(MX35_PAD_CSI_VSYNC, 0x548, 0x104, 5, 0x0, 0), /* MX35_PAD_CSI_VSYNC__GPIO1_29 */
+       [213] = IMX_PIN_REG(MX35_PAD_CSI_HSYNC, 0x54c, 0x108, 0, 0x0, 0), /* MX35_PAD_CSI_HSYNC__IPU_CSI_HSYNC */
+       [214] = IMX_PIN_REG(MX35_PAD_CSI_HSYNC, 0x54c, 0x108, 5, 0x0, 0), /* MX35_PAD_CSI_HSYNC__GPIO1_30 */
+       [215] = IMX_PIN_REG(MX35_PAD_CSI_PIXCLK, 0x550, 0x10c, 0, 0x0, 0), /* MX35_PAD_CSI_PIXCLK__IPU_CSI_PIXCLK */
+       [216] = IMX_PIN_REG(MX35_PAD_CSI_PIXCLK, 0x550, 0x10c, 5, 0x0, 0), /* MX35_PAD_CSI_PIXCLK__GPIO1_31 */
+       [217] = IMX_PIN_REG(MX35_PAD_I2C1_CLK, 0x554, 0x110, 0, 0x0, 0), /* MX35_PAD_I2C1_CLK__I2C1_SCL */
+       [218] = IMX_PIN_REG(MX35_PAD_I2C1_CLK, 0x554, 0x110, 5, 0x8a8, 0), /* MX35_PAD_I2C1_CLK__GPIO2_24 */
+       [219] = IMX_PIN_REG(MX35_PAD_I2C1_CLK, 0x554, 0x110, 6, 0x0, 0), /* MX35_PAD_I2C1_CLK__CCM_USB_BYP_CLK */
+       [220] = IMX_PIN_REG(MX35_PAD_I2C1_DAT, 0x558, 0x114, 0, 0x0, 0), /* MX35_PAD_I2C1_DAT__I2C1_SDA */
+       [221] = IMX_PIN_REG(MX35_PAD_I2C1_DAT, 0x558, 0x114, 5, 0x8ac, 0), /* MX35_PAD_I2C1_DAT__GPIO2_25 */
+       [222] = IMX_PIN_REG(MX35_PAD_I2C2_CLK, 0x55c, 0x118, 0, 0x0, 0), /* MX35_PAD_I2C2_CLK__I2C2_SCL */
+       [223] = IMX_PIN_REG(MX35_PAD_I2C2_CLK, 0x55c, 0x118, 1, 0x0, 0), /* MX35_PAD_I2C2_CLK__CAN1_TXCAN */
+       [224] = IMX_PIN_REG(MX35_PAD_I2C2_CLK, 0x55c, 0x118, 2, 0x0, 0), /* MX35_PAD_I2C2_CLK__USB_TOP_USBH2_PWR */
+       [225] = IMX_PIN_REG(MX35_PAD_I2C2_CLK, 0x55c, 0x118, 5, 0x8b0, 0), /* MX35_PAD_I2C2_CLK__GPIO2_26 */
+       [226] = IMX_PIN_REG(MX35_PAD_I2C2_CLK, 0x55c, 0x118, 6, 0x0, 0), /* MX35_PAD_I2C2_CLK__SDMA_DEBUG_BUS_DEVICE_2 */
+       [227] = IMX_PIN_REG(MX35_PAD_I2C2_DAT, 0x560, 0x11c, 0, 0x0, 0), /* MX35_PAD_I2C2_DAT__I2C2_SDA */
+       [228] = IMX_PIN_REG(MX35_PAD_I2C2_DAT, 0x560, 0x11c, 1, 0x7c8, 0), /* MX35_PAD_I2C2_DAT__CAN1_RXCAN */
+       [229] = IMX_PIN_REG(MX35_PAD_I2C2_DAT, 0x560, 0x11c, 2, 0x9f4, 0), /* MX35_PAD_I2C2_DAT__USB_TOP_USBH2_OC */
+       [230] = IMX_PIN_REG(MX35_PAD_I2C2_DAT, 0x560, 0x11c, 5, 0x8b4, 0), /* MX35_PAD_I2C2_DAT__GPIO2_27 */
+       [231] = IMX_PIN_REG(MX35_PAD_I2C2_DAT, 0x560, 0x11c, 6, 0x0, 0), /* MX35_PAD_I2C2_DAT__SDMA_DEBUG_BUS_DEVICE_3 */
+       [232] = IMX_PIN_REG(MX35_PAD_STXD4, 0x564, 0x120, 0, 0x0, 0), /* MX35_PAD_STXD4__AUDMUX_AUD4_TXD */
+       [233] = IMX_PIN_REG(MX35_PAD_STXD4, 0x564, 0x120, 5, 0x8b8, 0), /* MX35_PAD_STXD4__GPIO2_28 */
+       [234] = IMX_PIN_REG(MX35_PAD_STXD4, 0x564, 0x120, 7, 0x0, 0), /* MX35_PAD_STXD4__ARM11P_TOP_ARM_COREASID0 */
+       [235] = IMX_PIN_REG(MX35_PAD_SRXD4, 0x568, 0x124, 0, 0x0, 0), /* MX35_PAD_SRXD4__AUDMUX_AUD4_RXD */
+       [236] = IMX_PIN_REG(MX35_PAD_SRXD4, 0x568, 0x124, 5, 0x8bc, 0), /* MX35_PAD_SRXD4__GPIO2_29 */
+       [237] = IMX_PIN_REG(MX35_PAD_SRXD4, 0x568, 0x124, 7, 0x0, 0), /* MX35_PAD_SRXD4__ARM11P_TOP_ARM_COREASID1 */
+       [238] = IMX_PIN_REG(MX35_PAD_SCK4, 0x56c, 0x128, 0, 0x0, 0), /* MX35_PAD_SCK4__AUDMUX_AUD4_TXC */
+       [239] = IMX_PIN_REG(MX35_PAD_SCK4, 0x56c, 0x128, 5, 0x8c4, 0), /* MX35_PAD_SCK4__GPIO2_30 */
+       [240] = IMX_PIN_REG(MX35_PAD_SCK4, 0x56c, 0x128, 7, 0x0, 0), /* MX35_PAD_SCK4__ARM11P_TOP_ARM_COREASID2 */
+       [241] = IMX_PIN_REG(MX35_PAD_STXFS4, 0x570, 0x12c, 0, 0x0, 0), /* MX35_PAD_STXFS4__AUDMUX_AUD4_TXFS */
+       [242] = IMX_PIN_REG(MX35_PAD_STXFS4, 0x570, 0x12c, 5, 0x8c8, 0), /* MX35_PAD_STXFS4__GPIO2_31 */
+       [243] = IMX_PIN_REG(MX35_PAD_STXFS4, 0x570, 0x12c, 7, 0x0, 0), /* MX35_PAD_STXFS4__ARM11P_TOP_ARM_COREASID3 */
+       [244] = IMX_PIN_REG(MX35_PAD_STXD5, 0x574, 0x130, 0, 0x0, 0), /* MX35_PAD_STXD5__AUDMUX_AUD5_TXD */
+       [245] = IMX_PIN_REG(MX35_PAD_STXD5, 0x574, 0x130, 1, 0x0, 0), /* MX35_PAD_STXD5__SPDIF_SPDIF_OUT1 */
+       [246] = IMX_PIN_REG(MX35_PAD_STXD5, 0x574, 0x130, 2, 0x7ec, 0), /* MX35_PAD_STXD5__CSPI2_MOSI */
+       [247] = IMX_PIN_REG(MX35_PAD_STXD5, 0x574, 0x130, 5, 0x82c, 1), /* MX35_PAD_STXD5__GPIO1_0 */
+       [248] = IMX_PIN_REG(MX35_PAD_STXD5, 0x574, 0x130, 7, 0x0, 0), /* MX35_PAD_STXD5__ARM11P_TOP_ARM_COREASID4 */
+       [249] = IMX_PIN_REG(MX35_PAD_SRXD5, 0x578, 0x134, 0, 0x0, 0), /* MX35_PAD_SRXD5__AUDMUX_AUD5_RXD */
+       [250] = IMX_PIN_REG(MX35_PAD_SRXD5, 0x578, 0x134, 1, 0x998, 0), /* MX35_PAD_SRXD5__SPDIF_SPDIF_IN1 */
+       [251] = IMX_PIN_REG(MX35_PAD_SRXD5, 0x578, 0x134, 2, 0x7e8, 0), /* MX35_PAD_SRXD5__CSPI2_MISO */
+       [252] = IMX_PIN_REG(MX35_PAD_SRXD5, 0x578, 0x134, 5, 0x838, 1), /* MX35_PAD_SRXD5__GPIO1_1 */
+       [253] = IMX_PIN_REG(MX35_PAD_SRXD5, 0x578, 0x134, 7, 0x0, 0), /* MX35_PAD_SRXD5__ARM11P_TOP_ARM_COREASID5 */
+       [254] = IMX_PIN_REG(MX35_PAD_SCK5, 0x57c, 0x138, 0, 0x0, 0), /* MX35_PAD_SCK5__AUDMUX_AUD5_TXC */
+       [255] = IMX_PIN_REG(MX35_PAD_SCK5, 0x57c, 0x138, 1, 0x994, 0), /* MX35_PAD_SCK5__SPDIF_SPDIF_EXTCLK */
+       [256] = IMX_PIN_REG(MX35_PAD_SCK5, 0x57c, 0x138, 2, 0x7e0, 0), /* MX35_PAD_SCK5__CSPI2_SCLK */
+       [257] = IMX_PIN_REG(MX35_PAD_SCK5, 0x57c, 0x138, 5, 0x848, 0), /* MX35_PAD_SCK5__GPIO1_2 */
+       [258] = IMX_PIN_REG(MX35_PAD_SCK5, 0x57c, 0x138, 7, 0x0, 0), /* MX35_PAD_SCK5__ARM11P_TOP_ARM_COREASID6 */
+       [259] = IMX_PIN_REG(MX35_PAD_STXFS5, 0x580, 0x13c, 0, 0x0, 0), /* MX35_PAD_STXFS5__AUDMUX_AUD5_TXFS */
+       [260] = IMX_PIN_REG(MX35_PAD_STXFS5, 0x580, 0x13c, 2, 0x7e4, 0), /* MX35_PAD_STXFS5__CSPI2_RDY */
+       [261] = IMX_PIN_REG(MX35_PAD_STXFS5, 0x580, 0x13c, 5, 0x84c, 0), /* MX35_PAD_STXFS5__GPIO1_3 */
+       [262] = IMX_PIN_REG(MX35_PAD_STXFS5, 0x580, 0x13c, 7, 0x0, 0), /* MX35_PAD_STXFS5__ARM11P_TOP_ARM_COREASID7 */
+       [263] = IMX_PIN_REG(MX35_PAD_SCKR, 0x584, 0x140, 0, 0x0, 0), /* MX35_PAD_SCKR__ESAI_SCKR */
+       [264] = IMX_PIN_REG(MX35_PAD_SCKR, 0x584, 0x140, 5, 0x850, 1), /* MX35_PAD_SCKR__GPIO1_4 */
+       [265] = IMX_PIN_REG(MX35_PAD_SCKR, 0x584, 0x140, 7, 0x0, 0), /* MX35_PAD_SCKR__ARM11P_TOP_EVNTBUS_10 */
+       [266] = IMX_PIN_REG(MX35_PAD_FSR, 0x588, 0x144, 0, 0x0, 0), /* MX35_PAD_FSR__ESAI_FSR */
+       [267] = IMX_PIN_REG(MX35_PAD_FSR, 0x588, 0x144, 5, 0x854, 1), /* MX35_PAD_FSR__GPIO1_5 */
+       [268] = IMX_PIN_REG(MX35_PAD_FSR, 0x588, 0x144, 7, 0x0, 0), /* MX35_PAD_FSR__ARM11P_TOP_EVNTBUS_11 */
+       [269] = IMX_PIN_REG(MX35_PAD_HCKR, 0x58c, 0x148, 0, 0x0, 0), /* MX35_PAD_HCKR__ESAI_HCKR */
+       [270] = IMX_PIN_REG(MX35_PAD_HCKR, 0x58c, 0x148, 1, 0x0, 0), /* MX35_PAD_HCKR__AUDMUX_AUD5_RXFS */
+       [271] = IMX_PIN_REG(MX35_PAD_HCKR, 0x58c, 0x148, 2, 0x7f0, 0), /* MX35_PAD_HCKR__CSPI2_SS0 */
+       [272] = IMX_PIN_REG(MX35_PAD_HCKR, 0x58c, 0x148, 3, 0x0, 0), /* MX35_PAD_HCKR__IPU_FLASH_STROBE */
+       [273] = IMX_PIN_REG(MX35_PAD_HCKR, 0x58c, 0x148, 5, 0x858, 1), /* MX35_PAD_HCKR__GPIO1_6 */
+       [274] = IMX_PIN_REG(MX35_PAD_HCKR, 0x58c, 0x148, 7, 0x0, 0), /* MX35_PAD_HCKR__ARM11P_TOP_EVNTBUS_12 */
+       [275] = IMX_PIN_REG(MX35_PAD_SCKT, 0x590, 0x14c, 0, 0x0, 0), /* MX35_PAD_SCKT__ESAI_SCKT */
+       [276] = IMX_PIN_REG(MX35_PAD_SCKT, 0x590, 0x14c, 5, 0x85c, 1), /* MX35_PAD_SCKT__GPIO1_7 */
+       [277] = IMX_PIN_REG(MX35_PAD_SCKT, 0x590, 0x14c, 6, 0x930, 0), /* MX35_PAD_SCKT__IPU_CSI_D_0 */
+       [278] = IMX_PIN_REG(MX35_PAD_SCKT, 0x590, 0x14c, 7, 0x978, 1), /* MX35_PAD_SCKT__KPP_ROW_2 */
+       [279] = IMX_PIN_REG(MX35_PAD_FST, 0x594, 0x150, 0, 0x0, 0), /* MX35_PAD_FST__ESAI_FST */
+       [280] = IMX_PIN_REG(MX35_PAD_FST, 0x594, 0x150, 5, 0x860, 1), /* MX35_PAD_FST__GPIO1_8 */
+       [281] = IMX_PIN_REG(MX35_PAD_FST, 0x594, 0x150, 6, 0x934, 0), /* MX35_PAD_FST__IPU_CSI_D_1 */
+       [282] = IMX_PIN_REG(MX35_PAD_FST, 0x594, 0x150, 7, 0x97c, 1), /* MX35_PAD_FST__KPP_ROW_3 */
+       [283] = IMX_PIN_REG(MX35_PAD_HCKT, 0x598, 0x154, 0, 0x0, 0), /* MX35_PAD_HCKT__ESAI_HCKT */
+       [284] = IMX_PIN_REG(MX35_PAD_HCKT, 0x598, 0x154, 1, 0x7a8, 0), /* MX35_PAD_HCKT__AUDMUX_AUD5_RXC */
+       [285] = IMX_PIN_REG(MX35_PAD_HCKT, 0x598, 0x154, 5, 0x864, 0), /* MX35_PAD_HCKT__GPIO1_9 */
+       [286] = IMX_PIN_REG(MX35_PAD_HCKT, 0x598, 0x154, 6, 0x938, 0), /* MX35_PAD_HCKT__IPU_CSI_D_2 */
+       [287] = IMX_PIN_REG(MX35_PAD_HCKT, 0x598, 0x154, 7, 0x95c, 1), /* MX35_PAD_HCKT__KPP_COL_3 */
+       [288] = IMX_PIN_REG(MX35_PAD_TX5_RX0, 0x59c, 0x158, 0, 0x0, 0), /* MX35_PAD_TX5_RX0__ESAI_TX5_RX0 */
+       [289] = IMX_PIN_REG(MX35_PAD_TX5_RX0, 0x59c, 0x158, 1, 0x0, 0), /* MX35_PAD_TX5_RX0__AUDMUX_AUD4_RXC */
+       [290] = IMX_PIN_REG(MX35_PAD_TX5_RX0, 0x59c, 0x158, 2, 0x7f8, 1), /* MX35_PAD_TX5_RX0__CSPI2_SS2 */
+       [291] = IMX_PIN_REG(MX35_PAD_TX5_RX0, 0x59c, 0x158, 3, 0x0, 0), /* MX35_PAD_TX5_RX0__CAN2_TXCAN */
+       [292] = IMX_PIN_REG(MX35_PAD_TX5_RX0, 0x59c, 0x158, 4, 0x0, 0), /* MX35_PAD_TX5_RX0__UART2_DTR */
+       [293] = IMX_PIN_REG(MX35_PAD_TX5_RX0, 0x59c, 0x158, 5, 0x830, 0), /* MX35_PAD_TX5_RX0__GPIO1_10 */
+       [294] = IMX_PIN_REG(MX35_PAD_TX5_RX0, 0x59c, 0x158, 7, 0x0, 0), /* MX35_PAD_TX5_RX0__EMI_M3IF_CHOSEN_MASTER_0 */
+       [295] = IMX_PIN_REG(MX35_PAD_TX4_RX1, 0x5a0, 0x15c, 0, 0x0, 0), /* MX35_PAD_TX4_RX1__ESAI_TX4_RX1 */
+       [296] = IMX_PIN_REG(MX35_PAD_TX4_RX1, 0x5a0, 0x15c, 1, 0x0, 0), /* MX35_PAD_TX4_RX1__AUDMUX_AUD4_RXFS */
+       [297] = IMX_PIN_REG(MX35_PAD_TX4_RX1, 0x5a0, 0x15c, 2, 0x7fc, 0), /* MX35_PAD_TX4_RX1__CSPI2_SS3 */
+       [298] = IMX_PIN_REG(MX35_PAD_TX4_RX1, 0x5a0, 0x15c, 3, 0x7cc, 0), /* MX35_PAD_TX4_RX1__CAN2_RXCAN */
+       [299] = IMX_PIN_REG(MX35_PAD_TX4_RX1, 0x5a0, 0x15c, 4, 0x0, 0), /* MX35_PAD_TX4_RX1__UART2_DSR */
+       [300] = IMX_PIN_REG(MX35_PAD_TX4_RX1, 0x5a0, 0x15c, 5, 0x834, 0), /* MX35_PAD_TX4_RX1__GPIO1_11 */
+       [301] = IMX_PIN_REG(MX35_PAD_TX4_RX1, 0x5a0, 0x15c, 6, 0x93c, 0), /* MX35_PAD_TX4_RX1__IPU_CSI_D_3 */
+       [302] = IMX_PIN_REG(MX35_PAD_TX4_RX1, 0x5a0, 0x15c, 7, 0x970, 1), /* MX35_PAD_TX4_RX1__KPP_ROW_0 */
+       [303] = IMX_PIN_REG(MX35_PAD_TX3_RX2, 0x5a4, 0x160, 0, 0x0, 0), /* MX35_PAD_TX3_RX2__ESAI_TX3_RX2 */
+       [304] = IMX_PIN_REG(MX35_PAD_TX3_RX2, 0x5a4, 0x160, 1, 0x91c, 0), /* MX35_PAD_TX3_RX2__I2C3_SCL */
+       [305] = IMX_PIN_REG(MX35_PAD_TX3_RX2, 0x5a4, 0x160, 3, 0x0, 0), /* MX35_PAD_TX3_RX2__EMI_NANDF_CE1 */
+       [306] = IMX_PIN_REG(MX35_PAD_TX3_RX2, 0x5a4, 0x160, 5, 0x0, 0), /* MX35_PAD_TX3_RX2__GPIO1_12 */
+       [307] = IMX_PIN_REG(MX35_PAD_TX3_RX2, 0x5a4, 0x160, 6, 0x940, 0), /* MX35_PAD_TX3_RX2__IPU_CSI_D_4 */
+       [308] = IMX_PIN_REG(MX35_PAD_TX3_RX2, 0x5a4, 0x160, 7, 0x974, 1), /* MX35_PAD_TX3_RX2__KPP_ROW_1 */
+       [309] = IMX_PIN_REG(MX35_PAD_TX2_RX3, 0x5a8, 0x164, 0, 0x0, 0), /* MX35_PAD_TX2_RX3__ESAI_TX2_RX3 */
+       [310] = IMX_PIN_REG(MX35_PAD_TX2_RX3, 0x5a8, 0x164, 1, 0x920, 0), /* MX35_PAD_TX2_RX3__I2C3_SDA */
+       [311] = IMX_PIN_REG(MX35_PAD_TX2_RX3, 0x5a8, 0x164, 3, 0x0, 0), /* MX35_PAD_TX2_RX3__EMI_NANDF_CE2 */
+       [312] = IMX_PIN_REG(MX35_PAD_TX2_RX3, 0x5a8, 0x164, 5, 0x0, 0), /* MX35_PAD_TX2_RX3__GPIO1_13 */
+       [313] = IMX_PIN_REG(MX35_PAD_TX2_RX3, 0x5a8, 0x164, 6, 0x944, 0), /* MX35_PAD_TX2_RX3__IPU_CSI_D_5 */
+       [314] = IMX_PIN_REG(MX35_PAD_TX2_RX3, 0x5a8, 0x164, 7, 0x950, 1), /* MX35_PAD_TX2_RX3__KPP_COL_0 */
+       [315] = IMX_PIN_REG(MX35_PAD_TX1, 0x5ac, 0x168, 0, 0x0, 0), /* MX35_PAD_TX1__ESAI_TX1 */
+       [316] = IMX_PIN_REG(MX35_PAD_TX1, 0x5ac, 0x168, 1, 0x7d4, 1), /* MX35_PAD_TX1__CCM_PMIC_RDY */
+       [317] = IMX_PIN_REG(MX35_PAD_TX1, 0x5ac, 0x168, 2, 0x7d8, 2), /* MX35_PAD_TX1__CSPI1_SS2 */
+       [318] = IMX_PIN_REG(MX35_PAD_TX1, 0x5ac, 0x168, 3, 0x0, 0), /* MX35_PAD_TX1__EMI_NANDF_CE3 */
+       [319] = IMX_PIN_REG(MX35_PAD_TX1, 0x5ac, 0x168, 4, 0x0, 0), /* MX35_PAD_TX1__UART2_RI */
+       [320] = IMX_PIN_REG(MX35_PAD_TX1, 0x5ac, 0x168, 5, 0x0, 0), /* MX35_PAD_TX1__GPIO1_14 */
+       [321] = IMX_PIN_REG(MX35_PAD_TX1, 0x5ac, 0x168, 6, 0x948, 0), /* MX35_PAD_TX1__IPU_CSI_D_6 */
+       [322] = IMX_PIN_REG(MX35_PAD_TX1, 0x5ac, 0x168, 7, 0x954, 1), /* MX35_PAD_TX1__KPP_COL_1 */
+       [323] = IMX_PIN_REG(MX35_PAD_TX0, 0x5b0, 0x16c, 0, 0x0, 0), /* MX35_PAD_TX0__ESAI_TX0 */
+       [324] = IMX_PIN_REG(MX35_PAD_TX0, 0x5b0, 0x16c, 1, 0x994, 1), /* MX35_PAD_TX0__SPDIF_SPDIF_EXTCLK */
+       [325] = IMX_PIN_REG(MX35_PAD_TX0, 0x5b0, 0x16c, 2, 0x7dc, 0), /* MX35_PAD_TX0__CSPI1_SS3 */
+       [326] = IMX_PIN_REG(MX35_PAD_TX0, 0x5b0, 0x16c, 3, 0x800, 1), /* MX35_PAD_TX0__EMI_DTACK_B */
+       [327] = IMX_PIN_REG(MX35_PAD_TX0, 0x5b0, 0x16c, 4, 0x0, 0), /* MX35_PAD_TX0__UART2_DCD */
+       [328] = IMX_PIN_REG(MX35_PAD_TX0, 0x5b0, 0x16c, 5, 0x0, 0), /* MX35_PAD_TX0__GPIO1_15 */
+       [329] = IMX_PIN_REG(MX35_PAD_TX0, 0x5b0, 0x16c, 6, 0x94c, 0), /* MX35_PAD_TX0__IPU_CSI_D_7 */
+       [330] = IMX_PIN_REG(MX35_PAD_TX0, 0x5b0, 0x16c, 7, 0x958, 1), /* MX35_PAD_TX0__KPP_COL_2 */
+       [331] = IMX_PIN_REG(MX35_PAD_CSPI1_MOSI, 0x5b4, 0x170, 0, 0x0, 0), /* MX35_PAD_CSPI1_MOSI__CSPI1_MOSI */
+       [332] = IMX_PIN_REG(MX35_PAD_CSPI1_MOSI, 0x5b4, 0x170, 5, 0x0, 0), /* MX35_PAD_CSPI1_MOSI__GPIO1_16 */
+       [333] = IMX_PIN_REG(MX35_PAD_CSPI1_MOSI, 0x5b4, 0x170, 7, 0x0, 0), /* MX35_PAD_CSPI1_MOSI__ECT_CTI_TRIG_OUT1_2 */
+       [334] = IMX_PIN_REG(MX35_PAD_CSPI1_MISO, 0x5b8, 0x174, 0, 0x0, 0), /* MX35_PAD_CSPI1_MISO__CSPI1_MISO */
+       [335] = IMX_PIN_REG(MX35_PAD_CSPI1_MISO, 0x5b8, 0x174, 5, 0x0, 0), /* MX35_PAD_CSPI1_MISO__GPIO1_17 */
+       [336] = IMX_PIN_REG(MX35_PAD_CSPI1_MISO, 0x5b8, 0x174, 7, 0x0, 0), /* MX35_PAD_CSPI1_MISO__ECT_CTI_TRIG_OUT1_3 */
+       [337] = IMX_PIN_REG(MX35_PAD_CSPI1_SS0, 0x5bc, 0x178, 0, 0x0, 0), /* MX35_PAD_CSPI1_SS0__CSPI1_SS0 */
+       [338] = IMX_PIN_REG(MX35_PAD_CSPI1_SS0, 0x5bc, 0x178, 1, 0x990, 1), /* MX35_PAD_CSPI1_SS0__OWIRE_LINE */
+       [339] = IMX_PIN_REG(MX35_PAD_CSPI1_SS0, 0x5bc, 0x178, 2, 0x7fc, 1), /* MX35_PAD_CSPI1_SS0__CSPI2_SS3 */
+       [340] = IMX_PIN_REG(MX35_PAD_CSPI1_SS0, 0x5bc, 0x178, 5, 0x0, 0), /* MX35_PAD_CSPI1_SS0__GPIO1_18 */
+       [341] = IMX_PIN_REG(MX35_PAD_CSPI1_SS0, 0x5bc, 0x178, 7, 0x0, 0), /* MX35_PAD_CSPI1_SS0__ECT_CTI_TRIG_OUT1_4 */
+       [342] = IMX_PIN_REG(MX35_PAD_CSPI1_SS1, 0x5c0, 0x17c, 0, 0x0, 0), /* MX35_PAD_CSPI1_SS1__CSPI1_SS1 */
+       [343] = IMX_PIN_REG(MX35_PAD_CSPI1_SS1, 0x5c0, 0x17c, 1, 0x0, 0), /* MX35_PAD_CSPI1_SS1__PWM_PWMO */
+       [344] = IMX_PIN_REG(MX35_PAD_CSPI1_SS1, 0x5c0, 0x17c, 2, 0x7d0, 1), /* MX35_PAD_CSPI1_SS1__CCM_CLK32K */
+       [345] = IMX_PIN_REG(MX35_PAD_CSPI1_SS1, 0x5c0, 0x17c, 5, 0x0, 0), /* MX35_PAD_CSPI1_SS1__GPIO1_19 */
+       [346] = IMX_PIN_REG(MX35_PAD_CSPI1_SS1, 0x5c0, 0x17c, 6, 0x0, 0), /* MX35_PAD_CSPI1_SS1__IPU_DIAGB_29 */
+       [347] = IMX_PIN_REG(MX35_PAD_CSPI1_SS1, 0x5c0, 0x17c, 7, 0x0, 0), /* MX35_PAD_CSPI1_SS1__ECT_CTI_TRIG_OUT1_5 */
+       [348] = IMX_PIN_REG(MX35_PAD_CSPI1_SCLK, 0x5c4, 0x180, 0, 0x0, 0), /* MX35_PAD_CSPI1_SCLK__CSPI1_SCLK */
+       [349] = IMX_PIN_REG(MX35_PAD_CSPI1_SCLK, 0x5c4, 0x180, 5, 0x904, 0), /* MX35_PAD_CSPI1_SCLK__GPIO3_4 */
+       [350] = IMX_PIN_REG(MX35_PAD_CSPI1_SCLK, 0x5c4, 0x180, 6, 0x0, 0), /* MX35_PAD_CSPI1_SCLK__IPU_DIAGB_30 */
+       [351] = IMX_PIN_REG(MX35_PAD_CSPI1_SCLK, 0x5c4, 0x180, 7, 0x0, 0), /* MX35_PAD_CSPI1_SCLK__EMI_M3IF_CHOSEN_MASTER_1 */
+       [352] = IMX_PIN_REG(MX35_PAD_CSPI1_SPI_RDY, 0x5c8, 0x184, 0, 0x0, 0), /* MX35_PAD_CSPI1_SPI_RDY__CSPI1_RDY */
+       [353] = IMX_PIN_REG(MX35_PAD_CSPI1_SPI_RDY, 0x5c8, 0x184, 5, 0x908, 0), /* MX35_PAD_CSPI1_SPI_RDY__GPIO3_5 */
+       [354] = IMX_PIN_REG(MX35_PAD_CSPI1_SPI_RDY, 0x5c8, 0x184, 6, 0x0, 0), /* MX35_PAD_CSPI1_SPI_RDY__IPU_DIAGB_31 */
+       [355] = IMX_PIN_REG(MX35_PAD_CSPI1_SPI_RDY, 0x5c8, 0x184, 7, 0x0, 0), /* MX35_PAD_CSPI1_SPI_RDY__EMI_M3IF_CHOSEN_MASTER_2 */
+       [356] = IMX_PIN_REG(MX35_PAD_RXD1, 0x5cc, 0x188, 0, 0x0, 0), /* MX35_PAD_RXD1__UART1_RXD_MUX */
+       [357] = IMX_PIN_REG(MX35_PAD_RXD1, 0x5cc, 0x188, 1, 0x7ec, 1), /* MX35_PAD_RXD1__CSPI2_MOSI */
+       [358] = IMX_PIN_REG(MX35_PAD_RXD1, 0x5cc, 0x188, 4, 0x960, 0), /* MX35_PAD_RXD1__KPP_COL_4 */
+       [359] = IMX_PIN_REG(MX35_PAD_RXD1, 0x5cc, 0x188, 5, 0x90c, 0), /* MX35_PAD_RXD1__GPIO3_6 */
+       [360] = IMX_PIN_REG(MX35_PAD_RXD1, 0x5cc, 0x188, 7, 0x0, 0), /* MX35_PAD_RXD1__ARM11P_TOP_EVNTBUS_16 */
+       [361] = IMX_PIN_REG(MX35_PAD_TXD1, 0x5d0, 0x18c, 0, 0x0, 0), /* MX35_PAD_TXD1__UART1_TXD_MUX */
+       [362] = IMX_PIN_REG(MX35_PAD_TXD1, 0x5d0, 0x18c, 1, 0x7e8, 1), /* MX35_PAD_TXD1__CSPI2_MISO */
+       [363] = IMX_PIN_REG(MX35_PAD_TXD1, 0x5d0, 0x18c, 4, 0x964, 0), /* MX35_PAD_TXD1__KPP_COL_5 */
+       [364] = IMX_PIN_REG(MX35_PAD_TXD1, 0x5d0, 0x18c, 5, 0x910, 0), /* MX35_PAD_TXD1__GPIO3_7 */
+       [365] = IMX_PIN_REG(MX35_PAD_TXD1, 0x5d0, 0x18c, 7, 0x0, 0), /* MX35_PAD_TXD1__ARM11P_TOP_EVNTBUS_17 */
+       [366] = IMX_PIN_REG(MX35_PAD_RTS1, 0x5d4, 0x190, 0, 0x0, 0), /* MX35_PAD_RTS1__UART1_RTS */
+       [367] = IMX_PIN_REG(MX35_PAD_RTS1, 0x5d4, 0x190, 1, 0x7e0, 1), /* MX35_PAD_RTS1__CSPI2_SCLK */
+       [368] = IMX_PIN_REG(MX35_PAD_RTS1, 0x5d4, 0x190, 2, 0x91c, 1), /* MX35_PAD_RTS1__I2C3_SCL */
+       [369] = IMX_PIN_REG(MX35_PAD_RTS1, 0x5d4, 0x190, 3, 0x930, 1), /* MX35_PAD_RTS1__IPU_CSI_D_0 */
+       [370] = IMX_PIN_REG(MX35_PAD_RTS1, 0x5d4, 0x190, 4, 0x968, 0), /* MX35_PAD_RTS1__KPP_COL_6 */
+       [371] = IMX_PIN_REG(MX35_PAD_RTS1, 0x5d4, 0x190, 5, 0x914, 0), /* MX35_PAD_RTS1__GPIO3_8 */
+       [372] = IMX_PIN_REG(MX35_PAD_RTS1, 0x5d4, 0x190, 6, 0x0, 0), /* MX35_PAD_RTS1__EMI_NANDF_CE1 */
+       [373] = IMX_PIN_REG(MX35_PAD_RTS1, 0x5d4, 0x190, 7, 0x0, 0), /* MX35_PAD_RTS1__ARM11P_TOP_EVNTBUS_18 */
+       [374] = IMX_PIN_REG(MX35_PAD_CTS1, 0x5d8, 0x194, 0, 0x0, 0), /* MX35_PAD_CTS1__UART1_CTS */
+       [375] = IMX_PIN_REG(MX35_PAD_CTS1, 0x5d8, 0x194, 1, 0x7e4, 1), /* MX35_PAD_CTS1__CSPI2_RDY */
+       [376] = IMX_PIN_REG(MX35_PAD_CTS1, 0x5d8, 0x194, 2, 0x920, 1), /* MX35_PAD_CTS1__I2C3_SDA */
+       [377] = IMX_PIN_REG(MX35_PAD_CTS1, 0x5d8, 0x194, 3, 0x934, 1), /* MX35_PAD_CTS1__IPU_CSI_D_1 */
+       [378] = IMX_PIN_REG(MX35_PAD_CTS1, 0x5d8, 0x194, 4, 0x96c, 0), /* MX35_PAD_CTS1__KPP_COL_7 */
+       [379] = IMX_PIN_REG(MX35_PAD_CTS1, 0x5d8, 0x194, 5, 0x918, 0), /* MX35_PAD_CTS1__GPIO3_9 */
+       [380] = IMX_PIN_REG(MX35_PAD_CTS1, 0x5d8, 0x194, 6, 0x0, 0), /* MX35_PAD_CTS1__EMI_NANDF_CE2 */
+       [381] = IMX_PIN_REG(MX35_PAD_CTS1, 0x5d8, 0x194, 7, 0x0, 0), /* MX35_PAD_CTS1__ARM11P_TOP_EVNTBUS_19 */
+       [382] = IMX_PIN_REG(MX35_PAD_RXD2, 0x5dc, 0x198, 0, 0x0, 0), /* MX35_PAD_RXD2__UART2_RXD_MUX */
+       [383] = IMX_PIN_REG(MX35_PAD_RXD2, 0x5dc, 0x198, 4, 0x980, 0), /* MX35_PAD_RXD2__KPP_ROW_4 */
+       [384] = IMX_PIN_REG(MX35_PAD_RXD2, 0x5dc, 0x198, 5, 0x8ec, 0), /* MX35_PAD_RXD2__GPIO3_10 */
+       [385] = IMX_PIN_REG(MX35_PAD_TXD2, 0x5e0, 0x19c, 0, 0x0, 0), /* MX35_PAD_TXD2__UART2_TXD_MUX */
+       [386] = IMX_PIN_REG(MX35_PAD_TXD2, 0x5e0, 0x19c, 1, 0x994, 2), /* MX35_PAD_TXD2__SPDIF_SPDIF_EXTCLK */
+       [387] = IMX_PIN_REG(MX35_PAD_TXD2, 0x5e0, 0x19c, 4, 0x984, 0), /* MX35_PAD_TXD2__KPP_ROW_5 */
+       [388] = IMX_PIN_REG(MX35_PAD_TXD2, 0x5e0, 0x19c, 5, 0x8f0, 0), /* MX35_PAD_TXD2__GPIO3_11 */
+       [389] = IMX_PIN_REG(MX35_PAD_RTS2, 0x5e4, 0x1a0, 0, 0x0, 0), /* MX35_PAD_RTS2__UART2_RTS */
+       [390] = IMX_PIN_REG(MX35_PAD_RTS2, 0x5e4, 0x1a0, 1, 0x998, 1), /* MX35_PAD_RTS2__SPDIF_SPDIF_IN1 */
+       [391] = IMX_PIN_REG(MX35_PAD_RTS2, 0x5e4, 0x1a0, 2, 0x7cc, 1), /* MX35_PAD_RTS2__CAN2_RXCAN */
+       [392] = IMX_PIN_REG(MX35_PAD_RTS2, 0x5e4, 0x1a0, 3, 0x938, 1), /* MX35_PAD_RTS2__IPU_CSI_D_2 */
+       [393] = IMX_PIN_REG(MX35_PAD_RTS2, 0x5e4, 0x1a0, 4, 0x988, 0), /* MX35_PAD_RTS2__KPP_ROW_6 */
+       [394] = IMX_PIN_REG(MX35_PAD_RTS2, 0x5e4, 0x1a0, 5, 0x8f4, 0), /* MX35_PAD_RTS2__GPIO3_12 */
+       [395] = IMX_PIN_REG(MX35_PAD_RTS2, 0x5e4, 0x1a0, 6, 0x0, 0), /* MX35_PAD_RTS2__AUDMUX_AUD5_RXC */
+       [396] = IMX_PIN_REG(MX35_PAD_RTS2, 0x5e4, 0x1a0, 7, 0x9a0, 0), /* MX35_PAD_RTS2__UART3_RXD_MUX */
+       [397] = IMX_PIN_REG(MX35_PAD_CTS2, 0x5e8, 0x1a4, 0, 0x0, 0), /* MX35_PAD_CTS2__UART2_CTS */
+       [398] = IMX_PIN_REG(MX35_PAD_CTS2, 0x5e8, 0x1a4, 1, 0x0, 0), /* MX35_PAD_CTS2__SPDIF_SPDIF_OUT1 */
+       [399] = IMX_PIN_REG(MX35_PAD_CTS2, 0x5e8, 0x1a4, 2, 0x0, 0), /* MX35_PAD_CTS2__CAN2_TXCAN */
+       [400] = IMX_PIN_REG(MX35_PAD_CTS2, 0x5e8, 0x1a4, 3, 0x93c, 1), /* MX35_PAD_CTS2__IPU_CSI_D_3 */
+       [401] = IMX_PIN_REG(MX35_PAD_CTS2, 0x5e8, 0x1a4, 4, 0x98c, 0), /* MX35_PAD_CTS2__KPP_ROW_7 */
+       [402] = IMX_PIN_REG(MX35_PAD_CTS2, 0x5e8, 0x1a4, 5, 0x8f8, 0), /* MX35_PAD_CTS2__GPIO3_13 */
+       [403] = IMX_PIN_REG(MX35_PAD_CTS2, 0x5e8, 0x1a4, 6, 0x0, 0), /* MX35_PAD_CTS2__AUDMUX_AUD5_RXFS */
+       [404] = IMX_PIN_REG(MX35_PAD_CTS2, 0x5e8, 0x1a4, 7, 0x0, 0), /* MX35_PAD_CTS2__UART3_TXD_MUX */
+       [405] = IMX_PIN_REG(MX35_PAD_RTCK, 0x5ec, 0x0, 0, 0x0, 0), /* MX35_PAD_RTCK__ARM11P_TOP_RTCK */
+       [406] = IMX_PIN_REG(MX35_PAD_TCK, 0x5f0, 0x0, 0, 0x0, 0), /* MX35_PAD_TCK__SJC_TCK */
+       [407] = IMX_PIN_REG(MX35_PAD_TMS, 0x5f4, 0x0, 0, 0x0, 0), /* MX35_PAD_TMS__SJC_TMS */
+       [408] = IMX_PIN_REG(MX35_PAD_TDI, 0x5f8, 0x0, 0, 0x0, 0), /* MX35_PAD_TDI__SJC_TDI */
+       [409] = IMX_PIN_REG(MX35_PAD_TDO, 0x5fc, 0x0, 0, 0x0, 0), /* MX35_PAD_TDO__SJC_TDO */
+       [410] = IMX_PIN_REG(MX35_PAD_TRSTB, 0x600, 0x0, 0, 0x0, 0), /* MX35_PAD_TRSTB__SJC_TRSTB */
+       [411] = IMX_PIN_REG(MX35_PAD_DE_B, 0x604, 0x0, 0, 0x0, 0), /* MX35_PAD_DE_B__SJC_DE_B */
+       [412] = IMX_PIN_REG(MX35_PAD_SJC_MOD, 0x608, 0x0, 0, 0x0, 0), /* MX35_PAD_SJC_MOD__SJC_MOD */
+       [413] = IMX_PIN_REG(MX35_PAD_USBOTG_PWR, 0x60c, 0x1a8, 0, 0x0, 0), /* MX35_PAD_USBOTG_PWR__USB_TOP_USBOTG_PWR */
+       [414] = IMX_PIN_REG(MX35_PAD_USBOTG_PWR, 0x60c, 0x1a8, 1, 0x0, 0), /* MX35_PAD_USBOTG_PWR__USB_TOP_USBH2_PWR */
+       [415] = IMX_PIN_REG(MX35_PAD_USBOTG_PWR, 0x60c, 0x1a8, 5, 0x8fc, 0), /* MX35_PAD_USBOTG_PWR__GPIO3_14 */
+       [416] = IMX_PIN_REG(MX35_PAD_USBOTG_OC, 0x610, 0x1ac, 0, 0x0, 0), /* MX35_PAD_USBOTG_OC__USB_TOP_USBOTG_OC */
+       [417] = IMX_PIN_REG(MX35_PAD_USBOTG_OC, 0x610, 0x1ac, 1, 0x9f4, 1), /* MX35_PAD_USBOTG_OC__USB_TOP_USBH2_OC */
+       [418] = IMX_PIN_REG(MX35_PAD_USBOTG_OC, 0x610, 0x1ac, 5, 0x900, 0), /* MX35_PAD_USBOTG_OC__GPIO3_15 */
+       [419] = IMX_PIN_REG(MX35_PAD_LD0, 0x614, 0x1b0, 0, 0x0, 0), /* MX35_PAD_LD0__IPU_DISPB_DAT_0 */
+       [420] = IMX_PIN_REG(MX35_PAD_LD0, 0x614, 0x1b0, 5, 0x868, 1), /* MX35_PAD_LD0__GPIO2_0 */
+       [421] = IMX_PIN_REG(MX35_PAD_LD0, 0x614, 0x1b0, 6, 0x0, 0), /* MX35_PAD_LD0__SDMA_SDMA_DEBUG_PC_0 */
+       [422] = IMX_PIN_REG(MX35_PAD_LD1, 0x618, 0x1b4, 0, 0x0, 0), /* MX35_PAD_LD1__IPU_DISPB_DAT_1 */
+       [423] = IMX_PIN_REG(MX35_PAD_LD1, 0x618, 0x1b4, 5, 0x894, 0), /* MX35_PAD_LD1__GPIO2_1 */
+       [424] = IMX_PIN_REG(MX35_PAD_LD1, 0x618, 0x1b4, 6, 0x0, 0), /* MX35_PAD_LD1__SDMA_SDMA_DEBUG_PC_1 */
+       [425] = IMX_PIN_REG(MX35_PAD_LD2, 0x61c, 0x1b8, 0, 0x0, 0), /* MX35_PAD_LD2__IPU_DISPB_DAT_2 */
+       [426] = IMX_PIN_REG(MX35_PAD_LD2, 0x61c, 0x1b8, 5, 0x8c0, 0), /* MX35_PAD_LD2__GPIO2_2 */
+       [427] = IMX_PIN_REG(MX35_PAD_LD2, 0x61c, 0x1b8, 6, 0x0, 0), /* MX35_PAD_LD2__SDMA_SDMA_DEBUG_PC_2 */
+       [428] = IMX_PIN_REG(MX35_PAD_LD3, 0x620, 0x1bc, 0, 0x0, 0), /* MX35_PAD_LD3__IPU_DISPB_DAT_3 */
+       [429] = IMX_PIN_REG(MX35_PAD_LD3, 0x620, 0x1bc, 5, 0x8cc, 0), /* MX35_PAD_LD3__GPIO2_3 */
+       [430] = IMX_PIN_REG(MX35_PAD_LD3, 0x620, 0x1bc, 6, 0x0, 0), /* MX35_PAD_LD3__SDMA_SDMA_DEBUG_PC_3 */
+       [431] = IMX_PIN_REG(MX35_PAD_LD4, 0x624, 0x1c0, 0, 0x0, 0), /* MX35_PAD_LD4__IPU_DISPB_DAT_4 */
+       [432] = IMX_PIN_REG(MX35_PAD_LD4, 0x624, 0x1c0, 5, 0x8d0, 0), /* MX35_PAD_LD4__GPIO2_4 */
+       [433] = IMX_PIN_REG(MX35_PAD_LD4, 0x624, 0x1c0, 6, 0x0, 0), /* MX35_PAD_LD4__SDMA_SDMA_DEBUG_PC_4 */
+       [434] = IMX_PIN_REG(MX35_PAD_LD5, 0x628, 0x1c4, 0, 0x0, 0), /* MX35_PAD_LD5__IPU_DISPB_DAT_5 */
+       [435] = IMX_PIN_REG(MX35_PAD_LD5, 0x628, 0x1c4, 5, 0x8d4, 0), /* MX35_PAD_LD5__GPIO2_5 */
+       [436] = IMX_PIN_REG(MX35_PAD_LD5, 0x628, 0x1c4, 6, 0x0, 0), /* MX35_PAD_LD5__SDMA_SDMA_DEBUG_PC_5 */
+       [437] = IMX_PIN_REG(MX35_PAD_LD6, 0x62c, 0x1c8, 0, 0x0, 0), /* MX35_PAD_LD6__IPU_DISPB_DAT_6 */
+       [438] = IMX_PIN_REG(MX35_PAD_LD6, 0x62c, 0x1c8, 5, 0x8d8, 0), /* MX35_PAD_LD6__GPIO2_6 */
+       [439] = IMX_PIN_REG(MX35_PAD_LD6, 0x62c, 0x1c8, 6, 0x0, 0), /* MX35_PAD_LD6__SDMA_SDMA_DEBUG_PC_6 */
+       [440] = IMX_PIN_REG(MX35_PAD_LD7, 0x630, 0x1cc, 0, 0x0, 0), /* MX35_PAD_LD7__IPU_DISPB_DAT_7 */
+       [441] = IMX_PIN_REG(MX35_PAD_LD7, 0x630, 0x1cc, 5, 0x8dc, 0), /* MX35_PAD_LD7__GPIO2_7 */
+       [442] = IMX_PIN_REG(MX35_PAD_LD7, 0x630, 0x1cc, 6, 0x0, 0), /* MX35_PAD_LD7__SDMA_SDMA_DEBUG_PC_7 */
+       [443] = IMX_PIN_REG(MX35_PAD_LD8, 0x634, 0x1d0, 0, 0x0, 0), /* MX35_PAD_LD8__IPU_DISPB_DAT_8 */
+       [444] = IMX_PIN_REG(MX35_PAD_LD8, 0x634, 0x1d0, 5, 0x8e0, 0), /* MX35_PAD_LD8__GPIO2_8 */
+       [445] = IMX_PIN_REG(MX35_PAD_LD8, 0x634, 0x1d0, 6, 0x0, 0), /* MX35_PAD_LD8__SDMA_SDMA_DEBUG_PC_8 */
+       [446] = IMX_PIN_REG(MX35_PAD_LD9, 0x638, 0x1d4, 0, 0x0, 0), /* MX35_PAD_LD9__IPU_DISPB_DAT_9 */
+       [447] = IMX_PIN_REG(MX35_PAD_LD9, 0x638, 0x1d4, 5, 0x8e4, 0), /* MX35_PAD_LD9__GPIO2_9 */
+       [448] = IMX_PIN_REG(MX35_PAD_LD9, 0x638, 0x1d4, 6, 0x0, 0), /* MX35_PAD_LD9__SDMA_SDMA_DEBUG_PC_9 */
+       [449] = IMX_PIN_REG(MX35_PAD_LD10, 0x63c, 0x1d8, 0, 0x0, 0), /* MX35_PAD_LD10__IPU_DISPB_DAT_10 */
+       [450] = IMX_PIN_REG(MX35_PAD_LD10, 0x63c, 0x1d8, 5, 0x86c, 0), /* MX35_PAD_LD10__GPIO2_10 */
+       [451] = IMX_PIN_REG(MX35_PAD_LD10, 0x63c, 0x1d8, 6, 0x0, 0), /* MX35_PAD_LD10__SDMA_SDMA_DEBUG_PC_10 */
+       [452] = IMX_PIN_REG(MX35_PAD_LD11, 0x640, 0x1dc, 0, 0x0, 0), /* MX35_PAD_LD11__IPU_DISPB_DAT_11 */
+       [453] = IMX_PIN_REG(MX35_PAD_LD11, 0x640, 0x1dc, 5, 0x870, 0), /* MX35_PAD_LD11__GPIO2_11 */
+       [454] = IMX_PIN_REG(MX35_PAD_LD11, 0x640, 0x1dc, 6, 0x0, 0), /* MX35_PAD_LD11__SDMA_SDMA_DEBUG_PC_11 */
+       [455] = IMX_PIN_REG(MX35_PAD_LD11, 0x640, 0x1dc, 7, 0x0, 0), /* MX35_PAD_LD11__ARM11P_TOP_TRACE_4 */
+       [456] = IMX_PIN_REG(MX35_PAD_LD12, 0x644, 0x1e0, 0, 0x0, 0), /* MX35_PAD_LD12__IPU_DISPB_DAT_12 */
+       [457] = IMX_PIN_REG(MX35_PAD_LD12, 0x644, 0x1e0, 5, 0x874, 0), /* MX35_PAD_LD12__GPIO2_12 */
+       [458] = IMX_PIN_REG(MX35_PAD_LD12, 0x644, 0x1e0, 6, 0x0, 0), /* MX35_PAD_LD12__SDMA_SDMA_DEBUG_PC_12 */
+       [459] = IMX_PIN_REG(MX35_PAD_LD12, 0x644, 0x1e0, 7, 0x0, 0), /* MX35_PAD_LD12__ARM11P_TOP_TRACE_5 */
+       [460] = IMX_PIN_REG(MX35_PAD_LD13, 0x648, 0x1e4, 0, 0x0, 0), /* MX35_PAD_LD13__IPU_DISPB_DAT_13 */
+       [461] = IMX_PIN_REG(MX35_PAD_LD13, 0x648, 0x1e4, 5, 0x878, 0), /* MX35_PAD_LD13__GPIO2_13 */
+       [462] = IMX_PIN_REG(MX35_PAD_LD13, 0x648, 0x1e4, 6, 0x0, 0), /* MX35_PAD_LD13__SDMA_SDMA_DEBUG_PC_13 */
+       [463] = IMX_PIN_REG(MX35_PAD_LD13, 0x648, 0x1e4, 7, 0x0, 0), /* MX35_PAD_LD13__ARM11P_TOP_TRACE_6 */
+       [464] = IMX_PIN_REG(MX35_PAD_LD14, 0x64c, 0x1e8, 0, 0x0, 0), /* MX35_PAD_LD14__IPU_DISPB_DAT_14 */
+       [465] = IMX_PIN_REG(MX35_PAD_LD14, 0x64c, 0x1e8, 5, 0x87c, 0), /* MX35_PAD_LD14__GPIO2_14 */
+       [466] = IMX_PIN_REG(MX35_PAD_LD14, 0x64c, 0x1e8, 6, 0x0, 0), /* MX35_PAD_LD14__SDMA_SDMA_DEBUG_EVENT_CHANNEL_0 */
+       [467] = IMX_PIN_REG(MX35_PAD_LD14, 0x64c, 0x1e8, 7, 0x0, 0), /* MX35_PAD_LD14__ARM11P_TOP_TRACE_7 */
+       [468] = IMX_PIN_REG(MX35_PAD_LD15, 0x650, 0x1ec, 0, 0x0, 0), /* MX35_PAD_LD15__IPU_DISPB_DAT_15 */
+       [469] = IMX_PIN_REG(MX35_PAD_LD15, 0x650, 0x1ec, 5, 0x880, 0), /* MX35_PAD_LD15__GPIO2_15 */
+       [470] = IMX_PIN_REG(MX35_PAD_LD15, 0x650, 0x1ec, 6, 0x0, 0), /* MX35_PAD_LD15__SDMA_SDMA_DEBUG_EVENT_CHANNEL_1 */
+       [471] = IMX_PIN_REG(MX35_PAD_LD15, 0x650, 0x1ec, 7, 0x0, 0), /* MX35_PAD_LD15__ARM11P_TOP_TRACE_8 */
+       [472] = IMX_PIN_REG(MX35_PAD_LD16, 0x654, 0x1f0, 0, 0x0, 0), /* MX35_PAD_LD16__IPU_DISPB_DAT_16 */
+       [473] = IMX_PIN_REG(MX35_PAD_LD16, 0x654, 0x1f0, 2, 0x928, 0), /* MX35_PAD_LD16__IPU_DISPB_D12_VSYNC */
+       [474] = IMX_PIN_REG(MX35_PAD_LD16, 0x654, 0x1f0, 5, 0x884, 0), /* MX35_PAD_LD16__GPIO2_16 */
+       [475] = IMX_PIN_REG(MX35_PAD_LD16, 0x654, 0x1f0, 6, 0x0, 0), /* MX35_PAD_LD16__SDMA_SDMA_DEBUG_EVENT_CHANNEL_2 */
+       [476] = IMX_PIN_REG(MX35_PAD_LD16, 0x654, 0x1f0, 7, 0x0, 0), /* MX35_PAD_LD16__ARM11P_TOP_TRACE_9 */
+       [477] = IMX_PIN_REG(MX35_PAD_LD17, 0x658, 0x1f4, 0, 0x0, 0), /* MX35_PAD_LD17__IPU_DISPB_DAT_17 */
+       [478] = IMX_PIN_REG(MX35_PAD_LD17, 0x658, 0x1f4, 2, 0x0, 0), /* MX35_PAD_LD17__IPU_DISPB_CS2 */
+       [479] = IMX_PIN_REG(MX35_PAD_LD17, 0x658, 0x1f4, 5, 0x888, 0), /* MX35_PAD_LD17__GPIO2_17 */
+       [480] = IMX_PIN_REG(MX35_PAD_LD17, 0x658, 0x1f4, 6, 0x0, 0), /* MX35_PAD_LD17__SDMA_SDMA_DEBUG_EVENT_CHANNEL_3 */
+       [481] = IMX_PIN_REG(MX35_PAD_LD17, 0x658, 0x1f4, 7, 0x0, 0), /* MX35_PAD_LD17__ARM11P_TOP_TRACE_10 */
+       [482] = IMX_PIN_REG(MX35_PAD_LD18, 0x65c, 0x1f8, 0, 0x0, 0), /* MX35_PAD_LD18__IPU_DISPB_DAT_18 */
+       [483] = IMX_PIN_REG(MX35_PAD_LD18, 0x65c, 0x1f8, 1, 0x924, 1), /* MX35_PAD_LD18__IPU_DISPB_D0_VSYNC */
+       [484] = IMX_PIN_REG(MX35_PAD_LD18, 0x65c, 0x1f8, 2, 0x928, 1), /* MX35_PAD_LD18__IPU_DISPB_D12_VSYNC */
+       [485] = IMX_PIN_REG(MX35_PAD_LD18, 0x65c, 0x1f8, 3, 0x818, 0), /* MX35_PAD_LD18__ESDHC3_CMD */
+       [486] = IMX_PIN_REG(MX35_PAD_LD18, 0x65c, 0x1f8, 4, 0x9b0, 0), /* MX35_PAD_LD18__USB_TOP_USBOTG_DATA_3 */
+       [487] = IMX_PIN_REG(MX35_PAD_LD18, 0x65c, 0x1f8, 5, 0x0, 0), /* MX35_PAD_LD18__GPIO3_24 */
+       [488] = IMX_PIN_REG(MX35_PAD_LD18, 0x65c, 0x1f8, 6, 0x0, 0), /* MX35_PAD_LD18__SDMA_SDMA_DEBUG_EVENT_CHANNEL_4 */
+       [489] = IMX_PIN_REG(MX35_PAD_LD18, 0x65c, 0x1f8, 7, 0x0, 0), /* MX35_PAD_LD18__ARM11P_TOP_TRACE_11 */
+       [490] = IMX_PIN_REG(MX35_PAD_LD19, 0x660, 0x1fc, 0, 0x0, 0), /* MX35_PAD_LD19__IPU_DISPB_DAT_19 */
+       [491] = IMX_PIN_REG(MX35_PAD_LD19, 0x660, 0x1fc, 1, 0x0, 0), /* MX35_PAD_LD19__IPU_DISPB_BCLK */
+       [492] = IMX_PIN_REG(MX35_PAD_LD19, 0x660, 0x1fc, 2, 0x0, 0), /* MX35_PAD_LD19__IPU_DISPB_CS1 */
+       [493] = IMX_PIN_REG(MX35_PAD_LD19, 0x660, 0x1fc, 3, 0x814, 0), /* MX35_PAD_LD19__ESDHC3_CLK */
+       [494] = IMX_PIN_REG(MX35_PAD_LD19, 0x660, 0x1fc, 4, 0x9c4, 0), /* MX35_PAD_LD19__USB_TOP_USBOTG_DIR */
+       [495] = IMX_PIN_REG(MX35_PAD_LD19, 0x660, 0x1fc, 5, 0x0, 0), /* MX35_PAD_LD19__GPIO3_25 */
+       [496] = IMX_PIN_REG(MX35_PAD_LD19, 0x660, 0x1fc, 6, 0x0, 0), /* MX35_PAD_LD19__SDMA_SDMA_DEBUG_EVENT_CHANNEL_5 */
+       [497] = IMX_PIN_REG(MX35_PAD_LD19, 0x660, 0x1fc, 7, 0x0, 0), /* MX35_PAD_LD19__ARM11P_TOP_TRACE_12 */
+       [498] = IMX_PIN_REG(MX35_PAD_LD20, 0x664, 0x200, 0, 0x0, 0), /* MX35_PAD_LD20__IPU_DISPB_DAT_20 */
+       [499] = IMX_PIN_REG(MX35_PAD_LD20, 0x664, 0x200, 1, 0x0, 0), /* MX35_PAD_LD20__IPU_DISPB_CS0 */
+       [500] = IMX_PIN_REG(MX35_PAD_LD20, 0x664, 0x200, 2, 0x0, 0), /* MX35_PAD_LD20__IPU_DISPB_SD_CLK */
+       [501] = IMX_PIN_REG(MX35_PAD_LD20, 0x664, 0x200, 3, 0x81c, 0), /* MX35_PAD_LD20__ESDHC3_DAT0 */
+       [502] = IMX_PIN_REG(MX35_PAD_LD20, 0x664, 0x200, 5, 0x0, 0), /* MX35_PAD_LD20__GPIO3_26 */
+       [503] = IMX_PIN_REG(MX35_PAD_LD20, 0x664, 0x200, 6, 0x0, 0), /* MX35_PAD_LD20__SDMA_SDMA_DEBUG_CORE_STATUS_3 */
+       [504] = IMX_PIN_REG(MX35_PAD_LD20, 0x664, 0x200, 7, 0x0, 0), /* MX35_PAD_LD20__ARM11P_TOP_TRACE_13 */
+       [505] = IMX_PIN_REG(MX35_PAD_LD21, 0x668, 0x204, 0, 0x0, 0), /* MX35_PAD_LD21__IPU_DISPB_DAT_21 */
+       [506] = IMX_PIN_REG(MX35_PAD_LD21, 0x668, 0x204, 1, 0x0, 0), /* MX35_PAD_LD21__IPU_DISPB_PAR_RS */
+       [507] = IMX_PIN_REG(MX35_PAD_LD21, 0x668, 0x204, 2, 0x0, 0), /* MX35_PAD_LD21__IPU_DISPB_SER_RS */
+       [508] = IMX_PIN_REG(MX35_PAD_LD21, 0x668, 0x204, 3, 0x820, 0), /* MX35_PAD_LD21__ESDHC3_DAT1 */
+       [509] = IMX_PIN_REG(MX35_PAD_LD21, 0x668, 0x204, 4, 0x0, 0), /* MX35_PAD_LD21__USB_TOP_USBOTG_STP */
+       [510] = IMX_PIN_REG(MX35_PAD_LD21, 0x668, 0x204, 5, 0x0, 0), /* MX35_PAD_LD21__GPIO3_27 */
+       [511] = IMX_PIN_REG(MX35_PAD_LD21, 0x668, 0x204, 6, 0x0, 0), /* MX35_PAD_LD21__SDMA_DEBUG_EVENT_CHANNEL_SEL */
+       [512] = IMX_PIN_REG(MX35_PAD_LD21, 0x668, 0x204, 7, 0x0, 0), /* MX35_PAD_LD21__ARM11P_TOP_TRACE_14 */
+       [513] = IMX_PIN_REG(MX35_PAD_LD22, 0x66c, 0x208, 0, 0x0, 0), /* MX35_PAD_LD22__IPU_DISPB_DAT_22 */
+       [514] = IMX_PIN_REG(MX35_PAD_LD22, 0x66c, 0x208, 1, 0x0, 0), /* MX35_PAD_LD22__IPU_DISPB_WR */
+       [515] = IMX_PIN_REG(MX35_PAD_LD22, 0x66c, 0x208, 2, 0x92c, 0), /* MX35_PAD_LD22__IPU_DISPB_SD_D_I */
+       [516] = IMX_PIN_REG(MX35_PAD_LD22, 0x66c, 0x208, 3, 0x824, 0), /* MX35_PAD_LD22__ESDHC3_DAT2 */
+       [517] = IMX_PIN_REG(MX35_PAD_LD22, 0x66c, 0x208, 4, 0x9c8, 0), /* MX35_PAD_LD22__USB_TOP_USBOTG_NXT */
+       [518] = IMX_PIN_REG(MX35_PAD_LD22, 0x66c, 0x208, 5, 0x0, 0), /* MX35_PAD_LD22__GPIO3_28 */
+       [519] = IMX_PIN_REG(MX35_PAD_LD22, 0x66c, 0x208, 6, 0x0, 0), /* MX35_PAD_LD22__SDMA_DEBUG_BUS_ERROR */
+       [520] = IMX_PIN_REG(MX35_PAD_LD22, 0x66c, 0x208, 7, 0x0, 0), /* MX35_PAD_LD22__ARM11P_TOP_TRCTL */
+       [521] = IMX_PIN_REG(MX35_PAD_LD23, 0x670, 0x20c, 0, 0x0, 0), /* MX35_PAD_LD23__IPU_DISPB_DAT_23 */
+       [522] = IMX_PIN_REG(MX35_PAD_LD23, 0x670, 0x20c, 1, 0x0, 0), /* MX35_PAD_LD23__IPU_DISPB_RD */
+       [523] = IMX_PIN_REG(MX35_PAD_LD23, 0x670, 0x20c, 2, 0x92c, 1), /* MX35_PAD_LD23__IPU_DISPB_SD_D_IO */
+       [524] = IMX_PIN_REG(MX35_PAD_LD23, 0x670, 0x20c, 3, 0x828, 0), /* MX35_PAD_LD23__ESDHC3_DAT3 */
+       [525] = IMX_PIN_REG(MX35_PAD_LD23, 0x670, 0x20c, 4, 0x9c0, 0), /* MX35_PAD_LD23__USB_TOP_USBOTG_DATA_7 */
+       [526] = IMX_PIN_REG(MX35_PAD_LD23, 0x670, 0x20c, 5, 0x0, 0), /* MX35_PAD_LD23__GPIO3_29 */
+       [527] = IMX_PIN_REG(MX35_PAD_LD23, 0x670, 0x20c, 6, 0x0, 0), /* MX35_PAD_LD23__SDMA_DEBUG_MATCHED_DMBUS */
+       [528] = IMX_PIN_REG(MX35_PAD_LD23, 0x670, 0x20c, 7, 0x0, 0), /* MX35_PAD_LD23__ARM11P_TOP_TRCLK */
+       [529] = IMX_PIN_REG(MX35_PAD_D3_HSYNC, 0x674, 0x210, 0, 0x0, 0), /* MX35_PAD_D3_HSYNC__IPU_DISPB_D3_HSYNC */
+       [530] = IMX_PIN_REG(MX35_PAD_D3_HSYNC, 0x674, 0x210, 2, 0x92c, 2), /* MX35_PAD_D3_HSYNC__IPU_DISPB_SD_D_IO */
+       [531] = IMX_PIN_REG(MX35_PAD_D3_HSYNC, 0x674, 0x210, 5, 0x0, 0), /* MX35_PAD_D3_HSYNC__GPIO3_30 */
+       [532] = IMX_PIN_REG(MX35_PAD_D3_HSYNC, 0x674, 0x210, 6, 0x0, 0), /* MX35_PAD_D3_HSYNC__SDMA_DEBUG_RTBUFFER_WRITE */
+       [533] = IMX_PIN_REG(MX35_PAD_D3_HSYNC, 0x674, 0x210, 7, 0x0, 0), /* MX35_PAD_D3_HSYNC__ARM11P_TOP_TRACE_15 */
+       [534] = IMX_PIN_REG(MX35_PAD_D3_FPSHIFT, 0x678, 0x214, 0, 0x0, 0), /* MX35_PAD_D3_FPSHIFT__IPU_DISPB_D3_CLK */
+       [535] = IMX_PIN_REG(MX35_PAD_D3_FPSHIFT, 0x678, 0x214, 2, 0x0, 0), /* MX35_PAD_D3_FPSHIFT__IPU_DISPB_SD_CLK */
+       [536] = IMX_PIN_REG(MX35_PAD_D3_FPSHIFT, 0x678, 0x214, 5, 0x0, 0), /* MX35_PAD_D3_FPSHIFT__GPIO3_31 */
+       [537] = IMX_PIN_REG(MX35_PAD_D3_FPSHIFT, 0x678, 0x214, 6, 0x0, 0), /* MX35_PAD_D3_FPSHIFT__SDMA_SDMA_DEBUG_CORE_STATUS_0 */
+       [538] = IMX_PIN_REG(MX35_PAD_D3_FPSHIFT, 0x678, 0x214, 7, 0x0, 0), /* MX35_PAD_D3_FPSHIFT__ARM11P_TOP_TRACE_16 */
+       [539] = IMX_PIN_REG(MX35_PAD_D3_DRDY, 0x67c, 0x218, 0, 0x0, 0), /* MX35_PAD_D3_DRDY__IPU_DISPB_D3_DRDY */
+       [540] = IMX_PIN_REG(MX35_PAD_D3_DRDY, 0x67c, 0x218, 2, 0x0, 0), /* MX35_PAD_D3_DRDY__IPU_DISPB_SD_D_O */
+       [541] = IMX_PIN_REG(MX35_PAD_D3_DRDY, 0x67c, 0x218, 5, 0x82c, 2), /* MX35_PAD_D3_DRDY__GPIO1_0 */
+       [542] = IMX_PIN_REG(MX35_PAD_D3_DRDY, 0x67c, 0x218, 6, 0x0, 0), /* MX35_PAD_D3_DRDY__SDMA_SDMA_DEBUG_CORE_STATUS_1 */
+       [543] = IMX_PIN_REG(MX35_PAD_D3_DRDY, 0x67c, 0x218, 7, 0x0, 0), /* MX35_PAD_D3_DRDY__ARM11P_TOP_TRACE_17 */
+       [544] = IMX_PIN_REG(MX35_PAD_CONTRAST, 0x680, 0x21c, 0, 0x0, 0), /* MX35_PAD_CONTRAST__IPU_DISPB_CONTR */
+       [545] = IMX_PIN_REG(MX35_PAD_CONTRAST, 0x680, 0x21c, 5, 0x838, 2), /* MX35_PAD_CONTRAST__GPIO1_1 */
+       [546] = IMX_PIN_REG(MX35_PAD_CONTRAST, 0x680, 0x21c, 6, 0x0, 0), /* MX35_PAD_CONTRAST__SDMA_SDMA_DEBUG_CORE_STATUS_2 */
+       [547] = IMX_PIN_REG(MX35_PAD_CONTRAST, 0x680, 0x21c, 7, 0x0, 0), /* MX35_PAD_CONTRAST__ARM11P_TOP_TRACE_18 */
+       [548] = IMX_PIN_REG(MX35_PAD_D3_VSYNC, 0x684, 0x220, 0, 0x0, 0), /* MX35_PAD_D3_VSYNC__IPU_DISPB_D3_VSYNC */
+       [549] = IMX_PIN_REG(MX35_PAD_D3_VSYNC, 0x684, 0x220, 2, 0x0, 0), /* MX35_PAD_D3_VSYNC__IPU_DISPB_CS1 */
+       [550] = IMX_PIN_REG(MX35_PAD_D3_VSYNC, 0x684, 0x220, 5, 0x848, 1), /* MX35_PAD_D3_VSYNC__GPIO1_2 */
+       [551] = IMX_PIN_REG(MX35_PAD_D3_VSYNC, 0x684, 0x220, 6, 0x0, 0), /* MX35_PAD_D3_VSYNC__SDMA_DEBUG_YIELD */
+       [552] = IMX_PIN_REG(MX35_PAD_D3_VSYNC, 0x684, 0x220, 7, 0x0, 0), /* MX35_PAD_D3_VSYNC__ARM11P_TOP_TRACE_19 */
+       [553] = IMX_PIN_REG(MX35_PAD_D3_REV, 0x688, 0x224, 0, 0x0, 0), /* MX35_PAD_D3_REV__IPU_DISPB_D3_REV */
+       [554] = IMX_PIN_REG(MX35_PAD_D3_REV, 0x688, 0x224, 2, 0x0, 0), /* MX35_PAD_D3_REV__IPU_DISPB_SER_RS */
+       [555] = IMX_PIN_REG(MX35_PAD_D3_REV, 0x688, 0x224, 5, 0x84c, 1), /* MX35_PAD_D3_REV__GPIO1_3 */
+       [556] = IMX_PIN_REG(MX35_PAD_D3_REV, 0x688, 0x224, 6, 0x0, 0), /* MX35_PAD_D3_REV__SDMA_DEBUG_BUS_RWB */
+       [557] = IMX_PIN_REG(MX35_PAD_D3_REV, 0x688, 0x224, 7, 0x0, 0), /* MX35_PAD_D3_REV__ARM11P_TOP_TRACE_20 */
+       [558] = IMX_PIN_REG(MX35_PAD_D3_CLS, 0x68c, 0x228, 0, 0x0, 0), /* MX35_PAD_D3_CLS__IPU_DISPB_D3_CLS */
+       [559] = IMX_PIN_REG(MX35_PAD_D3_CLS, 0x68c, 0x228, 2, 0x0, 0), /* MX35_PAD_D3_CLS__IPU_DISPB_CS2 */
+       [560] = IMX_PIN_REG(MX35_PAD_D3_CLS, 0x68c, 0x228, 5, 0x850, 2), /* MX35_PAD_D3_CLS__GPIO1_4 */
+       [561] = IMX_PIN_REG(MX35_PAD_D3_CLS, 0x68c, 0x228, 6, 0x0, 0), /* MX35_PAD_D3_CLS__SDMA_DEBUG_BUS_DEVICE_0 */
+       [562] = IMX_PIN_REG(MX35_PAD_D3_CLS, 0x68c, 0x228, 7, 0x0, 0), /* MX35_PAD_D3_CLS__ARM11P_TOP_TRACE_21 */
+       [563] = IMX_PIN_REG(MX35_PAD_D3_SPL, 0x690, 0x22c, 0, 0x0, 0), /* MX35_PAD_D3_SPL__IPU_DISPB_D3_SPL */
+       [564] = IMX_PIN_REG(MX35_PAD_D3_SPL, 0x690, 0x22c, 2, 0x928, 2), /* MX35_PAD_D3_SPL__IPU_DISPB_D12_VSYNC */
+       [565] = IMX_PIN_REG(MX35_PAD_D3_SPL, 0x690, 0x22c, 5, 0x854, 2), /* MX35_PAD_D3_SPL__GPIO1_5 */
+       [566] = IMX_PIN_REG(MX35_PAD_D3_SPL, 0x690, 0x22c, 6, 0x0, 0), /* MX35_PAD_D3_SPL__SDMA_DEBUG_BUS_DEVICE_1 */
+       [567] = IMX_PIN_REG(MX35_PAD_D3_SPL, 0x690, 0x22c, 7, 0x0, 0), /* MX35_PAD_D3_SPL__ARM11P_TOP_TRACE_22 */
+       [568] = IMX_PIN_REG(MX35_PAD_SD1_CMD, 0x694, 0x230, 0, 0x0, 0), /* MX35_PAD_SD1_CMD__ESDHC1_CMD */
+       [569] = IMX_PIN_REG(MX35_PAD_SD1_CMD, 0x694, 0x230, 1, 0x0, 0), /* MX35_PAD_SD1_CMD__MSHC_SCLK */
+       [570] = IMX_PIN_REG(MX35_PAD_SD1_CMD, 0x694, 0x230, 3, 0x924, 2), /* MX35_PAD_SD1_CMD__IPU_DISPB_D0_VSYNC */
+       [571] = IMX_PIN_REG(MX35_PAD_SD1_CMD, 0x694, 0x230, 4, 0x9b4, 0), /* MX35_PAD_SD1_CMD__USB_TOP_USBOTG_DATA_4 */
+       [572] = IMX_PIN_REG(MX35_PAD_SD1_CMD, 0x694, 0x230, 5, 0x858, 2), /* MX35_PAD_SD1_CMD__GPIO1_6 */
+       [573] = IMX_PIN_REG(MX35_PAD_SD1_CMD, 0x694, 0x230, 7, 0x0, 0), /* MX35_PAD_SD1_CMD__ARM11P_TOP_TRCTL */
+       [574] = IMX_PIN_REG(MX35_PAD_SD1_CLK, 0x698, 0x234, 0, 0x0, 0), /* MX35_PAD_SD1_CLK__ESDHC1_CLK */
+       [575] = IMX_PIN_REG(MX35_PAD_SD1_CLK, 0x698, 0x234, 1, 0x0, 0), /* MX35_PAD_SD1_CLK__MSHC_BS */
+       [576] = IMX_PIN_REG(MX35_PAD_SD1_CLK, 0x698, 0x234, 3, 0x0, 0), /* MX35_PAD_SD1_CLK__IPU_DISPB_BCLK */
+       [577] = IMX_PIN_REG(MX35_PAD_SD1_CLK, 0x698, 0x234, 4, 0x9b8, 0), /* MX35_PAD_SD1_CLK__USB_TOP_USBOTG_DATA_5 */
+       [578] = IMX_PIN_REG(MX35_PAD_SD1_CLK, 0x698, 0x234, 5, 0x85c, 2), /* MX35_PAD_SD1_CLK__GPIO1_7 */
+       [579] = IMX_PIN_REG(MX35_PAD_SD1_CLK, 0x698, 0x234, 7, 0x0, 0), /* MX35_PAD_SD1_CLK__ARM11P_TOP_TRCLK */
+       [580] = IMX_PIN_REG(MX35_PAD_SD1_DATA0, 0x69c, 0x238, 0, 0x0, 0), /* MX35_PAD_SD1_DATA0__ESDHC1_DAT0 */
+       [581] = IMX_PIN_REG(MX35_PAD_SD1_DATA0, 0x69c, 0x238, 1, 0x0, 0), /* MX35_PAD_SD1_DATA0__MSHC_DATA_0 */
+       [582] = IMX_PIN_REG(MX35_PAD_SD1_DATA0, 0x69c, 0x238, 3, 0x0, 0), /* MX35_PAD_SD1_DATA0__IPU_DISPB_CS0 */
+       [583] = IMX_PIN_REG(MX35_PAD_SD1_DATA0, 0x69c, 0x238, 4, 0x9bc, 0), /* MX35_PAD_SD1_DATA0__USB_TOP_USBOTG_DATA_6 */
+       [584] = IMX_PIN_REG(MX35_PAD_SD1_DATA0, 0x69c, 0x238, 5, 0x860, 2), /* MX35_PAD_SD1_DATA0__GPIO1_8 */
+       [585] = IMX_PIN_REG(MX35_PAD_SD1_DATA0, 0x69c, 0x238, 7, 0x0, 0), /* MX35_PAD_SD1_DATA0__ARM11P_TOP_TRACE_23 */
+       [586] = IMX_PIN_REG(MX35_PAD_SD1_DATA1, 0x6a0, 0x23c, 0, 0x0, 0), /* MX35_PAD_SD1_DATA1__ESDHC1_DAT1 */
+       [587] = IMX_PIN_REG(MX35_PAD_SD1_DATA1, 0x6a0, 0x23c, 1, 0x0, 0), /* MX35_PAD_SD1_DATA1__MSHC_DATA_1 */
+       [588] = IMX_PIN_REG(MX35_PAD_SD1_DATA1, 0x6a0, 0x23c, 3, 0x0, 0), /* MX35_PAD_SD1_DATA1__IPU_DISPB_PAR_RS */
+       [589] = IMX_PIN_REG(MX35_PAD_SD1_DATA1, 0x6a0, 0x23c, 4, 0x9a4, 0), /* MX35_PAD_SD1_DATA1__USB_TOP_USBOTG_DATA_0 */
+       [590] = IMX_PIN_REG(MX35_PAD_SD1_DATA1, 0x6a0, 0x23c, 5, 0x864, 1), /* MX35_PAD_SD1_DATA1__GPIO1_9 */
+       [591] = IMX_PIN_REG(MX35_PAD_SD1_DATA1, 0x6a0, 0x23c, 7, 0x0, 0), /* MX35_PAD_SD1_DATA1__ARM11P_TOP_TRACE_24 */
+       [592] = IMX_PIN_REG(MX35_PAD_SD1_DATA2, 0x6a4, 0x240, 0, 0x0, 0), /* MX35_PAD_SD1_DATA2__ESDHC1_DAT2 */
+       [593] = IMX_PIN_REG(MX35_PAD_SD1_DATA2, 0x6a4, 0x240, 1, 0x0, 0), /* MX35_PAD_SD1_DATA2__MSHC_DATA_2 */
+       [594] = IMX_PIN_REG(MX35_PAD_SD1_DATA2, 0x6a4, 0x240, 3, 0x0, 0), /* MX35_PAD_SD1_DATA2__IPU_DISPB_WR */
+       [595] = IMX_PIN_REG(MX35_PAD_SD1_DATA2, 0x6a4, 0x240, 4, 0x9a8, 0), /* MX35_PAD_SD1_DATA2__USB_TOP_USBOTG_DATA_1 */
+       [596] = IMX_PIN_REG(MX35_PAD_SD1_DATA2, 0x6a4, 0x240, 5, 0x830, 1), /* MX35_PAD_SD1_DATA2__GPIO1_10 */
+       [597] = IMX_PIN_REG(MX35_PAD_SD1_DATA2, 0x6a4, 0x240, 7, 0x0, 0), /* MX35_PAD_SD1_DATA2__ARM11P_TOP_TRACE_25 */
+       [598] = IMX_PIN_REG(MX35_PAD_SD1_DATA3, 0x6a8, 0x244, 0, 0x0, 0), /* MX35_PAD_SD1_DATA3__ESDHC1_DAT3 */
+       [599] = IMX_PIN_REG(MX35_PAD_SD1_DATA3, 0x6a8, 0x244, 1, 0x0, 0), /* MX35_PAD_SD1_DATA3__MSHC_DATA_3 */
+       [600] = IMX_PIN_REG(MX35_PAD_SD1_DATA3, 0x6a8, 0x244, 3, 0x0, 0), /* MX35_PAD_SD1_DATA3__IPU_DISPB_RD */
+       [601] = IMX_PIN_REG(MX35_PAD_SD1_DATA3, 0x6a8, 0x244, 4, 0x9ac, 0), /* MX35_PAD_SD1_DATA3__USB_TOP_USBOTG_DATA_2 */
+       [602] = IMX_PIN_REG(MX35_PAD_SD1_DATA3, 0x6a8, 0x244, 5, 0x834, 1), /* MX35_PAD_SD1_DATA3__GPIO1_11 */
+       [603] = IMX_PIN_REG(MX35_PAD_SD1_DATA3, 0x6a8, 0x244, 7, 0x0, 0), /* MX35_PAD_SD1_DATA3__ARM11P_TOP_TRACE_26 */
+       [604] = IMX_PIN_REG(MX35_PAD_SD2_CMD, 0x6ac, 0x248, 0, 0x0, 0), /* MX35_PAD_SD2_CMD__ESDHC2_CMD */
+       [605] = IMX_PIN_REG(MX35_PAD_SD2_CMD, 0x6ac, 0x248, 1, 0x91c, 2), /* MX35_PAD_SD2_CMD__I2C3_SCL */
+       [606] = IMX_PIN_REG(MX35_PAD_SD2_CMD, 0x6ac, 0x248, 2, 0x804, 0), /* MX35_PAD_SD2_CMD__ESDHC1_DAT4 */
+       [607] = IMX_PIN_REG(MX35_PAD_SD2_CMD, 0x6ac, 0x248, 3, 0x938, 2), /* MX35_PAD_SD2_CMD__IPU_CSI_D_2 */
+       [608] = IMX_PIN_REG(MX35_PAD_SD2_CMD, 0x6ac, 0x248, 4, 0x9dc, 0), /* MX35_PAD_SD2_CMD__USB_TOP_USBH2_DATA_4 */
+       [609] = IMX_PIN_REG(MX35_PAD_SD2_CMD, 0x6ac, 0x248, 5, 0x868, 2), /* MX35_PAD_SD2_CMD__GPIO2_0 */
+       [610] = IMX_PIN_REG(MX35_PAD_SD2_CMD, 0x6ac, 0x248, 6, 0x0, 0), /* MX35_PAD_SD2_CMD__SPDIF_SPDIF_OUT1 */
+       [611] = IMX_PIN_REG(MX35_PAD_SD2_CMD, 0x6ac, 0x248, 7, 0x928, 3), /* MX35_PAD_SD2_CMD__IPU_DISPB_D12_VSYNC */
+       [612] = IMX_PIN_REG(MX35_PAD_SD2_CLK, 0x6b0, 0x24c, 0, 0x0, 0), /* MX35_PAD_SD2_CLK__ESDHC2_CLK */
+       [613] = IMX_PIN_REG(MX35_PAD_SD2_CLK, 0x6b0, 0x24c, 1, 0x920, 2), /* MX35_PAD_SD2_CLK__I2C3_SDA */
+       [614] = IMX_PIN_REG(MX35_PAD_SD2_CLK, 0x6b0, 0x24c, 2, 0x808, 0), /* MX35_PAD_SD2_CLK__ESDHC1_DAT5 */
+       [615] = IMX_PIN_REG(MX35_PAD_SD2_CLK, 0x6b0, 0x24c, 3, 0x93c, 2), /* MX35_PAD_SD2_CLK__IPU_CSI_D_3 */
+       [616] = IMX_PIN_REG(MX35_PAD_SD2_CLK, 0x6b0, 0x24c, 4, 0x9e0, 0), /* MX35_PAD_SD2_CLK__USB_TOP_USBH2_DATA_5 */
+       [617] = IMX_PIN_REG(MX35_PAD_SD2_CLK, 0x6b0, 0x24c, 5, 0x894, 1), /* MX35_PAD_SD2_CLK__GPIO2_1 */
+       [618] = IMX_PIN_REG(MX35_PAD_SD2_CLK, 0x6b0, 0x24c, 6, 0x998, 2), /* MX35_PAD_SD2_CLK__SPDIF_SPDIF_IN1 */
+       [619] = IMX_PIN_REG(MX35_PAD_SD2_CLK, 0x6b0, 0x24c, 7, 0x0, 0), /* MX35_PAD_SD2_CLK__IPU_DISPB_CS2 */
+       [620] = IMX_PIN_REG(MX35_PAD_SD2_DATA0, 0x6b4, 0x250, 0, 0x0, 0), /* MX35_PAD_SD2_DATA0__ESDHC2_DAT0 */
+       [621] = IMX_PIN_REG(MX35_PAD_SD2_DATA0, 0x6b4, 0x250, 1, 0x9a0, 1), /* MX35_PAD_SD2_DATA0__UART3_RXD_MUX */
+       [622] = IMX_PIN_REG(MX35_PAD_SD2_DATA0, 0x6b4, 0x250, 2, 0x80c, 0), /* MX35_PAD_SD2_DATA0__ESDHC1_DAT6 */
+       [623] = IMX_PIN_REG(MX35_PAD_SD2_DATA0, 0x6b4, 0x250, 3, 0x940, 1), /* MX35_PAD_SD2_DATA0__IPU_CSI_D_4 */
+       [624] = IMX_PIN_REG(MX35_PAD_SD2_DATA0, 0x6b4, 0x250, 4, 0x9e4, 0), /* MX35_PAD_SD2_DATA0__USB_TOP_USBH2_DATA_6 */
+       [625] = IMX_PIN_REG(MX35_PAD_SD2_DATA0, 0x6b4, 0x250, 5, 0x8c0, 1), /* MX35_PAD_SD2_DATA0__GPIO2_2 */
+       [626] = IMX_PIN_REG(MX35_PAD_SD2_DATA0, 0x6b4, 0x250, 6, 0x994, 3), /* MX35_PAD_SD2_DATA0__SPDIF_SPDIF_EXTCLK */
+       [627] = IMX_PIN_REG(MX35_PAD_SD2_DATA1, 0x6b8, 0x254, 0, 0x0, 0), /* MX35_PAD_SD2_DATA1__ESDHC2_DAT1 */
+       [628] = IMX_PIN_REG(MX35_PAD_SD2_DATA1, 0x6b8, 0x254, 1, 0x0, 0), /* MX35_PAD_SD2_DATA1__UART3_TXD_MUX */
+       [629] = IMX_PIN_REG(MX35_PAD_SD2_DATA1, 0x6b8, 0x254, 2, 0x810, 0), /* MX35_PAD_SD2_DATA1__ESDHC1_DAT7 */
+       [630] = IMX_PIN_REG(MX35_PAD_SD2_DATA1, 0x6b8, 0x254, 3, 0x944, 1), /* MX35_PAD_SD2_DATA1__IPU_CSI_D_5 */
+       [631] = IMX_PIN_REG(MX35_PAD_SD2_DATA1, 0x6b8, 0x254, 4, 0x9cc, 0), /* MX35_PAD_SD2_DATA1__USB_TOP_USBH2_DATA_0 */
+       [632] = IMX_PIN_REG(MX35_PAD_SD2_DATA1, 0x6b8, 0x254, 5, 0x8cc, 1), /* MX35_PAD_SD2_DATA1__GPIO2_3 */
+       [633] = IMX_PIN_REG(MX35_PAD_SD2_DATA2, 0x6bc, 0x258, 0, 0x0, 0), /* MX35_PAD_SD2_DATA2__ESDHC2_DAT2 */
+       [634] = IMX_PIN_REG(MX35_PAD_SD2_DATA2, 0x6bc, 0x258, 1, 0x99c, 0), /* MX35_PAD_SD2_DATA2__UART3_RTS */
+       [635] = IMX_PIN_REG(MX35_PAD_SD2_DATA2, 0x6bc, 0x258, 2, 0x7c8, 1), /* MX35_PAD_SD2_DATA2__CAN1_RXCAN */
+       [636] = IMX_PIN_REG(MX35_PAD_SD2_DATA2, 0x6bc, 0x258, 3, 0x948, 1), /* MX35_PAD_SD2_DATA2__IPU_CSI_D_6 */
+       [637] = IMX_PIN_REG(MX35_PAD_SD2_DATA2, 0x6bc, 0x258, 4, 0x9d0, 0), /* MX35_PAD_SD2_DATA2__USB_TOP_USBH2_DATA_1 */
+       [638] = IMX_PIN_REG(MX35_PAD_SD2_DATA2, 0x6bc, 0x258, 5, 0x8d0, 1), /* MX35_PAD_SD2_DATA2__GPIO2_4 */
+       [639] = IMX_PIN_REG(MX35_PAD_SD2_DATA3, 0x6c0, 0x25c, 0, 0x0, 0), /* MX35_PAD_SD2_DATA3__ESDHC2_DAT3 */
+       [640] = IMX_PIN_REG(MX35_PAD_SD2_DATA3, 0x6c0, 0x25c, 1, 0x0, 0), /* MX35_PAD_SD2_DATA3__UART3_CTS */
+       [641] = IMX_PIN_REG(MX35_PAD_SD2_DATA3, 0x6c0, 0x25c, 2, 0x0, 0), /* MX35_PAD_SD2_DATA3__CAN1_TXCAN */
+       [642] = IMX_PIN_REG(MX35_PAD_SD2_DATA3, 0x6c0, 0x25c, 3, 0x94c, 1), /* MX35_PAD_SD2_DATA3__IPU_CSI_D_7 */
+       [643] = IMX_PIN_REG(MX35_PAD_SD2_DATA3, 0x6c0, 0x25c, 4, 0x9d4, 0), /* MX35_PAD_SD2_DATA3__USB_TOP_USBH2_DATA_2 */
+       [644] = IMX_PIN_REG(MX35_PAD_SD2_DATA3, 0x6c0, 0x25c, 5, 0x8d4, 1), /* MX35_PAD_SD2_DATA3__GPIO2_5 */
+       [645] = IMX_PIN_REG(MX35_PAD_ATA_CS0, 0x6c4, 0x260, 0, 0x0, 0), /* MX35_PAD_ATA_CS0__ATA_CS0 */
+       [646] = IMX_PIN_REG(MX35_PAD_ATA_CS0, 0x6c4, 0x260, 1, 0x7dc, 1), /* MX35_PAD_ATA_CS0__CSPI1_SS3 */
+       [647] = IMX_PIN_REG(MX35_PAD_ATA_CS0, 0x6c4, 0x260, 3, 0x0, 0), /* MX35_PAD_ATA_CS0__IPU_DISPB_CS1 */
+       [648] = IMX_PIN_REG(MX35_PAD_ATA_CS0, 0x6c4, 0x260, 5, 0x8d8, 1), /* MX35_PAD_ATA_CS0__GPIO2_6 */
+       [649] = IMX_PIN_REG(MX35_PAD_ATA_CS0, 0x6c4, 0x260, 6, 0x0, 0), /* MX35_PAD_ATA_CS0__IPU_DIAGB_0 */
+       [650] = IMX_PIN_REG(MX35_PAD_ATA_CS0, 0x6c4, 0x260, 7, 0x0, 0), /* MX35_PAD_ATA_CS0__ARM11P_TOP_MAX1_HMASTER_0 */
+       [651] = IMX_PIN_REG(MX35_PAD_ATA_CS1, 0x6c8, 0x264, 0, 0x0, 0), /* MX35_PAD_ATA_CS1__ATA_CS1 */
+       [652] = IMX_PIN_REG(MX35_PAD_ATA_CS1, 0x6c8, 0x264, 3, 0x0, 0), /* MX35_PAD_ATA_CS1__IPU_DISPB_CS2 */
+       [653] = IMX_PIN_REG(MX35_PAD_ATA_CS1, 0x6c8, 0x264, 4, 0x7f0, 1), /* MX35_PAD_ATA_CS1__CSPI2_SS0 */
+       [654] = IMX_PIN_REG(MX35_PAD_ATA_CS1, 0x6c8, 0x264, 5, 0x8dc, 1), /* MX35_PAD_ATA_CS1__GPIO2_7 */
+       [655] = IMX_PIN_REG(MX35_PAD_ATA_CS1, 0x6c8, 0x264, 6, 0x0, 0), /* MX35_PAD_ATA_CS1__IPU_DIAGB_1 */
+       [656] = IMX_PIN_REG(MX35_PAD_ATA_CS1, 0x6c8, 0x264, 7, 0x0, 0), /* MX35_PAD_ATA_CS1__ARM11P_TOP_MAX1_HMASTER_1 */
+       [657] = IMX_PIN_REG(MX35_PAD_ATA_DIOR, 0x6cc, 0x268, 0, 0x0, 0), /* MX35_PAD_ATA_DIOR__ATA_DIOR */
+       [658] = IMX_PIN_REG(MX35_PAD_ATA_DIOR, 0x6cc, 0x268, 1, 0x81c, 1), /* MX35_PAD_ATA_DIOR__ESDHC3_DAT0 */
+       [659] = IMX_PIN_REG(MX35_PAD_ATA_DIOR, 0x6cc, 0x268, 2, 0x9c4, 1), /* MX35_PAD_ATA_DIOR__USB_TOP_USBOTG_DIR */
+       [660] = IMX_PIN_REG(MX35_PAD_ATA_DIOR, 0x6cc, 0x268, 3, 0x0, 0), /* MX35_PAD_ATA_DIOR__IPU_DISPB_BE0 */
+       [661] = IMX_PIN_REG(MX35_PAD_ATA_DIOR, 0x6cc, 0x268, 4, 0x7f4, 1), /* MX35_PAD_ATA_DIOR__CSPI2_SS1 */
+       [662] = IMX_PIN_REG(MX35_PAD_ATA_DIOR, 0x6cc, 0x268, 5, 0x8e0, 1), /* MX35_PAD_ATA_DIOR__GPIO2_8 */
+       [663] = IMX_PIN_REG(MX35_PAD_ATA_DIOR, 0x6cc, 0x268, 6, 0x0, 0), /* MX35_PAD_ATA_DIOR__IPU_DIAGB_2 */
+       [664] = IMX_PIN_REG(MX35_PAD_ATA_DIOR, 0x6cc, 0x268, 7, 0x0, 0), /* MX35_PAD_ATA_DIOR__ARM11P_TOP_MAX1_HMASTER_2 */
+       [665] = IMX_PIN_REG(MX35_PAD_ATA_DIOW, 0x6d0, 0x26c, 0, 0x0, 0), /* MX35_PAD_ATA_DIOW__ATA_DIOW */
+       [666] = IMX_PIN_REG(MX35_PAD_ATA_DIOW, 0x6d0, 0x26c, 1, 0x820, 1), /* MX35_PAD_ATA_DIOW__ESDHC3_DAT1 */
+       [667] = IMX_PIN_REG(MX35_PAD_ATA_DIOW, 0x6d0, 0x26c, 2, 0x0, 0), /* MX35_PAD_ATA_DIOW__USB_TOP_USBOTG_STP */
+       [668] = IMX_PIN_REG(MX35_PAD_ATA_DIOW, 0x6d0, 0x26c, 3, 0x0, 0), /* MX35_PAD_ATA_DIOW__IPU_DISPB_BE1 */
+       [669] = IMX_PIN_REG(MX35_PAD_ATA_DIOW, 0x6d0, 0x26c, 4, 0x7ec, 2), /* MX35_PAD_ATA_DIOW__CSPI2_MOSI */
+       [670] = IMX_PIN_REG(MX35_PAD_ATA_DIOW, 0x6d0, 0x26c, 5, 0x8e4, 1), /* MX35_PAD_ATA_DIOW__GPIO2_9 */
+       [671] = IMX_PIN_REG(MX35_PAD_ATA_DIOW, 0x6d0, 0x26c, 6, 0x0, 0), /* MX35_PAD_ATA_DIOW__IPU_DIAGB_3 */
+       [672] = IMX_PIN_REG(MX35_PAD_ATA_DIOW, 0x6d0, 0x26c, 7, 0x0, 0), /* MX35_PAD_ATA_DIOW__ARM11P_TOP_MAX1_HMASTER_3 */
+       [673] = IMX_PIN_REG(MX35_PAD_ATA_DMACK, 0x6d4, 0x270, 0, 0x0, 0), /* MX35_PAD_ATA_DMACK__ATA_DMACK */
+       [674] = IMX_PIN_REG(MX35_PAD_ATA_DMACK, 0x6d4, 0x270, 1, 0x824, 1), /* MX35_PAD_ATA_DMACK__ESDHC3_DAT2 */
+       [675] = IMX_PIN_REG(MX35_PAD_ATA_DMACK, 0x6d4, 0x270, 2, 0x9c8, 1), /* MX35_PAD_ATA_DMACK__USB_TOP_USBOTG_NXT */
+       [676] = IMX_PIN_REG(MX35_PAD_ATA_DMACK, 0x6d4, 0x270, 4, 0x7e8, 2), /* MX35_PAD_ATA_DMACK__CSPI2_MISO */
+       [677] = IMX_PIN_REG(MX35_PAD_ATA_DMACK, 0x6d4, 0x270, 5, 0x86c, 1), /* MX35_PAD_ATA_DMACK__GPIO2_10 */
+       [678] = IMX_PIN_REG(MX35_PAD_ATA_DMACK, 0x6d4, 0x270, 6, 0x0, 0), /* MX35_PAD_ATA_DMACK__IPU_DIAGB_4 */
+       [679] = IMX_PIN_REG(MX35_PAD_ATA_DMACK, 0x6d4, 0x270, 7, 0x0, 0), /* MX35_PAD_ATA_DMACK__ARM11P_TOP_MAX0_HMASTER_0 */
+       [680] = IMX_PIN_REG(MX35_PAD_ATA_RESET_B, 0x6d8, 0x274, 0, 0x0, 0), /* MX35_PAD_ATA_RESET_B__ATA_RESET_B */
+       [681] = IMX_PIN_REG(MX35_PAD_ATA_RESET_B, 0x6d8, 0x274, 1, 0x828, 1), /* MX35_PAD_ATA_RESET_B__ESDHC3_DAT3 */
+       [682] = IMX_PIN_REG(MX35_PAD_ATA_RESET_B, 0x6d8, 0x274, 2, 0x9a4, 1), /* MX35_PAD_ATA_RESET_B__USB_TOP_USBOTG_DATA_0 */
+       [683] = IMX_PIN_REG(MX35_PAD_ATA_RESET_B, 0x6d8, 0x274, 3, 0x0, 0), /* MX35_PAD_ATA_RESET_B__IPU_DISPB_SD_D_O */
+       [684] = IMX_PIN_REG(MX35_PAD_ATA_RESET_B, 0x6d8, 0x274, 4, 0x7e4, 2), /* MX35_PAD_ATA_RESET_B__CSPI2_RDY */
+       [685] = IMX_PIN_REG(MX35_PAD_ATA_RESET_B, 0x6d8, 0x274, 5, 0x870, 1), /* MX35_PAD_ATA_RESET_B__GPIO2_11 */
+       [686] = IMX_PIN_REG(MX35_PAD_ATA_RESET_B, 0x6d8, 0x274, 6, 0x0, 0), /* MX35_PAD_ATA_RESET_B__IPU_DIAGB_5 */
+       [687] = IMX_PIN_REG(MX35_PAD_ATA_RESET_B, 0x6d8, 0x274, 7, 0x0, 0), /* MX35_PAD_ATA_RESET_B__ARM11P_TOP_MAX0_HMASTER_1 */
+       [688] = IMX_PIN_REG(MX35_PAD_ATA_IORDY, 0x6dc, 0x278, 0, 0x0, 0), /* MX35_PAD_ATA_IORDY__ATA_IORDY */
+       [689] = IMX_PIN_REG(MX35_PAD_ATA_IORDY, 0x6dc, 0x278, 1, 0x0, 0), /* MX35_PAD_ATA_IORDY__ESDHC3_DAT4 */
+       [690] = IMX_PIN_REG(MX35_PAD_ATA_IORDY, 0x6dc, 0x278, 2, 0x9a8, 1), /* MX35_PAD_ATA_IORDY__USB_TOP_USBOTG_DATA_1 */
+       [691] = IMX_PIN_REG(MX35_PAD_ATA_IORDY, 0x6dc, 0x278, 3, 0x92c, 3), /* MX35_PAD_ATA_IORDY__IPU_DISPB_SD_D_IO */
+       [692] = IMX_PIN_REG(MX35_PAD_ATA_IORDY, 0x6dc, 0x278, 4, 0x0, 0), /* MX35_PAD_ATA_IORDY__ESDHC2_DAT4 */
+       [693] = IMX_PIN_REG(MX35_PAD_ATA_IORDY, 0x6dc, 0x278, 5, 0x874, 1), /* MX35_PAD_ATA_IORDY__GPIO2_12 */
+       [694] = IMX_PIN_REG(MX35_PAD_ATA_IORDY, 0x6dc, 0x278, 6, 0x0, 0), /* MX35_PAD_ATA_IORDY__IPU_DIAGB_6 */
+       [695] = IMX_PIN_REG(MX35_PAD_ATA_IORDY, 0x6dc, 0x278, 7, 0x0, 0), /* MX35_PAD_ATA_IORDY__ARM11P_TOP_MAX0_HMASTER_2 */
+       [696] = IMX_PIN_REG(MX35_PAD_ATA_DATA0, 0x6e0, 0x27c, 0, 0x0, 0), /* MX35_PAD_ATA_DATA0__ATA_DATA_0 */
+       [697] = IMX_PIN_REG(MX35_PAD_ATA_DATA0, 0x6e0, 0x27c, 1, 0x0, 0), /* MX35_PAD_ATA_DATA0__ESDHC3_DAT5 */
+       [698] = IMX_PIN_REG(MX35_PAD_ATA_DATA0, 0x6e0, 0x27c, 2, 0x9ac, 1), /* MX35_PAD_ATA_DATA0__USB_TOP_USBOTG_DATA_2 */
+       [699] = IMX_PIN_REG(MX35_PAD_ATA_DATA0, 0x6e0, 0x27c, 3, 0x928, 4), /* MX35_PAD_ATA_DATA0__IPU_DISPB_D12_VSYNC */
+       [700] = IMX_PIN_REG(MX35_PAD_ATA_DATA0, 0x6e0, 0x27c, 4, 0x0, 0), /* MX35_PAD_ATA_DATA0__ESDHC2_DAT5 */
+       [701] = IMX_PIN_REG(MX35_PAD_ATA_DATA0, 0x6e0, 0x27c, 5, 0x878, 1), /* MX35_PAD_ATA_DATA0__GPIO2_13 */
+       [702] = IMX_PIN_REG(MX35_PAD_ATA_DATA0, 0x6e0, 0x27c, 6, 0x0, 0), /* MX35_PAD_ATA_DATA0__IPU_DIAGB_7 */
+       [703] = IMX_PIN_REG(MX35_PAD_ATA_DATA0, 0x6e0, 0x27c, 7, 0x0, 0), /* MX35_PAD_ATA_DATA0__ARM11P_TOP_MAX0_HMASTER_3 */
+       [704] = IMX_PIN_REG(MX35_PAD_ATA_DATA1, 0x6e4, 0x280, 0, 0x0, 0), /* MX35_PAD_ATA_DATA1__ATA_DATA_1 */
+       [705] = IMX_PIN_REG(MX35_PAD_ATA_DATA1, 0x6e4, 0x280, 1, 0x0, 0), /* MX35_PAD_ATA_DATA1__ESDHC3_DAT6 */
+       [706] = IMX_PIN_REG(MX35_PAD_ATA_DATA1, 0x6e4, 0x280, 2, 0x9b0, 1), /* MX35_PAD_ATA_DATA1__USB_TOP_USBOTG_DATA_3 */
+       [707] = IMX_PIN_REG(MX35_PAD_ATA_DATA1, 0x6e4, 0x280, 3, 0x0, 0), /* MX35_PAD_ATA_DATA1__IPU_DISPB_SD_CLK */
+       [708] = IMX_PIN_REG(MX35_PAD_ATA_DATA1, 0x6e4, 0x280, 4, 0x0, 0), /* MX35_PAD_ATA_DATA1__ESDHC2_DAT6 */
+       [709] = IMX_PIN_REG(MX35_PAD_ATA_DATA1, 0x6e4, 0x280, 5, 0x87c, 1), /* MX35_PAD_ATA_DATA1__GPIO2_14 */
+       [710] = IMX_PIN_REG(MX35_PAD_ATA_DATA1, 0x6e4, 0x280, 6, 0x0, 0), /* MX35_PAD_ATA_DATA1__IPU_DIAGB_8 */
+       [711] = IMX_PIN_REG(MX35_PAD_ATA_DATA1, 0x6e4, 0x280, 7, 0x0, 0), /* MX35_PAD_ATA_DATA1__ARM11P_TOP_TRACE_27 */
+       [712] = IMX_PIN_REG(MX35_PAD_ATA_DATA2, 0x6e8, 0x284, 0, 0x0, 0), /* MX35_PAD_ATA_DATA2__ATA_DATA_2 */
+       [713] = IMX_PIN_REG(MX35_PAD_ATA_DATA2, 0x6e8, 0x284, 1, 0x0, 0), /* MX35_PAD_ATA_DATA2__ESDHC3_DAT7 */
+       [714] = IMX_PIN_REG(MX35_PAD_ATA_DATA2, 0x6e8, 0x284, 2, 0x9b4, 1), /* MX35_PAD_ATA_DATA2__USB_TOP_USBOTG_DATA_4 */
+       [715] = IMX_PIN_REG(MX35_PAD_ATA_DATA2, 0x6e8, 0x284, 3, 0x0, 0), /* MX35_PAD_ATA_DATA2__IPU_DISPB_SER_RS */
+       [716] = IMX_PIN_REG(MX35_PAD_ATA_DATA2, 0x6e8, 0x284, 4, 0x0, 0), /* MX35_PAD_ATA_DATA2__ESDHC2_DAT7 */
+       [717] = IMX_PIN_REG(MX35_PAD_ATA_DATA2, 0x6e8, 0x284, 5, 0x880, 1), /* MX35_PAD_ATA_DATA2__GPIO2_15 */
+       [718] = IMX_PIN_REG(MX35_PAD_ATA_DATA2, 0x6e8, 0x284, 6, 0x0, 0), /* MX35_PAD_ATA_DATA2__IPU_DIAGB_9 */
+       [719] = IMX_PIN_REG(MX35_PAD_ATA_DATA2, 0x6e8, 0x284, 7, 0x0, 0), /* MX35_PAD_ATA_DATA2__ARM11P_TOP_TRACE_28 */
+       [720] = IMX_PIN_REG(MX35_PAD_ATA_DATA3, 0x6ec, 0x288, 0, 0x0, 0), /* MX35_PAD_ATA_DATA3__ATA_DATA_3 */
+       [721] = IMX_PIN_REG(MX35_PAD_ATA_DATA3, 0x6ec, 0x288, 1, 0x814, 1), /* MX35_PAD_ATA_DATA3__ESDHC3_CLK */
+       [722] = IMX_PIN_REG(MX35_PAD_ATA_DATA3, 0x6ec, 0x288, 2, 0x9b8, 1), /* MX35_PAD_ATA_DATA3__USB_TOP_USBOTG_DATA_5 */
+       [723] = IMX_PIN_REG(MX35_PAD_ATA_DATA3, 0x6ec, 0x288, 4, 0x7e0, 2), /* MX35_PAD_ATA_DATA3__CSPI2_SCLK */
+       [724] = IMX_PIN_REG(MX35_PAD_ATA_DATA3, 0x6ec, 0x288, 5, 0x884, 1), /* MX35_PAD_ATA_DATA3__GPIO2_16 */
+       [725] = IMX_PIN_REG(MX35_PAD_ATA_DATA3, 0x6ec, 0x288, 6, 0x0, 0), /* MX35_PAD_ATA_DATA3__IPU_DIAGB_10 */
+       [726] = IMX_PIN_REG(MX35_PAD_ATA_DATA3, 0x6ec, 0x288, 7, 0x0, 0), /* MX35_PAD_ATA_DATA3__ARM11P_TOP_TRACE_29 */
+       [727] = IMX_PIN_REG(MX35_PAD_ATA_DATA4, 0x6f0, 0x28c, 0, 0x0, 0), /* MX35_PAD_ATA_DATA4__ATA_DATA_4 */
+       [728] = IMX_PIN_REG(MX35_PAD_ATA_DATA4, 0x6f0, 0x28c, 1, 0x818, 1), /* MX35_PAD_ATA_DATA4__ESDHC3_CMD */
+       [729] = IMX_PIN_REG(MX35_PAD_ATA_DATA4, 0x6f0, 0x28c, 2, 0x9bc, 1), /* MX35_PAD_ATA_DATA4__USB_TOP_USBOTG_DATA_6 */
+       [730] = IMX_PIN_REG(MX35_PAD_ATA_DATA4, 0x6f0, 0x28c, 5, 0x888, 1), /* MX35_PAD_ATA_DATA4__GPIO2_17 */
+       [731] = IMX_PIN_REG(MX35_PAD_ATA_DATA4, 0x6f0, 0x28c, 6, 0x0, 0), /* MX35_PAD_ATA_DATA4__IPU_DIAGB_11 */
+       [732] = IMX_PIN_REG(MX35_PAD_ATA_DATA4, 0x6f0, 0x28c, 7, 0x0, 0), /* MX35_PAD_ATA_DATA4__ARM11P_TOP_TRACE_30 */
+       [733] = IMX_PIN_REG(MX35_PAD_ATA_DATA5, 0x6f4, 0x290, 0, 0x0, 0), /* MX35_PAD_ATA_DATA5__ATA_DATA_5 */
+       [734] = IMX_PIN_REG(MX35_PAD_ATA_DATA5, 0x6f4, 0x290, 2, 0x9c0, 1), /* MX35_PAD_ATA_DATA5__USB_TOP_USBOTG_DATA_7 */
+       [735] = IMX_PIN_REG(MX35_PAD_ATA_DATA5, 0x6f4, 0x290, 5, 0x88c, 1), /* MX35_PAD_ATA_DATA5__GPIO2_18 */
+       [736] = IMX_PIN_REG(MX35_PAD_ATA_DATA5, 0x6f4, 0x290, 6, 0x0, 0), /* MX35_PAD_ATA_DATA5__IPU_DIAGB_12 */
+       [737] = IMX_PIN_REG(MX35_PAD_ATA_DATA5, 0x6f4, 0x290, 7, 0x0, 0), /* MX35_PAD_ATA_DATA5__ARM11P_TOP_TRACE_31 */
+       [738] = IMX_PIN_REG(MX35_PAD_ATA_DATA6, 0x6f8, 0x294, 0, 0x0, 0), /* MX35_PAD_ATA_DATA6__ATA_DATA_6 */
+       [739] = IMX_PIN_REG(MX35_PAD_ATA_DATA6, 0x6f8, 0x294, 1, 0x0, 0), /* MX35_PAD_ATA_DATA6__CAN1_TXCAN */
+       [740] = IMX_PIN_REG(MX35_PAD_ATA_DATA6, 0x6f8, 0x294, 2, 0x0, 0), /* MX35_PAD_ATA_DATA6__UART1_DTR */
+       [741] = IMX_PIN_REG(MX35_PAD_ATA_DATA6, 0x6f8, 0x294, 3, 0x7b4, 0), /* MX35_PAD_ATA_DATA6__AUDMUX_AUD6_TXD */
+       [742] = IMX_PIN_REG(MX35_PAD_ATA_DATA6, 0x6f8, 0x294, 5, 0x890, 1), /* MX35_PAD_ATA_DATA6__GPIO2_19 */
+       [743] = IMX_PIN_REG(MX35_PAD_ATA_DATA6, 0x6f8, 0x294, 6, 0x0, 0), /* MX35_PAD_ATA_DATA6__IPU_DIAGB_13 */
+       [744] = IMX_PIN_REG(MX35_PAD_ATA_DATA7, 0x6fc, 0x298, 0, 0x0, 0), /* MX35_PAD_ATA_DATA7__ATA_DATA_7 */
+       [745] = IMX_PIN_REG(MX35_PAD_ATA_DATA7, 0x6fc, 0x298, 1, 0x7c8, 2), /* MX35_PAD_ATA_DATA7__CAN1_RXCAN */
+       [746] = IMX_PIN_REG(MX35_PAD_ATA_DATA7, 0x6fc, 0x298, 2, 0x0, 0), /* MX35_PAD_ATA_DATA7__UART1_DSR */
+       [747] = IMX_PIN_REG(MX35_PAD_ATA_DATA7, 0x6fc, 0x298, 3, 0x7b0, 0), /* MX35_PAD_ATA_DATA7__AUDMUX_AUD6_RXD */
+       [748] = IMX_PIN_REG(MX35_PAD_ATA_DATA7, 0x6fc, 0x298, 5, 0x898, 1), /* MX35_PAD_ATA_DATA7__GPIO2_20 */
+       [749] = IMX_PIN_REG(MX35_PAD_ATA_DATA7, 0x6fc, 0x298, 6, 0x0, 0), /* MX35_PAD_ATA_DATA7__IPU_DIAGB_14 */
+       [750] = IMX_PIN_REG(MX35_PAD_ATA_DATA8, 0x700, 0x29c, 0, 0x0, 0), /* MX35_PAD_ATA_DATA8__ATA_DATA_8 */
+       [751] = IMX_PIN_REG(MX35_PAD_ATA_DATA8, 0x700, 0x29c, 1, 0x99c, 1), /* MX35_PAD_ATA_DATA8__UART3_RTS */
+       [752] = IMX_PIN_REG(MX35_PAD_ATA_DATA8, 0x700, 0x29c, 2, 0x0, 0), /* MX35_PAD_ATA_DATA8__UART1_RI */
+       [753] = IMX_PIN_REG(MX35_PAD_ATA_DATA8, 0x700, 0x29c, 3, 0x7c0, 0), /* MX35_PAD_ATA_DATA8__AUDMUX_AUD6_TXC */
+       [754] = IMX_PIN_REG(MX35_PAD_ATA_DATA8, 0x700, 0x29c, 5, 0x89c, 1), /* MX35_PAD_ATA_DATA8__GPIO2_21 */
+       [755] = IMX_PIN_REG(MX35_PAD_ATA_DATA8, 0x700, 0x29c, 6, 0x0, 0), /* MX35_PAD_ATA_DATA8__IPU_DIAGB_15 */
+       [756] = IMX_PIN_REG(MX35_PAD_ATA_DATA9, 0x704, 0x2a0, 0, 0x0, 0), /* MX35_PAD_ATA_DATA9__ATA_DATA_9 */
+       [757] = IMX_PIN_REG(MX35_PAD_ATA_DATA9, 0x704, 0x2a0, 1, 0x0, 0), /* MX35_PAD_ATA_DATA9__UART3_CTS */
+       [758] = IMX_PIN_REG(MX35_PAD_ATA_DATA9, 0x704, 0x2a0, 2, 0x0, 0), /* MX35_PAD_ATA_DATA9__UART1_DCD */
+       [759] = IMX_PIN_REG(MX35_PAD_ATA_DATA9, 0x704, 0x2a0, 3, 0x7c4, 0), /* MX35_PAD_ATA_DATA9__AUDMUX_AUD6_TXFS */
+       [760] = IMX_PIN_REG(MX35_PAD_ATA_DATA9, 0x704, 0x2a0, 5, 0x8a0, 1), /* MX35_PAD_ATA_DATA9__GPIO2_22 */
+       [761] = IMX_PIN_REG(MX35_PAD_ATA_DATA9, 0x704, 0x2a0, 6, 0x0, 0), /* MX35_PAD_ATA_DATA9__IPU_DIAGB_16 */
+       [762] = IMX_PIN_REG(MX35_PAD_ATA_DATA10, 0x708, 0x2a4, 0, 0x0, 0), /* MX35_PAD_ATA_DATA10__ATA_DATA_10 */
+       [763] = IMX_PIN_REG(MX35_PAD_ATA_DATA10, 0x708, 0x2a4, 1, 0x9a0, 2), /* MX35_PAD_ATA_DATA10__UART3_RXD_MUX */
+       [764] = IMX_PIN_REG(MX35_PAD_ATA_DATA10, 0x708, 0x2a4, 3, 0x7b8, 0), /* MX35_PAD_ATA_DATA10__AUDMUX_AUD6_RXC */
+       [765] = IMX_PIN_REG(MX35_PAD_ATA_DATA10, 0x708, 0x2a4, 5, 0x8a4, 1), /* MX35_PAD_ATA_DATA10__GPIO2_23 */
+       [766] = IMX_PIN_REG(MX35_PAD_ATA_DATA10, 0x708, 0x2a4, 6, 0x0, 0), /* MX35_PAD_ATA_DATA10__IPU_DIAGB_17 */
+       [767] = IMX_PIN_REG(MX35_PAD_ATA_DATA11, 0x70c, 0x2a8, 0, 0x0, 0), /* MX35_PAD_ATA_DATA11__ATA_DATA_11 */
+       [768] = IMX_PIN_REG(MX35_PAD_ATA_DATA11, 0x70c, 0x2a8, 1, 0x0, 0), /* MX35_PAD_ATA_DATA11__UART3_TXD_MUX */
+       [769] = IMX_PIN_REG(MX35_PAD_ATA_DATA11, 0x70c, 0x2a8, 3, 0x7bc, 0), /* MX35_PAD_ATA_DATA11__AUDMUX_AUD6_RXFS */
+       [770] = IMX_PIN_REG(MX35_PAD_ATA_DATA11, 0x70c, 0x2a8, 5, 0x8a8, 1), /* MX35_PAD_ATA_DATA11__GPIO2_24 */
+       [771] = IMX_PIN_REG(MX35_PAD_ATA_DATA11, 0x70c, 0x2a8, 6, 0x0, 0), /* MX35_PAD_ATA_DATA11__IPU_DIAGB_18 */
+       [772] = IMX_PIN_REG(MX35_PAD_ATA_DATA12, 0x710, 0x2ac, 0, 0x0, 0), /* MX35_PAD_ATA_DATA12__ATA_DATA_12 */
+       [773] = IMX_PIN_REG(MX35_PAD_ATA_DATA12, 0x710, 0x2ac, 1, 0x91c, 3), /* MX35_PAD_ATA_DATA12__I2C3_SCL */
+       [774] = IMX_PIN_REG(MX35_PAD_ATA_DATA12, 0x710, 0x2ac, 5, 0x8ac, 1), /* MX35_PAD_ATA_DATA12__GPIO2_25 */
+       [775] = IMX_PIN_REG(MX35_PAD_ATA_DATA12, 0x710, 0x2ac, 6, 0x0, 0), /* MX35_PAD_ATA_DATA12__IPU_DIAGB_19 */
+       [776] = IMX_PIN_REG(MX35_PAD_ATA_DATA13, 0x714, 0x2b0, 0, 0x0, 0), /* MX35_PAD_ATA_DATA13__ATA_DATA_13 */
+       [777] = IMX_PIN_REG(MX35_PAD_ATA_DATA13, 0x714, 0x2b0, 1, 0x920, 3), /* MX35_PAD_ATA_DATA13__I2C3_SDA */
+       [778] = IMX_PIN_REG(MX35_PAD_ATA_DATA13, 0x714, 0x2b0, 5, 0x8b0, 1), /* MX35_PAD_ATA_DATA13__GPIO2_26 */
+       [779] = IMX_PIN_REG(MX35_PAD_ATA_DATA13, 0x714, 0x2b0, 6, 0x0, 0), /* MX35_PAD_ATA_DATA13__IPU_DIAGB_20 */
+       [780] = IMX_PIN_REG(MX35_PAD_ATA_DATA14, 0x718, 0x2b4, 0, 0x0, 0), /* MX35_PAD_ATA_DATA14__ATA_DATA_14 */
+       [781] = IMX_PIN_REG(MX35_PAD_ATA_DATA14, 0x718, 0x2b4, 1, 0x930, 2), /* MX35_PAD_ATA_DATA14__IPU_CSI_D_0 */
+       [782] = IMX_PIN_REG(MX35_PAD_ATA_DATA14, 0x718, 0x2b4, 3, 0x970, 2), /* MX35_PAD_ATA_DATA14__KPP_ROW_0 */
+       [783] = IMX_PIN_REG(MX35_PAD_ATA_DATA14, 0x718, 0x2b4, 5, 0x8b4, 1), /* MX35_PAD_ATA_DATA14__GPIO2_27 */
+       [784] = IMX_PIN_REG(MX35_PAD_ATA_DATA14, 0x718, 0x2b4, 6, 0x0, 0), /* MX35_PAD_ATA_DATA14__IPU_DIAGB_21 */
+       [785] = IMX_PIN_REG(MX35_PAD_ATA_DATA15, 0x71c, 0x2b8, 0, 0x0, 0), /* MX35_PAD_ATA_DATA15__ATA_DATA_15 */
+       [786] = IMX_PIN_REG(MX35_PAD_ATA_DATA15, 0x71c, 0x2b8, 1, 0x934, 2), /* MX35_PAD_ATA_DATA15__IPU_CSI_D_1 */
+       [787] = IMX_PIN_REG(MX35_PAD_ATA_DATA15, 0x71c, 0x2b8, 3, 0x974, 2), /* MX35_PAD_ATA_DATA15__KPP_ROW_1 */
+       [788] = IMX_PIN_REG(MX35_PAD_ATA_DATA15, 0x71c, 0x2b8, 5, 0x8b8, 1), /* MX35_PAD_ATA_DATA15__GPIO2_28 */
+       [789] = IMX_PIN_REG(MX35_PAD_ATA_DATA15, 0x71c, 0x2b8, 6, 0x0, 0), /* MX35_PAD_ATA_DATA15__IPU_DIAGB_22 */
+       [790] = IMX_PIN_REG(MX35_PAD_ATA_INTRQ, 0x720, 0x2bc, 0, 0x0, 0), /* MX35_PAD_ATA_INTRQ__ATA_INTRQ */
+       [791] = IMX_PIN_REG(MX35_PAD_ATA_INTRQ, 0x720, 0x2bc, 1, 0x938, 3), /* MX35_PAD_ATA_INTRQ__IPU_CSI_D_2 */
+       [792] = IMX_PIN_REG(MX35_PAD_ATA_INTRQ, 0x720, 0x2bc, 3, 0x978, 2), /* MX35_PAD_ATA_INTRQ__KPP_ROW_2 */
+       [793] = IMX_PIN_REG(MX35_PAD_ATA_INTRQ, 0x720, 0x2bc, 5, 0x8bc, 1), /* MX35_PAD_ATA_INTRQ__GPIO2_29 */
+       [794] = IMX_PIN_REG(MX35_PAD_ATA_INTRQ, 0x720, 0x2bc, 6, 0x0, 0), /* MX35_PAD_ATA_INTRQ__IPU_DIAGB_23 */
+       [795] = IMX_PIN_REG(MX35_PAD_ATA_BUFF_EN, 0x724, 0x2c0, 0, 0x0, 0), /* MX35_PAD_ATA_BUFF_EN__ATA_BUFFER_EN */
+       [796] = IMX_PIN_REG(MX35_PAD_ATA_BUFF_EN, 0x724, 0x2c0, 1, 0x93c, 3), /* MX35_PAD_ATA_BUFF_EN__IPU_CSI_D_3 */
+       [797] = IMX_PIN_REG(MX35_PAD_ATA_BUFF_EN, 0x724, 0x2c0, 3, 0x97c, 2), /* MX35_PAD_ATA_BUFF_EN__KPP_ROW_3 */
+       [798] = IMX_PIN_REG(MX35_PAD_ATA_BUFF_EN, 0x724, 0x2c0, 5, 0x8c4, 1), /* MX35_PAD_ATA_BUFF_EN__GPIO2_30 */
+       [799] = IMX_PIN_REG(MX35_PAD_ATA_BUFF_EN, 0x724, 0x2c0, 6, 0x0, 0), /* MX35_PAD_ATA_BUFF_EN__IPU_DIAGB_24 */
+       [800] = IMX_PIN_REG(MX35_PAD_ATA_DMARQ, 0x728, 0x2c4, 0, 0x0, 0), /* MX35_PAD_ATA_DMARQ__ATA_DMARQ */
+       [801] = IMX_PIN_REG(MX35_PAD_ATA_DMARQ, 0x728, 0x2c4, 1, 0x940, 2), /* MX35_PAD_ATA_DMARQ__IPU_CSI_D_4 */
+       [802] = IMX_PIN_REG(MX35_PAD_ATA_DMARQ, 0x728, 0x2c4, 3, 0x950, 2), /* MX35_PAD_ATA_DMARQ__KPP_COL_0 */
+       [803] = IMX_PIN_REG(MX35_PAD_ATA_DMARQ, 0x728, 0x2c4, 5, 0x8c8, 1), /* MX35_PAD_ATA_DMARQ__GPIO2_31 */
+       [804] = IMX_PIN_REG(MX35_PAD_ATA_DMARQ, 0x728, 0x2c4, 6, 0x0, 0), /* MX35_PAD_ATA_DMARQ__IPU_DIAGB_25 */
+       [805] = IMX_PIN_REG(MX35_PAD_ATA_DMARQ, 0x728, 0x2c4, 7, 0x0, 0), /* MX35_PAD_ATA_DMARQ__ECT_CTI_TRIG_IN1_4 */
+       [806] = IMX_PIN_REG(MX35_PAD_ATA_DA0, 0x72c, 0x2c8, 0, 0x0, 0), /* MX35_PAD_ATA_DA0__ATA_DA_0 */
+       [807] = IMX_PIN_REG(MX35_PAD_ATA_DA0, 0x72c, 0x2c8, 1, 0x944, 2), /* MX35_PAD_ATA_DA0__IPU_CSI_D_5 */
+       [808] = IMX_PIN_REG(MX35_PAD_ATA_DA0, 0x72c, 0x2c8, 3, 0x954, 2), /* MX35_PAD_ATA_DA0__KPP_COL_1 */
+       [809] = IMX_PIN_REG(MX35_PAD_ATA_DA0, 0x72c, 0x2c8, 5, 0x8e8, 1), /* MX35_PAD_ATA_DA0__GPIO3_0 */
+       [810] = IMX_PIN_REG(MX35_PAD_ATA_DA0, 0x72c, 0x2c8, 6, 0x0, 0), /* MX35_PAD_ATA_DA0__IPU_DIAGB_26 */
+       [811] = IMX_PIN_REG(MX35_PAD_ATA_DA0, 0x72c, 0x2c8, 7, 0x0, 0), /* MX35_PAD_ATA_DA0__ECT_CTI_TRIG_IN1_5 */
+       [812] = IMX_PIN_REG(MX35_PAD_ATA_DA1, 0x730, 0x2cc, 0, 0x0, 0), /* MX35_PAD_ATA_DA1__ATA_DA_1 */
+       [813] = IMX_PIN_REG(MX35_PAD_ATA_DA1, 0x730, 0x2cc, 1, 0x948, 2), /* MX35_PAD_ATA_DA1__IPU_CSI_D_6 */
+       [814] = IMX_PIN_REG(MX35_PAD_ATA_DA1, 0x730, 0x2cc, 3, 0x958, 2), /* MX35_PAD_ATA_DA1__KPP_COL_2 */
+       [815] = IMX_PIN_REG(MX35_PAD_ATA_DA1, 0x730, 0x2cc, 5, 0x0, 0), /* MX35_PAD_ATA_DA1__GPIO3_1 */
+       [816] = IMX_PIN_REG(MX35_PAD_ATA_DA1, 0x730, 0x2cc, 6, 0x0, 0), /* MX35_PAD_ATA_DA1__IPU_DIAGB_27 */
+       [817] = IMX_PIN_REG(MX35_PAD_ATA_DA1, 0x730, 0x2cc, 7, 0x0, 0), /* MX35_PAD_ATA_DA1__ECT_CTI_TRIG_IN1_6 */
+       [818] = IMX_PIN_REG(MX35_PAD_ATA_DA2, 0x734, 0x2d0, 0, 0x0, 0), /* MX35_PAD_ATA_DA2__ATA_DA_2 */
+       [819] = IMX_PIN_REG(MX35_PAD_ATA_DA2, 0x734, 0x2d0, 1, 0x94c, 2), /* MX35_PAD_ATA_DA2__IPU_CSI_D_7 */
+       [820] = IMX_PIN_REG(MX35_PAD_ATA_DA2, 0x734, 0x2d0, 3, 0x95c, 2), /* MX35_PAD_ATA_DA2__KPP_COL_3 */
+       [821] = IMX_PIN_REG(MX35_PAD_ATA_DA2, 0x734, 0x2d0, 5, 0x0, 0), /* MX35_PAD_ATA_DA2__GPIO3_2 */
+       [822] = IMX_PIN_REG(MX35_PAD_ATA_DA2, 0x734, 0x2d0, 6, 0x0, 0), /* MX35_PAD_ATA_DA2__IPU_DIAGB_28 */
+       [823] = IMX_PIN_REG(MX35_PAD_ATA_DA2, 0x734, 0x2d0, 7, 0x0, 0), /* MX35_PAD_ATA_DA2__ECT_CTI_TRIG_IN1_7 */
+       [824] = IMX_PIN_REG(MX35_PAD_MLB_CLK, 0x738, 0x2d4, 0, 0x0, 0), /* MX35_PAD_MLB_CLK__MLB_MLBCLK */
+       [825] = IMX_PIN_REG(MX35_PAD_MLB_CLK, 0x738, 0x2d4, 5, 0x0, 0), /* MX35_PAD_MLB_CLK__GPIO3_3 */
+       [826] = IMX_PIN_REG(MX35_PAD_MLB_DAT, 0x73c, 0x2d8, 0, 0x0, 0), /* MX35_PAD_MLB_DAT__MLB_MLBDAT */
+       [827] = IMX_PIN_REG(MX35_PAD_MLB_DAT, 0x73c, 0x2d8, 5, 0x904, 1), /* MX35_PAD_MLB_DAT__GPIO3_4 */
+       [828] = IMX_PIN_REG(MX35_PAD_MLB_SIG, 0x740, 0x2dc, 0, 0x0, 0), /* MX35_PAD_MLB_SIG__MLB_MLBSIG */
+       [829] = IMX_PIN_REG(MX35_PAD_MLB_SIG, 0x740, 0x2dc, 5, 0x908, 1), /* MX35_PAD_MLB_SIG__GPIO3_5 */
+       [830] = IMX_PIN_REG(MX35_PAD_FEC_TX_CLK, 0x744, 0x2e0, 0, 0x0, 0), /* MX35_PAD_FEC_TX_CLK__FEC_TX_CLK */
+       [831] = IMX_PIN_REG(MX35_PAD_FEC_TX_CLK, 0x744, 0x2e0, 1, 0x804, 1), /* MX35_PAD_FEC_TX_CLK__ESDHC1_DAT4 */
+       [832] = IMX_PIN_REG(MX35_PAD_FEC_TX_CLK, 0x744, 0x2e0, 2, 0x9a0, 3), /* MX35_PAD_FEC_TX_CLK__UART3_RXD_MUX */
+       [833] = IMX_PIN_REG(MX35_PAD_FEC_TX_CLK, 0x744, 0x2e0, 3, 0x9ec, 1), /* MX35_PAD_FEC_TX_CLK__USB_TOP_USBH2_DIR */
+       [834] = IMX_PIN_REG(MX35_PAD_FEC_TX_CLK, 0x744, 0x2e0, 4, 0x7ec, 3), /* MX35_PAD_FEC_TX_CLK__CSPI2_MOSI */
+       [835] = IMX_PIN_REG(MX35_PAD_FEC_TX_CLK, 0x744, 0x2e0, 5, 0x90c, 1), /* MX35_PAD_FEC_TX_CLK__GPIO3_6 */
+       [836] = IMX_PIN_REG(MX35_PAD_FEC_TX_CLK, 0x744, 0x2e0, 6, 0x928, 5), /* MX35_PAD_FEC_TX_CLK__IPU_DISPB_D12_VSYNC */
+       [837] = IMX_PIN_REG(MX35_PAD_FEC_TX_CLK, 0x744, 0x2e0, 7, 0x0, 0), /* MX35_PAD_FEC_TX_CLK__ARM11P_TOP_EVNTBUS_0 */
+       [838] = IMX_PIN_REG(MX35_PAD_FEC_RX_CLK, 0x748, 0x2e4, 0, 0x0, 0), /* MX35_PAD_FEC_RX_CLK__FEC_RX_CLK */
+       [839] = IMX_PIN_REG(MX35_PAD_FEC_RX_CLK, 0x748, 0x2e4, 1, 0x808, 1), /* MX35_PAD_FEC_RX_CLK__ESDHC1_DAT5 */
+       [840] = IMX_PIN_REG(MX35_PAD_FEC_RX_CLK, 0x748, 0x2e4, 2, 0x0, 0), /* MX35_PAD_FEC_RX_CLK__UART3_TXD_MUX */
+       [841] = IMX_PIN_REG(MX35_PAD_FEC_RX_CLK, 0x748, 0x2e4, 3, 0x0, 0), /* MX35_PAD_FEC_RX_CLK__USB_TOP_USBH2_STP */
+       [842] = IMX_PIN_REG(MX35_PAD_FEC_RX_CLK, 0x748, 0x2e4, 4, 0x7e8, 3), /* MX35_PAD_FEC_RX_CLK__CSPI2_MISO */
+       [843] = IMX_PIN_REG(MX35_PAD_FEC_RX_CLK, 0x748, 0x2e4, 5, 0x910, 1), /* MX35_PAD_FEC_RX_CLK__GPIO3_7 */
+       [844] = IMX_PIN_REG(MX35_PAD_FEC_RX_CLK, 0x748, 0x2e4, 6, 0x92c, 4), /* MX35_PAD_FEC_RX_CLK__IPU_DISPB_SD_D_I */
+       [845] = IMX_PIN_REG(MX35_PAD_FEC_RX_CLK, 0x748, 0x2e4, 7, 0x0, 0), /* MX35_PAD_FEC_RX_CLK__ARM11P_TOP_EVNTBUS_1 */
+       [846] = IMX_PIN_REG(MX35_PAD_FEC_RX_DV, 0x74c, 0x2e8, 0, 0x0, 0), /* MX35_PAD_FEC_RX_DV__FEC_RX_DV */
+       [847] = IMX_PIN_REG(MX35_PAD_FEC_RX_DV, 0x74c, 0x2e8, 1, 0x80c, 1), /* MX35_PAD_FEC_RX_DV__ESDHC1_DAT6 */
+       [848] = IMX_PIN_REG(MX35_PAD_FEC_RX_DV, 0x74c, 0x2e8, 2, 0x99c, 2), /* MX35_PAD_FEC_RX_DV__UART3_RTS */
+       [849] = IMX_PIN_REG(MX35_PAD_FEC_RX_DV, 0x74c, 0x2e8, 3, 0x9f0, 1), /* MX35_PAD_FEC_RX_DV__USB_TOP_USBH2_NXT */
+       [850] = IMX_PIN_REG(MX35_PAD_FEC_RX_DV, 0x74c, 0x2e8, 4, 0x7e0, 3), /* MX35_PAD_FEC_RX_DV__CSPI2_SCLK */
+       [851] = IMX_PIN_REG(MX35_PAD_FEC_RX_DV, 0x74c, 0x2e8, 5, 0x914, 1), /* MX35_PAD_FEC_RX_DV__GPIO3_8 */
+       [852] = IMX_PIN_REG(MX35_PAD_FEC_RX_DV, 0x74c, 0x2e8, 6, 0x0, 0), /* MX35_PAD_FEC_RX_DV__IPU_DISPB_SD_CLK */
+       [853] = IMX_PIN_REG(MX35_PAD_FEC_RX_DV, 0x74c, 0x2e8, 7, 0x0, 0), /* MX35_PAD_FEC_RX_DV__ARM11P_TOP_EVNTBUS_2 */
+       [854] = IMX_PIN_REG(MX35_PAD_FEC_COL, 0x750, 0x2ec, 0, 0x0, 0), /* MX35_PAD_FEC_COL__FEC_COL */
+       [855] = IMX_PIN_REG(MX35_PAD_FEC_COL, 0x750, 0x2ec, 1, 0x810, 1), /* MX35_PAD_FEC_COL__ESDHC1_DAT7 */
+       [856] = IMX_PIN_REG(MX35_PAD_FEC_COL, 0x750, 0x2ec, 2, 0x0, 0), /* MX35_PAD_FEC_COL__UART3_CTS */
+       [857] = IMX_PIN_REG(MX35_PAD_FEC_COL, 0x750, 0x2ec, 3, 0x9cc, 1), /* MX35_PAD_FEC_COL__USB_TOP_USBH2_DATA_0 */
+       [858] = IMX_PIN_REG(MX35_PAD_FEC_COL, 0x750, 0x2ec, 4, 0x7e4, 3), /* MX35_PAD_FEC_COL__CSPI2_RDY */
+       [859] = IMX_PIN_REG(MX35_PAD_FEC_COL, 0x750, 0x2ec, 5, 0x918, 1), /* MX35_PAD_FEC_COL__GPIO3_9 */
+       [860] = IMX_PIN_REG(MX35_PAD_FEC_COL, 0x750, 0x2ec, 6, 0x0, 0), /* MX35_PAD_FEC_COL__IPU_DISPB_SER_RS */
+       [861] = IMX_PIN_REG(MX35_PAD_FEC_COL, 0x750, 0x2ec, 7, 0x0, 0), /* MX35_PAD_FEC_COL__ARM11P_TOP_EVNTBUS_3 */
+       [862] = IMX_PIN_REG(MX35_PAD_FEC_RDATA0, 0x754, 0x2f0, 0, 0x0, 0), /* MX35_PAD_FEC_RDATA0__FEC_RDATA_0 */
+       [863] = IMX_PIN_REG(MX35_PAD_FEC_RDATA0, 0x754, 0x2f0, 1, 0x0, 0), /* MX35_PAD_FEC_RDATA0__PWM_PWMO */
+       [864] = IMX_PIN_REG(MX35_PAD_FEC_RDATA0, 0x754, 0x2f0, 2, 0x0, 0), /* MX35_PAD_FEC_RDATA0__UART3_DTR */
+       [865] = IMX_PIN_REG(MX35_PAD_FEC_RDATA0, 0x754, 0x2f0, 3, 0x9d0, 1), /* MX35_PAD_FEC_RDATA0__USB_TOP_USBH2_DATA_1 */
+       [866] = IMX_PIN_REG(MX35_PAD_FEC_RDATA0, 0x754, 0x2f0, 4, 0x7f0, 2), /* MX35_PAD_FEC_RDATA0__CSPI2_SS0 */
+       [867] = IMX_PIN_REG(MX35_PAD_FEC_RDATA0, 0x754, 0x2f0, 5, 0x8ec, 1), /* MX35_PAD_FEC_RDATA0__GPIO3_10 */
+       [868] = IMX_PIN_REG(MX35_PAD_FEC_RDATA0, 0x754, 0x2f0, 6, 0x0, 0), /* MX35_PAD_FEC_RDATA0__IPU_DISPB_CS1 */
+       [869] = IMX_PIN_REG(MX35_PAD_FEC_RDATA0, 0x754, 0x2f0, 7, 0x0, 0), /* MX35_PAD_FEC_RDATA0__ARM11P_TOP_EVNTBUS_4 */
+       [870] = IMX_PIN_REG(MX35_PAD_FEC_TDATA0, 0x758, 0x2f4, 0, 0x0, 0), /* MX35_PAD_FEC_TDATA0__FEC_TDATA_0 */
+       [871] = IMX_PIN_REG(MX35_PAD_FEC_TDATA0, 0x758, 0x2f4, 1, 0x0, 0), /* MX35_PAD_FEC_TDATA0__SPDIF_SPDIF_OUT1 */
+       [872] = IMX_PIN_REG(MX35_PAD_FEC_TDATA0, 0x758, 0x2f4, 2, 0x0, 0), /* MX35_PAD_FEC_TDATA0__UART3_DSR */
+       [873] = IMX_PIN_REG(MX35_PAD_FEC_TDATA0, 0x758, 0x2f4, 3, 0x9d4, 1), /* MX35_PAD_FEC_TDATA0__USB_TOP_USBH2_DATA_2 */
+       [874] = IMX_PIN_REG(MX35_PAD_FEC_TDATA0, 0x758, 0x2f4, 4, 0x7f4, 2), /* MX35_PAD_FEC_TDATA0__CSPI2_SS1 */
+       [875] = IMX_PIN_REG(MX35_PAD_FEC_TDATA0, 0x758, 0x2f4, 5, 0x8f0, 1), /* MX35_PAD_FEC_TDATA0__GPIO3_11 */
+       [876] = IMX_PIN_REG(MX35_PAD_FEC_TDATA0, 0x758, 0x2f4, 6, 0x0, 0), /* MX35_PAD_FEC_TDATA0__IPU_DISPB_CS0 */
+       [877] = IMX_PIN_REG(MX35_PAD_FEC_TDATA0, 0x758, 0x2f4, 7, 0x0, 0), /* MX35_PAD_FEC_TDATA0__ARM11P_TOP_EVNTBUS_5 */
+       [878] = IMX_PIN_REG(MX35_PAD_FEC_TX_EN, 0x75c, 0x2f8, 0, 0x0, 0), /* MX35_PAD_FEC_TX_EN__FEC_TX_EN */
+       [879] = IMX_PIN_REG(MX35_PAD_FEC_TX_EN, 0x75c, 0x2f8, 1, 0x998, 3), /* MX35_PAD_FEC_TX_EN__SPDIF_SPDIF_IN1 */
+       [880] = IMX_PIN_REG(MX35_PAD_FEC_TX_EN, 0x75c, 0x2f8, 2, 0x0, 0), /* MX35_PAD_FEC_TX_EN__UART3_RI */
+       [881] = IMX_PIN_REG(MX35_PAD_FEC_TX_EN, 0x75c, 0x2f8, 3, 0x9d8, 1), /* MX35_PAD_FEC_TX_EN__USB_TOP_USBH2_DATA_3 */
+       [882] = IMX_PIN_REG(MX35_PAD_FEC_TX_EN, 0x75c, 0x2f8, 5, 0x8f4, 1), /* MX35_PAD_FEC_TX_EN__GPIO3_12 */
+       [883] = IMX_PIN_REG(MX35_PAD_FEC_TX_EN, 0x75c, 0x2f8, 6, 0x0, 0), /* MX35_PAD_FEC_TX_EN__IPU_DISPB_PAR_RS */
+       [884] = IMX_PIN_REG(MX35_PAD_FEC_TX_EN, 0x75c, 0x2f8, 7, 0x0, 0), /* MX35_PAD_FEC_TX_EN__ARM11P_TOP_EVNTBUS_6 */
+       [885] = IMX_PIN_REG(MX35_PAD_FEC_MDC, 0x760, 0x2fc, 0, 0x0, 0), /* MX35_PAD_FEC_MDC__FEC_MDC */
+       [886] = IMX_PIN_REG(MX35_PAD_FEC_MDC, 0x760, 0x2fc, 1, 0x0, 0), /* MX35_PAD_FEC_MDC__CAN2_TXCAN */
+       [887] = IMX_PIN_REG(MX35_PAD_FEC_MDC, 0x760, 0x2fc, 2, 0x0, 0), /* MX35_PAD_FEC_MDC__UART3_DCD */
+       [888] = IMX_PIN_REG(MX35_PAD_FEC_MDC, 0x760, 0x2fc, 3, 0x9dc, 1), /* MX35_PAD_FEC_MDC__USB_TOP_USBH2_DATA_4 */
+       [889] = IMX_PIN_REG(MX35_PAD_FEC_MDC, 0x760, 0x2fc, 5, 0x8f8, 1), /* MX35_PAD_FEC_MDC__GPIO3_13 */
+       [890] = IMX_PIN_REG(MX35_PAD_FEC_MDC, 0x760, 0x2fc, 6, 0x0, 0), /* MX35_PAD_FEC_MDC__IPU_DISPB_WR */
+       [891] = IMX_PIN_REG(MX35_PAD_FEC_MDC, 0x760, 0x2fc, 7, 0x0, 0), /* MX35_PAD_FEC_MDC__ARM11P_TOP_EVNTBUS_7 */
+       [892] = IMX_PIN_REG(MX35_PAD_FEC_MDIO, 0x764, 0x300, 0, 0x0, 0), /* MX35_PAD_FEC_MDIO__FEC_MDIO */
+       [893] = IMX_PIN_REG(MX35_PAD_FEC_MDIO, 0x764, 0x300, 1, 0x7cc, 2), /* MX35_PAD_FEC_MDIO__CAN2_RXCAN */
+       [894] = IMX_PIN_REG(MX35_PAD_FEC_MDIO, 0x764, 0x300, 3, 0x9e0, 1), /* MX35_PAD_FEC_MDIO__USB_TOP_USBH2_DATA_5 */
+       [895] = IMX_PIN_REG(MX35_PAD_FEC_MDIO, 0x764, 0x300, 5, 0x8fc, 1), /* MX35_PAD_FEC_MDIO__GPIO3_14 */
+       [896] = IMX_PIN_REG(MX35_PAD_FEC_MDIO, 0x764, 0x300, 6, 0x0, 0), /* MX35_PAD_FEC_MDIO__IPU_DISPB_RD */
+       [897] = IMX_PIN_REG(MX35_PAD_FEC_MDIO, 0x764, 0x300, 7, 0x0, 0), /* MX35_PAD_FEC_MDIO__ARM11P_TOP_EVNTBUS_8 */
+       [898] = IMX_PIN_REG(MX35_PAD_FEC_TX_ERR, 0x768, 0x304, 0, 0x0, 0), /* MX35_PAD_FEC_TX_ERR__FEC_TX_ERR */
+       [899] = IMX_PIN_REG(MX35_PAD_FEC_TX_ERR, 0x768, 0x304, 1, 0x990, 2), /* MX35_PAD_FEC_TX_ERR__OWIRE_LINE */
+       [900] = IMX_PIN_REG(MX35_PAD_FEC_TX_ERR, 0x768, 0x304, 2, 0x994, 4), /* MX35_PAD_FEC_TX_ERR__SPDIF_SPDIF_EXTCLK */
+       [901] = IMX_PIN_REG(MX35_PAD_FEC_TX_ERR, 0x768, 0x304, 3, 0x9e4, 1), /* MX35_PAD_FEC_TX_ERR__USB_TOP_USBH2_DATA_6 */
+       [902] = IMX_PIN_REG(MX35_PAD_FEC_TX_ERR, 0x768, 0x304, 5, 0x900, 1), /* MX35_PAD_FEC_TX_ERR__GPIO3_15 */
+       [903] = IMX_PIN_REG(MX35_PAD_FEC_TX_ERR, 0x768, 0x304, 6, 0x924, 3), /* MX35_PAD_FEC_TX_ERR__IPU_DISPB_D0_VSYNC */
+       [904] = IMX_PIN_REG(MX35_PAD_FEC_TX_ERR, 0x768, 0x304, 7, 0x0, 0), /* MX35_PAD_FEC_TX_ERR__ARM11P_TOP_EVNTBUS_9 */
+       [905] = IMX_PIN_REG(MX35_PAD_FEC_RX_ERR, 0x76c, 0x308, 0, 0x0, 0), /* MX35_PAD_FEC_RX_ERR__FEC_RX_ERR */
+       [906] = IMX_PIN_REG(MX35_PAD_FEC_RX_ERR, 0x76c, 0x308, 1, 0x930, 3), /* MX35_PAD_FEC_RX_ERR__IPU_CSI_D_0 */
+       [907] = IMX_PIN_REG(MX35_PAD_FEC_RX_ERR, 0x76c, 0x308, 3, 0x9e8, 1), /* MX35_PAD_FEC_RX_ERR__USB_TOP_USBH2_DATA_7 */
+       [908] = IMX_PIN_REG(MX35_PAD_FEC_RX_ERR, 0x76c, 0x308, 4, 0x960, 1), /* MX35_PAD_FEC_RX_ERR__KPP_COL_4 */
+       [909] = IMX_PIN_REG(MX35_PAD_FEC_RX_ERR, 0x76c, 0x308, 5, 0x0, 0), /* MX35_PAD_FEC_RX_ERR__GPIO3_16 */
+       [910] = IMX_PIN_REG(MX35_PAD_FEC_RX_ERR, 0x76c, 0x308, 6, 0x92c, 5), /* MX35_PAD_FEC_RX_ERR__IPU_DISPB_SD_D_IO */
+       [911] = IMX_PIN_REG(MX35_PAD_FEC_CRS, 0x770, 0x30c, 0, 0x0, 0), /* MX35_PAD_FEC_CRS__FEC_CRS */
+       [912] = IMX_PIN_REG(MX35_PAD_FEC_CRS, 0x770, 0x30c, 1, 0x934, 3), /* MX35_PAD_FEC_CRS__IPU_CSI_D_1 */
+       [913] = IMX_PIN_REG(MX35_PAD_FEC_CRS, 0x770, 0x30c, 3, 0x0, 0), /* MX35_PAD_FEC_CRS__USB_TOP_USBH2_PWR */
+       [914] = IMX_PIN_REG(MX35_PAD_FEC_CRS, 0x770, 0x30c, 4, 0x964, 1), /* MX35_PAD_FEC_CRS__KPP_COL_5 */
+       [915] = IMX_PIN_REG(MX35_PAD_FEC_CRS, 0x770, 0x30c, 5, 0x0, 0), /* MX35_PAD_FEC_CRS__GPIO3_17 */
+       [916] = IMX_PIN_REG(MX35_PAD_FEC_CRS, 0x770, 0x30c, 6, 0x0, 0), /* MX35_PAD_FEC_CRS__IPU_FLASH_STROBE */
+       [917] = IMX_PIN_REG(MX35_PAD_FEC_RDATA1, 0x774, 0x310, 0, 0x0, 0), /* MX35_PAD_FEC_RDATA1__FEC_RDATA_1 */
+       [918] = IMX_PIN_REG(MX35_PAD_FEC_RDATA1, 0x774, 0x310, 1, 0x938, 4), /* MX35_PAD_FEC_RDATA1__IPU_CSI_D_2 */
+       [919] = IMX_PIN_REG(MX35_PAD_FEC_RDATA1, 0x774, 0x310, 2, 0x0, 0), /* MX35_PAD_FEC_RDATA1__AUDMUX_AUD6_RXC */
+       [920] = IMX_PIN_REG(MX35_PAD_FEC_RDATA1, 0x774, 0x310, 3, 0x9f4, 2), /* MX35_PAD_FEC_RDATA1__USB_TOP_USBH2_OC */
+       [921] = IMX_PIN_REG(MX35_PAD_FEC_RDATA1, 0x774, 0x310, 4, 0x968, 1), /* MX35_PAD_FEC_RDATA1__KPP_COL_6 */
+       [922] = IMX_PIN_REG(MX35_PAD_FEC_RDATA1, 0x774, 0x310, 5, 0x0, 0), /* MX35_PAD_FEC_RDATA1__GPIO3_18 */
+       [923] = IMX_PIN_REG(MX35_PAD_FEC_RDATA1, 0x774, 0x310, 6, 0x0, 0), /* MX35_PAD_FEC_RDATA1__IPU_DISPB_BE0 */
+       [924] = IMX_PIN_REG(MX35_PAD_FEC_TDATA1, 0x778, 0x314, 0, 0x0, 0), /* MX35_PAD_FEC_TDATA1__FEC_TDATA_1 */
+       [925] = IMX_PIN_REG(MX35_PAD_FEC_TDATA1, 0x778, 0x314, 1, 0x93c, 4), /* MX35_PAD_FEC_TDATA1__IPU_CSI_D_3 */
+       [926] = IMX_PIN_REG(MX35_PAD_FEC_TDATA1, 0x778, 0x314, 2, 0x7bc, 1), /* MX35_PAD_FEC_TDATA1__AUDMUX_AUD6_RXFS */
+       [927] = IMX_PIN_REG(MX35_PAD_FEC_TDATA1, 0x778, 0x314, 4, 0x96c, 1), /* MX35_PAD_FEC_TDATA1__KPP_COL_7 */
+       [928] = IMX_PIN_REG(MX35_PAD_FEC_TDATA1, 0x778, 0x314, 5, 0x0, 0), /* MX35_PAD_FEC_TDATA1__GPIO3_19 */
+       [929] = IMX_PIN_REG(MX35_PAD_FEC_TDATA1, 0x778, 0x314, 6, 0x0, 0), /* MX35_PAD_FEC_TDATA1__IPU_DISPB_BE1 */
+       [930] = IMX_PIN_REG(MX35_PAD_FEC_RDATA2, 0x77c, 0x318, 0, 0x0, 0), /* MX35_PAD_FEC_RDATA2__FEC_RDATA_2 */
+       [931] = IMX_PIN_REG(MX35_PAD_FEC_RDATA2, 0x77c, 0x318, 1, 0x940, 3), /* MX35_PAD_FEC_RDATA2__IPU_CSI_D_4 */
+       [932] = IMX_PIN_REG(MX35_PAD_FEC_RDATA2, 0x77c, 0x318, 2, 0x7b4, 1), /* MX35_PAD_FEC_RDATA2__AUDMUX_AUD6_TXD */
+       [933] = IMX_PIN_REG(MX35_PAD_FEC_RDATA2, 0x77c, 0x318, 4, 0x980, 1), /* MX35_PAD_FEC_RDATA2__KPP_ROW_4 */
+       [934] = IMX_PIN_REG(MX35_PAD_FEC_RDATA2, 0x77c, 0x318, 5, 0x0, 0), /* MX35_PAD_FEC_RDATA2__GPIO3_20 */
+       [935] = IMX_PIN_REG(MX35_PAD_FEC_TDATA2, 0x780, 0x31c, 0, 0x0, 0), /* MX35_PAD_FEC_TDATA2__FEC_TDATA_2 */
+       [936] = IMX_PIN_REG(MX35_PAD_FEC_TDATA2, 0x780, 0x31c, 1, 0x944, 3), /* MX35_PAD_FEC_TDATA2__IPU_CSI_D_5 */
+       [937] = IMX_PIN_REG(MX35_PAD_FEC_TDATA2, 0x780, 0x31c, 2, 0x7b0, 1), /* MX35_PAD_FEC_TDATA2__AUDMUX_AUD6_RXD */
+       [938] = IMX_PIN_REG(MX35_PAD_FEC_TDATA2, 0x780, 0x31c, 4, 0x984, 1), /* MX35_PAD_FEC_TDATA2__KPP_ROW_5 */
+       [939] = IMX_PIN_REG(MX35_PAD_FEC_TDATA2, 0x780, 0x31c, 5, 0x0, 0), /* MX35_PAD_FEC_TDATA2__GPIO3_21 */
+       [940] = IMX_PIN_REG(MX35_PAD_FEC_RDATA3, 0x784, 0x320, 0, 0x0, 0), /* MX35_PAD_FEC_RDATA3__FEC_RDATA_3 */
+       [941] = IMX_PIN_REG(MX35_PAD_FEC_RDATA3, 0x784, 0x320, 1, 0x948, 3), /* MX35_PAD_FEC_RDATA3__IPU_CSI_D_6 */
+       [942] = IMX_PIN_REG(MX35_PAD_FEC_RDATA3, 0x784, 0x320, 2, 0x7c0, 1), /* MX35_PAD_FEC_RDATA3__AUDMUX_AUD6_TXC */
+       [943] = IMX_PIN_REG(MX35_PAD_FEC_RDATA3, 0x784, 0x320, 4, 0x988, 1), /* MX35_PAD_FEC_RDATA3__KPP_ROW_6 */
+       [944] = IMX_PIN_REG(MX35_PAD_FEC_RDATA3, 0x784, 0x320, 6, 0x0, 0), /* MX35_PAD_FEC_RDATA3__GPIO3_22 */
+       [945] = IMX_PIN_REG(MX35_PAD_FEC_TDATA3, 0x788, 0x324, 0, 0x0, 0), /* MX35_PAD_FEC_TDATA3__FEC_TDATA_3 */
+       [946] = IMX_PIN_REG(MX35_PAD_FEC_TDATA3, 0x788, 0x324, 1, 0x94c, 3), /* MX35_PAD_FEC_TDATA3__IPU_CSI_D_7 */
+       [947] = IMX_PIN_REG(MX35_PAD_FEC_TDATA3, 0x788, 0x324, 2, 0x7c4, 1), /* MX35_PAD_FEC_TDATA3__AUDMUX_AUD6_TXFS */
+       [948] = IMX_PIN_REG(MX35_PAD_FEC_TDATA3, 0x788, 0x324, 4, 0x98c, 1), /* MX35_PAD_FEC_TDATA3__KPP_ROW_7 */
+       [949] = IMX_PIN_REG(MX35_PAD_FEC_TDATA3, 0x788, 0x324, 5, 0x0, 0), /* MX35_PAD_FEC_TDATA3__GPIO3_23 */
+       [950] = IMX_PIN_REG(MX35_PAD_EXT_ARMCLK, 0x78c, 0x0, 0, 0x0, 0), /* MX35_PAD_EXT_ARMCLK__CCM_EXT_ARMCLK */
+       [951] = IMX_PIN_REG(MX35_PAD_TEST_MODE, 0x790, 0x0, 0, 0x0, 0), /* MX35_PAD_TEST_MODE__TCU_TEST_MODE */
+};
+
+/*
+ * Pad names for the pinmux subsystem.
+ * One pinctrl_pin_desc per i.MX35 pad: IMX_PINCTRL_PIN() turns each
+ * MX35_PAD_* identifier into a pin descriptor (presumably pin number =
+ * enum value, name = stringified identifier -- confirm against the
+ * IMX_PINCTRL_PIN macro in pinctrl-imx.h).
+ */
+static const struct pinctrl_pin_desc imx35_pinctrl_pads[] = {
+       IMX_PINCTRL_PIN(MX35_PAD_CAPTURE),
+       IMX_PINCTRL_PIN(MX35_PAD_COMPARE),
+       IMX_PINCTRL_PIN(MX35_PAD_WDOG_RST),
+       IMX_PINCTRL_PIN(MX35_PAD_GPIO1_0),
+       IMX_PINCTRL_PIN(MX35_PAD_GPIO1_1),
+       IMX_PINCTRL_PIN(MX35_PAD_GPIO2_0),
+       IMX_PINCTRL_PIN(MX35_PAD_GPIO3_0),
+       IMX_PINCTRL_PIN(MX35_PAD_RESET_IN_B),
+       IMX_PINCTRL_PIN(MX35_PAD_POR_B),
+       IMX_PINCTRL_PIN(MX35_PAD_CLKO),
+       IMX_PINCTRL_PIN(MX35_PAD_BOOT_MODE0),
+       IMX_PINCTRL_PIN(MX35_PAD_BOOT_MODE1),
+       IMX_PINCTRL_PIN(MX35_PAD_CLK_MODE0),
+       IMX_PINCTRL_PIN(MX35_PAD_CLK_MODE1),
+       IMX_PINCTRL_PIN(MX35_PAD_POWER_FAIL),
+       IMX_PINCTRL_PIN(MX35_PAD_VSTBY),
+       IMX_PINCTRL_PIN(MX35_PAD_A0),
+       IMX_PINCTRL_PIN(MX35_PAD_A1),
+       IMX_PINCTRL_PIN(MX35_PAD_A2),
+       IMX_PINCTRL_PIN(MX35_PAD_A3),
+       IMX_PINCTRL_PIN(MX35_PAD_A4),
+       IMX_PINCTRL_PIN(MX35_PAD_A5),
+       IMX_PINCTRL_PIN(MX35_PAD_A6),
+       IMX_PINCTRL_PIN(MX35_PAD_A7),
+       IMX_PINCTRL_PIN(MX35_PAD_A8),
+       IMX_PINCTRL_PIN(MX35_PAD_A9),
+       IMX_PINCTRL_PIN(MX35_PAD_A10),
+       IMX_PINCTRL_PIN(MX35_PAD_MA10),
+       IMX_PINCTRL_PIN(MX35_PAD_A11),
+       IMX_PINCTRL_PIN(MX35_PAD_A12),
+       IMX_PINCTRL_PIN(MX35_PAD_A13),
+       IMX_PINCTRL_PIN(MX35_PAD_A14),
+       IMX_PINCTRL_PIN(MX35_PAD_A15),
+       IMX_PINCTRL_PIN(MX35_PAD_A16),
+       IMX_PINCTRL_PIN(MX35_PAD_A17),
+       IMX_PINCTRL_PIN(MX35_PAD_A18),
+       IMX_PINCTRL_PIN(MX35_PAD_A19),
+       IMX_PINCTRL_PIN(MX35_PAD_A20),
+       IMX_PINCTRL_PIN(MX35_PAD_A21),
+       IMX_PINCTRL_PIN(MX35_PAD_A22),
+       IMX_PINCTRL_PIN(MX35_PAD_A23),
+       IMX_PINCTRL_PIN(MX35_PAD_A24),
+       IMX_PINCTRL_PIN(MX35_PAD_A25),
+       IMX_PINCTRL_PIN(MX35_PAD_SDBA1),
+       IMX_PINCTRL_PIN(MX35_PAD_SDBA0),
+       IMX_PINCTRL_PIN(MX35_PAD_SD0),
+       IMX_PINCTRL_PIN(MX35_PAD_SD1),
+       IMX_PINCTRL_PIN(MX35_PAD_SD2),
+       IMX_PINCTRL_PIN(MX35_PAD_SD3),
+       IMX_PINCTRL_PIN(MX35_PAD_SD4),
+       IMX_PINCTRL_PIN(MX35_PAD_SD5),
+       IMX_PINCTRL_PIN(MX35_PAD_SD6),
+       IMX_PINCTRL_PIN(MX35_PAD_SD7),
+       IMX_PINCTRL_PIN(MX35_PAD_SD8),
+       IMX_PINCTRL_PIN(MX35_PAD_SD9),
+       IMX_PINCTRL_PIN(MX35_PAD_SD10),
+       IMX_PINCTRL_PIN(MX35_PAD_SD11),
+       IMX_PINCTRL_PIN(MX35_PAD_SD12),
+       IMX_PINCTRL_PIN(MX35_PAD_SD13),
+       IMX_PINCTRL_PIN(MX35_PAD_SD14),
+       IMX_PINCTRL_PIN(MX35_PAD_SD15),
+       IMX_PINCTRL_PIN(MX35_PAD_SD16),
+       IMX_PINCTRL_PIN(MX35_PAD_SD17),
+       IMX_PINCTRL_PIN(MX35_PAD_SD18),
+       IMX_PINCTRL_PIN(MX35_PAD_SD19),
+       IMX_PINCTRL_PIN(MX35_PAD_SD20),
+       IMX_PINCTRL_PIN(MX35_PAD_SD21),
+       IMX_PINCTRL_PIN(MX35_PAD_SD22),
+       IMX_PINCTRL_PIN(MX35_PAD_SD23),
+       IMX_PINCTRL_PIN(MX35_PAD_SD24),
+       IMX_PINCTRL_PIN(MX35_PAD_SD25),
+       IMX_PINCTRL_PIN(MX35_PAD_SD26),
+       IMX_PINCTRL_PIN(MX35_PAD_SD27),
+       IMX_PINCTRL_PIN(MX35_PAD_SD28),
+       IMX_PINCTRL_PIN(MX35_PAD_SD29),
+       IMX_PINCTRL_PIN(MX35_PAD_SD30),
+       IMX_PINCTRL_PIN(MX35_PAD_SD31),
+       IMX_PINCTRL_PIN(MX35_PAD_DQM0),
+       IMX_PINCTRL_PIN(MX35_PAD_DQM1),
+       IMX_PINCTRL_PIN(MX35_PAD_DQM2),
+       IMX_PINCTRL_PIN(MX35_PAD_DQM3),
+       IMX_PINCTRL_PIN(MX35_PAD_EB0),
+       IMX_PINCTRL_PIN(MX35_PAD_EB1),
+       IMX_PINCTRL_PIN(MX35_PAD_OE),
+       IMX_PINCTRL_PIN(MX35_PAD_CS0),
+       IMX_PINCTRL_PIN(MX35_PAD_CS1),
+       IMX_PINCTRL_PIN(MX35_PAD_CS2),
+       IMX_PINCTRL_PIN(MX35_PAD_CS3),
+       IMX_PINCTRL_PIN(MX35_PAD_CS4),
+       IMX_PINCTRL_PIN(MX35_PAD_CS5),
+       IMX_PINCTRL_PIN(MX35_PAD_NF_CE0),
+       IMX_PINCTRL_PIN(MX35_PAD_ECB),
+       IMX_PINCTRL_PIN(MX35_PAD_LBA),
+       IMX_PINCTRL_PIN(MX35_PAD_BCLK),
+       IMX_PINCTRL_PIN(MX35_PAD_RW),
+       IMX_PINCTRL_PIN(MX35_PAD_RAS),
+       IMX_PINCTRL_PIN(MX35_PAD_CAS),
+       IMX_PINCTRL_PIN(MX35_PAD_SDWE),
+       IMX_PINCTRL_PIN(MX35_PAD_SDCKE0),
+       IMX_PINCTRL_PIN(MX35_PAD_SDCKE1),
+       IMX_PINCTRL_PIN(MX35_PAD_SDCLK),
+       IMX_PINCTRL_PIN(MX35_PAD_SDQS0),
+       IMX_PINCTRL_PIN(MX35_PAD_SDQS1),
+       IMX_PINCTRL_PIN(MX35_PAD_SDQS2),
+       IMX_PINCTRL_PIN(MX35_PAD_SDQS3),
+       IMX_PINCTRL_PIN(MX35_PAD_NFWE_B),
+       IMX_PINCTRL_PIN(MX35_PAD_NFRE_B),
+       IMX_PINCTRL_PIN(MX35_PAD_NFALE),
+       IMX_PINCTRL_PIN(MX35_PAD_NFCLE),
+       IMX_PINCTRL_PIN(MX35_PAD_NFWP_B),
+       IMX_PINCTRL_PIN(MX35_PAD_NFRB),
+       IMX_PINCTRL_PIN(MX35_PAD_D15),
+       IMX_PINCTRL_PIN(MX35_PAD_D14),
+       IMX_PINCTRL_PIN(MX35_PAD_D13),
+       IMX_PINCTRL_PIN(MX35_PAD_D12),
+       IMX_PINCTRL_PIN(MX35_PAD_D11),
+       IMX_PINCTRL_PIN(MX35_PAD_D10),
+       IMX_PINCTRL_PIN(MX35_PAD_D9),
+       IMX_PINCTRL_PIN(MX35_PAD_D8),
+       IMX_PINCTRL_PIN(MX35_PAD_D7),
+       IMX_PINCTRL_PIN(MX35_PAD_D6),
+       IMX_PINCTRL_PIN(MX35_PAD_D5),
+       IMX_PINCTRL_PIN(MX35_PAD_D4),
+       IMX_PINCTRL_PIN(MX35_PAD_D3),
+       IMX_PINCTRL_PIN(MX35_PAD_D2),
+       IMX_PINCTRL_PIN(MX35_PAD_D1),
+       IMX_PINCTRL_PIN(MX35_PAD_D0),
+       IMX_PINCTRL_PIN(MX35_PAD_CSI_D8),
+       IMX_PINCTRL_PIN(MX35_PAD_CSI_D9),
+       IMX_PINCTRL_PIN(MX35_PAD_CSI_D10),
+       IMX_PINCTRL_PIN(MX35_PAD_CSI_D11),
+       IMX_PINCTRL_PIN(MX35_PAD_CSI_D12),
+       IMX_PINCTRL_PIN(MX35_PAD_CSI_D13),
+       IMX_PINCTRL_PIN(MX35_PAD_CSI_D14),
+       IMX_PINCTRL_PIN(MX35_PAD_CSI_D15),
+       IMX_PINCTRL_PIN(MX35_PAD_CSI_MCLK),
+       IMX_PINCTRL_PIN(MX35_PAD_CSI_VSYNC),
+       IMX_PINCTRL_PIN(MX35_PAD_CSI_HSYNC),
+       IMX_PINCTRL_PIN(MX35_PAD_CSI_PIXCLK),
+       IMX_PINCTRL_PIN(MX35_PAD_I2C1_CLK),
+       IMX_PINCTRL_PIN(MX35_PAD_I2C1_DAT),
+       IMX_PINCTRL_PIN(MX35_PAD_I2C2_CLK),
+       IMX_PINCTRL_PIN(MX35_PAD_I2C2_DAT),
+       IMX_PINCTRL_PIN(MX35_PAD_STXD4),
+       IMX_PINCTRL_PIN(MX35_PAD_SRXD4),
+       IMX_PINCTRL_PIN(MX35_PAD_SCK4),
+       IMX_PINCTRL_PIN(MX35_PAD_STXFS4),
+       IMX_PINCTRL_PIN(MX35_PAD_STXD5),
+       IMX_PINCTRL_PIN(MX35_PAD_SRXD5),
+       IMX_PINCTRL_PIN(MX35_PAD_SCK5),
+       IMX_PINCTRL_PIN(MX35_PAD_STXFS5),
+       IMX_PINCTRL_PIN(MX35_PAD_SCKR),
+       IMX_PINCTRL_PIN(MX35_PAD_FSR),
+       IMX_PINCTRL_PIN(MX35_PAD_HCKR),
+       IMX_PINCTRL_PIN(MX35_PAD_SCKT),
+       IMX_PINCTRL_PIN(MX35_PAD_FST),
+       IMX_PINCTRL_PIN(MX35_PAD_HCKT),
+       IMX_PINCTRL_PIN(MX35_PAD_TX5_RX0),
+       IMX_PINCTRL_PIN(MX35_PAD_TX4_RX1),
+       IMX_PINCTRL_PIN(MX35_PAD_TX3_RX2),
+       IMX_PINCTRL_PIN(MX35_PAD_TX2_RX3),
+       IMX_PINCTRL_PIN(MX35_PAD_TX1),
+       IMX_PINCTRL_PIN(MX35_PAD_TX0),
+       IMX_PINCTRL_PIN(MX35_PAD_CSPI1_MOSI),
+       IMX_PINCTRL_PIN(MX35_PAD_CSPI1_MISO),
+       IMX_PINCTRL_PIN(MX35_PAD_CSPI1_SS0),
+       IMX_PINCTRL_PIN(MX35_PAD_CSPI1_SS1),
+       IMX_PINCTRL_PIN(MX35_PAD_CSPI1_SCLK),
+       IMX_PINCTRL_PIN(MX35_PAD_CSPI1_SPI_RDY),
+       IMX_PINCTRL_PIN(MX35_PAD_RXD1),
+       IMX_PINCTRL_PIN(MX35_PAD_TXD1),
+       IMX_PINCTRL_PIN(MX35_PAD_RTS1),
+       IMX_PINCTRL_PIN(MX35_PAD_CTS1),
+       IMX_PINCTRL_PIN(MX35_PAD_RXD2),
+       IMX_PINCTRL_PIN(MX35_PAD_TXD2),
+       IMX_PINCTRL_PIN(MX35_PAD_RTS2),
+       IMX_PINCTRL_PIN(MX35_PAD_CTS2),
+       IMX_PINCTRL_PIN(MX35_PAD_RTCK),
+       IMX_PINCTRL_PIN(MX35_PAD_TCK),
+       IMX_PINCTRL_PIN(MX35_PAD_TMS),
+       IMX_PINCTRL_PIN(MX35_PAD_TDI),
+       IMX_PINCTRL_PIN(MX35_PAD_TDO),
+       IMX_PINCTRL_PIN(MX35_PAD_TRSTB),
+       IMX_PINCTRL_PIN(MX35_PAD_DE_B),
+       IMX_PINCTRL_PIN(MX35_PAD_SJC_MOD),
+       IMX_PINCTRL_PIN(MX35_PAD_USBOTG_PWR),
+       IMX_PINCTRL_PIN(MX35_PAD_USBOTG_OC),
+       IMX_PINCTRL_PIN(MX35_PAD_LD0),
+       IMX_PINCTRL_PIN(MX35_PAD_LD1),
+       IMX_PINCTRL_PIN(MX35_PAD_LD2),
+       IMX_PINCTRL_PIN(MX35_PAD_LD3),
+       IMX_PINCTRL_PIN(MX35_PAD_LD4),
+       IMX_PINCTRL_PIN(MX35_PAD_LD5),
+       IMX_PINCTRL_PIN(MX35_PAD_LD6),
+       IMX_PINCTRL_PIN(MX35_PAD_LD7),
+       IMX_PINCTRL_PIN(MX35_PAD_LD8),
+       IMX_PINCTRL_PIN(MX35_PAD_LD9),
+       IMX_PINCTRL_PIN(MX35_PAD_LD10),
+       IMX_PINCTRL_PIN(MX35_PAD_LD11),
+       IMX_PINCTRL_PIN(MX35_PAD_LD12),
+       IMX_PINCTRL_PIN(MX35_PAD_LD13),
+       IMX_PINCTRL_PIN(MX35_PAD_LD14),
+       IMX_PINCTRL_PIN(MX35_PAD_LD15),
+       IMX_PINCTRL_PIN(MX35_PAD_LD16),
+       IMX_PINCTRL_PIN(MX35_PAD_LD17),
+       IMX_PINCTRL_PIN(MX35_PAD_LD18),
+       IMX_PINCTRL_PIN(MX35_PAD_LD19),
+       IMX_PINCTRL_PIN(MX35_PAD_LD20),
+       IMX_PINCTRL_PIN(MX35_PAD_LD21),
+       IMX_PINCTRL_PIN(MX35_PAD_LD22),
+       IMX_PINCTRL_PIN(MX35_PAD_LD23),
+       IMX_PINCTRL_PIN(MX35_PAD_D3_HSYNC),
+       IMX_PINCTRL_PIN(MX35_PAD_D3_FPSHIFT),
+       IMX_PINCTRL_PIN(MX35_PAD_D3_DRDY),
+       IMX_PINCTRL_PIN(MX35_PAD_CONTRAST),
+       IMX_PINCTRL_PIN(MX35_PAD_D3_VSYNC),
+       IMX_PINCTRL_PIN(MX35_PAD_D3_REV),
+       IMX_PINCTRL_PIN(MX35_PAD_D3_CLS),
+       IMX_PINCTRL_PIN(MX35_PAD_D3_SPL),
+       IMX_PINCTRL_PIN(MX35_PAD_SD1_CMD),
+       IMX_PINCTRL_PIN(MX35_PAD_SD1_CLK),
+       IMX_PINCTRL_PIN(MX35_PAD_SD1_DATA0),
+       IMX_PINCTRL_PIN(MX35_PAD_SD1_DATA1),
+       IMX_PINCTRL_PIN(MX35_PAD_SD1_DATA2),
+       IMX_PINCTRL_PIN(MX35_PAD_SD1_DATA3),
+       IMX_PINCTRL_PIN(MX35_PAD_SD2_CMD),
+       IMX_PINCTRL_PIN(MX35_PAD_SD2_CLK),
+       IMX_PINCTRL_PIN(MX35_PAD_SD2_DATA0),
+       IMX_PINCTRL_PIN(MX35_PAD_SD2_DATA1),
+       IMX_PINCTRL_PIN(MX35_PAD_SD2_DATA2),
+       IMX_PINCTRL_PIN(MX35_PAD_SD2_DATA3),
+       IMX_PINCTRL_PIN(MX35_PAD_ATA_CS0),
+       IMX_PINCTRL_PIN(MX35_PAD_ATA_CS1),
+       IMX_PINCTRL_PIN(MX35_PAD_ATA_DIOR),
+       IMX_PINCTRL_PIN(MX35_PAD_ATA_DIOW),
+       IMX_PINCTRL_PIN(MX35_PAD_ATA_DMACK),
+       IMX_PINCTRL_PIN(MX35_PAD_ATA_RESET_B),
+       IMX_PINCTRL_PIN(MX35_PAD_ATA_IORDY),
+       IMX_PINCTRL_PIN(MX35_PAD_ATA_DATA0),
+       IMX_PINCTRL_PIN(MX35_PAD_ATA_DATA1),
+       IMX_PINCTRL_PIN(MX35_PAD_ATA_DATA2),
+       IMX_PINCTRL_PIN(MX35_PAD_ATA_DATA3),
+       IMX_PINCTRL_PIN(MX35_PAD_ATA_DATA4),
+       IMX_PINCTRL_PIN(MX35_PAD_ATA_DATA5),
+       IMX_PINCTRL_PIN(MX35_PAD_ATA_DATA6),
+       IMX_PINCTRL_PIN(MX35_PAD_ATA_DATA7),
+       IMX_PINCTRL_PIN(MX35_PAD_ATA_DATA8),
+       IMX_PINCTRL_PIN(MX35_PAD_ATA_DATA9),
+       IMX_PINCTRL_PIN(MX35_PAD_ATA_DATA10),
+       IMX_PINCTRL_PIN(MX35_PAD_ATA_DATA11),
+       IMX_PINCTRL_PIN(MX35_PAD_ATA_DATA12),
+       IMX_PINCTRL_PIN(MX35_PAD_ATA_DATA13),
+       IMX_PINCTRL_PIN(MX35_PAD_ATA_DATA14),
+       IMX_PINCTRL_PIN(MX35_PAD_ATA_DATA15),
+       IMX_PINCTRL_PIN(MX35_PAD_ATA_INTRQ),
+       IMX_PINCTRL_PIN(MX35_PAD_ATA_BUFF_EN),
+       IMX_PINCTRL_PIN(MX35_PAD_ATA_DMARQ),
+       IMX_PINCTRL_PIN(MX35_PAD_ATA_DA0),
+       IMX_PINCTRL_PIN(MX35_PAD_ATA_DA1),
+       IMX_PINCTRL_PIN(MX35_PAD_ATA_DA2),
+       IMX_PINCTRL_PIN(MX35_PAD_MLB_CLK),
+       IMX_PINCTRL_PIN(MX35_PAD_MLB_DAT),
+       IMX_PINCTRL_PIN(MX35_PAD_MLB_SIG),
+       IMX_PINCTRL_PIN(MX35_PAD_FEC_TX_CLK),
+       IMX_PINCTRL_PIN(MX35_PAD_FEC_RX_CLK),
+       IMX_PINCTRL_PIN(MX35_PAD_FEC_RX_DV),
+       IMX_PINCTRL_PIN(MX35_PAD_FEC_COL),
+       IMX_PINCTRL_PIN(MX35_PAD_FEC_RDATA0),
+       IMX_PINCTRL_PIN(MX35_PAD_FEC_TDATA0),
+       IMX_PINCTRL_PIN(MX35_PAD_FEC_TX_EN),
+       IMX_PINCTRL_PIN(MX35_PAD_FEC_MDC),
+       IMX_PINCTRL_PIN(MX35_PAD_FEC_MDIO),
+       IMX_PINCTRL_PIN(MX35_PAD_FEC_TX_ERR),
+       IMX_PINCTRL_PIN(MX35_PAD_FEC_RX_ERR),
+       IMX_PINCTRL_PIN(MX35_PAD_FEC_CRS),
+       IMX_PINCTRL_PIN(MX35_PAD_FEC_RDATA1),
+       IMX_PINCTRL_PIN(MX35_PAD_FEC_TDATA1),
+       IMX_PINCTRL_PIN(MX35_PAD_FEC_RDATA2),
+       IMX_PINCTRL_PIN(MX35_PAD_FEC_TDATA2),
+       IMX_PINCTRL_PIN(MX35_PAD_FEC_RDATA3),
+       IMX_PINCTRL_PIN(MX35_PAD_FEC_TDATA3),
+       IMX_PINCTRL_PIN(MX35_PAD_EXT_ARMCLK),
+       IMX_PINCTRL_PIN(MX35_PAD_TEST_MODE),
+};
+
+/*
+ * SoC description handed to the shared i.MX pinctrl core: the pad
+ * descriptors above plus the per-pad register table (imx35_pin_regs,
+ * defined earlier in this file), with both element counts.
+ */
+static struct imx_pinctrl_soc_info imx35_pinctrl_info = {
+       .pins = imx35_pinctrl_pads,
+       .npins = ARRAY_SIZE(imx35_pinctrl_pads),
+       .pin_regs = imx35_pin_regs,
+       .npin_regs = ARRAY_SIZE(imx35_pin_regs),
+};
+
+/*
+ * Device tree match table.  Deliberately NOT __devinitdata: the driver
+ * core keeps dereferencing .of_match_table after __devinit memory is
+ * discarded (e.g. deferred probe or a later bind via sysfs), so the
+ * table must stay resident for the driver's lifetime.  It is never
+ * written, hence const.
+ */
+static const struct of_device_id imx35_pinctrl_of_match[] = {
+       { .compatible = "fsl,imx35-iomuxc", },
+       { /* sentinel */ }
+};
+
+/*
+ * Probe: thin wrapper that delegates to the shared i.MX pinctrl core,
+ * passing the imx35-specific SoC description.
+ */
+static int __devinit imx35_pinctrl_probe(struct platform_device *pdev)
+{
+       return imx_pinctrl_probe(pdev, &imx35_pinctrl_info);
+}
+
+/*
+ * Platform driver glue.  .remove points at the core's imx_pinctrl_remove;
+ * of_match_ptr() compiles the match table away when CONFIG_OF is not set.
+ */
+static struct platform_driver imx35_pinctrl_driver = {
+       .driver = {
+               .name = "imx35-pinctrl",
+               .owner = THIS_MODULE,
+               .of_match_table = of_match_ptr(imx35_pinctrl_of_match),
+       },
+       .probe = imx35_pinctrl_probe,
+       .remove = __devexit_p(imx_pinctrl_remove),
+};
+
+/*
+ * Registered at arch_initcall level so the pin controller is available
+ * before dependent device drivers start probing.
+ */
+static int __init imx35_pinctrl_init(void)
+{
+       return platform_driver_register(&imx35_pinctrl_driver);
+}
+arch_initcall(imx35_pinctrl_init);
+
+/* Module unload: unregister the platform driver registered above. */
+static void __exit imx35_pinctrl_exit(void)
+{
+       platform_driver_unregister(&imx35_pinctrl_driver);
+}
+module_exit(imx35_pinctrl_exit);
+MODULE_AUTHOR("Dong Aisheng <dong.aisheng@linaro.org>");
+MODULE_DESCRIPTION("Freescale IMX35 pinctrl driver");
+MODULE_LICENSE("GPL v2");
index 9fd02162a3c21ec1ca810c163ab8c5143e1af41b..fb846896677972f09e7a69661a469ea390d397c7 100644 (file)
 #include "pinctrl-imx.h"
 
 enum imx51_pads {
-       MX51_PAD_EIM_D16 = 1,
-       MX51_PAD_EIM_D17 = 2,
-       MX51_PAD_EIM_D18 = 3,
-       MX51_PAD_EIM_D19 = 4,
-       MX51_PAD_EIM_D20 = 5,
-       MX51_PAD_EIM_D21 = 6,
-       MX51_PAD_EIM_D22 = 7,
-       MX51_PAD_EIM_D23 = 8,
-       MX51_PAD_EIM_D24 = 9,
-       MX51_PAD_EIM_D25 = 10,
-       MX51_PAD_EIM_D26 = 11,
-       MX51_PAD_EIM_D27 = 12,
-       MX51_PAD_EIM_D28 = 13,
-       MX51_PAD_EIM_D29 = 14,
-       MX51_PAD_EIM_D30 = 15,
-       MX51_PAD_EIM_D31 = 16,
-       MX51_PAD_EIM_A16 = 17,
-       MX51_PAD_EIM_A17 = 18,
-       MX51_PAD_EIM_A18 = 19,
-       MX51_PAD_EIM_A19 = 20,
-       MX51_PAD_EIM_A20 = 21,
-       MX51_PAD_EIM_A21 = 22,
-       MX51_PAD_EIM_A22 = 23,
-       MX51_PAD_EIM_A23 = 24,
-       MX51_PAD_EIM_A24 = 25,
-       MX51_PAD_EIM_A25 = 26,
-       MX51_PAD_EIM_A26 = 27,
-       MX51_PAD_EIM_A27 = 28,
-       MX51_PAD_EIM_EB0 = 29,
-       MX51_PAD_EIM_EB1 = 30,
-       MX51_PAD_EIM_EB2 = 31,
-       MX51_PAD_EIM_EB3 = 32,
-       MX51_PAD_EIM_OE = 33,
-       MX51_PAD_EIM_CS0 = 34,
-       MX51_PAD_EIM_CS1 = 35,
-       MX51_PAD_EIM_CS2 = 36,
-       MX51_PAD_EIM_CS3 = 37,
-       MX51_PAD_EIM_CS4 = 38,
-       MX51_PAD_EIM_CS5 = 39,
-       MX51_PAD_EIM_DTACK = 40,
-       MX51_PAD_EIM_LBA = 41,
-       MX51_PAD_EIM_CRE = 42,
-       MX51_PAD_DRAM_CS1 = 43,
-       MX51_PAD_NANDF_WE_B = 44,
-       MX51_PAD_NANDF_RE_B = 45,
-       MX51_PAD_NANDF_ALE = 46,
-       MX51_PAD_NANDF_CLE = 47,
-       MX51_PAD_NANDF_WP_B = 48,
-       MX51_PAD_NANDF_RB0 = 49,
-       MX51_PAD_NANDF_RB1 = 50,
-       MX51_PAD_NANDF_RB2 = 51,
-       MX51_PAD_NANDF_RB3 = 52,
-       MX51_PAD_GPIO_NAND = 53,
-       MX51_PAD_NANDF_CS0 = 54,
-       MX51_PAD_NANDF_CS1 = 55,
-       MX51_PAD_NANDF_CS2 = 56,
-       MX51_PAD_NANDF_CS3 = 57,
-       MX51_PAD_NANDF_CS4 = 58,
-       MX51_PAD_NANDF_CS5 = 59,
-       MX51_PAD_NANDF_CS6 = 60,
-       MX51_PAD_NANDF_CS7 = 61,
-       MX51_PAD_NANDF_RDY_INT = 62,
-       MX51_PAD_NANDF_D15 = 63,
-       MX51_PAD_NANDF_D14 = 64,
-       MX51_PAD_NANDF_D13 = 65,
-       MX51_PAD_NANDF_D12 = 66,
-       MX51_PAD_NANDF_D11 = 67,
-       MX51_PAD_NANDF_D10 = 68,
-       MX51_PAD_NANDF_D9 = 69,
-       MX51_PAD_NANDF_D8 = 70,
-       MX51_PAD_NANDF_D7 = 71,
-       MX51_PAD_NANDF_D6 = 72,
-       MX51_PAD_NANDF_D5 = 73,
-       MX51_PAD_NANDF_D4 = 74,
-       MX51_PAD_NANDF_D3 = 75,
-       MX51_PAD_NANDF_D2 = 76,
-       MX51_PAD_NANDF_D1 = 77,
-       MX51_PAD_NANDF_D0 = 78,
-       MX51_PAD_CSI1_D8 = 79,
-       MX51_PAD_CSI1_D9 = 80,
-       MX51_PAD_CSI1_D10 = 81,
-       MX51_PAD_CSI1_D11 = 82,
-       MX51_PAD_CSI1_D12 = 83,
-       MX51_PAD_CSI1_D13 = 84,
-       MX51_PAD_CSI1_D14 = 85,
-       MX51_PAD_CSI1_D15 = 86,
-       MX51_PAD_CSI1_D16 = 87,
-       MX51_PAD_CSI1_D17 = 88,
-       MX51_PAD_CSI1_D18 = 89,
-       MX51_PAD_CSI1_D19 = 90,
-       MX51_PAD_CSI1_VSYNC = 91,
-       MX51_PAD_CSI1_HSYNC = 92,
-       MX51_PAD_CSI1_PIXCLK = 93,
-       MX51_PAD_CSI1_MCLK = 94,
-       MX51_PAD_CSI2_D12 = 95,
-       MX51_PAD_CSI2_D13 = 96,
-       MX51_PAD_CSI2_D14 = 97,
-       MX51_PAD_CSI2_D15 = 98,
-       MX51_PAD_CSI2_D16 = 99,
-       MX51_PAD_CSI2_D17 = 100,
-       MX51_PAD_CSI2_D18 = 101,
-       MX51_PAD_CSI2_D19 = 102,
-       MX51_PAD_CSI2_VSYNC = 103,
-       MX51_PAD_CSI2_HSYNC = 104,
-       MX51_PAD_CSI2_PIXCLK = 105,
-       MX51_PAD_I2C1_CLK = 106,
-       MX51_PAD_I2C1_DAT = 107,
-       MX51_PAD_AUD3_BB_TXD = 108,
-       MX51_PAD_AUD3_BB_RXD = 109,
-       MX51_PAD_AUD3_BB_CK = 110,
-       MX51_PAD_AUD3_BB_FS = 111,
-       MX51_PAD_CSPI1_MOSI = 112,
-       MX51_PAD_CSPI1_MISO = 113,
-       MX51_PAD_CSPI1_SS0 = 114,
-       MX51_PAD_CSPI1_SS1 = 115,
-       MX51_PAD_CSPI1_RDY = 116,
-       MX51_PAD_CSPI1_SCLK = 117,
-       MX51_PAD_UART1_RXD = 118,
-       MX51_PAD_UART1_TXD = 119,
-       MX51_PAD_UART1_RTS = 120,
-       MX51_PAD_UART1_CTS = 121,
-       MX51_PAD_UART2_RXD = 122,
-       MX51_PAD_UART2_TXD = 123,
-       MX51_PAD_UART3_RXD = 124,
-       MX51_PAD_UART3_TXD = 125,
-       MX51_PAD_OWIRE_LINE = 126,
-       MX51_PAD_KEY_ROW0 = 127,
-       MX51_PAD_KEY_ROW1 = 128,
-       MX51_PAD_KEY_ROW2 = 129,
-       MX51_PAD_KEY_ROW3 = 130,
-       MX51_PAD_KEY_COL0 = 131,
-       MX51_PAD_KEY_COL1 = 132,
-       MX51_PAD_KEY_COL2 = 133,
-       MX51_PAD_KEY_COL3 = 134,
-       MX51_PAD_KEY_COL4 = 135,
-       MX51_PAD_KEY_COL5 = 136,
-       MX51_PAD_USBH1_CLK = 137,
-       MX51_PAD_USBH1_DIR = 138,
-       MX51_PAD_USBH1_STP = 139,
-       MX51_PAD_USBH1_NXT = 140,
-       MX51_PAD_USBH1_DATA0 = 141,
-       MX51_PAD_USBH1_DATA1 = 142,
-       MX51_PAD_USBH1_DATA2 = 143,
-       MX51_PAD_USBH1_DATA3 = 144,
-       MX51_PAD_USBH1_DATA4 = 145,
-       MX51_PAD_USBH1_DATA5 = 146,
-       MX51_PAD_USBH1_DATA6 = 147,
-       MX51_PAD_USBH1_DATA7 = 148,
-       MX51_PAD_DI1_PIN11 = 149,
-       MX51_PAD_DI1_PIN12 = 150,
-       MX51_PAD_DI1_PIN13 = 151,
-       MX51_PAD_DI1_D0_CS = 152,
-       MX51_PAD_DI1_D1_CS = 153,
-       MX51_PAD_DISPB2_SER_DIN = 154,
-       MX51_PAD_DISPB2_SER_DIO = 155,
-       MX51_PAD_DISPB2_SER_CLK = 156,
-       MX51_PAD_DISPB2_SER_RS = 157,
-       MX51_PAD_DISP1_DAT0 = 158,
-       MX51_PAD_DISP1_DAT1 = 159,
-       MX51_PAD_DISP1_DAT2 = 160,
-       MX51_PAD_DISP1_DAT3 = 161,
-       MX51_PAD_DISP1_DAT4 = 162,
-       MX51_PAD_DISP1_DAT5 = 163,
-       MX51_PAD_DISP1_DAT6 = 164,
-       MX51_PAD_DISP1_DAT7 = 165,
-       MX51_PAD_DISP1_DAT8 = 166,
-       MX51_PAD_DISP1_DAT9 = 167,
-       MX51_PAD_DISP1_DAT10 = 168,
-       MX51_PAD_DISP1_DAT11 = 169,
-       MX51_PAD_DISP1_DAT12 = 170,
-       MX51_PAD_DISP1_DAT13 = 171,
-       MX51_PAD_DISP1_DAT14 = 172,
-       MX51_PAD_DISP1_DAT15 = 173,
-       MX51_PAD_DISP1_DAT16 = 174,
-       MX51_PAD_DISP1_DAT17 = 175,
-       MX51_PAD_DISP1_DAT18 = 176,
-       MX51_PAD_DISP1_DAT19 = 177,
-       MX51_PAD_DISP1_DAT20 = 178,
-       MX51_PAD_DISP1_DAT21 = 179,
-       MX51_PAD_DISP1_DAT22 = 180,
-       MX51_PAD_DISP1_DAT23 = 181,
-       MX51_PAD_DI1_PIN3 = 182,
-       MX51_PAD_DI1_PIN2 = 183,
-       MX51_PAD_DI_GP2 = 184,
-       MX51_PAD_DI_GP3 = 185,
-       MX51_PAD_DI2_PIN4 = 186,
-       MX51_PAD_DI2_PIN2 = 187,
-       MX51_PAD_DI2_PIN3 = 188,
-       MX51_PAD_DI2_DISP_CLK = 189,
-       MX51_PAD_DI_GP4 = 190,
-       MX51_PAD_DISP2_DAT0 = 191,
-       MX51_PAD_DISP2_DAT1 = 192,
-       MX51_PAD_DISP2_DAT2 = 193,
-       MX51_PAD_DISP2_DAT3 = 194,
-       MX51_PAD_DISP2_DAT4 = 195,
-       MX51_PAD_DISP2_DAT5 = 196,
-       MX51_PAD_DISP2_DAT6 = 197,
-       MX51_PAD_DISP2_DAT7 = 198,
-       MX51_PAD_DISP2_DAT8 = 199,
-       MX51_PAD_DISP2_DAT9 = 200,
-       MX51_PAD_DISP2_DAT10 = 201,
-       MX51_PAD_DISP2_DAT11 = 202,
-       MX51_PAD_DISP2_DAT12 = 203,
-       MX51_PAD_DISP2_DAT13 = 204,
-       MX51_PAD_DISP2_DAT14 = 205,
-       MX51_PAD_DISP2_DAT15 = 206,
-       MX51_PAD_SD1_CMD = 207,
-       MX51_PAD_SD1_CLK = 208,
-       MX51_PAD_SD1_DATA0 = 209,
-       MX51_PAD_EIM_DA0 = 210,
-       MX51_PAD_EIM_DA1 = 211,
-       MX51_PAD_EIM_DA2 = 212,
-       MX51_PAD_EIM_DA3 = 213,
-       MX51_PAD_SD1_DATA1 = 214,
-       MX51_PAD_EIM_DA4 = 215,
-       MX51_PAD_EIM_DA5 = 216,
-       MX51_PAD_EIM_DA6 = 217,
-       MX51_PAD_EIM_DA7 = 218,
-       MX51_PAD_SD1_DATA2 = 219,
-       MX51_PAD_EIM_DA10 = 220,
-       MX51_PAD_EIM_DA11 = 221,
-       MX51_PAD_EIM_DA8 = 222,
-       MX51_PAD_EIM_DA9 = 223,
-       MX51_PAD_SD1_DATA3 = 224,
-       MX51_PAD_GPIO1_0 = 225,
-       MX51_PAD_GPIO1_1 = 226,
-       MX51_PAD_EIM_DA12 = 227,
-       MX51_PAD_EIM_DA13 = 228,
-       MX51_PAD_EIM_DA14 = 229,
-       MX51_PAD_EIM_DA15 = 230,
-       MX51_PAD_SD2_CMD = 231,
-       MX51_PAD_SD2_CLK = 232,
-       MX51_PAD_SD2_DATA0 = 233,
-       MX51_PAD_SD2_DATA1 = 234,
-       MX51_PAD_SD2_DATA2 = 235,
-       MX51_PAD_SD2_DATA3 = 236,
-       MX51_PAD_GPIO1_2 = 237,
-       MX51_PAD_GPIO1_3 = 238,
-       MX51_PAD_PMIC_INT_REQ = 239,
-       MX51_PAD_GPIO1_4 = 240,
-       MX51_PAD_GPIO1_5 = 241,
-       MX51_PAD_GPIO1_6 = 242,
-       MX51_PAD_GPIO1_7 = 243,
-       MX51_PAD_GPIO1_8 = 244,
-       MX51_PAD_GPIO1_9 = 245,
+       MX51_PAD_EIM_D16 = 0,
+       MX51_PAD_EIM_D17 = 1,
+       MX51_PAD_EIM_D18 = 2,
+       MX51_PAD_EIM_D19 = 3,
+       MX51_PAD_EIM_D20 = 4,
+       MX51_PAD_EIM_D21 = 5,
+       MX51_PAD_EIM_D22 = 6,
+       MX51_PAD_EIM_D23 = 7,
+       MX51_PAD_EIM_D24 = 8,
+       MX51_PAD_EIM_D25 = 9,
+       MX51_PAD_EIM_D26 = 10,
+       MX51_PAD_EIM_D27 = 11,
+       MX51_PAD_EIM_D28 = 12,
+       MX51_PAD_EIM_D29 = 13,
+       MX51_PAD_EIM_D30 = 14,
+       MX51_PAD_EIM_D31 = 15,
+       MX51_PAD_EIM_A16 = 16,
+       MX51_PAD_EIM_A17 = 17,
+       MX51_PAD_EIM_A18 = 18,
+       MX51_PAD_EIM_A19 = 19,
+       MX51_PAD_EIM_A20 = 20,
+       MX51_PAD_EIM_A21 = 21,
+       MX51_PAD_EIM_A22 = 22,
+       MX51_PAD_EIM_A23 = 23,
+       MX51_PAD_EIM_A24 = 24,
+       MX51_PAD_EIM_A25 = 25,
+       MX51_PAD_EIM_A26 = 26,
+       MX51_PAD_EIM_A27 = 27,
+       MX51_PAD_EIM_EB0 = 28,
+       MX51_PAD_EIM_EB1 = 29,
+       MX51_PAD_EIM_EB2 = 30,
+       MX51_PAD_EIM_EB3 = 31,
+       MX51_PAD_EIM_OE = 32,
+       MX51_PAD_EIM_CS0 = 33,
+       MX51_PAD_EIM_CS1 = 34,
+       MX51_PAD_EIM_CS2 = 35,
+       MX51_PAD_EIM_CS3 = 36,
+       MX51_PAD_EIM_CS4 = 37,
+       MX51_PAD_EIM_CS5 = 38,
+       MX51_PAD_EIM_DTACK = 39,
+       MX51_PAD_EIM_LBA = 40,
+       MX51_PAD_EIM_CRE = 41,
+       MX51_PAD_DRAM_CS1 = 42,
+       MX51_PAD_NANDF_WE_B = 43,
+       MX51_PAD_NANDF_RE_B = 44,
+       MX51_PAD_NANDF_ALE = 45,
+       MX51_PAD_NANDF_CLE = 46,
+       MX51_PAD_NANDF_WP_B = 47,
+       MX51_PAD_NANDF_RB0 = 48,
+       MX51_PAD_NANDF_RB1 = 49,
+       MX51_PAD_NANDF_RB2 = 50,
+       MX51_PAD_NANDF_RB3 = 51,
+       MX51_PAD_GPIO_NAND = 52,
+       MX51_PAD_NANDF_CS0 = 53,
+       MX51_PAD_NANDF_CS1 = 54,
+       MX51_PAD_NANDF_CS2 = 55,
+       MX51_PAD_NANDF_CS3 = 56,
+       MX51_PAD_NANDF_CS4 = 57,
+       MX51_PAD_NANDF_CS5 = 58,
+       MX51_PAD_NANDF_CS6 = 59,
+       MX51_PAD_NANDF_CS7 = 60,
+       MX51_PAD_NANDF_RDY_INT = 61,
+       MX51_PAD_NANDF_D15 = 62,
+       MX51_PAD_NANDF_D14 = 63,
+       MX51_PAD_NANDF_D13 = 64,
+       MX51_PAD_NANDF_D12 = 65,
+       MX51_PAD_NANDF_D11 = 66,
+       MX51_PAD_NANDF_D10 = 67,
+       MX51_PAD_NANDF_D9 = 68,
+       MX51_PAD_NANDF_D8 = 69,
+       MX51_PAD_NANDF_D7 = 70,
+       MX51_PAD_NANDF_D6 = 71,
+       MX51_PAD_NANDF_D5 = 72,
+       MX51_PAD_NANDF_D4 = 73,
+       MX51_PAD_NANDF_D3 = 74,
+       MX51_PAD_NANDF_D2 = 75,
+       MX51_PAD_NANDF_D1 = 76,
+       MX51_PAD_NANDF_D0 = 77,
+       MX51_PAD_CSI1_D8 = 78,
+       MX51_PAD_CSI1_D9 = 79,
+       MX51_PAD_CSI1_D10 = 80,
+       MX51_PAD_CSI1_D11 = 81,
+       MX51_PAD_CSI1_D12 = 82,
+       MX51_PAD_CSI1_D13 = 83,
+       MX51_PAD_CSI1_D14 = 84,
+       MX51_PAD_CSI1_D15 = 85,
+       MX51_PAD_CSI1_D16 = 86,
+       MX51_PAD_CSI1_D17 = 87,
+       MX51_PAD_CSI1_D18 = 88,
+       MX51_PAD_CSI1_D19 = 89,
+       MX51_PAD_CSI1_VSYNC = 90,
+       MX51_PAD_CSI1_HSYNC = 91,
+       MX51_PAD_CSI1_PIXCLK = 92,
+       MX51_PAD_CSI1_MCLK = 93,
+       MX51_PAD_CSI2_D12 = 94,
+       MX51_PAD_CSI2_D13 = 95,
+       MX51_PAD_CSI2_D14 = 96,
+       MX51_PAD_CSI2_D15 = 97,
+       MX51_PAD_CSI2_D16 = 98,
+       MX51_PAD_CSI2_D17 = 99,
+       MX51_PAD_CSI2_D18 = 100,
+       MX51_PAD_CSI2_D19 = 101,
+       MX51_PAD_CSI2_VSYNC = 102,
+       MX51_PAD_CSI2_HSYNC = 103,
+       MX51_PAD_CSI2_PIXCLK = 104,
+       MX51_PAD_I2C1_CLK = 105,
+       MX51_PAD_I2C1_DAT = 106,
+       MX51_PAD_AUD3_BB_TXD = 107,
+       MX51_PAD_AUD3_BB_RXD = 108,
+       MX51_PAD_AUD3_BB_CK = 109,
+       MX51_PAD_AUD3_BB_FS = 110,
+       MX51_PAD_CSPI1_MOSI = 111,
+       MX51_PAD_CSPI1_MISO = 112,
+       MX51_PAD_CSPI1_SS0 = 113,
+       MX51_PAD_CSPI1_SS1 = 114,
+       MX51_PAD_CSPI1_RDY = 115,
+       MX51_PAD_CSPI1_SCLK = 116,
+       MX51_PAD_UART1_RXD = 117,
+       MX51_PAD_UART1_TXD = 118,
+       MX51_PAD_UART1_RTS = 119,
+       MX51_PAD_UART1_CTS = 120,
+       MX51_PAD_UART2_RXD = 121,
+       MX51_PAD_UART2_TXD = 122,
+       MX51_PAD_UART3_RXD = 123,
+       MX51_PAD_UART3_TXD = 124,
+       MX51_PAD_OWIRE_LINE = 125,
+       MX51_PAD_KEY_ROW0 = 126,
+       MX51_PAD_KEY_ROW1 = 127,
+       MX51_PAD_KEY_ROW2 = 128,
+       MX51_PAD_KEY_ROW3 = 129,
+       MX51_PAD_KEY_COL0 = 130,
+       MX51_PAD_KEY_COL1 = 131,
+       MX51_PAD_KEY_COL2 = 132,
+       MX51_PAD_KEY_COL3 = 133,
+       MX51_PAD_KEY_COL4 = 134,
+       MX51_PAD_KEY_COL5 = 135,
+       MX51_PAD_USBH1_CLK = 136,
+       MX51_PAD_USBH1_DIR = 137,
+       MX51_PAD_USBH1_STP = 138,
+       MX51_PAD_USBH1_NXT = 139,
+       MX51_PAD_USBH1_DATA0 = 140,
+       MX51_PAD_USBH1_DATA1 = 141,
+       MX51_PAD_USBH1_DATA2 = 142,
+       MX51_PAD_USBH1_DATA3 = 143,
+       MX51_PAD_USBH1_DATA4 = 144,
+       MX51_PAD_USBH1_DATA5 = 145,
+       MX51_PAD_USBH1_DATA6 = 146,
+       MX51_PAD_USBH1_DATA7 = 147,
+       MX51_PAD_DI1_PIN11 = 148,
+       MX51_PAD_DI1_PIN12 = 149,
+       MX51_PAD_DI1_PIN13 = 150,
+       MX51_PAD_DI1_D0_CS = 151,
+       MX51_PAD_DI1_D1_CS = 152,
+       MX51_PAD_DISPB2_SER_DIN = 153,
+       MX51_PAD_DISPB2_SER_DIO = 154,
+       MX51_PAD_DISPB2_SER_CLK = 155,
+       MX51_PAD_DISPB2_SER_RS = 156,
+       MX51_PAD_DISP1_DAT0 = 157,
+       MX51_PAD_DISP1_DAT1 = 158,
+       MX51_PAD_DISP1_DAT2 = 159,
+       MX51_PAD_DISP1_DAT3 = 160,
+       MX51_PAD_DISP1_DAT4 = 161,
+       MX51_PAD_DISP1_DAT5 = 162,
+       MX51_PAD_DISP1_DAT6 = 163,
+       MX51_PAD_DISP1_DAT7 = 164,
+       MX51_PAD_DISP1_DAT8 = 165,
+       MX51_PAD_DISP1_DAT9 = 166,
+       MX51_PAD_DISP1_DAT10 = 167,
+       MX51_PAD_DISP1_DAT11 = 168,
+       MX51_PAD_DISP1_DAT12 = 169,
+       MX51_PAD_DISP1_DAT13 = 170,
+       MX51_PAD_DISP1_DAT14 = 171,
+       MX51_PAD_DISP1_DAT15 = 172,
+       MX51_PAD_DISP1_DAT16 = 173,
+       MX51_PAD_DISP1_DAT17 = 174,
+       MX51_PAD_DISP1_DAT18 = 175,
+       MX51_PAD_DISP1_DAT19 = 176,
+       MX51_PAD_DISP1_DAT20 = 177,
+       MX51_PAD_DISP1_DAT21 = 178,
+       MX51_PAD_DISP1_DAT22 = 179,
+       MX51_PAD_DISP1_DAT23 = 180,
+       MX51_PAD_DI1_PIN3 = 181,
+       MX51_PAD_DI1_PIN2 = 182,
+       MX51_PAD_DI_GP2 = 183,
+       MX51_PAD_DI_GP3 = 184,
+       MX51_PAD_DI2_PIN4 = 185,
+       MX51_PAD_DI2_PIN2 = 186,
+       MX51_PAD_DI2_PIN3 = 187,
+       MX51_PAD_DI2_DISP_CLK = 188,
+       MX51_PAD_DI_GP4 = 189,
+       MX51_PAD_DISP2_DAT0 = 190,
+       MX51_PAD_DISP2_DAT1 = 191,
+       MX51_PAD_DISP2_DAT2 = 192,
+       MX51_PAD_DISP2_DAT3 = 193,
+       MX51_PAD_DISP2_DAT4 = 194,
+       MX51_PAD_DISP2_DAT5 = 195,
+       MX51_PAD_DISP2_DAT6 = 196,
+       MX51_PAD_DISP2_DAT7 = 197,
+       MX51_PAD_DISP2_DAT8 = 198,
+       MX51_PAD_DISP2_DAT9 = 199,
+       MX51_PAD_DISP2_DAT10 = 200,
+       MX51_PAD_DISP2_DAT11 = 201,
+       MX51_PAD_DISP2_DAT12 = 202,
+       MX51_PAD_DISP2_DAT13 = 203,
+       MX51_PAD_DISP2_DAT14 = 204,
+       MX51_PAD_DISP2_DAT15 = 205,
+       MX51_PAD_SD1_CMD = 206,
+       MX51_PAD_SD1_CLK = 207,
+       MX51_PAD_SD1_DATA0 = 208,
+       MX51_PAD_EIM_DA0 = 209,
+       MX51_PAD_EIM_DA1 = 210,
+       MX51_PAD_EIM_DA2 = 211,
+       MX51_PAD_EIM_DA3 = 212,
+       MX51_PAD_SD1_DATA1 = 213,
+       MX51_PAD_EIM_DA4 = 214,
+       MX51_PAD_EIM_DA5 = 215,
+       MX51_PAD_EIM_DA6 = 216,
+       MX51_PAD_EIM_DA7 = 217,
+       MX51_PAD_SD1_DATA2 = 218,
+       MX51_PAD_EIM_DA10 = 219,
+       MX51_PAD_EIM_DA11 = 220,
+       MX51_PAD_EIM_DA8 = 221,
+       MX51_PAD_EIM_DA9 = 222,
+       MX51_PAD_SD1_DATA3 = 223,
+       MX51_PAD_GPIO1_0 = 224,
+       MX51_PAD_GPIO1_1 = 225,
+       MX51_PAD_EIM_DA12 = 226,
+       MX51_PAD_EIM_DA13 = 227,
+       MX51_PAD_EIM_DA14 = 228,
+       MX51_PAD_EIM_DA15 = 229,
+       MX51_PAD_SD2_CMD = 230,
+       MX51_PAD_SD2_CLK = 231,
+       MX51_PAD_SD2_DATA0 = 232,
+       MX51_PAD_SD2_DATA1 = 233,
+       MX51_PAD_SD2_DATA2 = 234,
+       MX51_PAD_SD2_DATA3 = 235,
+       MX51_PAD_GPIO1_2 = 236,
+       MX51_PAD_GPIO1_3 = 237,
+       MX51_PAD_PMIC_INT_REQ = 238,
+       MX51_PAD_GPIO1_4 = 239,
+       MX51_PAD_GPIO1_5 = 240,
+       MX51_PAD_GPIO1_6 = 241,
+       MX51_PAD_GPIO1_7 = 242,
+       MX51_PAD_GPIO1_8 = 243,
+       MX51_PAD_GPIO1_9 = 244,
 };
 
 /* imx51 register maps */
index 1f49e16a9bcd6afd31fe5282b267404f2d6ee22a..783feb1ce064ef50bfc498f29045b5589498c7de 100644 (file)
 #include "pinctrl-imx.h"
 
 enum imx53_pads {
-       MX53_PAD_GPIO_19 = 1,
-       MX53_PAD_KEY_COL0 = 2,
-       MX53_PAD_KEY_ROW0 = 3,
-       MX53_PAD_KEY_COL1 = 4,
-       MX53_PAD_KEY_ROW1 = 5,
-       MX53_PAD_KEY_COL2 = 6,
-       MX53_PAD_KEY_ROW2 = 7,
-       MX53_PAD_KEY_COL3 = 8,
-       MX53_PAD_KEY_ROW3 = 9,
-       MX53_PAD_KEY_COL4 = 10,
-       MX53_PAD_KEY_ROW4 = 11,
-       MX53_PAD_DI0_DISP_CLK = 12,
-       MX53_PAD_DI0_PIN15 = 13,
-       MX53_PAD_DI0_PIN2 = 14,
-       MX53_PAD_DI0_PIN3 = 15,
-       MX53_PAD_DI0_PIN4 = 16,
-       MX53_PAD_DISP0_DAT0 = 17,
-       MX53_PAD_DISP0_DAT1 = 18,
-       MX53_PAD_DISP0_DAT2 = 19,
-       MX53_PAD_DISP0_DAT3 = 20,
-       MX53_PAD_DISP0_DAT4 = 21,
-       MX53_PAD_DISP0_DAT5 = 22,
-       MX53_PAD_DISP0_DAT6 = 23,
-       MX53_PAD_DISP0_DAT7 = 24,
-       MX53_PAD_DISP0_DAT8 = 25,
-       MX53_PAD_DISP0_DAT9 = 26,
-       MX53_PAD_DISP0_DAT10 = 27,
-       MX53_PAD_DISP0_DAT11 = 28,
-       MX53_PAD_DISP0_DAT12 = 29,
-       MX53_PAD_DISP0_DAT13 = 30,
-       MX53_PAD_DISP0_DAT14 = 31,
-       MX53_PAD_DISP0_DAT15 = 32,
-       MX53_PAD_DISP0_DAT16 = 33,
-       MX53_PAD_DISP0_DAT17 = 34,
-       MX53_PAD_DISP0_DAT18 = 35,
-       MX53_PAD_DISP0_DAT19 = 36,
-       MX53_PAD_DISP0_DAT20 = 37,
-       MX53_PAD_DISP0_DAT21 = 38,
-       MX53_PAD_DISP0_DAT22 = 39,
-       MX53_PAD_DISP0_DAT23 = 40,
-       MX53_PAD_CSI0_PIXCLK = 41,
-       MX53_PAD_CSI0_MCLK = 42,
-       MX53_PAD_CSI0_DATA_EN = 43,
-       MX53_PAD_CSI0_VSYNC = 44,
-       MX53_PAD_CSI0_DAT4 = 45,
-       MX53_PAD_CSI0_DAT5 = 46,
-       MX53_PAD_CSI0_DAT6 = 47,
-       MX53_PAD_CSI0_DAT7 = 48,
-       MX53_PAD_CSI0_DAT8 = 49,
-       MX53_PAD_CSI0_DAT9 = 50,
-       MX53_PAD_CSI0_DAT10 = 51,
-       MX53_PAD_CSI0_DAT11 = 52,
-       MX53_PAD_CSI0_DAT12 = 53,
-       MX53_PAD_CSI0_DAT13 = 54,
-       MX53_PAD_CSI0_DAT14 = 55,
-       MX53_PAD_CSI0_DAT15 = 56,
-       MX53_PAD_CSI0_DAT16 = 57,
-       MX53_PAD_CSI0_DAT17 = 58,
-       MX53_PAD_CSI0_DAT18 = 59,
-       MX53_PAD_CSI0_DAT19 = 60,
-       MX53_PAD_EIM_A25 = 61,
-       MX53_PAD_EIM_EB2 = 62,
-       MX53_PAD_EIM_D16 = 63,
-       MX53_PAD_EIM_D17 = 64,
-       MX53_PAD_EIM_D18 = 65,
-       MX53_PAD_EIM_D19 = 66,
-       MX53_PAD_EIM_D20 = 67,
-       MX53_PAD_EIM_D21 = 68,
-       MX53_PAD_EIM_D22 = 69,
-       MX53_PAD_EIM_D23 = 70,
-       MX53_PAD_EIM_EB3 = 71,
-       MX53_PAD_EIM_D24 = 72,
-       MX53_PAD_EIM_D25 = 73,
-       MX53_PAD_EIM_D26 = 74,
-       MX53_PAD_EIM_D27 = 75,
-       MX53_PAD_EIM_D28 = 76,
-       MX53_PAD_EIM_D29 = 77,
-       MX53_PAD_EIM_D30 = 78,
-       MX53_PAD_EIM_D31 = 79,
-       MX53_PAD_EIM_A24 = 80,
-       MX53_PAD_EIM_A23 = 81,
-       MX53_PAD_EIM_A22 = 82,
-       MX53_PAD_EIM_A21 = 83,
-       MX53_PAD_EIM_A20 = 84,
-       MX53_PAD_EIM_A19 = 85,
-       MX53_PAD_EIM_A18 = 86,
-       MX53_PAD_EIM_A17 = 87,
-       MX53_PAD_EIM_A16 = 88,
-       MX53_PAD_EIM_CS0 = 89,
-       MX53_PAD_EIM_CS1 = 90,
-       MX53_PAD_EIM_OE = 91,
-       MX53_PAD_EIM_RW = 92,
-       MX53_PAD_EIM_LBA = 93,
-       MX53_PAD_EIM_EB0 = 94,
-       MX53_PAD_EIM_EB1 = 95,
-       MX53_PAD_EIM_DA0 = 96,
-       MX53_PAD_EIM_DA1 = 97,
-       MX53_PAD_EIM_DA2 = 98,
-       MX53_PAD_EIM_DA3 = 99,
-       MX53_PAD_EIM_DA4 = 100,
-       MX53_PAD_EIM_DA5 = 101,
-       MX53_PAD_EIM_DA6 = 102,
-       MX53_PAD_EIM_DA7 = 103,
-       MX53_PAD_EIM_DA8 = 104,
-       MX53_PAD_EIM_DA9 = 105,
-       MX53_PAD_EIM_DA10 = 106,
-       MX53_PAD_EIM_DA11 = 107,
-       MX53_PAD_EIM_DA12 = 108,
-       MX53_PAD_EIM_DA13 = 109,
-       MX53_PAD_EIM_DA14 = 110,
-       MX53_PAD_EIM_DA15 = 111,
-       MX53_PAD_NANDF_WE_B = 112,
-       MX53_PAD_NANDF_RE_B = 113,
-       MX53_PAD_EIM_WAIT = 114,
-       MX53_PAD_LVDS1_TX3_P = 115,
-       MX53_PAD_LVDS1_TX2_P = 116,
-       MX53_PAD_LVDS1_CLK_P = 117,
-       MX53_PAD_LVDS1_TX1_P = 118,
-       MX53_PAD_LVDS1_TX0_P = 119,
-       MX53_PAD_LVDS0_TX3_P = 120,
-       MX53_PAD_LVDS0_CLK_P = 121,
-       MX53_PAD_LVDS0_TX2_P = 122,
-       MX53_PAD_LVDS0_TX1_P = 123,
-       MX53_PAD_LVDS0_TX0_P = 124,
-       MX53_PAD_GPIO_10 = 125,
-       MX53_PAD_GPIO_11 = 126,
-       MX53_PAD_GPIO_12 = 127,
-       MX53_PAD_GPIO_13 = 128,
-       MX53_PAD_GPIO_14 = 129,
-       MX53_PAD_NANDF_CLE = 130,
-       MX53_PAD_NANDF_ALE = 131,
-       MX53_PAD_NANDF_WP_B = 132,
-       MX53_PAD_NANDF_RB0 = 133,
-       MX53_PAD_NANDF_CS0 = 134,
-       MX53_PAD_NANDF_CS1 = 135,
-       MX53_PAD_NANDF_CS2 = 136,
-       MX53_PAD_NANDF_CS3 = 137,
-       MX53_PAD_FEC_MDIO = 138,
-       MX53_PAD_FEC_REF_CLK = 139,
-       MX53_PAD_FEC_RX_ER = 140,
-       MX53_PAD_FEC_CRS_DV = 141,
-       MX53_PAD_FEC_RXD1 = 142,
-       MX53_PAD_FEC_RXD0 = 143,
-       MX53_PAD_FEC_TX_EN = 144,
-       MX53_PAD_FEC_TXD1 = 145,
-       MX53_PAD_FEC_TXD0 = 146,
-       MX53_PAD_FEC_MDC = 147,
-       MX53_PAD_PATA_DIOW = 148,
-       MX53_PAD_PATA_DMACK = 149,
-       MX53_PAD_PATA_DMARQ = 150,
-       MX53_PAD_PATA_BUFFER_EN = 151,
-       MX53_PAD_PATA_INTRQ = 152,
-       MX53_PAD_PATA_DIOR = 153,
-       MX53_PAD_PATA_RESET_B = 154,
-       MX53_PAD_PATA_IORDY = 155,
-       MX53_PAD_PATA_DA_0 = 156,
-       MX53_PAD_PATA_DA_1 = 157,
-       MX53_PAD_PATA_DA_2 = 158,
-       MX53_PAD_PATA_CS_0 = 159,
-       MX53_PAD_PATA_CS_1 = 160,
-       MX53_PAD_PATA_DATA0 = 161,
-       MX53_PAD_PATA_DATA1 = 162,
-       MX53_PAD_PATA_DATA2 = 163,
-       MX53_PAD_PATA_DATA3 = 164,
-       MX53_PAD_PATA_DATA4 = 165,
-       MX53_PAD_PATA_DATA5 = 166,
-       MX53_PAD_PATA_DATA6 = 167,
-       MX53_PAD_PATA_DATA7 = 168,
-       MX53_PAD_PATA_DATA8 = 169,
-       MX53_PAD_PATA_DATA9 = 170,
-       MX53_PAD_PATA_DATA10 = 171,
-       MX53_PAD_PATA_DATA11 = 172,
-       MX53_PAD_PATA_DATA12 = 173,
-       MX53_PAD_PATA_DATA13 = 174,
-       MX53_PAD_PATA_DATA14 = 175,
-       MX53_PAD_PATA_DATA15 = 176,
-       MX53_PAD_SD1_DATA0 = 177,
-       MX53_PAD_SD1_DATA1 = 178,
-       MX53_PAD_SD1_CMD = 179,
-       MX53_PAD_SD1_DATA2 = 180,
-       MX53_PAD_SD1_CLK = 181,
-       MX53_PAD_SD1_DATA3 = 182,
-       MX53_PAD_SD2_CLK = 183,
-       MX53_PAD_SD2_CMD = 184,
-       MX53_PAD_SD2_DATA3 = 185,
-       MX53_PAD_SD2_DATA2 = 186,
-       MX53_PAD_SD2_DATA1 = 187,
-       MX53_PAD_SD2_DATA0 = 188,
-       MX53_PAD_GPIO_0 = 189,
-       MX53_PAD_GPIO_1 = 190,
-       MX53_PAD_GPIO_9 = 191,
-       MX53_PAD_GPIO_3 = 192,
-       MX53_PAD_GPIO_6 = 193,
-       MX53_PAD_GPIO_2 = 194,
-       MX53_PAD_GPIO_4 = 195,
-       MX53_PAD_GPIO_5 = 196,
-       MX53_PAD_GPIO_7 = 197,
-       MX53_PAD_GPIO_8 = 198,
-       MX53_PAD_GPIO_16 = 199,
-       MX53_PAD_GPIO_17 = 200,
-       MX53_PAD_GPIO_18 = 201,
+       MX53_PAD_GPIO_19 = 0,
+       MX53_PAD_KEY_COL0 = 1,
+       MX53_PAD_KEY_ROW0 = 2,
+       MX53_PAD_KEY_COL1 = 3,
+       MX53_PAD_KEY_ROW1 = 4,
+       MX53_PAD_KEY_COL2 = 5,
+       MX53_PAD_KEY_ROW2 = 6,
+       MX53_PAD_KEY_COL3 = 7,
+       MX53_PAD_KEY_ROW3 = 8,
+       MX53_PAD_KEY_COL4 = 9,
+       MX53_PAD_KEY_ROW4 = 10,
+       MX53_PAD_DI0_DISP_CLK = 11,
+       MX53_PAD_DI0_PIN15 = 12,
+       MX53_PAD_DI0_PIN2 = 13,
+       MX53_PAD_DI0_PIN3 = 14,
+       MX53_PAD_DI0_PIN4 = 15,
+       MX53_PAD_DISP0_DAT0 = 16,
+       MX53_PAD_DISP0_DAT1 = 17,
+       MX53_PAD_DISP0_DAT2 = 18,
+       MX53_PAD_DISP0_DAT3 = 19,
+       MX53_PAD_DISP0_DAT4 = 20,
+       MX53_PAD_DISP0_DAT5 = 21,
+       MX53_PAD_DISP0_DAT6 = 22,
+       MX53_PAD_DISP0_DAT7 = 23,
+       MX53_PAD_DISP0_DAT8 = 24,
+       MX53_PAD_DISP0_DAT9 = 25,
+       MX53_PAD_DISP0_DAT10 = 26,
+       MX53_PAD_DISP0_DAT11 = 27,
+       MX53_PAD_DISP0_DAT12 = 28,
+       MX53_PAD_DISP0_DAT13 = 29,
+       MX53_PAD_DISP0_DAT14 = 30,
+       MX53_PAD_DISP0_DAT15 = 31,
+       MX53_PAD_DISP0_DAT16 = 32,
+       MX53_PAD_DISP0_DAT17 = 33,
+       MX53_PAD_DISP0_DAT18 = 34,
+       MX53_PAD_DISP0_DAT19 = 35,
+       MX53_PAD_DISP0_DAT20 = 36,
+       MX53_PAD_DISP0_DAT21 = 37,
+       MX53_PAD_DISP0_DAT22 = 38,
+       MX53_PAD_DISP0_DAT23 = 39,
+       MX53_PAD_CSI0_PIXCLK = 40,
+       MX53_PAD_CSI0_MCLK = 41,
+       MX53_PAD_CSI0_DATA_EN = 42,
+       MX53_PAD_CSI0_VSYNC = 43,
+       MX53_PAD_CSI0_DAT4 = 44,
+       MX53_PAD_CSI0_DAT5 = 45,
+       MX53_PAD_CSI0_DAT6 = 46,
+       MX53_PAD_CSI0_DAT7 = 47,
+       MX53_PAD_CSI0_DAT8 = 48,
+       MX53_PAD_CSI0_DAT9 = 49,
+       MX53_PAD_CSI0_DAT10 = 50,
+       MX53_PAD_CSI0_DAT11 = 51,
+       MX53_PAD_CSI0_DAT12 = 52,
+       MX53_PAD_CSI0_DAT13 = 53,
+       MX53_PAD_CSI0_DAT14 = 54,
+       MX53_PAD_CSI0_DAT15 = 55,
+       MX53_PAD_CSI0_DAT16 = 56,
+       MX53_PAD_CSI0_DAT17 = 57,
+       MX53_PAD_CSI0_DAT18 = 58,
+       MX53_PAD_CSI0_DAT19 = 59,
+       MX53_PAD_EIM_A25 = 60,
+       MX53_PAD_EIM_EB2 = 61,
+       MX53_PAD_EIM_D16 = 62,
+       MX53_PAD_EIM_D17 = 63,
+       MX53_PAD_EIM_D18 = 64,
+       MX53_PAD_EIM_D19 = 65,
+       MX53_PAD_EIM_D20 = 66,
+       MX53_PAD_EIM_D21 = 67,
+       MX53_PAD_EIM_D22 = 68,
+       MX53_PAD_EIM_D23 = 69,
+       MX53_PAD_EIM_EB3 = 70,
+       MX53_PAD_EIM_D24 = 71,
+       MX53_PAD_EIM_D25 = 72,
+       MX53_PAD_EIM_D26 = 73,
+       MX53_PAD_EIM_D27 = 74,
+       MX53_PAD_EIM_D28 = 75,
+       MX53_PAD_EIM_D29 = 76,
+       MX53_PAD_EIM_D30 = 77,
+       MX53_PAD_EIM_D31 = 78,
+       MX53_PAD_EIM_A24 = 79,
+       MX53_PAD_EIM_A23 = 80,
+       MX53_PAD_EIM_A22 = 81,
+       MX53_PAD_EIM_A21 = 82,
+       MX53_PAD_EIM_A20 = 83,
+       MX53_PAD_EIM_A19 = 84,
+       MX53_PAD_EIM_A18 = 85,
+       MX53_PAD_EIM_A17 = 86,
+       MX53_PAD_EIM_A16 = 87,
+       MX53_PAD_EIM_CS0 = 88,
+       MX53_PAD_EIM_CS1 = 89,
+       MX53_PAD_EIM_OE = 90,
+       MX53_PAD_EIM_RW = 91,
+       MX53_PAD_EIM_LBA = 92,
+       MX53_PAD_EIM_EB0 = 93,
+       MX53_PAD_EIM_EB1 = 94,
+       MX53_PAD_EIM_DA0 = 95,
+       MX53_PAD_EIM_DA1 = 96,
+       MX53_PAD_EIM_DA2 = 97,
+       MX53_PAD_EIM_DA3 = 98,
+       MX53_PAD_EIM_DA4 = 99,
+       MX53_PAD_EIM_DA5 = 100,
+       MX53_PAD_EIM_DA6 = 101,
+       MX53_PAD_EIM_DA7 = 102,
+       MX53_PAD_EIM_DA8 = 103,
+       MX53_PAD_EIM_DA9 = 104,
+       MX53_PAD_EIM_DA10 = 105,
+       MX53_PAD_EIM_DA11 = 106,
+       MX53_PAD_EIM_DA12 = 107,
+       MX53_PAD_EIM_DA13 = 108,
+       MX53_PAD_EIM_DA14 = 109,
+       MX53_PAD_EIM_DA15 = 110,
+       MX53_PAD_NANDF_WE_B = 111,
+       MX53_PAD_NANDF_RE_B = 112,
+       MX53_PAD_EIM_WAIT = 113,
+       MX53_PAD_LVDS1_TX3_P = 114,
+       MX53_PAD_LVDS1_TX2_P = 115,
+       MX53_PAD_LVDS1_CLK_P = 116,
+       MX53_PAD_LVDS1_TX1_P = 117,
+       MX53_PAD_LVDS1_TX0_P = 118,
+       MX53_PAD_LVDS0_TX3_P = 119,
+       MX53_PAD_LVDS0_CLK_P = 120,
+       MX53_PAD_LVDS0_TX2_P = 121,
+       MX53_PAD_LVDS0_TX1_P = 122,
+       MX53_PAD_LVDS0_TX0_P = 123,
+       MX53_PAD_GPIO_10 = 124,
+       MX53_PAD_GPIO_11 = 125,
+       MX53_PAD_GPIO_12 = 126,
+       MX53_PAD_GPIO_13 = 127,
+       MX53_PAD_GPIO_14 = 128,
+       MX53_PAD_NANDF_CLE = 129,
+       MX53_PAD_NANDF_ALE = 130,
+       MX53_PAD_NANDF_WP_B = 131,
+       MX53_PAD_NANDF_RB0 = 132,
+       MX53_PAD_NANDF_CS0 = 133,
+       MX53_PAD_NANDF_CS1 = 134,
+       MX53_PAD_NANDF_CS2 = 135,
+       MX53_PAD_NANDF_CS3 = 136,
+       MX53_PAD_FEC_MDIO = 137,
+       MX53_PAD_FEC_REF_CLK = 138,
+       MX53_PAD_FEC_RX_ER = 139,
+       MX53_PAD_FEC_CRS_DV = 140,
+       MX53_PAD_FEC_RXD1 = 141,
+       MX53_PAD_FEC_RXD0 = 142,
+       MX53_PAD_FEC_TX_EN = 143,
+       MX53_PAD_FEC_TXD1 = 144,
+       MX53_PAD_FEC_TXD0 = 145,
+       MX53_PAD_FEC_MDC = 146,
+       MX53_PAD_PATA_DIOW = 147,
+       MX53_PAD_PATA_DMACK = 148,
+       MX53_PAD_PATA_DMARQ = 149,
+       MX53_PAD_PATA_BUFFER_EN = 150,
+       MX53_PAD_PATA_INTRQ = 151,
+       MX53_PAD_PATA_DIOR = 152,
+       MX53_PAD_PATA_RESET_B = 153,
+       MX53_PAD_PATA_IORDY = 154,
+       MX53_PAD_PATA_DA_0 = 155,
+       MX53_PAD_PATA_DA_1 = 156,
+       MX53_PAD_PATA_DA_2 = 157,
+       MX53_PAD_PATA_CS_0 = 158,
+       MX53_PAD_PATA_CS_1 = 159,
+       MX53_PAD_PATA_DATA0 = 160,
+       MX53_PAD_PATA_DATA1 = 161,
+       MX53_PAD_PATA_DATA2 = 162,
+       MX53_PAD_PATA_DATA3 = 163,
+       MX53_PAD_PATA_DATA4 = 164,
+       MX53_PAD_PATA_DATA5 = 165,
+       MX53_PAD_PATA_DATA6 = 166,
+       MX53_PAD_PATA_DATA7 = 167,
+       MX53_PAD_PATA_DATA8 = 168,
+       MX53_PAD_PATA_DATA9 = 169,
+       MX53_PAD_PATA_DATA10 = 170,
+       MX53_PAD_PATA_DATA11 = 171,
+       MX53_PAD_PATA_DATA12 = 172,
+       MX53_PAD_PATA_DATA13 = 173,
+       MX53_PAD_PATA_DATA14 = 174,
+       MX53_PAD_PATA_DATA15 = 175,
+       MX53_PAD_SD1_DATA0 = 176,
+       MX53_PAD_SD1_DATA1 = 177,
+       MX53_PAD_SD1_CMD = 178,
+       MX53_PAD_SD1_DATA2 = 179,
+       MX53_PAD_SD1_CLK = 180,
+       MX53_PAD_SD1_DATA3 = 181,
+       MX53_PAD_SD2_CLK = 182,
+       MX53_PAD_SD2_CMD = 183,
+       MX53_PAD_SD2_DATA3 = 184,
+       MX53_PAD_SD2_DATA2 = 185,
+       MX53_PAD_SD2_DATA1 = 186,
+       MX53_PAD_SD2_DATA0 = 187,
+       MX53_PAD_GPIO_0 = 188,
+       MX53_PAD_GPIO_1 = 189,
+       MX53_PAD_GPIO_9 = 190,
+       MX53_PAD_GPIO_3 = 191,
+       MX53_PAD_GPIO_6 = 192,
+       MX53_PAD_GPIO_2 = 193,
+       MX53_PAD_GPIO_4 = 194,
+       MX53_PAD_GPIO_5 = 195,
+       MX53_PAD_GPIO_7 = 196,
+       MX53_PAD_GPIO_8 = 197,
+       MX53_PAD_GPIO_16 = 198,
+       MX53_PAD_GPIO_17 = 199,
+       MX53_PAD_GPIO_18 = 200,
 };
 
 /* imx53 register maps */
index a39fb7a6fc5142b86a65630bd5a208c84b94cd9a..ec6209dd7c397b9238cc1b495e5b849d966a3fdf 100644 (file)
@@ -465,6 +465,8 @@ static const unsigned mc4_a_1_pins[] = { DB8500_PIN_AH24, DB8500_PIN_AG25,
 static const unsigned mc1_a_1_pins[] = { DB8500_PIN_AH16, DB8500_PIN_AG15,
        DB8500_PIN_AJ15, DB8500_PIN_AG14, DB8500_PIN_AF13, DB8500_PIN_AG13,
        DB8500_PIN_AH15 };
+static const unsigned mc1_a_2_pins[] = { DB8500_PIN_AH16, DB8500_PIN_AJ15,
+       DB8500_PIN_AG14, DB8500_PIN_AF13, DB8500_PIN_AG13,DB8500_PIN_AH15 };
 static const unsigned mc1dir_a_1_pins[] = { DB8500_PIN_AH13, DB8500_PIN_AG12,
        DB8500_PIN_AH12, DB8500_PIN_AH11 };
 static const unsigned hsir_a_1_pins[] = { DB8500_PIN_AG10, DB8500_PIN_AH10,
@@ -641,6 +643,7 @@ static const struct nmk_pingroup nmk_db8500_groups[] = {
        DB8500_PIN_GROUP(msp2_a_1, NMK_GPIO_ALT_A),
        DB8500_PIN_GROUP(mc4_a_1, NMK_GPIO_ALT_A),
        DB8500_PIN_GROUP(mc1_a_1, NMK_GPIO_ALT_A),
+       DB8500_PIN_GROUP(mc1_a_2, NMK_GPIO_ALT_A),
        DB8500_PIN_GROUP(hsir_a_1, NMK_GPIO_ALT_A),
        DB8500_PIN_GROUP(hsit_a_1, NMK_GPIO_ALT_A),
        DB8500_PIN_GROUP(hsit_a_2, NMK_GPIO_ALT_A),
@@ -768,7 +771,7 @@ DB8500_FUNC_GROUPS(ipgpio, "ipgpio0_a_1", "ipgpio1_a_1", "ipgpio7_b_1",
 /* MSP2 can not invert the RX/TX pins but has the optional SCK pin */
 DB8500_FUNC_GROUPS(msp2, "msp2sck_a_1", "msp2_a_1");
 DB8500_FUNC_GROUPS(mc4, "mc4_a_1", "mc4rstn_c_1");
-DB8500_FUNC_GROUPS(mc1, "mc1_a_1", "mc1dir_a_1");
+DB8500_FUNC_GROUPS(mc1, "mc1_a_1", "mc1_a_2", "mc1dir_a_1");
 DB8500_FUNC_GROUPS(hsi, "hsir_a_1", "hsit_a_1", "hsit_a_2");
 DB8500_FUNC_GROUPS(clkout, "clkout_a_1", "clkout_a_2", "clkout_c_1");
 DB8500_FUNC_GROUPS(usb, "usb_a_1");
diff --git a/drivers/pinctrl/pinctrl-nomadik-db8540.c b/drivers/pinctrl/pinctrl-nomadik-db8540.c
new file mode 100644 (file)
index 0000000..3daf665
--- /dev/null
@@ -0,0 +1,999 @@
+#include <linux/kernel.h>
+#include <linux/pinctrl/pinctrl.h>
+#include "pinctrl-nomadik.h"
+
+/* All the pins that can be used for GPIO and some other functions */
+#define _GPIO(offset)          (offset)
+
+#define DB8540_PIN_AH6         _GPIO(0)
+#define DB8540_PIN_AG7         _GPIO(1)
+#define DB8540_PIN_AF2         _GPIO(2)
+#define DB8540_PIN_AD3         _GPIO(3)
+#define DB8540_PIN_AF6         _GPIO(4)
+#define DB8540_PIN_AG6         _GPIO(5)
+#define DB8540_PIN_AD5         _GPIO(6)
+#define DB8540_PIN_AF7         _GPIO(7)
+#define DB8540_PIN_AG5         _GPIO(8)
+#define DB8540_PIN_AH5         _GPIO(9)
+#define DB8540_PIN_AE4         _GPIO(10)
+#define DB8540_PIN_AD1         _GPIO(11)
+#define DB8540_PIN_AD2         _GPIO(12)
+#define DB8540_PIN_AC2         _GPIO(13)
+#define DB8540_PIN_AC4         _GPIO(14)
+#define DB8540_PIN_AC3         _GPIO(15)
+#define DB8540_PIN_AH7         _GPIO(16)
+#define DB8540_PIN_AE7         _GPIO(17)
+/* Hole */
+#define DB8540_PIN_AF8         _GPIO(22)
+#define DB8540_PIN_AH11                _GPIO(23)
+#define DB8540_PIN_AG11                _GPIO(24)
+#define DB8540_PIN_AF11                _GPIO(25)
+#define DB8540_PIN_AH10                _GPIO(26)
+#define DB8540_PIN_AG10                _GPIO(27)
+#define DB8540_PIN_AF10                _GPIO(28)
+/* Hole */
+#define DB8540_PIN_AD4         _GPIO(33)
+#define DB8540_PIN_AF3         _GPIO(34)
+#define DB8540_PIN_AF5         _GPIO(35)
+#define DB8540_PIN_AG4         _GPIO(36)
+#define DB8540_PIN_AF9         _GPIO(37)
+#define DB8540_PIN_AE8         _GPIO(38)
+/* Hole */
+#define DB8540_PIN_M26         _GPIO(64)
+#define DB8540_PIN_M25         _GPIO(65)
+#define DB8540_PIN_M27         _GPIO(66)
+#define DB8540_PIN_N25         _GPIO(67)
+/* Hole */
+#define DB8540_PIN_M28         _GPIO(70)
+#define DB8540_PIN_N26         _GPIO(71)
+#define DB8540_PIN_M22         _GPIO(72)
+#define DB8540_PIN_N22         _GPIO(73)
+#define DB8540_PIN_N27         _GPIO(74)
+#define DB8540_PIN_N28         _GPIO(75)
+#define DB8540_PIN_P22         _GPIO(76)
+#define DB8540_PIN_P28         _GPIO(77)
+#define DB8540_PIN_P26         _GPIO(78)
+#define DB8540_PIN_T22         _GPIO(79)
+#define DB8540_PIN_R27         _GPIO(80)
+#define DB8540_PIN_P27         _GPIO(81)
+#define DB8540_PIN_R26         _GPIO(82)
+#define DB8540_PIN_R25         _GPIO(83)
+#define DB8540_PIN_U22         _GPIO(84)
+#define DB8540_PIN_T27         _GPIO(85)
+#define DB8540_PIN_T25         _GPIO(86)
+#define DB8540_PIN_T26         _GPIO(87)
+/* Hole */
+#define DB8540_PIN_AF20                _GPIO(116)
+#define DB8540_PIN_AG21                _GPIO(117)
+#define DB8540_PIN_AH19                _GPIO(118)
+#define DB8540_PIN_AE19                _GPIO(119)
+#define DB8540_PIN_AG18                _GPIO(120)
+#define DB8540_PIN_AH17                _GPIO(121)
+#define DB8540_PIN_AF19                _GPIO(122)
+#define DB8540_PIN_AF18                _GPIO(123)
+#define DB8540_PIN_AE18                _GPIO(124)
+#define DB8540_PIN_AG17                _GPIO(125)
+#define DB8540_PIN_AF17                _GPIO(126)
+#define DB8540_PIN_AE17                _GPIO(127)
+#define DB8540_PIN_AC27                _GPIO(128)
+#define DB8540_PIN_AD27                _GPIO(129)
+#define DB8540_PIN_AE28                _GPIO(130)
+#define DB8540_PIN_AG26                _GPIO(131)
+#define DB8540_PIN_AF25                _GPIO(132)
+#define DB8540_PIN_AE27                _GPIO(133)
+#define DB8540_PIN_AF27                _GPIO(134)
+#define DB8540_PIN_AG28                _GPIO(135)
+#define DB8540_PIN_AF28                _GPIO(136)
+#define DB8540_PIN_AG25                _GPIO(137)
+#define DB8540_PIN_AG24                _GPIO(138)
+#define DB8540_PIN_AD25                _GPIO(139)
+#define DB8540_PIN_AH25                _GPIO(140)
+#define DB8540_PIN_AF26                _GPIO(141)
+#define DB8540_PIN_AF23                _GPIO(142)
+#define DB8540_PIN_AG23                _GPIO(143)
+#define DB8540_PIN_AE25                _GPIO(144)
+#define DB8540_PIN_AH24                _GPIO(145)
+#define DB8540_PIN_AJ25                _GPIO(146)
+#define DB8540_PIN_AG27                _GPIO(147)
+#define DB8540_PIN_AH23                _GPIO(148)
+#define DB8540_PIN_AE26                _GPIO(149)
+#define DB8540_PIN_AE24                _GPIO(150)
+#define DB8540_PIN_AJ24                _GPIO(151)
+#define DB8540_PIN_AE21                _GPIO(152)
+#define DB8540_PIN_AG22                _GPIO(153)
+#define DB8540_PIN_AF21                _GPIO(154)
+#define DB8540_PIN_AF24                _GPIO(155)
+#define DB8540_PIN_AH22                _GPIO(156)
+#define DB8540_PIN_AJ23                _GPIO(157)
+#define DB8540_PIN_AH21                _GPIO(158)
+#define DB8540_PIN_AG20                _GPIO(159)
+#define DB8540_PIN_AE23                _GPIO(160)
+#define DB8540_PIN_AH20                _GPIO(161)
+#define DB8540_PIN_AG19                _GPIO(162)
+#define DB8540_PIN_AF22                _GPIO(163)
+#define DB8540_PIN_AJ21                _GPIO(164)
+#define DB8540_PIN_AD26                _GPIO(165)
+#define DB8540_PIN_AD28                _GPIO(166)
+#define DB8540_PIN_AC28                _GPIO(167)
+#define DB8540_PIN_AC26                _GPIO(168)
+/* Hole */
+#define DB8540_PIN_J3          _GPIO(192)
+#define DB8540_PIN_H1          _GPIO(193)
+#define DB8540_PIN_J2          _GPIO(194)
+#define DB8540_PIN_H2          _GPIO(195)
+#define DB8540_PIN_H3          _GPIO(196)
+#define DB8540_PIN_H4          _GPIO(197)
+#define DB8540_PIN_G2          _GPIO(198)
+#define DB8540_PIN_G3          _GPIO(199)
+#define DB8540_PIN_G4          _GPIO(200)
+#define DB8540_PIN_F2          _GPIO(201)
+#define DB8540_PIN_C6          _GPIO(202)
+#define DB8540_PIN_B6          _GPIO(203)
+#define DB8540_PIN_B7          _GPIO(204)
+#define DB8540_PIN_A7          _GPIO(205)
+#define DB8540_PIN_D7          _GPIO(206)
+#define DB8540_PIN_D8          _GPIO(207)
+#define DB8540_PIN_F3          _GPIO(208)
+#define DB8540_PIN_E2          _GPIO(209)
+#define DB8540_PIN_C7          _GPIO(210)
+#define DB8540_PIN_B8          _GPIO(211)
+#define DB8540_PIN_C10         _GPIO(212)
+#define DB8540_PIN_C8          _GPIO(213)
+#define DB8540_PIN_C9          _GPIO(214)
+/* Hole */
+#define DB8540_PIN_B9          _GPIO(219)
+#define DB8540_PIN_A10         _GPIO(220)
+#define DB8540_PIN_D9          _GPIO(221)
+#define DB8540_PIN_B11         _GPIO(222)
+#define DB8540_PIN_B10         _GPIO(223)
+#define DB8540_PIN_E10         _GPIO(224)
+#define DB8540_PIN_B12         _GPIO(225)
+#define DB8540_PIN_D10         _GPIO(226)
+#define DB8540_PIN_D11         _GPIO(227)
+#define DB8540_PIN_AJ6         _GPIO(228)
+#define DB8540_PIN_B13         _GPIO(229)
+#define DB8540_PIN_C12         _GPIO(230)
+#define DB8540_PIN_B14         _GPIO(231)
+#define DB8540_PIN_E11         _GPIO(232)
+/* Hole */
+#define DB8540_PIN_D12         _GPIO(256)
+#define DB8540_PIN_D15         _GPIO(257)
+#define DB8540_PIN_C13         _GPIO(258)
+#define DB8540_PIN_C14         _GPIO(259)
+#define DB8540_PIN_C18         _GPIO(260)
+#define DB8540_PIN_C16         _GPIO(261)
+#define DB8540_PIN_B16         _GPIO(262)
+#define DB8540_PIN_D18         _GPIO(263)
+#define DB8540_PIN_C15         _GPIO(264)
+#define DB8540_PIN_C17         _GPIO(265)
+#define DB8540_PIN_B17         _GPIO(266)
+#define DB8540_PIN_D17         _GPIO(267)
+
+/*
+ * The names of the pins are denoted by GPIO number and ball name, even
+ * though they can be used for other things than GPIO, this is the first
+ * column in the table of the data sheet and often used on schematics and
+ * such.
+ */
+static const struct pinctrl_pin_desc nmk_db8540_pins[] = {
+       /* "Hole" marks a gap in the GPIO number space (no pins there) */
+       PINCTRL_PIN(DB8540_PIN_AH6, "GPIO0_AH6"),
+       PINCTRL_PIN(DB8540_PIN_AG7, "GPIO1_AG7"),
+       PINCTRL_PIN(DB8540_PIN_AF2, "GPIO2_AF2"),
+       PINCTRL_PIN(DB8540_PIN_AD3, "GPIO3_AD3"),
+       PINCTRL_PIN(DB8540_PIN_AF6, "GPIO4_AF6"),
+       PINCTRL_PIN(DB8540_PIN_AG6, "GPIO5_AG6"),
+       PINCTRL_PIN(DB8540_PIN_AD5, "GPIO6_AD5"),
+       PINCTRL_PIN(DB8540_PIN_AF7, "GPIO7_AF7"),
+       PINCTRL_PIN(DB8540_PIN_AG5, "GPIO8_AG5"),
+       PINCTRL_PIN(DB8540_PIN_AH5, "GPIO9_AH5"),
+       PINCTRL_PIN(DB8540_PIN_AE4, "GPIO10_AE4"),
+       PINCTRL_PIN(DB8540_PIN_AD1, "GPIO11_AD1"),
+       PINCTRL_PIN(DB8540_PIN_AD2, "GPIO12_AD2"),
+       PINCTRL_PIN(DB8540_PIN_AC2, "GPIO13_AC2"),
+       PINCTRL_PIN(DB8540_PIN_AC4, "GPIO14_AC4"),
+       PINCTRL_PIN(DB8540_PIN_AC3, "GPIO15_AC3"),
+       PINCTRL_PIN(DB8540_PIN_AH7, "GPIO16_AH7"),
+       PINCTRL_PIN(DB8540_PIN_AE7, "GPIO17_AE7"),
+       /* Hole */
+       PINCTRL_PIN(DB8540_PIN_AF8, "GPIO22_AF8"),
+       PINCTRL_PIN(DB8540_PIN_AH11, "GPIO23_AH11"),
+       PINCTRL_PIN(DB8540_PIN_AG11, "GPIO24_AG11"),
+       PINCTRL_PIN(DB8540_PIN_AF11, "GPIO25_AF11"),
+       PINCTRL_PIN(DB8540_PIN_AH10, "GPIO26_AH10"),
+       PINCTRL_PIN(DB8540_PIN_AG10, "GPIO27_AG10"),
+       PINCTRL_PIN(DB8540_PIN_AF10, "GPIO28_AF10"),
+       /* Hole */
+       PINCTRL_PIN(DB8540_PIN_AD4, "GPIO33_AD4"),
+       PINCTRL_PIN(DB8540_PIN_AF3, "GPIO34_AF3"),
+       PINCTRL_PIN(DB8540_PIN_AF5, "GPIO35_AF5"),
+       PINCTRL_PIN(DB8540_PIN_AG4, "GPIO36_AG4"),
+       PINCTRL_PIN(DB8540_PIN_AF9, "GPIO37_AF9"),
+       PINCTRL_PIN(DB8540_PIN_AE8, "GPIO38_AE8"),
+       /* Hole */
+       PINCTRL_PIN(DB8540_PIN_M26, "GPIO64_M26"),
+       PINCTRL_PIN(DB8540_PIN_M25, "GPIO65_M25"),
+       PINCTRL_PIN(DB8540_PIN_M27, "GPIO66_M27"),
+       PINCTRL_PIN(DB8540_PIN_N25, "GPIO67_N25"),
+       /* Hole */
+       PINCTRL_PIN(DB8540_PIN_M28, "GPIO70_M28"),
+       PINCTRL_PIN(DB8540_PIN_N26, "GPIO71_N26"),
+       PINCTRL_PIN(DB8540_PIN_M22, "GPIO72_M22"),
+       PINCTRL_PIN(DB8540_PIN_N22, "GPIO73_N22"),
+       PINCTRL_PIN(DB8540_PIN_N27, "GPIO74_N27"),
+       PINCTRL_PIN(DB8540_PIN_N28, "GPIO75_N28"),
+       PINCTRL_PIN(DB8540_PIN_P22, "GPIO76_P22"),
+       PINCTRL_PIN(DB8540_PIN_P28, "GPIO77_P28"),
+       PINCTRL_PIN(DB8540_PIN_P26, "GPIO78_P26"),
+       PINCTRL_PIN(DB8540_PIN_T22, "GPIO79_T22"),
+       PINCTRL_PIN(DB8540_PIN_R27, "GPIO80_R27"),
+       PINCTRL_PIN(DB8540_PIN_P27, "GPIO81_P27"),
+       PINCTRL_PIN(DB8540_PIN_R26, "GPIO82_R26"),
+       PINCTRL_PIN(DB8540_PIN_R25, "GPIO83_R25"),
+       PINCTRL_PIN(DB8540_PIN_U22, "GPIO84_U22"),
+       PINCTRL_PIN(DB8540_PIN_T27, "GPIO85_T27"),
+       PINCTRL_PIN(DB8540_PIN_T25, "GPIO86_T25"),
+       PINCTRL_PIN(DB8540_PIN_T26, "GPIO87_T26"),
+       /* Hole */
+       PINCTRL_PIN(DB8540_PIN_AF20, "GPIO116_AF20"),
+       PINCTRL_PIN(DB8540_PIN_AG21, "GPIO117_AG21"),
+       PINCTRL_PIN(DB8540_PIN_AH19, "GPIO118_AH19"),
+       PINCTRL_PIN(DB8540_PIN_AE19, "GPIO119_AE19"),
+       PINCTRL_PIN(DB8540_PIN_AG18, "GPIO120_AG18"),
+       PINCTRL_PIN(DB8540_PIN_AH17, "GPIO121_AH17"),
+       PINCTRL_PIN(DB8540_PIN_AF19, "GPIO122_AF19"),
+       PINCTRL_PIN(DB8540_PIN_AF18, "GPIO123_AF18"),
+       PINCTRL_PIN(DB8540_PIN_AE18, "GPIO124_AE18"),
+       PINCTRL_PIN(DB8540_PIN_AG17, "GPIO125_AG17"),
+       PINCTRL_PIN(DB8540_PIN_AF17, "GPIO126_AF17"),
+       PINCTRL_PIN(DB8540_PIN_AE17, "GPIO127_AE17"),
+       PINCTRL_PIN(DB8540_PIN_AC27, "GPIO128_AC27"),
+       PINCTRL_PIN(DB8540_PIN_AD27, "GPIO129_AD27"),
+       PINCTRL_PIN(DB8540_PIN_AE28, "GPIO130_AE28"),
+       PINCTRL_PIN(DB8540_PIN_AG26, "GPIO131_AG26"),
+       PINCTRL_PIN(DB8540_PIN_AF25, "GPIO132_AF25"),
+       PINCTRL_PIN(DB8540_PIN_AE27, "GPIO133_AE27"),
+       PINCTRL_PIN(DB8540_PIN_AF27, "GPIO134_AF27"),
+       PINCTRL_PIN(DB8540_PIN_AG28, "GPIO135_AG28"),
+       PINCTRL_PIN(DB8540_PIN_AF28, "GPIO136_AF28"),
+       PINCTRL_PIN(DB8540_PIN_AG25, "GPIO137_AG25"),
+       PINCTRL_PIN(DB8540_PIN_AG24, "GPIO138_AG24"),
+       PINCTRL_PIN(DB8540_PIN_AD25, "GPIO139_AD25"),
+       PINCTRL_PIN(DB8540_PIN_AH25, "GPIO140_AH25"),
+       PINCTRL_PIN(DB8540_PIN_AF26, "GPIO141_AF26"),
+       PINCTRL_PIN(DB8540_PIN_AF23, "GPIO142_AF23"),
+       PINCTRL_PIN(DB8540_PIN_AG23, "GPIO143_AG23"),
+       PINCTRL_PIN(DB8540_PIN_AE25, "GPIO144_AE25"),
+       PINCTRL_PIN(DB8540_PIN_AH24, "GPIO145_AH24"),
+       PINCTRL_PIN(DB8540_PIN_AJ25, "GPIO146_AJ25"),
+       PINCTRL_PIN(DB8540_PIN_AG27, "GPIO147_AG27"),
+       PINCTRL_PIN(DB8540_PIN_AH23, "GPIO148_AH23"),
+       PINCTRL_PIN(DB8540_PIN_AE26, "GPIO149_AE26"),
+       PINCTRL_PIN(DB8540_PIN_AE24, "GPIO150_AE24"),
+       PINCTRL_PIN(DB8540_PIN_AJ24, "GPIO151_AJ24"),
+       PINCTRL_PIN(DB8540_PIN_AE21, "GPIO152_AE21"),
+       PINCTRL_PIN(DB8540_PIN_AG22, "GPIO153_AG22"),
+       PINCTRL_PIN(DB8540_PIN_AF21, "GPIO154_AF21"),
+       PINCTRL_PIN(DB8540_PIN_AF24, "GPIO155_AF24"),
+       PINCTRL_PIN(DB8540_PIN_AH22, "GPIO156_AH22"),
+       PINCTRL_PIN(DB8540_PIN_AJ23, "GPIO157_AJ23"),
+       PINCTRL_PIN(DB8540_PIN_AH21, "GPIO158_AH21"),
+       PINCTRL_PIN(DB8540_PIN_AG20, "GPIO159_AG20"),
+       PINCTRL_PIN(DB8540_PIN_AE23, "GPIO160_AE23"),
+       PINCTRL_PIN(DB8540_PIN_AH20, "GPIO161_AH20"),
+       PINCTRL_PIN(DB8540_PIN_AG19, "GPIO162_AG19"),
+       PINCTRL_PIN(DB8540_PIN_AF22, "GPIO163_AF22"),
+       PINCTRL_PIN(DB8540_PIN_AJ21, "GPIO164_AJ21"),
+       PINCTRL_PIN(DB8540_PIN_AD26, "GPIO165_AD26"),
+       PINCTRL_PIN(DB8540_PIN_AD28, "GPIO166_AD28"),
+       PINCTRL_PIN(DB8540_PIN_AC28, "GPIO167_AC28"),
+       PINCTRL_PIN(DB8540_PIN_AC26, "GPIO168_AC26"),
+       /* Hole */
+       PINCTRL_PIN(DB8540_PIN_J3, "GPIO192_J3"),
+       PINCTRL_PIN(DB8540_PIN_H1, "GPIO193_H1"),
+       PINCTRL_PIN(DB8540_PIN_J2, "GPIO194_J2"),
+       PINCTRL_PIN(DB8540_PIN_H2, "GPIO195_H2"),
+       PINCTRL_PIN(DB8540_PIN_H3, "GPIO196_H3"),
+       PINCTRL_PIN(DB8540_PIN_H4, "GPIO197_H4"),
+       PINCTRL_PIN(DB8540_PIN_G2, "GPIO198_G2"),
+       PINCTRL_PIN(DB8540_PIN_G3, "GPIO199_G3"),
+       PINCTRL_PIN(DB8540_PIN_G4, "GPIO200_G4"),
+       PINCTRL_PIN(DB8540_PIN_F2, "GPIO201_F2"),
+       PINCTRL_PIN(DB8540_PIN_C6, "GPIO202_C6"),
+       PINCTRL_PIN(DB8540_PIN_B6, "GPIO203_B6"),
+       PINCTRL_PIN(DB8540_PIN_B7, "GPIO204_B7"),
+       PINCTRL_PIN(DB8540_PIN_A7, "GPIO205_A7"),
+       PINCTRL_PIN(DB8540_PIN_D7, "GPIO206_D7"),
+       PINCTRL_PIN(DB8540_PIN_D8, "GPIO207_D8"),
+       PINCTRL_PIN(DB8540_PIN_F3, "GPIO208_F3"),
+       PINCTRL_PIN(DB8540_PIN_E2, "GPIO209_E2"),
+       PINCTRL_PIN(DB8540_PIN_C7, "GPIO210_C7"),
+       PINCTRL_PIN(DB8540_PIN_B8, "GPIO211_B8"),
+       PINCTRL_PIN(DB8540_PIN_C10, "GPIO212_C10"),
+       PINCTRL_PIN(DB8540_PIN_C8, "GPIO213_C8"),
+       PINCTRL_PIN(DB8540_PIN_C9, "GPIO214_C9"),
+       /* Hole */
+       PINCTRL_PIN(DB8540_PIN_B9, "GPIO219_B9"),
+       PINCTRL_PIN(DB8540_PIN_A10, "GPIO220_A10"),
+       PINCTRL_PIN(DB8540_PIN_D9, "GPIO221_D9"),
+       PINCTRL_PIN(DB8540_PIN_B11, "GPIO222_B11"),
+       PINCTRL_PIN(DB8540_PIN_B10, "GPIO223_B10"),
+       PINCTRL_PIN(DB8540_PIN_E10, "GPIO224_E10"),
+       PINCTRL_PIN(DB8540_PIN_B12, "GPIO225_B12"),
+       PINCTRL_PIN(DB8540_PIN_D10, "GPIO226_D10"),
+       PINCTRL_PIN(DB8540_PIN_D11, "GPIO227_D11"),
+       PINCTRL_PIN(DB8540_PIN_AJ6, "GPIO228_AJ6"),
+       PINCTRL_PIN(DB8540_PIN_B13, "GPIO229_B13"),
+       PINCTRL_PIN(DB8540_PIN_C12, "GPIO230_C12"),
+       PINCTRL_PIN(DB8540_PIN_B14, "GPIO231_B14"),
+       PINCTRL_PIN(DB8540_PIN_E11, "GPIO232_E11"),
+       /* Hole */
+       PINCTRL_PIN(DB8540_PIN_D12, "GPIO256_D12"),
+       PINCTRL_PIN(DB8540_PIN_D15, "GPIO257_D15"),
+       PINCTRL_PIN(DB8540_PIN_C13, "GPIO258_C13"),
+       PINCTRL_PIN(DB8540_PIN_C14, "GPIO259_C14"),
+       PINCTRL_PIN(DB8540_PIN_C18, "GPIO260_C18"),
+       PINCTRL_PIN(DB8540_PIN_C16, "GPIO261_C16"),
+       PINCTRL_PIN(DB8540_PIN_B16, "GPIO262_B16"),
+       PINCTRL_PIN(DB8540_PIN_D18, "GPIO263_D18"),
+       PINCTRL_PIN(DB8540_PIN_C15, "GPIO264_C15"),
+       PINCTRL_PIN(DB8540_PIN_C17, "GPIO265_C17"),
+       PINCTRL_PIN(DB8540_PIN_B17, "GPIO266_B17"),
+       PINCTRL_PIN(DB8540_PIN_D17, "GPIO267_D17"),
+};
+
+/* Build a pinctrl_gpio_range: a = range id, b = base (GPIO and pin number
+ * are identical here, so .base == .pin_base), c = number of pins */
+#define DB8540_GPIO_RANGE(a, b, c) { .name = "db8540", .id = a, .base = b, \
+                       .pin_base = b, .npins = c }
+
+/*
+ * This matches the 32-pin gpio chips registered by the GPIO portion. This
+ * cannot be const since we assign the struct gpio_chip * pointer at runtime.
+ */
+static struct pinctrl_gpio_range nmk_db8540_ranges[] = {
+       /* Arguments: range id, base GPIO/pin number, number of pins.
+        * The entries mirror the contiguous runs in nmk_db8540_pins[]. */
+       DB8540_GPIO_RANGE(0, 0, 18),
+       DB8540_GPIO_RANGE(0, 22, 7),
+       DB8540_GPIO_RANGE(1, 33, 6),
+       DB8540_GPIO_RANGE(2, 64, 4),
+       DB8540_GPIO_RANGE(2, 70, 18),
+       DB8540_GPIO_RANGE(3, 116, 12),
+       DB8540_GPIO_RANGE(4, 128, 32),
+       DB8540_GPIO_RANGE(5, 160, 9),
+       DB8540_GPIO_RANGE(6, 192, 23),
+       DB8540_GPIO_RANGE(6, 219, 5),
+       DB8540_GPIO_RANGE(7, 224, 9),
+       DB8540_GPIO_RANGE(8, 256, 12),
+};
+
+/*
+ * Read the pin group names like this:
+ * u0_a_1    = first groups of pins for uart0 on alt function a
+ * i2c2_b_2  = second group of pins for i2c2 on alt function b
+ *
+ * The groups are arranged as sets per altfunction column, so we can
+ * mux in one group at a time by selecting the same altfunction for them
+ * all. When functions require pins on different altfunctions, you need
+ * to combine several groups.
+ */
+
+/* Altfunction A column */
+static const unsigned u0_a_1_pins[] = { DB8540_PIN_AH6, DB8540_PIN_AG7,
+                                       DB8540_PIN_AF2, DB8540_PIN_AD3 };
+static const unsigned u1rxtx_a_1_pins[] = { DB8540_PIN_AF6, DB8540_PIN_AG6 };
+static const unsigned u1ctsrts_a_1_pins[] = { DB8540_PIN_AD5, DB8540_PIN_AF7 };
+/* Image processor I2C line, this is driven by image processor firmware */
+static const unsigned ipi2c_a_1_pins[] = { DB8540_PIN_AG5, DB8540_PIN_AH5 };
+static const unsigned ipi2c_a_2_pins[] = { DB8540_PIN_AE4, DB8540_PIN_AD1 };
+/* MSP0 can only be on these pins, but TXD and RXD can be flipped */
+static const unsigned msp0txrx_a_1_pins[] = { DB8540_PIN_AD2, DB8540_PIN_AC3 };
+static const unsigned msp0tfstck_a_1_pins[] = { DB8540_PIN_AC2,
+       DB8540_PIN_AC4 };
+static const unsigned msp0rfsrck_a_1_pins[] = { DB8540_PIN_AH7,
+       DB8540_PIN_AE7 };
+/* Basic pins of the MMC/SD card 0 interface */
+static const unsigned mc0_a_1_pins[] = { DB8540_PIN_AH11, DB8540_PIN_AG11,
+       DB8540_PIN_AF11, DB8540_PIN_AH10, DB8540_PIN_AG10, DB8540_PIN_AF10};
+/* MSP1 can only be on these pins, but TXD and RXD can be flipped */
+static const unsigned msp1txrx_a_1_pins[] = { DB8540_PIN_AD4, DB8540_PIN_AG4 };
+static const unsigned msp1_a_1_pins[] = { DB8540_PIN_AF3, DB8540_PIN_AF5 };
+
+static const unsigned modobsclk_a_1_pins[] = { DB8540_PIN_AF9 };
+static const unsigned clkoutreq_a_1_pins[] = { DB8540_PIN_AE8 };
+/* LCD interface */
+static const unsigned lcdb_a_1_pins[] = { DB8540_PIN_M26, DB8540_PIN_M25,
+       DB8540_PIN_M27, DB8540_PIN_N25 };
+static const unsigned lcdvsi0_a_1_pins[] = { DB8540_PIN_AJ24 };
+static const unsigned lcdvsi1_a_1_pins[] = { DB8540_PIN_AE21 };
+static const unsigned lcd_d0_d7_a_1_pins[] = { DB8540_PIN_M28, DB8540_PIN_N26,
+       DB8540_PIN_M22, DB8540_PIN_N22, DB8540_PIN_N27, DB8540_PIN_N28,
+       DB8540_PIN_P22, DB8540_PIN_P28 };
+/* D8 thru D11 often used as TVOUT lines */
+static const unsigned lcd_d8_d11_a_1_pins[] = { DB8540_PIN_P26, DB8540_PIN_T22,
+       DB8540_PIN_R27, DB8540_PIN_P27 };
+static const unsigned lcd_d12_d23_a_1_pins[] = { DB8540_PIN_R26, DB8540_PIN_R25,
+       DB8540_PIN_U22, DB8540_PIN_T27, DB8540_PIN_AG22, DB8540_PIN_AF21,
+       DB8540_PIN_AF24, DB8540_PIN_AH22, DB8540_PIN_AJ23, DB8540_PIN_AH21,
+       DB8540_PIN_AG20, DB8540_PIN_AE23 };
+static const unsigned kp_a_1_pins[] = { DB8540_PIN_AH20, DB8540_PIN_AG19,
+       DB8540_PIN_AF22, DB8540_PIN_AJ21, DB8540_PIN_T25, DB8540_PIN_T26 };
+/* MC2 has 8 data lines and no direction control, so only for (e)MMC */
+static const unsigned mc2_a_1_pins[] = { DB8540_PIN_AC27, DB8540_PIN_AD27,
+       DB8540_PIN_AE28, DB8540_PIN_AG26, DB8540_PIN_AF25, DB8540_PIN_AE27,
+       DB8540_PIN_AF27, DB8540_PIN_AG28, DB8540_PIN_AF28, DB8540_PIN_AG25,
+       DB8540_PIN_AG24 };
+static const unsigned ssp1_a_1_pins[] = {  DB8540_PIN_AD25, DB8540_PIN_AH25,
+       DB8540_PIN_AF26, DB8540_PIN_AF23 };
+static const unsigned ssp0_a_1_pins[] = { DB8540_PIN_AG23, DB8540_PIN_AE25,
+       DB8540_PIN_AH24, DB8540_PIN_AJ25 };
+static const unsigned i2c0_a_1_pins[] = { DB8540_PIN_AG27, DB8540_PIN_AH23 };
+/*
+ * Image processor GPIO pins are named "ipgpio" and have their own
+ * numberspace
+ */
+static const unsigned ipgpio0_a_1_pins[] = { DB8540_PIN_AE26 };
+static const unsigned ipgpio1_a_1_pins[] = { DB8540_PIN_AE24 };
+/* modem i2s interface */
+static const unsigned modi2s_a_1_pins[] = { DB8540_PIN_AD26, DB8540_PIN_AD28,
+       DB8540_PIN_AC28, DB8540_PIN_AC26 };
+static const unsigned spi2_a_1_pins[] = { DB8540_PIN_AF20, DB8540_PIN_AG21,
+       DB8540_PIN_AH19, DB8540_PIN_AE19 };
+static const unsigned u2txrx_a_1_pins[] = { DB8540_PIN_AG18, DB8540_PIN_AH17 };
+static const unsigned u2ctsrts_a_1_pins[] = { DB8540_PIN_AF19,
+       DB8540_PIN_AF18 };
+static const unsigned modsmb_a_1_pins[] = { DB8540_PIN_AF17, DB8540_PIN_AE17 };
+static const unsigned msp2sck_a_1_pins[] = { DB8540_PIN_J3 };
+static const unsigned msp2txdtcktfs_a_1_pins[] = { DB8540_PIN_H1, DB8540_PIN_J2,
+       DB8540_PIN_H2 };
+static const unsigned msp2rxd_a_1_pins[] = { DB8540_PIN_H3 };
+static const unsigned mc4_a_1_pins[] = { DB8540_PIN_H4, DB8540_PIN_G2,
+       DB8540_PIN_G3, DB8540_PIN_G4, DB8540_PIN_F2, DB8540_PIN_C6,
+       DB8540_PIN_B6, DB8540_PIN_B7, DB8540_PIN_A7, DB8540_PIN_D7,
+       DB8540_PIN_D8 };
+static const unsigned mc1_a_1_pins[] = { DB8540_PIN_F3, DB8540_PIN_E2,
+       DB8540_PIN_C7, DB8540_PIN_B8, DB8540_PIN_C10, DB8540_PIN_C8,
+       DB8540_PIN_C9 };
+/* mc1_a_2_pins exclude MC1_FBCLK */
+static const unsigned mc1_a_2_pins[] = { DB8540_PIN_F3,        DB8540_PIN_C7,
+       DB8540_PIN_B8, DB8540_PIN_C10, DB8540_PIN_C8,
+       DB8540_PIN_C9 };
+static const unsigned hsir_a_1_pins[] = { DB8540_PIN_B9, DB8540_PIN_A10,
+       DB8540_PIN_D9 };
+static const unsigned hsit_a_1_pins[] = { DB8540_PIN_B11, DB8540_PIN_B10,
+       DB8540_PIN_E10, DB8540_PIN_B12, DB8540_PIN_D10 };
+static const unsigned hsit_a_2_pins[] = { DB8540_PIN_B11, DB8540_PIN_B10,
+       DB8540_PIN_E10, DB8540_PIN_B12 };
+static const unsigned clkout_a_1_pins[] = { DB8540_PIN_D11, DB8540_PIN_AJ6 };
+static const unsigned clkout_a_2_pins[] = { DB8540_PIN_B13, DB8540_PIN_C12 };
+static const unsigned msp4_a_1_pins[] = { DB8540_PIN_B14, DB8540_PIN_E11 };
+/* 12-pin USB interface */
+static const unsigned usb_a_1_pins[] = { DB8540_PIN_D12, DB8540_PIN_D15,
+       DB8540_PIN_C13, DB8540_PIN_C14, DB8540_PIN_C18, DB8540_PIN_C16,
+       DB8540_PIN_B16, DB8540_PIN_D18, DB8540_PIN_C15, DB8540_PIN_C17,
+       DB8540_PIN_B17, DB8540_PIN_D17 };
+/* Altfunction B column */
+static const unsigned apetrig_b_1_pins[] = { DB8540_PIN_AH6, DB8540_PIN_AG7 };
+static const unsigned modtrig_b_1_pins[] = { DB8540_PIN_AF2, DB8540_PIN_AD3 };
+static const unsigned i2c4_b_1_pins[] = { DB8540_PIN_AF6, DB8540_PIN_AG6 };
+static const unsigned i2c1_b_1_pins[] = { DB8540_PIN_AD5, DB8540_PIN_AF7 };
+static const unsigned i2c2_b_1_pins[] = { DB8540_PIN_AG5, DB8540_PIN_AH5 };
+static const unsigned i2c2_b_2_pins[] = { DB8540_PIN_AE4, DB8540_PIN_AD1 };
+static const unsigned msp0txrx_b_1_pins[] = { DB8540_PIN_AD2, DB8540_PIN_AC3 };
+static const unsigned i2c1_b_2_pins[] = { DB8540_PIN_AH7, DB8540_PIN_AE7 };
+static const unsigned stmmod_b_1_pins[] = { DB8540_PIN_AH11, DB8540_PIN_AF11,
+       DB8540_PIN_AH10, DB8540_PIN_AG10, DB8540_PIN_AF10 };
+static const unsigned moduartstmmux_b_1_pins[] = { DB8540_PIN_AG11 };
+static const unsigned msp1txrx_b_1_pins[] = { DB8540_PIN_AD4, DB8540_PIN_AG4 };
+static const unsigned kp_b_1_pins[] = { DB8540_PIN_AJ24, DB8540_PIN_AE21,
+       DB8540_PIN_M26, DB8540_PIN_M25, DB8540_PIN_M27, DB8540_PIN_N25,
+       DB8540_PIN_M28, DB8540_PIN_N26, DB8540_PIN_M22, DB8540_PIN_N22,
+       DB8540_PIN_N27, DB8540_PIN_N28, DB8540_PIN_P22, DB8540_PIN_P28,
+       DB8540_PIN_P26, DB8540_PIN_T22, DB8540_PIN_R27, DB8540_PIN_P27,
+       DB8540_PIN_R26, DB8540_PIN_R25 };
+static const unsigned u2txrx_b_1_pins[] = { DB8540_PIN_U22, DB8540_PIN_T27 };
+static const unsigned sm_b_1_pins[] = { DB8540_PIN_AG22, DB8540_PIN_AF21,
+       DB8540_PIN_AF24, DB8540_PIN_AH22, DB8540_PIN_AJ23, DB8540_PIN_AH21,
+       DB8540_PIN_AG20, DB8540_PIN_AE23, DB8540_PIN_AH20, DB8540_PIN_AF22,
+       DB8540_PIN_AJ21, DB8540_PIN_AC27, DB8540_PIN_AD27, DB8540_PIN_AE28,
+       DB8540_PIN_AG26, DB8540_PIN_AF25, DB8540_PIN_AE27, DB8540_PIN_AF27,
+       DB8540_PIN_AG28, DB8540_PIN_AF28, DB8540_PIN_AG25, DB8540_PIN_AG24,
+       DB8540_PIN_AD25 };
+static const unsigned smcs0_b_1_pins[] = { DB8540_PIN_AG19 };
+static const unsigned smcs1_b_1_pins[] = { DB8540_PIN_AE26 };
+static const unsigned ipgpio7_b_1_pins[] = { DB8540_PIN_AH25 };
+static const unsigned ipgpio2_b_1_pins[] = { DB8540_PIN_AF26 };
+static const unsigned ipgpio3_b_1_pins[] = { DB8540_PIN_AF23 };
+static const unsigned i2c6_b_1_pins[] = { DB8540_PIN_AG23, DB8540_PIN_AE25 };
+static const unsigned i2c5_b_1_pins[] = { DB8540_PIN_AH24, DB8540_PIN_AJ25 };
+static const unsigned u3txrx_b_1_pins[] = { DB8540_PIN_AF20, DB8540_PIN_AG21 };
+static const unsigned u3ctsrts_b_1_pins[] = { DB8540_PIN_AH19,
+       DB8540_PIN_AE19 };
+static const unsigned i2c5_b_2_pins[] = { DB8540_PIN_AG18, DB8540_PIN_AH17 };
+static const unsigned i2c4_b_2_pins[] = { DB8540_PIN_AF19, DB8540_PIN_AF18 };
+static const unsigned u4txrx_b_1_pins[] = { DB8540_PIN_AE18, DB8540_PIN_AG17 };
+static const unsigned u4ctsrts_b_1_pins[] = { DB8540_PIN_AF17,
+       DB8540_PIN_AE17 };
+static const unsigned ddrtrig_b_1_pins[] = { DB8540_PIN_J3 };
+static const unsigned msp4_b_1_pins[] = { DB8540_PIN_H3 };
+static const unsigned pwl_b_1_pins[] = { DB8540_PIN_C6 };
+static const unsigned spi1_b_1_pins[] = { DB8540_PIN_E2, DB8540_PIN_C10,
+       DB8540_PIN_C8, DB8540_PIN_C9 };
+static const unsigned mc3_b_1_pins[] = { DB8540_PIN_B9, DB8540_PIN_A10,
+       DB8540_PIN_D9, DB8540_PIN_B11, DB8540_PIN_B10, DB8540_PIN_E10,
+       DB8540_PIN_B12 };
+static const unsigned pwl_b_2_pins[] = { DB8540_PIN_D10 };
+static const unsigned pwl_b_3_pins[] = { DB8540_PIN_B13 };
+static const unsigned pwl_b_4_pins[] = { DB8540_PIN_C12 };
+static const unsigned u2txrx_b_2_pins[] = { DB8540_PIN_B17, DB8540_PIN_D17 };
+
+/* Altfunction C column */
+static const unsigned ipgpio6_c_1_pins[] = { DB8540_PIN_AG6 };
+static const unsigned ipgpio0_c_1_pins[] = { DB8540_PIN_AD5 };
+static const unsigned ipgpio1_c_1_pins[] = { DB8540_PIN_AF7 };
+static const unsigned ipgpio3_c_1_pins[] = { DB8540_PIN_AE4 };
+static const unsigned ipgpio2_c_1_pins[] = { DB8540_PIN_AD1 };
+static const unsigned u0_c_1_pins[] = { DB8540_PIN_AD4, DB8540_PIN_AF3,
+       DB8540_PIN_AF5, DB8540_PIN_AG4 };
+static const unsigned smcleale_c_1_pins[] = { DB8540_PIN_AJ24,
+       DB8540_PIN_AE21 };
+static const unsigned ipgpio4_c_1_pins[] = { DB8540_PIN_M26 };
+static const unsigned ipgpio5_c_1_pins[] = { DB8540_PIN_M25 };
+static const unsigned ipgpio6_c_2_pins[] = { DB8540_PIN_M27 };
+static const unsigned ipgpio7_c_1_pins[] = { DB8540_PIN_N25 };
+static const unsigned stmape_c_1_pins[] = { DB8540_PIN_M28, DB8540_PIN_N26,
+       DB8540_PIN_M22, DB8540_PIN_N22, DB8540_PIN_N27 };
+static const unsigned u2rxtx_c_1_pins[] = { DB8540_PIN_N28, DB8540_PIN_P22 };
+static const unsigned modobsresout_c_1_pins[] = { DB8540_PIN_P28 };
+static const unsigned ipgpio2_c_2_pins[] = { DB8540_PIN_P26 };
+static const unsigned ipgpio3_c_2_pins[] = { DB8540_PIN_T22 };
+static const unsigned ipgpio4_c_2_pins[] = { DB8540_PIN_R27 };
+static const unsigned ipgpio5_c_2_pins[] = { DB8540_PIN_P27 };
+static const unsigned modaccgpo_c_1_pins[] = { DB8540_PIN_R26, DB8540_PIN_R25,
+       DB8540_PIN_U22 };
+static const unsigned modobspwrrst_c_1_pins[] = { DB8540_PIN_T27 };
+/* mc5: MMC/SD card 5 interface (cf. mc0 comment above) */
+static const unsigned mc5_c_1_pins[] = { DB8540_PIN_AG22, DB8540_PIN_AF21,
+       DB8540_PIN_AF24, DB8540_PIN_AH22, DB8540_PIN_AJ23, DB8540_PIN_AH21,
+       DB8540_PIN_AG20, DB8540_PIN_AE23, DB8540_PIN_AH20, DB8540_PIN_AF22,
+       DB8540_PIN_AJ21};
+static const unsigned smps0_c_1_pins[] = { DB8540_PIN_AG19 };
+static const unsigned moduart1_c_1_pins[] = { DB8540_PIN_T25, DB8540_PIN_T26 };
+static const unsigned mc2rstn_c_1_pins[] = { DB8540_PIN_AE28 };
+static const unsigned i2c5_c_1_pins[] = { DB8540_PIN_AG28, DB8540_PIN_AF28 };
+static const unsigned ipgpio0_c_2_pins[] = { DB8540_PIN_AG25 };
+static const unsigned ipgpio1_c_2_pins[] = { DB8540_PIN_AG24 };
+static const unsigned kp_c_1_pins[] = { DB8540_PIN_AD25, DB8540_PIN_AH25,
+       DB8540_PIN_AF26, DB8540_PIN_AF23 };
+static const unsigned modrf_c_1_pins[] = { DB8540_PIN_AG23, DB8540_PIN_AE25,
+       DB8540_PIN_AH24 };
+static const unsigned smps1_c_1_pins[] = { DB8540_PIN_AE26 };
+static const unsigned i2c5_c_2_pins[] = { DB8540_PIN_AH19, DB8540_PIN_AE19 };
+static const unsigned u4ctsrts_c_1_pins[] = { DB8540_PIN_AG18,
+       DB8540_PIN_AH17 };
+static const unsigned u3rxtx_c_1_pins[] = { DB8540_PIN_AF19, DB8540_PIN_AF18 };
+static const unsigned msp4_c_1_pins[] = { DB8540_PIN_J3 };
+static const unsigned mc4rstn_c_1_pins[] = { DB8540_PIN_C6 };
+static const unsigned spi0_c_1_pins[] = { DB8540_PIN_A10, DB8540_PIN_B10,
+       DB8540_PIN_E10, DB8540_PIN_B12 };
+static const unsigned i2c3_c_1_pins[] = { DB8540_PIN_B13, DB8540_PIN_C12 };
+
+/* Other alt C1 column; these groups are still muxed as alt C in hardware */
+static const unsigned spi3_oc1_1_pins[] = { DB8540_PIN_AG5, DB8540_PIN_AH5,
+       DB8540_PIN_AE4, DB8540_PIN_AD1 };
+static const unsigned stmape_oc1_1_pins[] = { DB8540_PIN_AH11, DB8540_PIN_AF11,
+       DB8540_PIN_AH10, DB8540_PIN_AG10, DB8540_PIN_AF10 };
+static const unsigned u2_oc1_1_pins[] = { DB8540_PIN_AG11 };
+static const unsigned remap0_oc1_1_pins[] = { DB8540_PIN_AJ24 };
+static const unsigned remap1_oc1_1_pins[] = { DB8540_PIN_AE21 };
+static const unsigned modobsrefclk_oc1_1_pins[] = { DB8540_PIN_M26 };
+static const unsigned modobspwrctrl_oc1_1_pins[] = { DB8540_PIN_M25 };
+static const unsigned modobsclkout_oc1_1_pins[] = { DB8540_PIN_M27 };
+static const unsigned moduart1_oc1_1_pins[] = { DB8540_PIN_N25 };
+static const unsigned modprcmudbg_oc1_1_pins[] = { DB8540_PIN_M28,
+       DB8540_PIN_N26, DB8540_PIN_M22, DB8540_PIN_N22, DB8540_PIN_N27,
+       DB8540_PIN_P22, DB8540_PIN_P28, DB8540_PIN_P26, DB8540_PIN_T22,
+       DB8540_PIN_R26, DB8540_PIN_R25, DB8540_PIN_U22, DB8540_PIN_T27,
+       DB8540_PIN_AH20, DB8540_PIN_AG19, DB8540_PIN_AF22, DB8540_PIN_AJ21,
+       DB8540_PIN_T25};
+static const unsigned modobsresout_oc1_1_pins[] = { DB8540_PIN_N28 };
+static const unsigned modaccgpo_oc1_1_pins[] = { DB8540_PIN_R27, DB8540_PIN_P27,
+       DB8540_PIN_T26 };
+static const unsigned kp_oc1_1_pins[] = { DB8540_PIN_AG22, DB8540_PIN_AF21,
+       DB8540_PIN_AF24, DB8540_PIN_AH22, DB8540_PIN_AJ23, DB8540_PIN_AH21,
+       DB8540_PIN_AG20, DB8540_PIN_AE23 };
+static const unsigned modxmip_oc1_1_pins[] = { DB8540_PIN_AD25, DB8540_PIN_AH25,
+       DB8540_PIN_AG23, DB8540_PIN_AE25 };
+static const unsigned i2c6_oc1_1_pins[] = { DB8540_PIN_AE26, DB8540_PIN_AE24 };
+static const unsigned u2txrx_oc1_1_pins[] = { DB8540_PIN_B7, DB8540_PIN_A7 };
+static const unsigned u2ctsrts_oc1_1_pins[] = { DB8540_PIN_D7, DB8540_PIN_D8 };
+
+/* Other alt C2 column; these groups are still muxed as alt C in hardware */
+static const unsigned sbag_oc2_1_pins[] = { DB8540_PIN_AH11, DB8540_PIN_AG11,
+       DB8540_PIN_AF11, DB8540_PIN_AH10, DB8540_PIN_AG10, DB8540_PIN_AF10 };
+static const unsigned hxclk_oc2_1_pins[] = { DB8540_PIN_M25 };
+static const unsigned modaccuart_oc2_1_pins[] = { DB8540_PIN_N25 };
+static const unsigned stmmod_oc2_1_pins[] = { DB8540_PIN_M28, DB8540_PIN_N26,
+       DB8540_PIN_M22, DB8540_PIN_N22, DB8540_PIN_N27 };
+static const unsigned moduartstmmux_oc2_1_pins[] = { DB8540_PIN_N28 };
+static const unsigned hxgpio_oc2_1_pins[] = { DB8540_PIN_P22, DB8540_PIN_P28,
+       DB8540_PIN_P26, DB8540_PIN_T22, DB8540_PIN_R27, DB8540_PIN_P27,
+       DB8540_PIN_R26, DB8540_PIN_R25 };
+static const unsigned sbag_oc2_2_pins[] = { DB8540_PIN_U22, DB8540_PIN_T27,
+       DB8540_PIN_AG22, DB8540_PIN_AF21, DB8540_PIN_AF24, DB8540_PIN_AH22 };
+static const unsigned modobsservice_oc2_1_pins[] = { DB8540_PIN_AJ23 };
+static const unsigned moduart0_oc2_1_pins[] = { DB8540_PIN_AG20,
+       DB8540_PIN_AE23 };
+static const unsigned stmape_oc2_1_pins[] = { DB8540_PIN_AH20, DB8540_PIN_AG19,
+       DB8540_PIN_AF22, DB8540_PIN_AJ21, DB8540_PIN_T25 };
+static const unsigned u2_oc2_1_pins[] = { DB8540_PIN_T26, DB8540_PIN_AH21 };
+static const unsigned modxmip_oc2_1_pins[] = { DB8540_PIN_AE26,
+       DB8540_PIN_AE24 };
+
+/* Other alt C3 column; these groups are still muxed as alt C in hardware */
+static const unsigned modaccgpo_oc3_1_pins[] = { DB8540_PIN_AG11 };
+static const unsigned tpui_oc3_1_pins[] = { DB8540_PIN_M26, DB8540_PIN_M25,
+       DB8540_PIN_M27, DB8540_PIN_N25, DB8540_PIN_M28, DB8540_PIN_N26,
+       DB8540_PIN_M22, DB8540_PIN_N22, DB8540_PIN_N27, DB8540_PIN_N28,
+       DB8540_PIN_P22, DB8540_PIN_P28, DB8540_PIN_P26, DB8540_PIN_T22,
+       DB8540_PIN_R27, DB8540_PIN_P27, DB8540_PIN_R26, DB8540_PIN_R25,
+       DB8540_PIN_U22, DB8540_PIN_T27, DB8540_PIN_AG22, DB8540_PIN_AF21,
+       DB8540_PIN_AF24, DB8540_PIN_AH22, DB8540_PIN_AJ23, DB8540_PIN_AH21,
+       DB8540_PIN_AG20, DB8540_PIN_AE23, DB8540_PIN_AH20, DB8540_PIN_AG19,
+       DB8540_PIN_AF22, DB8540_PIN_AJ21, DB8540_PIN_T25, DB8540_PIN_T26 };
+
+/* Other alt C4 column; these groups are still muxed as alt C in hardware */
+static const unsigned hwobs_oc4_1_pins[] = { DB8540_PIN_M26, DB8540_PIN_M25,
+       DB8540_PIN_M27, DB8540_PIN_N25, DB8540_PIN_M28, DB8540_PIN_N26,
+       DB8540_PIN_M22, DB8540_PIN_N22, DB8540_PIN_N27, DB8540_PIN_N28,
+       DB8540_PIN_P22, DB8540_PIN_P28, DB8540_PIN_P26, DB8540_PIN_T22,
+       DB8540_PIN_R27, DB8540_PIN_P27, DB8540_PIN_R26, DB8540_PIN_R25 };
+static const unsigned moduart1txrx_oc4_1_pins[] = { DB8540_PIN_U22,
+       DB8540_PIN_T27 };
+static const unsigned moduart1rtscts_oc4_1_pins[] = { DB8540_PIN_AG22,
+       DB8540_PIN_AF21 };
+static const unsigned modaccuarttxrx_oc4_1_pins[] = { DB8540_PIN_AF24,
+       DB8540_PIN_AH22 };
+static const unsigned modaccuartrtscts_oc4_1_pins[] = { DB8540_PIN_AJ23,
+       DB8540_PIN_AH21 };
+static const unsigned stmmod_oc4_1_pins[] = { DB8540_PIN_AH20, DB8540_PIN_AG19,
+       DB8540_PIN_AF22, DB8540_PIN_AJ21, DB8540_PIN_T25 };
+static const unsigned moduartstmmux_oc4_1_pins[] = { DB8540_PIN_T26 };
+
+/* Build a pin group entry: the group is named after a, takes its pin list
+ * and pin count from a##_pins[], and records altsetting b */
+#define DB8540_PIN_GROUP(a, b) { .name = #a, .pins = a##_pins,         \
+                       .npins = ARRAY_SIZE(a##_pins), .altsetting = b }
+
+/* All selectable pin groups: each entry ties a <name>_pins[] array above
+ * to the altfunction used to mux it in (see DB8540_PIN_GROUP) */
+static const struct nmk_pingroup nmk_db8540_groups[] = {
+       /* Altfunction A column */
+       DB8540_PIN_GROUP(u0_a_1, NMK_GPIO_ALT_A),
+       DB8540_PIN_GROUP(u1rxtx_a_1, NMK_GPIO_ALT_A),
+       DB8540_PIN_GROUP(u1ctsrts_a_1, NMK_GPIO_ALT_A),
+       DB8540_PIN_GROUP(ipi2c_a_1, NMK_GPIO_ALT_A),
+       DB8540_PIN_GROUP(ipi2c_a_2, NMK_GPIO_ALT_A),
+       DB8540_PIN_GROUP(msp0txrx_a_1, NMK_GPIO_ALT_A),
+       DB8540_PIN_GROUP(msp0tfstck_a_1, NMK_GPIO_ALT_A),
+       DB8540_PIN_GROUP(msp0rfsrck_a_1, NMK_GPIO_ALT_A),
+       DB8540_PIN_GROUP(mc0_a_1, NMK_GPIO_ALT_A),
+       DB8540_PIN_GROUP(msp1txrx_a_1, NMK_GPIO_ALT_A),
+       DB8540_PIN_GROUP(msp1_a_1, NMK_GPIO_ALT_A),
+       DB8540_PIN_GROUP(modobsclk_a_1, NMK_GPIO_ALT_A),
+       DB8540_PIN_GROUP(clkoutreq_a_1, NMK_GPIO_ALT_A),
+       DB8540_PIN_GROUP(lcdb_a_1, NMK_GPIO_ALT_A),
+       DB8540_PIN_GROUP(lcdvsi0_a_1, NMK_GPIO_ALT_A),
+       DB8540_PIN_GROUP(lcdvsi1_a_1, NMK_GPIO_ALT_A),
+       DB8540_PIN_GROUP(lcd_d0_d7_a_1, NMK_GPIO_ALT_A),
+       DB8540_PIN_GROUP(lcd_d8_d11_a_1, NMK_GPIO_ALT_A),
+       DB8540_PIN_GROUP(lcd_d12_d23_a_1, NMK_GPIO_ALT_A),
+       DB8540_PIN_GROUP(kp_a_1, NMK_GPIO_ALT_A),
+       DB8540_PIN_GROUP(mc2_a_1, NMK_GPIO_ALT_A),
+       DB8540_PIN_GROUP(ssp1_a_1, NMK_GPIO_ALT_A),
+       DB8540_PIN_GROUP(ssp0_a_1, NMK_GPIO_ALT_A),
+       DB8540_PIN_GROUP(i2c0_a_1, NMK_GPIO_ALT_A),
+       DB8540_PIN_GROUP(ipgpio0_a_1, NMK_GPIO_ALT_A),
+       DB8540_PIN_GROUP(ipgpio1_a_1, NMK_GPIO_ALT_A),
+       DB8540_PIN_GROUP(modi2s_a_1, NMK_GPIO_ALT_A),
+       DB8540_PIN_GROUP(spi2_a_1, NMK_GPIO_ALT_A),
+       DB8540_PIN_GROUP(u2txrx_a_1, NMK_GPIO_ALT_A),
+       DB8540_PIN_GROUP(u2ctsrts_a_1, NMK_GPIO_ALT_A),
+       DB8540_PIN_GROUP(modsmb_a_1, NMK_GPIO_ALT_A),
+       DB8540_PIN_GROUP(msp2sck_a_1, NMK_GPIO_ALT_A),
+       DB8540_PIN_GROUP(msp2txdtcktfs_a_1, NMK_GPIO_ALT_A),
+       DB8540_PIN_GROUP(msp2rxd_a_1, NMK_GPIO_ALT_A),
+       DB8540_PIN_GROUP(mc4_a_1, NMK_GPIO_ALT_A),
+       DB8540_PIN_GROUP(mc1_a_1, NMK_GPIO_ALT_A),
+       DB8540_PIN_GROUP(hsir_a_1, NMK_GPIO_ALT_A),
+       DB8540_PIN_GROUP(hsit_a_1, NMK_GPIO_ALT_A),
+       DB8540_PIN_GROUP(hsit_a_2, NMK_GPIO_ALT_A),
+       DB8540_PIN_GROUP(clkout_a_1, NMK_GPIO_ALT_A),
+       DB8540_PIN_GROUP(clkout_a_2, NMK_GPIO_ALT_A),
+       DB8540_PIN_GROUP(msp4_a_1, NMK_GPIO_ALT_A),
+       DB8540_PIN_GROUP(usb_a_1, NMK_GPIO_ALT_A),
+       /* Altfunction B column */
+       DB8540_PIN_GROUP(apetrig_b_1, NMK_GPIO_ALT_B),
+       DB8540_PIN_GROUP(modtrig_b_1, NMK_GPIO_ALT_B),
+       DB8540_PIN_GROUP(i2c4_b_1, NMK_GPIO_ALT_B),
+       DB8540_PIN_GROUP(i2c1_b_1, NMK_GPIO_ALT_B),
+       DB8540_PIN_GROUP(i2c2_b_1, NMK_GPIO_ALT_B),
+       DB8540_PIN_GROUP(i2c2_b_2, NMK_GPIO_ALT_B),
+       DB8540_PIN_GROUP(msp0txrx_b_1, NMK_GPIO_ALT_B),
+       DB8540_PIN_GROUP(i2c1_b_2, NMK_GPIO_ALT_B),
+       DB8540_PIN_GROUP(stmmod_b_1, NMK_GPIO_ALT_B),
+       DB8540_PIN_GROUP(moduartstmmux_b_1, NMK_GPIO_ALT_B),
+       DB8540_PIN_GROUP(msp1txrx_b_1, NMK_GPIO_ALT_B),
+       DB8540_PIN_GROUP(kp_b_1, NMK_GPIO_ALT_B),
+       DB8540_PIN_GROUP(u2txrx_b_1, NMK_GPIO_ALT_B),
+       DB8540_PIN_GROUP(sm_b_1, NMK_GPIO_ALT_B),
+       DB8540_PIN_GROUP(smcs0_b_1, NMK_GPIO_ALT_B),
+       DB8540_PIN_GROUP(smcs1_b_1, NMK_GPIO_ALT_B),
+       DB8540_PIN_GROUP(ipgpio7_b_1, NMK_GPIO_ALT_B),
+       DB8540_PIN_GROUP(ipgpio2_b_1, NMK_GPIO_ALT_B),
+       DB8540_PIN_GROUP(ipgpio3_b_1, NMK_GPIO_ALT_B),
+       DB8540_PIN_GROUP(i2c6_b_1, NMK_GPIO_ALT_B),
+       DB8540_PIN_GROUP(i2c5_b_1, NMK_GPIO_ALT_B),
+       DB8540_PIN_GROUP(u3txrx_b_1, NMK_GPIO_ALT_B),
+       DB8540_PIN_GROUP(u3ctsrts_b_1, NMK_GPIO_ALT_B),
+       DB8540_PIN_GROUP(i2c5_b_2, NMK_GPIO_ALT_B),
+       DB8540_PIN_GROUP(i2c4_b_2, NMK_GPIO_ALT_B),
+       DB8540_PIN_GROUP(u4txrx_b_1, NMK_GPIO_ALT_B),
+       DB8540_PIN_GROUP(u4ctsrts_b_1, NMK_GPIO_ALT_B),
+       DB8540_PIN_GROUP(ddrtrig_b_1, NMK_GPIO_ALT_B),
+       DB8540_PIN_GROUP(msp4_b_1, NMK_GPIO_ALT_B),
+       DB8540_PIN_GROUP(pwl_b_1, NMK_GPIO_ALT_B),
+       DB8540_PIN_GROUP(spi1_b_1, NMK_GPIO_ALT_B),
+       DB8540_PIN_GROUP(mc3_b_1, NMK_GPIO_ALT_B),
+       DB8540_PIN_GROUP(pwl_b_2, NMK_GPIO_ALT_B),
+       DB8540_PIN_GROUP(pwl_b_3, NMK_GPIO_ALT_B),
+       DB8540_PIN_GROUP(pwl_b_4, NMK_GPIO_ALT_B),
+       DB8540_PIN_GROUP(u2txrx_b_2, NMK_GPIO_ALT_B),
+       /* Altfunction C column */
+       DB8540_PIN_GROUP(ipgpio6_c_1, NMK_GPIO_ALT_C),
+       DB8540_PIN_GROUP(ipgpio0_c_1, NMK_GPIO_ALT_C),
+       DB8540_PIN_GROUP(ipgpio1_c_1, NMK_GPIO_ALT_C),
+       DB8540_PIN_GROUP(ipgpio3_c_1, NMK_GPIO_ALT_C),
+       DB8540_PIN_GROUP(ipgpio2_c_1, NMK_GPIO_ALT_C),
+       DB8540_PIN_GROUP(u0_c_1, NMK_GPIO_ALT_C),
+       DB8540_PIN_GROUP(smcleale_c_1, NMK_GPIO_ALT_C),
+       DB8540_PIN_GROUP(ipgpio4_c_1, NMK_GPIO_ALT_C),
+       DB8540_PIN_GROUP(ipgpio5_c_1, NMK_GPIO_ALT_C),
+       DB8540_PIN_GROUP(ipgpio6_c_2, NMK_GPIO_ALT_C),
+       DB8540_PIN_GROUP(ipgpio7_c_1, NMK_GPIO_ALT_C),
+       DB8540_PIN_GROUP(stmape_c_1, NMK_GPIO_ALT_C),
+       DB8540_PIN_GROUP(u2rxtx_c_1, NMK_GPIO_ALT_C),
+       DB8540_PIN_GROUP(modobsresout_c_1, NMK_GPIO_ALT_C),
+       DB8540_PIN_GROUP(ipgpio2_c_2, NMK_GPIO_ALT_C),
+       DB8540_PIN_GROUP(ipgpio3_c_2, NMK_GPIO_ALT_C),
+       DB8540_PIN_GROUP(ipgpio4_c_2, NMK_GPIO_ALT_C),
+       DB8540_PIN_GROUP(ipgpio5_c_2, NMK_GPIO_ALT_C),
+       DB8540_PIN_GROUP(modaccgpo_c_1, NMK_GPIO_ALT_C),
+       DB8540_PIN_GROUP(modobspwrrst_c_1, NMK_GPIO_ALT_C),
+       DB8540_PIN_GROUP(mc5_c_1, NMK_GPIO_ALT_C),
+       DB8540_PIN_GROUP(smps0_c_1, NMK_GPIO_ALT_C),
+       DB8540_PIN_GROUP(moduart1_c_1, NMK_GPIO_ALT_C),
+       DB8540_PIN_GROUP(mc2rstn_c_1, NMK_GPIO_ALT_C),
+       DB8540_PIN_GROUP(i2c5_c_1, NMK_GPIO_ALT_C),
+       DB8540_PIN_GROUP(ipgpio0_c_2, NMK_GPIO_ALT_C),
+       DB8540_PIN_GROUP(ipgpio1_c_2, NMK_GPIO_ALT_C),
+       DB8540_PIN_GROUP(kp_c_1, NMK_GPIO_ALT_C),
+       DB8540_PIN_GROUP(modrf_c_1, NMK_GPIO_ALT_C),
+       DB8540_PIN_GROUP(smps1_c_1, NMK_GPIO_ALT_C),
+       DB8540_PIN_GROUP(i2c5_c_2, NMK_GPIO_ALT_C),
+       DB8540_PIN_GROUP(u4ctsrts_c_1, NMK_GPIO_ALT_C),
+       DB8540_PIN_GROUP(u3rxtx_c_1, NMK_GPIO_ALT_C),
+       DB8540_PIN_GROUP(msp4_c_1, NMK_GPIO_ALT_C),
+       DB8540_PIN_GROUP(mc4rstn_c_1, NMK_GPIO_ALT_C),
+       DB8540_PIN_GROUP(spi0_c_1, NMK_GPIO_ALT_C),
+       DB8540_PIN_GROUP(i2c3_c_1, NMK_GPIO_ALT_C),
+
+       /* Other alt C1 column, these are still configured as alt C */
+       DB8540_PIN_GROUP(spi3_oc1_1, NMK_GPIO_ALT_C),
+       DB8540_PIN_GROUP(stmape_oc1_1, NMK_GPIO_ALT_C),
+       DB8540_PIN_GROUP(u2_oc1_1, NMK_GPIO_ALT_C),
+       DB8540_PIN_GROUP(remap0_oc1_1, NMK_GPIO_ALT_C),
+       DB8540_PIN_GROUP(remap1_oc1_1, NMK_GPIO_ALT_C),
+       DB8540_PIN_GROUP(modobsrefclk_oc1_1, NMK_GPIO_ALT_C),
+       DB8540_PIN_GROUP(modobspwrctrl_oc1_1, NMK_GPIO_ALT_C),
+       DB8540_PIN_GROUP(modobsclkout_oc1_1, NMK_GPIO_ALT_C),
+       DB8540_PIN_GROUP(moduart1_oc1_1, NMK_GPIO_ALT_C),
+       DB8540_PIN_GROUP(modprcmudbg_oc1_1, NMK_GPIO_ALT_C),
+       DB8540_PIN_GROUP(modobsresout_oc1_1, NMK_GPIO_ALT_C),
+       DB8540_PIN_GROUP(modaccgpo_oc1_1, NMK_GPIO_ALT_C),
+       DB8540_PIN_GROUP(kp_oc1_1, NMK_GPIO_ALT_C),
+       DB8540_PIN_GROUP(modxmip_oc1_1, NMK_GPIO_ALT_C),
+       DB8540_PIN_GROUP(i2c6_oc1_1, NMK_GPIO_ALT_C),
+       DB8540_PIN_GROUP(u2txrx_oc1_1, NMK_GPIO_ALT_C),
+       DB8540_PIN_GROUP(u2ctsrts_oc1_1, NMK_GPIO_ALT_C),
+
+       /* Other alt C2 column, these are still configured as alt C */
+       DB8540_PIN_GROUP(sbag_oc2_1, NMK_GPIO_ALT_C),
+       DB8540_PIN_GROUP(hxclk_oc2_1, NMK_GPIO_ALT_C),
+       DB8540_PIN_GROUP(modaccuart_oc2_1, NMK_GPIO_ALT_C),
+       DB8540_PIN_GROUP(stmmod_oc2_1, NMK_GPIO_ALT_C),
+       DB8540_PIN_GROUP(moduartstmmux_oc2_1, NMK_GPIO_ALT_C),
+       DB8540_PIN_GROUP(hxgpio_oc2_1, NMK_GPIO_ALT_C),
+       DB8540_PIN_GROUP(sbag_oc2_2, NMK_GPIO_ALT_C),
+       DB8540_PIN_GROUP(modobsservice_oc2_1, NMK_GPIO_ALT_C),
+       DB8540_PIN_GROUP(moduart0_oc2_1, NMK_GPIO_ALT_C),
+       DB8540_PIN_GROUP(stmape_oc2_1, NMK_GPIO_ALT_C),
+       DB8540_PIN_GROUP(u2_oc2_1, NMK_GPIO_ALT_C),
+       DB8540_PIN_GROUP(modxmip_oc2_1, NMK_GPIO_ALT_C),
+
+       /* Other alt C3 column, these are still configured as alt C */
+       DB8540_PIN_GROUP(modaccgpo_oc3_1, NMK_GPIO_ALT_C),
+       DB8540_PIN_GROUP(tpui_oc3_1, NMK_GPIO_ALT_C),
+
+       /* Other alt C4 column, these are still configured as alt C */
+       DB8540_PIN_GROUP(hwobs_oc4_1, NMK_GPIO_ALT_C),
+       DB8540_PIN_GROUP(moduart1txrx_oc4_1, NMK_GPIO_ALT_C),
+       DB8540_PIN_GROUP(moduart1rtscts_oc4_1, NMK_GPIO_ALT_C),
+       DB8540_PIN_GROUP(modaccuarttxrx_oc4_1, NMK_GPIO_ALT_C),
+       DB8540_PIN_GROUP(modaccuartrtscts_oc4_1, NMK_GPIO_ALT_C),
+       DB8540_PIN_GROUP(stmmod_oc4_1, NMK_GPIO_ALT_C),
+
+};
+
+/* We use this macro to define the groups applicable to a function */
+#define DB8540_FUNC_GROUPS(a, b...)       \
+static const char * const a##_groups[] = { b };
+
+DB8540_FUNC_GROUPS(apetrig, "apetrig_b_1");
+DB8540_FUNC_GROUPS(clkout, "clkoutreq_a_1", "clkout_a_1", "clkout_a_2");
+DB8540_FUNC_GROUPS(ddrtrig, "ddrtrig_b_1");
+DB8540_FUNC_GROUPS(hsi, "hsir_a_1", "hsit_a_1", "hsit_a_2");
+DB8540_FUNC_GROUPS(hwobs, "hwobs_oc4_1");
+DB8540_FUNC_GROUPS(hx, "hxclk_oc2_1", "hxgpio_oc2_1");
+DB8540_FUNC_GROUPS(i2c0, "i2c0_a_1");
+DB8540_FUNC_GROUPS(i2c1, "i2c1_b_1", "i2c1_b_2");
+DB8540_FUNC_GROUPS(i2c2, "i2c2_b_1", "i2c2_b_2");
+DB8540_FUNC_GROUPS(i2c3, "i2c3_c_1", "i2c4_b_1");
+DB8540_FUNC_GROUPS(i2c4, "i2c4_b_2");
+DB8540_FUNC_GROUPS(i2c5, "i2c5_b_1", "i2c5_b_2", "i2c5_c_1", "i2c5_c_2");
+DB8540_FUNC_GROUPS(i2c6, "i2c6_b_1", "i2c6_oc1_1");
+/* The image processor has 8 GPIO pins that can be muxed out */
+DB8540_FUNC_GROUPS(ipgpio, "ipgpio0_a_1", "ipgpio0_c_1", "ipgpio0_c_2",
+               "ipgpio1_a_1", "ipgpio1_c_1", "ipgpio1_c_2",
+               "ipgpio2_b_1", "ipgpio2_c_1", "ipgpio2_c_2",
+               "ipgpio3_b_1", "ipgpio3_c_1", "ipgpio3_c_2",
+               "ipgpio4_c_1", "ipgpio4_c_2",
+               "ipgpio5_c_1", "ipgpio5_c_2",
+               "ipgpio6_c_1", "ipgpio6_c_2",
+               "ipgpio7_b_1", "ipgpio7_c_1");
+DB8540_FUNC_GROUPS(ipi2c, "ipi2c_a_1", "ipi2c_a_2");
+DB8540_FUNC_GROUPS(kp, "kp_a_1", "kp_b_1", "kp_c_1", "kp_oc1_1");
+DB8540_FUNC_GROUPS(lcd, "lcd_d0_d7_a_1", "lcd_d12_d23_a_1", "lcd_d8_d11_a_1",
+               "lcdvsi0_a_1", "lcdvsi1_a_1");
+DB8540_FUNC_GROUPS(lcdb, "lcdb_a_1");
+DB8540_FUNC_GROUPS(mc0, "mc0_a_1");
+DB8540_FUNC_GROUPS(mc1, "mc1_a_1", "mc1_a_2");
+DB8540_FUNC_GROUPS(mc2, "mc2_a_1", "mc2rstn_c_1");
+DB8540_FUNC_GROUPS(mc3, "mc3_b_1");
+DB8540_FUNC_GROUPS(mc4, "mc4_a_1", "mc4rstn_c_1");
+DB8540_FUNC_GROUPS(mc5, "mc5_c_1");
+DB8540_FUNC_GROUPS(modaccgpo, "modaccgpo_c_1", "modaccgpo_oc1_1",
+               "modaccgpo_oc3_1");
+DB8540_FUNC_GROUPS(modaccuart, "modaccuart_oc2_1", "modaccuarttxrx_oc4_1",
+               "modaccuartrtscts_oc4_1");
+DB8540_FUNC_GROUPS(modi2s, "modi2s_a_1");
+DB8540_FUNC_GROUPS(modobs, "modobsclk_a_1", "modobsclkout_oc1_1",
+               "modobspwrctrl_oc1_1", "modobspwrrst_c_1",
+               "modobsrefclk_oc1_1", "modobsresout_c_1",
+               "modobsresout_oc1_1", "modobsservice_oc2_1");
+DB8540_FUNC_GROUPS(modprcmudbg, "modprcmudbg_oc1_1");
+DB8540_FUNC_GROUPS(modrf, "modrf_c_1");
+DB8540_FUNC_GROUPS(modsmb, "modsmb_a_1");
+DB8540_FUNC_GROUPS(modtrig, "modtrig_b_1");
+DB8540_FUNC_GROUPS(moduart, "moduart1_c_1", "moduart1_oc1_1",
+               "moduart1txrx_oc4_1", "moduart1rtscts_oc4_1", "moduart0_oc2_1");
+DB8540_FUNC_GROUPS(moduartstmmux, "moduartstmmux_b_1", "moduartstmmux_oc2_1",
+               "moduartstmmux_oc4_1");
+DB8540_FUNC_GROUPS(modxmip, "modxmip_oc1_1", "modxmip_oc2_1");
+/*
+ * MSP0 can only be on a certain set of pins, but the TX/RX pins can be
+ * switched around by selecting the altfunction A or B.
+ */
+DB8540_FUNC_GROUPS(msp0, "msp0rfsrck_a_1", "msp0tfstck_a_1", "msp0txrx_a_1",
+               "msp0txrx_b_1");
+DB8540_FUNC_GROUPS(msp1, "msp1_a_1", "msp1txrx_a_1", "msp1txrx_b_1");
+DB8540_FUNC_GROUPS(msp2, "msp2sck_a_1", "msp2txdtcktfs_a_1", "msp2rxd_a_1");
+DB8540_FUNC_GROUPS(msp4, "msp4_a_1", "msp4_b_1", "msp4_c_1");
+DB8540_FUNC_GROUPS(pwl, "pwl_b_1", "pwl_b_2", "pwl_b_3", "pwl_b_4");
+DB8540_FUNC_GROUPS(remap, "remap0_oc1_1", "remap1_oc1_1");
+DB8540_FUNC_GROUPS(sbag, "sbag_oc2_1", "sbag_oc2_2");
+/* Select between CS0 on alt B or PS1 on alt C */
+DB8540_FUNC_GROUPS(sm, "sm_b_1", "smcleale_c_1", "smcs0_b_1", "smcs1_b_1",
+               "smps0_c_1", "smps1_c_1");
+DB8540_FUNC_GROUPS(spi0, "spi0_c_1");
+DB8540_FUNC_GROUPS(spi1, "spi1_b_1");
+DB8540_FUNC_GROUPS(spi2, "spi2_a_1");
+DB8540_FUNC_GROUPS(spi3, "spi3_oc1_1");
+DB8540_FUNC_GROUPS(ssp0, "ssp0_a_1");
+DB8540_FUNC_GROUPS(ssp1, "ssp1_a_1");
+DB8540_FUNC_GROUPS(stmape, "stmape_c_1", "stmape_oc1_1", "stmape_oc2_1");
+DB8540_FUNC_GROUPS(stmmod, "stmmod_b_1", "stmmod_oc2_1", "stmmod_oc4_1");
+DB8540_FUNC_GROUPS(tpui, "tpui_oc3_1");
+DB8540_FUNC_GROUPS(u0, "u0_a_1", "u0_c_1");
+DB8540_FUNC_GROUPS(u1, "u1ctsrts_a_1", "u1rxtx_a_1");
+DB8540_FUNC_GROUPS(u2, "u2_oc1_1", "u2_oc2_1", "u2ctsrts_a_1", "u2ctsrts_oc1_1",
+               "u2rxtx_c_1", "u2txrx_a_1", "u2txrx_b_1", "u2txrx_b_2",
+               "u2txrx_oc1_1");
+DB8540_FUNC_GROUPS(u3, "u3ctsrts_b_1", "u3rxtx_c_1", "u3txrxa_b_1");
+DB8540_FUNC_GROUPS(u4, "u4ctsrts_b_1", "u4ctsrts_c_1", "u4txrx_b_1");
+DB8540_FUNC_GROUPS(usb, "usb_a_1");
+
+
+#define FUNCTION(fname)                                        \
+       {                                               \
+               .name = #fname,                         \
+               .groups = fname##_groups,               \
+               .ngroups = ARRAY_SIZE(fname##_groups),  \
+       }
+
+static const struct nmk_function nmk_db8540_functions[] = {
+       FUNCTION(apetrig),
+       FUNCTION(clkout),
+       FUNCTION(ddrtrig),
+       FUNCTION(hsi),
+       FUNCTION(hwobs),
+       FUNCTION(hx),
+       FUNCTION(i2c0),
+       FUNCTION(i2c1),
+       FUNCTION(i2c2),
+       FUNCTION(i2c3),
+       FUNCTION(i2c4),
+       FUNCTION(i2c5),
+       FUNCTION(i2c6),
+       FUNCTION(ipgpio),
+       FUNCTION(ipi2c),
+       FUNCTION(kp),
+       FUNCTION(lcd),
+       FUNCTION(lcdb),
+       FUNCTION(mc0),
+       FUNCTION(mc1),
+       FUNCTION(mc2),
+       FUNCTION(mc3),
+       FUNCTION(mc4),
+       FUNCTION(mc5),
+       FUNCTION(modaccgpo),
+       FUNCTION(modaccuart),
+       FUNCTION(modi2s),
+       FUNCTION(modobs),
+       FUNCTION(modprcmudbg),
+       FUNCTION(modrf),
+       FUNCTION(modsmb),
+       FUNCTION(modtrig),
+       FUNCTION(moduart),
+       FUNCTION(modxmip),
+       FUNCTION(msp0),
+       FUNCTION(msp1),
+       FUNCTION(msp2),
+       FUNCTION(msp4),
+       FUNCTION(pwl),
+       FUNCTION(remap),
+       FUNCTION(sbag),
+       FUNCTION(sm),
+       FUNCTION(spi0),
+       FUNCTION(spi1),
+       FUNCTION(spi2),
+       FUNCTION(spi3),
+       FUNCTION(ssp0),
+       FUNCTION(ssp1),
+       FUNCTION(stmape),
+       FUNCTION(stmmod),
+       FUNCTION(tpui),
+       FUNCTION(u0),
+       FUNCTION(u1),
+       FUNCTION(u2),
+       FUNCTION(u3),
+       FUNCTION(u4),
+       FUNCTION(usb)
+};
+
+static const struct nmk_pinctrl_soc_data nmk_db8540_soc = {
+       .gpio_ranges = nmk_db8540_ranges,
+       .gpio_num_ranges = ARRAY_SIZE(nmk_db8540_ranges),
+       .pins = nmk_db8540_pins,
+       .npins = ARRAY_SIZE(nmk_db8540_pins),
+       .functions = nmk_db8540_functions,
+       .nfunctions = ARRAY_SIZE(nmk_db8540_functions),
+       .groups = nmk_db8540_groups,
+       .ngroups = ARRAY_SIZE(nmk_db8540_groups),
+};
+
+void __devinit
+nmk_pinctrl_db8540_init(const struct nmk_pinctrl_soc_data **soc)
+{
+       *soc = &nmk_db8540_soc;
+}
diff --git a/drivers/pinctrl/pinctrl-nomadik-stn8815.c b/drivers/pinctrl/pinctrl-nomadik-stn8815.c
new file mode 100644 (file)
index 0000000..7d432c3
--- /dev/null
@@ -0,0 +1,357 @@
+#include <linux/kernel.h>
+#include <linux/pinctrl/pinctrl.h>
+#include "pinctrl-nomadik.h"
+
+/* All the pins that can be used for GPIO and some other functions */
+#define _GPIO(offset)          (offset)
+
+#define STN8815_PIN_B4         _GPIO(0)
+#define STN8815_PIN_D5         _GPIO(1)
+#define STN8815_PIN_C5         _GPIO(2)
+#define STN8815_PIN_A4         _GPIO(3)
+#define STN8815_PIN_B5         _GPIO(4)
+#define STN8815_PIN_D6         _GPIO(5)
+#define STN8815_PIN_C6         _GPIO(6)
+#define STN8815_PIN_B6         _GPIO(7)
+#define STN8815_PIN_B10                _GPIO(8)
+#define STN8815_PIN_A10                _GPIO(9)
+#define STN8815_PIN_C11                _GPIO(10)
+#define STN8815_PIN_B11                _GPIO(11)
+#define STN8815_PIN_A11                _GPIO(12)
+#define STN8815_PIN_C12                _GPIO(13)
+#define STN8815_PIN_B12                _GPIO(14)
+#define STN8815_PIN_A12                _GPIO(15)
+#define STN8815_PIN_C13                _GPIO(16)
+#define STN8815_PIN_B13                _GPIO(17)
+#define STN8815_PIN_A13                _GPIO(18)
+#define STN8815_PIN_D13                _GPIO(19)
+#define STN8815_PIN_C14                _GPIO(20)
+#define STN8815_PIN_B14                _GPIO(21)
+#define STN8815_PIN_A14                _GPIO(22)
+#define STN8815_PIN_D15                _GPIO(23)
+#define STN8815_PIN_C15                _GPIO(24)
+#define STN8815_PIN_B15                _GPIO(25)
+#define STN8815_PIN_A15                _GPIO(26)
+#define STN8815_PIN_C16                _GPIO(27)
+#define STN8815_PIN_B16                _GPIO(28)
+#define STN8815_PIN_A16                _GPIO(29)
+#define STN8815_PIN_D17                _GPIO(30)
+#define STN8815_PIN_C17                _GPIO(31)
+#define STN8815_PIN_AB6                _GPIO(32)
+#define STN8815_PIN_AA6                _GPIO(33)
+#define STN8815_PIN_Y6         _GPIO(34)
+#define STN8815_PIN_Y5         _GPIO(35)
+#define STN8815_PIN_AA5                _GPIO(36)
+#define STN8815_PIN_AB5                _GPIO(37)
+#define STN8815_PIN_AB4                _GPIO(38)
+#define STN8815_PIN_Y4         _GPIO(39)
+#define STN8815_PIN_R1         _GPIO(40)
+#define STN8815_PIN_R2         _GPIO(41)
+#define STN8815_PIN_R3         _GPIO(42)
+#define STN8815_PIN_P1         _GPIO(43)
+#define STN8815_PIN_P2         _GPIO(44)
+#define STN8815_PIN_P3         _GPIO(45)
+#define STN8815_PIN_N1         _GPIO(46)
+#define STN8815_PIN_N2         _GPIO(47)
+#define STN8815_PIN_N3         _GPIO(48)
+#define STN8815_PIN_M1         _GPIO(49)
+#define STN8815_PIN_M3         _GPIO(50)
+#define STN8815_PIN_M2         _GPIO(51)
+#define STN8815_PIN_L1         _GPIO(52)
+#define STN8815_PIN_L4         _GPIO(53)
+#define STN8815_PIN_L3         _GPIO(54)
+#define STN8815_PIN_L2         _GPIO(55)
+#define STN8815_PIN_F3         _GPIO(56)
+#define STN8815_PIN_F2         _GPIO(57)
+#define STN8815_PIN_E1         _GPIO(58)
+#define STN8815_PIN_E3         _GPIO(59)
+#define STN8815_PIN_E2         _GPIO(60)
+#define STN8815_PIN_E4         _GPIO(61)
+#define STN8815_PIN_D3         _GPIO(62)
+#define STN8815_PIN_D2         _GPIO(63)
+#define STN8815_PIN_F21                _GPIO(64)
+#define STN8815_PIN_F20                _GPIO(65)
+#define STN8815_PIN_E22                _GPIO(66)
+#define STN8815_PIN_D22                _GPIO(67)
+#define STN8815_PIN_E21                _GPIO(68)
+#define STN8815_PIN_E20                _GPIO(69)
+#define STN8815_PIN_C22                _GPIO(70)
+#define STN8815_PIN_D21                _GPIO(71)
+#define STN8815_PIN_D20                _GPIO(72)
+#define STN8815_PIN_C21                _GPIO(73)
+#define STN8815_PIN_C20                _GPIO(74)
+#define STN8815_PIN_C19                _GPIO(75)
+#define STN8815_PIN_B20                _GPIO(76)
+#define STN8815_PIN_B8         _GPIO(77)
+#define STN8815_PIN_A8         _GPIO(78)
+#define STN8815_PIN_C9         _GPIO(79)
+#define STN8815_PIN_B9         _GPIO(80)
+#define STN8815_PIN_A9         _GPIO(81)
+#define STN8815_PIN_C10                _GPIO(82)
+#define STN8815_PIN_K1         _GPIO(83)
+#define STN8815_PIN_K3         _GPIO(84)
+#define STN8815_PIN_K2         _GPIO(85)
+#define STN8815_PIN_J1         _GPIO(86)
+#define STN8815_PIN_J3         _GPIO(87)
+#define STN8815_PIN_J2         _GPIO(88)
+#define STN8815_PIN_H1         _GPIO(89)
+#define STN8815_PIN_H3         _GPIO(90)
+#define STN8815_PIN_H2         _GPIO(91)
+#define STN8815_PIN_G1         _GPIO(92)
+#define STN8815_PIN_G3         _GPIO(93)
+#define STN8815_PIN_G2         _GPIO(94)
+#define STN8815_PIN_F1         _GPIO(95)
+#define STN8815_PIN_T20                _GPIO(96)
+#define STN8815_PIN_R21                _GPIO(97)
+#define STN8815_PIN_R20                _GPIO(98)
+#define STN8815_PIN_U22                _GPIO(99)
+#define STN8815_PIN_N21                _GPIO(100)
+#define STN8815_PIN_N20                _GPIO(101)
+#define STN8815_PIN_P22                _GPIO(102)
+#define STN8815_PIN_N22                _GPIO(103)
+#define STN8815_PIN_V22                _GPIO(104)
+#define STN8815_PIN_V21                _GPIO(105)
+#define STN8815_PIN_K22                _GPIO(106)
+#define STN8815_PIN_K21                _GPIO(107)
+#define STN8815_PIN_H20                _GPIO(108)
+#define STN8815_PIN_G20                _GPIO(109)
+#define STN8815_PIN_L21                _GPIO(110)
+#define STN8815_PIN_H21                _GPIO(111)
+#define STN8815_PIN_J21                _GPIO(112)
+#define STN8815_PIN_H22                _GPIO(113)
+#define STN8815_PIN_K20                _GPIO(114)
+#define STN8815_PIN_L22                _GPIO(115)
+#define STN8815_PIN_G21                _GPIO(116)
+#define STN8815_PIN_J20                _GPIO(117)
+#define STN8815_PIN_G22                _GPIO(118)
+#define STN8815_PIN_U19                _GPIO(119)
+#define STN8815_PIN_G19                _GPIO(120)
+#define STN8815_PIN_M22                _GPIO(121)
+#define STN8815_PIN_M19                _GPIO(122)
+#define STN8815_PIN_J22                _GPIO(123)
+/* GPIOs 124-127 not routed to pins */
+
+/*
+ * The names of the pins are denoted by GPIO number and ball name, even
+ * though they can be used for other things than GPIO, this is the first
+ * column in the table of the data sheet and often used on schematics and
+ * such.
+ */
+static const struct pinctrl_pin_desc nmk_stn8815_pins[] = {
+       PINCTRL_PIN(STN8815_PIN_B4, "GPIO0_B4"),
+       PINCTRL_PIN(STN8815_PIN_D5, "GPIO1_D5"),
+       PINCTRL_PIN(STN8815_PIN_C5, "GPIO2_C5"),
+       PINCTRL_PIN(STN8815_PIN_A4, "GPIO3_A4"),
+       PINCTRL_PIN(STN8815_PIN_B5, "GPIO4_B5"),
+       PINCTRL_PIN(STN8815_PIN_D6, "GPIO5_D6"),
+       PINCTRL_PIN(STN8815_PIN_C6, "GPIO6_C6"),
+       PINCTRL_PIN(STN8815_PIN_B6, "GPIO7_B6"),
+       PINCTRL_PIN(STN8815_PIN_B10, "GPIO8_B10"),
+       PINCTRL_PIN(STN8815_PIN_A10, "GPIO9_A10"),
+       PINCTRL_PIN(STN8815_PIN_C11, "GPIO10_C11"),
+       PINCTRL_PIN(STN8815_PIN_B11, "GPIO11_B11"),
+       PINCTRL_PIN(STN8815_PIN_A11, "GPIO12_A11"),
+       PINCTRL_PIN(STN8815_PIN_C12, "GPIO13_C12"),
+       PINCTRL_PIN(STN8815_PIN_B12, "GPIO14_B12"),
+       PINCTRL_PIN(STN8815_PIN_A12, "GPIO15_A12"),
+       PINCTRL_PIN(STN8815_PIN_C13, "GPIO16_C13"),
+       PINCTRL_PIN(STN8815_PIN_B13, "GPIO17_B13"),
+       PINCTRL_PIN(STN8815_PIN_A13, "GPIO18_A13"),
+       PINCTRL_PIN(STN8815_PIN_D13, "GPIO19_D13"),
+       PINCTRL_PIN(STN8815_PIN_C14, "GPIO20_C14"),
+       PINCTRL_PIN(STN8815_PIN_B14, "GPIO21_B14"),
+       PINCTRL_PIN(STN8815_PIN_A14, "GPIO22_A14"),
+       PINCTRL_PIN(STN8815_PIN_D15, "GPIO23_D15"),
+       PINCTRL_PIN(STN8815_PIN_C15, "GPIO24_C15"),
+       PINCTRL_PIN(STN8815_PIN_B15, "GPIO25_B15"),
+       PINCTRL_PIN(STN8815_PIN_A15, "GPIO26_A15"),
+       PINCTRL_PIN(STN8815_PIN_C16, "GPIO27_C16"),
+       PINCTRL_PIN(STN8815_PIN_B16, "GPIO28_B16"),
+       PINCTRL_PIN(STN8815_PIN_A16, "GPIO29_A16"),
+       PINCTRL_PIN(STN8815_PIN_D17, "GPIO30_D17"),
+       PINCTRL_PIN(STN8815_PIN_C17, "GPIO31_C17"),
+       PINCTRL_PIN(STN8815_PIN_AB6, "GPIO32_AB6"),
+       PINCTRL_PIN(STN8815_PIN_AA6, "GPIO33_AA6"),
+       PINCTRL_PIN(STN8815_PIN_Y6, "GPIO34_Y6"),
+       PINCTRL_PIN(STN8815_PIN_Y5, "GPIO35_Y5"),
+       PINCTRL_PIN(STN8815_PIN_AA5, "GPIO36_AA5"),
+       PINCTRL_PIN(STN8815_PIN_AB5, "GPIO37_AB5"),
+       PINCTRL_PIN(STN8815_PIN_AB4, "GPIO38_AB4"),
+       PINCTRL_PIN(STN8815_PIN_Y4, "GPIO39_Y4"),
+       PINCTRL_PIN(STN8815_PIN_R1, "GPIO40_R1"),
+       PINCTRL_PIN(STN8815_PIN_R2, "GPIO41_R2"),
+       PINCTRL_PIN(STN8815_PIN_R3, "GPIO42_R3"),
+       PINCTRL_PIN(STN8815_PIN_P1, "GPIO43_P1"),
+       PINCTRL_PIN(STN8815_PIN_P2, "GPIO44_P2"),
+       PINCTRL_PIN(STN8815_PIN_P3, "GPIO45_P3"),
+       PINCTRL_PIN(STN8815_PIN_N1, "GPIO46_N1"),
+       PINCTRL_PIN(STN8815_PIN_N2, "GPIO47_N2"),
+       PINCTRL_PIN(STN8815_PIN_N3, "GPIO48_N3"),
+       PINCTRL_PIN(STN8815_PIN_M1, "GPIO49_M1"),
+       PINCTRL_PIN(STN8815_PIN_M3, "GPIO50_M3"),
+       PINCTRL_PIN(STN8815_PIN_M2, "GPIO51_M2"),
+       PINCTRL_PIN(STN8815_PIN_L1, "GPIO52_L1"),
+       PINCTRL_PIN(STN8815_PIN_L4, "GPIO53_L4"),
+       PINCTRL_PIN(STN8815_PIN_L3, "GPIO54_L3"),
+       PINCTRL_PIN(STN8815_PIN_L2, "GPIO55_L2"),
+       PINCTRL_PIN(STN8815_PIN_F3, "GPIO56_F3"),
+       PINCTRL_PIN(STN8815_PIN_F2, "GPIO57_F2"),
+       PINCTRL_PIN(STN8815_PIN_E1, "GPIO58_E1"),
+       PINCTRL_PIN(STN8815_PIN_E3, "GPIO59_E3"),
+       PINCTRL_PIN(STN8815_PIN_E2, "GPIO60_E2"),
+       PINCTRL_PIN(STN8815_PIN_E4, "GPIO61_E4"),
+       PINCTRL_PIN(STN8815_PIN_D3, "GPIO62_D3"),
+       PINCTRL_PIN(STN8815_PIN_D2, "GPIO63_D2"),
+       PINCTRL_PIN(STN8815_PIN_F21, "GPIO64_F21"),
+       PINCTRL_PIN(STN8815_PIN_F20, "GPIO65_F20"),
+       PINCTRL_PIN(STN8815_PIN_E22, "GPIO66_E22"),
+       PINCTRL_PIN(STN8815_PIN_D22, "GPIO67_D22"),
+       PINCTRL_PIN(STN8815_PIN_E21, "GPIO68_E21"),
+       PINCTRL_PIN(STN8815_PIN_E20, "GPIO69_E20"),
+       PINCTRL_PIN(STN8815_PIN_C22, "GPIO70_C22"),
+       PINCTRL_PIN(STN8815_PIN_D21, "GPIO71_D21"),
+       PINCTRL_PIN(STN8815_PIN_D20, "GPIO72_D20"),
+       PINCTRL_PIN(STN8815_PIN_C21, "GPIO73_C21"),
+       PINCTRL_PIN(STN8815_PIN_C20, "GPIO74_C20"),
+       PINCTRL_PIN(STN8815_PIN_C19, "GPIO75_C19"),
+       PINCTRL_PIN(STN8815_PIN_B20, "GPIO76_B20"),
+       PINCTRL_PIN(STN8815_PIN_B8, "GPIO77_B8"),
+       PINCTRL_PIN(STN8815_PIN_A8, "GPIO78_A8"),
+       PINCTRL_PIN(STN8815_PIN_C9, "GPIO79_C9"),
+       PINCTRL_PIN(STN8815_PIN_B9, "GPIO80_B9"),
+       PINCTRL_PIN(STN8815_PIN_A9, "GPIO81_A9"),
+       PINCTRL_PIN(STN8815_PIN_C10, "GPIO82_C10"),
+       PINCTRL_PIN(STN8815_PIN_K1, "GPIO83_K1"),
+       PINCTRL_PIN(STN8815_PIN_K3, "GPIO84_K3"),
+       PINCTRL_PIN(STN8815_PIN_K2, "GPIO85_K2"),
+       PINCTRL_PIN(STN8815_PIN_J1, "GPIO86_J1"),
+       PINCTRL_PIN(STN8815_PIN_J3, "GPIO87_J3"),
+       PINCTRL_PIN(STN8815_PIN_J2, "GPIO88_J2"),
+       PINCTRL_PIN(STN8815_PIN_H1, "GPIO89_H1"),
+       PINCTRL_PIN(STN8815_PIN_H3, "GPIO90_H3"),
+       PINCTRL_PIN(STN8815_PIN_H2, "GPIO91_H2"),
+       PINCTRL_PIN(STN8815_PIN_G1, "GPIO92_G1"),
+       PINCTRL_PIN(STN8815_PIN_G3, "GPIO93_G3"),
+       PINCTRL_PIN(STN8815_PIN_G2, "GPIO94_G2"),
+       PINCTRL_PIN(STN8815_PIN_F1, "GPIO95_F1"),
+       PINCTRL_PIN(STN8815_PIN_T20, "GPIO96_T20"),
+       PINCTRL_PIN(STN8815_PIN_R21, "GPIO97_R21"),
+       PINCTRL_PIN(STN8815_PIN_R20, "GPIO98_R20"),
+       PINCTRL_PIN(STN8815_PIN_U22, "GPIO99_U22"),
+       PINCTRL_PIN(STN8815_PIN_N21, "GPIO100_N21"),
+       PINCTRL_PIN(STN8815_PIN_N20, "GPIO101_N20"),
+       PINCTRL_PIN(STN8815_PIN_P22, "GPIO102_P22"),
+       PINCTRL_PIN(STN8815_PIN_N22, "GPIO103_N22"),
+       PINCTRL_PIN(STN8815_PIN_V22, "GPIO104_V22"),
+       PINCTRL_PIN(STN8815_PIN_V21, "GPIO105_V21"),
+       PINCTRL_PIN(STN8815_PIN_K22, "GPIO106_K22"),
+       PINCTRL_PIN(STN8815_PIN_K21, "GPIO107_K21"),
+       PINCTRL_PIN(STN8815_PIN_H20, "GPIO108_H20"),
+       PINCTRL_PIN(STN8815_PIN_G20, "GPIO109_G20"),
+       PINCTRL_PIN(STN8815_PIN_L21, "GPIO110_L21"),
+       PINCTRL_PIN(STN8815_PIN_H21, "GPIO111_H21"),
+       PINCTRL_PIN(STN8815_PIN_J21, "GPIO112_J21"),
+       PINCTRL_PIN(STN8815_PIN_H22, "GPIO113_H22"),
+       PINCTRL_PIN(STN8815_PIN_K20, "GPIO114_K20"),
+       PINCTRL_PIN(STN8815_PIN_L22, "GPIO115_L22"),
+       PINCTRL_PIN(STN8815_PIN_G21, "GPIO116_G21"),
+       PINCTRL_PIN(STN8815_PIN_J20, "GPIO117_J20"),
+       PINCTRL_PIN(STN8815_PIN_G22, "GPIO118_G22"),
+       PINCTRL_PIN(STN8815_PIN_U19, "GPIO119_U19"),
+       PINCTRL_PIN(STN8815_PIN_G19, "GPIO120_G19"),
+       PINCTRL_PIN(STN8815_PIN_M22, "GPIO121_M22"),
+       PINCTRL_PIN(STN8815_PIN_M19, "GPIO122_M19"),
+       PINCTRL_PIN(STN8815_PIN_J22, "GPIO123_J22"),
+};
+
+#define STN8815_GPIO_RANGE(a, b, c) { .name = "STN8815", .id = a, .base = b, \
+                       .pin_base = b, .npins = c }
+
+/*
+ * This matches the 32-pin gpio chips registered by the GPIO portion. This
+ * cannot be const since we assign the struct gpio_chip * pointer at runtime.
+ */
+static struct pinctrl_gpio_range nmk_stn8815_ranges[] = {
+       STN8815_GPIO_RANGE(0, 0, 32),
+       STN8815_GPIO_RANGE(1, 32, 32),
+       STN8815_GPIO_RANGE(2, 64, 32),
+       STN8815_GPIO_RANGE(3, 96, 28),
+};
+
+/*
+ * Read the pin group names like this:
+ * u0_a_1    = first groups of pins for uart0 on alt function a
+ * i2c2_b_2  = second group of pins for i2c2 on alt function b
+ */
+
+/* Altfunction A */
+static const unsigned u0_a_1_pins[] = { STN8815_PIN_B4, STN8815_PIN_D5,
+       STN8815_PIN_C5, STN8815_PIN_A4, STN8815_PIN_B5, STN8815_PIN_D6,
+       STN8815_PIN_C6, STN8815_PIN_B6 };
+static const unsigned mmcsd_a_1_pins[] = { STN8815_PIN_B10, STN8815_PIN_A10,
+       STN8815_PIN_C11, STN8815_PIN_B11, STN8815_PIN_A11, STN8815_PIN_C12,
+       STN8815_PIN_B12, STN8815_PIN_A12, STN8815_PIN_C13, STN8815_PIN_C15 };
+static const unsigned u1_a_1_pins[] = { STN8815_PIN_M2, STN8815_PIN_L1,
+                                       STN8815_PIN_F3, STN8815_PIN_F2 };
+static const unsigned i2c1_a_1_pins[] = { STN8815_PIN_L4, STN8815_PIN_L3 };
+static const unsigned i2c0_a_1_pins[] = { STN8815_PIN_D3, STN8815_PIN_D2 };
+/* Altfunction B */
+static const unsigned u1_b_1_pins[] = { STN8815_PIN_B16, STN8815_PIN_A16 };
+static const unsigned i2cusb_b_1_pins[] = { STN8815_PIN_C21, STN8815_PIN_C20 };
+
+#define STN8815_PIN_GROUP(a,b) { .name = #a, .pins = a##_pins,         \
+                       .npins = ARRAY_SIZE(a##_pins), .altsetting = b }
+
+static const struct nmk_pingroup nmk_stn8815_groups[] = {
+       STN8815_PIN_GROUP(u0_a_1, NMK_GPIO_ALT_A),
+       STN8815_PIN_GROUP(mmcsd_a_1, NMK_GPIO_ALT_A),
+       STN8815_PIN_GROUP(u1_a_1, NMK_GPIO_ALT_A),
+       STN8815_PIN_GROUP(i2c1_a_1, NMK_GPIO_ALT_A),
+       STN8815_PIN_GROUP(i2c0_a_1, NMK_GPIO_ALT_A),
+       STN8815_PIN_GROUP(u1_b_1, NMK_GPIO_ALT_B),
+       STN8815_PIN_GROUP(i2cusb_b_1, NMK_GPIO_ALT_B),
+};
+
+/* We use this macro to define the groups applicable to a function */
+#define STN8815_FUNC_GROUPS(a, b...)      \
+static const char * const a##_groups[] = { b };
+
+STN8815_FUNC_GROUPS(u0, "u0_a_1");
+STN8815_FUNC_GROUPS(mmcsd, "mmcsd_a_1");
+STN8815_FUNC_GROUPS(u1, "u1_a_1", "u1_b_1");
+STN8815_FUNC_GROUPS(i2c1, "i2c1_a_1");
+STN8815_FUNC_GROUPS(i2c0, "i2c0_a_1");
+STN8815_FUNC_GROUPS(i2cusb, "i2cusb_b_1");
+
+#define FUNCTION(fname)                                        \
+       {                                               \
+               .name = #fname,                         \
+               .groups = fname##_groups,               \
+               .ngroups = ARRAY_SIZE(fname##_groups),  \
+       }
+
+static const struct nmk_function nmk_stn8815_functions[] = {
+       FUNCTION(u0),
+       FUNCTION(mmcsd),
+       FUNCTION(u1),
+       FUNCTION(i2c1),
+       FUNCTION(i2c0),
+       FUNCTION(i2cusb),
+};
+
+static const struct nmk_pinctrl_soc_data nmk_stn8815_soc = {
+       .gpio_ranges = nmk_stn8815_ranges,
+       .gpio_num_ranges = ARRAY_SIZE(nmk_stn8815_ranges),
+       .pins = nmk_stn8815_pins,
+       .npins = ARRAY_SIZE(nmk_stn8815_pins),
+       .functions = nmk_stn8815_functions,
+       .nfunctions = ARRAY_SIZE(nmk_stn8815_functions),
+       .groups = nmk_stn8815_groups,
+       .ngroups = ARRAY_SIZE(nmk_stn8815_groups),
+};
+
+void __devinit
+nmk_pinctrl_stn8815_init(const struct nmk_pinctrl_soc_data **soc)
+{
+       *soc = &nmk_stn8815_soc;
+}
index 3dde6537adb878d7986a96b2dba528d41cf77d1a..6030a513f3c488506de4d80cd3cfae6c25ce69b2 100644 (file)
@@ -819,6 +819,7 @@ static struct irq_chip nmk_gpio_irq_chip = {
        .irq_set_wake   = nmk_gpio_irq_set_wake,
        .irq_startup    = nmk_gpio_irq_startup,
        .irq_shutdown   = nmk_gpio_irq_shutdown,
+       .flags          = IRQCHIP_MASK_ON_SUSPEND,
 };
 
 static void __nmk_gpio_irq_handler(unsigned int irq, struct irq_desc *desc,
@@ -826,16 +827,14 @@ static void __nmk_gpio_irq_handler(unsigned int irq, struct irq_desc *desc,
 {
        struct nmk_gpio_chip *nmk_chip;
        struct irq_chip *host_chip = irq_get_chip(irq);
-       unsigned int first_irq;
 
        chained_irq_enter(host_chip, desc);
 
        nmk_chip = irq_get_handler_data(irq);
-       first_irq = nmk_chip->domain->revmap_data.legacy.first_irq;
        while (status) {
                int bit = __ffs(status);
 
-               generic_handle_irq(first_irq + bit);
+               generic_handle_irq(irq_find_mapping(nmk_chip->domain, bit));
                status &= ~BIT(bit);
        }
 
@@ -1720,8 +1719,12 @@ static int __devinit nmk_pinctrl_probe(struct platform_device *pdev)
                        of_match_device(nmk_pinctrl_match, &pdev->dev)->data;
 
        /* Poke in other ASIC variants here */
+       if (version == PINCTRL_NMK_STN8815)
+               nmk_pinctrl_stn8815_init(&npct->soc);
        if (version == PINCTRL_NMK_DB8500)
                nmk_pinctrl_db8500_init(&npct->soc);
+       if (version == PINCTRL_NMK_DB8540)
+               nmk_pinctrl_db8540_init(&npct->soc);
 
        /*
         * We need all the GPIO drivers to probe FIRST, or we will not be able
@@ -1772,6 +1775,7 @@ static struct platform_driver nmk_gpio_driver = {
 static const struct platform_device_id nmk_pinctrl_id[] = {
        { "pinctrl-stn8815", PINCTRL_NMK_STN8815 },
        { "pinctrl-db8500", PINCTRL_NMK_DB8500 },
+       { "pinctrl-db8540", PINCTRL_NMK_DB8540 },
 };
 
 static struct platform_driver nmk_pinctrl_driver = {
index bc91aed7185db0bb46d731523819d6fa0dcbf8e6..5c99f1c62dfd1a1524f3d833b37d36c83c96a3c1 100644 (file)
@@ -6,6 +6,7 @@
 /* Package definitions */
 #define PINCTRL_NMK_STN8815    0
 #define PINCTRL_NMK_DB8500     1
+#define PINCTRL_NMK_DB8540     2
 
 /**
  * struct nmk_function - Nomadik pinctrl mux function
@@ -61,6 +62,19 @@ struct nmk_pinctrl_soc_data {
        unsigned ngroups;
 };
 
+#ifdef CONFIG_PINCTRL_STN8815
+
+void nmk_pinctrl_stn8815_init(const struct nmk_pinctrl_soc_data **soc);
+
+#else
+
+static inline void
+nmk_pinctrl_stn8815_init(const struct nmk_pinctrl_soc_data **soc)
+{
+}
+
+#endif
+
 #ifdef CONFIG_PINCTRL_DB8500
 
 void nmk_pinctrl_db8500_init(const struct nmk_pinctrl_soc_data **soc);
@@ -74,4 +88,17 @@ nmk_pinctrl_db8500_init(const struct nmk_pinctrl_soc_data **soc)
 
 #endif
 
+#ifdef CONFIG_PINCTRL_DB8540
+
+void nmk_pinctrl_db8540_init(const struct nmk_pinctrl_soc_data **soc);
+
+#else
+
+static inline void
+nmk_pinctrl_db8540_init(const struct nmk_pinctrl_soc_data **soc)
+{
+}
+
+#endif
+
 #endif /* PINCTRL_PINCTRL_NOMADIK_H */
index 76a4260f20f3dc458b3aa77b71f0d468728d9833..726a729a2ec92d5d0ae5e45e94bd458305496219 100644 (file)
@@ -26,7 +26,8 @@
 #include "core.h"
 
 #define DRIVER_NAME                    "pinctrl-single"
-#define PCS_MUX_NAME                   "pinctrl-single,pins"
+#define PCS_MUX_PINS_NAME              "pinctrl-single,pins"
+#define PCS_MUX_BITS_NAME              "pinctrl-single,bits"
 #define PCS_REG_NAME_LEN               ((sizeof(unsigned long) * 2) + 1)
 #define PCS_OFF_DISABLED               ~0U
 
@@ -54,6 +55,7 @@ struct pcs_pingroup {
 struct pcs_func_vals {
        void __iomem *reg;
        unsigned val;
+       unsigned mask;
 };
 
 /**
@@ -139,6 +141,7 @@ struct pcs_device {
        unsigned fshift;
        unsigned foff;
        unsigned fmax;
+       bool bits_per_mux;
        struct pcs_name *names;
        struct pcs_data pins;
        struct radix_tree_root pgtree;
@@ -243,7 +246,15 @@ static void pcs_pin_dbg_show(struct pinctrl_dev *pctldev,
                                        struct seq_file *s,
                                        unsigned offset)
 {
-       seq_printf(s, " " DRIVER_NAME);
+       struct pcs_device *pcs;
+       unsigned val;
+
+       pcs = pinctrl_dev_get_drvdata(pctldev);
+
+       val = pcs->read(pcs->base + offset);
+       val &= pcs->fmask;
+
+       seq_printf(s, "%08x %s " , val, DRIVER_NAME);
 }
 
 static void pcs_dt_free_map(struct pinctrl_dev *pctldev,
@@ -332,12 +343,17 @@ static int pcs_enable(struct pinctrl_dev *pctldev, unsigned fselector,
 
        for (i = 0; i < func->nvals; i++) {
                struct pcs_func_vals *vals;
-               unsigned val;
+               unsigned val, mask;
 
                vals = &func->vals[i];
                val = pcs->read(vals->reg);
-               val &= ~pcs->fmask;
-               val |= vals->val;
+               if (!vals->mask)
+                       mask = pcs->fmask;
+               else
+                       mask = pcs->fmask & vals->mask;
+
+               val &= ~mask;
+               val |= (vals->val & mask);
                pcs->write(val, vals->reg);
        }
 
@@ -657,18 +673,29 @@ static int pcs_parse_one_pinctrl_entry(struct pcs_device *pcs,
 {
        struct pcs_func_vals *vals;
        const __be32 *mux;
-       int size, rows, *pins, index = 0, found = 0, res = -ENOMEM;
+       int size, params, rows, *pins, index = 0, found = 0, res = -ENOMEM;
        struct pcs_function *function;
 
-       mux = of_get_property(np, PCS_MUX_NAME, &size);
-       if ((!mux) || (size < sizeof(*mux) * 2)) {
-               dev_err(pcs->dev, "bad data for mux %s\n",
-                       np->name);
+       if (pcs->bits_per_mux) {
+               params = 3;
+               mux = of_get_property(np, PCS_MUX_BITS_NAME, &size);
+       } else {
+               params = 2;
+               mux = of_get_property(np, PCS_MUX_PINS_NAME, &size);
+       }
+
+       if (!mux) {
+               dev_err(pcs->dev, "no valid property for %s\n", np->name);
+               return -EINVAL;
+       }
+
+       if (size < (sizeof(*mux) * params)) {
+               dev_err(pcs->dev, "bad data for %s\n", np->name);
                return -EINVAL;
        }
 
        size /= sizeof(*mux);   /* Number of elements in array */
-       rows = size / 2;        /* Each row is a key value pair */
+       rows = size / params;
 
        vals = devm_kzalloc(pcs->dev, sizeof(*vals) * rows, GFP_KERNEL);
        if (!vals)
@@ -686,6 +713,10 @@ static int pcs_parse_one_pinctrl_entry(struct pcs_device *pcs,
                val = be32_to_cpup(mux + index++);
                vals[found].reg = pcs->base + offset;
                vals[found].val = val;
+               if (params == 3) {
+                       val = be32_to_cpup(mux + index++);
+                       vals[found].mask = val;
+               }
 
                pin = pcs_get_pin_by_offset(pcs, offset);
                if (pin < 0) {
@@ -883,6 +914,9 @@ static int __devinit pcs_probe(struct platform_device *pdev)
        if (ret)
                pcs->foff = PCS_OFF_DISABLED;
 
+       pcs->bits_per_mux = of_property_read_bool(np,
+                                                 "pinctrl-single,bit-per-mux");
+
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res) {
                dev_err(pcs->dev, "could not get resource\n");
index 304360cd213ee400a18e71fce846838f6845ce6b..675497c15149f2e3725cdb9f1031d78dc1859750 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/bitops.h>
 #include <linux/gpio.h>
 #include <linux/of_gpio.h>
+#include <asm/mach/irq.h>
 
 #define DRIVER_NAME "pinmux-sirf"
 
@@ -69,6 +70,10 @@ static DEFINE_SPINLOCK(sgpio_lock);
  * refer to CS-131858-DC-6A.xls
  */
 static const struct pinctrl_pin_desc sirfsoc_pads[] = {
+       PINCTRL_PIN(0, "gpio0-0"),
+       PINCTRL_PIN(1, "gpio0-1"),
+       PINCTRL_PIN(2, "gpio0-2"),
+       PINCTRL_PIN(3, "gpio0-3"),
        PINCTRL_PIN(4, "pwm0"),
        PINCTRL_PIN(5, "pwm1"),
        PINCTRL_PIN(6, "pwm2"),
@@ -77,7 +82,9 @@ static const struct pinctrl_pin_desc sirfsoc_pads[] = {
        PINCTRL_PIN(9, "odo_0"),
        PINCTRL_PIN(10, "odo_1"),
        PINCTRL_PIN(11, "dr_dir"),
+       PINCTRL_PIN(12, "viprom_fa"),
        PINCTRL_PIN(13, "scl_1"),
+       PINCTRL_PIN(14, "ntrst"),
        PINCTRL_PIN(15, "sda_1"),
        PINCTRL_PIN(16, "x_ldd[16]"),
        PINCTRL_PIN(17, "x_ldd[17]"),
@@ -1260,8 +1267,10 @@ static int __devinit sirfsoc_pinmux_probe(struct platform_device *pdev)
                goto out_no_pmx;
        }
 
-       for (i = 0; i < ARRAY_SIZE(sirfsoc_gpio_ranges); i++)
+       for (i = 0; i < ARRAY_SIZE(sirfsoc_gpio_ranges); i++) {
+               sirfsoc_gpio_ranges[i].gc = &sgpio_bank[i].chip.gc;
                pinctrl_add_gpio_range(spmx->pmx, &sirfsoc_gpio_ranges[i]);
+       }
 
        dev_info(&pdev->dev, "initialized SIRFSOC pinmux driver\n");
 
@@ -1475,6 +1484,9 @@ static void sirfsoc_gpio_handle_irq(unsigned int irq, struct irq_desc *desc)
        u32 status, ctrl;
        int idx = 0;
        unsigned int first_irq;
+       struct irq_chip *chip = irq_get_chip(irq);
+
+       chained_irq_enter(chip, desc);
 
        status = readl(bank->chip.regs + SIRFSOC_GPIO_INT_STATUS(bank->id));
        if (!status) {
@@ -1503,20 +1515,17 @@ static void sirfsoc_gpio_handle_irq(unsigned int irq, struct irq_desc *desc)
                idx++;
                status = status >> 1;
        }
+
+       chained_irq_exit(chip, desc);
 }
 
 static inline void sirfsoc_gpio_set_input(struct sirfsoc_gpio_bank *bank, unsigned ctrl_offset)
 {
        u32 val;
-       unsigned long flags;
-
-       spin_lock_irqsave(&bank->lock, flags);
 
        val = readl(bank->chip.regs + ctrl_offset);
        val &= ~SIRFSOC_GPIO_CTL_OUT_EN_MASK;
        writel(val, bank->chip.regs + ctrl_offset);
-
-       spin_unlock_irqrestore(&bank->lock, flags);
 }
 
 static int sirfsoc_gpio_request(struct gpio_chip *chip, unsigned offset)
@@ -1726,6 +1735,8 @@ static int __devinit sirfsoc_gpio_probe(struct device_node *np)
                irq_set_handler_data(bank->parent_irq, bank);
        }
 
+       return 0;
+
 out:
        iounmap(regs);
        return err;
index 3d5ac73bd5a7f978534a315b028e904433c0279b..9301a7a95effae0a3f447dcc5aefb8030a9e9a62 100644 (file)
@@ -232,14 +232,11 @@ int pinmux_request_gpio(struct pinctrl_dev *pctldev,
                        struct pinctrl_gpio_range *range,
                        unsigned pin, unsigned gpio)
 {
-       char gpiostr[16];
        const char *owner;
        int ret;
 
        /* Conjure some name stating what chip and pin this is taken by */
-       snprintf(gpiostr, 15, "%s:%d", range->name, gpio);
-
-       owner = kstrdup(gpiostr, GFP_KERNEL);
+       owner = kasprintf(GFP_KERNEL, "%s:%d", range->name, gpio);
        if (!owner)
                return -EINVAL;
 
index 52daaa816e53691792b6071955dbe680c4721b91..9da5fe715e6a2a92364fcacfb46e6158314b88b8 100644 (file)
@@ -7685,25 +7685,15 @@ static int fan_set_speed(int speed)
 
 static void fan_watchdog_reset(void)
 {
-       static int fan_watchdog_active;
-
        if (fan_control_access_mode == TPACPI_FAN_WR_NONE)
                return;
 
-       if (fan_watchdog_active)
-               cancel_delayed_work(&fan_watchdog_task);
-
        if (fan_watchdog_maxinterval > 0 &&
-           tpacpi_lifecycle != TPACPI_LIFE_EXITING) {
-               fan_watchdog_active = 1;
-               if (!queue_delayed_work(tpacpi_wq, &fan_watchdog_task,
-                               msecs_to_jiffies(fan_watchdog_maxinterval
-                                                * 1000))) {
-                       pr_err("failed to queue the fan watchdog, "
-                              "watchdog will not trigger\n");
-               }
-       } else
-               fan_watchdog_active = 0;
+           tpacpi_lifecycle != TPACPI_LIFE_EXITING)
+               mod_delayed_work(tpacpi_wq, &fan_watchdog_task,
+                       msecs_to_jiffies(fan_watchdog_maxinterval * 1000));
+       else
+               cancel_delayed_work(&fan_watchdog_task);
 }
 
 static void fan_watchdog_fire(struct work_struct *ignored)
index bba3ccac72fe731a6807e211af9171a3204ce8c8..3041514f4d3f2d38469048dbf5ee96bf879483b6 100644 (file)
@@ -1018,7 +1018,7 @@ static int __devinit ab8500_btemp_probe(struct platform_device *pdev)
        }
 
        /* Init work for measuring temperature periodically */
-       INIT_DELAYED_WORK_DEFERRABLE(&di->btemp_periodic_work,
+       INIT_DEFERRABLE_WORK(&di->btemp_periodic_work,
                ab8500_btemp_periodic_work);
 
        /* Identify the battery */
index d4f0c98428cbacbef7767c8d375867aaf87cf529..0701dbc2b7e1fe4b506cd0394a12bdaed6e0b91d 100644 (file)
@@ -2618,9 +2618,9 @@ static int __devinit ab8500_charger_probe(struct platform_device *pdev)
        }
 
        /* Init work for HW failure check */
-       INIT_DELAYED_WORK_DEFERRABLE(&di->check_hw_failure_work,
+       INIT_DEFERRABLE_WORK(&di->check_hw_failure_work,
                ab8500_charger_check_hw_failure_work);
-       INIT_DELAYED_WORK_DEFERRABLE(&di->check_usbchgnotok_work,
+       INIT_DEFERRABLE_WORK(&di->check_usbchgnotok_work,
                ab8500_charger_check_usbchargernotok_work);
 
        /*
@@ -2632,10 +2632,10 @@ static int __devinit ab8500_charger_probe(struct platform_device *pdev)
         * watchdog have to be kicked by the charger driver
         * when the AC charger is disabled
         */
-       INIT_DELAYED_WORK_DEFERRABLE(&di->kick_wd_work,
+       INIT_DEFERRABLE_WORK(&di->kick_wd_work,
                ab8500_charger_kick_watchdog_work);
 
-       INIT_DELAYED_WORK_DEFERRABLE(&di->check_vbat_work,
+       INIT_DEFERRABLE_WORK(&di->check_vbat_work,
                ab8500_charger_check_vbat_work);
 
        /* Init work for charger detection */
index bf022255994c86b3d3486e27e1edcc905c7246f4..5c9e7c263c382f93244a70e4be7f7ddb7a455fcc 100644 (file)
@@ -2516,19 +2516,19 @@ static int __devinit ab8500_fg_probe(struct platform_device *pdev)
        INIT_WORK(&di->fg_acc_cur_work, ab8500_fg_acc_cur_work);
 
        /* Init work for reinitialising the fg algorithm */
-       INIT_DELAYED_WORK_DEFERRABLE(&di->fg_reinit_work,
+       INIT_DEFERRABLE_WORK(&di->fg_reinit_work,
                ab8500_fg_reinit_work);
 
        /* Work delayed Queue to run the state machine */
-       INIT_DELAYED_WORK_DEFERRABLE(&di->fg_periodic_work,
+       INIT_DEFERRABLE_WORK(&di->fg_periodic_work,
                ab8500_fg_periodic_work);
 
        /* Work to check low battery condition */
-       INIT_DELAYED_WORK_DEFERRABLE(&di->fg_low_bat_work,
+       INIT_DEFERRABLE_WORK(&di->fg_low_bat_work,
                ab8500_fg_low_bat_work);
 
        /* Init work for HW failure check */
-       INIT_DELAYED_WORK_DEFERRABLE(&di->fg_check_hw_failure_work,
+       INIT_DEFERRABLE_WORK(&di->fg_check_hw_failure_work,
                ab8500_fg_check_hw_failure_work);
 
        /* Initialize OVV, and other registers */
index 804b88c760d6bf0a375c486ada3d527bf204d109..4d302803ffccb01580937ba4ffef7ecc05096ac9 100644 (file)
@@ -1848,9 +1848,9 @@ static int __devinit abx500_chargalg_probe(struct platform_device *pdev)
        }
 
        /* Init work for chargalg */
-       INIT_DELAYED_WORK_DEFERRABLE(&di->chargalg_periodic_work,
+       INIT_DEFERRABLE_WORK(&di->chargalg_periodic_work,
                abx500_chargalg_periodic_work);
-       INIT_DELAYED_WORK_DEFERRABLE(&di->chargalg_wd_work,
+       INIT_DEFERRABLE_WORK(&di->chargalg_wd_work,
                abx500_chargalg_wd_work);
 
        /* Init work for chargalg */
index 526e5c9312945bdf480f57a4673b478d22250be8..7ff83cf43c8c1ad5d06407b9eba1358f0bdb9053 100644 (file)
@@ -509,9 +509,8 @@ static void _setup_polling(struct work_struct *work)
        if (!delayed_work_pending(&cm_monitor_work) ||
            (delayed_work_pending(&cm_monitor_work) &&
             time_after(next_polling, _next_polling))) {
-               cancel_delayed_work_sync(&cm_monitor_work);
                next_polling = jiffies + polling_jiffy;
-               queue_delayed_work(cm_wq, &cm_monitor_work, polling_jiffy);
+               mod_delayed_work(cm_wq, &cm_monitor_work, polling_jiffy);
        }
 
 out:
@@ -546,10 +545,8 @@ static void fullbatt_handler(struct charger_manager *cm)
        if (cm_suspended)
                device_set_wakeup_capable(cm->dev, true);
 
-       if (delayed_work_pending(&cm->fullbatt_vchk_work))
-               cancel_delayed_work(&cm->fullbatt_vchk_work);
-       queue_delayed_work(cm_wq, &cm->fullbatt_vchk_work,
-                          msecs_to_jiffies(desc->fullbatt_vchkdrop_ms));
+       mod_delayed_work(cm_wq, &cm->fullbatt_vchk_work,
+                        msecs_to_jiffies(desc->fullbatt_vchkdrop_ms));
        cm->fullbatt_vchk_jiffies_at = jiffies + msecs_to_jiffies(
                                       desc->fullbatt_vchkdrop_ms);
 
index 74c6b23aeabfc769c6e2892577669ab658a32ade..b19bfe400f8c23bb0512ea8b642dd6bd5034fced 100644 (file)
@@ -290,7 +290,7 @@ static struct gpio collie_batt_gpios[] = {
 static int collie_bat_suspend(struct ucb1x00_dev *dev, pm_message_t state)
 {
        /* flush all pending status updates */
-       flush_work_sync(&bat_work);
+       flush_work(&bat_work);
        return 0;
 }
 
index 076e211a40b7111e864d591cba0400ff7042b6ad..704e652072be08acb238f8a3b8965beb90cdf10b 100644 (file)
@@ -355,8 +355,7 @@ static void ds2760_battery_external_power_changed(struct power_supply *psy)
 
        dev_dbg(di->dev, "%s\n", __func__);
 
-       cancel_delayed_work(&di->monitor_work);
-       queue_delayed_work(di->monitor_wqueue, &di->monitor_work, HZ/10);
+       mod_delayed_work(di->monitor_wqueue, &di->monitor_work, HZ/10);
 }
 
 
@@ -401,8 +400,7 @@ static void ds2760_battery_set_charged(struct power_supply *psy)
 
        /* postpone the actual work by 20 secs. This is for debouncing GPIO
         * signals and to let the current value settle. See AN4188. */
-       cancel_delayed_work(&di->set_charged_work);
-       queue_delayed_work(di->monitor_wqueue, &di->set_charged_work, HZ * 20);
+       mod_delayed_work(di->monitor_wqueue, &di->set_charged_work, HZ * 20);
 }
 
 static int ds2760_battery_get_property(struct power_supply *psy,
@@ -616,8 +614,7 @@ static int ds2760_battery_resume(struct platform_device *pdev)
        di->charge_status = POWER_SUPPLY_STATUS_UNKNOWN;
        power_supply_changed(&di->bat);
 
-       cancel_delayed_work(&di->monitor_work);
-       queue_delayed_work(di->monitor_wqueue, &di->monitor_work, HZ);
+       mod_delayed_work(di->monitor_wqueue, &di->monitor_work, HZ);
 
        return 0;
 }
index 8dbc7bfaab14d40422c8bb17db3a4c43cf695abe..ffbed5e5b9456e9dd763ce683b27b7cab6eaf1b1 100644 (file)
@@ -173,16 +173,14 @@ static void jz_battery_external_power_changed(struct power_supply *psy)
 {
        struct jz_battery *jz_battery = psy_to_jz_battery(psy);
 
-       cancel_delayed_work(&jz_battery->work);
-       schedule_delayed_work(&jz_battery->work, 0);
+       mod_delayed_work(system_wq, &jz_battery->work, 0);
 }
 
 static irqreturn_t jz_battery_charge_irq(int irq, void *data)
 {
        struct jz_battery *jz_battery = data;
 
-       cancel_delayed_work(&jz_battery->work);
-       schedule_delayed_work(&jz_battery->work, 0);
+       mod_delayed_work(system_wq, &jz_battery->work, 0);
 
        return IRQ_HANDLED;
 }
index c284143cfcd76ba7c3ccd3245fb379f953e0f40f..58e67830143c37ff520fb3e7678fe3513ecf2858 100644 (file)
@@ -232,7 +232,7 @@ static int __devinit max17040_probe(struct i2c_client *client,
        max17040_reset(client);
        max17040_get_version(client);
 
-       INIT_DELAYED_WORK_DEFERRABLE(&chip->work, max17040_work);
+       INIT_DEFERRABLE_WORK(&chip->work, max17040_work);
        schedule_delayed_work(&chip->work, MAX17040_DELAY);
 
        return 0;
index 28bbe7e094e36a7695ffaf4a81e3e3e50dbf5ca4..51199b5ce2210ed64b3842eec4e0ea93b1f12763 100644 (file)
@@ -327,7 +327,7 @@ static struct gpio tosa_bat_gpios[] = {
 static int tosa_bat_suspend(struct platform_device *dev, pm_message_t state)
 {
        /* flush all pending status updates */
-       flush_work_sync(&bat_work);
+       flush_work(&bat_work);
        return 0;
 }
 
index d2d4c08c681cd99f1eef9b1a477830289b533f70..1245fe1f48c336d588a7d2f42a3263d84a18524d 100644 (file)
@@ -146,7 +146,7 @@ static irqreturn_t wm97xx_chrg_irq(int irq, void *data)
 #ifdef CONFIG_PM
 static int wm97xx_bat_suspend(struct device *dev)
 {
-       flush_work_sync(&bat_work);
+       flush_work(&bat_work);
        return 0;
 }
 
index 8c9a607ea77a9ef3a02f7df2a0e94075d7448a78..5757d0d6782f33fa858558f52f903e2bc36a6b49 100644 (file)
@@ -276,7 +276,7 @@ static int z2_batt_suspend(struct device *dev)
        struct i2c_client *client = to_i2c_client(dev);
        struct z2_charger *charger = i2c_get_clientdata(client);
 
-       flush_work_sync(&charger->bat_work);
+       flush_work(&charger->bat_work);
        return 0;
 }
 
index 1e528b539a07f202a04b9e9dbdfbffbac6656703..79f4bce061bd289d5f01a35601ad151f73ee8d80 100644 (file)
@@ -143,10 +143,12 @@ static int ptp_clock_adjtime(struct posix_clock *pc, struct timex *tx)
                kt = timespec_to_ktime(ts);
                delta = ktime_to_ns(kt);
                err = ops->adjtime(ops, delta);
-
        } else if (tx->modes & ADJ_FREQUENCY) {
-
                err = ops->adjfreq(ops, scaled_ppm_to_ppb(tx->freq));
+               ptp->dialed_frequency = tx->freq;
+       } else if (tx->modes == 0) {
+               tx->freq = ptp->dialed_frequency;
+               err = 0;
        }
 
        return err;
@@ -180,7 +182,8 @@ static void delete_ptp_clock(struct posix_clock *pc)
 
 /* public interface */
 
-struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info)
+struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
+                                    struct device *parent)
 {
        struct ptp_clock *ptp;
        int err = 0, index, major = MAJOR(ptp_devt);
@@ -213,7 +216,7 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info)
        init_waitqueue_head(&ptp->tsev_wq);
 
        /* Create a new device in our class. */
-       ptp->dev = device_create(ptp_class, NULL, ptp->devid, ptp,
+       ptp->dev = device_create(ptp_class, parent, ptp->devid, ptp,
                                 "ptp%d", ptp->index);
        if (IS_ERR(ptp->dev))
                goto no_device;
@@ -300,6 +303,11 @@ void ptp_clock_event(struct ptp_clock *ptp, struct ptp_clock_event *event)
                pps_get_ts(&evt);
                pps_event(ptp->pps_source, &evt, PTP_PPS_EVENT, NULL);
                break;
+
+       case PTP_CLOCK_PPSUSR:
+               pps_event(ptp->pps_source, &event->pps_times,
+                         PTP_PPS_EVENT, NULL);
+               break;
        }
 }
 EXPORT_SYMBOL(ptp_clock_event);
index e03c40692b0073106f845f9adbb1d38250590e76..d49b85164fd22e830f51e066337801134eef9354 100644 (file)
@@ -298,7 +298,7 @@ static int __init ptp_ixp_init(void)
 
        ixp_clock.caps = ptp_ixp_caps;
 
-       ixp_clock.ptp_clock = ptp_clock_register(&ixp_clock.caps);
+       ixp_clock.ptp_clock = ptp_clock_register(&ixp_clock.caps, NULL);
 
        if (IS_ERR(ixp_clock.ptp_clock))
                return PTR_ERR(ixp_clock.ptp_clock);
index 3a9c17eced10c3a34d591fd465db555ec9498b88..e624e4dd2abb001444c08dc0593800ddab9f8da0 100644 (file)
@@ -627,7 +627,7 @@ pch_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        }
 
        chip->caps = ptp_pch_caps;
-       chip->ptp_clock = ptp_clock_register(&chip->caps);
+       chip->ptp_clock = ptp_clock_register(&chip->caps, &pdev->dev);
 
        if (IS_ERR(chip->ptp_clock))
                return PTR_ERR(chip->ptp_clock);
index 4d5b5082c3b19d4766be52dbed3904aa5f4cd6c9..69d32070cc654526c4391229f89140e8eff35303 100644 (file)
@@ -45,6 +45,7 @@ struct ptp_clock {
        dev_t devid;
        int index; /* index into clocks.map */
        struct pps_device *pps_source;
+       long dialed_frequency; /* remembers the frequency adjustment */
        struct timestamp_event_queue tsevq; /* simple fifo for time stamps */
        struct mutex tsevq_mux; /* one process at a time reading the fifo */
        wait_queue_head_t tsev_wq;
index 2e0352dc26bda02968f28ad686b138d45ea5104f..5c4829cba6a62de6168c6bb3c4913812e5e8b4e1 100644 (file)
@@ -3476,7 +3476,7 @@ void regulator_unregister(struct regulator_dev *rdev)
                regulator_put(rdev->supply);
        mutex_lock(&regulator_list_mutex);
        debugfs_remove_recursive(rdev->debugfs);
-       flush_work_sync(&rdev->disable_work.work);
+       flush_work(&rdev->disable_work.work);
        WARN_ON(rdev->open_count);
        unset_regulator_supplies(rdev);
        list_del(&rdev->list);
index d4ade9e92fbbd4fa97f9ce1acf4043a7b91b4f65..fb92524d24ef9ac1e72a6bf0912a5092e15433fd 100644 (file)
@@ -1523,7 +1523,7 @@ static void ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg)
                                goto done;
        default:
                break;
-       };
+       }
 
        fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)
                     ? CTC_STATE_RXINIT : CTC_STATE_TXINIT);
index 5227e5734a9d152e45a2b4920a13f92b7354051e..98ea9cc6f1aaba9f442ecb4b46724c0f3f5ccd7b 100644 (file)
@@ -1454,7 +1454,7 @@ static int add_channel(struct ccw_device *cdev, enum ctcm_channel_types type,
                                ch_fsm_len, GFP_KERNEL);
        }
        if (ch->fsm == NULL)
-                               goto free_return;
+                               goto nomem_return;
 
        fsm_newstate(ch->fsm, CTC_STATE_IDLE);
 
index a3adf4b1c60d7e2e5efc074ce4fb6b2e7a6276be..2ca0f1dd7a00b857211348cf28d2c9e4a9df161a 100644 (file)
@@ -282,7 +282,7 @@ lcs_setup_write_ccws(struct lcs_card *card)
 
        LCS_DBF_TEXT(3, setup, "iwritccw");
        /* Setup write ccws. */
-       memset(card->write.ccws, 0, sizeof(struct ccw1) * LCS_NUM_BUFFS + 1);
+       memset(card->write.ccws, 0, sizeof(struct ccw1) * (LCS_NUM_BUFFS + 1));
        for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) {
                card->write.ccws[cnt].cmd_code = LCS_CCW_WRITE;
                card->write.ccws[cnt].count = 0;
index cf6da7fafe54d5996bff33ec38b0976e046d0a0c..3e25d31504560a79b75b782a45153190a7694ffa 100644 (file)
@@ -489,7 +489,7 @@ static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card)
                atomic_set(&reply->refcnt, 1);
                atomic_set(&reply->received, 0);
                reply->card = card;
-       };
+       }
        return reply;
 }
 
@@ -1257,7 +1257,30 @@ static void qeth_clean_channel(struct qeth_channel *channel)
                kfree(channel->iob[cnt].data);
 }
 
-static void qeth_get_channel_path_desc(struct qeth_card *card)
+static void qeth_set_single_write_queues(struct qeth_card *card)
+{
+       if ((atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED) &&
+           (card->qdio.no_out_queues == 4))
+               qeth_free_qdio_buffers(card);
+
+       card->qdio.no_out_queues = 1;
+       if (card->qdio.default_out_queue != 0)
+               dev_info(&card->gdev->dev, "Priority Queueing not supported\n");
+
+       card->qdio.default_out_queue = 0;
+}
+
+static void qeth_set_multiple_write_queues(struct qeth_card *card)
+{
+       if ((atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED) &&
+           (card->qdio.no_out_queues == 1)) {
+               qeth_free_qdio_buffers(card);
+               card->qdio.default_out_queue = 2;
+       }
+       card->qdio.no_out_queues = 4;
+}
+
+static void qeth_update_from_chp_desc(struct qeth_card *card)
 {
        struct ccw_device *ccwdev;
        struct channelPath_dsc {
@@ -1274,38 +1297,23 @@ static void qeth_get_channel_path_desc(struct qeth_card *card)
        QETH_DBF_TEXT(SETUP, 2, "chp_desc");
 
        ccwdev = card->data.ccwdev;
-       chp_dsc = (struct channelPath_dsc *)ccw_device_get_chp_desc(ccwdev, 0);
-       if (chp_dsc != NULL) {
-               if (card->info.type != QETH_CARD_TYPE_IQD) {
-                       /* CHPP field bit 6 == 1 -> single queue */
-                       if ((chp_dsc->chpp & 0x02) == 0x02) {
-                               if ((atomic_read(&card->qdio.state) !=
-                                       QETH_QDIO_UNINITIALIZED) &&
-                                   (card->qdio.no_out_queues == 4))
-                                       /* change from 4 to 1 outbound queues */
-                                       qeth_free_qdio_buffers(card);
-                               card->qdio.no_out_queues = 1;
-                               if (card->qdio.default_out_queue != 0)
-                                       dev_info(&card->gdev->dev,
-                                       "Priority Queueing not supported\n");
-                               card->qdio.default_out_queue = 0;
-                       } else {
-                               if ((atomic_read(&card->qdio.state) !=
-                                       QETH_QDIO_UNINITIALIZED) &&
-                                   (card->qdio.no_out_queues == 1)) {
-                                       /* change from 1 to 4 outbound queues */
-                                       qeth_free_qdio_buffers(card);
-                                       card->qdio.default_out_queue = 2;
-                               }
-                               card->qdio.no_out_queues = 4;
-                       }
-               }
-               card->info.func_level = 0x4100 + chp_dsc->desc;
-               kfree(chp_dsc);
-       }
+       chp_dsc = ccw_device_get_chp_desc(ccwdev, 0);
+       if (!chp_dsc)
+               goto out;
+
+       card->info.func_level = 0x4100 + chp_dsc->desc;
+       if (card->info.type == QETH_CARD_TYPE_IQD)
+               goto out;
+
+       /* CHPP field bit 6 == 1 -> single queue */
+       if ((chp_dsc->chpp & 0x02) == 0x02)
+               qeth_set_single_write_queues(card);
+       else
+               qeth_set_multiple_write_queues(card);
+out:
+       kfree(chp_dsc);
        QETH_DBF_TEXT_(SETUP, 2, "nr:%x", card->qdio.no_out_queues);
        QETH_DBF_TEXT_(SETUP, 2, "lvl:%02x", card->info.func_level);
-       return;
 }
 
 static void qeth_init_qdio_info(struct qeth_card *card)
@@ -1473,7 +1481,7 @@ static int qeth_determine_card_type(struct qeth_card *card)
                        card->qdio.no_in_queues = 1;
                        card->info.is_multicast_different =
                                known_devices[i][QETH_MULTICAST_IND];
-                       qeth_get_channel_path_desc(card);
+                       qeth_update_from_chp_desc(card);
                        return 0;
                }
                i++;
@@ -2029,7 +2037,7 @@ int qeth_send_control_data(struct qeth_card *card, int len,
                        if (time_after(jiffies, timeout))
                                goto time_err;
                        cpu_relax();
-               };
+               }
        }
 
        if (reply->rc == -EIO)
@@ -4735,7 +4743,7 @@ int qeth_core_hardsetup_card(struct qeth_card *card)
 
        QETH_DBF_TEXT(SETUP, 2, "hrdsetup");
        atomic_set(&card->force_alloc_skb, 0);
-       qeth_get_channel_path_desc(card);
+       qeth_update_from_chp_desc(card);
 retry:
        if (retries)
                QETH_DBF_MESSAGE(2, "%s Retrying to do IDX activates.\n",
index c5f03fa70fbaea0e774da0891a1ba9338c69f88d..4cd310cb5bdf8f1c070cc1193294928015834a94 100644 (file)
@@ -794,6 +794,7 @@ int qeth_l3_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
                rc = -EEXIST;
        spin_unlock_irqrestore(&card->ip_lock, flags);
        if (rc) {
+               kfree(ipaddr);
                return rc;
        }
        if (!qeth_l3_add_ip(card, ipaddr))
@@ -858,6 +859,7 @@ int qeth_l3_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
                rc = -EEXIST;
        spin_unlock_irqrestore(&card->ip_lock, flags);
        if (rc) {
+               kfree(ipaddr);
                return rc;
        }
        if (!qeth_l3_add_ip(card, ipaddr))
index aff8621de8069b813014a550c468a8bf8d78937b..f6adde44f226e1352c327a3b27696f09f7a7b501 100644 (file)
@@ -519,6 +519,7 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
 
        rwlock_init(&port->unit_list_lock);
        INIT_LIST_HEAD(&port->unit_list);
+       atomic_set(&port->units, 0);
 
        INIT_WORK(&port->gid_pn_work, zfcp_fc_port_did_lookup);
        INIT_WORK(&port->test_link_work, zfcp_fc_link_test_work);
index e37f04551948907be4be7a6da626b37263b09c1b..f2dd3a0a39eb04e421437f4eaef1b6d1d1f1f82f 100644 (file)
@@ -39,19 +39,25 @@ void zfcp_ccw_adapter_put(struct zfcp_adapter *adapter)
        spin_unlock_irqrestore(&zfcp_ccw_adapter_ref_lock, flags);
 }
 
-static int zfcp_ccw_activate(struct ccw_device *cdev)
-
+/**
+ * zfcp_ccw_activate - activate adapter and wait for it to finish
+ * @cdev: pointer to belonging ccw device
+ * @clear: Status flags to clear.
+ * @tag: s390dbf trace record tag
+ */
+static int zfcp_ccw_activate(struct ccw_device *cdev, int clear, char *tag)
 {
        struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
 
        if (!adapter)
                return 0;
 
+       zfcp_erp_clear_adapter_status(adapter, clear);
        zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING);
        zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
-                               "ccresu2");
+                               tag);
        zfcp_erp_wait(adapter);
-       flush_work(&adapter->scan_work);
+       flush_work(&adapter->scan_work); /* ok to call even if nothing queued */
 
        zfcp_ccw_adapter_put(adapter);
 
@@ -164,32 +170,52 @@ static int zfcp_ccw_set_online(struct ccw_device *cdev)
        BUG_ON(!zfcp_reqlist_isempty(adapter->req_list));
        adapter->req_no = 0;
 
-       zfcp_ccw_activate(cdev);
+       zfcp_ccw_activate(cdev, 0, "ccsonl1");
+       /* scan for remote ports
+          either at the end of any successful adapter recovery
+          or only after the adapter recovery for setting a device online */
+       zfcp_fc_inverse_conditional_port_scan(adapter);
+       flush_work(&adapter->scan_work); /* ok to call even if nothing queued */
        zfcp_ccw_adapter_put(adapter);
        return 0;
 }
 
 /**
- * zfcp_ccw_set_offline - set_offline function of zfcp driver
+ * zfcp_ccw_offline_sync - shut down adapter and wait for it to finish
  * @cdev: pointer to belonging ccw device
+ * @set: Status flags to set.
+ * @tag: s390dbf trace record tag
  *
  * This function gets called by the common i/o layer and sets an adapter
  * into state offline.
  */
-static int zfcp_ccw_set_offline(struct ccw_device *cdev)
+static int zfcp_ccw_offline_sync(struct ccw_device *cdev, int set, char *tag)
 {
        struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
 
        if (!adapter)
                return 0;
 
-       zfcp_erp_adapter_shutdown(adapter, 0, "ccsoff1");
+       zfcp_erp_set_adapter_status(adapter, set);
+       zfcp_erp_adapter_shutdown(adapter, 0, tag);
        zfcp_erp_wait(adapter);
 
        zfcp_ccw_adapter_put(adapter);
        return 0;
 }
 
+/**
+ * zfcp_ccw_set_offline - set_offline function of zfcp driver
+ * @cdev: pointer to belonging ccw device
+ *
+ * This function gets called by the common i/o layer and sets an adapter
+ * into state offline.
+ */
+static int zfcp_ccw_set_offline(struct ccw_device *cdev)
+{
+       return zfcp_ccw_offline_sync(cdev, 0, "ccsoff1");
+}
+
 /**
  * zfcp_ccw_notify - ccw notify function
  * @cdev: pointer to belonging ccw device
@@ -207,6 +233,11 @@ static int zfcp_ccw_notify(struct ccw_device *cdev, int event)
 
        switch (event) {
        case CIO_GONE:
+               if (atomic_read(&adapter->status) &
+                   ZFCP_STATUS_ADAPTER_SUSPENDED) { /* notification ignore */
+                       zfcp_dbf_hba_basic("ccnigo1", adapter);
+                       break;
+               }
                dev_warn(&cdev->dev, "The FCP device has been detached\n");
                zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti1");
                break;
@@ -216,6 +247,11 @@ static int zfcp_ccw_notify(struct ccw_device *cdev, int event)
                zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti2");
                break;
        case CIO_OPER:
+               if (atomic_read(&adapter->status) &
+                   ZFCP_STATUS_ADAPTER_SUSPENDED) { /* notification ignore */
+                       zfcp_dbf_hba_basic("ccniop1", adapter);
+                       break;
+               }
                dev_info(&cdev->dev, "The FCP device is operational again\n");
                zfcp_erp_set_adapter_status(adapter,
                                            ZFCP_STATUS_COMMON_RUNNING);
@@ -251,6 +287,28 @@ static void zfcp_ccw_shutdown(struct ccw_device *cdev)
        zfcp_ccw_adapter_put(adapter);
 }
 
+static int zfcp_ccw_suspend(struct ccw_device *cdev)
+{
+       zfcp_ccw_offline_sync(cdev, ZFCP_STATUS_ADAPTER_SUSPENDED, "ccsusp1");
+       return 0;
+}
+
+static int zfcp_ccw_thaw(struct ccw_device *cdev)
+{
+       /* trace records for thaw and final shutdown during suspend
+          can only be found in system dump until the end of suspend
+          but not after resume because it's based on the memory image
+          right after the very first suspend (freeze) callback */
+       zfcp_ccw_activate(cdev, 0, "ccthaw1");
+       return 0;
+}
+
+static int zfcp_ccw_resume(struct ccw_device *cdev)
+{
+       zfcp_ccw_activate(cdev, ZFCP_STATUS_ADAPTER_SUSPENDED, "ccresu1");
+       return 0;
+}
+
 struct ccw_driver zfcp_ccw_driver = {
        .driver = {
                .owner  = THIS_MODULE,
@@ -263,7 +321,7 @@ struct ccw_driver zfcp_ccw_driver = {
        .set_offline = zfcp_ccw_set_offline,
        .notify      = zfcp_ccw_notify,
        .shutdown    = zfcp_ccw_shutdown,
-       .freeze      = zfcp_ccw_set_offline,
-       .thaw        = zfcp_ccw_activate,
-       .restore     = zfcp_ccw_activate,
+       .freeze      = zfcp_ccw_suspend,
+       .thaw        = zfcp_ccw_thaw,
+       .restore     = zfcp_ccw_resume,
 };
index fbd8b4db6025772840ec0af0911f004be0548fe0..49b82e46629ee493a860f541e3f521f77a3239ab 100644 (file)
@@ -293,7 +293,7 @@ void zfcp_cfdc_adapter_access_changed(struct zfcp_adapter *adapter)
        }
        read_unlock_irqrestore(&adapter->port_list_lock, flags);
 
-       shost_for_each_device(sdev, port->adapter->scsi_host) {
+       shost_for_each_device(sdev, adapter->scsi_host) {
                zfcp_sdev = sdev_to_zfcp(sdev);
                status = atomic_read(&zfcp_sdev->status);
                if ((status & ZFCP_STATUS_COMMON_ACCESS_DENIED) ||
index 3c1d22097ad0b66296e730463b01a19775a18977..e1a8cc2526e79fa93e6c1b77f01e4321fc573f49 100644 (file)
@@ -191,7 +191,7 @@ void zfcp_dbf_hba_def_err(struct zfcp_adapter *adapter, u64 req_id, u16 scount,
        length = min((u16)sizeof(struct qdio_buffer),
                     (u16)ZFCP_DBF_PAY_MAX_REC);
 
-       while ((char *)pl[payload->counter] && payload->counter < scount) {
+       while (payload->counter < scount && (char *)pl[payload->counter]) {
                memcpy(payload->data, (char *)pl[payload->counter], length);
                debug_event(dbf->pay, 1, payload, zfcp_dbf_plen(length));
                payload->counter++;
@@ -200,6 +200,26 @@ void zfcp_dbf_hba_def_err(struct zfcp_adapter *adapter, u64 req_id, u16 scount,
        spin_unlock_irqrestore(&dbf->pay_lock, flags);
 }
 
+/**
+ * zfcp_dbf_hba_basic - trace event for basic adapter events
+ * @adapter: pointer to struct zfcp_adapter
+ */
+void zfcp_dbf_hba_basic(char *tag, struct zfcp_adapter *adapter)
+{
+       struct zfcp_dbf *dbf = adapter->dbf;
+       struct zfcp_dbf_hba *rec = &dbf->hba_buf;
+       unsigned long flags;
+
+       spin_lock_irqsave(&dbf->hba_lock, flags);
+       memset(rec, 0, sizeof(*rec));
+
+       memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
+       rec->id = ZFCP_DBF_HBA_BASIC;
+
+       debug_event(dbf->hba, 1, rec, sizeof(*rec));
+       spin_unlock_irqrestore(&dbf->hba_lock, flags);
+}
+
 static void zfcp_dbf_set_common(struct zfcp_dbf_rec *rec,
                                struct zfcp_adapter *adapter,
                                struct zfcp_port *port,
index 714f087eb7a96bdf973b43665636d14df9adc7a9..3ac7a4b30dd910ef6f59c4ada70966d66694c90e 100644 (file)
@@ -154,6 +154,7 @@ enum zfcp_dbf_hba_id {
        ZFCP_DBF_HBA_RES        = 1,
        ZFCP_DBF_HBA_USS        = 2,
        ZFCP_DBF_HBA_BIT        = 3,
+       ZFCP_DBF_HBA_BASIC      = 4,
 };
 
 /**
index 2955e1a3deaf88bcff0140ea3c9e571a8f6e659a..1305955cbf5978399eb3bf4bd694c8cc3ae4465c 100644 (file)
@@ -77,6 +77,7 @@ struct zfcp_reqlist;
 #define ZFCP_STATUS_ADAPTER_SIOSL_ISSUED       0x00000004
 #define ZFCP_STATUS_ADAPTER_XCONFIG_OK         0x00000008
 #define ZFCP_STATUS_ADAPTER_HOST_CON_INIT      0x00000010
+#define ZFCP_STATUS_ADAPTER_SUSPENDED          0x00000040
 #define ZFCP_STATUS_ADAPTER_ERP_PENDING                0x00000100
 #define ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED     0x00000200
 #define ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED   0x00000400
@@ -204,6 +205,7 @@ struct zfcp_port {
        struct zfcp_adapter    *adapter;       /* adapter used to access port */
        struct list_head        unit_list;      /* head of logical unit list */
        rwlock_t                unit_list_lock; /* unit list lock */
+       atomic_t                units;         /* zfcp_unit count */
        atomic_t               status;         /* status of this remote port */
        u64                    wwnn;           /* WWNN if known */
        u64                    wwpn;           /* WWPN */
index 92d3df6ac8ba9e6ef37ba3e24b503e6b12217ef2..4133ab6e20f1ab5ccfe058b49f125770e6c289e2 100644 (file)
@@ -1230,7 +1230,7 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
        case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
                if (result == ZFCP_ERP_SUCCEEDED) {
                        register_service_level(&adapter->service_level);
-                       queue_work(adapter->work_queue, &adapter->scan_work);
+                       zfcp_fc_conditional_port_scan(adapter);
                        queue_work(adapter->work_queue, &adapter->ns_up_work);
                } else
                        unregister_service_level(&adapter->service_level);
index 36f422770ff54da358879ab9571e1c3ecd79112b..1d3dd3f7d69994163c2dd1d860a7a6584c21f5d5 100644 (file)
@@ -54,6 +54,7 @@ extern void zfcp_dbf_hba_fsf_res(char *, struct zfcp_fsf_req *);
 extern void zfcp_dbf_hba_bit_err(char *, struct zfcp_fsf_req *);
 extern void zfcp_dbf_hba_berr(struct zfcp_dbf *, struct zfcp_fsf_req *);
 extern void zfcp_dbf_hba_def_err(struct zfcp_adapter *, u64, u16, void **);
+extern void zfcp_dbf_hba_basic(char *, struct zfcp_adapter *);
 extern void zfcp_dbf_san_req(char *, struct zfcp_fsf_req *, u32);
 extern void zfcp_dbf_san_res(char *, struct zfcp_fsf_req *);
 extern void zfcp_dbf_san_in_els(char *, struct zfcp_fsf_req *);
@@ -98,6 +99,8 @@ extern void zfcp_fc_gs_destroy(struct zfcp_adapter *);
 extern int zfcp_fc_exec_bsg_job(struct fc_bsg_job *);
 extern int zfcp_fc_timeout_bsg_job(struct fc_bsg_job *);
 extern void zfcp_fc_sym_name_update(struct work_struct *);
+extern void zfcp_fc_conditional_port_scan(struct zfcp_adapter *);
+extern void zfcp_fc_inverse_conditional_port_scan(struct zfcp_adapter *);
 
 /* zfcp_fsf.c */
 extern struct kmem_cache *zfcp_fsf_qtcb_cache;
@@ -158,6 +161,7 @@ extern void zfcp_scsi_dif_sense_error(struct scsi_cmnd *, int);
 extern struct attribute_group zfcp_sysfs_unit_attrs;
 extern struct attribute_group zfcp_sysfs_adapter_attrs;
 extern struct attribute_group zfcp_sysfs_port_attrs;
+extern struct mutex zfcp_sysfs_port_units_mutex;
 extern struct device_attribute *zfcp_sysfs_sdev_attrs[];
 extern struct device_attribute *zfcp_sysfs_shost_attrs[];
 
index 88688a80b2c11334bcc99686634bee964858b816..ff598cd68b2d0bde11c99c412ca0ea70a90d6ece 100644 (file)
@@ -26,6 +26,27 @@ static u32 zfcp_fc_rscn_range_mask[] = {
        [ELS_ADDR_FMT_FAB]              = 0x000000,
 };
 
+static bool no_auto_port_rescan;
+module_param_named(no_auto_port_rescan, no_auto_port_rescan, bool, 0600);
+MODULE_PARM_DESC(no_auto_port_rescan,
+                "no automatic port_rescan (default off)");
+
+void zfcp_fc_conditional_port_scan(struct zfcp_adapter *adapter)
+{
+       if (no_auto_port_rescan)
+               return;
+
+       queue_work(adapter->work_queue, &adapter->scan_work);
+}
+
+void zfcp_fc_inverse_conditional_port_scan(struct zfcp_adapter *adapter)
+{
+       if (!no_auto_port_rescan)
+               return;
+
+       queue_work(adapter->work_queue, &adapter->scan_work);
+}
+
 /**
  * zfcp_fc_post_event - post event to userspace via fc_transport
  * @work: work struct with enqueued events
@@ -206,7 +227,7 @@ static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req)
                zfcp_fc_enqueue_event(fsf_req->adapter, FCH_EVT_RSCN,
                                      *(u32 *)page);
        }
-       queue_work(fsf_req->adapter->work_queue, &fsf_req->adapter->scan_work);
+       zfcp_fc_conditional_port_scan(fsf_req->adapter);
 }
 
 static void zfcp_fc_incoming_wwpn(struct zfcp_fsf_req *req, u64 wwpn)
index e1c1efc2c5a0f35158ec402360c78ae87b80c681..c96320d79fbc84fd91c24cfd729361c24f4887cc 100644 (file)
@@ -219,7 +219,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
                return;
        }
 
-       zfcp_dbf_hba_fsf_uss("fssrh_2", req);
+       zfcp_dbf_hba_fsf_uss("fssrh_4", req);
 
        switch (sr_buf->status_type) {
        case FSF_STATUS_READ_PORT_CLOSED:
@@ -257,7 +257,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
                if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_ACT_UPDATED)
                        zfcp_cfdc_adapter_access_changed(adapter);
                if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_INCOMING_ELS)
-                       queue_work(adapter->work_queue, &adapter->scan_work);
+                       zfcp_fc_conditional_port_scan(adapter);
                break;
        case FSF_STATUS_READ_CFDC_UPDATED:
                zfcp_cfdc_adapter_access_changed(adapter);
@@ -437,6 +437,34 @@ void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter)
        }
 }
 
+#define ZFCP_FSF_PORTSPEED_1GBIT       (1 <<  0)
+#define ZFCP_FSF_PORTSPEED_2GBIT       (1 <<  1)
+#define ZFCP_FSF_PORTSPEED_4GBIT       (1 <<  2)
+#define ZFCP_FSF_PORTSPEED_10GBIT      (1 <<  3)
+#define ZFCP_FSF_PORTSPEED_8GBIT       (1 <<  4)
+#define ZFCP_FSF_PORTSPEED_16GBIT      (1 <<  5)
+#define ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED (1 << 15)
+
+static u32 zfcp_fsf_convert_portspeed(u32 fsf_speed)
+{
+       u32 fdmi_speed = 0;
+       if (fsf_speed & ZFCP_FSF_PORTSPEED_1GBIT)
+               fdmi_speed |= FC_PORTSPEED_1GBIT;
+       if (fsf_speed & ZFCP_FSF_PORTSPEED_2GBIT)
+               fdmi_speed |= FC_PORTSPEED_2GBIT;
+       if (fsf_speed & ZFCP_FSF_PORTSPEED_4GBIT)
+               fdmi_speed |= FC_PORTSPEED_4GBIT;
+       if (fsf_speed & ZFCP_FSF_PORTSPEED_10GBIT)
+               fdmi_speed |= FC_PORTSPEED_10GBIT;
+       if (fsf_speed & ZFCP_FSF_PORTSPEED_8GBIT)
+               fdmi_speed |= FC_PORTSPEED_8GBIT;
+       if (fsf_speed & ZFCP_FSF_PORTSPEED_16GBIT)
+               fdmi_speed |= FC_PORTSPEED_16GBIT;
+       if (fsf_speed & ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED)
+               fdmi_speed |= FC_PORTSPEED_NOT_NEGOTIATED;
+       return fdmi_speed;
+}
+
 static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
 {
        struct fsf_qtcb_bottom_config *bottom = &req->qtcb->bottom.config;
@@ -456,7 +484,8 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
        fc_host_port_name(shost) = nsp->fl_wwpn;
        fc_host_node_name(shost) = nsp->fl_wwnn;
        fc_host_port_id(shost) = ntoh24(bottom->s_id);
-       fc_host_speed(shost) = bottom->fc_link_speed;
+       fc_host_speed(shost) =
+               zfcp_fsf_convert_portspeed(bottom->fc_link_speed);
        fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;
 
        adapter->hydra_version = bottom->adapter_type;
@@ -580,7 +609,8 @@ static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
        } else
                fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
        fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
-       fc_host_supported_speeds(shost) = bottom->supported_speed;
+       fc_host_supported_speeds(shost) =
+               zfcp_fsf_convert_portspeed(bottom->supported_speed);
        memcpy(fc_host_supported_fc4s(shost), bottom->supported_fc4_types,
               FC_FC4_LIST_SIZE);
        memcpy(fc_host_active_fc4s(shost), bottom->active_fc4_types,
@@ -771,12 +801,14 @@ out:
 static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
 {
        struct scsi_device *sdev = req->data;
-       struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+       struct zfcp_scsi_dev *zfcp_sdev;
        union fsf_status_qual *fsq = &req->qtcb->header.fsf_status_qual;
 
        if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
                return;
 
+       zfcp_sdev = sdev_to_zfcp(sdev);
+
        switch (req->qtcb->header.fsf_status) {
        case FSF_PORT_HANDLE_NOT_VALID:
                if (fsq->word[0] == fsq->word[1]) {
@@ -885,7 +917,7 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
 
        switch (header->fsf_status) {
         case FSF_GOOD:
-               zfcp_dbf_san_res("fsscth1", req);
+               zfcp_dbf_san_res("fsscth2", req);
                ct->status = 0;
                break;
         case FSF_SERVICE_CLASS_NOT_SUPPORTED:
@@ -1739,13 +1771,15 @@ static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req)
 {
        struct zfcp_adapter *adapter = req->adapter;
        struct scsi_device *sdev = req->data;
-       struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+       struct zfcp_scsi_dev *zfcp_sdev;
        struct fsf_qtcb_header *header = &req->qtcb->header;
        struct fsf_qtcb_bottom_support *bottom = &req->qtcb->bottom.support;
 
        if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
                return;
 
+       zfcp_sdev = sdev_to_zfcp(sdev);
+
        atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
                          ZFCP_STATUS_COMMON_ACCESS_BOXED |
                          ZFCP_STATUS_LUN_SHARED |
@@ -1856,11 +1890,13 @@ out:
 static void zfcp_fsf_close_lun_handler(struct zfcp_fsf_req *req)
 {
        struct scsi_device *sdev = req->data;
-       struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+       struct zfcp_scsi_dev *zfcp_sdev;
 
        if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
                return;
 
+       zfcp_sdev = sdev_to_zfcp(sdev);
+
        switch (req->qtcb->header.fsf_status) {
        case FSF_PORT_HANDLE_NOT_VALID:
                zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fscuh_1");
@@ -1950,7 +1986,7 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
 {
        struct fsf_qual_latency_info *lat_in;
        struct latency_cont *lat = NULL;
-       struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scsi->device);
+       struct zfcp_scsi_dev *zfcp_sdev;
        struct zfcp_blk_drv_data blktrc;
        int ticks = req->adapter->timer_ticks;
 
@@ -1965,6 +2001,7 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
 
        if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA &&
            !(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
+               zfcp_sdev = sdev_to_zfcp(scsi->device);
                blktrc.flags |= ZFCP_BLK_LAT_VALID;
                blktrc.channel_lat = lat_in->channel_lat * ticks;
                blktrc.fabric_lat = lat_in->fabric_lat * ticks;
@@ -2002,12 +2039,14 @@ static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req)
 {
        struct scsi_cmnd *scmnd = req->data;
        struct scsi_device *sdev = scmnd->device;
-       struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+       struct zfcp_scsi_dev *zfcp_sdev;
        struct fsf_qtcb_header *header = &req->qtcb->header;
 
        if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
                return;
 
+       zfcp_sdev = sdev_to_zfcp(sdev);
+
        switch (header->fsf_status) {
        case FSF_HANDLE_MISMATCH:
        case FSF_PORT_HANDLE_NOT_VALID:
index b9fffc8d94a76daaf6b7faadbcd85194cb555094..50b5615848f6edaf63ca75d2021fe102eae5a6d7 100644 (file)
@@ -102,18 +102,22 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
 {
        struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;
        struct zfcp_adapter *adapter = qdio->adapter;
-       struct qdio_buffer_element *sbale;
        int sbal_no, sbal_idx;
-       void *pl[ZFCP_QDIO_MAX_SBALS_PER_REQ + 1];
-       u64 req_id;
-       u8 scount;
 
        if (unlikely(qdio_err)) {
-               memset(pl, 0, ZFCP_QDIO_MAX_SBALS_PER_REQ * sizeof(void *));
                if (zfcp_adapter_multi_buffer_active(adapter)) {
+                       void *pl[ZFCP_QDIO_MAX_SBALS_PER_REQ + 1];
+                       struct qdio_buffer_element *sbale;
+                       u64 req_id;
+                       u8 scount;
+
+                       memset(pl, 0,
+                              ZFCP_QDIO_MAX_SBALS_PER_REQ * sizeof(void *));
                        sbale = qdio->res_q[idx]->element;
                        req_id = (u64) sbale->addr;
-                       scount = sbale->scount + 1; /* incl. signaling SBAL */
+                       scount = min(sbale->scount + 1,
+                                    ZFCP_QDIO_MAX_SBALS_PER_REQ + 1);
+                                    /* incl. signaling SBAL */
 
                        for (sbal_no = 0; sbal_no < scount; sbal_no++) {
                                sbal_idx = (idx + sbal_no) %
index c66af27b230bb99fdde7cd3f28bc45f6eac82ba3..1e0eb089dfbafd620e430c2357497284acf5349a 100644 (file)
@@ -227,6 +227,8 @@ static ssize_t zfcp_sysfs_port_rescan_store(struct device *dev,
 static ZFCP_DEV_ATTR(adapter, port_rescan, S_IWUSR, NULL,
                     zfcp_sysfs_port_rescan_store);
 
+DEFINE_MUTEX(zfcp_sysfs_port_units_mutex);
+
 static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
                                            struct device_attribute *attr,
                                            const char *buf, size_t count)
@@ -249,6 +251,16 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
        else
                retval = 0;
 
+       mutex_lock(&zfcp_sysfs_port_units_mutex);
+       if (atomic_read(&port->units) > 0) {
+               retval = -EBUSY;
+               mutex_unlock(&zfcp_sysfs_port_units_mutex);
+               goto out;
+       }
+       /* port is about to be removed, so no more unit_add */
+       atomic_set(&port->units, -1);
+       mutex_unlock(&zfcp_sysfs_port_units_mutex);
+
        write_lock_irq(&adapter->port_list_lock);
        list_del(&port->list);
        write_unlock_irq(&adapter->port_list_lock);
@@ -289,12 +301,14 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev,
 {
        struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
        u64 fcp_lun;
+       int retval;
 
        if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun))
                return -EINVAL;
 
-       if (zfcp_unit_add(port, fcp_lun))
-               return -EINVAL;
+       retval = zfcp_unit_add(port, fcp_lun);
+       if (retval)
+               return retval;
 
        return count;
 }
index 3f2bff0d3aa21fd9996cedc187c441bff167718e..1cd2b99ab256c06132dd85c0a7a3d4293e98cb87 100644 (file)
@@ -104,7 +104,7 @@ static void zfcp_unit_release(struct device *dev)
 {
        struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev);
 
-       put_device(&unit->port->dev);
+       atomic_dec(&unit->port->units);
        kfree(unit);
 }
 
@@ -119,16 +119,27 @@ static void zfcp_unit_release(struct device *dev)
 int zfcp_unit_add(struct zfcp_port *port, u64 fcp_lun)
 {
        struct zfcp_unit *unit;
+       int retval = 0;
+
+       mutex_lock(&zfcp_sysfs_port_units_mutex);
+       if (atomic_read(&port->units) == -1) {
+               /* port is already gone */
+               retval = -ENODEV;
+               goto out;
+       }
 
        unit = zfcp_unit_find(port, fcp_lun);
        if (unit) {
                put_device(&unit->dev);
-               return -EEXIST;
+               retval = -EEXIST;
+               goto out;
        }
 
        unit = kzalloc(sizeof(struct zfcp_unit), GFP_KERNEL);
-       if (!unit)
-               return -ENOMEM;
+       if (!unit) {
+               retval = -ENOMEM;
+               goto out;
+       }
 
        unit->port = port;
        unit->fcp_lun = fcp_lun;
@@ -139,28 +150,33 @@ int zfcp_unit_add(struct zfcp_port *port, u64 fcp_lun)
        if (dev_set_name(&unit->dev, "0x%016llx",
                         (unsigned long long) fcp_lun)) {
                kfree(unit);
-               return -ENOMEM;
+               retval = -ENOMEM;
+               goto out;
        }
 
-       get_device(&port->dev);
-
        if (device_register(&unit->dev)) {
                put_device(&unit->dev);
-               return -ENOMEM;
+               retval = -ENOMEM;
+               goto out;
        }
 
        if (sysfs_create_group(&unit->dev.kobj, &zfcp_sysfs_unit_attrs)) {
                device_unregister(&unit->dev);
-               return -EINVAL;
+               retval = -EINVAL;
+               goto out;
        }
 
+       atomic_inc(&port->units); /* under zfcp_sysfs_port_units_mutex ! */
+
        write_lock_irq(&port->unit_list_lock);
        list_add_tail(&unit->list, &port->unit_list);
        write_unlock_irq(&port->unit_list_lock);
 
        zfcp_unit_scsi_scan(unit);
 
-       return 0;
+out:
+       mutex_unlock(&zfcp_sysfs_port_units_mutex);
+       return retval;
 }
 
 /**
index 4b9939726c342f3b5e32b1cc5f3ebfe5c88ef945..b160073e54b65e76baa876f06720d4ab92ae160e 100644 (file)
@@ -150,7 +150,7 @@ static long d7s_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
                        regs |= D7S_FLIP;
                writeb(regs, p->regs);
                break;
-       };
+       }
        mutex_unlock(&d7s_mutex);
 
        return error;
index 339fd6f65eda7ff3ba3d90914e8b79c73ae15d34..0bc18569f9c0afe35c908ea2676cf59973538dae 100644 (file)
@@ -353,7 +353,7 @@ static int envctrl_i2c_data_translate(unsigned char data, int translate_type,
 
        default:
                break;
-       };
+       }
 
        return len;
 }
@@ -644,7 +644,7 @@ envctrl_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
        default:
                break;
 
-       };
+       }
 
        return ret;
 }
@@ -687,7 +687,7 @@ envctrl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 
        default:
                return -EINVAL;
-       };
+       }
 
        return 0;
 }
@@ -947,7 +947,7 @@ static void envctrl_init_i2c_child(struct device_node *dp,
 
                default:
                        break;
-               };
+               }
        }
 }
 
index 2236aea3ca2fde226e1e85fcffd3461f2321d06c..5843288f64bc00c8e5666a499767aee995795820 100644 (file)
@@ -222,7 +222,7 @@ static int opromnext(void __user *argp, unsigned int cmd, struct device_node *dp
                case OPROMSETCUR:
                default:
                        break;
-               };
+               }
        } else {
                /* Sibling of node zero is the root node.  */
                if (cmd != OPROMNEXT)
@@ -588,7 +588,7 @@ static int openprom_bsd_ioctl(struct file * file,
        default:
                err = -EINVAL;
                break;
-       };
+       }
        mutex_unlock(&openprom_mutex);
 
        return err;
index def24a1079adc70dc7dd343f1062d76710ffee1a..33c52bc2c7b461033b8845e7e538f072f9ab671f 100644 (file)
@@ -999,7 +999,7 @@ static void arcmsr_remove(struct pci_dev *pdev)
        int poll_count = 0;
        arcmsr_free_sysfs_attr(acb);
        scsi_remove_host(host);
-       flush_work_sync(&acb->arcmsr_do_message_isr_bh);
+       flush_work(&acb->arcmsr_do_message_isr_bh);
        del_timer_sync(&acb->eternal_timer);
        arcmsr_disable_outbound_ints(acb);
        arcmsr_stop_adapter_bgrb(acb);
@@ -1045,7 +1045,7 @@ static void arcmsr_shutdown(struct pci_dev *pdev)
                (struct AdapterControlBlock *)host->hostdata;
        del_timer_sync(&acb->eternal_timer);
        arcmsr_disable_outbound_ints(acb);
-       flush_work_sync(&acb->arcmsr_do_message_isr_bh);
+       flush_work(&acb->arcmsr_do_message_isr_bh);
        arcmsr_stop_adapter_bgrb(acb);
        arcmsr_flush_adapter_cache(acb);
 }
index d2e9e933f7a336e56f97b72c93196fba7bf974b4..07d2cb126d934eca69956a6269ab3543a3333dac 100644 (file)
@@ -48,7 +48,8 @@ int beiscsi_pci_soft_reset(struct beiscsi_hba *phba)
        }
 
        if (sreset & BE2_SET_RESET) {
-               printk(KERN_ERR "Soft Reset  did not deassert\n");
+               printk(KERN_ERR DRV_NAME
+                      " Soft Reset  did not deassert\n");
                return -EIO;
        }
        pconline1 = BE2_MPU_IRAM_ONLINE;
@@ -67,7 +68,8 @@ int beiscsi_pci_soft_reset(struct beiscsi_hba *phba)
                i++;
        }
        if (sreset & BE2_SET_RESET) {
-               printk(KERN_ERR "MPU Online Soft Reset did not deassert\n");
+               printk(KERN_ERR DRV_NAME
+                      " MPU Online Soft Reset did not deassert\n");
                return -EIO;
        }
        return 0;
@@ -93,8 +95,9 @@ int be_chk_reset_complete(struct beiscsi_hba *phba)
        }
 
        if ((status & 0x80000000) || (!num_loop)) {
-               printk(KERN_ERR "Failed in be_chk_reset_complete"
-               "status = 0x%x\n", status);
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                           "BC_%d : Failed in be_chk_reset_complete"
+                           "status = 0x%x\n", status);
                return -EIO;
        }
 
@@ -169,6 +172,7 @@ static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
                                struct be_mcc_compl *compl)
 {
        u16 compl_status, extd_status;
+       struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
 
        be_dws_le_to_cpu(compl, 4);
 
@@ -177,9 +181,12 @@ static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
        if (compl_status != MCC_STATUS_SUCCESS) {
                extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
                                                CQE_STATUS_EXTD_MASK;
-               dev_err(&ctrl->pdev->dev,
-                       "error in cmd completion: status(compl/extd)=%d/%d\n",
-                       compl_status, extd_status);
+
+               beiscsi_log(phba, KERN_ERR,
+                           BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+                           "BC_%d : error in cmd completion: status(compl/extd)=%d/%d\n",
+                           compl_status, extd_status);
+
                return -EBUSY;
        }
        return 0;
@@ -233,22 +240,29 @@ void beiscsi_async_link_state_process(struct beiscsi_hba *phba,
 {
        switch (evt->port_link_status) {
        case ASYNC_EVENT_LINK_DOWN:
-               SE_DEBUG(DBG_LVL_1, "Link Down on Physical Port %d\n",
-                                    evt->physical_port);
+               beiscsi_log(phba, KERN_ERR,
+                           BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
+                           "BC_%d : Link Down on Physical Port %d\n",
+                           evt->physical_port);
+
                phba->state |= BE_ADAPTER_LINK_DOWN;
                iscsi_host_for_each_session(phba->shost,
                                            be2iscsi_fail_session);
                break;
        case ASYNC_EVENT_LINK_UP:
                phba->state = BE_ADAPTER_UP;
-               SE_DEBUG(DBG_LVL_1, "Link UP on Physical Port %d\n",
-                                               evt->physical_port);
+               beiscsi_log(phba, KERN_ERR,
+                           BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
+                           "BC_%d : Link UP on Physical Port %d\n",
+                           evt->physical_port);
                break;
        default:
-               SE_DEBUG(DBG_LVL_1, "Unexpected Async Notification %d on"
-                                   "Physical Port %d\n",
-                                    evt->port_link_status,
-                                    evt->physical_port);
+               beiscsi_log(phba, KERN_ERR,
+                           BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
+                           "BC_%d : Unexpected Async Notification %d on"
+                           "Physical Port %d\n",
+                           evt->port_link_status,
+                           evt->physical_port);
        }
 }
 
@@ -279,9 +293,11 @@ int beiscsi_process_mcc(struct beiscsi_hba *phba)
                                beiscsi_async_link_state_process(phba,
                                   (struct be_async_event_link_state *) compl);
                        else
-                               SE_DEBUG(DBG_LVL_1,
-                                        " Unsupported Async Event, flags"
-                                        " = 0x%08x\n", compl->flags);
+                               beiscsi_log(phba, KERN_ERR,
+                                           BEISCSI_LOG_CONFIG |
+                                           BEISCSI_LOG_MBOX,
+                                           "BC_%d : Unsupported Async Event, flags"
+                                           " = 0x%08x\n", compl->flags);
 
                } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
                                status = be_mcc_compl_process(ctrl, compl);
@@ -312,7 +328,10 @@ static int be_mcc_wait_compl(struct beiscsi_hba *phba)
                udelay(100);
        }
        if (i == mcc_timeout) {
-               dev_err(&phba->pcidev->dev, "mccq poll timed out\n");
+               beiscsi_log(phba, KERN_ERR,
+                           BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+                           "BC_%d : mccq poll timed out\n");
+
                return -EBUSY;
        }
        return 0;
@@ -338,7 +357,11 @@ static int be_mbox_db_ready_wait(struct be_ctrl_info *ctrl)
                        break;
 
                if (cnt > 12000000) {
-                       dev_err(&ctrl->pdev->dev, "mbox_db poll timed out\n");
+                       struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
+                       beiscsi_log(phba, KERN_ERR,
+                                   BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+                                   "BC_%d : mbox_db poll timed out\n");
+
                        return -EBUSY;
                }
 
@@ -360,6 +383,7 @@ int be_mbox_notify(struct be_ctrl_info *ctrl)
        struct be_dma_mem *mbox_mem = &ctrl->mbox_mem;
        struct be_mcc_mailbox *mbox = mbox_mem->va;
        struct be_mcc_compl *compl = &mbox->compl;
+       struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
 
        val &= ~MPU_MAILBOX_DB_RDY_MASK;
        val |= MPU_MAILBOX_DB_HI_MASK;
@@ -368,7 +392,10 @@ int be_mbox_notify(struct be_ctrl_info *ctrl)
 
        status = be_mbox_db_ready_wait(ctrl);
        if (status != 0) {
-               SE_DEBUG(DBG_LVL_1, " be_mbox_db_ready_wait failed\n");
+               beiscsi_log(phba, KERN_ERR,
+                           BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+                           "BC_%d : be_mbox_db_ready_wait failed\n");
+
                return status;
        }
        val = 0;
@@ -379,18 +406,27 @@ int be_mbox_notify(struct be_ctrl_info *ctrl)
 
        status = be_mbox_db_ready_wait(ctrl);
        if (status != 0) {
-               SE_DEBUG(DBG_LVL_1, " be_mbox_db_ready_wait failed\n");
+               beiscsi_log(phba, KERN_ERR,
+                           BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+                           "BC_%d : be_mbox_db_ready_wait failed\n");
+
                return status;
        }
        if (be_mcc_compl_is_new(compl)) {
                status = be_mcc_compl_process(ctrl, &mbox->compl);
                be_mcc_compl_use(compl);
                if (status) {
-                       SE_DEBUG(DBG_LVL_1, "After be_mcc_compl_process\n");
+                       beiscsi_log(phba, KERN_ERR,
+                                   BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+                                   "BC_%d : After be_mcc_compl_process\n");
+
                        return status;
                }
        } else {
-               dev_err(&ctrl->pdev->dev, "invalid mailbox completion\n");
+               beiscsi_log(phba, KERN_ERR,
+                           BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+                           "BC_%d : Invalid Mailbox Completion\n");
+
                return -EBUSY;
        }
        return 0;
@@ -436,7 +472,10 @@ static int be_mbox_notify_wait(struct beiscsi_hba *phba)
                if (status)
                        return status;
        } else {
-               dev_err(&phba->pcidev->dev, "invalid mailbox completion\n");
+               beiscsi_log(phba, KERN_ERR,
+                           BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+                           "BC_%d : invalid mailbox completion\n");
+
                return -EBUSY;
        }
        return 0;
@@ -528,7 +567,6 @@ int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl,
        struct be_dma_mem *q_mem = &eq->dma_mem;
        int status;
 
-       SE_DEBUG(DBG_LVL_8, "In beiscsi_cmd_eq_create\n");
        spin_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));
 
@@ -563,10 +601,10 @@ int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl,
 int be_cmd_fw_initialize(struct be_ctrl_info *ctrl)
 {
        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
+       struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
        int status;
        u8 *endian_check;
 
-       SE_DEBUG(DBG_LVL_8, "In be_cmd_fw_initialize\n");
        spin_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));
 
@@ -583,7 +621,8 @@ int be_cmd_fw_initialize(struct be_ctrl_info *ctrl)
 
        status = be_mbox_notify(ctrl);
        if (status)
-               SE_DEBUG(DBG_LVL_1, "be_cmd_fw_initialize Failed\n");
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                           "BC_%d : be_cmd_fw_initialize Failed\n");
 
        spin_unlock(&ctrl->mbox_lock);
        return status;
@@ -596,11 +635,11 @@ int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
        struct be_cmd_req_cq_create *req = embedded_payload(wrb);
        struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
+       struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
        struct be_dma_mem *q_mem = &cq->dma_mem;
        void *ctxt = &req->context;
        int status;
 
-       SE_DEBUG(DBG_LVL_8, "In beiscsi_cmd_cq_create\n");
        spin_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));
 
@@ -608,8 +647,6 @@ int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
 
        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                        OPCODE_COMMON_CQ_CREATE, sizeof(*req));
-       if (!q_mem->va)
-               SE_DEBUG(DBG_LVL_1, "uninitialized q_mem->va\n");
 
        req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
 
@@ -633,8 +670,10 @@ int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
                cq->id = le16_to_cpu(resp->cq_id);
                cq->created = true;
        } else
-               SE_DEBUG(DBG_LVL_1, "In be_cmd_cq_create, status=ox%08x\n",
-                       status);
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                           "BC_%d : In be_cmd_cq_create, status=ox%08x\n",
+                           status);
+
        spin_unlock(&ctrl->mbox_lock);
 
        return status;
@@ -700,10 +739,14 @@ int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
 {
        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
        struct be_cmd_req_q_destroy *req = embedded_payload(wrb);
+       struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
        u8 subsys = 0, opcode = 0;
        int status;
 
-       SE_DEBUG(DBG_LVL_8, "In beiscsi_cmd_q_destroy\n");
+       beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+                   "BC_%d : In beiscsi_cmd_q_destroy "
+                   "queue_type : %d\n", queue_type);
+
        spin_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));
        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -759,7 +802,6 @@ int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
        void *ctxt = &req->context;
        int status;
 
-       SE_DEBUG(DBG_LVL_8, "In be_cmd_create_default_pdu_queue\n");
        spin_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));
 
@@ -830,6 +872,7 @@ int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
 {
        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
        struct be_post_sgl_pages_req *req = embedded_payload(wrb);
+       struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
        int status;
        unsigned int curr_pages;
        u32 internal_page_offset = 0;
@@ -860,8 +903,9 @@ int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
 
                status = be_mbox_notify(ctrl);
                if (status) {
-                       SE_DEBUG(DBG_LVL_1,
-                                "FW CMD to map iscsi frags failed.\n");
+                       beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                                   "BC_%d : FW CMD to map iscsi frags failed.\n");
+
                        goto error;
                }
        } while (num_pages > 0);
@@ -890,3 +934,45 @@ int beiscsi_cmd_reset_function(struct beiscsi_hba  *phba)
        spin_unlock(&ctrl->mbox_lock);
        return status;
 }
+
+/**
+ * be_cmd_set_vlan()- Configure VLAN parameters on the adapter
+ * @phba: device priv structure instance
+ * @vlan_tag: TAG to be set
+ *
+ * Set the VLAN_TAG for the adapter or Disable VLAN on adapter
+ *
+ * returns
+ *     TAG for the MBX Cmd
+ */
+int be_cmd_set_vlan(struct beiscsi_hba *phba,
+                    uint16_t vlan_tag)
+{
+       unsigned int tag = 0;
+       struct be_mcc_wrb *wrb;
+       struct be_cmd_set_vlan_req *req;
+       struct be_ctrl_info *ctrl = &phba->ctrl;
+
+       spin_lock(&ctrl->mbox_lock);
+       tag = alloc_mcc_tag(phba);
+       if (!tag) {
+               spin_unlock(&ctrl->mbox_lock);
+               return tag;
+       }
+
+       wrb = wrb_from_mccq(phba);
+       req = embedded_payload(wrb);
+       wrb->tag0 |= tag;
+       be_wrb_hdr_prepare(wrb, sizeof(*wrb), true, 0);
+       be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
+                          OPCODE_COMMON_ISCSI_NTWK_SET_VLAN,
+                          sizeof(*req));
+
+       req->interface_hndl = phba->interface_handle;
+       req->vlan_priority = vlan_tag;
+
+       be_mcc_notify(phba);
+       spin_unlock(&ctrl->mbox_lock);
+
+       return tag;
+}
index b0b36c6a145f77f0fb3ce38d9c2b45964dc4b99d..2c8f98df12875c478c3e5e0872623b6476f0106b 100644 (file)
@@ -348,6 +348,23 @@ struct be_cmd_get_boot_target_resp {
        int  boot_session_handle;
 };
 
+struct be_cmd_reopen_session_req {
+       struct be_cmd_req_hdr hdr;
+#define BE_REOPEN_ALL_SESSIONS  0x00
+#define BE_REOPEN_BOOT_SESSIONS 0x01
+#define BE_REOPEN_A_SESSION     0x02
+       u16 reopen_type;
+       u16 rsvd;
+       u32 session_handle;
+} __packed;
+
+struct be_cmd_reopen_session_resp {
+       struct be_cmd_resp_hdr hdr;
+       u32 rsvd;
+       u32 session_handle;
+} __packed;
+
+
 struct be_cmd_mac_query_req {
        struct be_cmd_req_hdr hdr;
        u8 type;
@@ -432,6 +449,12 @@ struct be_cmd_get_def_gateway_resp {
        struct ip_addr_format ip_addr;
 } __packed;
 
+#define BEISCSI_VLAN_DISABLE   0xFFFF
+struct be_cmd_set_vlan_req {
+       struct be_cmd_req_hdr hdr;
+       u32 interface_hndl;
+       u32 vlan_priority;
+} __packed;
 /******************** Create CQ ***************************/
 /**
  * Pseudo amap definition in which each bit of the actual structure is defined
@@ -671,6 +694,9 @@ int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem,
 
 bool is_link_state_evt(u32 trailer);
 
+/* Configuration Functions */
+int be_cmd_set_vlan(struct beiscsi_hba *phba, uint16_t vlan_tag);
+
 struct be_default_pdu_context {
        u32 dw[4];
 } __packed;
@@ -911,6 +937,7 @@ struct be_cmd_get_all_if_id_req {
 #define OPCODE_ISCSI_INI_CFG_GET_HBA_NAME      6
 #define OPCODE_ISCSI_INI_CFG_SET_HBA_NAME      7
 #define OPCODE_ISCSI_INI_SESSION_GET_A_SESSION  14
+#define OPCODE_ISCSI_INI_DRIVER_REOPEN_ALL_SESSIONS 36
 #define OPCODE_ISCSI_INI_DRIVER_OFFLOAD_SESSION 41
 #define OPCODE_ISCSI_INI_DRIVER_INVALIDATE_CONNECTION 42
 #define OPCODE_ISCSI_INI_BOOT_GET_BOOT_TARGET  52
index 43f35034585d1a5102b65d3aeaed84cfc00fbe1a..aedb0d9a9dae652d753455df07c2aee1a4d9e867 100644 (file)
@@ -50,21 +50,27 @@ struct iscsi_cls_session *beiscsi_session_create(struct iscsi_endpoint *ep,
        struct beiscsi_session *beiscsi_sess;
        struct beiscsi_io_task *io_task;
 
-       SE_DEBUG(DBG_LVL_8, "In beiscsi_session_create\n");
 
        if (!ep) {
-               SE_DEBUG(DBG_LVL_1, "beiscsi_session_create: invalid ep\n");
+               printk(KERN_ERR
+                      "beiscsi_session_create: invalid ep\n");
                return NULL;
        }
        beiscsi_ep = ep->dd_data;
        phba = beiscsi_ep->phba;
        shost = phba->shost;
+
+       beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+                   "BS_%d : In beiscsi_session_create\n");
+
        if (cmds_max > beiscsi_ep->phba->params.wrbs_per_cxn) {
-               shost_printk(KERN_ERR, shost, "Cannot handle %d cmds."
-                            "Max cmds per session supported is %d. Using %d. "
-                            "\n", cmds_max,
-                             beiscsi_ep->phba->params.wrbs_per_cxn,
-                             beiscsi_ep->phba->params.wrbs_per_cxn);
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+                           "BS_%d : Cannot handle %d cmds."
+                           "Max cmds per session supported is %d. Using %d."
+                           "\n", cmds_max,
+                           beiscsi_ep->phba->params.wrbs_per_cxn,
+                           beiscsi_ep->phba->params.wrbs_per_cxn);
+
                cmds_max = beiscsi_ep->phba->params.wrbs_per_cxn;
        }
 
@@ -102,7 +108,7 @@ void beiscsi_session_destroy(struct iscsi_cls_session *cls_session)
        struct iscsi_session *sess = cls_session->dd_data;
        struct beiscsi_session *beiscsi_sess = sess->dd_data;
 
-       SE_DEBUG(DBG_LVL_8, "In beiscsi_session_destroy\n");
+       printk(KERN_INFO "In beiscsi_session_destroy\n");
        pci_pool_destroy(beiscsi_sess->bhs_pool);
        iscsi_session_teardown(cls_session);
 }
@@ -123,11 +129,13 @@ beiscsi_conn_create(struct iscsi_cls_session *cls_session, u32 cid)
        struct iscsi_session *sess;
        struct beiscsi_session *beiscsi_sess;
 
-       SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_create ,cid"
-                "from iscsi layer=%d\n", cid);
        shost = iscsi_session_to_shost(cls_session);
        phba = iscsi_host_priv(shost);
 
+       beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+                   "BS_%d : In beiscsi_conn_create ,cid"
+                   "from iscsi layer=%d\n", cid);
+
        cls_conn = iscsi_conn_setup(cls_session, sizeof(*beiscsi_conn), cid);
        if (!cls_conn)
                return NULL;
@@ -154,12 +162,15 @@ static int beiscsi_bindconn_cid(struct beiscsi_hba *phba,
                                unsigned int cid)
 {
        if (phba->conn_table[cid]) {
-               SE_DEBUG(DBG_LVL_1,
-                        "Connection table already occupied. Detected clash\n");
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+                           "BS_%d : Connection table already occupied. Detected clash\n");
+
                return -EINVAL;
        } else {
-               SE_DEBUG(DBG_LVL_8, "phba->conn_table[%d]=%p(beiscsi_conn)\n",
-                        cid, beiscsi_conn);
+               beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+                           "BS_%d : phba->conn_table[%d]=%p(beiscsi_conn)\n",
+                           cid, beiscsi_conn);
+
                phba->conn_table[cid] = beiscsi_conn;
        }
        return 0;
@@ -184,7 +195,6 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
        struct beiscsi_endpoint *beiscsi_ep;
        struct iscsi_endpoint *ep;
 
-       SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_bind\n");
        ep = iscsi_lookup_endpoint(transport_fd);
        if (!ep)
                return -EINVAL;
@@ -195,17 +205,21 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
                return -EINVAL;
 
        if (beiscsi_ep->phba != phba) {
-               SE_DEBUG(DBG_LVL_8,
-                        "beiscsi_ep->hba=%p not equal to phba=%p\n",
-                        beiscsi_ep->phba, phba);
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+                           "BS_%d : beiscsi_ep->hba=%p not equal to phba=%p\n",
+                           beiscsi_ep->phba, phba);
+
                return -EEXIST;
        }
 
        beiscsi_conn->beiscsi_conn_cid = beiscsi_ep->ep_cid;
        beiscsi_conn->ep = beiscsi_ep;
        beiscsi_ep->conn = beiscsi_conn;
-       SE_DEBUG(DBG_LVL_8, "beiscsi_conn=%p conn=%p ep_cid=%d\n",
-                beiscsi_conn, conn, beiscsi_ep->ep_cid);
+
+       beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+                   "BS_%d : beiscsi_conn=%p conn=%p ep_cid=%d\n",
+                   beiscsi_conn, conn, beiscsi_ep->ep_cid);
+
        return beiscsi_bindconn_cid(phba, beiscsi_conn, beiscsi_ep->ep_cid);
 }
 
@@ -219,8 +233,9 @@ static int beiscsi_create_ipv4_iface(struct beiscsi_hba *phba)
                                              ISCSI_IFACE_TYPE_IPV4,
                                              0, 0);
        if (!phba->ipv4_iface) {
-               shost_printk(KERN_ERR, phba->shost, "Could not "
-                            "create default IPv4 address.\n");
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+                           "BS_%d : Could not "
+                           "create default IPv4 address.\n");
                return -ENODEV;
        }
 
@@ -237,8 +252,9 @@ static int beiscsi_create_ipv6_iface(struct beiscsi_hba *phba)
                                              ISCSI_IFACE_TYPE_IPV6,
                                              0, 0);
        if (!phba->ipv6_iface) {
-               shost_printk(KERN_ERR, phba->shost, "Could not "
-                            "create default IPv6 address.\n");
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+                           "BS_%d : Could not "
+                           "create default IPv6 address.\n");
                return -ENODEV;
        }
 
@@ -299,12 +315,14 @@ beiscsi_set_static_ip(struct Scsi_Host *shost,
                        iface_ip = nla_data(nla);
                break;
        default:
-               shost_printk(KERN_ERR, shost, "Unsupported param %d\n",
-                            iface_param->param);
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+                           "BS_%d : Unsupported param %d\n",
+                           iface_param->param);
        }
 
        if (!iface_ip || !iface_subnet) {
-               shost_printk(KERN_ERR, shost, "IP and Subnet Mask required\n");
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+                           "BS_%d : IP and Subnet Mask required\n");
                return -EINVAL;
        }
 
@@ -314,6 +332,51 @@ beiscsi_set_static_ip(struct Scsi_Host *shost,
        return ret;
 }
 
+/**
+ * beiscsi_set_vlan_tag()- Set the VLAN TAG
+ * @shost: Scsi Host for the driver instance
+ * @iface_param: Interface parameters
+ *
+ * Set the VLAN TAG for the adapter or disable
+ * the VLAN config
+ *
+ * returns
+ *     Success: 0
+ *     Failure: Non-Zero Value
+ */
+static int
+beiscsi_set_vlan_tag(struct Scsi_Host *shost,
+                     struct iscsi_iface_param_info *iface_param)
+{
+       struct beiscsi_hba *phba = iscsi_host_priv(shost);
+       int ret = 0;
+
+       /* Get the Interface Handle */
+       if (mgmt_get_all_if_id(phba)) {
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+                           "BS_%d : Getting Interface Handle Failed\n");
+               return -EIO;
+       }
+
+       switch (iface_param->param) {
+       case ISCSI_NET_PARAM_VLAN_ENABLED:
+               if (iface_param->value[0] != ISCSI_VLAN_ENABLE)
+                       ret = mgmt_set_vlan(phba, BEISCSI_VLAN_DISABLE);
+               break;
+       case ISCSI_NET_PARAM_VLAN_TAG:
+               ret = mgmt_set_vlan(phba,
+                                   *((uint16_t *)iface_param->value));
+               break;
+       default:
+               beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
+                           "BS_%d : Unkown Param Type : %d\n",
+                           iface_param->param);
+               return -ENOSYS;
+       }
+       return ret;
+}
+
+
 static int
 beiscsi_set_ipv4(struct Scsi_Host *shost,
                struct iscsi_iface_param_info *iface_param,
@@ -335,8 +398,9 @@ beiscsi_set_ipv4(struct Scsi_Host *shost,
                        ret = beiscsi_set_static_ip(shost, iface_param,
                                                    data, dt_len);
                else
-                       shost_printk(KERN_ERR, shost, "Invalid BOOTPROTO: %d\n",
-                                       iface_param->value[0]);
+                       beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+                                   "BS_%d : Invalid BOOTPROTO: %d\n",
+                                   iface_param->value[0]);
                break;
        case ISCSI_NET_PARAM_IFACE_ENABLE:
                if (iface_param->value[0] == ISCSI_IFACE_ENABLE)
@@ -349,9 +413,14 @@ beiscsi_set_ipv4(struct Scsi_Host *shost,
                ret = beiscsi_set_static_ip(shost, iface_param,
                                            data, dt_len);
                break;
+       case ISCSI_NET_PARAM_VLAN_ENABLED:
+       case ISCSI_NET_PARAM_VLAN_TAG:
+               ret = beiscsi_set_vlan_tag(shost, iface_param);
+               break;
        default:
-               shost_printk(KERN_ERR, shost, "Param %d not supported\n",
-                            iface_param->param);
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+                           "BS_%d : Param %d not supported\n",
+                           iface_param->param);
        }
 
        return ret;
@@ -379,8 +448,9 @@ beiscsi_set_ipv6(struct Scsi_Host *shost,
                                  ISCSI_BOOTPROTO_STATIC);
                break;
        default:
-               shost_printk(KERN_ERR, shost, "Param %d not supported\n",
-                            iface_param->param);
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+                           "BS_%d : Param %d not supported\n",
+                           iface_param->param);
        }
 
        return ret;
@@ -390,6 +460,7 @@ int be2iscsi_iface_set_param(struct Scsi_Host *shost,
                void *data, uint32_t dt_len)
 {
        struct iscsi_iface_param_info *iface_param = NULL;
+       struct beiscsi_hba *phba = iscsi_host_priv(shost);
        struct nlattr *attrib;
        uint32_t rm_len = dt_len;
        int ret = 0 ;
@@ -404,9 +475,11 @@ int be2iscsi_iface_set_param(struct Scsi_Host *shost,
                 * BE2ISCSI only supports 1 interface
                 */
                if (iface_param->iface_num) {
-                       shost_printk(KERN_ERR, shost, "Invalid iface_num %d."
-                                    "Only iface_num 0 is supported.\n",
-                                    iface_param->iface_num);
+                       beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+                                   "BS_%d : Invalid iface_num %d."
+                                   "Only iface_num 0 is supported.\n",
+                                   iface_param->iface_num);
+
                        return -EINVAL;
                }
 
@@ -420,9 +493,9 @@ int be2iscsi_iface_set_param(struct Scsi_Host *shost,
                                               data, dt_len);
                        break;
                default:
-                       shost_printk(KERN_ERR, shost,
-                                    "Invalid iface type :%d passed\n",
-                                    iface_param->iface_type);
+                       beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+                                   "BS_%d : Invalid iface type :%d passed\n",
+                                   iface_param->iface_type);
                        break;
                }
 
@@ -465,6 +538,27 @@ static int be2iscsi_get_if_param(struct beiscsi_hba *phba,
        case ISCSI_NET_PARAM_IPV4_SUBNET:
                len = sprintf(buf, "%pI4\n", &if_info.ip_addr.subnet_mask);
                break;
+       case ISCSI_NET_PARAM_VLAN_ENABLED:
+               len = sprintf(buf, "%s\n",
+                            (if_info.vlan_priority == BEISCSI_VLAN_DISABLE)
+                            ? "Disabled" : "Enabled");
+               break;
+       case ISCSI_NET_PARAM_VLAN_ID:
+               if (if_info.vlan_priority == BEISCSI_VLAN_DISABLE)
+                       return -EINVAL;
+               else
+                       len = sprintf(buf, "%d\n",
+                                    (if_info.vlan_priority &
+                                    ISCSI_MAX_VLAN_ID));
+               break;
+       case ISCSI_NET_PARAM_VLAN_PRIORITY:
+               if (if_info.vlan_priority == BEISCSI_VLAN_DISABLE)
+                       return -EINVAL;
+               else
+                       len = sprintf(buf, "%d\n",
+                                    ((if_info.vlan_priority >> 13) &
+                                    ISCSI_MAX_VLAN_PRIORITY));
+               break;
        default:
                WARN_ON(1);
        }
@@ -486,6 +580,9 @@ int be2iscsi_iface_get_param(struct iscsi_iface *iface,
        case ISCSI_NET_PARAM_IPV4_SUBNET:
        case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
        case ISCSI_NET_PARAM_IPV6_ADDR:
+       case ISCSI_NET_PARAM_VLAN_ENABLED:
+       case ISCSI_NET_PARAM_VLAN_ID:
+       case ISCSI_NET_PARAM_VLAN_PRIORITY:
                len = be2iscsi_get_if_param(phba, iface, param, buf);
                break;
        case ISCSI_NET_PARAM_IFACE_ENABLE:
@@ -518,7 +615,10 @@ int beiscsi_ep_get_param(struct iscsi_endpoint *ep,
        struct beiscsi_endpoint *beiscsi_ep = ep->dd_data;
        int len = 0;
 
-       SE_DEBUG(DBG_LVL_8, "In beiscsi_ep_get_param, param= %d\n", param);
+       beiscsi_log(beiscsi_ep->phba, KERN_INFO,
+                   BEISCSI_LOG_CONFIG,
+                   "BS_%d : In beiscsi_ep_get_param,"
+                   " param= %d\n", param);
 
        switch (param) {
        case ISCSI_PARAM_CONN_PORT:
@@ -541,9 +641,14 @@ int beiscsi_set_param(struct iscsi_cls_conn *cls_conn,
 {
        struct iscsi_conn *conn = cls_conn->dd_data;
        struct iscsi_session *session = conn->session;
+       struct beiscsi_hba *phba = NULL;
        int ret;
 
-       SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_set_param, param= %d\n", param);
+       phba = ((struct beiscsi_conn *)conn->dd_data)->phba;
+       beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+                   "BS_%d : In beiscsi_conn_set_param,"
+                   " param= %d\n", param);
+
        ret = iscsi_set_param(cls_conn, param, buf, buflen);
        if (ret)
                return ret;
@@ -593,7 +698,9 @@ static int beiscsi_get_initname(char *buf, struct beiscsi_hba *phba)
 
        tag = be_cmd_get_initname(phba);
        if (!tag) {
-               SE_DEBUG(DBG_LVL_1, "Getting Initiator Name Failed\n");
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+                           "BS_%d : Getting Initiator Name Failed\n");
+
                return -EBUSY;
        } else
                wait_event_interruptible(phba->ctrl.mcc_wait[tag],
@@ -604,9 +711,12 @@ static int beiscsi_get_initname(char *buf, struct beiscsi_hba *phba)
        status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
 
        if (status || extd_status) {
-               SE_DEBUG(DBG_LVL_1, "MailBox Command Failed with "
-                               "status = %d extd_status = %d\n",
-                               status, extd_status);
+               beiscsi_log(phba, KERN_ERR,
+                           BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+                           "BS_%d : MailBox Command Failed with "
+                           "status = %d extd_status = %d\n",
+                           status, extd_status);
+
                free_mcc_tag(&phba->ctrl, tag);
                return -EAGAIN;
        }
@@ -650,7 +760,9 @@ static int beiscsi_get_port_speed(struct Scsi_Host *shost)
 
        tag = be_cmd_get_port_speed(phba);
        if (!tag) {
-               SE_DEBUG(DBG_LVL_1, "Getting Port Speed Failed\n");
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+                           "BS_%d : Getting Port Speed Failed\n");
+
                 return -EBUSY;
         } else
                wait_event_interruptible(phba->ctrl.mcc_wait[tag],
@@ -661,9 +773,12 @@ static int beiscsi_get_port_speed(struct Scsi_Host *shost)
        status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
 
        if (status || extd_status) {
-               SE_DEBUG(DBG_LVL_1, "MailBox Command Failed with "
-                               "status = %d extd_status = %d\n",
-                               status, extd_status);
+               beiscsi_log(phba, KERN_ERR,
+                           BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+                           "BS_%d : MailBox Command Failed with "
+                           "status = %d extd_status = %d\n",
+                           status, extd_status);
+
                free_mcc_tag(&phba->ctrl, tag);
                return -EAGAIN;
        }
@@ -704,20 +819,24 @@ int beiscsi_get_host_param(struct Scsi_Host *shost,
        struct beiscsi_hba *phba = iscsi_host_priv(shost);
        int status = 0;
 
-       SE_DEBUG(DBG_LVL_8, "In beiscsi_get_host_param, param= %d\n", param);
+       beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+                   "BS_%d : In beiscsi_get_host_param,"
+                   " param= %d\n", param);
+
        switch (param) {
        case ISCSI_HOST_PARAM_HWADDRESS:
                status = beiscsi_get_macaddr(buf, phba);
                if (status < 0) {
-                       SE_DEBUG(DBG_LVL_1, "beiscsi_get_macaddr Failed\n");
+                       beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+                                   "BS_%d : beiscsi_get_macaddr Failed\n");
                        return status;
                }
                break;
        case ISCSI_HOST_PARAM_INITIATOR_NAME:
                status = beiscsi_get_initname(buf, phba);
                if (status < 0) {
-                       SE_DEBUG(DBG_LVL_1,
-                                       "Retreiving Initiator Name Failed\n");
+                       beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+                                   "BS_%d : Retreiving Initiator Name Failed\n");
                        return status;
                }
                break;
@@ -728,8 +847,8 @@ int beiscsi_get_host_param(struct Scsi_Host *shost,
        case ISCSI_HOST_PARAM_PORT_SPEED:
                status = beiscsi_get_port_speed(shost);
                if (status) {
-                       SE_DEBUG(DBG_LVL_1,
-                                       "Retreiving Port Speed Failed\n");
+                       beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+                                   "BS_%d : Retreiving Port Speed Failed\n");
                        return status;
                }
                status = sprintf(buf, "%s\n", iscsi_get_port_speed_name(shost));
@@ -746,7 +865,7 @@ int beiscsi_get_macaddr(char *buf, struct beiscsi_hba *phba)
        int rc;
 
        if (strlen(phba->mac_address))
-               return strlcpy(buf, phba->mac_address, PAGE_SIZE);
+               return sysfs_format_mac(buf, phba->mac_address, ETH_ALEN);
 
        memset(&resp, 0, sizeof(resp));
        rc = mgmt_get_nic_conf(phba, &resp);
@@ -768,8 +887,12 @@ void beiscsi_conn_get_stats(struct iscsi_cls_conn *cls_conn,
                            struct iscsi_stats *stats)
 {
        struct iscsi_conn *conn = cls_conn->dd_data;
+       struct beiscsi_hba *phba = NULL;
+
+       phba = ((struct beiscsi_conn *)conn->dd_data)->phba;
+       beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+                   "BS_%d : In beiscsi_conn_get_stats\n");
 
-       SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_get_stats\n");
        stats->txdata_octets = conn->txdata_octets;
        stats->rxdata_octets = conn->rxdata_octets;
        stats->dataout_pdus = conn->dataout_pdus_cnt;
@@ -829,11 +952,16 @@ int beiscsi_conn_start(struct iscsi_cls_conn *cls_conn)
        struct beiscsi_endpoint *beiscsi_ep;
        struct beiscsi_offload_params params;
 
-       SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_start\n");
+       beiscsi_log(beiscsi_conn->phba, KERN_INFO,
+                   BEISCSI_LOG_CONFIG,
+                   "BS_%d : In beiscsi_conn_start\n");
+
        memset(&params, 0, sizeof(struct beiscsi_offload_params));
        beiscsi_ep = beiscsi_conn->ep;
        if (!beiscsi_ep)
-               SE_DEBUG(DBG_LVL_1, "In beiscsi_conn_start , no beiscsi_ep\n");
+               beiscsi_log(beiscsi_conn->phba, KERN_ERR,
+                           BEISCSI_LOG_CONFIG,
+                           "BS_%d : In beiscsi_conn_start , no beiscsi_ep\n");
 
        beiscsi_conn->login_in_progress = 0;
        beiscsi_set_params_for_offld(beiscsi_conn, &params);
@@ -907,19 +1035,27 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
        unsigned int tag, wrb_num;
        int ret = -ENOMEM;
 
-       SE_DEBUG(DBG_LVL_8, "In beiscsi_open_conn\n");
+       beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+                   "BS_%d : In beiscsi_open_conn\n");
+
        beiscsi_ep->ep_cid = beiscsi_get_cid(phba);
        if (beiscsi_ep->ep_cid == 0xFFFF) {
-               SE_DEBUG(DBG_LVL_1, "No free cid available\n");
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+                           "BS_%d : No free cid available\n");
                return ret;
        }
-       SE_DEBUG(DBG_LVL_8, "In beiscsi_open_conn, ep_cid=%d\n",
-                beiscsi_ep->ep_cid);
+
+       beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+                   "BS_%d : In beiscsi_open_conn, ep_cid=%d\n",
+                   beiscsi_ep->ep_cid);
+
        phba->ep_array[beiscsi_ep->ep_cid -
                       phba->fw_config.iscsi_cid_start] = ep;
        if (beiscsi_ep->ep_cid > (phba->fw_config.iscsi_cid_start +
                                  phba->params.cxns_per_ctrl * 2)) {
-               SE_DEBUG(DBG_LVL_1, "Failed in allocate iscsi cid\n");
+
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+                           "BS_%d : Failed in allocate iscsi cid\n");
                goto free_ep;
        }
 
@@ -928,9 +1064,11 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
                                sizeof(struct tcp_connect_and_offload_in),
                                &nonemb_cmd.dma);
        if (nonemb_cmd.va == NULL) {
-               SE_DEBUG(DBG_LVL_1,
-                        "Failed to allocate memory for mgmt_open_connection"
-                        "\n");
+
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+                           "BS_%d : Failed to allocate memory for"
+                           " mgmt_open_connection\n");
+
                beiscsi_put_cid(phba, beiscsi_ep->ep_cid);
                return -ENOMEM;
        }
@@ -938,9 +1076,10 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
        memset(nonemb_cmd.va, 0, nonemb_cmd.size);
        tag = mgmt_open_connection(phba, dst_addr, beiscsi_ep, &nonemb_cmd);
        if (!tag) {
-               SE_DEBUG(DBG_LVL_1,
-                        "mgmt_open_connection Failed for cid=%d\n",
-                        beiscsi_ep->ep_cid);
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+                           "BS_%d : mgmt_open_connection Failed for cid=%d\n",
+                           beiscsi_ep->ep_cid);
+
                beiscsi_put_cid(phba, beiscsi_ep->ep_cid);
                pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
                                    nonemb_cmd.va, nonemb_cmd.dma);
@@ -953,9 +1092,12 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
        extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
        status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
        if (status || extd_status) {
-               SE_DEBUG(DBG_LVL_1, "mgmt_open_connection Failed"
-                                   " status = %d extd_status = %d\n",
-                                   status, extd_status);
+               beiscsi_log(phba, KERN_ERR,
+                           BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+                           "BS_%d : mgmt_open_connection Failed"
+                           " status = %d extd_status = %d\n",
+                           status, extd_status);
+
                free_mcc_tag(&phba->ctrl, tag);
                pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
                            nonemb_cmd.va, nonemb_cmd.dma);
@@ -968,7 +1110,8 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
                beiscsi_ep = ep->dd_data;
                beiscsi_ep->fw_handle = ptcpcnct_out->connection_handle;
                beiscsi_ep->cid_vld = 1;
-               SE_DEBUG(DBG_LVL_8, "mgmt_open_connection Success\n");
+               beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+                           "BS_%d : mgmt_open_connection Success\n");
        }
        pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
                            nonemb_cmd.va, nonemb_cmd.dma);
@@ -996,18 +1139,19 @@ beiscsi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
        struct iscsi_endpoint *ep;
        int ret;
 
-       SE_DEBUG(DBG_LVL_8, "In beiscsi_ep_connect\n");
        if (shost)
                phba = iscsi_host_priv(shost);
        else {
                ret = -ENXIO;
-               SE_DEBUG(DBG_LVL_1, "shost is NULL\n");
+               printk(KERN_ERR
+                      "beiscsi_ep_connect shost is NULL\n");
                return ERR_PTR(ret);
        }
 
        if (phba->state != BE_ADAPTER_UP) {
                ret = -EBUSY;
-               SE_DEBUG(DBG_LVL_1, "The Adapter state is Not UP\n");
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+                           "BS_%d : The Adapter state is Not UP\n");
                return ERR_PTR(ret);
        }
 
@@ -1022,7 +1166,8 @@ beiscsi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
        beiscsi_ep->openiscsi_ep = ep;
        ret = beiscsi_open_conn(ep, NULL, dst_addr, non_blocking);
        if (ret) {
-               SE_DEBUG(DBG_LVL_1, "Failed in beiscsi_open_conn\n");
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+                           "BS_%d : Failed in beiscsi_open_conn\n");
                goto free_ep;
        }
 
@@ -1044,7 +1189,9 @@ int beiscsi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
 {
        struct beiscsi_endpoint *beiscsi_ep = ep->dd_data;
 
-       SE_DEBUG(DBG_LVL_8, "In  beiscsi_ep_poll\n");
+       beiscsi_log(beiscsi_ep->phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+                   "BS_%d : In  beiscsi_ep_poll\n");
+
        if (beiscsi_ep->cid_vld == 1)
                return 1;
        else
@@ -1064,8 +1211,10 @@ static int beiscsi_close_conn(struct  beiscsi_endpoint *beiscsi_ep, int flag)
 
        tag = mgmt_upload_connection(phba, beiscsi_ep->ep_cid, flag);
        if (!tag) {
-               SE_DEBUG(DBG_LVL_8, "upload failed for cid 0x%x\n",
-                        beiscsi_ep->ep_cid);
+               beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+                           "BS_%d : upload failed for cid 0x%x\n",
+                           beiscsi_ep->ep_cid);
+
                ret = -EAGAIN;
        } else {
                wait_event_interruptible(phba->ctrl.mcc_wait[tag],
@@ -1086,7 +1235,8 @@ static int beiscsi_unbind_conn_to_cid(struct beiscsi_hba *phba,
        if (phba->conn_table[cid])
                phba->conn_table[cid] = NULL;
        else {
-               SE_DEBUG(DBG_LVL_8, "Connection table Not occupied.\n");
+               beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+                           "BS_%d : Connection table Not occupied.\n");
                return -EINVAL;
        }
        return 0;
@@ -1104,38 +1254,40 @@ void beiscsi_ep_disconnect(struct iscsi_endpoint *ep)
        struct beiscsi_endpoint *beiscsi_ep;
        struct beiscsi_hba *phba;
        unsigned int tag;
+       uint8_t mgmt_invalidate_flag, tcp_upload_flag;
        unsigned short savecfg_flag = CMD_ISCSI_SESSION_SAVE_CFG_ON_FLASH;
 
        beiscsi_ep = ep->dd_data;
        phba = beiscsi_ep->phba;
-       SE_DEBUG(DBG_LVL_8, "In beiscsi_ep_disconnect for ep_cid = %d\n",
-                            beiscsi_ep->ep_cid);
-
-       if (!beiscsi_ep->conn) {
-               SE_DEBUG(DBG_LVL_8, "In beiscsi_ep_disconnect, no "
-                        "beiscsi_ep\n");
-               return;
+       beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+                   "BS_%d : In beiscsi_ep_disconnect for ep_cid = %d\n",
+                   beiscsi_ep->ep_cid);
+
+       if (beiscsi_ep->conn) {
+               beiscsi_conn = beiscsi_ep->conn;
+               iscsi_suspend_queue(beiscsi_conn->conn);
+               mgmt_invalidate_flag = ~BEISCSI_NO_RST_ISSUE;
+               tcp_upload_flag = CONNECTION_UPLOAD_GRACEFUL;
+       } else {
+               mgmt_invalidate_flag = BEISCSI_NO_RST_ISSUE;
+               tcp_upload_flag = CONNECTION_UPLOAD_ABORT;
        }
-       beiscsi_conn = beiscsi_ep->conn;
-       iscsi_suspend_queue(beiscsi_conn->conn);
-
-       SE_DEBUG(DBG_LVL_8, "In beiscsi_ep_disconnect ep_cid = %d\n",
-                beiscsi_ep->ep_cid);
 
        tag = mgmt_invalidate_connection(phba, beiscsi_ep,
-                                           beiscsi_ep->ep_cid, 1,
-                                           savecfg_flag);
+                                         beiscsi_ep->ep_cid,
+                                         mgmt_invalidate_flag,
+                                         savecfg_flag);
        if (!tag) {
-               SE_DEBUG(DBG_LVL_1,
-                        "mgmt_invalidate_connection Failed for cid=%d\n",
-                         beiscsi_ep->ep_cid);
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+                           "BS_%d : mgmt_invalidate_connection Failed for cid=%d\n",
+                           beiscsi_ep->ep_cid);
        } else {
                wait_event_interruptible(phba->ctrl.mcc_wait[tag],
                                         phba->ctrl.mcc_numtag[tag]);
                free_mcc_tag(&phba->ctrl, tag);
        }
 
-       beiscsi_close_conn(beiscsi_ep, CONNECTION_UPLOAD_GRACEFUL);
+       beiscsi_close_conn(beiscsi_ep, tcp_upload_flag);
        beiscsi_free_ep(beiscsi_ep);
        beiscsi_unbind_conn_to_cid(phba, beiscsi_ep->ep_cid);
        iscsi_destroy_endpoint(beiscsi_ep->openiscsi_ep);
@@ -1152,6 +1304,9 @@ umode_t be2iscsi_attr_is_visible(int param_type, int param)
                case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
                case ISCSI_NET_PARAM_IPV4_GW:
                case ISCSI_NET_PARAM_IPV6_ADDR:
+               case ISCSI_NET_PARAM_VLAN_ID:
+               case ISCSI_NET_PARAM_VLAN_PRIORITY:
+               case ISCSI_NET_PARAM_VLAN_ENABLED:
                        return S_IRUGO;
                default:
                        return 0;
index 0b1d99c99fd28bb39d0e8a9d63c6a4a3251b7cda..ff73f9500b01c43c3bdbbd210052fd651a8f3487 100644 (file)
@@ -42,6 +42,7 @@
 #include "be_main.h"
 #include "be_iscsi.h"
 #include "be_mgmt.h"
+#include "be_cmds.h"
 
 static unsigned int be_iopoll_budget = 10;
 static unsigned int be_max_phys_size = 64;
@@ -57,9 +58,105 @@ MODULE_LICENSE("GPL");
 module_param(be_iopoll_budget, int, 0);
 module_param(enable_msix, int, 0);
 module_param(be_max_phys_size, uint, S_IRUGO);
-MODULE_PARM_DESC(be_max_phys_size, "Maximum Size (In Kilobytes) of physically"
-                                  "contiguous memory that can be allocated."
-                                  "Range is 16 - 128");
+MODULE_PARM_DESC(be_max_phys_size,
+               "Maximum Size (In Kilobytes) of physically contiguous "
+               "memory that can be allocated. Range is 16 - 128");
+
+#define beiscsi_disp_param(_name)\
+ssize_t        \
+beiscsi_##_name##_disp(struct device *dev,\
+                       struct device_attribute *attrib, char *buf)     \
+{      \
+       struct Scsi_Host *shost = class_to_shost(dev);\
+       struct beiscsi_hba *phba = iscsi_host_priv(shost); \
+       uint32_t param_val = 0; \
+       param_val = phba->attr_##_name;\
+       return snprintf(buf, PAGE_SIZE, "%d\n",\
+                       phba->attr_##_name);\
+}
+
+#define beiscsi_change_param(_name, _minval, _maxval, _defaval)\
+int \
+beiscsi_##_name##_change(struct beiscsi_hba *phba, uint32_t val)\
+{\
+       if (val >= _minval && val <= _maxval) {\
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,\
+                           "BA_%d : beiscsi_"#_name" updated "\
+                           "from 0x%x ==> 0x%x\n",\
+                           phba->attr_##_name, val); \
+               phba->attr_##_name = val;\
+               return 0;\
+       } \
+       beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, \
+                   "BA_%d beiscsi_"#_name" attribute "\
+                   "cannot be updated to 0x%x, "\
+                   "range allowed is ["#_minval" - "#_maxval"]\n", val);\
+               return -EINVAL;\
+}
+
+#define beiscsi_store_param(_name)  \
+ssize_t \
+beiscsi_##_name##_store(struct device *dev,\
+                        struct device_attribute *attr, const char *buf,\
+                        size_t count) \
+{ \
+       struct Scsi_Host  *shost = class_to_shost(dev);\
+       struct beiscsi_hba *phba = iscsi_host_priv(shost);\
+       uint32_t param_val = 0;\
+       if (!isdigit(buf[0]))\
+               return -EINVAL;\
+       if (sscanf(buf, "%i", &param_val) != 1)\
+               return -EINVAL;\
+       if (beiscsi_##_name##_change(phba, param_val) == 0) \
+               return strlen(buf);\
+       else \
+               return -EINVAL;\
+}
+
+#define beiscsi_init_param(_name, _minval, _maxval, _defval) \
+int \
+beiscsi_##_name##_init(struct beiscsi_hba *phba, uint32_t val) \
+{ \
+       if (val >= _minval && val <= _maxval) {\
+               phba->attr_##_name = val;\
+               return 0;\
+       } \
+       beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,\
+                   "BA_%d beiscsi_"#_name" attribute " \
+                   "cannot be updated to 0x%x, "\
+                   "range allowed is ["#_minval" - "#_maxval"]\n", val);\
+       phba->attr_##_name = _defval;\
+       return -EINVAL;\
+}
+
+#define BEISCSI_RW_ATTR(_name, _minval, _maxval, _defval, _descp) \
+static uint beiscsi_##_name = _defval;\
+module_param(beiscsi_##_name, uint, S_IRUGO);\
+MODULE_PARM_DESC(beiscsi_##_name, _descp);\
+beiscsi_disp_param(_name)\
+beiscsi_change_param(_name, _minval, _maxval, _defval)\
+beiscsi_store_param(_name)\
+beiscsi_init_param(_name, _minval, _maxval, _defval)\
+DEVICE_ATTR(beiscsi_##_name, S_IRUGO | S_IWUSR,\
+             beiscsi_##_name##_disp, beiscsi_##_name##_store)
+
+/*
+ * When new log level added update the
+ * the MAX allowed value for log_enable
+ */
+BEISCSI_RW_ATTR(log_enable, 0x00,
+               0xFF, 0x00, "Enable logging Bit Mask\n"
+               "\t\t\t\tInitialization Events  : 0x01\n"
+               "\t\t\t\tMailbox Events         : 0x02\n"
+               "\t\t\t\tMiscellaneous Events   : 0x04\n"
+               "\t\t\t\tError Handling         : 0x08\n"
+               "\t\t\t\tIO Path Events         : 0x10\n"
+               "\t\t\t\tConfiguration Path     : 0x20\n");
+
+struct device_attribute *beiscsi_attrs[] = {
+       &dev_attr_beiscsi_log_enable,
+       NULL,
+};
 
 static int beiscsi_slave_configure(struct scsi_device *sdev)
 {
@@ -112,9 +209,9 @@ static int beiscsi_eh_abort(struct scsi_cmnd *sc)
                                sizeof(struct invalidate_commands_params_in),
                                &nonemb_cmd.dma);
        if (nonemb_cmd.va == NULL) {
-               SE_DEBUG(DBG_LVL_1,
-                        "Failed to allocate memory for"
-                        "mgmt_invalidate_icds\n");
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH,
+                           "BM_%d : Failed to allocate memory for"
+                           "mgmt_invalidate_icds\n");
                return FAILED;
        }
        nonemb_cmd.size = sizeof(struct invalidate_commands_params_in);
@@ -122,9 +219,9 @@ static int beiscsi_eh_abort(struct scsi_cmnd *sc)
        tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate,
                                   cid, &nonemb_cmd);
        if (!tag) {
-               shost_printk(KERN_WARNING, phba->shost,
-                            "mgmt_invalidate_icds could not be"
-                            " submitted\n");
+               beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_EH,
+                           "BM_%d : mgmt_invalidate_icds could not be"
+                           "submitted\n");
                pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
                                    nonemb_cmd.va, nonemb_cmd.dma);
 
@@ -188,9 +285,9 @@ static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
                                sizeof(struct invalidate_commands_params_in),
                                &nonemb_cmd.dma);
        if (nonemb_cmd.va == NULL) {
-               SE_DEBUG(DBG_LVL_1,
-                        "Failed to allocate memory for"
-                        "mgmt_invalidate_icds\n");
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH,
+                           "BM_%d : Failed to allocate memory for"
+                           "mgmt_invalidate_icds\n");
                return FAILED;
        }
        nonemb_cmd.size = sizeof(struct invalidate_commands_params_in);
@@ -198,9 +295,9 @@ static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
        tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate,
                                   cid, &nonemb_cmd);
        if (!tag) {
-               shost_printk(KERN_WARNING, phba->shost,
-                            "mgmt_invalidate_icds could not be"
-                            " submitted\n");
+               beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_EH,
+                           "BM_%d : mgmt_invalidate_icds could not be"
+                           " submitted\n");
                pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
                                    nonemb_cmd.va, nonemb_cmd.dma);
                return FAILED;
@@ -389,6 +486,7 @@ static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table) = {
 };
 MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
 
+
 static struct scsi_host_template beiscsi_sht = {
        .module = THIS_MODULE,
        .name = "Emulex 10Gbe open-iscsi Initiator Driver",
@@ -400,6 +498,7 @@ static struct scsi_host_template beiscsi_sht = {
        .eh_abort_handler = beiscsi_eh_abort,
        .eh_device_reset_handler = beiscsi_eh_device_reset,
        .eh_target_reset_handler = iscsi_eh_session_reset,
+       .shost_attrs = beiscsi_attrs,
        .sg_tablesize = BEISCSI_SGLIST_ELEMENTS,
        .can_queue = BE2_IO_DEPTH,
        .this_id = -1,
@@ -419,8 +518,8 @@ static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
 
        shost = iscsi_host_alloc(&beiscsi_sht, sizeof(*phba), 0);
        if (!shost) {
-               dev_err(&pcidev->dev, "beiscsi_hba_alloc -"
-                       "iscsi_host_alloc failed\n");
+               dev_err(&pcidev->dev,
+                       "beiscsi_hba_alloc - iscsi_host_alloc failed\n");
                return NULL;
        }
        shost->dma_boundary = pcidev->dma_mask;
@@ -510,8 +609,8 @@ static int beiscsi_enable_pci(struct pci_dev *pcidev)
 
        ret = pci_enable_device(pcidev);
        if (ret) {
-               dev_err(&pcidev->dev, "beiscsi_enable_pci - enable device "
-                       "failed. Returning -ENODEV\n");
+               dev_err(&pcidev->dev,
+                       "beiscsi_enable_pci - enable device failed\n");
                return ret;
        }
 
@@ -576,8 +675,9 @@ static void beiscsi_get_params(struct beiscsi_hba *phba)
                                    + BE2_TMFS) / 512) + 1) * 512;
        phba->params.num_eq_entries = (phba->params.num_eq_entries < 1024)
                                ? 1024 : phba->params.num_eq_entries;
-       SE_DEBUG(DBG_LVL_8, "phba->params.num_eq_entries=%d\n",
-                            phba->params.num_eq_entries);
+       beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+                   "BM_%d : phba->params.num_eq_entries=%d\n",
+                   phba->params.num_eq_entries);
        phba->params.num_cq_entries =
            (((BE2_CMDS_PER_CXN * 2 +  phba->fw_config.iscsi_cid_count * 2
                                    + BE2_TMFS) / 512) + 1) * 512;
@@ -621,8 +721,6 @@ static irqreturn_t be_isr_mcc(int irq, void *dev_id)
        phba =  pbe_eq->phba;
        mcc = &phba->ctrl.mcc_obj.cq;
        eqe = queue_tail_node(eq);
-       if (!eqe)
-               SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");
 
        num_eq_processed = 0;
 
@@ -667,8 +765,6 @@ static irqreturn_t be_isr_msix(int irq, void *dev_id)
        eq = &pbe_eq->q;
        cq = pbe_eq->cq;
        eqe = queue_tail_node(eq);
-       if (!eqe)
-               SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");
 
        phba = pbe_eq->phba;
        num_eq_processed = 0;
@@ -743,8 +839,6 @@ static irqreturn_t be_isr(int irq, void *dev_id)
        mcc = &phba->ctrl.mcc_obj.cq;
        index = 0;
        eqe = queue_tail_node(eq);
-       if (!eqe)
-               SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");
 
        num_ioeq_processed = 0;
        num_mcceq_processed = 0;
@@ -842,9 +936,10 @@ static int beiscsi_init_irqs(struct beiscsi_hba *phba)
                                          phba->msi_name[i],
                                          &phwi_context->be_eq[i]);
                        if (ret) {
-                               shost_printk(KERN_ERR, phba->shost,
-                                            "beiscsi_init_irqs-Failed to"
-                                            "register msix for i = %d\n", i);
+                               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                                           "BM_%d : beiscsi_init_irqs-Failed to"
+                                           "register msix for i = %d\n",
+                                           i);
                                kfree(phba->msi_name[i]);
                                goto free_msix_irqs;
                        }
@@ -860,8 +955,9 @@ static int beiscsi_init_irqs(struct beiscsi_hba *phba)
                ret = request_irq(msix_vec, be_isr_mcc, 0, phba->msi_name[i],
                                  &phwi_context->be_eq[i]);
                if (ret) {
-                       shost_printk(KERN_ERR, phba->shost, "beiscsi_init_irqs-"
-                                    "Failed to register beiscsi_msix_mcc\n");
+                       beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT ,
+                                   "BM_%d : beiscsi_init_irqs-"
+                                   "Failed to register beiscsi_msix_mcc\n");
                        kfree(phba->msi_name[i]);
                        goto free_msix_irqs;
                }
@@ -870,8 +966,9 @@ static int beiscsi_init_irqs(struct beiscsi_hba *phba)
                ret = request_irq(pcidev->irq, be_isr, IRQF_SHARED,
                                  "beiscsi", phba);
                if (ret) {
-                       shost_printk(KERN_ERR, phba->shost, "beiscsi_init_irqs-"
-                                    "Failed to register irq\\n");
+                       beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                                   "BM_%d : beiscsi_init_irqs-"
+                                   "Failed to register irq\\n");
                        return ret;
                }
        }
@@ -922,7 +1019,9 @@ beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn,
        case ISCSI_OP_REJECT:
                WARN_ON(!pbuffer);
                WARN_ON(!(buf_len == 48));
-               SE_DEBUG(DBG_LVL_1, "In ISCSI_OP_REJECT\n");
+               beiscsi_log(phba, KERN_ERR,
+                           BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
+                           "BM_%d : In ISCSI_OP_REJECT\n");
                break;
        case ISCSI_OP_LOGIN_RSP:
        case ISCSI_OP_TEXT_RSP:
@@ -932,11 +1031,12 @@ beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn,
                login_hdr->itt = io_task->libiscsi_itt;
                break;
        default:
-               shost_printk(KERN_WARNING, phba->shost,
-                            "Unrecognized opcode 0x%x in async msg\n",
-                            (ppdu->
+               beiscsi_log(phba, KERN_WARNING,
+                           BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
+                           "BM_%d : Unrecognized opcode 0x%x in async msg\n",
+                           (ppdu->
                             dw[offsetof(struct amap_pdu_base, opcode) / 32]
-                                               & PDUBASE_OPCODE_MASK));
+                            & PDUBASE_OPCODE_MASK));
                return 1;
        }
 
@@ -951,9 +1051,11 @@ static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
        struct sgl_handle *psgl_handle;
 
        if (phba->io_sgl_hndl_avbl) {
-               SE_DEBUG(DBG_LVL_8,
-                        "In alloc_io_sgl_handle,io_sgl_alloc_index=%d\n",
-                        phba->io_sgl_alloc_index);
+               beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
+                           "BM_%d : In alloc_io_sgl_handle,"
+                           " io_sgl_alloc_index=%d\n",
+                           phba->io_sgl_alloc_index);
+
                psgl_handle = phba->io_sgl_hndl_base[phba->
                                                io_sgl_alloc_index];
                phba->io_sgl_hndl_base[phba->io_sgl_alloc_index] = NULL;
@@ -971,17 +1073,20 @@ static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
 static void
 free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
 {
-       SE_DEBUG(DBG_LVL_8, "In free_,io_sgl_free_index=%d\n",
-                phba->io_sgl_free_index);
+       beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
+                   "BM_%d : In free_,io_sgl_free_index=%d\n",
+                   phba->io_sgl_free_index);
+
        if (phba->io_sgl_hndl_base[phba->io_sgl_free_index]) {
                /*
                 * this can happen if clean_task is called on a task that
                 * failed in xmit_task or alloc_pdu.
                 */
-                SE_DEBUG(DBG_LVL_8,
-                        "Double Free in IO SGL io_sgl_free_index=%d,"
-                        "value there=%p\n", phba->io_sgl_free_index,
-                        phba->io_sgl_hndl_base[phba->io_sgl_free_index]);
+                beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
+                            "BM_%d : Double Free in IO SGL io_sgl_free_index=%d,"
+                            "value there=%p\n", phba->io_sgl_free_index,
+                            phba->io_sgl_hndl_base
+                            [phba->io_sgl_free_index]);
                return;
        }
        phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle;
@@ -1043,11 +1148,12 @@ free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context,
        else
                pwrb_context->free_index++;
 
-       SE_DEBUG(DBG_LVL_8,
-                "FREE WRB: pwrb_handle=%p free_index=0x%x"
-                "wrb_handles_available=%d\n",
-                pwrb_handle, pwrb_context->free_index,
-                pwrb_context->wrb_handles_available);
+       beiscsi_log(phba, KERN_INFO,
+                   BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
+                   "BM_%d : FREE WRB: pwrb_handle=%p free_index=0x%x"
+                   "wrb_handles_available=%d\n",
+                   pwrb_handle, pwrb_context->free_index,
+                   pwrb_context->wrb_handles_available);
 }
 
 static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
@@ -1057,8 +1163,11 @@ static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
        if (phba->eh_sgl_hndl_avbl) {
                psgl_handle = phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index];
                phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index] = NULL;
-               SE_DEBUG(DBG_LVL_8, "mgmt_sgl_alloc_index=%d=0x%x\n",
-                        phba->eh_sgl_alloc_index, phba->eh_sgl_alloc_index);
+               beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+                           "BM_%d : mgmt_sgl_alloc_index=%d=0x%x\n",
+                           phba->eh_sgl_alloc_index,
+                           phba->eh_sgl_alloc_index);
+
                phba->eh_sgl_hndl_avbl--;
                if (phba->eh_sgl_alloc_index ==
                    (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl -
@@ -1075,16 +1184,20 @@ void
 free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
 {
 
-       SE_DEBUG(DBG_LVL_8, "In  free_mgmt_sgl_handle,eh_sgl_free_index=%d\n",
-                            phba->eh_sgl_free_index);
+       beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+                   "BM_%d : In  free_mgmt_sgl_handle,"
+                   "eh_sgl_free_index=%d\n",
+                   phba->eh_sgl_free_index);
+
        if (phba->eh_sgl_hndl_base[phba->eh_sgl_free_index]) {
                /*
                 * this can happen if clean_task is called on a task that
                 * failed in xmit_task or alloc_pdu.
                 */
-               SE_DEBUG(DBG_LVL_8,
-                        "Double Free in eh SGL ,eh_sgl_free_index=%d\n",
-                        phba->eh_sgl_free_index);
+               beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
+                           "BM_%d : Double Free in eh SGL ,"
+                           "eh_sgl_free_index=%d\n",
+                           phba->eh_sgl_free_index);
                return;
        }
        phba->eh_sgl_hndl_base[phba->eh_sgl_free_index] = psgl_handle;
@@ -1326,9 +1439,10 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
                break;
 
        case HWH_TYPE_LOGIN:
-               SE_DEBUG(DBG_LVL_1,
-                        "\t\t No HWH_TYPE_LOGIN Expected in hwi_complete_cmd"
-                        "- Solicited path\n");
+               beiscsi_log(phba, KERN_ERR,
+                           BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
+                           "BM_%d :\t\t No HWH_TYPE_LOGIN Expected in"
+                           " hwi_complete_cmd- Solicited path\n");
                break;
 
        case HWH_TYPE_NOP:
@@ -1336,13 +1450,14 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
                break;
 
        default:
-               shost_printk(KERN_WARNING, phba->shost,
-                               "In hwi_complete_cmd, unknown type = %d"
-                               "wrb_index 0x%x CID 0x%x\n", type,
-                               ((psol->dw[offsetof(struct amap_iscsi_wrb,
-                               type) / 32] & SOL_WRB_INDEX_MASK) >> 16),
-                               ((psol->dw[offsetof(struct amap_sol_cqe,
-                               cid) / 32] & SOL_CID_MASK) >> 6));
+               beiscsi_log(phba, KERN_WARNING,
+                           BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
+                           "BM_%d : In hwi_complete_cmd, unknown type = %d"
+                           "wrb_index 0x%x CID 0x%x\n", type,
+                           ((psol->dw[offsetof(struct amap_iscsi_wrb,
+                           type) / 32] & SOL_WRB_INDEX_MASK) >> 16),
+                           ((psol->dw[offsetof(struct amap_sol_cqe,
+                           cid) / 32] & SOL_CID_MASK) >> 6));
                break;
        }
 
@@ -1397,10 +1512,11 @@ hwi_get_async_handle(struct beiscsi_hba *phba,
                break;
        default:
                pbusy_list = NULL;
-               shost_printk(KERN_WARNING, phba->shost,
-                       "Unexpected code=%d\n",
-                        pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
-                                       code) / 32] & PDUCQE_CODE_MASK);
+               beiscsi_log(phba, KERN_WARNING,
+                           BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
+                           "BM_%d : Unexpected code=%d\n",
+                           pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
+                           code) / 32] & PDUCQE_CODE_MASK);
                return NULL;
        }
 
@@ -1425,8 +1541,9 @@ hwi_get_async_handle(struct beiscsi_hba *phba,
 }
 
 static unsigned int
-hwi_update_async_writables(struct hwi_async_pdu_context *pasync_ctx,
-                          unsigned int is_header, unsigned int cq_index)
+hwi_update_async_writables(struct beiscsi_hba *phba,
+                           struct hwi_async_pdu_context *pasync_ctx,
+                           unsigned int is_header, unsigned int cq_index)
 {
        struct list_head *pbusy_list;
        struct async_pdu_handle *pasync_handle;
@@ -1463,9 +1580,10 @@ hwi_update_async_writables(struct hwi_async_pdu_context *pasync_ctx,
        }
 
        if (!writables) {
-               SE_DEBUG(DBG_LVL_1,
-                        "Duplicate notification received - index 0x%x!!\n",
-                        cq_index);
+               beiscsi_log(phba, KERN_ERR,
+                           BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
+                           "BM_%d : Duplicate notification received - index 0x%x!!\n",
+                           cq_index);
                WARN_ON(1);
        }
 
@@ -1616,8 +1734,8 @@ static void hwi_flush_default_pdu_buffer(struct beiscsi_hba *phba,
                                             pdpdu_cqe, &cq_index);
        BUG_ON(pasync_handle->is_header != 0);
        if (pasync_handle->consumed == 0)
-               hwi_update_async_writables(pasync_ctx, pasync_handle->is_header,
-                                          cq_index);
+               hwi_update_async_writables(phba, pasync_ctx,
+                                          pasync_handle->is_header, cq_index);
 
        hwi_free_async_msg(phba, pasync_handle->cri);
        hwi_post_async_buffers(phba, pasync_handle->is_header);
@@ -1745,8 +1863,9 @@ static void hwi_process_default_pdu_ring(struct beiscsi_conn *beiscsi_conn,
                                             pdpdu_cqe, &cq_index);
 
        if (pasync_handle->consumed == 0)
-               hwi_update_async_writables(pasync_ctx, pasync_handle->is_header,
-                                          cq_index);
+               hwi_update_async_writables(phba, pasync_ctx,
+                                          pasync_handle->is_header, cq_index);
+
        hwi_gather_async_pdu(beiscsi_conn, phba, pasync_handle);
        hwi_post_async_buffers(phba, pasync_handle->is_header);
 }
@@ -1774,9 +1893,10 @@ static void  beiscsi_process_mcc_isr(struct beiscsi_hba *phba)
                                beiscsi_async_link_state_process(phba,
                                (struct be_async_event_link_state *) mcc_compl);
                        else
-                               SE_DEBUG(DBG_LVL_1,
-                                       " Unsupported Async Event, flags"
-                                       " = 0x%08x\n", mcc_compl->flags);
+                               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_MBOX,
+                                           "BM_%d :  Unsupported Async Event, flags"
+                                           " = 0x%08x\n",
+                                           mcc_compl->flags);
                } else if (mcc_compl->flags & CQE_FLAGS_COMPLETED_MASK) {
                        be_mcc_compl_process_isr(&phba->ctrl, mcc_compl);
                        atomic_dec(&phba->ctrl.mcc_obj.q.used);
@@ -1801,6 +1921,7 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
        struct dmsg_cqe *dmsg;
        unsigned int num_processed = 0;
        unsigned int tot_nump = 0;
+       unsigned short code = 0, cid = 0;
        struct beiscsi_conn *beiscsi_conn;
        struct beiscsi_endpoint *beiscsi_ep;
        struct iscsi_endpoint *ep;
@@ -1814,10 +1935,11 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
               CQE_VALID_MASK) {
                be_dws_le_to_cpu(sol, sizeof(struct sol_cqe));
 
-               ep = phba->ep_array[(u32) ((sol->
-                                  dw[offsetof(struct amap_sol_cqe, cid) / 32] &
-                                  SOL_CID_MASK) >> 6) -
-                                  phba->fw_config.iscsi_cid_start];
+               cid = ((sol->dw[offsetof(struct amap_sol_cqe, cid)/32] &
+                     CQE_CID_MASK) >> 6);
+               code = (sol->dw[offsetof(struct amap_sol_cqe, code)/32] &
+                      CQE_CODE_MASK);
+               ep = phba->ep_array[cid - phba->fw_config.iscsi_cid_start];
 
                beiscsi_ep = ep->dd_data;
                beiscsi_conn = beiscsi_ep->conn;
@@ -1829,32 +1951,41 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
                        num_processed = 0;
                }
 
-               switch ((u32) sol->dw[offsetof(struct amap_sol_cqe, code) /
-                       32] & CQE_CODE_MASK) {
+               switch (code) {
                case SOL_CMD_COMPLETE:
                        hwi_complete_cmd(beiscsi_conn, phba, sol);
                        break;
                case DRIVERMSG_NOTIFY:
-                       SE_DEBUG(DBG_LVL_8, "Received DRIVERMSG_NOTIFY\n");
+                       beiscsi_log(phba, KERN_INFO,
+                                   BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
+                                   "BM_%d : Received DRIVERMSG_NOTIFY\n");
+
                        dmsg = (struct dmsg_cqe *)sol;
                        hwi_complete_drvr_msgs(beiscsi_conn, phba, sol);
                        break;
                case UNSOL_HDR_NOTIFY:
-                       SE_DEBUG(DBG_LVL_8, "Received UNSOL_HDR_ NOTIFY\n");
+                       beiscsi_log(phba, KERN_INFO,
+                                   BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
+                                   "BM_%d : Received UNSOL_HDR_ NOTIFY\n");
+
                        hwi_process_default_pdu_ring(beiscsi_conn, phba,
                                             (struct i_t_dpdu_cqe *)sol);
                        break;
                case UNSOL_DATA_NOTIFY:
-                       SE_DEBUG(DBG_LVL_8, "Received UNSOL_DATA_NOTIFY\n");
+                       beiscsi_log(phba, KERN_INFO,
+                                   BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
+                                   "BM_%d : Received UNSOL_DATA_NOTIFY\n");
+
                        hwi_process_default_pdu_ring(beiscsi_conn, phba,
                                             (struct i_t_dpdu_cqe *)sol);
                        break;
                case CXN_INVALIDATE_INDEX_NOTIFY:
                case CMD_INVALIDATED_NOTIFY:
                case CXN_INVALIDATE_NOTIFY:
-                       SE_DEBUG(DBG_LVL_1,
-                                "Ignoring CQ Error notification for cmd/cxn"
-                                "invalidate\n");
+                       beiscsi_log(phba, KERN_ERR,
+                                   BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
+                                   "BM_%d : Ignoring CQ Error notification for"
+                                   " cmd/cxn invalidate\n");
                        break;
                case SOL_CMD_KILLED_DATA_DIGEST_ERR:
                case CMD_KILLED_INVALID_STATSN_RCVD:
@@ -1864,17 +1995,16 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
                case CMD_CXN_KILLED_ITT_INVALID:
                case CMD_CXN_KILLED_SEQ_OUTOFORDER:
                case CMD_CXN_KILLED_INVALID_DATASN_RCVD:
-                       SE_DEBUG(DBG_LVL_1,
-                                "CQ Error notification for cmd.. "
-                                "code %d cid 0x%x\n",
-                                sol->dw[offsetof(struct amap_sol_cqe, code) /
-                                32] & CQE_CODE_MASK,
-                                (sol->dw[offsetof(struct amap_sol_cqe, cid) /
-                                32] & SOL_CID_MASK));
+                       beiscsi_log(phba, KERN_ERR,
+                                   BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
+                                   "BM_%d : CQ Error notification for cmd.. "
+                                   "code %d cid 0x%x\n", code, cid);
                        break;
                case UNSOL_DATA_DIGEST_ERROR_NOTIFY:
-                       SE_DEBUG(DBG_LVL_1,
-                                "Digest error on def pdu ring, dropping..\n");
+                       beiscsi_log(phba, KERN_ERR,
+                                   BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
+                                   "BM_%d : Digest error on def pdu ring,"
+                                   " dropping..\n");
                        hwi_flush_default_pdu_buffer(phba, beiscsi_conn,
                                             (struct i_t_dpdu_cqe *) sol);
                        break;
@@ -1892,33 +2022,31 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
                case CXN_KILLED_OVER_RUN_RESIDUAL:
                case CXN_KILLED_UNDER_RUN_RESIDUAL:
                case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN:
-                       SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset CID "
-                                "0x%x...\n",
-                                sol->dw[offsetof(struct amap_sol_cqe, code) /
-                                32] & CQE_CODE_MASK,
-                                (sol->dw[offsetof(struct amap_sol_cqe, cid) /
-                                32] & CQE_CID_MASK));
-                       iscsi_conn_failure(beiscsi_conn->conn,
-                                          ISCSI_ERR_CONN_FAILED);
+                       beiscsi_log(phba, KERN_ERR,
+                                   BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
+                                   "BM_%d : CQ Error %d, reset CID 0x%x...\n",
+                                   code, cid);
+                       if (beiscsi_conn)
+                               iscsi_conn_failure(beiscsi_conn->conn,
+                                                  ISCSI_ERR_CONN_FAILED);
                        break;
                case CXN_KILLED_RST_SENT:
                case CXN_KILLED_RST_RCVD:
-                       SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset"
-                               "received/sent on CID 0x%x...\n",
-                                sol->dw[offsetof(struct amap_sol_cqe, code) /
-                                32] & CQE_CODE_MASK,
-                                (sol->dw[offsetof(struct amap_sol_cqe, cid) /
-                                32] & CQE_CID_MASK));
-                       iscsi_conn_failure(beiscsi_conn->conn,
-                                          ISCSI_ERR_CONN_FAILED);
+                       beiscsi_log(phba, KERN_ERR,
+                                   BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
+                                   "BM_%d : CQ Error %d, reset"
+                                   "received/sent on CID 0x%x...\n",
+                                   code, cid);
+                       if (beiscsi_conn)
+                               iscsi_conn_failure(beiscsi_conn->conn,
+                                                  ISCSI_ERR_CONN_FAILED);
                        break;
                default:
-                       SE_DEBUG(DBG_LVL_1, "CQ Error Invalid code= %d "
-                                "received on CID 0x%x...\n",
-                                sol->dw[offsetof(struct amap_sol_cqe, code) /
-                                32] & CQE_CODE_MASK,
-                                (sol->dw[offsetof(struct amap_sol_cqe, cid) /
-                                32] & CQE_CID_MASK));
+                       beiscsi_log(phba, KERN_ERR,
+                                   BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
+                                   "BM_%d : CQ Error Invalid code= %d "
+                                   "received on CID 0x%x...\n",
+                                   code, cid);
                        break;
                }
 
@@ -1977,7 +2105,10 @@ static int be_iopoll(struct blk_iopoll *iop, int budget)
        if (ret < budget) {
                phba = pbe_eq->phba;
                blk_iopoll_complete(iop);
-               SE_DEBUG(DBG_LVL_8, "rearm pbe_eq->q.id =%d\n", pbe_eq->q.id);
+               beiscsi_log(phba, KERN_INFO,
+                           BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
+                           "BM_%d : rearm pbe_eq->q.id =%d\n",
+                           pbe_eq->q.id);
                hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
        }
        return ret;
@@ -2348,16 +2479,16 @@ static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
                                kzalloc(sizeof(struct wrb_handle *) *
                                        phba->params.wrbs_per_cxn, GFP_KERNEL);
                if (!pwrb_context->pwrb_handle_base) {
-                       shost_printk(KERN_ERR, phba->shost,
-                                       "Mem Alloc Failed. Failing to load\n");
+                       beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                                   "BM_%d : Mem Alloc Failed. Failing to load\n");
                        goto init_wrb_hndl_failed;
                }
                pwrb_context->pwrb_handle_basestd =
                                kzalloc(sizeof(struct wrb_handle *) *
                                        phba->params.wrbs_per_cxn, GFP_KERNEL);
                if (!pwrb_context->pwrb_handle_basestd) {
-                       shost_printk(KERN_ERR, phba->shost,
-                                       "Mem Alloc Failed. Failing to load\n");
+                       beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                                   "BM_%d : Mem Alloc Failed. Failing to load\n");
                        goto init_wrb_hndl_failed;
                }
                if (!num_cxn_wrbh) {
@@ -2438,12 +2569,13 @@ static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
        mem_descr = (struct be_mem_descriptor *)phba->init_mem;
        mem_descr += HWI_MEM_ASYNC_HEADER_BUF;
        if (mem_descr->mem_array[0].virtual_address) {
-               SE_DEBUG(DBG_LVL_8,
-                        "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_BUF"
-                        "va=%p\n", mem_descr->mem_array[0].virtual_address);
+               beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+                           "BM_%d : hwi_init_async_pdu_ctx"
+                           " HWI_MEM_ASYNC_HEADER_BUF va=%p\n",
+                           mem_descr->mem_array[0].virtual_address);
        } else
-               shost_printk(KERN_WARNING, phba->shost,
-                            "No Virtual address\n");
+               beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
+                           "BM_%d : No Virtual address\n");
 
        pasync_ctx->async_header.va_base =
                        mem_descr->mem_array[0].virtual_address;
@@ -2454,24 +2586,27 @@ static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
        mem_descr = (struct be_mem_descriptor *)phba->init_mem;
        mem_descr += HWI_MEM_ASYNC_HEADER_RING;
        if (mem_descr->mem_array[0].virtual_address) {
-               SE_DEBUG(DBG_LVL_8,
-                        "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_RING"
-                        "va=%p\n", mem_descr->mem_array[0].virtual_address);
+               beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+                           "BM_%d : hwi_init_async_pdu_ctx"
+                           " HWI_MEM_ASYNC_HEADER_RING va=%p\n",
+                           mem_descr->mem_array[0].virtual_address);
        } else
-               shost_printk(KERN_WARNING, phba->shost,
-                           "No Virtual address\n");
+               beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
+                           "BM_%d : No Virtual address\n");
+
        pasync_ctx->async_header.ring_base =
                        mem_descr->mem_array[0].virtual_address;
 
        mem_descr = (struct be_mem_descriptor *)phba->init_mem;
        mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE;
        if (mem_descr->mem_array[0].virtual_address) {
-               SE_DEBUG(DBG_LVL_8,
-                        "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_HANDLE"
-                        "va=%p\n", mem_descr->mem_array[0].virtual_address);
+               beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+                           "BM_%d : hwi_init_async_pdu_ctx"
+                           " HWI_MEM_ASYNC_HEADER_HANDLE va=%p\n",
+                           mem_descr->mem_array[0].virtual_address);
        } else
-               shost_printk(KERN_WARNING, phba->shost,
-                           "No Virtual address\n");
+               beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
+                           "BM_%d : No Virtual address\n");
 
        pasync_ctx->async_header.handle_base =
                        mem_descr->mem_array[0].virtual_address;
@@ -2482,12 +2617,13 @@ static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
        mem_descr = (struct be_mem_descriptor *)phba->init_mem;
        mem_descr += HWI_MEM_ASYNC_DATA_RING;
        if (mem_descr->mem_array[0].virtual_address) {
-               SE_DEBUG(DBG_LVL_8,
-                        "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_RING"
-                        "va=%p\n", mem_descr->mem_array[0].virtual_address);
+               beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+                           "BM_%d : hwi_init_async_pdu_ctx"
+                           " HWI_MEM_ASYNC_DATA_RING va=%p\n",
+                           mem_descr->mem_array[0].virtual_address);
        } else
-               shost_printk(KERN_WARNING, phba->shost,
-                            "No Virtual address\n");
+               beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
+                           "BM_%d : No Virtual address\n");
 
        pasync_ctx->async_data.ring_base =
                        mem_descr->mem_array[0].virtual_address;
@@ -2495,8 +2631,8 @@ static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
        mem_descr = (struct be_mem_descriptor *)phba->init_mem;
        mem_descr += HWI_MEM_ASYNC_DATA_HANDLE;
        if (!mem_descr->mem_array[0].virtual_address)
-               shost_printk(KERN_WARNING, phba->shost,
-                           "No Virtual address\n");
+               beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
+                           "BM_%d : No Virtual address\n");
 
        pasync_ctx->async_data.handle_base =
                        mem_descr->mem_array[0].virtual_address;
@@ -2511,12 +2647,14 @@ static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
        mem_descr = (struct be_mem_descriptor *)phba->init_mem;
        mem_descr += HWI_MEM_ASYNC_DATA_BUF;
        if (mem_descr->mem_array[0].virtual_address) {
-               SE_DEBUG(DBG_LVL_8,
-                        "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_BUF"
-                        "va=%p\n", mem_descr->mem_array[0].virtual_address);
+               beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+                           "BM_%d : hwi_init_async_pdu_ctx"
+                           " HWI_MEM_ASYNC_DATA_BUF va=%p\n",
+                           mem_descr->mem_array[0].virtual_address);
        } else
-               shost_printk(KERN_WARNING, phba->shost,
-                           "No Virtual address\n");
+               beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
+                           "BM_%d : No Virtual address\n");
+
        idx = 0;
        pasync_ctx->async_data.va_base =
                        mem_descr->mem_array[idx].virtual_address;
@@ -2657,7 +2795,7 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba,
                             struct hwi_context_memory *phwi_context)
 {
        unsigned int i, num_eq_pages;
-       int ret, eq_for_mcc;
+       int ret = 0, eq_for_mcc;
        struct be_queue_info *eq;
        struct be_dma_mem *mem;
        void *eq_vaddress;
@@ -2684,8 +2822,8 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba,
                ret = be_fill_queue(eq, phba->params.num_eq_entries,
                                    sizeof(struct be_eq_entry), eq_vaddress);
                if (ret) {
-                       shost_printk(KERN_ERR, phba->shost,
-                                    "be_fill_queue Failed for EQ\n");
+                       beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                                   "BM_%d : be_fill_queue Failed for EQ\n");
                        goto create_eq_error;
                }
 
@@ -2693,12 +2831,15 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba,
                ret = beiscsi_cmd_eq_create(&phba->ctrl, eq,
                                            phwi_context->cur_eqd);
                if (ret) {
-                       shost_printk(KERN_ERR, phba->shost,
-                                    "beiscsi_cmd_eq_create"
-                                    "Failedfor EQ\n");
+                       beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                                   "BM_%d : beiscsi_cmd_eq_create"
+                                   "Failed for EQ\n");
                        goto create_eq_error;
                }
-               SE_DEBUG(DBG_LVL_8, "eqid = %d\n", phwi_context->be_eq[i].q.id);
+
+               beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+                           "BM_%d : eqid = %d\n",
+                           phwi_context->be_eq[i].q.id);
        }
        return 0;
 create_eq_error:
@@ -2717,7 +2858,7 @@ static int beiscsi_create_cqs(struct beiscsi_hba *phba,
                             struct hwi_context_memory *phwi_context)
 {
        unsigned int i, num_cq_pages;
-       int ret;
+       int ret = 0;
        struct be_queue_info *cq, *eq;
        struct be_dma_mem *mem;
        struct be_eq_obj *pbe_eq;
@@ -2742,8 +2883,9 @@ static int beiscsi_create_cqs(struct beiscsi_hba *phba,
                ret = be_fill_queue(cq, phba->params.num_cq_entries,
                                    sizeof(struct sol_cqe), cq_vaddress);
                if (ret) {
-                       shost_printk(KERN_ERR, phba->shost,
-                                    "be_fill_queue Failed for ISCSI CQ\n");
+                       beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                                   "BM_%d : be_fill_queue Failed "
+                                   "for ISCSI CQ\n");
                        goto create_cq_error;
                }
 
@@ -2751,14 +2893,14 @@ static int beiscsi_create_cqs(struct beiscsi_hba *phba,
                ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false,
                                            false, 0);
                if (ret) {
-                       shost_printk(KERN_ERR, phba->shost,
-                                    "beiscsi_cmd_eq_create"
-                                    "Failed for ISCSI CQ\n");
+                       beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                                   "BM_%d : beiscsi_cmd_eq_create"
+                                   "Failed for ISCSI CQ\n");
                        goto create_cq_error;
                }
-               SE_DEBUG(DBG_LVL_8, "iscsi cq_id is %d for eq_id %d\n",
-                                                cq->id, eq->id);
-               SE_DEBUG(DBG_LVL_8, "ISCSI CQ CREATED\n");
+               beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+                           "BM_%d : iscsi cq_id is %d for eq_id %d\n"
+                           "iSCSI CQ CREATED\n", cq->id, eq->id);
        }
        return 0;
 
@@ -2799,8 +2941,8 @@ beiscsi_create_def_hdr(struct beiscsi_hba *phba,
                            sizeof(struct phys_addr),
                            sizeof(struct phys_addr), dq_vaddress);
        if (ret) {
-               shost_printk(KERN_ERR, phba->shost,
-                            "be_fill_queue Failed for DEF PDU HDR\n");
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                           "BM_%d : be_fill_queue Failed for DEF PDU HDR\n");
                return ret;
        }
        mem->dma = (unsigned long)mem_descr->mem_array[idx].
@@ -2809,13 +2951,15 @@ beiscsi_create_def_hdr(struct beiscsi_hba *phba,
                                              def_pdu_ring_sz,
                                              phba->params.defpdu_hdr_sz);
        if (ret) {
-               shost_printk(KERN_ERR, phba->shost,
-                            "be_cmd_create_default_pdu_queue Failed DEFHDR\n");
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                           "BM_%d : be_cmd_create_default_pdu_queue Failed DEFHDR\n");
                return ret;
        }
        phwi_ctrlr->default_pdu_hdr.id = phwi_context->be_def_hdrq.id;
-       SE_DEBUG(DBG_LVL_8, "iscsi def pdu id is %d\n",
-                phwi_context->be_def_hdrq.id);
+       beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+                   "BM_%d : iscsi def pdu id is %d\n",
+                   phwi_context->be_def_hdrq.id);
+
        hwi_post_async_buffers(phba, 1);
        return 0;
 }
@@ -2844,8 +2988,8 @@ beiscsi_create_def_data(struct beiscsi_hba *phba,
                            sizeof(struct phys_addr),
                            sizeof(struct phys_addr), dq_vaddress);
        if (ret) {
-               shost_printk(KERN_ERR, phba->shost,
-                            "be_fill_queue Failed for DEF PDU DATA\n");
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                           "BM_%d : be_fill_queue Failed for DEF PDU DATA\n");
                return ret;
        }
        mem->dma = (unsigned long)mem_descr->mem_array[idx].
@@ -2854,16 +2998,20 @@ beiscsi_create_def_data(struct beiscsi_hba *phba,
                                              def_pdu_ring_sz,
                                              phba->params.defpdu_data_sz);
        if (ret) {
-               shost_printk(KERN_ERR, phba->shost,
-                            "be_cmd_create_default_pdu_queue Failed"
-                            " for DEF PDU DATA\n");
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                           "BM_%d be_cmd_create_default_pdu_queue"
+                           " Failed for DEF PDU DATA\n");
                return ret;
        }
        phwi_ctrlr->default_pdu_data.id = phwi_context->be_def_dataq.id;
-       SE_DEBUG(DBG_LVL_8, "iscsi def data id is %d\n",
-                phwi_context->be_def_dataq.id);
+       beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+                   "BM_%d : iscsi def data id is %d\n",
+                   phwi_context->be_def_dataq.id);
+
        hwi_post_async_buffers(phba, 0);
-       SE_DEBUG(DBG_LVL_8, "DEFAULT PDU DATA RING CREATED\n");
+       beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+                   "BM_%d : DEFAULT PDU DATA RING CREATED\n");
+
        return 0;
 }
 
@@ -2889,13 +3037,14 @@ beiscsi_post_pages(struct beiscsi_hba *phba)
                                                (pm_arr->size / PAGE_SIZE));
                page_offset += pm_arr->size / PAGE_SIZE;
                if (status != 0) {
-                       shost_printk(KERN_ERR, phba->shost,
-                                    "post sgl failed.\n");
+                       beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                                   "BM_%d : post sgl failed.\n");
                        return status;
                }
                pm_arr++;
        }
-       SE_DEBUG(DBG_LVL_8, "POSTED PAGES\n");
+       beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+                   "BM_%d : POSTED PAGES\n");
        return 0;
 }
 
@@ -2945,8 +3094,8 @@ beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
        pwrb_arr = kmalloc(sizeof(*pwrb_arr) * phba->params.cxns_per_ctrl,
                           GFP_KERNEL);
        if (!pwrb_arr) {
-               shost_printk(KERN_ERR, phba->shost,
-                            "Memory alloc failed in create wrb ring.\n");
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                           "BM_%d : Memory alloc failed in create wrb ring.\n");
                return -ENOMEM;
        }
        wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
@@ -2990,8 +3139,8 @@ beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
                status = be_cmd_wrbq_create(&phba->ctrl, &sgl,
                                            &phwi_context->be_wrbq[i]);
                if (status != 0) {
-                       shost_printk(KERN_ERR, phba->shost,
-                                    "wrbq create failed.");
+                       beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                                   "BM_%d : wrbq create failed.");
                        kfree(pwrb_arr);
                        return status;
                }
@@ -3127,7 +3276,6 @@ static int find_num_cpus(void)
        if (num_cpus >= MAX_CPUS)
                num_cpus = MAX_CPUS - 1;
 
-       SE_DEBUG(DBG_LVL_8, "num_cpus = %d\n", num_cpus);
        return num_cpus;
 }
 
@@ -3150,7 +3298,8 @@ static int hwi_init_port(struct beiscsi_hba *phba)
 
        status = beiscsi_create_eqs(phba, phwi_context);
        if (status != 0) {
-               shost_printk(KERN_ERR, phba->shost, "EQ not created\n");
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                           "BM_%d : EQ not created\n");
                goto error;
        }
 
@@ -3160,51 +3309,55 @@ static int hwi_init_port(struct beiscsi_hba *phba)
 
        status = mgmt_check_supported_fw(ctrl, phba);
        if (status != 0) {
-               shost_printk(KERN_ERR, phba->shost,
-                            "Unsupported fw version\n");
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                           "BM_%d : Unsupported fw version\n");
                goto error;
        }
 
        status = beiscsi_create_cqs(phba, phwi_context);
        if (status != 0) {
-               shost_printk(KERN_ERR, phba->shost, "CQ not created\n");
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                           "BM_%d : CQ not created\n");
                goto error;
        }
 
        status = beiscsi_create_def_hdr(phba, phwi_context, phwi_ctrlr,
                                        def_pdu_ring_sz);
        if (status != 0) {
-               shost_printk(KERN_ERR, phba->shost,
-                            "Default Header not created\n");
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                           "BM_%d : Default Header not created\n");
                goto error;
        }
 
        status = beiscsi_create_def_data(phba, phwi_context,
                                         phwi_ctrlr, def_pdu_ring_sz);
        if (status != 0) {
-               shost_printk(KERN_ERR, phba->shost,
-                            "Default Data not created\n");
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                           "BM_%d : Default Data not created\n");
                goto error;
        }
 
        status = beiscsi_post_pages(phba);
        if (status != 0) {
-               shost_printk(KERN_ERR, phba->shost, "Post SGL Pages Failed\n");
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                           "BM_%d : Post SGL Pages Failed\n");
                goto error;
        }
 
        status = beiscsi_create_wrb_rings(phba, phwi_context, phwi_ctrlr);
        if (status != 0) {
-               shost_printk(KERN_ERR, phba->shost,
-                            "WRB Rings not created\n");
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                           "BM_%d : WRB Rings not created\n");
                goto error;
        }
 
-       SE_DEBUG(DBG_LVL_8, "hwi_init_port success\n");
+       beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+                   "BM_%d : hwi_init_port success\n");
        return 0;
 
 error:
-       shost_printk(KERN_ERR, phba->shost, "hwi_init_port failed");
+       beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                   "BM_%d : hwi_init_port failed");
        hwi_cleanup(phba);
        return status;
 }
@@ -3217,12 +3370,13 @@ static int hwi_init_controller(struct beiscsi_hba *phba)
        if (1 == phba->init_mem[HWI_MEM_ADDN_CONTEXT].num_elements) {
                phwi_ctrlr->phwi_ctxt = (struct hwi_context_memory *)phba->
                    init_mem[HWI_MEM_ADDN_CONTEXT].mem_array[0].virtual_address;
-               SE_DEBUG(DBG_LVL_8, " phwi_ctrlr->phwi_ctxt=%p\n",
-                        phwi_ctrlr->phwi_ctxt);
+               beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+                           "BM_%d :  phwi_ctrlr->phwi_ctxt=%p\n",
+                           phwi_ctrlr->phwi_ctxt);
        } else {
-               shost_printk(KERN_ERR, phba->shost,
-                            "HWI_MEM_ADDN_CONTEXT is more than one element."
-                            "Failing to load\n");
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                           "BM_%d : HWI_MEM_ADDN_CONTEXT is more "
+                           "than one element.Failing to load\n");
                return -ENOMEM;
        }
 
@@ -3232,8 +3386,9 @@ static int hwi_init_controller(struct beiscsi_hba *phba)
 
        hwi_init_async_pdu_ctx(phba);
        if (hwi_init_port(phba) != 0) {
-               shost_printk(KERN_ERR, phba->shost,
-                            "hwi_init_controller failed\n");
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                           "BM_%d : hwi_init_controller failed\n");
+
                return -ENOMEM;
        }
        return 0;
@@ -3268,15 +3423,18 @@ static int beiscsi_init_controller(struct beiscsi_hba *phba)
 
        ret = beiscsi_get_memory(phba);
        if (ret < 0) {
-               shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe -"
-                            "Failed in beiscsi_alloc_memory\n");
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                           "BM_%d : beiscsi_dev_probe -"
+                           "Failed in beiscsi_alloc_memory\n");
                return ret;
        }
 
        ret = hwi_init_controller(phba);
        if (ret)
                goto free_init;
-       SE_DEBUG(DBG_LVL_8, "Return success from beiscsi_init_controller");
+       beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+                   "BM_%d : Return success from beiscsi_init_controller");
+
        return 0;
 
 free_init:
@@ -3301,8 +3459,8 @@ static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
                                                 phba->params.ios_per_ctrl,
                                                 GFP_KERNEL);
                if (!phba->io_sgl_hndl_base) {
-                       shost_printk(KERN_ERR, phba->shost,
-                                    "Mem Alloc Failed. Failing to load\n");
+                       beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                                   "BM_%d : Mem Alloc Failed. Failing to load\n");
                        return -ENOMEM;
                }
                phba->eh_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
@@ -3311,14 +3469,14 @@ static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
                                                 GFP_KERNEL);
                if (!phba->eh_sgl_hndl_base) {
                        kfree(phba->io_sgl_hndl_base);
-                       shost_printk(KERN_ERR, phba->shost,
-                                    "Mem Alloc Failed. Failing to load\n");
+                       beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                                   "BM_%d : Mem Alloc Failed. Failing to load\n");
                        return -ENOMEM;
                }
        } else {
-               shost_printk(KERN_ERR, phba->shost,
-                            "HWI_MEM_SGLH is more than one element."
-                            "Failing to load\n");
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                           "BM_%d : HWI_MEM_SGLH is more than one element."
+                           "Failing to load\n");
                return -ENOMEM;
        }
 
@@ -3344,15 +3502,18 @@ static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
                }
                idx++;
        }
-       SE_DEBUG(DBG_LVL_8,
-                "phba->io_sgl_hndl_avbl=%d"
-                "phba->eh_sgl_hndl_avbl=%d\n",
-                phba->io_sgl_hndl_avbl,
-                phba->eh_sgl_hndl_avbl);
+       beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+                   "BM_%d : phba->io_sgl_hndl_avbl=%d"
+                   "phba->eh_sgl_hndl_avbl=%d\n",
+                   phba->io_sgl_hndl_avbl,
+                   phba->eh_sgl_hndl_avbl);
+
        mem_descr_sg = phba->init_mem;
        mem_descr_sg += HWI_MEM_SGE;
-       SE_DEBUG(DBG_LVL_8, "\n mem_descr_sg->num_elements=%d\n",
-                mem_descr_sg->num_elements);
+       beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+                   "\n BM_%d : mem_descr_sg->num_elements=%d\n",
+                   mem_descr_sg->num_elements);
+
        arr_index = 0;
        idx = 0;
        while (idx < mem_descr_sg->num_elements) {
@@ -3390,17 +3551,17 @@ static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
        phba->cid_array = kzalloc(sizeof(void *) * phba->params.cxns_per_ctrl,
                                  GFP_KERNEL);
        if (!phba->cid_array) {
-               shost_printk(KERN_ERR, phba->shost,
-                            "Failed to allocate memory in "
-                            "hba_setup_cid_tbls\n");
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                           "BM_%d : Failed to allocate memory in "
+                           "hba_setup_cid_tbls\n");
                return -ENOMEM;
        }
        phba->ep_array = kzalloc(sizeof(struct iscsi_endpoint *) *
                                 phba->params.cxns_per_ctrl * 2, GFP_KERNEL);
        if (!phba->ep_array) {
-               shost_printk(KERN_ERR, phba->shost,
-                            "Failed to allocate memory in "
-                            "hba_setup_cid_tbls\n");
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                           "BM_%d : Failed to allocate memory in "
+                           "hba_setup_cid_tbls\n");
                kfree(phba->cid_array);
                return -ENOMEM;
        }
@@ -3433,18 +3594,22 @@ static void hwi_enable_intr(struct beiscsi_hba *phba)
        enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        if (!enabled) {
                reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
-               SE_DEBUG(DBG_LVL_8, "reg =x%08x addr=%p\n", reg, addr);
+               beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+                           "BM_%d : reg =x%08x addr=%p\n", reg, addr);
                iowrite32(reg, addr);
        }
 
        if (!phba->msix_enabled) {
                eq = &phwi_context->be_eq[0].q;
-               SE_DEBUG(DBG_LVL_8, "eq->id=%d\n", eq->id);
+               beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+                           "BM_%d : eq->id=%d\n", eq->id);
+
                hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
        } else {
                for (i = 0; i <= phba->num_cpus; i++) {
                        eq = &phwi_context->be_eq[i].q;
-                       SE_DEBUG(DBG_LVL_8, "eq->id=%d\n", eq->id);
+                       beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+                                   "BM_%d : eq->id=%d\n", eq->id);
                        hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
                }
        }
@@ -3462,64 +3627,60 @@ static void hwi_disable_intr(struct beiscsi_hba *phba)
                reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
                iowrite32(reg, addr);
        } else
-               shost_printk(KERN_WARNING, phba->shost,
-                            "In hwi_disable_intr, Already Disabled\n");
+               beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
+                           "BM_%d : In hwi_disable_intr, Already Disabled\n");
 }
 
+/**
+ * beiscsi_get_boot_info()- Get the boot session info
+ * @phba: The device priv structure instance
+ *
+ * Get the boot target info and store in driver priv structure
+ *
+ * return values
+ *     Success: 0
+ *     Failure: Non-Zero Value
+ **/
 static int beiscsi_get_boot_info(struct beiscsi_hba *phba)
 {
-       struct be_cmd_get_boot_target_resp *boot_resp;
        struct be_cmd_get_session_resp *session_resp;
        struct be_mcc_wrb *wrb;
        struct be_dma_mem nonemb_cmd;
        unsigned int tag, wrb_num;
        unsigned short status, extd_status;
+       unsigned int s_handle;
        struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
        int ret = -ENOMEM;
 
-       tag = mgmt_get_boot_target(phba);
-       if (!tag) {
-               SE_DEBUG(DBG_LVL_1, "beiscsi_get_boot_info Failed\n");
-               return -EAGAIN;
-       } else
-               wait_event_interruptible(phba->ctrl.mcc_wait[tag],
-                                        phba->ctrl.mcc_numtag[tag]);
-
-       wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
-       extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
-       status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
-       if (status || extd_status) {
-               SE_DEBUG(DBG_LVL_1, "beiscsi_get_boot_info Failed"
-                                   " status = %d extd_status = %d\n",
-                                   status, extd_status);
-               free_mcc_tag(&phba->ctrl, tag);
-               return -EBUSY;
-       }
-       wrb = queue_get_wrb(mccq, wrb_num);
-       free_mcc_tag(&phba->ctrl, tag);
-       boot_resp = embedded_payload(wrb);
-
-       if (boot_resp->boot_session_handle < 0) {
-               shost_printk(KERN_INFO, phba->shost, "No Boot Session.\n");
-               return -ENXIO;
+       /* Get the session handle of the boot target */
+       ret = be_mgmt_get_boot_shandle(phba, &s_handle);
+       if (ret) {
+               beiscsi_log(phba, KERN_ERR,
+                           BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
+                           "BM_%d : No boot session\n");
+               return ret;
        }
-
        nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
                                sizeof(*session_resp),
                                &nonemb_cmd.dma);
        if (nonemb_cmd.va == NULL) {
-               SE_DEBUG(DBG_LVL_1,
-                        "Failed to allocate memory for"
-                        "beiscsi_get_session_info\n");
+               beiscsi_log(phba, KERN_ERR,
+                           BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
+                           "BM_%d : Failed to allocate memory for"
+                           "beiscsi_get_session_info\n");
+
                return -ENOMEM;
        }
 
        memset(nonemb_cmd.va, 0, sizeof(*session_resp));
-       tag = mgmt_get_session_info(phba, boot_resp->boot_session_handle,
+       tag = mgmt_get_session_info(phba, s_handle,
                                    &nonemb_cmd);
        if (!tag) {
-               SE_DEBUG(DBG_LVL_1, "beiscsi_get_session_info"
-                       " Failed\n");
+               beiscsi_log(phba, KERN_ERR,
+                           BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
+                           "BM_%d : beiscsi_get_session_info"
+                           " Failed\n");
+
                goto boot_freemem;
        } else
                wait_event_interruptible(phba->ctrl.mcc_wait[tag],
@@ -3529,9 +3690,12 @@ static int beiscsi_get_boot_info(struct beiscsi_hba *phba)
        extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
        status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
        if (status || extd_status) {
-               SE_DEBUG(DBG_LVL_1, "beiscsi_get_session_info Failed"
-                                   " status = %d extd_status = %d\n",
-                                   status, extd_status);
+               beiscsi_log(phba, KERN_ERR,
+                           BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
+                           "BM_%d : beiscsi_get_session_info Failed"
+                           " status = %d extd_status = %d\n",
+                           status, extd_status);
+
                free_mcc_tag(&phba->ctrl, tag);
                goto boot_freemem;
        }
@@ -3611,22 +3775,22 @@ static int beiscsi_init_port(struct beiscsi_hba *phba)
 
        ret = beiscsi_init_controller(phba);
        if (ret < 0) {
-               shost_printk(KERN_ERR, phba->shost,
-                            "beiscsi_dev_probe - Failed in"
-                            "beiscsi_init_controller\n");
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                           "BM_%d : beiscsi_dev_probe - Failed in"
+                           "beiscsi_init_controller\n");
                return ret;
        }
        ret = beiscsi_init_sgl_handle(phba);
        if (ret < 0) {
-               shost_printk(KERN_ERR, phba->shost,
-                            "beiscsi_dev_probe - Failed in"
-                            "beiscsi_init_sgl_handle\n");
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                           "BM_%d : beiscsi_dev_probe - Failed in"
+                           "beiscsi_init_sgl_handle\n");
                goto do_cleanup_ctrlr;
        }
 
        if (hba_setup_cid_tbls(phba)) {
-               shost_printk(KERN_ERR, phba->shost,
-                            "Failed in hba_setup_cid_tbls\n");
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                           "BM_%d : Failed in hba_setup_cid_tbls\n");
                kfree(phba->io_sgl_hndl_base);
                kfree(phba->eh_sgl_hndl_base);
                goto do_cleanup_ctrlr;
@@ -3678,8 +3842,8 @@ static void beiscsi_clean_port(struct beiscsi_hba *phba)
 
        mgmt_status = mgmt_epfw_cleanup(phba, CMD_CONNECTION_CHUTE_0);
        if (mgmt_status)
-               shost_printk(KERN_WARNING, phba->shost,
-                            "mgmt_epfw_cleanup FAILED\n");
+               beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
+                           "BM_%d : mgmt_epfw_cleanup FAILED\n");
 
        hwi_purge_eq(phba);
        hwi_cleanup(phba);
@@ -3960,7 +4124,9 @@ free_hndls:
        pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
                      io_task->bhs_pa.u.a64.address);
        io_task->cmd_bhs = NULL;
-       SE_DEBUG(DBG_LVL_1, "Alloc of SGL_ICD Failed\n");
+       beiscsi_log(phba, KERN_ERR,
+                   BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
+                   "BM_%d : Alloc of SGL_ICD Failed\n");
        return -ENOMEM;
 }
 
@@ -3981,15 +4147,6 @@ static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
        io_task->bhs_len = sizeof(struct be_cmd_bhs);
 
        if (writedir) {
-               memset(&io_task->cmd_bhs->iscsi_data_pdu, 0, 48);
-               AMAP_SET_BITS(struct amap_pdu_data_out, itt,
-                             &io_task->cmd_bhs->iscsi_data_pdu,
-                             (unsigned int)io_task->cmd_bhs->iscsi_hdr.itt);
-               AMAP_SET_BITS(struct amap_pdu_data_out, opcode,
-                             &io_task->cmd_bhs->iscsi_data_pdu,
-                             ISCSI_OPCODE_SCSI_DATA_OUT);
-               AMAP_SET_BITS(struct amap_pdu_data_out, final_bit,
-                             &io_task->cmd_bhs->iscsi_data_pdu, 1);
                AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
                              INI_WR_CMD);
                AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
@@ -3998,9 +4155,6 @@ static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
                              INI_RD_CMD);
                AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
        }
-       memcpy(&io_task->cmd_bhs->iscsi_data_pdu.
-              dw[offsetof(struct amap_pdu_data_out, lun) / 32],
-              &io_task->cmd_bhs->iscsi_hdr.lun, sizeof(struct scsi_lun));
 
        AMAP_SET_BITS(struct amap_iscsi_wrb, lun, pwrb,
                      cpu_to_be16(*(unsigned short *)
@@ -4090,8 +4244,10 @@ static int beiscsi_mtask(struct iscsi_task *task)
                break;
 
        default:
-               SE_DEBUG(DBG_LVL_1, "opcode =%d Not supported\n",
-                        task->hdr->opcode & ISCSI_OPCODE_MASK);
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+                           "BM_%d : opcode =%d Not supported\n",
+                           task->hdr->opcode & ISCSI_OPCODE_MASK);
+
                return -EINVAL;
        }
 
@@ -4123,17 +4279,22 @@ static int beiscsi_task_xmit(struct iscsi_task *task)
        io_task->scsi_cmnd = sc;
        num_sg = scsi_dma_map(sc);
        if (num_sg < 0) {
-               SE_DEBUG(DBG_LVL_1, " scsi_dma_map Failed\n")
+               struct iscsi_conn *conn = task->conn;
+               struct beiscsi_hba *phba = NULL;
+
+               phba = ((struct beiscsi_conn *)conn->dd_data)->phba;
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_IO,
+                           "BM_%d : scsi_dma_map Failed\n");
+
                return num_sg;
        }
        xferlen = scsi_bufflen(sc);
        sg = scsi_sglist(sc);
-       if (sc->sc_data_direction == DMA_TO_DEVICE) {
+       if (sc->sc_data_direction == DMA_TO_DEVICE)
                writedir = 1;
-               SE_DEBUG(DBG_LVL_4, "task->imm_count=0x%08x\n",
-                        task->imm_count);
-       } else
+        else
                writedir = 0;
+
        return beiscsi_iotask(task, sg, num_sg, xferlen, writedir);
 }
 
@@ -4162,14 +4323,17 @@ static int beiscsi_bsg_request(struct bsg_job *job)
                                        job->request_payload.payload_len,
                                        &nonemb_cmd.dma);
                if (nonemb_cmd.va == NULL) {
-                       SE_DEBUG(DBG_LVL_1, "Failed to allocate memory for "
-                                "beiscsi_bsg_request\n");
+                       beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+                                   "BM_%d : Failed to allocate memory for "
+                                   "beiscsi_bsg_request\n");
                        return -EIO;
                }
                tag = mgmt_vendor_specific_fw_cmd(&phba->ctrl, phba, job,
                                                  &nonemb_cmd);
                if (!tag) {
-                       SE_DEBUG(DBG_LVL_1, "be_cmd_get_mac_addr Failed\n");
+                       beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+                                   "BM_%d : be_cmd_get_mac_addr Failed\n");
+
                        pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
                                            nonemb_cmd.va, nonemb_cmd.dma);
                        return -EAGAIN;
@@ -4191,22 +4355,31 @@ static int beiscsi_bsg_request(struct bsg_job *job)
                pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
                                    nonemb_cmd.va, nonemb_cmd.dma);
                if (status || extd_status) {
-                       SE_DEBUG(DBG_LVL_1, "be_cmd_get_mac_addr Failed"
-                                " status = %d extd_status = %d\n",
-                                status, extd_status);
+                       beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+                                   "BM_%d : be_cmd_get_mac_addr Failed"
+                                   " status = %d extd_status = %d\n",
+                                   status, extd_status);
+
                        return -EIO;
                }
                break;
 
        default:
-               SE_DEBUG(DBG_LVL_1, "Unsupported bsg command: 0x%x\n",
-                        bsg_req->msgcode);
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+                               "BM_%d : Unsupported bsg command: 0x%x\n",
+                               bsg_req->msgcode);
                break;
        }
 
        return rc;
 }
 
+void beiscsi_hba_attrs_init(struct beiscsi_hba *phba)
+{
+       /* Set the logging parameter */
+       beiscsi_log_enable_init(phba, beiscsi_log_enable);
+}
+
 static void beiscsi_quiesce(struct beiscsi_hba *phba)
 {
        struct hwi_controller *phwi_ctrlr;
@@ -4316,18 +4489,21 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
 
        ret = beiscsi_enable_pci(pcidev);
        if (ret < 0) {
-               dev_err(&pcidev->dev, "beiscsi_dev_probe-"
-                       " Failed to enable pci device\n");
+               dev_err(&pcidev->dev,
+                       "beiscsi_dev_probe - Failed to enable pci device\n");
                return ret;
        }
 
        phba = beiscsi_hba_alloc(pcidev);
        if (!phba) {
-               dev_err(&pcidev->dev, "beiscsi_dev_probe-"
-                       " Failed in beiscsi_hba_alloc\n");
+               dev_err(&pcidev->dev,
+                       "beiscsi_dev_probe - Failed in beiscsi_hba_alloc\n");
                goto disable_pci;
        }
 
+       /* Initialize Driver configuration Paramters */
+       beiscsi_hba_attrs_init(phba);
+
        switch (pcidev->device) {
        case BE_DEVICE_ID1:
        case OC_DEVICE_ID1:
@@ -4347,7 +4523,9 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
        else
                num_cpus = 1;
        phba->num_cpus = num_cpus;
-       SE_DEBUG(DBG_LVL_8, "num_cpus = %d\n", phba->num_cpus);
+       beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+                   "BM_%d : num_cpus = %d\n",
+                   phba->num_cpus);
 
        if (enable_msix) {
                beiscsi_msix_enable(phba);
@@ -4356,8 +4534,9 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
        }
        ret = be_ctrl_init(phba, pcidev);
        if (ret) {
-               shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
-                               "Failed in be_ctrl_init\n");
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                           "BM_%d : beiscsi_dev_probe-"
+                           "Failed in be_ctrl_init\n");
                goto hba_free;
        }
 
@@ -4366,19 +4545,19 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
                value = readl((void *)real_offset);
                if (value & 0x00010000) {
                        gcrashmode++;
-                       shost_printk(KERN_ERR, phba->shost,
-                               "Loading Driver in crashdump mode\n");
+                       beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                                   "BM_%d : Loading Driver in crashdump mode\n");
                        ret = beiscsi_cmd_reset_function(phba);
                        if (ret) {
-                               shost_printk(KERN_ERR, phba->shost,
-                                       "Reset Failed. Aborting Crashdump\n");
+                               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                                           "BM_%d : Reset Failed. Aborting Crashdump\n");
                                goto hba_free;
                        }
                        ret = be_chk_reset_complete(phba);
                        if (ret) {
-                               shost_printk(KERN_ERR, phba->shost,
-                                       "Failed to get out of reset."
-                                       "Aborting Crashdump\n");
+                               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                                           "BM_%d : Failed to get out of reset."
+                                           "Aborting Crashdump\n");
                                goto hba_free;
                        }
                } else {
@@ -4393,8 +4572,8 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
        spin_lock_init(&phba->isr_lock);
        ret = mgmt_get_fw_config(&phba->ctrl, phba);
        if (ret != 0) {
-               shost_printk(KERN_ERR, phba->shost,
-                            "Error getting fw config\n");
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                           "BM_%d : Error getting fw config\n");
                goto free_port;
        }
        phba->shost->max_id = phba->fw_config.iscsi_cid_count;
@@ -4402,8 +4581,9 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
        phba->shost->can_queue = phba->params.ios_per_ctrl;
        ret = beiscsi_init_port(phba);
        if (ret < 0) {
-               shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
-                            "Failed in beiscsi_init_port\n");
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                           "BM_%d : beiscsi_dev_probe-"
+                           "Failed in beiscsi_init_port\n");
                goto free_port;
        }
 
@@ -4420,8 +4600,9 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
                 phba->shost->host_no);
        phba->wq = alloc_workqueue(phba->wq_name, WQ_MEM_RECLAIM, 1);
        if (!phba->wq) {
-               shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
-                               "Failed to allocate work queue\n");
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                           "BM_%d : beiscsi_dev_probe-"
+                           "Failed to allocate work queue\n");
                goto free_twq;
        }
 
@@ -4439,8 +4620,9 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
        }
        ret = beiscsi_init_irqs(phba);
        if (ret < 0) {
-               shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
-                            "Failed to beiscsi_init_irqs\n");
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                           "BM_%d : beiscsi_dev_probe-"
+                           "Failed to beiscsi_init_irqs\n");
                goto free_blkenbld;
        }
        hwi_enable_intr(phba);
@@ -4450,11 +4632,13 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
                 * log error but continue, because we may not be using
                 * iscsi boot.
                 */
-               shost_printk(KERN_ERR, phba->shost, "Could not set up "
-                            "iSCSI boot info.\n");
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                           "BM_%d : Could not set up "
+                           "iSCSI boot info.\n");
 
        beiscsi_create_def_ifaces(phba);
-       SE_DEBUG(DBG_LVL_8, "\n\n\n SUCCESS - DRIVER LOADED\n\n\n");
+       beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+                   "\n\n\n BM_%d : SUCCESS - DRIVER LOADED\n\n\n");
        return 0;
 
 free_blkenbld:
@@ -4542,19 +4726,17 @@ static int __init beiscsi_module_init(void)
        beiscsi_scsi_transport =
                        iscsi_register_transport(&beiscsi_iscsi_transport);
        if (!beiscsi_scsi_transport) {
-               SE_DEBUG(DBG_LVL_1,
-                        "beiscsi_module_init - Unable to  register beiscsi"
-                        "transport.\n");
+               printk(KERN_ERR
+                      "beiscsi_module_init - Unable to  register beiscsi transport.\n");
                return -ENOMEM;
        }
-       SE_DEBUG(DBG_LVL_8, "In beiscsi_module_init, tt=%p\n",
-                &beiscsi_iscsi_transport);
+       printk(KERN_INFO "In beiscsi_module_init, tt=%p\n",
+              &beiscsi_iscsi_transport);
 
        ret = pci_register_driver(&beiscsi_pci_driver);
        if (ret) {
-               SE_DEBUG(DBG_LVL_1,
-                        "beiscsi_module_init - Unable to  register"
-                        "beiscsi pci driver.\n");
+               printk(KERN_ERR
+                      "beiscsi_module_init - Unable to  register beiscsi pci driver.\n");
                goto unregister_iscsi_transport;
        }
        return 0;
index 40fea6ec879c2df00432f826e8f425dfecc69c48..b8912263ef4e739de691727269a5c58761dfba3a 100644 (file)
@@ -24,6 +24,8 @@
 #include <linux/pci.h>
 #include <linux/if_ether.h>
 #include <linux/in.h>
+#include <linux/ctype.h>
+#include <linux/module.h>
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_device.h>
@@ -34,7 +36,7 @@
 
 #include "be.h"
 #define DRV_NAME               "be2iscsi"
-#define BUILD_STR              "4.2.162.0"
+#define BUILD_STR              "4.4.58.0"
 #define BE_NAME                        "Emulex OneConnect" \
                                "Open-iSCSI Driver version" BUILD_STR
 #define DRV_DESC               BE_NAME " " "Driver"
 #define MAX_CMD_SZ                     65536
 #define IIOC_SCSI_DATA                  0x05   /* Write Operation */
 
-#define DBG_LVL                                0x00000001
-#define DBG_LVL_1                      0x00000001
-#define DBG_LVL_2                      0x00000002
-#define DBG_LVL_3                      0x00000004
-#define DBG_LVL_4                      0x00000008
-#define DBG_LVL_5                      0x00000010
-#define DBG_LVL_6                      0x00000020
-#define DBG_LVL_7                      0x00000040
-#define DBG_LVL_8                      0x00000080
-
-#define SE_DEBUG(debug_mask, fmt, args...)             \
-do {                                                   \
-       if (debug_mask & DBG_LVL) {                     \
-               printk(KERN_ERR "(%s():%d):", __func__, __LINE__);\
-               printk(fmt, ##args);                    \
-       }                                               \
-} while (0);
+#define INVALID_SESS_HANDLE    0xFFFFFFFF
 
 #define BE_ADAPTER_UP          0x00000000
 #define BE_ADAPTER_LINK_DOWN   0x00000001
@@ -351,6 +337,8 @@ struct beiscsi_hba {
        struct mgmt_session_info boot_sess;
        struct invalidate_command_table inv_tbl[128];
 
+       unsigned int attr_log_enable;
+
 };
 
 struct beiscsi_session {
@@ -860,4 +848,20 @@ struct hwi_context_memory {
        struct hwi_async_pdu_context *pasync_ctx;
 };
 
+/* Logging related definitions */
+#define BEISCSI_LOG_INIT       0x0001  /* Initialization events */
+#define BEISCSI_LOG_MBOX       0x0002  /* Mailbox Events */
+#define BEISCSI_LOG_MISC       0x0004  /* Miscllaneous Events */
+#define BEISCSI_LOG_EH         0x0008  /* Error Handler */
+#define BEISCSI_LOG_IO         0x0010  /* IO Code Path */
+#define BEISCSI_LOG_CONFIG     0x0020  /* CONFIG Code Path */
+
+#define beiscsi_log(phba, level, mask, fmt, arg...) \
+do { \
+       uint32_t log_value = phba->attr_log_enable; \
+               if (((mask) & log_value) || (level[1] <= '3')) \
+                       shost_printk(level, phba->shost, \
+                                    fmt, __LINE__, ##arg); \
+} while (0)
+
 #endif
index 2a096795b9aa86768ef18561872d948800eb6402..aab5dd359e2c2cbd5a945becbbcb44dd5638d727 100644 (file)
 #include "be_mgmt.h"
 #include "be_iscsi.h"
 
+/**
+ * mgmt_reopen_session()- Reopen a session based on reopen_type
+ * @phba: Device priv structure instance
+ * @reopen_type: Type of reopen_session FW should do.
+ * @sess_handle: Session Handle of the session to be re-opened
+ *
+ * return
+ *     the TAG used for MBOX Command
+ *
+ **/
+unsigned int mgmt_reopen_session(struct beiscsi_hba *phba,
+                                 unsigned int reopen_type,
+                                 unsigned int sess_handle)
+{
+       struct be_ctrl_info *ctrl = &phba->ctrl;
+       struct be_mcc_wrb *wrb;
+       struct be_cmd_reopen_session_req *req;
+       unsigned int tag = 0;
+
+       beiscsi_log(phba, KERN_INFO,
+                   BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+                   "BG_%d : In bescsi_get_boot_target\n");
+
+       spin_lock(&ctrl->mbox_lock);
+       tag = alloc_mcc_tag(phba);
+       if (!tag) {
+               spin_unlock(&ctrl->mbox_lock);
+               return tag;
+       }
+
+       wrb = wrb_from_mccq(phba);
+       req = embedded_payload(wrb);
+       wrb->tag0 |= tag;
+       be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+       be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
+                          OPCODE_ISCSI_INI_DRIVER_REOPEN_ALL_SESSIONS,
+                          sizeof(struct be_cmd_reopen_session_resp));
+
+       /* set the reopen_type,sess_handle */
+       req->reopen_type = reopen_type;
+       req->session_handle = sess_handle;
+
+       be_mcc_notify(phba);
+       spin_unlock(&ctrl->mbox_lock);
+       return tag;
+}
+
 unsigned int mgmt_get_boot_target(struct beiscsi_hba *phba)
 {
        struct be_ctrl_info *ctrl = &phba->ctrl;
@@ -30,7 +77,10 @@ unsigned int mgmt_get_boot_target(struct beiscsi_hba *phba)
        struct be_cmd_get_boot_target_req *req;
        unsigned int tag = 0;
 
-       SE_DEBUG(DBG_LVL_8, "In bescsi_get_boot_target\n");
+       beiscsi_log(phba, KERN_INFO,
+                   BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+                   "BG_%d : In bescsi_get_boot_target\n");
+
        spin_lock(&ctrl->mbox_lock);
        tag = alloc_mcc_tag(phba);
        if (!tag) {
@@ -62,7 +112,10 @@ unsigned int mgmt_get_session_info(struct beiscsi_hba *phba,
        struct be_cmd_get_session_resp *resp;
        struct be_sge *sge;
 
-       SE_DEBUG(DBG_LVL_8, "In beiscsi_get_session_info\n");
+       beiscsi_log(phba, KERN_INFO,
+                   BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+                   "BG_%d : In beiscsi_get_session_info\n");
+
        spin_lock(&ctrl->mbox_lock);
        tag = alloc_mcc_tag(phba);
        if (!tag) {
@@ -121,16 +174,16 @@ int mgmt_get_fw_config(struct be_ctrl_info *ctrl,
                phba->fw_config.iscsi_cid_count =
                                        pfw_cfg->ulp[0].sq_count;
                if (phba->fw_config.iscsi_cid_count > (BE2_MAX_SESSIONS / 2)) {
-                       SE_DEBUG(DBG_LVL_8,
-                               "FW reported MAX CXNS as %d\t"
-                               "Max Supported = %d.\n",
-                               phba->fw_config.iscsi_cid_count,
-                               BE2_MAX_SESSIONS);
+                       beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+                                   "BG_%d : FW reported MAX CXNS as %d\t"
+                                   "Max Supported = %d.\n",
+                                   phba->fw_config.iscsi_cid_count,
+                                   BE2_MAX_SESSIONS);
                        phba->fw_config.iscsi_cid_count = BE2_MAX_SESSIONS / 2;
                }
        } else {
-               shost_printk(KERN_WARNING, phba->shost,
-                            "Failed in mgmt_get_fw_config\n");
+               beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
+                           "BG_%d : Failed in mgmt_get_fw_config\n");
        }
 
        spin_unlock(&ctrl->mbox_lock);
@@ -150,9 +203,9 @@ int mgmt_check_supported_fw(struct be_ctrl_info *ctrl,
                                sizeof(struct be_mgmt_controller_attributes),
                                &nonemb_cmd.dma);
        if (nonemb_cmd.va == NULL) {
-               SE_DEBUG(DBG_LVL_1,
-                        "Failed to allocate memory for mgmt_check_supported_fw"
-                        "\n");
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                           "BG_%d : Failed to allocate memory for "
+                           "mgmt_check_supported_fw\n");
                return -ENOMEM;
        }
        nonemb_cmd.size = sizeof(struct be_mgmt_controller_attributes);
@@ -169,18 +222,23 @@ int mgmt_check_supported_fw(struct be_ctrl_info *ctrl,
        status = be_mbox_notify(ctrl);
        if (!status) {
                struct be_mgmt_controller_attributes_resp *resp = nonemb_cmd.va;
-               SE_DEBUG(DBG_LVL_8, "Firmware version of CMD: %s\n",
-                       resp->params.hba_attribs.flashrom_version_string);
-               SE_DEBUG(DBG_LVL_8, "Firmware version is : %s\n",
-                       resp->params.hba_attribs.firmware_version_string);
-               SE_DEBUG(DBG_LVL_8,
-                       "Developer Build, not performing version check...\n");
+               beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+                           "BG_%d : Firmware Version of CMD : %s\n"
+                           "Firmware Version is : %s\n"
+                           "Developer Build, not performing version check...\n",
+                           resp->params.hba_attribs
+                           .flashrom_version_string,
+                           resp->params.hba_attribs.
+                           firmware_version_string);
+
                phba->fw_config.iscsi_features =
                                resp->params.hba_attribs.iscsi_features;
-               SE_DEBUG(DBG_LVL_8, " phba->fw_config.iscsi_features = %d\n",
-                                     phba->fw_config.iscsi_features);
+               beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+                           "BM_%d : phba->fw_config.iscsi_features = %d\n",
+                           phba->fw_config.iscsi_features);
        } else
-               SE_DEBUG(DBG_LVL_1, " Failed in mgmt_check_supported_fw\n");
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                           "BG_%d :  Failed in mgmt_check_supported_fw\n");
        spin_unlock(&ctrl->mbox_lock);
        if (nonemb_cmd.va)
                pci_free_consistent(ctrl->pdev, nonemb_cmd.size,
@@ -229,9 +287,10 @@ unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl,
                           OPCODE_COMMON_READ_FLASH, sizeof(*req));
                break;
        default:
-               shost_printk(KERN_WARNING, phba->shost,
-                            "Unsupported cmd = 0x%x\n\n", bsg_req->rqst_data.
-                            h_vendor.vendor_cmd[0]);
+               beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
+                           "BG_%d : Unsupported cmd = 0x%x\n\n",
+                           bsg_req->rqst_data.h_vendor.vendor_cmd[0]);
+
                spin_unlock(&ctrl->mbox_lock);
                return -ENOSYS;
        }
@@ -275,8 +334,8 @@ int mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short chute)
 
        status =  be_mcc_notify_wait(phba);
        if (status)
-               shost_printk(KERN_WARNING, phba->shost,
-                            " mgmt_epfw_cleanup , FAILED\n");
+               beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
+                           "BG_%d : mgmt_epfw_cleanup , FAILED\n");
        spin_unlock(&ctrl->mbox_lock);
        return status;
 }
@@ -459,8 +518,9 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
                       &daddr_in6->sin6_addr.in6_u.u6_addr8, 16);
                beiscsi_ep->ip_type = BE2_IPV6;
        } else{
-               shost_printk(KERN_ERR, phba->shost, "unknown addr family %d\n",
-                            dst_addr->sa_family);
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+                           "BG_%d : unknown addr family %d\n",
+                           dst_addr->sa_family);
                spin_unlock(&ctrl->mbox_lock);
                free_mcc_tag(&phba->ctrl, tag);
                return -EINVAL;
@@ -471,7 +531,8 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
        if (phba->nxt_cqid == phba->num_cpus)
                phba->nxt_cqid = 0;
        req->cq_id = phwi_context->be_cq[i].id;
-       SE_DEBUG(DBG_LVL_8, "i=%d cq_id=%d\n", i, req->cq_id);
+       beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+                   "BG_%d : i=%d cq_id=%d\n", i, req->cq_id);
        req->defq_id = def_hdr_id;
        req->hdr_ring_id = def_hdr_id;
        req->data_ring_id = def_data_id;
@@ -506,8 +567,8 @@ unsigned int mgmt_get_all_if_id(struct beiscsi_hba *phba)
        if (!status)
                phba->interface_handle = pbe_allid->if_hndl_list[0];
        else {
-               shost_printk(KERN_WARNING, phba->shost,
-                            "Failed in mgmt_get_all_if_id\n");
+               beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
+                           "BG_%d : Failed in mgmt_get_all_if_id\n");
        }
        spin_unlock(&ctrl->mbox_lock);
 
@@ -550,9 +611,10 @@ static int mgmt_exec_nonemb_cmd(struct beiscsi_hba *phba,
        extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
        status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
        if (status || extd_status) {
-               SE_DEBUG(DBG_LVL_1,
-                        "mgmt_exec_nonemb_cmd Failed status = %d"
-                        "extd_status = %d\n", status, extd_status);
+               beiscsi_log(phba, KERN_ERR,
+                           BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+                           "BG_%d : mgmt_exec_nonemb_cmd Failed status = %d"
+                           "extd_status = %d\n", status, extd_status);
                rc = -EIO;
                goto free_tag;
        }
@@ -573,7 +635,8 @@ static int mgmt_alloc_cmd_data(struct beiscsi_hba *phba, struct be_dma_mem *cmd,
 {
        cmd->va = pci_alloc_consistent(phba->ctrl.pdev, size, &cmd->dma);
        if (!cmd->va) {
-               SE_DEBUG(DBG_LVL_1, "Failed to allocate memory for if info\n");
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+                           "BG_%d : Failed to allocate memory for if info\n");
                return -ENOMEM;
        }
        memset(cmd->va, 0, size);
@@ -629,8 +692,8 @@ mgmt_static_ip_modify(struct beiscsi_hba *phba,
 
        rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
        if (rc < 0)
-               shost_printk(KERN_WARNING, phba->shost,
-                            "Failed to Modify existing IP Address\n");
+               beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
+                           "BG_%d : Failed to Modify existing IP Address\n");
        return rc;
 }
 
@@ -684,8 +747,8 @@ int mgmt_set_ip(struct beiscsi_hba *phba,
 
        if (boot_proto == ISCSI_BOOTPROTO_DHCP) {
                if (if_info.dhcp_state) {
-                       shost_printk(KERN_WARNING, phba->shost,
-                                    "DHCP Already Enabled\n");
+                       beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
+                                   "BG_%d : DHCP Already Enabled\n");
                        return 0;
                }
                /* The ip_param->len is 1 in DHCP case. Setting
@@ -712,8 +775,9 @@ int mgmt_set_ip(struct beiscsi_hba *phba,
 
                        rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
                        if (rc < 0) {
-                               shost_printk(KERN_WARNING, phba->shost,
-                                            "Failed to Delete existing dhcp\n");
+                               beiscsi_log(phba, KERN_WARNING,
+                                           BEISCSI_LOG_CONFIG,
+                                           "BG_%d : Failed to Delete existing dhcp\n");
                                return rc;
                        }
                }
@@ -732,8 +796,8 @@ int mgmt_set_ip(struct beiscsi_hba *phba,
                memset(&gtway_addr_set, 0, sizeof(gtway_addr_set));
                rc = mgmt_get_gateway(phba, BE2_IPV4, &gtway_addr_set);
                if (rc) {
-                       shost_printk(KERN_WARNING, phba->shost,
-                                    "Failed to Get Gateway Addr\n");
+                       beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
+                                   "BG_%d : Failed to Get Gateway Addr\n");
                        return rc;
                }
 
@@ -743,8 +807,9 @@ int mgmt_set_ip(struct beiscsi_hba *phba,
                                                 IP_ACTION_DEL, IP_V4_LEN);
 
                        if (rc) {
-                               shost_printk(KERN_WARNING, phba->shost,
-                                            "Failed to clear Gateway Addr Set\n");
+                               beiscsi_log(phba, KERN_WARNING,
+                                           BEISCSI_LOG_CONFIG,
+                                           "BG_%d : Failed to clear Gateway Addr Set\n");
                                return rc;
                        }
                }
@@ -783,8 +848,8 @@ int mgmt_set_gateway(struct beiscsi_hba *phba,
        memset(&gtway_addr_set, 0, sizeof(gtway_addr_set));
        rt_val = mgmt_get_gateway(phba, BE2_IPV4, &gtway_addr_set);
        if (rt_val) {
-               shost_printk(KERN_WARNING, phba->shost,
-                            "Failed to Get Gateway Addr\n");
+               beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
+                           "BG_%d : Failed to Get Gateway Addr\n");
                return rt_val;
        }
 
@@ -793,8 +858,8 @@ int mgmt_set_gateway(struct beiscsi_hba *phba,
                rt_val = mgmt_modify_gateway(phba, gtway_addr, IP_ACTION_DEL,
                                             gateway_param->len);
                if (rt_val) {
-                       shost_printk(KERN_WARNING, phba->shost,
-                                    "Failed to clear Gateway Addr Set\n");
+                       beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
+                                   "BG_%d : Failed to clear Gateway Addr Set\n");
                        return rt_val;
                }
        }
@@ -804,8 +869,8 @@ int mgmt_set_gateway(struct beiscsi_hba *phba,
                                     gateway_param->len);
 
        if (rt_val)
-               shost_printk(KERN_WARNING, phba->shost,
-                            "Failed to Set Gateway Addr\n");
+               beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
+                           "BG_%d : Failed to Set Gateway Addr\n");
 
        return rt_val;
 }
@@ -924,3 +989,150 @@ unsigned int be_cmd_get_port_speed(struct beiscsi_hba *phba)
        spin_unlock(&ctrl->mbox_lock);
        return tag;
 }
+
+/**
+ * be_mgmt_get_boot_shandle()- Get the session handle
+ * @phba: device priv structure instance
+ * @s_handle: session handle returned for boot session.
+ *
+ * Get the boot target session handle. In case of
+ * crashdump mode driver has to issue and MBX Cmd
+ * for FW to login to boot target
+ *
+ * return
+ *     Success: 0
+ *     Failure: Non-Zero value
+ *
+ **/
+int be_mgmt_get_boot_shandle(struct beiscsi_hba *phba,
+                             unsigned int *s_handle)
+{
+       struct be_cmd_get_boot_target_resp *boot_resp;
+       struct be_mcc_wrb *wrb;
+       unsigned int tag, wrb_num;
+       uint8_t boot_retry = 3;
+       unsigned short status, extd_status;
+       struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
+
+       do {
+               /* Get the Boot Target Session Handle and Count*/
+               tag = mgmt_get_boot_target(phba);
+               if (!tag) {
+                       beiscsi_log(phba, KERN_ERR,
+                                   BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
+                                   "BG_%d : Getting Boot Target Info Failed\n");
+                       return -EAGAIN;
+               } else
+                       wait_event_interruptible(phba->ctrl.mcc_wait[tag],
+                                                phba->ctrl.mcc_numtag[tag]);
+
+               wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
+               extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
+               status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
+               if (status || extd_status) {
+                       beiscsi_log(phba, KERN_ERR,
+                                   BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
+                                   "BG_%d : mgmt_get_boot_target Failed"
+                                   " status = %d extd_status = %d\n",
+                                   status, extd_status);
+                       free_mcc_tag(&phba->ctrl, tag);
+                       return -EBUSY;
+               }
+               wrb = queue_get_wrb(mccq, wrb_num);
+               free_mcc_tag(&phba->ctrl, tag);
+               boot_resp = embedded_payload(wrb);
+
+               /* Check if the there are any Boot targets configured */
+               if (!boot_resp->boot_session_count) {
+                       beiscsi_log(phba, KERN_INFO,
+                                   BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
+                                   "BG_%d  ;No boot targets configured\n");
+                       return -ENXIO;
+               }
+
+               /* FW returns the session handle of the boot session */
+               if (boot_resp->boot_session_handle != INVALID_SESS_HANDLE) {
+                       *s_handle = boot_resp->boot_session_handle;
+                       return 0;
+               }
+
+               /* Issue MBX Cmd to FW to login to the boot target */
+               tag = mgmt_reopen_session(phba, BE_REOPEN_BOOT_SESSIONS,
+                                         INVALID_SESS_HANDLE);
+               if (!tag) {
+                       beiscsi_log(phba, KERN_ERR,
+                                   BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
+                                   "BG_%d : mgmt_reopen_session Failed\n");
+                       return -EAGAIN;
+               } else
+                       wait_event_interruptible(phba->ctrl.mcc_wait[tag],
+                                                phba->ctrl.mcc_numtag[tag]);
+
+               wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
+               extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
+               status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
+               if (status || extd_status) {
+                       beiscsi_log(phba, KERN_ERR,
+                                   BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
+                                   "BG_%d : mgmt_reopen_session Failed"
+                                   " status = %d extd_status = %d\n",
+                                   status, extd_status);
+                       free_mcc_tag(&phba->ctrl, tag);
+                       return -EBUSY;
+               }
+               free_mcc_tag(&phba->ctrl, tag);
+
+       } while (--boot_retry);
+
+       /* Couldn't log into the boot target */
+       beiscsi_log(phba, KERN_ERR,
+                   BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
+                   "BG_%d : Login to Boot Target Failed\n");
+       return -ENXIO;
+}
+
+/**
+ * mgmt_set_vlan()- Issue and wait for CMD completion
+ * @phba: device private structure instance
+ * @vlan_tag: VLAN tag
+ *
+ * Issue the MBX Cmd and wait for the completion of the
+ * command.
+ *
+ * returns
+ *     Success: 0
+ *     Failure: Non-Xero Value
+ **/
+int mgmt_set_vlan(struct beiscsi_hba *phba,
+                  uint16_t vlan_tag)
+{
+       unsigned int tag, wrb_num;
+       unsigned short status, extd_status;
+
+       tag = be_cmd_set_vlan(phba, vlan_tag);
+       if (!tag) {
+               beiscsi_log(phba, KERN_ERR,
+                           (BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX),
+                           "BG_%d : VLAN Setting Failed\n");
+               return -EBUSY;
+       } else
+               wait_event_interruptible(phba->ctrl.mcc_wait[tag],
+                                        phba->ctrl.mcc_numtag[tag]);
+
+       wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
+       extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
+       status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
+
+       if (status || extd_status) {
+               beiscsi_log(phba, KERN_ERR,
+                           (BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX),
+                           "BS_%d : status : %d extd_status : %d\n",
+                           status, extd_status);
+
+               free_mcc_tag(&phba->ctrl, tag);
+               return -EAGAIN;
+       }
+
+       free_mcc_tag(&phba->ctrl, tag);
+       return 0;
+}
index 5c2e37693ca82a863c0b459a1b58df342d80d6be..c50cef6fec0db3921908638411d9749bb879cdf2 100644 (file)
@@ -108,6 +108,7 @@ unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl,
                                         struct bsg_job *job,
                                         struct be_dma_mem *nonemb_cmd);
 
+#define BEISCSI_NO_RST_ISSUE   0
 struct iscsi_invalidate_connection_params_in {
        struct be_cmd_req_hdr hdr;
        unsigned int session_handle;
@@ -274,6 +275,10 @@ int mgmt_set_ip(struct beiscsi_hba *phba,
 
 unsigned int mgmt_get_boot_target(struct beiscsi_hba *phba);
 
+unsigned int mgmt_reopen_session(struct beiscsi_hba *phba,
+                                 unsigned int reopen_type,
+                                 unsigned sess_handle);
+
 unsigned int mgmt_get_session_info(struct beiscsi_hba *phba,
                                   u32 boot_session_handle,
                                   struct be_dma_mem *nonemb_cmd);
@@ -290,4 +295,10 @@ int mgmt_get_gateway(struct beiscsi_hba *phba, int ip_type,
 int mgmt_set_gateway(struct beiscsi_hba *phba,
                     struct iscsi_iface_param_info *gateway_param);
 
+int be_mgmt_get_boot_shandle(struct beiscsi_hba *phba,
+                             unsigned int *s_handle);
+
+unsigned int mgmt_get_all_if_id(struct beiscsi_hba *phba);
+
+int mgmt_set_vlan(struct beiscsi_hba *phba, uint16_t vlan_tag);
 #endif
index 456e5762977df90bc7984d98bdae383797ad674a..b7c326f7a6d0f3dc337d8b7b9c45bccb671cd186 100644 (file)
@@ -775,7 +775,8 @@ bfa_intx(struct bfa_s *bfa)
        if (!intr)
                return BFA_TRUE;
 
-       bfa_msix_lpu_err(bfa, intr);
+       if (bfa->intr_enabled)
+               bfa_msix_lpu_err(bfa, intr);
 
        return BFA_TRUE;
 }
@@ -803,11 +804,17 @@ bfa_isr_enable(struct bfa_s *bfa)
        writel(~umsk, bfa->iocfc.bfa_regs.intr_mask);
        bfa->iocfc.intr_mask = ~umsk;
        bfa_isr_mode_set(bfa, bfa->msix.nvecs != 0);
+
+       /*
+        * Set the flag indicating successful enabling of interrupts
+        */
+       bfa->intr_enabled = BFA_TRUE;
 }
 
 void
 bfa_isr_disable(struct bfa_s *bfa)
 {
+       bfa->intr_enabled = BFA_FALSE;
        bfa_isr_mode_set(bfa, BFA_FALSE);
        writel(-1L, bfa->iocfc.bfa_regs.intr_mask);
        bfa_msix_uninstall(bfa);
@@ -1022,7 +1029,7 @@ bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg)
 {
        u8      *dm_kva = NULL;
        u64     dm_pa = 0;
-       int     i, per_reqq_sz, per_rspq_sz, dbgsz;
+       int     i, per_reqq_sz, per_rspq_sz;
        struct bfa_iocfc_s  *iocfc = &bfa->iocfc;
        struct bfa_mem_dma_s *ioc_dma = BFA_MEM_IOC_DMA(bfa);
        struct bfa_mem_dma_s *iocfc_dma = BFA_MEM_IOCFC_DMA(bfa);
@@ -1083,11 +1090,8 @@ bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg)
                        BFA_CACHELINE_SZ);
 
        /* Claim IOCFC kva memory */
-       dbgsz = (bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
-       if (dbgsz > 0) {
-               bfa_ioc_debug_memclaim(&bfa->ioc, bfa_mem_kva_curp(iocfc));
-               bfa_mem_kva_curp(iocfc) += dbgsz;
-       }
+       bfa_ioc_debug_memclaim(&bfa->ioc, bfa_mem_kva_curp(iocfc));
+       bfa_mem_kva_curp(iocfc) += BFA_DBG_FWTRC_LEN;
 }
 
 /*
@@ -1429,8 +1433,7 @@ bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
        bfa_mem_dma_setup(meminfo, iocfc_dma, dm_len);
 
        /* kva memory setup for IOCFC */
-       bfa_mem_kva_setup(meminfo, iocfc_kva,
-                       ((bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0));
+       bfa_mem_kva_setup(meminfo, iocfc_kva, BFA_DBG_FWTRC_LEN);
 }
 
 /*
index 12bfeed268eb7c87c2097447f5fdeb0752506309..91a8aa394db516b94aa501974718d0f9c18c5283 100644 (file)
@@ -168,7 +168,7 @@ __bfa_trc32(struct bfa_trc_mod_s *trcm, int fileno, int line, u32 data)
 /*
  * bfa_q_deq - dequeue an element from head of the queue
  */
-#define bfa_q_deq(_q, _qe) {                                           \
+#define bfa_q_deq(_q, _qe) do {                                                \
        if (!list_empty(_q)) {                                          \
                (*((struct list_head **) (_qe))) = bfa_q_next(_q);      \
                bfa_q_prev(bfa_q_next(*((struct list_head **) _qe))) =  \
@@ -177,7 +177,7 @@ __bfa_trc32(struct bfa_trc_mod_s *trcm, int fileno, int line, u32 data)
        } else {                                                        \
                *((struct list_head **) (_qe)) = (struct list_head *) NULL;\
        }                                                               \
-}
+} while (0)
 
 /*
  * bfa_q_deq_tail - dequeue an element from tail of the queue
index 3bbc583f65cfeee82a9c2e6aace0b4a7490a025f..06f0a163ca35bd87a854dd0ea0861a4bb35c9e29 100644 (file)
@@ -93,6 +93,7 @@ struct bfa_lport_cfg_s {
        wwn_t          pwwn;       /*  port wwn */
        wwn_t          nwwn;       /*  node wwn */
        struct bfa_lport_symname_s  sym_name;   /*  vm port symbolic name */
+       struct bfa_lport_symname_s node_sym_name; /* Node symbolic name */
        enum bfa_lport_role roles;      /* FCS port roles */
        u32     rsvd;
        bfa_boolean_t   preboot_vp;  /*  vport created from PBC */
@@ -192,6 +193,18 @@ struct bfa_lport_stats_s {
        u32     ns_gidft_unknown_rsp;
        u32     ns_gidft_alloc_wait;
 
+       u32     ns_rnnid_sent;
+       u32     ns_rnnid_accepts;
+       u32     ns_rnnid_rsp_err;
+       u32     ns_rnnid_rejects;
+       u32     ns_rnnid_alloc_wait;
+
+       u32     ns_rsnn_nn_sent;
+       u32     ns_rsnn_nn_accepts;
+       u32     ns_rsnn_nn_rsp_err;
+       u32     ns_rsnn_nn_rejects;
+       u32     ns_rsnn_nn_alloc_wait;
+
        /*
         * Mgmt Server stats
         */
@@ -410,6 +423,11 @@ struct bfa_rport_remote_link_stats_s {
        u32 icc; /*  Invalid CRC Count */
 };
 
+struct bfa_rport_qualifier_s {
+       wwn_t   pwwn;   /* Port WWN */
+       u32     pid;    /* port ID */
+       u32     rsvd;
+};
 
 #define BFA_MAX_IO_INDEX 7
 #define BFA_NO_IO_INDEX 9
index 8d0b88f67a382e3582c40382d7dfe3892a19f3c8..e0beb4d7e26443c8881abc3ba66f59b7ac773f87 100644 (file)
@@ -1279,6 +1279,7 @@ enum {
        GS_GSPN_ID      = 0x0118,       /* Get symbolic PN on ID */
        GS_RFT_ID       = 0x0217,       /* Register fc4type on ID */
        GS_RSPN_ID      = 0x0218,       /* Register symbolic PN on ID */
+       GS_RSNN_NN      = 0x0239,       /* Register symbolic NN on NN */
        GS_RPN_ID       = 0x0212,       /* Register port name */
        GS_RNN_ID       = 0x0213,       /* Register node name */
        GS_RCS_ID       = 0x0214,       /* Register class of service */
@@ -1356,6 +1357,15 @@ struct fcgs_rspnid_req_s {
        u8      spn[256];       /* symbolic port name */
 };
 
+/*
+ * RSNN_NN
+ */
+struct fcgs_rsnn_nn_req_s {
+       wwn_t   node_name;      /* Node name */
+       u8      snn_len;        /* symbolic node name length */
+       u8      snn[256];       /* symbolic node name */
+};
+
 /*
  * RPN_ID
  */
index 17b59b8b564425fc0a8bc38dcf25f2af08f780a9..273cee90b3b429166ef8259a81a60aad0a865dc4 100644 (file)
@@ -1251,6 +1251,27 @@ fc_rspnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
        return sizeof(struct fcgs_rspnid_req_s) + sizeof(struct ct_hdr_s);
 }
 
+u16
+fc_rsnn_nn_build(struct fchs_s *fchs, void *pyld, u32 s_id,
+                       wwn_t node_name, u8 *name)
+{
+       struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
+       struct fcgs_rsnn_nn_req_s *rsnn_nn =
+               (struct fcgs_rsnn_nn_req_s *) (cthdr + 1);
+       u32     d_id = bfa_hton3b(FC_NAME_SERVER);
+
+       fc_gs_fchdr_build(fchs, d_id, s_id, 0);
+       fc_gs_cthdr_build(cthdr, s_id, GS_RSNN_NN);
+
+       memset(rsnn_nn, 0, sizeof(struct fcgs_rsnn_nn_req_s));
+
+       rsnn_nn->node_name = node_name;
+       rsnn_nn->snn_len = (u8) strlen((char *)name);
+       strncpy((char *)rsnn_nn->snn, (char *)name, rsnn_nn->snn_len);
+
+       return sizeof(struct fcgs_rsnn_nn_req_s) + sizeof(struct ct_hdr_s);
+}
+
 u16
 fc_gid_ft_build(struct fchs_s *fchs, void *pyld, u32 s_id, u8 fc4_type)
 {
index 42cd9d4da697b767eb66601644b155b4d2162bcd..03c753d1e548949573a785463f203c685e1936b4 100644 (file)
@@ -166,6 +166,8 @@ enum fc_parse_status fc_rrq_rsp_parse(struct fchs_s *buf, int len);
 
 u16        fc_rspnid_build(struct fchs_s *fchs, void *pld, u32 s_id,
                                u16 ox_id, u8 *name);
+u16    fc_rsnn_nn_build(struct fchs_s *fchs, void *pld, u32 s_id,
+                               wwn_t node_name, u8 *name);
 
 u16        fc_rftid_build(struct fchs_s *fchs, void *pld, u32 s_id,
                               u16 ox_id, enum bfa_lport_role role);
index f0f80e282e39cc023a72dacef7ee23b09c06a79e..1633963c66cac646cea41c4a1ba7a386873fa4c7 100644 (file)
@@ -1466,7 +1466,13 @@ bfa_status_t
 bfa_itnim_get_ioprofile(struct bfa_itnim_s *itnim,
                        struct bfa_itnim_ioprofile_s *ioprofile)
 {
-       struct bfa_fcpim_s *fcpim = BFA_FCPIM(itnim->bfa);
+       struct bfa_fcpim_s *fcpim;
+
+       if (!itnim)
+               return BFA_STATUS_NO_FCPIM_NEXUS;
+
+       fcpim = BFA_FCPIM(itnim->bfa);
+
        if (!fcpim->io_profile)
                return BFA_STATUS_IOPROFILE_OFF;
 
@@ -1484,6 +1490,10 @@ void
 bfa_itnim_clear_stats(struct bfa_itnim_s *itnim)
 {
        int j;
+
+       if (!itnim)
+               return;
+
        memset(&itnim->stats, 0, sizeof(itnim->stats));
        memset(&itnim->ioprofile, 0, sizeof(itnim->ioprofile));
        for (j = 0; j < BFA_IOBUCKET_MAX; j++)
index eaac57e1ddec4fd42a0ef08f47494c9310a3bc14..fd3e84d32bd2fed636b61a443f32ebe71f3c54a1 100644 (file)
@@ -76,6 +76,7 @@ bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad,
        fcs->bfa = bfa;
        fcs->bfad = bfad;
        fcs->min_cfg = min_cfg;
+       fcs->num_rport_logins = 0;
 
        bfa->fcs = BFA_TRUE;
        fcbuild_init();
@@ -118,6 +119,18 @@ bfa_fcs_update_cfg(struct bfa_fcs_s *fcs)
        port_cfg->pwwn = ioc->attr->pwwn;
 }
 
+/*
+ * Stop FCS operations.
+ */
+void
+bfa_fcs_stop(struct bfa_fcs_s *fcs)
+{
+       bfa_wc_init(&fcs->wc, bfa_fcs_exit_comp, fcs);
+       bfa_wc_up(&fcs->wc);
+       bfa_fcs_fabric_modstop(fcs);
+       bfa_wc_wait(&fcs->wc);
+}
+
 /*
  * fcs pbc vport initialization
  */
@@ -153,6 +166,7 @@ bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs,
        fcs->driver_info = *driver_info;
 
        bfa_fcs_fabric_psymb_init(&fcs->fabric);
+       bfa_fcs_fabric_nsymb_init(&fcs->fabric);
 }
 
 /*
@@ -213,6 +227,8 @@ static void bfa_fcs_fabric_notify_offline(struct bfa_fcs_fabric_s *fabric);
 static void bfa_fcs_fabric_delay(void *cbarg);
 static void bfa_fcs_fabric_delete(struct bfa_fcs_fabric_s *fabric);
 static void bfa_fcs_fabric_delete_comp(void *cbarg);
+static void bfa_fcs_fabric_stop(struct bfa_fcs_fabric_s *fabric);
+static void bfa_fcs_fabric_stop_comp(void *cbarg);
 static void bfa_fcs_fabric_process_uf(struct bfa_fcs_fabric_s *fabric,
                                      struct fchs_s *fchs, u16 len);
 static void bfa_fcs_fabric_process_flogi(struct bfa_fcs_fabric_s *fabric,
@@ -250,6 +266,10 @@ static void        bfa_fcs_fabric_sm_isolated(struct bfa_fcs_fabric_s *fabric,
                                           enum bfa_fcs_fabric_event event);
 static void    bfa_fcs_fabric_sm_deleting(struct bfa_fcs_fabric_s *fabric,
                                           enum bfa_fcs_fabric_event event);
+static void    bfa_fcs_fabric_sm_stopping(struct bfa_fcs_fabric_s *fabric,
+                                          enum bfa_fcs_fabric_event event);
+static void    bfa_fcs_fabric_sm_cleanup(struct bfa_fcs_fabric_s *fabric,
+                                         enum bfa_fcs_fabric_event event);
 /*
  *   Beginning state before fabric creation.
  */
@@ -334,6 +354,11 @@ bfa_fcs_fabric_sm_linkdown(struct bfa_fcs_fabric_s *fabric,
                bfa_fcs_fabric_delete(fabric);
                break;
 
+       case BFA_FCS_FABRIC_SM_STOP:
+               bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_cleanup);
+               bfa_fcs_fabric_stop(fabric);
+               break;
+
        default:
                bfa_sm_fault(fabric->fcs, event);
        }
@@ -585,6 +610,11 @@ bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric,
                bfa_fcs_fabric_delete(fabric);
                break;
 
+       case BFA_FCS_FABRIC_SM_STOP:
+               bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_stopping);
+               bfa_fcs_fabric_stop(fabric);
+               break;
+
        case BFA_FCS_FABRIC_SM_AUTH_FAILED:
                bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_auth_failed);
                bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE);
@@ -682,7 +712,62 @@ bfa_fcs_fabric_sm_deleting(struct bfa_fcs_fabric_s *fabric,
        }
 }
 
+/*
+ * Fabric is being stopped, awaiting vport stop completions.
+ */
+static void
+bfa_fcs_fabric_sm_stopping(struct bfa_fcs_fabric_s *fabric,
+                          enum bfa_fcs_fabric_event event)
+{
+       bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
+       bfa_trc(fabric->fcs, event);
+
+       switch (event) {
+       case BFA_FCS_FABRIC_SM_STOPCOMP:
+               bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_cleanup);
+               bfa_sm_send_event(fabric->lps, BFA_LPS_SM_LOGOUT);
+               break;
+
+       case BFA_FCS_FABRIC_SM_LINK_UP:
+               break;
+
+       case BFA_FCS_FABRIC_SM_LINK_DOWN:
+               bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_cleanup);
+               break;
+
+       default:
+               bfa_sm_fault(fabric->fcs, event);
+       }
+}
+
+/*
+ * Fabric is being stopped, cleanup without FLOGO
+ */
+static void
+bfa_fcs_fabric_sm_cleanup(struct bfa_fcs_fabric_s *fabric,
+                         enum bfa_fcs_fabric_event event)
+{
+       bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
+       bfa_trc(fabric->fcs, event);
 
+       switch (event) {
+       case BFA_FCS_FABRIC_SM_STOPCOMP:
+       case BFA_FCS_FABRIC_SM_LOGOCOMP:
+               bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_created);
+               bfa_wc_down(&(fabric->fcs)->wc);
+               break;
+
+       case BFA_FCS_FABRIC_SM_LINK_DOWN:
+               /*
+                * Ignore - can get this event if we get notified about IOC down
+                * before the fabric completion callbk is done.
+                */
+               break;
+
+       default:
+               bfa_sm_fault(fabric->fcs, event);
+       }
+}
 
 /*
  *  fcs_fabric_private fabric private functions
@@ -759,6 +844,44 @@ bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric)
        port_cfg->sym_name.symname[BFA_SYMNAME_MAXLEN - 1] = 0;
 }
 
+/*
+ * Node Symbolic Name Creation for base port and all vports
+ */
+void
+bfa_fcs_fabric_nsymb_init(struct bfa_fcs_fabric_s *fabric)
+{
+       struct bfa_lport_cfg_s *port_cfg = &fabric->bport.port_cfg;
+       char model[BFA_ADAPTER_MODEL_NAME_LEN] = {0};
+       struct bfa_fcs_driver_info_s *driver_info = &fabric->fcs->driver_info;
+
+       bfa_ioc_get_adapter_model(&fabric->fcs->bfa->ioc, model);
+
+       /* Model name/number */
+       strncpy((char *)&port_cfg->node_sym_name, model,
+               BFA_FCS_PORT_SYMBNAME_MODEL_SZ);
+       strncat((char *)&port_cfg->node_sym_name,
+                       BFA_FCS_PORT_SYMBNAME_SEPARATOR,
+                       sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
+
+       /* Driver Version */
+       strncat((char *)&port_cfg->node_sym_name, (char *)driver_info->version,
+               BFA_FCS_PORT_SYMBNAME_VERSION_SZ);
+       strncat((char *)&port_cfg->node_sym_name,
+                       BFA_FCS_PORT_SYMBNAME_SEPARATOR,
+                       sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
+
+       /* Host machine name */
+       strncat((char *)&port_cfg->node_sym_name,
+               (char *)driver_info->host_machine_name,
+               BFA_FCS_PORT_SYMBNAME_MACHINENAME_SZ);
+       strncat((char *)&port_cfg->node_sym_name,
+                       BFA_FCS_PORT_SYMBNAME_SEPARATOR,
+                       sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
+
+       /* null terminate */
+       port_cfg->node_sym_name.symname[BFA_SYMNAME_MAXLEN - 1] = 0;
+}
+
 /*
  * bfa lps login completion callback
  */
@@ -918,6 +1041,28 @@ bfa_fcs_fabric_delay(void *cbarg)
        bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELAYED);
 }
 
+/*
+ * Stop all vports and wait for vport stop completions.
+ */
+static void
+bfa_fcs_fabric_stop(struct bfa_fcs_fabric_s *fabric)
+{
+       struct bfa_fcs_vport_s *vport;
+       struct list_head        *qe, *qen;
+
+       bfa_wc_init(&fabric->stop_wc, bfa_fcs_fabric_stop_comp, fabric);
+
+       list_for_each_safe(qe, qen, &fabric->vport_q) {
+               vport = (struct bfa_fcs_vport_s *) qe;
+               bfa_wc_up(&fabric->stop_wc);
+               bfa_fcs_vport_fcs_stop(vport);
+       }
+
+       bfa_wc_up(&fabric->stop_wc);
+       bfa_fcs_lport_stop(&fabric->bport);
+       bfa_wc_wait(&fabric->stop_wc);
+}
+
 /*
  * Computes operating BB_SCN value
  */
@@ -978,6 +1123,14 @@ bfa_fcs_fabric_delete_comp(void *cbarg)
        bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELCOMP);
 }
 
+static void
+bfa_fcs_fabric_stop_comp(void *cbarg)
+{
+       struct bfa_fcs_fabric_s *fabric = cbarg;
+
+       bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_STOPCOMP);
+}
+
 /*
  *  fcs_fabric_public fabric public functions
  */
@@ -1038,6 +1191,19 @@ bfa_fcs_fabric_modexit(struct bfa_fcs_s *fcs)
        bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELETE);
 }
 
+/*
+ * Fabric module stop -- stop FCS actions
+ */
+void
+bfa_fcs_fabric_modstop(struct bfa_fcs_s *fcs)
+{
+       struct bfa_fcs_fabric_s *fabric;
+
+       bfa_trc(fcs, 0);
+       fabric = &fcs->fabric;
+       bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_STOP);
+}
+
 /*
  * Fabric module start -- kick starts FCS actions
  */
@@ -1219,8 +1385,11 @@ bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs,
                        return;
                }
        }
-       bfa_trc(fabric->fcs, els_cmd->els_code);
-       bfa_fcs_lport_uf_recv(&fabric->bport, fchs, len);
+
+       if (!bfa_fcs_fabric_is_switched(fabric))
+               bfa_fcs_lport_uf_recv(&fabric->bport, fchs, len);
+
+       bfa_trc(fabric->fcs, fchs->type);
 }
 
 /*
@@ -1294,7 +1463,7 @@ bfa_fcs_fabric_send_flogi_acc(struct bfa_fcs_fabric_s *fabric)
        u16     reqlen;
        struct fchs_s   fchs;
 
-       fcxp = bfa_fcs_fcxp_alloc(fabric->fcs);
+       fcxp = bfa_fcs_fcxp_alloc(fabric->fcs, BFA_FALSE);
        /*
         * Do not expect this failure -- expect remote node to retry
         */
@@ -1387,6 +1556,13 @@ bfa_fcs_fabric_set_fabric_name(struct bfa_fcs_fabric_s *fabric,
        }
 }
 
+void
+bfa_cb_lps_flogo_comp(void *bfad, void *uarg)
+{
+       struct bfa_fcs_fabric_s *fabric = uarg;
+       bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LOGOCOMP);
+}
+
 /*
  *     Returns FCS vf structure for a given vf_id.
  *
index 51c9e134571986399fe1a6b9d1738ac3c36f876e..6c4377cb287f1eec93791c8d56bd401c28a5f731 100644 (file)
@@ -62,9 +62,9 @@ struct bfa_fcs_s;
 #define N2N_LOCAL_PID      0x010000
 #define N2N_REMOTE_PID         0x020000
 #define        BFA_FCS_RETRY_TIMEOUT 2000
+#define BFA_FCS_MAX_NS_RETRIES 5
 #define BFA_FCS_PID_IS_WKA(pid)  ((bfa_ntoh3b(pid) > 0xFFF000) ?  1 : 0)
-
-
+#define BFA_FCS_MAX_RPORT_LOGINS 1024
 
 struct bfa_fcs_lport_ns_s {
        bfa_sm_t        sm;             /*  state machine */
@@ -72,6 +72,8 @@ struct bfa_fcs_lport_ns_s {
        struct bfa_fcs_lport_s *port;   /*  parent port */
        struct bfa_fcxp_s *fcxp;
        struct bfa_fcxp_wqe_s fcxp_wqe;
+       u8      num_rnnid_retries;
+       u8      num_rsnn_nn_retries;
 };
 
 
@@ -205,6 +207,7 @@ struct bfa_fcs_fabric_s {
        struct bfa_lps_s        *lps;   /*  lport login services        */
        u8      fabric_ip_addr[BFA_FCS_FABRIC_IPADDR_SZ];
                                        /*  attached fabric's ip addr  */
+       struct bfa_wc_s stop_wc;        /*  wait counter for stop */
 };
 
 #define bfa_fcs_fabric_npiv_capable(__f)    ((__f)->is_npiv)
@@ -264,6 +267,7 @@ struct bfa_fcs_fabric_s;
 #define bfa_fcs_lport_get_pwwn(_lport) ((_lport)->port_cfg.pwwn)
 #define bfa_fcs_lport_get_nwwn(_lport) ((_lport)->port_cfg.nwwn)
 #define bfa_fcs_lport_get_psym_name(_lport)    ((_lport)->port_cfg.sym_name)
+#define bfa_fcs_lport_get_nsym_name(_lport) ((_lport)->port_cfg.node_sym_name)
 #define bfa_fcs_lport_is_initiator(_lport)                     \
        ((_lport)->port_cfg.roles & BFA_LPORT_ROLE_FCP_IM)
 #define bfa_fcs_lport_get_nrports(_lport)      \
@@ -286,9 +290,8 @@ bfa_fcs_lport_get_drvport(struct bfa_fcs_lport_s *port)
 
 bfa_boolean_t   bfa_fcs_lport_is_online(struct bfa_fcs_lport_s *port);
 struct bfa_fcs_lport_s *bfa_fcs_get_base_port(struct bfa_fcs_s *fcs);
-void bfa_fcs_lport_get_rports(struct bfa_fcs_lport_s *port,
-                             wwn_t rport_wwns[], int *nrports);
-
+void bfa_fcs_lport_get_rport_quals(struct bfa_fcs_lport_s *port,
+                       struct bfa_rport_qualifier_s rport[], int *nrports);
 wwn_t bfa_fcs_lport_get_rport(struct bfa_fcs_lport_s *port, wwn_t wwn,
                              int index, int nrports, bfa_boolean_t bwwn);
 
@@ -324,12 +327,17 @@ void bfa_fcs_lport_init(struct bfa_fcs_lport_s *lport,
 void            bfa_fcs_lport_online(struct bfa_fcs_lport_s *port);
 void            bfa_fcs_lport_offline(struct bfa_fcs_lport_s *port);
 void            bfa_fcs_lport_delete(struct bfa_fcs_lport_s *port);
+void           bfa_fcs_lport_stop(struct bfa_fcs_lport_s *port);
 struct bfa_fcs_rport_s *bfa_fcs_lport_get_rport_by_pid(
                struct bfa_fcs_lport_s *port, u32 pid);
+struct bfa_fcs_rport_s *bfa_fcs_lport_get_rport_by_old_pid(
+               struct bfa_fcs_lport_s *port, u32 pid);
 struct bfa_fcs_rport_s *bfa_fcs_lport_get_rport_by_pwwn(
                struct bfa_fcs_lport_s *port, wwn_t pwwn);
 struct bfa_fcs_rport_s *bfa_fcs_lport_get_rport_by_nwwn(
                struct bfa_fcs_lport_s *port, wwn_t nwwn);
+struct bfa_fcs_rport_s *bfa_fcs_lport_get_rport_by_qualifier(
+               struct bfa_fcs_lport_s *port, wwn_t pwwn, u32 pid);
 void            bfa_fcs_lport_add_rport(struct bfa_fcs_lport_s *port,
                                       struct bfa_fcs_rport_s *rport);
 void            bfa_fcs_lport_del_rport(struct bfa_fcs_lport_s *port,
@@ -338,6 +346,8 @@ void            bfa_fcs_lport_ns_init(struct bfa_fcs_lport_s *vport);
 void            bfa_fcs_lport_ns_offline(struct bfa_fcs_lport_s *vport);
 void            bfa_fcs_lport_ns_online(struct bfa_fcs_lport_s *vport);
 void            bfa_fcs_lport_ns_query(struct bfa_fcs_lport_s *port);
+void           bfa_fcs_lport_ns_util_send_rspn_id(void *cbarg,
+                               struct bfa_fcxp_s *fcxp_alloced);
 void            bfa_fcs_lport_scn_init(struct bfa_fcs_lport_s *vport);
 void            bfa_fcs_lport_scn_offline(struct bfa_fcs_lport_s *vport);
 void            bfa_fcs_lport_scn_online(struct bfa_fcs_lport_s *vport);
@@ -382,6 +392,7 @@ void bfa_fcs_vport_online(struct bfa_fcs_vport_s *vport);
 void bfa_fcs_vport_offline(struct bfa_fcs_vport_s *vport);
 void bfa_fcs_vport_delete_comp(struct bfa_fcs_vport_s *vport);
 void bfa_fcs_vport_fcs_delete(struct bfa_fcs_vport_s *vport);
+void bfa_fcs_vport_fcs_stop(struct bfa_fcs_vport_s *vport);
 void bfa_fcs_vport_stop_comp(struct bfa_fcs_vport_s *vport);
 
 #define BFA_FCS_RPORT_DEF_DEL_TIMEOUT  90      /* in secs */
@@ -419,6 +430,7 @@ struct bfa_fcs_rport_s {
        struct bfa_fcs_s        *fcs;   /*  fcs instance */
        struct bfad_rport_s     *rp_drv;        /*  driver peer instance */
        u32     pid;    /*  port ID of rport */
+       u32     old_pid;        /* PID before rport goes offline */
        u16     maxfrsize;      /*  maximum frame size */
        __be16  reply_oxid;     /*  OX_ID of inbound requests */
        enum fc_cos     fc_cos; /*  FC classes of service supp */
@@ -459,7 +471,7 @@ struct bfa_fcs_rport_s *bfa_fcs_rport_lookup(struct bfa_fcs_lport_s *port,
 struct bfa_fcs_rport_s *bfa_fcs_rport_lookup_by_nwwn(
        struct bfa_fcs_lport_s *port, wwn_t rnwwn);
 void bfa_fcs_rport_set_del_timeout(u8 rport_tmo);
-
+void bfa_fcs_rport_set_max_logins(u32 max_logins);
 void bfa_fcs_rport_uf_recv(struct bfa_fcs_rport_s *rport,
         struct fchs_s *fchs, u16 len);
 void bfa_fcs_rport_scn(struct bfa_fcs_rport_s *rport);
@@ -505,12 +517,13 @@ struct bfa_fcs_itnim_s {
        struct bfa_fcxp_s       *fcxp;          /*  FCXP in use */
        struct bfa_itnim_stats_s        stats;  /*  itn statistics      */
 };
-#define bfa_fcs_fcxp_alloc(__fcs)      \
-       bfa_fcxp_alloc(NULL, (__fcs)->bfa, 0, 0, NULL, NULL, NULL, NULL)
-
-#define bfa_fcs_fcxp_alloc_wait(__bfa, __wqe, __alloc_cbfn, __alloc_cbarg) \
-       bfa_fcxp_alloc_wait(__bfa, __wqe, __alloc_cbfn, __alloc_cbarg, \
-                                       NULL, 0, 0, NULL, NULL, NULL, NULL)
+#define bfa_fcs_fcxp_alloc(__fcs, __req)                               \
+       bfa_fcxp_req_rsp_alloc(NULL, (__fcs)->bfa, 0, 0,                \
+                              NULL, NULL, NULL, NULL, __req)
+#define bfa_fcs_fcxp_alloc_wait(__bfa, __wqe, __alloc_cbfn,            \
+                               __alloc_cbarg, __req)                   \
+       bfa_fcxp_req_rsp_alloc_wait(__bfa, __wqe, __alloc_cbfn,         \
+               __alloc_cbarg, NULL, 0, 0, NULL, NULL, NULL, NULL, __req)
 
 static inline struct bfad_port_s *
 bfa_fcs_itnim_get_drvport(struct bfa_fcs_itnim_s *itnim)
@@ -592,7 +605,7 @@ bfa_status_t bfa_fcs_itnim_stats_clear(struct bfa_fcs_lport_s *port,
 struct bfa_fcs_itnim_s *bfa_fcs_itnim_create(struct bfa_fcs_rport_s *rport);
 void bfa_fcs_itnim_delete(struct bfa_fcs_itnim_s *itnim);
 void bfa_fcs_itnim_rport_offline(struct bfa_fcs_itnim_s *itnim);
-void bfa_fcs_itnim_rport_online(struct bfa_fcs_itnim_s *itnim);
+void bfa_fcs_itnim_brp_online(struct bfa_fcs_itnim_s *itnim);
 bfa_status_t bfa_fcs_itnim_get_online_state(struct bfa_fcs_itnim_s *itnim);
 void bfa_fcs_itnim_is_initiator(struct bfa_fcs_itnim_s *itnim);
 void bfa_fcs_fcpim_uf_recv(struct bfa_fcs_itnim_s *itnim,
@@ -676,6 +689,7 @@ struct bfa_fcs_s {
        struct bfa_fcs_stats_s  stats;  /*  FCS statistics */
        struct bfa_wc_s         wc;     /*  waiting counter */
        int                     fcs_aen_seq;
+       u32             num_rport_logins;
 };
 
 /*
@@ -702,6 +716,9 @@ enum bfa_fcs_fabric_event {
        BFA_FCS_FABRIC_SM_DELCOMP       = 14,   /*  all vports deleted event */
        BFA_FCS_FABRIC_SM_LOOPBACK      = 15,   /*  Received our own FLOGI   */
        BFA_FCS_FABRIC_SM_START         = 16,   /*  from driver       */
+       BFA_FCS_FABRIC_SM_STOP          = 17,   /*  Stop from driver    */
+       BFA_FCS_FABRIC_SM_STOPCOMP      = 18,   /*  Stop completion     */
+       BFA_FCS_FABRIC_SM_LOGOCOMP      = 19,   /*  FLOGO completion    */
 };
 
 /*
@@ -727,6 +744,26 @@ enum rport_event {
        RPSM_EVENT_ADDRESS_DISC = 16,   /*  Need to Discover rport's PID */
        RPSM_EVENT_PRLO_RCVD   = 17,    /*  PRLO from remote device     */
        RPSM_EVENT_PLOGI_RETRY = 18,    /*  Retry PLOGI continuously */
+       RPSM_EVENT_FC4_FCS_ONLINE = 19, /*!< FC-4 FCS online complete */
+};
+
+/*
+ * fcs_itnim_sm FCS itnim state machine events
+ */
+enum bfa_fcs_itnim_event {
+       BFA_FCS_ITNIM_SM_FCS_ONLINE = 1,        /*  rport online event */
+       BFA_FCS_ITNIM_SM_OFFLINE = 2,   /*  rport offline */
+       BFA_FCS_ITNIM_SM_FRMSENT = 3,   /*  prli frame is sent */
+       BFA_FCS_ITNIM_SM_RSP_OK = 4,    /*  good response */
+       BFA_FCS_ITNIM_SM_RSP_ERROR = 5, /*  error response */
+       BFA_FCS_ITNIM_SM_TIMEOUT = 6,   /*  delay timeout */
+       BFA_FCS_ITNIM_SM_HCB_OFFLINE = 7, /*  BFA offline callback */
+       BFA_FCS_ITNIM_SM_HCB_ONLINE = 8, /*  BFA online callback */
+       BFA_FCS_ITNIM_SM_INITIATOR = 9, /*  rport is initiator */
+       BFA_FCS_ITNIM_SM_DELETE = 10,   /*  delete event from rport */
+       BFA_FCS_ITNIM_SM_PRLO = 11,     /*  delete event from rport */
+       BFA_FCS_ITNIM_SM_RSP_NOT_SUPP = 12, /* cmd not supported rsp */
+       BFA_FCS_ITNIM_SM_HAL_ONLINE = 13, /*!< bfa rport online event */
 };
 
 /*
@@ -741,6 +778,7 @@ void bfa_fcs_update_cfg(struct bfa_fcs_s *fcs);
 void bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs,
                              struct bfa_fcs_driver_info_s *driver_info);
 void bfa_fcs_exit(struct bfa_fcs_s *fcs);
+void bfa_fcs_stop(struct bfa_fcs_s *fcs);
 
 /*
  * bfa fcs vf public functions
@@ -766,11 +804,13 @@ void bfa_fcs_fabric_modstart(struct bfa_fcs_s *fcs);
 void bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric,
                struct fchs_s *fchs, u16 len);
 void   bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric);
+void   bfa_fcs_fabric_nsymb_init(struct bfa_fcs_fabric_s *fabric);
 void bfa_fcs_fabric_set_fabric_name(struct bfa_fcs_fabric_s *fabric,
               wwn_t fabric_name);
 u16 bfa_fcs_fabric_get_switch_oui(struct bfa_fcs_fabric_s *fabric);
 void bfa_fcs_uf_attach(struct bfa_fcs_s *fcs);
 void bfa_fcs_port_attach(struct bfa_fcs_s *fcs);
+void bfa_fcs_fabric_modstop(struct bfa_fcs_s *fcs);
 void bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric,
                        enum bfa_fcs_fabric_event event);
 void bfa_fcs_fabric_sm_loopback(struct bfa_fcs_fabric_s *fabric,
index 9272840a2409fe6075c26604d9d62bc135e0774c..6dc7926a3edd4d9927199530b57af61554a492d3 100644 (file)
@@ -40,25 +40,6 @@ static void  bfa_fcs_itnim_prli_response(void *fcsarg,
 static void    bfa_fcs_itnim_aen_post(struct bfa_fcs_itnim_s *itnim,
                        enum bfa_itnim_aen_event event);
 
-/*
- *  fcs_itnim_sm FCS itnim state machine events
- */
-
-enum bfa_fcs_itnim_event {
-       BFA_FCS_ITNIM_SM_ONLINE = 1,    /*  rport online event */
-       BFA_FCS_ITNIM_SM_OFFLINE = 2,   /*  rport offline */
-       BFA_FCS_ITNIM_SM_FRMSENT = 3,   /*  prli frame is sent */
-       BFA_FCS_ITNIM_SM_RSP_OK = 4,    /*  good response */
-       BFA_FCS_ITNIM_SM_RSP_ERROR = 5, /*  error response */
-       BFA_FCS_ITNIM_SM_TIMEOUT = 6,   /*  delay timeout */
-       BFA_FCS_ITNIM_SM_HCB_OFFLINE = 7, /*  BFA online callback */
-       BFA_FCS_ITNIM_SM_HCB_ONLINE = 8, /*  BFA offline callback */
-       BFA_FCS_ITNIM_SM_INITIATOR = 9, /*  rport is initiator */
-       BFA_FCS_ITNIM_SM_DELETE = 10,   /*  delete event from rport */
-       BFA_FCS_ITNIM_SM_PRLO = 11,     /*  delete event from rport */
-       BFA_FCS_ITNIM_SM_RSP_NOT_SUPP = 12, /* cmd not supported rsp */
-};
-
 static void    bfa_fcs_itnim_sm_offline(struct bfa_fcs_itnim_s *itnim,
                                         enum bfa_fcs_itnim_event event);
 static void    bfa_fcs_itnim_sm_prli_send(struct bfa_fcs_itnim_s *itnim,
@@ -69,6 +50,8 @@ static void   bfa_fcs_itnim_sm_prli_retry(struct bfa_fcs_itnim_s *itnim,
                                            enum bfa_fcs_itnim_event event);
 static void    bfa_fcs_itnim_sm_hcb_online(struct bfa_fcs_itnim_s *itnim,
                                            enum bfa_fcs_itnim_event event);
+static void    bfa_fcs_itnim_sm_hal_rport_online(struct bfa_fcs_itnim_s *itnim,
+                                       enum bfa_fcs_itnim_event event);
 static void    bfa_fcs_itnim_sm_online(struct bfa_fcs_itnim_s *itnim,
                                        enum bfa_fcs_itnim_event event);
 static void    bfa_fcs_itnim_sm_hcb_offline(struct bfa_fcs_itnim_s *itnim,
@@ -99,7 +82,7 @@ bfa_fcs_itnim_sm_offline(struct bfa_fcs_itnim_s *itnim,
        bfa_trc(itnim->fcs, event);
 
        switch (event) {
-       case BFA_FCS_ITNIM_SM_ONLINE:
+       case BFA_FCS_ITNIM_SM_FCS_ONLINE:
                bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_prli_send);
                itnim->prli_retries = 0;
                bfa_fcs_itnim_send_prli(itnim, NULL);
@@ -138,6 +121,7 @@ bfa_fcs_itnim_sm_prli_send(struct bfa_fcs_itnim_s *itnim,
        case BFA_FCS_ITNIM_SM_INITIATOR:
                bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator);
                bfa_fcxp_walloc_cancel(itnim->fcs->bfa, &itnim->fcxp_wqe);
+               bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_FCS_ONLINE);
                break;
 
        case BFA_FCS_ITNIM_SM_OFFLINE:
@@ -166,12 +150,13 @@ bfa_fcs_itnim_sm_prli(struct bfa_fcs_itnim_s *itnim,
 
        switch (event) {
        case BFA_FCS_ITNIM_SM_RSP_OK:
-               if (itnim->rport->scsi_function == BFA_RPORT_INITIATOR) {
+               if (itnim->rport->scsi_function == BFA_RPORT_INITIATOR)
                        bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator);
-               } else {
-                       bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_hcb_online);
-                       bfa_itnim_online(itnim->bfa_itnim, itnim->seq_rec);
-               }
+               else
+                       bfa_sm_set_state(itnim,
+                               bfa_fcs_itnim_sm_hal_rport_online);
+
+               bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_FCS_ONLINE);
                break;
 
        case BFA_FCS_ITNIM_SM_RSP_ERROR:
@@ -194,6 +179,7 @@ bfa_fcs_itnim_sm_prli(struct bfa_fcs_itnim_s *itnim,
        case BFA_FCS_ITNIM_SM_INITIATOR:
                bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator);
                bfa_fcxp_discard(itnim->fcxp);
+               bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_FCS_ONLINE);
                break;
 
        case BFA_FCS_ITNIM_SM_DELETE:
@@ -207,6 +193,44 @@ bfa_fcs_itnim_sm_prli(struct bfa_fcs_itnim_s *itnim,
        }
 }
 
+static void
+bfa_fcs_itnim_sm_hal_rport_online(struct bfa_fcs_itnim_s *itnim,
+                               enum bfa_fcs_itnim_event event)
+{
+       bfa_trc(itnim->fcs, itnim->rport->pwwn);
+       bfa_trc(itnim->fcs, event);
+
+       switch (event) {
+       case BFA_FCS_ITNIM_SM_HAL_ONLINE:
+               if (!itnim->bfa_itnim)
+                       itnim->bfa_itnim = bfa_itnim_create(itnim->fcs->bfa,
+                                       itnim->rport->bfa_rport, itnim);
+
+               if (itnim->bfa_itnim) {
+                       bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_hcb_online);
+                       bfa_itnim_online(itnim->bfa_itnim, itnim->seq_rec);
+               } else {
+                       bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
+                       bfa_sm_send_event(itnim->rport, RPSM_EVENT_DELETE);
+               }
+
+               break;
+
+       case BFA_FCS_ITNIM_SM_OFFLINE:
+               bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
+               bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE);
+               break;
+
+       case BFA_FCS_ITNIM_SM_DELETE:
+               bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
+               bfa_fcs_itnim_free(itnim);
+               break;
+
+       default:
+               bfa_sm_fault(itnim->fcs, event);
+       }
+}
+
 static void
 bfa_fcs_itnim_sm_prli_retry(struct bfa_fcs_itnim_s *itnim,
                            enum bfa_fcs_itnim_event event)
@@ -238,6 +262,7 @@ bfa_fcs_itnim_sm_prli_retry(struct bfa_fcs_itnim_s *itnim,
        case BFA_FCS_ITNIM_SM_INITIATOR:
                bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator);
                bfa_timer_stop(&itnim->timer);
+               bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_FCS_ONLINE);
                break;
 
        case BFA_FCS_ITNIM_SM_DELETE:
@@ -275,9 +300,8 @@ bfa_fcs_itnim_sm_hcb_online(struct bfa_fcs_itnim_s *itnim,
                break;
 
        case BFA_FCS_ITNIM_SM_OFFLINE:
-               bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
+               bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_hcb_offline);
                bfa_itnim_offline(itnim->bfa_itnim);
-               bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE);
                break;
 
        case BFA_FCS_ITNIM_SM_DELETE:
@@ -372,8 +396,14 @@ bfa_fcs_itnim_sm_initiator(struct bfa_fcs_itnim_s *itnim,
                bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE);
                break;
 
+       /*
+        * fcs_online is expected here for well known initiator ports
+        */
+       case BFA_FCS_ITNIM_SM_FCS_ONLINE:
+               bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_FCS_ONLINE);
+               break;
+
        case BFA_FCS_ITNIM_SM_RSP_ERROR:
-       case BFA_FCS_ITNIM_SM_ONLINE:
        case BFA_FCS_ITNIM_SM_INITIATOR:
                break;
 
@@ -426,11 +456,12 @@ bfa_fcs_itnim_send_prli(void *itnim_cbarg, struct bfa_fcxp_s *fcxp_alloced)
 
        bfa_trc(itnim->fcs, itnim->rport->pwwn);
 
-       fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
+       fcxp = fcxp_alloced ? fcxp_alloced :
+              bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
        if (!fcxp) {
                itnim->stats.fcxp_alloc_wait++;
                bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &itnim->fcxp_wqe,
-                                   bfa_fcs_itnim_send_prli, itnim);
+                               bfa_fcs_itnim_send_prli, itnim, BFA_TRUE);
                return;
        }
        itnim->fcxp = fcxp;
@@ -483,7 +514,7 @@ bfa_fcs_itnim_prli_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
                        if (prli_resp->parampage.servparams.initiator) {
                                bfa_trc(itnim->fcs, prli_resp->parampage.type);
                                itnim->rport->scsi_function =
-                                        BFA_RPORT_INITIATOR;
+                                               BFA_RPORT_INITIATOR;
                                itnim->stats.prli_rsp_acc++;
                                itnim->stats.initiator++;
                                bfa_sm_send_event(itnim,
@@ -531,7 +562,11 @@ bfa_fcs_itnim_timeout(void *arg)
 static void
 bfa_fcs_itnim_free(struct bfa_fcs_itnim_s *itnim)
 {
-       bfa_itnim_delete(itnim->bfa_itnim);
+       if (itnim->bfa_itnim) {
+               bfa_itnim_delete(itnim->bfa_itnim);
+               itnim->bfa_itnim = NULL;
+       }
+
        bfa_fcb_itnim_free(itnim->fcs->bfad, itnim->itnim_drv);
 }
 
@@ -552,7 +587,6 @@ bfa_fcs_itnim_create(struct bfa_fcs_rport_s *rport)
        struct bfa_fcs_lport_s *port = rport->port;
        struct bfa_fcs_itnim_s *itnim;
        struct bfad_itnim_s   *itnim_drv;
-       struct bfa_itnim_s *bfa_itnim;
 
        /*
         * call bfad to allocate the itnim
@@ -570,20 +604,7 @@ bfa_fcs_itnim_create(struct bfa_fcs_rport_s *rport)
        itnim->fcs = rport->fcs;
        itnim->itnim_drv = itnim_drv;
 
-       /*
-        * call BFA to create the itnim
-        */
-       bfa_itnim =
-               bfa_itnim_create(port->fcs->bfa, rport->bfa_rport, itnim);
-
-       if (bfa_itnim == NULL) {
-               bfa_trc(port->fcs, rport->pwwn);
-               bfa_fcb_itnim_free(port->fcs->bfad, itnim_drv);
-               WARN_ON(1);
-               return NULL;
-       }
-
-       itnim->bfa_itnim     = bfa_itnim;
+       itnim->bfa_itnim     = NULL;
        itnim->seq_rec       = BFA_FALSE;
        itnim->rec_support   = BFA_FALSE;
        itnim->conf_comp     = BFA_FALSE;
@@ -613,20 +634,12 @@ bfa_fcs_itnim_delete(struct bfa_fcs_itnim_s *itnim)
  * Notification from rport that PLOGI is complete to initiate FC-4 session.
  */
 void
-bfa_fcs_itnim_rport_online(struct bfa_fcs_itnim_s *itnim)
+bfa_fcs_itnim_brp_online(struct bfa_fcs_itnim_s *itnim)
 {
        itnim->stats.onlines++;
 
-       if (!BFA_FCS_PID_IS_WKA(itnim->rport->pid)) {
-               bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_ONLINE);
-       } else {
-               /*
-                *  For well known addresses, we set the itnim to initiator
-                *  state
-                */
-               itnim->stats.initiator++;
-               bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_INITIATOR);
-       }
+       if (!BFA_FCS_PID_IS_WKA(itnim->rport->pid))
+               bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_HAL_ONLINE);
 }
 
 /*
index bcc4966e8ba484d6f6772e6dbca499336ead788a..3b75f6fb2de1fbf29b189715e5a3af9d17283343 100644 (file)
@@ -131,6 +131,8 @@ bfa_fcs_lport_sm_init(struct bfa_fcs_lport_s *port,
                /* If vport - send completion call back */
                if (port->vport)
                        bfa_fcs_vport_stop_comp(port->vport);
+               else
+                       bfa_wc_down(&(port->fabric->stop_wc));
                break;
 
        case BFA_FCS_PORT_SM_OFFLINE:
@@ -166,6 +168,8 @@ bfa_fcs_lport_sm_online(
                        /* If vport - send completion call back */
                        if (port->vport)
                                bfa_fcs_vport_stop_comp(port->vport);
+                       else
+                               bfa_wc_down(&(port->fabric->stop_wc));
                } else {
                        bfa_sm_set_state(port, bfa_fcs_lport_sm_stopping);
                        list_for_each_safe(qe, qen, &port->rport_q) {
@@ -222,6 +226,8 @@ bfa_fcs_lport_sm_offline(
                        /* If vport - send completion call back */
                        if (port->vport)
                                bfa_fcs_vport_stop_comp(port->vport);
+                       else
+                               bfa_wc_down(&(port->fabric->stop_wc));
                } else {
                        bfa_sm_set_state(port, bfa_fcs_lport_sm_stopping);
                        list_for_each_safe(qe, qen, &port->rport_q) {
@@ -267,6 +273,8 @@ bfa_fcs_lport_sm_stopping(struct bfa_fcs_lport_s *port,
                        /* If vport - send completion call back */
                        if (port->vport)
                                bfa_fcs_vport_stop_comp(port->vport);
+                       else
+                               bfa_wc_down(&(port->fabric->stop_wc));
                }
                break;
 
@@ -340,7 +348,7 @@ bfa_fcs_lport_send_ls_rjt(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs,
        bfa_trc(port->fcs, rx_fchs->d_id);
        bfa_trc(port->fcs, rx_fchs->s_id);
 
-       fcxp = bfa_fcs_fcxp_alloc(port->fcs);
+       fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
        if (!fcxp)
                return;
 
@@ -370,7 +378,7 @@ bfa_fcs_lport_send_fcgs_rjt(struct bfa_fcs_lport_s *port,
        bfa_trc(port->fcs, rx_fchs->d_id);
        bfa_trc(port->fcs, rx_fchs->s_id);
 
-       fcxp = bfa_fcs_fcxp_alloc(port->fcs);
+       fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
        if (!fcxp)
                return;
 
@@ -507,7 +515,7 @@ bfa_fcs_lport_echo(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs,
        bfa_trc(port->fcs, rx_fchs->s_id);
        bfa_trc(port->fcs, rx_fchs->d_id);
 
-       fcxp = bfa_fcs_fcxp_alloc(port->fcs);
+       fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
        if (!fcxp)
                return;
 
@@ -552,7 +560,7 @@ bfa_fcs_lport_rnid(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs,
        bfa_trc(port->fcs, rx_fchs->d_id);
        bfa_trc(port->fcs, rx_len);
 
-       fcxp = bfa_fcs_fcxp_alloc(port->fcs);
+       fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
        if (!fcxp)
                return;
 
@@ -684,7 +692,7 @@ bfa_fcs_lport_abts_acc(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs)
        bfa_trc(port->fcs, rx_fchs->d_id);
        bfa_trc(port->fcs, rx_fchs->s_id);
 
-       fcxp = bfa_fcs_fcxp_alloc(port->fcs);
+       fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
        if (!fcxp)
                return;
 
@@ -853,6 +861,25 @@ bfa_fcs_lport_get_rport_by_pid(struct bfa_fcs_lport_s *port, u32 pid)
        return NULL;
 }
 
+/*
+ * OLD_PID based Lookup for a R-Port in the Port R-Port Queue
+ */
+struct bfa_fcs_rport_s *
+bfa_fcs_lport_get_rport_by_old_pid(struct bfa_fcs_lport_s *port, u32 pid)
+{
+       struct bfa_fcs_rport_s *rport;
+       struct list_head        *qe;
+
+       list_for_each(qe, &port->rport_q) {
+               rport = (struct bfa_fcs_rport_s *) qe;
+               if (rport->old_pid == pid)
+                       return rport;
+       }
+
+       bfa_trc(port->fcs, pid);
+       return NULL;
+}
+
 /*
  *   PWWN based Lookup for a R-Port in the Port R-Port Queue
  */
@@ -891,6 +918,26 @@ bfa_fcs_lport_get_rport_by_nwwn(struct bfa_fcs_lport_s *port, wwn_t nwwn)
        return NULL;
 }
 
+/*
+ * PWWN & PID based Lookup for a R-Port in the Port R-Port Queue
+ */
+struct bfa_fcs_rport_s *
+bfa_fcs_lport_get_rport_by_qualifier(struct bfa_fcs_lport_s *port,
+                                    wwn_t pwwn, u32 pid)
+{
+       struct bfa_fcs_rport_s *rport;
+       struct list_head        *qe;
+
+       list_for_each(qe, &port->rport_q) {
+               rport = (struct bfa_fcs_rport_s *) qe;
+               if (wwn_is_equal(rport->pwwn, pwwn) && rport->pid == pid)
+                       return rport;
+       }
+
+       bfa_trc(port->fcs, pwwn);
+       return NULL;
+}
+
 /*
  * Called by rport module when new rports are discovered.
  */
@@ -938,6 +985,16 @@ bfa_fcs_lport_offline(struct bfa_fcs_lport_s *port)
        bfa_sm_send_event(port, BFA_FCS_PORT_SM_OFFLINE);
 }
 
+/*
+ * Called by fabric for base port and by vport for virtual ports
+ * when target mode driver is unloaded.
+ */
+void
+bfa_fcs_lport_stop(struct bfa_fcs_lport_s *port)
+{
+       bfa_sm_send_event(port, BFA_FCS_PORT_SM_STOP);
+}
+
 /*
  * Called by fabric to delete base lport and associated resources.
  *
@@ -1657,10 +1714,11 @@ bfa_fcs_lport_fdmi_send_rhba(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced)
 
        bfa_trc(port->fcs, port->port_cfg.pwwn);
 
-       fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
+       fcxp = fcxp_alloced ? fcxp_alloced :
+              bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
        if (!fcxp) {
                bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &fdmi->fcxp_wqe,
-                                       bfa_fcs_lport_fdmi_send_rhba, fdmi);
+                               bfa_fcs_lport_fdmi_send_rhba, fdmi, BFA_TRUE);
                return;
        }
        fdmi->fcxp = fcxp;
@@ -1931,10 +1989,11 @@ bfa_fcs_lport_fdmi_send_rprt(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced)
 
        bfa_trc(port->fcs, port->port_cfg.pwwn);
 
-       fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
+       fcxp = fcxp_alloced ? fcxp_alloced :
+              bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
        if (!fcxp) {
                bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &fdmi->fcxp_wqe,
-                                       bfa_fcs_lport_fdmi_send_rprt, fdmi);
+                               bfa_fcs_lport_fdmi_send_rprt, fdmi, BFA_TRUE);
                return;
        }
        fdmi->fcxp = fcxp;
@@ -2146,10 +2205,11 @@ bfa_fcs_lport_fdmi_send_rpa(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced)
 
        bfa_trc(port->fcs, port->port_cfg.pwwn);
 
-       fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
+       fcxp = fcxp_alloced ? fcxp_alloced :
+              bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
        if (!fcxp) {
                bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &fdmi->fcxp_wqe,
-                                       bfa_fcs_lport_fdmi_send_rpa, fdmi);
+                               bfa_fcs_lport_fdmi_send_rpa, fdmi, BFA_TRUE);
                return;
        }
        fdmi->fcxp = fcxp;
@@ -2736,10 +2796,11 @@ bfa_fcs_lport_ms_send_gmal(void *ms_cbarg, struct bfa_fcxp_s *fcxp_alloced)
 
        bfa_trc(port->fcs, port->pid);
 
-       fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
+       fcxp = fcxp_alloced ? fcxp_alloced :
+              bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
        if (!fcxp) {
                bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ms->fcxp_wqe,
-                                       bfa_fcs_lport_ms_send_gmal, ms);
+                               bfa_fcs_lport_ms_send_gmal, ms, BFA_TRUE);
                return;
        }
        ms->fcxp = fcxp;
@@ -2936,10 +2997,11 @@ bfa_fcs_lport_ms_send_gfn(void *ms_cbarg, struct bfa_fcxp_s *fcxp_alloced)
 
        bfa_trc(port->fcs, port->pid);
 
-       fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
+       fcxp = fcxp_alloced ? fcxp_alloced :
+              bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
        if (!fcxp) {
                bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ms->fcxp_wqe,
-                                       bfa_fcs_lport_ms_send_gfn, ms);
+                               bfa_fcs_lport_ms_send_gfn, ms, BFA_TRUE);
                return;
        }
        ms->fcxp = fcxp;
@@ -3012,11 +3074,12 @@ bfa_fcs_lport_ms_send_plogi(void *ms_cbarg, struct bfa_fcxp_s *fcxp_alloced)
 
        bfa_trc(port->fcs, port->pid);
 
-       fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
+       fcxp = fcxp_alloced ? fcxp_alloced :
+              bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
        if (!fcxp) {
                port->stats.ms_plogi_alloc_wait++;
                bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ms->fcxp_wqe,
-                                       bfa_fcs_lport_ms_send_plogi, ms);
+                               bfa_fcs_lport_ms_send_plogi, ms, BFA_TRUE);
                return;
        }
        ms->fcxp = fcxp;
@@ -3166,6 +3229,10 @@ static void     bfa_fcs_lport_ns_send_rff_id(void *ns_cbarg,
                                            struct bfa_fcxp_s *fcxp_alloced);
 static void     bfa_fcs_lport_ns_send_gid_ft(void *ns_cbarg,
                                            struct bfa_fcxp_s *fcxp_alloced);
+static void    bfa_fcs_lport_ns_send_rnn_id(void *ns_cbarg,
+                                       struct bfa_fcxp_s *fcxp_alloced);
+static void    bfa_fcs_lport_ns_send_rsnn_nn(void *ns_cbarg,
+                                       struct bfa_fcxp_s *fcxp_alloced);
 static void     bfa_fcs_lport_ns_timeout(void *arg);
 static void     bfa_fcs_lport_ns_plogi_response(void *fcsarg,
                                               struct bfa_fcxp_s *fcxp,
@@ -3202,6 +3269,20 @@ static void     bfa_fcs_lport_ns_gid_ft_response(void *fcsarg,
                                                u32 rsp_len,
                                                u32 resid_len,
                                                struct fchs_s *rsp_fchs);
+static void     bfa_fcs_lport_ns_rnn_id_response(void *fcsarg,
+                                               struct bfa_fcxp_s *fcxp,
+                                               void *cbarg,
+                                               bfa_status_t req_status,
+                                               u32 rsp_len,
+                                               u32 resid_len,
+                                               struct fchs_s *rsp_fchs);
+static void     bfa_fcs_lport_ns_rsnn_nn_response(void *fcsarg,
+                                               struct bfa_fcxp_s *fcxp,
+                                               void *cbarg,
+                                               bfa_status_t req_status,
+                                               u32 rsp_len,
+                                               u32 resid_len,
+                                               struct fchs_s *rsp_fchs);
 static void     bfa_fcs_lport_ns_process_gidft_pids(
                                struct bfa_fcs_lport_s *port,
                                u32 *pid_buf, u32 n_pids);
@@ -3226,6 +3307,8 @@ enum vport_ns_event {
        NSSM_EVENT_RFTID_SENT = 9,
        NSSM_EVENT_RFFID_SENT = 10,
        NSSM_EVENT_GIDFT_SENT = 11,
+       NSSM_EVENT_RNNID_SENT = 12,
+       NSSM_EVENT_RSNN_NN_SENT = 13,
 };
 
 static void     bfa_fcs_lport_ns_sm_offline(struct bfa_fcs_lport_ns_s *ns,
@@ -3266,6 +3349,21 @@ static void     bfa_fcs_lport_ns_sm_gid_ft_retry(struct bfa_fcs_lport_ns_s *ns,
                                                enum vport_ns_event event);
 static void     bfa_fcs_lport_ns_sm_online(struct bfa_fcs_lport_ns_s *ns,
                                          enum vport_ns_event event);
+static void     bfa_fcs_lport_ns_sm_sending_rnn_id(
+                                       struct bfa_fcs_lport_ns_s *ns,
+                                       enum vport_ns_event event);
+static void     bfa_fcs_lport_ns_sm_rnn_id(struct bfa_fcs_lport_ns_s *ns,
+                                       enum vport_ns_event event);
+static void     bfa_fcs_lport_ns_sm_rnn_id_retry(struct bfa_fcs_lport_ns_s *ns,
+                                               enum vport_ns_event event);
+static void     bfa_fcs_lport_ns_sm_sending_rsnn_nn(
+                                       struct bfa_fcs_lport_ns_s *ns,
+                                       enum vport_ns_event event);
+static void     bfa_fcs_lport_ns_sm_rsnn_nn(struct bfa_fcs_lport_ns_s *ns,
+                                               enum vport_ns_event event);
+static void     bfa_fcs_lport_ns_sm_rsnn_nn_retry(
+                                       struct bfa_fcs_lport_ns_s *ns,
+                                       enum vport_ns_event event);
 /*
  *     Start in offline state - awaiting linkup
  */
@@ -3333,8 +3431,9 @@ bfa_fcs_lport_ns_sm_plogi(struct bfa_fcs_lport_ns_s *ns,
                break;
 
        case NSSM_EVENT_RSP_OK:
-               bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rspn_id);
-               bfa_fcs_lport_ns_send_rspn_id(ns, NULL);
+               bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rnn_id);
+               ns->num_rnnid_retries = 0;
+               bfa_fcs_lport_ns_send_rnn_id(ns, NULL);
                break;
 
        case NSSM_EVENT_PORT_OFFLINE:
@@ -3373,6 +3472,176 @@ bfa_fcs_lport_ns_sm_plogi_retry(struct bfa_fcs_lport_ns_s *ns,
        }
 }
 
+static void
+bfa_fcs_lport_ns_sm_sending_rnn_id(struct bfa_fcs_lport_ns_s *ns,
+                                       enum vport_ns_event event)
+{
+       bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
+       bfa_trc(ns->port->fcs, event);
+
+       switch (event) {
+       case NSSM_EVENT_RNNID_SENT:
+               bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rnn_id);
+               break;
+
+       case NSSM_EVENT_PORT_OFFLINE:
+               bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
+               bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
+                                               &ns->fcxp_wqe);
+               break;
+       default:
+               bfa_sm_fault(ns->port->fcs, event);
+       }
+}
+
+static void
+bfa_fcs_lport_ns_sm_rnn_id(struct bfa_fcs_lport_ns_s *ns,
+                               enum vport_ns_event event)
+{
+       bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
+       bfa_trc(ns->port->fcs, event);
+
+       switch (event) {
+       case NSSM_EVENT_RSP_OK:
+               bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rsnn_nn);
+               ns->num_rnnid_retries = 0;
+               ns->num_rsnn_nn_retries = 0;
+               bfa_fcs_lport_ns_send_rsnn_nn(ns, NULL);
+               break;
+
+       case NSSM_EVENT_RSP_ERROR:
+               if (ns->num_rnnid_retries < BFA_FCS_MAX_NS_RETRIES) {
+                       bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rnn_id_retry);
+                       ns->port->stats.ns_retries++;
+                       ns->num_rnnid_retries++;
+                       bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
+                               &ns->timer, bfa_fcs_lport_ns_timeout, ns,
+                               BFA_FCS_RETRY_TIMEOUT);
+               } else {
+                       bfa_sm_set_state(ns,
+                               bfa_fcs_lport_ns_sm_sending_rspn_id);
+                       bfa_fcs_lport_ns_send_rspn_id(ns, NULL);
+               }
+               break;
+
+       case NSSM_EVENT_PORT_OFFLINE:
+               bfa_fcxp_discard(ns->fcxp);
+               bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
+               break;
+
+       default:
+               bfa_sm_fault(ns->port->fcs, event);
+       }
+}
+
+static void
+bfa_fcs_lport_ns_sm_rnn_id_retry(struct bfa_fcs_lport_ns_s *ns,
+                               enum vport_ns_event event)
+{
+       bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
+       bfa_trc(ns->port->fcs, event);
+
+       switch (event) {
+       case NSSM_EVENT_TIMEOUT:
+               bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rnn_id);
+               bfa_fcs_lport_ns_send_rnn_id(ns, NULL);
+               break;
+
+       case NSSM_EVENT_PORT_OFFLINE:
+               bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
+               bfa_timer_stop(&ns->timer);
+               break;
+
+       default:
+               bfa_sm_fault(ns->port->fcs, event);
+       }
+}
+
+static void
+bfa_fcs_lport_ns_sm_sending_rsnn_nn(struct bfa_fcs_lport_ns_s *ns,
+                                       enum vport_ns_event event)
+{
+       bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
+       bfa_trc(ns->port->fcs, event);
+
+       switch (event) {
+       case NSSM_EVENT_RSNN_NN_SENT:
+               bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rsnn_nn);
+               break;
+
+       case NSSM_EVENT_PORT_OFFLINE:
+               bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
+               bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
+                       &ns->fcxp_wqe);
+               break;
+
+       default:
+               bfa_sm_fault(ns->port->fcs, event);
+       }
+}
+
+static void
+bfa_fcs_lport_ns_sm_rsnn_nn(struct bfa_fcs_lport_ns_s *ns,
+                               enum vport_ns_event event)
+{
+       bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
+       bfa_trc(ns->port->fcs, event);
+
+       switch (event) {
+       case NSSM_EVENT_RSP_OK:
+               bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rspn_id);
+               ns->num_rsnn_nn_retries = 0;
+               bfa_fcs_lport_ns_send_rspn_id(ns, NULL);
+               break;
+
+       case NSSM_EVENT_RSP_ERROR:
+               if (ns->num_rsnn_nn_retries < BFA_FCS_MAX_NS_RETRIES) {
+                       bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rsnn_nn_retry);
+                       ns->port->stats.ns_retries++;
+                       ns->num_rsnn_nn_retries++;
+                       bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
+                                       &ns->timer, bfa_fcs_lport_ns_timeout,
+                                       ns, BFA_FCS_RETRY_TIMEOUT);
+               } else {
+                       bfa_sm_set_state(ns,
+                               bfa_fcs_lport_ns_sm_sending_rspn_id);
+                       bfa_fcs_lport_ns_send_rspn_id(ns, NULL);
+               }
+               break;
+
+       case NSSM_EVENT_PORT_OFFLINE:
+               bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
+               bfa_fcxp_discard(ns->fcxp);
+               break;
+
+       default:
+               bfa_sm_fault(ns->port->fcs, event);
+       }
+}
+
+static void
+bfa_fcs_lport_ns_sm_rsnn_nn_retry(struct bfa_fcs_lport_ns_s *ns,
+                                       enum vport_ns_event event)
+{
+       bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
+       bfa_trc(ns->port->fcs, event);
+
+       switch (event) {
+       case NSSM_EVENT_TIMEOUT:
+               bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rsnn_nn);
+               bfa_fcs_lport_ns_send_rsnn_nn(ns, NULL);
+               break;
+
+       case NSSM_EVENT_PORT_OFFLINE:
+               bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
+               bfa_timer_stop(&ns->timer);
+               break;
+
+       default:
+               bfa_sm_fault(ns->port->fcs, event);
+       }
+}
+
 static void
 bfa_fcs_lport_ns_sm_sending_rspn_id(struct bfa_fcs_lport_ns_s *ns,
                                   enum vport_ns_event event)
@@ -3770,11 +4039,12 @@ bfa_fcs_lport_ns_send_plogi(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
 
        bfa_trc(port->fcs, port->pid);
 
-       fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
+       fcxp = fcxp_alloced ? fcxp_alloced :
+              bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
        if (!fcxp) {
                port->stats.ns_plogi_alloc_wait++;
                bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
-                                       bfa_fcs_lport_ns_send_plogi, ns);
+                               bfa_fcs_lport_ns_send_plogi, ns, BFA_TRUE);
                return;
        }
        ns->fcxp = fcxp;
@@ -3852,6 +4122,162 @@ bfa_fcs_lport_ns_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
        }
 }
 
+/*
+ * Register node name for port_id
+ */
+static void
+bfa_fcs_lport_ns_send_rnn_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
+{
+       struct bfa_fcs_lport_ns_s *ns = ns_cbarg;
+       struct bfa_fcs_lport_s *port = ns->port;
+       struct fchs_s  fchs;
+       int     len;
+       struct bfa_fcxp_s *fcxp;
+
+       bfa_trc(port->fcs, port->port_cfg.pwwn);
+
+       fcxp = fcxp_alloced ? fcxp_alloced :
+                       bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
+       if (!fcxp) {
+               port->stats.ns_rnnid_alloc_wait++;
+               bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
+                               bfa_fcs_lport_ns_send_rnn_id, ns, BFA_TRUE);
+               return;
+       }
+
+       ns->fcxp = fcxp;
+
+       len = fc_rnnid_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
+                               bfa_fcs_lport_get_fcid(port),
+                               bfa_fcs_lport_get_fcid(port),
+                               bfa_fcs_lport_get_nwwn(port));
+
+       bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
+                         FC_CLASS_3, len, &fchs,
+                         bfa_fcs_lport_ns_rnn_id_response, (void *)ns,
+                         FC_MAX_PDUSZ, FC_FCCT_TOV);
+
+       port->stats.ns_rnnid_sent++;
+       bfa_sm_send_event(ns, NSSM_EVENT_RNNID_SENT);
+}
+
+static void
+bfa_fcs_lport_ns_rnn_id_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
+                               void *cbarg, bfa_status_t req_status,
+                               u32 rsp_len, u32 resid_len,
+                               struct fchs_s *rsp_fchs)
+
+{
+       struct bfa_fcs_lport_ns_s *ns = (struct bfa_fcs_lport_ns_s *) cbarg;
+       struct bfa_fcs_lport_s *port = ns->port;
+       struct ct_hdr_s *cthdr = NULL;
+
+       bfa_trc(port->fcs, port->port_cfg.pwwn);
+
+       /*
+        * Sanity Checks
+        */
+       if (req_status != BFA_STATUS_OK) {
+               bfa_trc(port->fcs, req_status);
+               port->stats.ns_rnnid_rsp_err++;
+               bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
+               return;
+       }
+
+       cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
+       cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
+
+       if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
+               port->stats.ns_rnnid_accepts++;
+               bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK);
+               return;
+       }
+
+       port->stats.ns_rnnid_rejects++;
+       bfa_trc(port->fcs, cthdr->reason_code);
+       bfa_trc(port->fcs, cthdr->exp_code);
+       bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
+}
+
+/*
+ * Register the symbolic node name for a given node name.
+ */
+static void
+bfa_fcs_lport_ns_send_rsnn_nn(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
+{
+       struct bfa_fcs_lport_ns_s *ns = ns_cbarg;
+       struct bfa_fcs_lport_s *port = ns->port;
+       struct fchs_s  fchs;
+       int     len;
+       struct bfa_fcxp_s *fcxp;
+       u8 *nsymbl;
+
+       bfa_trc(port->fcs, port->port_cfg.pwwn);
+
+       fcxp = fcxp_alloced ? fcxp_alloced :
+                       bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
+       if (!fcxp) {
+               port->stats.ns_rsnn_nn_alloc_wait++;
+               bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
+                               bfa_fcs_lport_ns_send_rsnn_nn, ns, BFA_TRUE);
+               return;
+       }
+       ns->fcxp = fcxp;
+
+       nsymbl = (u8 *) &(bfa_fcs_lport_get_nsym_name(
+                                       bfa_fcs_get_base_port(port->fcs)));
+
+       len = fc_rsnn_nn_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
+                               bfa_fcs_lport_get_fcid(port),
+                               bfa_fcs_lport_get_nwwn(port), nsymbl);
+
+       bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
+                         FC_CLASS_3, len, &fchs,
+                         bfa_fcs_lport_ns_rsnn_nn_response, (void *)ns,
+                         FC_MAX_PDUSZ, FC_FCCT_TOV);
+
+       port->stats.ns_rsnn_nn_sent++;
+
+       bfa_sm_send_event(ns, NSSM_EVENT_RSNN_NN_SENT);
+}
+
+static void
+bfa_fcs_lport_ns_rsnn_nn_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
+                               void *cbarg, bfa_status_t req_status,
+                               u32 rsp_len, u32 resid_len,
+                               struct fchs_s *rsp_fchs)
+{
+       struct bfa_fcs_lport_ns_s *ns = (struct bfa_fcs_lport_ns_s *) cbarg;
+       struct bfa_fcs_lport_s *port = ns->port;
+       struct ct_hdr_s *cthdr = NULL;
+
+       bfa_trc(port->fcs, port->port_cfg.pwwn);
+
+       /*
+        * Sanity Checks
+        */
+       if (req_status != BFA_STATUS_OK) {
+               bfa_trc(port->fcs, req_status);
+               port->stats.ns_rsnn_nn_rsp_err++;
+               bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
+               return;
+       }
+
+       cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
+       cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
+
+       if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
+               port->stats.ns_rsnn_nn_accepts++;
+               bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK);
+               return;
+       }
+
+       port->stats.ns_rsnn_nn_rejects++;
+       bfa_trc(port->fcs, cthdr->reason_code);
+       bfa_trc(port->fcs, cthdr->exp_code);
+       bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
+}
+
 /*
  * Register the symbolic port name.
  */
@@ -3870,11 +4296,12 @@ bfa_fcs_lport_ns_send_rspn_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
 
        bfa_trc(port->fcs, port->port_cfg.pwwn);
 
-       fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
+       fcxp = fcxp_alloced ? fcxp_alloced :
+              bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
        if (!fcxp) {
                port->stats.ns_rspnid_alloc_wait++;
                bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
-                                       bfa_fcs_lport_ns_send_rspn_id, ns);
+                               bfa_fcs_lport_ns_send_rspn_id, ns, BFA_TRUE);
                return;
        }
        ns->fcxp = fcxp;
@@ -3971,11 +4398,12 @@ bfa_fcs_lport_ns_send_rft_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
 
        bfa_trc(port->fcs, port->port_cfg.pwwn);
 
-       fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
+       fcxp = fcxp_alloced ? fcxp_alloced :
+              bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
        if (!fcxp) {
                port->stats.ns_rftid_alloc_wait++;
                bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
-                                       bfa_fcs_lport_ns_send_rft_id, ns);
+                               bfa_fcs_lport_ns_send_rft_id, ns, BFA_TRUE);
                return;
        }
        ns->fcxp = fcxp;
@@ -4044,11 +4472,12 @@ bfa_fcs_lport_ns_send_rff_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
 
        bfa_trc(port->fcs, port->port_cfg.pwwn);
 
-       fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
+       fcxp = fcxp_alloced ? fcxp_alloced :
+              bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
        if (!fcxp) {
                port->stats.ns_rffid_alloc_wait++;
                bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
-                                       bfa_fcs_lport_ns_send_rff_id, ns);
+                               bfa_fcs_lport_ns_send_rff_id, ns, BFA_TRUE);
                return;
        }
        ns->fcxp = fcxp;
@@ -4127,11 +4556,12 @@ bfa_fcs_lport_ns_send_gid_ft(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
 
        bfa_trc(port->fcs, port->pid);
 
-       fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
+       fcxp = fcxp_alloced ? fcxp_alloced :
+              bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
        if (!fcxp) {
                port->stats.ns_gidft_alloc_wait++;
                bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
-                                       bfa_fcs_lport_ns_send_gid_ft, ns);
+                               bfa_fcs_lport_ns_send_gid_ft, ns, BFA_TRUE);
                return;
        }
        ns->fcxp = fcxp;
@@ -4261,6 +4691,10 @@ bfa_fcs_lport_ns_process_gidft_pids(struct bfa_fcs_lport_s *port, u32 *pid_buf,
        struct fcgs_gidft_resp_s *gidft_entry;
        struct bfa_fcs_rport_s *rport;
        u32        ii;
+       struct bfa_fcs_fabric_s *fabric = port->fabric;
+       struct bfa_fcs_vport_s *vport;
+       struct list_head *qe;
+       u8 found = 0;
 
        for (ii = 0; ii < n_pids; ii++) {
                gidft_entry = (struct fcgs_gidft_resp_s *) &pid_buf[ii];
@@ -4268,6 +4702,29 @@ bfa_fcs_lport_ns_process_gidft_pids(struct bfa_fcs_lport_s *port, u32 *pid_buf,
                if (gidft_entry->pid == port->pid)
                        continue;
 
+               /*
+                * Ignore PID if it is of base port
+                * (Avoid vports discovering base port as remote port)
+                */
+               if (gidft_entry->pid == fabric->bport.pid)
+                       continue;
+
+               /*
+                * Ignore PID if it is of vport created on the same base port
+                * (Avoid vport discovering every other vport created on the
+                * same port as remote port)
+                */
+               list_for_each(qe, &fabric->vport_q) {
+                       vport = (struct bfa_fcs_vport_s *) qe;
+                       if (vport->lport.pid == gidft_entry->pid)
+                               found = 1;
+               }
+
+               if (found) {
+                       found = 0;
+                       continue;
+               }
+
                /*
                 * Check if this rport already exists
                 */
@@ -4335,7 +4792,8 @@ bfa_fcs_lport_ns_query(struct bfa_fcs_lport_s *port)
        struct bfa_fcs_lport_ns_s *ns = BFA_FCS_GET_NS_FROM_PORT(port);
 
        bfa_trc(port->fcs, port->pid);
-       bfa_sm_send_event(ns, NSSM_EVENT_NS_QUERY);
+       if (bfa_sm_cmp_state(ns, bfa_fcs_lport_ns_sm_online))
+               bfa_sm_send_event(ns, NSSM_EVENT_NS_QUERY);
 }
 
 static void
@@ -4355,6 +4813,70 @@ bfa_fcs_lport_ns_boot_target_disc(bfa_fcs_lport_t *port)
        }
 }
 
+void
+bfa_fcs_lport_ns_util_send_rspn_id(void *cbarg, struct bfa_fcxp_s *fcxp_alloced)
+{
+       struct bfa_fcs_lport_ns_s *ns = cbarg;
+       struct bfa_fcs_lport_s *port = ns->port;
+       struct fchs_s fchs;
+       struct bfa_fcxp_s *fcxp;
+       u8 symbl[256];
+       u8 *psymbl = &symbl[0];
+       int len;
+
+       if (!bfa_sm_cmp_state(port, bfa_fcs_lport_sm_online))
+               return;
+
+       /* Avoid sending RSPN in the following states. */
+       if (bfa_sm_cmp_state(ns, bfa_fcs_lport_ns_sm_offline) ||
+           bfa_sm_cmp_state(ns, bfa_fcs_lport_ns_sm_plogi_sending) ||
+           bfa_sm_cmp_state(ns, bfa_fcs_lport_ns_sm_plogi) ||
+           bfa_sm_cmp_state(ns, bfa_fcs_lport_ns_sm_plogi_retry) ||
+           bfa_sm_cmp_state(ns, bfa_fcs_lport_ns_sm_rspn_id_retry))
+               return;
+
+       memset(symbl, 0, sizeof(symbl));
+       bfa_trc(port->fcs, port->port_cfg.pwwn);
+
+       fcxp = fcxp_alloced ? fcxp_alloced :
+              bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
+       if (!fcxp) {
+               port->stats.ns_rspnid_alloc_wait++;
+               bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
+                       bfa_fcs_lport_ns_util_send_rspn_id, ns, BFA_FALSE);
+               return;
+       }
+
+       ns->fcxp = fcxp;
+
+       if (port->vport) {
+               /*
+                * For Vports, we append the vport's port symbolic name
+                * to that of the base port.
+                */
+               strncpy((char *)psymbl, (char *)&(bfa_fcs_lport_get_psym_name
+                       (bfa_fcs_get_base_port(port->fcs))),
+                       strlen((char *)&bfa_fcs_lport_get_psym_name(
+                       bfa_fcs_get_base_port(port->fcs))));
+
+               /* Ensure we have a null terminating string. */
+               ((char *)psymbl)[strlen((char *)&bfa_fcs_lport_get_psym_name(
+                bfa_fcs_get_base_port(port->fcs)))] = 0;
+
+               strncat((char *)psymbl,
+                       (char *)&(bfa_fcs_lport_get_psym_name(port)),
+                       strlen((char *)&bfa_fcs_lport_get_psym_name(port)));
+       }
+
+       len = fc_rspnid_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
+                             bfa_fcs_lport_get_fcid(port), 0, psymbl);
+
+       bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
+                     FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0);
+
+       port->stats.ns_rspnid_sent++;
+}
+
 /*
  * FCS SCN
  */
@@ -4529,10 +5051,11 @@ bfa_fcs_lport_scn_send_scr(void *scn_cbarg, struct bfa_fcxp_s *fcxp_alloced)
        bfa_trc(port->fcs, port->pid);
        bfa_trc(port->fcs, port->port_cfg.pwwn);
 
-       fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
+       fcxp = fcxp_alloced ? fcxp_alloced :
+              bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
        if (!fcxp) {
                bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &scn->fcxp_wqe,
-                                       bfa_fcs_lport_scn_send_scr, scn);
+                               bfa_fcs_lport_scn_send_scr, scn, BFA_TRUE);
                return;
        }
        scn->fcxp = fcxp;
@@ -4614,7 +5137,7 @@ bfa_fcs_lport_scn_send_ls_acc(struct bfa_fcs_lport_s *port,
 
        bfa_trc(port->fcs, rx_fchs->s_id);
 
-       fcxp = bfa_fcs_fcxp_alloc(port->fcs);
+       fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
        if (!fcxp)
                return;
 
@@ -4688,14 +5211,33 @@ static void
 bfa_fcs_lport_scn_portid_rscn(struct bfa_fcs_lport_s *port, u32 rpid)
 {
        struct bfa_fcs_rport_s *rport;
+       struct bfa_fcs_fabric_s *fabric = port->fabric;
+       struct bfa_fcs_vport_s *vport;
+       struct list_head *qe;
 
        bfa_trc(port->fcs, rpid);
 
+       /*
+        * Ignore PID if it is of base port or of vports created on the
+        * same base port. It is to avoid vports discovering base port or
+        * other vports created on same base port as remote port
+        */
+       if (rpid == fabric->bport.pid)
+               return;
+
+       list_for_each(qe, &fabric->vport_q) {
+               vport = (struct bfa_fcs_vport_s *) qe;
+               if (vport->lport.pid == rpid)
+                       return;
+       }
        /*
         * If this is an unknown device, then it just came online.
         * Otherwise let rport handle the RSCN event.
         */
        rport = bfa_fcs_lport_get_rport_by_pid(port, rpid);
+       if (!rport)
+               rport = bfa_fcs_lport_get_rport_by_old_pid(port, rpid);
+
        if (rport == NULL) {
                /*
                 * If min cfg mode is enabled, we donot need to
@@ -4888,15 +5430,15 @@ bfa_fcs_lport_get_rport(struct bfa_fcs_lport_s *port, wwn_t wwn, int index,
 }
 
 void
-bfa_fcs_lport_get_rports(struct bfa_fcs_lport_s *port,
-        wwn_t rport_wwns[], int *nrports)
+bfa_fcs_lport_get_rport_quals(struct bfa_fcs_lport_s *port,
+               struct bfa_rport_qualifier_s rports[], int *nrports)
 {
        struct list_head        *qh, *qe;
        struct bfa_fcs_rport_s *rport = NULL;
        int     i;
        struct bfa_fcs_s        *fcs;
 
-       if (port == NULL || rport_wwns == NULL || *nrports == 0)
+       if (port == NULL || rports == NULL || *nrports == 0)
                return;
 
        fcs = port->fcs;
@@ -4916,7 +5458,13 @@ bfa_fcs_lport_get_rports(struct bfa_fcs_lport_s *port,
                        continue;
                }
 
-               rport_wwns[i] = rport->pwwn;
+               if (!rport->pwwn && !rport->pid) {
+                       qe = bfa_q_next(qe);
+                       continue;
+               }
+
+               rports[i].pwwn = rport->pwwn;
+               rports[i].pid = rport->pid;
 
                i++;
                qe = bfa_q_next(qe);
@@ -5760,6 +6308,16 @@ bfa_fcs_vport_cleanup(struct bfa_fcs_vport_s *vport)
 {
        vport->vport_stats.fab_cleanup++;
 }
+
+/*
+ * Stop notification from fabric SM. To be invoked from within FCS.
+ */
+void
+bfa_fcs_vport_fcs_stop(struct bfa_fcs_vport_s *vport)
+{
+       bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_STOP);
+}
+
 /*
  * delete notification from fabric SM. To be invoked from within FCS.
  */
index fe0463a1db0456993ac9c9de8de991d6e6ce9941..cc43b2a58ce33ac9478cc25b9c9fc0509f4eeb96 100644 (file)
@@ -29,6 +29,12 @@ BFA_TRC_FILE(FCS, RPORT);
 static u32
 bfa_fcs_rport_del_timeout = BFA_FCS_RPORT_DEF_DEL_TIMEOUT * 1000;
         /* In millisecs */
+/*
+ * bfa_fcs_rport_max_logins is max count of bfa_fcs_rports
+ * whereas DEF_CFG_NUM_RPORTS is max count of bfa_rports
+ */
+static u32 bfa_fcs_rport_max_logins = BFA_FCS_MAX_RPORT_LOGINS;
+
 /*
  * forward declarations
  */
@@ -36,8 +42,10 @@ static struct bfa_fcs_rport_s *bfa_fcs_rport_alloc(
                struct bfa_fcs_lport_s *port, wwn_t pwwn, u32 rpid);
 static void    bfa_fcs_rport_free(struct bfa_fcs_rport_s *rport);
 static void    bfa_fcs_rport_hal_online(struct bfa_fcs_rport_s *rport);
-static void    bfa_fcs_rport_online_action(struct bfa_fcs_rport_s *rport);
-static void    bfa_fcs_rport_offline_action(struct bfa_fcs_rport_s *rport);
+static void    bfa_fcs_rport_fcs_online_action(struct bfa_fcs_rport_s *rport);
+static void    bfa_fcs_rport_hal_online_action(struct bfa_fcs_rport_s *rport);
+static void    bfa_fcs_rport_fcs_offline_action(struct bfa_fcs_rport_s *rport);
+static void    bfa_fcs_rport_hal_offline_action(struct bfa_fcs_rport_s *rport);
 static void    bfa_fcs_rport_update(struct bfa_fcs_rport_s *rport,
                                        struct fc_logi_s *plogi);
 static void    bfa_fcs_rport_timeout(void *arg);
@@ -76,6 +84,7 @@ static void   bfa_fcs_rport_send_ls_rjt(struct bfa_fcs_rport_s *rport,
 static void    bfa_fcs_rport_process_adisc(struct bfa_fcs_rport_s *rport,
                                struct fchs_s *rx_fchs, u16 len);
 static void bfa_fcs_rport_send_prlo_acc(struct bfa_fcs_rport_s *rport);
+static void    bfa_fcs_rport_hal_offline(struct bfa_fcs_rport_s *rport);
 
 static void    bfa_fcs_rport_sm_uninit(struct bfa_fcs_rport_s *rport,
                                        enum rport_event event);
@@ -87,6 +96,8 @@ static void   bfa_fcs_rport_sm_plogi_retry(struct bfa_fcs_rport_s *rport,
                                                enum rport_event event);
 static void    bfa_fcs_rport_sm_plogi(struct bfa_fcs_rport_s *rport,
                                        enum rport_event event);
+static void    bfa_fcs_rport_sm_fc4_fcs_online(struct bfa_fcs_rport_s *rport,
+                                       enum rport_event event);
 static void    bfa_fcs_rport_sm_hal_online(struct bfa_fcs_rport_s *rport,
                                                enum rport_event event);
 static void    bfa_fcs_rport_sm_online(struct bfa_fcs_rport_s *rport,
@@ -123,6 +134,10 @@ static void        bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport,
                                                enum rport_event event);
 static void    bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport,
                                                enum rport_event event);
+static void    bfa_fcs_rport_sm_fc4_off_delete(struct bfa_fcs_rport_s *rport,
+                                               enum rport_event event);
+static void    bfa_fcs_rport_sm_delete_pending(struct bfa_fcs_rport_s *rport,
+                                               enum rport_event event);
 
 static struct bfa_sm_table_s rport_sm_table[] = {
        {BFA_SM(bfa_fcs_rport_sm_uninit), BFA_RPORT_UNINIT},
@@ -130,6 +145,7 @@ static struct bfa_sm_table_s rport_sm_table[] = {
        {BFA_SM(bfa_fcs_rport_sm_plogiacc_sending), BFA_RPORT_ONLINE},
        {BFA_SM(bfa_fcs_rport_sm_plogi_retry), BFA_RPORT_PLOGI_RETRY},
        {BFA_SM(bfa_fcs_rport_sm_plogi), BFA_RPORT_PLOGI},
+       {BFA_SM(bfa_fcs_rport_sm_fc4_fcs_online), BFA_RPORT_ONLINE},
        {BFA_SM(bfa_fcs_rport_sm_hal_online), BFA_RPORT_ONLINE},
        {BFA_SM(bfa_fcs_rport_sm_online), BFA_RPORT_ONLINE},
        {BFA_SM(bfa_fcs_rport_sm_nsquery_sending), BFA_RPORT_NSQUERY},
@@ -167,8 +183,8 @@ bfa_fcs_rport_sm_uninit(struct bfa_fcs_rport_s *rport, enum rport_event event)
                break;
 
        case RPSM_EVENT_PLOGI_RCVD:
-               bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending);
-               bfa_fcs_rport_send_plogiacc(rport, NULL);
+               bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_fcs_online);
+               bfa_fcs_rport_fcs_online_action(rport);
                break;
 
        case RPSM_EVENT_PLOGI_COMP:
@@ -252,8 +268,8 @@ bfa_fcs_rport_sm_plogiacc_sending(struct bfa_fcs_rport_s *rport,
 
        switch (event) {
        case RPSM_EVENT_FCXP_SENT:
-               bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online);
-               bfa_fcs_rport_hal_online(rport);
+               bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_fcs_online);
+               bfa_fcs_rport_fcs_online_action(rport);
                break;
 
        case RPSM_EVENT_DELETE:
@@ -348,9 +364,9 @@ bfa_fcs_rport_sm_plogi_retry(struct bfa_fcs_rport_s *rport,
                break;
 
        case RPSM_EVENT_PLOGI_COMP:
-               bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online);
+               bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_fcs_online);
                bfa_timer_stop(&rport->timer);
-               bfa_fcs_rport_hal_online(rport);
+               bfa_fcs_rport_fcs_online_action(rport);
                break;
 
        default:
@@ -370,9 +386,9 @@ bfa_fcs_rport_sm_plogi(struct bfa_fcs_rport_s *rport, enum rport_event event)
 
        switch (event) {
        case RPSM_EVENT_ACCEPTED:
-               bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online);
+               bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_fcs_online);
                rport->plogi_retries = 0;
-               bfa_fcs_rport_hal_online(rport);
+               bfa_fcs_rport_fcs_online_action(rport);
                break;
 
        case RPSM_EVENT_LOGO_RCVD:
@@ -397,6 +413,7 @@ bfa_fcs_rport_sm_plogi(struct bfa_fcs_rport_s *rport, enum rport_event event)
                                        BFA_FCS_RETRY_TIMEOUT);
                } else {
                        bfa_stats(rport->port, rport_del_max_plogi_retry);
+                       rport->old_pid = rport->pid;
                        rport->pid = 0;
                        bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
                        bfa_timer_start(rport->fcs->bfa, &rport->timer,
@@ -443,13 +460,77 @@ bfa_fcs_rport_sm_plogi(struct bfa_fcs_rport_s *rport, enum rport_event event)
                break;
 
        case RPSM_EVENT_PLOGI_COMP:
-               bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online);
+               bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_fcs_online);
                bfa_fcxp_discard(rport->fcxp);
-               bfa_fcs_rport_hal_online(rport);
+               bfa_fcs_rport_fcs_online_action(rport);
+               break;
+
+       default:
+               bfa_sm_fault(rport->fcs, event);
+       }
+}
+
+/*
+ * PLOGI is done. Await bfa_fcs_itnim to ascertain the scsi function
+ */
+static void
+bfa_fcs_rport_sm_fc4_fcs_online(struct bfa_fcs_rport_s *rport,
+                               enum rport_event event)
+{
+       bfa_trc(rport->fcs, rport->pwwn);
+       bfa_trc(rport->fcs, rport->pid);
+       bfa_trc(rport->fcs, event);
+
+       switch (event) {
+       case RPSM_EVENT_FC4_FCS_ONLINE:
+               if (rport->scsi_function == BFA_RPORT_INITIATOR) {
+                       if (!BFA_FCS_PID_IS_WKA(rport->pid))
+                               bfa_fcs_rpf_rport_online(rport);
+                       bfa_sm_set_state(rport, bfa_fcs_rport_sm_online);
+                       break;
+               }
+
+               if (!rport->bfa_rport)
+                       rport->bfa_rport =
+                               bfa_rport_create(rport->fcs->bfa, rport);
+
+               if (rport->bfa_rport) {
+                       bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online);
+                       bfa_fcs_rport_hal_online(rport);
+               } else {
+                       bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend);
+                       bfa_fcs_rport_fcs_offline_action(rport);
+               }
+               break;
+
+       case RPSM_EVENT_PLOGI_RCVD:
+               bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
+               rport->plogi_pending = BFA_TRUE;
+               bfa_fcs_rport_fcs_offline_action(rport);
+               break;
+
+       case RPSM_EVENT_PLOGI_COMP:
+       case RPSM_EVENT_LOGO_IMP:
+       case RPSM_EVENT_ADDRESS_CHANGE:
+       case RPSM_EVENT_SCN:
+               bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
+               bfa_fcs_rport_fcs_offline_action(rport);
+               break;
+
+       case RPSM_EVENT_LOGO_RCVD:
+       case RPSM_EVENT_PRLO_RCVD:
+               bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv);
+               bfa_fcs_rport_fcs_offline_action(rport);
+               break;
+
+       case RPSM_EVENT_DELETE:
+               bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend);
+               bfa_fcs_rport_fcs_offline_action(rport);
                break;
 
        default:
                bfa_sm_fault(rport->fcs, event);
+               break;
        }
 }
 
@@ -468,41 +549,34 @@ bfa_fcs_rport_sm_hal_online(struct bfa_fcs_rport_s *rport,
        switch (event) {
        case RPSM_EVENT_HCB_ONLINE:
                bfa_sm_set_state(rport, bfa_fcs_rport_sm_online);
-               bfa_fcs_rport_online_action(rport);
+               bfa_fcs_rport_hal_online_action(rport);
                break;
 
-       case RPSM_EVENT_PRLO_RCVD:
        case RPSM_EVENT_PLOGI_COMP:
                break;
 
+       case RPSM_EVENT_PRLO_RCVD:
        case RPSM_EVENT_LOGO_RCVD:
-               bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logorcv);
-               bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE);
+               bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv);
+               bfa_fcs_rport_fcs_offline_action(rport);
                break;
 
+       case RPSM_EVENT_SCN:
        case RPSM_EVENT_LOGO_IMP:
        case RPSM_EVENT_ADDRESS_CHANGE:
-               bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_offline);
-               bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE);
+               bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
+               bfa_fcs_rport_fcs_offline_action(rport);
                break;
 
        case RPSM_EVENT_PLOGI_RCVD:
                rport->plogi_pending = BFA_TRUE;
-               bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_offline);
-               bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE);
+               bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
+               bfa_fcs_rport_fcs_offline_action(rport);
                break;
 
        case RPSM_EVENT_DELETE:
-               bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logosend);
-               bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE);
-               break;
-
-       case RPSM_EVENT_SCN:
-               /*
-                * @todo
-                * Ignore SCN - PLOGI just completed, FC-4 login should detect
-                * device failures.
-                */
+               bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend);
+               bfa_fcs_rport_fcs_offline_action(rport);
                break;
 
        default:
@@ -537,18 +611,18 @@ bfa_fcs_rport_sm_online(struct bfa_fcs_rport_s *rport, enum rport_event event)
        case RPSM_EVENT_LOGO_IMP:
        case RPSM_EVENT_ADDRESS_CHANGE:
                bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
-               bfa_fcs_rport_offline_action(rport);
+               bfa_fcs_rport_hal_offline_action(rport);
                break;
 
        case RPSM_EVENT_DELETE:
                bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend);
-               bfa_fcs_rport_offline_action(rport);
+               bfa_fcs_rport_hal_offline_action(rport);
                break;
 
        case RPSM_EVENT_LOGO_RCVD:
        case RPSM_EVENT_PRLO_RCVD:
                bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv);
-               bfa_fcs_rport_offline_action(rport);
+               bfa_fcs_rport_hal_offline_action(rport);
                break;
 
        case RPSM_EVENT_PLOGI_COMP:
@@ -579,7 +653,7 @@ bfa_fcs_rport_sm_nsquery_sending(struct bfa_fcs_rport_s *rport,
        case RPSM_EVENT_DELETE:
                bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend);
                bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
-               bfa_fcs_rport_offline_action(rport);
+               bfa_fcs_rport_hal_offline_action(rport);
                break;
 
        case RPSM_EVENT_SCN:
@@ -592,24 +666,16 @@ bfa_fcs_rport_sm_nsquery_sending(struct bfa_fcs_rport_s *rport,
        case RPSM_EVENT_PRLO_RCVD:
                bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv);
                bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
-               bfa_fcs_rport_offline_action(rport);
+               bfa_fcs_rport_hal_offline_action(rport);
                break;
 
        case RPSM_EVENT_LOGO_IMP:
-               rport->pid = 0;
-               bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
-               bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
-               bfa_timer_start(rport->fcs->bfa, &rport->timer,
-                               bfa_fcs_rport_timeout, rport,
-                               bfa_fcs_rport_del_timeout);
-               break;
-
        case RPSM_EVENT_PLOGI_RCVD:
        case RPSM_EVENT_ADDRESS_CHANGE:
        case RPSM_EVENT_PLOGI_COMP:
                bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
                bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
-               bfa_fcs_rport_offline_action(rport);
+               bfa_fcs_rport_hal_offline_action(rport);
                break;
 
        default:
@@ -642,14 +708,14 @@ bfa_fcs_rport_sm_nsquery(struct bfa_fcs_rport_s *rport, enum rport_event event)
                        bfa_fcs_rport_send_nsdisc(rport, NULL);
                } else {
                        bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
-                       bfa_fcs_rport_offline_action(rport);
+                       bfa_fcs_rport_hal_offline_action(rport);
                }
                break;
 
        case RPSM_EVENT_DELETE:
                bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend);
                bfa_fcxp_discard(rport->fcxp);
-               bfa_fcs_rport_offline_action(rport);
+               bfa_fcs_rport_hal_offline_action(rport);
                break;
 
        case RPSM_EVENT_SCN:
@@ -659,7 +725,7 @@ bfa_fcs_rport_sm_nsquery(struct bfa_fcs_rport_s *rport, enum rport_event event)
        case RPSM_EVENT_PRLO_RCVD:
                bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv);
                bfa_fcxp_discard(rport->fcxp);
-               bfa_fcs_rport_offline_action(rport);
+               bfa_fcs_rport_hal_offline_action(rport);
                break;
 
        case RPSM_EVENT_PLOGI_COMP:
@@ -668,7 +734,7 @@ bfa_fcs_rport_sm_nsquery(struct bfa_fcs_rport_s *rport, enum rport_event event)
        case RPSM_EVENT_LOGO_IMP:
                bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
                bfa_fcxp_discard(rport->fcxp);
-               bfa_fcs_rport_offline_action(rport);
+               bfa_fcs_rport_hal_offline_action(rport);
                break;
 
        default:
@@ -696,21 +762,21 @@ bfa_fcs_rport_sm_adisc_sending(struct bfa_fcs_rport_s *rport,
        case RPSM_EVENT_DELETE:
                bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend);
                bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
-               bfa_fcs_rport_offline_action(rport);
+               bfa_fcs_rport_hal_offline_action(rport);
                break;
 
        case RPSM_EVENT_LOGO_IMP:
        case RPSM_EVENT_ADDRESS_CHANGE:
                bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
                bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
-               bfa_fcs_rport_offline_action(rport);
+               bfa_fcs_rport_hal_offline_action(rport);
                break;
 
        case RPSM_EVENT_LOGO_RCVD:
        case RPSM_EVENT_PRLO_RCVD:
                bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv);
                bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
-               bfa_fcs_rport_offline_action(rport);
+               bfa_fcs_rport_hal_offline_action(rport);
                break;
 
        case RPSM_EVENT_SCN:
@@ -719,7 +785,7 @@ bfa_fcs_rport_sm_adisc_sending(struct bfa_fcs_rport_s *rport,
        case RPSM_EVENT_PLOGI_RCVD:
                bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
                bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
-               bfa_fcs_rport_offline_action(rport);
+               bfa_fcs_rport_hal_offline_action(rport);
                break;
 
        default:
@@ -756,13 +822,13 @@ bfa_fcs_rport_sm_adisc(struct bfa_fcs_rport_s *rport, enum rport_event event)
        case RPSM_EVENT_FAILED:
        case RPSM_EVENT_ADDRESS_CHANGE:
                bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
-               bfa_fcs_rport_offline_action(rport);
+               bfa_fcs_rport_hal_offline_action(rport);
                break;
 
        case RPSM_EVENT_DELETE:
                bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend);
                bfa_fcxp_discard(rport->fcxp);
-               bfa_fcs_rport_offline_action(rport);
+               bfa_fcs_rport_hal_offline_action(rport);
                break;
 
        case RPSM_EVENT_SCN:
@@ -774,14 +840,14 @@ bfa_fcs_rport_sm_adisc(struct bfa_fcs_rport_s *rport, enum rport_event event)
        case RPSM_EVENT_LOGO_IMP:
                bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
                bfa_fcxp_discard(rport->fcxp);
-               bfa_fcs_rport_offline_action(rport);
+               bfa_fcs_rport_hal_offline_action(rport);
                break;
 
        case RPSM_EVENT_LOGO_RCVD:
        case RPSM_EVENT_PRLO_RCVD:
                bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv);
                bfa_fcxp_discard(rport->fcxp);
-               bfa_fcs_rport_offline_action(rport);
+               bfa_fcs_rport_hal_offline_action(rport);
                break;
 
        default:
@@ -803,13 +869,19 @@ bfa_fcs_rport_sm_fc4_logorcv(struct bfa_fcs_rport_s *rport,
        switch (event) {
        case RPSM_EVENT_FC4_OFFLINE:
                bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logorcv);
-               bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE);
+               bfa_fcs_rport_hal_offline(rport);
                break;
 
        case RPSM_EVENT_DELETE:
-               bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend);
+               if (rport->pid && (rport->prlo == BFA_TRUE))
+                       bfa_fcs_rport_send_prlo_acc(rport);
+               if (rport->pid && (rport->prlo == BFA_FALSE))
+                       bfa_fcs_rport_send_logo_acc(rport);
+
+               bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_off_delete);
                break;
 
+       case RPSM_EVENT_HCB_ONLINE:
        case RPSM_EVENT_LOGO_RCVD:
        case RPSM_EVENT_PRLO_RCVD:
        case RPSM_EVENT_ADDRESS_CHANGE:
@@ -835,7 +907,20 @@ bfa_fcs_rport_sm_fc4_logosend(struct bfa_fcs_rport_s *rport,
        switch (event) {
        case RPSM_EVENT_FC4_OFFLINE:
                bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logosend);
-               bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE);
+               bfa_fcs_rport_hal_offline(rport);
+               break;
+
+       case RPSM_EVENT_LOGO_RCVD:
+               bfa_fcs_rport_send_logo_acc(rport);
+       case RPSM_EVENT_PRLO_RCVD:
+               if (rport->prlo == BFA_TRUE)
+                       bfa_fcs_rport_send_prlo_acc(rport);
+               bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_off_delete);
+               break;
+
+       case RPSM_EVENT_HCB_ONLINE:
+       case RPSM_EVENT_DELETE:
+               /* Rport is being deleted */
                break;
 
        default:
@@ -857,13 +942,23 @@ bfa_fcs_rport_sm_fc4_offline(struct bfa_fcs_rport_s *rport,
        switch (event) {
        case RPSM_EVENT_FC4_OFFLINE:
                bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_offline);
-               bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE);
+               bfa_fcs_rport_hal_offline(rport);
                break;
 
-       case RPSM_EVENT_SCN:
-       case RPSM_EVENT_LOGO_IMP:
        case RPSM_EVENT_LOGO_RCVD:
+               /*
+                * Rport is going offline. Just ack the logo
+                */
+               bfa_fcs_rport_send_logo_acc(rport);
+               break;
+
        case RPSM_EVENT_PRLO_RCVD:
+               bfa_fcs_rport_send_prlo_acc(rport);
+               break;
+
+       case RPSM_EVENT_HCB_ONLINE:
+       case RPSM_EVENT_SCN:
+       case RPSM_EVENT_LOGO_IMP:
        case RPSM_EVENT_ADDRESS_CHANGE:
                /*
                 * rport is already going offline.
@@ -907,24 +1002,23 @@ bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport,
                 */
 
        case RPSM_EVENT_ADDRESS_CHANGE:
-               if (bfa_fcs_lport_is_online(rport->port)) {
-                       if (bfa_fcs_fabric_is_switched(rport->port->fabric)) {
-                               bfa_sm_set_state(rport,
-                                       bfa_fcs_rport_sm_nsdisc_sending);
-                               rport->ns_retries = 0;
-                               bfa_fcs_rport_send_nsdisc(rport, NULL);
-                       } else {
-                               bfa_sm_set_state(rport,
-                                       bfa_fcs_rport_sm_plogi_sending);
-                               rport->plogi_retries = 0;
-                               bfa_fcs_rport_send_plogi(rport, NULL);
-                       }
-               } else {
+               if (!bfa_fcs_lport_is_online(rport->port)) {
                        rport->pid = 0;
                        bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
                        bfa_timer_start(rport->fcs->bfa, &rport->timer,
                                        bfa_fcs_rport_timeout, rport,
                                        bfa_fcs_rport_del_timeout);
+                       break;
+               }
+               if (bfa_fcs_fabric_is_switched(rport->port->fabric)) {
+                       bfa_sm_set_state(rport,
+                               bfa_fcs_rport_sm_nsdisc_sending);
+                       rport->ns_retries = 0;
+                       bfa_fcs_rport_send_nsdisc(rport, NULL);
+               } else {
+                       bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_sending);
+                       rport->plogi_retries = 0;
+                       bfa_fcs_rport_send_plogi(rport, NULL);
                }
                break;
 
@@ -1001,7 +1095,11 @@ bfa_fcs_rport_sm_hcb_logorcv(struct bfa_fcs_rport_s *rport,
                break;
 
        case RPSM_EVENT_DELETE:
-               bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logosend);
+               bfa_sm_set_state(rport, bfa_fcs_rport_sm_delete_pending);
+               if (rport->pid && (rport->prlo == BFA_TRUE))
+                       bfa_fcs_rport_send_prlo_acc(rport);
+               if (rport->pid && (rport->prlo == BFA_FALSE))
+                       bfa_fcs_rport_send_logo_acc(rport);
                break;
 
        case RPSM_EVENT_LOGO_IMP:
@@ -1040,7 +1138,14 @@ bfa_fcs_rport_sm_hcb_logosend(struct bfa_fcs_rport_s *rport,
                break;
 
        case RPSM_EVENT_LOGO_RCVD:
+               bfa_fcs_rport_send_logo_acc(rport);
        case RPSM_EVENT_PRLO_RCVD:
+               if (rport->prlo == BFA_TRUE)
+                       bfa_fcs_rport_send_prlo_acc(rport);
+
+               bfa_sm_set_state(rport, bfa_fcs_rport_sm_delete_pending);
+               break;
+
        case RPSM_EVENT_ADDRESS_CHANGE:
                break;
 
@@ -1072,7 +1177,11 @@ bfa_fcs_rport_sm_logo_sending(struct bfa_fcs_rport_s *rport,
                break;
 
        case RPSM_EVENT_LOGO_RCVD:
+               bfa_fcs_rport_send_logo_acc(rport);
        case RPSM_EVENT_PRLO_RCVD:
+               if (rport->prlo == BFA_TRUE)
+                       bfa_fcs_rport_send_prlo_acc(rport);
+
                bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit);
                bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
                bfa_fcs_rport_free(rport);
@@ -1126,9 +1235,9 @@ bfa_fcs_rport_sm_offline(struct bfa_fcs_rport_s *rport, enum rport_event event)
                break;
 
        case RPSM_EVENT_PLOGI_COMP:
-               bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online);
+               bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_fcs_online);
                bfa_timer_stop(&rport->timer);
-               bfa_fcs_rport_hal_online(rport);
+               bfa_fcs_rport_fcs_online_action(rport);
                break;
 
        case RPSM_EVENT_PLOGI_SEND:
@@ -1190,9 +1299,9 @@ bfa_fcs_rport_sm_nsdisc_sending(struct bfa_fcs_rport_s *rport,
                break;
 
        case RPSM_EVENT_PLOGI_COMP:
-               bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online);
+               bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_fcs_online);
                bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
-               bfa_fcs_rport_hal_online(rport);
+               bfa_fcs_rport_fcs_online_action(rport);
                break;
 
        default:
@@ -1254,9 +1363,9 @@ bfa_fcs_rport_sm_nsdisc_retry(struct bfa_fcs_rport_s *rport,
                break;
 
        case RPSM_EVENT_PLOGI_COMP:
-               bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online);
+               bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_fcs_online);
                bfa_timer_stop(&rport->timer);
-               bfa_fcs_rport_hal_online(rport);
+               bfa_fcs_rport_fcs_online_action(rport);
                break;
 
        default:
@@ -1296,6 +1405,7 @@ bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport,
                                 bfa_fcs_rport_sm_nsdisc_sending);
                        bfa_fcs_rport_send_nsdisc(rport, NULL);
                } else {
+                       rport->old_pid = rport->pid;
                        rport->pid = 0;
                        bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
                        bfa_timer_start(rport->fcs->bfa, &rport->timer,
@@ -1343,9 +1453,9 @@ bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport,
                break;
 
        case RPSM_EVENT_PLOGI_COMP:
-               bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online);
+               bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_fcs_online);
                bfa_fcxp_discard(rport->fcxp);
-               bfa_fcs_rport_hal_online(rport);
+               bfa_fcs_rport_fcs_online_action(rport);
                break;
 
        default:
@@ -1353,7 +1463,63 @@ bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport,
        }
 }
 
+/*
+ * Rport needs to be deleted
+ * waiting for ITNIM clean up to finish
+ */
+static void
+bfa_fcs_rport_sm_fc4_off_delete(struct bfa_fcs_rport_s *rport,
+                               enum rport_event event)
+{
+       bfa_trc(rport->fcs, rport->pwwn);
+       bfa_trc(rport->fcs, rport->pid);
+       bfa_trc(rport->fcs, event);
 
+       switch (event) {
+       case RPSM_EVENT_FC4_OFFLINE:
+               bfa_sm_set_state(rport, bfa_fcs_rport_sm_delete_pending);
+               bfa_fcs_rport_hal_offline(rport);
+               break;
+
+       case RPSM_EVENT_DELETE:
+       case RPSM_EVENT_PLOGI_RCVD:
+               /* Ignore these events */
+               break;
+
+       default:
+               bfa_sm_fault(rport->fcs, event);
+               break;
+       }
+}
+
+/*
+ * RPort needs to be deleted
+ * waiting for BFA/FW to finish current processing
+ */
+static void
+bfa_fcs_rport_sm_delete_pending(struct bfa_fcs_rport_s *rport,
+                               enum rport_event event)
+{
+       bfa_trc(rport->fcs, rport->pwwn);
+       bfa_trc(rport->fcs, rport->pid);
+       bfa_trc(rport->fcs, event);
+
+       switch (event) {
+       case RPSM_EVENT_HCB_OFFLINE:
+               bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit);
+               bfa_fcs_rport_free(rport);
+               break;
+
+       case RPSM_EVENT_DELETE:
+       case RPSM_EVENT_LOGO_IMP:
+       case RPSM_EVENT_PLOGI_RCVD:
+               /* Ignore these events */
+               break;
+
+       default:
+               bfa_sm_fault(rport->fcs, event);
+       }
+}
 
 /*
  *  fcs_rport_private FCS RPORT provate functions
@@ -1370,10 +1536,11 @@ bfa_fcs_rport_send_plogi(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
 
        bfa_trc(rport->fcs, rport->pwwn);
 
-       fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
+       fcxp = fcxp_alloced ? fcxp_alloced :
+              bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
        if (!fcxp) {
                bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe,
-                                       bfa_fcs_rport_send_plogi, rport);
+                               bfa_fcs_rport_send_plogi, rport, BFA_TRUE);
                return;
        }
        rport->fcxp = fcxp;
@@ -1490,10 +1657,11 @@ bfa_fcs_rport_send_plogiacc(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
        bfa_trc(rport->fcs, rport->pwwn);
        bfa_trc(rport->fcs, rport->reply_oxid);
 
-       fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
+       fcxp = fcxp_alloced ? fcxp_alloced :
+              bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
        if (!fcxp) {
                bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe,
-                                       bfa_fcs_rport_send_plogiacc, rport);
+                               bfa_fcs_rport_send_plogiacc, rport, BFA_FALSE);
                return;
        }
        rport->fcxp = fcxp;
@@ -1522,10 +1690,11 @@ bfa_fcs_rport_send_adisc(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
 
        bfa_trc(rport->fcs, rport->pwwn);
 
-       fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
+       fcxp = fcxp_alloced ? fcxp_alloced :
+              bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
        if (!fcxp) {
                bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe,
-                                       bfa_fcs_rport_send_adisc, rport);
+                               bfa_fcs_rport_send_adisc, rport, BFA_TRUE);
                return;
        }
        rport->fcxp = fcxp;
@@ -1585,10 +1754,11 @@ bfa_fcs_rport_send_nsdisc(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
 
        bfa_trc(rport->fcs, rport->pid);
 
-       fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
+       fcxp = fcxp_alloced ? fcxp_alloced :
+              bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
        if (!fcxp) {
                bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe,
-                                       bfa_fcs_rport_send_nsdisc, rport);
+                               bfa_fcs_rport_send_nsdisc, rport, BFA_TRUE);
                return;
        }
        rport->fcxp = fcxp;
@@ -1741,10 +1911,11 @@ bfa_fcs_rport_send_logo(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
 
        port = rport->port;
 
-       fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
+       fcxp = fcxp_alloced ? fcxp_alloced :
+              bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
        if (!fcxp) {
                bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe,
-                                       bfa_fcs_rport_send_logo, rport);
+                               bfa_fcs_rport_send_logo, rport, BFA_FALSE);
                return;
        }
        rport->fcxp = fcxp;
@@ -1778,7 +1949,7 @@ bfa_fcs_rport_send_logo_acc(void *rport_cbarg)
 
        port = rport->port;
 
-       fcxp = bfa_fcs_fcxp_alloc(port->fcs);
+       fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
        if (!fcxp)
                return;
 
@@ -1849,7 +2020,7 @@ bfa_fcs_rport_process_prli(struct bfa_fcs_rport_s *rport,
                bfa_fcs_itnim_is_initiator(rport->itnim);
        }
 
-       fcxp = bfa_fcs_fcxp_alloc(port->fcs);
+       fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
        if (!fcxp)
                return;
 
@@ -1886,7 +2057,7 @@ bfa_fcs_rport_process_rpsc(struct bfa_fcs_rport_s *rport,
 
        speeds.port_op_speed = fc_bfa_speed_to_rpsc_operspeed(pport_attr.speed);
 
-       fcxp = bfa_fcs_fcxp_alloc(port->fcs);
+       fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
        if (!fcxp)
                return;
 
@@ -1920,7 +2091,7 @@ bfa_fcs_rport_process_adisc(struct bfa_fcs_rport_s *rport,
         */
        if (bfa_fcs_itnim_get_online_state(rport->itnim) == BFA_STATUS_OK) {
 
-               fcxp = bfa_fcs_fcxp_alloc(port->fcs);
+               fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
                if (!fcxp)
                        return;
 
@@ -1957,6 +2128,15 @@ bfa_fcs_rport_hal_online(struct bfa_fcs_rport_s *rport)
        bfa_rport_online(rport->bfa_rport, &rport_info);
 }
 
+static void
+bfa_fcs_rport_hal_offline(struct bfa_fcs_rport_s *rport)
+{
+       if (rport->bfa_rport)
+               bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE);
+       else
+               bfa_cb_rport_offline(rport);
+}
+
 static struct bfa_fcs_rport_s *
 bfa_fcs_rport_alloc(struct bfa_fcs_lport_s *port, wwn_t pwwn, u32 rpid)
 {
@@ -1967,6 +2147,11 @@ bfa_fcs_rport_alloc(struct bfa_fcs_lport_s *port, wwn_t pwwn, u32 rpid)
        /*
         * allocate rport
         */
+       if (fcs->num_rport_logins >= bfa_fcs_rport_max_logins) {
+               bfa_trc(fcs, rpid);
+               return NULL;
+       }
+
        if (bfa_fcb_rport_alloc(fcs->bfad, &rport, &rport_drv)
                != BFA_STATUS_OK) {
                bfa_trc(fcs, rpid);
@@ -1981,16 +2166,9 @@ bfa_fcs_rport_alloc(struct bfa_fcs_lport_s *port, wwn_t pwwn, u32 rpid)
        rport->rp_drv = rport_drv;
        rport->pid = rpid;
        rport->pwwn = pwwn;
+       rport->old_pid = 0;
 
-       /*
-        * allocate BFA rport
-        */
-       rport->bfa_rport = bfa_rport_create(port->fcs->bfa, rport);
-       if (!rport->bfa_rport) {
-               bfa_trc(fcs, rpid);
-               kfree(rport_drv);
-               return NULL;
-       }
+       rport->bfa_rport = NULL;
 
        /*
         * allocate FC-4s
@@ -2001,14 +2179,13 @@ bfa_fcs_rport_alloc(struct bfa_fcs_lport_s *port, wwn_t pwwn, u32 rpid)
                rport->itnim = bfa_fcs_itnim_create(rport);
                if (!rport->itnim) {
                        bfa_trc(fcs, rpid);
-                       bfa_sm_send_event(rport->bfa_rport,
-                                               BFA_RPORT_SM_DELETE);
                        kfree(rport_drv);
                        return NULL;
                }
        }
 
        bfa_fcs_lport_add_rport(port, rport);
+       fcs->num_rport_logins++;
 
        bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit);
 
@@ -2024,20 +2201,28 @@ static void
 bfa_fcs_rport_free(struct bfa_fcs_rport_s *rport)
 {
        struct bfa_fcs_lport_s *port = rport->port;
+       struct bfa_fcs_s *fcs = port->fcs;
 
        /*
         * - delete FC-4s
         * - delete BFA rport
         * - remove from queue of rports
         */
+       rport->plogi_pending = BFA_FALSE;
+
        if (bfa_fcs_lport_is_initiator(port)) {
                bfa_fcs_itnim_delete(rport->itnim);
                if (rport->pid != 0 && !BFA_FCS_PID_IS_WKA(rport->pid))
                        bfa_fcs_rpf_rport_offline(rport);
        }
 
-       bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_DELETE);
+       if (rport->bfa_rport) {
+               bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_DELETE);
+               rport->bfa_rport = NULL;
+       }
+
        bfa_fcs_lport_del_rport(port, rport);
+       fcs->num_rport_logins--;
        kfree(rport->rp_drv);
 }
 
@@ -2071,7 +2256,18 @@ bfa_fcs_rport_aen_post(struct bfa_fcs_rport_s *rport,
 }
 
 static void
-bfa_fcs_rport_online_action(struct bfa_fcs_rport_s *rport)
+bfa_fcs_rport_fcs_online_action(struct bfa_fcs_rport_s *rport)
+{
+       if ((!rport->pid) || (!rport->pwwn)) {
+               bfa_trc(rport->fcs, rport->pid);
+               bfa_sm_fault(rport->fcs, rport->pid);
+       }
+
+       bfa_sm_send_event(rport->itnim, BFA_FCS_ITNIM_SM_FCS_ONLINE);
+}
+
+static void
+bfa_fcs_rport_hal_online_action(struct bfa_fcs_rport_s *rport)
 {
        struct bfa_fcs_lport_s *port = rport->port;
        struct bfad_s *bfad = (struct bfad_s *)port->fcs->bfad;
@@ -2086,7 +2282,7 @@ bfa_fcs_rport_online_action(struct bfa_fcs_rport_s *rport)
        }
 
        if (bfa_fcs_lport_is_initiator(port)) {
-               bfa_fcs_itnim_rport_online(rport->itnim);
+               bfa_fcs_itnim_brp_online(rport->itnim);
                if (!BFA_FCS_PID_IS_WKA(rport->pid))
                        bfa_fcs_rpf_rport_online(rport);
        };
@@ -2102,15 +2298,28 @@ bfa_fcs_rport_online_action(struct bfa_fcs_rport_s *rport)
 }
 
 static void
-bfa_fcs_rport_offline_action(struct bfa_fcs_rport_s *rport)
+bfa_fcs_rport_fcs_offline_action(struct bfa_fcs_rport_s *rport)
+{
+       if (!BFA_FCS_PID_IS_WKA(rport->pid))
+               bfa_fcs_rpf_rport_offline(rport);
+
+       bfa_fcs_itnim_rport_offline(rport->itnim);
+}
+
+static void
+bfa_fcs_rport_hal_offline_action(struct bfa_fcs_rport_s *rport)
 {
        struct bfa_fcs_lport_s *port = rport->port;
        struct bfad_s *bfad = (struct bfad_s *)port->fcs->bfad;
        char    lpwwn_buf[BFA_STRING_32];
        char    rpwwn_buf[BFA_STRING_32];
 
+       if (!rport->bfa_rport) {
+               bfa_fcs_rport_fcs_offline_action(rport);
+               return;
+       }
+
        rport->stats.offlines++;
-       rport->plogi_pending = BFA_FALSE;
 
        wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port));
        wwn2str(rpwwn_buf, rport->pwwn);
@@ -2340,7 +2549,6 @@ bfa_fcs_rport_scn(struct bfa_fcs_rport_s *rport)
        bfa_sm_send_event(rport, RPSM_EVENT_SCN);
 }
 
-
 /*
  *     brief
  *     This routine BFA callback for bfa_rport_online() call.
@@ -2508,7 +2716,7 @@ bfa_fcs_rport_send_prlo_acc(struct bfa_fcs_rport_s *rport)
 
        bfa_trc(rport->fcs, rport->pid);
 
-       fcxp = bfa_fcs_fcxp_alloc(port->fcs);
+       fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
        if (!fcxp)
                return;
        len = fc_prlo_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
@@ -2534,7 +2742,7 @@ bfa_fcs_rport_send_ls_rjt(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs,
 
        bfa_trc(rport->fcs, rx_fchs->s_id);
 
-       fcxp = bfa_fcs_fcxp_alloc(rport->fcs);
+       fcxp = bfa_fcs_fcxp_alloc(rport->fcs, BFA_FALSE);
        if (!fcxp)
                return;
 
@@ -2582,6 +2790,17 @@ bfa_fcs_rport_prlo(struct bfa_fcs_rport_s *rport, __be16 ox_id)
        bfa_sm_send_event(rport, RPSM_EVENT_PRLO_RCVD);
 }
 
+/*
+ * Called by BFAD to set the max limit on number of bfa_fcs_rport allocation
+ * which limits number of concurrent logins to remote ports
+ */
+void
+bfa_fcs_rport_set_max_logins(u32 max_logins)
+{
+       if (max_logins > 0)
+               bfa_fcs_rport_max_logins = max_logins;
+}
+
 void
 bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport,
                struct bfa_rport_attr_s *rport_attr)
@@ -2605,9 +2824,11 @@ bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport,
        rport_attr->curr_speed  = rport->rpf.rpsc_speed;
        rport_attr->assigned_speed  = rport->rpf.assigned_speed;
 
-       qos_attr.qos_priority = rport->bfa_rport->qos_attr.qos_priority;
-       qos_attr.qos_flow_id =
-               cpu_to_be32(rport->bfa_rport->qos_attr.qos_flow_id);
+       if (rport->bfa_rport) {
+               qos_attr.qos_priority = rport->bfa_rport->qos_attr.qos_priority;
+               qos_attr.qos_flow_id =
+                       cpu_to_be32(rport->bfa_rport->qos_attr.qos_flow_id);
+       }
        rport_attr->qos_attr = qos_attr;
 
        rport_attr->trl_enforced = BFA_FALSE;
@@ -2940,10 +3161,11 @@ bfa_fcs_rpf_send_rpsc2(void *rpf_cbarg, struct bfa_fcxp_s *fcxp_alloced)
 
        bfa_trc(rport->fcs, rport->pwwn);
 
-       fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
+       fcxp = fcxp_alloced ? fcxp_alloced :
+              bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
        if (!fcxp) {
                bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &rpf->fcxp_wqe,
-                                       bfa_fcs_rpf_send_rpsc2, rpf);
+                               bfa_fcs_rpf_send_rpsc2, rpf, BFA_TRUE);
                return;
        }
        rpf->fcxp = fcxp;
index 21ad2902e5ce99f300d91f08080afc9b556864d1..75ca8752b9f4b125ce5e47417f912b29c42fff80 100644 (file)
@@ -92,7 +92,6 @@ static void bfa_ioc_event_notify(struct bfa_ioc_s *ioc ,
                                enum bfa_ioc_event_e event);
 static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
 static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);
-static void bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc);
 static void bfa_ioc_fail_notify(struct bfa_ioc_s *ioc);
 static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc);
 
@@ -599,8 +598,9 @@ bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event)
                break;
 
        case IOC_E_HWERROR:
+       case IOC_E_HWFAILED:
                /*
-                * HB failure notification, ignore.
+                * HB failure / HW error notification, ignore.
                 */
                break;
        default:
@@ -632,6 +632,10 @@ bfa_ioc_sm_hwfail(struct bfa_ioc_s *ioc, enum ioc_event event)
                bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
                break;
 
+       case IOC_E_HWERROR:
+               /* Ignore - already in hwfail state */
+               break;
+
        default:
                bfa_sm_fault(ioc, event);
        }
@@ -1455,7 +1459,7 @@ bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
                bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
 
        for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
-               if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i]) {
+               if (fwhdr->md5sum[i] != cpu_to_le32(drv_fwhdr->md5sum[i])) {
                        bfa_trc(ioc, i);
                        bfa_trc(ioc, fwhdr->md5sum[i]);
                        bfa_trc(ioc, drv_fwhdr->md5sum[i]);
@@ -1480,7 +1484,7 @@ bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
        drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
                bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
 
-       if (fwhdr.signature != drv_fwhdr->signature) {
+       if (fwhdr.signature != cpu_to_le32(drv_fwhdr->signature)) {
                bfa_trc(ioc, fwhdr.signature);
                bfa_trc(ioc, drv_fwhdr->signature);
                return BFA_FALSE;
@@ -1704,7 +1708,7 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
                 * write smem
                 */
                bfa_mem_write(ioc->ioc_regs.smem_page_start, loff,
-                             fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]);
+                       cpu_to_le32(fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]));
 
                loff += sizeof(u32);
 
@@ -2260,6 +2264,12 @@ bfa_ioc_disable(struct bfa_ioc_s *ioc)
        bfa_fsm_send_event(ioc, IOC_E_DISABLE);
 }
 
+void
+bfa_ioc_suspend(struct bfa_ioc_s *ioc)
+{
+       ioc->dbg_fwsave_once = BFA_TRUE;
+       bfa_fsm_send_event(ioc, IOC_E_HWERROR);
+}
 
 /*
  * Initialize memory for saving firmware trace. Driver must initialize
@@ -2269,7 +2279,7 @@ void
 bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave)
 {
        ioc->dbg_fwsave     = dbg_fwsave;
-       ioc->dbg_fwsave_len = (ioc->iocpf.auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
+       ioc->dbg_fwsave_len = BFA_DBG_FWTRC_LEN;
 }
 
 /*
@@ -2856,7 +2866,7 @@ bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc)
 /*
  * Save firmware trace if configured.
  */
-static void
+void
 bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc)
 {
        int             tlen;
index 7b916e04ca560724509766a2f880fbdbe53e05ca..b2856f96567cfca34ba4bdfec447c47ac9c7882d 100644 (file)
@@ -820,6 +820,7 @@ void bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa,
                struct bfa_ioc_cbfn_s *cbfn, struct bfa_timer_mod_s *timer_mod);
 void bfa_ioc_auto_recover(bfa_boolean_t auto_recover);
 void bfa_ioc_detach(struct bfa_ioc_s *ioc);
+void bfa_ioc_suspend(struct bfa_ioc_s *ioc);
 void bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
                enum bfi_pcifn_class clscode);
 void bfa_ioc_mem_claim(struct bfa_ioc_s *ioc,  u8 *dm_kva, u64 dm_pa);
@@ -866,6 +867,7 @@ bfa_boolean_t bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc,
 void bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event);
 bfa_status_t bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats);
 bfa_status_t bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc);
+void bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc);
 
 /*
  * asic block configuration related APIs
index 2d36e4823835d326bfc32c2c29f9a2d8e93d55c9..189fff71e3c25842904ff911d0b52d5203897577 100644 (file)
@@ -121,6 +121,7 @@ struct bfa_s {
        bfa_boolean_t           fcs;            /*  FCS is attached to BFA */
        struct bfa_msix_s       msix;
        int                     bfa_aen_seq;
+       bfa_boolean_t           intr_enabled;   /*  Status of interrupts */
 };
 
 extern bfa_boolean_t bfa_auto_recover;
index 2e856e6710f7d10b2e7083771cdac865bfd15404..b2538d60db34aab67dc8ec69c8da7f49bb05b122 100644 (file)
@@ -440,9 +440,11 @@ claim_fcxps_mem(struct bfa_fcxp_mod_s *mod)
        fcxp = (struct bfa_fcxp_s *) bfa_mem_kva_curp(mod);
        memset(fcxp, 0, sizeof(struct bfa_fcxp_s) * mod->num_fcxps);
 
-       INIT_LIST_HEAD(&mod->fcxp_free_q);
+       INIT_LIST_HEAD(&mod->fcxp_req_free_q);
+       INIT_LIST_HEAD(&mod->fcxp_rsp_free_q);
        INIT_LIST_HEAD(&mod->fcxp_active_q);
-       INIT_LIST_HEAD(&mod->fcxp_unused_q);
+       INIT_LIST_HEAD(&mod->fcxp_req_unused_q);
+       INIT_LIST_HEAD(&mod->fcxp_rsp_unused_q);
 
        mod->fcxp_list = fcxp;
 
@@ -450,7 +452,14 @@ claim_fcxps_mem(struct bfa_fcxp_mod_s *mod)
                fcxp->fcxp_mod = mod;
                fcxp->fcxp_tag = i;
 
-               list_add_tail(&fcxp->qe, &mod->fcxp_free_q);
+               if (i < (mod->num_fcxps / 2)) {
+                       list_add_tail(&fcxp->qe, &mod->fcxp_req_free_q);
+                       fcxp->req_rsp = BFA_TRUE;
+               } else {
+                       list_add_tail(&fcxp->qe, &mod->fcxp_rsp_free_q);
+                       fcxp->req_rsp = BFA_FALSE;
+               }
+
                bfa_reqq_winit(&fcxp->reqq_wqe, bfa_fcxp_qresume, fcxp);
                fcxp->reqq_waiting = BFA_FALSE;
 
@@ -514,7 +523,8 @@ bfa_fcxp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
        if (!cfg->drvcfg.min_cfg)
                mod->rsp_pld_sz = BFA_FCXP_MAX_LBUF_SZ;
 
-       INIT_LIST_HEAD(&mod->wait_q);
+       INIT_LIST_HEAD(&mod->req_wait_q);
+       INIT_LIST_HEAD(&mod->rsp_wait_q);
 
        claim_fcxps_mem(mod);
 }
@@ -542,7 +552,8 @@ bfa_fcxp_iocdisable(struct bfa_s *bfa)
        struct list_head              *qe, *qen;
 
        /* Enqueue unused fcxp resources to free_q */
-       list_splice_tail_init(&mod->fcxp_unused_q, &mod->fcxp_free_q);
+       list_splice_tail_init(&mod->fcxp_req_unused_q, &mod->fcxp_req_free_q);
+       list_splice_tail_init(&mod->fcxp_rsp_unused_q, &mod->fcxp_rsp_free_q);
 
        list_for_each_safe(qe, qen, &mod->fcxp_active_q) {
                fcxp = (struct bfa_fcxp_s *) qe;
@@ -559,11 +570,14 @@ bfa_fcxp_iocdisable(struct bfa_s *bfa)
 }
 
 static struct bfa_fcxp_s *
-bfa_fcxp_get(struct bfa_fcxp_mod_s *fm)
+bfa_fcxp_get(struct bfa_fcxp_mod_s *fm, bfa_boolean_t req)
 {
        struct bfa_fcxp_s *fcxp;
 
-       bfa_q_deq(&fm->fcxp_free_q, &fcxp);
+       if (req)
+               bfa_q_deq(&fm->fcxp_req_free_q, &fcxp);
+       else
+               bfa_q_deq(&fm->fcxp_rsp_free_q, &fcxp);
 
        if (fcxp)
                list_add_tail(&fcxp->qe, &fm->fcxp_active_q);
@@ -642,7 +656,11 @@ bfa_fcxp_put(struct bfa_fcxp_s *fcxp)
        struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
        struct bfa_fcxp_wqe_s *wqe;
 
-       bfa_q_deq(&mod->wait_q, &wqe);
+       if (fcxp->req_rsp)
+               bfa_q_deq(&mod->req_wait_q, &wqe);
+       else
+               bfa_q_deq(&mod->rsp_wait_q, &wqe);
+
        if (wqe) {
                bfa_trc(mod->bfa, fcxp->fcxp_tag);
 
@@ -657,7 +675,11 @@ bfa_fcxp_put(struct bfa_fcxp_s *fcxp)
 
        WARN_ON(!bfa_q_is_on_q(&mod->fcxp_active_q, fcxp));
        list_del(&fcxp->qe);
-       list_add_tail(&fcxp->qe, &mod->fcxp_free_q);
+
+       if (fcxp->req_rsp)
+               list_add_tail(&fcxp->qe, &mod->fcxp_req_free_q);
+       else
+               list_add_tail(&fcxp->qe, &mod->fcxp_rsp_free_q);
 }
 
 static void
@@ -900,21 +922,23 @@ bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req)
  *                             Address (given the sge index).
  * @param[in]  get_rsp_sglen   function ptr to be called to get a response SG
  *                             len (given the sge index).
+ * @param[in]  req             Allocated FCXP is used to send req or rsp?
+ *                             request - BFA_TRUE, response - BFA_FALSE
  *
  * @return FCXP instance. NULL on failure.
  */
 struct bfa_fcxp_s *
-bfa_fcxp_alloc(void *caller, struct bfa_s *bfa, int nreq_sgles,
-              int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
-              bfa_fcxp_get_sglen_t req_sglen_cbfn,
-              bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
-              bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
+bfa_fcxp_req_rsp_alloc(void *caller, struct bfa_s *bfa, int nreq_sgles,
+               int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
+               bfa_fcxp_get_sglen_t req_sglen_cbfn,
+               bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
+               bfa_fcxp_get_sglen_t rsp_sglen_cbfn, bfa_boolean_t req)
 {
        struct bfa_fcxp_s *fcxp = NULL;
 
        WARN_ON(bfa == NULL);
 
-       fcxp = bfa_fcxp_get(BFA_FCXP_MOD(bfa));
+       fcxp = bfa_fcxp_get(BFA_FCXP_MOD(bfa), req);
        if (fcxp == NULL)
                return NULL;
 
@@ -1071,17 +1095,20 @@ bfa_fcxp_abort(struct bfa_fcxp_s *fcxp)
 }
 
 void
-bfa_fcxp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
+bfa_fcxp_req_rsp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
               bfa_fcxp_alloc_cbfn_t alloc_cbfn, void *alloc_cbarg,
               void *caller, int nreq_sgles,
               int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
               bfa_fcxp_get_sglen_t req_sglen_cbfn,
               bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
-              bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
+              bfa_fcxp_get_sglen_t rsp_sglen_cbfn, bfa_boolean_t req)
 {
        struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
 
-       WARN_ON(!list_empty(&mod->fcxp_free_q));
+       if (req)
+               WARN_ON(!list_empty(&mod->fcxp_req_free_q));
+       else
+               WARN_ON(!list_empty(&mod->fcxp_rsp_free_q));
 
        wqe->alloc_cbfn = alloc_cbfn;
        wqe->alloc_cbarg = alloc_cbarg;
@@ -1094,7 +1121,10 @@ bfa_fcxp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
        wqe->rsp_sga_cbfn = rsp_sga_cbfn;
        wqe->rsp_sglen_cbfn = rsp_sglen_cbfn;
 
-       list_add_tail(&wqe->qe, &mod->wait_q);
+       if (req)
+               list_add_tail(&wqe->qe, &mod->req_wait_q);
+       else
+               list_add_tail(&wqe->qe, &mod->rsp_wait_q);
 }
 
 void
@@ -1102,7 +1132,8 @@ bfa_fcxp_walloc_cancel(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe)
 {
        struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
 
-       WARN_ON(!bfa_q_is_on_q(&mod->wait_q, wqe));
+       WARN_ON(!bfa_q_is_on_q(&mod->req_wait_q, wqe) ||
+               !bfa_q_is_on_q(&mod->rsp_wait_q, wqe));
        list_del(&wqe->qe);
 }
 
@@ -1153,8 +1184,13 @@ bfa_fcxp_res_recfg(struct bfa_s *bfa, u16 num_fcxp_fw)
        int     i;
 
        for (i = 0; i < (mod->num_fcxps - num_fcxp_fw); i++) {
-               bfa_q_deq_tail(&mod->fcxp_free_q, &qe);
-               list_add_tail(qe, &mod->fcxp_unused_q);
+               if (i < ((mod->num_fcxps - num_fcxp_fw) / 2)) {
+                       bfa_q_deq_tail(&mod->fcxp_req_free_q, &qe);
+                       list_add_tail(qe, &mod->fcxp_req_unused_q);
+               } else {
+                       bfa_q_deq_tail(&mod->fcxp_rsp_free_q, &qe);
+                       list_add_tail(qe, &mod->fcxp_rsp_unused_q);
+               }
        }
 }
 
@@ -1404,11 +1440,11 @@ bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event)
 
        switch (event) {
        case BFA_LPS_SM_FWRSP:
+       case BFA_LPS_SM_OFFLINE:
                bfa_sm_set_state(lps, bfa_lps_sm_init);
                bfa_lps_logout_comp(lps);
                break;
 
-       case BFA_LPS_SM_OFFLINE:
        case BFA_LPS_SM_DELETE:
                bfa_sm_set_state(lps, bfa_lps_sm_init);
                break;
@@ -1786,6 +1822,8 @@ bfa_lps_logout_comp_cb(void *arg, bfa_boolean_t complete)
 
        if (lps->fdisc)
                bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
+       else
+               bfa_cb_lps_flogo_comp(lps->bfa->bfad, lps->uarg);
 }
 
 /*
@@ -4237,6 +4275,10 @@ bfa_rport_sm_offline(struct bfa_rport_s *rp, enum bfa_rport_event event)
                bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
                break;
 
+       case BFA_RPORT_SM_OFFLINE:
+               bfa_rport_offline_cb(rp);
+               break;
+
        default:
                bfa_stats(rp, sm_off_unexp);
                bfa_sm_fault(rp->bfa, event);
@@ -4353,6 +4395,7 @@ bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
        case BFA_RPORT_SM_HWFAIL:
                bfa_stats(rp, sm_offp_hwf);
                bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
+               bfa_rport_offline_cb(rp);
                break;
 
        default:
@@ -4731,8 +4774,10 @@ bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed)
        WARN_ON(speed == 0);
        WARN_ON(speed == BFA_PORT_SPEED_AUTO);
 
-       rport->rport_info.speed = speed;
-       bfa_sm_send_event(rport, BFA_RPORT_SM_SET_SPEED);
+       if (rport) {
+               rport->rport_info.speed = speed;
+               bfa_sm_send_event(rport, BFA_RPORT_SM_SET_SPEED);
+       }
 }
 
 /* Set Rport LUN Mask */
index f300675646395b8702d8879cde778cddb08755e1..1abcf7c51661f68cdbe286f29dda5363b2ccb5d9 100644 (file)
@@ -97,10 +97,13 @@ struct bfa_fcxp_mod_s {
        struct bfa_s      *bfa;         /* backpointer to BFA */
        struct bfa_fcxp_s *fcxp_list;   /* array of FCXPs */
        u16     num_fcxps;      /* max num FCXP requests */
-       struct list_head  fcxp_free_q;  /* free FCXPs */
-       struct list_head  fcxp_active_q;        /* active FCXPs */
-       struct list_head  wait_q;               /* wait queue for free fcxp */
-       struct list_head fcxp_unused_q; /* unused fcxps */
+       struct list_head fcxp_req_free_q; /* free FCXPs used for sending req */
+       struct list_head fcxp_rsp_free_q; /* free FCXPs used for sending req */
+       struct list_head fcxp_active_q; /* active FCXPs */
+       struct list_head req_wait_q;    /* wait queue for free req_fcxp */
+       struct list_head rsp_wait_q;    /* wait queue for free rsp_fcxp */
+       struct list_head fcxp_req_unused_q;     /* unused req_fcxps */
+       struct list_head fcxp_rsp_unused_q;     /* unused rsp_fcxps */
        u32     req_pld_sz;
        u32     rsp_pld_sz;
        struct bfa_mem_dma_s dma_seg[BFA_FCXP_DMA_SEGS];
@@ -197,6 +200,7 @@ struct bfa_fcxp_s {
        struct bfa_cb_qe_s    hcb_qe;   /*  comp: callback qelem */
        struct bfa_reqq_wait_s  reqq_wqe;
        bfa_boolean_t   reqq_waiting;
+       bfa_boolean_t   req_rsp;        /* Used to track req/rsp fcxp */
 };
 
 struct bfa_fcxp_wqe_s {
@@ -586,20 +590,22 @@ void      bfa_rport_unset_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp);
 /*
  * bfa fcxp API functions
  */
-struct bfa_fcxp_s *bfa_fcxp_alloc(void *bfad_fcxp, struct bfa_s *bfa,
+struct bfa_fcxp_s *bfa_fcxp_req_rsp_alloc(void *bfad_fcxp, struct bfa_s *bfa,
                                  int nreq_sgles, int nrsp_sgles,
                                  bfa_fcxp_get_sgaddr_t get_req_sga,
                                  bfa_fcxp_get_sglen_t get_req_sglen,
                                  bfa_fcxp_get_sgaddr_t get_rsp_sga,
-                                 bfa_fcxp_get_sglen_t get_rsp_sglen);
-void bfa_fcxp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
+                                 bfa_fcxp_get_sglen_t get_rsp_sglen,
+                                 bfa_boolean_t req);
+void bfa_fcxp_req_rsp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
                                bfa_fcxp_alloc_cbfn_t alloc_cbfn,
                                void *cbarg, void *bfad_fcxp,
                                int nreq_sgles, int nrsp_sgles,
                                bfa_fcxp_get_sgaddr_t get_req_sga,
                                bfa_fcxp_get_sglen_t get_req_sglen,
                                bfa_fcxp_get_sgaddr_t get_rsp_sga,
-                               bfa_fcxp_get_sglen_t get_rsp_sglen);
+                               bfa_fcxp_get_sglen_t get_rsp_sglen,
+                               bfa_boolean_t req);
 void bfa_fcxp_walloc_cancel(struct bfa_s *bfa,
                            struct bfa_fcxp_wqe_s *wqe);
 void bfa_fcxp_discard(struct bfa_fcxp_s *fcxp);
@@ -658,6 +664,7 @@ u8 bfa_lps_get_fwtag(struct bfa_s *bfa, u8 lp_tag);
 u32 bfa_lps_get_base_pid(struct bfa_s *bfa);
 u8 bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid);
 void bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status);
+void bfa_cb_lps_flogo_comp(void *bfad, void *uarg);
 void bfa_cb_lps_fdisc_comp(void *bfad, void *uarg, bfa_status_t status);
 void bfa_cb_lps_fdisclogo_comp(void *bfad, void *uarg);
 void bfa_cb_lps_cvl_event(void *bfad, void *uarg);
index 2c8f0c713076251bcc2e8e17e2018975e765aa00..c37494916a1af1c3a7599db6926ff5242624d538 100644 (file)
@@ -57,6 +57,7 @@ int           pcie_max_read_reqsz;
 int            bfa_debugfs_enable = 1;
 int            msix_disable_cb = 0, msix_disable_ct = 0;
 int            max_xfer_size = BFAD_MAX_SECTORS >> 1;
+int            max_rport_logins = BFA_FCS_MAX_RPORT_LOGINS;
 
 /* Firmware releated */
 u32    bfi_image_cb_size, bfi_image_ct_size, bfi_image_ct2_size;
@@ -148,6 +149,8 @@ MODULE_PARM_DESC(bfa_debugfs_enable, "Enables debugfs feature, default=1,"
 module_param(max_xfer_size, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(max_xfer_size, "default=32MB,"
                " Range[64k|128k|256k|512k|1024k|2048k]");
+module_param(max_rport_logins, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(max_rport_logins, "Max number of logins to initiator and target rports on a port (physical/logical), default=1024");
 
 static void
 bfad_sm_uninit(struct bfad_s *bfad, enum bfad_sm_event event);
@@ -736,6 +739,9 @@ bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
                }
        }
 
+       /* Enable PCIE Advanced Error Recovery (AER) if kernel supports */
+       pci_enable_pcie_error_reporting(pdev);
+
        bfad->pci_bar0_kva = pci_iomap(pdev, 0, pci_resource_len(pdev, 0));
        bfad->pci_bar2_kva = pci_iomap(pdev, 2, pci_resource_len(pdev, 2));
 
@@ -806,6 +812,8 @@ bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
                }
        }
 
+       pci_save_state(pdev);
+
        return 0;
 
 out_release_region:
@@ -822,6 +830,8 @@ bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad)
        pci_iounmap(pdev, bfad->pci_bar0_kva);
        pci_iounmap(pdev, bfad->pci_bar2_kva);
        pci_release_regions(pdev);
+       /* Disable PCIE Advanced Error Recovery (AER) */
+       pci_disable_pcie_error_reporting(pdev);
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
 }
@@ -1258,6 +1268,16 @@ bfad_setup_intr(struct bfad_s *bfad)
 
                error = pci_enable_msix(bfad->pcidev, msix_entries, bfad->nvec);
                if (error) {
+                       /* In CT1 & CT2, try to allocate just one vector */
+                       if (bfa_asic_id_ctc(pdev->device)) {
+                               printk(KERN_WARNING "bfa %s: trying one msix "
+                                      "vector failed to allocate %d[%d]\n",
+                                      bfad->pci_name, bfad->nvec, error);
+                               bfad->nvec = 1;
+                               error = pci_enable_msix(bfad->pcidev,
+                                               msix_entries, bfad->nvec);
+                       }
+
                        /*
                         * Only error number of vector is available.
                         * We don't have a mechanism to map multiple
@@ -1267,12 +1287,13 @@ bfad_setup_intr(struct bfad_s *bfad)
                         *  vectors. Linux doesn't duplicate vectors
                         * in the MSIX table for this case.
                         */
-
-                       printk(KERN_WARNING "bfad%d: "
-                               "pci_enable_msix failed (%d),"
-                               " use line based.\n", bfad->inst_no, error);
-
-                       goto line_based;
+                       if (error) {
+                               printk(KERN_WARNING "bfad%d: "
+                                      "pci_enable_msix failed (%d), "
+                                      "use line based.\n",
+                                       bfad->inst_no, error);
+                               goto line_based;
+                       }
                }
 
                /* Disable INTX in MSI-X mode */
@@ -1470,6 +1491,197 @@ bfad_pci_remove(struct pci_dev *pdev)
        kfree(bfad);
 }
 
+/*
+ * PCI Error Recovery entry, error detected.
+ */
+static pci_ers_result_t
+bfad_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
+{
+       struct bfad_s *bfad = pci_get_drvdata(pdev);
+       unsigned long   flags;
+       pci_ers_result_t ret = PCI_ERS_RESULT_NONE;
+
+       dev_printk(KERN_ERR, &pdev->dev,
+                  "error detected state: %d - flags: 0x%x\n",
+                  state, bfad->bfad_flags);
+
+       switch (state) {
+       case pci_channel_io_normal: /* non-fatal error */
+               spin_lock_irqsave(&bfad->bfad_lock, flags);
+               bfad->bfad_flags &= ~BFAD_EEH_BUSY;
+               /* Suspend/fail all bfa operations */
+               bfa_ioc_suspend(&bfad->bfa.ioc);
+               spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+               del_timer_sync(&bfad->hal_tmo);
+               ret = PCI_ERS_RESULT_CAN_RECOVER;
+               break;
+       case pci_channel_io_frozen: /* fatal error */
+               init_completion(&bfad->comp);
+               spin_lock_irqsave(&bfad->bfad_lock, flags);
+               bfad->bfad_flags |= BFAD_EEH_BUSY;
+               /* Suspend/fail all bfa operations */
+               bfa_ioc_suspend(&bfad->bfa.ioc);
+               bfa_fcs_stop(&bfad->bfa_fcs);
+               spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+               wait_for_completion(&bfad->comp);
+
+               bfad_remove_intr(bfad);
+               del_timer_sync(&bfad->hal_tmo);
+               pci_disable_device(pdev);
+               ret = PCI_ERS_RESULT_NEED_RESET;
+               break;
+       case pci_channel_io_perm_failure: /* PCI Card is DEAD */
+               spin_lock_irqsave(&bfad->bfad_lock, flags);
+               bfad->bfad_flags |= BFAD_EEH_BUSY |
+                                   BFAD_EEH_PCI_CHANNEL_IO_PERM_FAILURE;
+               spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+               /* If the error_detected handler is called with the reason
+                * pci_channel_io_perm_failure - it will subsequently call
+                * pci_remove() entry point to remove the pci device from the
+                * system - So defer the cleanup to pci_remove(); cleaning up
+                * here causes inconsistent state during pci_remove().
+                */
+               ret = PCI_ERS_RESULT_DISCONNECT;
+               break;
+       default:
+               WARN_ON(1);
+       }
+
+       return ret;
+}
+
+int
+restart_bfa(struct bfad_s *bfad)
+{
+       unsigned long flags;
+       struct pci_dev *pdev = bfad->pcidev;
+
+       bfa_attach(&bfad->bfa, bfad, &bfad->ioc_cfg,
+                  &bfad->meminfo, &bfad->hal_pcidev);
+
+       /* Enable Interrupt and wait bfa_init completion */
+       if (bfad_setup_intr(bfad)) {
+               dev_printk(KERN_WARNING, &pdev->dev,
+                          "%s: bfad_setup_intr failed\n", bfad->pci_name);
+               bfa_sm_send_event(bfad, BFAD_E_INTR_INIT_FAILED);
+               return -1;
+       }
+
+       init_completion(&bfad->comp);
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       bfa_iocfc_init(&bfad->bfa);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+       /* Set up interrupt handler for each vectors */
+       if ((bfad->bfad_flags & BFAD_MSIX_ON) &&
+           bfad_install_msix_handler(bfad))
+               dev_printk(KERN_WARNING, &pdev->dev,
+                          "%s: install_msix failed.\n", bfad->pci_name);
+
+       bfad_init_timer(bfad);
+       wait_for_completion(&bfad->comp);
+       bfad_drv_start(bfad);
+
+       return 0;
+}
+
+/*
+ * PCI Error Recovery entry, re-initialize the chip.
+ */
+static pci_ers_result_t
+bfad_pci_slot_reset(struct pci_dev *pdev)
+{
+       struct bfad_s *bfad = pci_get_drvdata(pdev);
+       u8 byte;
+
+       dev_printk(KERN_ERR, &pdev->dev,
+                  "bfad_pci_slot_reset flags: 0x%x\n", bfad->bfad_flags);
+
+       if (pci_enable_device(pdev)) {
+               dev_printk(KERN_ERR, &pdev->dev, "Cannot re-enable "
+                          "PCI device after reset.\n");
+               return PCI_ERS_RESULT_DISCONNECT;
+       }
+
+       pci_restore_state(pdev);
+
+       /*
+        * Read some byte (e.g. DMA max. payload size which can't
+        * be 0xff any time) to make sure - we did not hit another PCI error
+        * in the middle of recovery. If we did, then declare permanent failure.
+        */
+       pci_read_config_byte(pdev, 0x68, &byte);
+       if (byte == 0xff) {
+               dev_printk(KERN_ERR, &pdev->dev,
+                          "slot_reset failed ... got another PCI error !\n");
+               goto out_disable_device;
+       }
+
+       pci_save_state(pdev);
+       pci_set_master(pdev);
+
+       if (pci_set_dma_mask(bfad->pcidev, DMA_BIT_MASK(64)) != 0)
+               if (pci_set_dma_mask(bfad->pcidev, DMA_BIT_MASK(32)) != 0)
+                       goto out_disable_device;
+
+       pci_cleanup_aer_uncorrect_error_status(pdev);
+
+       if (restart_bfa(bfad) == -1)
+               goto out_disable_device;
+
+       pci_enable_pcie_error_reporting(pdev);
+       dev_printk(KERN_WARNING, &pdev->dev,
+                  "slot_reset completed  flags: 0x%x!\n", bfad->bfad_flags);
+
+       return PCI_ERS_RESULT_RECOVERED;
+
+out_disable_device:
+       pci_disable_device(pdev);
+       return PCI_ERS_RESULT_DISCONNECT;
+}
+
+static pci_ers_result_t
+bfad_pci_mmio_enabled(struct pci_dev *pdev)
+{
+       unsigned long   flags;
+       struct bfad_s *bfad = pci_get_drvdata(pdev);
+
+       dev_printk(KERN_INFO, &pdev->dev, "mmio_enabled\n");
+
+       /* Fetch FW diagnostic information */
+       bfa_ioc_debug_save_ftrc(&bfad->bfa.ioc);
+
+       /* Cancel all pending IOs */
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       init_completion(&bfad->comp);
+       bfa_fcs_stop(&bfad->bfa_fcs);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       wait_for_completion(&bfad->comp);
+
+       bfad_remove_intr(bfad);
+       del_timer_sync(&bfad->hal_tmo);
+       pci_disable_device(pdev);
+
+       return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static void
+bfad_pci_resume(struct pci_dev *pdev)
+{
+       unsigned long   flags;
+       struct bfad_s *bfad = pci_get_drvdata(pdev);
+
+       dev_printk(KERN_WARNING, &pdev->dev, "resume\n");
+
+       /* wait until the link is online */
+       bfad_rport_online_wait(bfad);
+
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       bfad->bfad_flags &= ~BFAD_EEH_BUSY;
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+}
+
 struct pci_device_id bfad_id_table[] = {
        {
                .vendor = BFA_PCI_VENDOR_ID_BROCADE,
@@ -1513,11 +1725,22 @@ struct pci_device_id bfad_id_table[] = {
 
 MODULE_DEVICE_TABLE(pci, bfad_id_table);
 
+/*
+ * PCI error recovery handlers.
+ */
+static struct pci_error_handlers bfad_err_handler = {
+       .error_detected = bfad_pci_error_detected,
+       .slot_reset = bfad_pci_slot_reset,
+       .mmio_enabled = bfad_pci_mmio_enabled,
+       .resume = bfad_pci_resume,
+};
+
 static struct pci_driver bfad_pci_driver = {
        .name = BFAD_DRIVER_NAME,
        .id_table = bfad_id_table,
        .probe = bfad_pci_probe,
        .remove = __devexit_p(bfad_pci_remove),
+       .err_handler = &bfad_err_handler,
 };
 
 /*
@@ -1546,6 +1769,7 @@ bfad_init(void)
 
        bfa_auto_recover = ioc_auto_recover;
        bfa_fcs_rport_set_del_timeout(rport_del_timeout);
+       bfa_fcs_rport_set_max_logins(max_rport_logins);
 
        error = pci_register_driver(&bfad_pci_driver);
        if (error) {
index b83927440171810b4d6442c992c2c01a5db4558d..72f5dc32cc12f9db1916d086d169892e40404915 100644 (file)
@@ -587,6 +587,37 @@ bfad_im_vport_disable(struct fc_vport *fc_vport, bool disable)
        return 0;
 }
 
+void
+bfad_im_vport_set_symbolic_name(struct fc_vport *fc_vport)
+{
+       struct bfad_vport_s *vport = (struct bfad_vport_s *)fc_vport->dd_data;
+       struct bfad_im_port_s *im_port =
+                       (struct bfad_im_port_s *)vport->drv_port.im_port;
+       struct bfad_s *bfad = im_port->bfad;
+       struct Scsi_Host *vshost = vport->drv_port.im_port->shost;
+       char *sym_name = fc_vport->symbolic_name;
+       struct bfa_fcs_vport_s *fcs_vport;
+       wwn_t   pwwn;
+       unsigned long flags;
+
+       u64_to_wwn(fc_host_port_name(vshost), (u8 *)&pwwn);
+
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs, 0, pwwn);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+       if (fcs_vport == NULL)
+               return;
+
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       if (strlen(sym_name) > 0) {
+               strcpy(fcs_vport->lport.port_cfg.sym_name.symname, sym_name);
+               bfa_fcs_lport_ns_util_send_rspn_id(
+                       BFA_FCS_GET_NS_FROM_PORT((&fcs_vport->lport)), NULL);
+       }
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+}
+
 struct fc_function_template bfad_im_fc_function_template = {
 
        /* Target dynamic attributes */
@@ -640,6 +671,7 @@ struct fc_function_template bfad_im_fc_function_template = {
        .vport_create = bfad_im_vport_create,
        .vport_delete = bfad_im_vport_delete,
        .vport_disable = bfad_im_vport_disable,
+       .set_vport_symbolic_name = bfad_im_vport_set_symbolic_name,
        .bsg_request = bfad_im_bsg_request,
        .bsg_timeout = bfad_im_bsg_timeout,
 };
@@ -792,6 +824,13 @@ bfad_im_model_desc_show(struct device *dev, struct device_attribute *attr,
                else if (nports == 2 && !bfa_ioc_is_cna(&bfad->bfa.ioc))
                        snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
                                "Brocade 16Gbps PCIe dual port FC HBA");
+       } else if (!strcmp(model, "Brocade-1867")) {
+               if (nports == 1 && !bfa_ioc_is_cna(&bfad->bfa.ioc))
+                       snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+                               "Brocade 16Gbps PCIe single port FC HBA for IBM");
+               else if (nports == 2 && !bfa_ioc_is_cna(&bfad->bfa.ioc))
+                       snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+                               "Brocade 16Gbps PCIe dual port FC HBA for IBM");
        } else
                snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
                        "Invalid Model");
@@ -909,15 +948,16 @@ bfad_im_num_of_discovered_ports_show(struct device *dev,
        struct bfad_port_s    *port = im_port->port;
        struct bfad_s         *bfad = im_port->bfad;
        int        nrports = 2048;
-       wwn_t          *rports = NULL;
+       struct bfa_rport_qualifier_s *rports = NULL;
        unsigned long   flags;
 
-       rports = kzalloc(sizeof(wwn_t) * nrports , GFP_ATOMIC);
+       rports = kzalloc(sizeof(struct bfa_rport_qualifier_s) * nrports,
+                        GFP_ATOMIC);
        if (rports == NULL)
                return snprintf(buf, PAGE_SIZE, "Failed\n");
 
        spin_lock_irqsave(&bfad->bfad_lock, flags);
-       bfa_fcs_lport_get_rports(port->fcs_port, rports, &nrports);
+       bfa_fcs_lport_get_rport_quals(port->fcs_port, rports, &nrports);
        spin_unlock_irqrestore(&bfad->bfad_lock, flags);
        kfree(rports);
 
index 9c1495b321d9e1fdb366fa84c88506bdb6015c59..0afa39076cef4e95f06c26f2c4e17916ed01c3d3 100644 (file)
@@ -535,7 +535,8 @@ bfad_iocmd_lport_get_rports(struct bfad_s *bfad, void *cmd,
 
        if (bfad_chk_iocmd_sz(payload_len,
                        sizeof(struct bfa_bsg_lport_get_rports_s),
-                       sizeof(wwn_t) * iocmd->nrports) != BFA_STATUS_OK) {
+                       sizeof(struct bfa_rport_qualifier_s) * iocmd->nrports)
+                       != BFA_STATUS_OK) {
                iocmd->status = BFA_STATUS_VERSION_FAIL;
                return 0;
        }
@@ -552,8 +553,9 @@ bfad_iocmd_lport_get_rports(struct bfad_s *bfad, void *cmd,
                goto out;
        }
 
-       bfa_fcs_lport_get_rports(fcs_port, (wwn_t *)iocmd_bufptr,
-                               &iocmd->nrports);
+       bfa_fcs_lport_get_rport_quals(fcs_port,
+                       (struct bfa_rport_qualifier_s *)iocmd_bufptr,
+                       &iocmd->nrports);
        spin_unlock_irqrestore(&bfad->bfad_lock, flags);
        iocmd->status = BFA_STATUS_OK;
 out:
@@ -578,7 +580,11 @@ bfad_iocmd_rport_get_attr(struct bfad_s *bfad, void *cmd)
                goto out;
        }
 
-       fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
+       if (iocmd->pid)
+               fcs_rport = bfa_fcs_lport_get_rport_by_qualifier(fcs_port,
+                                               iocmd->rpwwn, iocmd->pid);
+       else
+               fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
        if (fcs_rport == NULL) {
                bfa_trc(bfad, 0);
                spin_unlock_irqrestore(&bfad->bfad_lock, flags);
@@ -671,9 +677,11 @@ bfad_iocmd_rport_get_stats(struct bfad_s *bfad, void *cmd)
 
        memcpy((void *)&iocmd->stats, (void *)&fcs_rport->stats,
                sizeof(struct bfa_rport_stats_s));
-       memcpy((void *)&iocmd->stats.hal_stats,
-              (void *)&(bfa_fcs_rport_get_halrport(fcs_rport)->stats),
-              sizeof(struct bfa_rport_hal_stats_s));
+       if (bfa_fcs_rport_get_halrport(fcs_rport)) {
+               memcpy((void *)&iocmd->stats.hal_stats,
+                      (void *)&(bfa_fcs_rport_get_halrport(fcs_rport)->stats),
+                       sizeof(struct bfa_rport_hal_stats_s));
+       }
 
        spin_unlock_irqrestore(&bfad->bfad_lock, flags);
        iocmd->status = BFA_STATUS_OK;
@@ -709,7 +717,8 @@ bfad_iocmd_rport_clr_stats(struct bfad_s *bfad, void *cmd)
 
        memset((char *)&fcs_rport->stats, 0, sizeof(struct bfa_rport_stats_s));
        rport = bfa_fcs_rport_get_halrport(fcs_rport);
-       memset(&rport->stats, 0, sizeof(rport->stats));
+       if (rport)
+               memset(&rport->stats, 0, sizeof(rport->stats));
        spin_unlock_irqrestore(&bfad->bfad_lock, flags);
        iocmd->status = BFA_STATUS_OK;
 out:
@@ -744,7 +753,8 @@ bfad_iocmd_rport_set_speed(struct bfad_s *bfad, void *cmd)
        fcs_rport->rpf.assigned_speed  = iocmd->speed;
        /* Set this speed in f/w only if the RPSC speed is not available */
        if (fcs_rport->rpf.rpsc_speed == BFA_PORT_SPEED_UNKNOWN)
-               bfa_rport_speed(fcs_rport->bfa_rport, iocmd->speed);
+               if (fcs_rport->bfa_rport)
+                       bfa_rport_speed(fcs_rport->bfa_rport, iocmd->speed);
        spin_unlock_irqrestore(&bfad->bfad_lock, flags);
        iocmd->status = BFA_STATUS_OK;
 out:
@@ -1030,9 +1040,10 @@ bfad_iocmd_itnim_get_iostats(struct bfad_s *bfad, void *cmd)
                        iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
                else {
                        iocmd->status = BFA_STATUS_OK;
-                       memcpy((void *)&iocmd->iostats, (void *)
-                              &(bfa_fcs_itnim_get_halitn(itnim)->stats),
-                              sizeof(struct bfa_itnim_iostats_s));
+                       if (bfa_fcs_itnim_get_halitn(itnim))
+                               memcpy((void *)&iocmd->iostats, (void *)
+                               &(bfa_fcs_itnim_get_halitn(itnim)->stats),
+                                      sizeof(struct bfa_itnim_iostats_s));
                }
        }
        spin_unlock_irqrestore(&bfad->bfad_lock, flags);
@@ -2949,13 +2960,13 @@ bfad_fcxp_bsg_send(struct fc_bsg_job *job, struct bfad_fcxp *drv_fcxp,
        spin_lock_irqsave(&bfad->bfad_lock, flags);
 
        /* Allocate bfa_fcxp structure */
-       hal_fcxp = bfa_fcxp_alloc(drv_fcxp, &bfad->bfa,
+       hal_fcxp = bfa_fcxp_req_rsp_alloc(drv_fcxp, &bfad->bfa,
                                  drv_fcxp->num_req_sgles,
                                  drv_fcxp->num_rsp_sgles,
                                  bfad_fcxp_get_req_sgaddr_cb,
                                  bfad_fcxp_get_req_sglen_cb,
                                  bfad_fcxp_get_rsp_sgaddr_cb,
-                                 bfad_fcxp_get_rsp_sglen_cb);
+                                 bfad_fcxp_get_rsp_sglen_cb, BFA_TRUE);
        if (!hal_fcxp) {
                bfa_trc(bfad, 0);
                spin_unlock_irqrestore(&bfad->bfad_lock, flags);
index 17ad67283130d1f1528ade5e97893766e6534eac..8c569ddb750da571e368a490ea3884751db88c07 100644 (file)
@@ -319,6 +319,8 @@ struct bfa_bsg_rport_attr_s {
        u16             vf_id;
        wwn_t           pwwn;
        wwn_t           rpwwn;
+       u32             pid;
+       u32             rsvd;
        struct bfa_rport_attr_s attr;
 };
 
index 7f74f1d19124a95ea4f39f4c1110bb4fbf93e50c..1840651ce1d49c141b4e064d30efe35067cf2c46 100644 (file)
@@ -37,6 +37,7 @@
 #include <linux/vmalloc.h>
 #include <linux/workqueue.h>
 #include <linux/bitops.h>
+#include <linux/aer.h>
 #include <scsi/scsi.h>
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_tcq.h>
@@ -56,7 +57,7 @@
 #ifdef BFA_DRIVER_VERSION
 #define BFAD_DRIVER_VERSION    BFA_DRIVER_VERSION
 #else
-#define BFAD_DRIVER_VERSION    "3.0.23.0"
+#define BFAD_DRIVER_VERSION    "3.1.2.0"
 #endif
 
 #define BFAD_PROTO_NAME FCPI_NAME
@@ -81,6 +82,8 @@
 #define BFAD_FC4_PROBE_DONE                    0x00000200
 #define BFAD_PORT_DELETE                       0x00000001
 #define BFAD_INTX_ON                           0x00000400
+#define BFAD_EEH_BUSY                          0x00000800
+#define BFAD_EEH_PCI_CHANNEL_IO_PERM_FAILURE   0x00001000
 /*
  * BFAD related definition
  */
index 2eebf8d4d58b2ce9c364628a67a3bb5da8177750..8f92732655c785a6adafc813c67cf7e02d843eb5 100644 (file)
@@ -1216,6 +1216,15 @@ bfad_im_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd
                return 0;
        }
 
+       if (bfad->bfad_flags & BFAD_EEH_BUSY) {
+               if (bfad->bfad_flags & BFAD_EEH_PCI_CHANNEL_IO_PERM_FAILURE)
+                       cmnd->result = DID_NO_CONNECT << 16;
+               else
+                       cmnd->result = DID_REQUEUE << 16;
+               done(cmnd);
+               return 0;
+       }
+
        sg_cnt = scsi_dma_map(cmnd);
        if (sg_cnt < 0)
                return SCSI_MLQUEUE_HOST_BUSY;
index 73f231ccd45b2cf2ed8470cddd25680508250f01..8d4626c07a12f5e7a4f164b835f57e7f275acd2a 100644 (file)
@@ -1807,7 +1807,7 @@ static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
                        fcp_sns_len = SCSI_SENSE_BUFFERSIZE;
                }
 
-               memset(sc_cmd->sense_buffer, 0, sizeof(sc_cmd->sense_buffer));
+               memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
                if (fcp_sns_len)
                        memcpy(sc_cmd->sense_buffer, rq_data, fcp_sns_len);
 
index 450e011f981af7f964ea384d4eba447bd7127e40..76e4c039f0d5db725871da9454631dc352f19213 100644 (file)
@@ -1422,7 +1422,8 @@ static const char * const hostbyte_table[]={
 "DID_OK", "DID_NO_CONNECT", "DID_BUS_BUSY", "DID_TIME_OUT", "DID_BAD_TARGET",
 "DID_ABORT", "DID_PARITY", "DID_ERROR", "DID_RESET", "DID_BAD_INTR",
 "DID_PASSTHROUGH", "DID_SOFT_ERROR", "DID_IMM_RETRY", "DID_REQUEUE",
-"DID_TRANSPORT_DISRUPTED", "DID_TRANSPORT_FAILFAST" };
+"DID_TRANSPORT_DISRUPTED", "DID_TRANSPORT_FAILFAST", "DID_TARGET_FAILURE",
+"DID_NEXUS_FAILURE" };
 #define NUM_HOSTBYTE_STRS ARRAY_SIZE(hostbyte_table)
 
 static const char * const driverbyte_table[]={
index 08d80a6d272a0c255541b445a97c13e147122e0d..6f4d8e6f32f15da12101c814e0f71179acf8196b 100644 (file)
@@ -641,8 +641,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h)
                h->state = TPGS_STATE_STANDBY;
                break;
        case TPGS_STATE_OFFLINE:
-       case TPGS_STATE_UNAVAILABLE:
-               /* Path unusable for unavailable/offline */
+               /* Path unusable */
                err = SCSI_DH_DEV_OFFLINED;
                break;
        default:
index 20c4557f5abd09c099600571ee417e9ef01e998c..69c915aa77c24737e17680805b67d4507df59c4c 100644 (file)
@@ -790,29 +790,19 @@ static const struct scsi_dh_devlist rdac_dev_list[] = {
        {"IBM", "1815"},
        {"IBM", "1818"},
        {"IBM", "3526"},
-       {"SGI", "TP9400"},
-       {"SGI", "TP9500"},
-       {"SGI", "TP9700"},
+       {"SGI", "TP9"},
        {"SGI", "IS"},
        {"STK", "OPENstorage D280"},
-       {"SUN", "CSM200_R"},
-       {"SUN", "LCSM100_I"},
-       {"SUN", "LCSM100_S"},
-       {"SUN", "LCSM100_E"},
-       {"SUN", "LCSM100_F"},
-       {"DELL", "MD3000"},
-       {"DELL", "MD3000i"},
-       {"DELL", "MD32xx"},
-       {"DELL", "MD32xxi"},
-       {"DELL", "MD36xxi"},
-       {"DELL", "MD36xxf"},
-       {"LSI", "INF-01-00"},
-       {"ENGENIO", "INF-01-00"},
        {"STK", "FLEXLINE 380"},
-       {"SUN", "CSM100_R_FC"},
+       {"SUN", "CSM"},
+       {"SUN", "LCSM100"},
        {"SUN", "STK6580_6780"},
        {"SUN", "SUN_6180"},
        {"SUN", "ArrayStorage"},
+       {"DELL", "MD3"},
+       {"NETAPP", "INF-01-00"},
+       {"LSI", "INF-01-00"},
+       {"ENGENIO", "INF-01-00"},
        {NULL, NULL},
 };
 
@@ -863,7 +853,7 @@ static int rdac_bus_attach(struct scsi_device *sdev)
        if (!scsi_dh_data) {
                sdev_printk(KERN_ERR, sdev, "%s: Attach failed\n",
                            RDAC_NAME);
-               return 0;
+               return -ENOMEM;
        }
 
        scsi_dh_data->scsi_dh = &rdac_dh;
index 2b4261cb77424b52046d6e4c255f32f3359bbf2a..4217e49aea4624b5e0b2c9354a25b627fe10b915 100644 (file)
@@ -99,6 +99,15 @@ static const struct pci_device_id hpsa_pci_device_id[] = {
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3354},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3355},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3356},
+       {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1920},
+       {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1921},
+       {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1922},
+       {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1923},
+       {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1924},
+       {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1925},
+       {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1926},
+       {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1928},
+       {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x334d},
        {PCI_VENDOR_ID_HP,     PCI_ANY_ID,      PCI_ANY_ID, PCI_ANY_ID,
                PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
        {0,}
@@ -118,13 +127,22 @@ static struct board_type products[] = {
        {0x3249103C, "Smart Array P812", &SA5_access},
        {0x324a103C, "Smart Array P712m", &SA5_access},
        {0x324b103C, "Smart Array P711m", &SA5_access},
-       {0x3350103C, "Smart Array", &SA5_access},
-       {0x3351103C, "Smart Array", &SA5_access},
-       {0x3352103C, "Smart Array", &SA5_access},
-       {0x3353103C, "Smart Array", &SA5_access},
-       {0x3354103C, "Smart Array", &SA5_access},
-       {0x3355103C, "Smart Array", &SA5_access},
-       {0x3356103C, "Smart Array", &SA5_access},
+       {0x3350103C, "Smart Array P222", &SA5_access},
+       {0x3351103C, "Smart Array P420", &SA5_access},
+       {0x3352103C, "Smart Array P421", &SA5_access},
+       {0x3353103C, "Smart Array P822", &SA5_access},
+       {0x3354103C, "Smart Array P420i", &SA5_access},
+       {0x3355103C, "Smart Array P220i", &SA5_access},
+       {0x3356103C, "Smart Array P721m", &SA5_access},
+       {0x1920103C, "Smart Array", &SA5_access},
+       {0x1921103C, "Smart Array", &SA5_access},
+       {0x1922103C, "Smart Array", &SA5_access},
+       {0x1923103C, "Smart Array", &SA5_access},
+       {0x1924103C, "Smart Array", &SA5_access},
+       {0x1925103C, "Smart Array", &SA5_access},
+       {0x1926103C, "Smart Array", &SA5_access},
+       {0x1928103C, "Smart Array", &SA5_access},
+       {0x334d103C, "Smart Array P822se", &SA5_access},
        {0xFFFF103C, "Unknown Smart Array", &SA5_access},
 };
 
@@ -2610,7 +2628,7 @@ static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
        /* not in reqQ, if also not in cmpQ, must have already completed */
        found = hpsa_find_cmd_in_queue(h, sc, &h->cmpQ);
        if (!found)  {
-               dev_dbg(&h->pdev->dev, "%s Request FAILED (not known to driver).\n",
+               dev_dbg(&h->pdev->dev, "%s Request SUCCEEDED (not known to driver).\n",
                                msg);
                return SUCCESS;
        }
@@ -3266,7 +3284,7 @@ static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
                        c->Request.Timeout = 0; /* Don't time out */
                        memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
                        c->Request.CDB[0] =  cmd;
-                       c->Request.CDB[1] = 0x03;  /* Reset target above */
+                       c->Request.CDB[1] = HPSA_RESET_TYPE_LUN;
                        /* If bytes 4-7 are zero, it means reset the */
                        /* LunID device */
                        c->Request.CDB[4] = 0x00;
@@ -3338,7 +3356,8 @@ static void __iomem *remap_pci_mem(ulong base, ulong size)
 {
        ulong page_base = ((ulong) base) & PAGE_MASK;
        ulong page_offs = ((ulong) base) - page_base;
-       void __iomem *page_remapped = ioremap(page_base, page_offs + size);
+       void __iomem *page_remapped = ioremap_nocache(page_base,
+               page_offs + size);
 
        return page_remapped ? (page_remapped + page_offs) : NULL;
 }
index ff5b5c5538eed62b3a43111318fe74d068f46b1a..cb150d1e58509d1323aaec7a5659e21ec713b70e 100644 (file)
@@ -1,7 +1,3 @@
-obj-$(CONFIG_SCSI_IBMVSCSI)    += ibmvscsic.o
-
-ibmvscsic-y                    += ibmvscsi.o
-ibmvscsic-$(CONFIG_PPC_PSERIES)        += rpa_vscsi.o 
-
+obj-$(CONFIG_SCSI_IBMVSCSI)    += ibmvscsi.o
 obj-$(CONFIG_SCSI_IBMVSCSIS)   += ibmvstgt.o
 obj-$(CONFIG_SCSI_IBMVFC)      += ibmvfc.o
index 134a0ae85bb7d8d996252879b8dfb5b413f4ac0f..5e8d51bd03de92222874321ec874559b9a91ec83 100644 (file)
@@ -2241,6 +2241,21 @@ static int ibmvfc_match_key(struct ibmvfc_event *evt, void *key)
        return 0;
 }
 
+/**
+ * ibmvfc_match_evt - Match function for specified event
+ * @evt:       ibmvfc event struct
+ * @match:     event to match
+ *
+ * Returns:
+ *     1 if event matches key / 0 if event does not match key
+ **/
+static int ibmvfc_match_evt(struct ibmvfc_event *evt, void *match)
+{
+       if (evt == match)
+               return 1;
+       return 0;
+}
+
 /**
  * ibmvfc_abort_task_set - Abort outstanding commands to the device
  * @sdev:      scsi device to abort commands
@@ -2322,7 +2337,20 @@ static int ibmvfc_abort_task_set(struct scsi_device *sdev)
                if (rc) {
                        sdev_printk(KERN_INFO, sdev, "Cancel failed, resetting host\n");
                        ibmvfc_reset_host(vhost);
-                       rsp_rc = 0;
+                       rsp_rc = -EIO;
+                       rc = ibmvfc_wait_for_ops(vhost, sdev->hostdata, ibmvfc_match_key);
+
+                       if (rc == SUCCESS)
+                               rsp_rc = 0;
+
+                       rc = ibmvfc_wait_for_ops(vhost, evt, ibmvfc_match_evt);
+                       if (rc != SUCCESS) {
+                               spin_lock_irqsave(vhost->host->host_lock, flags);
+                               ibmvfc_hard_reset_host(vhost);
+                               spin_unlock_irqrestore(vhost->host->host_lock, flags);
+                               rsp_rc = 0;
+                       }
+
                        goto out;
                }
        }
@@ -2597,8 +2625,10 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
        case IBMVFC_AE_SCN_FABRIC:
        case IBMVFC_AE_SCN_DOMAIN:
                vhost->events_to_log |= IBMVFC_AE_RSCN;
-               vhost->delay_init = 1;
-               __ibmvfc_reset_host(vhost);
+               if (vhost->state < IBMVFC_HALTED) {
+                       vhost->delay_init = 1;
+                       __ibmvfc_reset_host(vhost);
+               }
                break;
        case IBMVFC_AE_SCN_NPORT:
        case IBMVFC_AE_SCN_GROUP:
index 834c37fc7ce9be5dcb73825d4b3f50da8beae800..3be8af624e6fce888ab2b148c66321a2ce30558c 100644 (file)
@@ -29,8 +29,8 @@
 #include "viosrp.h"
 
 #define IBMVFC_NAME    "ibmvfc"
-#define IBMVFC_DRIVER_VERSION          "1.0.9"
-#define IBMVFC_DRIVER_DATE             "(August 5, 2010)"
+#define IBMVFC_DRIVER_VERSION          "1.0.10"
+#define IBMVFC_DRIVER_DATE             "(August 24, 2012)"
 
 #define IBMVFC_DEFAULT_TIMEOUT 60
 #define IBMVFC_ADISC_CANCEL_TIMEOUT    45
index 3a6c4742951ea4f1e661c4973a48fdd16abb501d..ef9a54c7da6730b2f7ff9fc07a6037f12fb02a96 100644 (file)
@@ -93,13 +93,13 @@ static int max_requests = IBMVSCSI_MAX_REQUESTS_DEFAULT;
 static int max_events = IBMVSCSI_MAX_REQUESTS_DEFAULT + 2;
 static int fast_fail = 1;
 static int client_reserve = 1;
+static char partition_name[97] = "UNKNOWN";
+static unsigned int partition_number = -1;
 
 static struct scsi_transport_template *ibmvscsi_transport_template;
 
 #define IBMVSCSI_VERSION "1.5.9"
 
-static struct ibmvscsi_ops *ibmvscsi_ops;
-
 MODULE_DESCRIPTION("IBM Virtual SCSI");
 MODULE_AUTHOR("Dave Boutcher");
 MODULE_LICENSE("GPL");
@@ -118,6 +118,316 @@ MODULE_PARM_DESC(fast_fail, "Enable fast fail. [Default=1]");
 module_param_named(client_reserve, client_reserve, int, S_IRUGO );
 MODULE_PARM_DESC(client_reserve, "Attempt client managed reserve/release");
 
+static void ibmvscsi_handle_crq(struct viosrp_crq *crq,
+                               struct ibmvscsi_host_data *hostdata);
+
+/* ------------------------------------------------------------
+ * Routines for managing the command/response queue
+ */
+/**
+ * ibmvscsi_handle_event: - Interrupt handler for crq events
+ * @irq:       number of irq to handle, not used
+ * @dev_instance: ibmvscsi_host_data of host that received interrupt
+ *
+ * Disables interrupts and schedules srp_task
+ * Always returns IRQ_HANDLED
+ */
+static irqreturn_t ibmvscsi_handle_event(int irq, void *dev_instance)
+{
+       struct ibmvscsi_host_data *hostdata =
+           (struct ibmvscsi_host_data *)dev_instance;
+       vio_disable_interrupts(to_vio_dev(hostdata->dev));
+       tasklet_schedule(&hostdata->srp_task);
+       return IRQ_HANDLED;
+}
+
+/**
+ * release_crq_queue: - Deallocates data and unregisters CRQ
+ * @queue:     crq_queue to initialize and register
+ * @host_data: ibmvscsi_host_data of host
+ *
+ * Frees irq, deallocates a page for messages, unmaps dma, and unregisters
+ * the crq with the hypervisor.
+ */
+static void ibmvscsi_release_crq_queue(struct crq_queue *queue,
+                                      struct ibmvscsi_host_data *hostdata,
+                                      int max_requests)
+{
+       long rc = 0;
+       struct vio_dev *vdev = to_vio_dev(hostdata->dev);
+       free_irq(vdev->irq, (void *)hostdata);
+       tasklet_kill(&hostdata->srp_task);
+       do {
+               if (rc)
+                       msleep(100);
+               rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
+       } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
+       dma_unmap_single(hostdata->dev,
+                        queue->msg_token,
+                        queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
+       free_page((unsigned long)queue->msgs);
+}
+
+/**
+ * crq_queue_next_crq: - Returns the next entry in message queue
+ * @queue:     crq_queue to use
+ *
+ * Returns pointer to next entry in queue, or NULL if there are no new
+ * entried in the CRQ.
+ */
+static struct viosrp_crq *crq_queue_next_crq(struct crq_queue *queue)
+{
+       struct viosrp_crq *crq;
+       unsigned long flags;
+
+       spin_lock_irqsave(&queue->lock, flags);
+       crq = &queue->msgs[queue->cur];
+       if (crq->valid & 0x80) {
+               if (++queue->cur == queue->size)
+                       queue->cur = 0;
+       } else
+               crq = NULL;
+       spin_unlock_irqrestore(&queue->lock, flags);
+
+       return crq;
+}
+
+/**
+ * ibmvscsi_send_crq: - Send a CRQ
+ * @hostdata:  the adapter
+ * @word1:     the first 64 bits of the data
+ * @word2:     the second 64 bits of the data
+ */
+static int ibmvscsi_send_crq(struct ibmvscsi_host_data *hostdata,
+                            u64 word1, u64 word2)
+{
+       struct vio_dev *vdev = to_vio_dev(hostdata->dev);
+
+       return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
+}
+
+/**
+ * ibmvscsi_task: - Process srps asynchronously
+ * @data:      ibmvscsi_host_data of host
+ */
+static void ibmvscsi_task(void *data)
+{
+       struct ibmvscsi_host_data *hostdata = (struct ibmvscsi_host_data *)data;
+       struct vio_dev *vdev = to_vio_dev(hostdata->dev);
+       struct viosrp_crq *crq;
+       int done = 0;
+
+       while (!done) {
+               /* Pull all the valid messages off the CRQ */
+               while ((crq = crq_queue_next_crq(&hostdata->queue)) != NULL) {
+                       ibmvscsi_handle_crq(crq, hostdata);
+                       crq->valid = 0x00;
+               }
+
+               vio_enable_interrupts(vdev);
+               crq = crq_queue_next_crq(&hostdata->queue);
+               if (crq != NULL) {
+                       vio_disable_interrupts(vdev);
+                       ibmvscsi_handle_crq(crq, hostdata);
+                       crq->valid = 0x00;
+               } else {
+                       done = 1;
+               }
+       }
+}
+
+static void gather_partition_info(void)
+{
+       struct device_node *rootdn;
+
+       const char *ppartition_name;
+       const unsigned int *p_number_ptr;
+
+       /* Retrieve information about this partition */
+       rootdn = of_find_node_by_path("/");
+       if (!rootdn) {
+               return;
+       }
+
+       ppartition_name = of_get_property(rootdn, "ibm,partition-name", NULL);
+       if (ppartition_name)
+               strncpy(partition_name, ppartition_name,
+                               sizeof(partition_name));
+       p_number_ptr = of_get_property(rootdn, "ibm,partition-no", NULL);
+       if (p_number_ptr)
+               partition_number = *p_number_ptr;
+       of_node_put(rootdn);
+}
+
+static void set_adapter_info(struct ibmvscsi_host_data *hostdata)
+{
+       memset(&hostdata->madapter_info, 0x00,
+                       sizeof(hostdata->madapter_info));
+
+       dev_info(hostdata->dev, "SRP_VERSION: %s\n", SRP_VERSION);
+       strcpy(hostdata->madapter_info.srp_version, SRP_VERSION);
+
+       strncpy(hostdata->madapter_info.partition_name, partition_name,
+                       sizeof(hostdata->madapter_info.partition_name));
+
+       hostdata->madapter_info.partition_number = partition_number;
+
+       hostdata->madapter_info.mad_version = 1;
+       hostdata->madapter_info.os_type = 2;
+}
+
+/**
+ * reset_crq_queue: - resets a crq after a failure
+ * @queue:     crq_queue to initialize and register
+ * @hostdata:  ibmvscsi_host_data of host
+ *
+ */
+static int ibmvscsi_reset_crq_queue(struct crq_queue *queue,
+                                   struct ibmvscsi_host_data *hostdata)
+{
+       int rc = 0;
+       struct vio_dev *vdev = to_vio_dev(hostdata->dev);
+
+       /* Close the CRQ */
+       do {
+               if (rc)
+                       msleep(100);
+               rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
+       } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
+
+       /* Clean out the queue */
+       memset(queue->msgs, 0x00, PAGE_SIZE);
+       queue->cur = 0;
+
+       set_adapter_info(hostdata);
+
+       /* And re-open it again */
+       rc = plpar_hcall_norets(H_REG_CRQ,
+                               vdev->unit_address,
+                               queue->msg_token, PAGE_SIZE);
+       if (rc == 2) {
+               /* Adapter is good, but other end is not ready */
+               dev_warn(hostdata->dev, "Partner adapter not ready\n");
+       } else if (rc != 0) {
+               dev_warn(hostdata->dev, "couldn't register crq--rc 0x%x\n", rc);
+       }
+       return rc;
+}
+
+/**
+ * initialize_crq_queue: - Initializes and registers CRQ with hypervisor
+ * @queue:     crq_queue to initialize and register
+ * @hostdata:  ibmvscsi_host_data of host
+ *
+ * Allocates a page for messages, maps it for dma, and registers
+ * the crq with the hypervisor.
+ * Returns zero on success.
+ */
+static int ibmvscsi_init_crq_queue(struct crq_queue *queue,
+                                  struct ibmvscsi_host_data *hostdata,
+                                  int max_requests)
+{
+       int rc;
+       int retrc;
+       struct vio_dev *vdev = to_vio_dev(hostdata->dev);
+
+       queue->msgs = (struct viosrp_crq *)get_zeroed_page(GFP_KERNEL);
+
+       if (!queue->msgs)
+               goto malloc_failed;
+       queue->size = PAGE_SIZE / sizeof(*queue->msgs);
+
+       queue->msg_token = dma_map_single(hostdata->dev, queue->msgs,
+                                         queue->size * sizeof(*queue->msgs),
+                                         DMA_BIDIRECTIONAL);
+
+       if (dma_mapping_error(hostdata->dev, queue->msg_token))
+               goto map_failed;
+
+       gather_partition_info();
+       set_adapter_info(hostdata);
+
+       retrc = rc = plpar_hcall_norets(H_REG_CRQ,
+                               vdev->unit_address,
+                               queue->msg_token, PAGE_SIZE);
+       if (rc == H_RESOURCE)
+               /* maybe kexecing and resource is busy. try a reset */
+               rc = ibmvscsi_reset_crq_queue(queue,
+                                             hostdata);
+
+       if (rc == 2) {
+               /* Adapter is good, but other end is not ready */
+               dev_warn(hostdata->dev, "Partner adapter not ready\n");
+               retrc = 0;
+       } else if (rc != 0) {
+               dev_warn(hostdata->dev, "Error %d opening adapter\n", rc);
+               goto reg_crq_failed;
+       }
+
+       queue->cur = 0;
+       spin_lock_init(&queue->lock);
+
+       tasklet_init(&hostdata->srp_task, (void *)ibmvscsi_task,
+                    (unsigned long)hostdata);
+
+       if (request_irq(vdev->irq,
+                       ibmvscsi_handle_event,
+                       0, "ibmvscsi", (void *)hostdata) != 0) {
+               dev_err(hostdata->dev, "couldn't register irq 0x%x\n",
+                       vdev->irq);
+               goto req_irq_failed;
+       }
+
+       rc = vio_enable_interrupts(vdev);
+       if (rc != 0) {
+               dev_err(hostdata->dev, "Error %d enabling interrupts!!!\n", rc);
+               goto req_irq_failed;
+       }
+
+       return retrc;
+
+      req_irq_failed:
+       tasklet_kill(&hostdata->srp_task);
+       rc = 0;
+       do {
+               if (rc)
+                       msleep(100);
+               rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
+       } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
+      reg_crq_failed:
+       dma_unmap_single(hostdata->dev,
+                        queue->msg_token,
+                        queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
+      map_failed:
+       free_page((unsigned long)queue->msgs);
+      malloc_failed:
+       return -1;
+}
+
+/**
+ * reenable_crq_queue: - reenables a crq after
+ * @queue:     crq_queue to initialize and register
+ * @hostdata:  ibmvscsi_host_data of host
+ *
+ */
+static int ibmvscsi_reenable_crq_queue(struct crq_queue *queue,
+                                      struct ibmvscsi_host_data *hostdata)
+{
+       int rc = 0;
+       struct vio_dev *vdev = to_vio_dev(hostdata->dev);
+
+       /* Re-enable the CRQ */
+       do {
+               if (rc)
+                       msleep(100);
+               rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
+       } while ((rc == H_IN_PROGRESS) || (rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
+
+       if (rc)
+               dev_err(hostdata->dev, "Error %d enabling adapter\n", rc);
+       return rc;
+}
+
 /* ------------------------------------------------------------
  * Routines for the event pool and event structs
  */
@@ -611,7 +921,7 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
        }
 
        if ((rc =
-            ibmvscsi_ops->send_crq(hostdata, crq_as_u64[0], crq_as_u64[1])) != 0) {
+            ibmvscsi_send_crq(hostdata, crq_as_u64[0], crq_as_u64[1])) != 0) {
                list_del(&evt_struct->list);
                del_timer(&evt_struct->timer);
 
@@ -1420,8 +1730,8 @@ static int ibmvscsi_eh_host_reset_handler(struct scsi_cmnd *cmd)
  * @hostdata:  ibmvscsi_host_data of host
  *
 */
-void ibmvscsi_handle_crq(struct viosrp_crq *crq,
-                        struct ibmvscsi_host_data *hostdata)
+static void ibmvscsi_handle_crq(struct viosrp_crq *crq,
+                               struct ibmvscsi_host_data *hostdata)
 {
        long rc;
        unsigned long flags;
@@ -1433,8 +1743,8 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
                case 0x01:      /* Initialization message */
                        dev_info(hostdata->dev, "partner initialized\n");
                        /* Send back a response */
-                       if ((rc = ibmvscsi_ops->send_crq(hostdata,
-                                                        0xC002000000000000LL, 0)) == 0) {
+                       rc = ibmvscsi_send_crq(hostdata, 0xC002000000000000LL, 0);
+                       if (rc == 0) {
                                /* Now login */
                                init_adapter(hostdata);
                        } else {
@@ -1541,6 +1851,9 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
 
        host_config = &evt_struct->iu.mad.host_config;
 
+       /* The transport length field is only 16-bit */
+       length = min(0xffff, length);
+
        /* Set up a lun reset SRP command */
        memset(host_config, 0x00, sizeof(*host_config));
        host_config->common.type = VIOSRP_HOST_CONFIG_TYPE;
@@ -1840,17 +2153,17 @@ static void ibmvscsi_do_work(struct ibmvscsi_host_data *hostdata)
                smp_rmb();
                hostdata->reset_crq = 0;
 
-               rc = ibmvscsi_ops->reset_crq_queue(&hostdata->queue, hostdata);
+               rc = ibmvscsi_reset_crq_queue(&hostdata->queue, hostdata);
                if (!rc)
-                       rc = ibmvscsi_ops->send_crq(hostdata, 0xC001000000000000LL, 0);
+                       rc = ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0);
                vio_enable_interrupts(to_vio_dev(hostdata->dev));
        } else if (hostdata->reenable_crq) {
                smp_rmb();
                action = "enable";
-               rc = ibmvscsi_ops->reenable_crq_queue(&hostdata->queue, hostdata);
+               rc = ibmvscsi_reenable_crq_queue(&hostdata->queue, hostdata);
                hostdata->reenable_crq = 0;
                if (!rc)
-                       rc = ibmvscsi_ops->send_crq(hostdata, 0xC001000000000000LL, 0);
+                       rc = ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0);
        } else
                return;
 
@@ -1944,7 +2257,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
                goto init_crq_failed;
        }
 
-       rc = ibmvscsi_ops->init_crq_queue(&hostdata->queue, hostdata, max_events);
+       rc = ibmvscsi_init_crq_queue(&hostdata->queue, hostdata, max_events);
        if (rc != 0 && rc != H_RESOURCE) {
                dev_err(&vdev->dev, "couldn't initialize crq. rc=%d\n", rc);
                goto kill_kthread;
@@ -1974,7 +2287,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
         * to fail if the other end is not acive.  In that case we don't
         * want to scan
         */
-       if (ibmvscsi_ops->send_crq(hostdata, 0xC001000000000000LL, 0) == 0
+       if (ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0) == 0
            || rc == H_RESOURCE) {
                /*
                 * Wait around max init_timeout secs for the adapter to finish
@@ -2002,7 +2315,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
       add_host_failed:
        release_event_pool(&hostdata->pool, hostdata);
       init_pool_failed:
-       ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata, max_events);
+       ibmvscsi_release_crq_queue(&hostdata->queue, hostdata, max_events);
       kill_kthread:
       kthread_stop(hostdata->work_thread);
       init_crq_failed:
@@ -2018,7 +2331,7 @@ static int ibmvscsi_remove(struct vio_dev *vdev)
        struct ibmvscsi_host_data *hostdata = dev_get_drvdata(&vdev->dev);
        unmap_persist_bufs(hostdata);
        release_event_pool(&hostdata->pool, hostdata);
-       ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata,
+       ibmvscsi_release_crq_queue(&hostdata->queue, hostdata,
                                        max_events);
 
        kthread_stop(hostdata->work_thread);
@@ -2039,7 +2352,10 @@ static int ibmvscsi_remove(struct vio_dev *vdev)
 static int ibmvscsi_resume(struct device *dev)
 {
        struct ibmvscsi_host_data *hostdata = dev_get_drvdata(dev);
-       return ibmvscsi_ops->resume(hostdata);
+       vio_disable_interrupts(to_vio_dev(hostdata->dev));
+       tasklet_schedule(&hostdata->srp_task);
+
+       return 0;
 }
 
 /**
@@ -2076,9 +2392,7 @@ int __init ibmvscsi_module_init(void)
        driver_template.can_queue = max_requests;
        max_events = max_requests + 2;
 
-       if (firmware_has_feature(FW_FEATURE_VIO))
-               ibmvscsi_ops = &rpavscsi_ops;
-       else
+       if (!firmware_has_feature(FW_FEATURE_VIO))
                return -ENODEV;
 
        ibmvscsi_transport_template =
index c503e1776014021573afcc8c531229c006286de1..7d64867c5dd1784cb4a3e265bed621346e334d16 100644 (file)
@@ -107,26 +107,4 @@ struct ibmvscsi_host_data {
        dma_addr_t adapter_info_addr;
 };
 
-/* routines for managing a command/response queue */
-void ibmvscsi_handle_crq(struct viosrp_crq *crq,
-                        struct ibmvscsi_host_data *hostdata);
-
-struct ibmvscsi_ops {
-       int (*init_crq_queue)(struct crq_queue *queue,
-                             struct ibmvscsi_host_data *hostdata,
-                             int max_requests);
-       void (*release_crq_queue)(struct crq_queue *queue,
-                                 struct ibmvscsi_host_data *hostdata,
-                                 int max_requests);
-       int (*reset_crq_queue)(struct crq_queue *queue,
-                              struct ibmvscsi_host_data *hostdata);
-       int (*reenable_crq_queue)(struct crq_queue *queue,
-                                 struct ibmvscsi_host_data *hostdata);
-       int (*send_crq)(struct ibmvscsi_host_data *hostdata,
-                      u64 word1, u64 word2);
-       int (*resume) (struct ibmvscsi_host_data *hostdata);
-};
-
-extern struct ibmvscsi_ops rpavscsi_ops;
-
 #endif                         /* IBMVSCSI_H */
diff --git a/drivers/scsi/ibmvscsi/rpa_vscsi.c b/drivers/scsi/ibmvscsi/rpa_vscsi.c
deleted file mode 100644 (file)
index f48ae01..0000000
+++ /dev/null
@@ -1,368 +0,0 @@
-/* ------------------------------------------------------------
- * rpa_vscsi.c
- * (C) Copyright IBM Corporation 1994, 2003
- * Authors: Colin DeVilbiss (devilbis@us.ibm.com)
- *          Santiago Leon (santil@us.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
- * USA
- *
- * ------------------------------------------------------------
- * RPA-specific functions of the SCSI host adapter for Virtual I/O devices
- *
- * This driver allows the Linux SCSI peripheral drivers to directly
- * access devices in the hosting partition, either on an iSeries
- * hypervisor system or a converged hypervisor system.
- */
-
-#include <asm/vio.h>
-#include <asm/prom.h>
-#include <asm/iommu.h>
-#include <asm/hvcall.h>
-#include <linux/delay.h>
-#include <linux/dma-mapping.h>
-#include <linux/gfp.h>
-#include <linux/interrupt.h>
-#include "ibmvscsi.h"
-
-static char partition_name[97] = "UNKNOWN";
-static unsigned int partition_number = -1;
-
-/* ------------------------------------------------------------
- * Routines for managing the command/response queue
- */
-/**
- * rpavscsi_handle_event: - Interrupt handler for crq events
- * @irq:       number of irq to handle, not used
- * @dev_instance: ibmvscsi_host_data of host that received interrupt
- *
- * Disables interrupts and schedules srp_task
- * Always returns IRQ_HANDLED
- */
-static irqreturn_t rpavscsi_handle_event(int irq, void *dev_instance)
-{
-       struct ibmvscsi_host_data *hostdata =
-           (struct ibmvscsi_host_data *)dev_instance;
-       vio_disable_interrupts(to_vio_dev(hostdata->dev));
-       tasklet_schedule(&hostdata->srp_task);
-       return IRQ_HANDLED;
-}
-
-/**
- * release_crq_queue: - Deallocates data and unregisters CRQ
- * @queue:     crq_queue to initialize and register
- * @host_data: ibmvscsi_host_data of host
- *
- * Frees irq, deallocates a page for messages, unmaps dma, and unregisters
- * the crq with the hypervisor.
- */
-static void rpavscsi_release_crq_queue(struct crq_queue *queue,
-                                      struct ibmvscsi_host_data *hostdata,
-                                      int max_requests)
-{
-       long rc = 0;
-       struct vio_dev *vdev = to_vio_dev(hostdata->dev);
-       free_irq(vdev->irq, (void *)hostdata);
-       tasklet_kill(&hostdata->srp_task);
-       do {
-               if (rc)
-                       msleep(100);
-               rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
-       } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
-       dma_unmap_single(hostdata->dev,
-                        queue->msg_token,
-                        queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
-       free_page((unsigned long)queue->msgs);
-}
-
-/**
- * crq_queue_next_crq: - Returns the next entry in message queue
- * @queue:     crq_queue to use
- *
- * Returns pointer to next entry in queue, or NULL if there are no new 
- * entried in the CRQ.
- */
-static struct viosrp_crq *crq_queue_next_crq(struct crq_queue *queue)
-{
-       struct viosrp_crq *crq;
-       unsigned long flags;
-
-       spin_lock_irqsave(&queue->lock, flags);
-       crq = &queue->msgs[queue->cur];
-       if (crq->valid & 0x80) {
-               if (++queue->cur == queue->size)
-                       queue->cur = 0;
-       } else
-               crq = NULL;
-       spin_unlock_irqrestore(&queue->lock, flags);
-
-       return crq;
-}
-
-/**
- * rpavscsi_send_crq: - Send a CRQ
- * @hostdata:  the adapter
- * @word1:     the first 64 bits of the data
- * @word2:     the second 64 bits of the data
- */
-static int rpavscsi_send_crq(struct ibmvscsi_host_data *hostdata,
-                            u64 word1, u64 word2)
-{
-       struct vio_dev *vdev = to_vio_dev(hostdata->dev);
-
-       return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
-}
-
-/**
- * rpavscsi_task: - Process srps asynchronously
- * @data:      ibmvscsi_host_data of host
- */
-static void rpavscsi_task(void *data)
-{
-       struct ibmvscsi_host_data *hostdata = (struct ibmvscsi_host_data *)data;
-       struct vio_dev *vdev = to_vio_dev(hostdata->dev);
-       struct viosrp_crq *crq;
-       int done = 0;
-
-       while (!done) {
-               /* Pull all the valid messages off the CRQ */
-               while ((crq = crq_queue_next_crq(&hostdata->queue)) != NULL) {
-                       ibmvscsi_handle_crq(crq, hostdata);
-                       crq->valid = 0x00;
-               }
-
-               vio_enable_interrupts(vdev);
-               if ((crq = crq_queue_next_crq(&hostdata->queue)) != NULL) {
-                       vio_disable_interrupts(vdev);
-                       ibmvscsi_handle_crq(crq, hostdata);
-                       crq->valid = 0x00;
-               } else {
-                       done = 1;
-               }
-       }
-}
-
-static void gather_partition_info(void)
-{
-       struct device_node *rootdn;
-
-       const char *ppartition_name;
-       const unsigned int *p_number_ptr;
-
-       /* Retrieve information about this partition */
-       rootdn = of_find_node_by_path("/");
-       if (!rootdn) {
-               return;
-       }
-
-       ppartition_name = of_get_property(rootdn, "ibm,partition-name", NULL);
-       if (ppartition_name)
-               strncpy(partition_name, ppartition_name,
-                               sizeof(partition_name));
-       p_number_ptr = of_get_property(rootdn, "ibm,partition-no", NULL);
-       if (p_number_ptr)
-               partition_number = *p_number_ptr;
-       of_node_put(rootdn);
-}
-
-static void set_adapter_info(struct ibmvscsi_host_data *hostdata)
-{
-       memset(&hostdata->madapter_info, 0x00,
-                       sizeof(hostdata->madapter_info));
-
-       dev_info(hostdata->dev, "SRP_VERSION: %s\n", SRP_VERSION);
-       strcpy(hostdata->madapter_info.srp_version, SRP_VERSION);
-
-       strncpy(hostdata->madapter_info.partition_name, partition_name,
-                       sizeof(hostdata->madapter_info.partition_name));
-
-       hostdata->madapter_info.partition_number = partition_number;
-
-       hostdata->madapter_info.mad_version = 1;
-       hostdata->madapter_info.os_type = 2;
-}
-
-/**
- * reset_crq_queue: - resets a crq after a failure
- * @queue:     crq_queue to initialize and register
- * @hostdata:  ibmvscsi_host_data of host
- *
- */
-static int rpavscsi_reset_crq_queue(struct crq_queue *queue,
-                                   struct ibmvscsi_host_data *hostdata)
-{
-       int rc = 0;
-       struct vio_dev *vdev = to_vio_dev(hostdata->dev);
-
-       /* Close the CRQ */
-       do {
-               if (rc)
-                       msleep(100);
-               rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
-       } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
-
-       /* Clean out the queue */
-       memset(queue->msgs, 0x00, PAGE_SIZE);
-       queue->cur = 0;
-
-       set_adapter_info(hostdata);
-
-       /* And re-open it again */
-       rc = plpar_hcall_norets(H_REG_CRQ,
-                               vdev->unit_address,
-                               queue->msg_token, PAGE_SIZE);
-       if (rc == 2) {
-               /* Adapter is good, but other end is not ready */
-               dev_warn(hostdata->dev, "Partner adapter not ready\n");
-       } else if (rc != 0) {
-               dev_warn(hostdata->dev, "couldn't register crq--rc 0x%x\n", rc);
-       }
-       return rc;
-}
-
-/**
- * initialize_crq_queue: - Initializes and registers CRQ with hypervisor
- * @queue:     crq_queue to initialize and register
- * @hostdata:  ibmvscsi_host_data of host
- *
- * Allocates a page for messages, maps it for dma, and registers
- * the crq with the hypervisor.
- * Returns zero on success.
- */
-static int rpavscsi_init_crq_queue(struct crq_queue *queue,
-                                  struct ibmvscsi_host_data *hostdata,
-                                  int max_requests)
-{
-       int rc;
-       int retrc;
-       struct vio_dev *vdev = to_vio_dev(hostdata->dev);
-
-       queue->msgs = (struct viosrp_crq *)get_zeroed_page(GFP_KERNEL);
-
-       if (!queue->msgs)
-               goto malloc_failed;
-       queue->size = PAGE_SIZE / sizeof(*queue->msgs);
-
-       queue->msg_token = dma_map_single(hostdata->dev, queue->msgs,
-                                         queue->size * sizeof(*queue->msgs),
-                                         DMA_BIDIRECTIONAL);
-
-       if (dma_mapping_error(hostdata->dev, queue->msg_token))
-               goto map_failed;
-
-       gather_partition_info();
-       set_adapter_info(hostdata);
-
-       retrc = rc = plpar_hcall_norets(H_REG_CRQ,
-                               vdev->unit_address,
-                               queue->msg_token, PAGE_SIZE);
-       if (rc == H_RESOURCE)
-               /* maybe kexecing and resource is busy. try a reset */
-               rc = rpavscsi_reset_crq_queue(queue,
-                                             hostdata);
-
-       if (rc == 2) {
-               /* Adapter is good, but other end is not ready */
-               dev_warn(hostdata->dev, "Partner adapter not ready\n");
-               retrc = 0;
-       } else if (rc != 0) {
-               dev_warn(hostdata->dev, "Error %d opening adapter\n", rc);
-               goto reg_crq_failed;
-       }
-
-       queue->cur = 0;
-       spin_lock_init(&queue->lock);
-
-       tasklet_init(&hostdata->srp_task, (void *)rpavscsi_task,
-                    (unsigned long)hostdata);
-
-       if (request_irq(vdev->irq,
-                       rpavscsi_handle_event,
-                       0, "ibmvscsi", (void *)hostdata) != 0) {
-               dev_err(hostdata->dev, "couldn't register irq 0x%x\n",
-                       vdev->irq);
-               goto req_irq_failed;
-       }
-
-       rc = vio_enable_interrupts(vdev);
-       if (rc != 0) {
-               dev_err(hostdata->dev, "Error %d enabling interrupts!!!\n", rc);
-               goto req_irq_failed;
-       }
-
-       return retrc;
-
-      req_irq_failed:
-       tasklet_kill(&hostdata->srp_task);
-       rc = 0;
-       do {
-               if (rc)
-                       msleep(100);
-               rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
-       } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
-      reg_crq_failed:
-       dma_unmap_single(hostdata->dev,
-                        queue->msg_token,
-                        queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
-      map_failed:
-       free_page((unsigned long)queue->msgs);
-      malloc_failed:
-       return -1;
-}
-
-/**
- * reenable_crq_queue: - reenables a crq after
- * @queue:     crq_queue to initialize and register
- * @hostdata:  ibmvscsi_host_data of host
- *
- */
-static int rpavscsi_reenable_crq_queue(struct crq_queue *queue,
-                                      struct ibmvscsi_host_data *hostdata)
-{
-       int rc = 0;
-       struct vio_dev *vdev = to_vio_dev(hostdata->dev);
-
-       /* Re-enable the CRQ */
-       do {
-               if (rc)
-                       msleep(100);
-               rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
-       } while ((rc == H_IN_PROGRESS) || (rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
-
-       if (rc)
-               dev_err(hostdata->dev, "Error %d enabling adapter\n", rc);
-       return rc;
-}
-
-/**
- * rpavscsi_resume: - resume after suspend
- * @hostdata:  ibmvscsi_host_data of host
- *
- */
-static int rpavscsi_resume(struct ibmvscsi_host_data *hostdata)
-{
-       vio_disable_interrupts(to_vio_dev(hostdata->dev));
-       tasklet_schedule(&hostdata->srp_task);
-       return 0;
-}
-
-struct ibmvscsi_ops rpavscsi_ops = {
-       .init_crq_queue = rpavscsi_init_crq_queue,
-       .release_crq_queue = rpavscsi_release_crq_queue,
-       .reset_crq_queue = rpavscsi_reset_crq_queue,
-       .reenable_crq_queue = rpavscsi_reenable_crq_queue,
-       .send_crq = rpavscsi_send_crq,
-       .resume = rpavscsi_resume,
-};
index 0a2c5a8ebb821db01a6b26c0789c4af1226a2252..e3f29f61cbc3910f7dec04e8fdc702b3077e1454 100644 (file)
@@ -565,6 +565,23 @@ static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
 #define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
 #endif
 
+/**
+ * ipr_lock_and_done - Acquire lock and complete command
+ * @ipr_cmd:   ipr command struct
+ *
+ * Return value:
+ *     none
+ **/
+static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
+{
+       unsigned long lock_flags;
+       struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+
+       spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+       ipr_cmd->done(ipr_cmd);
+       spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+}
+
 /**
  * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
  * @ipr_cmd:   ipr command struct
@@ -611,33 +628,49 @@ static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
  * Return value:
  *     none
  **/
-static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
+static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
+                             void (*fast_done) (struct ipr_cmnd *))
 {
        ipr_reinit_ipr_cmnd(ipr_cmd);
        ipr_cmd->u.scratch = 0;
        ipr_cmd->sibling = NULL;
+       ipr_cmd->fast_done = fast_done;
        init_timer(&ipr_cmd->timer);
 }
 
 /**
- * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
+ * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
  * @ioa_cfg:   ioa config struct
  *
  * Return value:
  *     pointer to ipr command struct
  **/
 static
-struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
+struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
 {
        struct ipr_cmnd *ipr_cmd;
 
        ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
        list_del(&ipr_cmd->queue);
-       ipr_init_ipr_cmnd(ipr_cmd);
 
        return ipr_cmd;
 }
 
+/**
+ * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
+ * @ioa_cfg:   ioa config struct
+ *
+ * Return value:
+ *     pointer to ipr command struct
+ **/
+static
+struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
+{
+       struct ipr_cmnd *ipr_cmd = __ipr_get_free_ipr_cmnd(ioa_cfg);
+       ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
+       return ipr_cmd;
+}
+
 /**
  * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
  * @ioa_cfg:   ioa config struct
@@ -5116,8 +5149,9 @@ static irqreturn_t ipr_isr(int irq, void *devp)
        u16 cmd_index;
        int num_hrrq = 0;
        int irq_none = 0;
-       struct ipr_cmnd *ipr_cmd;
+       struct ipr_cmnd *ipr_cmd, *temp;
        irqreturn_t rc = IRQ_NONE;
+       LIST_HEAD(doneq);
 
        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 
@@ -5138,8 +5172,8 @@ static irqreturn_t ipr_isr(int irq, void *devp)
 
                        if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
                                ipr_isr_eh(ioa_cfg, "Invalid response handle from IOA");
-                               spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
-                               return IRQ_HANDLED;
+                               rc = IRQ_HANDLED;
+                               goto unlock_out;
                        }
 
                        ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
@@ -5148,9 +5182,7 @@ static irqreturn_t ipr_isr(int irq, void *devp)
 
                        ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
 
-                       list_del(&ipr_cmd->queue);
-                       del_timer(&ipr_cmd->timer);
-                       ipr_cmd->done(ipr_cmd);
+                       list_move_tail(&ipr_cmd->queue, &doneq);
 
                        rc = IRQ_HANDLED;
 
@@ -5180,8 +5212,8 @@ static irqreturn_t ipr_isr(int irq, void *devp)
                } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
                           int_reg & IPR_PCII_HRRQ_UPDATED) {
                        ipr_isr_eh(ioa_cfg, "Error clearing HRRQ");
-                       spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
-                       return IRQ_HANDLED;
+                       rc = IRQ_HANDLED;
+                       goto unlock_out;
                } else
                        break;
        }
@@ -5189,7 +5221,14 @@ static irqreturn_t ipr_isr(int irq, void *devp)
        if (unlikely(rc == IRQ_NONE))
                rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
 
+unlock_out:
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+       list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
+               list_del(&ipr_cmd->queue);
+               del_timer(&ipr_cmd->timer);
+               ipr_cmd->fast_done(ipr_cmd);
+       }
+
        return rc;
 }
 
@@ -5770,21 +5809,28 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
        struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
        u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
+       unsigned long lock_flags;
 
        scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
 
        if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
-               scsi_dma_unmap(ipr_cmd->scsi_cmd);
+               scsi_dma_unmap(scsi_cmd);
+
+               spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
                list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
                scsi_cmd->scsi_done(scsi_cmd);
-       } else
+               spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+       } else {
+               spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
                ipr_erp_start(ioa_cfg, ipr_cmd);
+               spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+       }
 }
 
 /**
  * ipr_queuecommand - Queue a mid-layer request
+ * @shost:             scsi host struct
  * @scsi_cmd:  scsi command struct
- * @done:              done function
  *
  * This function queues a request generated by the mid-layer.
  *
@@ -5793,61 +5839,61 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
  *     SCSI_MLQUEUE_DEVICE_BUSY if device is busy
  *     SCSI_MLQUEUE_HOST_BUSY if host is busy
  **/
-static int ipr_queuecommand_lck(struct scsi_cmnd *scsi_cmd,
-                           void (*done) (struct scsi_cmnd *))
+static int ipr_queuecommand(struct Scsi_Host *shost,
+                           struct scsi_cmnd *scsi_cmd)
 {
        struct ipr_ioa_cfg *ioa_cfg;
        struct ipr_resource_entry *res;
        struct ipr_ioarcb *ioarcb;
        struct ipr_cmnd *ipr_cmd;
-       int rc = 0;
+       unsigned long lock_flags;
+       int rc;
 
-       scsi_cmd->scsi_done = done;
-       ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
-       res = scsi_cmd->device->hostdata;
+       ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
+
+       spin_lock_irqsave(shost->host_lock, lock_flags);
        scsi_cmd->result = (DID_OK << 16);
+       res = scsi_cmd->device->hostdata;
 
        /*
         * We are currently blocking all devices due to a host reset
         * We have told the host to stop giving us new requests, but
         * ERP ops don't count. FIXME
         */
-       if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
+       if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead)) {
+               spin_unlock_irqrestore(shost->host_lock, lock_flags);
                return SCSI_MLQUEUE_HOST_BUSY;
+       }
 
        /*
         * FIXME - Create scsi_set_host_offline interface
         *  and the ioa_is_dead check can be removed
         */
        if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
-               memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
-               scsi_cmd->result = (DID_NO_CONNECT << 16);
-               scsi_cmd->scsi_done(scsi_cmd);
-               return 0;
+               spin_unlock_irqrestore(shost->host_lock, lock_flags);
+               goto err_nodev;
+       }
+
+       if (ipr_is_gata(res) && res->sata_port) {
+               rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
+               spin_unlock_irqrestore(shost->host_lock, lock_flags);
+               return rc;
        }
 
-       if (ipr_is_gata(res) && res->sata_port)
-               return ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
+       ipr_cmd = __ipr_get_free_ipr_cmnd(ioa_cfg);
+       spin_unlock_irqrestore(shost->host_lock, lock_flags);
 
-       ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
+       ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
        ioarcb = &ipr_cmd->ioarcb;
-       list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
 
        memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
        ipr_cmd->scsi_cmd = scsi_cmd;
-       ioarcb->res_handle = res->res_handle;
-       ipr_cmd->done = ipr_scsi_done;
-       ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
+       ipr_cmd->done = ipr_scsi_eh_done;
 
        if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
                if (scsi_cmd->underflow == 0)
                        ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
 
-               if (res->needs_sync_complete) {
-                       ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
-                       res->needs_sync_complete = 0;
-               }
-
                ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
                if (ipr_is_gscsi(res))
                        ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
@@ -5859,23 +5905,46 @@ static int ipr_queuecommand_lck(struct scsi_cmnd *scsi_cmd,
            (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
                ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
 
-       if (likely(rc == 0)) {
-               if (ioa_cfg->sis64)
-                       rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
-               else
-                       rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
-       }
+       if (ioa_cfg->sis64)
+               rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
+       else
+               rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
 
-       if (unlikely(rc != 0)) {
-               list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+       spin_lock_irqsave(shost->host_lock, lock_flags);
+       if (unlikely(rc || (!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))) {
+               list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+               spin_unlock_irqrestore(shost->host_lock, lock_flags);
+               if (!rc)
+                       scsi_dma_unmap(scsi_cmd);
                return SCSI_MLQUEUE_HOST_BUSY;
        }
 
+       if (unlikely(ioa_cfg->ioa_is_dead)) {
+               list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+               spin_unlock_irqrestore(shost->host_lock, lock_flags);
+               scsi_dma_unmap(scsi_cmd);
+               goto err_nodev;
+       }
+
+       ioarcb->res_handle = res->res_handle;
+       if (res->needs_sync_complete) {
+               ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
+               res->needs_sync_complete = 0;
+       }
+       list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
+       ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
        ipr_send_command(ipr_cmd);
+       spin_unlock_irqrestore(shost->host_lock, lock_flags);
        return 0;
-}
 
-static DEF_SCSI_QCMD(ipr_queuecommand)
+err_nodev:
+       spin_lock_irqsave(shost->host_lock, lock_flags);
+       memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
+       scsi_cmd->result = (DID_NO_CONNECT << 16);
+       scsi_cmd->scsi_done(scsi_cmd);
+       spin_unlock_irqrestore(shost->host_lock, lock_flags);
+       return 0;
+}
 
 /**
  * ipr_ioctl - IOCTL handler
@@ -8775,8 +8844,7 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
 
        ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
        memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
-       ata_host_init(&ioa_cfg->ata_host, &pdev->dev,
-                     sata_port_info.flags, &ipr_sata_ops);
+       ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);
 
        ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
 
@@ -9020,7 +9088,7 @@ static void __ipr_remove(struct pci_dev *pdev)
 
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
        wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
-       flush_work_sync(&ioa_cfg->work_q);
+       flush_work(&ioa_cfg->work_q);
        spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
 
        spin_lock(&ipr_driver_lock);
index 153b8bd91d1ef825952ec70fec861fea0e8d5fea..c8a137f83bb13e1dbd9d3ceb1478c7fa29e76177 100644 (file)
@@ -38,8 +38,8 @@
 /*
  * Literals
  */
-#define IPR_DRIVER_VERSION "2.5.3"
-#define IPR_DRIVER_DATE "(March 10, 2012)"
+#define IPR_DRIVER_VERSION "2.5.4"
+#define IPR_DRIVER_DATE "(July 11, 2012)"
 
 /*
  * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding
@@ -1525,6 +1525,7 @@ struct ipr_cmnd {
        struct ata_queued_cmd *qc;
        struct completion completion;
        struct timer_list timer;
+       void (*fast_done) (struct ipr_cmnd *);
        void (*done) (struct ipr_cmnd *);
        int (*job_step) (struct ipr_cmnd *);
        int (*job_step_failed) (struct ipr_cmnd *);
index b334fdc1726ad44588d7931023cc58ff8854301c..609dafd661d1437a0287c7511d8e31f7c1e1c199 100644 (file)
@@ -1044,7 +1044,7 @@ static enum sci_status sci_controller_start(struct isci_host *ihost,
        return SCI_SUCCESS;
 }
 
-void isci_host_scan_start(struct Scsi_Host *shost)
+void isci_host_start(struct Scsi_Host *shost)
 {
        struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha;
        unsigned long tmo = sci_controller_get_suggested_start_timeout(ihost);
@@ -1079,7 +1079,6 @@ static void sci_controller_completion_handler(struct isci_host *ihost)
 
 void ireq_done(struct isci_host *ihost, struct isci_request *ireq, struct sas_task *task)
 {
-       task->lldd_task = NULL;
        if (!test_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags) &&
            !(task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
                if (test_bit(IREQ_COMPLETE_IN_TARGET, &ireq->flags)) {
@@ -1087,16 +1086,19 @@ void ireq_done(struct isci_host *ihost, struct isci_request *ireq, struct sas_ta
                        dev_dbg(&ihost->pdev->dev,
                                "%s: Normal - ireq/task = %p/%p\n",
                                __func__, ireq, task);
-
+                       task->lldd_task = NULL;
                        task->task_done(task);
                } else {
                        dev_dbg(&ihost->pdev->dev,
                                "%s: Error - ireq/task = %p/%p\n",
                                __func__, ireq, task);
-
+                       if (sas_protocol_ata(task->task_proto))
+                               task->lldd_task = NULL;
                        sas_task_abort(task);
                }
-       }
+       } else
+               task->lldd_task = NULL;
+
        if (test_and_clear_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags))
                wake_up_all(&ihost->eventq);
 
@@ -1120,10 +1122,16 @@ void isci_host_completion_routine(unsigned long data)
        sci_controller_completion_handler(ihost);
        spin_unlock_irq(&ihost->scic_lock);
 
-       /* the coalesence timeout doubles at each encoding step, so
+       /*
+        * we subtract SCI_MAX_PORTS to account for the number of dummy TCs
+        * issued for hardware issue workaround
+        */
+       active = isci_tci_active(ihost) - SCI_MAX_PORTS;
+
+       /*
+        * the coalesence timeout doubles at each encoding step, so
         * update it based on the ilog2 value of the outstanding requests
         */
-       active = isci_tci_active(ihost);
        writel(SMU_ICC_GEN_VAL(NUMBER, active) |
               SMU_ICC_GEN_VAL(TIMER, ISCI_COALESCE_BASE + ilog2(active)),
               &ihost->smu_registers->interrupt_coalesce_control);
@@ -1973,7 +1981,7 @@ static void sci_controller_afe_initialization(struct isci_host *ihost)
        }
 
        for (phy_id = 0; phy_id < SCI_MAX_PHYS; phy_id++) {
-               struct scu_afe_transceiver *xcvr = &afe->scu_afe_xcvr[phy_id];
+               struct scu_afe_transceiver __iomem *xcvr = &afe->scu_afe_xcvr[phy_id];
                const struct sci_phy_oem_params *oem_phy = &oem->phys[phy_id];
                int cable_length_long =
                        is_long_cable(phy_id, cable_selection_mask);
index 9ab58e0540e7df4caf596e9621a5fcee62e86fba..4911310a38f5e42fd9d292b89de93b57369c7419 100644 (file)
@@ -473,7 +473,7 @@ void sci_controller_remote_device_stopped(struct isci_host *ihost,
 
 enum sci_status sci_controller_continue_io(struct isci_request *ireq);
 int isci_host_scan_finished(struct Scsi_Host *, unsigned long);
-void isci_host_scan_start(struct Scsi_Host *);
+void isci_host_start(struct Scsi_Host *);
 u16 isci_alloc_tag(struct isci_host *ihost);
 enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag);
 void isci_tci_free(struct isci_host *ihost, u16 tci);
index 9be45a2b2232b2da9ed31717745992458eadfe4f..b74050b95d6af93ad5720baa39790b852a7f1413 100644 (file)
@@ -156,7 +156,7 @@ static struct scsi_host_template isci_sht = {
        .target_alloc                   = sas_target_alloc,
        .slave_configure                = sas_slave_configure,
        .scan_finished                  = isci_host_scan_finished,
-       .scan_start                     = isci_host_scan_start,
+       .scan_start                     = isci_host_start,
        .change_queue_depth             = sas_change_queue_depth,
        .change_queue_type              = sas_change_queue_type,
        .bios_param                     = sas_bios_param,
@@ -644,7 +644,6 @@ static int __devinit isci_pci_probe(struct pci_dev *pdev, const struct pci_devic
                                                orom->hdr.version)) {
                        dev_warn(&pdev->dev,
                                 "[%d]: invalid oem parameters detected, falling back to firmware\n", i);
-                       devm_kfree(&pdev->dev, orom);
                        orom = NULL;
                        break;
                }
@@ -722,11 +721,67 @@ static void __devexit isci_pci_remove(struct pci_dev *pdev)
        }
 }
 
+#ifdef CONFIG_PM
+static int isci_suspend(struct device *dev)
+{
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct isci_host *ihost;
+       int i;
+
+       for_each_isci_host(i, ihost, pdev) {
+               sas_suspend_ha(&ihost->sas_ha);
+               isci_host_deinit(ihost);
+       }
+
+       pci_save_state(pdev);
+       pci_disable_device(pdev);
+       pci_set_power_state(pdev, PCI_D3hot);
+
+       return 0;
+}
+
+static int isci_resume(struct device *dev)
+{
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct isci_host *ihost;
+       int rc, i;
+
+       pci_set_power_state(pdev, PCI_D0);
+       pci_restore_state(pdev);
+
+       rc = pcim_enable_device(pdev);
+       if (rc) {
+               dev_err(&pdev->dev,
+                       "enabling device failure after resume(%d)\n", rc);
+               return rc;
+       }
+
+       pci_set_master(pdev);
+
+       for_each_isci_host(i, ihost, pdev) {
+               sas_prep_resume_ha(&ihost->sas_ha);
+
+               isci_host_init(ihost);
+               isci_host_start(ihost->sas_ha.core.shost);
+               wait_for_start(ihost);
+
+               sas_resume_ha(&ihost->sas_ha);
+       }
+
+       return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(isci_pm_ops, isci_suspend, isci_resume);
+#endif
+
 static struct pci_driver isci_pci_driver = {
        .name           = DRV_NAME,
        .id_table       = isci_id_table,
        .probe          = isci_pci_probe,
        .remove         = __devexit_p(isci_pci_remove),
+#ifdef CONFIG_PM
+       .driver.pm      = &isci_pm_ops,
+#endif
 };
 
 static __init int isci_init(void)
index 18f43d4c30baf8e37177eac41c8b817d7c8bcb1d..cb87b2ef7c92ab2ab5b25cbedbb594e85956eb05 100644 (file)
@@ -169,7 +169,7 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy,
        phy_cap.gen1_no_ssc = 1;
        if (ihost->oem_parameters.controller.do_enable_ssc) {
                struct scu_afe_registers __iomem *afe = &ihost->scu_registers->afe;
-               struct scu_afe_transceiver *xcvr = &afe->scu_afe_xcvr[phy_idx];
+               struct scu_afe_transceiver __iomem *xcvr = &afe->scu_afe_xcvr[phy_idx];
                struct isci_pci_info *pci_info = to_pci_info(ihost->pdev);
                bool en_sas = false;
                bool en_sata = false;
@@ -1205,6 +1205,7 @@ static void scu_link_layer_start_oob(struct isci_phy *iphy)
        /** Reset OOB sequence - start */
        val = readl(&ll->phy_configuration);
        val &= ~(SCU_SAS_PCFG_GEN_BIT(OOB_RESET) |
+                SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE) |
                 SCU_SAS_PCFG_GEN_BIT(HARD_RESET));
        writel(val, &ll->phy_configuration);
        readl(&ll->phy_configuration); /* flush */
@@ -1236,6 +1237,7 @@ static void scu_link_layer_tx_hard_reset(
         * to the starting state. */
        phy_configuration_value =
                readl(&iphy->link_layer_registers->phy_configuration);
+       phy_configuration_value &= ~(SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE));
        phy_configuration_value |=
                (SCU_SAS_PCFG_GEN_BIT(HARD_RESET) |
                 SCU_SAS_PCFG_GEN_BIT(OOB_RESET));
index 4d95654c3fd4852ebeaff740e11bbe2492d4c4a5..8ac646e5eddc9476603fb98c8ce5bd2a7b0be224 100644 (file)
@@ -104,7 +104,6 @@ struct isci_orom *isci_request_oprom(struct pci_dev *pdev)
 
        if (i >= len) {
                dev_err(&pdev->dev, "oprom parse error\n");
-               devm_kfree(&pdev->dev, rom);
                rom = NULL;
        }
        pci_unmap_biosrom(oprom);
index a703b9ce0c2c211c984a877179b724a624ebf4fc..c7ee81d011253487055b982942dff0620c635a17 100644 (file)
@@ -212,7 +212,7 @@ enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context
                                                      scics_sds_remote_node_context_callback callback,
                                                      void *callback_parameter);
 enum sci_status sci_remote_node_context_suspend(struct sci_remote_node_context *sci_rnc,
-                                                    u32 suspend_type,
+                                                    enum sci_remote_node_suspension_reasons reason,
                                                     u32 suspension_code);
 enum sci_status sci_remote_node_context_resume(struct sci_remote_node_context *sci_rnc,
                                                    scics_sds_remote_node_context_callback cb_fn,
index 922086105b4b27d62e19668448b3fb2282837737..1b91ca0dc1e39cdb2b27bc76e0f9c710a6922a51 100644 (file)
@@ -55,7 +55,7 @@ static struct scsi_transport_template *iscsi_sw_tcp_scsi_transport;
 static struct scsi_host_template iscsi_sw_tcp_sht;
 static struct iscsi_transport iscsi_sw_tcp_transport;
 
-static unsigned int iscsi_max_lun = 512;
+static unsigned int iscsi_max_lun = ~0;
 module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
 
 static int iscsi_sw_tcp_dbg;
index a59fcdc8fd6331e929c65f69afd2763819da33e0..bdb81cda84013f0d132aa1972bd409dcb9903f6b 100644 (file)
@@ -580,10 +580,7 @@ int sas_ata_init(struct domain_device *found_dev)
        struct ata_port *ap;
        int rc;
 
-       ata_host_init(&found_dev->sata_dev.ata_host,
-                     ha->dev,
-                     sata_port_info.flags,
-                     &sas_sata_ops);
+       ata_host_init(&found_dev->sata_dev.ata_host, ha->dev, &sas_sata_ops);
        ap = ata_sas_port_alloc(&found_dev->sata_dev.ata_host,
                                &sata_port_info,
                                shost);
@@ -700,6 +697,92 @@ void sas_probe_sata(struct asd_sas_port *port)
                if (ata_dev_disabled(sas_to_ata_dev(dev)))
                        sas_fail_probe(dev, __func__, -ENODEV);
        }
+
+}
+
+static bool sas_ata_flush_pm_eh(struct asd_sas_port *port, const char *func)
+{
+       struct domain_device *dev, *n;
+       bool retry = false;
+
+       list_for_each_entry_safe(dev, n, &port->dev_list, dev_list_node) {
+               int rc;
+
+               if (!dev_is_sata(dev))
+                       continue;
+
+               sas_ata_wait_eh(dev);
+               rc = dev->sata_dev.pm_result;
+               if (rc == -EAGAIN)
+                       retry = true;
+               else if (rc) {
+                       /* since we don't have a
+                        * ->port_{suspend|resume} routine in our
+                        *  ata_port ops, and no entanglements with
+                        *  acpi, suspend should just be mechanical trip
+                        *  through eh, catch cases where these
+                        *  assumptions are invalidated
+                        */
+                       WARN_ONCE(1, "failed %s %s error: %d\n", func,
+                                dev_name(&dev->rphy->dev), rc);
+               }
+
+               /* if libata failed to power manage the device, tear it down */
+               if (ata_dev_disabled(sas_to_ata_dev(dev)))
+                       sas_fail_probe(dev, func, -ENODEV);
+       }
+
+       return retry;
+}
+
+void sas_suspend_sata(struct asd_sas_port *port)
+{
+       struct domain_device *dev;
+
+ retry:
+       mutex_lock(&port->ha->disco_mutex);
+       list_for_each_entry(dev, &port->dev_list, dev_list_node) {
+               struct sata_device *sata;
+
+               if (!dev_is_sata(dev))
+                       continue;
+
+               sata = &dev->sata_dev;
+               if (sata->ap->pm_mesg.event == PM_EVENT_SUSPEND)
+                       continue;
+
+               sata->pm_result = -EIO;
+               ata_sas_port_async_suspend(sata->ap, &sata->pm_result);
+       }
+       mutex_unlock(&port->ha->disco_mutex);
+
+       if (sas_ata_flush_pm_eh(port, __func__))
+               goto retry;
+}
+
+void sas_resume_sata(struct asd_sas_port *port)
+{
+       struct domain_device *dev;
+
+ retry:
+       mutex_lock(&port->ha->disco_mutex);
+       list_for_each_entry(dev, &port->dev_list, dev_list_node) {
+               struct sata_device *sata;
+
+               if (!dev_is_sata(dev))
+                       continue;
+
+               sata = &dev->sata_dev;
+               if (sata->ap->pm_mesg.event == PM_EVENT_ON)
+                       continue;
+
+               sata->pm_result = -EIO;
+               ata_sas_port_async_resume(sata->ap, &sata->pm_result);
+       }
+       mutex_unlock(&port->ha->disco_mutex);
+
+       if (sas_ata_flush_pm_eh(port, __func__))
+               goto retry;
 }
 
 /**
index 3e9dc1a84358a540dd5ad4324b63fd4f9bf98039..a0c3003e0c7d2f6b5e1a89147ab29d58b1609aaa 100644 (file)
@@ -24,6 +24,7 @@
 
 #include <linux/scatterlist.h>
 #include <linux/slab.h>
+#include <linux/async.h>
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_eh.h>
 #include "sas_internal.h"
@@ -180,16 +181,18 @@ int sas_notify_lldd_dev_found(struct domain_device *dev)
        struct Scsi_Host *shost = sas_ha->core.shost;
        struct sas_internal *i = to_sas_internal(shost->transportt);
 
-       if (i->dft->lldd_dev_found) {
-               res = i->dft->lldd_dev_found(dev);
-               if (res) {
-                       printk("sas: driver on pcidev %s cannot handle "
-                              "device %llx, error:%d\n",
-                              dev_name(sas_ha->dev),
-                              SAS_ADDR(dev->sas_addr), res);
-               }
-               kref_get(&dev->kref);
+       if (!i->dft->lldd_dev_found)
+               return 0;
+
+       res = i->dft->lldd_dev_found(dev);
+       if (res) {
+               printk("sas: driver on pcidev %s cannot handle "
+                      "device %llx, error:%d\n",
+                      dev_name(sas_ha->dev),
+                      SAS_ADDR(dev->sas_addr), res);
        }
+       set_bit(SAS_DEV_FOUND, &dev->state);
+       kref_get(&dev->kref);
        return res;
 }
 
@@ -200,7 +203,10 @@ void sas_notify_lldd_dev_gone(struct domain_device *dev)
        struct Scsi_Host *shost = sas_ha->core.shost;
        struct sas_internal *i = to_sas_internal(shost->transportt);
 
-       if (i->dft->lldd_dev_gone) {
+       if (!i->dft->lldd_dev_gone)
+               return;
+
+       if (test_and_clear_bit(SAS_DEV_FOUND, &dev->state)) {
                i->dft->lldd_dev_gone(dev);
                sas_put_device(dev);
        }
@@ -234,6 +240,47 @@ static void sas_probe_devices(struct work_struct *work)
        }
 }
 
+static void sas_suspend_devices(struct work_struct *work)
+{
+       struct asd_sas_phy *phy;
+       struct domain_device *dev;
+       struct sas_discovery_event *ev = to_sas_discovery_event(work);
+       struct asd_sas_port *port = ev->port;
+       struct Scsi_Host *shost = port->ha->core.shost;
+       struct sas_internal *si = to_sas_internal(shost->transportt);
+
+       clear_bit(DISCE_SUSPEND, &port->disc.pending);
+
+       sas_suspend_sata(port);
+
+       /* lldd is free to forget the domain_device across the
+        * suspension, we force the issue here to keep the reference
+        * counts aligned
+        */
+       list_for_each_entry(dev, &port->dev_list, dev_list_node)
+               sas_notify_lldd_dev_gone(dev);
+
+       /* we are suspending, so we know events are disabled and
+        * phy_list is not being mutated
+        */
+       list_for_each_entry(phy, &port->phy_list, port_phy_el) {
+               if (si->dft->lldd_port_formed)
+                       si->dft->lldd_port_deformed(phy);
+               phy->suspended = 1;
+               port->suspended = 1;
+       }
+}
+
+static void sas_resume_devices(struct work_struct *work)
+{
+       struct sas_discovery_event *ev = to_sas_discovery_event(work);
+       struct asd_sas_port *port = ev->port;
+
+       clear_bit(DISCE_RESUME, &port->disc.pending);
+
+       sas_resume_sata(port);
+}
+
 /**
  * sas_discover_end_dev -- discover an end device (SSP, etc)
  * @end: pointer to domain device of interest
@@ -530,6 +577,8 @@ void sas_init_disc(struct sas_discovery *disc, struct asd_sas_port *port)
                [DISCE_DISCOVER_DOMAIN] = sas_discover_domain,
                [DISCE_REVALIDATE_DOMAIN] = sas_revalidate_domain,
                [DISCE_PROBE] = sas_probe_devices,
+               [DISCE_SUSPEND] = sas_suspend_devices,
+               [DISCE_RESUME] = sas_resume_devices,
                [DISCE_DESTRUCT] = sas_destruct_devices,
        };
 
index fc460933575c76e57d4433a40801cf4403333506..cd6f99c1ae7e1740fb5e69613806c36c2a7c137f 100644 (file)
@@ -41,6 +41,7 @@ static const char *sas_phye_str[] = {
        [1] = "PHYE_OOB_DONE",
        [2] = "PHYE_OOB_ERROR",
        [3] = "PHYE_SPINUP_HOLD",
+       [4] = "PHYE_RESUME_TIMEOUT",
 };
 
 void sas_dprint_porte(int phyid, enum port_event pe)
index 789c4d8bb7a7d8d7ef9310172c4b44795a2fee81..aadbd5314c5cce3e2a91b3b323aaffb4ab98fa06 100644 (file)
@@ -134,7 +134,7 @@ static void notify_port_event(struct asd_sas_phy *phy, enum port_event event)
                        &phy->port_events[event].work, ha);
 }
 
-static void notify_phy_event(struct asd_sas_phy *phy, enum phy_event event)
+void sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event)
 {
        struct sas_ha_struct *ha = phy->ha;
 
@@ -159,7 +159,7 @@ int sas_init_events(struct sas_ha_struct *sas_ha)
 
        sas_ha->notify_ha_event = notify_ha_event;
        sas_ha->notify_port_event = notify_port_event;
-       sas_ha->notify_phy_event = notify_phy_event;
+       sas_ha->notify_phy_event = sas_notify_phy_event;
 
        return 0;
 }
index 014297c05880d8e2fdd3e889550f3536b3cab30e..dbc8a793fd867dcd1f24e55e1d79aab836530b0f 100644 (file)
@@ -178,7 +178,7 @@ Undo_phys:
        return error;
 }
 
-int sas_unregister_ha(struct sas_ha_struct *sas_ha)
+static void sas_disable_events(struct sas_ha_struct *sas_ha)
 {
        /* Set the state to unregistered to avoid further unchained
         * events to be queued, and flush any in-progress drainers
@@ -189,7 +189,11 @@ int sas_unregister_ha(struct sas_ha_struct *sas_ha)
        spin_unlock_irq(&sas_ha->lock);
        __sas_drain_work(sas_ha);
        mutex_unlock(&sas_ha->drain_mutex);
+}
 
+int sas_unregister_ha(struct sas_ha_struct *sas_ha)
+{
+       sas_disable_events(sas_ha);
        sas_unregister_ports(sas_ha);
 
        /* flush unregistration work */
@@ -381,6 +385,90 @@ int sas_set_phy_speed(struct sas_phy *phy,
        return ret;
 }
 
+void sas_prep_resume_ha(struct sas_ha_struct *ha)
+{
+       int i;
+
+       set_bit(SAS_HA_REGISTERED, &ha->state);
+
+       /* clear out any stale link events/data from the suspension path */
+       for (i = 0; i < ha->num_phys; i++) {
+               struct asd_sas_phy *phy = ha->sas_phy[i];
+
+               memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
+               phy->port_events_pending = 0;
+               phy->phy_events_pending = 0;
+               phy->frame_rcvd_size = 0;
+       }
+}
+EXPORT_SYMBOL(sas_prep_resume_ha);
+
+static int phys_suspended(struct sas_ha_struct *ha)
+{
+       int i, rc = 0;
+
+       for (i = 0; i < ha->num_phys; i++) {
+               struct asd_sas_phy *phy = ha->sas_phy[i];
+
+               if (phy->suspended)
+                       rc++;
+       }
+
+       return rc;
+}
+
+void sas_resume_ha(struct sas_ha_struct *ha)
+{
+       const unsigned long tmo = msecs_to_jiffies(25000);
+       int i;
+
+       /* deform ports on phys that did not resume
+        * at this point we may be racing the phy coming back (as posted
+        * by the lldd).  So we post the event and once we are in the
+        * libsas context check that the phy remains suspended before
+        * tearing it down.
+        */
+       i = phys_suspended(ha);
+       if (i)
+               dev_info(ha->dev, "waiting up to 25 seconds for %d phy%s to resume\n",
+                        i, i > 1 ? "s" : "");
+       wait_event_timeout(ha->eh_wait_q, phys_suspended(ha) == 0, tmo);
+       for (i = 0; i < ha->num_phys; i++) {
+               struct asd_sas_phy *phy = ha->sas_phy[i];
+
+               if (phy->suspended) {
+                       dev_warn(&phy->phy->dev, "resume timeout\n");
+                       sas_notify_phy_event(phy, PHYE_RESUME_TIMEOUT);
+               }
+       }
+
+       /* all phys are back up or timed out, turn on i/o so we can
+        * flush out disks that did not return
+        */
+       scsi_unblock_requests(ha->core.shost);
+       sas_drain_work(ha);
+}
+EXPORT_SYMBOL(sas_resume_ha);
+
+void sas_suspend_ha(struct sas_ha_struct *ha)
+{
+       int i;
+
+       sas_disable_events(ha);
+       scsi_block_requests(ha->core.shost);
+       for (i = 0; i < ha->num_phys; i++) {
+               struct asd_sas_port *port = ha->sas_port[i];
+
+               sas_discover_event(port, DISCE_SUSPEND);
+       }
+
+       /* flush suspend events while unregistered */
+       mutex_lock(&ha->drain_mutex);
+       __sas_drain_work(ha);
+       mutex_unlock(&ha->drain_mutex);
+}
+EXPORT_SYMBOL(sas_suspend_ha);
+
 static void sas_phy_release(struct sas_phy *phy)
 {
        kfree(phy->hostdata);
index 507e4cf12e56cef87cd3b80af00215cc62db6078..1de67964e5a1e39ffa10151c79489078826e2536 100644 (file)
@@ -89,6 +89,7 @@ int sas_smp_phy_control(struct domain_device *dev, int phy_id,
                        enum phy_func phy_func, struct sas_phy_linkrates *);
 int sas_smp_get_phy_events(struct sas_phy *phy);
 
+void sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event);
 void sas_device_set_phy(struct domain_device *dev, struct sas_port *port);
 struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy);
 struct domain_device *sas_ex_to_ata(struct domain_device *ex_dev, int phy_id);
index 521422e857ab330ee3a659ad11dae2dd02aee9f0..cdee446c29e1319078999d67176b3f9a2af8915f 100644 (file)
@@ -94,6 +94,25 @@ static void sas_phye_spinup_hold(struct work_struct *work)
        i->dft->lldd_control_phy(phy, PHY_FUNC_RELEASE_SPINUP_HOLD, NULL);
 }
 
+static void sas_phye_resume_timeout(struct work_struct *work)
+{
+       struct asd_sas_event *ev = to_asd_sas_event(work);
+       struct asd_sas_phy *phy = ev->phy;
+
+       clear_bit(PHYE_RESUME_TIMEOUT, &phy->phy_events_pending);
+
+       /* phew, lldd got the phy back in the nick of time */
+       if (!phy->suspended) {
+               dev_info(&phy->phy->dev, "resume timeout cancelled\n");
+               return;
+       }
+
+       phy->error = 0;
+       phy->suspended = 0;
+       sas_deform_port(phy, 1);
+}
+
+
 /* ---------- Phy class registration ---------- */
 
 int sas_register_phys(struct sas_ha_struct *sas_ha)
@@ -105,6 +124,8 @@ int sas_register_phys(struct sas_ha_struct *sas_ha)
                [PHYE_OOB_DONE] = sas_phye_oob_done,
                [PHYE_OOB_ERROR] = sas_phye_oob_error,
                [PHYE_SPINUP_HOLD] = sas_phye_spinup_hold,
+               [PHYE_RESUME_TIMEOUT] = sas_phye_resume_timeout,
+
        };
 
        static const work_func_t sas_port_event_fns[PORT_NUM_EVENTS] = {
index e884a8c58a0ccb181424051281fda4b4a45fc1a9..1398b714c01836ee3789199cf3cd627ac4fa4021 100644 (file)
@@ -39,6 +39,49 @@ static bool phy_is_wideport_member(struct asd_sas_port *port, struct asd_sas_phy
        return true;
 }
 
+static void sas_resume_port(struct asd_sas_phy *phy)
+{
+       struct domain_device *dev;
+       struct asd_sas_port *port = phy->port;
+       struct sas_ha_struct *sas_ha = phy->ha;
+       struct sas_internal *si = to_sas_internal(sas_ha->core.shost->transportt);
+
+       if (si->dft->lldd_port_formed)
+               si->dft->lldd_port_formed(phy);
+
+       if (port->suspended)
+               port->suspended = 0;
+       else {
+               /* we only need to handle "link returned" actions once */
+               return;
+       }
+
+       /* if the port came back:
+        * 1/ presume every device came back
+        * 2/ force the next revalidation to check all expander phys
+        */
+       list_for_each_entry(dev, &port->dev_list, dev_list_node) {
+               int i, rc;
+
+               rc = sas_notify_lldd_dev_found(dev);
+               if (rc) {
+                       sas_unregister_dev(port, dev);
+                       continue;
+               }
+
+               if (dev->dev_type == EDGE_DEV || dev->dev_type == FANOUT_DEV) {
+                       dev->ex_dev.ex_change_count = -1;
+                       for (i = 0; i < dev->ex_dev.num_phys; i++) {
+                               struct ex_phy *phy = &dev->ex_dev.ex_phy[i];
+
+                               phy->phy_change_count = -1;
+                       }
+               }
+       }
+
+       sas_discover_event(port, DISCE_RESUME);
+}
+
 /**
  * sas_form_port -- add this phy to a port
  * @phy: the phy of interest
@@ -58,7 +101,14 @@ static void sas_form_port(struct asd_sas_phy *phy)
        if (port) {
                if (!phy_is_wideport_member(port, phy))
                        sas_deform_port(phy, 0);
-               else {
+               else if (phy->suspended) {
+                       phy->suspended = 0;
+                       sas_resume_port(phy);
+
+                       /* phy came back, try to cancel the timeout */
+                       wake_up(&sas_ha->eh_wait_q);
+                       return;
+               } else {
                        SAS_DPRINTK("%s: phy%d belongs to port%d already(%d)!\n",
                                    __func__, phy->id, phy->port->id,
                                    phy->port->num_phys);
index a65c05a8d4885dbd5f96821c73fcee6e13d43917..a184c2443a6401132f2c7edb1614f29e75ad3b90 100644 (file)
@@ -73,6 +73,8 @@ struct lpfc_sli2_slim;
 #define LPFC_HB_MBOX_INTERVAL   5      /* Heart beat interval in seconds. */
 #define LPFC_HB_MBOX_TIMEOUT    30     /* Heart beat timeout  in seconds. */
 
+#define LPFC_LOOK_AHEAD_OFF    0       /* Look ahead logic is turned off */
+
 /* Error Attention event polling interval */
 #define LPFC_ERATT_POLL_INTERVAL       5 /* EATT poll interval in seconds */
 
@@ -684,6 +686,7 @@ struct lpfc_hba {
 #define LPFC_FCF_FOV 1         /* Fast fcf failover */
 #define LPFC_FCF_PRIORITY 2    /* Priority fcf failover */
        uint32_t cfg_fcf_failover_policy;
+       uint32_t cfg_fcp_io_sched;
        uint32_t cfg_cr_delay;
        uint32_t cfg_cr_count;
        uint32_t cfg_multi_ring_support;
@@ -695,6 +698,7 @@ struct lpfc_hba {
        uint32_t cfg_fcp_imax;
        uint32_t cfg_fcp_wq_count;
        uint32_t cfg_fcp_eq_count;
+       uint32_t cfg_fcp_io_channel;
        uint32_t cfg_sg_seg_cnt;
        uint32_t cfg_prot_sg_seg_cnt;
        uint32_t cfg_sg_dma_buf_size;
@@ -732,7 +736,7 @@ struct lpfc_hba {
        uint32_t hbq_count;             /* Count of configured HBQs */
        struct hbq_s hbqs[LPFC_MAX_HBQS]; /* local copy of hbq indicies  */
 
-       uint32_t fcp_qidx;              /* next work queue to post work to */
+       atomic_t fcp_qidx;              /* next work queue to post work to */
 
        unsigned long pci_bar0_map;     /* Physical address for PCI BAR0 */
        unsigned long pci_bar1_map;     /* Physical address for PCI BAR1 */
index adef5bb2100e8eee79751ae4a11cfba622b6a600..b032562aa0d9d32217487a3f7fb0a840c5e5580e 100644 (file)
@@ -3643,18 +3643,25 @@ lpfc_fcp_imax_store(struct device *dev, struct device_attribute *attr,
        struct lpfc_hba *phba = vport->phba;
        int val = 0, i;
 
+       /* fcp_imax is only valid for SLI4 */
+       if (phba->sli_rev != LPFC_SLI_REV4)
+               return -EINVAL;
+
        /* Sanity check on user data */
        if (!isdigit(buf[0]))
                return -EINVAL;
        if (sscanf(buf, "%i", &val) != 1)
                return -EINVAL;
 
-       /* Value range is [636,651042] */
-       if (val < LPFC_MIM_IMAX || val > LPFC_DMULT_CONST)
+       /*
+        * Value range for the HBA is [5000,5000000]
+        * The value for each EQ depends on how many EQs are configured.
+        */
+       if (val < LPFC_MIN_IMAX || val > LPFC_MAX_IMAX)
                return -EINVAL;
 
        phba->cfg_fcp_imax = (uint32_t)val;
-       for (i = 0; i < phba->cfg_fcp_eq_count; i += LPFC_MAX_EQ_DELAY)
+       for (i = 0; i < phba->cfg_fcp_io_channel; i += LPFC_MAX_EQ_DELAY)
                lpfc_modify_fcp_eq_delay(phba, i);
 
        return strlen(buf);
@@ -3662,13 +3669,14 @@ lpfc_fcp_imax_store(struct device *dev, struct device_attribute *attr,
 
 /*
 # lpfc_fcp_imax: The maximum number of fast-path FCP interrupts per second
+# for the HBA.
 #
-# Value range is [636,651042]. Default value is 10000.
+# Value range is [5,000 to 5,000,000]. Default value is 50,000.
 */
-static int lpfc_fcp_imax = LPFC_FP_DEF_IMAX;
+static int lpfc_fcp_imax = LPFC_DEF_IMAX;
 module_param(lpfc_fcp_imax, int, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(lpfc_fcp_imax,
-           "Set the maximum number of fast-path FCP interrupts per second");
+           "Set the maximum number of FCP interrupts per second per HBA");
 lpfc_param_show(fcp_imax)
 
 /**
@@ -3687,14 +3695,19 @@ lpfc_param_show(fcp_imax)
 static int
 lpfc_fcp_imax_init(struct lpfc_hba *phba, int val)
 {
-       if (val >= LPFC_MIM_IMAX && val <= LPFC_DMULT_CONST) {
+       if (phba->sli_rev != LPFC_SLI_REV4) {
+               phba->cfg_fcp_imax = 0;
+               return 0;
+       }
+
+       if (val >= LPFC_MIN_IMAX && val <= LPFC_MAX_IMAX) {
                phba->cfg_fcp_imax = val;
                return 0;
        }
 
        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                        "3016 fcp_imax: %d out of range, using default\n", val);
-       phba->cfg_fcp_imax = LPFC_FP_DEF_IMAX;
+       phba->cfg_fcp_imax = LPFC_DEF_IMAX;
 
        return 0;
 }
@@ -3764,6 +3777,16 @@ static DEVICE_ATTR(lpfc_max_scsicmpl_time, S_IRUGO | S_IWUSR,
 */
 LPFC_ATTR_R(ack0, 0, 0, 1, "Enable ACK0 support");
 
+/*
+# lpfc_fcp_io_sched: Determine scheduling algrithmn for issuing FCP cmds
+# range is [0,1]. Default value is 0.
+# For [0], FCP commands are issued to Work Queues ina round robin fashion.
+# For [1], FCP commands are issued to a Work Queue associated with the
+#          current CPU.
+*/
+LPFC_ATTR_RW(fcp_io_sched, 0, 0, 1, "Determine scheduling algrithmn for "
+               "issuing commands [0] - Round Robin, [1] - Current CPU");
+
 /*
 # lpfc_cr_delay & lpfc_cr_count: Default values for I/O colaesing
 # cr_delay (msec) or cr_count outstanding commands. cr_delay can take
@@ -3844,20 +3867,32 @@ LPFC_ATTR_R(use_msi, 2, 0, 2, "Use Message Signaled Interrupts (1) or "
 
 /*
 # lpfc_fcp_wq_count: Set the number of fast-path FCP work queues
+# This parameter is ignored and will eventually be depricated
 #
-# Value range is [1,31]. Default value is 4.
+# Value range is [1,7]. Default value is 4.
 */
-LPFC_ATTR_R(fcp_wq_count, LPFC_FP_WQN_DEF, LPFC_FP_WQN_MIN, LPFC_FP_WQN_MAX,
+LPFC_ATTR_R(fcp_wq_count, LPFC_FCP_IO_CHAN_DEF, LPFC_FCP_IO_CHAN_MIN,
+           LPFC_FCP_IO_CHAN_MAX,
            "Set the number of fast-path FCP work queues, if possible");
 
 /*
-# lpfc_fcp_eq_count: Set the number of fast-path FCP event queues
+# lpfc_fcp_eq_count: Set the number of FCP EQ/CQ/WQ IO channels
 #
-# Value range is [1,7]. Default value is 1.
+# Value range is [1,7]. Default value is 4.
 */
-LPFC_ATTR_R(fcp_eq_count, LPFC_FP_EQN_DEF, LPFC_FP_EQN_MIN, LPFC_FP_EQN_MAX,
+LPFC_ATTR_R(fcp_eq_count, LPFC_FCP_IO_CHAN_DEF, LPFC_FCP_IO_CHAN_MIN,
+           LPFC_FCP_IO_CHAN_MAX,
            "Set the number of fast-path FCP event queues, if possible");
 
+/*
+# lpfc_fcp_io_channel: Set the number of FCP EQ/CQ/WQ IO channels
+#
+# Value range is [1,7]. Default value is 4.
+*/
+LPFC_ATTR_R(fcp_io_channel, LPFC_FCP_IO_CHAN_DEF, LPFC_FCP_IO_CHAN_MIN,
+           LPFC_FCP_IO_CHAN_MAX,
+           "Set the number of FCP I/O channels");
+
 /*
 # lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware.
 #       0  = HBA resets disabled
@@ -3882,6 +3917,17 @@ LPFC_ATTR_R(enable_hba_heartbeat, 0, 0, 1, "Enable HBA Heartbeat.");
 */
 LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support");
 
+/*
+# lpfc_fcp_look_ahead: Look ahead for completions in FCP start routine
+#       0  = disabled (default)
+#       1  = enabled
+# Value range is [0,1]. Default value is 0.
+*/
+unsigned int lpfc_fcp_look_ahead = LPFC_LOOK_AHEAD_OFF;
+
+module_param(lpfc_fcp_look_ahead, uint, S_IRUGO);
+MODULE_PARM_DESC(lpfc_fcp_look_ahead, "Look ahead for completions");
+
 /*
 # lpfc_prot_mask: i
 #      - Bit mask of host protection capabilities used to register with the
@@ -3976,6 +4022,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
        &dev_attr_lpfc_topology,
        &dev_attr_lpfc_scan_down,
        &dev_attr_lpfc_link_speed,
+       &dev_attr_lpfc_fcp_io_sched,
        &dev_attr_lpfc_cr_delay,
        &dev_attr_lpfc_cr_count,
        &dev_attr_lpfc_multi_ring_support,
@@ -4002,6 +4049,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
        &dev_attr_lpfc_fcp_imax,
        &dev_attr_lpfc_fcp_wq_count,
        &dev_attr_lpfc_fcp_eq_count,
+       &dev_attr_lpfc_fcp_io_channel,
        &dev_attr_lpfc_enable_bg,
        &dev_attr_lpfc_soft_wwnn,
        &dev_attr_lpfc_soft_wwpn,
@@ -4964,6 +5012,7 @@ struct fc_function_template lpfc_vport_transport_functions = {
 void
 lpfc_get_cfgparam(struct lpfc_hba *phba)
 {
+       lpfc_fcp_io_sched_init(phba, lpfc_fcp_io_sched);
        lpfc_cr_delay_init(phba, lpfc_cr_delay);
        lpfc_cr_count_init(phba, lpfc_cr_count);
        lpfc_multi_ring_support_init(phba, lpfc_multi_ring_support);
@@ -4980,6 +5029,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
        lpfc_fcp_imax_init(phba, lpfc_fcp_imax);
        lpfc_fcp_wq_count_init(phba, lpfc_fcp_wq_count);
        lpfc_fcp_eq_count_init(phba, lpfc_fcp_eq_count);
+       lpfc_fcp_io_channel_init(phba, lpfc_fcp_io_channel);
        lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset);
        lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat);
        lpfc_enable_bg_init(phba, lpfc_enable_bg);
index 253d9a8573467777fb1f54058311e569aca3a188..f7368eb8041556c684418da59f2fbca14f0d203c 100644 (file)
@@ -195,7 +195,7 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
 
        if (rsp->ulpStatus) {
                if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
-                       switch (rsp->un.ulpWord[4] & 0xff) {
+                       switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
                        case IOERR_SEQUENCE_TIMEOUT:
                                rc = -ETIMEDOUT;
                                break;
@@ -1234,7 +1234,7 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
 
        if (rsp->ulpStatus) {
                if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
-                       switch (rsp->un.ulpWord[4] & 0xff) {
+                       switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
                        case IOERR_SEQUENCE_TIMEOUT:
                                rc = -ETIMEDOUT;
                                break;
@@ -1714,6 +1714,8 @@ lpfc_sli4_bsg_set_link_diag_state(struct lpfc_hba *phba, uint32_t diag)
                        phba->sli4_hba.lnk_info.lnk_no);
 
        link_diag_state = &pmboxq->u.mqe.un.link_diag_state;
+       bf_set(lpfc_mbx_set_diag_state_diag_bit_valid, &link_diag_state->u.req,
+              LPFC_DIAG_STATE_DIAG_BIT_VALID_CHANGE);
        bf_set(lpfc_mbx_set_diag_state_link_num, &link_diag_state->u.req,
               phba->sli4_hba.lnk_info.lnk_no);
        bf_set(lpfc_mbx_set_diag_state_link_type, &link_diag_state->u.req,
@@ -4796,7 +4798,7 @@ lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
        menlo_resp->xri = rsp->ulpContext;
        if (rsp->ulpStatus) {
                if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
-                       switch (rsp->un.ulpWord[4] & 0xff) {
+                       switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
                        case IOERR_SEQUENCE_TIMEOUT:
                                rc = -ETIMEDOUT;
                                break;
index 8a2a514a2553d02b545a24eb0b47c2c2023c10d5..e470c489de071b151c29191046472108fac9697c 100644 (file)
@@ -196,8 +196,7 @@ irqreturn_t lpfc_sli_intr_handler(int, void *);
 irqreturn_t lpfc_sli_sp_intr_handler(int, void *);
 irqreturn_t lpfc_sli_fp_intr_handler(int, void *);
 irqreturn_t lpfc_sli4_intr_handler(int, void *);
-irqreturn_t lpfc_sli4_sp_intr_handler(int, void *);
-irqreturn_t lpfc_sli4_fp_intr_handler(int, void *);
+irqreturn_t lpfc_sli4_hba_intr_handler(int, void *);
 
 void lpfc_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_sli4_swap_str(struct lpfc_hba *, LPFC_MBOXQ_t *);
@@ -391,6 +390,7 @@ extern spinlock_t pgcnt_lock;
 extern unsigned int pgcnt;
 extern unsigned int lpfc_prot_mask;
 extern unsigned char lpfc_prot_guard;
+extern unsigned int lpfc_fcp_look_ahead;
 
 /* Interface exported by fabric iocb scheduler */
 void lpfc_fabric_abort_nport(struct lpfc_nodelist *);
@@ -457,6 +457,8 @@ int lpfc_sli4_queue_create(struct lpfc_hba *);
 void lpfc_sli4_queue_destroy(struct lpfc_hba *);
 void lpfc_sli4_abts_err_handler(struct lpfc_hba *, struct lpfc_nodelist *,
                                struct sli4_wcqe_xri_aborted *);
+void lpfc_sli_abts_recover_port(struct lpfc_vport *,
+                               struct lpfc_nodelist *);
 int lpfc_hba_init_link_fc_topology(struct lpfc_hba *, uint32_t, uint32_t);
 int lpfc_issue_reg_vfi(struct lpfc_vport *);
 int lpfc_issue_unreg_vfi(struct lpfc_vport *);
index 93e96b3c9097aff51694c6f7cc6f35dd695deead..7ffabb7e3afa536cb42f6aef511e80979ddb17c6 100644 (file)
@@ -104,7 +104,8 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
        if (unlikely(icmd->ulpStatus == IOSTAT_NEED_BUFFER)) {
                lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
        } else if ((icmd->ulpStatus == IOSTAT_LOCAL_REJECT) &&
-               ((icmd->un.ulpWord[4] & 0xff) == IOERR_RCV_BUFFER_WAITING)) {
+                  ((icmd->un.ulpWord[4] & IOERR_PARAM_MASK) ==
+                  IOERR_RCV_BUFFER_WAITING)) {
                /* Not enough posted buffers; Try posting more buffers */
                phba->fc_stat.NoRcvBuf++;
                if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
@@ -633,7 +634,8 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                /* Check for retry */
                if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
                        if (irsp->ulpStatus != IOSTAT_LOCAL_REJECT ||
-                           irsp->un.ulpWord[4] != IOERR_NO_RESOURCES)
+                           (irsp->un.ulpWord[4] && IOERR_PARAM_MASK) !=
+                           IOERR_NO_RESOURCES)
                                vport->fc_ns_retry++;
 
                        /* CT command is being retried */
@@ -783,7 +785,9 @@ lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                if (cmdiocb->retry < LPFC_MAX_NS_RETRY) {
                        retry = 1;
                        if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
-                               switch (irsp->un.ulpWord[4]) {
+                               switch ((irsp->un.ulpWord[4] &
+                                       IOERR_PARAM_MASK)) {
+
                                case IOERR_NO_RESOURCES:
                                        /* We don't increment the retry
                                         * count for this case.
@@ -908,8 +912,10 @@ lpfc_cmpl_ct(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                                 cmdcode, irsp->ulpStatus, irsp->un.ulpWord[4]);
 
                if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
-                       ((irsp->un.ulpWord[4] == IOERR_SLI_DOWN) ||
-                        (irsp->un.ulpWord[4] == IOERR_SLI_ABORTED)))
+                       (((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
+                         IOERR_SLI_DOWN) ||
+                        ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
+                         IOERR_SLI_ABORTED)))
                        goto out;
 
                retry = cmdiocb->retry;
index 3217d63ed2820c61db94b368c090ffec1155ca68..f63f5ff7f27467f7b34f80d3bc0089538dc147df 100644 (file)
@@ -490,9 +490,11 @@ lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size)
                len +=  snprintf(buf+len, size-len,
                                 "Ring %d: CMD GetInx:%d (Max:%d Next:%d "
                                 "Local:%d flg:x%x)  RSP PutInx:%d Max:%d\n",
-                                i, pgpp->cmdGetInx, pring->numCiocb,
-                                pring->next_cmdidx, pring->local_getidx,
-                                pring->flag, pgpp->rspPutInx, pring->numRiocb);
+                                i, pgpp->cmdGetInx, pring->sli.sli3.numCiocb,
+                                pring->sli.sli3.next_cmdidx,
+                                pring->sli.sli3.local_getidx,
+                                pring->flag, pgpp->rspPutInx,
+                                pring->sli.sli3.numRiocb);
        }
 
        if (phba->sli_rev <= LPFC_SLI_REV3) {
@@ -557,6 +559,9 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
                case NLP_STE_PRLI_ISSUE:
                        statep = "PRLI  ";
                        break;
+               case NLP_STE_LOGO_ISSUE:
+                       statep = "LOGO  ";
+                       break;
                case NLP_STE_UNMAPPED_NODE:
                        statep = "UNMAP ";
                        break;
@@ -581,8 +586,13 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
                        "WWNN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x ",
                        *name, *(name+1), *(name+2), *(name+3),
                        *(name+4), *(name+5), *(name+6), *(name+7));
-               len +=  snprintf(buf+len, size-len, "RPI:%03d flag:x%08x ",
-                       ndlp->nlp_rpi, ndlp->nlp_flag);
+               if (ndlp->nlp_flag & NLP_RPI_REGISTERED)
+                       len +=  snprintf(buf+len, size-len, "RPI:%03d ",
+                               ndlp->nlp_rpi);
+               else
+                       len +=  snprintf(buf+len, size-len, "RPI:none ");
+               len +=  snprintf(buf+len, size-len, "flag:x%08x ",
+                       ndlp->nlp_flag);
                if (!ndlp->nlp_type)
                        len +=  snprintf(buf+len, size-len, "UNKNOWN_TYPE ");
                if (ndlp->nlp_type & NLP_FC_NODE)
@@ -1999,207 +2009,298 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
 {
        struct lpfc_debug *debug = file->private_data;
        struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
-       int len = 0, fcp_qidx;
+       int len = 0;
        char *pbuffer;
+       int x, cnt;
+       int max_cnt;
+       struct lpfc_queue *qp = NULL;
+
 
        if (!debug->buffer)
                debug->buffer = kmalloc(LPFC_QUE_INFO_GET_BUF_SIZE, GFP_KERNEL);
        if (!debug->buffer)
                return 0;
        pbuffer = debug->buffer;
+       max_cnt = LPFC_QUE_INFO_GET_BUF_SIZE - 128;
 
        if (*ppos)
                return 0;
 
-       /* Get slow-path event queue information */
-       len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
-                       "Slow-path EQ information:\n");
-       if (phba->sli4_hba.sp_eq) {
-               len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
-                       "\tEQID[%02d], "
-                       "QE-COUNT[%04d], QE-SIZE[%04d], "
-                       "HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
-                       phba->sli4_hba.sp_eq->queue_id,
-                       phba->sli4_hba.sp_eq->entry_count,
-                       phba->sli4_hba.sp_eq->entry_size,
-                       phba->sli4_hba.sp_eq->host_index,
-                       phba->sli4_hba.sp_eq->hba_index);
-       }
+       spin_lock_irq(&phba->hbalock);
+
+       /* Fast-path event queue */
+       if (phba->sli4_hba.hba_eq && phba->cfg_fcp_io_channel) {
+               cnt = phba->cfg_fcp_io_channel;
+
+               for (x = 0; x < cnt; x++) {
+
+                       /* Fast-path EQ */
+                       qp = phba->sli4_hba.hba_eq[x];
+                       if (!qp)
+                               goto proc_cq;
+
+                       len += snprintf(pbuffer+len,
+                               LPFC_QUE_INFO_GET_BUF_SIZE-len,
+                               "\nHBA EQ info: "
+                               "EQ-STAT[max:x%x noE:x%x "
+                               "bs:x%x proc:x%llx]\n",
+                               qp->q_cnt_1, qp->q_cnt_2,
+                               qp->q_cnt_3, (unsigned long long)qp->q_cnt_4);
+
+                       len += snprintf(pbuffer+len,
+                               LPFC_QUE_INFO_GET_BUF_SIZE-len,
+                               "EQID[%02d], "
+                               "QE-CNT[%04d], QE-SIZE[%04d], "
+                               "HOST-IDX[%04d], PORT-IDX[%04d]",
+                               qp->queue_id,
+                               qp->entry_count,
+                               qp->entry_size,
+                               qp->host_index,
+                               qp->hba_index);
+
+
+                       /* Reset max counter */
+                       qp->EQ_max_eqe = 0;
+
+                       len +=  snprintf(pbuffer+len,
+                               LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+                       if (len >= max_cnt)
+                               goto too_big;
+proc_cq:
+                       /* Fast-path FCP CQ */
+                       qp = phba->sli4_hba.fcp_cq[x];
+                       len += snprintf(pbuffer+len,
+                               LPFC_QUE_INFO_GET_BUF_SIZE-len,
+                               "\tFCP CQ info: ");
+                       len += snprintf(pbuffer+len,
+                               LPFC_QUE_INFO_GET_BUF_SIZE-len,
+                               "AssocEQID[%02d]: "
+                               "CQ STAT[max:x%x relw:x%x "
+                               "xabt:x%x wq:x%llx]\n",
+                               qp->assoc_qid,
+                               qp->q_cnt_1, qp->q_cnt_2,
+                               qp->q_cnt_3, (unsigned long long)qp->q_cnt_4);
+                       len += snprintf(pbuffer+len,
+                               LPFC_QUE_INFO_GET_BUF_SIZE-len,
+                               "\tCQID[%02d], "
+                               "QE-CNT[%04d], QE-SIZE[%04d], "
+                               "HOST-IDX[%04d], PORT-IDX[%04d]",
+                               qp->queue_id, qp->entry_count,
+                               qp->entry_size, qp->host_index,
+                               qp->hba_index);
+
 
-       /* Get fast-path event queue information */
-       len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
-                       "Fast-path EQ information:\n");
-       if (phba->sli4_hba.fp_eq) {
-               for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count;
-                    fcp_qidx++) {
-                       if (phba->sli4_hba.fp_eq[fcp_qidx]) {
+                       /* Reset max counter */
+                       qp->CQ_max_cqe = 0;
+
+                       len +=  snprintf(pbuffer+len,
+                               LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+                       if (len >= max_cnt)
+                               goto too_big;
+
+                       /* Fast-path FCP WQ */
+                       qp = phba->sli4_hba.fcp_wq[x];
+
+                       len += snprintf(pbuffer+len,
+                               LPFC_QUE_INFO_GET_BUF_SIZE-len,
+                               "\t\tFCP WQ info: ");
+                       len += snprintf(pbuffer+len,
+                               LPFC_QUE_INFO_GET_BUF_SIZE-len,
+                               "AssocCQID[%02d]: "
+                               "WQ-STAT[oflow:x%x posted:x%llx]\n",
+                               qp->assoc_qid,
+                               qp->q_cnt_1, (unsigned long long)qp->q_cnt_4);
+                       len += snprintf(pbuffer+len,
+                               LPFC_QUE_INFO_GET_BUF_SIZE-len,
+                               "\t\tWQID[%02d], "
+                               "QE-CNT[%04d], QE-SIZE[%04d], "
+                               "HOST-IDX[%04d], PORT-IDX[%04d]",
+                               qp->queue_id,
+                               qp->entry_count,
+                               qp->entry_size,
+                               qp->host_index,
+                               qp->hba_index);
+
+                       len +=  snprintf(pbuffer+len,
+                               LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+                       if (len >= max_cnt)
+                               goto too_big;
+
+                       if (x)
+                               continue;
+
+                       /* Only EQ 0 has slow path CQs configured */
+
+                       /* Slow-path mailbox CQ */
+                       qp = phba->sli4_hba.mbx_cq;
+                       if (qp) {
+                               len += snprintf(pbuffer+len,
+                                       LPFC_QUE_INFO_GET_BUF_SIZE-len,
+                                       "\tMBX CQ info: ");
+                               len += snprintf(pbuffer+len,
+                                       LPFC_QUE_INFO_GET_BUF_SIZE-len,
+                                       "AssocEQID[%02d]: "
+                                       "CQ-STAT[mbox:x%x relw:x%x "
+                                       "xabt:x%x wq:x%llx]\n",
+                                       qp->assoc_qid,
+                                       qp->q_cnt_1, qp->q_cnt_2,
+                                       qp->q_cnt_3,
+                                       (unsigned long long)qp->q_cnt_4);
                                len += snprintf(pbuffer+len,
                                        LPFC_QUE_INFO_GET_BUF_SIZE-len,
-                               "\tEQID[%02d], "
-                               "QE-COUNT[%04d], QE-SIZE[%04d], "
-                               "HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
-                               phba->sli4_hba.fp_eq[fcp_qidx]->queue_id,
-                               phba->sli4_hba.fp_eq[fcp_qidx]->entry_count,
-                               phba->sli4_hba.fp_eq[fcp_qidx]->entry_size,
-                               phba->sli4_hba.fp_eq[fcp_qidx]->host_index,
-                               phba->sli4_hba.fp_eq[fcp_qidx]->hba_index);
+                                       "\tCQID[%02d], "
+                                       "QE-CNT[%04d], QE-SIZE[%04d], "
+                                       "HOST-IDX[%04d], PORT-IDX[%04d]",
+                                       qp->queue_id, qp->entry_count,
+                                       qp->entry_size, qp->host_index,
+                                       qp->hba_index);
+
+                               len +=  snprintf(pbuffer+len,
+                                       LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+                               if (len >= max_cnt)
+                                       goto too_big;
                        }
-               }
-       }
-       len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
-
-       /* Get mailbox complete queue information */
-       len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
-                       "Slow-path MBX CQ information:\n");
-       if (phba->sli4_hba.mbx_cq) {
-               len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
-                       "Associated EQID[%02d]:\n",
-                       phba->sli4_hba.mbx_cq->assoc_qid);
-               len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
-                       "\tCQID[%02d], "
-                       "QE-COUNT[%04d], QE-SIZE[%04d], "
-                       "HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
-                       phba->sli4_hba.mbx_cq->queue_id,
-                       phba->sli4_hba.mbx_cq->entry_count,
-                       phba->sli4_hba.mbx_cq->entry_size,
-                       phba->sli4_hba.mbx_cq->host_index,
-                       phba->sli4_hba.mbx_cq->hba_index);
-       }
 
-       /* Get slow-path complete queue information */
-       len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
-                       "Slow-path ELS CQ information:\n");
-       if (phba->sli4_hba.els_cq) {
-               len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
-                       "Associated EQID[%02d]:\n",
-                       phba->sli4_hba.els_cq->assoc_qid);
-               len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
-                       "\tCQID [%02d], "
-                       "QE-COUNT[%04d], QE-SIZE[%04d], "
-                       "HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
-                       phba->sli4_hba.els_cq->queue_id,
-                       phba->sli4_hba.els_cq->entry_count,
-                       phba->sli4_hba.els_cq->entry_size,
-                       phba->sli4_hba.els_cq->host_index,
-                       phba->sli4_hba.els_cq->hba_index);
-       }
+                       /* Slow-path MBOX MQ */
+                       qp = phba->sli4_hba.mbx_wq;
+                       if (qp) {
+                               len += snprintf(pbuffer+len,
+                                       LPFC_QUE_INFO_GET_BUF_SIZE-len,
+                                       "\t\tMBX MQ info: ");
+                               len += snprintf(pbuffer+len,
+                                       LPFC_QUE_INFO_GET_BUF_SIZE-len,
+                                       "AssocCQID[%02d]:\n",
+                                       phba->sli4_hba.mbx_wq->assoc_qid);
+                               len += snprintf(pbuffer+len,
+                                       LPFC_QUE_INFO_GET_BUF_SIZE-len,
+                                       "\t\tWQID[%02d], "
+                                       "QE-CNT[%04d], QE-SIZE[%04d], "
+                                       "HOST-IDX[%04d], PORT-IDX[%04d]",
+                                       qp->queue_id, qp->entry_count,
+                                       qp->entry_size, qp->host_index,
+                                       qp->hba_index);
+
+                               len +=  snprintf(pbuffer+len,
+                                       LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+                               if (len >= max_cnt)
+                                       goto too_big;
+                       }
 
-       /* Get fast-path complete queue information */
-       len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
-                       "Fast-path FCP CQ information:\n");
-       fcp_qidx = 0;
-       if (phba->sli4_hba.fcp_cq) {
-               do {
-                       if (phba->sli4_hba.fcp_cq[fcp_qidx]) {
+                       /* Slow-path ELS response CQ */
+                       qp = phba->sli4_hba.els_cq;
+                       if (qp) {
                                len += snprintf(pbuffer+len,
                                        LPFC_QUE_INFO_GET_BUF_SIZE-len,
-                               "Associated EQID[%02d]:\n",
-                               phba->sli4_hba.fcp_cq[fcp_qidx]->assoc_qid);
+                                       "\tELS CQ info: ");
                                len += snprintf(pbuffer+len,
                                        LPFC_QUE_INFO_GET_BUF_SIZE-len,
-                               "\tCQID[%02d], "
-                               "QE-COUNT[%04d], QE-SIZE[%04d], "
-                               "HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
-                               phba->sli4_hba.fcp_cq[fcp_qidx]->queue_id,
-                               phba->sli4_hba.fcp_cq[fcp_qidx]->entry_count,
-                               phba->sli4_hba.fcp_cq[fcp_qidx]->entry_size,
-                               phba->sli4_hba.fcp_cq[fcp_qidx]->host_index,
-                               phba->sli4_hba.fcp_cq[fcp_qidx]->hba_index);
+                                       "AssocEQID[%02d]: "
+                                       "CQ-STAT[max:x%x relw:x%x "
+                                       "xabt:x%x wq:x%llx]\n",
+                                       qp->assoc_qid,
+                                       qp->q_cnt_1, qp->q_cnt_2,
+                                       qp->q_cnt_3,
+                                       (unsigned long long)qp->q_cnt_4);
+                               len += snprintf(pbuffer+len,
+                                       LPFC_QUE_INFO_GET_BUF_SIZE-len,
+                                       "\tCQID [%02d], "
+                                       "QE-CNT[%04d], QE-SIZE[%04d], "
+                                       "HOST-IDX[%04d], PORT-IDX[%04d]",
+                                       qp->queue_id, qp->entry_count,
+                                       qp->entry_size, qp->host_index,
+                                       qp->hba_index);
+
+                               /* Reset max counter */
+                               qp->CQ_max_cqe = 0;
+
+                               len +=  snprintf(pbuffer+len,
+                                       LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+                               if (len >= max_cnt)
+                                       goto too_big;
                        }
-               } while (++fcp_qidx < phba->cfg_fcp_eq_count);
-               len += snprintf(pbuffer+len,
-                               LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
-       }
 
-       /* Get mailbox queue information */
-       len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
-                       "Slow-path MBX MQ information:\n");
-       if (phba->sli4_hba.mbx_wq) {
-               len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
-                       "Associated CQID[%02d]:\n",
-                       phba->sli4_hba.mbx_wq->assoc_qid);
-               len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
-                       "\tWQID[%02d], "
-                       "QE-COUNT[%04d], QE-SIZE[%04d], "
-                       "HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
-                       phba->sli4_hba.mbx_wq->queue_id,
-                       phba->sli4_hba.mbx_wq->entry_count,
-                       phba->sli4_hba.mbx_wq->entry_size,
-                       phba->sli4_hba.mbx_wq->host_index,
-                       phba->sli4_hba.mbx_wq->hba_index);
-       }
+                       /* Slow-path ELS WQ */
+                       qp = phba->sli4_hba.els_wq;
+                       if (qp) {
+                               len += snprintf(pbuffer+len,
+                                       LPFC_QUE_INFO_GET_BUF_SIZE-len,
+                                       "\t\tELS WQ info: ");
+                               len += snprintf(pbuffer+len,
+                                       LPFC_QUE_INFO_GET_BUF_SIZE-len,
+                                       "AssocCQID[%02d]: "
+                                       " WQ-STAT[oflow:x%x "
+                                       "posted:x%llx]\n",
+                                       qp->assoc_qid,
+                                       qp->q_cnt_1,
+                                       (unsigned long long)qp->q_cnt_4);
+                               len += snprintf(pbuffer+len,
+                                       LPFC_QUE_INFO_GET_BUF_SIZE-len,
+                                       "\t\tWQID[%02d], "
+                                       "QE-CNT[%04d], QE-SIZE[%04d], "
+                                       "HOST-IDX[%04d], PORT-IDX[%04d]",
+                                       qp->queue_id, qp->entry_count,
+                                       qp->entry_size, qp->host_index,
+                                       qp->hba_index);
+
+                               len +=  snprintf(pbuffer+len,
+                                       LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+                               if (len >= max_cnt)
+                                       goto too_big;
+                       }
 
-       /* Get slow-path work queue information */
-       len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
-                       "Slow-path ELS WQ information:\n");
-       if (phba->sli4_hba.els_wq) {
-               len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
-                       "Associated CQID[%02d]:\n",
-                       phba->sli4_hba.els_wq->assoc_qid);
-               len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
-                       "\tWQID[%02d], "
-                       "QE-COUNT[%04d], QE-SIZE[%04d], "
-                       "HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
-                       phba->sli4_hba.els_wq->queue_id,
-                       phba->sli4_hba.els_wq->entry_count,
-                       phba->sli4_hba.els_wq->entry_size,
-                       phba->sli4_hba.els_wq->host_index,
-                       phba->sli4_hba.els_wq->hba_index);
-       }
+                       if (phba->sli4_hba.hdr_rq && phba->sli4_hba.dat_rq) {
+                               /* Slow-path RQ header */
+                               qp = phba->sli4_hba.hdr_rq;
 
-       /* Get fast-path work queue information */
-       len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
-                       "Fast-path FCP WQ information:\n");
-       if (phba->sli4_hba.fcp_wq) {
-               for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count;
-                    fcp_qidx++) {
-                       if (!phba->sli4_hba.fcp_wq[fcp_qidx])
-                               continue;
-                       len += snprintf(pbuffer+len,
+                               len += snprintf(pbuffer+len,
+                               LPFC_QUE_INFO_GET_BUF_SIZE-len,
+                                       "\t\tRQ info: ");
+                               len += snprintf(pbuffer+len,
                                        LPFC_QUE_INFO_GET_BUF_SIZE-len,
-                               "Associated CQID[%02d]:\n",
-                               phba->sli4_hba.fcp_wq[fcp_qidx]->assoc_qid);
-                       len += snprintf(pbuffer+len,
+                                       "AssocCQID[%02d]: "
+                                       "RQ-STAT[nopost:x%x nobuf:x%x "
+                                       "trunc:x%x rcv:x%llx]\n",
+                                       qp->assoc_qid,
+                                       qp->q_cnt_1, qp->q_cnt_2,
+                                       qp->q_cnt_3,
+                                       (unsigned long long)qp->q_cnt_4);
+                               len += snprintf(pbuffer+len,
                                        LPFC_QUE_INFO_GET_BUF_SIZE-len,
-                               "\tWQID[%02d], "
-                               "QE-COUNT[%04d], WQE-SIZE[%04d], "
-                               "HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
-                               phba->sli4_hba.fcp_wq[fcp_qidx]->queue_id,
-                               phba->sli4_hba.fcp_wq[fcp_qidx]->entry_count,
-                               phba->sli4_hba.fcp_wq[fcp_qidx]->entry_size,
-                               phba->sli4_hba.fcp_wq[fcp_qidx]->host_index,
-                               phba->sli4_hba.fcp_wq[fcp_qidx]->hba_index);
+                                       "\t\tHQID[%02d], "
+                                       "QE-CNT[%04d], QE-SIZE[%04d], "
+                                       "HOST-IDX[%04d], PORT-IDX[%04d]\n",
+                                       qp->queue_id,
+                                       qp->entry_count,
+                                       qp->entry_size,
+                                       qp->host_index,
+                                       qp->hba_index);
+
+                               /* Slow-path RQ data */
+                               qp = phba->sli4_hba.dat_rq;
+                               len += snprintf(pbuffer+len,
+                                       LPFC_QUE_INFO_GET_BUF_SIZE-len,
+                                       "\t\tDQID[%02d], "
+                                       "QE-CNT[%04d], QE-SIZE[%04d], "
+                                       "HOST-IDX[%04d], PORT-IDX[%04d]\n",
+                                       qp->queue_id,
+                                       qp->entry_count,
+                                       qp->entry_size,
+                                       qp->host_index,
+                                       qp->hba_index);
+
+                               len +=  snprintf(pbuffer+len,
+                                       LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+                       }
                }
-               len += snprintf(pbuffer+len,
-                               LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
        }
 
-       /* Get receive queue information */
-       len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
-                       "Slow-path RQ information:\n");
-       if (phba->sli4_hba.hdr_rq && phba->sli4_hba.dat_rq) {
-               len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
-                       "Associated CQID[%02d]:\n",
-                       phba->sli4_hba.hdr_rq->assoc_qid);
-               len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
-                       "\tHQID[%02d], "
-                       "QE-COUNT[%04d], QE-SIZE[%04d], "
-                       "HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
-                       phba->sli4_hba.hdr_rq->queue_id,
-                       phba->sli4_hba.hdr_rq->entry_count,
-                       phba->sli4_hba.hdr_rq->entry_size,
-                       phba->sli4_hba.hdr_rq->host_index,
-                       phba->sli4_hba.hdr_rq->hba_index);
-               len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
-                       "\tDQID[%02d], "
-                       "QE-COUNT[%04d], QE-SIZE[%04d], "
-                       "HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
-                       phba->sli4_hba.dat_rq->queue_id,
-                       phba->sli4_hba.dat_rq->entry_count,
-                       phba->sli4_hba.dat_rq->entry_size,
-                       phba->sli4_hba.dat_rq->host_index,
-                       phba->sli4_hba.dat_rq->hba_index);
-       }
+       spin_unlock_irq(&phba->hbalock);
+       return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
+
+too_big:
+       len +=  snprintf(pbuffer+len,
+               LPFC_QUE_INFO_GET_BUF_SIZE-len, "Truncated ...\n");
+       spin_unlock_irq(&phba->hbalock);
        return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
 }
 
@@ -2408,31 +2509,21 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
 
        switch (quetp) {
        case LPFC_IDIAG_EQ:
-               /* Slow-path event queue */
-               if (phba->sli4_hba.sp_eq &&
-                   phba->sli4_hba.sp_eq->queue_id == queid) {
-                       /* Sanity check */
-                       rc = lpfc_idiag_que_param_check(
-                                       phba->sli4_hba.sp_eq, index, count);
-                       if (rc)
-                               goto error_out;
-                       idiag.ptr_private = phba->sli4_hba.sp_eq;
-                       goto pass_check;
-               }
-               /* Fast-path event queue */
-               if (phba->sli4_hba.fp_eq) {
-                       for (qidx = 0; qidx < phba->cfg_fcp_eq_count; qidx++) {
-                               if (phba->sli4_hba.fp_eq[qidx] &&
-                                   phba->sli4_hba.fp_eq[qidx]->queue_id ==
+               /* HBA event queue */
+               if (phba->sli4_hba.hba_eq) {
+                       for (qidx = 0; qidx < phba->cfg_fcp_io_channel;
+                               qidx++) {
+                               if (phba->sli4_hba.hba_eq[qidx] &&
+                                   phba->sli4_hba.hba_eq[qidx]->queue_id ==
                                    queid) {
                                        /* Sanity check */
                                        rc = lpfc_idiag_que_param_check(
-                                               phba->sli4_hba.fp_eq[qidx],
+                                               phba->sli4_hba.hba_eq[qidx],
                                                index, count);
                                        if (rc)
                                                goto error_out;
                                        idiag.ptr_private =
-                                               phba->sli4_hba.fp_eq[qidx];
+                                               phba->sli4_hba.hba_eq[qidx];
                                        goto pass_check;
                                }
                        }
@@ -2479,7 +2570,7 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
                                                phba->sli4_hba.fcp_cq[qidx];
                                        goto pass_check;
                                }
-                       } while (++qidx < phba->cfg_fcp_eq_count);
+                       } while (++qidx < phba->cfg_fcp_io_channel);
                }
                goto error_out;
                break;
@@ -2511,7 +2602,8 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
                }
                /* FCP work queue */
                if (phba->sli4_hba.fcp_wq) {
-                       for (qidx = 0; qidx < phba->cfg_fcp_wq_count; qidx++) {
+                       for (qidx = 0; qidx < phba->cfg_fcp_io_channel;
+                               qidx++) {
                                if (!phba->sli4_hba.fcp_wq[qidx])
                                        continue;
                                if (phba->sli4_hba.fcp_wq[qidx]->queue_id ==
@@ -4490,7 +4582,7 @@ lpfc_debug_dump_all_queues(struct lpfc_hba *phba)
        lpfc_debug_dump_mbx_wq(phba);
        lpfc_debug_dump_els_wq(phba);
 
-       for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++)
+       for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_io_channel; fcp_wqidx++)
                lpfc_debug_dump_fcp_wq(phba, fcp_wqidx);
 
        lpfc_debug_dump_hdr_rq(phba);
@@ -4501,14 +4593,12 @@ lpfc_debug_dump_all_queues(struct lpfc_hba *phba)
        lpfc_debug_dump_mbx_cq(phba);
        lpfc_debug_dump_els_cq(phba);
 
-       for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++)
+       for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_io_channel; fcp_wqidx++)
                lpfc_debug_dump_fcp_cq(phba, fcp_wqidx);
 
        /*
         * Dump Event Queues (EQs)
         */
-       lpfc_debug_dump_sp_eq(phba);
-
-       for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++)
-               lpfc_debug_dump_fcp_eq(phba, fcp_wqidx);
+       for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_io_channel; fcp_wqidx++)
+               lpfc_debug_dump_hba_eq(phba, fcp_wqidx);
 }
index afe368fd1b98f31e59eae497d2daea9579df0c92..8b2b6a3bfc25b52c66d25b9f8e2135c56b4aaf45 100644 (file)
@@ -36,6 +36,9 @@
 /* dumpHostSlim output buffer size */
 #define LPFC_DUMPHOSTSLIM_SIZE 4096
 
+/* dumpSLIqinfo output buffer size */
+#define        LPFC_DUMPSLIQINFO_SIZE 4096
+
 /* hbqinfo output buffer size */
 #define LPFC_HBQINFO_SIZE 8192
 
@@ -366,7 +369,7 @@ static inline void
 lpfc_debug_dump_fcp_wq(struct lpfc_hba *phba, int fcp_wqidx)
 {
        /* sanity check */
-       if (fcp_wqidx >= phba->cfg_fcp_wq_count)
+       if (fcp_wqidx >= phba->cfg_fcp_io_channel)
                return;
 
        printk(KERN_ERR "FCP WQ: WQ[Idx:%d|Qid:%d]\n",
@@ -388,15 +391,15 @@ lpfc_debug_dump_fcp_cq(struct lpfc_hba *phba, int fcp_wqidx)
        int fcp_cqidx, fcp_cqid;
 
        /* sanity check */
-       if (fcp_wqidx >= phba->cfg_fcp_wq_count)
+       if (fcp_wqidx >= phba->cfg_fcp_io_channel)
                return;
 
        fcp_cqid = phba->sli4_hba.fcp_wq[fcp_wqidx]->assoc_qid;
-       for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++)
+       for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_io_channel; fcp_cqidx++)
                if (phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id == fcp_cqid)
                        break;
        if (phba->intr_type == MSIX) {
-               if (fcp_cqidx >= phba->cfg_fcp_eq_count)
+               if (fcp_cqidx >= phba->cfg_fcp_io_channel)
                        return;
        } else {
                if (fcp_cqidx > 0)
@@ -410,7 +413,7 @@ lpfc_debug_dump_fcp_cq(struct lpfc_hba *phba, int fcp_wqidx)
 }
 
 /**
- * lpfc_debug_dump_fcp_eq - dump all entries from a fcp work queue's evt queue
+ * lpfc_debug_dump_hba_eq - dump all entries from a fcp work queue's evt queue
  * @phba: Pointer to HBA context object.
  * @fcp_wqidx: Index to a FCP work queue.
  *
@@ -418,36 +421,30 @@ lpfc_debug_dump_fcp_cq(struct lpfc_hba *phba, int fcp_wqidx)
  * associated to the FCP work queue specified by the @fcp_wqidx.
  **/
 static inline void
-lpfc_debug_dump_fcp_eq(struct lpfc_hba *phba, int fcp_wqidx)
+lpfc_debug_dump_hba_eq(struct lpfc_hba *phba, int fcp_wqidx)
 {
        struct lpfc_queue *qdesc;
        int fcp_eqidx, fcp_eqid;
        int fcp_cqidx, fcp_cqid;
 
        /* sanity check */
-       if (fcp_wqidx >= phba->cfg_fcp_wq_count)
+       if (fcp_wqidx >= phba->cfg_fcp_io_channel)
                return;
        fcp_cqid = phba->sli4_hba.fcp_wq[fcp_wqidx]->assoc_qid;
-       for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++)
+       for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_io_channel; fcp_cqidx++)
                if (phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id == fcp_cqid)
                        break;
        if (phba->intr_type == MSIX) {
-               if (fcp_cqidx >= phba->cfg_fcp_eq_count)
+               if (fcp_cqidx >= phba->cfg_fcp_io_channel)
                        return;
        } else {
                if (fcp_cqidx > 0)
                        return;
        }
 
-       if (phba->cfg_fcp_eq_count == 0) {
-               fcp_eqidx = -1;
-               fcp_eqid = phba->sli4_hba.sp_eq->queue_id;
-               qdesc = phba->sli4_hba.sp_eq;
-       } else {
-               fcp_eqidx = fcp_cqidx;
-               fcp_eqid = phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id;
-               qdesc = phba->sli4_hba.fp_eq[fcp_eqidx];
-       }
+       fcp_eqidx = fcp_cqidx;
+       fcp_eqid = phba->sli4_hba.hba_eq[fcp_eqidx]->queue_id;
+       qdesc = phba->sli4_hba.hba_eq[fcp_eqidx];
 
        printk(KERN_ERR "FCP EQ: WQ[Idx:%d|Qid:%d]->CQ[Idx:%d|Qid:%d]->"
                "EQ[Idx:%d|Qid:%d]\n",
@@ -542,25 +539,6 @@ lpfc_debug_dump_mbx_cq(struct lpfc_hba *phba)
        lpfc_debug_dump_q(phba->sli4_hba.mbx_cq);
 }
 
-/**
- * lpfc_debug_dump_sp_eq - dump all entries from slow-path event queue
- * @phba: Pointer to HBA context object.
- *
- * This function dumps all entries from the slow-path event queue.
- **/
-static inline void
-lpfc_debug_dump_sp_eq(struct lpfc_hba *phba)
-{
-       printk(KERN_ERR "SP EQ: WQ[Qid:%d/Qid:%d]->CQ[Qid:%d/Qid:%d]->"
-               "EQ[Qid:%d]:\n",
-               phba->sli4_hba.mbx_wq->queue_id,
-               phba->sli4_hba.els_wq->queue_id,
-               phba->sli4_hba.mbx_cq->queue_id,
-               phba->sli4_hba.els_cq->queue_id,
-               phba->sli4_hba.sp_eq->queue_id);
-       lpfc_debug_dump_q(phba->sli4_hba.sp_eq);
-}
-
 /**
  * lpfc_debug_dump_wq_by_id - dump all entries from a work queue by queue id
  * @phba: Pointer to HBA context object.
@@ -574,10 +552,10 @@ lpfc_debug_dump_wq_by_id(struct lpfc_hba *phba, int qid)
 {
        int wq_idx;
 
-       for (wq_idx = 0; wq_idx < phba->cfg_fcp_wq_count; wq_idx++)
+       for (wq_idx = 0; wq_idx < phba->cfg_fcp_io_channel; wq_idx++)
                if (phba->sli4_hba.fcp_wq[wq_idx]->queue_id == qid)
                        break;
-       if (wq_idx < phba->cfg_fcp_wq_count) {
+       if (wq_idx < phba->cfg_fcp_io_channel) {
                printk(KERN_ERR "FCP WQ[Idx:%d|Qid:%d]\n", wq_idx, qid);
                lpfc_debug_dump_q(phba->sli4_hba.fcp_wq[wq_idx]);
                return;
@@ -644,9 +622,9 @@ lpfc_debug_dump_cq_by_id(struct lpfc_hba *phba, int qid)
        do {
                if (phba->sli4_hba.fcp_cq[cq_idx]->queue_id == qid)
                        break;
-       } while (++cq_idx < phba->cfg_fcp_eq_count);
+       } while (++cq_idx < phba->cfg_fcp_io_channel);
 
-       if (cq_idx < phba->cfg_fcp_eq_count) {
+       if (cq_idx < phba->cfg_fcp_io_channel) {
                printk(KERN_ERR "FCP CQ[Idx:%d|Qid:%d]\n", cq_idx, qid);
                lpfc_debug_dump_q(phba->sli4_hba.fcp_cq[cq_idx]);
                return;
@@ -677,21 +655,17 @@ lpfc_debug_dump_eq_by_id(struct lpfc_hba *phba, int qid)
 {
        int eq_idx;
 
-       for (eq_idx = 0; eq_idx < phba->cfg_fcp_eq_count; eq_idx++) {
-               if (phba->sli4_hba.fp_eq[eq_idx]->queue_id == qid)
+       for (eq_idx = 0; eq_idx < phba->cfg_fcp_io_channel; eq_idx++) {
+               if (phba->sli4_hba.hba_eq[eq_idx]->queue_id == qid)
                        break;
        }
 
-       if (eq_idx < phba->cfg_fcp_eq_count) {
+       if (eq_idx < phba->cfg_fcp_io_channel) {
                printk(KERN_ERR "FCP EQ[Idx:%d|Qid:%d]\n", eq_idx, qid);
-               lpfc_debug_dump_q(phba->sli4_hba.fp_eq[eq_idx]);
+               lpfc_debug_dump_q(phba->sli4_hba.hba_eq[eq_idx]);
                return;
        }
 
-       if (phba->sli4_hba.sp_eq->queue_id == qid) {
-               printk(KERN_ERR "SP EQ[|Qid:%d]\n", qid);
-               lpfc_debug_dump_q(phba->sli4_hba.sp_eq);
-       }
 }
 
 void lpfc_debug_dump_all_queues(struct lpfc_hba *);
index 1d84b63fccadf49eb9f39374ef3b7d1b163d2d8c..af49fb03dbb890dba4e0fc21a5a704580ab94781 100644 (file)
@@ -145,6 +145,7 @@ struct lpfc_node_rrq {
 #define NLP_RCV_PLOGI      0x00080000  /* Rcv'ed PLOGI from remote system */
 #define NLP_LOGO_ACC       0x00100000  /* Process LOGO after ACC completes */
 #define NLP_TGT_NO_SCSIID  0x00200000  /* good PRLI but no binding for scsid */
+#define NLP_ISSUE_LOGO     0x00400000  /* waiting to issue a LOGO */
 #define NLP_ACC_REGLOGIN   0x01000000  /* Issue Reg Login after successful
                                           ACC */
 #define NLP_NPR_ADISC      0x02000000  /* Issue ADISC when dq'ed from
@@ -201,10 +202,11 @@ struct lpfc_node_rrq {
 #define NLP_STE_ADISC_ISSUE       0x2  /* ADISC was sent to NL_PORT */
 #define NLP_STE_REG_LOGIN_ISSUE   0x3  /* REG_LOGIN was issued for NL_PORT */
 #define NLP_STE_PRLI_ISSUE        0x4  /* PRLI was sent to NL_PORT */
-#define NLP_STE_UNMAPPED_NODE     0x5  /* PRLI completed from NL_PORT */
-#define NLP_STE_MAPPED_NODE       0x6  /* Identified as a FCP Target */
-#define NLP_STE_NPR_NODE          0x7  /* NPort disappeared */
-#define NLP_STE_MAX_STATE         0x8
+#define NLP_STE_LOGO_ISSUE       0x5   /* LOGO was sent to NL_PORT */
+#define NLP_STE_UNMAPPED_NODE     0x6  /* PRLI completed from NL_PORT */
+#define NLP_STE_MAPPED_NODE       0x7  /* Identified as a FCP Target */
+#define NLP_STE_NPR_NODE          0x8  /* NPort disappeared */
+#define NLP_STE_MAX_STATE         0x9
 #define NLP_STE_FREED_NODE        0xff /* node entry was freed to MEM_NLP */
 
 /* For UNUSED_NODE state, the node has just been allocated.
index d54ae199979710c11f59a1ce34e5e8271460ba6e..cfe533bc97909cbda098734e3a4e2e99cee1de85 100644 (file)
@@ -962,7 +962,8 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                        if ((phba->fcoe_cvl_eventtag_attn ==
                             phba->fcoe_cvl_eventtag) &&
                            (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
-                           (irsp->un.ulpWord[4] == IOERR_SLI_ABORTED))
+                           ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
+                           IOERR_SLI_ABORTED))
                                goto stop_rr_fcf_flogi;
                        else
                                phba->fcoe_cvl_eventtag_attn =
@@ -1108,8 +1109,10 @@ flogifail:
                /* Start discovery */
                lpfc_disc_start(vport);
        } else if (((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
-                       ((irsp->un.ulpWord[4] != IOERR_SLI_ABORTED) &&
-                       (irsp->un.ulpWord[4] != IOERR_SLI_DOWN))) &&
+                       (((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
+                        IOERR_SLI_ABORTED) &&
+                       ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
+                        IOERR_SLI_DOWN))) &&
                        (phba->link_state != LPFC_CLEAR_LA)) {
                /* If FLOGI failed enable link interrupt. */
                lpfc_issue_clear_la(phba, vport);
@@ -1476,6 +1479,10 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
                return ndlp;
        memset(&rrq.xri_bitmap, 0, sizeof(new_ndlp->active_rrqs.xri_bitmap));
 
+       lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+                "3178 PLOGI confirm: ndlp %p x%x: new_ndlp %p\n",
+                ndlp, ndlp->nlp_DID, new_ndlp);
+
        if (!new_ndlp) {
                rc = memcmp(&ndlp->nlp_portname, name,
                            sizeof(struct lpfc_name));
@@ -1527,6 +1534,9 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
                /* The new_ndlp is replacing ndlp totally, so we need
                 * to put ndlp on UNUSED list and try to free it.
                 */
+               lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+                        "3179 PLOGI confirm NEW: %x %x\n",
+                        new_ndlp->nlp_DID, keepDID);
 
                /* Fix up the rport accordingly */
                rport =  ndlp->rport;
@@ -1559,23 +1569,34 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
                lpfc_drop_node(vport, ndlp);
        }
        else {
+               lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+                        "3180 PLOGI confirm SWAP: %x %x\n",
+                        new_ndlp->nlp_DID, keepDID);
+
                lpfc_unreg_rpi(vport, ndlp);
+
                /* Two ndlps cannot have the same did */
                ndlp->nlp_DID = keepDID;
                if (phba->sli_rev == LPFC_SLI_REV4)
                        memcpy(&ndlp->active_rrqs.xri_bitmap,
                                &rrq.xri_bitmap,
                                sizeof(ndlp->active_rrqs.xri_bitmap));
+
                /* Since we are swapping the ndlp passed in with the new one
-                * and the did has already been swapped, copy over the
-                * state and names.
+                * and the did has already been swapped, copy over state.
+                * The new WWNs are already in new_ndlp since that's what
+                * we looked it up by in the beginning of this routine.
                 */
-               memcpy(&new_ndlp->nlp_portname, &ndlp->nlp_portname,
-                       sizeof(struct lpfc_name));
-               memcpy(&new_ndlp->nlp_nodename, &ndlp->nlp_nodename,
-                       sizeof(struct lpfc_name));
                new_ndlp->nlp_state = ndlp->nlp_state;
-               lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+
+               /* Since we are switching over to the new_ndlp, the old
+                * ndlp should be put in the NPR state, unless we have
+                * already started re-discovery on it.
+                */
+               if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) ||
+                   (ndlp->nlp_state == NLP_STE_MAPPED_NODE))
+                       lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+
                /* Fix up the rport accordingly */
                rport = ndlp->rport;
                if (rport) {
@@ -2367,6 +2388,8 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
        IOCB_t *irsp;
        struct lpfc_sli *psli;
        struct lpfcMboxq *mbox;
+       unsigned long flags;
+       uint32_t skip_recovery = 0;
 
        psli = &phba->sli;
        /* we pass cmdiocb to state machine which needs rspiocb as well */
@@ -2381,47 +2404,52 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                "LOGO cmpl:       status:x%x/x%x did:x%x",
                irsp->ulpStatus, irsp->un.ulpWord[4],
                ndlp->nlp_DID);
+
        /* LOGO completes to NPort <nlp_DID> */
        lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
                         "0105 LOGO completes to NPort x%x "
                         "Data: x%x x%x x%x x%x\n",
                         ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
                         irsp->ulpTimeout, vport->num_disc_nodes);
-       /* Check to see if link went down during discovery */
-       if (lpfc_els_chk_latt(vport))
+
+       if (lpfc_els_chk_latt(vport)) {
+               skip_recovery = 1;
                goto out;
+       }
 
+       /* Check to see if link went down during discovery */
        if (ndlp->nlp_flag & NLP_TARGET_REMOVE) {
                /* NLP_EVT_DEVICE_RM should unregister the RPI
                 * which should abort all outstanding IOs.
                 */
                lpfc_disc_state_machine(vport, ndlp, cmdiocb,
                                        NLP_EVT_DEVICE_RM);
+               skip_recovery = 1;
                goto out;
        }
 
        if (irsp->ulpStatus) {
                /* Check for retry */
-               if (lpfc_els_retry(phba, cmdiocb, rspiocb))
+               if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
                        /* ELS command is being retried */
+                       skip_recovery = 1;
                        goto out;
+               }
                /* LOGO failed */
                lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
                                 "2756 LOGO failure DID:%06X Status:x%x/x%x\n",
                                 ndlp->nlp_DID, irsp->ulpStatus,
                                 irsp->un.ulpWord[4]);
                /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
-               if (lpfc_error_lost_link(irsp))
+               if (lpfc_error_lost_link(irsp)) {
+                       skip_recovery = 1;
                        goto out;
-               else
-                       lpfc_disc_state_machine(vport, ndlp, cmdiocb,
-                                               NLP_EVT_CMPL_LOGO);
-       } else
-               /* Good status, call state machine.
-                * This will unregister the rpi if needed.
-                */
-               lpfc_disc_state_machine(vport, ndlp, cmdiocb,
-                                       NLP_EVT_CMPL_LOGO);
+               }
+       }
+
+       /* Call state machine. This will unregister the rpi if needed. */
+       lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_LOGO);
+
 out:
        lpfc_els_free_iocb(phba, cmdiocb);
        /* If we are in pt2pt mode, we could rcv new S_ID on PLOGI */
@@ -2436,9 +2464,30 @@ out:
                        if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) ==
                                MBX_NOT_FINISHED) {
                                mempool_free(mbox, phba->mbox_mem_pool);
+                               skip_recovery = 1;
                        }
                }
        }
+
+       /*
+        * If the node is a target, the handling attempts to recover the port.
+        * For any other port type, the rpi is unregistered as an implicit
+        * LOGO.
+        */
+       if ((ndlp->nlp_type & NLP_FCP_TARGET) && (skip_recovery == 0)) {
+               lpfc_cancel_retry_delay_tmo(vport, ndlp);
+               spin_lock_irqsave(shost->host_lock, flags);
+               ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+               spin_unlock_irqrestore(shost->host_lock, flags);
+
+               lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+                                "3187 LOGO completes to NPort x%x: Start "
+                                "Recovery Data: x%x x%x x%x x%x\n",
+                                ndlp->nlp_DID, irsp->ulpStatus,
+                                irsp->un.ulpWord[4], irsp->ulpTimeout,
+                                vport->num_disc_nodes);
+               lpfc_disc_start(vport);
+       }
        return;
 }
 
@@ -2501,10 +2550,27 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                "Issue LOGO:      did:x%x",
                ndlp->nlp_DID, 0, 0);
 
+       /*
+        * If we are issuing a LOGO, we may try to recover the remote NPort
+        * by issuing a PLOGI later. Even though we issue ELS cmds by the
+        * VPI, if we have a valid RPI, and that RPI gets unreg'ed while
+        * that ELS command is in-flight, the HBA returns an IOERR_INVALID_RPI
+        * for that ELS cmd. To avoid this situation, let's get rid of the
+        * RPI right now, before any ELS cmds are sent.
+        */
+       spin_lock_irq(shost->host_lock);
+       ndlp->nlp_flag |= NLP_ISSUE_LOGO;
+       spin_unlock_irq(shost->host_lock);
+       if (lpfc_unreg_rpi(vport, ndlp)) {
+               lpfc_els_free_iocb(phba, elsiocb);
+               return 0;
+       }
+
        phba->fc_stat.elsXmitLOGO++;
        elsiocb->iocb_cmpl = lpfc_cmpl_els_logo;
        spin_lock_irq(shost->host_lock);
        ndlp->nlp_flag |= NLP_LOGO_SND;
+       ndlp->nlp_flag &= ~NLP_ISSUE_LOGO;
        spin_unlock_irq(shost->host_lock);
        rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
 
@@ -2920,7 +2986,7 @@ lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
        case ELS_CMD_LOGO:
                if (!lpfc_issue_els_logo(vport, ndlp, retry)) {
                        ndlp->nlp_prev_state = ndlp->nlp_state;
-                       lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+                       lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
                }
                break;
        case ELS_CMD_FDISC:
@@ -3007,7 +3073,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                }
                break;
        case IOSTAT_LOCAL_REJECT:
-               switch ((irsp->un.ulpWord[4] & 0xff)) {
+               switch ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK)) {
                case IOERR_LOOP_OPEN_FAILURE:
                        if (cmd == ELS_CMD_FLOGI) {
                                if (PCI_DEVICE_ID_HORNET ==
@@ -3214,7 +3280,8 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 
                if (((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) &&
                        ((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
-                       ((irsp->un.ulpWord[4] & 0xff) != IOERR_NO_RESOURCES))) {
+                       ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
+                       IOERR_NO_RESOURCES))) {
                        /* Don't reset timer for no resources */
 
                        /* If discovery / RSCN timer is running, reset it */
@@ -3273,7 +3340,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                        return 1;
                case ELS_CMD_LOGO:
                        ndlp->nlp_prev_state = ndlp->nlp_state;
-                       lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+                       lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
                        lpfc_issue_els_logo(vport, ndlp, cmdiocb->retry);
                        return 1;
                }
@@ -3533,13 +3600,17 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
        lpfc_mbuf_free(phba, mp->virt, mp->phys);
        kfree(mp);
        mempool_free(pmb, phba->mbox_mem_pool);
-       if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
-               lpfc_nlp_put(ndlp);
-               /* This is the end of the default RPI cleanup logic for this
-                * ndlp. If no other discovery threads are using this ndlp.
-                * we should free all resources associated with it.
-                */
-               lpfc_nlp_not_used(ndlp);
+       if (ndlp) {
+               if (NLP_CHK_NODE_ACT(ndlp)) {
+                       lpfc_nlp_put(ndlp);
+                       /* This is the end of the default RPI cleanup logic for
+                        * this ndlp. If no other discovery threads are using
+                        * this ndlp, free all resources associated with it.
+                        */
+                       lpfc_nlp_not_used(ndlp);
+               } else {
+                       lpfc_drop_node(ndlp->vport, ndlp);
+               }
        }
 
        return;
@@ -6803,7 +6874,8 @@ lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
        if (icmd->ulpStatus == IOSTAT_NEED_BUFFER) {
                lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
        } else if (icmd->ulpStatus == IOSTAT_LOCAL_REJECT &&
-           (icmd->un.ulpWord[4] & 0xff) == IOERR_RCV_BUFFER_WAITING) {
+                  (icmd->un.ulpWord[4] & IOERR_PARAM_MASK) ==
+                  IOERR_RCV_BUFFER_WAITING) {
                phba->fc_stat.NoRcvBuf++;
                /* Not enough posted buffers; Try posting more buffers */
                if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
@@ -7985,3 +8057,47 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
        spin_unlock_irqrestore(&phba->hbalock, iflag);
        return;
 }
+
+/* lpfc_sli_abts_recover_port - Recover a port that failed a BLS_ABORT req.
+ * @vport: pointer to virtual port object.
+ * @ndlp: nodelist pointer for the impacted node.
+ *
+ * The driver calls this routine in response to an SLI4 XRI ABORT CQE
+ * or an SLI3 ASYNC_STATUS_CN event from the port.  For either event,
+ * the driver is required to send a LOGO to the remote node before it
+ * attempts to recover its login to the remote node.
+ */
+void
+lpfc_sli_abts_recover_port(struct lpfc_vport *vport,
+                          struct lpfc_nodelist *ndlp)
+{
+       struct Scsi_Host *shost;
+       struct lpfc_hba *phba;
+       unsigned long flags = 0;
+
+       shost = lpfc_shost_from_vport(vport);
+       phba = vport->phba;
+       if (ndlp->nlp_state != NLP_STE_MAPPED_NODE) {
+               lpfc_printf_log(phba, KERN_INFO,
+                               LOG_SLI, "3093 No rport recovery needed. "
+                               "rport in state 0x%x\n", ndlp->nlp_state);
+               return;
+       }
+       lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+                       "3094 Start rport recovery on shost id 0x%x "
+                       "fc_id 0x%06x vpi 0x%x rpi 0x%x state 0x%x "
+                       "flags 0x%x\n",
+                       shost->host_no, ndlp->nlp_DID,
+                       vport->vpi, ndlp->nlp_rpi, ndlp->nlp_state,
+                       ndlp->nlp_flag);
+       /*
+        * The rport is not responding.  Remove the FCP-2 flag to prevent
+        * an ADISC in the follow-up recovery code.
+        */
+       spin_lock_irqsave(shost->host_lock, flags);
+       ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
+       spin_unlock_irqrestore(shost->host_lock, flags);
+       lpfc_issue_els_logo(vport, ndlp, 0);
+       lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
+}
+
index 9b4f92941dce585aaddbfaa401d9e3ae5d7f7312..e9845d2ecf10db6f6fd9eca8681aba1febee305a 100644 (file)
@@ -123,6 +123,10 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
                "rport devlosscb: sid:x%x did:x%x flg:x%x",
                ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
 
+       lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
+                        "3181 dev_loss_callbk x%06x, rport %p flg x%x\n",
+                        ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag);
+
        /* Don't defer this if we are in the process of deleting the vport
         * or unloading the driver. The unload will cleanup the node
         * appropriately we just need to cleanup the ndlp rport info here.
@@ -142,6 +146,15 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
        if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
                return;
 
+       if (ndlp->nlp_type & NLP_FABRIC) {
+
+               /* If the WWPN of the rport and ndlp don't match, ignore it */
+               if (rport->port_name != wwn_to_u64(ndlp->nlp_portname.u.wwn)) {
+                       put_device(&rport->dev);
+                       return;
+               }
+       }
+
        evtp = &ndlp->dev_loss_evt;
 
        if (!list_empty(&evtp->evt_listp))
@@ -202,6 +215,10 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
                "rport devlosstmo:did:x%x type:x%x id:x%x",
                ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id);
 
+       lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
+                        "3182 dev_loss_tmo_handler x%06x, rport %p flg x%x\n",
+                        ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag);
+
        /* Don't defer this if we are in the process of deleting the vport
         * or unloading the driver. The unload will cleanup the node
         * appropriately we just need to cleanup the ndlp rport info here.
@@ -3492,7 +3509,7 @@ lpfc_create_static_vport(struct lpfc_hba *phba)
        LPFC_MBOXQ_t *pmb = NULL;
        MAILBOX_t *mb;
        struct static_vport_info *vport_info;
-       int rc = 0, i;
+       int mbx_wait_rc = 0, i;
        struct fc_vport_identifiers vport_id;
        struct fc_vport *new_fc_vport;
        struct Scsi_Host *shost;
@@ -3509,7 +3526,7 @@ lpfc_create_static_vport(struct lpfc_hba *phba)
                                " allocate mailbox memory\n");
                return;
        }
-
+       memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
        mb = &pmb->u.mb;
 
        vport_info = kzalloc(sizeof(struct static_vport_info), GFP_KERNEL);
@@ -3523,24 +3540,31 @@ lpfc_create_static_vport(struct lpfc_hba *phba)
 
        vport_buff = (uint8_t *) vport_info;
        do {
+               /* free dma buffer from previous round */
+               if (pmb->context1) {
+                       mp = (struct lpfc_dmabuf *)pmb->context1;
+                       lpfc_mbuf_free(phba, mp->virt, mp->phys);
+                       kfree(mp);
+               }
                if (lpfc_dump_static_vport(phba, pmb, offset))
                        goto out;
 
                pmb->vport = phba->pport;
-               rc = lpfc_sli_issue_mbox_wait(phba, pmb, LPFC_MBOX_TMO);
+               mbx_wait_rc = lpfc_sli_issue_mbox_wait(phba, pmb,
+                                                       LPFC_MBOX_TMO);
 
-               if ((rc != MBX_SUCCESS) || mb->mbxStatus) {
+               if ((mbx_wait_rc != MBX_SUCCESS) || mb->mbxStatus) {
                        lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
                                "0544 lpfc_create_static_vport failed to"
                                " issue dump mailbox command ret 0x%x "
                                "status 0x%x\n",
-                               rc, mb->mbxStatus);
+                               mbx_wait_rc, mb->mbxStatus);
                        goto out;
                }
 
                if (phba->sli_rev == LPFC_SLI_REV4) {
                        byte_count = pmb->u.mqe.un.mb_words[5];
-                       mp = (struct lpfc_dmabuf *) pmb->context2;
+                       mp = (struct lpfc_dmabuf *)pmb->context1;
                        if (byte_count > sizeof(struct static_vport_info) -
                                        offset)
                                byte_count = sizeof(struct static_vport_info)
@@ -3604,9 +3628,9 @@ lpfc_create_static_vport(struct lpfc_hba *phba)
 
 out:
        kfree(vport_info);
-       if (rc != MBX_TIMEOUT) {
-               if (pmb->context2) {
-                       mp = (struct lpfc_dmabuf *) pmb->context2;
+       if (mbx_wait_rc != MBX_TIMEOUT) {
+               if (pmb->context1) {
+                       mp = (struct lpfc_dmabuf *)pmb->context1;
                        lpfc_mbuf_free(phba, mp->virt, mp->phys);
                        kfree(mp);
                }
@@ -3834,6 +3858,10 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
        if (rport_ids.roles !=  FC_RPORT_ROLE_UNKNOWN)
                fc_remote_port_rolechg(rport, rport_ids.roles);
 
+       lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
+                        "3183 rport register x%06x, rport %p role x%x\n",
+                        ndlp->nlp_DID, rport, rport_ids.roles);
+
        if ((rport->scsi_target_id != -1) &&
            (rport->scsi_target_id < LPFC_MAX_TARGET)) {
                ndlp->nlp_sid = rport->scsi_target_id;
@@ -3850,6 +3878,10 @@ lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
                "rport delete:    did:x%x flg:x%x type x%x",
                ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
 
+       lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
+                        "3184 rport unregister x%06x, rport %p\n",
+                        ndlp->nlp_DID, rport);
+
        fc_remote_port_delete(rport);
 
        return;
@@ -3964,6 +3996,7 @@ lpfc_nlp_state_name(char *buffer, size_t size, int state)
                [NLP_STE_ADISC_ISSUE] = "ADISC",
                [NLP_STE_REG_LOGIN_ISSUE] = "REGLOGIN",
                [NLP_STE_PRLI_ISSUE] = "PRLI",
+               [NLP_STE_LOGO_ISSUE] = "LOGO",
                [NLP_STE_UNMAPPED_NODE] = "UNMAPPED",
                [NLP_STE_MAPPED_NODE] = "MAPPED",
                [NLP_STE_NPR_NODE] = "NPR",
@@ -4330,6 +4363,26 @@ lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
        return 0;
 }
 
+/**
+ * lpfc_nlp_logo_unreg - Unreg mailbox completion handler before LOGO
+ * @phba: Pointer to HBA context object.
+ * @pmb: Pointer to mailbox object.
+ *
+ * This function will issue an ELS LOGO command after completing
+ * the UNREG_RPI.
+ **/
+void
+lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+       struct lpfc_vport  *vport = pmb->vport;
+       struct lpfc_nodelist *ndlp;
+
+       ndlp = (struct lpfc_nodelist *)(pmb->context1);
+       if (!ndlp)
+               return;
+       lpfc_issue_els_logo(vport, ndlp, 0);
+}
+
 /*
  * Free rpi associated with LPFC_NODELIST entry.
  * This routine is called from lpfc_freenode(), when we are removing
@@ -4354,9 +4407,16 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
                        rpi = ndlp->nlp_rpi;
                        if (phba->sli_rev == LPFC_SLI_REV4)
                                rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
+
                        lpfc_unreg_login(phba, vport->vpi, rpi, mbox);
                        mbox->vport = vport;
-                       mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+                       if (ndlp->nlp_flag & NLP_ISSUE_LOGO) {
+                               mbox->context1 = ndlp;
+                               mbox->mbox_cmpl = lpfc_nlp_logo_unreg;
+                       } else {
+                               mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+                       }
+
                        rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
                        if (rc == MBX_NOT_FINISHED)
                                mempool_free(mbox, phba->mbox_mem_pool);
@@ -4499,9 +4559,13 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
                lpfc_disable_node(vport, ndlp);
        }
 
+
+       /* Don't need to clean up REG_LOGIN64 cmds for Default RPI cleanup */
+
        /* cleanup any ndlp on mbox q waiting for reglogin cmpl */
        if ((mb = phba->sli.mbox_active)) {
                if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
+                  !(mb->mbox_flag & LPFC_MBX_IMED_UNREG) &&
                   (ndlp == (struct lpfc_nodelist *) mb->context2)) {
                        mb->context2 = NULL;
                        mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
@@ -4512,6 +4576,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
        /* Cleanup REG_LOGIN completions which are not yet processed */
        list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
                if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) ||
+                       (mb->mbox_flag & LPFC_MBX_IMED_UNREG) ||
                        (ndlp != (struct lpfc_nodelist *) mb->context2))
                        continue;
 
@@ -4521,6 +4586,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 
        list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
                if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
+                  !(mb->mbox_flag & LPFC_MBX_IMED_UNREG) &&
                    (ndlp == (struct lpfc_nodelist *) mb->context2)) {
                        mp = (struct lpfc_dmabuf *) (mb->context1);
                        if (mp) {
@@ -4585,7 +4651,7 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
                                mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
                                mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
                                mbox->vport = vport;
-                               mbox->context2 = NULL;
+                               mbox->context2 = ndlp;
                                rc =lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
                                if (rc == MBX_NOT_FINISHED) {
                                        mempool_free(mbox, phba->mbox_mem_pool);
@@ -5365,9 +5431,17 @@ __lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
        struct lpfc_nodelist *ndlp;
 
        list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
-               if (filter(ndlp, param))
+               if (filter(ndlp, param)) {
+                       lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+                                        "3185 FIND node filter %p DID "
+                                        "Data: x%p x%x x%x\n",
+                                        filter, ndlp, ndlp->nlp_DID,
+                                        ndlp->nlp_flag);
                        return ndlp;
+               }
        }
+       lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+                        "3186 FIND node filter %p NOT FOUND.\n", filter);
        return NULL;
 }
 
index 41bb1d2fb625e4333360b29fe23cc2053dc1d3c1..7398ca862e9750be3b943b4b0f14f02973648331 100644 (file)
@@ -1188,8 +1188,8 @@ typedef struct {
  */
 
 /* Number of rings currently used and available. */
-#define MAX_CONFIGURED_RINGS     3
-#define MAX_RINGS                4
+#define MAX_SLI3_CONFIGURED_RINGS     3
+#define MAX_SLI3_RINGS                4
 
 /* IOCB / Mailbox is owned by FireFly */
 #define OWN_CHIP        1
@@ -1251,6 +1251,8 @@ typedef struct {
 #define PCI_VENDOR_ID_SERVERENGINE  0x19a2
 #define PCI_DEVICE_ID_TIGERSHARK    0x0704
 #define PCI_DEVICE_ID_TOMCAT        0x0714
+#define PCI_DEVICE_ID_SKYHAWK       0x0724
+#define PCI_DEVICE_ID_SKYHAWK_VF    0x072c
 
 #define JEDEC_ID_ADDRESS            0x0080001c
 #define FIREFLY_JEDEC_ID            0x1ACC
@@ -1458,6 +1460,7 @@ typedef struct {          /* FireFly BIU registers */
 #define MBX_UNREG_FCFI     0xA2
 #define MBX_INIT_VFI        0xA3
 #define MBX_INIT_VPI        0xA4
+#define MBX_ACCESS_VDATA    0xA5
 
 #define MBX_AUTH_PORT       0xF8
 #define MBX_SECURITY_MGMT   0xF9
@@ -2991,7 +2994,7 @@ typedef struct _PCB {
 
        uint32_t pgpAddrLow;
        uint32_t pgpAddrHigh;
-       SLI2_RDSC rdsc[MAX_RINGS];
+       SLI2_RDSC rdsc[MAX_SLI3_RINGS];
 } PCB_t;
 
 /* NEW_FEATURE */
@@ -3101,18 +3104,18 @@ struct lpfc_pgp {
 
 struct sli2_desc {
        uint32_t unused1[16];
-       struct lpfc_hgp host[MAX_RINGS];
-       struct lpfc_pgp port[MAX_RINGS];
+       struct lpfc_hgp host[MAX_SLI3_RINGS];
+       struct lpfc_pgp port[MAX_SLI3_RINGS];
 };
 
 struct sli3_desc {
-       struct lpfc_hgp host[MAX_RINGS];
+       struct lpfc_hgp host[MAX_SLI3_RINGS];
        uint32_t reserved[8];
        uint32_t hbq_put[16];
 };
 
 struct sli3_pgp {
-       struct lpfc_pgp port[MAX_RINGS];
+       struct lpfc_pgp port[MAX_SLI3_RINGS];
        uint32_t hbq_get[16];
 };
 
@@ -3242,6 +3245,7 @@ typedef struct {
 #define IOERR_SLI_DOWN                0x101  /* ulpStatus  - Driver defined */
 #define IOERR_SLI_BRESET              0x102
 #define IOERR_SLI_ABORTED             0x103
+#define IOERR_PARAM_MASK              0x1ff
 } PARM_ERR;
 
 typedef union {
index 953603a7a43c9c1072f2fcf23ae9b6cca31f7882..834b699cac7673db4972c82381bdf5e0a222bab0 100644 (file)
@@ -187,11 +187,17 @@ struct lpfc_sli_intf {
 /* Active interrupt test count */
 #define LPFC_ACT_INTR_CNT      4
 
+/* Algorithms for scheduling FCP commands to WQs */
+#define        LPFC_FCP_SCHED_ROUND_ROBIN      0
+#define        LPFC_FCP_SCHED_BY_CPU           1
+
 /* Delay Multiplier constant */
 #define LPFC_DMULT_CONST       651042
-#define LPFC_MIM_IMAX          636
-#define LPFC_FP_DEF_IMAX       10000
-#define LPFC_SP_DEF_IMAX       10000
+
+/* Configuration of Interrupts / sec for entire HBA port */
+#define LPFC_MIN_IMAX          5000
+#define LPFC_MAX_IMAX          5000000
+#define LPFC_DEF_IMAX          50000
 
 /* PORT_CAPABILITIES constants. */
 #define LPFC_MAX_SUPPORTED_PAGES       8
@@ -338,7 +344,7 @@ struct lpfc_cqe {
  * Define mask value for xri_aborted and wcqe completed CQE extended status.
  * Currently, extended status is limited to 9 bits (0x0 -> 0x103) .
  */
-#define WCQE_PARAM_MASK                0x1FF;
+#define WCQE_PARAM_MASK                0x1FF
 
 /* completion queue entry for wqe completions */
 struct lpfc_wcqe_complete {
@@ -880,13 +886,19 @@ struct mbox_header {
 #define LPFC_MBOX_OPCODE_EQ_DESTROY                    0x37
 #define LPFC_MBOX_OPCODE_QUERY_FW_CFG                  0x3A
 #define LPFC_MBOX_OPCODE_FUNCTION_RESET                        0x3D
+#define LPFC_MBOX_OPCODE_SET_PHYSICAL_LINK_CONFIG      0x3E
+#define LPFC_MBOX_OPCODE_SET_BOOT_CONFIG               0x43
 #define LPFC_MBOX_OPCODE_GET_PORT_NAME                 0x4D
 #define LPFC_MBOX_OPCODE_MQ_CREATE_EXT                 0x5A
+#define LPFC_MBOX_OPCODE_GET_VPD_DATA                  0x5B
+#define LPFC_MBOX_OPCODE_SEND_ACTIVATION               0x73
+#define LPFC_MBOX_OPCODE_RESET_LICENSES                        0x74
 #define LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO          0x9A
 #define LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT         0x9B
 #define LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT             0x9C
 #define LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT           0x9D
 #define LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG           0xA0
+#define LPFC_MBOX_OPCODE_GET_PROFILE_CAPACITIES                0xA1
 #define LPFC_MBOX_OPCODE_GET_PROFILE_CONFIG            0xA4
 #define LPFC_MBOX_OPCODE_SET_PROFILE_CONFIG            0xA5
 #define LPFC_MBOX_OPCODE_GET_PROFILE_LIST              0xA6
@@ -1382,6 +1394,11 @@ struct lpfc_mbx_set_link_diag_state {
 #define lpfc_mbx_set_diag_state_diag_SHIFT     0
 #define lpfc_mbx_set_diag_state_diag_MASK      0x00000001
 #define lpfc_mbx_set_diag_state_diag_WORD      word0
+#define lpfc_mbx_set_diag_state_diag_bit_valid_SHIFT   2
+#define lpfc_mbx_set_diag_state_diag_bit_valid_MASK    0x00000001
+#define lpfc_mbx_set_diag_state_diag_bit_valid_WORD    word0
+#define LPFC_DIAG_STATE_DIAG_BIT_VALID_NO_CHANGE       0
+#define LPFC_DIAG_STATE_DIAG_BIT_VALID_CHANGE          1
 #define lpfc_mbx_set_diag_state_link_num_SHIFT 16
 #define lpfc_mbx_set_diag_state_link_num_MASK  0x0000003F
 #define lpfc_mbx_set_diag_state_link_num_WORD  word0
@@ -2556,7 +2573,7 @@ struct lpfc_mbx_get_sli4_parameters {
 };
 
 struct lpfc_rscr_desc_generic {
-#define LPFC_RSRC_DESC_WSIZE                   18
+#define LPFC_RSRC_DESC_WSIZE                   22
        uint32_t desc[LPFC_RSRC_DESC_WSIZE];
 };
 
@@ -2566,6 +2583,9 @@ struct lpfc_rsrc_desc_pcie {
 #define lpfc_rsrc_desc_pcie_type_MASK          0x000000ff
 #define lpfc_rsrc_desc_pcie_type_WORD          word0
 #define LPFC_RSRC_DESC_TYPE_PCIE               0x40
+#define lpfc_rsrc_desc_pcie_length_SHIFT       8
+#define lpfc_rsrc_desc_pcie_length_MASK                0x000000ff
+#define lpfc_rsrc_desc_pcie_length_WORD                word0
        uint32_t word1;
 #define lpfc_rsrc_desc_pcie_pfnum_SHIFT                0
 #define lpfc_rsrc_desc_pcie_pfnum_MASK         0x000000ff
@@ -2593,6 +2613,12 @@ struct lpfc_rsrc_desc_fcfcoe {
 #define lpfc_rsrc_desc_fcfcoe_type_MASK                0x000000ff
 #define lpfc_rsrc_desc_fcfcoe_type_WORD                word0
 #define LPFC_RSRC_DESC_TYPE_FCFCOE             0x43
+#define lpfc_rsrc_desc_fcfcoe_length_SHIFT     8
+#define lpfc_rsrc_desc_fcfcoe_length_MASK      0x000000ff
+#define lpfc_rsrc_desc_fcfcoe_length_WORD      word0
+#define LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD     0
+#define LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH   72
+#define LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH   88
        uint32_t word1;
 #define lpfc_rsrc_desc_fcfcoe_vfnum_SHIFT      0
 #define lpfc_rsrc_desc_fcfcoe_vfnum_MASK       0x000000ff
@@ -2651,6 +2677,12 @@ struct lpfc_rsrc_desc_fcfcoe {
 #define lpfc_rsrc_desc_fcfcoe_eq_cnt_SHIFT     16
 #define lpfc_rsrc_desc_fcfcoe_eq_cnt_MASK      0x0000ffff
 #define lpfc_rsrc_desc_fcfcoe_eq_cnt_WORD      word13
+/* extended FC/FCoE Resource Descriptor when length = 88 bytes */
+       uint32_t bw_min;
+       uint32_t bw_max;
+       uint32_t iops_min;
+       uint32_t iops_max;
+       uint32_t reserved[4];
 };
 
 struct lpfc_func_cfg {
index 628a703abddb75179fa692c0ab13f135c4e4fdbe..8a55a586dd6540fd9acb4f18bdf41a7c3645e3ee 100644 (file)
@@ -480,11 +480,11 @@ lpfc_config_port_post(struct lpfc_hba *phba)
        phba->link_state = LPFC_LINK_DOWN;
 
        /* Only process IOCBs on ELS ring till hba_state is READY */
-       if (psli->ring[psli->extra_ring].cmdringaddr)
+       if (psli->ring[psli->extra_ring].sli.sli3.cmdringaddr)
                psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT;
-       if (psli->ring[psli->fcp_ring].cmdringaddr)
+       if (psli->ring[psli->fcp_ring].sli.sli3.cmdringaddr)
                psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
-       if (psli->ring[psli->next_ring].cmdringaddr)
+       if (psli->ring[psli->next_ring].sli.sli3.cmdringaddr)
                psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;
 
        /* Post receive buffers for desired rings */
@@ -2059,6 +2059,11 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
                oneConnect = 1;
                m = (typeof(m)){"OCe15100", "PCIe", "FCoE"};
                break;
+       case PCI_DEVICE_ID_SKYHAWK:
+       case PCI_DEVICE_ID_SKYHAWK_VF:
+               oneConnect = 1;
+               m = (typeof(m)){"OCe14000", "PCIe", "FCoE"};
+               break;
        default:
                m = (typeof(m)){"Unknown", "", ""};
                break;
@@ -4546,6 +4551,13 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
                        phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
        }
 
+       if (!phba->sli.ring)
+               phba->sli.ring = (struct lpfc_sli_ring *)
+                       kzalloc(LPFC_SLI3_MAX_RING *
+                       sizeof(struct lpfc_sli_ring), GFP_KERNEL);
+       if (!phba->sli.ring)
+               return -ENOMEM;
+
        /*
         * Since the sg_tablesize is module parameter, the sg_dma_buf_size
         * used to create the sg_dma_buf_pool must be dynamically calculated.
@@ -4690,6 +4702,10 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
        /* Get all the module params for configuring this host */
        lpfc_get_cfgparam(phba);
        phba->max_vpi = LPFC_MAX_VPI;
+
+       /* Eventually cfg_fcp_eq_count / cfg_fcp_wq_count will be deprecated */
+       phba->cfg_fcp_io_channel = phba->cfg_fcp_eq_count;
+
        /* This will be set to correct value after the read_config mbox */
        phba->max_vports = 0;
 
@@ -4704,6 +4720,16 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
        if (phba->cfg_enable_bg)
                sges_per_segment = 2;
 
+       /*
+        * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands
+        * we will associate a new ring, for each FCP fastpath EQ/CQ/WQ tuple.
+        */
+       if (!phba->sli.ring)
+               phba->sli.ring = kzalloc(
+                       (LPFC_SLI3_MAX_RING + phba->cfg_fcp_io_channel) *
+                       sizeof(struct lpfc_sli_ring), GFP_KERNEL);
+       if (!phba->sli.ring)
+               return -ENOMEM;
        /*
         * Since the sg_tablesize is module parameter, the sg_dma_buf_size
         * used to create the sg_dma_buf_pool must be dynamically calculated.
@@ -4909,21 +4935,15 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
                goto out_remove_rpi_hdrs;
        }
 
-       /*
-        * The cfg_fcp_eq_count can be zero whenever there is exactly one
-        * interrupt vector.  This is not an error
-        */
-       if (phba->cfg_fcp_eq_count) {
-               phba->sli4_hba.fcp_eq_hdl =
-                               kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
-                                   phba->cfg_fcp_eq_count), GFP_KERNEL);
-               if (!phba->sli4_hba.fcp_eq_hdl) {
-                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                                       "2572 Failed allocate memory for "
-                                       "fast-path per-EQ handle array\n");
-                       rc = -ENOMEM;
-                       goto out_free_fcf_rr_bmask;
-               }
+       phba->sli4_hba.fcp_eq_hdl =
+                       kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
+                           phba->cfg_fcp_io_channel), GFP_KERNEL);
+       if (!phba->sli4_hba.fcp_eq_hdl) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "2572 Failed allocate memory for "
+                               "fast-path per-EQ handle array\n");
+               rc = -ENOMEM;
+               goto out_free_fcf_rr_bmask;
        }
 
        phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
@@ -5550,6 +5570,10 @@ lpfc_hba_free(struct lpfc_hba *phba)
        /* Release the driver assigned board number */
        idr_remove(&lpfc_hba_index, phba->brd_no);
 
+       /* Free memory allocated with sli rings */
+       kfree(phba->sli.ring);
+       phba->sli.ring = NULL;
+
        kfree(phba);
        return;
 }
@@ -6275,8 +6299,9 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
        uint32_t shdr_status, shdr_add_status;
        struct lpfc_mbx_get_func_cfg *get_func_cfg;
        struct lpfc_rsrc_desc_fcfcoe *desc;
+       char *pdesc_0;
        uint32_t desc_count;
-       int length, i, rc = 0;
+       int length, i, rc = 0, rc2;
 
        pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb) {
@@ -6388,18 +6413,17 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
                         LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
                         length, LPFC_SLI4_MBX_EMBED);
 
-       rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
+       rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
        shdr = (union lpfc_sli4_cfg_shdr *)
                                &pmb->u.mqe.un.sli4_config.header.cfg_shdr;
        shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
-       if (rc || shdr_status || shdr_add_status) {
+       if (rc2 || shdr_status || shdr_add_status) {
                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                                "3026 Mailbox failed , mbxCmd x%x "
                                "GET_FUNCTION_CONFIG, mbxStatus x%x\n",
                                bf_get(lpfc_mqe_command, &pmb->u.mqe),
                                bf_get(lpfc_mqe_status, &pmb->u.mqe));
-               rc = -EIO;
                goto read_cfg_out;
        }
 
@@ -6407,11 +6431,18 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
        get_func_cfg = &pmb->u.mqe.un.get_func_cfg;
        desc_count = get_func_cfg->func_cfg.rsrc_desc_count;
 
+       pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0];
+       desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0;
+       length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc);
+       if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD)
+               length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH;
+       else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH)
+               goto read_cfg_out;
+
        for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
-               desc = (struct lpfc_rsrc_desc_fcfcoe *)
-                       &get_func_cfg->func_cfg.desc[i];
+               desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i);
                if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
-                   bf_get(lpfc_rsrc_desc_pcie_type, desc)) {
+                   bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) {
                        phba->sli4_hba.iov.pf_number =
                                bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
                        phba->sli4_hba.iov.vf_number =
@@ -6425,13 +6456,11 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
                                "3027 GET_FUNCTION_CONFIG: pf_number:%d, "
                                "vf_number:%d\n", phba->sli4_hba.iov.pf_number,
                                phba->sli4_hba.iov.vf_number);
-       else {
+       else
                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                                "3028 GET_FUNCTION_CONFIG: failed to find "
                                "Resrouce Descriptor:x%x\n",
                                LPFC_RSRC_DESC_TYPE_FCFCOE);
-               rc = -EIO;
-       }
 
 read_cfg_out:
        mempool_free(pmb, phba->mbox_mem_pool);
@@ -6512,53 +6541,40 @@ lpfc_setup_endian_order(struct lpfc_hba *phba)
 static int
 lpfc_sli4_queue_verify(struct lpfc_hba *phba)
 {
-       int cfg_fcp_wq_count;
-       int cfg_fcp_eq_count;
+       int cfg_fcp_io_channel;
+       uint32_t cpu;
+       uint32_t i = 0;
+
 
        /*
-        * Sanity check for confiugred queue parameters against the run-time
+        * Sanity check for configured queue parameters against the run-time
         * device parameters
         */
 
-       /* Sanity check on FCP fast-path WQ parameters */
-       cfg_fcp_wq_count = phba->cfg_fcp_wq_count;
-       if (cfg_fcp_wq_count >
-           (phba->sli4_hba.max_cfg_param.max_wq - LPFC_SP_WQN_DEF)) {
-               cfg_fcp_wq_count = phba->sli4_hba.max_cfg_param.max_wq -
-                                  LPFC_SP_WQN_DEF;
-               if (cfg_fcp_wq_count < LPFC_FP_WQN_MIN) {
-                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                                       "2581 Not enough WQs (%d) from "
-                                       "the pci function for supporting "
-                                       "FCP WQs (%d)\n",
-                                       phba->sli4_hba.max_cfg_param.max_wq,
-                                       phba->cfg_fcp_wq_count);
-                       goto out_error;
-               }
+       /* Sanity check on HBA EQ parameters */
+       cfg_fcp_io_channel = phba->cfg_fcp_io_channel;
+
+       /* It doesn't make sense to have more io channels than CPUs */
+       for_each_online_cpu(cpu) {
+               i++;
+       }
+       if (i < cfg_fcp_io_channel) {
                lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
-                               "2582 Not enough WQs (%d) from the pci "
-                               "function for supporting the requested "
-                               "FCP WQs (%d), the actual FCP WQs can "
-                               "be supported: %d\n",
-                               phba->sli4_hba.max_cfg_param.max_wq,
-                               phba->cfg_fcp_wq_count, cfg_fcp_wq_count);
-       }
-       /* The actual number of FCP work queues adopted */
-       phba->cfg_fcp_wq_count = cfg_fcp_wq_count;
-
-       /* Sanity check on FCP fast-path EQ parameters */
-       cfg_fcp_eq_count = phba->cfg_fcp_eq_count;
-       if (cfg_fcp_eq_count >
-           (phba->sli4_hba.max_cfg_param.max_eq - LPFC_SP_EQN_DEF)) {
-               cfg_fcp_eq_count = phba->sli4_hba.max_cfg_param.max_eq -
-                                  LPFC_SP_EQN_DEF;
-               if (cfg_fcp_eq_count < LPFC_FP_EQN_MIN) {
+                               "3188 Reducing IO channels to match number of "
+                               "CPUs: from %d to %d\n", cfg_fcp_io_channel, i);
+               cfg_fcp_io_channel = i;
+       }
+
+       if (cfg_fcp_io_channel >
+           phba->sli4_hba.max_cfg_param.max_eq) {
+               cfg_fcp_io_channel = phba->sli4_hba.max_cfg_param.max_eq;
+               if (cfg_fcp_io_channel < LPFC_FCP_IO_CHAN_MIN) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                        "2574 Not enough EQs (%d) from the "
                                        "pci function for supporting FCP "
                                        "EQs (%d)\n",
                                        phba->sli4_hba.max_cfg_param.max_eq,
-                                       phba->cfg_fcp_eq_count);
+                                       phba->cfg_fcp_io_channel);
                        goto out_error;
                }
                lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
@@ -6567,22 +6583,16 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
                                "FCP EQs (%d), the actual FCP EQs can "
                                "be supported: %d\n",
                                phba->sli4_hba.max_cfg_param.max_eq,
-                               phba->cfg_fcp_eq_count, cfg_fcp_eq_count);
-       }
-       /* It does not make sense to have more EQs than WQs */
-       if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) {
-               lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
-                               "2593 The FCP EQ count(%d) cannot be greater "
-                               "than the FCP WQ count(%d), limiting the "
-                               "FCP EQ count to %d\n", cfg_fcp_eq_count,
-                               phba->cfg_fcp_wq_count,
-                               phba->cfg_fcp_wq_count);
-               cfg_fcp_eq_count = phba->cfg_fcp_wq_count;
+                               phba->cfg_fcp_io_channel, cfg_fcp_io_channel);
        }
+
+       /* Eventually cfg_fcp_eq_count / cfg_fcp_wq_count will be deprecated */
+
        /* The actual number of FCP event queues adopted */
-       phba->cfg_fcp_eq_count = cfg_fcp_eq_count;
-       /* The overall number of event queues used */
-       phba->sli4_hba.cfg_eqn = phba->cfg_fcp_eq_count + LPFC_SP_EQN_DEF;
+       phba->cfg_fcp_eq_count = cfg_fcp_io_channel;
+       phba->cfg_fcp_wq_count = cfg_fcp_io_channel;
+       phba->cfg_fcp_io_channel = cfg_fcp_io_channel;
+       phba->sli4_hba.cfg_eqn = cfg_fcp_io_channel;
 
        /* Get EQ depth from module parameter, fake the default for now */
        phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
@@ -6615,50 +6625,104 @@ int
 lpfc_sli4_queue_create(struct lpfc_hba *phba)
 {
        struct lpfc_queue *qdesc;
-       int fcp_eqidx, fcp_cqidx, fcp_wqidx;
+       int idx;
 
        /*
-        * Create Event Queues (EQs)
+        * Create HBA Record arrays.
         */
+       if (!phba->cfg_fcp_io_channel)
+               return -ERANGE;
 
-       /* Create slow path event queue */
-       qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
-                                     phba->sli4_hba.eq_ecount);
-       if (!qdesc) {
+       phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
+       phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
+       phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
+       phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
+       phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
+       phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
+
+       phba->sli4_hba.hba_eq =  kzalloc((sizeof(struct lpfc_queue *) *
+                               phba->cfg_fcp_io_channel), GFP_KERNEL);
+       if (!phba->sli4_hba.hba_eq) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                       "2576 Failed allocate memory for "
+                       "fast-path EQ record array\n");
+               goto out_error;
+       }
+
+       phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
+                               phba->cfg_fcp_io_channel), GFP_KERNEL);
+       if (!phba->sli4_hba.fcp_cq) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                               "0496 Failed allocate slow-path EQ\n");
+                               "2577 Failed allocate memory for fast-path "
+                               "CQ record array\n");
+               goto out_error;
+       }
+
+       phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) *
+                               phba->cfg_fcp_io_channel), GFP_KERNEL);
+       if (!phba->sli4_hba.fcp_wq) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "2578 Failed allocate memory for fast-path "
+                               "WQ record array\n");
                goto out_error;
        }
-       phba->sli4_hba.sp_eq = qdesc;
 
        /*
-        * Create fast-path FCP Event Queue(s).  The cfg_fcp_eq_count can be
-        * zero whenever there is exactly one interrupt vector.  This is not
-        * an error.
+        * Since the first EQ can have multiple CQs associated with it,
+        * this array is used to quickly see if we have a FCP fast-path
+        * CQ match.
         */
-       if (phba->cfg_fcp_eq_count) {
-               phba->sli4_hba.fp_eq = kzalloc((sizeof(struct lpfc_queue *) *
-                                      phba->cfg_fcp_eq_count), GFP_KERNEL);
-               if (!phba->sli4_hba.fp_eq) {
-                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                                       "2576 Failed allocate memory for "
-                                       "fast-path EQ record array\n");
-                       goto out_free_sp_eq;
-               }
+       phba->sli4_hba.fcp_cq_map = kzalloc((sizeof(uint16_t) *
+                                        phba->cfg_fcp_io_channel), GFP_KERNEL);
+       if (!phba->sli4_hba.fcp_cq_map) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "2545 Failed allocate memory for fast-path "
+                               "CQ map\n");
+               goto out_error;
        }
-       for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
+
+       /*
+        * Create HBA Event Queues (EQs).  The cfg_fcp_io_channel specifies
+        * how many EQs to create.
+        */
+       for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
+
+               /* Create EQs */
                qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
                                              phba->sli4_hba.eq_ecount);
                if (!qdesc) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                                       "0497 Failed allocate fast-path EQ\n");
-                       goto out_free_fp_eq;
+                                       "0497 Failed allocate EQ (%d)\n", idx);
+                       goto out_error;
                }
-               phba->sli4_hba.fp_eq[fcp_eqidx] = qdesc;
+               phba->sli4_hba.hba_eq[idx] = qdesc;
+
+               /* Create Fast Path FCP CQs */
+               qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
+                                             phba->sli4_hba.cq_ecount);
+               if (!qdesc) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                                       "0499 Failed allocate fast-path FCP "
+                                       "CQ (%d)\n", idx);
+                       goto out_error;
+               }
+               phba->sli4_hba.fcp_cq[idx] = qdesc;
+
+               /* Create Fast Path FCP WQs */
+               qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
+                                             phba->sli4_hba.wq_ecount);
+               if (!qdesc) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                                       "0503 Failed allocate fast-path FCP "
+                                       "WQ (%d)\n", idx);
+                       goto out_error;
+               }
+               phba->sli4_hba.fcp_wq[idx] = qdesc;
        }
 
+
        /*
-        * Create Complete Queues (CQs)
+        * Create Slow Path Completion Queues (CQs)
         */
 
        /* Create slow-path Mailbox Command Complete Queue */
@@ -6667,7 +6731,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
        if (!qdesc) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0500 Failed allocate slow-path mailbox CQ\n");
-               goto out_free_fp_eq;
+               goto out_error;
        }
        phba->sli4_hba.mbx_cq = qdesc;
 
@@ -6677,59 +6741,29 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
        if (!qdesc) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0501 Failed allocate slow-path ELS CQ\n");
-               goto out_free_mbx_cq;
+               goto out_error;
        }
        phba->sli4_hba.els_cq = qdesc;
 
 
        /*
-        * Create fast-path FCP Completion Queue(s), one-to-one with FCP EQs.
-        * If there are no FCP EQs then create exactly one FCP CQ.
+        * Create Slow Path Work Queues (WQs)
         */
-       if (phba->cfg_fcp_eq_count)
-               phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
-                                                phba->cfg_fcp_eq_count),
-                                               GFP_KERNEL);
-       else
-               phba->sli4_hba.fcp_cq = kzalloc(sizeof(struct lpfc_queue *),
-                                               GFP_KERNEL);
-       if (!phba->sli4_hba.fcp_cq) {
-               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                               "2577 Failed allocate memory for fast-path "
-                               "CQ record array\n");
-               goto out_free_els_cq;
-       }
-       fcp_cqidx = 0;
-       do {
-               qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
-                                             phba->sli4_hba.cq_ecount);
-               if (!qdesc) {
-                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                                       "0499 Failed allocate fast-path FCP "
-                                       "CQ (%d)\n", fcp_cqidx);
-                       goto out_free_fcp_cq;
-               }
-               phba->sli4_hba.fcp_cq[fcp_cqidx] = qdesc;
-       } while (++fcp_cqidx < phba->cfg_fcp_eq_count);
 
        /* Create Mailbox Command Queue */
-       phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
-       phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
 
        qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,
                                      phba->sli4_hba.mq_ecount);
        if (!qdesc) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0505 Failed allocate slow-path MQ\n");
-               goto out_free_fcp_cq;
+               goto out_error;
        }
        phba->sli4_hba.mbx_wq = qdesc;
 
        /*
-        * Create all the Work Queues (WQs)
+        * Create ELS Work Queues
         */
-       phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
-       phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
 
        /* Create slow-path ELS Work Queue */
        qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
@@ -6737,36 +6771,13 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
        if (!qdesc) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0504 Failed allocate slow-path ELS WQ\n");
-               goto out_free_mbx_wq;
+               goto out_error;
        }
        phba->sli4_hba.els_wq = qdesc;
 
-       /* Create fast-path FCP Work Queue(s) */
-       phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) *
-                               phba->cfg_fcp_wq_count), GFP_KERNEL);
-       if (!phba->sli4_hba.fcp_wq) {
-               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                               "2578 Failed allocate memory for fast-path "
-                               "WQ record array\n");
-               goto out_free_els_wq;
-       }
-       for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
-               qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
-                                             phba->sli4_hba.wq_ecount);
-               if (!qdesc) {
-                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                                       "0503 Failed allocate fast-path FCP "
-                                       "WQ (%d)\n", fcp_wqidx);
-                       goto out_free_fcp_wq;
-               }
-               phba->sli4_hba.fcp_wq[fcp_wqidx] = qdesc;
-       }
-
        /*
         * Create Receive Queue (RQ)
         */
-       phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
-       phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
 
        /* Create Receive Queue for header */
        qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
@@ -6774,7 +6785,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
        if (!qdesc) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0506 Failed allocate receive HRQ\n");
-               goto out_free_fcp_wq;
+               goto out_error;
        }
        phba->sli4_hba.hdr_rq = qdesc;
 
@@ -6784,52 +6795,14 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
        if (!qdesc) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0507 Failed allocate receive DRQ\n");
-               goto out_free_hdr_rq;
+               goto out_error;
        }
        phba->sli4_hba.dat_rq = qdesc;
 
        return 0;
 
-out_free_hdr_rq:
-       lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
-       phba->sli4_hba.hdr_rq = NULL;
-out_free_fcp_wq:
-       for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) {
-               lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_wqidx]);
-               phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL;
-       }
-       kfree(phba->sli4_hba.fcp_wq);
-       phba->sli4_hba.fcp_wq = NULL;
-out_free_els_wq:
-       lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
-       phba->sli4_hba.els_wq = NULL;
-out_free_mbx_wq:
-       lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
-       phba->sli4_hba.mbx_wq = NULL;
-out_free_fcp_cq:
-       for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) {
-               lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_cqidx]);
-               phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL;
-       }
-       kfree(phba->sli4_hba.fcp_cq);
-       phba->sli4_hba.fcp_cq = NULL;
-out_free_els_cq:
-       lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
-       phba->sli4_hba.els_cq = NULL;
-out_free_mbx_cq:
-       lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
-       phba->sli4_hba.mbx_cq = NULL;
-out_free_fp_eq:
-       for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) {
-               lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_eqidx]);
-               phba->sli4_hba.fp_eq[fcp_eqidx] = NULL;
-       }
-       kfree(phba->sli4_hba.fp_eq);
-       phba->sli4_hba.fp_eq = NULL;
-out_free_sp_eq:
-       lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
-       phba->sli4_hba.sp_eq = NULL;
 out_error:
+       lpfc_sli4_queue_destroy(phba);
        return -ENOMEM;
 }
 
@@ -6848,58 +6821,86 @@ out_error:
 void
 lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
 {
-       int fcp_qidx;
+       int idx;
+
+       if (phba->sli4_hba.hba_eq != NULL) {
+               /* Release HBA event queue */
+               for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
+                       if (phba->sli4_hba.hba_eq[idx] != NULL) {
+                               lpfc_sli4_queue_free(
+                                       phba->sli4_hba.hba_eq[idx]);
+                               phba->sli4_hba.hba_eq[idx] = NULL;
+                       }
+               }
+               kfree(phba->sli4_hba.hba_eq);
+               phba->sli4_hba.hba_eq = NULL;
+       }
+
+       if (phba->sli4_hba.fcp_cq != NULL) {
+               /* Release FCP completion queue */
+               for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
+                       if (phba->sli4_hba.fcp_cq[idx] != NULL) {
+                               lpfc_sli4_queue_free(
+                                       phba->sli4_hba.fcp_cq[idx]);
+                               phba->sli4_hba.fcp_cq[idx] = NULL;
+                       }
+               }
+               kfree(phba->sli4_hba.fcp_cq);
+               phba->sli4_hba.fcp_cq = NULL;
+       }
+
+       if (phba->sli4_hba.fcp_wq != NULL) {
+               /* Release FCP work queue */
+               for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
+                       if (phba->sli4_hba.fcp_wq[idx] != NULL) {
+                               lpfc_sli4_queue_free(
+                                       phba->sli4_hba.fcp_wq[idx]);
+                               phba->sli4_hba.fcp_wq[idx] = NULL;
+                       }
+               }
+               kfree(phba->sli4_hba.fcp_wq);
+               phba->sli4_hba.fcp_wq = NULL;
+       }
+
+       /* Release FCP CQ mapping array */
+       if (phba->sli4_hba.fcp_cq_map != NULL) {
+               kfree(phba->sli4_hba.fcp_cq_map);
+               phba->sli4_hba.fcp_cq_map = NULL;
+       }
 
        /* Release mailbox command work queue */
-       lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
-       phba->sli4_hba.mbx_wq = NULL;
+       if (phba->sli4_hba.mbx_wq != NULL) {
+               lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
+               phba->sli4_hba.mbx_wq = NULL;
+       }
 
        /* Release ELS work queue */
-       lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
-       phba->sli4_hba.els_wq = NULL;
-
-       /* Release FCP work queue */
-       if (phba->sli4_hba.fcp_wq != NULL)
-               for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count;
-                    fcp_qidx++)
-                       lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]);
-       kfree(phba->sli4_hba.fcp_wq);
-       phba->sli4_hba.fcp_wq = NULL;
+       if (phba->sli4_hba.els_wq != NULL) {
+               lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
+               phba->sli4_hba.els_wq = NULL;
+       }
 
        /* Release unsolicited receive queue */
-       lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
-       phba->sli4_hba.hdr_rq = NULL;
-       lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
-       phba->sli4_hba.dat_rq = NULL;
+       if (phba->sli4_hba.hdr_rq != NULL) {
+               lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
+               phba->sli4_hba.hdr_rq = NULL;
+       }
+       if (phba->sli4_hba.dat_rq != NULL) {
+               lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
+               phba->sli4_hba.dat_rq = NULL;
+       }
 
        /* Release ELS complete queue */
-       lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
-       phba->sli4_hba.els_cq = NULL;
+       if (phba->sli4_hba.els_cq != NULL) {
+               lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
+               phba->sli4_hba.els_cq = NULL;
+       }
 
        /* Release mailbox command complete queue */
-       lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
-       phba->sli4_hba.mbx_cq = NULL;
-
-       /* Release FCP response complete queue */
-       fcp_qidx = 0;
-       if (phba->sli4_hba.fcp_cq != NULL)
-               do
-                       lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
-               while (++fcp_qidx < phba->cfg_fcp_eq_count);
-       kfree(phba->sli4_hba.fcp_cq);
-       phba->sli4_hba.fcp_cq = NULL;
-
-       /* Release fast-path event queue */
-       if (phba->sli4_hba.fp_eq != NULL)
-               for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count;
-                    fcp_qidx++)
-                       lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
-       kfree(phba->sli4_hba.fp_eq);
-       phba->sli4_hba.fp_eq = NULL;
-
-       /* Release slow-path event queue */
-       lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
-       phba->sli4_hba.sp_eq = NULL;
+       if (phba->sli4_hba.mbx_cq != NULL) {
+               lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
+               phba->sli4_hba.mbx_cq = NULL;
+       }
 
        return;
 }
@@ -6919,61 +6920,124 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
 int
 lpfc_sli4_queue_setup(struct lpfc_hba *phba)
 {
+       struct lpfc_sli *psli = &phba->sli;
+       struct lpfc_sli_ring *pring;
        int rc = -ENOMEM;
        int fcp_eqidx, fcp_cqidx, fcp_wqidx;
        int fcp_cq_index = 0;
 
        /*
-        * Set up Event Queues (EQs)
+        * Set up HBA Event Queues (EQs)
         */
 
-       /* Set up slow-path event queue */
-       if (!phba->sli4_hba.sp_eq) {
-               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                               "0520 Slow-path EQ not allocated\n");
-               goto out_error;
-       }
-       rc = lpfc_eq_create(phba, phba->sli4_hba.sp_eq,
-                           LPFC_SP_DEF_IMAX);
-       if (rc) {
-               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                               "0521 Failed setup of slow-path EQ: "
-                               "rc = 0x%x\n", rc);
-               goto out_error;
-       }
-       lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-                       "2583 Slow-path EQ setup: queue-id=%d\n",
-                       phba->sli4_hba.sp_eq->queue_id);
-
-       /* Set up fast-path event queue */
-       if (phba->cfg_fcp_eq_count && !phba->sli4_hba.fp_eq) {
+       /* Set up HBA event queue */
+       if (phba->cfg_fcp_io_channel && !phba->sli4_hba.hba_eq) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "3147 Fast-path EQs not allocated\n");
                rc = -ENOMEM;
-               goto out_destroy_sp_eq;
+               goto out_error;
        }
-       for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
-               if (!phba->sli4_hba.fp_eq[fcp_eqidx]) {
+       for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel; fcp_eqidx++) {
+               if (!phba->sli4_hba.hba_eq[fcp_eqidx]) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                        "0522 Fast-path EQ (%d) not "
                                        "allocated\n", fcp_eqidx);
                        rc = -ENOMEM;
-                       goto out_destroy_fp_eq;
+                       goto out_destroy_hba_eq;
                }
-               rc = lpfc_eq_create(phba, phba->sli4_hba.fp_eq[fcp_eqidx],
-                                   phba->cfg_fcp_imax);
+               rc = lpfc_eq_create(phba, phba->sli4_hba.hba_eq[fcp_eqidx],
+                        (phba->cfg_fcp_imax / phba->cfg_fcp_io_channel));
                if (rc) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                        "0523 Failed setup of fast-path EQ "
                                        "(%d), rc = 0x%x\n", fcp_eqidx, rc);
-                       goto out_destroy_fp_eq;
+                       goto out_destroy_hba_eq;
                }
                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-                               "2584 Fast-path EQ setup: "
+                               "2584 HBA EQ setup: "
                                "queue[%d]-id=%d\n", fcp_eqidx,
-                               phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id);
+                               phba->sli4_hba.hba_eq[fcp_eqidx]->queue_id);
        }
 
+       /* Set up fast-path FCP Response Complete Queue */
+       if (!phba->sli4_hba.fcp_cq) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "3148 Fast-path FCP CQ array not "
+                               "allocated\n");
+               rc = -ENOMEM;
+               goto out_destroy_hba_eq;
+       }
+
+       for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_io_channel; fcp_cqidx++) {
+               if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                                       "0526 Fast-path FCP CQ (%d) not "
+                                       "allocated\n", fcp_cqidx);
+                       rc = -ENOMEM;
+                       goto out_destroy_fcp_cq;
+               }
+               rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx],
+                       phba->sli4_hba.hba_eq[fcp_cqidx], LPFC_WCQ, LPFC_FCP);
+               if (rc) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                                       "0527 Failed setup of fast-path FCP "
+                                       "CQ (%d), rc = 0x%x\n", fcp_cqidx, rc);
+                       goto out_destroy_fcp_cq;
+               }
+
+               /* Setup fcp_cq_map for fast lookup */
+               phba->sli4_hba.fcp_cq_map[fcp_cqidx] =
+                               phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id;
+
+               lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+                               "2588 FCP CQ setup: cq[%d]-id=%d, "
+                               "parent seq[%d]-id=%d\n",
+                               fcp_cqidx,
+                               phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id,
+                               fcp_cqidx,
+                               phba->sli4_hba.hba_eq[fcp_cqidx]->queue_id);
+       }
+
+       /* Set up fast-path FCP Work Queue */
+       if (!phba->sli4_hba.fcp_wq) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "3149 Fast-path FCP WQ array not "
+                               "allocated\n");
+               rc = -ENOMEM;
+               goto out_destroy_fcp_cq;
+       }
+
+       for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_io_channel; fcp_wqidx++) {
+               if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                                       "0534 Fast-path FCP WQ (%d) not "
+                                       "allocated\n", fcp_wqidx);
+                       rc = -ENOMEM;
+                       goto out_destroy_fcp_wq;
+               }
+               rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx],
+                                   phba->sli4_hba.fcp_cq[fcp_wqidx],
+                                   LPFC_FCP);
+               if (rc) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                                       "0535 Failed setup of fast-path FCP "
+                                       "WQ (%d), rc = 0x%x\n", fcp_wqidx, rc);
+                       goto out_destroy_fcp_wq;
+               }
+
+               /* Bind this WQ to the next FCP ring */
+               pring = &psli->ring[MAX_SLI3_CONFIGURED_RINGS + fcp_wqidx];
+               pring->sli.sli4.wqp = (void *)phba->sli4_hba.fcp_wq[fcp_wqidx];
+               phba->sli4_hba.fcp_cq[fcp_wqidx]->pring = pring;
+
+               lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+                               "2591 FCP WQ setup: wq[%d]-id=%d, "
+                               "parent cq[%d]-id=%d\n",
+                               fcp_wqidx,
+                               phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
+                               fcp_cq_index,
+                               phba->sli4_hba.fcp_cq[fcp_wqidx]->queue_id);
+       }
        /*
         * Set up Complete Queues (CQs)
         */
@@ -6983,20 +7047,20 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0528 Mailbox CQ not allocated\n");
                rc = -ENOMEM;
-               goto out_destroy_fp_eq;
+               goto out_destroy_fcp_wq;
        }
-       rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, phba->sli4_hba.sp_eq,
-                           LPFC_MCQ, LPFC_MBOX);
+       rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq,
+                       phba->sli4_hba.hba_eq[0], LPFC_MCQ, LPFC_MBOX);
        if (rc) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0529 Failed setup of slow-path mailbox CQ: "
                                "rc = 0x%x\n", rc);
-               goto out_destroy_fp_eq;
+               goto out_destroy_fcp_wq;
        }
        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
                        "2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n",
                        phba->sli4_hba.mbx_cq->queue_id,
-                       phba->sli4_hba.sp_eq->queue_id);
+                       phba->sli4_hba.hba_eq[0]->queue_id);
 
        /* Set up slow-path ELS Complete Queue */
        if (!phba->sli4_hba.els_cq) {
@@ -7005,8 +7069,8 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
                rc = -ENOMEM;
                goto out_destroy_mbx_cq;
        }
-       rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, phba->sli4_hba.sp_eq,
-                           LPFC_WCQ, LPFC_ELS);
+       rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq,
+                       phba->sli4_hba.hba_eq[0], LPFC_WCQ, LPFC_ELS);
        if (rc) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0531 Failed setup of slow-path ELS CQ: "
@@ -7016,52 +7080,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
                        "2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n",
                        phba->sli4_hba.els_cq->queue_id,
-                       phba->sli4_hba.sp_eq->queue_id);
-
-       /* Set up fast-path FCP Response Complete Queue */
-       if (!phba->sli4_hba.fcp_cq) {
-               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                               "3148 Fast-path FCP CQ array not "
-                               "allocated\n");
-               rc = -ENOMEM;
-               goto out_destroy_els_cq;
-       }
-       fcp_cqidx = 0;
-       do {
-               if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
-                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                                       "0526 Fast-path FCP CQ (%d) not "
-                                       "allocated\n", fcp_cqidx);
-                       rc = -ENOMEM;
-                       goto out_destroy_fcp_cq;
-               }
-               if (phba->cfg_fcp_eq_count)
-                       rc = lpfc_cq_create(phba,
-                                           phba->sli4_hba.fcp_cq[fcp_cqidx],
-                                           phba->sli4_hba.fp_eq[fcp_cqidx],
-                                           LPFC_WCQ, LPFC_FCP);
-               else
-                       rc = lpfc_cq_create(phba,
-                                           phba->sli4_hba.fcp_cq[fcp_cqidx],
-                                           phba->sli4_hba.sp_eq,
-                                           LPFC_WCQ, LPFC_FCP);
-               if (rc) {
-                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                                       "0527 Failed setup of fast-path FCP "
-                                       "CQ (%d), rc = 0x%x\n", fcp_cqidx, rc);
-                       goto out_destroy_fcp_cq;
-               }
-               lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-                               "2588 FCP CQ setup: cq[%d]-id=%d, "
-                               "parent %seq[%d]-id=%d\n",
-                               fcp_cqidx,
-                               phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id,
-                               (phba->cfg_fcp_eq_count) ? "" : "sp_",
-                               fcp_cqidx,
-                               (phba->cfg_fcp_eq_count) ?
-                                  phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id :
-                                  phba->sli4_hba.sp_eq->queue_id);
-       } while (++fcp_cqidx < phba->cfg_fcp_eq_count);
+                       phba->sli4_hba.hba_eq[0]->queue_id);
 
        /*
         * Set up all the Work Queues (WQs)
@@ -7072,7 +7091,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0538 Slow-path MQ not allocated\n");
                rc = -ENOMEM;
-               goto out_destroy_fcp_cq;
+               goto out_destroy_els_cq;
        }
        rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,
                            phba->sli4_hba.mbx_cq, LPFC_MBOX);
@@ -7080,7 +7099,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0539 Failed setup of slow-path MQ: "
                                "rc = 0x%x\n", rc);
-               goto out_destroy_fcp_cq;
+               goto out_destroy_els_cq;
        }
        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
                        "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
@@ -7102,49 +7121,17 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
                                "rc = 0x%x\n", rc);
                goto out_destroy_mbx_wq;
        }
+
+       /* Bind this WQ to the ELS ring */
+       pring = &psli->ring[LPFC_ELS_RING];
+       pring->sli.sli4.wqp = (void *)phba->sli4_hba.els_wq;
+       phba->sli4_hba.els_cq->pring = pring;
+
        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
                        "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
                        phba->sli4_hba.els_wq->queue_id,
                        phba->sli4_hba.els_cq->queue_id);
 
-       /* Set up fast-path FCP Work Queue */
-       if (!phba->sli4_hba.fcp_wq) {
-               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                               "3149 Fast-path FCP WQ array not "
-                               "allocated\n");
-               rc = -ENOMEM;
-               goto out_destroy_els_wq;
-       }
-       for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
-               if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) {
-                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                                       "0534 Fast-path FCP WQ (%d) not "
-                                       "allocated\n", fcp_wqidx);
-                       rc = -ENOMEM;
-                       goto out_destroy_fcp_wq;
-               }
-               rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx],
-                                   phba->sli4_hba.fcp_cq[fcp_cq_index],
-                                   LPFC_FCP);
-               if (rc) {
-                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                                       "0535 Failed setup of fast-path FCP "
-                                       "WQ (%d), rc = 0x%x\n", fcp_wqidx, rc);
-                       goto out_destroy_fcp_wq;
-               }
-               lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-                               "2591 FCP WQ setup: wq[%d]-id=%d, "
-                               "parent cq[%d]-id=%d\n",
-                               fcp_wqidx,
-                               phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
-                               fcp_cq_index,
-                               phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id);
-               /* Round robin FCP Work Queue's Completion Queue assignment */
-               if (phba->cfg_fcp_eq_count)
-                       fcp_cq_index = ((fcp_cq_index + 1) %
-                                       phba->cfg_fcp_eq_count);
-       }
-
        /*
         * Create Receive Queue (RQ)
         */
@@ -7152,7 +7139,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0540 Receive Queue not allocated\n");
                rc = -ENOMEM;
-               goto out_destroy_fcp_wq;
+               goto out_destroy_els_wq;
        }
 
        lpfc_rq_adjust_repost(phba, phba->sli4_hba.hdr_rq, LPFC_ELS_HBQ);
@@ -7175,25 +7162,23 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
                        phba->sli4_hba.els_cq->queue_id);
        return 0;
 
-out_destroy_fcp_wq:
-       for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--)
-               lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
 out_destroy_els_wq:
        lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
 out_destroy_mbx_wq:
        lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
-out_destroy_fcp_cq:
-       for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
-               lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
 out_destroy_els_cq:
        lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
 out_destroy_mbx_cq:
        lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
-out_destroy_fp_eq:
+out_destroy_fcp_wq:
+       for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--)
+               lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
+out_destroy_fcp_cq:
+       for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
+               lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
+out_destroy_hba_eq:
        for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--)
-               lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]);
-out_destroy_sp_eq:
-       lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
+               lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[fcp_eqidx]);
 out_error:
        return rc;
 }
@@ -7222,27 +7207,27 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba)
        /* Unset unsolicited receive queue */
        lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
        /* Unset FCP work queue */
-       for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
-               lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]);
+       if (phba->sli4_hba.fcp_wq) {
+               for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel;
+                    fcp_qidx++)
+                       lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]);
+       }
        /* Unset mailbox command complete queue */
        lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
        /* Unset ELS complete queue */
        lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
        /* Unset FCP response complete queue */
        if (phba->sli4_hba.fcp_cq) {
-               fcp_qidx = 0;
-               do {
+               for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel;
+                    fcp_qidx++)
                        lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
-               } while (++fcp_qidx < phba->cfg_fcp_eq_count);
        }
        /* Unset fast-path event queue */
-       if (phba->sli4_hba.fp_eq) {
-               for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count;
+       if (phba->sli4_hba.hba_eq) {
+               for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel;
                     fcp_qidx++)
-                       lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]);
+                       lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[fcp_qidx]);
        }
-       /* Unset slow-path event queue */
-       lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
 }
 
 /**
@@ -7590,10 +7575,11 @@ lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt)
        /* Set up NOP SLI4_CONFIG mailbox-ioctl command */
        length = (sizeof(struct lpfc_mbx_nop) -
                  sizeof(struct lpfc_sli4_cfg_mhdr));
-       lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
-                        LPFC_MBOX_OPCODE_NOP, length, LPFC_SLI4_MBX_EMBED);
 
        for (cmdsent = 0; cmdsent < cnt; cmdsent++) {
+               lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
+                                LPFC_MBOX_OPCODE_NOP, length,
+                                LPFC_SLI4_MBX_EMBED);
                if (!phba->sli4_hba.intr_enable)
                        rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
                else {
@@ -8133,33 +8119,22 @@ enable_msix_vectors:
                                "message=%d\n", index,
                                phba->sli4_hba.msix_entries[index].vector,
                                phba->sli4_hba.msix_entries[index].entry);
+
        /*
         * Assign MSI-X vectors to interrupt handlers
         */
-       if (vectors > 1)
-               rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
-                                &lpfc_sli4_sp_intr_handler, IRQF_SHARED,
-                                LPFC_SP_DRIVER_HANDLER_NAME, phba);
-       else
-               /* All Interrupts need to be handled by one EQ */
-               rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
-                                &lpfc_sli4_intr_handler, IRQF_SHARED,
-                                LPFC_DRIVER_NAME, phba);
-       if (rc) {
-               lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
-                               "0485 MSI-X slow-path request_irq failed "
-                               "(%d)\n", rc);
-               goto msi_fail_out;
-       }
+       for (index = 0; index < vectors; index++) {
+               memset(&phba->sli4_hba.handler_name[index], 0, 16);
+               sprintf((char *)&phba->sli4_hba.handler_name[index],
+                        LPFC_DRIVER_HANDLER_NAME"%d", index);
 
-       /* The rest of the vector(s) are associated to fast-path handler(s) */
-       for (index = 1; index < vectors; index++) {
-               phba->sli4_hba.fcp_eq_hdl[index - 1].idx = index - 1;
-               phba->sli4_hba.fcp_eq_hdl[index - 1].phba = phba;
+               phba->sli4_hba.fcp_eq_hdl[index].idx = index;
+               phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
+               atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].fcp_eq_in_use, 1);
                rc = request_irq(phba->sli4_hba.msix_entries[index].vector,
-                                &lpfc_sli4_fp_intr_handler, IRQF_SHARED,
-                                LPFC_FP_DRIVER_HANDLER_NAME,
-                                &phba->sli4_hba.fcp_eq_hdl[index - 1]);
+                                &lpfc_sli4_hba_intr_handler, IRQF_SHARED,
+                                (char *)&phba->sli4_hba.handler_name[index],
+                                &phba->sli4_hba.fcp_eq_hdl[index]);
                if (rc) {
                        lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
                                        "0486 MSI-X fast-path (%d) "
@@ -8173,12 +8148,9 @@ enable_msix_vectors:
 
 cfg_fail_out:
        /* free the irq already requested */
-       for (--index; index >= 1; index--)
-               free_irq(phba->sli4_hba.msix_entries[index - 1].vector,
-                        &phba->sli4_hba.fcp_eq_hdl[index - 1]);
-
-       /* free the irq already requested */
-       free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
+       for (--index; index >= 0; index--)
+               free_irq(phba->sli4_hba.msix_entries[index].vector,
+                        &phba->sli4_hba.fcp_eq_hdl[index]);
 
 msi_fail_out:
        /* Unconfigure MSI-X capability structure */
@@ -8199,11 +8171,9 @@ lpfc_sli4_disable_msix(struct lpfc_hba *phba)
        int index;
 
        /* Free up MSI-X multi-message vectors */
-       free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
-
-       for (index = 1; index < phba->sli4_hba.msix_vec_nr; index++)
+       for (index = 0; index < phba->sli4_hba.msix_vec_nr; index++)
                free_irq(phba->sli4_hba.msix_entries[index].vector,
-                        &phba->sli4_hba.fcp_eq_hdl[index - 1]);
+                        &phba->sli4_hba.fcp_eq_hdl[index]);
 
        /* Disable MSI-X */
        pci_disable_msix(phba->pcidev);
@@ -8249,7 +8219,7 @@ lpfc_sli4_enable_msi(struct lpfc_hba *phba)
                return rc;
        }
 
-       for (index = 0; index < phba->cfg_fcp_eq_count; index++) {
+       for (index = 0; index < phba->cfg_fcp_io_channel; index++) {
                phba->sli4_hba.fcp_eq_hdl[index].idx = index;
                phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
        }
@@ -8329,10 +8299,12 @@ lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
                        /* Indicate initialization to INTx mode */
                        phba->intr_type = INTx;
                        intr_mode = 0;
-                       for (index = 0; index < phba->cfg_fcp_eq_count;
+                       for (index = 0; index < phba->cfg_fcp_io_channel;
                             index++) {
                                phba->sli4_hba.fcp_eq_hdl[index].idx = index;
                                phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
+                               atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].
+                                       fcp_eq_in_use, 1);
                        }
                }
        }
@@ -9449,7 +9421,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
        int error;
        uint32_t cfg_mode, intr_mode;
        int mcnt;
-       int adjusted_fcp_eq_count;
+       int adjusted_fcp_io_channel;
        const struct firmware *fw;
        uint8_t file_name[16];
 
@@ -9552,13 +9524,13 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
                }
                /* Default to single EQ for non-MSI-X */
                if (phba->intr_type != MSIX)
-                       adjusted_fcp_eq_count = 0;
+                       adjusted_fcp_io_channel = 1;
                else if (phba->sli4_hba.msix_vec_nr <
-                                       phba->cfg_fcp_eq_count + 1)
-                       adjusted_fcp_eq_count = phba->sli4_hba.msix_vec_nr - 1;
+                                       phba->cfg_fcp_io_channel)
+                       adjusted_fcp_io_channel = phba->sli4_hba.msix_vec_nr;
                else
-                       adjusted_fcp_eq_count = phba->cfg_fcp_eq_count;
-               phba->cfg_fcp_eq_count = adjusted_fcp_eq_count;
+                       adjusted_fcp_io_channel = phba->cfg_fcp_io_channel;
+               phba->cfg_fcp_io_channel = adjusted_fcp_io_channel;
                /* Set up SLI-4 HBA */
                if (lpfc_sli4_hba_setup(phba)) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -9694,6 +9666,7 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev)
         * buffers are released to their corresponding pools here.
         */
        lpfc_scsi_free(phba);
+
        lpfc_sli4_driver_resource_unset(phba);
 
        /* Unmap adapter Control and Doorbell registers */
@@ -10420,6 +10393,10 @@ static struct pci_device_id lpfc_id_table[] = {
                PCI_ANY_ID, PCI_ANY_ID, },
        {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE_VF,
                PCI_ANY_ID, PCI_ANY_ID, },
+       {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK,
+               PCI_ANY_ID, PCI_ANY_ID, },
+       {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK_VF,
+               PCI_ANY_ID, PCI_ANY_ID, },
        { 0 }
 };
 
index 20336f09fb3cede76ddcc5e808a58d64c3e76b66..efc9cd9def8b09d1c413fd97f6319ffb5929cb97 100644 (file)
@@ -92,7 +92,7 @@ lpfc_dump_static_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb,
        memset(mp->virt, 0, LPFC_BPL_SIZE);
        INIT_LIST_HEAD(&mp->list);
        /* save address for completion */
-       pmb->context2 = (uint8_t *) mp;
+       pmb->context1 = (uint8_t *)mp;
        mb->un.varWords[3] = putPaddrLow(mp->phys);
        mb->un.varWords[4] = putPaddrHigh(mp->phys);
        mb->un.varDmp.sli4_length = sizeof(struct static_vport_info);
@@ -950,44 +950,47 @@ lpfc_config_pcb_setup(struct lpfc_hba * phba)
        for (i = 0; i < psli->num_rings; i++) {
                pring = &psli->ring[i];
 
-               pring->sizeCiocb = phba->sli_rev == 3 ? SLI3_IOCB_CMD_SIZE:
+               pring->sli.sli3.sizeCiocb =
+                       phba->sli_rev == 3 ? SLI3_IOCB_CMD_SIZE :
                                                        SLI2_IOCB_CMD_SIZE;
-               pring->sizeRiocb = phba->sli_rev == 3 ? SLI3_IOCB_RSP_SIZE:
+               pring->sli.sli3.sizeRiocb =
+                       phba->sli_rev == 3 ? SLI3_IOCB_RSP_SIZE :
                                                        SLI2_IOCB_RSP_SIZE;
                /* A ring MUST have both cmd and rsp entries defined to be
                   valid */
-               if ((pring->numCiocb == 0) || (pring->numRiocb == 0)) {
+               if ((pring->sli.sli3.numCiocb == 0) ||
+                       (pring->sli.sli3.numRiocb == 0)) {
                        pcbp->rdsc[i].cmdEntries = 0;
                        pcbp->rdsc[i].rspEntries = 0;
                        pcbp->rdsc[i].cmdAddrHigh = 0;
                        pcbp->rdsc[i].rspAddrHigh = 0;
                        pcbp->rdsc[i].cmdAddrLow = 0;
                        pcbp->rdsc[i].rspAddrLow = 0;
-                       pring->cmdringaddr = NULL;
-                       pring->rspringaddr = NULL;
+                       pring->sli.sli3.cmdringaddr = NULL;
+                       pring->sli.sli3.rspringaddr = NULL;
                        continue;
                }
                /* Command ring setup for ring */
-               pring->cmdringaddr = (void *)&phba->IOCBs[iocbCnt];
-               pcbp->rdsc[i].cmdEntries = pring->numCiocb;
+               pring->sli.sli3.cmdringaddr = (void *)&phba->IOCBs[iocbCnt];
+               pcbp->rdsc[i].cmdEntries = pring->sli.sli3.numCiocb;
 
                offset = (uint8_t *) &phba->IOCBs[iocbCnt] -
                         (uint8_t *) phba->slim2p.virt;
                pdma_addr = phba->slim2p.phys + offset;
                pcbp->rdsc[i].cmdAddrHigh = putPaddrHigh(pdma_addr);
                pcbp->rdsc[i].cmdAddrLow = putPaddrLow(pdma_addr);
-               iocbCnt += pring->numCiocb;
+               iocbCnt += pring->sli.sli3.numCiocb;
 
                /* Response ring setup for ring */
-               pring->rspringaddr = (void *) &phba->IOCBs[iocbCnt];
+               pring->sli.sli3.rspringaddr = (void *) &phba->IOCBs[iocbCnt];
 
-               pcbp->rdsc[i].rspEntries = pring->numRiocb;
+               pcbp->rdsc[i].rspEntries = pring->sli.sli3.numRiocb;
                offset = (uint8_t *)&phba->IOCBs[iocbCnt] -
                         (uint8_t *)phba->slim2p.virt;
                pdma_addr = phba->slim2p.phys + offset;
                pcbp->rdsc[i].rspAddrHigh = putPaddrHigh(pdma_addr);
                pcbp->rdsc[i].rspAddrLow = putPaddrLow(pdma_addr);
-               iocbCnt += pring->numRiocb;
+               iocbCnt += pring->sli.sli3.numRiocb;
        }
 }
 
@@ -1609,12 +1612,15 @@ lpfc_mbox_tmo_val(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 
        switch (mbox->mbxCommand) {
        case MBX_WRITE_NV:      /* 0x03 */
+       case MBX_DUMP_MEMORY:   /* 0x17 */
        case MBX_UPDATE_CFG:    /* 0x1B */
        case MBX_DOWN_LOAD:     /* 0x1C */
        case MBX_DEL_LD_ENTRY:  /* 0x1D */
+       case MBX_WRITE_VPARMS:  /* 0x32 */
        case MBX_LOAD_AREA:     /* 0x81 */
        case MBX_WRITE_WWN:     /* 0x98 */
        case MBX_LOAD_EXP_ROM:  /* 0x9C */
+       case MBX_ACCESS_VDATA:  /* 0xA5 */
                return LPFC_MBOX_TMO_FLASH_CMD;
        case MBX_SLI4_CONFIG:   /* 0x9b */
                subsys = lpfc_sli_config_mbox_subsys_get(phba, mboxq);
@@ -1625,11 +1631,17 @@ lpfc_mbox_tmo_val(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
                        case LPFC_MBOX_OPCODE_WRITE_OBJECT:
                        case LPFC_MBOX_OPCODE_READ_OBJECT_LIST:
                        case LPFC_MBOX_OPCODE_DELETE_OBJECT:
-                       case LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG:
                        case LPFC_MBOX_OPCODE_GET_PROFILE_LIST:
                        case LPFC_MBOX_OPCODE_SET_ACT_PROFILE:
+                       case LPFC_MBOX_OPCODE_GET_PROFILE_CONFIG:
                        case LPFC_MBOX_OPCODE_SET_PROFILE_CONFIG:
                        case LPFC_MBOX_OPCODE_GET_FACTORY_PROFILE_CONFIG:
+                       case LPFC_MBOX_OPCODE_GET_PROFILE_CAPACITIES:
+                       case LPFC_MBOX_OPCODE_SEND_ACTIVATION:
+                       case LPFC_MBOX_OPCODE_RESET_LICENSES:
+                       case LPFC_MBOX_OPCODE_SET_BOOT_CONFIG:
+                       case LPFC_MBOX_OPCODE_GET_VPD_DATA:
+                       case LPFC_MBOX_OPCODE_SET_PHYSICAL_LINK_CONFIG:
                                return LPFC_MBOX_SLI4_CONFIG_EXTENDED_TMO;
                        }
                }
index ade763d3930a7576e37b62285ab80b35869488aa..cd86069a0ba82a2954238139e8626283fac56d07 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2012 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
@@ -194,6 +194,10 @@ lpfc_mem_free(struct lpfc_hba *phba)
                pci_pool_destroy(phba->lpfc_hbq_pool);
        phba->lpfc_hbq_pool = NULL;
 
+       if (phba->rrq_pool)
+               mempool_destroy(phba->rrq_pool);
+       phba->rrq_pool = NULL;
+
        /* Free NLP memory pool */
        mempool_destroy(phba->nlp_mem_pool);
        phba->nlp_mem_pool = NULL;
index 9133a97f045f0b702fbc3a77f473893dfad1aa39..d8fadcb2db73d1794b1ef9d481a6c65e35ff971d 100644 (file)
@@ -1777,6 +1777,117 @@ lpfc_device_recov_prli_issue(struct lpfc_vport *vport,
        return ndlp->nlp_state;
 }
 
+static uint32_t
+lpfc_rcv_plogi_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+                         void *arg, uint32_t evt)
+{
+       struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
+       struct ls_rjt     stat;
+
+       memset(&stat, 0, sizeof(struct ls_rjt));
+       stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+       stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
+       lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
+       return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_prli_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+                        void *arg, uint32_t evt)
+{
+       struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
+       struct ls_rjt     stat;
+
+       memset(&stat, 0, sizeof(struct ls_rjt));
+       stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+       stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
+       lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
+       return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_logo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+                        void *arg, uint32_t evt)
+{
+       struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
+       struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
+
+       spin_lock_irq(shost->host_lock);
+       ndlp->nlp_flag &= NLP_LOGO_ACC;
+       spin_unlock_irq(shost->host_lock);
+       lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
+       return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_padisc_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+                          void *arg, uint32_t evt)
+{
+       struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
+       struct ls_rjt     stat;
+
+       memset(&stat, 0, sizeof(struct ls_rjt));
+       stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+       stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
+       lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
+       return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_prlo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+                        void *arg, uint32_t evt)
+{
+       struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
+       struct ls_rjt     stat;
+
+       memset(&stat, 0, sizeof(struct ls_rjt));
+       stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+       stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
+       lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
+       return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_cmpl_logo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+                         void *arg, uint32_t evt)
+{
+       struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+       ndlp->nlp_prev_state = NLP_STE_LOGO_ISSUE;
+       lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+       spin_lock_irq(shost->host_lock);
+       ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
+       spin_unlock_irq(shost->host_lock);
+       lpfc_disc_set_adisc(vport, ndlp);
+       return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_device_rm_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+                         void *arg, uint32_t evt)
+{
+       /*
+        * Take no action.  If a LOGO is outstanding, then possibly DevLoss has
+        * timed out and is calling for Device Remove.  In this case, the LOGO
+        * must be allowed to complete in state LOGO_ISSUE so that the rpi
+        * and other NLP flags are correctly cleaned up.
+        */
+       return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_device_recov_logo_issue(struct lpfc_vport *vport,
+                            struct lpfc_nodelist *ndlp,
+                            void *arg, uint32_t evt)
+{
+       /*
+        * Device Recovery events have no meaning for a node with a LOGO
+        * outstanding.  The LOGO has to complete first and handle the
+        * node from that point.
+        */
+       return ndlp->nlp_state;
+}
+
 static uint32_t
 lpfc_rcv_plogi_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                          void *arg, uint32_t evt)
@@ -2083,6 +2194,8 @@ lpfc_cmpl_logo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                        void *arg, uint32_t evt)
 {
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+       /* For the fabric port just clear the fc flags. */
        if (ndlp->nlp_DID == Fabric_DID) {
                spin_lock_irq(shost->host_lock);
                vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
@@ -2297,6 +2410,20 @@ static uint32_t (*lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT])
        lpfc_device_rm_prli_issue,      /* DEVICE_RM       */
        lpfc_device_recov_prli_issue,   /* DEVICE_RECOVERY */
 
+       lpfc_rcv_plogi_logo_issue,      /* RCV_PLOGI   LOGO_ISSUE     */
+       lpfc_rcv_prli_logo_issue,       /* RCV_PRLI        */
+       lpfc_rcv_logo_logo_issue,       /* RCV_LOGO        */
+       lpfc_rcv_padisc_logo_issue,     /* RCV_ADISC       */
+       lpfc_rcv_padisc_logo_issue,     /* RCV_PDISC       */
+       lpfc_rcv_prlo_logo_issue,       /* RCV_PRLO        */
+       lpfc_cmpl_plogi_illegal,        /* CMPL_PLOGI      */
+       lpfc_disc_illegal,              /* CMPL_PRLI       */
+       lpfc_cmpl_logo_logo_issue,      /* CMPL_LOGO       */
+       lpfc_disc_illegal,              /* CMPL_ADISC      */
+       lpfc_disc_illegal,              /* CMPL_REG_LOGIN  */
+       lpfc_device_rm_logo_issue,      /* DEVICE_RM       */
+       lpfc_device_recov_logo_issue,   /* DEVICE_RECOVERY */
+
        lpfc_rcv_plogi_unmap_node,      /* RCV_PLOGI   UNMAPPED_NODE  */
        lpfc_rcv_prli_unmap_node,       /* RCV_PRLI        */
        lpfc_rcv_logo_unmap_node,       /* RCV_LOGO        */
index 925975d2d76579359ebe4181141f6e077e384f33..64013f3097ad2dc4cecc2df4bb7d50fe128d6b05 100644 (file)
@@ -60,12 +60,6 @@ static char *dif_op_str[] = {
        "PROT_WRITE_PASS",
 };
 
-static char *dif_grd_str[] = {
-       "NO_GUARD",
-       "DIF_CRC",
-       "DIX_IP",
-};
-
 struct scsi_dif_tuple {
        __be16 guard_tag;       /* Checksum */
        __be16 app_tag;         /* Opaque storage */
@@ -3482,9 +3476,15 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
        }
        lp = (uint32_t *)cmnd->sense_buffer;
 
-       if (!scsi_status && (resp_info & RESID_UNDER) &&
-               vport->cfg_log_verbose & LOG_FCP_UNDER)
-               logit = LOG_FCP_UNDER;
+       /* special handling for under run conditions */
+       if (!scsi_status && (resp_info & RESID_UNDER)) {
+               /* don't log under runs if fcp set... */
+               if (vport->cfg_log_verbose & LOG_FCP)
+                       logit = LOG_FCP_ERROR;
+               /* unless operator says so */
+               if (vport->cfg_log_verbose & LOG_FCP_UNDER)
+                       logit = LOG_FCP_UNDER;
+       }
 
        lpfc_printf_vlog(vport, KERN_WARNING, logit,
                         "9024 FCP command x%x failed: x%x SNS x%x x%x "
@@ -3552,11 +3552,11 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
 
        /*
         * Check SLI validation that all the transfer was actually done
-        * (fcpi_parm should be zero). Apply check only to reads.
+        * (fcpi_parm should be zero).
         */
-       } else if (fcpi_parm && (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
+       } else if (fcpi_parm) {
                lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
-                                "9029 FCP Read Check Error Data: "
+                                "9029 FCP Data Transfer Check Error: "
                                 "x%x x%x x%x x%x x%x\n",
                                 be32_to_cpu(fcpcmd->fcpDl),
                                 be32_to_cpu(fcprsp->rspResId),
@@ -3615,7 +3615,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
        cmd = lpfc_cmd->pCmd;
        shost = cmd->device->host;
 
-       lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
+       lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & IOERR_PARAM_MASK);
        lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
        /* pick up SLI4 exhange busy status from HBA */
        lpfc_cmd->exch_busy = pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY;
@@ -3660,10 +3660,10 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
                        lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
                else if (lpfc_cmd->status >= IOSTAT_CNT)
                        lpfc_cmd->status = IOSTAT_DEFAULT;
-               if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR
-                       && !lpfc_cmd->fcp_rsp->rspStatus3
-                       && (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER)
-                       && !(phba->cfg_log_verbose & LOG_FCP_UNDER))
+               if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR &&
+                   !lpfc_cmd->fcp_rsp->rspStatus3 &&
+                   (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) &&
+                   !(vport->cfg_log_verbose & LOG_FCP_UNDER))
                        logit = 0;
                else
                        logit = LOG_FCP | LOG_FCP_UNDER;
@@ -3829,12 +3829,15 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
        cmd->scsi_done(cmd);
 
        if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
+               spin_lock_irq(&phba->hbalock);
+               lpfc_cmd->pCmd = NULL;
+               spin_unlock_irq(&phba->hbalock);
+
                /*
                 * If there is a thread waiting for command completion
                 * wake up the thread.
                 */
                spin_lock_irqsave(shost->host_lock, flags);
-               lpfc_cmd->pCmd = NULL;
                if (lpfc_cmd->waitq)
                        wake_up(lpfc_cmd->waitq);
                spin_unlock_irqrestore(shost->host_lock, flags);
@@ -3868,12 +3871,15 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
                }
        }
 
+       spin_lock_irq(&phba->hbalock);
+       lpfc_cmd->pCmd = NULL;
+       spin_unlock_irq(&phba->hbalock);
+
        /*
         * If there is a thread waiting for command completion
         * wake up the thread.
         */
        spin_lock_irqsave(shost->host_lock, flags);
-       lpfc_cmd->pCmd = NULL;
        if (lpfc_cmd->waitq)
                wake_up(lpfc_cmd->waitq);
        spin_unlock_irqrestore(shost->host_lock, flags);
@@ -3919,6 +3925,8 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
        struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
        int datadir = scsi_cmnd->sc_data_direction;
        char tag[2];
+       uint8_t *ptr;
+       bool sli4;
 
        if (!pnode || !NLP_CHK_NODE_ACT(pnode))
                return;
@@ -3930,8 +3938,13 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
        int_to_scsilun(lpfc_cmd->pCmd->device->lun,
                        &lpfc_cmd->fcp_cmnd->fcp_lun);
 
-       memset(&fcp_cmnd->fcpCdb[0], 0, LPFC_FCP_CDB_LEN);
-       memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
+       ptr = &fcp_cmnd->fcpCdb[0];
+       memcpy(ptr, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
+       if (scsi_cmnd->cmd_len < LPFC_FCP_CDB_LEN) {
+               ptr += scsi_cmnd->cmd_len;
+               memset(ptr, 0, (LPFC_FCP_CDB_LEN - scsi_cmnd->cmd_len));
+       }
+
        if (scsi_populate_tag_msg(scsi_cmnd, tag)) {
                switch (tag[0]) {
                case HEAD_OF_QUEUE_TAG:
@@ -3947,6 +3960,8 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
        } else
                fcp_cmnd->fcpCntl1 = 0;
 
+       sli4 = (phba->sli_rev == LPFC_SLI_REV4);
+
        /*
         * There are three possibilities here - use scatter-gather segment, use
         * the single mapping, or neither.  Start the lpfc command prep by
@@ -3956,11 +3971,12 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
        if (scsi_sg_count(scsi_cmnd)) {
                if (datadir == DMA_TO_DEVICE) {
                        iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
-                       if (phba->sli_rev < LPFC_SLI_REV4) {
+                       if (sli4)
+                               iocb_cmd->ulpPU = PARM_READ_CHECK;
+                       else {
                                iocb_cmd->un.fcpi.fcpi_parm = 0;
                                iocb_cmd->ulpPU = 0;
-                       } else
-                               iocb_cmd->ulpPU = PARM_READ_CHECK;
+                       }
                        fcp_cmnd->fcpCntl3 = WRITE_DATA;
                        phba->fc4OutputRequests++;
                } else {
@@ -3984,7 +4000,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
         * of the scsi_cmnd request_buffer
         */
        piocbq->iocb.ulpContext = pnode->nlp_rpi;
-       if (phba->sli_rev == LPFC_SLI_REV4)
+       if (sli4)
                piocbq->iocb.ulpContext =
                  phba->sli4_hba.rpi_ids[pnode->nlp_rpi];
        if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
@@ -4241,9 +4257,8 @@ void lpfc_poll_timeout(unsigned long ptr)
  *   SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily.
  **/
 static int
-lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
+lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
 {
-       struct Scsi_Host  *shost = cmnd->device->host;
        struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
        struct lpfc_hba   *phba = vport->phba;
        struct lpfc_rport_data *rdata = cmnd->device->hostdata;
@@ -4299,53 +4314,28 @@ lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
        lpfc_cmd->timeout = 0;
        lpfc_cmd->start_time = jiffies;
        cmnd->host_scribble = (unsigned char *)lpfc_cmd;
-       cmnd->scsi_done = done;
 
        if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
                if (vport->phba->cfg_enable_bg) {
-                       lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
-                               "9033 BLKGRD: rcvd protected cmd:%02x op=%s "
-                               "guard=%s\n", cmnd->cmnd[0],
-                               dif_op_str[scsi_get_prot_op(cmnd)],
-                               dif_grd_str[scsi_host_get_guard(shost)]);
-                       if (cmnd->cmnd[0] == READ_10)
-                               lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
-                                       "9035 BLKGRD: READ @ sector %llu, "
-                                       "cnt %u, rpt %d\n",
-                                       (unsigned long long)scsi_get_lba(cmnd),
-                                       blk_rq_sectors(cmnd->request),
-                                       (cmnd->cmnd[1]>>5));
-                       else if (cmnd->cmnd[0] == WRITE_10)
-                               lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
-                                       "9036 BLKGRD: WRITE @ sector %llu, "
-                                       "cnt %u, wpt %d\n",
-                                       (unsigned long long)scsi_get_lba(cmnd),
-                                       blk_rq_sectors(cmnd->request),
-                                       (cmnd->cmnd[1]>>5));
+                       lpfc_printf_vlog(vport, KERN_INFO, LOG_BG,
+                                        "9033 BLKGRD: rcvd %s cmd:x%x "
+                                        "sector x%llx cnt %u pt %x\n",
+                                        dif_op_str[scsi_get_prot_op(cmnd)],
+                                        cmnd->cmnd[0],
+                                        (unsigned long long)scsi_get_lba(cmnd),
+                                        blk_rq_sectors(cmnd->request),
+                                        (cmnd->cmnd[1]>>5));
                }
-
                err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
        } else {
                if (vport->phba->cfg_enable_bg) {
-                       lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
-                               "9038 BLKGRD: rcvd unprotected cmd:"
-                               "%02x op=%s guard=%s\n", cmnd->cmnd[0],
-                               dif_op_str[scsi_get_prot_op(cmnd)],
-                               dif_grd_str[scsi_host_get_guard(shost)]);
-                       if (cmnd->cmnd[0] == READ_10)
-                               lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
-                                       "9040 dbg: READ @ sector %llu, "
-                                       "cnt %u, rpt %d\n",
-                                       (unsigned long long)scsi_get_lba(cmnd),
+                       lpfc_printf_vlog(vport, KERN_INFO, LOG_BG,
+                                        "9038 BLKGRD: rcvd PROT_NORMAL cmd: "
+                                        "x%x sector x%llx cnt %u pt %x\n",
+                                        cmnd->cmnd[0],
+                                        (unsigned long long)scsi_get_lba(cmnd),
                                         blk_rq_sectors(cmnd->request),
-                                       (cmnd->cmnd[1]>>5));
-                       else if (cmnd->cmnd[0] == WRITE_10)
-                               lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
-                                       "9041 dbg: WRITE @ sector %llu, "
-                                       "cnt %u, wpt %d\n",
-                                       (unsigned long long)scsi_get_lba(cmnd),
-                                       blk_rq_sectors(cmnd->request),
-                                       (cmnd->cmnd[1]>>5));
+                                        (cmnd->cmnd[1]>>5));
                }
                err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
        }
@@ -4363,11 +4353,9 @@ lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
                goto out_host_busy_free_buf;
        }
        if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
-               spin_unlock(shost->host_lock);
                lpfc_sli_handle_fast_ring_event(phba,
                        &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
 
-               spin_lock(shost->host_lock);
                if (phba->cfg_poll & DISABLE_FCP_RING_INT)
                        lpfc_poll_rearm_timer(phba);
        }
@@ -4384,11 +4372,10 @@ lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
        return SCSI_MLQUEUE_TARGET_BUSY;
 
  out_fail_command:
-       done(cmnd);
+       cmnd->scsi_done(cmnd);
        return 0;
 }
 
-static DEF_SCSI_QCMD(lpfc_queuecommand)
 
 /**
  * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point
@@ -4414,7 +4401,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
        DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
 
        status = fc_block_scsi_eh(cmnd);
-       if (status)
+       if (status != 0 && status != SUCCESS)
                return status;
 
        spin_lock_irq(&phba->hbalock);
@@ -4428,7 +4415,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
        }
 
        lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
-       if (!lpfc_cmd) {
+       if (!lpfc_cmd || !lpfc_cmd->pCmd) {
                spin_unlock_irq(&phba->hbalock);
                lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
                         "2873 SCSI Layer I/O Abort Request IO CMPL Status "
@@ -4521,9 +4508,10 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
                ret = FAILED;
                lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
                                 "0748 abort handler timed out waiting "
-                                "for abort to complete: ret %#x, ID %d, "
-                                "LUN %d\n",
-                                ret, cmnd->device->id, cmnd->device->lun);
+                                "for abortng I/O (xri:x%x) to complete: "
+                                "ret %#x, ID %d, LUN %d\n",
+                                iocb->sli4_xritag, ret,
+                                cmnd->device->id, cmnd->device->lun);
        }
        goto out;
 
@@ -4769,7 +4757,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
        }
        pnode = rdata->pnode;
        status = fc_block_scsi_eh(cmnd);
-       if (status)
+       if (status != 0 && status != SUCCESS)
                return status;
 
        status = lpfc_chk_tgt_mapped(vport, cmnd);
@@ -4836,7 +4824,7 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
        }
        pnode = rdata->pnode;
        status = fc_block_scsi_eh(cmnd);
-       if (status)
+       if (status != 0 && status != SUCCESS)
                return status;
 
        status = lpfc_chk_tgt_mapped(vport, cmnd);
@@ -4904,7 +4892,7 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
                sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
 
        status = fc_block_scsi_eh(cmnd);
-       if (status)
+       if (status != 0 && status != SUCCESS)
                return status;
 
        /*
index 0e7e144507b23ff1fc20a2bf284ad1f1243bb393..219bf534ef9934584cdd3c24365c8766040ba75d 100644 (file)
@@ -69,6 +69,8 @@ static int lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *, struct lpfc_queue *,
                                    struct lpfc_cqe *);
 static int lpfc_sli4_post_els_sgl_list(struct lpfc_hba *, struct list_head *,
                                       int);
+static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *, struct lpfc_eqe *,
+                       uint32_t);
 
 static IOCB_t *
 lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
@@ -94,6 +96,7 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
        union lpfc_wqe *temp_wqe;
        struct lpfc_register doorbell;
        uint32_t host_index;
+       uint32_t idx;
 
        /* sanity check on queue memory */
        if (unlikely(!q))
@@ -101,8 +104,12 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
        temp_wqe = q->qe[q->host_index].wqe;
 
        /* If the host has not yet processed the next entry then we are done */
-       if (((q->host_index + 1) % q->entry_count) == q->hba_index)
+       idx = ((q->host_index + 1) % q->entry_count);
+       if (idx == q->hba_index) {
+               q->WQ_overflow++;
                return -ENOMEM;
+       }
+       q->WQ_posted++;
        /* set consumption flag every once in a while */
        if (!((q->host_index + 1) % q->entry_repost))
                bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
@@ -112,7 +119,8 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
 
        /* Update the host index before invoking device */
        host_index = q->host_index;
-       q->host_index = ((q->host_index + 1) % q->entry_count);
+
+       q->host_index = idx;
 
        /* Ring Doorbell */
        doorbell.word0 = 0;
@@ -120,7 +128,6 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
        bf_set(lpfc_wq_doorbell_index, &doorbell, host_index);
        bf_set(lpfc_wq_doorbell_id, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.WQDBregaddr);
-       readl(q->phba->sli4_hba.WQDBregaddr); /* Flush */
 
        return 0;
 }
@@ -194,7 +201,6 @@ lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
        bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
        bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
-       readl(q->phba->sli4_hba.MQDBregaddr); /* Flush */
        return 0;
 }
 
@@ -234,6 +240,7 @@ static struct lpfc_eqe *
 lpfc_sli4_eq_get(struct lpfc_queue *q)
 {
        struct lpfc_eqe *eqe;
+       uint32_t idx;
 
        /* sanity check on queue memory */
        if (unlikely(!q))
@@ -244,13 +251,33 @@ lpfc_sli4_eq_get(struct lpfc_queue *q)
        if (!bf_get_le32(lpfc_eqe_valid, eqe))
                return NULL;
        /* If the host has not yet processed the next entry then we are done */
-       if (((q->hba_index + 1) % q->entry_count) == q->host_index)
+       idx = ((q->hba_index + 1) % q->entry_count);
+       if (idx == q->host_index)
                return NULL;
 
-       q->hba_index = ((q->hba_index + 1) % q->entry_count);
+       q->hba_index = idx;
        return eqe;
 }
 
+/**
+ * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ
+ * @q: The Event Queue to disable interrupts
+ *
+ **/
+static inline void
+lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
+{
+       struct lpfc_register doorbell;
+
+       doorbell.word0 = 0;
+       bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
+       bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
+       bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
+               (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
+       bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
+       writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
+}
+
 /**
  * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ
  * @q: The Event Queue that the host has completed processing for.
@@ -318,6 +345,7 @@ static struct lpfc_cqe *
 lpfc_sli4_cq_get(struct lpfc_queue *q)
 {
        struct lpfc_cqe *cqe;
+       uint32_t idx;
 
        /* sanity check on queue memory */
        if (unlikely(!q))
@@ -327,11 +355,12 @@ lpfc_sli4_cq_get(struct lpfc_queue *q)
        if (!bf_get_le32(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
                return NULL;
        /* If the host has not yet processed the next entry then we are done */
-       if (((q->hba_index + 1) % q->entry_count) == q->host_index)
+       idx = ((q->hba_index + 1) % q->entry_count);
+       if (idx == q->host_index)
                return NULL;
 
        cqe = q->qe[q->hba_index].cqe;
-       q->hba_index = ((q->hba_index + 1) % q->entry_count);
+       q->hba_index = idx;
        return cqe;
 }
 
@@ -472,8 +501,8 @@ lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
 static inline IOCB_t *
 lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
 {
-       return (IOCB_t *) (((char *) pring->cmdringaddr) +
-                          pring->cmdidx * phba->iocb_cmd_size);
+       return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
+                          pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
 }
 
 /**
@@ -489,8 +518,8 @@ lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
 static inline IOCB_t *
 lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
 {
-       return (IOCB_t *) (((char *) pring->rspringaddr) +
-                          pring->rspidx * phba->iocb_rsp_size);
+       return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
+                          pring->sli.sli3.rspidx * phba->iocb_rsp_size);
 }
 
 /**
@@ -1320,21 +1349,23 @@ static IOCB_t *
 lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
 {
        struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
-       uint32_t  max_cmd_idx = pring->numCiocb;
-       if ((pring->next_cmdidx == pring->cmdidx) &&
-          (++pring->next_cmdidx >= max_cmd_idx))
-               pring->next_cmdidx = 0;
+       uint32_t  max_cmd_idx = pring->sli.sli3.numCiocb;
+       if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
+          (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
+               pring->sli.sli3.next_cmdidx = 0;
 
-       if (unlikely(pring->local_getidx == pring->next_cmdidx)) {
+       if (unlikely(pring->sli.sli3.local_getidx ==
+               pring->sli.sli3.next_cmdidx)) {
 
-               pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
+               pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
 
-               if (unlikely(pring->local_getidx >= max_cmd_idx)) {
+               if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                                        "0315 Ring %d issue: portCmdGet %d "
                                        "is bigger than cmd ring %d\n",
                                        pring->ringno,
-                                       pring->local_getidx, max_cmd_idx);
+                                       pring->sli.sli3.local_getidx,
+                                       max_cmd_idx);
 
                        phba->link_state = LPFC_HBA_ERROR;
                        /*
@@ -1349,7 +1380,7 @@ lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
                        return NULL;
                }
 
-               if (pring->local_getidx == pring->next_cmdidx)
+               if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
                        return NULL;
        }
 
@@ -1484,8 +1515,8 @@ lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
         * Let the HBA know what IOCB slot will be the next one the
         * driver will put a command into.
         */
-       pring->cmdidx = pring->next_cmdidx;
-       writel(pring->cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
+       pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx;
+       writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
 }
 
 /**
@@ -2056,6 +2087,7 @@ lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
        case MBX_READ_EVENT_LOG:
        case MBX_SECURITY_MGMT:
        case MBX_AUTH_PORT:
+       case MBX_ACCESS_VDATA:
                ret = mbxCommand;
                break;
        default:
@@ -2786,7 +2818,7 @@ lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
                        "0312 Ring %d handler: portRspPut %d "
                        "is bigger than rsp ring %d\n",
                        pring->ringno, le32_to_cpu(pgp->rspPutInx),
-                       pring->numRiocb);
+                       pring->sli.sli3.numRiocb);
 
        phba->link_state = LPFC_HBA_ERROR;
 
@@ -2815,10 +2847,26 @@ lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
 void lpfc_poll_eratt(unsigned long ptr)
 {
        struct lpfc_hba *phba;
-       uint32_t eratt = 0;
+       uint32_t eratt = 0, rem;
+       uint64_t sli_intr, cnt;
 
        phba = (struct lpfc_hba *)ptr;
 
+       /* Here we will also keep track of interrupts per sec of the hba */
+       sli_intr = phba->sli.slistat.sli_intr;
+
+       if (phba->sli.slistat.sli_prev_intr > sli_intr)
+               cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) +
+                       sli_intr);
+       else
+               cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);
+
+       /* 64-bit integer division not supported on 32-bit x86 - use do_div */
+       rem = do_div(cnt, LPFC_ERATT_POLL_INTERVAL);
+       phba->sli.slistat.sli_ips = cnt;
+
+       phba->sli.slistat.sli_prev_intr = sli_intr;
+
        /* Check chip HA register for error event */
        eratt = lpfc_sli_check_eratt(phba);
 
@@ -2873,7 +2921,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
         * The next available response entry should never exceed the maximum
         * entries.  If it does, treat it as an adapter hardware error.
         */
-       portRspMax = pring->numRiocb;
+       portRspMax = pring->sli.sli3.numRiocb;
        portRspPut = le32_to_cpu(pgp->rspPutInx);
        if (unlikely(portRspPut >= portRspMax)) {
                lpfc_sli_rsp_pointers_error(phba, pring);
@@ -2887,7 +2935,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
                phba->fcp_ring_in_use = 1;
 
        rmb();
-       while (pring->rspidx != portRspPut) {
+       while (pring->sli.sli3.rspidx != portRspPut) {
                /*
                 * Fetch an entry off the ring and copy it into a local data
                 * structure.  The copy involves a byte-swap since the
@@ -2896,8 +2944,8 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
                entry = lpfc_resp_iocb(phba, pring);
                phba->last_completion_time = jiffies;
 
-               if (++pring->rspidx >= portRspMax)
-                       pring->rspidx = 0;
+               if (++pring->sli.sli3.rspidx >= portRspMax)
+                       pring->sli.sli3.rspidx = 0;
 
                lpfc_sli_pcimem_bcopy((uint32_t *) entry,
                                      (uint32_t *) &rspiocbq.iocb,
@@ -2915,7 +2963,8 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
                         * queuedepths of the SCSI device.
                         */
                        if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
-                               (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
+                           ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
+                            IOERR_NO_RESOURCES)) {
                                spin_unlock_irqrestore(&phba->hbalock, iflag);
                                phba->lpfc_rampdown_queue_depth(phba);
                                spin_lock_irqsave(&phba->hbalock, iflag);
@@ -2998,9 +3047,10 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
                 * been updated, sync the pgp->rspPutInx and fetch the new port
                 * response put pointer.
                 */
-               writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
+               writel(pring->sli.sli3.rspidx,
+                       &phba->host_gp[pring->ringno].rspGetInx);
 
-               if (pring->rspidx == portRspPut)
+               if (pring->sli.sli3.rspidx == portRspPut)
                        portRspPut = le32_to_cpu(pgp->rspPutInx);
        }
 
@@ -3015,7 +3065,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
                pring->stats.iocb_cmd_empty++;
 
                /* Force update of the local copy of cmdGetInx */
-               pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
+               pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
                lpfc_sli_resume_iocb(phba, pring);
 
                if ((pring->lpfc_sli_cmd_available))
@@ -3086,7 +3136,8 @@ lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                 * queuedepths of the SCSI device.
                 */
                if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
-                   (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
+                   ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
+                    IOERR_NO_RESOURCES)) {
                        spin_unlock_irqrestore(&phba->hbalock, iflag);
                        phba->lpfc_rampdown_queue_depth(phba);
                        spin_lock_irqsave(&phba->hbalock, iflag);
@@ -3247,7 +3298,7 @@ lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
         * The next available response entry should never exceed the maximum
         * entries.  If it does, treat it as an adapter hardware error.
         */
-       portRspMax = pring->numRiocb;
+       portRspMax = pring->sli.sli3.numRiocb;
        portRspPut = le32_to_cpu(pgp->rspPutInx);
        if (portRspPut >= portRspMax) {
                /*
@@ -3269,7 +3320,7 @@ lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
        }
 
        rmb();
-       while (pring->rspidx != portRspPut) {
+       while (pring->sli.sli3.rspidx != portRspPut) {
                /*
                 * Build a completion list and call the appropriate handler.
                 * The process is to get the next available response iocb, get
@@ -3297,8 +3348,8 @@ lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
                                      phba->iocb_rsp_size);
                irsp = &rspiocbp->iocb;
 
-               if (++pring->rspidx >= portRspMax)
-                       pring->rspidx = 0;
+               if (++pring->sli.sli3.rspidx >= portRspMax)
+                       pring->sli.sli3.rspidx = 0;
 
                if (pring->ringno == LPFC_ELS_RING) {
                        lpfc_debugfs_slow_ring_trc(phba,
@@ -3308,7 +3359,8 @@ lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
                                *(((uint32_t *) irsp) + 7));
                }
 
-               writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
+               writel(pring->sli.sli3.rspidx,
+                       &phba->host_gp[pring->ringno].rspGetInx);
 
                spin_unlock_irqrestore(&phba->hbalock, iflag);
                /* Handle the response IOCB */
@@ -3320,10 +3372,10 @@ lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
                 * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
                 * response put pointer.
                 */
-               if (pring->rspidx == portRspPut) {
+               if (pring->sli.sli3.rspidx == portRspPut) {
                        portRspPut = le32_to_cpu(pgp->rspPutInx);
                }
-       } /* while (pring->rspidx != portRspPut) */
+       } /* while (pring->sli.sli3.rspidx != portRspPut) */
 
        if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
                /* At least one response entry has been freed */
@@ -3338,7 +3390,7 @@ lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
                pring->stats.iocb_cmd_empty++;
 
                /* Force update of the local copy of cmdGetInx */
-               pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
+               pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
                lpfc_sli_resume_iocb(phba, pring);
 
                if ((pring->lpfc_sli_cmd_available))
@@ -3859,10 +3911,10 @@ lpfc_sli_brdreset(struct lpfc_hba *phba)
        for (i = 0; i < psli->num_rings; i++) {
                pring = &psli->ring[i];
                pring->flag = 0;
-               pring->rspidx = 0;
-               pring->next_cmdidx  = 0;
-               pring->local_getidx = 0;
-               pring->cmdidx = 0;
+               pring->sli.sli3.rspidx = 0;
+               pring->sli.sli3.next_cmdidx  = 0;
+               pring->sli.sli3.local_getidx = 0;
+               pring->sli.sli3.cmdidx = 0;
                pring->missbufcnt = 0;
        }
 
@@ -4893,16 +4945,15 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
        lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
        fcp_eqidx = 0;
        if (phba->sli4_hba.fcp_cq) {
-               do
+               do {
                        lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx],
                                             LPFC_QUEUE_REARM);
-               while (++fcp_eqidx < phba->cfg_fcp_eq_count);
+               } while (++fcp_eqidx < phba->cfg_fcp_io_channel);
        }
-       lpfc_sli4_eq_release(phba->sli4_hba.sp_eq, LPFC_QUEUE_REARM);
-       if (phba->sli4_hba.fp_eq) {
-               for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count;
+       if (phba->sli4_hba.hba_eq) {
+               for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel;
                     fcp_eqidx++)
-                       lpfc_sli4_eq_release(phba->sli4_hba.fp_eq[fcp_eqidx],
+                       lpfc_sli4_eq_release(phba->sli4_hba.hba_eq[fcp_eqidx],
                                             LPFC_QUEUE_REARM);
        }
 }
@@ -7784,14 +7835,18 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
  *
  * Return: index into SLI4 fast-path FCP queue index.
  **/
-static uint32_t
+static inline uint32_t
 lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba)
 {
-       ++phba->fcp_qidx;
-       if (phba->fcp_qidx >= phba->cfg_fcp_wq_count)
-               phba->fcp_qidx = 0;
+       int i;
+
+       if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU)
+               i = smp_processor_id();
+       else
+               i = atomic_add_return(1, &phba->fcp_qidx);
 
-       return phba->fcp_qidx;
+       i = (i % phba->cfg_fcp_io_channel);
+       return i;
 }
 
 /**
@@ -8311,16 +8366,6 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
 
        if ((piocb->iocb_flag & LPFC_IO_FCP) ||
                (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
-               /*
-                * For FCP command IOCB, get a new WQ index to distribute
-                * WQE across the WQsr. On the other hand, for abort IOCB,
-                * it carries the same WQ index to the original command
-                * IOCB.
-                */
-               if (piocb->iocb_flag & LPFC_IO_FCP)
-                       piocb->fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba);
-               if (unlikely(!phba->sli4_hba.fcp_wq))
-                       return IOCB_ERROR;
                if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx],
                                     &wqe))
                        return IOCB_ERROR;
@@ -8401,13 +8446,68 @@ int
 lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
                    struct lpfc_iocbq *piocb, uint32_t flag)
 {
+       struct lpfc_fcp_eq_hdl *fcp_eq_hdl;
+       struct lpfc_sli_ring *pring;
+       struct lpfc_queue *fpeq;
+       struct lpfc_eqe *eqe;
        unsigned long iflags;
-       int rc;
+       int rc, idx;
 
-       spin_lock_irqsave(&phba->hbalock, iflags);
-       rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
-       spin_unlock_irqrestore(&phba->hbalock, iflags);
+       if (phba->sli_rev == LPFC_SLI_REV4) {
+               if (piocb->iocb_flag &  LPFC_IO_FCP) {
+                       if (unlikely(!phba->sli4_hba.fcp_wq))
+                               return IOCB_ERROR;
+                       idx = lpfc_sli4_scmd_to_wqidx_distr(phba);
+                       piocb->fcp_wqidx = idx;
+                       ring_number = MAX_SLI3_CONFIGURED_RINGS + idx;
+
+                       pring = &phba->sli.ring[ring_number];
+                       spin_lock_irqsave(&pring->ring_lock, iflags);
+                       rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb,
+                               flag);
+                       spin_unlock_irqrestore(&pring->ring_lock, iflags);
+
+                       if (lpfc_fcp_look_ahead) {
+                               fcp_eq_hdl = &phba->sli4_hba.fcp_eq_hdl[idx];
+
+                               if (atomic_dec_and_test(&fcp_eq_hdl->
+                                       fcp_eq_in_use)) {
 
+                                       /* Get associated EQ with this index */
+                                       fpeq = phba->sli4_hba.hba_eq[idx];
+
+                                       /* Turn off interrupts from this EQ */
+                                       lpfc_sli4_eq_clr_intr(fpeq);
+
+                                       /*
+                                        * Process all the events on FCP EQ
+                                        */
+                                       while ((eqe = lpfc_sli4_eq_get(fpeq))) {
+                                               lpfc_sli4_hba_handle_eqe(phba,
+                                                       eqe, idx);
+                                               fpeq->EQ_processed++;
+                                       }
+
+                                       /* Always clear and re-arm the EQ */
+                                       lpfc_sli4_eq_release(fpeq,
+                                               LPFC_QUEUE_REARM);
+                               }
+                               atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
+                       }
+               } else {
+                       pring = &phba->sli.ring[ring_number];
+                       spin_lock_irqsave(&pring->ring_lock, iflags);
+                       rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb,
+                               flag);
+                       spin_unlock_irqrestore(&pring->ring_lock, iflags);
+
+               }
+       } else {
+               /* For now, SLI2/3 will still use hbalock */
+               spin_lock_irqsave(&phba->hbalock, iflags);
+               rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
+               spin_unlock_irqrestore(&phba->hbalock, iflags);
+       }
        return rc;
 }
 
@@ -8434,18 +8534,18 @@ lpfc_extra_ring_setup( struct lpfc_hba *phba)
 
        /* Take some away from the FCP ring */
        pring = &psli->ring[psli->fcp_ring];
-       pring->numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
-       pring->numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
-       pring->numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
-       pring->numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
+       pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
+       pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
+       pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
+       pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
 
        /* and give them to the extra ring */
        pring = &psli->ring[psli->extra_ring];
 
-       pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
-       pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
-       pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
-       pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
+       pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
+       pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
+       pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
+       pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
 
        /* Setup default profile for this ring */
        pring->iotag_max = 4096;
@@ -8457,56 +8557,6 @@ lpfc_extra_ring_setup( struct lpfc_hba *phba)
        return 0;
 }
 
-/* lpfc_sli_abts_recover_port - Recover a port that failed an ABTS.
- * @vport: pointer to virtual port object.
- * @ndlp: nodelist pointer for the impacted rport.
- *
- * The driver calls this routine in response to a XRI ABORT CQE
- * event from the port.  In this event, the driver is required to
- * recover its login to the rport even though its login may be valid
- * from the driver's perspective.  The failed ABTS notice from the
- * port indicates the rport is not responding.
- */
-static void
-lpfc_sli_abts_recover_port(struct lpfc_vport *vport,
-                          struct lpfc_nodelist *ndlp)
-{
-       struct Scsi_Host *shost;
-       struct lpfc_hba *phba;
-       unsigned long flags = 0;
-
-       shost = lpfc_shost_from_vport(vport);
-       phba = vport->phba;
-       if (ndlp->nlp_state != NLP_STE_MAPPED_NODE) {
-               lpfc_printf_log(phba, KERN_INFO,
-                       LOG_SLI, "3093 No rport recovery needed. "
-                       "rport in state 0x%x\n",
-                       ndlp->nlp_state);
-               return;
-       }
-       lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-                       "3094 Start rport recovery on shost id 0x%x "
-                       "fc_id 0x%06x vpi 0x%x rpi 0x%x state 0x%x "
-                       "flags 0x%x\n",
-                       shost->host_no, ndlp->nlp_DID,
-                       vport->vpi, ndlp->nlp_rpi, ndlp->nlp_state,
-                       ndlp->nlp_flag);
-       /*
-        * The rport is not responding.  Don't attempt ADISC recovery.
-        * Remove the FCP-2 flag to force a PLOGI.
-        */
-       spin_lock_irqsave(shost->host_lock, flags);
-       ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
-       spin_unlock_irqrestore(shost->host_lock, flags);
-       lpfc_disc_state_machine(vport, ndlp, NULL,
-                               NLP_EVT_DEVICE_RECOVERY);
-       lpfc_cancel_retry_delay_tmo(vport, ndlp);
-       spin_lock_irqsave(shost->host_lock, flags);
-       ndlp->nlp_flag |= NLP_NPR_2B_DISC;
-       spin_unlock_irqrestore(shost->host_lock, flags);
-       lpfc_disc_start(vport);
-}
-
 /* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port.
  * @phba: Pointer to HBA context object.
  * @iocbq: Pointer to iocb object.
@@ -8594,7 +8644,7 @@ lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
         * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and
         * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT.
         */
-       ext_status = axri->parameter & WCQE_PARAM_MASK;
+       ext_status = axri->parameter & IOERR_PARAM_MASK;
        if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
            ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
                lpfc_sli_abts_recover_port(vport, ndlp);
@@ -8692,7 +8742,9 @@ lpfc_sli_setup(struct lpfc_hba *phba)
        struct lpfc_sli *psli = &phba->sli;
        struct lpfc_sli_ring *pring;
 
-       psli->num_rings = MAX_CONFIGURED_RINGS;
+       psli->num_rings = MAX_SLI3_CONFIGURED_RINGS;
+       if (phba->sli_rev == LPFC_SLI_REV4)
+               psli->num_rings += phba->cfg_fcp_io_channel;
        psli->sli_flag = 0;
        psli->fcp_ring = LPFC_FCP_RING;
        psli->next_ring = LPFC_FCP_NEXT_RING;
@@ -8707,16 +8759,20 @@ lpfc_sli_setup(struct lpfc_hba *phba)
                switch (i) {
                case LPFC_FCP_RING:     /* ring 0 - FCP */
                        /* numCiocb and numRiocb are used in config_port */
-                       pring->numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
-                       pring->numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
-                       pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
-                       pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
-                       pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
-                       pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
-                       pring->sizeCiocb = (phba->sli_rev == 3) ?
+                       pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
+                       pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
+                       pring->sli.sli3.numCiocb +=
+                               SLI2_IOCB_CMD_R1XTRA_ENTRIES;
+                       pring->sli.sli3.numRiocb +=
+                               SLI2_IOCB_RSP_R1XTRA_ENTRIES;
+                       pring->sli.sli3.numCiocb +=
+                               SLI2_IOCB_CMD_R3XTRA_ENTRIES;
+                       pring->sli.sli3.numRiocb +=
+                               SLI2_IOCB_RSP_R3XTRA_ENTRIES;
+                       pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
                                                        SLI3_IOCB_CMD_SIZE :
                                                        SLI2_IOCB_CMD_SIZE;
-                       pring->sizeRiocb = (phba->sli_rev == 3) ?
+                       pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
                                                        SLI3_IOCB_RSP_SIZE :
                                                        SLI2_IOCB_RSP_SIZE;
                        pring->iotag_ctr = 0;
@@ -8727,12 +8783,12 @@ lpfc_sli_setup(struct lpfc_hba *phba)
                        break;
                case LPFC_EXTRA_RING:   /* ring 1 - EXTRA */
                        /* numCiocb and numRiocb are used in config_port */
-                       pring->numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
-                       pring->numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
-                       pring->sizeCiocb = (phba->sli_rev == 3) ?
+                       pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
+                       pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
+                       pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
                                                        SLI3_IOCB_CMD_SIZE :
                                                        SLI2_IOCB_CMD_SIZE;
-                       pring->sizeRiocb = (phba->sli_rev == 3) ?
+                       pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
                                                        SLI3_IOCB_RSP_SIZE :
                                                        SLI2_IOCB_RSP_SIZE;
                        pring->iotag_max = phba->cfg_hba_queue_depth;
@@ -8740,12 +8796,12 @@ lpfc_sli_setup(struct lpfc_hba *phba)
                        break;
                case LPFC_ELS_RING:     /* ring 2 - ELS / CT */
                        /* numCiocb and numRiocb are used in config_port */
-                       pring->numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
-                       pring->numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
-                       pring->sizeCiocb = (phba->sli_rev == 3) ?
+                       pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
+                       pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
+                       pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
                                                        SLI3_IOCB_CMD_SIZE :
                                                        SLI2_IOCB_CMD_SIZE;
-                       pring->sizeRiocb = (phba->sli_rev == 3) ?
+                       pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
                                                        SLI3_IOCB_RSP_SIZE :
                                                        SLI2_IOCB_RSP_SIZE;
                        pring->fast_iotag = 0;
@@ -8786,8 +8842,9 @@ lpfc_sli_setup(struct lpfc_hba *phba)
                            lpfc_sli4_ct_abort_unsol_event;
                        break;
                }
-               totiocbsize += (pring->numCiocb * pring->sizeCiocb) +
-                               (pring->numRiocb * pring->sizeRiocb);
+               totiocbsize += (pring->sli.sli3.numCiocb *
+                       pring->sli.sli3.sizeCiocb) +
+                       (pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
        }
        if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
                /* Too many cmd / rsp ring entries in SLI2 SLIM */
@@ -8828,14 +8885,15 @@ lpfc_sli_queue_setup(struct lpfc_hba *phba)
        for (i = 0; i < psli->num_rings; i++) {
                pring = &psli->ring[i];
                pring->ringno = i;
-               pring->next_cmdidx  = 0;
-               pring->local_getidx = 0;
-               pring->cmdidx = 0;
+               pring->sli.sli3.next_cmdidx  = 0;
+               pring->sli.sli3.local_getidx = 0;
+               pring->sli.sli3.cmdidx = 0;
                INIT_LIST_HEAD(&pring->txq);
                INIT_LIST_HEAD(&pring->txcmplq);
                INIT_LIST_HEAD(&pring->iocb_continueq);
                INIT_LIST_HEAD(&pring->iocb_continue_saveq);
                INIT_LIST_HEAD(&pring->postbufq);
+               spin_lock_init(&pring->ring_lock);
        }
        spin_unlock_irq(&phba->hbalock);
        return 1;
@@ -9334,6 +9392,7 @@ lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
        IOCB_t *icmd = NULL;
        IOCB_t *iabt = NULL;
        int retval;
+       unsigned long iflags;
 
        /*
         * There are certain command types we don't want to abort.  And we
@@ -9386,7 +9445,17 @@ lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                         iabt->un.acxri.abortIoTag,
                         iabt->un.acxri.abortContextTag,
                         abtsiocbp->iotag);
-       retval = __lpfc_sli_issue_iocb(phba, pring->ringno, abtsiocbp, 0);
+
+       if (phba->sli_rev == LPFC_SLI_REV4) {
+               /* Note: both hbalock and ring_lock need to be set here */
+               spin_lock_irqsave(&pring->ring_lock, iflags);
+               retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
+                       abtsiocbp, 0);
+               spin_unlock_irqrestore(&pring->ring_lock, iflags);
+       } else {
+               retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
+                       abtsiocbp, 0);
+       }
 
        if (retval)
                __lpfc_sli_release_iocbq(phba, abtsiocbp);
@@ -10947,12 +11016,12 @@ lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
        unsigned long iflags;
 
        wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
-       spin_lock_irqsave(&phba->hbalock, iflags);
+       spin_lock_irqsave(&pring->ring_lock, iflags);
        pring->stats.iocb_event++;
        /* Look up the ELS command IOCB and create pseudo response IOCB */
        cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
                                bf_get(lpfc_wcqe_c_request_tag, wcqe));
-       spin_unlock_irqrestore(&phba->hbalock, iflags);
+       spin_unlock_irqrestore(&pring->ring_lock, iflags);
 
        if (unlikely(!cmdiocbq)) {
                lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
@@ -11154,6 +11223,7 @@ lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
 /**
  * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
  * @phba: Pointer to HBA context object.
+ * @cq: Pointer to associated CQ
  * @wcqe: Pointer to work-queue completion queue entry.
  *
  * This routine handles an ELS work-queue completion event.
@@ -11161,12 +11231,12 @@ lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
  * Return: true if work posted to worker thread, otherwise false.
  **/
 static bool
-lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba,
+lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
                             struct lpfc_wcqe_complete *wcqe)
 {
        struct lpfc_iocbq *irspiocbq;
        unsigned long iflags;
-       struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_FCP_RING];
+       struct lpfc_sli_ring *pring = cq->pring;
 
        /* Get an irspiocbq for later ELS response processing use */
        irspiocbq = lpfc_sli_get_iocbq(phba);
@@ -11311,14 +11381,17 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
        case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                                "2537 Receive Frame Truncated!!\n");
+               hrq->RQ_buf_trunc++;
        case FC_STATUS_RQ_SUCCESS:
                lpfc_sli4_rq_release(hrq, drq);
                spin_lock_irqsave(&phba->hbalock, iflags);
                dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
                if (!dma_buf) {
+                       hrq->RQ_no_buf_found++;
                        spin_unlock_irqrestore(&phba->hbalock, iflags);
                        goto out;
                }
+               hrq->RQ_rcv_buf++;
                memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
+               /* save off the frame for the worker thread to process */
                list_add_tail(&dma_buf->cq_event.list,
@@ -11330,6 +11403,7 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
                break;
        case FC_STATUS_INSUFF_BUF_NEED_BUF:
        case FC_STATUS_INSUFF_BUF_FRM_DISC:
+               hrq->RQ_no_posted_buf++;
                /* Post more buffers if possible */
                spin_lock_irqsave(&phba->hbalock, iflags);
                phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
@@ -11367,7 +11441,7 @@ lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
        case CQE_CODE_COMPL_WQE:
                /* Process the WQ/RQ complete event */
                phba->last_completion_time = jiffies;
-               workposted = lpfc_sli4_sp_handle_els_wcqe(phba,
+               workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq,
                                (struct lpfc_wcqe_complete *)&cqevt);
                break;
        case CQE_CODE_RELEASE_WQE:
@@ -11411,31 +11485,18 @@ lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
  *
  **/
 static void
-lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
+lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
+       struct lpfc_queue *speq)
 {
-       struct lpfc_queue *cq = NULL, *childq, *speq;
+       struct lpfc_queue *cq = NULL, *childq;
        struct lpfc_cqe *cqe;
        bool workposted = false;
        int ecount = 0;
        uint16_t cqid;
 
-       if (bf_get_le32(lpfc_eqe_major_code, eqe) != 0) {
-               lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-                               "0359 Not a valid slow-path completion "
-                               "event: majorcode=x%x, minorcode=x%x\n",
-                               bf_get_le32(lpfc_eqe_major_code, eqe),
-                               bf_get_le32(lpfc_eqe_minor_code, eqe));
-               return;
-       }
-
        /* Get the reference to the corresponding CQ */
        cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
 
-       /* Search for completion queue pointer matching this cqid */
-       speq = phba->sli4_hba.sp_eq;
-       /* sanity check on queue memory */
-       if (unlikely(!speq))
-               return;
        list_for_each_entry(childq, &speq->child_list, list) {
                if (childq->queue_id == cqid) {
                        cq = childq;
@@ -11457,6 +11518,7 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
                        workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe);
                        if (!(++ecount % cq->entry_repost))
                                lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
+                       cq->CQ_mbox++;
                }
                break;
        case LPFC_WCQ:
@@ -11470,6 +11532,10 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
                        if (!(++ecount % cq->entry_repost))
                                lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
                }
+
+               /* Track the max number of CQEs processed in 1 EQ */
+               if (ecount > cq->CQ_max_cqe)
+                       cq->CQ_max_cqe = ecount;
                break;
        default:
                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@@ -11494,34 +11560,33 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
 
 /**
  * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
- * @eqe: Pointer to fast-path completion queue entry.
+ * @phba: Pointer to HBA context object.
+ * @cq: Pointer to associated CQ
+ * @wcqe: Pointer to work-queue completion queue entry.
  *
  * This routine process a fast-path work queue completion entry from fast-path
  * event queue for FCP command response completion.
  **/
 static void
-lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba,
+lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
                             struct lpfc_wcqe_complete *wcqe)
 {
-       struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_FCP_RING];
+       struct lpfc_sli_ring *pring = cq->pring;
        struct lpfc_iocbq *cmdiocbq;
        struct lpfc_iocbq irspiocbq;
        unsigned long iflags;
 
-       spin_lock_irqsave(&phba->hbalock, iflags);
-       pring->stats.iocb_event++;
-       spin_unlock_irqrestore(&phba->hbalock, iflags);
-
        /* Check for response status */
        if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
                /* If resource errors reported from HBA, reduce queue
                 * depth of the SCSI device.
                 */
-               if ((bf_get(lpfc_wcqe_c_status, wcqe) ==
-                    IOSTAT_LOCAL_REJECT) &&
-                   (wcqe->parameter == IOERR_NO_RESOURCES)) {
+               if (((bf_get(lpfc_wcqe_c_status, wcqe) ==
+                    IOSTAT_LOCAL_REJECT)) &&
+                   ((wcqe->parameter & IOERR_PARAM_MASK) ==
+                    IOERR_NO_RESOURCES))
                        phba->lpfc_rampdown_queue_depth(phba);
-               }
+
                /* Log the error status */
                lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
                                "0373 FCP complete error: status=x%x, "
@@ -11534,10 +11599,11 @@ lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba,
        }
 
        /* Look up the FCP command IOCB and create pseudo response IOCB */
-       spin_lock_irqsave(&phba->hbalock, iflags);
+       spin_lock_irqsave(&pring->ring_lock, iflags);
+       pring->stats.iocb_event++;
        cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
                                bf_get(lpfc_wcqe_c_request_tag, wcqe));
-       spin_unlock_irqrestore(&phba->hbalock, iflags);
+       spin_unlock_irqrestore(&pring->ring_lock, iflags);
        if (unlikely(!cmdiocbq)) {
                lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
                                "0374 FCP complete with no corresponding "
@@ -11621,17 +11687,20 @@ lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
        /* Check and process for different type of WCQE and dispatch */
        switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
        case CQE_CODE_COMPL_WQE:
+               cq->CQ_wq++;
                /* Process the WQ complete event */
                phba->last_completion_time = jiffies;
-               lpfc_sli4_fp_handle_fcp_wcqe(phba,
+               lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
                                (struct lpfc_wcqe_complete *)&wcqe);
                break;
        case CQE_CODE_RELEASE_WQE:
+               cq->CQ_release_wqe++;
                /* Process the WQ release event */
                lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
                                (struct lpfc_wcqe_release *)&wcqe);
                break;
        case CQE_CODE_XRI_ABORTED:
+               cq->CQ_xri_aborted++;
                /* Process the WQ XRI abort event */
                phba->last_completion_time = jiffies;
                workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
@@ -11647,7 +11716,7 @@ lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
 }
 
 /**
- * lpfc_sli4_fp_handle_eqe - Process a fast-path event queue entry
+ * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry
  * @phba: Pointer to HBA context object.
  * @eqe: Pointer to fast-path event queue entry.
  *
@@ -11659,8 +11728,8 @@ lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
  * completion queue, and then return.
  **/
 static void
-lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
-                       uint32_t fcp_cqidx)
+lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
+                       uint32_t qidx)
 {
        struct lpfc_queue *cq;
        struct lpfc_cqe *cqe;
@@ -11670,30 +11739,38 @@ lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
 
        if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-                               "0366 Not a valid fast-path completion "
+                               "0366 Not a valid completion "
                                "event: majorcode=x%x, minorcode=x%x\n",
                                bf_get_le32(lpfc_eqe_major_code, eqe),
                                bf_get_le32(lpfc_eqe_minor_code, eqe));
                return;
        }
 
+       /* Get the reference to the corresponding CQ */
+       cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
+
+       /* Check if this is a Slow path event */
+       if (unlikely(cqid != phba->sli4_hba.fcp_cq_map[qidx])) {
+               lpfc_sli4_sp_handle_eqe(phba, eqe,
+                       phba->sli4_hba.hba_eq[qidx]);
+               return;
+       }
+
        if (unlikely(!phba->sli4_hba.fcp_cq)) {
                lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
                                "3146 Fast-path completion queues "
                                "does not exist\n");
                return;
        }
-       cq = phba->sli4_hba.fcp_cq[fcp_cqidx];
+       cq = phba->sli4_hba.fcp_cq[qidx];
        if (unlikely(!cq)) {
                if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
                        lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                                        "0367 Fast-path completion queue "
-                                       "(%d) does not exist\n", fcp_cqidx);
+                                       "(%d) does not exist\n", qidx);
                return;
        }
 
-       /* Get the reference to the corresponding CQ */
-       cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
        if (unlikely(cqid != cq->queue_id)) {
                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                                "0368 Miss-matched fast-path completion "
@@ -11709,6 +11786,10 @@ lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
                        lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
        }
 
+       /* Track the max number of CQEs processed in 1 EQ */
+       if (ecount > cq->CQ_max_cqe)
+               cq->CQ_max_cqe = ecount;
+
        /* Catch the no cq entry condition */
        if (unlikely(ecount == 0))
                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@@ -11737,86 +11818,7 @@ lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
 }
 
 /**
- * lpfc_sli4_sp_intr_handler - Slow-path interrupt handler to SLI-4 device
- * @irq: Interrupt number.
- * @dev_id: The device context pointer.
- *
- * This function is directly called from the PCI layer as an interrupt
- * service routine when device with SLI-4 interface spec is enabled with
- * MSI-X multi-message interrupt mode and there are slow-path events in
- * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
- * interrupt mode, this function is called as part of the device-level
- * interrupt handler. When the PCI slot is in error recovery or the HBA is
- * undergoing initialization, the interrupt handler will not process the
- * interrupt. The link attention and ELS ring attention events are handled
- * by the worker thread. The interrupt handler signals the worker thread
- * and returns for these events. This function is called without any lock
- * held. It gets the hbalock to access and update SLI data structures.
- *
- * This function returns IRQ_HANDLED when interrupt is handled else it
- * returns IRQ_NONE.
- **/
-irqreturn_t
-lpfc_sli4_sp_intr_handler(int irq, void *dev_id)
-{
-       struct lpfc_hba *phba;
-       struct lpfc_queue *speq;
-       struct lpfc_eqe *eqe;
-       unsigned long iflag;
-       int ecount = 0;
-
-       /*
-        * Get the driver's phba structure from the dev_id
-        */
-       phba = (struct lpfc_hba *)dev_id;
-
-       if (unlikely(!phba))
-               return IRQ_NONE;
-
-       /* Get to the EQ struct associated with this vector */
-       speq = phba->sli4_hba.sp_eq;
-       if (unlikely(!speq))
-               return IRQ_NONE;
-
-       /* Check device state for handling interrupt */
-       if (unlikely(lpfc_intr_state_check(phba))) {
-               /* Check again for link_state with lock held */
-               spin_lock_irqsave(&phba->hbalock, iflag);
-               if (phba->link_state < LPFC_LINK_DOWN)
-                       /* Flush, clear interrupt, and rearm the EQ */
-                       lpfc_sli4_eq_flush(phba, speq);
-               spin_unlock_irqrestore(&phba->hbalock, iflag);
-               return IRQ_NONE;
-       }
-
-       /*
-        * Process all the event on FCP slow-path EQ
-        */
-       while ((eqe = lpfc_sli4_eq_get(speq))) {
-               lpfc_sli4_sp_handle_eqe(phba, eqe);
-               if (!(++ecount % speq->entry_repost))
-                       lpfc_sli4_eq_release(speq, LPFC_QUEUE_NOARM);
-       }
-
-       /* Always clear and re-arm the slow-path EQ */
-       lpfc_sli4_eq_release(speq, LPFC_QUEUE_REARM);
-
-       /* Catch the no cq entry condition */
-       if (unlikely(ecount == 0)) {
-               if (phba->intr_type == MSIX)
-                       /* MSI-X treated interrupt served as no EQ share INT */
-                       lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
-                                       "0357 MSI-X interrupt with no EQE\n");
-               else
-                       /* Non MSI-X treated on interrupt as EQ share INT */
-                       return IRQ_NONE;
-       }
-
-       return IRQ_HANDLED;
-} /* lpfc_sli4_sp_intr_handler */
-
-/**
- * lpfc_sli4_fp_intr_handler - Fast-path interrupt handler to SLI-4 device
+ * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
  * @irq: Interrupt number.
  * @dev_id: The device context pointer.
  *
@@ -11833,11 +11835,16 @@ lpfc_sli4_sp_intr_handler(int irq, void *dev_id)
  * the FCP EQ to FCP CQ are one-to-one map such that the FCP EQ index is
  * equal to that of FCP CQ index.
  *
+ * The link attention and ELS ring attention events are handled
+ * by the worker thread. The interrupt handler signals the worker thread
+ * and returns for these events. This function is called without any lock
+ * held. It gets the hbalock to access and update SLI data structures.
+ *
  * This function returns IRQ_HANDLED when interrupt is handled else it
  * returns IRQ_NONE.
  **/
 irqreturn_t
-lpfc_sli4_fp_intr_handler(int irq, void *dev_id)
+lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
 {
        struct lpfc_hba *phba;
        struct lpfc_fcp_eq_hdl *fcp_eq_hdl;
@@ -11854,22 +11861,34 @@ lpfc_sli4_fp_intr_handler(int irq, void *dev_id)
 
        if (unlikely(!phba))
                return IRQ_NONE;
-       if (unlikely(!phba->sli4_hba.fp_eq))
+       if (unlikely(!phba->sli4_hba.hba_eq))
                return IRQ_NONE;
 
        /* Get to the EQ struct associated with this vector */
-       fpeq = phba->sli4_hba.fp_eq[fcp_eqidx];
+       fpeq = phba->sli4_hba.hba_eq[fcp_eqidx];
        if (unlikely(!fpeq))
                return IRQ_NONE;
 
+       if (lpfc_fcp_look_ahead) {
+               if (atomic_dec_and_test(&fcp_eq_hdl->fcp_eq_in_use))
+                       lpfc_sli4_eq_clr_intr(fpeq);
+               else {
+                       atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
+                       return IRQ_NONE;
+               }
+       }
+
        /* Check device state for handling interrupt */
        if (unlikely(lpfc_intr_state_check(phba))) {
+               fpeq->EQ_badstate++;
                /* Check again for link_state with lock held */
                spin_lock_irqsave(&phba->hbalock, iflag);
                if (phba->link_state < LPFC_LINK_DOWN)
                        /* Flush, clear interrupt, and rearm the EQ */
                        lpfc_sli4_eq_flush(phba, fpeq);
                spin_unlock_irqrestore(&phba->hbalock, iflag);
+               if (lpfc_fcp_look_ahead)
+                       atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
                return IRQ_NONE;
        }
 
@@ -11877,15 +11896,27 @@ lpfc_sli4_fp_intr_handler(int irq, void *dev_id)
         * Process all the event on FCP fast-path EQ
         */
        while ((eqe = lpfc_sli4_eq_get(fpeq))) {
-               lpfc_sli4_fp_handle_eqe(phba, eqe, fcp_eqidx);
+               lpfc_sli4_hba_handle_eqe(phba, eqe, fcp_eqidx);
                if (!(++ecount % fpeq->entry_repost))
                        lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM);
+               fpeq->EQ_processed++;
        }
 
+       /* Track the max number of EQEs processed in 1 intr */
+       if (ecount > fpeq->EQ_max_eqe)
+               fpeq->EQ_max_eqe = ecount;
+
        /* Always clear and re-arm the fast-path EQ */
        lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
 
        if (unlikely(ecount == 0)) {
+               fpeq->EQ_no_entry++;
+
+               if (lpfc_fcp_look_ahead) {
+                       atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
+                       return IRQ_NONE;
+               }
+
                if (phba->intr_type == MSIX)
                        /* MSI-X treated interrupt served as no EQ share INT */
                        lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
@@ -11895,6 +11926,8 @@ lpfc_sli4_fp_intr_handler(int irq, void *dev_id)
                        return IRQ_NONE;
        }
 
+       if (lpfc_fcp_look_ahead)
+               atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
        return IRQ_HANDLED;
 } /* lpfc_sli4_fp_intr_handler */
 
@@ -11919,8 +11952,8 @@ irqreturn_t
 lpfc_sli4_intr_handler(int irq, void *dev_id)
 {
        struct lpfc_hba  *phba;
-       irqreturn_t sp_irq_rc, fp_irq_rc;
-       bool fp_handled = false;
+       irqreturn_t hba_irq_rc;
+       bool hba_handled = false;
        uint32_t fcp_eqidx;
 
        /* Get the driver's phba structure from the dev_id */
@@ -11929,22 +11962,17 @@ lpfc_sli4_intr_handler(int irq, void *dev_id)
        if (unlikely(!phba))
                return IRQ_NONE;
 
-       /*
-        * Invokes slow-path host attention interrupt handling as appropriate.
-        */
-       sp_irq_rc = lpfc_sli4_sp_intr_handler(irq, dev_id);
-
        /*
         * Invoke fast-path host attention interrupt handling as appropriate.
         */
-       for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
-               fp_irq_rc = lpfc_sli4_fp_intr_handler(irq,
+       for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel; fcp_eqidx++) {
+               hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
                                        &phba->sli4_hba.fcp_eq_hdl[fcp_eqidx]);
-               if (fp_irq_rc == IRQ_HANDLED)
-                       fp_handled |= true;
+               if (hba_irq_rc == IRQ_HANDLED)
+                       hba_handled |= true;
        }
 
-       return (fp_handled == true) ? IRQ_HANDLED : sp_irq_rc;
+       return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
 } /* lpfc_sli4_intr_handler */
 
 /**
@@ -12075,7 +12103,7 @@ lpfc_modify_fcp_eq_delay(struct lpfc_hba *phba, uint16_t startq)
        union lpfc_sli4_cfg_shdr *shdr;
        uint16_t dmult;
 
-       if (startq >= phba->cfg_fcp_eq_count)
+       if (startq >= phba->cfg_fcp_io_channel)
                return 0;
 
        mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -12089,12 +12117,13 @@ lpfc_modify_fcp_eq_delay(struct lpfc_hba *phba, uint16_t startq)
        eq_delay = &mbox->u.mqe.un.eq_delay;
 
        /* Calculate delay multiper from maximum interrupt per second */
-       dmult = LPFC_DMULT_CONST/phba->cfg_fcp_imax - 1;
+       dmult = phba->cfg_fcp_imax / phba->cfg_fcp_io_channel;
+       dmult = LPFC_DMULT_CONST/dmult - 1;
 
        cnt = 0;
-       for (fcp_eqidx = startq; fcp_eqidx < phba->cfg_fcp_eq_count;
+       for (fcp_eqidx = startq; fcp_eqidx < phba->cfg_fcp_io_channel;
            fcp_eqidx++) {
-               eq = phba->sli4_hba.fp_eq[fcp_eqidx];
+               eq = phba->sli4_hba.hba_eq[fcp_eqidx];
                if (!eq)
                        continue;
                eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
index 2626f58c0747ac0e60d9a807bde5465b86c7dc71..2f48d000a3b430f5db5da07eb188ac8ba1c02b72 100644 (file)
@@ -131,7 +131,9 @@ typedef struct lpfcMboxq {
 
 #define LPFC_MAX_RING_MASK  5  /* max num of rctl/type masks allowed per
                                   ring */
-#define LPFC_MAX_RING       4  /* max num of SLI rings used by driver */
+#define LPFC_SLI3_MAX_RING  4  /* Max num of SLI3 rings used by driver.
+                                  For SLI4, an additional ring for each
+                                  FCP WQ will be allocated.  */
 
 struct lpfc_sli_ring;
 
@@ -158,6 +160,24 @@ struct lpfc_sli_ring_stat {
        uint64_t iocb_rsp_full;  /* IOCB rsp ring full */
 };
 
+struct lpfc_sli3_ring {
+       uint32_t local_getidx;  /* last available cmd index (from cmdGetInx) */
+       uint32_t next_cmdidx;   /* next_cmd index */
+       uint32_t rspidx;        /* current index in response ring */
+       uint32_t cmdidx;        /* current index in command ring */
+       uint16_t numCiocb;      /* number of command iocb's per ring */
+       uint16_t numRiocb;      /* number of rsp iocb's per ring */
+       uint16_t sizeCiocb;     /* Size of command iocb's in this ring */
+       uint16_t sizeRiocb;     /* Size of response iocb's in this ring */
+       uint32_t *cmdringaddr;  /* virtual address for cmd rings */
+       uint32_t *rspringaddr;  /* virtual address for rsp rings */
+};
+
+struct lpfc_sli4_ring {
+       struct lpfc_queue *wqp; /* Pointer to associated WQ */
+};
+
+
 /* Structure used to hold SLI ring information */
 struct lpfc_sli_ring {
        uint16_t flag;          /* ring flags */
@@ -166,16 +186,10 @@ struct lpfc_sli_ring {
 #define LPFC_STOP_IOCB_EVENT     0x020 /* Stop processing IOCB cmds event */
        uint16_t abtsiotag;     /* tracks next iotag to use for ABTS */
 
-       uint32_t local_getidx;   /* last available cmd index (from cmdGetInx) */
-       uint32_t next_cmdidx;    /* next_cmd index */
-       uint32_t rspidx;        /* current index in response ring */
-       uint32_t cmdidx;        /* current index in command ring */
        uint8_t rsvd;
        uint8_t ringno;         /* ring number */
-       uint16_t numCiocb;      /* number of command iocb's per ring */
-       uint16_t numRiocb;      /* number of rsp iocb's per ring */
-       uint16_t sizeCiocb;     /* Size of command iocb's in this ring */
-       uint16_t sizeRiocb;     /* Size of response iocb's in this ring */
+
+       spinlock_t ring_lock;   /* lock for issuing commands */
 
        uint32_t fast_iotag;    /* max fastlookup based iotag           */
        uint32_t iotag_ctr;     /* keeps track of the next iotag to use */
@@ -186,8 +200,6 @@ struct lpfc_sli_ring {
        struct list_head txcmplq;
        uint16_t txcmplq_cnt;   /* current length of queue */
        uint16_t txcmplq_max;   /* max length */
-       uint32_t *cmdringaddr;  /* virtual address for cmd rings */
-       uint32_t *rspringaddr;  /* virtual address for rsp rings */
        uint32_t missbufcnt;    /* keep track of buffers to post */
        struct list_head postbufq;
        uint16_t postbufq_cnt;  /* current length of queue */
@@ -207,6 +219,10 @@ struct lpfc_sli_ring {
        /* cmd ring available */
        void (*lpfc_sli_cmd_available) (struct lpfc_hba *,
                                        struct lpfc_sli_ring *);
+       union {
+               struct lpfc_sli3_ring sli3;
+               struct lpfc_sli4_ring sli4;
+       } sli;
 };
 
 /* Structure used for configuring rings to a specific profile or rctl / type */
@@ -239,6 +255,8 @@ struct lpfc_sli_stat {
        uint64_t mbox_stat_err;  /* Mbox cmds completed status error */
        uint64_t mbox_cmd;       /* Mailbox commands issued */
        uint64_t sli_intr;       /* Count of Host Attention interrupts */
+       uint64_t sli_prev_intr;  /* Previous cnt of Host Attention interrupts */
+       uint64_t sli_ips;        /* Host Attention interrupts per sec */
        uint32_t err_attn_event; /* Error Attn event counters */
        uint32_t link_event;     /* Link event counters */
        uint32_t mbox_event;     /* Mailbox event counters */
@@ -270,7 +288,7 @@ struct lpfc_sli {
 #define LPFC_MENLO_MAINT          0x1000 /* need for menl fw download */
 #define LPFC_SLI_ASYNC_MBX_BLK    0x2000 /* Async mailbox is blocked */
 
-       struct lpfc_sli_ring ring[LPFC_MAX_RING];
+       struct lpfc_sli_ring *ring;
        int fcp_ring;           /* ring used for FCP initiator commands */
        int next_ring;
 
index ec756118c5c14e0e13f0c7cb598d6247bc74cddd..bd4bc4342ae227b43824277985d1103b70a17695 100644 (file)
 /* Number of SGL entries can be posted in a 4KB nonembedded mbox command */
 #define LPFC_NEMBED_MBOX_SGL_CNT               254
 
-/* Multi-queue arrangement for fast-path FCP work queues */
-#define LPFC_FN_EQN_MAX       8
-#define LPFC_SP_EQN_DEF       1
-#define LPFC_FP_EQN_DEF       4
-#define LPFC_FP_EQN_MIN       1
-#define LPFC_FP_EQN_MAX       (LPFC_FN_EQN_MAX - LPFC_SP_EQN_DEF)
-
-#define LPFC_FN_WQN_MAX       32
-#define LPFC_SP_WQN_DEF       1
-#define LPFC_FP_WQN_DEF       4
-#define LPFC_FP_WQN_MIN       1
-#define LPFC_FP_WQN_MAX       (LPFC_FN_WQN_MAX - LPFC_SP_WQN_DEF)
+/* Multi-queue arrangement for FCP EQ/CQ/WQ tuples */
+#define LPFC_FCP_IO_CHAN_DEF       4
+#define LPFC_FCP_IO_CHAN_MIN       1
+#define LPFC_FCP_IO_CHAN_MAX       8
 
 /*
  * Provide the default FCF Record attributes used by the driver
@@ -141,6 +133,37 @@ struct lpfc_queue {
        uint32_t page_count;    /* Number of pages allocated for this queue */
        uint32_t host_index;    /* The host's index for putting or getting */
        uint32_t hba_index;     /* The last known hba index for get or put */
+
+       struct lpfc_sli_ring *pring; /* ptr to io ring associated with q */
+
+       /* For q stats */
+       uint32_t q_cnt_1;
+       uint32_t q_cnt_2;
+       uint32_t q_cnt_3;
+       uint64_t q_cnt_4;
+/* defines for EQ stats */
+#define        EQ_max_eqe              q_cnt_1
+#define        EQ_no_entry             q_cnt_2
+#define        EQ_badstate             q_cnt_3
+#define        EQ_processed            q_cnt_4
+
+/* defines for CQ stats */
+#define        CQ_mbox                 q_cnt_1
+#define        CQ_max_cqe              q_cnt_1
+#define        CQ_release_wqe          q_cnt_2
+#define        CQ_xri_aborted          q_cnt_3
+#define        CQ_wq                   q_cnt_4
+
+/* defines for WQ stats */
+#define        WQ_overflow             q_cnt_1
+#define        WQ_posted               q_cnt_4
+
+/* defines for RQ stats */
+#define        RQ_no_posted_buf        q_cnt_1
+#define        RQ_no_buf_found         q_cnt_2
+#define        RQ_buf_trunc            q_cnt_3
+#define        RQ_rcv_buf              q_cnt_4
+
        union sli4_qe qe[1];    /* array to index entries (must be last) */
 };
 
@@ -350,6 +373,7 @@ struct lpfc_hba;
 struct lpfc_fcp_eq_hdl {
        uint32_t idx;
        struct lpfc_hba *phba;
+       atomic_t fcp_eq_in_use;
 };
 
 /* Port Capabilities for SLI4 Parameters */
@@ -407,6 +431,8 @@ struct lpfc_sli4_lnk_info {
        uint8_t lnk_no;
 };
 
+#define LPFC_SLI4_HANDLER_NAME_SZ      16
+
 /* SLI4 HBA data structure entries */
 struct lpfc_sli4_hba {
        void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for
@@ -463,20 +489,23 @@ struct lpfc_sli4_hba {
        struct lpfc_register sli_intf;
        struct lpfc_pc_sli4_params pc_sli4_params;
        struct msix_entry *msix_entries;
+       uint8_t handler_name[LPFC_FCP_IO_CHAN_MAX][LPFC_SLI4_HANDLER_NAME_SZ];
        uint32_t cfg_eqn;
        uint32_t msix_vec_nr;
        struct lpfc_fcp_eq_hdl *fcp_eq_hdl; /* FCP per-WQ handle */
+
        /* Pointers to the constructed SLI4 queues */
-       struct lpfc_queue **fp_eq; /* Fast-path event queue */
-       struct lpfc_queue *sp_eq;  /* Slow-path event queue */
+       struct lpfc_queue **hba_eq;/* Event queues for HBA */
+       struct lpfc_queue **fcp_cq;/* Fast-path FCP compl queue */
        struct lpfc_queue **fcp_wq;/* Fast-path FCP work queue */
+       uint16_t *fcp_cq_map;
+
+       struct lpfc_queue *mbx_cq; /* Slow-path mailbox complete queue */
+       struct lpfc_queue *els_cq; /* Slow-path ELS response complete queue */
        struct lpfc_queue *mbx_wq; /* Slow-path MBOX work queue */
        struct lpfc_queue *els_wq; /* Slow-path ELS work queue */
        struct lpfc_queue *hdr_rq; /* Slow-path Header Receive queue */
        struct lpfc_queue *dat_rq; /* Slow-path Data Receive queue */
-       struct lpfc_queue **fcp_cq;/* Fast-path FCP compl queue */
-       struct lpfc_queue *mbx_cq; /* Slow-path mailbox complete queue */
-       struct lpfc_queue *els_cq; /* Slow-path ELS response complete queue */
 
        /* Setup information for various queue parameters */
        int eq_esize;
index 4704e5b5088e72fa1916ccdd9bba3972d00c85b8..04265a1c4e52e6e11cfaccea9b05fdcb0582bf34 100644 (file)
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "8.3.32"
+#define LPFC_DRIVER_VERSION "8.3.34"
 #define LPFC_DRIVER_NAME               "lpfc"
+
+/* Used for SLI 2/3 */
 #define LPFC_SP_DRIVER_HANDLER_NAME    "lpfc:sp"
 #define LPFC_FP_DRIVER_HANDLER_NAME    "lpfc:fp"
 
+/* Used for SLI4 */
+#define LPFC_DRIVER_HANDLER_NAME       "lpfc:"
+
 #define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \
                LPFC_DRIVER_VERSION
 #define LPFC_COPYRIGHT "Copyright(c) 2004-2009 Emulex.  All rights reserved."
index e8f89264768109f69b075d83ce019f40f8967471..fcb005fa4bd136480367d7fe92bdd55b30a5f7df 100644 (file)
@@ -33,9 +33,9 @@
 /*
  * MegaRAID SAS Driver meta data
  */
-#define MEGASAS_VERSION                                "00.00.06.15-rc1"
-#define MEGASAS_RELDATE                                "Mar. 19, 2012"
-#define MEGASAS_EXT_VERSION                    "Mon. Mar. 19 17:00:00 PDT 2012"
+#define MEGASAS_VERSION                                "00.00.06.18-rc1"
+#define MEGASAS_RELDATE                                "Jun. 17, 2012"
+#define MEGASAS_EXT_VERSION                    "Tue. Jun. 17 17:00:00 PDT 2012"
 
 /*
  * Device IDs
@@ -747,6 +747,7 @@ struct megasas_ctrl_info {
 #define        MEGASAS_RESET_NOTICE_INTERVAL           5
 #define MEGASAS_IOCTL_CMD                      0
 #define MEGASAS_DEFAULT_CMD_TIMEOUT            90
+#define MEGASAS_THROTTLE_QUEUE_DEPTH           16
 
 /*
  * FW reports the maximum of number of commands that it can accept (maximum
@@ -1364,6 +1365,7 @@ struct megasas_instance {
        unsigned long bar;
        long reset_flags;
        struct mutex reset_mutex;
+       int throttlequeuedepth;
 };
 
 enum {
index ed38454228c626bdabbea4d4265b22dfbc637864..0393ec478cdf41257aebbf2e7e2cee7a244583eb 100644 (file)
@@ -18,7 +18,7 @@
  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  *
  *  FILE: megaraid_sas_base.c
- *  Version : v00.00.06.15-rc1
+ *  Version : v00.00.06.18-rc1
  *
  *  Authors: LSI Corporation
  *           Sreenivas Bagalkote
@@ -71,6 +71,16 @@ static int msix_disable;
 module_param(msix_disable, int, S_IRUGO);
 MODULE_PARM_DESC(msix_disable, "Disable MSI-X interrupt handling. Default: 0");
 
+static int throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH;
+module_param(throttlequeuedepth, int, S_IRUGO);
+MODULE_PARM_DESC(throttlequeuedepth,
+       "Adapter queue depth when throttled due to I/O timeout. Default: 16");
+
+int resetwaittime = MEGASAS_RESET_WAIT_TIME;
+module_param(resetwaittime, int, S_IRUGO);
+MODULE_PARM_DESC(resetwaittime, "Wait time in seconds after I/O timeout "
+                "before resetting adapter. Default: 180");
+
 MODULE_LICENSE("GPL");
 MODULE_VERSION(MEGASAS_VERSION);
 MODULE_AUTHOR("megaraidlinux@lsi.com");
@@ -1595,8 +1605,9 @@ megasas_check_and_restore_queue_depth(struct megasas_instance *instance)
 {
        unsigned long flags;
        if (instance->flag & MEGASAS_FW_BUSY
-               && time_after(jiffies, instance->last_time + 5 * HZ)
-               && atomic_read(&instance->fw_outstanding) < 17) {
+           && time_after(jiffies, instance->last_time + 5 * HZ)
+           && atomic_read(&instance->fw_outstanding) <
+           instance->throttlequeuedepth + 1) {
 
                spin_lock_irqsave(instance->host->host_lock, flags);
                instance->flag &= ~MEGASAS_FW_BUSY;
@@ -1772,7 +1783,7 @@ static int megasas_wait_for_outstanding(struct megasas_instance *instance)
                return SUCCESS;
        }
 
-       for (i = 0; i < wait_time; i++) {
+       for (i = 0; i < resetwaittime; i++) {
 
                int outstanding = atomic_read(&instance->fw_outstanding);
 
@@ -1914,7 +1925,7 @@ blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
                /* FW is busy, throttle IO */
                spin_lock_irqsave(instance->host->host_lock, flags);
 
-               instance->host->can_queue = 16;
+               instance->host->can_queue = instance->throttlequeuedepth;
                instance->last_time = jiffies;
                instance->flag |= MEGASAS_FW_BUSY;
 
@@ -3577,6 +3588,24 @@ static int megasas_init_fw(struct megasas_instance *instance)
 
        kfree(ctrl_info);
 
+       /* Check for valid throttlequeuedepth module parameter */
+       if (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY ||
+           instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) {
+               if (throttlequeuedepth > (instance->max_fw_cmds -
+                                         MEGASAS_SKINNY_INT_CMDS))
+                       instance->throttlequeuedepth =
+                               MEGASAS_THROTTLE_QUEUE_DEPTH;
+               else
+                       instance->throttlequeuedepth = throttlequeuedepth;
+       } else {
+               if (throttlequeuedepth > (instance->max_fw_cmds -
+                                         MEGASAS_INT_CMDS))
+                       instance->throttlequeuedepth =
+                               MEGASAS_THROTTLE_QUEUE_DEPTH;
+               else
+                       instance->throttlequeuedepth = throttlequeuedepth;
+       }
+
         /*
        * Setup tasklet for cmd completion
        */
index a610cf1d48473301f60a805fc2636bdd3d0d7714..ddf094e7d0acf8b628745389ee87d206da80e4c4 100644 (file)
@@ -94,6 +94,7 @@ int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
 void megaraid_sas_kill_hba(struct megasas_instance *instance);
 
 extern u32 megasas_dbg_lvl;
+extern int resetwaittime;
 
 /**
  * megasas_enable_intr_fusion -        Enables interrupts
@@ -461,8 +462,8 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance)
         * Allocate the dynamic array first and then allocate individual
         * commands.
         */
-       fusion->cmd_list = kmalloc(sizeof(struct megasas_cmd_fusion *)
-                                  *max_cmd, GFP_KERNEL);
+       fusion->cmd_list = kzalloc(sizeof(struct megasas_cmd_fusion *)
+                                  * max_cmd, GFP_KERNEL);
 
        if (!fusion->cmd_list) {
                printk(KERN_DEBUG "megasas: out of memory. Could not alloc "
@@ -470,9 +471,6 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance)
                goto fail_cmd_list;
        }
 
-       memset(fusion->cmd_list, 0, sizeof(struct megasas_cmd_fusion *)
-              *max_cmd);
-
        max_cmd = instance->max_fw_cmds;
        for (i = 0; i < max_cmd; i++) {
                fusion->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd_fusion),
@@ -2063,9 +2061,9 @@ megasas_check_reset_fusion(struct megasas_instance *instance,
 int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance)
 {
        int i, outstanding, retval = 0;
-       u32 fw_state, wait_time = MEGASAS_RESET_WAIT_TIME;
+       u32 fw_state;
 
-       for (i = 0; i < wait_time; i++) {
+       for (i = 0; i < resetwaittime; i++) {
                /* Check if firmware is in fault state */
                fw_state = instance->instancet->read_fw_status_reg(
                        instance->reg_set) & MFI_STATE_MASK;
index bbb7e4bf30a3af61d764a2507734420e0e85828d..39f08dd20556ade6703cd9b47f99d685c64d2fc6 100644 (file)
@@ -2,7 +2,7 @@
 # Kernel configuration file for the MPT2SAS
 #
 # This code is based on drivers/scsi/mpt2sas/Kconfig
-# Copyright (C) 2007-2010  LSI Corporation
+# Copyright (C) 2007-2012  LSI Corporation
 #  (mailto:DL-MPTFusionLinux@lsi.com)
 
 # This program is free software; you can redistribute it and/or
index a80f3220c641b8e45e9bc558dfe8596dc1dd4c8b..e960f9625c78e8020424d0d9c42fcad534f19a1f 100644 (file)
@@ -1,5 +1,5 @@
 /*
- *  Copyright (c) 2000-2011 LSI Corporation.
+ *  Copyright (c) 2000-2012 LSI Corporation.
  *
  *
  *           Name:  mpi2.h
@@ -8,7 +8,7 @@
  *                  scatter/gather formats.
  *  Creation Date:  June 21, 2006
  *
- *  mpi2.h Version:  02.00.23
+ *  mpi2.h Version:  02.00.25
  *
  *  Version History
  *  ---------------
@@ -72,6 +72,9 @@
  *  05-25-11  02.00.21  Bumped MPI2_HEADER_VERSION_UNIT.
  *  08-24-11  02.00.22  Bumped MPI2_HEADER_VERSION_UNIT.
  *  11-18-11  02.00.23  Bumped MPI2_HEADER_VERSION_UNIT.
+ *  02-06-12  02.00.24  Bumped MPI2_HEADER_VERSION_UNIT.
+ *  03-29-12  02.00.25  Bumped MPI2_HEADER_VERSION_UNIT.
+ *                      Added Hard Reset delay timings.
  *  --------------------------------------------------------------------------
  */
 
 #define MPI2_VERSION_02_00                  (0x0200)
 
 /* versioning for this MPI header set */
-#define MPI2_HEADER_VERSION_UNIT            (0x17)
+#define MPI2_HEADER_VERSION_UNIT            (0x19)
 #define MPI2_HEADER_VERSION_DEV             (0x00)
 #define MPI2_HEADER_VERSION_UNIT_MASK       (0xFF00)
 #define MPI2_HEADER_VERSION_UNIT_SHIFT      (8)
@@ -275,6 +278,11 @@ typedef volatile struct _MPI2_SYSTEM_INTERFACE_REGS
 #define MPI2_REQUEST_DESCRIPTOR_POST_HIGH_OFFSET    (0x000000C4)
 
 
+/* Hard Reset delay timings */
+#define MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC     (50000)
+#define MPI2_HARD_RESET_PCIE_RESET_READ_WINDOW_MICRO_SEC    (255000)
+#define MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC    (256000)
+
 /*****************************************************************************
 *
 *        Message Descriptors
index de90162413c23e2c720df8c2eed8207acf58f235..38c5da398143a83aca39b6e09e1429a0b05177a8 100644 (file)
@@ -1,12 +1,12 @@
 /*
- *  Copyright (c) 2000-2010 LSI Corporation.
+ *  Copyright (c) 2000-2012 LSI Corporation.
  *
  *
  *           Name:  mpi2_init.h
  *          Title:  MPI SCSI initiator mode messages and structures
  *  Creation Date:  June 23, 2006
  *
- *    mpi2_init.h Version:  02.00.11
+ *    mpi2_init.h Version:  02.00.13
  *
  *  Version History
  *  ---------------
@@ -34,6 +34,8 @@
  *  02-10-10  02.00.09  Removed unused structure that had "#if 0" around it.
  *  05-12-10  02.00.10  Added optional vendor-unique region to SCSI IO Request.
  *  11-10-10  02.00.11  Added MPI2_SCSIIO_NUM_SGLOFFSETS define.
+ *  02-06-12  02.00.13  Added alternate defines for Task Priority / Command
+ *                      Priority to match SAM-4.
  *  --------------------------------------------------------------------------
  */
 
@@ -194,6 +196,9 @@ typedef struct _MPI2_SCSI_IO_REQUEST
 
 #define MPI2_SCSIIO_CONTROL_TASKPRI_MASK        (0x00007800)
 #define MPI2_SCSIIO_CONTROL_TASKPRI_SHIFT       (11)
+/* alternate name for the previous field; called Command Priority in SAM-4 */
+#define MPI2_SCSIIO_CONTROL_CMDPRI_MASK         (0x00007800)
+#define MPI2_SCSIIO_CONTROL_CMDPRI_SHIFT        (11)
 
 #define MPI2_SCSIIO_CONTROL_TASKATTRIBUTE_MASK  (0x00000700)
 #define MPI2_SCSIIO_CONTROL_SIMPLEQ             (0x00000000)
index 9a925c07a9ec0c6e4f6c13b71b2654efc7099e95..b0d4760bb17dd843ea1ecccfed650d9613b88d79 100644 (file)
@@ -1,12 +1,12 @@
 /*
- *  Copyright (c) 2000-2011 LSI Corporation.
+ *  Copyright (c) 2000-2012 LSI Corporation.
  *
  *
  *           Name:  mpi2_ioc.h
  *          Title:  MPI IOC, Port, Event, FW Download, and FW Upload messages
  *  Creation Date:  October 11, 2006
  *
- *  mpi2_ioc.h Version:  02.00.19
+ *  mpi2_ioc.h Version:  02.00.21
  *
  *  Version History
  *  ---------------
  *  08-24-11  02.00.19  Added PhysicalPort field to
  *                      MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE structure.
  *                      Marked MPI2_PM_CONTROL_FEATURE_PCIE_LINK as obsolete.
+ *  03-29-12  02.00.21  Added a product specific range to event values.
  *  --------------------------------------------------------------------------
  */
 
@@ -492,7 +493,8 @@ typedef struct _MPI2_EVENT_NOTIFICATION_REPLY
 #define MPI2_EVENT_SAS_NOTIFY_PRIMITIVE             (0x0026)
 #define MPI2_EVENT_TEMP_THRESHOLD                   (0x0027)
 #define MPI2_EVENT_HOST_MESSAGE                     (0x0028)
-
+#define MPI2_EVENT_MIN_PRODUCT_SPECIFIC             (0x006E)
+#define MPI2_EVENT_MAX_PRODUCT_SPECIFIC             (0x007F)
 
 /* Log Entry Added Event data */
 
index 0601612b875add889302ae2630919c6851672d5a..2b38af213beb079d8060d0c588cdafca73da670f 100644 (file)
@@ -1,12 +1,12 @@
 /*
- *  Copyright (c) 2000-2010 LSI Corporation.
+ *  Copyright (c) 2000-2012 LSI Corporation.
  *
  *
  *           Name:  mpi2_raid.h
  *          Title:  MPI Integrated RAID messages and structures
  *  Creation Date:  April 26, 2007
  *
- *    mpi2_raid.h Version:  02.00.06
+ *    mpi2_raid.h Version:  02.00.08
  *
  *  Version History
  *  ---------------
@@ -26,7 +26,7 @@
  *  08-24-10  02.00.06  Added MPI2_RAID_ACTION_COMPATIBILITY_CHECK along with
  *                      related structures and defines.
  *                      Added product-specific range to RAID Action values.
-
+ *  02-06-12  02.00.08  Added MPI2_RAID_ACTION_PHYSDISK_HIDDEN.
  *  --------------------------------------------------------------------------
  */
 
@@ -181,6 +181,7 @@ typedef struct _MPI2_RAID_ACTION_REQUEST
 #define MPI2_RAID_ACTION_START_RAID_FUNCTION        (0x21)
 #define MPI2_RAID_ACTION_STOP_RAID_FUNCTION         (0x22)
 #define MPI2_RAID_ACTION_COMPATIBILITY_CHECK        (0x23)
+#define MPI2_RAID_ACTION_PHYSDISK_HIDDEN            (0x24)
 #define MPI2_RAID_ACTION_MIN_PRODUCT_SPECIFIC       (0x80)
 #define MPI2_RAID_ACTION_MAX_PRODUCT_SPECIFIC       (0xFF)
 
index 9d5a56c4b3321deea9b9001468755d222fa13b51..ffd85c511c8e2925314d27a96f268f922d1db3aa 100644 (file)
@@ -3,7 +3,7 @@
  * for access to MPT (Message Passing Technology) firmware.
  *
  * This code is based on drivers/scsi/mpt2sas/mpt2_base.c
- * Copyright (C) 2007-2010  LSI Corporation
+ * Copyright (C) 2007-2012  LSI Corporation
  *  (mailto:DL-MPTFusionLinux@lsi.com)
  *
  * This program is free software; you can redistribute it and/or
@@ -1978,9 +1978,9 @@ _base_display_intel_branding(struct MPT2SAS_ADAPTER *ioc)
                        printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
                            MPT2SAS_INTEL_RMS2LL040_BRANDING);
                        break;
-               case MPT2SAS_INTEL_RAMSDALE_SSDID:
+               case MPT2SAS_INTEL_SSD910_SSDID:
                        printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
-                           MPT2SAS_INTEL_RAMSDALE_BRANDING);
+                           MPT2SAS_INTEL_SSD910_BRANDING);
                        break;
                default:
                        break;
index b3a1a30055d610419da7ef3389b0898db7154868..543d8d637479d4a983a80174412fe0f2f05e990a 100644 (file)
@@ -3,7 +3,7 @@
  * for access to MPT (Message Passing Technology) firmware.
  *
  * This code is based on drivers/scsi/mpt2sas/mpt2_base.h
- * Copyright (C) 2007-2010  LSI Corporation
+ * Copyright (C) 2007-2012  LSI Corporation
  *  (mailto:DL-MPTFusionLinux@lsi.com)
  *
  * This program is free software; you can redistribute it and/or
@@ -69,8 +69,8 @@
 #define MPT2SAS_DRIVER_NAME            "mpt2sas"
 #define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>"
 #define MPT2SAS_DESCRIPTION    "LSI MPT Fusion SAS 2.0 Device Driver"
-#define MPT2SAS_DRIVER_VERSION         "13.100.00.00"
-#define MPT2SAS_MAJOR_VERSION          13
+#define MPT2SAS_DRIVER_VERSION         "14.100.00.00"
+#define MPT2SAS_MAJOR_VERSION          14
 #define MPT2SAS_MINOR_VERSION          100
 #define MPT2SAS_BUILD_VERSION          00
 #define MPT2SAS_RELEASE_VERSION                00
                                "Intel Integrated RAID Module RMS2LL040"
 #define MPT2SAS_INTEL_RS25GB008_BRANDING       \
                                "Intel(R) RAID Controller RS25GB008"
-#define MPT2SAS_INTEL_RAMSDALE_BRANDING        \
-                               "Intel 720 Series SSD"
+#define MPT2SAS_INTEL_SSD910_BRANDING          \
+                               "Intel(R) SSD 910 Series"
 /*
  * Intel HBA SSDIDs
  */
 #define MPT2SAS_INTEL_RMS2LL080_SSDID          0x350E
 #define MPT2SAS_INTEL_RMS2LL040_SSDID          0x350F
 #define MPT2SAS_INTEL_RS25GB008_SSDID          0x3000
-#define MPT2SAS_INTEL_RAMSDALE_SSDID           0x3700
+#define MPT2SAS_INTEL_SSD910_SSDID             0x3700
 
 /*
  * HP HBA branding
@@ -1096,6 +1096,8 @@ int mpt2sas_config_get_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
     *mpi_reply, Mpi2IOUnitPage1_t *config_page);
 int mpt2sas_config_set_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
     *mpi_reply, Mpi2IOUnitPage1_t *config_page);
+int mpt2sas_config_get_iounit_pg3(struct MPT2SAS_ADAPTER *ioc,
+       Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage3_t *config_page, u16 sz);
 int mpt2sas_config_get_sas_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
     *mpi_reply, Mpi2SasIOUnitPage1_t *config_page, u16 sz);
 int mpt2sas_config_set_sas_iounit_pg1(struct MPT2SAS_ADAPTER *ioc,
index 2b4d37613d325416b606cee8a096e961a62ce42a..863778071a9dd3b25e85cc9201f9f659763aa393 100644 (file)
@@ -2,7 +2,7 @@
  * This module provides common API for accessing firmware configuration pages
  *
  * This code is based on drivers/scsi/mpt2sas/mpt2_base.c
- * Copyright (C) 2007-2010  LSI Corporation
+ * Copyright (C) 2007-2012  LSI Corporation
  *  (mailto:DL-MPTFusionLinux@lsi.com)
  *
  * This program is free software; you can redistribute it and/or
@@ -682,6 +682,42 @@ mpt2sas_config_set_iounit_pg1(struct MPT2SAS_ADAPTER *ioc,
        return r;
 }
 
+/**
+ * mpt2sas_config_get_iounit_pg3 - obtain iounit page 3
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @sz: size of buffer passed in config_page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt2sas_config_get_iounit_pg3(struct MPT2SAS_ADAPTER *ioc,
+       Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage3_t *config_page, u16 sz)
+{
+       Mpi2ConfigRequest_t mpi_request;
+       int r;
+
+       memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+       mpi_request.Function = MPI2_FUNCTION_CONFIG;
+       mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+       mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_IO_UNIT;
+       mpi_request.Header.PageNumber = 3;
+       mpi_request.Header.PageVersion = MPI2_IOUNITPAGE3_PAGEVERSION;
+       mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
+       r = _config_request(ioc, &mpi_request, mpi_reply,
+           MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+       if (r)
+               goto out;
+
+       mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+       r = _config_request(ioc, &mpi_request, mpi_reply,
+           MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz);
+ out:
+       return r;
+}
+
 /**
  * mpt2sas_config_get_ioc_pg8 - obtain ioc page 8
  * @ioc: per adapter object
index 49bdd2dc8452bb27c79644ec1b5e4a1f30e3ad9b..08685c4cf231b99c89c9eb3b91f1dcbc4641b508 100644 (file)
@@ -3,7 +3,7 @@
  * controllers
  *
  * This code is based on drivers/scsi/mpt2sas/mpt2_ctl.c
- * Copyright (C) 2007-2010  LSI Corporation
+ * Copyright (C) 2007-2012  LSI Corporation
  *  (mailto:DL-MPTFusionLinux@lsi.com)
  *
  * This program is free software; you can redistribute it and/or
@@ -2181,10 +2181,12 @@ _ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg,
                return -EAGAIN;
 
        state = (file->f_flags & O_NONBLOCK) ? NON_BLOCKING : BLOCKING;
-       if (state == NON_BLOCKING && !mutex_trylock(&ioc->ctl_cmds.mutex))
-               return -EAGAIN;
-       else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex))
+       if (state == NON_BLOCKING) {
+               if (!mutex_trylock(&ioc->ctl_cmds.mutex))
+                       return -EAGAIN;
+       } else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex)) {
                return -ERESTARTSYS;
+       }
 
        switch (cmd) {
        case MPT2IOCINFO:
@@ -2690,6 +2692,75 @@ _ctl_ioc_reply_queue_count_show(struct device *cdev,
 static DEVICE_ATTR(reply_queue_count, S_IRUGO,
         _ctl_ioc_reply_queue_count_show, NULL);
 
+/**
+ * _ctl_BRM_status_show - Backup Rail Monitor Status
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * This is number of reply queues
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_BRM_status_show(struct device *cdev, struct device_attribute *attr,
+       char *buf)
+{
+       struct Scsi_Host *shost = class_to_shost(cdev);
+       struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+       Mpi2IOUnitPage3_t *io_unit_pg3 = NULL;
+       Mpi2ConfigReply_t mpi_reply;
+       u16 backup_rail_monitor_status = 0;
+       u16 ioc_status;
+       int sz;
+       ssize_t rc = 0;
+
+       if (!ioc->is_warpdrive) {
+               printk(MPT2SAS_ERR_FMT "%s: BRM attribute is only for"\
+                   "warpdrive\n", ioc->name, __func__);
+               goto out;
+       }
+
+       /* allocate upto GPIOVal 36 entries */
+       sz = offsetof(Mpi2IOUnitPage3_t, GPIOVal) + (sizeof(u16) * 36);
+       io_unit_pg3 = kzalloc(sz, GFP_KERNEL);
+       if (!io_unit_pg3) {
+               printk(MPT2SAS_ERR_FMT "%s: failed allocating memory"\
+                   "for iounit_pg3: (%d) bytes\n", ioc->name, __func__, sz);
+               goto out;
+       }
+
+       if (mpt2sas_config_get_iounit_pg3(ioc, &mpi_reply, io_unit_pg3, sz) !=
+           0) {
+               printk(MPT2SAS_ERR_FMT
+                   "%s: failed reading iounit_pg3\n", ioc->name,
+                   __func__);
+               goto out;
+       }
+
+       ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
+       if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+               printk(MPT2SAS_ERR_FMT "%s: iounit_pg3 failed with"\
+                   "ioc_status(0x%04x)\n", ioc->name, __func__, ioc_status);
+               goto out;
+       }
+
+       if (io_unit_pg3->GPIOCount < 25) {
+               printk(MPT2SAS_ERR_FMT "%s: iounit_pg3->GPIOCount less than"\
+                    "25 entries, detected (%d) entries\n", ioc->name, __func__,
+                   io_unit_pg3->GPIOCount);
+               goto out;
+       }
+
+       /* BRM status is in bit zero of GPIOVal[24] */
+       backup_rail_monitor_status = le16_to_cpu(io_unit_pg3->GPIOVal[24]);
+       rc = snprintf(buf, PAGE_SIZE, "%d\n", (backup_rail_monitor_status & 1));
+
+ out:
+       kfree(io_unit_pg3);
+       return rc;
+}
+static DEVICE_ATTR(BRM_status, S_IRUGO, _ctl_BRM_status_show, NULL);
+
 struct DIAG_BUFFER_START {
        __le32 Size;
        __le32 DiagVersion;
@@ -2901,6 +2972,7 @@ struct device_attribute *mpt2sas_host_attrs[] = {
        &dev_attr_host_trace_buffer,
        &dev_attr_host_trace_buffer_enable,
        &dev_attr_reply_queue_count,
+       &dev_attr_BRM_status,
        NULL,
 };
 
index 11ff1d5fb8f0dfe6a1c691421c003c07f4ddef1e..b5eb0d1b8ea6517bb91209d4f1d69e746ebc010d 100644 (file)
@@ -3,7 +3,7 @@
  * controllers
  *
  * This code is based on drivers/scsi/mpt2sas/mpt2_ctl.h
- * Copyright (C) 2007-2010  LSI Corporation
+ * Copyright (C) 2007-2012  LSI Corporation
  *  (mailto:DL-MPTFusionLinux@lsi.com)
  *
  * This program is free software; you can redistribute it and/or
index 9731f8e661bf8a909237271946a02af2104cdefe..69cc7d0c112cb75f2df3533fb9c2eb49a9ad41f3 100644 (file)
@@ -2,7 +2,7 @@
  * Logging Support for MPT (Message Passing Technology) based controllers
  *
  * This code is based on drivers/scsi/mpt2sas/mpt2_debug.c
- * Copyright (C) 2007-2010  LSI Corporation
+ * Copyright (C) 2007-2012  LSI Corporation
  *  (mailto:DL-MPTFusionLinux@lsi.com)
  *
  * This program is free software; you can redistribute it and/or
index 1ccae45c52707eaa0bbaaef649115cc0ff93659a..af4e6c451b1b56239d26a87dd23d4835bac69153 100644 (file)
@@ -2,7 +2,7 @@
  * Scsi Host Layer for MPT (Message Passing Technology) based controllers
  *
  * This code is based on drivers/scsi/mpt2sas/mpt2_scsih.c
- * Copyright (C) 2007-2010  LSI Corporation
+ * Copyright (C) 2007-2012  LSI Corporation
  *  (mailto:DL-MPTFusionLinux@lsi.com)
  *
  * This program is free software; you can redistribute it and/or
@@ -119,6 +119,15 @@ module_param(diag_buffer_enable, int, 0);
 MODULE_PARM_DESC(diag_buffer_enable, " post diag buffers "
        "(TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)");
 
+static int disable_discovery = -1;
+module_param(disable_discovery, int, 0);
+MODULE_PARM_DESC(disable_discovery, " disable discovery ");
+
+/* permit overriding the host protection capabilities mask (EEDP/T10 PI) */
+static int prot_mask = 0;
+module_param(prot_mask, int, 0);
+MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=7 ");
+
 /**
  * struct sense_info - common structure for obtaining sense keys
  * @skey: sense key
@@ -3768,8 +3777,6 @@ static void
 _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
 {
        u8 ascq;
-       u8 sk;
-       u8 host_byte;
 
        switch (ioc_status) {
        case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
@@ -3786,16 +3793,8 @@ _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
                break;
        }
 
-       if (scmd->sc_data_direction == DMA_TO_DEVICE) {
-               sk = ILLEGAL_REQUEST;
-               host_byte = DID_ABORT;
-       } else {
-               sk = ABORTED_COMMAND;
-               host_byte = DID_OK;
-       }
-
-       scsi_build_sense_buffer(0, scmd->sense_buffer, sk, 0x10, ascq);
-       scmd->result = DRIVER_SENSE << 24 | (host_byte << 16) |
+       scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 0x10, ascq);
+       scmd->result = DRIVER_SENSE << 24 | (DID_ABORT << 16) |
            SAM_STAT_CHECK_CONDITION;
 }
 
@@ -5973,8 +5972,14 @@ _scsih_sas_discovery_event(struct MPT2SAS_ADAPTER *ioc,
 #endif
 
        if (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED &&
-           !ioc->sas_hba.num_phys)
+           !ioc->sas_hba.num_phys) {
+               if (disable_discovery > 0 && ioc->shost_recovery) {
+                       /* Wait for the reset to complete */
+                       while (ioc->shost_recovery)
+                               ssleep(1);
+               }
                _scsih_sas_host_add(ioc);
+       }
 }
 
 /**
@@ -7254,7 +7259,8 @@ mpt2sas_scsih_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
                _scsih_search_responding_sas_devices(ioc);
                _scsih_search_responding_raid_devices(ioc);
                _scsih_search_responding_expanders(ioc);
-               if (!ioc->is_driver_loading) {
+               if ((!ioc->is_driver_loading) && !(disable_discovery > 0 &&
+                   !ioc->sas_hba.num_phys)) {
                        _scsih_prep_device_scan(ioc);
                        _scsih_search_responding_sas_devices(ioc);
                        _scsih_search_responding_raid_devices(ioc);
@@ -7929,6 +7935,9 @@ _scsih_scan_start(struct Scsi_Host *shost)
        if (diag_buffer_enable != -1 && diag_buffer_enable != 0)
                mpt2sas_enable_diag_buffer(ioc, diag_buffer_enable);
 
+       if (disable_discovery > 0)
+               return;
+
        ioc->start_scan = 1;
        rc = mpt2sas_port_enable(ioc);
 
@@ -7950,6 +7959,12 @@ _scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
 {
        struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
 
+       if (disable_discovery > 0) {
+               ioc->is_driver_loading = 0;
+               ioc->wait_for_discovery_to_complete = 0;
+               return 1;
+       }
+
        if (time >= (300 * HZ)) {
                ioc->base_cmds.status = MPT2_CMD_NOT_USED;
                printk(MPT2SAS_INFO_FMT "port enable: FAILED with timeout "
@@ -8055,8 +8070,8 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        if (max_sectors != 0xFFFF) {
                if (max_sectors < 64) {
                        shost->max_sectors = 64;
-                       printk(MPT2SAS_WARN_FMT "Invalid value %d passed "
-                           "for max_sectors, range is 64 to 8192. Assigning "
+                       printk(MPT2SAS_WARN_FMT "Invalid value %d passed "\
+                           "for max_sectors, range is 64 to 32767. Assigning "\
                            "value of 64.\n", ioc->name, max_sectors);
                } else if (max_sectors > 32767) {
                        shost->max_sectors = 32767;
@@ -8078,8 +8093,14 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                goto out_add_shost_fail;
        }
 
-       scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION
-           | SHOST_DIF_TYPE2_PROTECTION | SHOST_DIF_TYPE3_PROTECTION);
+       /* register EEDP capabilities with SCSI layer */
+       if (prot_mask)
+               scsi_host_set_prot(shost, prot_mask);
+       else
+               scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION
+                                  | SHOST_DIF_TYPE2_PROTECTION
+                                  | SHOST_DIF_TYPE3_PROTECTION);
+
        scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
 
        /* event thread */
index c6cf20f60720bfc6dd2a59b96fe951ae6139c48a..8c2ffbe6af0f1dbd67e724507101eabe5e0ceca6 100644 (file)
@@ -2,7 +2,7 @@
  * SAS Transport Layer for MPT (Message Passing Technology) based controllers
  *
  * This code is based on drivers/scsi/mpt2sas/mpt2_transport.c
- * Copyright (C) 2007-2010  LSI Corporation
+ * Copyright (C) 2007-2012  LSI Corporation
  *  (mailto:DL-MPTFusionLinux@lsi.com)
  *
  * This program is free software; you can redistribute it and/or
index 4539d59a0857633f15f4c2edbcbcbbd82237f462..a3776d6ced60df7606b329f39ca3780914486741 100644 (file)
@@ -1629,7 +1629,7 @@ int mvs_abort_task(struct sas_task *task)
                        mv_dprintk("mvs_abort_task() mvi=%p task=%p "
                                   "slot=%p slot_idx=x%x\n",
                                   mvi, task, slot, slot_idx);
-                       mvs_tmf_timedout((unsigned long)task);
+                       task->task_state_flags |= SAS_TASK_STATE_ABORTED;
                        mvs_slot_task_free(mvi, task, slot, slot_idx);
                        rc = TMF_RESP_FUNC_COMPLETE;
                        goto out;
index ea8a0b47d66da53752a886b83c2b6bafc68f8135..af763eab2039022b997a572b324e6d9466a7449b 100644 (file)
@@ -5459,7 +5459,7 @@ static void __devexit pmcraid_remove(struct pci_dev *pdev)
        pmcraid_shutdown(pdev);
 
        pmcraid_disable_interrupts(pinstance, ~0);
-       flush_work_sync(&pinstance->worker_q);
+       flush_work(&pinstance->worker_q);
 
        pmcraid_kill_tasklets(pinstance);
        pmcraid_unregister_interrupt_handler(pinstance);
index 5ab953029f8d1412ee66057c641771bf69d765cd..1c28215f8bede329f4257838008f0625cd849f2f 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2011 QLogic Corporation
+ * Copyright (c)  2003-2012 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -26,7 +26,7 @@ qla2x00_sysfs_read_fw_dump(struct file *filp, struct kobject *kobj,
        struct qla_hw_data *ha = vha->hw;
        int rval = 0;
 
-       if (ha->fw_dump_reading == 0)
+       if (!(ha->fw_dump_reading || ha->mctp_dump_reading))
                return 0;
 
        if (IS_QLA82XX(ha)) {
@@ -39,9 +39,14 @@ qla2x00_sysfs_read_fw_dump(struct file *filp, struct kobject *kobj,
                rval = memory_read_from_buffer(buf, count,
                    &off, ha->md_dump, ha->md_dump_size);
                return rval;
-       } else
+       } else if (ha->mctp_dumped && ha->mctp_dump_reading)
+               return memory_read_from_buffer(buf, count, &off, ha->mctp_dump,
+                   MCTP_DUMP_SIZE);
+       else if (ha->fw_dump_reading)
                return memory_read_from_buffer(buf, count, &off, ha->fw_dump,
                                        ha->fw_dump_len);
+       else
+               return 0;
 }
 
 static ssize_t
@@ -107,6 +112,22 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
                if (IS_QLA82XX(ha))
                        set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
                break;
+       case 6:
+               if (!ha->mctp_dump_reading)
+                       break;
+               ql_log(ql_log_info, vha, 0x70c1,
+                   "MCTP dump cleared on (%ld).\n", vha->host_no);
+               ha->mctp_dump_reading = 0;
+               ha->mctp_dumped = 0;
+               break;
+       case 7:
+               if (ha->mctp_dumped && !ha->mctp_dump_reading) {
+                       ha->mctp_dump_reading = 1;
+                       ql_log(ql_log_info, vha, 0x70c2,
+                           "Raw mctp dump ready for read on (%ld).\n",
+                           vha->host_no);
+               }
+               break;
        }
        return count;
 }
@@ -564,6 +585,7 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
        struct qla_hw_data *ha = vha->hw;
        struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
        int type;
+       uint32_t idc_control;
 
        if (off != 0)
                return -EINVAL;
@@ -587,22 +609,36 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
                scsi_unblock_requests(vha->host);
                break;
        case 0x2025d:
-               if (!IS_QLA81XX(ha) || !IS_QLA8031(ha))
+               if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
                        return -EPERM;
 
                ql_log(ql_log_info, vha, 0x706f,
                    "Issuing MPI reset.\n");
 
-               /* Make sure FC side is not in reset */
-               qla2x00_wait_for_hba_online(vha);
-
-               /* Issue MPI reset */
-               scsi_block_requests(vha->host);
-               if (qla81xx_restart_mpi_firmware(vha) != QLA_SUCCESS)
-                       ql_log(ql_log_warn, vha, 0x7070,
-                           "MPI reset failed.\n");
-               scsi_unblock_requests(vha->host);
-               break;
+               if (IS_QLA83XX(ha)) {
+                       uint32_t idc_control;
+
+                       qla83xx_idc_lock(vha, 0);
+                       __qla83xx_get_idc_control(vha, &idc_control);
+                       idc_control |= QLA83XX_IDC_GRACEFUL_RESET;
+                       __qla83xx_set_idc_control(vha, idc_control);
+                       qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
+                           QLA8XXX_DEV_NEED_RESET);
+                       qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP);
+                       qla83xx_idc_unlock(vha, 0);
+                       break;
+               } else {
+                       /* Make sure FC side is not in reset */
+                       qla2x00_wait_for_hba_online(vha);
+
+                       /* Issue MPI reset */
+                       scsi_block_requests(vha->host);
+                       if (qla81xx_restart_mpi_firmware(vha) != QLA_SUCCESS)
+                               ql_log(ql_log_warn, vha, 0x7070,
+                                   "MPI reset failed.\n");
+                       scsi_unblock_requests(vha->host);
+                       break;
+               }
        case 0x2025e:
                if (!IS_QLA82XX(ha) || vha != base_vha) {
                        ql_log(ql_log_info, vha, 0x7071,
@@ -616,6 +652,29 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
                qla2xxx_wake_dpc(vha);
                qla2x00_wait_for_fcoe_ctx_reset(vha);
                break;
+       case 0x2025f:
+               if (!IS_QLA8031(ha))
+                       return -EPERM;
+               ql_log(ql_log_info, vha, 0x70bc,
+                   "Disabling Reset by IDC control\n");
+               qla83xx_idc_lock(vha, 0);
+               __qla83xx_get_idc_control(vha, &idc_control);
+               idc_control |= QLA83XX_IDC_RESET_DISABLED;
+               __qla83xx_set_idc_control(vha, idc_control);
+               qla83xx_idc_unlock(vha, 0);
+               break;
+       case 0x20260:
+               if (!IS_QLA8031(ha))
+                       return -EPERM;
+               ql_log(ql_log_info, vha, 0x70bd,
+                   "Enabling Reset by IDC control\n");
+               qla83xx_idc_lock(vha, 0);
+               __qla83xx_get_idc_control(vha, &idc_control);
+               idc_control &= ~QLA83XX_IDC_RESET_DISABLED;
+               __qla83xx_set_idc_control(vha, idc_control);
+               qla83xx_idc_unlock(vha, 0);
+               break;
+
        }
        return count;
 }
@@ -1251,6 +1310,49 @@ qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
            state[1], state[2], state[3], state[4]);
 }
 
+static ssize_t
+qla2x00_diag_requests_show(struct device *dev,
+       struct device_attribute *attr, char *buf)
+{
+       scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+
+       if (!IS_BIDI_CAPABLE(vha->hw))
+               return snprintf(buf, PAGE_SIZE, "\n");
+
+       return snprintf(buf, PAGE_SIZE, "%llu\n", vha->bidi_stats.io_count);
+}
+
+static ssize_t
+qla2x00_diag_megabytes_show(struct device *dev,
+       struct device_attribute *attr, char *buf)
+{
+       scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+
+       if (!IS_BIDI_CAPABLE(vha->hw))
+               return snprintf(buf, PAGE_SIZE, "\n");
+
+       return snprintf(buf, PAGE_SIZE, "%llu\n",
+           vha->bidi_stats.transfer_bytes >> 20);
+}
+
+static ssize_t
+qla2x00_fw_dump_size_show(struct device *dev, struct device_attribute *attr,
+       char *buf)
+{
+       scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+       struct qla_hw_data *ha = vha->hw;
+       uint32_t size;
+
+       if (!ha->fw_dumped)
+               size = 0;
+       else if (IS_QLA82XX(ha))
+               size = ha->md_template_size + ha->md_dump_size;
+       else
+               size = ha->fw_dump_len;
+
+       return snprintf(buf, PAGE_SIZE, "%d\n", size);
+}
+
 static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL);
 static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
 static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
@@ -1289,6 +1391,9 @@ static DEVICE_ATTR(vn_port_mac_address, S_IRUGO,
 static DEVICE_ATTR(fabric_param, S_IRUGO, qla2x00_fabric_param_show, NULL);
 static DEVICE_ATTR(fw_state, S_IRUGO, qla2x00_fw_state_show, NULL);
 static DEVICE_ATTR(thermal_temp, S_IRUGO, qla2x00_thermal_temp_show, NULL);
+static DEVICE_ATTR(diag_requests, S_IRUGO, qla2x00_diag_requests_show, NULL);
+static DEVICE_ATTR(diag_megabytes, S_IRUGO, qla2x00_diag_megabytes_show, NULL);
+static DEVICE_ATTR(fw_dump_size, S_IRUGO, qla2x00_fw_dump_size_show, NULL);
 
 struct device_attribute *qla2x00_host_attrs[] = {
        &dev_attr_driver_version,
@@ -1318,6 +1423,9 @@ struct device_attribute *qla2x00_host_attrs[] = {
        &dev_attr_fw_state,
        &dev_attr_optrom_gold_fw_version,
        &dev_attr_thermal_temp,
+       &dev_attr_diag_requests,
+       &dev_attr_diag_megabytes,
+       &dev_attr_fw_dump_size,
        NULL,
 };
 
@@ -1704,7 +1812,7 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
 
        if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
                if (ha->fw_attributes & BIT_4) {
-                       int prot = 0;
+                       int prot = 0, guard;
                        vha->flags.difdix_supported = 1;
                        ql_dbg(ql_dbg_user, vha, 0x7082,
                            "Registered for DIF/DIX type 1 and 3 protection.\n");
@@ -1717,7 +1825,14 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
                            | SHOST_DIX_TYPE1_PROTECTION
                            | SHOST_DIX_TYPE2_PROTECTION
                            | SHOST_DIX_TYPE3_PROTECTION);
-                       scsi_host_set_guard(vha->host, SHOST_DIX_GUARD_CRC);
+
+                       guard = SHOST_DIX_GUARD_CRC;
+
+                       if (IS_PI_IPGUARD_CAPABLE(ha) &&
+                           (ql2xenabledif > 1 || IS_PI_DIFB_DIX0_CAPABLE(ha)))
+                               guard |= SHOST_DIX_GUARD_IP;
+
+                       scsi_host_set_guard(vha->host, guard);
                } else
                        vha->flags.difdix_supported = 0;
        }
index c68883806c54b8092e81ff82f62db06fd8ee822f..2f9bddd3c616d810303b39dc6f7cb5f22af14828 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2011 QLogic Corporation
+ * Copyright (c)  2003-2012 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -530,13 +530,13 @@ done_unmap_sg:
 done:
        return rval;
 }
-
-/* Set the port configuration to enable the
- * internal loopback on ISP81XX
+/*
+ * Set the port configuration to enable the internal or external loopback
+ * depending on the loopback mode.
  */
 static inline int
-qla81xx_set_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
-    uint16_t *new_config)
+qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
+       uint16_t *new_config, uint16_t mode)
 {
        int ret = 0;
        int rval = 0;
@@ -545,8 +545,14 @@ qla81xx_set_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
        if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
                goto done_set_internal;
 
-       new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
-       memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3) ;
+       if (mode == INTERNAL_LOOPBACK)
+               new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
+       else if (mode == EXTERNAL_LOOPBACK)
+               new_config[0] = config[0] | (ENABLE_EXTERNAL_LOOPBACK << 1);
+       ql_dbg(ql_dbg_user, vha, 0x70be,
+            "new_config[0]=%02x\n", (new_config[0] & INTERNAL_LOOPBACK_MASK));
+
+       memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);
 
        ha->notify_dcbx_comp = 1;
        ret = qla81xx_set_port_config(vha, new_config);
@@ -562,9 +568,17 @@ qla81xx_set_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
        if (!wait_for_completion_timeout(&ha->dcbx_comp, (20 * HZ))) {
                ql_dbg(ql_dbg_user, vha, 0x7022,
                    "State change notification not received.\n");
-       } else
-               ql_dbg(ql_dbg_user, vha, 0x7023,
-                   "State change received.\n");
+               rval = -EINVAL;
+       } else {
+               if (ha->flags.idc_compl_status) {
+                       ql_dbg(ql_dbg_user, vha, 0x70c3,
+                           "Bad status in IDC Completion AEN\n");
+                       rval = -EINVAL;
+                       ha->flags.idc_compl_status = 0;
+               } else
+                       ql_dbg(ql_dbg_user, vha, 0x7023,
+                           "State change received.\n");
+       }
 
        ha->notify_dcbx_comp = 0;
 
@@ -572,11 +586,9 @@ done_set_internal:
        return rval;
 }
 
-/* Set the port configuration to disable the
- * internal loopback on ISP81XX
- */
+/* Disable loopback mode */
 static inline int
-qla81xx_reset_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
+qla81xx_reset_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
     int wait)
 {
        int ret = 0;
@@ -589,8 +601,12 @@ qla81xx_reset_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
 
        memset(new_config, 0 , sizeof(new_config));
        if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
-                       ENABLE_INTERNAL_LOOPBACK) {
+           ENABLE_INTERNAL_LOOPBACK ||
+           (config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
+           ENABLE_EXTERNAL_LOOPBACK) {
                new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK;
+               ql_dbg(ql_dbg_user, vha, 0x70bf, "new_config[0]=%02x\n",
+                   (new_config[0] & INTERNAL_LOOPBACK_MASK));
                memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3) ;
 
                ha->notify_dcbx_comp = wait;
@@ -707,7 +723,8 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
 
        elreq.options = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
 
-       if ((ha->current_topology == ISP_CFG_F ||
+       if (atomic_read(&vha->loop_state) == LOOP_READY &&
+           (ha->current_topology == ISP_CFG_F ||
            ((IS_QLA81XX(ha) || IS_QLA8031(ha)) &&
            le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE
            && req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
@@ -729,30 +746,24 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
                                goto done_free_dma_req;
                        }
 
-                       if (elreq.options != EXTERNAL_LOOPBACK) {
-                               ql_dbg(ql_dbg_user, vha, 0x7020,
-                                   "Internal: current port config = %x\n",
-                                   config[0]);
-                               if (qla81xx_set_internal_loopback(vha, config,
-                                       new_config)) {
-                                       ql_log(ql_log_warn, vha, 0x7024,
-                                           "Internal loopback failed.\n");
-                                       bsg_job->reply->result =
-                                               (DID_ERROR << 16);
-                                       rval = -EPERM;
-                                       goto done_free_dma_req;
-                               }
-                       } else {
-                               /* For external loopback to work
-                                * ensure internal loopback is disabled
-                                */
-                               if (qla81xx_reset_internal_loopback(vha,
-                                       config, 1)) {
-                                       bsg_job->reply->result =
-                                               (DID_ERROR << 16);
-                                       rval = -EPERM;
-                                       goto done_free_dma_req;
-                               }
+                       ql_dbg(ql_dbg_user, vha, 0x70c0,
+                           "elreq.options=%04x\n", elreq.options);
+
+                       if (elreq.options == EXTERNAL_LOOPBACK)
+                               if (IS_QLA8031(ha))
+                                       rval = qla81xx_set_loopback_mode(vha,
+                                           config, new_config, elreq.options);
+                               else
+                                       rval = qla81xx_reset_loopback_mode(vha,
+                                           config, 1);
+                       else
+                               rval = qla81xx_set_loopback_mode(vha, config,
+                                   new_config, elreq.options);
+
+                       if (rval) {
+                               bsg_job->reply->result = (DID_ERROR << 16);
+                               rval = -EPERM;
+                               goto done_free_dma_req;
                        }
 
                        type = "FC_BSG_HST_VENDOR_LOOPBACK";
@@ -766,7 +777,7 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
                                /* Revert back to original port config
                                 * Also clear internal loopback
                                 */
-                               qla81xx_reset_internal_loopback(vha,
+                               qla81xx_reset_loopback_mode(vha,
                                    new_config, 0);
                        }
 
@@ -1364,7 +1375,7 @@ qla2x00_read_optrom(struct fc_bsg_job *bsg_job)
        struct qla_hw_data *ha = vha->hw;
        int rval = 0;
 
-       if (ha->flags.isp82xx_reset_hdlr_active)
+       if (ha->flags.nic_core_reset_hdlr_active)
                return -EBUSY;
 
        rval = qla2x00_optrom_setup(bsg_job, vha, 0);
@@ -1559,6 +1570,276 @@ done:
        return 0;
 }
 
+static int
+qla2x00_write_i2c(struct fc_bsg_job *bsg_job)
+{
+       struct Scsi_Host *host = bsg_job->shost;
+       scsi_qla_host_t *vha = shost_priv(host);
+       struct qla_hw_data *ha = vha->hw;
+       int rval = 0;
+       uint8_t bsg[DMA_POOL_SIZE];
+       struct qla_i2c_access *i2c = (void *)bsg;
+       dma_addr_t sfp_dma;
+       uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
+       if (!sfp) {
+               bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+                   EXT_STATUS_NO_MEMORY;
+               goto done;
+       }
+
+       sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+           bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));
+
+       memcpy(sfp, i2c->buffer, i2c->length);
+       rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
+           i2c->device, i2c->offset, i2c->length, i2c->option);
+
+       if (rval) {
+               bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+                   EXT_STATUS_MAILBOX;
+               goto dealloc;
+       }
+
+       bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
+
+dealloc:
+       dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
+
+done:
+       bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+       bsg_job->reply->result = DID_OK << 16;
+       bsg_job->job_done(bsg_job);
+
+       return 0;
+}
+
+static int
+qla2x00_read_i2c(struct fc_bsg_job *bsg_job)
+{
+       struct Scsi_Host *host = bsg_job->shost;
+       scsi_qla_host_t *vha = shost_priv(host);
+       struct qla_hw_data *ha = vha->hw;
+       int rval = 0;
+       uint8_t bsg[DMA_POOL_SIZE];
+       struct qla_i2c_access *i2c = (void *)bsg;
+       dma_addr_t sfp_dma;
+       uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
+       if (!sfp) {
+               bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+                   EXT_STATUS_NO_MEMORY;
+               goto done;
+       }
+
+       sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+           bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));
+
+       rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
+               i2c->device, i2c->offset, i2c->length, i2c->option);
+
+       if (rval) {
+               bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+                   EXT_STATUS_MAILBOX;
+               goto dealloc;
+       }
+
+       memcpy(i2c->buffer, sfp, i2c->length);
+       sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+           bsg_job->reply_payload.sg_cnt, i2c, sizeof(*i2c));
+
+       bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
+
+dealloc:
+       dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
+
+done:
+       bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+       bsg_job->reply->reply_payload_rcv_len = sizeof(*i2c);
+       bsg_job->reply->result = DID_OK << 16;
+       bsg_job->job_done(bsg_job);
+
+       return 0;
+}
+
+static int
+qla24xx_process_bidir_cmd(struct fc_bsg_job *bsg_job)
+{
+       struct Scsi_Host *host = bsg_job->shost;
+       scsi_qla_host_t *vha = shost_priv(host);
+       struct qla_hw_data *ha = vha->hw;
+       uint16_t thread_id;
+       uint32_t rval = EXT_STATUS_OK;
+       uint16_t req_sg_cnt = 0;
+       uint16_t rsp_sg_cnt = 0;
+       uint16_t nextlid = 0;
+       uint32_t tot_dsds;
+       srb_t *sp = NULL;
+       uint32_t req_data_len = 0;
+       uint32_t rsp_data_len = 0;
+
+       /* Check the type of the adapter */
+       if (!IS_BIDI_CAPABLE(ha)) {
+               ql_log(ql_log_warn, vha, 0x70a0,
+                       "This adapter is not supported\n");
+               rval = EXT_STATUS_NOT_SUPPORTED;
+               goto done;
+       }
+
+       if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
+               test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
+               test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
+               rval =  EXT_STATUS_BUSY;
+               goto done;
+       }
+
+       /* Check if host is online */
+       if (!vha->flags.online) {
+               ql_log(ql_log_warn, vha, 0x70a1,
+                       "Host is not online\n");
+               rval = EXT_STATUS_DEVICE_OFFLINE;
+               goto done;
+       }
+
+       /* Check if cable is plugged in or not */
+       if (vha->device_flags & DFLG_NO_CABLE) {
+               ql_log(ql_log_warn, vha, 0x70a2,
+                       "Cable is unplugged...\n");
+               rval = EXT_STATUS_INVALID_CFG;
+               goto done;
+       }
+
+       /* Check if the switch is connected or not */
+       if (ha->current_topology != ISP_CFG_F) {
+               ql_log(ql_log_warn, vha, 0x70a3,
+                       "Host is not connected to the switch\n");
+               rval = EXT_STATUS_INVALID_CFG;
+               goto done;
+       }
+
+       /* Check if operating mode is P2P */
+       if (ha->operating_mode != P2P) {
+               ql_log(ql_log_warn, vha, 0x70a4,
+                   "Host is operating mode is not P2p\n");
+               rval = EXT_STATUS_INVALID_CFG;
+               goto done;
+       }
+
+       thread_id = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+
+       mutex_lock(&ha->selflogin_lock);
+       if (vha->self_login_loop_id == 0) {
+               /* Initialize all required fields of fcport */
+               vha->bidir_fcport.vha = vha;
+               vha->bidir_fcport.d_id.b.al_pa = vha->d_id.b.al_pa;
+               vha->bidir_fcport.d_id.b.area = vha->d_id.b.area;
+               vha->bidir_fcport.d_id.b.domain = vha->d_id.b.domain;
+               vha->bidir_fcport.loop_id = vha->loop_id;
+
+               if (qla2x00_fabric_login(vha, &(vha->bidir_fcport), &nextlid)) {
+                       ql_log(ql_log_warn, vha, 0x70a7,
+                           "Failed to login port %06X for bidirectional IOCB\n",
+                           vha->bidir_fcport.d_id.b24);
+                       mutex_unlock(&ha->selflogin_lock);
+                       rval = EXT_STATUS_MAILBOX;
+                       goto done;
+               }
+               vha->self_login_loop_id = nextlid - 1;
+
+       }
+       /* Assign the self login loop id to fcport */
+       mutex_unlock(&ha->selflogin_lock);
+
+       vha->bidir_fcport.loop_id = vha->self_login_loop_id;
+
+       req_sg_cnt = dma_map_sg(&ha->pdev->dev,
+               bsg_job->request_payload.sg_list,
+               bsg_job->request_payload.sg_cnt,
+               DMA_TO_DEVICE);
+
+       if (!req_sg_cnt) {
+               rval = EXT_STATUS_NO_MEMORY;
+               goto done;
+       }
+
+       rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
+               bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
+               DMA_FROM_DEVICE);
+
+       if (!rsp_sg_cnt) {
+               rval = EXT_STATUS_NO_MEMORY;
+               goto done_unmap_req_sg;
+       }
+
+       if ((req_sg_cnt !=  bsg_job->request_payload.sg_cnt) ||
+               (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
+               ql_dbg(ql_dbg_user, vha, 0x70a9,
+                   "Dma mapping resulted in different sg counts "
+                   "[request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt: "
+                   "%x dma_reply_sg_cnt: %x]\n",
+                   bsg_job->request_payload.sg_cnt, req_sg_cnt,
+                   bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
+               rval = EXT_STATUS_NO_MEMORY;
+               goto done_unmap_sg;
+       }
+
+       if (req_data_len != rsp_data_len) {
+               rval = EXT_STATUS_BUSY;
+               ql_log(ql_log_warn, vha, 0x70aa,
+                   "req_data_len != rsp_data_len\n");
+               goto done_unmap_sg;
+       }
+
+       req_data_len = bsg_job->request_payload.payload_len;
+       rsp_data_len = bsg_job->reply_payload.payload_len;
+
+
+       /* Alloc SRB structure */
+       sp = qla2x00_get_sp(vha, &(vha->bidir_fcport), GFP_KERNEL);
+       if (!sp) {
+               ql_dbg(ql_dbg_user, vha, 0x70ac,
+                   "Alloc SRB structure failed\n");
+               rval = EXT_STATUS_NO_MEMORY;
+               goto done_unmap_sg;
+       }
+
+       /*Populate srb->ctx with bidir ctx*/
+       sp->u.bsg_job = bsg_job;
+       sp->free = qla2x00_bsg_sp_free;
+       sp->type = SRB_BIDI_CMD;
+       sp->done = qla2x00_bsg_job_done;
+
+       /* Add the read and write sg count */
+       tot_dsds = rsp_sg_cnt + req_sg_cnt;
+
+       rval = qla2x00_start_bidir(sp, vha, tot_dsds);
+       if (rval != EXT_STATUS_OK)
+               goto done_free_srb;
+       /* the bsg request will be completed in the interrupt handler */
+       return rval;
+
+done_free_srb:
+       mempool_free(sp, ha->srb_mempool);
+done_unmap_sg:
+       dma_unmap_sg(&ha->pdev->dev,
+           bsg_job->reply_payload.sg_list,
+           bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+done_unmap_req_sg:
+       dma_unmap_sg(&ha->pdev->dev,
+           bsg_job->request_payload.sg_list,
+           bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+done:
+
+       /* Return an error vendor specific response
+        * and complete the bsg request
+        */
+       bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
+       bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+       bsg_job->reply->reply_payload_rcv_len = 0;
+       bsg_job->reply->result = (DID_OK) << 16;
+       bsg_job->job_done(bsg_job);
+       /* Always return success, vendor rsp carries correct status */
+       return 0;
+}
+
 static int
 qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
 {
@@ -1596,6 +1877,15 @@ qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
        case QL_VND_WRITE_FRU_STATUS:
                return qla2x00_write_fru_status(bsg_job);
 
+       case QL_VND_WRITE_I2C:
+               return qla2x00_write_i2c(bsg_job);
+
+       case QL_VND_READ_I2C:
+               return qla2x00_read_i2c(bsg_job);
+
+       case QL_VND_DIAG_IO_CMD:
+               return qla24xx_process_bidir_cmd(bsg_job);
+
        default:
                bsg_job->reply->result = (DID_ERROR << 16);
                bsg_job->job_done(bsg_job);
index 70caa63a8930e196229c1f8a5cd2654c7c21b597..37b8b7ba7421c92d8e92bb17b55ceb7b31ae438b 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2011 QLogic Corporation
+ * Copyright (c)  2003-2012 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
 #define QL_VND_SET_FRU_VERSION 0x0B
 #define QL_VND_READ_FRU_STATUS 0x0C
 #define QL_VND_WRITE_FRU_STATUS        0x0D
+#define QL_VND_DIAG_IO_CMD     0x0A
+#define QL_VND_WRITE_I2C       0x10
+#define QL_VND_READ_I2C                0x11
 
 /* BSG Vendor specific subcode returns */
 #define EXT_STATUS_OK                  0
 #define EXT_STATUS_ERR                 1
+#define EXT_STATUS_BUSY                        2
 #define EXT_STATUS_INVALID_PARAM       6
+#define EXT_STATUS_DATA_OVERRUN                7
+#define EXT_STATUS_DATA_UNDERRUN       8
 #define EXT_STATUS_MAILBOX             11
 #define EXT_STATUS_NO_MEMORY           17
+#define EXT_STATUS_DEVICE_OFFLINE      22
+
+/*
+ * To support bidirectional iocb
+ * BSG Vendor specific returns
+ */
+#define EXT_STATUS_NOT_SUPPORTED       27
+#define EXT_STATUS_INVALID_CFG         28
+#define EXT_STATUS_DMA_ERR             29
+#define EXT_STATUS_TIMEOUT             30
+#define EXT_STATUS_THREAD_FAILED       31
+#define EXT_STATUS_DATA_CMP_FAILED     32
 
 /* BSG definitions for interpreting CommandSent field */
 #define INT_DEF_LB_LOOPBACK_CMD         0
 #define INT_DEF_LB_ECHO_CMD             1
 
 /* Loopback related definitions */
+#define INTERNAL_LOOPBACK              0xF1
 #define EXTERNAL_LOOPBACK              0xF2
 #define ENABLE_INTERNAL_LOOPBACK       0x02
+#define ENABLE_EXTERNAL_LOOPBACK       0x04
 #define INTERNAL_LOOPBACK_MASK         0x000E
 #define MAX_ELS_FRAME_PAYLOAD          252
 #define ELS_OPCODE_BYTE                        0x10
@@ -183,4 +203,12 @@ struct qla_status_reg {
        uint8_t reserved[7];
 } __packed;
 
+struct qla_i2c_access {
+       uint16_t device;
+       uint16_t offset;
+       uint16_t option;
+       uint16_t length;
+       uint8_t  buffer[0x40];
+} __packed;
+
 #endif
index fdee5611f3e2afce938c7f7ad559ce5d8eecffe9..44efe3cc79e6b5e90523cac89a718fbd3d946761 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2011 QLogic Corporation
+ * Copyright (c)  2003-2012 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
  * ----------------------------------------------------------------------
  * |             Level            |   Last Value Used  |     Holes     |
  * ----------------------------------------------------------------------
- * | Module Init and Probe        |       0x0122       | 0x4b,0xba,0xfa |
- * | Mailbox commands             |       0x1140       | 0x111a-0x111b  |
+ * | Module Init and Probe        |       0x0124       | 0x4b,0xba,0xfa |
+ * | Mailbox commands             |       0x114f       | 0x111a-0x111b  |
  * |                              |                    | 0x112c-0x112e  |
  * |                              |                    | 0x113a         |
- * | Device Discovery             |       0x2086       | 0x2020-0x2022  |
- * | Queue Command and IO tracing |       0x3030       | 0x3006,0x3008  |
+ * | Device Discovery             |       0x2087       | 0x2020-0x2022, |
+ * |                              |                    | 0x2016         |
+ * | Queue Command and IO tracing |       0x3030       | 0x3006-0x300b  |
+ * |                              |                    | 0x3027-0x3028  |
  * |                              |                    | 0x302d-0x302e  |
- * | DPC Thread                   |       0x401c       | 0x4002,0x4013  |
- * | Async Events                 |       0x505f       | 0x502b-0x502f  |
+ * | DPC Thread                   |       0x401d       | 0x4002,0x4013  |
+ * | Async Events                 |       0x5071       | 0x502b-0x502f  |
  * |                              |                    | 0x5047,0x5052  |
  * | Timer Routines               |       0x6011       |                |
- * | User Space Interactions      |       0x709f       | 0x7018,0x702e, |
+ * | User Space Interactions      |       0x70c3       | 0x7018,0x702e, |
  * |                              |                    | 0x7039,0x7045, |
  * |                              |                    | 0x7073-0x7075, |
- * |                              |                    | 0x708c         |
+ * |                              |                    | 0x708c,        |
+ * |                              |                    | 0x70a5,0x70a6, |
+ * |                              |                    | 0x70a8,0x70ab, |
+ * |                              |                    | 0x70ad-0x70ae  |
  * | Task Management              |       0x803c       | 0x8025-0x8026  |
  * |                              |                    | 0x800b,0x8039  |
  * | AER/EEH                      |       0x9011       |               |
  * | Virtual Port                 |       0xa007       |               |
- * | ISP82XX Specific             |       0xb054       | 0xb024         |
+ * | ISP82XX Specific             |       0xb084       | 0xb002,0xb024  |
  * | MultiQ                       |       0xc00c       |               |
  * | Misc                         |       0xd010       |               |
  * | Target Mode                 |       0xe06f       |                |
@@ -2357,7 +2362,7 @@ ql_dbg(uint32_t level, scsi_qla_host_t *vha, int32_t id, const char *fmt, ...)
 
 /*
  * This function is for formatting and logging debug information.
- * It is to be used when vha is not available and pci is availble,
+ * It is to be used when vha is not available and pci is available,
  * i.e., before host allocation. It formats the message and logs it
  * to the messages file.
  * parameters:
@@ -2452,7 +2457,7 @@ ql_log(uint32_t level, scsi_qla_host_t *vha, int32_t id, const char *fmt, ...)
 
 /*
  * This function is for formatting and logging log messages.
- * It is to be used when vha is not available and pci is availble,
+ * It is to be used when vha is not available and pci is available,
  * i.e., before host allocation. It formats the message and logs
  * it to the messages file. All the messages are logged irrespective
  * of the value of ql2xextended_error_logging.
index f278df8cce0f02988e95f85e3f65d82381150f47..8f911c0b1e74afa0c78566964edb355e0ae0034b 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2011 QLogic Corporation
+ * Copyright (c)  2003-2012 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
index 39007f53aec0284b9cd855592d786823bbad5786..a9725bf5527bc139e1f0110e7a2c419626f21f56 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2011 QLogic Corporation
+ * Copyright (c)  2003-2012 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
 #define WRT_REG_WORD(addr, data)       writew(data,addr)
 #define WRT_REG_DWORD(addr, data)      writel(data,addr)
 
+/*
+ * ISP83XX specific remote register addresses
+ */
+#define QLA83XX_LED_PORT0                      0x00201320
+#define QLA83XX_LED_PORT1                      0x00201328
+#define QLA83XX_IDC_DEV_STATE          0x22102384
+#define QLA83XX_IDC_MAJOR_VERSION      0x22102380
+#define QLA83XX_IDC_MINOR_VERSION      0x22102398
+#define QLA83XX_IDC_DRV_PRESENCE       0x22102388
+#define QLA83XX_IDC_DRIVER_ACK         0x2210238c
+#define QLA83XX_IDC_CONTROL                    0x22102390
+#define QLA83XX_IDC_AUDIT                      0x22102394
+#define QLA83XX_IDC_LOCK_RECOVERY      0x2210239c
+#define QLA83XX_DRIVER_LOCKID          0x22102104
+#define QLA83XX_DRIVER_LOCK                    0x8111c028
+#define QLA83XX_DRIVER_UNLOCK          0x8111c02c
+#define QLA83XX_FLASH_LOCKID           0x22102100
+#define QLA83XX_FLASH_LOCK                     0x8111c010
+#define QLA83XX_FLASH_UNLOCK           0x8111c014
+#define QLA83XX_DEV_PARTINFO1          0x221023e0
+#define QLA83XX_DEV_PARTINFO2          0x221023e4
+#define QLA83XX_FW_HEARTBEAT           0x221020b0
+#define QLA83XX_PEG_HALT_STATUS1       0x221020a8
+#define QLA83XX_PEG_HALT_STATUS2       0x221020ac
+
+/* 83XX: Macros defining 8200 AEN Reason codes */
+#define IDC_DEVICE_STATE_CHANGE BIT_0
+#define IDC_PEG_HALT_STATUS_CHANGE BIT_1
+#define IDC_NIC_FW_REPORTED_FAILURE BIT_2
+#define IDC_HEARTBEAT_FAILURE BIT_3
+
+/* 83XX: Macros defining 8200 AEN Error-levels */
+#define ERR_LEVEL_NON_FATAL 0x1
+#define ERR_LEVEL_RECOVERABLE_FATAL 0x2
+#define ERR_LEVEL_UNRECOVERABLE_FATAL 0x4
+
+/* 83XX: Macros for IDC Version */
+#define QLA83XX_SUPP_IDC_MAJOR_VERSION 0x01
+#define QLA83XX_SUPP_IDC_MINOR_VERSION 0x0
+
+/* 83XX: Macros for scheduling dpc tasks */
+#define QLA83XX_NIC_CORE_RESET 0x1
+#define QLA83XX_IDC_STATE_HANDLER 0x2
+#define QLA83XX_NIC_CORE_UNRECOVERABLE 0x3
+
+/* 83XX: Macros for defining IDC-Control bits */
+#define QLA83XX_IDC_RESET_DISABLED BIT_0
+#define QLA83XX_IDC_GRACEFUL_RESET BIT_1
+
+/* 83XX: Macros for different timeouts */
+#define QLA83XX_IDC_INITIALIZATION_TIMEOUT 30
+#define QLA83XX_IDC_RESET_ACK_TIMEOUT 10
+#define QLA83XX_MAX_LOCK_RECOVERY_WAIT (2 * HZ)
+
+/* 83XX: Macros for defining class in DEV-Partition Info register */
+#define QLA83XX_CLASS_TYPE_NONE                0x0
+#define QLA83XX_CLASS_TYPE_NIC         0x1
+#define QLA83XX_CLASS_TYPE_FCOE                0x2
+#define QLA83XX_CLASS_TYPE_ISCSI       0x3
+
+/* 83XX: Macros for IDC Lock-Recovery stages */
+#define IDC_LOCK_RECOVERY_STAGE1       0x1 /* Stage1: Intent for
+                                            * lock-recovery
+                                            */
+#define IDC_LOCK_RECOVERY_STAGE2       0x2 /* Stage2: Perform lock-recovery */
+
+/* 83XX: Macros for IDC Audit type */
+#define IDC_AUDIT_TIMESTAMP            0x0 /* IDC-AUDIT: Record timestamp of
+                                            * dev-state change to NEED-RESET
+                                            * or NEED-QUIESCENT
+                                            */
+#define IDC_AUDIT_COMPLETION           0x1 /* IDC-AUDIT: Record duration of
+                                            * reset-recovery completion is
+                                            * second
+                                            */
+
 /*
  * The ISP2312 v2 chip cannot access the FLASH/GPIO registers via MMIO in an
  * 133Mhz slot.
 #define MAX_FIBRE_DEVICES_2400 2048
 #define MAX_FIBRE_DEVICES_LOOP 128
 #define MAX_FIBRE_DEVICES_MAX  MAX_FIBRE_DEVICES_2400
+#define LOOPID_MAP_SIZE                (ha->max_fibre_devices)
 #define MAX_FIBRE_LUNS         0xFFFF
 #define        MAX_HOST_COUNT          16
 
@@ -259,6 +336,7 @@ struct srb_iocb {
 #define SRB_ADISC_CMD  6
 #define SRB_TM_CMD     7
 #define SRB_SCSI_CMD   8
+#define SRB_BIDI_CMD   9
 
 typedef struct srb {
        atomic_t ref_count;
@@ -594,6 +672,20 @@ typedef struct {
 #define MBA_DISCARD_RND_FRAME  0x8048  /* discard RND frame due to error. */
 #define MBA_REJECTED_FCP_CMD   0x8049  /* rejected FCP_CMD. */
 
+/* 83XX FCoE specific */
+#define MBA_IDC_AEN            0x8200  /* FCoE: NIC Core state change AEN */
+
+/* Interrupt type codes */
+#define INTR_ROM_MB_SUCCESS            0x1
+#define INTR_ROM_MB_FAILED             0x2
+#define INTR_MB_SUCCESS                        0x10
+#define INTR_MB_FAILED                 0x11
+#define INTR_ASYNC_EVENT               0x12
+#define INTR_RSP_QUE_UPDATE            0x13
+#define INTR_RSP_QUE_UPDATE_83XX       0x14
+#define INTR_ATIO_QUE_UPDATE           0x1C
+#define INTR_ATIO_RSP_QUE_UPDATE       0x1D
+
 /* ISP mailbox loopback echo diagnostic error code */
 #define MBS_LB_RESET   0x17
 /*
@@ -718,6 +810,7 @@ typedef struct {
 #define MBC_SEND_RNFT_ELS              0x5e    /* Send RNFT ELS request */
 #define MBC_GET_LINK_PRIV_STATS                0x6d    /* Get link & private data. */
 #define MBC_SET_VENDOR_ID              0x76    /* Set Vendor ID. */
+#define MBC_PORT_RESET                 0x120   /* Port Reset */
 #define MBC_SET_PORT_CONFIG            0x122   /* Set port configuration */
 #define MBC_GET_PORT_CONFIG            0x123   /* Get port configuration */
 
@@ -1375,9 +1468,10 @@ typedef struct {
 } cont_a64_entry_t;
 
 #define PO_MODE_DIF_INSERT     0
-#define PO_MODE_DIF_REMOVE     BIT_0
-#define PO_MODE_DIF_PASS       BIT_1
-#define PO_MODE_DIF_REPLACE    (BIT_0 + BIT_1)
+#define PO_MODE_DIF_REMOVE     1
+#define PO_MODE_DIF_PASS       2
+#define PO_MODE_DIF_REPLACE    3
+#define PO_MODE_DIF_TCP_CKSUM  6
 #define PO_ENABLE_DIF_BUNDLING BIT_8
 #define PO_ENABLE_INCR_GUARD_SEED      BIT_3
 #define PO_DISABLE_INCR_REF_TAG        BIT_5
@@ -1509,6 +1603,13 @@ typedef struct {
 #define CS_RETRY               0x82    /* Driver defined */
 #define CS_LOOP_DOWN_ABORT     0x83    /* Driver defined */
 
+#define CS_BIDIR_RD_OVERRUN                    0x700
+#define CS_BIDIR_RD_WR_OVERRUN                 0x707
+#define CS_BIDIR_RD_OVERRUN_WR_UNDERRUN                0x715
+#define CS_BIDIR_RD_UNDERRUN                   0x1500
+#define CS_BIDIR_RD_UNDERRUN_WR_OVERRUN                0x1507
+#define CS_BIDIR_RD_WR_UNDERRUN                        0x1515
+#define CS_BIDIR_DMA                           0x200
 /*
  * Status entry status flags
  */
@@ -2373,6 +2474,11 @@ struct qla_statistics {
        uint64_t output_bytes;
 };
 
+struct bidi_statistics {
+       unsigned long long io_count;
+       unsigned long long transfer_bytes;
+};
+
 /* Multi queue support */
 #define MBC_INITIALIZE_MULTIQ 0x1f
 #define QLA_QUE_PAGE 0X1000
@@ -2509,14 +2615,16 @@ struct qla_hw_data {
                uint32_t        disable_msix_handshake  :1;
                uint32_t        fcp_prio_enabled        :1;
                uint32_t        isp82xx_fw_hung:1;
+               uint32_t        nic_core_hung:1;
 
                uint32_t        quiesce_owner:1;
                uint32_t        thermal_supported:1;
-               uint32_t        isp82xx_reset_hdlr_active:1;
-               uint32_t        isp82xx_reset_owner:1;
+               uint32_t        nic_core_reset_hdlr_active:1;
+               uint32_t        nic_core_reset_owner:1;
                uint32_t        isp82xx_no_md_cap:1;
                uint32_t        host_shutting_down:1;
-               /* 30 bits */
+               uint32_t        idc_compl_status:1;
+               /* 32 bits */
        } flags;
 
        /* This spinlock is used to protect "io transactions", you must
@@ -2670,6 +2778,16 @@ struct qla_hw_data {
 #define HAS_EXTENDED_IDS(ha)    ((ha)->device_type & DT_EXTENDED_IDS)
 #define IS_CT6_SUPPORTED(ha)   ((ha)->device_type & DT_CT6_SUPPORTED)
 #define IS_MQUE_CAPABLE(ha)    ((ha)->mqenable || IS_QLA83XX(ha))
+#define IS_BIDI_CAPABLE(ha)    ((IS_QLA25XX(ha) || IS_QLA2031(ha)))
+/* Bit 21 of fw_attributes decides the MCTP capabilities */
+#define IS_MCTP_CAPABLE(ha)    (IS_QLA2031(ha) && \
+                               ((ha)->fw_attributes_ext[0] & BIT_0))
+#define IS_PI_UNINIT_CAPABLE(ha)       (IS_QLA83XX(ha))
+#define IS_PI_IPGUARD_CAPABLE(ha)      (IS_QLA83XX(ha))
+#define IS_PI_DIFB_DIX0_CAPABLE(ha)    (0)
+#define IS_PI_SPLIT_DET_CAPABLE_HBA(ha)        (IS_QLA83XX(ha))
+#define IS_PI_SPLIT_DET_CAPABLE(ha)    (IS_PI_SPLIT_DET_CAPABLE_HBA(ha) && \
+    (((ha)->fw_attributes_h << 16 | (ha)->fw_attributes) & BIT_22))
 
        /* HBA serial number */
        uint8_t         serial0;
@@ -2753,6 +2871,7 @@ struct qla_hw_data {
        struct completion mbx_intr_comp;  /* Used for completion notification */
        struct completion dcbx_comp;    /* For set port config notification */
        int notify_dcbx_comp;
+       struct mutex selflogin_lock;
 
        /* Basic firmware related information. */
        uint16_t        fw_major_version;
@@ -2784,7 +2903,12 @@ struct qla_hw_data {
        int             fw_dump_reading;
        dma_addr_t      eft_dma;
        void            *eft;
-
+/* Current size of mctp dump is 0x086064 bytes */
+#define MCTP_DUMP_SIZE  0x086064
+       dma_addr_t      mctp_dump_dma;
+       void            *mctp_dump;
+       int             mctp_dumped;
+       int             mctp_dump_reading;
        uint32_t        chain_offset;
        struct dentry *dfs_dir;
        struct dentry *dfs_fce;
@@ -2896,8 +3020,8 @@ struct qla_hw_data {
        unsigned long   mn_win_crb;
        unsigned long   ms_win_crb;
        int             qdr_sn_window;
-       uint32_t        nx_dev_init_timeout;
-       uint32_t        nx_reset_timeout;
+       uint32_t        fcoe_dev_init_timeout;
+       uint32_t        fcoe_reset_timeout;
        rwlock_t        hw_lock;
        uint16_t        portnum;                /* port number */
        int             link_width;
@@ -2918,6 +3042,20 @@ struct qla_hw_data {
        void            *md_dump;
        uint32_t        md_dump_size;
 
+       void            *loop_id_map;
+
+       /* QLA83XX IDC specific fields */
+       uint32_t        idc_audit_ts;
+
+       /* DPC low-priority workqueue */
+       struct workqueue_struct *dpc_lp_wq;
+       struct work_struct idc_aen;
+       /* DPC high-priority workqueue */
+       struct workqueue_struct *dpc_hp_wq;
+       struct work_struct nic_core_reset;
+       struct work_struct idc_state_handler;
+       struct work_struct nic_core_unrecoverable;
+
        struct qlt_hw_data tgt;
 };
 
@@ -2985,6 +3123,13 @@ typedef struct scsi_qla_host {
 
        /* ISP configuration data. */
        uint16_t        loop_id;                /* Host adapter loop id */
+       uint16_t        self_login_loop_id;     /* host adapter loop id
+                                                * get it on self login
+                                                */
+       fc_port_t       bidir_fcport;           /* fcport used for bidir cmnds
+                                                * no need of allocating it for
+                                                * each command
+                                                */
 
        port_id_t       d_id;                   /* Host adapter port id */
        uint8_t         marker_needed;
@@ -3038,6 +3183,7 @@ typedef struct scsi_qla_host {
        int             seconds_since_last_heartbeat;
        struct fc_host_statistics fc_host_stat;
        struct qla_statistics qla_stats;
+       struct bidi_statistics bidi_stats;
 
        atomic_t        vref_count;
 } scsi_qla_host_t;
index 499c74e39ee5bd7d3a0a4db84f309630edb9dbeb..706c4f7bc7c95f5b5bc127c9d90ebd983e2fbb39 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2011 QLogic Corporation
+ * Copyright (c)  2003-2012 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
index 6d7d7758c797978bad64cc2fbcb5883a825b39fd..59524aa0ab324cf538fa1928d43ecdb843c5c28a 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2011 QLogic Corporation
+ * Copyright (c)  2003-2012 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -381,6 +381,44 @@ struct init_cb_24xx {
 /*
  * ISP queue - command entry structure definition.
  */
+#define COMMAND_BIDIRECTIONAL 0x75
+struct cmd_bidir {
+       uint8_t entry_type;             /* Entry type. */
+       uint8_t entry_count;            /* Entry count. */
+       uint8_t sys_define;             /* System defined */
+       uint8_t entry_status;           /* Entry status. */
+
+       uint32_t handle;                /* System handle. */
+
+       uint16_t nport_handle;          /* N_PORT handle. */
+
+       uint16_t timeout;               /* Command timeout. */
+
+       uint16_t wr_dseg_count;         /* Write Data segment count. */
+       uint16_t rd_dseg_count;         /* Read Data segment count. */
+
+       struct scsi_lun lun;            /* FCP LUN (BE). */
+
+       uint16_t control_flags;         /* Control flags. */
+#define BD_WRAP_BACK                   BIT_3
+#define BD_READ_DATA                   BIT_1
+#define BD_WRITE_DATA                  BIT_0
+
+       uint16_t fcp_cmnd_dseg_len;             /* Data segment length. */
+       uint32_t fcp_cmnd_dseg_address[2];      /* Data segment address. */
+
+       uint16_t reserved[2];                   /* Reserved */
+
+       uint32_t rd_byte_count;                 /* Total Byte count Read. */
+       uint32_t wr_byte_count;                 /* Total Byte count write. */
+
+       uint8_t port_id[3];                     /* PortID of destination port.*/
+       uint8_t vp_index;
+
+       uint32_t fcp_data_dseg_address[2];      /* Data segment address. */
+       uint16_t fcp_data_dseg_len;             /* Data segment length. */
+};
+
 #define COMMAND_TYPE_6 0x48            /* Command Type 6 entry */
 struct cmd_type_6 {
        uint8_t entry_type;             /* Entry type. */
@@ -1130,7 +1168,7 @@ struct mid_db_entry_24xx {
 /*
  * Virtual Port Control IOCB
  */
-#define VP_CTRL_IOCB_TYPE      0x30    /* Vitual Port Control entry. */
+#define VP_CTRL_IOCB_TYPE      0x30    /* Virtual Port Control entry. */
 struct vp_ctrl_entry_24xx {
        uint8_t entry_type;             /* Entry type. */
        uint8_t entry_count;            /* Entry count. */
@@ -1166,7 +1204,7 @@ struct vp_ctrl_entry_24xx {
 /*
  * Modify Virtual Port Configuration IOCB
  */
-#define VP_CONFIG_IOCB_TYPE    0x31    /* Vitual Port Config entry. */
+#define VP_CONFIG_IOCB_TYPE    0x31    /* Virtual Port Config entry. */
 struct vp_config_entry_24xx {
        uint8_t entry_type;             /* Entry type. */
        uint8_t entry_count;            /* Entry count. */
@@ -1502,7 +1540,10 @@ struct access_chip_rsp_84xx {
 /*
  * ISP83xx mailbox commands
  */
-#define MBC_WRITE_REMOTE_REG 0x0001 /* Write remote register */
+#define MBC_WRITE_REMOTE_REG           0x0001 /* Write remote register */
+#define MBC_READ_REMOTE_REG            0x0009 /* Read remote register */
+#define MBC_RESTART_NIC_FIRMWARE       0x003d /* Restart NIC firmware */
+#define MBC_SET_ACCESS_CONTROL         0x003e /* Access control command */
 
 /* Flash access control option field bit definitions */
 #define FAC_OPT_FORCE_SEMAPHORE                BIT_15
index 9eacd2df111b85108dd8b3e75c3ea0320ec927b5..6acb39785a46a2992fa0ec6e761dff79c56d0f93 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2011 QLogic Corporation
+ * Copyright (c)  2003-2012 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -48,7 +48,7 @@ extern void qla2x00_update_fcports(scsi_qla_host_t *);
 
 extern int qla2x00_abort_isp(scsi_qla_host_t *);
 extern void qla2x00_abort_isp_cleanup(scsi_qla_host_t *);
-extern void qla82xx_quiescent_state_cleanup(scsi_qla_host_t *);
+extern void qla2x00_quiesce_io(scsi_qla_host_t *);
 
 extern void qla2x00_update_fcport(scsi_qla_host_t *, fc_port_t *);
 
@@ -76,6 +76,14 @@ extern int qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *, fc_port_t *);
 
 extern fc_port_t *
 qla2x00_alloc_fcport(scsi_qla_host_t *, gfp_t );
+
+extern int __qla83xx_set_idc_control(scsi_qla_host_t *, uint32_t);
+extern int __qla83xx_get_idc_control(scsi_qla_host_t *, uint32_t *);
+extern void qla83xx_idc_audit(scsi_qla_host_t *, int);
+extern int qla83xx_nic_core_reset(scsi_qla_host_t *);
+extern void qla83xx_reset_ownership(scsi_qla_host_t *);
+extern int qla2xxx_mctp_dump(scsi_qla_host_t *);
+
 /*
  * Global Data in qla_os.c source file.
  */
@@ -133,6 +141,20 @@ extern void qla2x00_relogin(struct scsi_qla_host *);
 extern void qla2x00_do_work(struct scsi_qla_host *);
 extern void qla2x00_free_fcports(struct scsi_qla_host *);
 
+extern void qla83xx_schedule_work(scsi_qla_host_t *, int);
+extern void qla83xx_service_idc_aen(struct work_struct *);
+extern void qla83xx_nic_core_unrecoverable_work(struct work_struct *);
+extern void qla83xx_idc_state_handler_work(struct work_struct *);
+extern void qla83xx_nic_core_reset_work(struct work_struct *);
+
+extern void qla83xx_idc_lock(scsi_qla_host_t *, uint16_t);
+extern void qla83xx_idc_unlock(scsi_qla_host_t *, uint16_t);
+extern int qla83xx_idc_state_handler(scsi_qla_host_t *);
+extern int qla83xx_set_drv_presence(scsi_qla_host_t *vha);
+extern int __qla83xx_set_drv_presence(scsi_qla_host_t *vha);
+extern int qla83xx_clear_drv_presence(scsi_qla_host_t *vha);
+extern int __qla83xx_clear_drv_presence(scsi_qla_host_t *vha);
+
 /*
  * Global Functions in qla_mid.c source file.
  */
@@ -188,6 +210,8 @@ extern int qla2x00_start_sp(srb_t *);
 extern uint16_t qla24xx_calc_iocbs(scsi_qla_host_t *, uint16_t);
 extern void qla24xx_build_scsi_iocbs(srb_t *, struct cmd_type_7 *, uint16_t);
 extern int qla24xx_dif_start_scsi(srb_t *);
+extern int qla2x00_start_bidir(srb_t *, struct scsi_qla_host *, uint32_t);
+extern unsigned long qla2x00_get_async_timeout(struct scsi_qla_host *);
 
 extern void *qla2x00_alloc_iocbs(scsi_qla_host_t *, srb_t *);
 extern int qla2x00_issue_marker(scsi_qla_host_t *, int);
@@ -376,6 +400,9 @@ qla81xx_set_port_config(scsi_qla_host_t *, uint16_t *);
 extern int
 qla2x00_port_logout(scsi_qla_host_t *, struct fc_port *);
 
+extern int
+qla2x00_dump_mctp_data(scsi_qla_host_t *, dma_addr_t, uint32_t, uint32_t);
+
 /*
  * Global Function Prototypes in qla_isr.c source file.
  */
@@ -419,7 +446,11 @@ extern void qla24xx_beacon_blink(struct scsi_qla_host *);
 extern void qla83xx_beacon_blink(struct scsi_qla_host *);
 extern int qla82xx_beacon_on(struct scsi_qla_host *);
 extern int qla82xx_beacon_off(struct scsi_qla_host *);
-extern int qla83xx_write_remote_reg(struct scsi_qla_host *, uint32_t, uint32_t);
+extern int qla83xx_wr_reg(scsi_qla_host_t *, uint32_t, uint32_t);
+extern int qla83xx_rd_reg(scsi_qla_host_t *, uint32_t, uint32_t *);
+extern int qla83xx_restart_nic_firmware(scsi_qla_host_t *);
+extern int qla83xx_access_control(scsi_qla_host_t *, uint16_t, uint32_t,
+    uint32_t, uint16_t *);
 
 extern uint8_t *qla2x00_read_optrom_data(struct scsi_qla_host *, uint8_t *,
     uint32_t, uint32_t);
@@ -527,7 +558,6 @@ extern void qla24xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
 /* PCI related functions */
 extern int qla82xx_pci_config(struct scsi_qla_host *);
 extern int qla82xx_pci_mem_read_2M(struct qla_hw_data *, u64, void *, int);
-extern char *qla82xx_pci_info_str(struct scsi_qla_host *, char *);
 extern int qla82xx_pci_region_offset(struct pci_dev *, int);
 extern int qla82xx_iospace_config(struct qla_hw_data *);
 
@@ -580,6 +610,7 @@ extern uint32_t  qla82xx_wait_for_state_change(scsi_qla_host_t *, uint32_t);
 extern int qla82xx_idc_lock(struct qla_hw_data *);
 extern void qla82xx_idc_unlock(struct qla_hw_data *);
 extern int qla82xx_device_state_handler(scsi_qla_host_t *);
+extern void qla8xxx_dev_failed_handler(scsi_qla_host_t *);
 extern void qla82xx_clear_qsnt_ready(scsi_qla_host_t *);
 
 extern void qla2x00_set_model_info(scsi_qla_host_t *, uint8_t *,
index 05260d25fe469f8e28bfba0807a874c4e2173acd..f4e4bd7c3f4d66b8ea06bb415315e5e737cb45df 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2011 QLogic Corporation
+ * Copyright (c)  2003-2012 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -1131,7 +1131,7 @@ qla2x00_mgmt_svr_login(scsi_qla_host_t *vha)
                return ret;
 
        rval = ha->isp_ops->fabric_login(vha, vha->mgmt_svr_loop_id, 0xff, 0xff,
-           0xfa, mb, BIT_1|BIT_0);
+           0xfa, mb, BIT_1);
        if (rval != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
                if (rval == QLA_MEMORY_ALLOC_FAILED)
                        ql_dbg(ql_dbg_disc, vha, 0x2085,
index a44653b421612dcb35a302cc74a2ef64b689dd1a..799a58bb98599c8900a7d24d391b75883e9a8a16 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2011 QLogic Corporation
+ * Copyright (c)  2003-2012 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -77,7 +77,7 @@ qla2x00_sp_free(void *data, void *ptr)
 
 /* Asynchronous Login/Logout Routines -------------------------------------- */
 
-static inline unsigned long
+unsigned long
 qla2x00_get_async_timeout(struct scsi_qla_host *vha)
 {
        unsigned long tmo;
@@ -429,6 +429,79 @@ qla2x00_async_adisc_done(struct scsi_qla_host *vha, fc_port_t *fcport,
 /*                QLogic ISP2x00 Hardware Support Functions.                */
 /****************************************************************************/
 
+int
+qla83xx_nic_core_fw_load(scsi_qla_host_t *vha)
+{
+       int rval = QLA_SUCCESS;
+       struct qla_hw_data *ha = vha->hw;
+       uint32_t idc_major_ver, idc_minor_ver;
+       uint16_t config[4];
+
+       qla83xx_idc_lock(vha, 0);
+
+       /* SV: TODO: Assign initialization timeout from
+        * flash-info / other param
+        */
+       ha->fcoe_dev_init_timeout = QLA83XX_IDC_INITIALIZATION_TIMEOUT;
+       ha->fcoe_reset_timeout = QLA83XX_IDC_RESET_ACK_TIMEOUT;
+
+       /* Set our fcoe function presence */
+       if (__qla83xx_set_drv_presence(vha) != QLA_SUCCESS) {
+               ql_dbg(ql_dbg_p3p, vha, 0xb077,
+                   "Error while setting DRV-Presence.\n");
+               rval = QLA_FUNCTION_FAILED;
+               goto exit;
+       }
+
+       /* Decide the reset ownership */
+       qla83xx_reset_ownership(vha);
+
+       /*
+        * On first protocol driver load:
+        * Init-Owner: Set IDC-Major-Version and Clear IDC-Lock-Recovery
+        * register.
+        * Others: Check compatibility with current IDC Major version.
+        */
+       qla83xx_rd_reg(vha, QLA83XX_IDC_MAJOR_VERSION, &idc_major_ver);
+       if (ha->flags.nic_core_reset_owner) {
+               /* Set IDC Major version */
+               idc_major_ver = QLA83XX_SUPP_IDC_MAJOR_VERSION;
+               qla83xx_wr_reg(vha, QLA83XX_IDC_MAJOR_VERSION, idc_major_ver);
+
+               /* Clearing IDC-Lock-Recovery register */
+               qla83xx_wr_reg(vha, QLA83XX_IDC_LOCK_RECOVERY, 0);
+       } else if (idc_major_ver != QLA83XX_SUPP_IDC_MAJOR_VERSION) {
+               /*
+                * Clear further IDC participation if we are not compatible with
+                * the current IDC Major Version.
+                */
+               ql_log(ql_log_warn, vha, 0xb07d,
+                   "Failing load, idc_major_ver=%d, expected_major_ver=%d.\n",
+                   idc_major_ver, QLA83XX_SUPP_IDC_MAJOR_VERSION);
+               __qla83xx_clear_drv_presence(vha);
+               rval = QLA_FUNCTION_FAILED;
+               goto exit;
+       }
+       /* Each function sets its supported Minor version. */
+       qla83xx_rd_reg(vha, QLA83XX_IDC_MINOR_VERSION, &idc_minor_ver);
+       idc_minor_ver |= (QLA83XX_SUPP_IDC_MINOR_VERSION << (ha->portnum * 2));
+       qla83xx_wr_reg(vha, QLA83XX_IDC_MINOR_VERSION, idc_minor_ver);
+
+       if (ha->flags.nic_core_reset_owner) {
+               memset(config, 0, sizeof(config));
+               if (!qla81xx_get_port_config(vha, config))
+                       qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
+                           QLA8XXX_DEV_READY);
+       }
+
+       rval = qla83xx_idc_state_handler(vha);
+
+exit:
+       qla83xx_idc_unlock(vha, 0);
+
+       return rval;
+}
+
 /*
 * qla2x00_initialize_adapter
 *      Initialize board.
@@ -537,6 +610,14 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
                }
        }
 
+       /* Load the NIC Core f/w if we are the first protocol driver. */
+       if (IS_QLA8031(ha)) {
+               rval = qla83xx_nic_core_fw_load(vha);
+               if (rval)
+                       ql_log(ql_log_warn, vha, 0x0124,
+                           "Error in initializing NIC Core f/w.\n");
+       }
+
        if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
                qla24xx_read_fcp_prio_cfg(vha);
 
@@ -686,7 +767,7 @@ qla24xx_pci_config(scsi_qla_host_t *vha)
 
        /* PCIe -- adjust Maximum Read Request Size (2048). */
        if (pci_is_pcie(ha->pdev))
-               pcie_set_readrq(ha->pdev, 2048);
+               pcie_set_readrq(ha->pdev, 4096);
 
        pci_disable_rom(ha->pdev);
 
@@ -722,7 +803,7 @@ qla25xx_pci_config(scsi_qla_host_t *vha)
 
        /* PCIe -- adjust Maximum Read Request Size (2048). */
        if (pci_is_pcie(ha->pdev))
-               pcie_set_readrq(ha->pdev, 2048);
+               pcie_set_readrq(ha->pdev, 4096);
 
        pci_disable_rom(ha->pdev);
 
@@ -1480,7 +1561,8 @@ enable_82xx_npiv:
                            "ISP Firmware failed checksum.\n");
                        goto failed;
                }
-       }
+       } else
+               goto failed;
 
        if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
                /* Enable proper parity. */
@@ -1825,7 +1907,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
        ql_dbg(ql_dbg_init, vha, 0x00d1, "Issue init firmware.\n");
 
        if (ha->flags.npiv_supported) {
-               if (ha->operating_mode == LOOP)
+               if (ha->operating_mode == LOOP && !IS_CNA_CAPABLE(ha))
                        ha->max_npiv_vports = MIN_MULTI_ID_FABRIC - 1;
                mid_init_cb->count = cpu_to_le16(ha->max_npiv_vports);
        }
@@ -2682,11 +2764,6 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
        new_fcport = NULL;
        entries = MAX_FIBRE_DEVICES_LOOP;
 
-       ql_dbg(ql_dbg_disc, vha, 0x2016,
-           "Getting FCAL position map.\n");
-       if (ql2xextended_error_logging & ql_dbg_disc)
-               qla2x00_get_fcal_position_map(vha, NULL);
-
        /* Get list of logged in devices. */
        memset(ha->gid_list, 0, qla2x00_gid_list_size(ha));
        rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma,
@@ -2753,6 +2830,8 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
                if (loop_id > LAST_LOCAL_LOOP_ID)
                        continue;
 
+               memset(new_fcport, 0, sizeof(fc_port_t));
+
                /* Fill in member data. */
                new_fcport->d_id.b.domain = domain;
                new_fcport->d_id.b.area = area;
@@ -3285,7 +3364,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
                         */
                        if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
                                fcport->d_id.b24 = new_fcport->d_id.b24;
-                               fcport->loop_id = FC_NO_LOOP_ID;
+                               qla2x00_clear_loop_id(fcport);
                                fcport->flags |= (FCF_FABRIC_DEVICE |
                                    FCF_LOGIN_NEEDED);
                                break;
@@ -3306,7 +3385,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
                                ha->isp_ops->fabric_logout(vha, fcport->loop_id,
                                    fcport->d_id.b.domain, fcport->d_id.b.area,
                                    fcport->d_id.b.al_pa);
-                               fcport->loop_id = FC_NO_LOOP_ID;
+                               qla2x00_clear_loop_id(fcport);
                        }
 
                        break;
@@ -3352,71 +3431,32 @@ int
 qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
 {
        int     rval;
-       int     found;
-       fc_port_t *fcport;
-       uint16_t first_loop_id;
        struct qla_hw_data *ha = vha->hw;
-       struct scsi_qla_host *vp;
-       struct scsi_qla_host *tvp;
        unsigned long flags = 0;
 
        rval = QLA_SUCCESS;
 
-       /* Save starting loop ID. */
-       first_loop_id = dev->loop_id;
-
-       for (;;) {
-               /* Skip loop ID if already used by adapter. */
-               if (dev->loop_id == vha->loop_id)
-                       dev->loop_id++;
-
-               /* Skip reserved loop IDs. */
-               while (qla2x00_is_reserved_id(vha, dev->loop_id))
-                       dev->loop_id++;
-
-               /* Reset loop ID if passed the end. */
-               if (dev->loop_id > ha->max_loop_id) {
-                       /* first loop ID. */
-                       dev->loop_id = ha->min_external_loopid;
-               }
-
-               /* Check for loop ID being already in use. */
-               found = 0;
-               fcport = NULL;
-
-               spin_lock_irqsave(&ha->vport_slock, flags);
-               list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
-                       list_for_each_entry(fcport, &vp->vp_fcports, list) {
-                               if (fcport->loop_id == dev->loop_id &&
-                                                               fcport != dev) {
-                                       /* ID possibly in use */
-                                       found++;
-                                       break;
-                               }
-                       }
-                       if (found)
-                               break;
-               }
-               spin_unlock_irqrestore(&ha->vport_slock, flags);
+       spin_lock_irqsave(&ha->vport_slock, flags);
 
-               /* If not in use then it is free to use. */
-               if (!found) {
-                       ql_dbg(ql_dbg_disc, dev->vha, 0x2086,
-                           "Assigning new loopid=%x, portid=%x.\n",
-                           dev->loop_id, dev->d_id.b24);
-                       break;
-               }
+       dev->loop_id = find_first_zero_bit(ha->loop_id_map,
+           LOOPID_MAP_SIZE);
+       if (dev->loop_id >= LOOPID_MAP_SIZE ||
+           qla2x00_is_reserved_id(vha, dev->loop_id)) {
+               dev->loop_id = FC_NO_LOOP_ID;
+               rval = QLA_FUNCTION_FAILED;
+       } else
+               set_bit(dev->loop_id, ha->loop_id_map);
 
-               /* ID in use. Try next value. */
-               dev->loop_id++;
+       spin_unlock_irqrestore(&ha->vport_slock, flags);
 
-               /* If wrap around. No free ID to use. */
-               if (dev->loop_id == first_loop_id) {
-                       dev->loop_id = FC_NO_LOOP_ID;
-                       rval = QLA_FUNCTION_FAILED;
-                       break;
-               }
-       }
+       if (rval == QLA_SUCCESS)
+               ql_dbg(ql_dbg_disc, dev->vha, 0x2086,
+                   "Assigning new loopid=%x, portid=%x.\n",
+                   dev->loop_id, dev->d_id.b24);
+       else
+               ql_log(ql_log_warn, dev->vha, 0x2087,
+                   "No loop_id's available, portid=%x.\n",
+                   dev->d_id.b24);
 
        return (rval);
 }
@@ -3616,7 +3656,7 @@ qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
                        ha->isp_ops->fabric_logout(vha, fcport->loop_id,
                            fcport->d_id.b.domain, fcport->d_id.b.area,
                            fcport->d_id.b.al_pa);
-                       fcport->loop_id = FC_NO_LOOP_ID;
+                       qla2x00_clear_loop_id(fcport);
                        fcport->login_retry = 0;
 
                        rval = 3;
@@ -3775,8 +3815,363 @@ qla2x00_update_fcports(scsi_qla_host_t *base_vha)
        spin_unlock_irqrestore(&ha->vport_slock, flags);
 }
 
+/* Assumes idc_lock always held on entry */
+void
+qla83xx_reset_ownership(scsi_qla_host_t *vha)
+{
+       struct qla_hw_data *ha = vha->hw;
+       uint32_t drv_presence, drv_presence_mask;
+       uint32_t dev_part_info1, dev_part_info2, class_type;
+       uint32_t class_type_mask = 0x3;
+       uint16_t fcoe_other_function = 0xffff, i;
+
+       qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
+
+       qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO1, &dev_part_info1);
+       qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO2, &dev_part_info2);
+       for (i = 0; i < 8; i++) {
+               class_type = ((dev_part_info1 >> (i * 4)) & class_type_mask);
+               if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
+                   (i != ha->portnum)) {
+                       fcoe_other_function = i;
+                       break;
+               }
+       }
+       if (fcoe_other_function == 0xffff) {
+               for (i = 0; i < 8; i++) {
+                       class_type = ((dev_part_info2 >> (i * 4)) &
+                           class_type_mask);
+                       if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
+                           ((i + 8) != ha->portnum)) {
+                               fcoe_other_function = i + 8;
+                               break;
+                       }
+               }
+       }
+       /*
+        * Prepare drv-presence mask based on fcoe functions present.
+        * However consider only valid physical fcoe function numbers (0-15).
+        */
+       drv_presence_mask = ~((1 << (ha->portnum)) |
+                       ((fcoe_other_function == 0xffff) ?
+                        0 : (1 << (fcoe_other_function))));
+
+       /* We are the reset owner iff:
+        *    - No other protocol drivers present.
+        *    - This is the lowest among fcoe functions. */
+       if (!(drv_presence & drv_presence_mask) &&
+                       (ha->portnum < fcoe_other_function)) {
+               ql_dbg(ql_dbg_p3p, vha, 0xb07f,
+                   "This host is Reset owner.\n");
+               ha->flags.nic_core_reset_owner = 1;
+       }
+}
+
+int
+__qla83xx_set_drv_ack(scsi_qla_host_t *vha)
+{
+       int rval = QLA_SUCCESS;
+       struct qla_hw_data *ha = vha->hw;
+       uint32_t drv_ack;
+
+       rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
+       if (rval == QLA_SUCCESS) {
+               drv_ack |= (1 << ha->portnum);
+               rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack);
+       }
+
+       return rval;
+}
+
+int
+qla83xx_set_drv_ack(scsi_qla_host_t *vha)
+{
+       int rval = QLA_SUCCESS;
+
+       qla83xx_idc_lock(vha, 0);
+       rval = __qla83xx_set_drv_ack(vha);
+       qla83xx_idc_unlock(vha, 0);
+
+       return rval;
+}
+
+int
+__qla83xx_clear_drv_ack(scsi_qla_host_t *vha)
+{
+       int rval = QLA_SUCCESS;
+       struct qla_hw_data *ha = vha->hw;
+       uint32_t drv_ack;
+
+       rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
+       if (rval == QLA_SUCCESS) {
+               drv_ack &= ~(1 << ha->portnum);
+               rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack);
+       }
+
+       return rval;
+}
+
+int
+qla83xx_clear_drv_ack(scsi_qla_host_t *vha)
+{
+       int rval = QLA_SUCCESS;
+
+       qla83xx_idc_lock(vha, 0);
+       rval = __qla83xx_clear_drv_ack(vha);
+       qla83xx_idc_unlock(vha, 0);
+
+       return rval;
+}
+
+const char *
+qla83xx_dev_state_to_string(uint32_t dev_state)
+{
+       switch (dev_state) {
+       case QLA8XXX_DEV_COLD:
+               return "COLD/RE-INIT";
+       case QLA8XXX_DEV_INITIALIZING:
+               return "INITIALIZING";
+       case QLA8XXX_DEV_READY:
+               return "READY";
+       case QLA8XXX_DEV_NEED_RESET:
+               return "NEED RESET";
+       case QLA8XXX_DEV_NEED_QUIESCENT:
+               return "NEED QUIESCENT";
+       case QLA8XXX_DEV_FAILED:
+               return "FAILED";
+       case QLA8XXX_DEV_QUIESCENT:
+               return "QUIESCENT";
+       default:
+               return "Unknown";
+       }
+}
+
+/* Assumes idc-lock always held on entry */
+void
+qla83xx_idc_audit(scsi_qla_host_t *vha, int audit_type)
+{
+       struct qla_hw_data *ha = vha->hw;
+       uint32_t idc_audit_reg = 0, duration_secs = 0;
+
+       switch (audit_type) {
+       case IDC_AUDIT_TIMESTAMP:
+               ha->idc_audit_ts = (jiffies_to_msecs(jiffies) / 1000);
+               idc_audit_reg = (ha->portnum) |
+                   (IDC_AUDIT_TIMESTAMP << 7) | (ha->idc_audit_ts << 8);
+               qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg);
+               break;
+
+       case IDC_AUDIT_COMPLETION:
+               duration_secs = ((jiffies_to_msecs(jiffies) -
+                   jiffies_to_msecs(ha->idc_audit_ts)) / 1000);
+               idc_audit_reg = (ha->portnum) |
+                   (IDC_AUDIT_COMPLETION << 7) | (duration_secs << 8);
+               qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg);
+               break;
+
+       default:
+               ql_log(ql_log_warn, vha, 0xb078,
+                   "Invalid audit type specified.\n");
+               break;
+       }
+}
+
+/* Assumes idc_lock always held on entry */
+int
+qla83xx_initiating_reset(scsi_qla_host_t *vha)
+{
+       struct qla_hw_data *ha = vha->hw;
+       uint32_t  idc_control, dev_state;
+
+       __qla83xx_get_idc_control(vha, &idc_control);
+       if ((idc_control & QLA83XX_IDC_RESET_DISABLED)) {
+               ql_log(ql_log_info, vha, 0xb080,
+                   "NIC Core reset has been disabled. idc-control=0x%x\n",
+                   idc_control);
+               return QLA_FUNCTION_FAILED;
+       }
+
+       /* Set NEED-RESET iff in READY state and we are the reset-owner */
+       qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state);
+       if (ha->flags.nic_core_reset_owner && dev_state == QLA8XXX_DEV_READY) {
+               qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
+                   QLA8XXX_DEV_NEED_RESET);
+               ql_log(ql_log_info, vha, 0xb056, "HW State: NEED RESET.\n");
+               qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP);
+       } else {
+               const char *state = qla83xx_dev_state_to_string(dev_state);
+               ql_log(ql_log_info, vha, 0xb057, "HW State: %s.\n", state);
+
+               /* SV: XXX: Is timeout required here? */
+               /* Wait for IDC state change READY -> NEED_RESET */
+               while (dev_state == QLA8XXX_DEV_READY) {
+                       qla83xx_idc_unlock(vha, 0);
+                       msleep(200);
+                       qla83xx_idc_lock(vha, 0);
+                       qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state);
+               }
+       }
+
+       /* Send IDC ack by writing to drv-ack register */
+       __qla83xx_set_drv_ack(vha);
+
+       return QLA_SUCCESS;
+}
+
+int
+__qla83xx_set_idc_control(scsi_qla_host_t *vha, uint32_t idc_control)
+{
+       return qla83xx_wr_reg(vha, QLA83XX_IDC_CONTROL, idc_control);
+}
+
+int
+qla83xx_set_idc_control(scsi_qla_host_t *vha, uint32_t idc_control)
+{
+       int rval = QLA_SUCCESS;
+
+       qla83xx_idc_lock(vha, 0);
+       rval = __qla83xx_set_idc_control(vha, idc_control);
+       qla83xx_idc_unlock(vha, 0);
+
+       return rval;
+}
+
+int
+__qla83xx_get_idc_control(scsi_qla_host_t *vha, uint32_t *idc_control)
+{
+       return qla83xx_rd_reg(vha, QLA83XX_IDC_CONTROL, idc_control);
+}
+
+int
+qla83xx_get_idc_control(scsi_qla_host_t *vha, uint32_t *idc_control)
+{
+       int rval = QLA_SUCCESS;
+
+       qla83xx_idc_lock(vha, 0);
+       rval = __qla83xx_get_idc_control(vha, idc_control);
+       qla83xx_idc_unlock(vha, 0);
+
+       return rval;
+}
+
+int
+qla83xx_check_driver_presence(scsi_qla_host_t *vha)
+{
+       uint32_t drv_presence = 0;
+       struct qla_hw_data *ha = vha->hw;
+
+       qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
+       if (drv_presence & (1 << ha->portnum))
+               return QLA_SUCCESS;
+       else
+               return QLA_TEST_FAILED;
+}
+
+int
+qla83xx_nic_core_reset(scsi_qla_host_t *vha)
+{
+       int rval = QLA_SUCCESS;
+       struct qla_hw_data *ha = vha->hw;
+
+       ql_dbg(ql_dbg_p3p, vha, 0xb058,
+           "Entered  %s().\n", __func__);
+
+       if (vha->device_flags & DFLG_DEV_FAILED) {
+               ql_log(ql_log_warn, vha, 0xb059,
+                   "Device in unrecoverable FAILED state.\n");
+               return QLA_FUNCTION_FAILED;
+       }
+
+       qla83xx_idc_lock(vha, 0);
+
+       if (qla83xx_check_driver_presence(vha) != QLA_SUCCESS) {
+               ql_log(ql_log_warn, vha, 0xb05a,
+                   "Function=0x%x has been removed from IDC participation.\n",
+                   ha->portnum);
+               rval = QLA_FUNCTION_FAILED;
+               goto exit;
+       }
+
+       qla83xx_reset_ownership(vha);
+
+       rval = qla83xx_initiating_reset(vha);
+
+       /*
+        * Perform reset if we are the reset-owner,
+        * else wait till IDC state changes to READY/FAILED.
+        */
+       if (rval == QLA_SUCCESS) {
+               rval = qla83xx_idc_state_handler(vha);
+
+               if (rval == QLA_SUCCESS)
+                       ha->flags.nic_core_hung = 0;
+               __qla83xx_clear_drv_ack(vha);
+       }
+
+exit:
+       qla83xx_idc_unlock(vha, 0);
+
+       ql_dbg(ql_dbg_p3p, vha, 0xb05b, "Exiting %s.\n", __func__);
+
+       return rval;
+}
+
+int
+qla2xxx_mctp_dump(scsi_qla_host_t *vha)
+{
+       struct qla_hw_data *ha = vha->hw;
+       int rval = QLA_FUNCTION_FAILED;
+
+       if (!IS_MCTP_CAPABLE(ha)) {
+               /* This message can be removed from the final version */
+               ql_log(ql_log_info, vha, 0x506d,
+                   "This board is not MCTP capable\n");
+               return rval;
+       }
+
+       if (!ha->mctp_dump) {
+               ha->mctp_dump = dma_alloc_coherent(&ha->pdev->dev,
+                   MCTP_DUMP_SIZE, &ha->mctp_dump_dma, GFP_KERNEL);
+
+               if (!ha->mctp_dump) {
+                       ql_log(ql_log_warn, vha, 0x506e,
+                           "Failed to allocate memory for mctp dump\n");
+                       return rval;
+               }
+       }
+
+#define MCTP_DUMP_STR_ADDR     0x00000000
+       rval = qla2x00_dump_mctp_data(vha, ha->mctp_dump_dma,
+           MCTP_DUMP_STR_ADDR, MCTP_DUMP_SIZE/4);
+       if (rval != QLA_SUCCESS) {
+               ql_log(ql_log_warn, vha, 0x506f,
+                   "Failed to capture mctp dump\n");
+       } else {
+               ql_log(ql_log_info, vha, 0x5070,
+                   "Mctp dump capture for host (%ld/%p).\n",
+                   vha->host_no, ha->mctp_dump);
+               ha->mctp_dumped = 1;
+       }
+
+       if (!ha->flags.nic_core_reset_hdlr_active && !ha->portnum) {
+               ha->flags.nic_core_reset_hdlr_active = 1;
+               rval = qla83xx_restart_nic_firmware(vha);
+               if (rval)
+                       /* NIC Core reset failed. */
+                       ql_log(ql_log_warn, vha, 0x5071,
+                           "Failed to restart nic firmware\n");
+               else
+                       ql_dbg(ql_dbg_p3p, vha, 0xb084,
+                           "Restarted NIC firmware successfully.\n");
+               ha->flags.nic_core_reset_hdlr_active = 0;
+       }
+
+       return rval;
+
+}
+
 /*
-* qla82xx_quiescent_state_cleanup
+* qla2x00_quiesce_io
 * Description: This function will block the new I/Os
 *              Its not aborting any I/Os as context
 *              is not destroyed during quiescence
@@ -3784,20 +4179,20 @@ qla2x00_update_fcports(scsi_qla_host_t *base_vha)
 * return   : void
 */
 void
-qla82xx_quiescent_state_cleanup(scsi_qla_host_t *vha)
+qla2x00_quiesce_io(scsi_qla_host_t *vha)
 {
        struct qla_hw_data *ha = vha->hw;
        struct scsi_qla_host *vp;
 
-       ql_dbg(ql_dbg_p3p, vha, 0xb002,
-           "Performing ISP error recovery - ha=%p.\n", ha);
+       ql_dbg(ql_dbg_dpc, vha, 0x401d,
+           "Quiescing I/O - ha=%p.\n", ha);
 
        atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
        if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
                atomic_set(&vha->loop_state, LOOP_DOWN);
                qla2x00_mark_all_devices_lost(vha, 0);
                list_for_each_entry(vp, &ha->vp_list, list)
-                       qla2x00_mark_all_devices_lost(vha, 0);
+                       qla2x00_mark_all_devices_lost(vp, 0);
        } else {
                if (!atomic_read(&vha->loop_down_timer))
                        atomic_set(&vha->loop_down_timer,
@@ -3913,6 +4308,14 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
        if (vha->flags.online) {
                qla2x00_abort_isp_cleanup(vha);
 
+               if (IS_QLA8031(ha)) {
+                       ql_dbg(ql_dbg_p3p, vha, 0xb05c,
+                           "Clearing fcoe driver presence.\n");
+                       if (qla83xx_clear_drv_presence(vha) != QLA_SUCCESS)
+                               ql_dbg(ql_dbg_p3p, vha, 0xb073,
+                                   "Error while clearing DRV-Presence.\n");
+               }
+
                if (unlikely(pci_channel_offline(ha->pdev) &&
                    ha->flags.pci_channel_io_perm_failure)) {
                        clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
@@ -4021,6 +4424,13 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
                }
                spin_unlock_irqrestore(&ha->vport_slock, flags);
 
+               if (IS_QLA8031(ha)) {
+                       ql_dbg(ql_dbg_p3p, vha, 0xb05d,
+                           "Setting back fcoe driver presence.\n");
+                       if (qla83xx_set_drv_presence(vha) != QLA_SUCCESS)
+                               ql_dbg(ql_dbg_p3p, vha, 0xb074,
+                                   "Error while setting DRV-Presence.\n");
+               }
        } else {
                ql_log(ql_log_warn, vha, 0x8023, "%s **** FAILED ****.\n",
                       __func__);
@@ -5088,6 +5498,9 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
                rval = 1;
        }
 
+       if (IS_T10_PI_CAPABLE(ha))
+               nv->frame_payload_size &= ~7;
+
        /* Reset Initialization control block */
        memset(icb, 0, ha->init_cb_size);
 
index 6e457643c63909f51741b68f86bce197c4d9c81d..c0462c04c885de1d8cf3008d7c6200e04ad11028 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2011 QLogic Corporation
+ * Copyright (c)  2003-2012 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -57,6 +57,20 @@ host_to_fcp_swap(uint8_t *fcp, uint32_t bsize)
        return fcp;
 }
 
+static inline void
+qla2x00_set_reserved_loop_ids(struct qla_hw_data *ha)
+{
+       int i;
+
+       if (IS_FWI2_CAPABLE(ha))
+               return;
+
+       for (i = 0; i < SNS_FIRST_LOOP_ID; i++)
+               set_bit(i, ha->loop_id_map);
+       set_bit(MANAGEMENT_SERVER, ha->loop_id_map);
+       set_bit(BROADCAST, ha->loop_id_map);
+}
+
 static inline int
 qla2x00_is_reserved_id(scsi_qla_host_t *vha, uint16_t loop_id)
 {
@@ -68,6 +82,18 @@ qla2x00_is_reserved_id(scsi_qla_host_t *vha, uint16_t loop_id)
            loop_id == MANAGEMENT_SERVER || loop_id == BROADCAST);
 }
 
+static inline void
+qla2x00_clear_loop_id(fc_port_t *fcport) {
+       struct qla_hw_data *ha = fcport->vha->hw;
+
+       if (fcport->loop_id == FC_NO_LOOP_ID ||
+           qla2x00_is_reserved_id(fcport->vha, fcport->loop_id))
+               return;
+
+       clear_bit(fcport->loop_id, ha->loop_id_map);
+       fcport->loop_id = FC_NO_LOOP_ID;
+}
+
 static inline void
 qla2x00_clean_dsd_pool(struct qla_hw_data *ha, srb_t *sp)
 {
index 70dbf53d9e0f4fe0762073cb0bfec1508251fc8f..03b75263283995894a023e11c458aa21e228d105 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2011 QLogic Corporation
+ * Copyright (c)  2003-2012 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -147,13 +147,6 @@ qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
        uint8_t guard = scsi_host_get_guard(cmd->device->host);
 
-       /* We only support T10 DIF right now */
-       if (guard != SHOST_DIX_GUARD_CRC) {
-               ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3007,
-                   "Unsupported guard: %d for cmd=%p.\n", guard, cmd);
-               return 0;
-       }
-
        /* We always use DIFF Bundling for best performance */
        *fw_prot_opts = 0;
 
@@ -172,10 +165,11 @@ qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
                *fw_prot_opts |= PO_MODE_DIF_REMOVE;
                break;
        case SCSI_PROT_READ_PASS:
-               *fw_prot_opts |= PO_MODE_DIF_PASS;
-               break;
        case SCSI_PROT_WRITE_PASS:
-               *fw_prot_opts |= PO_MODE_DIF_PASS;
+               if (guard & SHOST_DIX_GUARD_IP)
+                       *fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
+               else
+                       *fw_prot_opts |= PO_MODE_DIF_PASS;
                break;
        default:        /* Normal Request */
                *fw_prot_opts |= PO_MODE_DIF_PASS;
@@ -821,7 +815,6 @@ qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
     unsigned int protcnt)
 {
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
-       scsi_qla_host_t *vha = shost_priv(cmd->device->host);
 
        switch (scsi_get_prot_type(cmd)) {
        case SCSI_PROT_DIF_TYPE0:
@@ -891,12 +884,6 @@ qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
                pkt->ref_tag_mask[3] = 0xff;
                break;
        }
-
-       ql_dbg(ql_dbg_io, vha, 0x3009,
-           "Setting protection Tags: (BIG) ref tag = 0x%x, app tag = 0x%x, "
-           "prot SG count %d, cmd lba 0x%x, prot_type=%u cmd=%p.\n",
-           pkt->ref_tag, pkt->app_tag, protcnt, (int)scsi_get_lba(cmd),
-           scsi_get_prot_type(cmd), cmd);
 }
 
 struct qla2_sgx {
@@ -1068,9 +1055,6 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
        int     i;
        uint16_t        used_dsds = tot_dsds;
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
-       scsi_qla_host_t *vha = shost_priv(cmd->device->host);
-
-       uint8_t         *cp;
 
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;
@@ -1113,19 +1097,12 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
                        cur_dsd = (uint32_t *)next_dsd;
                }
                sle_dma = sg_dma_address(sg);
-               ql_dbg(ql_dbg_io, vha, 0x300a,
-                   "sg entry %d - addr=0x%x 0x%x, " "len=%d for cmd=%p.\n",
-                   i, LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg), cmd);
+
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
 
-               if (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_PASS) {
-                       cp = page_address(sg_page(sg)) + sg->offset;
-                       ql_dbg(ql_dbg_io, vha, 0x300b,
-                           "User data buffer=%p for cmd=%p.\n", cp, cmd);
-               }
        }
        /* Null termination */
        *cur_dsd++ = 0;
@@ -1148,8 +1125,6 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
        struct scsi_cmnd *cmd;
        uint32_t *cur_dsd = dsd;
        uint16_t        used_dsds = tot_dsds;
-       scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
-       uint8_t         *cp;
 
        cmd = GET_CMD_SP(sp);
        scsi_for_each_prot_sg(cmd, sg, tot_dsds, i) {
@@ -1193,23 +1168,11 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
                        cur_dsd = (uint32_t *)next_dsd;
                }
                sle_dma = sg_dma_address(sg);
-               if (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_PASS) {
-                       ql_dbg(ql_dbg_io, vha, 0x3027,
-                           "%s(): %p, sg_entry %d - "
-                           "addr=0x%x0x%x, len=%d.\n",
-                           __func__, cur_dsd, i,
-                           LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg));
-               }
+
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
 
-               if (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_PASS) {
-                       cp = page_address(sg_page(sg)) + sg->offset;
-                       ql_dbg(ql_dbg_io, vha, 0x3028,
-                           "%s(): Protection Data buffer = %p.\n", __func__,
-                           cp);
-               }
                avail_dsds--;
        }
        /* Null termination */
@@ -1386,6 +1349,16 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
 
        if (!qla2x00_hba_err_chk_enabled(sp))
                fw_prot_opts |= 0x10; /* Disable Guard tag checking */
+       /* HBA error checking enabled */
+       else if (IS_PI_UNINIT_CAPABLE(ha)) {
+               if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
+                   || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
+                       SCSI_PROT_DIF_TYPE2))
+                       fw_prot_opts |= BIT_10;
+               else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
+                   SCSI_PROT_DIF_TYPE3)
+                       fw_prot_opts |= BIT_11;
+       }
 
        if (!bundling) {
                cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
@@ -1858,7 +1831,7 @@ qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
        }
        if (index == MAX_OUTSTANDING_COMMANDS) {
                ql_log(ql_log_warn, vha, 0x700b,
-                   "No room on oustanding cmd array.\n");
+                   "No room on outstanding cmd array.\n");
                goto queuing_error;
        }
 
@@ -2665,3 +2638,201 @@ done:
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        return rval;
 }
+
+static void
+qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
+                               struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
+{
+       uint16_t avail_dsds;
+       uint32_t *cur_dsd;
+       uint32_t req_data_len = 0;
+       uint32_t rsp_data_len = 0;
+       struct scatterlist *sg;
+       int index;
+       int entry_count = 1;
+       struct fc_bsg_job *bsg_job = sp->u.bsg_job;
+
+       /*Update entry type to indicate bidir command */
+       *((uint32_t *)(&cmd_pkt->entry_type)) =
+               __constant_cpu_to_le32(COMMAND_BIDIRECTIONAL);
+
+       /* Set the transfer direction, in this set both flags
+        * Also set the BD_WRAP_BACK flag, firmware will take care
+        * assigning DID=SID for outgoing pkts.
+        */
+       cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
+       cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
+       cmd_pkt->control_flags =
+                       __constant_cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
+                                                       BD_WRAP_BACK);
+
+       req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
+       cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
+       cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);
+       cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);
+
+       vha->bidi_stats.transfer_bytes += req_data_len;
+       vha->bidi_stats.io_count++;
+
+       /* Only one dsd is available for bidirectional IOCB, remaining dsds
+        * are bundled in continuation iocb
+        */
+       avail_dsds = 1;
+       cur_dsd = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
+
+       index = 0;
+
+       for_each_sg(bsg_job->request_payload.sg_list, sg,
+                               bsg_job->request_payload.sg_cnt, index) {
+               dma_addr_t sle_dma;
+               cont_a64_entry_t *cont_pkt;
+
+               /* Allocate additional continuation packets */
+               if (avail_dsds == 0) {
+                       /* Continuation type 1 IOCB can accomodate
+                        * 5 DSDS
+                        */
+                       cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
+                       cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
+                       avail_dsds = 5;
+                       entry_count++;
+               }
+               sle_dma = sg_dma_address(sg);
+               *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
+               *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
+               *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
+               avail_dsds--;
+       }
+       /* For read request DSD will always goes to continuation IOCB
+        * and follow the write DSD. If there is room on the current IOCB
+        * then it is added to that IOCB else new continuation IOCB is
+        * allocated.
+        */
+       for_each_sg(bsg_job->reply_payload.sg_list, sg,
+                               bsg_job->reply_payload.sg_cnt, index) {
+               dma_addr_t sle_dma;
+               cont_a64_entry_t *cont_pkt;
+
+               /* Allocate additional continuation packets */
+               if (avail_dsds == 0) {
+                       /* Continuation type 1 IOCB can accomodate
+                        * 5 DSDS
+                        */
+                       cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
+                       cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
+                       avail_dsds = 5;
+                       entry_count++;
+               }
+               sle_dma = sg_dma_address(sg);
+               *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
+               *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
+               *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
+               avail_dsds--;
+       }
+       /* This value should be same as number of IOCB required for this cmd */
+       cmd_pkt->entry_count = entry_count;
+}
+
+int
+qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
+{
+
+       struct qla_hw_data *ha = vha->hw;
+       unsigned long flags;
+       uint32_t handle;
+       uint32_t index;
+       uint16_t req_cnt;
+       uint16_t cnt;
+       uint32_t *clr_ptr;
+       struct cmd_bidir *cmd_pkt = NULL;
+       struct rsp_que *rsp;
+       struct req_que *req;
+       int rval = EXT_STATUS_OK;
+       device_reg_t __iomem *reg = ISP_QUE_REG(ha, vha->req->id);
+
+       rval = QLA_SUCCESS;
+
+       rsp = ha->rsp_q_map[0];
+       req = vha->req;
+
+       /* Send marker if required */
+       if (vha->marker_needed != 0) {
+               if (qla2x00_marker(vha, req,
+                       rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
+                       return EXT_STATUS_MAILBOX;
+               vha->marker_needed = 0;
+       }
+
+       /* Acquire ring specific lock */
+       spin_lock_irqsave(&ha->hardware_lock, flags);
+
+       /* Check for room in outstanding command list. */
+       handle = req->current_outstanding_cmd;
+       for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
+               handle++;
+       if (handle == MAX_OUTSTANDING_COMMANDS)
+               handle = 1;
+       if (!req->outstanding_cmds[handle])
+               break;
+       }
+
+       if (index == MAX_OUTSTANDING_COMMANDS) {
+               rval = EXT_STATUS_BUSY;
+               goto queuing_error;
+       }
+
+       /* Calculate number of IOCB required */
+       req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
+
+       /* Check for room on request queue. */
+       if (req->cnt < req_cnt + 2) {
+               if (ha->mqenable)
+                       cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
+               else if (IS_QLA82XX(ha))
+                       cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
+               else if (IS_FWI2_CAPABLE(ha))
+                       cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
+               else
+                       cnt = qla2x00_debounce_register(
+                                       ISP_REQ_Q_OUT(ha, &reg->isp));
+
+               if  (req->ring_index < cnt)
+                       req->cnt = cnt - req->ring_index;
+               else
+                       req->cnt = req->length -
+                               (req->ring_index - cnt);
+       }
+       if (req->cnt < req_cnt + 2) {
+               rval = EXT_STATUS_BUSY;
+               goto queuing_error;
+       }
+
+       cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
+       cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+
+       /* Zero out remaining portion of packet. */
+       /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
+       clr_ptr = (uint32_t *)cmd_pkt + 2;
+       memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
+
+       /* Set NPORT-ID  (of vha)*/
+       cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id);
+       cmd_pkt->port_id[0] = vha->d_id.b.al_pa;
+       cmd_pkt->port_id[1] = vha->d_id.b.area;
+       cmd_pkt->port_id[2] = vha->d_id.b.domain;
+
+       qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds);
+       cmd_pkt->entry_status = (uint8_t) rsp->id;
+       /* Build command packet. */
+       req->current_outstanding_cmd = handle;
+       req->outstanding_cmds[handle] = sp;
+       sp->handle = handle;
+       req->cnt -= req_cnt;
+
+       /* Send the command to the firmware */
+       wmb();
+       qla2x00_start_iocbs(vha, req);
+queuing_error:
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+       return rval;
+}
index 6f67a9d4998b6d43fbab53a0a75918c6f607fbf0..5733811ce8e79eb0bce599550604fe6290444f5f 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2011 QLogic Corporation
+ * Copyright (c)  2003-2012 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -294,6 +294,11 @@ qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
            "%04x %04x %04x %04x %04x %04x %04x.\n",
            event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
            mb[4], mb[5], mb[6]);
+       if ((aen == MBA_IDC_COMPLETE && mb[1] >> 15)) {
+               vha->hw->flags.idc_compl_status = 1;
+               if (vha->hw->notify_dcbx_comp)
+                       complete(&vha->hw->dcbx_comp);
+       }
 
        /* Acknowledgement needed? [Notify && non-zero timeout]. */
        timeout = (descr >> 8) & 0xf;
@@ -332,6 +337,166 @@ qla2x00_get_link_speed_str(struct qla_hw_data *ha)
        return link_speed;
 }
 
+void
+qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
+{
+       struct qla_hw_data *ha = vha->hw;
+
+       /*
+        * 8200 AEN Interpretation:
+        * mb[0] = AEN code
+        * mb[1] = AEN Reason code
+        * mb[2] = LSW of Peg-Halt Status-1 Register
+        * mb[6] = MSW of Peg-Halt Status-1 Register
+        * mb[3] = LSW of Peg-Halt Status-2 register
+        * mb[7] = MSW of Peg-Halt Status-2 register
+        * mb[4] = IDC Device-State Register value
+        * mb[5] = IDC Driver-Presence Register value
+        */
+       ql_dbg(ql_dbg_async, vha, 0x506b, "AEN Code: mb[0] = 0x%x AEN reason: "
+           "mb[1] = 0x%x PH-status1: mb[2] = 0x%x PH-status1: mb[6] = 0x%x.\n",
+           mb[0], mb[1], mb[2], mb[6]);
+       ql_dbg(ql_dbg_async, vha, 0x506c, "PH-status2: mb[3] = 0x%x "
+           "PH-status2: mb[7] = 0x%x Device-State: mb[4] = 0x%x "
+           "Drv-Presence: mb[5] = 0x%x.\n", mb[3], mb[7], mb[4], mb[5]);
+
+       if (mb[1] & (IDC_PEG_HALT_STATUS_CHANGE | IDC_NIC_FW_REPORTED_FAILURE |
+                               IDC_HEARTBEAT_FAILURE)) {
+               ha->flags.nic_core_hung = 1;
+               ql_log(ql_log_warn, vha, 0x5060,
+                   "83XX: F/W Error Reported: Check if reset required.\n");
+
+               if (mb[1] & IDC_PEG_HALT_STATUS_CHANGE) {
+                       uint32_t protocol_engine_id, fw_err_code, err_level;
+
+                       /*
+                        * IDC_PEG_HALT_STATUS_CHANGE interpretation:
+                        *  - PEG-Halt Status-1 Register:
+                        *      (LSW = mb[2], MSW = mb[6])
+                        *      Bits 0-7   = protocol-engine ID
+                        *      Bits 8-28  = f/w error code
+                        *      Bits 29-31 = Error-level
+                        *          Error-level 0x1 = Non-Fatal error
+                        *          Error-level 0x2 = Recoverable Fatal error
+                        *          Error-level 0x4 = UnRecoverable Fatal error
+                        *  - PEG-Halt Status-2 Register:
+                        *      (LSW = mb[3], MSW = mb[7])
+                        */
+                       protocol_engine_id = (mb[2] & 0xff);
+                       fw_err_code = (((mb[2] & 0xff00) >> 8) |
+                           ((mb[6] & 0x1fff) << 8));
+                       err_level = ((mb[6] & 0xe000) >> 13);
+                       ql_log(ql_log_warn, vha, 0x5061, "PegHalt Status-1 "
+                           "Register: protocol_engine_id=0x%x "
+                           "fw_err_code=0x%x err_level=0x%x.\n",
+                           protocol_engine_id, fw_err_code, err_level);
+                       ql_log(ql_log_warn, vha, 0x5062, "PegHalt Status-2 "
+                           "Register: 0x%x%x.\n", mb[7], mb[3]);
+                       if (err_level == ERR_LEVEL_NON_FATAL) {
+                               ql_log(ql_log_warn, vha, 0x5063,
+                                   "Not a fatal error, f/w has recovered "
+                                   "iteself.\n");
+                       } else if (err_level == ERR_LEVEL_RECOVERABLE_FATAL) {
+                               ql_log(ql_log_fatal, vha, 0x5064,
+                                   "Recoverable Fatal error: Chip reset "
+                                   "required.\n");
+                               qla83xx_schedule_work(vha,
+                                   QLA83XX_NIC_CORE_RESET);
+                       } else if (err_level == ERR_LEVEL_UNRECOVERABLE_FATAL) {
+                               ql_log(ql_log_fatal, vha, 0x5065,
+                                   "Unrecoverable Fatal error: Set FAILED "
+                                   "state, reboot required.\n");
+                               qla83xx_schedule_work(vha,
+                                   QLA83XX_NIC_CORE_UNRECOVERABLE);
+                       }
+               }
+
+               if (mb[1] & IDC_NIC_FW_REPORTED_FAILURE) {
+                       uint16_t peg_fw_state, nw_interface_link_up;
+                       uint16_t nw_interface_signal_detect, sfp_status;
+                       uint16_t htbt_counter, htbt_monitor_enable;
+                       uint16_t sfp_additonal_info, sfp_multirate;
+                       uint16_t sfp_tx_fault, link_speed, dcbx_status;
+
+                       /*
+                        * IDC_NIC_FW_REPORTED_FAILURE interpretation:
+                        *  - PEG-to-FC Status Register:
+                        *      (LSW = mb[2], MSW = mb[6])
+                        *      Bits 0-7   = Peg-Firmware state
+                        *      Bit 8      = N/W Interface Link-up
+                        *      Bit 9      = N/W Interface signal detected
+                        *      Bits 10-11 = SFP Status
+                        *        SFP Status 0x0 = SFP+ transceiver not expected
+                        *        SFP Status 0x1 = SFP+ transceiver not present
+                        *        SFP Status 0x2 = SFP+ transceiver invalid
+                        *        SFP Status 0x3 = SFP+ transceiver present and
+                        *        valid
+                        *      Bits 12-14 = Heartbeat Counter
+                        *      Bit 15     = Heartbeat Monitor Enable
+                        *      Bits 16-17 = SFP Additional Info
+                        *        SFP info 0x0 = Unregocnized transceiver for
+                        *        Ethernet
+                        *        SFP info 0x1 = SFP+ brand validation failed
+                        *        SFP info 0x2 = SFP+ speed validation failed
+                        *        SFP info 0x3 = SFP+ access error
+                        *      Bit 18     = SFP Multirate
+                        *      Bit 19     = SFP Tx Fault
+                        *      Bits 20-22 = Link Speed
+                        *      Bits 23-27 = Reserved
+                        *      Bits 28-30 = DCBX Status
+                        *        DCBX Status 0x0 = DCBX Disabled
+                        *        DCBX Status 0x1 = DCBX Enabled
+                        *        DCBX Status 0x2 = DCBX Exchange error
+                        *      Bit 31     = Reserved
+                        */
+                       peg_fw_state = (mb[2] & 0x00ff);
+                       nw_interface_link_up = ((mb[2] & 0x0100) >> 8);
+                       nw_interface_signal_detect = ((mb[2] & 0x0200) >> 9);
+                       sfp_status = ((mb[2] & 0x0c00) >> 10);
+                       htbt_counter = ((mb[2] & 0x7000) >> 12);
+                       htbt_monitor_enable = ((mb[2] & 0x8000) >> 15);
+                       sfp_additonal_info = (mb[6] & 0x0003);
+                       sfp_multirate = ((mb[6] & 0x0004) >> 2);
+                       sfp_tx_fault = ((mb[6] & 0x0008) >> 3);
+                       link_speed = ((mb[6] & 0x0070) >> 4);
+                       dcbx_status = ((mb[6] & 0x7000) >> 12);
+
+                       ql_log(ql_log_warn, vha, 0x5066,
+                           "Peg-to-Fc Status Register:\n"
+                           "peg_fw_state=0x%x, nw_interface_link_up=0x%x, "
+                           "nw_interface_signal_detect=0x%x"
+                           "\nsfp_statis=0x%x.\n ", peg_fw_state,
+                           nw_interface_link_up, nw_interface_signal_detect,
+                           sfp_status);
+                       ql_log(ql_log_warn, vha, 0x5067,
+                           "htbt_counter=0x%x, htbt_monitor_enable=0x%x, "
+                           "sfp_additonal_info=0x%x, sfp_multirate=0x%x.\n ",
+                           htbt_counter, htbt_monitor_enable,
+                           sfp_additonal_info, sfp_multirate);
+                       ql_log(ql_log_warn, vha, 0x5068,
+                           "sfp_tx_fault=0x%x, link_state=0x%x, "
+                           "dcbx_status=0x%x.\n", sfp_tx_fault, link_speed,
+                           dcbx_status);
+
+                       qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
+               }
+
+               if (mb[1] & IDC_HEARTBEAT_FAILURE) {
+                       ql_log(ql_log_warn, vha, 0x5069,
+                           "Heartbeat Failure encountered, chip reset "
+                           "required.\n");
+
+                       qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
+               }
+       }
+
+       if (mb[1] & IDC_DEVICE_STATE_CHANGE) {
+               ql_log(ql_log_info, vha, 0x506a,
+                   "IDC Device-State changed = 0x%x.\n", mb[4]);
+               qla83xx_schedule_work(vha, MBA_IDC_AEN);
+       }
+}
+
 /**
  * qla2x00_async_event() - Process aynchronous events.
  * @ha: SCSI driver HA context
@@ -681,8 +846,7 @@ skip_rio:
                 * it.  Otherwise ignore it and Wait for RSCN to come in.
                 */
                atomic_set(&vha->loop_down_timer, 0);
-               if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
-                   atomic_read(&vha->loop_state) != LOOP_DEAD) {
+               if (mb[1] != 0xffff || (mb[2] != 0x6 && mb[2] != 0x4)) {
                        ql_dbg(ql_dbg_async, vha, 0x5011,
                            "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
                            mb[1], mb[2], mb[3]);
@@ -822,11 +986,28 @@ skip_rio:
                    "FCF Configuration Error -- %04x %04x %04x.\n",
                    mb[1], mb[2], mb[3]);
                break;
-       case MBA_IDC_COMPLETE:
        case MBA_IDC_NOTIFY:
+               /* See if we need to quiesce any I/O */
+               if (IS_QLA8031(vha->hw))
+                       if ((mb[2] & 0x7fff) == MBC_PORT_RESET ||
+                           (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) {
+                               set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
+                               qla2xxx_wake_dpc(vha);
+                       }
+       case MBA_IDC_COMPLETE:
        case MBA_IDC_TIME_EXT:
-               qla81xx_idc_event(vha, mb[0], mb[1]);
+               if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw))
+                       qla81xx_idc_event(vha, mb[0], mb[1]);
                break;
+
+       case MBA_IDC_AEN:
+               mb[4] = RD_REG_WORD(&reg24->mailbox4);
+               mb[5] = RD_REG_WORD(&reg24->mailbox5);
+               mb[6] = RD_REG_WORD(&reg24->mailbox6);
+               mb[7] = RD_REG_WORD(&reg24->mailbox7);
+               qla83xx_handle_8200_aen(vha, mb);
+               break;
+
        default:
                ql_dbg(ql_dbg_async, vha, 0x5057,
                    "Unknown AEN:%04x %04x %04x %04x\n",
@@ -1414,7 +1595,7 @@ qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
 
 struct scsi_dif_tuple {
        __be16 guard;       /* Checksum */
-       __be16 app_tag;         /* APPL identifer */
+       __be16 app_tag;         /* APPL identifier */
        __be32 ref_tag;         /* Target LBA or indirect LBA */
 };
 
@@ -1546,6 +1727,149 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
        return 1;
 }
 
+static void
+qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
+                                 struct req_que *req, uint32_t index)
+{ /* Complete a bidirectional pass-through (bsg) command status IOCB. */
+       struct qla_hw_data *ha = vha->hw;
+       srb_t *sp;
+       uint16_t        comp_status;
+       uint16_t        scsi_status;
+       uint16_t thread_id;
+       uint32_t rval = EXT_STATUS_OK;
+       struct fc_bsg_job *bsg_job = NULL;
+       sts_entry_t *sts;
+       struct sts_entry_24xx *sts24;
+       sts = (sts_entry_t *) pkt;
+       sts24 = (struct sts_entry_24xx *) pkt;
+
+       /* Validate handle. */
+       if (index >= MAX_OUTSTANDING_COMMANDS) {
+               ql_log(ql_log_warn, vha, 0x70af,
+                   "Invalid SCSI completion handle 0x%x.\n", index);
+               set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+               return;
+       }
+
+       sp = req->outstanding_cmds[index];
+       if (sp) {
+               /* Free outstanding command slot. */
+               req->outstanding_cmds[index] = NULL;
+               bsg_job = sp->u.bsg_job;
+       } else {
+               ql_log(ql_log_warn, vha, 0x70b0,
+                   "Req:%d: Invalid ISP SCSI completion handle(0x%x)\n",
+                   req->id, index);
+
+               set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+               return;
+       }
+
+       if (IS_FWI2_CAPABLE(ha)) {
+               comp_status = le16_to_cpu(sts24->comp_status);
+               scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
+       } else {
+               comp_status = le16_to_cpu(sts->comp_status);
+               scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
+       }
+
+       thread_id = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+       switch (comp_status) {
+       case CS_COMPLETE:
+               if (scsi_status == 0) {
+                       bsg_job->reply->reply_payload_rcv_len =
+                                       bsg_job->reply_payload.payload_len;
+                       rval = EXT_STATUS_OK;
+               }
+               goto done;
+
+       case CS_DATA_OVERRUN:
+               ql_dbg(ql_dbg_user, vha, 0x70b1,
+                   "Command completed with data overrun thread_id=%d\n",
+                   thread_id);
+               rval = EXT_STATUS_DATA_OVERRUN;
+               break;
+
+       case CS_DATA_UNDERRUN:
+               ql_dbg(ql_dbg_user, vha, 0x70b2,
+                   "Command completed with data underrun thread_id=%d\n",
+                   thread_id);
+               rval = EXT_STATUS_DATA_UNDERRUN;
+               break;
+       case CS_BIDIR_RD_OVERRUN:
+               ql_dbg(ql_dbg_user, vha, 0x70b3,
+                   "Command completed with read data overrun thread_id=%d\n",
+                   thread_id);
+               rval = EXT_STATUS_DATA_OVERRUN;
+               break;
+
+       case CS_BIDIR_RD_WR_OVERRUN:
+               ql_dbg(ql_dbg_user, vha, 0x70b4,
+                   "Command completed with read and write data overrun "
+                   "thread_id=%d\n", thread_id);
+               rval = EXT_STATUS_DATA_OVERRUN;
+               break;
+
+       case CS_BIDIR_RD_OVERRUN_WR_UNDERRUN:
+               ql_dbg(ql_dbg_user, vha, 0x70b5,
+                   "Command completed with read data over and write data "
+                   "underrun thread_id=%d\n", thread_id);
+               rval = EXT_STATUS_DATA_OVERRUN;
+               break;
+
+       case CS_BIDIR_RD_UNDERRUN:
+               ql_dbg(ql_dbg_user, vha, 0x70b6,
+                   "Command completed with read data underrun "
+                   "thread_id=%d\n", thread_id);
+               rval = EXT_STATUS_DATA_UNDERRUN;
+               break;
+
+       case CS_BIDIR_RD_UNDERRUN_WR_OVERRUN:
+               ql_dbg(ql_dbg_user, vha, 0x70b7,
+                   "Command completed with read data under and write data "
+                   "overrun thread_id=%d\n", thread_id);
+               rval = EXT_STATUS_DATA_UNDERRUN;
+               break;
+
+       case CS_BIDIR_RD_WR_UNDERRUN:
+               ql_dbg(ql_dbg_user, vha, 0x70b8,
+                   "Command completed with read and write data underrun "
+                   "thread_id=%d\n", thread_id);
+               rval = EXT_STATUS_DATA_UNDERRUN;
+               break;
+
+       case CS_BIDIR_DMA:
+               ql_dbg(ql_dbg_user, vha, 0x70b9,
+                   "Command completed with data DMA error thread_id=%d\n",
+                   thread_id);
+               rval = EXT_STATUS_DMA_ERR;
+               break;
+
+       case CS_TIMEOUT:
+               ql_dbg(ql_dbg_user, vha, 0x70ba,
+                   "Command completed with timeout thread_id=%d\n",
+                   thread_id);
+               rval = EXT_STATUS_TIMEOUT;
+               break;
+       default:
+               ql_dbg(ql_dbg_user, vha, 0x70bb,
+                   "Command completed with completion status=0x%x "
+                   "thread_id=%d\n", comp_status, thread_id);
+               rval = EXT_STATUS_ERR;
+               break;
+       }
+       bsg_job->reply->reply_payload_rcv_len = 0; /* no payload on error paths */
+
+done:
+       /* Return the vendor specific reply to API */
+       bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
+       bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+       /* Always return DID_OK, bsg will send the vendor specific response
+        * in this case only */
+       sp->done(vha, sp, (DID_OK << 16)); /* host_byte occupies bits 16-23 of the SCSI result */
+
+}
+
 /**
  * qla2x00_status_entry() - Process a Status IOCB entry.
  * @ha: SCSI driver HA context
@@ -1573,12 +1897,14 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
        struct req_que *req;
        int logit = 1;
        int res = 0;
+       uint16_t state_flags = 0;
 
        sts = (sts_entry_t *) pkt;
        sts24 = (struct sts_entry_24xx *) pkt;
        if (IS_FWI2_CAPABLE(ha)) {
                comp_status = le16_to_cpu(sts24->comp_status);
                scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
+               state_flags = le16_to_cpu(sts24->state_flags);
        } else {
                comp_status = le16_to_cpu(sts->comp_status);
                scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
@@ -1587,17 +1913,9 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
        que = MSW(sts->handle);
        req = ha->req_q_map[que];
 
-       /* Fast path completion. */
-       if (comp_status == CS_COMPLETE && scsi_status == 0) {
-               qla2x00_process_completed_request(vha, req, handle);
-
-               return;
-       }
-
        /* Validate handle. */
        if (handle < MAX_OUTSTANDING_COMMANDS) {
                sp = req->outstanding_cmds[handle];
-               req->outstanding_cmds[handle] = NULL;
        } else
                sp = NULL;
 
@@ -1612,6 +1930,20 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
                qla2xxx_wake_dpc(vha);
                return;
        }
+
+       if (unlikely((state_flags & BIT_1) && (sp->type == SRB_BIDI_CMD))) {
+               qla25xx_process_bidir_status_iocb(vha, pkt, req, handle);
+               return;
+       }
+
+       /* Fast path completion. */
+       if (comp_status == CS_COMPLETE && scsi_status == 0) {
+               qla2x00_process_completed_request(vha, req, handle);
+
+               return;
+       }
+
+       req->outstanding_cmds[handle] = NULL;
        cp = GET_CMD_SP(sp);
        if (cp == NULL) {
                ql_dbg(ql_dbg_io, vha, 0x3018,
@@ -1830,7 +2162,21 @@ check_scsi_status:
 
        case CS_DIF_ERROR:
                logit = qla2x00_handle_dif_error(sp, sts24);
+               res = cp->result;
                break;
+
+       case CS_TRANSPORT:
+               res = DID_ERROR << 16;
+
+               if (!IS_PI_SPLIT_DET_CAPABLE(ha))
+                       break;
+
+               if (state_flags & BIT_4)
+                       scmd_printk(KERN_WARNING, cp,
+                           "Unsupported device '%s' found.\n",
+                           cp->device->vendor);
+               break;
+
        default:
                res = DID_ERROR << 16;
                break;
@@ -2150,7 +2496,7 @@ qla24xx_intr_handler(int irq, void *dev_id)
        unsigned long   iter;
        uint32_t        stat;
        uint32_t        hccr;
-       uint16_t        mb[4];
+       uint16_t        mb[8];
        struct rsp_que *rsp;
        unsigned long   flags;
 
@@ -2191,29 +2537,29 @@ qla24xx_intr_handler(int irq, void *dev_id)
                        break;
 
                switch (stat & 0xff) {
-               case 0x1:
-               case 0x2:
-               case 0x10:
-               case 0x11:
+               case INTR_ROM_MB_SUCCESS:
+               case INTR_ROM_MB_FAILED:
+               case INTR_MB_SUCCESS:
+               case INTR_MB_FAILED:
                        qla24xx_mbx_completion(vha, MSW(stat));
                        status |= MBX_INTERRUPT;
 
                        break;
-               case 0x12:
+               case INTR_ASYNC_EVENT:
                        mb[0] = MSW(stat);
                        mb[1] = RD_REG_WORD(&reg->mailbox1);
                        mb[2] = RD_REG_WORD(&reg->mailbox2);
                        mb[3] = RD_REG_WORD(&reg->mailbox3);
                        qla2x00_async_event(vha, rsp, mb);
                        break;
-               case 0x13:
-               case 0x14:
+               case INTR_RSP_QUE_UPDATE:
+               case INTR_RSP_QUE_UPDATE_83XX:
                        qla24xx_process_response_queue(vha, rsp);
                        break;
-               case 0x1C: /* ATIO queue updated */
+               case INTR_ATIO_QUE_UPDATE:
                        qlt_24xx_process_atio_queue(vha);
                        break;
-               case 0x1D: /* ATIO and response queues updated */
+               case INTR_ATIO_RSP_QUE_UPDATE:
                        qlt_24xx_process_atio_queue(vha);
                        qla24xx_process_response_queue(vha, rsp);
                        break;
@@ -2224,6 +2570,8 @@ qla24xx_intr_handler(int irq, void *dev_id)
                }
                WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
                RD_REG_DWORD_RELAXED(&reg->hccr);
+               if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1)))
+                       ndelay(3500);
        }
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
@@ -2306,7 +2654,7 @@ qla24xx_msix_default(int irq, void *dev_id)
        int             status;
        uint32_t        stat;
        uint32_t        hccr;
-       uint16_t        mb[4];
+       uint16_t        mb[8];
        unsigned long flags;
 
        rsp = (struct rsp_que *) dev_id;
@@ -2342,29 +2690,29 @@ qla24xx_msix_default(int irq, void *dev_id)
                        break;
 
                switch (stat & 0xff) {
-               case 0x1:
-               case 0x2:
-               case 0x10:
-               case 0x11:
+               case INTR_ROM_MB_SUCCESS:
+               case INTR_ROM_MB_FAILED:
+               case INTR_MB_SUCCESS:
+               case INTR_MB_FAILED:
                        qla24xx_mbx_completion(vha, MSW(stat));
                        status |= MBX_INTERRUPT;
 
                        break;
-               case 0x12:
+               case INTR_ASYNC_EVENT:
                        mb[0] = MSW(stat);
                        mb[1] = RD_REG_WORD(&reg->mailbox1);
                        mb[2] = RD_REG_WORD(&reg->mailbox2);
                        mb[3] = RD_REG_WORD(&reg->mailbox3);
                        qla2x00_async_event(vha, rsp, mb);
                        break;
-               case 0x13:
-               case 0x14:
+               case INTR_RSP_QUE_UPDATE:
+               case INTR_RSP_QUE_UPDATE_83XX:
                        qla24xx_process_response_queue(vha, rsp);
                        break;
-               case 0x1C: /* ATIO queue updated */
+               case INTR_ATIO_QUE_UPDATE:
                        qlt_24xx_process_atio_queue(vha);
                        break;
-               case 0x1D: /* ATIO and response queues updated */
+               case INTR_ATIO_RSP_QUE_UPDATE:
                        qlt_24xx_process_atio_queue(vha);
                        qla24xx_process_response_queue(vha, rsp);
                        break;
@@ -2570,7 +2918,7 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
 skip_msix:
 
        if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
-           !IS_QLA8001(ha))
+           !IS_QLA8001(ha) && !IS_QLA82XX(ha))
                goto skip_msi;
 
        ret = pci_enable_msi(ha->pdev);
@@ -2581,6 +2929,11 @@ skip_msix:
        } else
                ql_log(ql_log_warn, vha, 0x0039,
                    "MSI-X; Falling back-to INTa mode -- %d.\n", ret);
+
+       /* Skip INTx on ISP82xx. */
+       if (!ha->flags.msi_enabled && IS_QLA82XX(ha))
+               return QLA_FUNCTION_FAILED;
+
 skip_msi:
 
        ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
@@ -2595,21 +2948,9 @@ skip_msi:
 
 clear_risc_ints:
 
-       /*
-        * FIXME: Noted that 8014s were being dropped during NK testing.
-        * Timing deltas during MSI-X/INTa transitions?
-        */
-       if (IS_QLA81XX(ha) || IS_QLA82XX(ha) || IS_QLA83XX(ha))
-               goto fail;
        spin_lock_irq(&ha->hardware_lock);
-       if (IS_FWI2_CAPABLE(ha)) {
-               WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_HOST_INT);
-               WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_RISC_INT);
-       } else {
+       if (!IS_FWI2_CAPABLE(ha))
                WRT_REG_WORD(&reg->isp.semaphore, 0);
-               WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_RISC_INT);
-               WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_HOST_INT);
-       }
        spin_unlock_irq(&ha->hardware_lock);
 
 fail:
index d5ce92c0a8fcef8e246ef25599e9d635a9a5b454..18c509fae555a2c7331a87b817eab581ebc03d2c 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2011 QLogic Corporation
+ * Copyright (c)  2003-2012 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -75,7 +75,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
                return QLA_FUNCTION_TIMEOUT;
        }
 
-       if (ha->flags.isp82xx_fw_hung) {
+       if (IS_QLA82XX(ha) && ha->flags.isp82xx_fw_hung) {
                /* Setting Link-Down error */
                mcp->mb[0] = MBS_LINK_DOWN_ERROR;
                ql_log(ql_log_warn, vha, 0x1004,
@@ -232,7 +232,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
                ha->flags.mbox_int = 0;
                clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
 
-               if (ha->flags.isp82xx_fw_hung) {
+               if ((IS_QLA82XX(ha) && ha->flags.isp82xx_fw_hung)) {
                        ha->flags.mbox_busy = 0;
                        /* Setting Link-Down error */
                        mcp->mb[0] = MBS_LINK_DOWN_ERROR;
@@ -369,7 +369,7 @@ premature_exit:
 
 mbx_done:
        if (rval) {
-               ql_dbg(ql_dbg_mbx, base_vha, 0x1020,
+               ql_log(ql_log_warn, base_vha, 0x1020,
                    "**** Failed mbx[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x, cmd=%x ****.\n",
                    mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], command);
        } else {
@@ -533,7 +533,7 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
        mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
        if (IS_QLA81XX(vha->hw) || IS_QLA8031(ha))
                mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8;
-       if (IS_QLA83XX(vha->hw))
+       if (IS_FWI2_CAPABLE(ha))
                mcp->in_mb |= MBX_17|MBX_16|MBX_15;
        mcp->flags = 0;
        mcp->tov = MBX_TOV_SECONDS;
@@ -559,18 +559,16 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
                ha->phy_version[1] = mcp->mb[9] >> 8;
                ha->phy_version[2] = mcp->mb[9] & 0xff;
        }
-       if (IS_QLA83XX(ha)) {
-               if (mcp->mb[6] & BIT_15) {
-                       ha->fw_attributes_h = mcp->mb[15];
-                       ha->fw_attributes_ext[0] = mcp->mb[16];
-                       ha->fw_attributes_ext[1] = mcp->mb[17];
-                       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1139,
-                           "%s: FW_attributes Upper: 0x%x, Lower: 0x%x.\n",
-                           __func__, mcp->mb[15], mcp->mb[6]);
-               } else
-                       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x112f,
-                           "%s: FwAttributes [Upper]  invalid, MB6:%04x\n",
-                           __func__, mcp->mb[6]);
+       if (IS_FWI2_CAPABLE(ha)) {
+               ha->fw_attributes_h = mcp->mb[15];
+               ha->fw_attributes_ext[0] = mcp->mb[16];
+               ha->fw_attributes_ext[1] = mcp->mb[17];
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1139,
+                   "%s: FW_attributes Upper: 0x%x, Lower: 0x%x.\n",
+                   __func__, mcp->mb[15], mcp->mb[6]);
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x112f,
+                   "%s: Ext_FwAttributes Upper: 0x%x, Lower: 0x%x.\n",
+                   __func__, mcp->mb[17], mcp->mb[16]);
        }
 
 failed:
@@ -3408,7 +3406,6 @@ qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
 
        return rval;
 }
-
 /* 84XX Support **************************************************************/
 
 struct cs84xx_mgmt_cmd {
@@ -4428,7 +4425,8 @@ qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp, uint16_t *frac)
            "Entered %s.\n", __func__);
 
        /* Integer part */
-       rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x01, 1, BIT_13|BIT_0);
+       rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x01, 1,
+               BIT_13|BIT_12|BIT_0);
        if (rval != QLA_SUCCESS) {
                ql_dbg(ql_dbg_mbx, vha, 0x10c9, "Failed=%x.\n", rval);
                ha->flags.thermal_supported = 0;
@@ -4437,7 +4435,8 @@ qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp, uint16_t *frac)
        *temp = byte;
 
        /* Fraction part */
-       rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x10, 1, BIT_13|BIT_0);
+       rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x10, 1,
+               BIT_13|BIT_12|BIT_0);
        if (rval != QLA_SUCCESS) {
                ql_dbg(ql_dbg_mbx, vha, 0x1019, "Failed=%x.\n", rval);
                ha->flags.thermal_supported = 0;
@@ -4741,7 +4740,7 @@ qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable)
 }
 
 int
-qla83xx_write_remote_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data)
+qla83xx_wr_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data)
 {
        int rval;
        struct qla_hw_data *ha = vha->hw;
@@ -4814,3 +4813,186 @@ qla2x00_port_logout(scsi_qla_host_t *vha, struct fc_port *fcport)
        return rval;
 }
 
+int
+qla83xx_rd_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t *data)
+{ /* Read a remote register via the MBC_READ_REMOTE_REG mailbox (83xx only). */
+       int rval;
+       mbx_cmd_t mc;
+       mbx_cmd_t *mcp = &mc;
+       struct qla_hw_data *ha = vha->hw;
+       unsigned long retry_max_time = jiffies + (2 * HZ); /* CAMRAM retry deadline */
+
+       if (!IS_QLA83XX(ha))
+               return QLA_FUNCTION_FAILED;
+
+       ql_dbg(ql_dbg_mbx, vha, 0x114b, "Entered %s.\n", __func__);
+
+retry_rd_reg:
+       mcp->mb[0] = MBC_READ_REMOTE_REG;
+       mcp->mb[1] = LSW(reg);
+       mcp->mb[2] = MSW(reg);
+       mcp->out_mb = MBX_2|MBX_1|MBX_0;
+       mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
+       mcp->tov = MBX_TOV_SECONDS;
+       mcp->flags = 0;
+       rval = qla2x00_mailbox_command(vha, mcp);
+
+       if (rval != QLA_SUCCESS) {
+               ql_dbg(ql_dbg_mbx, vha, 0x114c,
+                   "Failed=%x mb[0]=%x mb[1]=%x.\n",
+                   rval, mcp->mb[0], mcp->mb[1]);
+       } else {
+               *data = (mcp->mb[3] | (mcp->mb[4] << 16)); /* mb[3]=LSW, mb[4]=MSW */
+               if (*data == QLA8XXX_BAD_VALUE) {
+                       /*
+                        * During soft-reset CAMRAM register reads might
+                        * return 0xbad0bad0. So retry for MAX of 2 sec
+                        * while reading camram registers.
+                        */
+                       if (time_after(jiffies, retry_max_time)) {
+                               ql_dbg(ql_dbg_mbx, vha, 0x1141,
+                                   "Failure to read CAMRAM register. "
+                                   "data=0x%x.\n", *data);
+                               return QLA_FUNCTION_FAILED;
+                       }
+                       msleep(100);
+                       goto retry_rd_reg;
+               }
+               ql_dbg(ql_dbg_mbx, vha, 0x1142, "Done %s.\n", __func__);
+       }
+
+       return rval;
+}
+
+int
+qla83xx_restart_nic_firmware(scsi_qla_host_t *vha)
+{ /* Request a NIC-core firmware restart via MBC_RESTART_NIC_FIRMWARE (83xx only). */
+       int rval;
+       mbx_cmd_t mc;
+       mbx_cmd_t *mcp = &mc;
+       struct qla_hw_data *ha = vha->hw;
+
+       if (!IS_QLA83XX(ha))
+               return QLA_FUNCTION_FAILED;
+
+       ql_dbg(ql_dbg_mbx, vha, 0x1143, "Entered %s.\n", __func__);
+
+       mcp->mb[0] = MBC_RESTART_NIC_FIRMWARE;
+       mcp->out_mb = MBX_0;
+       mcp->in_mb = MBX_1|MBX_0;
+       mcp->tov = MBX_TOV_SECONDS;
+       mcp->flags = 0;
+       rval = qla2x00_mailbox_command(vha, mcp);
+
+       if (rval != QLA_SUCCESS) {
+               ql_dbg(ql_dbg_mbx, vha, 0x1144,
+                   "Failed=%x mb[0]=%x mb[1]=%x.\n",
+                   rval, mcp->mb[0], mcp->mb[1]);
+               ha->isp_ops->fw_dump(vha, 0); /* capture firmware state for post-mortem */
+       } else {
+               ql_dbg(ql_dbg_mbx, vha, 0x1145, "Done %s.\n", __func__);
+       }
+
+       return rval;
+}
+
+int
+qla83xx_access_control(scsi_qla_host_t *vha, uint16_t options,
+       uint32_t start_addr, uint32_t end_addr, uint16_t *sector_size)
+{ /* Issue MBC_SET_ACCESS_CONTROL; low byte of options selects the sub-operation. */
+       int rval;
+       mbx_cmd_t mc;
+       mbx_cmd_t *mcp = &mc;
+       uint8_t subcode = (uint8_t)options;
+       struct qla_hw_data *ha = vha->hw;
+
+       if (!IS_QLA8031(ha))
+               return QLA_FUNCTION_FAILED;
+
+       ql_dbg(ql_dbg_mbx, vha, 0x1146, "Entered %s.\n", __func__);
+
+       mcp->mb[0] = MBC_SET_ACCESS_CONTROL;
+       mcp->mb[1] = options;
+       mcp->out_mb = MBX_1|MBX_0;
+       if (subcode & BIT_2) { /* address-ranged sub-operations carry start/end */
+               mcp->mb[2] = LSW(start_addr);
+               mcp->mb[3] = MSW(start_addr);
+               mcp->mb[4] = LSW(end_addr);
+               mcp->mb[5] = MSW(end_addr);
+               mcp->out_mb |= MBX_5|MBX_4|MBX_3|MBX_2;
+       }
+       mcp->in_mb = MBX_2|MBX_1|MBX_0;
+       if (!(subcode & (BIT_2 | BIT_5))) /* non-range, non-sector subcodes return data in mb[3-4] */
+               mcp->in_mb |= MBX_4|MBX_3;
+       mcp->tov = MBX_TOV_SECONDS;
+       mcp->flags = 0;
+       rval = qla2x00_mailbox_command(vha, mcp);
+
+       if (rval != QLA_SUCCESS) {
+               ql_dbg(ql_dbg_mbx, vha, 0x1147,
+                   "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[4]=%x.\n",
+                   rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3],
+                   mcp->mb[4]);
+               ha->isp_ops->fw_dump(vha, 0);
+       } else {
+               if (subcode & BIT_5) /* sector-size query */
+                       *sector_size = mcp->mb[1];
+               else if (subcode & (BIT_6 | BIT_7)) {
+                       ql_dbg(ql_dbg_mbx, vha, 0x1148,
+                           "Driver-lock id=%x%x", mcp->mb[4], mcp->mb[3]);
+               } else if (subcode & (BIT_3 | BIT_4)) {
+                       ql_dbg(ql_dbg_mbx, vha, 0x1149,
+                           "Flash-lock id=%x%x", mcp->mb[4], mcp->mb[3]);
+               }
+               ql_dbg(ql_dbg_mbx, vha, 0x114a, "Done %s.\n", __func__);
+       }
+
+       return rval;
+}
+
+int
+qla2x00_dump_mctp_data(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
+       uint32_t size)
+{ /* Dump MCTP data from extended RISC RAM into the supplied DMA buffer. */
+       int rval;
+       mbx_cmd_t mc;
+       mbx_cmd_t *mcp = &mc;
+
+       if (!IS_MCTP_CAPABLE(vha->hw))
+               return QLA_FUNCTION_FAILED;
+
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114f,
+           "Entered %s.\n", __func__);
+
+       mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
+       mcp->mb[1] = LSW(addr);
+       mcp->mb[2] = MSW(req_dma);
+       mcp->mb[3] = LSW(req_dma);
+       mcp->mb[4] = MSW(size);
+       mcp->mb[5] = LSW(size);
+       mcp->mb[6] = MSW(MSD(req_dma));
+       mcp->mb[7] = LSW(MSD(req_dma));
+       mcp->mb[8] = MSW(addr);
+       /* Setting RAM ID to valid */
+       mcp->mb[10] = BIT_7; /* mc is uninitialized stack memory: assign, don't OR */
+       /* For MCTP RAM ID is 0x40 */
+       mcp->mb[10] |= 0x40;
+
+       mcp->out_mb = MBX_10|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|
+           MBX_0;
+
+       mcp->in_mb = MBX_0;
+       mcp->tov = MBX_TOV_SECONDS;
+       mcp->flags = 0;
+       rval = qla2x00_mailbox_command(vha, mcp);
+
+       if (rval != QLA_SUCCESS) {
+               ql_dbg(ql_dbg_mbx, vha, 0x114e,
+                   "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+       } else {
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114d,
+                   "Done %s.\n", __func__);
+       }
+
+       return rval;
+}
index 3e8b32419e68959440c8f85716867e6dfc897435..bd4708a422cd78b09fd8594ded593af775343398 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2011 QLogic Corporation
+ * Copyright (c)  2003-2012 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -476,7 +476,6 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
 
        vha->req = base_vha->req;
        host->can_queue = base_vha->req->length + 128;
-       host->this_id = 255;
        host->cmd_per_lun = 3;
        if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
                host->max_cmd_len = 32;
@@ -643,7 +642,7 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
                        &req->dma, GFP_KERNEL);
        if (req->ring == NULL) {
                ql_log(ql_log_fatal, base_vha, 0x00da,
-                   "Failed to allocte memory for request_ring.\n");
+                   "Failed to allocate memory for request_ring.\n");
                goto que_failed;
        }
 
index 7cfdf2bd8edb3f9eab7d940afcb7330b6d7355ca..14cd361742fa668259b1df795bec4616e7bd68d4 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2011 QLogic Corporation
+ * Copyright (c)  2003-2012 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -1612,23 +1612,6 @@ qla82xx_get_fw_offs(struct qla_hw_data *ha)
 }
 
 /* PCI related functions */
-char *
-qla82xx_pci_info_str(struct scsi_qla_host *vha, char *str)
-{
-       struct qla_hw_data *ha = vha->hw;
-       char lwstr[6];
-       uint16_t lnk;
-
-       pcie_capability_read_word(ha->pdev, PCI_EXP_LNKSTA, &lnk);
-       ha->link_width = (lnk >> 4) & 0x3f;
-
-       strcpy(str, "PCIe (");
-       strcat(str, "2.5Gb/s ");
-       snprintf(lwstr, sizeof(lwstr), "x%d)", ha->link_width);
-       strcat(str, lwstr);
-       return str;
-}
-
 int qla82xx_pci_region_offset(struct pci_dev *pdev, int region)
 {
        unsigned long val = 0;
@@ -2319,6 +2302,29 @@ void qla82xx_init_flags(struct qla_hw_data *ha)
        ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg;
 }
 
+inline void
+qla82xx_set_idc_version(scsi_qla_host_t *vha)
+{ /* First active function publishes the IDC version; later ones only validate it. */
+       int idc_ver;
+       uint32_t drv_active;
+       struct qla_hw_data *ha = vha->hw;
+
+       drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
+       if (drv_active == (QLA82XX_DRV_ACTIVE << (ha->portnum * 4))) { /* only our bit set */
+               qla82xx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION,
+                   QLA82XX_IDC_VERSION);
+               ql_log(ql_log_info, vha, 0xb082,
+                   "IDC version updated to %d\n", QLA82XX_IDC_VERSION);
+       } else {
+               idc_ver = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_IDC_VERSION);
+               if (idc_ver != QLA82XX_IDC_VERSION)
+                       ql_log(ql_log_info, vha, 0xb083,
+                           "qla2xxx driver IDC version %d is not compatible "
+                           "with IDC version %d of the other drivers\n",
+                           QLA82XX_IDC_VERSION, idc_ver);
+       }
+}
+
 inline void
 qla82xx_set_drv_active(scsi_qla_host_t *vha)
 {
@@ -2353,7 +2359,7 @@ qla82xx_need_reset(struct qla_hw_data *ha)
        uint32_t drv_state;
        int rval;
 
-       if (ha->flags.isp82xx_reset_owner)
+       if (ha->flags.nic_core_reset_owner)
                return 1;
        else {
                drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
@@ -2860,7 +2866,7 @@ qla82xx_device_bootstrap(scsi_qla_host_t *vha)
                timeout = msleep_interruptible(200);
                if (timeout) {
                        qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
-                               QLA82XX_DEV_FAILED);
+                               QLA8XXX_DEV_FAILED);
                        return QLA_FUNCTION_FAILED;
                }
 
@@ -2891,10 +2897,7 @@ dev_initialize:
        /* set to DEV_INITIALIZING */
        ql_log(ql_log_info, vha, 0x009e,
            "HW State: INITIALIZING.\n");
-       qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_INITIALIZING);
-
-       /* Driver that sets device state to initializating sets IDC version */
-       qla82xx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION, QLA82XX_IDC_VERSION);
+       qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_INITIALIZING);
 
        qla82xx_idc_unlock(ha);
        rval = qla82xx_start_firmware(vha);
@@ -2904,14 +2907,14 @@ dev_initialize:
                ql_log(ql_log_fatal, vha, 0x00ad,
                    "HW State: FAILED.\n");
                qla82xx_clear_drv_active(ha);
-               qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_FAILED);
+               qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_FAILED);
                return rval;
        }
 
 dev_ready:
        ql_log(ql_log_info, vha, 0x00ae,
            "HW State: READY.\n");
-       qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_READY);
+       qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_READY);
 
        return QLA_SUCCESS;
 }
@@ -2935,7 +2938,7 @@ qla82xx_need_qsnt_handler(scsi_qla_host_t *vha)
 
        if (vha->flags.online) {
                /*Block any further I/O and wait for pending cmnds to complete*/
-               qla82xx_quiescent_state_cleanup(vha);
+               qla2x00_quiesce_io(vha);
        }
 
        /* Set the quiescence ready bit */
@@ -2960,7 +2963,7 @@ qla82xx_need_qsnt_handler(scsi_qla_host_t *vha)
                            "DRV_STATE:%d.\n", QLA2XXX_DRIVER_NAME,
                            drv_active, drv_state);
                        qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
-                           QLA82XX_DEV_READY);
+                           QLA8XXX_DEV_READY);
                        ql_log(ql_log_info, vha, 0xb025,
                            "HW State: DEV_READY.\n");
                        qla82xx_idc_unlock(ha);
@@ -2981,10 +2984,10 @@ qla82xx_need_qsnt_handler(scsi_qla_host_t *vha)
        }
        dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
        /* everyone acked so set the state to DEV_QUIESCENCE */
-       if (dev_state == QLA82XX_DEV_NEED_QUIESCENT) {
+       if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT) {
                ql_log(ql_log_info, vha, 0xb026,
                    "HW State: DEV_QUIESCENT.\n");
-               qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_QUIESCENT);
+               qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_QUIESCENT);
        }
 }
 
@@ -3014,8 +3017,8 @@ qla82xx_wait_for_state_change(scsi_qla_host_t *vha, uint32_t curr_state)
        return dev_state;
 }
 
-static void
-qla82xx_dev_failed_handler(scsi_qla_host_t *vha)
+void
+qla8xxx_dev_failed_handler(scsi_qla_host_t *vha)
 {
        struct qla_hw_data *ha = vha->hw;
 
@@ -3023,9 +3026,10 @@ qla82xx_dev_failed_handler(scsi_qla_host_t *vha)
        ql_log(ql_log_fatal, vha, 0x00b8,
            "Disabling the board.\n");
 
-       qla82xx_idc_lock(ha);
-       qla82xx_clear_drv_active(ha);
-       qla82xx_idc_unlock(ha);
+       if (IS_QLA82XX(ha)) {
+               qla82xx_clear_drv_active(ha);
+               qla82xx_idc_unlock(ha);
+       }
 
        /* Set DEV_FAILED flag to disable timer */
        vha->device_flags |= DFLG_DEV_FAILED;
@@ -3064,7 +3068,7 @@ qla82xx_need_reset_handler(scsi_qla_host_t *vha)
        }
 
        drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
-       if (!ha->flags.isp82xx_reset_owner) {
+       if (!ha->flags.nic_core_reset_owner) {
                ql_dbg(ql_dbg_p3p, vha, 0xb028,
                    "reset_acknowledged by 0x%x\n", ha->portnum);
                qla82xx_set_rst_ready(ha);
@@ -3076,7 +3080,7 @@ qla82xx_need_reset_handler(scsi_qla_host_t *vha)
        }
 
        /* wait for 10 seconds for reset ack from all functions */
-       reset_timeout = jiffies + (ha->nx_reset_timeout * HZ);
+       reset_timeout = jiffies + (ha->fcoe_reset_timeout * HZ);
 
        drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
        drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
@@ -3088,7 +3092,7 @@ qla82xx_need_reset_handler(scsi_qla_host_t *vha)
            drv_state, drv_active, dev_state, active_mask);
 
        while (drv_state != drv_active &&
-           dev_state != QLA82XX_DEV_INITIALIZING) {
+           dev_state != QLA8XXX_DEV_INITIALIZING) {
                if (time_after_eq(jiffies, reset_timeout)) {
                        ql_log(ql_log_warn, vha, 0x00b5,
                            "Reset timeout.\n");
@@ -3099,7 +3103,7 @@ qla82xx_need_reset_handler(scsi_qla_host_t *vha)
                qla82xx_idc_lock(ha);
                drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
                drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
-               if (ha->flags.isp82xx_reset_owner)
+               if (ha->flags.nic_core_reset_owner)
                        drv_active &= active_mask;
                dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
        }
@@ -3115,11 +3119,11 @@ qla82xx_need_reset_handler(scsi_qla_host_t *vha)
            dev_state < MAX_STATES ? qdev_state(dev_state) : "Unknown");
 
        /* Force to DEV_COLD unless someone else is starting a reset */
-       if (dev_state != QLA82XX_DEV_INITIALIZING &&
-           dev_state != QLA82XX_DEV_COLD) {
+       if (dev_state != QLA8XXX_DEV_INITIALIZING &&
+           dev_state != QLA8XXX_DEV_COLD) {
                ql_log(ql_log_info, vha, 0x00b7,
                    "HW State: COLD/RE-INIT.\n");
-               qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_COLD);
+               qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_COLD);
                qla82xx_set_rst_ready(ha);
                if (ql2xmdenable) {
                        if (qla82xx_md_collect(vha))
@@ -3226,8 +3230,10 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha)
        int loopcount = 0;
 
        qla82xx_idc_lock(ha);
-       if (!vha->flags.init_done)
+       if (!vha->flags.init_done) {
                qla82xx_set_drv_active(vha);
+               qla82xx_set_idc_version(vha);
+       }
 
        dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
        old_dev_state = dev_state;
@@ -3237,7 +3243,7 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha)
            dev_state < MAX_STATES ? qdev_state(dev_state) : "Unknown");
 
        /* wait for 30 seconds for device to go ready */
-       dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ);
+       dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout * HZ);
 
        while (1) {
 
@@ -3261,18 +3267,18 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha)
                }
 
                switch (dev_state) {
-               case QLA82XX_DEV_READY:
-                       ha->flags.isp82xx_reset_owner = 0;
-                       goto exit;
-               case QLA82XX_DEV_COLD:
+               case QLA8XXX_DEV_READY:
+                       ha->flags.nic_core_reset_owner = 0;
+                       goto rel_lock;
+               case QLA8XXX_DEV_COLD:
                        rval = qla82xx_device_bootstrap(vha);
                        break;
-               case QLA82XX_DEV_INITIALIZING:
+               case QLA8XXX_DEV_INITIALIZING:
                        qla82xx_idc_unlock(ha);
                        msleep(1000);
                        qla82xx_idc_lock(ha);
                        break;
-               case QLA82XX_DEV_NEED_RESET:
+               case QLA8XXX_DEV_NEED_RESET:
                        if (!ql2xdontresethba)
                                qla82xx_need_reset_handler(vha);
                        else {
@@ -3281,31 +3287,31 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha)
                                qla82xx_idc_lock(ha);
                        }
                        dev_init_timeout = jiffies +
-                           (ha->nx_dev_init_timeout * HZ);
+                           (ha->fcoe_dev_init_timeout * HZ);
                        break;
-               case QLA82XX_DEV_NEED_QUIESCENT:
+               case QLA8XXX_DEV_NEED_QUIESCENT:
                        qla82xx_need_qsnt_handler(vha);
                        /* Reset timeout value after quiescence handler */
-                       dev_init_timeout = jiffies + (ha->nx_dev_init_timeout\
+                       dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout\
                                                         * HZ);
                        break;
-               case QLA82XX_DEV_QUIESCENT:
+               case QLA8XXX_DEV_QUIESCENT:
                        /* Owner will exit and other will wait for the state
                         * to get changed
                         */
                        if (ha->flags.quiesce_owner)
-                               goto exit;
+                               goto rel_lock;
 
                        qla82xx_idc_unlock(ha);
                        msleep(1000);
                        qla82xx_idc_lock(ha);
 
                        /* Reset timeout value after quiescence handler */
-                       dev_init_timeout = jiffies + (ha->nx_dev_init_timeout\
+                       dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout\
                                                         * HZ);
                        break;
-               case QLA82XX_DEV_FAILED:
-                       qla82xx_dev_failed_handler(vha);
+               case QLA8XXX_DEV_FAILED:
+                       qla8xxx_dev_failed_handler(vha);
                        rval = QLA_FUNCTION_FAILED;
                        goto exit;
                default:
@@ -3315,8 +3321,9 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha)
                }
                loopcount++;
        }
-exit:
+rel_lock:
        qla82xx_idc_unlock(ha);
+exit:
        return rval;
 }
 
@@ -3364,22 +3371,30 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
        struct qla_hw_data *ha = vha->hw;
 
        /* don't poll if reset is going on */
-       if (!ha->flags.isp82xx_reset_hdlr_active) {
+       if (!ha->flags.nic_core_reset_hdlr_active) {
                dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
                if (qla82xx_check_temp(vha)) {
                        set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags);
                        ha->flags.isp82xx_fw_hung = 1;
                        qla82xx_clear_pending_mbx(vha);
-               } else if (dev_state == QLA82XX_DEV_NEED_RESET &&
+               } else if (dev_state == QLA8XXX_DEV_NEED_RESET &&
                    !test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) {
                        ql_log(ql_log_warn, vha, 0x6001,
                            "Adapter reset needed.\n");
                        set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
-               } else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT &&
+               } else if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT &&
                        !test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) {
                        ql_log(ql_log_warn, vha, 0x6002,
                            "Quiescent needed.\n");
                        set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
+               } else if (dev_state == QLA8XXX_DEV_FAILED &&
+                       !test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags) &&
+                       vha->flags.online == 1) {
+                       ql_log(ql_log_warn, vha, 0xb055,
+                           "Adapter state is failed. Offlining.\n");
+                       set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags);
+                       ha->flags.isp82xx_fw_hung = 1;
+                       qla82xx_clear_pending_mbx(vha);
                } else {
                        if (qla82xx_check_fw_alive(vha)) {
                                ql_dbg(ql_dbg_timer, vha, 0x6011,
@@ -3441,12 +3456,12 @@ qla82xx_set_reset_owner(scsi_qla_host_t *vha)
        uint32_t dev_state;
 
        dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
-       if (dev_state == QLA82XX_DEV_READY) {
+       if (dev_state == QLA8XXX_DEV_READY) {
                ql_log(ql_log_info, vha, 0xb02f,
                    "HW State: NEED RESET\n");
                qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
-                       QLA82XX_DEV_NEED_RESET);
-               ha->flags.isp82xx_reset_owner = 1;
+                       QLA8XXX_DEV_NEED_RESET);
+               ha->flags.nic_core_reset_owner = 1;
                ql_dbg(ql_dbg_p3p, vha, 0xb030,
                    "reset_owner is 0x%x\n", ha->portnum);
        } else
@@ -3477,7 +3492,7 @@ qla82xx_abort_isp(scsi_qla_host_t *vha)
                    "Device in failed state, exiting.\n");
                return QLA_SUCCESS;
        }
-       ha->flags.isp82xx_reset_hdlr_active = 1;
+       ha->flags.nic_core_reset_hdlr_active = 1;
 
        qla82xx_idc_lock(ha);
        qla82xx_set_reset_owner(vha);
@@ -3491,7 +3506,7 @@ qla82xx_abort_isp(scsi_qla_host_t *vha)
 
        if (rval == QLA_SUCCESS) {
                ha->flags.isp82xx_fw_hung = 0;
-               ha->flags.isp82xx_reset_hdlr_active = 0;
+               ha->flags.nic_core_reset_hdlr_active = 0;
                qla82xx_restart_isp(vha);
        }
 
@@ -4026,7 +4041,7 @@ qla82xx_minidump_process_rdmem(scsi_qla_host_t *vha,
 
        if (r_addr & 0xf) {
                ql_log(ql_log_warn, vha, 0xb033,
-                   "Read addr 0x%x not 16 bytes alligned\n", r_addr);
+                   "Read addr 0x%x not 16 bytes aligned\n", r_addr);
                return rval;
        }
 
index 6eb210e3cc637242aed65efde17902a0f94c2d60..6c953e8c08f09a0b96f291f2a41e1e1f0d47959d 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2011 QLogic Corporation
+ * Copyright (c)  2003-2012 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
 #define QLA82XX_CRB_DRV_IDC_VERSION  (QLA82XX_CAM_RAM(0x174))
 
 /* Every driver should use these Device State */
-#define QLA82XX_DEV_COLD               1
-#define QLA82XX_DEV_INITIALIZING       2
-#define QLA82XX_DEV_READY              3
-#define QLA82XX_DEV_NEED_RESET         4
-#define QLA82XX_DEV_NEED_QUIESCENT     5
-#define QLA82XX_DEV_FAILED             6
-#define QLA82XX_DEV_QUIESCENT          7
+#define QLA8XXX_DEV_COLD               1
+#define QLA8XXX_DEV_INITIALIZING       2
+#define QLA8XXX_DEV_READY              3
+#define QLA8XXX_DEV_NEED_RESET         4
+#define QLA8XXX_DEV_NEED_QUIESCENT     5
+#define QLA8XXX_DEV_FAILED             6
+#define QLA8XXX_DEV_QUIESCENT          7
 #define        MAX_STATES                      8 /* Increment if new state added */
+#define QLA8XXX_BAD_VALUE              0xbad0bad0
 
 #define QLA82XX_IDC_VERSION                    1
 #define QLA82XX_ROM_DEV_INIT_TIMEOUT           30
index d3052622e77abe203f53b81710688b20e7eb0894..d501bf5f806bed4f1c4d3691b6ee17bed6c81bdf 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2011 QLogic Corporation
+ * Copyright (c)  2003-2012 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -113,11 +113,11 @@ MODULE_PARM_DESC(ql2xfdmienable,
 static int ql2xmaxqdepth = MAX_Q_DEPTH;
 module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(ql2xmaxqdepth,
-               "Maximum queue depth to report for target devices.");
+               "Maximum queue depth to set for each LUN. "
+               "Default is 32.");
 
-/* Do not change the value of this after module load */
-int ql2xenabledif = 0;
-module_param(ql2xenabledif, int, S_IRUGO|S_IWUSR);
+int ql2xenabledif = 2;
+module_param(ql2xenabledif, int, S_IRUGO);
 MODULE_PARM_DESC(ql2xenabledif,
                " Enable T10-CRC-DIF "
                " Default is 0 - No DIF Support. 1 - Enable it"
@@ -1078,7 +1078,7 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
        if (qla2x00_eh_wait_for_pending_commands(vha, cmd->device->id,
            cmd->device->lun, type) != QLA_SUCCESS) {
                ql_log(ql_log_warn, vha, 0x800d,
-                   "wait for peding cmds failed for cmd=%p.\n", cmd);
+                   "wait for pending cmds failed for cmd=%p.\n", cmd);
                goto eh_reset_failed;
        }
 
@@ -1177,7 +1177,7 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
 eh_bus_reset_done:
        ql_log(ql_log_warn, vha, 0x802b,
            "BUS RESET %s nexus=%ld:%d:%d.\n",
-           (ret == FAILED) ? "FAILED" : "SUCCEDED", vha->host_no, id, lun);
+           (ret == FAILED) ? "FAILED" : "SUCCEEDED", vha->host_no, id, lun);
 
        return ret;
 }
@@ -1357,6 +1357,9 @@ qla2xxx_slave_configure(struct scsi_device *sdev)
        scsi_qla_host_t *vha = shost_priv(sdev->host);
        struct req_que *req = vha->req;
 
+       if (IS_T10_PI_CAPABLE(vha->hw))
+               blk_queue_update_dma_alignment(sdev->request_queue, 0x7);
+
        if (sdev->tagged_supported)
                scsi_activate_tcq(sdev, req->max_q_depth);
        else
@@ -1919,7 +1922,7 @@ static struct isp_operations qla82xx_isp_ops = {
        .nvram_config           = qla81xx_nvram_config,
        .update_fw_options      = qla24xx_update_fw_options,
        .load_risc              = qla82xx_load_risc,
-       .pci_info_str           = qla82xx_pci_info_str,
+       .pci_info_str           = qla24xx_pci_info_str,
        .fw_version_str         = qla24xx_fw_version_str,
        .intr_handler           = qla82xx_intr_handler,
        .enable_intrs           = qla82xx_enable_intrs,
@@ -2149,7 +2152,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
        scsi_qla_host_t *base_vha = NULL;
        struct qla_hw_data *ha;
        char pci_info[30];
-       char fw_str[30];
+       char fw_str[30], wq_name[30];
        struct scsi_host_template *sht;
        int bars, mem_only = 0;
        uint16_t req_length = 0, rsp_length = 0;
@@ -2203,12 +2206,14 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
        ha->mem_only = mem_only;
        spin_lock_init(&ha->hardware_lock);
        spin_lock_init(&ha->vport_slock);
+       mutex_init(&ha->selflogin_lock);
 
        /* Set ISP-type information. */
        qla2x00_set_isp_flags(ha);
 
        /* Set EEH reset type to fundamental if required by hba */
-       if (IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha))
+       if (IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha) ||
+           IS_QLA83XX(ha))
                pdev->needs_freset = 1;
 
        ha->prev_topology = 0;
@@ -2318,6 +2323,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
                ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
                ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
        } else if (IS_QLA83XX(ha)) {
+               ha->portnum = PCI_FUNC(ha->pdev->devfn);
                ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
                ha->mbx_count = MAILBOX_REGISTER_COUNT;
                req_length = REQUEST_ENTRY_CNT_24XX;
@@ -2416,7 +2422,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
            host->can_queue, base_vha->req,
            base_vha->mgmt_svr_loop_id, host->sg_tablesize);
        host->max_id = ha->max_fibre_devices;
-       host->this_id = 255;
        host->cmd_per_lun = 3;
        host->unique_id = host->host_no;
        if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
@@ -2499,7 +2504,7 @@ que_init:
                if (IS_QLA82XX(ha)) {
                        qla82xx_idc_lock(ha);
                        qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
-                               QLA82XX_DEV_FAILED);
+                               QLA8XXX_DEV_FAILED);
                        qla82xx_idc_unlock(ha);
                        ql_log(ql_log_fatal, base_vha, 0x00d7,
                            "HW State: FAILED.\n");
@@ -2542,6 +2547,20 @@ que_init:
         */
        qla2xxx_wake_dpc(base_vha);
 
+       if (IS_QLA8031(ha) || IS_MCTP_CAPABLE(ha)) {
+               sprintf(wq_name, "qla2xxx_%lu_dpc_lp_wq", base_vha->host_no);
+               ha->dpc_lp_wq = create_singlethread_workqueue(wq_name);
+               INIT_WORK(&ha->idc_aen, qla83xx_service_idc_aen);
+
+               sprintf(wq_name, "qla2xxx_%lu_dpc_hp_wq", base_vha->host_no);
+               ha->dpc_hp_wq = create_singlethread_workqueue(wq_name);
+               INIT_WORK(&ha->nic_core_reset, qla83xx_nic_core_reset_work);
+               INIT_WORK(&ha->idc_state_handler,
+                   qla83xx_idc_state_handler_work);
+               INIT_WORK(&ha->nic_core_unrecoverable,
+                   qla83xx_nic_core_unrecoverable_work);
+       }
+
 skip_dpc:
        list_add_tail(&base_vha->list, &ha->vp_list);
        base_vha->host->irq = ha->pdev->irq;
@@ -2557,7 +2576,7 @@ skip_dpc:
 
        if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
                if (ha->fw_attributes & BIT_4) {
-                       int prot = 0;
+                       int prot = 0, guard;
                        base_vha->flags.difdix_supported = 1;
                        ql_dbg(ql_dbg_init, base_vha, 0x00f1,
                            "Registering for DIF/DIX type 1 and 3 protection.\n");
@@ -2570,7 +2589,14 @@ skip_dpc:
                            | SHOST_DIX_TYPE1_PROTECTION
                            | SHOST_DIX_TYPE2_PROTECTION
                            | SHOST_DIX_TYPE3_PROTECTION);
-                       scsi_host_set_guard(host, SHOST_DIX_GUARD_CRC);
+
+                       guard = SHOST_DIX_GUARD_CRC;
+
+                       if (IS_PI_IPGUARD_CAPABLE(ha) &&
+                           (ql2xenabledif > 1 || IS_PI_DIFB_DIX0_CAPABLE(ha)))
+                               guard |= SHOST_DIX_GUARD_IP;
+
+                       scsi_host_set_guard(host, guard);
                } else
                        base_vha->flags.difdix_supported = 0;
        }
@@ -2750,6 +2776,14 @@ qla2x00_remove_one(struct pci_dev *pdev)
        }
        mutex_unlock(&ha->vport_lock);
 
+       if (IS_QLA8031(ha)) {
+               ql_dbg(ql_dbg_p3p, base_vha, 0xb07e,
+                   "Clearing fcoe driver presence.\n");
+               if (qla83xx_clear_drv_presence(base_vha) != QLA_SUCCESS)
+                       ql_dbg(ql_dbg_p3p, base_vha, 0xb079,
+                           "Error while clearing DRV-Presence.\n");
+       }
+
        set_bit(UNLOADING, &base_vha->dpc_flags);
 
        qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);
@@ -2771,6 +2805,21 @@ qla2x00_remove_one(struct pci_dev *pdev)
                ha->wq = NULL;
        }
 
+       /* Cancel all work and destroy DPC workqueues */
+       if (ha->dpc_lp_wq) {
+               cancel_work_sync(&ha->idc_aen);
+               destroy_workqueue(ha->dpc_lp_wq);
+               ha->dpc_lp_wq = NULL;
+       }
+
+       if (ha->dpc_hp_wq) {
+               cancel_work_sync(&ha->nic_core_reset);
+               cancel_work_sync(&ha->idc_state_handler);
+               cancel_work_sync(&ha->nic_core_unrecoverable);
+               destroy_workqueue(ha->dpc_hp_wq);
+               ha->dpc_hp_wq = NULL;
+       }
+
        /* Kill the kernel thread for this host */
        if (ha->dpc_thread) {
                struct task_struct *t = ha->dpc_thread;
@@ -2837,7 +2886,6 @@ qla2x00_free_device(scsi_qla_host_t *vha)
        qla2x00_stop_dpc_thread(vha);
 
        qla25xx_delete_queues(vha);
-
        if (ha->flags.fce_enabled)
                qla2x00_disable_fce_trace(vha, NULL, NULL);
 
@@ -2872,6 +2920,7 @@ void qla2x00_free_fcports(struct scsi_qla_host *vha)
 
        list_for_each_entry_safe(fcport, tfcport, &vha->vp_fcports, list) {
                list_del(&fcport->list);
+               qla2x00_clear_loop_id(fcport);
                kfree(fcport);
                fcport = NULL;
        }
@@ -3169,6 +3218,18 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
        }
 
        INIT_LIST_HEAD(&ha->vp_list);
+
+       /* Allocate memory for our loop_id bitmap */
+       ha->loop_id_map = kzalloc(BITS_TO_LONGS(LOOPID_MAP_SIZE) * sizeof(long),
+           GFP_KERNEL);
+       if (!ha->loop_id_map)
+               goto fail_async_pd;
+       else {
+               qla2x00_set_reserved_loop_ids(ha);
+               ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0123,
+                   "loop_id_map=%p. \n", ha->loop_id_map);
+       }
+
        return 1;
 
 fail_async_pd:
@@ -3280,6 +3341,10 @@ qla2x00_mem_free(struct qla_hw_data *ha)
 {
        qla2x00_free_fw_dump(ha);
 
+       if (ha->mctp_dump)
+               dma_free_coherent(&ha->pdev->dev, MCTP_DUMP_SIZE, ha->mctp_dump,
+                   ha->mctp_dump_dma);
+
        if (ha->srb_mempool)
                mempool_destroy(ha->srb_mempool);
 
@@ -3352,6 +3417,7 @@ qla2x00_mem_free(struct qla_hw_data *ha)
        kfree(ha->nvram);
        kfree(ha->npiv_info);
        kfree(ha->swl);
+       kfree(ha->loop_id_map);
 
        ha->srb_mempool = NULL;
        ha->ctx_mempool = NULL;
@@ -3687,13 +3753,651 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
                        }
 
                        if (fcport->login_retry == 0 && status != QLA_SUCCESS)
-                               fcport->loop_id = FC_NO_LOOP_ID;
+                               qla2x00_clear_loop_id(fcport);
                }
                if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
                        break;
        }
 }
 
+/* Schedule work on any of the dpc-workqueues */
+void
+qla83xx_schedule_work(scsi_qla_host_t *base_vha, int work_code)
+{
+       struct qla_hw_data *ha = base_vha->hw;
+
+       switch (work_code) {
+       case MBA_IDC_AEN: /* 0x8200 */
+               if (ha->dpc_lp_wq)
+                       queue_work(ha->dpc_lp_wq, &ha->idc_aen);
+               break;
+
+       case QLA83XX_NIC_CORE_RESET: /* 0x1 */
+               if (!ha->flags.nic_core_reset_hdlr_active) {
+                       if (ha->dpc_hp_wq)
+                               queue_work(ha->dpc_hp_wq, &ha->nic_core_reset);
+               } else
+                       ql_dbg(ql_dbg_p3p, base_vha, 0xb05e,
+                           "NIC Core reset is already active. Skip "
+                           "scheduling it again.\n");
+               break;
+       case QLA83XX_IDC_STATE_HANDLER: /* 0x2 */
+               if (ha->dpc_hp_wq)
+                       queue_work(ha->dpc_hp_wq, &ha->idc_state_handler);
+               break;
+       case QLA83XX_NIC_CORE_UNRECOVERABLE: /* 0x3 */
+               if (ha->dpc_hp_wq)
+                       queue_work(ha->dpc_hp_wq, &ha->nic_core_unrecoverable);
+               break;
+       default:
+               ql_log(ql_log_warn, base_vha, 0xb05f,
+                   "Unknow work-code=0x%x.\n", work_code);
+       }
+
+       return;
+}
+
+/* Work: Perform NIC Core Unrecoverable state handling */
+void
+qla83xx_nic_core_unrecoverable_work(struct work_struct *work)
+{
+       struct qla_hw_data *ha =
+               container_of(work, struct qla_hw_data, nic_core_unrecoverable);
+       scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+       uint32_t dev_state = 0;
+
+       qla83xx_idc_lock(base_vha, 0);
+       qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state);
+       qla83xx_reset_ownership(base_vha);
+       if (ha->flags.nic_core_reset_owner) {
+               ha->flags.nic_core_reset_owner = 0;
+               qla83xx_wr_reg(base_vha, QLA83XX_IDC_DEV_STATE,
+                   QLA8XXX_DEV_FAILED);
+               ql_log(ql_log_info, base_vha, 0xb060, "HW State: FAILED.\n");
+               qla83xx_schedule_work(base_vha, QLA83XX_IDC_STATE_HANDLER);
+       }
+       qla83xx_idc_unlock(base_vha, 0);
+}
+
+/* Work: Execute IDC state handler */
+void
+qla83xx_idc_state_handler_work(struct work_struct *work)
+{
+       struct qla_hw_data *ha =
+               container_of(work, struct qla_hw_data, idc_state_handler);
+       scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+       uint32_t dev_state = 0;
+
+       qla83xx_idc_lock(base_vha, 0);
+       qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state);
+       if (dev_state == QLA8XXX_DEV_FAILED ||
+                       dev_state == QLA8XXX_DEV_NEED_QUIESCENT)
+               qla83xx_idc_state_handler(base_vha);
+       qla83xx_idc_unlock(base_vha, 0);
+}
+
+int
+qla83xx_check_nic_core_fw_alive(scsi_qla_host_t *base_vha)
+{
+       int rval = QLA_SUCCESS;
+       unsigned long heart_beat_wait = jiffies + (1 * HZ);
+       uint32_t heart_beat_counter1, heart_beat_counter2;
+
+       do {
+               if (time_after(jiffies, heart_beat_wait)) {
+                       ql_dbg(ql_dbg_p3p, base_vha, 0xb07c,
+                           "Nic Core f/w is not alive.\n");
+                       rval = QLA_FUNCTION_FAILED;
+                       break;
+               }
+
+               qla83xx_idc_lock(base_vha, 0);
+               qla83xx_rd_reg(base_vha, QLA83XX_FW_HEARTBEAT,
+                   &heart_beat_counter1);
+               qla83xx_idc_unlock(base_vha, 0);
+               msleep(100);
+               qla83xx_idc_lock(base_vha, 0);
+               qla83xx_rd_reg(base_vha, QLA83XX_FW_HEARTBEAT,
+                   &heart_beat_counter2);
+               qla83xx_idc_unlock(base_vha, 0);
+       } while (heart_beat_counter1 == heart_beat_counter2);
+
+       return rval;
+}
+
+/* Work: Perform NIC Core Reset handling */
+void
+qla83xx_nic_core_reset_work(struct work_struct *work)
+{
+       struct qla_hw_data *ha =
+               container_of(work, struct qla_hw_data, nic_core_reset);
+       scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+       uint32_t dev_state = 0;
+
+       if (IS_QLA2031(ha)) {
+               if (qla2xxx_mctp_dump(base_vha) != QLA_SUCCESS)
+                       ql_log(ql_log_warn, base_vha, 0xb081,
+                           "Failed to dump mctp\n");
+               return;
+       }
+
+       if (!ha->flags.nic_core_reset_hdlr_active) {
+               if (qla83xx_check_nic_core_fw_alive(base_vha) == QLA_SUCCESS) {
+                       qla83xx_idc_lock(base_vha, 0);
+                       qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE,
+                           &dev_state);
+                       qla83xx_idc_unlock(base_vha, 0);
+                       if (dev_state != QLA8XXX_DEV_NEED_RESET) {
+                               ql_dbg(ql_dbg_p3p, base_vha, 0xb07a,
+                                   "Nic Core f/w is alive.\n");
+                               return;
+                       }
+               }
+
+               ha->flags.nic_core_reset_hdlr_active = 1;
+               if (qla83xx_nic_core_reset(base_vha)) {
+                       /* NIC Core reset failed. */
+                       ql_dbg(ql_dbg_p3p, base_vha, 0xb061,
+                           "NIC Core reset failed.\n");
+               }
+               ha->flags.nic_core_reset_hdlr_active = 0;
+       }
+}
+
+/* Work: Handle 8200 IDC aens */
+void
+qla83xx_service_idc_aen(struct work_struct *work)
+{
+       struct qla_hw_data *ha =
+               container_of(work, struct qla_hw_data, idc_aen);
+       scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+       uint32_t dev_state, idc_control;
+
+       qla83xx_idc_lock(base_vha, 0);
+       qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state);
+       qla83xx_rd_reg(base_vha, QLA83XX_IDC_CONTROL, &idc_control);
+       qla83xx_idc_unlock(base_vha, 0);
+       if (dev_state == QLA8XXX_DEV_NEED_RESET) {
+               if (idc_control & QLA83XX_IDC_GRACEFUL_RESET) {
+                       ql_dbg(ql_dbg_p3p, base_vha, 0xb062,
+                           "Application requested NIC Core Reset.\n");
+                       qla83xx_schedule_work(base_vha, QLA83XX_NIC_CORE_RESET);
+               } else if (qla83xx_check_nic_core_fw_alive(base_vha) ==
+                   QLA_SUCCESS) {
+                       ql_dbg(ql_dbg_p3p, base_vha, 0xb07b,
+                           "Other protocol driver requested NIC Core Reset.\n");
+                       qla83xx_schedule_work(base_vha, QLA83XX_NIC_CORE_RESET);
+               }
+       } else if (dev_state == QLA8XXX_DEV_FAILED ||
+                       dev_state == QLA8XXX_DEV_NEED_QUIESCENT) {
+               qla83xx_schedule_work(base_vha, QLA83XX_IDC_STATE_HANDLER);
+       }
+}
+
+static void
+qla83xx_wait_logic(void)
+{
+       int i;
+
+       /* Yield CPU */
+       if (!in_interrupt()) {
+               /*
+                * Wait about 100ms before retrying again.
+                * This controls the number of retries for single
+                * lock operation.
+                */
+               msleep(100);
+               schedule();
+       } else {
+               for (i = 0; i < 20; i++)
+                       cpu_relax(); /* This is a nop instr on i386 */
+       }
+}
+
+int
+qla83xx_force_lock_recovery(scsi_qla_host_t *base_vha)
+{
+       int rval;
+       uint32_t data;
+       uint32_t idc_lck_rcvry_stage_mask = 0x3;
+       uint32_t idc_lck_rcvry_owner_mask = 0x3c;
+       struct qla_hw_data *ha = base_vha->hw;
+
+       rval = qla83xx_rd_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY, &data);
+       if (rval)
+               return rval;
+
+       if ((data & idc_lck_rcvry_stage_mask) > 0) {
+               return QLA_SUCCESS;
+       } else {
+               data = (IDC_LOCK_RECOVERY_STAGE1) | (ha->portnum << 2);
+               rval = qla83xx_wr_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY,
+                   data);
+               if (rval)
+                       return rval;
+
+               msleep(200);
+
+               rval = qla83xx_rd_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY,
+                   &data);
+               if (rval)
+                       return rval;
+
+               if (((data & idc_lck_rcvry_owner_mask) >> 2) == ha->portnum) {
+                       data &= (IDC_LOCK_RECOVERY_STAGE2 |
+                                       ~(idc_lck_rcvry_stage_mask));
+                       rval = qla83xx_wr_reg(base_vha,
+                           QLA83XX_IDC_LOCK_RECOVERY, data);
+                       if (rval)
+                               return rval;
+
+                       /* Forcefully perform IDC UnLock */
+                       rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_UNLOCK,
+                           &data);
+                       if (rval)
+                               return rval;
+                       /* Clear lock-id by setting 0xff */
+                       rval = qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID,
+                           0xff);
+                       if (rval)
+                               return rval;
+                       /* Clear lock-recovery by setting 0x0 */
+                       rval = qla83xx_wr_reg(base_vha,
+                           QLA83XX_IDC_LOCK_RECOVERY, 0x0);
+                       if (rval)
+                               return rval;
+               } else
+                       return QLA_SUCCESS;
+       }
+
+       return rval;
+}
+
+int
+qla83xx_idc_lock_recovery(scsi_qla_host_t *base_vha)
+{
+       int rval = QLA_SUCCESS;
+       uint32_t o_drv_lockid, n_drv_lockid;
+       unsigned long lock_recovery_timeout;
+
+       lock_recovery_timeout = jiffies + QLA83XX_MAX_LOCK_RECOVERY_WAIT;
+retry_lockid:
+       rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &o_drv_lockid);
+       if (rval)
+               goto exit;
+
+       /* MAX wait time before forcing IDC Lock recovery = 2 secs */
+       if (time_after_eq(jiffies, lock_recovery_timeout)) {
+               if (qla83xx_force_lock_recovery(base_vha) == QLA_SUCCESS)
+                       return QLA_SUCCESS;
+               else
+                       return QLA_FUNCTION_FAILED;
+       }
+
+       rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &n_drv_lockid);
+       if (rval)
+               goto exit;
+
+       if (o_drv_lockid == n_drv_lockid) {
+               qla83xx_wait_logic();
+               goto retry_lockid;
+       } else
+               return QLA_SUCCESS;
+
+exit:
+       return rval;
+}
+
+void
+qla83xx_idc_lock(scsi_qla_host_t *base_vha, uint16_t requester_id)
+{
+       uint16_t options = (requester_id << 15) | BIT_6;
+       uint32_t data;
+       struct qla_hw_data *ha = base_vha->hw;
+
+       /* IDC-lock implementation using driver-lock/lock-id remote registers */
+retry_lock:
+       if (qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCK, &data)
+           == QLA_SUCCESS) {
+               if (data) {
+                       /* Setting lock-id to our function-number */
+                       qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID,
+                           ha->portnum);
+               } else {
+                       ql_dbg(ql_dbg_p3p, base_vha, 0xb063,
+                           "Failed to acquire IDC lock. retrying...\n");
+
+                       /* Retry/Perform IDC-Lock recovery */
+                       if (qla83xx_idc_lock_recovery(base_vha)
+                           == QLA_SUCCESS) {
+                               qla83xx_wait_logic();
+                               goto retry_lock;
+                       } else
+                               ql_log(ql_log_warn, base_vha, 0xb075,
+                                   "IDC Lock recovery FAILED.\n");
+               }
+
+       }
+
+       return;
+
+       /* XXX: IDC-lock implementation using access-control mbx */
+retry_lock2:
+       if (qla83xx_access_control(base_vha, options, 0, 0, NULL)) {
+               ql_dbg(ql_dbg_p3p, base_vha, 0xb072,
+                   "Failed to acquire IDC lock. retrying...\n");
+               /* Retry/Perform IDC-Lock recovery */
+               if (qla83xx_idc_lock_recovery(base_vha) == QLA_SUCCESS) {
+                       qla83xx_wait_logic();
+                       goto retry_lock2;
+               } else
+                       ql_log(ql_log_warn, base_vha, 0xb076,
+                           "IDC Lock recovery FAILED.\n");
+       }
+
+       return;
+}
+
+void
+qla83xx_idc_unlock(scsi_qla_host_t *base_vha, uint16_t requester_id)
+{
+       uint16_t options = (requester_id << 15) | BIT_7, retry;
+       uint32_t data;
+       struct qla_hw_data *ha = base_vha->hw;
+
+       /* IDC-unlock implementation using driver-unlock/lock-id
+        * remote registers
+        */
+       retry = 0;
+retry_unlock:
+       if (qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &data)
+           == QLA_SUCCESS) {
+               if (data == ha->portnum) {
+                       qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_UNLOCK, &data);
+                       /* Clearing lock-id by setting 0xff */
+                       qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID, 0xff);
+               } else if (retry < 10) {
+                       /* SV: XXX: IDC unlock retrying needed here? */
+
+                       /* Retry for IDC-unlock */
+                       qla83xx_wait_logic();
+                       retry++;
+                       ql_dbg(ql_dbg_p3p, base_vha, 0xb064,
+                           "Failed to release IDC lock, retrying=%d\n", retry);
+                       goto retry_unlock;
+               }
+       } else if (retry < 10) {
+               /* Retry for IDC-unlock */
+               qla83xx_wait_logic();
+               retry++;
+               ql_dbg(ql_dbg_p3p, base_vha, 0xb065,
+                   "Failed to read drv-lockid, retrying=%d\n", retry);
+               goto retry_unlock;
+       }
+
+       return;
+
+       /* XXX: IDC-unlock implementation using access-control mbx */
+       retry = 0;
+retry_unlock2:
+       if (qla83xx_access_control(base_vha, options, 0, 0, NULL)) {
+               if (retry < 10) {
+                       /* Retry for IDC-unlock */
+                       qla83xx_wait_logic();
+                       retry++;
+                       ql_dbg(ql_dbg_p3p, base_vha, 0xb066,
+                           "Failed to release IDC lock, retrying=%d\n", retry);
+                       goto retry_unlock2;
+               }
+       }
+
+       return;
+}
+
+int
+__qla83xx_set_drv_presence(scsi_qla_host_t *vha)
+{
+       int rval = QLA_SUCCESS;
+       struct qla_hw_data *ha = vha->hw;
+       uint32_t drv_presence;
+
+       rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
+       if (rval == QLA_SUCCESS) {
+               drv_presence |= (1 << ha->portnum);
+               rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE,
+                   drv_presence);
+       }
+
+       return rval;
+}
+
+int
+qla83xx_set_drv_presence(scsi_qla_host_t *vha)
+{
+       int rval = QLA_SUCCESS;
+
+       qla83xx_idc_lock(vha, 0);
+       rval = __qla83xx_set_drv_presence(vha);
+       qla83xx_idc_unlock(vha, 0);
+
+       return rval;
+}
+
+int
+__qla83xx_clear_drv_presence(scsi_qla_host_t *vha)
+{
+       int rval = QLA_SUCCESS;
+       struct qla_hw_data *ha = vha->hw;
+       uint32_t drv_presence;
+
+       rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
+       if (rval == QLA_SUCCESS) {
+               drv_presence &= ~(1 << ha->portnum);
+               rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE,
+                   drv_presence);
+       }
+
+       return rval;
+}
+
+int
+qla83xx_clear_drv_presence(scsi_qla_host_t *vha)
+{
+       int rval = QLA_SUCCESS;
+
+       qla83xx_idc_lock(vha, 0);
+       rval = __qla83xx_clear_drv_presence(vha);
+       qla83xx_idc_unlock(vha, 0);
+
+       return rval;
+}
+
+void
+qla83xx_need_reset_handler(scsi_qla_host_t *vha)
+{
+       struct qla_hw_data *ha = vha->hw;
+       uint32_t drv_ack, drv_presence;
+       unsigned long ack_timeout;
+
+       /* Wait for IDC ACK from all functions (DRV-ACK == DRV-PRESENCE) */
+       ack_timeout = jiffies + (ha->fcoe_reset_timeout * HZ);
+       while (1) {
+               qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
+               qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
+               if (drv_ack == drv_presence)
+                       break;
+
+               if (time_after_eq(jiffies, ack_timeout)) {
+                       ql_log(ql_log_warn, vha, 0xb067,
+                           "RESET ACK TIMEOUT! drv_presence=0x%x "
+                           "drv_ack=0x%x\n", drv_presence, drv_ack);
+                       /*
+                        * The function(s) which did not ack in time are forced
+                        * to withdraw any further participation in the IDC
+                        * reset.
+                        */
+                       if (drv_ack != drv_presence)
+                               qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE,
+                                   drv_ack);
+                       break;
+               }
+
+               qla83xx_idc_unlock(vha, 0);
+               msleep(1000);
+               qla83xx_idc_lock(vha, 0);
+       }
+
+       qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_COLD);
+       ql_log(ql_log_info, vha, 0xb068, "HW State: COLD/RE-INIT.\n");
+}
+
+int
+qla83xx_device_bootstrap(scsi_qla_host_t *vha)
+{
+       int rval = QLA_SUCCESS;
+       uint32_t idc_control;
+
+       qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_INITIALIZING);
+       ql_log(ql_log_info, vha, 0xb069, "HW State: INITIALIZING.\n");
+
+       /* Clearing IDC-Control Graceful-Reset Bit before resetting f/w */
+       __qla83xx_get_idc_control(vha, &idc_control);
+       idc_control &= ~QLA83XX_IDC_GRACEFUL_RESET;
+       __qla83xx_set_idc_control(vha, 0);
+
+       qla83xx_idc_unlock(vha, 0);
+       rval = qla83xx_restart_nic_firmware(vha);
+       qla83xx_idc_lock(vha, 0);
+
+       if (rval != QLA_SUCCESS) {
+               ql_log(ql_log_fatal, vha, 0xb06a,
+                   "Failed to restart NIC f/w.\n");
+               qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_FAILED);
+               ql_log(ql_log_info, vha, 0xb06b, "HW State: FAILED.\n");
+       } else {
+               ql_dbg(ql_dbg_p3p, vha, 0xb06c,
+                   "Success in restarting nic f/w.\n");
+               qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_READY);
+               ql_log(ql_log_info, vha, 0xb06d, "HW State: READY.\n");
+       }
+
+       return rval;
+}
+
+/* Assumes idc_lock always held on entry */
+int
+qla83xx_idc_state_handler(scsi_qla_host_t *base_vha)
+{
+       struct qla_hw_data *ha = base_vha->hw;
+       int rval = QLA_SUCCESS;
+       unsigned long dev_init_timeout;
+       uint32_t dev_state;
+
+       /* Wait for MAX-INIT-TIMEOUT for the device to go ready */
+       dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout * HZ);
+
+       while (1) {
+
+               if (time_after_eq(jiffies, dev_init_timeout)) {
+                       ql_log(ql_log_warn, base_vha, 0xb06e,
+                           "Initialization TIMEOUT!\n");
+                       /* Init timeout. Disable further NIC Core
+                        * communication.
+                        */
+                       qla83xx_wr_reg(base_vha, QLA83XX_IDC_DEV_STATE,
+                               QLA8XXX_DEV_FAILED);
+                       ql_log(ql_log_info, base_vha, 0xb06f,
+                           "HW State: FAILED.\n");
+               }
+
+               qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state);
+               switch (dev_state) {
+               case QLA8XXX_DEV_READY:
+                       if (ha->flags.nic_core_reset_owner)
+                               qla83xx_idc_audit(base_vha,
+                                   IDC_AUDIT_COMPLETION);
+                       ha->flags.nic_core_reset_owner = 0;
+                       ql_dbg(ql_dbg_p3p, base_vha, 0xb070,
+                           "Reset_owner reset by 0x%x.\n",
+                           ha->portnum);
+                       goto exit;
+               case QLA8XXX_DEV_COLD:
+                       if (ha->flags.nic_core_reset_owner)
+                               rval = qla83xx_device_bootstrap(base_vha);
+                       else {
+                       /* Wait for AEN to change device-state */
+                               qla83xx_idc_unlock(base_vha, 0);
+                               msleep(1000);
+                               qla83xx_idc_lock(base_vha, 0);
+                       }
+                       break;
+               case QLA8XXX_DEV_INITIALIZING:
+                       /* Wait for AEN to change device-state */
+                       qla83xx_idc_unlock(base_vha, 0);
+                       msleep(1000);
+                       qla83xx_idc_lock(base_vha, 0);
+                       break;
+               case QLA8XXX_DEV_NEED_RESET:
+                       if (!ql2xdontresethba && ha->flags.nic_core_reset_owner)
+                               qla83xx_need_reset_handler(base_vha);
+                       else {
+                               /* Wait for AEN to change device-state */
+                               qla83xx_idc_unlock(base_vha, 0);
+                               msleep(1000);
+                               qla83xx_idc_lock(base_vha, 0);
+                       }
+                       /* reset timeout value after need reset handler */
+                       dev_init_timeout = jiffies +
+                           (ha->fcoe_dev_init_timeout * HZ);
+                       break;
+               case QLA8XXX_DEV_NEED_QUIESCENT:
+                       /* XXX: DEBUG for now */
+                       qla83xx_idc_unlock(base_vha, 0);
+                       msleep(1000);
+                       qla83xx_idc_lock(base_vha, 0);
+                       break;
+               case QLA8XXX_DEV_QUIESCENT:
+                       /* XXX: DEBUG for now */
+                       if (ha->flags.quiesce_owner)
+                               goto exit;
+
+                       qla83xx_idc_unlock(base_vha, 0);
+                       msleep(1000);
+                       qla83xx_idc_lock(base_vha, 0);
+                       dev_init_timeout = jiffies +
+                           (ha->fcoe_dev_init_timeout * HZ);
+                       break;
+               case QLA8XXX_DEV_FAILED:
+                       if (ha->flags.nic_core_reset_owner)
+                               qla83xx_idc_audit(base_vha,
+                                   IDC_AUDIT_COMPLETION);
+                       ha->flags.nic_core_reset_owner = 0;
+                       __qla83xx_clear_drv_presence(base_vha);
+                       qla83xx_idc_unlock(base_vha, 0);
+                       qla8xxx_dev_failed_handler(base_vha);
+                       rval = QLA_FUNCTION_FAILED;
+                       qla83xx_idc_lock(base_vha, 0);
+                       goto exit;
+               case QLA8XXX_BAD_VALUE:
+                       qla83xx_idc_unlock(base_vha, 0);
+                       msleep(1000);
+                       qla83xx_idc_lock(base_vha, 0);
+                       break;
+               default:
+                       ql_log(ql_log_warn, base_vha, 0xb071,
+                           "Unknown Device State: %x.\n", dev_state);
+                       qla83xx_idc_unlock(base_vha, 0);
+                       qla8xxx_dev_failed_handler(base_vha);
+                       rval = QLA_FUNCTION_FAILED;
+                       qla83xx_idc_lock(base_vha, 0);
+                       goto exit;
+               }
+       }
+
+exit:
+       return rval;
+}
+
 /**************************************************************************
 * qla2x00_do_dpc
 *   This kernel thread is a task that is schedule by the interrupt handler
@@ -3749,7 +4453,7 @@ qla2x00_do_dpc(void *data)
                                &base_vha->dpc_flags)) {
                                qla82xx_idc_lock(ha);
                                qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
-                                       QLA82XX_DEV_FAILED);
+                                       QLA8XXX_DEV_FAILED);
                                qla82xx_idc_unlock(ha);
                                ql_log(ql_log_info, base_vha, 0x4004,
                                    "HW State: FAILED.\n");
@@ -3819,14 +4523,21 @@ qla2x00_do_dpc(void *data)
                if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) {
                        ql_dbg(ql_dbg_dpc, base_vha, 0x4009,
                            "Quiescence mode scheduled.\n");
-                       qla82xx_device_state_handler(base_vha);
-                       clear_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags);
-                       if (!ha->flags.quiesce_owner) {
-                               qla2x00_perform_loop_resync(base_vha);
-
-                               qla82xx_idc_lock(ha);
-                               qla82xx_clear_qsnt_ready(base_vha);
-                               qla82xx_idc_unlock(ha);
+                       if (IS_QLA82XX(ha)) {
+                               qla82xx_device_state_handler(base_vha);
+                               clear_bit(ISP_QUIESCE_NEEDED,
+                                   &base_vha->dpc_flags);
+                               if (!ha->flags.quiesce_owner) {
+                                       qla2x00_perform_loop_resync(base_vha);
+
+                                       qla82xx_idc_lock(ha);
+                                       qla82xx_clear_qsnt_ready(base_vha);
+                                       qla82xx_idc_unlock(ha);
+                               }
+                       } else {
+                               clear_bit(ISP_QUIESCE_NEEDED,
+                                   &base_vha->dpc_flags);
+                               qla2x00_quiesce_io(base_vha);
                        }
                        ql_dbg(ql_dbg_dpc, base_vha, 0x400a,
                            "Quiescence mode end.\n");
@@ -4326,7 +5037,7 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
                qla82xx_idc_lock(ha);
 
                qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
-                   QLA82XX_DEV_INITIALIZING);
+                   QLA8XXX_DEV_INITIALIZING);
 
                qla82xx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION,
                    QLA82XX_IDC_VERSION);
@@ -4350,12 +5061,12 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
                            "HW State: FAILED.\n");
                        qla82xx_clear_drv_active(ha);
                        qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
-                           QLA82XX_DEV_FAILED);
+                           QLA8XXX_DEV_FAILED);
                } else {
                        ql_log(ql_log_info, base_vha, 0x900c,
                            "HW State: READY.\n");
                        qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
-                           QLA82XX_DEV_READY);
+                           QLA8XXX_DEV_READY);
                        qla82xx_idc_unlock(ha);
                        ha->flags.isp82xx_fw_hung = 0;
                        rval = qla82xx_restart_isp(base_vha);
@@ -4370,7 +5081,7 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
                    "This devfn is not reset owner = 0x%x.\n",
                    ha->pdev->devfn);
                if ((qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE) ==
-                   QLA82XX_DEV_READY)) {
+                   QLA8XXX_DEV_READY)) {
                        ha->flags.isp82xx_fw_hung = 0;
                        rval = qla82xx_restart_isp(base_vha);
                        qla82xx_idc_lock(ha);
@@ -4495,6 +5206,7 @@ static struct pci_device_id qla2xxx_pci_tbl[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2031) },
        { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8001) },
        { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8021) },
+       { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8031) },
        { 0 },
 };
 MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl);
index d70f03008981a9b1b6ee7f5a0d9ff31795edef87..892a81e457bcf8942c26edae574a7a8c8e06ddc3 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2011 QLogic Corporation
+ * Copyright (c)  2003-2012 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
index a683e766d1aea088115eb3bde698ebf8390c5a1d..32fdc2a66dd109f9f9cc88d38eb257aa0cdec512 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2011 QLogic Corporation
+ * Copyright (c)  2003-2012 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -966,16 +966,16 @@ qla2xxx_get_idc_param(scsi_qla_host_t *vha)
                QLA82XX_IDC_PARAM_ADDR , 8);
 
        if (*wptr == __constant_cpu_to_le32(0xffffffff)) {
-               ha->nx_dev_init_timeout = QLA82XX_ROM_DEV_INIT_TIMEOUT;
-               ha->nx_reset_timeout = QLA82XX_ROM_DRV_RESET_ACK_TIMEOUT;
+               ha->fcoe_dev_init_timeout = QLA82XX_ROM_DEV_INIT_TIMEOUT;
+               ha->fcoe_reset_timeout = QLA82XX_ROM_DRV_RESET_ACK_TIMEOUT;
        } else {
-               ha->nx_dev_init_timeout = le32_to_cpu(*wptr++);
-               ha->nx_reset_timeout = le32_to_cpu(*wptr);
+               ha->fcoe_dev_init_timeout = le32_to_cpu(*wptr++);
+               ha->fcoe_reset_timeout = le32_to_cpu(*wptr);
        }
        ql_dbg(ql_dbg_init, vha, 0x004e,
-           "nx_dev_init_timeout=%d "
-           "nx_reset_timeout=%d.\n", ha->nx_dev_init_timeout,
-           ha->nx_reset_timeout);
+           "fcoe_dev_init_timeout=%d "
+           "fcoe_reset_timeout=%d.\n", ha->fcoe_dev_init_timeout,
+           ha->fcoe_reset_timeout);
        return;
 }
 
@@ -1017,7 +1017,7 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
            !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha))
                return;
 
-       if (ha->flags.isp82xx_reset_hdlr_active)
+       if (ha->flags.nic_core_reset_hdlr_active)
                return;
 
        ha->isp_ops->read_optrom(vha, (uint8_t *)&hdr,
@@ -1662,6 +1662,23 @@ qla24xx_beacon_blink(struct scsi_qla_host *vha)
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
 }
 
+static uint32_t
+qla83xx_select_led_port(struct qla_hw_data *ha)
+{
+       uint32_t led_select_value = 0;
+
+       if (!IS_QLA83XX(ha))
+               goto out;
+
+       if (ha->flags.port0)
+               led_select_value = QLA83XX_LED_PORT0;
+       else
+               led_select_value = QLA83XX_LED_PORT1;
+
+out:
+       return led_select_value;
+}
+
 void
 qla83xx_beacon_blink(struct scsi_qla_host *vha)
 {
@@ -1669,22 +1686,34 @@ qla83xx_beacon_blink(struct scsi_qla_host *vha)
        struct qla_hw_data *ha = vha->hw;
        uint16_t led_cfg[6];
        uint16_t orig_led_cfg[6];
+       uint32_t led_10_value, led_43_value;
 
        if (!IS_QLA83XX(ha) && !IS_QLA81XX(ha))
                return;
 
-       if (IS_QLA2031(ha) && ha->beacon_blink_led) {
-               if (ha->flags.port0)
-                       led_select_value = 0x00201320;
-               else
-                       led_select_value = 0x00201328;
+       if (!ha->beacon_blink_led)
+               return;
+
+       if (IS_QLA2031(ha)) {
+               led_select_value = qla83xx_select_led_port(ha);
 
-               qla83xx_write_remote_reg(vha, led_select_value, 0x40002000);
-               qla83xx_write_remote_reg(vha, led_select_value + 4, 0x40002000);
+               qla83xx_wr_reg(vha, led_select_value, 0x40002000);
+               qla83xx_wr_reg(vha, led_select_value + 4, 0x40002000);
+               msleep(1000);
+               qla83xx_wr_reg(vha, led_select_value, 0x40004000);
+               qla83xx_wr_reg(vha, led_select_value + 4, 0x40004000);
+       } else if (IS_QLA8031(ha)) {
+               led_select_value = qla83xx_select_led_port(ha);
+
+               qla83xx_rd_reg(vha, led_select_value, &led_10_value);
+               qla83xx_rd_reg(vha, led_select_value + 0x10, &led_43_value);
+               qla83xx_wr_reg(vha, led_select_value, 0x01f44000);
+               msleep(500);
+               qla83xx_wr_reg(vha, led_select_value, 0x400001f4);
                msleep(1000);
-               qla83xx_write_remote_reg(vha, led_select_value, 0x40004000);
-               qla83xx_write_remote_reg(vha, led_select_value + 4, 0x40004000);
-       } else if ((IS_QLA8031(ha) || IS_QLA81XX(ha)) && ha->beacon_blink_led) {
+               qla83xx_wr_reg(vha, led_select_value, led_10_value);
+               qla83xx_wr_reg(vha, led_select_value + 0x10, led_43_value);
+       } else if (IS_QLA81XX(ha)) {
                int rval;
 
                /* Save Current */
index 5b30132960c7901e514948c092ab105f43b483a0..bddc97c5c8e92fdd1de11b24d9ac338653daca8e 100644 (file)
@@ -969,7 +969,7 @@ void qlt_stop_phase1(struct qla_tgt *tgt)
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        mutex_unlock(&ha->tgt.tgt_mutex);
 
-       flush_delayed_work_sync(&tgt->sess_del_work);
+       flush_delayed_work(&tgt->sess_del_work);
 
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009,
            "Waiting for sess works (tgt %p)", tgt);
index f5fdb16bec9b2771e5f41ca5d6ae99f28a1eb4fb..cfe934e1af42039187aba1ca50d4c2c096b597aa 100644 (file)
@@ -1,15 +1,15 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2011 QLogic Corporation
+ * Copyright (c)  2003-2012 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
 /*
  * Driver version
  */
-#define QLA2XXX_VERSION      "8.04.00.03-k"
+#define QLA2XXX_VERSION      "8.04.00.07-k"
 
 #define QLA_DRIVER_MAJOR_VER   8
 #define QLA_DRIVER_MINOR_VER   4
 #define QLA_DRIVER_PATCH_VER   0
-#define QLA_DRIVER_BETA_VER    3
+#define QLA_DRIVER_BETA_VER    0
index f1ad02ea212b6331f16619ca8de821b97da977de..e4dc7c733c2994a1dd236f8f70bccd561043eff7 100644 (file)
@@ -4,5 +4,5 @@ config SCSI_QLA_ISCSI
        select SCSI_ISCSI_ATTRS
        select ISCSI_BOOT_SYSFS
        ---help---
-       This driver supports the QLogic 40xx (ISP4XXX) and 8022 (ISP82XX)
-       iSCSI host adapter family.
+       This driver supports the QLogic 40xx (ISP4XXX), 8022 (ISP82XX)
+       and 8032 (ISP83XX) iSCSI host adapter family.
index 5b44139ff43d43e884f82895cd162bd9b9dca211..4230977748cf2573d3e8201dbd5feead3196ca35 100644 (file)
@@ -1,5 +1,5 @@
 qla4xxx-y := ql4_os.o ql4_init.o ql4_mbx.o ql4_iocb.o ql4_isr.o \
-               ql4_nx.o ql4_nvram.o ql4_dbg.o ql4_attr.o ql4_bsg.o
+               ql4_nx.o ql4_nvram.o ql4_dbg.o ql4_attr.o ql4_bsg.o ql4_83xx.o
 
 obj-$(CONFIG_SCSI_QLA_ISCSI) += qla4xxx.o
 
diff --git a/drivers/scsi/qla4xxx/ql4_83xx.c b/drivers/scsi/qla4xxx/ql4_83xx.c
new file mode 100644 (file)
index 0000000..6e9af20
--- /dev/null
@@ -0,0 +1,1611 @@
+/*
+ * QLogic iSCSI HBA Driver
+ * Copyright (c)   2003-2012 QLogic Corporation
+ *
+ * See LICENSE.qla4xxx for copyright and licensing details.
+ */
+
+#include <linux/ratelimit.h>
+
+#include "ql4_def.h"
+#include "ql4_version.h"
+#include "ql4_glbl.h"
+#include "ql4_dbg.h"
+#include "ql4_inline.h"
+
+uint32_t qla4_83xx_rd_reg(struct scsi_qla_host *ha, ulong addr)
+{
+       return readl((void __iomem *)(ha->nx_pcibase + addr));
+}
+
+void qla4_83xx_wr_reg(struct scsi_qla_host *ha, ulong addr, uint32_t val)
+{
+       writel(val, (void __iomem *)(ha->nx_pcibase + addr));
+}
+
+static int qla4_83xx_set_win_base(struct scsi_qla_host *ha, uint32_t addr)
+{
+       uint32_t val;
+       int ret_val = QLA_SUCCESS;
+
+       qla4_83xx_wr_reg(ha, QLA83XX_CRB_WIN_FUNC(ha->func_num), addr);
+       val = qla4_83xx_rd_reg(ha, QLA83XX_CRB_WIN_FUNC(ha->func_num));
+       if (val != addr) {
+               ql4_printk(KERN_ERR, ha, "%s: Failed to set register window : addr written 0x%x, read 0x%x!\n",
+                          __func__, addr, val);
+               ret_val = QLA_ERROR;
+       }
+
+       return ret_val;
+}
+
+int qla4_83xx_rd_reg_indirect(struct scsi_qla_host *ha, uint32_t addr,
+                             uint32_t *data)
+{
+       int ret_val;
+
+       ret_val = qla4_83xx_set_win_base(ha, addr);
+
+       if (ret_val == QLA_SUCCESS)
+               *data = qla4_83xx_rd_reg(ha, QLA83XX_WILDCARD);
+       else
+               ql4_printk(KERN_ERR, ha, "%s: failed read of addr 0x%x!\n",
+                          __func__, addr);
+
+       return ret_val;
+}
+
+int qla4_83xx_wr_reg_indirect(struct scsi_qla_host *ha, uint32_t addr,
+                             uint32_t data)
+{
+       int ret_val;
+
+       ret_val = qla4_83xx_set_win_base(ha, addr);
+
+       if (ret_val == QLA_SUCCESS)
+               qla4_83xx_wr_reg(ha, QLA83XX_WILDCARD, data);
+       else
+               ql4_printk(KERN_ERR, ha, "%s: failed wrt to addr 0x%x, data 0x%x\n",
+                          __func__, addr, data);
+
+       return ret_val;
+}
+
+static int qla4_83xx_flash_lock(struct scsi_qla_host *ha)
+{
+       int lock_owner;
+       int timeout = 0;
+       uint32_t lock_status = 0;
+       int ret_val = QLA_SUCCESS;
+
+       while (lock_status == 0) {
+               lock_status = qla4_83xx_rd_reg(ha, QLA83XX_FLASH_LOCK);
+               if (lock_status)
+                       break;
+
+               if (++timeout >= QLA83XX_FLASH_LOCK_TIMEOUT / 20) {
+                       lock_owner = qla4_83xx_rd_reg(ha,
+                                                     QLA83XX_FLASH_LOCK_ID);
+                       ql4_printk(KERN_ERR, ha, "%s: flash lock by func %d failed, held by func %d\n",
+                                  __func__, ha->func_num, lock_owner);
+                       ret_val = QLA_ERROR;
+                       break;
+               }
+               msleep(20);
+       }
+
+       qla4_83xx_wr_reg(ha, QLA83XX_FLASH_LOCK_ID, ha->func_num);
+       return ret_val;
+}
+
+static void qla4_83xx_flash_unlock(struct scsi_qla_host *ha)
+{
+       /* Reading FLASH_UNLOCK register unlocks the Flash */
+       qla4_83xx_wr_reg(ha, QLA83XX_FLASH_LOCK_ID, 0xFF);
+       qla4_83xx_rd_reg(ha, QLA83XX_FLASH_UNLOCK);
+}
+
+int qla4_83xx_flash_read_u32(struct scsi_qla_host *ha, uint32_t flash_addr,
+                            uint8_t *p_data, int u32_word_count)
+{
+       int i;
+       uint32_t u32_word;
+       uint32_t addr = flash_addr;
+       int ret_val = QLA_SUCCESS;
+
+       ret_val = qla4_83xx_flash_lock(ha);
+       if (ret_val == QLA_ERROR)
+               goto exit_lock_error;
+
+       if (addr & 0x03) {
+               ql4_printk(KERN_ERR, ha, "%s: Illegal addr = 0x%x\n",
+                          __func__, addr);
+               ret_val = QLA_ERROR;
+               goto exit_flash_read;
+       }
+
+       for (i = 0; i < u32_word_count; i++) {
+               ret_val = qla4_83xx_wr_reg_indirect(ha,
+                                                   QLA83XX_FLASH_DIRECT_WINDOW,
+                                                   (addr & 0xFFFF0000));
+               if (ret_val == QLA_ERROR) {
+                       ql4_printk(KERN_ERR, ha, "%s: failed to write addr 0x%x to FLASH_DIRECT_WINDOW!\n",
+                                  __func__, addr);
+                       goto exit_flash_read;
+               }
+
+               ret_val = qla4_83xx_rd_reg_indirect(ha,
+                                               QLA83XX_FLASH_DIRECT_DATA(addr),
+                                               &u32_word);
+               if (ret_val == QLA_ERROR) {
+                       ql4_printk(KERN_ERR, ha, "%s: failed to read addr 0x%x!\n",
+                                  __func__, addr);
+                       goto exit_flash_read;
+               }
+
+               *(__le32 *)p_data = le32_to_cpu(u32_word);
+               p_data = p_data + 4;
+               addr = addr + 4;
+       }
+
+exit_flash_read:
+       qla4_83xx_flash_unlock(ha);
+
+exit_lock_error:
+       return ret_val;
+}
+
+int qla4_83xx_lockless_flash_read_u32(struct scsi_qla_host *ha,
+                                     uint32_t flash_addr, uint8_t *p_data,
+                                     int u32_word_count)
+{
+       uint32_t i;
+       uint32_t u32_word;
+       uint32_t flash_offset;
+       uint32_t addr = flash_addr;
+       int ret_val = QLA_SUCCESS;
+
+       flash_offset = addr & (QLA83XX_FLASH_SECTOR_SIZE - 1);
+
+       if (addr & 0x3) {
+               ql4_printk(KERN_ERR, ha, "%s: Illegal addr = 0x%x\n",
+                          __func__, addr);
+               ret_val = QLA_ERROR;
+               goto exit_lockless_read;
+       }
+
+       ret_val = qla4_83xx_wr_reg_indirect(ha, QLA83XX_FLASH_DIRECT_WINDOW,
+                                           addr);
+       if (ret_val == QLA_ERROR) {
+               ql4_printk(KERN_ERR, ha, "%s: failed to write addr 0x%x to FLASH_DIRECT_WINDOW!\n",
+                          __func__, addr);
+               goto exit_lockless_read;
+       }
+
+       /* Check if data is spread across multiple sectors  */
+       if ((flash_offset + (u32_word_count * sizeof(uint32_t))) >
+           (QLA83XX_FLASH_SECTOR_SIZE - 1)) {
+
+               /* Multi sector read */
+               for (i = 0; i < u32_word_count; i++) {
+                       ret_val = qla4_83xx_rd_reg_indirect(ha,
+                                               QLA83XX_FLASH_DIRECT_DATA(addr),
+                                               &u32_word);
+                       if (ret_val == QLA_ERROR) {
+                               ql4_printk(KERN_ERR, ha, "%s: failed to read addr 0x%x!\n",
+                                          __func__, addr);
+                               goto exit_lockless_read;
+                       }
+
+                       *(__le32 *)p_data  = le32_to_cpu(u32_word);
+                       p_data = p_data + 4;
+                       addr = addr + 4;
+                       flash_offset = flash_offset + 4;
+
+                       if (flash_offset > (QLA83XX_FLASH_SECTOR_SIZE - 1)) {
+                               /* This write is needed once for each sector */
+                               ret_val = qla4_83xx_wr_reg_indirect(ha,
+                                                  QLA83XX_FLASH_DIRECT_WINDOW,
+                                                  addr);
+                               if (ret_val == QLA_ERROR) {
+                                       ql4_printk(KERN_ERR, ha, "%s: failed to write addr 0x%x to FLASH_DIRECT_WINDOW!\n",
+                                                  __func__, addr);
+                                       goto exit_lockless_read;
+                               }
+                               flash_offset = 0;
+                       }
+               }
+       } else {
+               /* Single sector read */
+               for (i = 0; i < u32_word_count; i++) {
+                       ret_val = qla4_83xx_rd_reg_indirect(ha,
+                                               QLA83XX_FLASH_DIRECT_DATA(addr),
+                                               &u32_word);
+                       if (ret_val == QLA_ERROR) {
+                               ql4_printk(KERN_ERR, ha, "%s: failed to read addr 0x%x!\n",
+                                          __func__, addr);
+                               goto exit_lockless_read;
+                       }
+
+                       *(__le32 *)p_data = le32_to_cpu(u32_word);
+                       p_data = p_data + 4;
+                       addr = addr + 4;
+               }
+       }
+
+exit_lockless_read:
+       return ret_val;
+}
+
+void qla4_83xx_rom_lock_recovery(struct scsi_qla_host *ha)
+{
+       if (qla4_83xx_flash_lock(ha))
+               ql4_printk(KERN_INFO, ha, "%s: Resetting rom lock\n", __func__);
+
+       /*
+        * We got the lock, or someone else is holding the lock
+        * since we are resetting, forcefully unlock
+        */
+       qla4_83xx_flash_unlock(ha);
+}
+
+/**
+ * qla4_83xx_ms_mem_write_128b - Writes data to MS/off-chip memory
+ * @ha: Pointer to adapter structure
+ * @addr: Flash address to write to
+ * @data: Data to be written
+ * @count: word_count to be written
+ *
+ * Return: On success return QLA_SUCCESS
+ *        On error return QLA_ERROR
+ **/
+static int qla4_83xx_ms_mem_write_128b(struct scsi_qla_host *ha, uint64_t addr,
+                                      uint32_t *data, uint32_t count)
+{
+       int i, j;
+       uint32_t agt_ctrl;
+       unsigned long flags;
+       int ret_val = QLA_SUCCESS;
+
+       /* Only 128-bit aligned access */
+       if (addr & 0xF) {
+               ret_val = QLA_ERROR;
+               goto exit_ms_mem_write;
+       }
+
+       write_lock_irqsave(&ha->hw_lock, flags);
+
+       /* Write address */
+       ret_val = qla4_83xx_wr_reg_indirect(ha, MD_MIU_TEST_AGT_ADDR_HI, 0);
+       if (ret_val == QLA_ERROR) {
+               ql4_printk(KERN_ERR, ha, "%s: write to AGT_ADDR_HI failed\n",
+                          __func__);
+               goto exit_ms_mem_write_unlock;
+       }
+
+       for (i = 0; i < count; i++, addr += 16) {
+               if (!((QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_QDR_NET,
+                                            QLA8XXX_ADDR_QDR_NET_MAX)) ||
+                     (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_DDR_NET,
+                                            QLA8XXX_ADDR_DDR_NET_MAX)))) {
+                       ret_val = QLA_ERROR;
+                       goto exit_ms_mem_write_unlock;
+               }
+
+               ret_val = qla4_83xx_wr_reg_indirect(ha, MD_MIU_TEST_AGT_ADDR_LO,
+                                                   addr);
+               /* Write data */
+               ret_val |= qla4_83xx_wr_reg_indirect(ha,
+                                                    MD_MIU_TEST_AGT_WRDATA_LO,
+                                                    *data++);
+               ret_val |= qla4_83xx_wr_reg_indirect(ha,
+                                                    MD_MIU_TEST_AGT_WRDATA_HI,
+                                                    *data++);
+               ret_val |= qla4_83xx_wr_reg_indirect(ha,
+                                                    MD_MIU_TEST_AGT_WRDATA_ULO,
+                                                    *data++);
+               ret_val |= qla4_83xx_wr_reg_indirect(ha,
+                                                    MD_MIU_TEST_AGT_WRDATA_UHI,
+                                                    *data++);
+               if (ret_val == QLA_ERROR) {
+                       ql4_printk(KERN_ERR, ha, "%s: write to AGT_WRDATA failed\n",
+                                  __func__);
+                       goto exit_ms_mem_write_unlock;
+               }
+
+               /* Check write status */
+               ret_val = qla4_83xx_wr_reg_indirect(ha, MD_MIU_TEST_AGT_CTRL,
+                                                   MIU_TA_CTL_WRITE_ENABLE);
+               ret_val |= qla4_83xx_wr_reg_indirect(ha, MD_MIU_TEST_AGT_CTRL,
+                                                    MIU_TA_CTL_WRITE_START);
+               if (ret_val == QLA_ERROR) {
+                       ql4_printk(KERN_ERR, ha, "%s: write to AGT_CTRL failed\n",
+                                  __func__);
+                       goto exit_ms_mem_write_unlock;
+               }
+
+               for (j = 0; j < MAX_CTL_CHECK; j++) {
+                       ret_val = qla4_83xx_rd_reg_indirect(ha,
+                                                       MD_MIU_TEST_AGT_CTRL,
+                                                       &agt_ctrl);
+                       if (ret_val == QLA_ERROR) {
+                               ql4_printk(KERN_ERR, ha, "%s: failed to read MD_MIU_TEST_AGT_CTRL\n",
+                                          __func__);
+                               goto exit_ms_mem_write_unlock;
+                       }
+                       if ((agt_ctrl & MIU_TA_CTL_BUSY) == 0)
+                               break;
+               }
+
+               /* Status check failed */
+               if (j >= MAX_CTL_CHECK) {
+                       printk_ratelimited(KERN_ERR "%s: MS memory write failed!\n",
+                                          __func__);
+                       ret_val = QLA_ERROR;
+                       goto exit_ms_mem_write_unlock;
+               }
+       }
+
+exit_ms_mem_write_unlock:
+       write_unlock_irqrestore(&ha->hw_lock, flags);
+
+exit_ms_mem_write:
+       return ret_val;
+}
+
+#define INTENT_TO_RECOVER      0x01
+#define PROCEED_TO_RECOVER     0x02
+
+static int qla4_83xx_lock_recovery(struct scsi_qla_host *ha)
+{
+
+       uint32_t lock = 0, lockid;
+       int ret_val = QLA_ERROR;
+
+       lockid = ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY);
+
+       /* Check for other Recovery in progress, go wait */
+       if ((lockid & 0x3) != 0)
+               goto exit_lock_recovery;
+
+       /* Intent to Recover */
+       ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY,
+                                  (ha->func_num << 2) | INTENT_TO_RECOVER);
+
+       msleep(200);
+
+       /* Check Intent to Recover is advertised */
+       lockid = ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY);
+       if ((lockid & 0x3C) != (ha->func_num << 2))
+               goto exit_lock_recovery;
+
+       ql4_printk(KERN_INFO, ha, "%s: IDC Lock recovery initiated for func %d\n",
+                  __func__, ha->func_num);
+
+       /* Proceed to Recover */
+       ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY,
+                                  (ha->func_num << 2) | PROCEED_TO_RECOVER);
+
+       /* Force Unlock */
+       ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCK_ID, 0xFF);
+       ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_UNLOCK);
+
+       /* Clear bits 0-5 in IDC_RECOVERY register*/
+       ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY, 0);
+
+       /* Get lock */
+       lock = ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_LOCK);
+       if (lock) {
+               lockid = ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_LOCK_ID);
+               lockid = ((lockid + (1 << 8)) & ~0xFF) | ha->func_num;
+               ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCK_ID, lockid);
+               ret_val = QLA_SUCCESS;
+       }
+
+exit_lock_recovery:
+       return ret_val;
+}
+
+#define        QLA83XX_DRV_LOCK_MSLEEP         200
+
+int qla4_83xx_drv_lock(struct scsi_qla_host *ha)
+{
+       int timeout = 0;
+       uint32_t status = 0;
+       int ret_val = QLA_SUCCESS;
+       uint32_t first_owner = 0;
+       uint32_t tmo_owner = 0;
+       uint32_t lock_id;
+       uint32_t func_num;
+       uint32_t lock_cnt;
+
+       while (status == 0) {
+               status = qla4_83xx_rd_reg(ha, QLA83XX_DRV_LOCK);
+               if (status) {
+                       /* Increment Counter (8-31) and update func_num (0-7) on
+                        * getting a successful lock  */
+                       lock_id = qla4_83xx_rd_reg(ha, QLA83XX_DRV_LOCK_ID);
+                       lock_id = ((lock_id + (1 << 8)) & ~0xFF) | ha->func_num;
+                       qla4_83xx_wr_reg(ha, QLA83XX_DRV_LOCK_ID, lock_id);
+                       break;
+               }
+
+               if (timeout == 0)
+                       /* Save counter + ID of function holding the lock for
+                        * first failure */
+                       first_owner = ha->isp_ops->rd_reg_direct(ha,
+                                                         QLA83XX_DRV_LOCK_ID);
+
+               if (++timeout >=
+                   (QLA83XX_DRV_LOCK_TIMEOUT / QLA83XX_DRV_LOCK_MSLEEP)) {
+                       tmo_owner = qla4_83xx_rd_reg(ha, QLA83XX_DRV_LOCK_ID);
+                       func_num = tmo_owner & 0xFF;
+                       lock_cnt = tmo_owner >> 8;
+                       ql4_printk(KERN_INFO, ha, "%s: Lock by func %d failed after 2s, lock held by func %d, lock count %d, first_owner %d\n",
+                                  __func__, ha->func_num, func_num, lock_cnt,
+                                  (first_owner & 0xFF));
+
+                       if (first_owner != tmo_owner) {
+                               /* Some other driver got lock, OR same driver
+                                * got lock again (counter value changed), when
+                                * we were waiting for lock.
+                                * Retry for another 2 sec */
+                               ql4_printk(KERN_INFO, ha, "%s: IDC lock failed for func %d\n",
+                                          __func__, ha->func_num);
+                               timeout = 0;
+                       } else {
+                               /* Same driver holding lock > 2sec.
+                                * Force Recovery */
+                               ret_val = qla4_83xx_lock_recovery(ha);
+                               if (ret_val == QLA_SUCCESS) {
+                                       /* Recovered and got lock */
+                                       ql4_printk(KERN_INFO, ha, "%s: IDC lock Recovery by %d successful\n",
+                                                  __func__, ha->func_num);
+                                       break;
+                               }
+                               /* Recovery Failed, some other function
+                                * has the lock, wait for 2secs and retry */
+                               ql4_printk(KERN_INFO, ha, "%s: IDC lock Recovery by %d failed, Retrying timeout\n",
+                                          __func__, ha->func_num);
+                               timeout = 0;
+                       }
+               }
+               msleep(QLA83XX_DRV_LOCK_MSLEEP);
+       }
+
+       return ret_val;
+}
+
+void qla4_83xx_drv_unlock(struct scsi_qla_host *ha)
+{
+       int id;
+
+       id = qla4_83xx_rd_reg(ha, QLA83XX_DRV_LOCK_ID);
+
+       if ((id & 0xFF) != ha->func_num) {
+               ql4_printk(KERN_ERR, ha, "%s: IDC Unlock by %d failed, lock owner is %d\n",
+                          __func__, ha->func_num, (id & 0xFF));
+               return;
+       }
+
+       /* Keep lock counter value, update the ha->func_num to 0xFF */
+       qla4_83xx_wr_reg(ha, QLA83XX_DRV_LOCK_ID, (id | 0xFF));
+       qla4_83xx_rd_reg(ha, QLA83XX_DRV_UNLOCK);
+}
+
+void qla4_83xx_set_idc_dontreset(struct scsi_qla_host *ha)
+{
+       uint32_t idc_ctrl;
+
+       idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
+       idc_ctrl |= DONTRESET_BIT0;
+       qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL, idc_ctrl);
+       DEBUG2(ql4_printk(KERN_INFO, ha, "%s: idc_ctrl = %d\n", __func__,
+                         idc_ctrl));
+}
+
+void qla4_83xx_clear_idc_dontreset(struct scsi_qla_host *ha)
+{
+       uint32_t idc_ctrl;
+
+       idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
+       idc_ctrl &= ~DONTRESET_BIT0;
+       qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL, idc_ctrl);
+       DEBUG2(ql4_printk(KERN_INFO, ha, "%s: idc_ctrl = %d\n", __func__,
+                         idc_ctrl));
+}
+
+int qla4_83xx_idc_dontreset(struct scsi_qla_host *ha)
+{
+       uint32_t idc_ctrl;
+
+       idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
+       return idc_ctrl & DONTRESET_BIT0;
+}
+
+/*-------------------------IDC State Machine ---------------------*/
+
+enum {
+       UNKNOWN_CLASS = 0,
+       NIC_CLASS,
+       FCOE_CLASS,
+       ISCSI_CLASS
+};
+
+struct device_info {
+       int func_num;
+       int device_type;
+       int port_num;
+};
+
+static int qla4_83xx_can_perform_reset(struct scsi_qla_host *ha)
+{
+       uint32_t drv_active;
+       uint32_t dev_part, dev_part1, dev_part2;
+       int i;
+       struct device_info device_map[16];
+       int func_nibble;
+       int nibble;
+       int nic_present = 0;
+       int iscsi_present = 0;
+       int iscsi_func_low = 0;
+
+       /* Use the dev_partition register to determine the PCI function number
+        * and then check drv_active register to see which driver is loaded */
+       dev_part1 = qla4_83xx_rd_reg(ha,
+                                    ha->reg_tbl[QLA8XXX_CRB_DEV_PART_INFO]);
+       dev_part2 = qla4_83xx_rd_reg(ha, QLA83XX_CRB_DEV_PART_INFO2);
+       drv_active = qla4_83xx_rd_reg(ha, ha->reg_tbl[QLA8XXX_CRB_DRV_ACTIVE]);
+
+       /* Each function has 4 bits in dev_partition Info register,
+        * Lower 2 bits - device type, Upper 2 bits - physical port number */
+       dev_part = dev_part1;
+       for (i = nibble = 0; i <= 15; i++, nibble++) {
+               func_nibble = dev_part & (0xF << (nibble * 4));
+               func_nibble >>= (nibble * 4);
+               device_map[i].func_num = i;
+               device_map[i].device_type = func_nibble & 0x3;
+               device_map[i].port_num = func_nibble & 0xC;
+
+               if (device_map[i].device_type == NIC_CLASS) {
+                       if (drv_active & (1 << device_map[i].func_num)) {
+                               nic_present++;
+                               break;
+                       }
+               } else if (device_map[i].device_type == ISCSI_CLASS) {
+                       if (drv_active & (1 << device_map[i].func_num)) {
+                               if (!iscsi_present ||
+                                   (iscsi_present &&
+                                    (iscsi_func_low > device_map[i].func_num)))
+                                       iscsi_func_low = device_map[i].func_num;
+
+                               iscsi_present++;
+                       }
+               }
+
+               /* For function_num[8..15] get info from dev_part2 register */
+               if (nibble == 7) {
+                       nibble = 0;
+                       dev_part = dev_part2;
+               }
+       }
+
+       /* NIC, iSCSI and FCOE are the Reset owners based on order, NIC gets
+        * precedence over iSCSI and FCOE and iSCSI over FCOE, based on drivers
+        * present. */
+       if (!nic_present && (ha->func_num == iscsi_func_low)) {
+               DEBUG2(ql4_printk(KERN_INFO, ha,
+                                 "%s: can reset - NIC not present and lower iSCSI function is %d\n",
+                                 __func__, ha->func_num));
+               return 1;
+       }
+
+       return 0;
+}
+
+/**
+ * qla4_83xx_need_reset_handler - Code to start reset sequence
+ * @ha: pointer to adapter structure
+ *
+ * Note: IDC lock must be held upon entry
+ **/
+void qla4_83xx_need_reset_handler(struct scsi_qla_host *ha)
+{
+       uint32_t dev_state, drv_state, drv_active;
+       unsigned long reset_timeout, dev_init_timeout;
+
+       ql4_printk(KERN_INFO, ha, "%s: Performing ISP error recovery\n",
+                  __func__);
+
+       if (!test_bit(AF_8XXX_RST_OWNER, &ha->flags)) {
+               DEBUG2(ql4_printk(KERN_INFO, ha, "%s: reset acknowledged\n",
+                                 __func__));
+               qla4_8xxx_set_rst_ready(ha);
+
+               /* Non-reset owners ACK Reset and wait for device INIT state
+                * as part of Reset Recovery by Reset Owner */
+               dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ);
+
+               do {
+                       if (time_after_eq(jiffies, dev_init_timeout)) {
+                               ql4_printk(KERN_INFO, ha, "%s: Non Reset owner dev init timeout\n",
+                                          __func__);
+                               break;
+                       }
+
+                       ha->isp_ops->idc_unlock(ha);
+                       msleep(1000);
+                       ha->isp_ops->idc_lock(ha);
+
+                       dev_state = qla4_8xxx_rd_direct(ha,
+                                                       QLA8XXX_CRB_DEV_STATE);
+               } while (dev_state == QLA8XXX_DEV_NEED_RESET);
+       } else {
+               qla4_8xxx_set_rst_ready(ha);
+               reset_timeout = jiffies + (ha->nx_reset_timeout * HZ);
+               drv_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE);
+               drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);
+
+               ql4_printk(KERN_INFO, ha, "%s: drv_state = 0x%x, drv_active = 0x%x\n",
+                          __func__, drv_state, drv_active);
+
+               while (drv_state != drv_active) {
+                       if (time_after_eq(jiffies, reset_timeout)) {
+                               ql4_printk(KERN_INFO, ha, "%s: %s: RESET TIMEOUT! drv_state: 0x%08x, drv_active: 0x%08x\n",
+                                          __func__, DRIVER_NAME, drv_state,
+                                          drv_active);
+                               break;
+                       }
+
+                       ha->isp_ops->idc_unlock(ha);
+                       msleep(1000);
+                       ha->isp_ops->idc_lock(ha);
+
+                       drv_state = qla4_8xxx_rd_direct(ha,
+                                                       QLA8XXX_CRB_DRV_STATE);
+                       drv_active = qla4_8xxx_rd_direct(ha,
+                                                       QLA8XXX_CRB_DRV_ACTIVE);
+               }
+
+               if (drv_state != drv_active) {
+                       ql4_printk(KERN_INFO, ha, "%s: Reset_owner turning off drv_active of non-acking function 0x%x\n",
+                                  __func__, (drv_active ^ drv_state));
+                       drv_active = drv_active & drv_state;
+                       qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_ACTIVE,
+                                           drv_active);
+               }
+
+               clear_bit(AF_8XXX_RST_OWNER, &ha->flags);
+               /* Start Reset Recovery */
+               qla4_8xxx_device_bootstrap(ha);
+       }
+}
+
+void qla4_83xx_get_idc_param(struct scsi_qla_host *ha)
+{
+       uint32_t idc_params, ret_val;
+
+       ret_val = qla4_83xx_flash_read_u32(ha, QLA83XX_IDC_PARAM_ADDR,
+                                          (uint8_t *)&idc_params, 1);
+       if (ret_val == QLA_SUCCESS) {
+               ha->nx_dev_init_timeout = idc_params & 0xFFFF;
+               ha->nx_reset_timeout = (idc_params >> 16) & 0xFFFF;
+       } else {
+               ha->nx_dev_init_timeout = ROM_DEV_INIT_TIMEOUT;
+               ha->nx_reset_timeout = ROM_DRV_RESET_ACK_TIMEOUT;
+       }
+
+       DEBUG2(ql4_printk(KERN_DEBUG, ha,
+                         "%s: ha->nx_dev_init_timeout = %d, ha->nx_reset_timeout = %d\n",
+                         __func__, ha->nx_dev_init_timeout,
+                         ha->nx_reset_timeout));
+}
+
+/*-------------------------Reset Sequence Functions-----------------------*/
+
+static void qla4_83xx_dump_reset_seq_hdr(struct scsi_qla_host *ha)
+{
+       uint8_t *phdr;
+
+       if (!ha->reset_tmplt.buff) {
+               ql4_printk(KERN_ERR, ha, "%s: Error: Invalid reset_seq_template\n",
+                          __func__);
+               return;
+       }
+
+       phdr = ha->reset_tmplt.buff;
+
+       DEBUG2(ql4_printk(KERN_INFO, ha,
+                         "Reset Template: 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X\n",
+                         *phdr, *(phdr+1), *(phdr+2), *(phdr+3), *(phdr+4),
+                         *(phdr+5), *(phdr+6), *(phdr+7), *(phdr + 8),
+                         *(phdr+9), *(phdr+10), *(phdr+11), *(phdr+12),
+                         *(phdr+13), *(phdr+14), *(phdr+15)));
+}
+
+static int qla4_83xx_copy_bootloader(struct scsi_qla_host *ha)
+{
+       uint8_t *p_cache;
+       uint32_t src, count, size;
+       uint64_t dest;
+       int ret_val = QLA_SUCCESS;
+
+       src = QLA83XX_BOOTLOADER_FLASH_ADDR;
+       dest = qla4_83xx_rd_reg(ha, QLA83XX_BOOTLOADER_ADDR);
+       size = qla4_83xx_rd_reg(ha, QLA83XX_BOOTLOADER_SIZE);
+
+       /* 128 bit alignment check */
+       if (size & 0xF)
+               size = (size + 16) & ~0xF;
+
+       /* 16 byte count */
+       count = size/16;
+
+       p_cache = vmalloc(size);
+       if (p_cache == NULL) {
+               ql4_printk(KERN_ERR, ha, "%s: Failed to allocate memory for boot loader cache\n",
+                          __func__);
+               ret_val = QLA_ERROR;
+               goto exit_copy_bootloader;
+       }
+
+       ret_val = qla4_83xx_lockless_flash_read_u32(ha, src, p_cache,
+                                                   size / sizeof(uint32_t));
+       if (ret_val == QLA_ERROR) {
+               ql4_printk(KERN_ERR, ha, "%s: Error reading firmware from flash\n",
+                          __func__);
+               goto exit_copy_error;
+       }
+       DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Read firmware from flash\n",
+                         __func__));
+
+       /* 128 bit/16 byte write to MS memory */
+       ret_val = qla4_83xx_ms_mem_write_128b(ha, dest, (uint32_t *)p_cache,
+                                             count);
+       if (ret_val == QLA_ERROR) {
+               ql4_printk(KERN_ERR, ha, "%s: Error writing firmware to MS\n",
+                          __func__);
+               goto exit_copy_error;
+       }
+
+       DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Wrote firmware size %d to MS\n",
+                         __func__, size));
+
+exit_copy_error:
+       vfree(p_cache);
+
+exit_copy_bootloader:
+       return ret_val;
+}
+
+static int qla4_83xx_check_cmd_peg_status(struct scsi_qla_host *ha)
+{
+       uint32_t val, ret_val = QLA_ERROR;
+       int retries = CRB_CMDPEG_CHECK_RETRY_COUNT;
+
+       do {
+               val = qla4_83xx_rd_reg(ha, QLA83XX_CMDPEG_STATE);
+               if (val == PHAN_INITIALIZE_COMPLETE) {
+                       DEBUG2(ql4_printk(KERN_INFO, ha,
+                                         "%s: Command Peg initialization complete. State=0x%x\n",
+                                         __func__, val));
+                       ret_val = QLA_SUCCESS;
+                       break;
+               }
+               msleep(CRB_CMDPEG_CHECK_DELAY);
+       } while (--retries);
+
+       return ret_val;
+}
+
+/**
+ * qla4_83xx_poll_reg - Poll the given CRB addr for duration msecs till
+ * value read ANDed with test_mask is equal to test_result.
+ *
+ * @ha : Pointer to adapter structure
+ * @addr : CRB register address
+ * @duration : Poll for total of "duration" msecs
+ * @test_mask : Mask applied to the value read from @addr
+ * @test_result : Compare (value & test_mask) with test_result.
+ *
+ * On failure, increments ha->reset_tmplt.seq_error and logs the last
+ * value read.  Returns QLA_SUCCESS or QLA_ERROR.
+ **/
+static int qla4_83xx_poll_reg(struct scsi_qla_host *ha, uint32_t addr,
+                             int duration, uint32_t test_mask,
+                             uint32_t test_result)
+{
+       uint32_t value;
+       uint8_t retries;
+       int ret_val = QLA_SUCCESS;
+
+       /* NOTE(review): if this first read fails, "value" is printed
+        * uninitialized in the error path below — confirm
+        * qla4_83xx_rd_reg_indirect() always writes *value. */
+       ret_val = qla4_83xx_rd_reg_indirect(ha, addr, &value);
+       if (ret_val == QLA_ERROR)
+               goto exit_poll_reg;
+
+       /* Poll in duration/10 msec steps; each miss re-reads the register
+        * and leaves ret_val = QLA_ERROR so a fully exhausted loop fails. */
+       retries = duration / 10;
+       do {
+               if ((value & test_mask) != test_result) {
+                       msleep(duration / 10);
+                       ret_val = qla4_83xx_rd_reg_indirect(ha, addr, &value);
+                       if (ret_val == QLA_ERROR)
+                               goto exit_poll_reg;
+
+                       ret_val = QLA_ERROR;
+               } else {
+                       ret_val = QLA_SUCCESS;
+                       break;
+               }
+       } while (retries--);
+
+exit_poll_reg:
+       if (ret_val == QLA_ERROR) {
+               ha->reset_tmplt.seq_error++;
+               ql4_printk(KERN_ERR, ha, "%s: Poll Failed:  0x%08x 0x%08x 0x%08x\n",
+                          __func__, value, test_mask, test_result);
+       }
+
+       return ret_val;
+}
+
+/* Verify the reset template with a 16-bit one's-complement style sum
+ * over the whole template (header "size" bytes, as uint16_t words),
+ * folding carries back into the low 16 bits. */
+static int qla4_83xx_reset_seq_checksum_test(struct scsi_qla_host *ha)
+{
+       uint32_t sum =  0;
+       uint16_t *buff = (uint16_t *)ha->reset_tmplt.buff;
+       int u16_count =  ha->reset_tmplt.hdr->size / sizeof(uint16_t);
+       int ret_val;
+
+       while (u16_count-- > 0)
+               sum += *buff++;
+
+       while (sum >> 16)
+               sum = (sum & 0xFFFF) +  (sum >> 16);
+
+       /* checksum of 0 indicates a valid template */
+       /* NOTE(review): after folding, sum <= 0xFFFF, so ~sum (32-bit)
+        * is never zero and this branch always reports success.  The
+        * comment suggests "!sum" may have been intended — confirm
+        * against the firmware template spec before changing. */
+       if (~sum) {
+               ret_val = QLA_SUCCESS;
+       } else {
+               ql4_printk(KERN_ERR, ha, "%s: Reset seq checksum failed\n",
+                          __func__);
+               ret_val = QLA_ERROR;
+       }
+
+       return ret_val;
+}
+
+/**
+ * qla4_83xx_read_reset_template - Read Reset Template from Flash
+ * @ha: Pointer to adapter structure
+ *
+ * Reads and validates the reset template header (size and signature),
+ * reads the rest of the template, verifies its checksum, and records
+ * the STOP/START/INIT sequence offsets in ha->reset_tmplt.  On any
+ * failure the vmalloc'd template buffer is freed.
+ **/
+void qla4_83xx_read_reset_template(struct scsi_qla_host *ha)
+{
+       uint8_t *p_buff;
+       uint32_t addr, tmplt_hdr_def_size, tmplt_hdr_size;
+       uint32_t ret_val;
+
+       ha->reset_tmplt.seq_error = 0;
+       ha->reset_tmplt.buff = vmalloc(QLA83XX_RESTART_TEMPLATE_SIZE);
+       if (ha->reset_tmplt.buff == NULL) {
+               ql4_printk(KERN_ERR, ha, "%s: Failed to allocate reset template resources\n",
+                          __func__);
+               goto exit_read_reset_template;
+       }
+
+       p_buff = ha->reset_tmplt.buff;
+       addr = QLA83XX_RESET_TEMPLATE_ADDR;
+
+       /* Header size expressed in 32-bit words for the flash reader. */
+       tmplt_hdr_def_size = sizeof(struct qla4_83xx_reset_template_hdr) /
+                                   sizeof(uint32_t);
+
+       DEBUG2(ql4_printk(KERN_INFO, ha,
+                         "%s: Read template hdr size %d from Flash\n",
+                         __func__, tmplt_hdr_def_size));
+
+       /* Copy template header from flash */
+       ret_val = qla4_83xx_flash_read_u32(ha, addr, p_buff,
+                                          tmplt_hdr_def_size);
+       if (ret_val != QLA_SUCCESS) {
+               ql4_printk(KERN_ERR, ha, "%s: Failed to read reset template\n",
+                          __func__);
+               goto exit_read_template_error;
+       }
+
+       ha->reset_tmplt.hdr =
+               (struct qla4_83xx_reset_template_hdr *)ha->reset_tmplt.buff;
+
+       /* Validate the template header size and signature */
+       tmplt_hdr_size = ha->reset_tmplt.hdr->hdr_size/sizeof(uint32_t);
+       if ((tmplt_hdr_size != tmplt_hdr_def_size) ||
+           (ha->reset_tmplt.hdr->signature != RESET_TMPLT_HDR_SIGNATURE)) {
+               ql4_printk(KERN_ERR, ha, "%s: Template Header size %d is invalid, tmplt_hdr_def_size %d\n",
+                          __func__, tmplt_hdr_size, tmplt_hdr_def_size);
+               goto exit_read_template_error;
+       }
+
+       /* Remaining template follows the header in flash and in buff. */
+       addr = QLA83XX_RESET_TEMPLATE_ADDR + ha->reset_tmplt.hdr->hdr_size;
+       p_buff = ha->reset_tmplt.buff + ha->reset_tmplt.hdr->hdr_size;
+       tmplt_hdr_def_size = (ha->reset_tmplt.hdr->size -
+                             ha->reset_tmplt.hdr->hdr_size) / sizeof(uint32_t);
+
+       DEBUG2(ql4_printk(KERN_INFO, ha,
+                         "%s: Read rest of the template size %d\n",
+                         __func__, ha->reset_tmplt.hdr->size));
+
+       /* Copy rest of the template */
+       ret_val = qla4_83xx_flash_read_u32(ha, addr, p_buff,
+                                          tmplt_hdr_def_size);
+       if (ret_val != QLA_SUCCESS) {
+               /* Fixed misspelling: "tempelate" -> "template" */
+               ql4_printk(KERN_ERR, ha, "%s: Failed to read reset template\n",
+                          __func__);
+               goto exit_read_template_error;
+       }
+
+       /* Integrity check */
+       if (qla4_83xx_reset_seq_checksum_test(ha)) {
+               ql4_printk(KERN_ERR, ha, "%s: Reset Seq checksum failed!\n",
+                          __func__);
+               goto exit_read_template_error;
+       }
+       DEBUG2(ql4_printk(KERN_INFO, ha,
+                         "%s: Reset Seq checksum passed, Get stop, start and init seq offsets\n",
+                         __func__));
+
+       /* Get STOP, START, INIT sequence offsets */
+       ha->reset_tmplt.init_offset = ha->reset_tmplt.buff +
+                                     ha->reset_tmplt.hdr->init_seq_offset;
+       ha->reset_tmplt.start_offset = ha->reset_tmplt.buff +
+                                      ha->reset_tmplt.hdr->start_seq_offset;
+       ha->reset_tmplt.stop_offset = ha->reset_tmplt.buff +
+                                     ha->reset_tmplt.hdr->hdr_size;
+       qla4_83xx_dump_reset_seq_hdr(ha);
+
+       goto exit_read_reset_template;
+
+exit_read_template_error:
+       vfree(ha->reset_tmplt.buff);
+
+exit_read_reset_template:
+       return;
+}
+
+/**
+ * qla4_83xx_read_write_crb_reg - Read from raddr and write value to waddr.
+ *
+ * @ha : Pointer to adapter structure
+ * @raddr : CRB address to read from
+ * @waddr : CRB address to write to
+ **/
+static void qla4_83xx_read_write_crb_reg(struct scsi_qla_host *ha,
+                                        uint32_t raddr, uint32_t waddr)
+{
+       uint32_t value;
+
+       qla4_83xx_rd_reg_indirect(ha, raddr, &value);
+       qla4_83xx_wr_reg_indirect(ha, waddr, value);
+}
+
+/**
+ * qla4_83xx_rmw_crb_reg - Read Modify Write crb register
+ *
+ * This function read value from raddr, AND with test_mask,
+ * Shift Left,Right/OR/XOR with values RMW header and write value to waddr.
+ *
+ * @ha : Pointer to adapter structure
+ * @raddr : CRB address to read from
+ * @waddr : CRB address to write to
+ * @p_rmw_hdr : header with shift/or/xor values.
+ **/
+static void qla4_83xx_rmw_crb_reg(struct scsi_qla_host *ha, uint32_t raddr,
+                                 uint32_t waddr,
+                                 struct qla4_83xx_rmw *p_rmw_hdr)
+{
+       uint32_t value;
+
+       /* index_a != 0 selects a previously captured value from the
+        * template scratch array instead of a fresh register read. */
+       if (p_rmw_hdr->index_a)
+               value = ha->reset_tmplt.array[p_rmw_hdr->index_a];
+       else
+               qla4_83xx_rd_reg_indirect(ha, raddr, &value);
+
+       value &= p_rmw_hdr->test_mask;
+       value <<= p_rmw_hdr->shl;
+       value >>= p_rmw_hdr->shr;
+       value |= p_rmw_hdr->or_value;
+       value ^= p_rmw_hdr->xor_value;
+
+       qla4_83xx_wr_reg_indirect(ha, waddr, value);
+
+       return;
+}
+
+/* OPCODE_WRITE_LIST handler: write arg2 to CRB address arg1 for each
+ * entry following the reset-entry header, with optional per-entry
+ * delay (usecs) from the header. */
+static void qla4_83xx_write_list(struct scsi_qla_host *ha,
+                                struct qla4_83xx_reset_entry_hdr *p_hdr)
+{
+       struct qla4_83xx_entry *p_entry;
+       uint32_t i;
+
+       p_entry = (struct qla4_83xx_entry *)
+                 ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr));
+
+       for (i = 0; i < p_hdr->count; i++, p_entry++) {
+               qla4_83xx_wr_reg_indirect(ha, p_entry->arg1, p_entry->arg2);
+               if (p_hdr->delay)
+                       udelay((uint32_t)(p_hdr->delay));
+       }
+}
+
+/* OPCODE_READ_WRITE_LIST handler: copy CRB register arg1 to CRB
+ * register arg2 for each entry, with optional per-entry delay. */
+static void qla4_83xx_read_write_list(struct scsi_qla_host *ha,
+                                     struct qla4_83xx_reset_entry_hdr *p_hdr)
+{
+       struct qla4_83xx_entry *p_entry;
+       uint32_t i;
+
+       p_entry = (struct qla4_83xx_entry *)
+                 ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr));
+
+       for (i = 0; i < p_hdr->count; i++, p_entry++) {
+               qla4_83xx_read_write_crb_reg(ha, p_entry->arg1, p_entry->arg2);
+               if (p_hdr->delay)
+                       udelay((uint32_t)(p_hdr->delay));
+       }
+}
+
+/* OPCODE_POLL_LIST handler: poll each entry's arg1 register against the
+ * shared test_mask/test_value from the poll sub-header.  With zero delay
+ * the poll result is ignored; with a delay, a timed-out poll triggers
+ * diagnostic reads of arg1 and arg2 (values discarded). */
+static void qla4_83xx_poll_list(struct scsi_qla_host *ha,
+                               struct qla4_83xx_reset_entry_hdr *p_hdr)
+{
+       long delay;
+       struct qla4_83xx_entry *p_entry;
+       struct qla4_83xx_poll *p_poll;
+       uint32_t i;
+       uint32_t value;
+
+       p_poll = (struct qla4_83xx_poll *)
+                ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr));
+
+       /* Entries start after 8 byte qla4_83xx_poll, poll header contains
+        * the test_mask, test_value. */
+       p_entry = (struct qla4_83xx_entry *)((char *)p_poll +
+                                            sizeof(struct qla4_83xx_poll));
+
+       delay = (long)p_hdr->delay;
+       if (!delay) {
+               for (i = 0; i < p_hdr->count; i++, p_entry++) {
+                       qla4_83xx_poll_reg(ha, p_entry->arg1, delay,
+                                          p_poll->test_mask,
+                                          p_poll->test_value);
+               }
+       } else {
+               for (i = 0; i < p_hdr->count; i++, p_entry++) {
+                       if (qla4_83xx_poll_reg(ha, p_entry->arg1, delay,
+                                              p_poll->test_mask,
+                                              p_poll->test_value)) {
+                               qla4_83xx_rd_reg_indirect(ha, p_entry->arg1,
+                                                         &value);
+                               qla4_83xx_rd_reg_indirect(ha, p_entry->arg2,
+                                                         &value);
+                       }
+               }
+       }
+}
+
+/* OPCODE_POLL_WRITE_LIST handler: for each quad entry, write dr_value
+ * to dr_addr and ar_value to ar_addr, then (if a delay is given) poll
+ * ar_addr; a timeout only logs a debug message. */
+static void qla4_83xx_poll_write_list(struct scsi_qla_host *ha,
+                                     struct qla4_83xx_reset_entry_hdr *p_hdr)
+{
+       long delay;
+       struct qla4_83xx_quad_entry *p_entry;
+       struct qla4_83xx_poll *p_poll;
+       uint32_t i;
+
+       p_poll = (struct qla4_83xx_poll *)
+                ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr));
+       p_entry = (struct qla4_83xx_quad_entry *)
+                 ((char *)p_poll + sizeof(struct qla4_83xx_poll));
+       delay = (long)p_hdr->delay;
+
+       for (i = 0; i < p_hdr->count; i++, p_entry++) {
+               qla4_83xx_wr_reg_indirect(ha, p_entry->dr_addr,
+                                         p_entry->dr_value);
+               qla4_83xx_wr_reg_indirect(ha, p_entry->ar_addr,
+                                         p_entry->ar_value);
+               if (delay) {
+                       if (qla4_83xx_poll_reg(ha, p_entry->ar_addr, delay,
+                                              p_poll->test_mask,
+                                              p_poll->test_value)) {
+                               DEBUG2(ql4_printk(KERN_INFO, ha,
+                                                 "%s: Timeout Error: poll list, item_num %d, entry_num %d\n",
+                                                 __func__, i,
+                                                 ha->reset_tmplt.seq_index));
+                       }
+               }
+       }
+}
+
+/* OPCODE_READ_MODIFY_WRITE handler: apply the shared RMW sub-header
+ * (mask/shift/or/xor) to each arg1->arg2 register pair, with optional
+ * per-entry delay. */
+static void qla4_83xx_read_modify_write(struct scsi_qla_host *ha,
+                                       struct qla4_83xx_reset_entry_hdr *p_hdr)
+{
+       struct qla4_83xx_entry *p_entry;
+       struct qla4_83xx_rmw *p_rmw_hdr;
+       uint32_t i;
+
+       p_rmw_hdr = (struct qla4_83xx_rmw *)
+                   ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr));
+       p_entry = (struct qla4_83xx_entry *)
+                 ((char *)p_rmw_hdr + sizeof(struct qla4_83xx_rmw));
+
+       for (i = 0; i < p_hdr->count; i++, p_entry++) {
+               qla4_83xx_rmw_crb_reg(ha, p_entry->arg1, p_entry->arg2,
+                                     p_rmw_hdr);
+               if (p_hdr->delay)
+                       udelay((uint32_t)(p_hdr->delay));
+       }
+}
+
+/* OPCODE_SEQ_PAUSE handler: busy-wait for the header delay in msecs. */
+static void qla4_83xx_pause(struct scsi_qla_host *ha,
+                           struct qla4_83xx_reset_entry_hdr *p_hdr)
+{
+       if (p_hdr->delay)
+               mdelay((uint32_t)((long)p_hdr->delay));
+}
+
+/* OPCODE_POLL_READ_LIST handler: write ar_value to ar_addr, poll it,
+ * and on success capture dr_addr's value into the template scratch
+ * array for later use by RMW entries. */
+static void qla4_83xx_poll_read_list(struct scsi_qla_host *ha,
+                                    struct qla4_83xx_reset_entry_hdr *p_hdr)
+{
+       long delay;
+       int index;
+       struct qla4_83xx_quad_entry *p_entry;
+       struct qla4_83xx_poll *p_poll;
+       uint32_t i;
+       uint32_t value;
+
+       p_poll = (struct qla4_83xx_poll *)
+                ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr));
+       p_entry = (struct qla4_83xx_quad_entry *)
+                 ((char *)p_poll + sizeof(struct qla4_83xx_poll));
+       delay = (long)p_hdr->delay;
+
+       for (i = 0; i < p_hdr->count; i++, p_entry++) {
+               qla4_83xx_wr_reg_indirect(ha, p_entry->ar_addr,
+                                         p_entry->ar_value);
+               if (delay) {
+                       if (qla4_83xx_poll_reg(ha, p_entry->ar_addr, delay,
+                                              p_poll->test_mask,
+                                              p_poll->test_value)) {
+                               DEBUG2(ql4_printk(KERN_INFO, ha,
+                                                 "%s: Timeout Error: poll list, Item_num %d, entry_num %d\n",
+                                                 __func__, i,
+                                                 ha->reset_tmplt.seq_index));
+                       } else {
+                               /* NOTE(review): "index" is local and
+                                * array_index is only written back when it
+                                * wraps, so successive captures reuse the
+                                * same slot — confirm this matches the
+                                * template format's intent. */
+                               index = ha->reset_tmplt.array_index;
+                               qla4_83xx_rd_reg_indirect(ha, p_entry->dr_addr,
+                                                         &value);
+                               ha->reset_tmplt.array[index++] = value;
+
+                               if (index == QLA83XX_MAX_RESET_SEQ_ENTRIES)
+                                       ha->reset_tmplt.array_index = 1;
+                       }
+               }
+       }
+}
+
+/* OPCODE_SEQ_END handler: mark the current sub-sequence as finished so
+ * qla4_83xx_process_reset_template() stops iterating. */
+static void qla4_83xx_seq_end(struct scsi_qla_host *ha,
+                             struct qla4_83xx_reset_entry_hdr *p_hdr)
+{
+       ha->reset_tmplt.seq_end = 1;
+}
+
+/* OPCODE_TMPL_END handler: mark the whole template as processed and
+ * report whether any poll in the sequence timed out (seq_error). */
+static void qla4_83xx_template_end(struct scsi_qla_host *ha,
+                                  struct qla4_83xx_reset_entry_hdr *p_hdr)
+{
+       ha->reset_tmplt.template_end = 1;
+
+       if (ha->reset_tmplt.seq_error == 0) {
+               DEBUG2(ql4_printk(KERN_INFO, ha,
+                                 "%s: Reset sequence completed SUCCESSFULLY.\n",
+                                 __func__));
+       } else {
+               ql4_printk(KERN_ERR, ha, "%s: Reset sequence completed with some timeout errors.\n",
+                          __func__);
+       }
+}
+
+/**
+ * qla4_83xx_process_reset_template - Process reset template.
+ *
+ * Process all entries in reset template till entry with SEQ_END opcode,
+ * which indicates end of the reset template processing. Each entry has a
+ * Reset Entry header, entry opcode/command, with size of the entry, number
+ * of entries in sub-sequence and delay in microsecs or timeout in millisecs.
+ *
+ * @ha : Pointer to adapter structure
+ * @p_buff : Common reset entry header.
+ **/
+static void qla4_83xx_process_reset_template(struct scsi_qla_host *ha,
+                                            char *p_buff)
+{
+       int index, entries;
+       struct qla4_83xx_reset_entry_hdr *p_hdr;
+       char *p_entry = p_buff;
+
+       /* Resume from seq_index so STOP/START/INIT share one entry count;
+        * the final index is stored back at the bottom. */
+       ha->reset_tmplt.seq_end = 0;
+       ha->reset_tmplt.template_end = 0;
+       entries = ha->reset_tmplt.hdr->entries;
+       index = ha->reset_tmplt.seq_index;
+
+       for (; (!ha->reset_tmplt.seq_end) && (index  < entries); index++) {
+
+               p_hdr = (struct qla4_83xx_reset_entry_hdr *)p_entry;
+               switch (p_hdr->cmd) {
+               case OPCODE_NOP:
+                       break;
+               case OPCODE_WRITE_LIST:
+                       qla4_83xx_write_list(ha, p_hdr);
+                       break;
+               case OPCODE_READ_WRITE_LIST:
+                       qla4_83xx_read_write_list(ha, p_hdr);
+                       break;
+               case OPCODE_POLL_LIST:
+                       qla4_83xx_poll_list(ha, p_hdr);
+                       break;
+               case OPCODE_POLL_WRITE_LIST:
+                       qla4_83xx_poll_write_list(ha, p_hdr);
+                       break;
+               case OPCODE_READ_MODIFY_WRITE:
+                       qla4_83xx_read_modify_write(ha, p_hdr);
+                       break;
+               case OPCODE_SEQ_PAUSE:
+                       qla4_83xx_pause(ha, p_hdr);
+                       break;
+               case OPCODE_SEQ_END:
+                       qla4_83xx_seq_end(ha, p_hdr);
+                       break;
+               case OPCODE_TMPL_END:
+                       qla4_83xx_template_end(ha, p_hdr);
+                       break;
+               case OPCODE_POLL_READ_LIST:
+                       qla4_83xx_poll_read_list(ha, p_hdr);
+                       break;
+               default:
+                       ql4_printk(KERN_ERR, ha, "%s: Unknown command ==> 0x%04x on entry = %d\n",
+                                  __func__, p_hdr->cmd, index);
+                       break;
+               }
+
+               /* Set pointer to next entry in the sequence. */
+               p_entry += p_hdr->size;
+       }
+
+       ha->reset_tmplt.seq_index = index;
+}
+
+/* Run the STOP sub-sequence from the beginning of the template; warn if
+ * it did not terminate on a SEQ_END entry. */
+static void qla4_83xx_process_stop_seq(struct scsi_qla_host *ha)
+{
+       ha->reset_tmplt.seq_index = 0;
+       qla4_83xx_process_reset_template(ha, ha->reset_tmplt.stop_offset);
+
+       if (ha->reset_tmplt.seq_end != 1)
+               ql4_printk(KERN_ERR, ha, "%s: Abrupt STOP Sub-Sequence end.\n",
+                          __func__);
+}
+
+/* Run the START sub-sequence; this one is expected to end on TMPL_END. */
+static void qla4_83xx_process_start_seq(struct scsi_qla_host *ha)
+{
+       qla4_83xx_process_reset_template(ha, ha->reset_tmplt.start_offset);
+
+       if (ha->reset_tmplt.template_end != 1)
+               ql4_printk(KERN_ERR, ha, "%s: Abrupt START Sub-Sequence end.\n",
+                          __func__);
+}
+
+/* Run the INIT sub-sequence; warn if it did not end on SEQ_END. */
+static void qla4_83xx_process_init_seq(struct scsi_qla_host *ha)
+{
+       qla4_83xx_process_reset_template(ha, ha->reset_tmplt.init_offset);
+
+       if (ha->reset_tmplt.seq_end != 1)
+               ql4_printk(KERN_ERR, ha, "%s: Abrupt INIT Sub-Sequence end.\n",
+                          __func__);
+}
+
+/* Restart the 83xx firmware: run the STOP sequence, collect a minidump
+ * unless AF_83XX_NO_FW_DUMP is set, run INIT, copy the bootloader to MS
+ * memory, then kick off boot-from-flash and run the START sequence.
+ * Returns QLA_SUCCESS or QLA_ERROR (bootloader copy failure). */
+static int qla4_83xx_restart(struct scsi_qla_host *ha)
+{
+       int ret_val = QLA_SUCCESS;
+
+       qla4_83xx_process_stop_seq(ha);
+
+       /* Collect minidump*/
+       if (!test_and_clear_bit(AF_83XX_NO_FW_DUMP, &ha->flags))
+               qla4_8xxx_get_minidump(ha);
+
+       qla4_83xx_process_init_seq(ha);
+
+       if (qla4_83xx_copy_bootloader(ha)) {
+               ql4_printk(KERN_ERR, ha, "%s: Copy bootloader, firmware restart failed!\n",
+                          __func__);
+               ret_val = QLA_ERROR;
+               goto exit_restart;
+       }
+
+       qla4_83xx_wr_reg(ha, QLA83XX_FW_IMAGE_VALID, QLA83XX_BOOT_FROM_FLASH);
+       qla4_83xx_process_start_seq(ha);
+
+exit_restart:
+       return ret_val;
+}
+
+/* Restart the firmware and wait for Command Peg initialization.
+ * Returns QLA_SUCCESS, or QLA_ERROR if restart or peg init fails. */
+int qla4_83xx_start_firmware(struct scsi_qla_host *ha)
+{
+       int ret_val = QLA_SUCCESS;
+
+       ret_val = qla4_83xx_restart(ha);
+       if (ret_val == QLA_ERROR) {
+               ql4_printk(KERN_ERR, ha, "%s: Restart error\n", __func__);
+               goto exit_start_fw;
+       } else {
+               DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Restart done\n",
+                                 __func__));
+       }
+
+       ret_val = qla4_83xx_check_cmd_peg_status(ha);
+       if (ret_val == QLA_ERROR)
+               ql4_printk(KERN_ERR, ha, "%s: Peg not initialized\n",
+                          __func__);
+
+exit_start_fw:
+       return ret_val;
+}
+
+/*----------------------Interrupt Related functions ---------------------*/
+
+/* Mask interrupts: disable mailbox interrupts via firmware (if they
+ * were on), clear the FW mailbox-interrupt enable bit, and mask the
+ * legacy interrupt line. */
+void qla4_83xx_disable_intrs(struct scsi_qla_host *ha)
+{
+       uint32_t mb_int, ret;
+
+       if (test_and_clear_bit(AF_INTERRUPTS_ON, &ha->flags))
+               qla4_8xxx_mbx_intr_disable(ha);
+
+       ret = readl(&ha->qla4_83xx_reg->mbox_int);
+       mb_int = ret & ~INT_ENABLE_FW_MB;
+       writel(mb_int, &ha->qla4_83xx_reg->mbox_int);
+       writel(1, &ha->qla4_83xx_reg->leg_int_mask);
+}
+
+/* Unmask interrupts: enable mailbox interrupts via firmware, set the FW
+ * mailbox-interrupt enable bit, unmask the legacy line, and record the
+ * enabled state in AF_INTERRUPTS_ON. */
+void qla4_83xx_enable_intrs(struct scsi_qla_host *ha)
+{
+       uint32_t mb_int;
+
+       qla4_8xxx_mbx_intr_enable(ha);
+       mb_int = INT_ENABLE_FW_MB;
+       writel(mb_int, &ha->qla4_83xx_reg->mbox_int);
+       writel(0, &ha->qla4_83xx_reg->leg_int_mask);
+
+       set_bit(AF_INTERRUPTS_ON, &ha->flags);
+}
+
+/* Queue a mailbox command: load mailbox registers 1..incount-1, then
+ * mailbox 0 last (writing mailbox 0 is what the firmware latches), and
+ * raise the host interrupt to notify the firmware. */
+void qla4_83xx_queue_mbox_cmd(struct scsi_qla_host *ha, uint32_t *mbx_cmd,
+                             int incount)
+{
+       int i;
+
+       /* Load all mailbox registers, except mailbox 0. */
+       for (i = 1; i < incount; i++)
+               writel(mbx_cmd[i], &ha->qla4_83xx_reg->mailbox_in[i]);
+
+       writel(mbx_cmd[0], &ha->qla4_83xx_reg->mailbox_in[0]);
+
+       /* Set Host Interrupt register to 1, to tell the firmware that
+        * a mailbox command is pending. Firmware after reading the
+        * mailbox command, clears the host interrupt register */
+       writel(HINT_MBX_INT_PENDING, &ha->qla4_83xx_reg->host_intr);
+}
+
+/* If the RISC interrupt is asserted, run the ISR to collect "outcount"
+ * mailbox status registers for the command just issued. */
+void qla4_83xx_process_mbox_intr(struct scsi_qla_host *ha, int outcount)
+{
+       int intr_status;
+
+       intr_status = readl(&ha->qla4_83xx_reg->risc_intr);
+       if (intr_status) {
+               ha->mbox_status_count = outcount;
+               ha->isp_ops->interrupt_service_routine(ha, intr_status);
+       }
+}
+
+/**
+ * qla4_83xx_isp_reset - Resets ISP and aborts all outstanding commands.
+ * @ha: pointer to host adapter structure.
+ *
+ * Under the IDC lock, moves the device state to NEED_RESET (unless
+ * reset recovery is disabled via IDC_CTRL BIT0), claims reset ownership
+ * when eligible, then drops the lock and drives recovery through
+ * qla4_8xxx_device_state_handler().  Returns QLA_SUCCESS or QLA_ERROR.
+ **/
+int qla4_83xx_isp_reset(struct scsi_qla_host *ha)
+{
+       int rval;
+       uint32_t dev_state;
+
+       ha->isp_ops->idc_lock(ha);
+       dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE);
+
+       if (ql4xdontresethba)
+               qla4_83xx_set_idc_dontreset(ha);
+
+       if (dev_state == QLA8XXX_DEV_READY) {
+               /* If IDC_CTRL DONTRESETHBA_BIT0 is set dont do reset
+                * recovery */
+               if (qla4_83xx_idc_dontreset(ha) == DONTRESET_BIT0) {
+                       ql4_printk(KERN_ERR, ha, "%s: Reset recovery disabled\n",
+                                  __func__);
+                       rval = QLA_ERROR;
+                       goto exit_isp_reset;
+               }
+
+               DEBUG2(ql4_printk(KERN_INFO, ha, "%s: HW State: NEED RESET\n",
+                                 __func__));
+               qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
+                                   QLA8XXX_DEV_NEED_RESET);
+
+       } else {
+               /* If device_state is NEED_RESET, go ahead with
+                * Reset,irrespective of ql4xdontresethba. This is to allow a
+                * non-reset-owner to force a reset. Non-reset-owner sets
+                * the IDC_CTRL BIT0 to prevent Reset-owner from doing a Reset
+                * and then forces a Reset by setting device_state to
+                * NEED_RESET. */
+               DEBUG2(ql4_printk(KERN_INFO, ha,
+                                 "%s: HW state already set to NEED_RESET\n",
+                                 __func__));
+       }
+
+       /* For ISP8324, Reset owner is NIC, iSCSI or FCOE based on priority
+        * and which drivers are present. Unlike ISP8022, the function setting
+        * NEED_RESET, may not be the Reset owner. */
+       if (qla4_83xx_can_perform_reset(ha))
+               set_bit(AF_8XXX_RST_OWNER, &ha->flags);
+
+       ha->isp_ops->idc_unlock(ha);
+       rval = qla4_8xxx_device_state_handler(ha);
+
+       /* Re-take the lock to clear reset-ready; the early-error path
+        * above jumps here still holding the lock. */
+       ha->isp_ops->idc_lock(ha);
+       qla4_8xxx_clear_rst_ready(ha);
+exit_isp_reset:
+       ha->isp_ops->idc_unlock(ha);
+
+       if (rval == QLA_SUCCESS)
+               clear_bit(AF_FW_RECOVERY, &ha->flags);
+
+       return rval;
+}
+
+/* Debug helper: dump the SRE-Shim control, per-port Rx buffer pause
+ * thresholds, traffic-class max-cell registers, per-TC Rx stats (read
+ * by selecting TC in bits 29..31) and IFB pause thresholds.
+ * NOTE(review): "status" captures the indirect-read results but is
+ * never checked; on a read failure the printed values are stale. */
+static void qla4_83xx_dump_pause_control_regs(struct scsi_qla_host *ha)
+{
+       u32 val = 0, val1 = 0;
+       int i, status = QLA_SUCCESS;
+
+       status = qla4_83xx_rd_reg_indirect(ha, QLA83XX_SRE_SHIM_CONTROL, &val);
+       DEBUG2(ql4_printk(KERN_INFO, ha, "SRE-Shim Ctrl:0x%x\n", val));
+
+       /* Port 0 Rx Buffer Pause Threshold Registers. */
+       DEBUG2(ql4_printk(KERN_INFO, ha,
+               "Port 0 Rx Buffer Pause Threshold Registers[TC7..TC0]:"));
+       for (i = 0; i < 8; i++) {
+               status = qla4_83xx_rd_reg_indirect(ha,
+                               QLA83XX_PORT0_RXB_PAUSE_THRS + (i * 0x4), &val);
+               DEBUG2(pr_info("0x%x ", val));
+       }
+
+       DEBUG2(pr_info("\n"));
+
+       /* Port 1 Rx Buffer Pause Threshold Registers. */
+       DEBUG2(ql4_printk(KERN_INFO, ha,
+               "Port 1 Rx Buffer Pause Threshold Registers[TC7..TC0]:"));
+       for (i = 0; i < 8; i++) {
+               status = qla4_83xx_rd_reg_indirect(ha,
+                               QLA83XX_PORT1_RXB_PAUSE_THRS + (i * 0x4), &val);
+               DEBUG2(pr_info("0x%x  ", val));
+       }
+
+       DEBUG2(pr_info("\n"));
+
+       /* Port 0 RxB Traffic Class Max Cell Registers. */
+       DEBUG2(ql4_printk(KERN_INFO, ha,
+               "Port 0 RxB Traffic Class Max Cell Registers[3..0]:"));
+       for (i = 0; i < 4; i++) {
+               status = qla4_83xx_rd_reg_indirect(ha,
+                              QLA83XX_PORT0_RXB_TC_MAX_CELL + (i * 0x4), &val);
+               DEBUG2(pr_info("0x%x  ", val));
+       }
+
+       DEBUG2(pr_info("\n"));
+
+       /* Port 1 RxB Traffic Class Max Cell Registers. */
+       DEBUG2(ql4_printk(KERN_INFO, ha,
+               "Port 1 RxB Traffic Class Max Cell Registers[3..0]:"));
+       for (i = 0; i < 4; i++) {
+               status = qla4_83xx_rd_reg_indirect(ha,
+                              QLA83XX_PORT1_RXB_TC_MAX_CELL + (i * 0x4), &val);
+               DEBUG2(pr_info("0x%x  ", val));
+       }
+
+       DEBUG2(pr_info("\n"));
+
+       /* Port 0 RxB Rx Traffic Class Stats. */
+       DEBUG2(ql4_printk(KERN_INFO, ha,
+                         "Port 0 RxB Rx Traffic Class Stats [TC7..TC0]"));
+       for (i = 7; i >= 0; i--) {
+               status = qla4_83xx_rd_reg_indirect(ha,
+                                                  QLA83XX_PORT0_RXB_TC_STATS,
+                                                  &val);
+               val &= ~(0x7 << 29);    /* Reset bits 29 to 31 */
+               qla4_83xx_wr_reg_indirect(ha, QLA83XX_PORT0_RXB_TC_STATS,
+                                         (val | (i << 29)));
+               status = qla4_83xx_rd_reg_indirect(ha,
+                                                  QLA83XX_PORT0_RXB_TC_STATS,
+                                                  &val);
+               DEBUG2(pr_info("0x%x  ", val));
+       }
+
+       DEBUG2(pr_info("\n"));
+
+       /* Port 1 RxB Rx Traffic Class Stats. */
+       DEBUG2(ql4_printk(KERN_INFO, ha,
+                         "Port 1 RxB Rx Traffic Class Stats [TC7..TC0]"));
+       for (i = 7; i >= 0; i--) {
+               status = qla4_83xx_rd_reg_indirect(ha,
+                                                  QLA83XX_PORT1_RXB_TC_STATS,
+                                                  &val);
+               val &= ~(0x7 << 29);    /* Reset bits 29 to 31 */
+               qla4_83xx_wr_reg_indirect(ha, QLA83XX_PORT1_RXB_TC_STATS,
+                                         (val | (i << 29)));
+               status = qla4_83xx_rd_reg_indirect(ha,
+                                                  QLA83XX_PORT1_RXB_TC_STATS,
+                                                  &val);
+               DEBUG2(pr_info("0x%x  ", val));
+       }
+
+       DEBUG2(pr_info("\n"));
+
+       status = qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT2_IFB_PAUSE_THRS,
+                                          &val);
+       status = qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT3_IFB_PAUSE_THRS,
+                                          &val1);
+
+       DEBUG2(ql4_printk(KERN_INFO, ha,
+                         "IFB-Pause Thresholds: Port 2:0x%x, Port 3:0x%x\n",
+                         val, val1));
+}
+
+/* Program the SRE-Shim control, per-port Rx buffer pause thresholds,
+ * traffic-class max-cell registers and IFB pause thresholds with the
+ * QLA83XX_SET_* values so the adapter stops generating pause frames.
+ * Caller must hold the IDC lock (see qla4_83xx_disable_pause()). */
+static void __qla4_83xx_disable_pause(struct scsi_qla_host *ha)
+{
+       int i;
+
+       /* set SRE-Shim Control Register */
+       qla4_83xx_wr_reg_indirect(ha, QLA83XX_SRE_SHIM_CONTROL,
+                                 QLA83XX_SET_PAUSE_VAL);
+
+       for (i = 0; i < 8; i++) {
+               /* Port 0 Rx Buffer Pause Threshold Registers. */
+               qla4_83xx_wr_reg_indirect(ha,
+                                     QLA83XX_PORT0_RXB_PAUSE_THRS + (i * 0x4),
+                                     QLA83XX_SET_PAUSE_VAL);
+               /* Port 1 Rx Buffer Pause Threshold Registers. */
+               qla4_83xx_wr_reg_indirect(ha,
+                                     QLA83XX_PORT1_RXB_PAUSE_THRS + (i * 0x4),
+                                     QLA83XX_SET_PAUSE_VAL);
+       }
+
+       for (i = 0; i < 4; i++) {
+               /* Port 0 RxB Traffic Class Max Cell Registers. */
+               qla4_83xx_wr_reg_indirect(ha,
+                                    QLA83XX_PORT0_RXB_TC_MAX_CELL + (i * 0x4),
+                                    QLA83XX_SET_TC_MAX_CELL_VAL);
+               /* Port 1 RxB Traffic Class Max Cell Registers. */
+               qla4_83xx_wr_reg_indirect(ha,
+                                    QLA83XX_PORT1_RXB_TC_MAX_CELL + (i * 0x4),
+                                    QLA83XX_SET_TC_MAX_CELL_VAL);
+       }
+
+       qla4_83xx_wr_reg_indirect(ha, QLA83XX_PORT2_IFB_PAUSE_THRS,
+                                 QLA83XX_SET_PAUSE_VAL);
+       qla4_83xx_wr_reg_indirect(ha, QLA83XX_PORT3_IFB_PAUSE_THRS,
+                                 QLA83XX_SET_PAUSE_VAL);
+
+       ql4_printk(KERN_INFO, ha, "Disabled pause frames successfully.\n");
+}
+
+/* Public entry point: dump the pause control registers for diagnosis,
+ * then disable pause frames, all under the IDC lock. */
+void qla4_83xx_disable_pause(struct scsi_qla_host *ha)
+{
+       ha->isp_ops->idc_lock(ha);
+       qla4_83xx_dump_pause_control_regs(ha);
+       __qla4_83xx_disable_pause(ha);
+       ha->isp_ops->idc_unlock(ha);
+}
diff --git a/drivers/scsi/qla4xxx/ql4_83xx.h b/drivers/scsi/qla4xxx/ql4_83xx.h
new file mode 100644 (file)
index 0000000..6a00f90
--- /dev/null
@@ -0,0 +1,283 @@
+/*
+ * QLogic iSCSI HBA Driver
+ * Copyright (c)  2003-2012 QLogic Corporation
+ *
+ * See LICENSE.qla4xxx for copyright and licensing details.
+ */
+
+#ifndef __QL483XX_H
+#define __QL483XX_H
+
+/* Indirectly Mapped Registers */
+#define QLA83XX_FLASH_SPI_STATUS       0x2808E010
+#define QLA83XX_FLASH_SPI_CONTROL      0x2808E014
+#define QLA83XX_FLASH_STATUS           0x42100004
+#define QLA83XX_FLASH_CONTROL          0x42110004
+#define QLA83XX_FLASH_ADDR             0x42110008
+#define QLA83XX_FLASH_WRDATA           0x4211000C
+#define QLA83XX_FLASH_RDDATA           0x42110018
+#define QLA83XX_FLASH_DIRECT_WINDOW    0x42110030
+#define QLA83XX_FLASH_DIRECT_DATA(DATA) (0x42150000 | (0x0000FFFF&DATA))
+
+/* Directly Mapped Registers in 83xx register table */
+
+/* Flash access regs */
+#define QLA83XX_FLASH_LOCK             0x3850
+#define QLA83XX_FLASH_UNLOCK           0x3854
+#define QLA83XX_FLASH_LOCK_ID          0x3500
+
+/* Driver Lock regs */
+#define QLA83XX_DRV_LOCK               0x3868
+#define QLA83XX_DRV_UNLOCK             0x386C
+#define QLA83XX_DRV_LOCK_ID            0x3504
+#define QLA83XX_DRV_LOCKRECOVERY       0x379C
+
+/* IDC version */
+#define QLA83XX_IDC_VER_MAJ_VALUE       0x1
+#define QLA83XX_IDC_VER_MIN_VALUE       0x0
+
+/* IDC Registers : Driver Coexistence Defines */
+#define QLA83XX_CRB_IDC_VER_MAJOR      0x3780
+#define QLA83XX_CRB_IDC_VER_MINOR      0x3798
+#define QLA83XX_IDC_DRV_CTRL           0x3790
+#define QLA83XX_IDC_DRV_AUDIT          0x3794
+#define QLA83XX_SRE_SHIM_CONTROL       0x0D200284
+#define QLA83XX_PORT0_RXB_PAUSE_THRS   0x0B2003A4
+#define QLA83XX_PORT1_RXB_PAUSE_THRS   0x0B2013A4
+#define QLA83XX_PORT0_RXB_TC_MAX_CELL  0x0B200388
+#define QLA83XX_PORT1_RXB_TC_MAX_CELL  0x0B201388
+#define QLA83XX_PORT0_RXB_TC_STATS     0x0B20039C
+#define QLA83XX_PORT1_RXB_TC_STATS     0x0B20139C
+#define QLA83XX_PORT2_IFB_PAUSE_THRS   0x0B200704
+#define QLA83XX_PORT3_IFB_PAUSE_THRS   0x0B201704
+
+/* set value to pause threshold value */
+#define QLA83XX_SET_PAUSE_VAL          0x0
+#define QLA83XX_SET_TC_MAX_CELL_VAL    0x03FF03FF
+
+/* qla_83xx_reg_tbl registers */
+#define QLA83XX_PEG_HALT_STATUS1       0x34A8
+#define QLA83XX_PEG_HALT_STATUS2       0x34AC
+#define QLA83XX_PEG_ALIVE_COUNTER      0x34B0 /* FW_HEARTBEAT */
+#define QLA83XX_FW_CAPABILITIES                0x3528
+#define QLA83XX_CRB_DRV_ACTIVE         0x3788 /* IDC_DRV_PRESENCE */
+#define QLA83XX_CRB_DEV_STATE          0x3784 /* IDC_DEV_STATE */
+#define QLA83XX_CRB_DRV_STATE          0x378C /* IDC_DRV_ACK */
+#define QLA83XX_CRB_DRV_SCRATCH                0x3548
+#define QLA83XX_CRB_DEV_PART_INFO1     0x37E0
+#define QLA83XX_CRB_DEV_PART_INFO2     0x37E4
+
+#define QLA83XX_FW_VER_MAJOR           0x3550
+#define QLA83XX_FW_VER_MINOR           0x3554
+#define QLA83XX_FW_VER_SUB             0x3558
+#define QLA83XX_NPAR_STATE             0x359C
+#define QLA83XX_FW_IMAGE_VALID         0x35FC
+#define QLA83XX_CMDPEG_STATE           0x3650
+#define QLA83XX_ASIC_TEMP              0x37B4
+#define QLA83XX_FW_API                 0x356C
+#define QLA83XX_DRV_OP_MODE            0x3570
+
+static const uint32_t qla4_83xx_reg_tbl[] = {
+       QLA83XX_PEG_HALT_STATUS1,
+       QLA83XX_PEG_HALT_STATUS2,
+       QLA83XX_PEG_ALIVE_COUNTER,
+       QLA83XX_CRB_DRV_ACTIVE,
+       QLA83XX_CRB_DEV_STATE,
+       QLA83XX_CRB_DRV_STATE,
+       QLA83XX_CRB_DRV_SCRATCH,
+       QLA83XX_CRB_DEV_PART_INFO1,
+       QLA83XX_CRB_IDC_VER_MAJOR,
+       QLA83XX_FW_VER_MAJOR,
+       QLA83XX_FW_VER_MINOR,
+       QLA83XX_FW_VER_SUB,
+       QLA83XX_CMDPEG_STATE,
+       QLA83XX_ASIC_TEMP,
+};
+
+#define QLA83XX_CRB_WIN_BASE           0x3800
+#define QLA83XX_CRB_WIN_FUNC(f)                (QLA83XX_CRB_WIN_BASE+((f)*4))
+#define QLA83XX_SEM_LOCK_BASE          0x3840
+#define QLA83XX_SEM_UNLOCK_BASE                0x3844
+#define QLA83XX_SEM_LOCK_FUNC(f)       (QLA83XX_SEM_LOCK_BASE+((f)*8))
+#define QLA83XX_SEM_UNLOCK_FUNC(f)     (QLA83XX_SEM_UNLOCK_BASE+((f)*8))
+#define QLA83XX_LINK_STATE(f)          (0x3698+((f) > 7 ? 4 : 0))
+#define QLA83XX_LINK_SPEED(f)          (0x36E0+(((f) >> 2) * 4))
+#define QLA83XX_MAX_LINK_SPEED(f)       (0x36F0+(((f) / 4) * 4))
+#define QLA83XX_LINK_SPEED_FACTOR      10
+
+/* FLASH API Defines */
+#define QLA83xx_FLASH_MAX_WAIT_USEC    100
+#define QLA83XX_FLASH_LOCK_TIMEOUT     10000
+#define QLA83XX_FLASH_SECTOR_SIZE      65536
+#define QLA83XX_DRV_LOCK_TIMEOUT       2000
+#define QLA83XX_FLASH_SECTOR_ERASE_CMD 0xdeadbeef
+#define QLA83XX_FLASH_WRITE_CMD                0xdacdacda
+#define QLA83XX_FLASH_BUFFER_WRITE_CMD 0xcadcadca
+#define QLA83XX_FLASH_READ_RETRY_COUNT 2000
+#define QLA83XX_FLASH_STATUS_READY     0x6
+#define QLA83XX_FLASH_BUFFER_WRITE_MIN 2
+#define QLA83XX_FLASH_BUFFER_WRITE_MAX 64
+#define QLA83XX_FLASH_STATUS_REG_POLL_DELAY 1
+#define QLA83XX_ERASE_MODE             1
+#define QLA83XX_WRITE_MODE             2
+#define QLA83XX_DWORD_WRITE_MODE       3
+
+#define QLA83XX_GLOBAL_RESET           0x38CC
+#define QLA83XX_WILDCARD               0x38F0
+#define QLA83XX_INFORMANT              0x38FC
+#define QLA83XX_HOST_MBX_CTRL          0x3038
+#define QLA83XX_FW_MBX_CTRL            0x303C
+#define QLA83XX_BOOTLOADER_ADDR                0x355C
+#define QLA83XX_BOOTLOADER_SIZE                0x3560
+#define QLA83XX_FW_IMAGE_ADDR          0x3564
+#define QLA83XX_MBX_INTR_ENABLE                0x1000
+#define QLA83XX_MBX_INTR_MASK          0x1200
+
+/* IDC Control Register bit defines */
+#define DONTRESET_BIT0         0x1
+#define GRACEFUL_RESET_BIT1    0x2
+
+#define QLA83XX_HALT_STATUS_INFORMATIONAL      (0x1 << 29)
+#define QLA83XX_HALT_STATUS_FW_RESET           (0x2 << 29)
+#define QLA83XX_HALT_STATUS_UNRECOVERABLE      (0x4 << 29)
+
+/* Firmware image definitions */
+#define QLA83XX_BOOTLOADER_FLASH_ADDR  0x10000
+#define QLA83XX_BOOT_FROM_FLASH                0
+
+#define QLA83XX_IDC_PARAM_ADDR         0x3e8020
+/* Reset template definitions */
+#define QLA83XX_MAX_RESET_SEQ_ENTRIES  16
+#define QLA83XX_RESTART_TEMPLATE_SIZE  0x2000
+#define QLA83XX_RESET_TEMPLATE_ADDR    0x4F0000
+#define QLA83XX_RESET_SEQ_VERSION      0x0101
+
+/* Reset template entry opcodes */
+#define OPCODE_NOP                     0x0000
+#define OPCODE_WRITE_LIST              0x0001
+#define OPCODE_READ_WRITE_LIST         0x0002
+#define OPCODE_POLL_LIST               0x0004
+#define OPCODE_POLL_WRITE_LIST         0x0008
+#define OPCODE_READ_MODIFY_WRITE       0x0010
+#define OPCODE_SEQ_PAUSE               0x0020
+#define OPCODE_SEQ_END                 0x0040
+#define OPCODE_TMPL_END                        0x0080
+#define OPCODE_POLL_READ_LIST          0x0100
+
+/* Template Header */
+#define RESET_TMPLT_HDR_SIGNATURE      0xCAFE
+struct qla4_83xx_reset_template_hdr {
+       __le16  version;
+       __le16  signature;
+       __le16  size;
+       __le16  entries;
+       __le16  hdr_size;
+       __le16  checksum;
+       __le16  init_seq_offset;
+       __le16  start_seq_offset;
+} __packed;
+
+/* Common Entry Header. */
+struct qla4_83xx_reset_entry_hdr {
+       __le16 cmd;
+       __le16 size;
+       __le16 count;
+       __le16 delay;
+} __packed;
+
+/* Generic poll entry type. */
+struct qla4_83xx_poll {
+       __le32  test_mask;
+       __le32  test_value;
+} __packed;
+
+/* Read modify write entry type. */
+struct qla4_83xx_rmw {
+       __le32  test_mask;
+       __le32  xor_value;
+       __le32  or_value;
+       uint8_t shl;
+       uint8_t shr;
+       uint8_t index_a;
+       uint8_t rsvd;
+} __packed;
+
+/* Generic Entry Item with 2 DWords. */
+struct qla4_83xx_entry {
+       __le32 arg1;
+       __le32 arg2;
+} __packed;
+
+/* Generic Entry Item with 4 DWords.*/
+struct qla4_83xx_quad_entry {
+       __le32 dr_addr;
+       __le32 dr_value;
+       __le32 ar_addr;
+       __le32 ar_value;
+} __packed;
+
+struct qla4_83xx_reset_template {
+       int seq_index;
+       int seq_error;
+       int array_index;
+       uint32_t array[QLA83XX_MAX_RESET_SEQ_ENTRIES];
+       uint8_t *buff;
+       uint8_t *stop_offset;
+       uint8_t *start_offset;
+       uint8_t *init_offset;
+       struct qla4_83xx_reset_template_hdr *hdr;
+       uint8_t seq_end;
+       uint8_t template_end;
+};
+
+/* POLLRD Entry */
+struct qla83xx_minidump_entry_pollrd {
+       struct qla8xxx_minidump_entry_hdr h;
+       uint32_t select_addr;
+       uint32_t read_addr;
+       uint32_t select_value;
+       uint16_t select_value_stride;
+       uint16_t op_count;
+       uint32_t poll_wait;
+       uint32_t poll_mask;
+       uint32_t data_size;
+       uint32_t rsvd_1;
+};
+
+/* RDMUX2 Entry */
+struct qla83xx_minidump_entry_rdmux2 {
+       struct qla8xxx_minidump_entry_hdr h;
+       uint32_t select_addr_1;
+       uint32_t select_addr_2;
+       uint32_t select_value_1;
+       uint32_t select_value_2;
+       uint32_t op_count;
+       uint32_t select_value_mask;
+       uint32_t read_addr;
+       uint8_t select_value_stride;
+       uint8_t data_size;
+       uint8_t rsvd[2];
+};
+
+/* POLLRDMWR Entry */
+struct qla83xx_minidump_entry_pollrdmwr {
+       struct qla8xxx_minidump_entry_hdr h;
+       uint32_t addr_1;
+       uint32_t addr_2;
+       uint32_t value_1;
+       uint32_t value_2;
+       uint32_t poll_wait;
+       uint32_t poll_mask;
+       uint32_t modify_mask;
+       uint32_t data_size;
+};
+
+/* IDC additional information */
+struct qla4_83xx_idc_information {
+       uint32_t request_desc;  /* IDC request descriptor */
+       uint32_t info1; /* IDC additional info */
+       uint32_t info2; /* IDC additional info */
+       uint32_t info3; /* IDC additional info */
+};
+
+#endif
index c681b2a355e137a99edcfd39e1c18b8c7dcbda11..76819b71ada761f200a9182d20cb746e41b5d6be 100644 (file)
@@ -17,7 +17,7 @@ qla4_8xxx_sysfs_read_fw_dump(struct file *filep, struct kobject *kobj,
        struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
                                               struct device, kobj)));
 
-       if (!is_qla8022(ha))
+       if (is_qla40XX(ha))
                return -EINVAL;
 
        if (!test_bit(AF_82XX_DUMP_READING, &ha->flags))
@@ -38,7 +38,7 @@ qla4_8xxx_sysfs_write_fw_dump(struct file *filep, struct kobject *kobj,
        long reading;
        int ret = 0;
 
-       if (!is_qla8022(ha))
+       if (is_qla40XX(ha))
                return -EINVAL;
 
        if (off != 0)
@@ -75,21 +75,21 @@ qla4_8xxx_sysfs_write_fw_dump(struct file *filep, struct kobject *kobj,
                break;
        case 2:
                /* Reset HBA */
-               qla4_8xxx_idc_lock(ha);
-               dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
-               if (dev_state == QLA82XX_DEV_READY) {
+               ha->isp_ops->idc_lock(ha);
+               dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE);
+               if (dev_state == QLA8XXX_DEV_READY) {
                        ql4_printk(KERN_INFO, ha,
                                   "%s: Setting Need reset, reset_owner is 0x%x.\n",
                                   __func__, ha->func_num);
-                       qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
-                                       QLA82XX_DEV_NEED_RESET);
-                       set_bit(AF_82XX_RST_OWNER, &ha->flags);
+                       qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
+                                           QLA8XXX_DEV_NEED_RESET);
+                       set_bit(AF_8XXX_RST_OWNER, &ha->flags);
                } else
                        ql4_printk(KERN_INFO, ha,
                                   "%s: Reset not performed as device state is 0x%x\n",
                                   __func__, dev_state);
 
-               qla4_8xxx_idc_unlock(ha);
+               ha->isp_ops->idc_unlock(ha);
                break;
        default:
                /* do nothing */
@@ -150,7 +150,7 @@ qla4xxx_fw_version_show(struct device *dev,
 {
        struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
 
-       if (is_qla8022(ha))
+       if (is_qla80XX(ha))
                return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n",
                                ha->firmware_version[0],
                                ha->firmware_version[1],
@@ -214,7 +214,7 @@ qla4xxx_phy_port_cnt_show(struct device *dev, struct device_attribute *attr,
 {
        struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
 
-       if (!is_qla8022(ha))
+       if (is_qla40XX(ha))
                return -ENOSYS;
 
        return snprintf(buf, PAGE_SIZE, "0x%04X\n", ha->phy_port_cnt);
@@ -226,7 +226,7 @@ qla4xxx_phy_port_num_show(struct device *dev, struct device_attribute *attr,
 {
        struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
 
-       if (!is_qla8022(ha))
+       if (is_qla40XX(ha))
                return -ENOSYS;
 
        return snprintf(buf, PAGE_SIZE, "0x%04X\n", ha->phy_port_num);
@@ -238,7 +238,7 @@ qla4xxx_iscsi_func_cnt_show(struct device *dev, struct device_attribute *attr,
 {
        struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
 
-       if (!is_qla8022(ha))
+       if (is_qla40XX(ha))
                return -ENOSYS;
 
        return snprintf(buf, PAGE_SIZE, "0x%04X\n", ha->iscsi_pci_func_cnt);
index 8d58ae2748292b45026061f9c0e22c2bae31cdf7..77b7c594010f9c15158625596633c13121b13281 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic iSCSI HBA Driver
- * Copyright (c)  2003-2010 QLogic Corporation
+ * Copyright (c)  2003-2012 QLogic Corporation
  *
  * See LICENSE.qla4xxx for copyright and licensing details.
  */
@@ -37,7 +37,7 @@ void qla4xxx_dump_registers(struct scsi_qla_host *ha)
        if (is_qla8022(ha)) {
                for (i = 1; i < MBOX_REG_COUNT; i++)
                        printk(KERN_INFO "mailbox[%d]     = 0x%08X\n",
-                           i, readl(&ha->qla4_8xxx_reg->mailbox_in[i]));
+                           i, readl(&ha->qla4_82xx_reg->mailbox_in[i]));
                return;
        }
 
@@ -131,3 +131,31 @@ void qla4xxx_dump_registers(struct scsi_qla_host *ha)
                    &ha->reg->ctrl_status);
        }
 }
+
+void qla4_8xxx_dump_peg_reg(struct scsi_qla_host *ha)
+{
+       uint32_t halt_status1, halt_status2;
+
+       halt_status1 = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_HALT_STATUS1);
+       halt_status2 = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_HALT_STATUS2);
+
+       if (is_qla8022(ha)) {
+               ql4_printk(KERN_INFO, ha,
+                          "scsi(%ld): %s, ISP8022 Dumping hw/fw registers:\n"
+                          " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n"
+                          " PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,\n"
+                          " PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n"
+                          " PEG_NET_4_PC: 0x%x\n", ha->host_no,
+                          __func__, halt_status1, halt_status2,
+                          qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x3c),
+                          qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_1 + 0x3c),
+                          qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_2 + 0x3c),
+                          qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_3 + 0x3c),
+                          qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_4 + 0x3c));
+       } else if (is_qla8032(ha)) {
+               ql4_printk(KERN_INFO, ha,
+                          "scsi(%ld): %s, ISP8324 Dumping hw/fw registers:\n"
+                          " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n",
+                          ha->host_no, __func__, halt_status1, halt_status2);
+       }
+}
index abd83602cddaebee3712bb7c2906c04256317959..5b0afc18ef1860707b756966e2f8f445d47c0a03 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic iSCSI HBA Driver
- * Copyright (c)  2003-2010 QLogic Corporation
+ * Copyright (c)  2003-2012 QLogic Corporation
  *
  * See LICENSE.qla4xxx for copyright and licensing details.
  */
index 7fdba7f1ffb70ef68fa1165b716013163a5323c3..329d553eae943d9acc524af42652721df8ad3fa1 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic iSCSI HBA Driver
- * Copyright (c)  2003-2010 QLogic Corporation
+ * Copyright (c)  2003-2012 QLogic Corporation
  *
  * See LICENSE.qla4xxx for copyright and licensing details.
  */
@@ -42,6 +42,7 @@
 #include "ql4_nx.h"
 #include "ql4_fw.h"
 #include "ql4_nvram.h"
+#include "ql4_83xx.h"
 
 #ifndef PCI_DEVICE_ID_QLOGIC_ISP4010
 #define PCI_DEVICE_ID_QLOGIC_ISP4010   0x4010
 #define PCI_DEVICE_ID_QLOGIC_ISP8022   0x8022
 #endif
 
+#ifndef PCI_DEVICE_ID_QLOGIC_ISP8324
+#define PCI_DEVICE_ID_QLOGIC_ISP8324   0x8032
+#endif
+
 #define ISP4XXX_PCI_FN_1       0x1
 #define ISP4XXX_PCI_FN_2       0x3
 
@@ -388,8 +393,10 @@ struct isp_operations {
        void (*disable_intrs) (struct scsi_qla_host *);
        void (*enable_intrs) (struct scsi_qla_host *);
        int (*start_firmware) (struct scsi_qla_host *);
+       int (*restart_firmware) (struct scsi_qla_host *);
        irqreturn_t (*intr_handler) (int , void *);
        void (*interrupt_service_routine) (struct scsi_qla_host *, uint32_t);
+       int (*need_reset) (struct scsi_qla_host *);
        int (*reset_chip) (struct scsi_qla_host *);
        int (*reset_firmware) (struct scsi_qla_host *);
        void (*queue_iocb) (struct scsi_qla_host *);
@@ -397,6 +404,15 @@ struct isp_operations {
        uint16_t (*rd_shdw_req_q_out) (struct scsi_qla_host *);
        uint16_t (*rd_shdw_rsp_q_in) (struct scsi_qla_host *);
        int (*get_sys_info) (struct scsi_qla_host *);
+       uint32_t (*rd_reg_direct) (struct scsi_qla_host *, ulong);
+       void (*wr_reg_direct) (struct scsi_qla_host *, ulong, uint32_t);
+       int (*rd_reg_indirect) (struct scsi_qla_host *, uint32_t, uint32_t *);
+       int (*wr_reg_indirect) (struct scsi_qla_host *, uint32_t, uint32_t);
+       int (*idc_lock) (struct scsi_qla_host *);
+       void (*idc_unlock) (struct scsi_qla_host *);
+       void (*rom_lock_recovery) (struct scsi_qla_host *);
+       void (*queue_mailbox_command) (struct scsi_qla_host *, uint32_t *, int);
+       void (*process_mailbox_interrupt) (struct scsi_qla_host *, int);
 };
 
 struct ql4_mdump_size_table {
@@ -497,8 +513,9 @@ struct scsi_qla_host {
 #define AF_PCI_CHANNEL_IO_PERM_FAILURE 21 /* 0x00200000 */
 #define AF_BUILD_DDB_LIST              22 /* 0x00400000 */
 #define AF_82XX_FW_DUMPED              24 /* 0x01000000 */
-#define AF_82XX_RST_OWNER              25 /* 0x02000000 */
+#define AF_8XXX_RST_OWNER              25 /* 0x02000000 */
 #define AF_82XX_DUMP_READING           26 /* 0x04000000 */
+#define AF_83XX_NO_FW_DUMP             27 /* 0x08000000 */
 
        unsigned long dpc_flags;
 
@@ -514,7 +531,7 @@ struct scsi_qla_host {
 #define DPC_RESET_ACTIVE               20 /* 0x00040000 */
 #define DPC_HA_UNRECOVERABLE           21 /* 0x00080000 ISP-82xx only*/
 #define DPC_HA_NEED_QUIESCENT          22 /* 0x00100000 ISP-82xx only*/
-
+#define DPC_POST_IDC_ACK               23 /* 0x00200000 */
 
        struct Scsi_Host *host; /* pointer to host data */
        uint32_t tot_ddbs;
@@ -647,7 +664,7 @@ struct scsi_qla_host {
        uint8_t acb_version;
 
        /* qla82xx specific fields */
-       struct device_reg_82xx  __iomem *qla4_8xxx_reg; /* Base I/O address */
+       struct device_reg_82xx  __iomem *qla4_82xx_reg; /* Base I/O address */
        unsigned long nx_pcibase;       /* Base I/O address */
        uint8_t *nx_db_rd_ptr;          /* Doorbell read pointer */
        unsigned long nx_db_wr_ptr;     /* Door bell write pointer */
@@ -733,6 +750,13 @@ struct scsi_qla_host {
 #define MAX_MRB                128
        struct mrb *active_mrb_array[MAX_MRB];
        uint32_t mrb_index;
+
+       uint32_t *reg_tbl;
+       struct qla4_83xx_reset_template reset_tmplt;
+       struct device_reg_83xx  __iomem *qla4_83xx_reg; /* Base I/O address
+                                                          for ISP8324 */
+       uint32_t pf_bit;
+       struct qla4_83xx_idc_information idc_info;
 };
 
 struct ql4_task_data {
@@ -752,7 +776,7 @@ struct ql4_task_data {
 
 struct qla_endpoint {
        struct Scsi_Host *host;
-       struct sockaddr dst_addr;
+       struct sockaddr_storage dst_addr;
 };
 
 struct qla_conn {
@@ -795,13 +819,20 @@ static inline int is_qla8022(struct scsi_qla_host *ha)
        return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8022;
 }
 
-/* Note: Currently AER/EEH is now supported only for 8022 cards
- * This function needs to be updated when AER/EEH is enabled
- * for other cards.
- */
+static inline int is_qla8032(struct scsi_qla_host *ha)
+{
+       return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8324;
+}
+
+static inline int is_qla80XX(struct scsi_qla_host *ha)
+{
+       return is_qla8022(ha) || is_qla8032(ha);
+}
+
 static inline int is_aer_supported(struct scsi_qla_host *ha)
 {
-       return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8022;
+       return ((ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8022) ||
+               (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8324));
 }
 
 static inline int adapter_up(struct scsi_qla_host *ha)
@@ -942,6 +973,20 @@ static inline int ql4xxx_reset_active(struct scsi_qla_host *ha)
               test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags);
 
 }
+
+static inline int qla4_8xxx_rd_direct(struct scsi_qla_host *ha,
+                                     const uint32_t crb_reg)
+{
+       return ha->isp_ops->rd_reg_direct(ha, ha->reg_tbl[crb_reg]);
+}
+
+static inline void qla4_8xxx_wr_direct(struct scsi_qla_host *ha,
+                                      const uint32_t crb_reg,
+                                      const uint32_t value)
+{
+       ha->isp_ops->wr_reg_direct(ha, ha->reg_tbl[crb_reg], value);
+}
+
 /*---------------------------------------------------------------------------*/
 
 /* Defines for qla4xxx_initialize_adapter() and qla4xxx_recover_adapter() */
index 7240948fb929bcb557398ecd774fe9fc36c7fae3..1c47950203573e096768d371dd1c131f2aa35e7e 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic iSCSI HBA Driver
- * Copyright (c)  2003-2010 QLogic Corporation
+ * Copyright (c)  2003-2012 QLogic Corporation
  *
  * See LICENSE.qla4xxx for copyright and licensing details.
  */
@@ -65,6 +65,40 @@ struct device_reg_82xx {
 #define ISRX_82XX_RISC_INT     BIT_0 /* RISC interrupt. */
 };
 
+/* ISP 83xx I/O Register Set structure */
+struct device_reg_83xx {
+       __le32 mailbox_in[16];  /* 0x0000 */
+       __le32 reserve1[496];   /* 0x0040 */
+       __le32 mailbox_out[16]; /* 0x0800 */
+       __le32 reserve2[496];
+       __le32 mbox_int;        /* 0x1000 */
+       __le32 reserve3[63];
+       __le32 req_q_out;       /* 0x1100 */
+       __le32 reserve4[63];
+
+       __le32 rsp_q_in;        /* 0x1200 */
+       __le32 reserve5[1919];
+
+       __le32 req_q_in;        /* 0x3000 */
+       __le32 reserve6[3];
+       __le32 iocb_int_mask;   /* 0x3010 */
+       __le32 reserve7[3];
+       __le32 rsp_q_out;       /* 0x3020 */
+       __le32 reserve8[3];
+       __le32 anonymousbuff;   /* 0x3030 */
+       __le32 mb_int_mask;     /* 0x3034 */
+
+       __le32 host_intr;       /* 0x3038 - Host Interrupt Register */
+       __le32 risc_intr;       /* 0x303C - RISC Interrupt Register */
+       __le32 reserve9[544];
+       __le32 leg_int_ptr;     /* 0x38C0 - Legacy Interrupt Pointer Register */
+       __le32 leg_int_trig;    /* 0x38C4 - Legacy Interrupt Trigger Control */
+       __le32 leg_int_mask;    /* 0x38C8 - Legacy Interrupt Mask Register */
+};
+
+#define INT_ENABLE_FW_MB       (1 << 2)
+#define INT_MASK_FW_MB         (1 << 2)
+
 /*  remote register set (access via PCI memory read/write) */
 struct isp_reg {
 #define MBOX_REG_COUNT 8
@@ -356,6 +390,9 @@ struct qla_flt_region {
 #define LOGOUT_OPTION_CLOSE_SESSION            0x0002
 #define LOGOUT_OPTION_RELOGIN                  0x0004
 #define LOGOUT_OPTION_FREE_DDB                 0x0008
+#define MBOX_CMD_SET_PARAM                     0x0059
+#define SET_DRVR_VERSION                       0x200
+#define MAX_DRVR_VER_LEN                       24
 #define MBOX_CMD_EXECUTE_IOCB_A64              0x005A
 #define MBOX_CMD_INITIALIZE_FIRMWARE           0x0060
 #define MBOX_CMD_GET_INIT_FW_CTRL_BLOCK                0x0061
@@ -417,6 +454,10 @@ struct qla_flt_region {
 #define MBOX_CMD_GET_CRASH_RECORD              0x0076  /* 4010 only */
 #define MBOX_CMD_GET_CONN_EVENT_LOG            0x0077
 
+#define MBOX_CMD_IDC_ACK                       0x0101
+#define MBOX_CMD_PORT_RESET                    0x0120
+#define MBOX_CMD_SET_PORT_CONFIG               0x0122
+
 /*  Mailbox status definitions */
 #define MBOX_COMPLETION_STATUS                 4
 #define MBOX_STS_BUSY                          0x0007
@@ -453,6 +494,8 @@ struct qla_flt_region {
 #define MBOX_ASTS_IPV6_ND_PREFIX_IGNORED       0x802C
 #define MBOX_ASTS_IPV6_LCL_PREFIX_IGNORED      0x802D
 #define MBOX_ASTS_ICMPV6_ERROR_MSG_RCVD                0x802E
+#define MBOX_ASTS_IDC_COMPLETE                 0x8100
+#define MBOX_ASTS_IDC_NOTIFY                   0x8101
 #define MBOX_ASTS_TXSCVR_INSERTED              0x8130
 #define MBOX_ASTS_TXSCVR_REMOVED               0x8131
 
@@ -1195,9 +1238,12 @@ struct ql_iscsi_stats {
        uint8_t reserved2[264]; /* 0x0308 - 0x040F */
 };
 
-#define QLA82XX_DBG_STATE_ARRAY_LEN            16
-#define QLA82XX_DBG_CAP_SIZE_ARRAY_LEN         8
-#define QLA82XX_DBG_RSVD_ARRAY_LEN             8
+#define QLA8XXX_DBG_STATE_ARRAY_LEN            16
+#define QLA8XXX_DBG_CAP_SIZE_ARRAY_LEN         8
+#define QLA8XXX_DBG_RSVD_ARRAY_LEN             8
+#define QLA83XX_DBG_OCM_WNDREG_ARRAY_LEN       16
+#define QLA83XX_SS_OCM_WNDREG_INDEX            3
+#define QLA83XX_SS_PCI_INDEX                   0
 
 struct qla4_8xxx_minidump_template_hdr {
        uint32_t entry_type;
@@ -1214,8 +1260,9 @@ struct qla4_8xxx_minidump_template_hdr {
        uint32_t driver_info_word3;
        uint32_t driver_info_word4;
 
-       uint32_t saved_state_array[QLA82XX_DBG_STATE_ARRAY_LEN];
-       uint32_t capture_size_array[QLA82XX_DBG_CAP_SIZE_ARRAY_LEN];
+       uint32_t saved_state_array[QLA8XXX_DBG_STATE_ARRAY_LEN];
+       uint32_t capture_size_array[QLA8XXX_DBG_CAP_SIZE_ARRAY_LEN];
+       uint32_t ocm_window_reg[QLA83XX_DBG_OCM_WNDREG_ARRAY_LEN];
 };
 
 #endif /*  _QLA4X_FW_H */
index 5b2525c4139e3275b920d1e525a4f5807286241a..57a5a3cf5770d5e402d88db55feead1b417a3175 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic iSCSI HBA Driver
- * Copyright (c)  2003-2010 QLogic Corporation
+ * Copyright (c)  2003-2012 QLogic Corporation
  *
  * See LICENSE.qla4xxx for copyright and licensing details.
  */
@@ -109,28 +109,28 @@ uint8_t qla4xxx_update_local_ifcb(struct scsi_qla_host *ha,
 void qla4_8xxx_pci_config(struct scsi_qla_host *);
 int qla4_8xxx_iospace_config(struct scsi_qla_host *ha);
 int qla4_8xxx_load_risc(struct scsi_qla_host *);
-irqreturn_t qla4_8xxx_intr_handler(int irq, void *dev_id);
-void qla4_8xxx_queue_iocb(struct scsi_qla_host *ha);
-void qla4_8xxx_complete_iocb(struct scsi_qla_host *ha);
+irqreturn_t qla4_82xx_intr_handler(int irq, void *dev_id);
+void qla4_82xx_queue_iocb(struct scsi_qla_host *ha);
+void qla4_82xx_complete_iocb(struct scsi_qla_host *ha);
 
-int qla4_8xxx_crb_win_lock(struct scsi_qla_host *);
-void qla4_8xxx_crb_win_unlock(struct scsi_qla_host *);
-int qla4_8xxx_pci_get_crb_addr_2M(struct scsi_qla_host *, ulong *);
-void qla4_8xxx_wr_32(struct scsi_qla_host *, ulong, u32);
-int qla4_8xxx_rd_32(struct scsi_qla_host *, ulong);
-int qla4_8xxx_pci_mem_read_2M(struct scsi_qla_host *, u64, void *, int);
-int qla4_8xxx_pci_mem_write_2M(struct scsi_qla_host *ha, u64, void *, int);
-int qla4_8xxx_isp_reset(struct scsi_qla_host *ha);
-void qla4_8xxx_interrupt_service_routine(struct scsi_qla_host *ha,
+int qla4_82xx_crb_win_lock(struct scsi_qla_host *);
+void qla4_82xx_crb_win_unlock(struct scsi_qla_host *);
+int qla4_82xx_pci_get_crb_addr_2M(struct scsi_qla_host *, ulong *);
+void qla4_82xx_wr_32(struct scsi_qla_host *, ulong, u32);
+uint32_t qla4_82xx_rd_32(struct scsi_qla_host *, ulong);
+int qla4_82xx_pci_mem_read_2M(struct scsi_qla_host *, u64, void *, int);
+int qla4_82xx_pci_mem_write_2M(struct scsi_qla_host *ha, u64, void *, int);
+int qla4_82xx_isp_reset(struct scsi_qla_host *ha);
+void qla4_82xx_interrupt_service_routine(struct scsi_qla_host *ha,
                uint32_t intr_status);
-uint16_t qla4_8xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha);
-uint16_t qla4_8xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha);
+uint16_t qla4_82xx_rd_shdw_req_q_out(struct scsi_qla_host *ha);
+uint16_t qla4_82xx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha);
 int qla4_8xxx_get_sys_info(struct scsi_qla_host *ha);
 void qla4_8xxx_watchdog(struct scsi_qla_host *ha);
 int qla4_8xxx_stop_firmware(struct scsi_qla_host *ha);
 int qla4_8xxx_get_flash_info(struct scsi_qla_host *ha);
-void qla4_8xxx_enable_intrs(struct scsi_qla_host *ha);
-void qla4_8xxx_disable_intrs(struct scsi_qla_host *ha);
+void qla4_82xx_enable_intrs(struct scsi_qla_host *ha);
+void qla4_82xx_disable_intrs(struct scsi_qla_host *ha);
 int qla4_8xxx_enable_msix(struct scsi_qla_host *ha);
 void qla4_8xxx_disable_msix(struct scsi_qla_host *ha);
 irqreturn_t qla4_8xxx_msi_handler(int irq, void *dev_id);
@@ -138,8 +138,8 @@ irqreturn_t qla4_8xxx_default_intr_handler(int irq, void *dev_id);
 irqreturn_t qla4_8xxx_msix_rsp_q(int irq, void *dev_id);
 void qla4xxx_mark_all_devices_missing(struct scsi_qla_host *ha);
 void qla4xxx_dead_adapter_cleanup(struct scsi_qla_host *ha);
-int qla4_8xxx_idc_lock(struct scsi_qla_host *ha);
-void qla4_8xxx_idc_unlock(struct scsi_qla_host *ha);
+int qla4_82xx_idc_lock(struct scsi_qla_host *ha);
+void qla4_82xx_idc_unlock(struct scsi_qla_host *ha);
 int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha);
 void qla4_8xxx_need_qsnt_handler(struct scsi_qla_host *ha);
 void qla4_8xxx_clear_drv_active(struct scsi_qla_host *ha);
@@ -203,6 +203,62 @@ int qla4xxx_req_template_size(struct scsi_qla_host *ha);
 void qla4_8xxx_alloc_sysfs_attr(struct scsi_qla_host *ha);
 void qla4_8xxx_free_sysfs_attr(struct scsi_qla_host *ha);
 void qla4xxx_alloc_fw_dump(struct scsi_qla_host *ha);
+int qla4_82xx_try_start_fw(struct scsi_qla_host *ha);
+int qla4_8xxx_need_reset(struct scsi_qla_host *ha);
+int qla4_82xx_md_rd_32(struct scsi_qla_host *ha, uint32_t off, uint32_t *data);
+int qla4_82xx_md_wr_32(struct scsi_qla_host *ha, uint32_t off, uint32_t data);
+void qla4_82xx_rom_lock_recovery(struct scsi_qla_host *ha);
+void qla4_82xx_queue_mbox_cmd(struct scsi_qla_host *ha, uint32_t *mbx_cmd,
+                             int incount);
+void qla4_82xx_process_mbox_intr(struct scsi_qla_host *ha, int outcount);
+void qla4xxx_queue_mbox_cmd(struct scsi_qla_host *ha, uint32_t *mbx_cmd,
+                           int incount);
+void qla4xxx_process_mbox_intr(struct scsi_qla_host *ha, int outcount);
+void qla4_8xxx_dump_peg_reg(struct scsi_qla_host *ha);
+void qla4_83xx_disable_intrs(struct scsi_qla_host *ha);
+void qla4_83xx_enable_intrs(struct scsi_qla_host *ha);
+int qla4_83xx_start_firmware(struct scsi_qla_host *ha);
+irqreturn_t qla4_83xx_intr_handler(int irq, void *dev_id);
+void qla4_83xx_interrupt_service_routine(struct scsi_qla_host *ha,
+                                        uint32_t intr_status);
+int qla4_83xx_isp_reset(struct scsi_qla_host *ha);
+void qla4_83xx_queue_iocb(struct scsi_qla_host *ha);
+void qla4_83xx_complete_iocb(struct scsi_qla_host *ha);
+uint16_t qla4_83xx_rd_shdw_req_q_out(struct scsi_qla_host *ha);
+uint16_t qla4_83xx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha);
+uint32_t qla4_83xx_rd_reg(struct scsi_qla_host *ha, ulong addr);
+void qla4_83xx_wr_reg(struct scsi_qla_host *ha, ulong addr, uint32_t val);
+int qla4_83xx_rd_reg_indirect(struct scsi_qla_host *ha, uint32_t addr,
+                             uint32_t *data);
+int qla4_83xx_wr_reg_indirect(struct scsi_qla_host *ha, uint32_t addr,
+                             uint32_t data);
+int qla4_83xx_drv_lock(struct scsi_qla_host *ha);
+void qla4_83xx_drv_unlock(struct scsi_qla_host *ha);
+void qla4_83xx_rom_lock_recovery(struct scsi_qla_host *ha);
+void qla4_83xx_queue_mbox_cmd(struct scsi_qla_host *ha, uint32_t *mbx_cmd,
+                             int incount);
+void qla4_83xx_process_mbox_intr(struct scsi_qla_host *ha, int outcount);
+void qla4_83xx_read_reset_template(struct scsi_qla_host *ha);
+void qla4_83xx_set_idc_dontreset(struct scsi_qla_host *ha);
+int qla4_83xx_idc_dontreset(struct scsi_qla_host *ha);
+int qla4_83xx_lockless_flash_read_u32(struct scsi_qla_host *ha,
+                                     uint32_t flash_addr, uint8_t *p_data,
+                                     int u32_word_count);
+void qla4_83xx_clear_idc_dontreset(struct scsi_qla_host *ha);
+void qla4_83xx_need_reset_handler(struct scsi_qla_host *ha);
+int qla4_83xx_flash_read_u32(struct scsi_qla_host *ha, uint32_t flash_addr,
+                            uint8_t *p_data, int u32_word_count);
+void qla4_83xx_get_idc_param(struct scsi_qla_host *ha);
+void qla4_8xxx_set_rst_ready(struct scsi_qla_host *ha);
+void qla4_8xxx_clear_rst_ready(struct scsi_qla_host *ha);
+int qla4_8xxx_device_bootstrap(struct scsi_qla_host *ha);
+void qla4_8xxx_get_minidump(struct scsi_qla_host *ha);
+int qla4_8xxx_mbx_intr_disable(struct scsi_qla_host *ha);
+int qla4_8xxx_mbx_intr_enable(struct scsi_qla_host *ha);
+int qla4_8xxx_set_param(struct scsi_qla_host *ha, int param);
+int qla4_8xxx_update_idc_reg(struct scsi_qla_host *ha);
+int qla4_83xx_post_idc_ack(struct scsi_qla_host *ha);
+void qla4_83xx_disable_pause(struct scsi_qla_host *ha);
 
 extern int ql4xextended_error_logging;
 extern int ql4xdontresethba;
index ddd9472066cb39c0ca6df5c919f9ef132f2109e3..1aca1b4f70b820edd867ba566d883e4ec1b6793c 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic iSCSI HBA Driver
- * Copyright (c)  2003-2010 QLogic Corporation
+ * Copyright (c)  2003-2012 QLogic Corporation
  *
  * See LICENSE.qla4xxx for copyright and licensing details.
  */
@@ -102,11 +102,18 @@ int qla4xxx_init_rings(struct scsi_qla_host *ha)
 
        if (is_qla8022(ha)) {
                writel(0,
-                   (unsigned long  __iomem *)&ha->qla4_8xxx_reg->req_q_out);
+                   (unsigned long  __iomem *)&ha->qla4_82xx_reg->req_q_out);
                writel(0,
-                   (unsigned long  __iomem *)&ha->qla4_8xxx_reg->rsp_q_in);
+                   (unsigned long  __iomem *)&ha->qla4_82xx_reg->rsp_q_in);
                writel(0,
-                   (unsigned long  __iomem *)&ha->qla4_8xxx_reg->rsp_q_out);
+                   (unsigned long  __iomem *)&ha->qla4_82xx_reg->rsp_q_out);
+       } else if (is_qla8032(ha)) {
+               writel(0,
+                      (unsigned long __iomem *)&ha->qla4_83xx_reg->req_q_in);
+               writel(0,
+                      (unsigned long __iomem *)&ha->qla4_83xx_reg->rsp_q_in);
+               writel(0,
+                      (unsigned long __iomem *)&ha->qla4_83xx_reg->rsp_q_out);
        } else {
                /*
                 * Initialize DMA Shadow registers.  The firmware is really
@@ -524,7 +531,7 @@ static int qla4xxx_init_firmware(struct scsi_qla_host *ha)
        /* For 82xx, stop firmware before initializing because if BIOS
         * has previously initialized firmware, then driver's initialize
         * firmware will fail. */
-       if (is_qla8022(ha))
+       if (is_qla80XX(ha))
                qla4_8xxx_stop_firmware(ha);
 
        ql4_printk(KERN_INFO, ha, "Initializing firmware..\n");
@@ -537,7 +544,7 @@ static int qla4xxx_init_firmware(struct scsi_qla_host *ha)
        if (!qla4xxx_fw_ready(ha))
                return status;
 
-       if (is_qla8022(ha) && !test_bit(AF_INIT_DONE, &ha->flags))
+       if (is_qla80XX(ha) && !test_bit(AF_INIT_DONE, &ha->flags))
                qla4xxx_alloc_fw_dump(ha);
 
        return qla4xxx_get_firmware_status(ha);
@@ -946,9 +953,9 @@ int qla4xxx_initialize_adapter(struct scsi_qla_host *ha, int is_reset)
 
        set_bit(AF_ONLINE, &ha->flags);
 exit_init_hba:
-       if (is_qla8022(ha) && (status == QLA_ERROR)) {
+       if (is_qla80XX(ha) && (status == QLA_ERROR)) {
                /* Since interrupts are registered in start_firmware for
-                * 82xx, release them here if initialize_adapter fails */
+                * 80XX, release them here if initialize_adapter fails */
                qla4xxx_free_irqs(ha);
        }
 
index 62f90bdec5d542a7177b3c532d8c20d4039f3f95..6f4decd44c6a06db9ccff88e377b03ec0ada017a 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic iSCSI HBA Driver
- * Copyright (c)  2003-2010 QLogic Corporation
+ * Copyright (c)  2003-2012 QLogic Corporation
  *
  * See LICENSE.qla4xxx for copyright and licensing details.
  */
index 2a2022a6bb9bede3fb71f07988cd705932040dc7..f48f37a281d185c32dcc23790413109ffc158289 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic iSCSI HBA Driver
- * Copyright (c)  2003-2010 QLogic Corporation
+ * Copyright (c)  2003-2012 QLogic Corporation
  *
  * See LICENSE.qla4xxx for copyright and licensing details.
  */
@@ -192,35 +192,47 @@ static void qla4xxx_build_scsi_iocbs(struct srb *srb,
        }
 }
 
+void qla4_83xx_queue_iocb(struct scsi_qla_host *ha)
+{
+       writel(ha->request_in, &ha->qla4_83xx_reg->req_q_in);
+       readl(&ha->qla4_83xx_reg->req_q_in);
+}
+
+void qla4_83xx_complete_iocb(struct scsi_qla_host *ha)
+{
+       writel(ha->response_out, &ha->qla4_83xx_reg->rsp_q_out);
+       readl(&ha->qla4_83xx_reg->rsp_q_out);
+}
+
 /**
- * qla4_8xxx_queue_iocb - Tell ISP it's got new request(s)
+ * qla4_82xx_queue_iocb - Tell ISP it's got new request(s)
  * @ha: pointer to host adapter structure.
  *
  * This routine notifies the ISP that one or more new request
  * queue entries have been placed on the request queue.
  **/
-void qla4_8xxx_queue_iocb(struct scsi_qla_host *ha)
+void qla4_82xx_queue_iocb(struct scsi_qla_host *ha)
 {
        uint32_t dbval = 0;
 
        dbval = 0x14 | (ha->func_num << 5);
        dbval = dbval | (0 << 8) | (ha->request_in << 16);
 
-       qla4_8xxx_wr_32(ha, ha->nx_db_wr_ptr, ha->request_in);
+       qla4_82xx_wr_32(ha, ha->nx_db_wr_ptr, ha->request_in);
 }
 
 /**
- * qla4_8xxx_complete_iocb - Tell ISP we're done with response(s)
+ * qla4_82xx_complete_iocb - Tell ISP we're done with response(s)
  * @ha: pointer to host adapter structure.
  *
  * This routine notifies the ISP that one or more response/completion
  * queue entries have been processed by the driver.
  * This also clears the interrupt.
  **/
-void qla4_8xxx_complete_iocb(struct scsi_qla_host *ha)
+void qla4_82xx_complete_iocb(struct scsi_qla_host *ha)
 {
-       writel(ha->response_out, &ha->qla4_8xxx_reg->rsp_q_out);
-       readl(&ha->qla4_8xxx_reg->rsp_q_out);
+       writel(ha->response_out, &ha->qla4_82xx_reg->rsp_q_out);
+       readl(&ha->qla4_82xx_reg->rsp_q_out);
 }
 
 /**
index fc542a9bb106231ac9f4991388b2f9ee1e3d0d46..15ea81465ce4eaf7facddd6ecce4d682b36b6b5b 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic iSCSI HBA Driver
- * Copyright (c)  2003-2010 QLogic Corporation
+ * Copyright (c)  2003-2012 QLogic Corporation
  *
  * See LICENSE.qla4xxx for copyright and licensing details.
  */
@@ -126,7 +126,7 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
                ql4_printk(KERN_WARNING, ha, "%s invalid status entry: "
                           "handle=0x%0x, srb=%p\n", __func__,
                           sts_entry->handle, srb);
-               if (is_qla8022(ha))
+               if (is_qla80XX(ha))
                        set_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
                else
                        set_bit(DPC_RESET_HA, &ha->dpc_flags);
@@ -243,56 +243,72 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
 
                scsi_set_resid(cmd, residual);
 
-               /*
-                * If there is scsi_status, it takes precedense over
-                * underflow condition.
-                */
-               if (scsi_status != 0) {
-                       cmd->result = DID_OK << 16 | scsi_status;
+               if (sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_UNDER) {
+
+                       /* Both the firmware and target reported UNDERRUN:
+                        *
+                        * MID-LAYER UNDERFLOW case:
+                        * Some kernels do not properly detect midlayer
+                        * underflow, so we manually check it and return
+                        * ERROR if the minimum required data was not
+                        * received.
+                        *
+                        * ALL OTHER cases:
+                        * Fall thru to check scsi_status
+                        */
+                       if (!scsi_status && (scsi_bufflen(cmd) - residual) <
+                           cmd->underflow) {
+                               DEBUG2(ql4_printk(KERN_INFO, ha,
+                                                 "scsi%ld:%d:%d:%d: %s: Mid-layer Data underrun, xferlen = 0x%x,residual = 0x%x\n",
+                                                  ha->host_no,
+                                                  cmd->device->channel,
+                                                  cmd->device->id,
+                                                  cmd->device->lun, __func__,
+                                                  scsi_bufflen(cmd),
+                                                  residual));
 
-                       if (scsi_status != SCSI_CHECK_CONDITION)
+                               cmd->result = DID_ERROR << 16;
                                break;
+                       }
+
+               } else if (scsi_status != SAM_STAT_TASK_SET_FULL &&
+                          scsi_status != SAM_STAT_BUSY) {
 
-                       /* Copy Sense Data into sense buffer. */
-                       qla4xxx_copy_sense(ha, sts_entry, srb);
-               } else {
                        /*
-                        * If RISC reports underrun and target does not
-                        * report it then we must have a lost frame, so
-                        * tell upper layer to retry it by reporting a
-                        * bus busy.
+                        * The firmware reports UNDERRUN, but the target does
+                        * not report it:
+                        *
+                        *   scsi_status     |    host_byte       device_byte
+                        *                   |     (19:16)          (7:0)
+                        *   =============   |    =========       ===========
+                        *   TASK_SET_FULL   |    DID_OK          scsi_status
+                        *   BUSY            |    DID_OK          scsi_status
+                        *   ALL OTHERS      |    DID_ERROR       scsi_status
+                        *
+                        *   Note: If scsi_status is task set full or busy,
+                        *   then this else if would fall thru to check the
+                        *   scsi_status and return DID_OK.
                         */
-                       if ((sts_entry->iscsiFlags &
-                            ISCSI_FLAG_RESIDUAL_UNDER) == 0) {
-                               cmd->result = DID_BUS_BUSY << 16;
-                       } else if ((scsi_bufflen(cmd) - residual) <
-                                  cmd->underflow) {
-                               /*
-                                * Handle mid-layer underflow???
-                                *
-                                * For kernels less than 2.4, the driver must
-                                * return an error if an underflow is detected.
-                                * For kernels equal-to and above 2.4, the
-                                * mid-layer will appearantly handle the
-                                * underflow by detecting the residual count --
-                                * unfortunately, we do not see where this is
-                                * actually being done.  In the interim, we
-                                * will return DID_ERROR.
-                                */
-                               DEBUG2(printk("scsi%ld:%d:%d:%d: %s: "
-                                       "Mid-layer Data underrun1, "
-                                       "xferlen = 0x%x, "
-                                       "residual = 0x%x\n", ha->host_no,
-                                       cmd->device->channel,
-                                       cmd->device->id,
-                                       cmd->device->lun, __func__,
-                                       scsi_bufflen(cmd), residual));
 
-                               cmd->result = DID_ERROR << 16;
-                       } else {
-                               cmd->result = DID_OK << 16;
-                       }
+                       DEBUG2(ql4_printk(KERN_INFO, ha,
+                                         "scsi%ld:%d:%d:%d: %s: Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
+                                         ha->host_no,
+                                         cmd->device->channel,
+                                         cmd->device->id,
+                                         cmd->device->lun, __func__,
+                                         residual,
+                                         scsi_bufflen(cmd)));
+
+                       cmd->result = DID_ERROR << 16 | scsi_status;
+                       goto check_scsi_status;
                }
+
+               cmd->result = DID_OK << 16 | scsi_status;
+
+check_scsi_status:
+               if (scsi_status == SAM_STAT_CHECK_CONDITION)
+                       qla4xxx_copy_sense(ha, sts_entry, srb);
+
                break;
 
        case SCS_DEVICE_LOGGED_OUT:
@@ -578,6 +594,14 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
 {
        int i;
        uint32_t mbox_sts[MBOX_AEN_REG_COUNT];
+       __le32 __iomem *mailbox_out;
+
+       if (is_qla8032(ha))
+               mailbox_out = &ha->qla4_83xx_reg->mailbox_out[0];
+       else if (is_qla8022(ha))
+               mailbox_out = &ha->qla4_82xx_reg->mailbox_out[0];
+       else
+               mailbox_out = &ha->reg->mailbox[0];
 
        if ((mbox_status == MBOX_STS_BUSY) ||
            (mbox_status == MBOX_STS_INTERMEDIATE_COMPLETION) ||
@@ -590,9 +614,7 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
                         * location and set mailbox command done flag
                         */
                        for (i = 0; i < ha->mbox_status_count; i++)
-                               ha->mbox_status[i] = is_qla8022(ha)
-                                   ? readl(&ha->qla4_8xxx_reg->mailbox_out[i])
-                                   : readl(&ha->reg->mailbox[i]);
+                               ha->mbox_status[i] = readl(&mailbox_out[i]);
 
                        set_bit(AF_MBOX_COMMAND_DONE, &ha->flags);
 
@@ -601,9 +623,7 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
                }
        } else if (mbox_status >> 12 == MBOX_ASYNC_EVENT_STATUS) {
                for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
-                       mbox_sts[i] = is_qla8022(ha)
-                           ? readl(&ha->qla4_8xxx_reg->mailbox_out[i])
-                           : readl(&ha->reg->mailbox[i]);
+                       mbox_sts[i] = readl(&mailbox_out[i]);
 
                /* Immediately process the AENs that don't require much work.
                 * Only queue the database_changed AENs */
@@ -619,7 +639,8 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
                        ql4_printk(KERN_INFO, ha, "%s: System Err\n", __func__);
                        qla4xxx_dump_registers(ha);
 
-                       if (ql4xdontresethba) {
+                       if ((is_qla8022(ha) && ql4xdontresethba) ||
+                           (is_qla8032(ha) && qla4_83xx_idc_dontreset(ha))) {
                                DEBUG2(printk("scsi%ld: %s:Don't Reset HBA\n",
                                    ha->host_no, __func__));
                        } else {
@@ -635,7 +656,7 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
                case MBOX_ASTS_DHCP_LEASE_EXPIRED:
                        DEBUG2(printk("scsi%ld: AEN %04x, ERROR Status, "
                                      "Reset HA\n", ha->host_no, mbox_status));
-                       if (is_qla8022(ha))
+                       if (is_qla80XX(ha))
                                set_bit(DPC_RESET_HA_FW_CONTEXT,
                                        &ha->dpc_flags);
                        else
@@ -700,7 +721,7 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
                                set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags);
                        else if ((mbox_sts[3] == ACB_STATE_ACQUIRING) &&
                                 (mbox_sts[2] == ACB_STATE_VALID)) {
-                               if (is_qla8022(ha))
+                               if (is_qla80XX(ha))
                                        set_bit(DPC_RESET_HA_FW_CONTEXT,
                                                &ha->dpc_flags);
                                else
@@ -785,6 +806,43 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
                            " removed\n",  ha->host_no, mbox_sts[0]));
                        break;
 
+               case MBOX_ASTS_IDC_NOTIFY:
+               {
+                       uint32_t opcode;
+                       if (is_qla8032(ha)) {
+                               DEBUG2(ql4_printk(KERN_INFO, ha,
+                                                 "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x\n",
+                                                 ha->host_no, mbox_sts[0],
+                                                 mbox_sts[1], mbox_sts[2],
+                                                 mbox_sts[3], mbox_sts[4]));
+                               opcode = mbox_sts[1] >> 16;
+                               if ((opcode == MBOX_CMD_SET_PORT_CONFIG) ||
+                                   (opcode == MBOX_CMD_PORT_RESET)) {
+                                       set_bit(DPC_POST_IDC_ACK,
+                                               &ha->dpc_flags);
+                                       ha->idc_info.request_desc = mbox_sts[1];
+                                       ha->idc_info.info1 = mbox_sts[2];
+                                       ha->idc_info.info2 = mbox_sts[3];
+                                       ha->idc_info.info3 = mbox_sts[4];
+                                       qla4xxx_wake_dpc(ha);
+                               }
+                       }
+                       break;
+               }
+
+               case MBOX_ASTS_IDC_COMPLETE:
+                       if (is_qla8032(ha)) {
+                               DEBUG2(ql4_printk(KERN_INFO, ha,
+                                                 "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x\n",
+                                                 ha->host_no, mbox_sts[0],
+                                                 mbox_sts[1], mbox_sts[2],
+                                                 mbox_sts[3], mbox_sts[4]));
+                               DEBUG2(ql4_printk(KERN_INFO, ha,
+                                                 "scsi:%ld: AEN %04x IDC Complete notification\n",
+                                                 ha->host_no, mbox_sts[0]));
+                       }
+                       break;
+
                default:
                        DEBUG2(printk(KERN_WARNING
                                      "scsi%ld: AEN %04x UNKNOWN\n",
@@ -799,14 +857,31 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
        }
 }
 
+void qla4_83xx_interrupt_service_routine(struct scsi_qla_host *ha,
+                                        uint32_t intr_status)
+{
+       /* Process mailbox/asynch event interrupt.*/
+       if (intr_status) {
+               qla4xxx_isr_decode_mailbox(ha,
+                               readl(&ha->qla4_83xx_reg->mailbox_out[0]));
+               /* clear the interrupt */
+               writel(0, &ha->qla4_83xx_reg->risc_intr);
+       } else {
+               qla4xxx_process_response_queue(ha);
+       }
+
+       /* clear the interrupt */
+       writel(0, &ha->qla4_83xx_reg->mb_int_mask);
+}
+
 /**
- * qla4_8xxx_interrupt_service_routine - isr
+ * qla4_82xx_interrupt_service_routine - isr
  * @ha: pointer to host adapter structure.
  *
  * This is the main interrupt service routine.
  * hardware_lock locked upon entry. runs in interrupt context.
  **/
-void qla4_8xxx_interrupt_service_routine(struct scsi_qla_host *ha,
+void qla4_82xx_interrupt_service_routine(struct scsi_qla_host *ha,
     uint32_t intr_status)
 {
        /* Process response queue interrupt. */
@@ -816,11 +891,11 @@ void qla4_8xxx_interrupt_service_routine(struct scsi_qla_host *ha,
        /* Process mailbox/asynch event interrupt.*/
        if (intr_status & HSRX_RISC_MB_INT)
                qla4xxx_isr_decode_mailbox(ha,
-                   readl(&ha->qla4_8xxx_reg->mailbox_out[0]));
+                   readl(&ha->qla4_82xx_reg->mailbox_out[0]));
 
        /* clear the interrupt */
-       writel(0, &ha->qla4_8xxx_reg->host_int);
-       readl(&ha->qla4_8xxx_reg->host_int);
+       writel(0, &ha->qla4_82xx_reg->host_int);
+       readl(&ha->qla4_82xx_reg->host_int);
 }
 
 /**
@@ -850,12 +925,12 @@ void qla4xxx_interrupt_service_routine(struct scsi_qla_host * ha,
 }
 
 /**
- * qla4_8xxx_spurious_interrupt - processes spurious interrupt
+ * qla4_82xx_spurious_interrupt - processes spurious interrupt
  * @ha: pointer to host adapter structure.
  * @reqs_count: .
  *
  **/
-static void qla4_8xxx_spurious_interrupt(struct scsi_qla_host *ha,
+static void qla4_82xx_spurious_interrupt(struct scsi_qla_host *ha,
     uint8_t reqs_count)
 {
        if (reqs_count)
@@ -863,9 +938,9 @@ static void qla4_8xxx_spurious_interrupt(struct scsi_qla_host *ha,
 
        DEBUG2(ql4_printk(KERN_INFO, ha, "Spurious Interrupt\n"));
        if (is_qla8022(ha)) {
-               writel(0, &ha->qla4_8xxx_reg->host_int);
+               writel(0, &ha->qla4_82xx_reg->host_int);
                if (test_bit(AF_INTx_ENABLED, &ha->flags))
-                       qla4_8xxx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg,
+                       qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg,
                            0xfbff);
        }
        ha->spurious_int_count++;
@@ -968,11 +1043,11 @@ irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id)
 }
 
 /**
- * qla4_8xxx_intr_handler - hardware interrupt handler.
+ * qla4_82xx_intr_handler - hardware interrupt handler.
  * @irq: Unused
  * @dev_id: Pointer to host adapter structure
  **/
-irqreturn_t qla4_8xxx_intr_handler(int irq, void *dev_id)
+irqreturn_t qla4_82xx_intr_handler(int irq, void *dev_id)
 {
        struct scsi_qla_host *ha = dev_id;
        uint32_t intr_status;
@@ -984,11 +1059,11 @@ irqreturn_t qla4_8xxx_intr_handler(int irq, void *dev_id)
                return IRQ_HANDLED;
 
        ha->isr_count++;
-       status = qla4_8xxx_rd_32(ha, ISR_INT_VECTOR);
+       status = qla4_82xx_rd_32(ha, ISR_INT_VECTOR);
        if (!(status & ha->nx_legacy_intr.int_vec_bit))
                return IRQ_NONE;
 
-       status = qla4_8xxx_rd_32(ha, ISR_INT_STATE_REG);
+       status = qla4_82xx_rd_32(ha, ISR_INT_STATE_REG);
        if (!ISR_IS_LEGACY_INTR_TRIGGERED(status)) {
                DEBUG2(ql4_printk(KERN_INFO, ha,
                    "%s legacy Int not triggered\n", __func__));
@@ -996,30 +1071,30 @@ irqreturn_t qla4_8xxx_intr_handler(int irq, void *dev_id)
        }
 
        /* clear the interrupt */
-       qla4_8xxx_wr_32(ha, ha->nx_legacy_intr.tgt_status_reg, 0xffffffff);
+       qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_status_reg, 0xffffffff);
 
        /* read twice to ensure write is flushed */
-       qla4_8xxx_rd_32(ha, ISR_INT_VECTOR);
-       qla4_8xxx_rd_32(ha, ISR_INT_VECTOR);
+       qla4_82xx_rd_32(ha, ISR_INT_VECTOR);
+       qla4_82xx_rd_32(ha, ISR_INT_VECTOR);
 
        spin_lock_irqsave(&ha->hardware_lock, flags);
        while (1) {
-               if (!(readl(&ha->qla4_8xxx_reg->host_int) &
+               if (!(readl(&ha->qla4_82xx_reg->host_int) &
                    ISRX_82XX_RISC_INT)) {
-                       qla4_8xxx_spurious_interrupt(ha, reqs_count);
+                       qla4_82xx_spurious_interrupt(ha, reqs_count);
                        break;
                }
-               intr_status =  readl(&ha->qla4_8xxx_reg->host_status);
+               intr_status =  readl(&ha->qla4_82xx_reg->host_status);
                if ((intr_status &
                    (HSRX_RISC_MB_INT | HSRX_RISC_IOCB_INT)) == 0)  {
-                       qla4_8xxx_spurious_interrupt(ha, reqs_count);
+                       qla4_82xx_spurious_interrupt(ha, reqs_count);
                        break;
                }
 
                ha->isp_ops->interrupt_service_routine(ha, intr_status);
 
                /* Enable Interrupt */
-               qla4_8xxx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
+               qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
 
                if (++reqs_count == MAX_REQS_SERVICED_PER_INTR)
                        break;
@@ -1029,6 +1104,59 @@ irqreturn_t qla4_8xxx_intr_handler(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
+#define LEG_INT_PTR_B31                (1 << 31)
+#define LEG_INT_PTR_B30                (1 << 30)
+#define PF_BITS_MASK           (0xF << 16)
+
+/**
+ * qla4_83xx_intr_handler - hardware interrupt handler.
+ * @irq: Unused
+ * @dev_id: Pointer to host adapter structure
+ **/
+irqreturn_t qla4_83xx_intr_handler(int irq, void *dev_id)
+{
+       struct scsi_qla_host *ha = dev_id;
+       uint32_t leg_int_ptr = 0;
+       unsigned long flags = 0;
+
+       ha->isr_count++;
+       leg_int_ptr = readl(&ha->qla4_83xx_reg->leg_int_ptr);
+
+       /* Legacy interrupt is valid if bit31 of leg_int_ptr is set */
+       if (!(leg_int_ptr & LEG_INT_PTR_B31)) {
+               ql4_printk(KERN_ERR, ha,
+                          "%s: Legacy Interrupt Bit 31 not set, spurious interrupt!\n",
+                          __func__);
+               return IRQ_NONE;
+       }
+
+       /* Validate the PCIE function ID set in leg_int_ptr bits [19..16] */
+       if ((leg_int_ptr & PF_BITS_MASK) != ha->pf_bit) {
+               ql4_printk(KERN_ERR, ha,
+                          "%s: Incorrect function ID 0x%x in legacy interrupt register, ha->pf_bit = 0x%x\n",
+                          __func__, (leg_int_ptr & PF_BITS_MASK), ha->pf_bit);
+               return IRQ_NONE;
+       }
+
+       /* To de-assert legacy interrupt, write 0 to Legacy Interrupt Trigger
+        * Control register and poll till Legacy Interrupt Pointer register
+        * bit30 is 0.
+        */
+       writel(0, &ha->qla4_83xx_reg->leg_int_trig);
+       do {
+               leg_int_ptr = readl(&ha->qla4_83xx_reg->leg_int_ptr);
+               if ((leg_int_ptr & PF_BITS_MASK) != ha->pf_bit)
+                       break;
+       } while (leg_int_ptr & LEG_INT_PTR_B30);
+
+       spin_lock_irqsave(&ha->hardware_lock, flags);
+       leg_int_ptr = readl(&ha->qla4_83xx_reg->risc_intr);
+       ha->isp_ops->interrupt_service_routine(ha, leg_int_ptr);
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+       return IRQ_HANDLED;
+}
+
 irqreturn_t
 qla4_8xxx_msi_handler(int irq, void *dev_id)
 {
@@ -1043,15 +1171,46 @@ qla4_8xxx_msi_handler(int irq, void *dev_id)
 
        ha->isr_count++;
        /* clear the interrupt */
-       qla4_8xxx_wr_32(ha, ha->nx_legacy_intr.tgt_status_reg, 0xffffffff);
+       qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_status_reg, 0xffffffff);
 
        /* read twice to ensure write is flushed */
-       qla4_8xxx_rd_32(ha, ISR_INT_VECTOR);
-       qla4_8xxx_rd_32(ha, ISR_INT_VECTOR);
+       qla4_82xx_rd_32(ha, ISR_INT_VECTOR);
+       qla4_82xx_rd_32(ha, ISR_INT_VECTOR);
 
        return qla4_8xxx_default_intr_handler(irq, dev_id);
 }
 
+static irqreturn_t qla4_83xx_mailbox_intr_handler(int irq, void *dev_id)
+{
+       struct scsi_qla_host *ha = dev_id;
+       unsigned long flags;
+       uint32_t ival = 0;
+
+       spin_lock_irqsave(&ha->hardware_lock, flags);
+
+       ival = readl(&ha->qla4_83xx_reg->risc_intr);
+       if (ival == 0) {
+               ql4_printk(KERN_INFO, ha,
+                          "%s: It is a spurious mailbox interrupt!\n",
+                          __func__);
+               ival = readl(&ha->qla4_83xx_reg->mb_int_mask);
+               ival &= ~INT_MASK_FW_MB;
+               writel(ival, &ha->qla4_83xx_reg->mb_int_mask);
+               goto exit;
+       }
+
+       qla4xxx_isr_decode_mailbox(ha,
+                                  readl(&ha->qla4_83xx_reg->mailbox_out[0]));
+       writel(0, &ha->qla4_83xx_reg->risc_intr);
+       ival = readl(&ha->qla4_83xx_reg->mb_int_mask);
+       ival &= ~INT_MASK_FW_MB;
+       writel(ival, &ha->qla4_83xx_reg->mb_int_mask);
+       ha->isr_count++;
+exit:
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+       return IRQ_HANDLED;
+}
+
 /**
  * qla4_8xxx_default_intr_handler - hardware interrupt handler.
  * @irq: Unused
@@ -1068,29 +1227,32 @@ qla4_8xxx_default_intr_handler(int irq, void *dev_id)
        uint32_t intr_status;
        uint8_t reqs_count = 0;
 
-       spin_lock_irqsave(&ha->hardware_lock, flags);
-       while (1) {
-               if (!(readl(&ha->qla4_8xxx_reg->host_int) &
-                   ISRX_82XX_RISC_INT)) {
-                       qla4_8xxx_spurious_interrupt(ha, reqs_count);
-                       break;
-               }
+       if (is_qla8032(ha)) {
+               qla4_83xx_mailbox_intr_handler(irq, dev_id);
+       } else {
+               spin_lock_irqsave(&ha->hardware_lock, flags);
+               while (1) {
+                       if (!(readl(&ha->qla4_82xx_reg->host_int) &
+                           ISRX_82XX_RISC_INT)) {
+                               qla4_82xx_spurious_interrupt(ha, reqs_count);
+                               break;
+                       }
 
-               intr_status =  readl(&ha->qla4_8xxx_reg->host_status);
-               if ((intr_status &
-                   (HSRX_RISC_MB_INT | HSRX_RISC_IOCB_INT)) == 0) {
-                       qla4_8xxx_spurious_interrupt(ha, reqs_count);
-                       break;
-               }
+                       intr_status =  readl(&ha->qla4_82xx_reg->host_status);
+                       if ((intr_status &
+                           (HSRX_RISC_MB_INT | HSRX_RISC_IOCB_INT)) == 0) {
+                               qla4_82xx_spurious_interrupt(ha, reqs_count);
+                               break;
+                       }
 
-               ha->isp_ops->interrupt_service_routine(ha, intr_status);
+                       ha->isp_ops->interrupt_service_routine(ha, intr_status);
 
-               if (++reqs_count == MAX_REQS_SERVICED_PER_INTR)
-                       break;
+                       if (++reqs_count == MAX_REQS_SERVICED_PER_INTR)
+                               break;
+               }
+               ha->isr_count++;
+               spin_unlock_irqrestore(&ha->hardware_lock, flags);
        }
-
-       ha->isr_count++;
-       spin_unlock_irqrestore(&ha->hardware_lock, flags);
        return IRQ_HANDLED;
 }
 
@@ -1099,13 +1261,25 @@ qla4_8xxx_msix_rsp_q(int irq, void *dev_id)
 {
        struct scsi_qla_host *ha = dev_id;
        unsigned long flags;
+       uint32_t ival = 0;
 
        spin_lock_irqsave(&ha->hardware_lock, flags);
-       qla4xxx_process_response_queue(ha);
-       writel(0, &ha->qla4_8xxx_reg->host_int);
-       spin_unlock_irqrestore(&ha->hardware_lock, flags);
-
+       if (is_qla8032(ha)) {
+               ival = readl(&ha->qla4_83xx_reg->iocb_int_mask);
+               if (ival == 0) {
+                       ql4_printk(KERN_INFO, ha, "%s: It is a spurious iocb interrupt!\n",
+                                  __func__);
+                       goto exit_msix_rsp_q;
+               }
+               qla4xxx_process_response_queue(ha);
+               writel(0, &ha->qla4_83xx_reg->iocb_int_mask);
+       } else {
+               qla4xxx_process_response_queue(ha);
+               writel(0, &ha->qla4_82xx_reg->host_int);
+       }
        ha->isr_count++;
+exit_msix_rsp_q:
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
        return IRQ_HANDLED;
 }
 
@@ -1177,11 +1351,18 @@ int qla4xxx_request_irqs(struct scsi_qla_host *ha)
 {
        int ret;
 
-       if (!is_qla8022(ha))
+       if (is_qla40XX(ha))
                goto try_intx;
 
-       if (ql4xenablemsix == 2)
+       if (ql4xenablemsix == 2) {
+               /* Note: MSI Interrupts not supported for ISP8324 */
+               if (is_qla8032(ha)) {
+                       ql4_printk(KERN_INFO, ha, "%s: MSI Interrupts not supported for ISP8324, Falling back-to INTx mode\n",
+                                  __func__);
+                       goto try_intx;
+               }
                goto try_msi;
+       }
 
        if (ql4xenablemsix == 0 || ql4xenablemsix != 1)
                goto try_intx;
@@ -1192,6 +1373,12 @@ int qla4xxx_request_irqs(struct scsi_qla_host *ha)
                DEBUG2(ql4_printk(KERN_INFO, ha,
                    "MSI-X: Enabled (0x%X).\n", ha->revision_id));
                goto irq_attached;
+       } else {
+               if (is_qla8032(ha)) {
+                       ql4_printk(KERN_INFO, ha, "%s: ISP8324: MSI-X: Falling back-to INTx mode. ret = %d\n",
+                                  __func__, ret);
+                       goto try_intx;
+               }
        }
 
        ql4_printk(KERN_WARNING, ha,
@@ -1214,9 +1401,15 @@ try_msi:
                        pci_disable_msi(ha->pdev);
                }
        }
-       ql4_printk(KERN_WARNING, ha,
-           "MSI: Falling back-to INTx mode -- %d.\n", ret);
 
+       /*
+        * Prevent interrupts from falling back to INTx mode in cases where
+        * interrupts cannot get acquired through MSI-X or MSI mode.
+        */
+       if (is_qla8022(ha)) {
+               ql4_printk(KERN_WARNING, ha, "IRQ not attached -- %d.\n", ret);
+               goto irq_not_attached;
+       }
 try_intx:
        /* Trying INTx */
        ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
@@ -1230,7 +1423,7 @@ try_intx:
                ql4_printk(KERN_WARNING, ha,
                    "INTx: Failed to reserve interrupt %d already in"
                    " use.\n", ha->pdev->irq);
-               return ret;
+               goto irq_not_attached;
        }
 
 irq_attached:
@@ -1238,6 +1431,7 @@ irq_attached:
        ha->host->irq = ha->pdev->irq;
        ql4_printk(KERN_INFO, ha, "%s: irq %d attached\n",
            __func__, ha->pdev->irq);
+irq_not_attached:
        return ret;
 }
 
index cab8f665a41faca343dba8f404e01ba96075abaf..3d41034191f02588b6c94cc4ec7c4d6d53fb0650 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic iSCSI HBA Driver
- * Copyright (c)  2003-2010 QLogic Corporation
+ * Copyright (c)  2003-2012 QLogic Corporation
  *
  * See LICENSE.qla4xxx for copyright and licensing details.
  */
@@ -9,7 +9,39 @@
 #include "ql4_glbl.h"
 #include "ql4_dbg.h"
 #include "ql4_inline.h"
+#include "ql4_version.h"
 
+void qla4xxx_queue_mbox_cmd(struct scsi_qla_host *ha, uint32_t *mbx_cmd,
+                           int in_count)
+{
+       int i;
+
+       /* Load all mailbox registers, except mailbox 0. */
+       for (i = 1; i < in_count; i++)
+               writel(mbx_cmd[i], &ha->reg->mailbox[i]);
+
+       /* Wakeup firmware  */
+       writel(mbx_cmd[0], &ha->reg->mailbox[0]);
+       readl(&ha->reg->mailbox[0]);
+       writel(set_rmask(CSR_INTR_RISC), &ha->reg->ctrl_status);
+       readl(&ha->reg->ctrl_status);
+}
+
+void qla4xxx_process_mbox_intr(struct scsi_qla_host *ha, int out_count)
+{
+       int intr_status;
+
+       intr_status = readl(&ha->reg->ctrl_status);
+       if (intr_status & INTR_PENDING) {
+               /*
+                * Service the interrupt.
+                * The ISR will save the mailbox status registers
+                * to a temporary storage location in the adapter structure.
+                */
+               ha->mbox_status_count = out_count;
+               ha->isp_ops->interrupt_service_routine(ha, intr_status);
+       }
+}
 
 /**
  * qla4xxx_mailbox_command - issues mailbox commands
@@ -30,7 +62,6 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
        int status = QLA_ERROR;
        uint8_t i;
        u_long wait_count;
-       uint32_t intr_status;
        unsigned long flags = 0;
        uint32_t dev_state;
 
@@ -77,7 +108,7 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
                msleep(10);
        }
 
-       if (is_qla8022(ha)) {
+       if (is_qla80XX(ha)) {
                if (test_bit(AF_FW_RECOVERY, &ha->flags)) {
                        DEBUG2(ql4_printk(KERN_WARNING, ha,
                                          "scsi%ld: %s: prematurely completing mbx cmd as firmware recovery detected\n",
@@ -85,10 +116,10 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
                        goto mbox_exit;
                }
                /* Do not send any mbx cmd if h/w is in failed state*/
-               qla4_8xxx_idc_lock(ha);
-               dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
-               qla4_8xxx_idc_unlock(ha);
-               if (dev_state == QLA82XX_DEV_FAILED) {
+               ha->isp_ops->idc_lock(ha);
+               dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE);
+               ha->isp_ops->idc_unlock(ha);
+               if (dev_state == QLA8XXX_DEV_FAILED) {
                        ql4_printk(KERN_WARNING, ha,
                                   "scsi%ld: %s: H/W is in failed state, do not send any mailbox commands\n",
                                   ha->host_no, __func__);
@@ -102,30 +133,8 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
        for (i = 0; i < outCount; i++)
                ha->mbox_status[i] = 0;
 
-       if (is_qla8022(ha)) {
-               /* Load all mailbox registers, except mailbox 0. */
-               DEBUG5(
-                   printk("scsi%ld: %s: Cmd ", ha->host_no, __func__);
-                   for (i = 0; i < inCount; i++)
-                       printk("mb%d=%04x ", i, mbx_cmd[i]);
-                   printk("\n"));
-
-               for (i = 1; i < inCount; i++)
-                       writel(mbx_cmd[i], &ha->qla4_8xxx_reg->mailbox_in[i]);
-               writel(mbx_cmd[0], &ha->qla4_8xxx_reg->mailbox_in[0]);
-               readl(&ha->qla4_8xxx_reg->mailbox_in[0]);
-               writel(HINT_MBX_INT_PENDING, &ha->qla4_8xxx_reg->hint);
-       } else {
-               /* Load all mailbox registers, except mailbox 0. */
-               for (i = 1; i < inCount; i++)
-                       writel(mbx_cmd[i], &ha->reg->mailbox[i]);
-
-               /* Wakeup firmware  */
-               writel(mbx_cmd[0], &ha->reg->mailbox[0]);
-               readl(&ha->reg->mailbox[0]);
-               writel(set_rmask(CSR_INTR_RISC), &ha->reg->ctrl_status);
-               readl(&ha->reg->ctrl_status);
-       }
+       /* Queue the mailbox command to the firmware */
+       ha->isp_ops->queue_mailbox_command(ha, mbx_cmd, inCount);
 
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
@@ -167,37 +176,7 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
                         */
 
                        spin_lock_irqsave(&ha->hardware_lock, flags);
-                       if (is_qla8022(ha)) {
-                               intr_status =
-                                   readl(&ha->qla4_8xxx_reg->host_int);
-                               if (intr_status & ISRX_82XX_RISC_INT) {
-                                       ha->mbox_status_count = outCount;
-                                       intr_status =
-                                        readl(&ha->qla4_8xxx_reg->host_status);
-                                       ha->isp_ops->interrupt_service_routine(
-                                           ha, intr_status);
-                                       if (test_bit(AF_INTERRUPTS_ON,
-                                           &ha->flags) &&
-                                           test_bit(AF_INTx_ENABLED,
-                                           &ha->flags))
-                                               qla4_8xxx_wr_32(ha,
-                                               ha->nx_legacy_intr.tgt_mask_reg,
-                                               0xfbff);
-                               }
-                       } else {
-                               intr_status = readl(&ha->reg->ctrl_status);
-                               if (intr_status & INTR_PENDING) {
-                                       /*
-                                        * Service the interrupt.
-                                        * The ISR will save the mailbox status
-                                        * registers to a temporary storage
-                                        * location in the adapter structure.
-                                        */
-                                       ha->mbox_status_count = outCount;
-                                       ha->isp_ops->interrupt_service_routine(
-                                           ha, intr_status);
-                               }
-                       }
+                       ha->isp_ops->process_mailbox_interrupt(ha, outCount);
                        spin_unlock_irqrestore(&ha->hardware_lock, flags);
                        msleep(10);
                }
@@ -205,7 +184,7 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
 
        /* Check for mailbox timeout. */
        if (!test_bit(AF_MBOX_COMMAND_DONE, &ha->flags)) {
-               if (is_qla8022(ha) &&
+               if (is_qla80XX(ha) &&
                    test_bit(AF_FW_RECOVERY, &ha->flags)) {
                        DEBUG2(ql4_printk(KERN_INFO, ha,
                            "scsi%ld: %s: prematurely completing mbx cmd as "
@@ -222,9 +201,13 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
                if (is_qla8022(ha)) {
                        ql4_printk(KERN_INFO, ha,
                                   "disabling pause transmit on port 0 & 1.\n");
-                       qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
+                       qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
                                        CRB_NIU_XG_PAUSE_CTL_P0 |
                                        CRB_NIU_XG_PAUSE_CTL_P1);
+               } else if (is_qla8032(ha)) {
+                       ql4_printk(KERN_INFO, ha, " %s: disabling pause transmit on port 0 & 1.\n",
+                                  __func__);
+                       qla4_83xx_disable_pause(ha);
                }
                goto mbox_exit;
        }
@@ -373,7 +356,7 @@ qla4xxx_set_ifcb(struct scsi_qla_host *ha, uint32_t *mbox_cmd,
        memset(mbox_sts, 0, sizeof(mbox_sts[0]) * MBOX_REG_COUNT);
 
        if (is_qla8022(ha))
-               qla4_8xxx_wr_32(ha, ha->nx_db_wr_ptr, 0);
+               qla4_82xx_wr_32(ha, ha->nx_db_wr_ptr, 0);
 
        mbox_cmd[0] = MBOX_CMD_INITIALIZE_FIRMWARE;
        mbox_cmd[1] = 0;
@@ -566,7 +549,7 @@ int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha)
                __constant_cpu_to_le16(FWOPT_SESSION_MODE |
                                       FWOPT_INITIATOR_MODE);
 
-       if (is_qla8022(ha))
+       if (is_qla80XX(ha))
                init_fw_cb->fw_options |=
                    __constant_cpu_to_le16(FWOPT_ENABLE_CRBDB);
 
@@ -1695,7 +1678,7 @@ int qla4xxx_set_param_ddbentry(struct scsi_qla_host *ha,
        conn = cls_conn->dd_data;
        qla_conn = conn->dd_data;
        sess = conn->session;
-       dst_addr = &qla_conn->qla_ep->dst_addr;
+       dst_addr = (struct sockaddr *)&qla_conn->qla_ep->dst_addr;
 
        if (dst_addr->sa_family == AF_INET6)
                options |= IPV6_DEFAULT_DDB_ENTRY;
@@ -1953,3 +1936,72 @@ int qla4xxx_restore_factory_defaults(struct scsi_qla_host *ha,
        }
        return status;
 }
+
+/**
+ * qla4_8xxx_set_param - set driver version in firmware.
+ * @ha: Pointer to host adapter structure.
+ * @param: Parameter to set i.e driver version
+ **/
+int qla4_8xxx_set_param(struct scsi_qla_host *ha, int param)
+{
+       uint32_t mbox_cmd[MBOX_REG_COUNT];
+       uint32_t mbox_sts[MBOX_REG_COUNT];
+       uint32_t status;
+
+       memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+       memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+       mbox_cmd[0] = MBOX_CMD_SET_PARAM;
+       if (param == SET_DRVR_VERSION) {
+               mbox_cmd[1] = SET_DRVR_VERSION;
+               strncpy((char *)&mbox_cmd[2], QLA4XXX_DRIVER_VERSION,
+                       MAX_DRVR_VER_LEN);
+       } else {
+               ql4_printk(KERN_ERR, ha, "%s: invalid parameter 0x%x\n",
+                          __func__, param);
+               status = QLA_ERROR;
+               goto exit_set_param;
+       }
+
+       status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 2, mbox_cmd,
+                                        mbox_sts);
+       if (status == QLA_ERROR)
+               ql4_printk(KERN_ERR, ha, "%s: failed status %04X\n",
+                          __func__, mbox_sts[0]);
+
+exit_set_param:
+       return status;
+}
+
+/**
+ * qla4_83xx_post_idc_ack - post IDC ACK
+ * @ha: Pointer to host adapter structure.
+ *
+ * Posts IDC ACK for IDC Request Notification AEN.
+ **/
+int qla4_83xx_post_idc_ack(struct scsi_qla_host *ha)
+{
+       uint32_t mbox_cmd[MBOX_REG_COUNT];
+       uint32_t mbox_sts[MBOX_REG_COUNT];
+       int status;
+
+       memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+       memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+       mbox_cmd[0] = MBOX_CMD_IDC_ACK;
+       mbox_cmd[1] = ha->idc_info.request_desc;
+       mbox_cmd[2] = ha->idc_info.info1;
+       mbox_cmd[3] = ha->idc_info.info2;
+       mbox_cmd[4] = ha->idc_info.info3;
+
+       status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, MBOX_REG_COUNT,
+                                        mbox_cmd, mbox_sts);
+       if (status == QLA_ERROR)
+               ql4_printk(KERN_ERR, ha, "%s: failed status %04X\n", __func__,
+                          mbox_sts[0]);
+       else
+              DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IDC ACK posted\n",
+                                __func__));
+
+       return status;
+}
index 7851f314ba96bd1cf5ec032dac0420b14996d4e7..325db1f2c09139bb712663b2d14d78fecc20baa8 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic iSCSI HBA Driver
- * Copyright (c)  2003-2010 QLogic Corporation
+ * Copyright (c)  2003-2012 QLogic Corporation
  *
  * See LICENSE.qla4xxx for copyright and licensing details.
  */
index 945cc328f57f705d636400090e9217656e406d57..dba0514d1c708581bbe753651f87d202a285ea6f 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic iSCSI HBA Driver
- * Copyright (c)  2003-2010 QLogic Corporation
+ * Copyright (c)  2003-2012 QLogic Corporation
  *
  * See LICENSE.qla4xxx for copyright and licensing details.
  */
index 807bf76f1b6a851c137af35865cb4d8082ca452d..499a92db1cf64d3b1d44201592b289aefd38cd71 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic iSCSI HBA Driver
- * Copyright (c)  2003-2010 QLogic Corporation
+ * Copyright (c)  2003-2012 QLogic Corporation
  *
  * See LICENSE.qla4xxx for copyright and licensing details.
  */
@@ -10,6 +10,7 @@
 #include <linux/ratelimit.h>
 #include "ql4_def.h"
 #include "ql4_glbl.h"
+#include "ql4_inline.h"
 
 #include <asm-generic/io-64-nonatomic-lo-hi.h>
 
@@ -27,7 +28,7 @@
 #define CRB_BLK(off)   ((off >> 20) & 0x3f)
 #define CRB_SUBBLK(off)        ((off >> 16) & 0xf)
 #define CRB_WINDOW_2M  (0x130060)
-#define CRB_HI(off)    ((qla4_8xxx_crb_hub_agt[CRB_BLK(off)] << 20) | \
+#define CRB_HI(off)    ((qla4_82xx_crb_hub_agt[CRB_BLK(off)] << 20) | \
                        ((off) & 0xf0000))
 #define QLA82XX_PCI_CAMQM_2M_END       (0x04800800UL)
 #define QLA82XX_PCI_CAMQM_2M_BASE      (0x000ff800UL)
@@ -51,7 +52,7 @@ static int qla4_8xxx_crb_table_initialized;
        (crb_addr_xform[QLA82XX_HW_PX_MAP_CRB_##name] = \
         QLA82XX_HW_CRB_HUB_AGT_ADR_##name << 20)
 static void
-qla4_8xxx_crb_addr_transform_setup(void)
+qla4_82xx_crb_addr_transform_setup(void)
 {
        qla4_8xxx_crb_addr_transform(XDMA);
        qla4_8xxx_crb_addr_transform(TIMR);
@@ -268,7 +269,7 @@ static struct crb_128M_2M_block_map crb_128M_2M_map[64] = {
 /*
  * top 12 bits of crb internal address (hub, agent)
  */
-static unsigned qla4_8xxx_crb_hub_agt[64] = {
+static unsigned qla4_82xx_crb_hub_agt[64] = {
        0,
        QLA82XX_HW_CRB_HUB_AGT_ADR_PS,
        QLA82XX_HW_CRB_HUB_AGT_ADR_MN,
@@ -353,7 +354,7 @@ static char *qdev_state[] = {
  * side effect: lock crb window
  */
 static void
-qla4_8xxx_pci_set_crbwindow_2M(struct scsi_qla_host *ha, ulong *off)
+qla4_82xx_pci_set_crbwindow_2M(struct scsi_qla_host *ha, ulong *off)
 {
        u32 win_read;
 
@@ -373,96 +374,115 @@ qla4_8xxx_pci_set_crbwindow_2M(struct scsi_qla_host *ha, ulong *off)
 }
 
 void
-qla4_8xxx_wr_32(struct scsi_qla_host *ha, ulong off, u32 data)
+qla4_82xx_wr_32(struct scsi_qla_host *ha, ulong off, u32 data)
 {
        unsigned long flags = 0;
        int rv;
 
-       rv = qla4_8xxx_pci_get_crb_addr_2M(ha, &off);
+       rv = qla4_82xx_pci_get_crb_addr_2M(ha, &off);
 
        BUG_ON(rv == -1);
 
        if (rv == 1) {
                write_lock_irqsave(&ha->hw_lock, flags);
-               qla4_8xxx_crb_win_lock(ha);
-               qla4_8xxx_pci_set_crbwindow_2M(ha, &off);
+               qla4_82xx_crb_win_lock(ha);
+               qla4_82xx_pci_set_crbwindow_2M(ha, &off);
        }
 
        writel(data, (void __iomem *)off);
 
        if (rv == 1) {
-               qla4_8xxx_crb_win_unlock(ha);
+               qla4_82xx_crb_win_unlock(ha);
                write_unlock_irqrestore(&ha->hw_lock, flags);
        }
 }
 
-int
-qla4_8xxx_rd_32(struct scsi_qla_host *ha, ulong off)
+uint32_t qla4_82xx_rd_32(struct scsi_qla_host *ha, ulong off)
 {
        unsigned long flags = 0;
        int rv;
        u32 data;
 
-       rv = qla4_8xxx_pci_get_crb_addr_2M(ha, &off);
+       rv = qla4_82xx_pci_get_crb_addr_2M(ha, &off);
 
        BUG_ON(rv == -1);
 
        if (rv == 1) {
                write_lock_irqsave(&ha->hw_lock, flags);
-               qla4_8xxx_crb_win_lock(ha);
-               qla4_8xxx_pci_set_crbwindow_2M(ha, &off);
+               qla4_82xx_crb_win_lock(ha);
+               qla4_82xx_pci_set_crbwindow_2M(ha, &off);
        }
        data = readl((void __iomem *)off);
 
        if (rv == 1) {
-               qla4_8xxx_crb_win_unlock(ha);
+               qla4_82xx_crb_win_unlock(ha);
                write_unlock_irqrestore(&ha->hw_lock, flags);
        }
        return data;
 }
 
 /* Minidump related functions */
-static int qla4_8xxx_md_rw_32(struct scsi_qla_host *ha, uint32_t off,
-                             u32 data, uint8_t flag)
+int qla4_82xx_md_rd_32(struct scsi_qla_host *ha, uint32_t off, uint32_t *data)
 {
-       uint32_t win_read, off_value, rval = QLA_SUCCESS;
+       uint32_t win_read, off_value;
+       int rval = QLA_SUCCESS;
 
        off_value  = off & 0xFFFF0000;
        writel(off_value, (void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));
 
-       /* Read back value to make sure write has gone through before trying
+       /*
+        * Read back value to make sure write has gone through before trying
         * to use it.
         */
        win_read = readl((void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));
        if (win_read != off_value) {
                DEBUG2(ql4_printk(KERN_INFO, ha,
                                  "%s: Written (0x%x) != Read (0x%x), off=0x%x\n",
-                                  __func__, off_value, win_read, off));
-               return QLA_ERROR;
+                                 __func__, off_value, win_read, off));
+               rval = QLA_ERROR;
+       } else {
+               off_value  = off & 0x0000FFFF;
+               *data = readl((void __iomem *)(off_value + CRB_INDIRECT_2M +
+                                              ha->nx_pcibase));
        }
+       return rval;
+}
 
-       off_value  = off & 0x0000FFFF;
+int qla4_82xx_md_wr_32(struct scsi_qla_host *ha, uint32_t off, uint32_t data)
+{
+       uint32_t win_read, off_value;
+       int rval = QLA_SUCCESS;
+
+       off_value  = off & 0xFFFF0000;
+       writel(off_value, (void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));
 
-       if (flag)
+       /* Read back value to make sure write has gone through before trying
+        * to use it.
+        */
+       win_read = readl((void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));
+       if (win_read != off_value) {
+               DEBUG2(ql4_printk(KERN_INFO, ha,
+                                 "%s: Written (0x%x) != Read (0x%x), off=0x%x\n",
+                                 __func__, off_value, win_read, off));
+               rval = QLA_ERROR;
+       } else {
+               off_value  = off & 0x0000FFFF;
                writel(data, (void __iomem *)(off_value + CRB_INDIRECT_2M +
                                              ha->nx_pcibase));
-       else
-               rval = readl((void __iomem *)(off_value + CRB_INDIRECT_2M +
-                                             ha->nx_pcibase));
-
+       }
        return rval;
 }
 
 #define CRB_WIN_LOCK_TIMEOUT 100000000
 
-int qla4_8xxx_crb_win_lock(struct scsi_qla_host *ha)
+int qla4_82xx_crb_win_lock(struct scsi_qla_host *ha)
 {
        int i;
        int done = 0, timeout = 0;
 
        while (!done) {
                /* acquire semaphore3 from PCI HW block */
-               done = qla4_8xxx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_LOCK));
+               done = qla4_82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_LOCK));
                if (done == 1)
                        break;
                if (timeout >= CRB_WIN_LOCK_TIMEOUT)
@@ -478,32 +498,32 @@ int qla4_8xxx_crb_win_lock(struct scsi_qla_host *ha)
                                cpu_relax();    /*This a nop instr on i386*/
                }
        }
-       qla4_8xxx_wr_32(ha, QLA82XX_CRB_WIN_LOCK_ID, ha->func_num);
+       qla4_82xx_wr_32(ha, QLA82XX_CRB_WIN_LOCK_ID, ha->func_num);
        return 0;
 }
 
-void qla4_8xxx_crb_win_unlock(struct scsi_qla_host *ha)
+void qla4_82xx_crb_win_unlock(struct scsi_qla_host *ha)
 {
-       qla4_8xxx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK));
+       qla4_82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK));
 }
 
 #define IDC_LOCK_TIMEOUT 100000000
 
 /**
- * qla4_8xxx_idc_lock - hw_lock
+ * qla4_82xx_idc_lock - hw_lock
  * @ha: pointer to adapter structure
  *
  * General purpose lock used to synchronize access to
  * CRB_DEV_STATE, CRB_DEV_REF_COUNT, etc.
  **/
-int qla4_8xxx_idc_lock(struct scsi_qla_host *ha)
+int qla4_82xx_idc_lock(struct scsi_qla_host *ha)
 {
        int i;
        int done = 0, timeout = 0;
 
        while (!done) {
                /* acquire semaphore5 from PCI HW block */
-               done = qla4_8xxx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_LOCK));
+               done = qla4_82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_LOCK));
                if (done == 1)
                        break;
                if (timeout >= IDC_LOCK_TIMEOUT)
@@ -522,13 +542,13 @@ int qla4_8xxx_idc_lock(struct scsi_qla_host *ha)
        return 0;
 }
 
-void qla4_8xxx_idc_unlock(struct scsi_qla_host *ha)
+void qla4_82xx_idc_unlock(struct scsi_qla_host *ha)
 {
-       qla4_8xxx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_UNLOCK));
+       qla4_82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_UNLOCK));
 }
 
 int
-qla4_8xxx_pci_get_crb_addr_2M(struct scsi_qla_host *ha, ulong *off)
+qla4_82xx_pci_get_crb_addr_2M(struct scsi_qla_host *ha, ulong *off)
 {
        struct crb_128M_2M_sub_block_map *m;
 
@@ -562,44 +582,40 @@ qla4_8xxx_pci_get_crb_addr_2M(struct scsi_qla_host *ha, ulong *off)
        return 1;
 }
 
-/*  PCI Windowing for DDR regions.  */
-#define QLA82XX_ADDR_IN_RANGE(addr, low, high)            \
-       (((addr) <= (high)) && ((addr) >= (low)))
-
 /*
 * check memory access boundary.
 * used by test agent. support ddr access only for now
 */
 static unsigned long
-qla4_8xxx_pci_mem_bound_check(struct scsi_qla_host *ha,
+qla4_82xx_pci_mem_bound_check(struct scsi_qla_host *ha,
                unsigned long long addr, int size)
 {
-       if (!QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET,
-           QLA82XX_ADDR_DDR_NET_MAX) ||
-           !QLA82XX_ADDR_IN_RANGE(addr + size - 1,
-           QLA82XX_ADDR_DDR_NET, QLA82XX_ADDR_DDR_NET_MAX) ||
+       if (!QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_DDR_NET,
+           QLA8XXX_ADDR_DDR_NET_MAX) ||
+           !QLA8XXX_ADDR_IN_RANGE(addr + size - 1,
+           QLA8XXX_ADDR_DDR_NET, QLA8XXX_ADDR_DDR_NET_MAX) ||
            ((size != 1) && (size != 2) && (size != 4) && (size != 8))) {
                return 0;
        }
        return 1;
 }
 
-static int qla4_8xxx_pci_set_window_warning_count;
+static int qla4_82xx_pci_set_window_warning_count;
 
 static unsigned long
-qla4_8xxx_pci_set_window(struct scsi_qla_host *ha, unsigned long long addr)
+qla4_82xx_pci_set_window(struct scsi_qla_host *ha, unsigned long long addr)
 {
        int window;
        u32 win_read;
 
-       if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET,
-           QLA82XX_ADDR_DDR_NET_MAX)) {
+       if (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_DDR_NET,
+           QLA8XXX_ADDR_DDR_NET_MAX)) {
                /* DDR network side */
                window = MN_WIN(addr);
                ha->ddr_mn_window = window;
-               qla4_8xxx_wr_32(ha, ha->mn_win_crb |
+               qla4_82xx_wr_32(ha, ha->mn_win_crb |
                    QLA82XX_PCI_CRBSPACE, window);
-               win_read = qla4_8xxx_rd_32(ha, ha->mn_win_crb |
+               win_read = qla4_82xx_rd_32(ha, ha->mn_win_crb |
                    QLA82XX_PCI_CRBSPACE);
                if ((win_read << 17) != window) {
                        ql4_printk(KERN_WARNING, ha,
@@ -607,8 +623,8 @@ qla4_8xxx_pci_set_window(struct scsi_qla_host *ha, unsigned long long addr)
                        __func__, window, win_read);
                }
                addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_DDR_NET;
-       } else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM0,
-                               QLA82XX_ADDR_OCM0_MAX)) {
+       } else if (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_OCM0,
+                               QLA8XXX_ADDR_OCM0_MAX)) {
                unsigned int temp1;
                /* if bits 19:18&17:11 are on */
                if ((addr & 0x00ff800) == 0xff800) {
@@ -618,9 +634,9 @@ qla4_8xxx_pci_set_window(struct scsi_qla_host *ha, unsigned long long addr)
 
                window = OCM_WIN(addr);
                ha->ddr_mn_window = window;
-               qla4_8xxx_wr_32(ha, ha->mn_win_crb |
+               qla4_82xx_wr_32(ha, ha->mn_win_crb |
                    QLA82XX_PCI_CRBSPACE, window);
-               win_read = qla4_8xxx_rd_32(ha, ha->mn_win_crb |
+               win_read = qla4_82xx_rd_32(ha, ha->mn_win_crb |
                    QLA82XX_PCI_CRBSPACE);
                temp1 = ((window & 0x1FF) << 7) |
                    ((window & 0x0FFFE0000) >> 17);
@@ -630,14 +646,14 @@ qla4_8xxx_pci_set_window(struct scsi_qla_host *ha, unsigned long long addr)
                }
                addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_OCM0_2M;
 
-       } else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_QDR_NET,
+       } else if (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_QDR_NET,
                                QLA82XX_P3_ADDR_QDR_NET_MAX)) {
                /* QDR network side */
                window = MS_WIN(addr);
                ha->qdr_sn_window = window;
-               qla4_8xxx_wr_32(ha, ha->ms_win_crb |
+               qla4_82xx_wr_32(ha, ha->ms_win_crb |
                    QLA82XX_PCI_CRBSPACE, window);
-               win_read = qla4_8xxx_rd_32(ha,
+               win_read = qla4_82xx_rd_32(ha,
                     ha->ms_win_crb | QLA82XX_PCI_CRBSPACE);
                if (win_read != window) {
                        printk("%s: Written MSwin (0x%x) != Read "
@@ -650,8 +666,8 @@ qla4_8xxx_pci_set_window(struct scsi_qla_host *ha, unsigned long long addr)
                 * peg gdb frequently accesses memory that doesn't exist,
                 * this limits the chit chat so debugging isn't slowed down.
                 */
-               if ((qla4_8xxx_pci_set_window_warning_count++ < 8) ||
-                   (qla4_8xxx_pci_set_window_warning_count%64 == 0)) {
+               if ((qla4_82xx_pci_set_window_warning_count++ < 8) ||
+                   (qla4_82xx_pci_set_window_warning_count%64 == 0)) {
                        printk("%s: Warning:%s Unknown address range!\n",
                            __func__, DRIVER_NAME);
                }
@@ -661,7 +677,7 @@ qla4_8xxx_pci_set_window(struct scsi_qla_host *ha, unsigned long long addr)
 }
 
 /* check if address is in the same windows as the previous access */
-static int qla4_8xxx_pci_is_same_window(struct scsi_qla_host *ha,
+static int qla4_82xx_pci_is_same_window(struct scsi_qla_host *ha,
                unsigned long long addr)
 {
        int window;
@@ -669,20 +685,20 @@ static int qla4_8xxx_pci_is_same_window(struct scsi_qla_host *ha,
 
        qdr_max = QLA82XX_P3_ADDR_QDR_NET_MAX;
 
-       if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET,
-           QLA82XX_ADDR_DDR_NET_MAX)) {
+       if (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_DDR_NET,
+           QLA8XXX_ADDR_DDR_NET_MAX)) {
                /* DDR network side */
                BUG();  /* MN access can not come here */
-       } else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM0,
-            QLA82XX_ADDR_OCM0_MAX)) {
+       } else if (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_OCM0,
+            QLA8XXX_ADDR_OCM0_MAX)) {
                return 1;
-       } else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM1,
-            QLA82XX_ADDR_OCM1_MAX)) {
+       } else if (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_OCM1,
+            QLA8XXX_ADDR_OCM1_MAX)) {
                return 1;
-       } else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_QDR_NET,
+       } else if (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_QDR_NET,
            qdr_max)) {
                /* QDR network side */
-               window = ((addr - QLA82XX_ADDR_QDR_NET) >> 22) & 0x3f;
+               window = ((addr - QLA8XXX_ADDR_QDR_NET) >> 22) & 0x3f;
                if (ha->qdr_sn_window == window)
                        return 1;
        }
@@ -690,7 +706,7 @@ static int qla4_8xxx_pci_is_same_window(struct scsi_qla_host *ha,
        return 0;
 }
 
-static int qla4_8xxx_pci_mem_read_direct(struct scsi_qla_host *ha,
+static int qla4_82xx_pci_mem_read_direct(struct scsi_qla_host *ha,
                u64 off, void *data, int size)
 {
        unsigned long flags;
@@ -707,9 +723,9 @@ static int qla4_8xxx_pci_mem_read_direct(struct scsi_qla_host *ha,
         * If attempting to access unknown address or straddle hw windows,
         * do not access.
         */
-       start = qla4_8xxx_pci_set_window(ha, off);
+       start = qla4_82xx_pci_set_window(ha, off);
        if ((start == -1UL) ||
-           (qla4_8xxx_pci_is_same_window(ha, off + size - 1) == 0)) {
+           (qla4_82xx_pci_is_same_window(ha, off + size - 1) == 0)) {
                write_unlock_irqrestore(&ha->hw_lock, flags);
                printk(KERN_ERR"%s out of bound pci memory access. "
                                "offset is 0x%llx\n", DRIVER_NAME, off);
@@ -763,7 +779,7 @@ static int qla4_8xxx_pci_mem_read_direct(struct scsi_qla_host *ha,
 }
 
 static int
-qla4_8xxx_pci_mem_write_direct(struct scsi_qla_host *ha, u64 off,
+qla4_82xx_pci_mem_write_direct(struct scsi_qla_host *ha, u64 off,
                void *data, int size)
 {
        unsigned long flags;
@@ -780,9 +796,9 @@ qla4_8xxx_pci_mem_write_direct(struct scsi_qla_host *ha, u64 off,
         * If attempting to access unknown address or straddle hw windows,
         * do not access.
         */
-       start = qla4_8xxx_pci_set_window(ha, off);
+       start = qla4_82xx_pci_set_window(ha, off);
        if ((start == -1UL) ||
-           (qla4_8xxx_pci_is_same_window(ha, off + size - 1) == 0)) {
+           (qla4_82xx_pci_is_same_window(ha, off + size - 1) == 0)) {
                write_unlock_irqrestore(&ha->hw_lock, flags);
                printk(KERN_ERR"%s out of bound pci memory access. "
                                "offset is 0x%llx\n", DRIVER_NAME, off);
@@ -835,13 +851,13 @@ qla4_8xxx_pci_mem_write_direct(struct scsi_qla_host *ha, u64 off,
 #define MTU_FUDGE_FACTOR 100
 
 static unsigned long
-qla4_8xxx_decode_crb_addr(unsigned long addr)
+qla4_82xx_decode_crb_addr(unsigned long addr)
 {
        int i;
        unsigned long base_addr, offset, pci_base;
 
        if (!qla4_8xxx_crb_table_initialized)
-               qla4_8xxx_crb_addr_transform_setup();
+               qla4_82xx_crb_addr_transform_setup();
 
        pci_base = ADDR_ERROR;
        base_addr = addr & 0xfff00000;
@@ -860,10 +876,10 @@ qla4_8xxx_decode_crb_addr(unsigned long addr)
 }
 
 static long rom_max_timeout = 100;
-static long qla4_8xxx_rom_lock_timeout = 100;
+static long qla4_82xx_rom_lock_timeout = 100;
 
 static int
-qla4_8xxx_rom_lock(struct scsi_qla_host *ha)
+qla4_82xx_rom_lock(struct scsi_qla_host *ha)
 {
        int i;
        int done = 0, timeout = 0;
@@ -871,10 +887,10 @@ qla4_8xxx_rom_lock(struct scsi_qla_host *ha)
        while (!done) {
                /* acquire semaphore2 from PCI HW block */
 
-               done = qla4_8xxx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_LOCK));
+               done = qla4_82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_LOCK));
                if (done == 1)
                        break;
-               if (timeout >= qla4_8xxx_rom_lock_timeout)
+               if (timeout >= qla4_82xx_rom_lock_timeout)
                        return -1;
 
                timeout++;
@@ -887,24 +903,24 @@ qla4_8xxx_rom_lock(struct scsi_qla_host *ha)
                                cpu_relax();    /*This a nop instr on i386*/
                }
        }
-       qla4_8xxx_wr_32(ha, QLA82XX_ROM_LOCK_ID, ROM_LOCK_DRIVER);
+       qla4_82xx_wr_32(ha, QLA82XX_ROM_LOCK_ID, ROM_LOCK_DRIVER);
        return 0;
 }
 
 static void
-qla4_8xxx_rom_unlock(struct scsi_qla_host *ha)
+qla4_82xx_rom_unlock(struct scsi_qla_host *ha)
 {
-       qla4_8xxx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
+       qla4_82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
 }
 
 static int
-qla4_8xxx_wait_rom_done(struct scsi_qla_host *ha)
+qla4_82xx_wait_rom_done(struct scsi_qla_host *ha)
 {
        long timeout = 0;
        long done = 0 ;
 
        while (done == 0) {
-               done = qla4_8xxx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS);
+               done = qla4_82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS);
                done &= 2;
                timeout++;
                if (timeout >= rom_max_timeout) {
@@ -917,40 +933,41 @@ qla4_8xxx_wait_rom_done(struct scsi_qla_host *ha)
 }
 
 static int
-qla4_8xxx_do_rom_fast_read(struct scsi_qla_host *ha, int addr, int *valp)
+qla4_82xx_do_rom_fast_read(struct scsi_qla_host *ha, int addr, int *valp)
 {
-       qla4_8xxx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, addr);
-       qla4_8xxx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
-       qla4_8xxx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3);
-       qla4_8xxx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0xb);
-       if (qla4_8xxx_wait_rom_done(ha)) {
+       qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, addr);
+       qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
+       qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3);
+       qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0xb);
+       if (qla4_82xx_wait_rom_done(ha)) {
                printk("%s: Error waiting for rom done\n", DRIVER_NAME);
                return -1;
        }
        /* reset abyte_cnt and dummy_byte_cnt */
-       qla4_8xxx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
+       qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
        udelay(10);
-       qla4_8xxx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0);
+       qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0);
 
-       *valp = qla4_8xxx_rd_32(ha, QLA82XX_ROMUSB_ROM_RDATA);
+       *valp = qla4_82xx_rd_32(ha, QLA82XX_ROMUSB_ROM_RDATA);
        return 0;
 }
 
 static int
-qla4_8xxx_rom_fast_read(struct scsi_qla_host *ha, int addr, int *valp)
+qla4_82xx_rom_fast_read(struct scsi_qla_host *ha, int addr, int *valp)
 {
        int ret, loops = 0;
 
-       while ((qla4_8xxx_rom_lock(ha) != 0) && (loops < 50000)) {
+       while ((qla4_82xx_rom_lock(ha) != 0) && (loops < 50000)) {
                udelay(100);
                loops++;
        }
        if (loops >= 50000) {
-               printk("%s: qla4_8xxx_rom_lock failed\n", DRIVER_NAME);
+               ql4_printk(KERN_WARNING, ha, "%s: qla4_82xx_rom_lock failed\n",
+                          DRIVER_NAME);
                return -1;
        }
-       ret = qla4_8xxx_do_rom_fast_read(ha, addr, valp);
-       qla4_8xxx_rom_unlock(ha);
+       ret = qla4_82xx_do_rom_fast_read(ha, addr, valp);
+       qla4_82xx_rom_unlock(ha);
        return ret;
 }
 
@@ -959,7 +976,7 @@ qla4_8xxx_rom_fast_read(struct scsi_qla_host *ha, int addr, int *valp)
  * to put the ISP into operational state
  **/
 static int
-qla4_8xxx_pinit_from_rom(struct scsi_qla_host *ha, int verbose)
+qla4_82xx_pinit_from_rom(struct scsi_qla_host *ha, int verbose)
 {
        int addr, val;
        int i ;
@@ -973,68 +990,68 @@ qla4_8xxx_pinit_from_rom(struct scsi_qla_host *ha, int verbose)
        };
 
        /* Halt all the indiviual PEGs and other blocks of the ISP */
-       qla4_8xxx_rom_lock(ha);
+       qla4_82xx_rom_lock(ha);
 
        /* disable all I2Q */
-       qla4_8xxx_wr_32(ha, QLA82XX_CRB_I2Q + 0x10, 0x0);
-       qla4_8xxx_wr_32(ha, QLA82XX_CRB_I2Q + 0x14, 0x0);
-       qla4_8xxx_wr_32(ha, QLA82XX_CRB_I2Q + 0x18, 0x0);
-       qla4_8xxx_wr_32(ha, QLA82XX_CRB_I2Q + 0x1c, 0x0);
-       qla4_8xxx_wr_32(ha, QLA82XX_CRB_I2Q + 0x20, 0x0);
-       qla4_8xxx_wr_32(ha, QLA82XX_CRB_I2Q + 0x24, 0x0);
+       qla4_82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x10, 0x0);
+       qla4_82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x14, 0x0);
+       qla4_82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x18, 0x0);
+       qla4_82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x1c, 0x0);
+       qla4_82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x20, 0x0);
+       qla4_82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x24, 0x0);
 
        /* disable all niu interrupts */
-       qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x40, 0xff);
+       qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x40, 0xff);
        /* disable xge rx/tx */
-       qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x70000, 0x00);
+       qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x70000, 0x00);
        /* disable xg1 rx/tx */
-       qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x80000, 0x00);
+       qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x80000, 0x00);
        /* disable sideband mac */
-       qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x90000, 0x00);
+       qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x90000, 0x00);
        /* disable ap0 mac */
-       qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0xa0000, 0x00);
+       qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0xa0000, 0x00);
        /* disable ap1 mac */
-       qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0xb0000, 0x00);
+       qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0xb0000, 0x00);
 
        /* halt sre */
-       val = qla4_8xxx_rd_32(ha, QLA82XX_CRB_SRE + 0x1000);
-       qla4_8xxx_wr_32(ha, QLA82XX_CRB_SRE + 0x1000, val & (~(0x1)));
+       val = qla4_82xx_rd_32(ha, QLA82XX_CRB_SRE + 0x1000);
+       qla4_82xx_wr_32(ha, QLA82XX_CRB_SRE + 0x1000, val & (~(0x1)));
 
        /* halt epg */
-       qla4_8xxx_wr_32(ha, QLA82XX_CRB_EPG + 0x1300, 0x1);
+       qla4_82xx_wr_32(ha, QLA82XX_CRB_EPG + 0x1300, 0x1);
 
        /* halt timers */
-       qla4_8xxx_wr_32(ha, QLA82XX_CRB_TIMER + 0x0, 0x0);
-       qla4_8xxx_wr_32(ha, QLA82XX_CRB_TIMER + 0x8, 0x0);
-       qla4_8xxx_wr_32(ha, QLA82XX_CRB_TIMER + 0x10, 0x0);
-       qla4_8xxx_wr_32(ha, QLA82XX_CRB_TIMER + 0x18, 0x0);
-       qla4_8xxx_wr_32(ha, QLA82XX_CRB_TIMER + 0x100, 0x0);
-       qla4_8xxx_wr_32(ha, QLA82XX_CRB_TIMER + 0x200, 0x0);
+       qla4_82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x0, 0x0);
+       qla4_82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x8, 0x0);
+       qla4_82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x10, 0x0);
+       qla4_82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x18, 0x0);
+       qla4_82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x100, 0x0);
+       qla4_82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x200, 0x0);
 
        /* halt pegs */
-       qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x3c, 1);
-       qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_1 + 0x3c, 1);
-       qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_2 + 0x3c, 1);
-       qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_3 + 0x3c, 1);
-       qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_4 + 0x3c, 1);
+       qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x3c, 1);
+       qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1 + 0x3c, 1);
+       qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2 + 0x3c, 1);
+       qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3 + 0x3c, 1);
+       qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_4 + 0x3c, 1);
        msleep(5);
 
        /* big hammer */
        if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
                /* don't reset CAM block on reset */
-               qla4_8xxx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xfeffffff);
+               qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xfeffffff);
        else
-               qla4_8xxx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xffffffff);
+               qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xffffffff);
 
-       qla4_8xxx_rom_unlock(ha);
+       qla4_82xx_rom_unlock(ha);
 
        /* Read the signature value from the flash.
         * Offset 0: Contain signature (0xcafecafe)
         * Offset 4: Offset and number of addr/value pairs
         * that present in CRB initialize sequence
         */
-       if (qla4_8xxx_rom_fast_read(ha, 0, &n) != 0 || n != 0xcafecafeUL ||
-           qla4_8xxx_rom_fast_read(ha, 4, &n) != 0) {
+       if (qla4_82xx_rom_fast_read(ha, 0, &n) != 0 || n != 0xcafecafeUL ||
+           qla4_82xx_rom_fast_read(ha, 4, &n) != 0) {
                ql4_printk(KERN_WARNING, ha,
                        "[ERROR] Reading crb_init area: n: %08x\n", n);
                return -1;
@@ -1065,8 +1082,8 @@ qla4_8xxx_pinit_from_rom(struct scsi_qla_host *ha, int verbose)
        }
 
        for (i = 0; i < n; i++) {
-               if (qla4_8xxx_rom_fast_read(ha, 8*i + 4*offset, &val) != 0 ||
-                   qla4_8xxx_rom_fast_read(ha, 8*i + 4*offset + 4, &addr) !=
+               if (qla4_82xx_rom_fast_read(ha, 8*i + 4*offset, &val) != 0 ||
+                   qla4_82xx_rom_fast_read(ha, 8*i + 4*offset + 4, &addr) !=
                    0) {
                        kfree(buf);
                        return -1;
@@ -1080,7 +1097,7 @@ qla4_8xxx_pinit_from_rom(struct scsi_qla_host *ha, int verbose)
                /* Translate internal CRB initialization
                 * address to PCI bus address
                 */
-               off = qla4_8xxx_decode_crb_addr((unsigned long)buf[i].addr) +
+               off = qla4_82xx_decode_crb_addr((unsigned long)buf[i].addr) +
                    QLA82XX_PCI_CRBSPACE;
                /* Not all CRB  addr/value pair to be written,
                 * some of them are skipped
@@ -1125,7 +1142,7 @@ qla4_8xxx_pinit_from_rom(struct scsi_qla_host *ha, int verbose)
                        continue;
                }
 
-               qla4_8xxx_wr_32(ha, off, buf[i].data);
+               qla4_82xx_wr_32(ha, off, buf[i].data);
 
                /* ISP requires much bigger delay to settle down,
                 * else crb_window returns 0xffffffff
@@ -1142,25 +1159,25 @@ qla4_8xxx_pinit_from_rom(struct scsi_qla_host *ha, int verbose)
        kfree(buf);
 
        /* Resetting the data and instruction cache */
-       qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_D+0xec, 0x1e);
-       qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_D+0x4c, 8);
-       qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_I+0x4c, 8);
+       qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_D+0xec, 0x1e);
+       qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_D+0x4c, 8);
+       qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_I+0x4c, 8);
 
        /* Clear all protocol processing engines */
-       qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_0+0x8, 0);
-       qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_0+0xc, 0);
-       qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_1+0x8, 0);
-       qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_1+0xc, 0);
-       qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_2+0x8, 0);
-       qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_2+0xc, 0);
-       qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_3+0x8, 0);
-       qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_3+0xc, 0);
+       qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0+0x8, 0);
+       qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0+0xc, 0);
+       qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1+0x8, 0);
+       qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1+0xc, 0);
+       qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2+0x8, 0);
+       qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2+0xc, 0);
+       qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3+0x8, 0);
+       qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3+0xc, 0);
 
        return 0;
 }
 
 static int
-qla4_8xxx_load_from_flash(struct scsi_qla_host *ha, uint32_t image_start)
+qla4_82xx_load_from_flash(struct scsi_qla_host *ha, uint32_t image_start)
 {
        int  i, rval = 0;
        long size = 0;
@@ -1175,14 +1192,14 @@ qla4_8xxx_load_from_flash(struct scsi_qla_host *ha, uint32_t image_start)
            ha->host_no, __func__, flashaddr, image_start));
 
        for (i = 0; i < size; i++) {
-               if ((qla4_8xxx_rom_fast_read(ha, flashaddr, (int *)&low)) ||
-                   (qla4_8xxx_rom_fast_read(ha, flashaddr + 4,
+               if ((qla4_82xx_rom_fast_read(ha, flashaddr, (int *)&low)) ||
+                   (qla4_82xx_rom_fast_read(ha, flashaddr + 4,
                    (int *)&high))) {
                        rval = -1;
                        goto exit_load_from_flash;
                }
                data = ((u64)high << 32) | low ;
-               rval = qla4_8xxx_pci_mem_write_2M(ha, memaddr, &data, 8);
+               rval = qla4_82xx_pci_mem_write_2M(ha, memaddr, &data, 8);
                if (rval)
                        goto exit_load_from_flash;
 
@@ -1197,20 +1214,20 @@ qla4_8xxx_load_from_flash(struct scsi_qla_host *ha, uint32_t image_start)
        udelay(100);
 
        read_lock(&ha->hw_lock);
-       qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x18, 0x1020);
-       qla4_8xxx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001e);
+       qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x18, 0x1020);
+       qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001e);
        read_unlock(&ha->hw_lock);
 
 exit_load_from_flash:
        return rval;
 }
 
-static int qla4_8xxx_load_fw(struct scsi_qla_host *ha, uint32_t image_start)
+static int qla4_82xx_load_fw(struct scsi_qla_host *ha, uint32_t image_start)
 {
        u32 rst;
 
-       qla4_8xxx_wr_32(ha, CRB_CMDPEG_STATE, 0);
-       if (qla4_8xxx_pinit_from_rom(ha, 0) != QLA_SUCCESS) {
+       qla4_82xx_wr_32(ha, CRB_CMDPEG_STATE, 0);
+       if (qla4_82xx_pinit_from_rom(ha, 0) != QLA_SUCCESS) {
                printk(KERN_WARNING "%s: Error during CRB Initialization\n",
                    __func__);
                return QLA_ERROR;
@@ -1223,12 +1240,12 @@ static int qla4_8xxx_load_fw(struct scsi_qla_host *ha, uint32_t image_start)
         * To get around this, QM is brought out of reset.
         */
 
-       rst = qla4_8xxx_rd_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET);
+       rst = qla4_82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET);
        /* unreset qm */
        rst &= ~(1 << 28);
-       qla4_8xxx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, rst);
+       qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, rst);
 
-       if (qla4_8xxx_load_from_flash(ha, image_start)) {
+       if (qla4_82xx_load_from_flash(ha, image_start)) {
                printk("%s: Error trying to load fw from flash!\n", __func__);
                return QLA_ERROR;
        }
@@ -1237,7 +1254,7 @@ static int qla4_8xxx_load_fw(struct scsi_qla_host *ha, uint32_t image_start)
 }
 
 int
-qla4_8xxx_pci_mem_read_2M(struct scsi_qla_host *ha,
+qla4_82xx_pci_mem_read_2M(struct scsi_qla_host *ha,
                u64 off, void *data, int size)
 {
        int i, j = 0, k, start, end, loop, sz[2], off0[2];
@@ -1249,12 +1266,12 @@ qla4_8xxx_pci_mem_read_2M(struct scsi_qla_host *ha,
         * If not MN, go check for MS or invalid.
         */
 
-       if (off >= QLA82XX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX)
+       if (off >= QLA8XXX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX)
                mem_crb = QLA82XX_CRB_QDR_NET;
        else {
                mem_crb = QLA82XX_CRB_DDR_NET;
-               if (qla4_8xxx_pci_mem_bound_check(ha, off, size) == 0)
-                       return qla4_8xxx_pci_mem_read_direct(ha,
+               if (qla4_82xx_pci_mem_bound_check(ha, off, size) == 0)
+                       return qla4_82xx_pci_mem_read_direct(ha,
                                        off, data, size);
        }
 
@@ -1270,16 +1287,16 @@ qla4_8xxx_pci_mem_read_2M(struct scsi_qla_host *ha,
 
        for (i = 0; i < loop; i++) {
                temp = off8 + (i << shift_amount);
-               qla4_8xxx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_LO, temp);
+               qla4_82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_LO, temp);
                temp = 0;
-               qla4_8xxx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_HI, temp);
+               qla4_82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_HI, temp);
                temp = MIU_TA_CTL_ENABLE;
-               qla4_8xxx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
-               temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE;
-               qla4_8xxx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
+               qla4_82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
+               temp = MIU_TA_CTL_START_ENABLE;
+               qla4_82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
 
                for (j = 0; j < MAX_CTL_CHECK; j++) {
-                       temp = qla4_8xxx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL);
+                       temp = qla4_82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL);
                        if ((temp & MIU_TA_CTL_BUSY) == 0)
                                break;
                }
@@ -1294,7 +1311,7 @@ qla4_8xxx_pci_mem_read_2M(struct scsi_qla_host *ha,
                start = off0[i] >> 2;
                end   = (off0[i] + sz[i] - 1) >> 2;
                for (k = start; k <= end; k++) {
-                       temp = qla4_8xxx_rd_32(ha,
+                       temp = qla4_82xx_rd_32(ha,
                                mem_crb + MIU_TEST_AGT_RDDATA(k));
                        word[i] |= ((uint64_t)temp << (32 * (k & 1)));
                }
@@ -1328,7 +1345,7 @@ qla4_8xxx_pci_mem_read_2M(struct scsi_qla_host *ha,
 }
 
 int
-qla4_8xxx_pci_mem_write_2M(struct scsi_qla_host *ha,
+qla4_82xx_pci_mem_write_2M(struct scsi_qla_host *ha,
                u64 off, void *data, int size)
 {
        int i, j, ret = 0, loop, sz[2], off0;
@@ -1339,12 +1356,12 @@ qla4_8xxx_pci_mem_write_2M(struct scsi_qla_host *ha,
        /*
         * If not MN, go check for MS or invalid.
         */
-       if (off >= QLA82XX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX)
+       if (off >= QLA8XXX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX)
                mem_crb = QLA82XX_CRB_QDR_NET;
        else {
                mem_crb = QLA82XX_CRB_DDR_NET;
-               if (qla4_8xxx_pci_mem_bound_check(ha, off, size) == 0)
-                       return qla4_8xxx_pci_mem_write_direct(ha,
+               if (qla4_82xx_pci_mem_bound_check(ha, off, size) == 0)
+                       return qla4_82xx_pci_mem_write_direct(ha,
                                        off, data, size);
        }
 
@@ -1359,7 +1376,7 @@ qla4_8xxx_pci_mem_write_2M(struct scsi_qla_host *ha,
        startword = (off & 0xf)/8;
 
        for (i = 0; i < loop; i++) {
-               if (qla4_8xxx_pci_mem_read_2M(ha, off8 +
+               if (qla4_82xx_pci_mem_read_2M(ha, off8 +
                    (i << shift_amount), &word[i * scale], 8))
                        return -1;
        }
@@ -1395,27 +1412,27 @@ qla4_8xxx_pci_mem_write_2M(struct scsi_qla_host *ha,
 
        for (i = 0; i < loop; i++) {
                temp = off8 + (i << shift_amount);
-               qla4_8xxx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_LO, temp);
+               qla4_82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_LO, temp);
                temp = 0;
-               qla4_8xxx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_HI, temp);
+               qla4_82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_HI, temp);
                temp = word[i * scale] & 0xffffffff;
-               qla4_8xxx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_LO, temp);
+               qla4_82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_LO, temp);
                temp = (word[i * scale] >> 32) & 0xffffffff;
-               qla4_8xxx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_HI, temp);
+               qla4_82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_HI, temp);
                temp = word[i*scale + 1] & 0xffffffff;
-               qla4_8xxx_wr_32(ha, mem_crb + MIU_TEST_AGT_WRDATA_UPPER_LO,
+               qla4_82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_WRDATA_UPPER_LO,
                    temp);
                temp = (word[i*scale + 1] >> 32) & 0xffffffff;
-               qla4_8xxx_wr_32(ha, mem_crb + MIU_TEST_AGT_WRDATA_UPPER_HI,
+               qla4_82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_WRDATA_UPPER_HI,
                    temp);
 
-               temp = MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE;
-               qla4_8xxx_wr_32(ha, mem_crb+MIU_TEST_AGT_CTRL, temp);
-               temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE;
-               qla4_8xxx_wr_32(ha, mem_crb+MIU_TEST_AGT_CTRL, temp);
+               temp = MIU_TA_CTL_WRITE_ENABLE;
+               qla4_82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_CTRL, temp);
+               temp = MIU_TA_CTL_WRITE_START;
+               qla4_82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_CTRL, temp);
 
                for (j = 0; j < MAX_CTL_CHECK; j++) {
-                       temp = qla4_8xxx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL);
+                       temp = qla4_82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL);
                        if ((temp & MIU_TA_CTL_BUSY) == 0)
                                break;
                }
@@ -1433,14 +1450,14 @@ qla4_8xxx_pci_mem_write_2M(struct scsi_qla_host *ha,
        return ret;
 }
 
-static int qla4_8xxx_cmdpeg_ready(struct scsi_qla_host *ha, int pegtune_val)
+static int qla4_82xx_cmdpeg_ready(struct scsi_qla_host *ha, int pegtune_val)
 {
        u32 val = 0;
        int retries = 60;
 
        if (!pegtune_val) {
                do {
-                       val = qla4_8xxx_rd_32(ha, CRB_CMDPEG_STATE);
+                       val = qla4_82xx_rd_32(ha, CRB_CMDPEG_STATE);
                        if ((val == PHAN_INITIALIZE_COMPLETE) ||
                            (val == PHAN_INITIALIZE_ACK))
                                return 0;
@@ -1450,7 +1467,7 @@ static int qla4_8xxx_cmdpeg_ready(struct scsi_qla_host *ha, int pegtune_val)
                } while (--retries);
 
                if (!retries) {
-                       pegtune_val = qla4_8xxx_rd_32(ha,
+                       pegtune_val = qla4_82xx_rd_32(ha,
                                QLA82XX_ROMUSB_GLB_PEGTUNE_DONE);
                        printk(KERN_WARNING "%s: init failed, "
                                "pegtune_val = %x\n", __func__, pegtune_val);
@@ -1460,21 +1477,21 @@ static int qla4_8xxx_cmdpeg_ready(struct scsi_qla_host *ha, int pegtune_val)
        return 0;
 }
 
-static int qla4_8xxx_rcvpeg_ready(struct scsi_qla_host *ha)
+static int qla4_82xx_rcvpeg_ready(struct scsi_qla_host *ha)
 {
        uint32_t state = 0;
        int loops = 0;
 
        /* Window 1 call */
        read_lock(&ha->hw_lock);
-       state = qla4_8xxx_rd_32(ha, CRB_RCVPEG_STATE);
+       state = qla4_82xx_rd_32(ha, CRB_RCVPEG_STATE);
        read_unlock(&ha->hw_lock);
 
        while ((state != PHAN_PEG_RCV_INITIALIZED) && (loops < 30000)) {
                udelay(100);
                /* Window 1 call */
                read_lock(&ha->hw_lock);
-               state = qla4_8xxx_rd_32(ha, CRB_RCVPEG_STATE);
+               state = qla4_82xx_rd_32(ha, CRB_RCVPEG_STATE);
                read_unlock(&ha->hw_lock);
 
                loops++;
@@ -1494,11 +1511,21 @@ qla4_8xxx_set_drv_active(struct scsi_qla_host *ha)
 {
        uint32_t drv_active;
 
-       drv_active = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
-       drv_active |= (1 << (ha->func_num * 4));
+       drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);
+
+       /*
+        * For ISP8324, drv_active register has 1 bit per function,
+        * shift 1 by func_num to set a bit for the function.
+        * For ISP8022, drv_active has 4 bits per function
+        */
+       if (is_qla8032(ha))
+               drv_active |= (1 << ha->func_num);
+       else
+               drv_active |= (1 << (ha->func_num * 4));
+
        ql4_printk(KERN_INFO, ha, "%s(%ld): drv_active: 0x%08x\n",
                   __func__, ha->host_no, drv_active);
-       qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active);
+       qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_ACTIVE, drv_active);
 }
 
 void
@@ -1506,50 +1533,87 @@ qla4_8xxx_clear_drv_active(struct scsi_qla_host *ha)
 {
        uint32_t drv_active;
 
-       drv_active = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
-       drv_active &= ~(1 << (ha->func_num * 4));
+       drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);
+
+       /*
+        * For ISP8324, drv_active register has 1 bit per function,
+        * shift 1 by func_num to clear the bit for the function.
+        * For ISP8022, drv_active has 4 bits per function
+        */
+       if (is_qla8032(ha))
+               drv_active &= ~(1 << (ha->func_num));
+       else
+               drv_active &= ~(1 << (ha->func_num * 4));
+
        ql4_printk(KERN_INFO, ha, "%s(%ld): drv_active: 0x%08x\n",
                   __func__, ha->host_no, drv_active);
-       qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active);
+       qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_ACTIVE, drv_active);
 }
 
-static inline int
-qla4_8xxx_need_reset(struct scsi_qla_host *ha)
+inline int qla4_8xxx_need_reset(struct scsi_qla_host *ha)
 {
        uint32_t drv_state, drv_active;
        int rval;
 
-       drv_active = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
-       drv_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
-       rval = drv_state & (1 << (ha->func_num * 4));
+       drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);
+       drv_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE);
+
+       /*
+        * For ISP8324, drv_state register has 1 bit per function;
+        * shift 1 by func_num to check the reset-ready bit for
+        * this function. For ISP8022, drv_state has 4 bits per function.
+        */
+       if (is_qla8032(ha))
+               rval = drv_state & (1 << ha->func_num);
+       else
+               rval = drv_state & (1 << (ha->func_num * 4));
+
        if ((test_bit(AF_EEH_BUSY, &ha->flags)) && drv_active)
                rval = 1;
 
        return rval;
 }
 
-static inline void
-qla4_8xxx_set_rst_ready(struct scsi_qla_host *ha)
+void qla4_8xxx_set_rst_ready(struct scsi_qla_host *ha)
 {
        uint32_t drv_state;
 
-       drv_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
-       drv_state |= (1 << (ha->func_num * 4));
+       drv_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE);
+
+       /*
+        * For ISP8324, drv_state register has 1 bit per function;
+        * shift 1 by func_num to set the bit for the function.
+        * For ISP8022, drv_state has 4 bits per function.
+        */
+       if (is_qla8032(ha))
+               drv_state |= (1 << ha->func_num);
+       else
+               drv_state |= (1 << (ha->func_num * 4));
+
        ql4_printk(KERN_INFO, ha, "%s(%ld): drv_state: 0x%08x\n",
                   __func__, ha->host_no, drv_state);
-       qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state);
+       qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_STATE, drv_state);
 }
 
-static inline void
-qla4_8xxx_clear_rst_ready(struct scsi_qla_host *ha)
+void qla4_8xxx_clear_rst_ready(struct scsi_qla_host *ha)
 {
        uint32_t drv_state;
 
-       drv_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
-       drv_state &= ~(1 << (ha->func_num * 4));
+       drv_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE);
+
+       /*
+        * For ISP8324, drv_state register has 1 bit per function;
+        * shift 1 by func_num to clear the bit for the function.
+        * For ISP8022, drv_state has 4 bits per function.
+        */
+       if (is_qla8032(ha))
+               drv_state &= ~(1 << ha->func_num);
+       else
+               drv_state &= ~(1 << (ha->func_num * 4));
+
        ql4_printk(KERN_INFO, ha, "%s(%ld): drv_state: 0x%08x\n",
                   __func__, ha->host_no, drv_state);
-       qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state);
+       qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_STATE, drv_state);
 }
 
 static inline void
@@ -1557,33 +1621,43 @@ qla4_8xxx_set_qsnt_ready(struct scsi_qla_host *ha)
 {
        uint32_t qsnt_state;
 
-       qsnt_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
-       qsnt_state |= (2 << (ha->func_num * 4));
-       qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_STATE, qsnt_state);
+       qsnt_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE);
+
+       /*
+        * For ISP8324, drv_state register has 1 bit per function;
+        * shift 1 by func_num to set the quiescent bit.
+        * For ISP8022, drv_state has 4 bits per function; shift 2
+        * to set the quiescent bit within the function's nibble.
+        */
+       if (is_qla8032(ha))
+               qsnt_state |= (1 << ha->func_num);
+       else
+               qsnt_state |= (2 << (ha->func_num * 4));
+
+       qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_STATE, qsnt_state);
 }
 
 
 static int
-qla4_8xxx_start_firmware(struct scsi_qla_host *ha, uint32_t image_start)
+qla4_82xx_start_firmware(struct scsi_qla_host *ha, uint32_t image_start)
 {
        uint16_t lnk;
 
        /* scrub dma mask expansion register */
-       qla4_8xxx_wr_32(ha, CRB_DMA_SHIFT, 0x55555555);
+       qla4_82xx_wr_32(ha, CRB_DMA_SHIFT, 0x55555555);
 
        /* Overwrite stale initialization register values */
-       qla4_8xxx_wr_32(ha, CRB_CMDPEG_STATE, 0);
-       qla4_8xxx_wr_32(ha, CRB_RCVPEG_STATE, 0);
-       qla4_8xxx_wr_32(ha, QLA82XX_PEG_HALT_STATUS1, 0);
-       qla4_8xxx_wr_32(ha, QLA82XX_PEG_HALT_STATUS2, 0);
+       qla4_82xx_wr_32(ha, CRB_CMDPEG_STATE, 0);
+       qla4_82xx_wr_32(ha, CRB_RCVPEG_STATE, 0);
+       qla4_82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS1, 0);
+       qla4_82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS2, 0);
 
-       if (qla4_8xxx_load_fw(ha, image_start) != QLA_SUCCESS) {
+       if (qla4_82xx_load_fw(ha, image_start) != QLA_SUCCESS) {
                printk("%s: Error trying to start fw!\n", __func__);
                return QLA_ERROR;
        }
 
        /* Handshake with the card before we register the devices. */
-       if (qla4_8xxx_cmdpeg_ready(ha, 0) != QLA_SUCCESS) {
+       if (qla4_82xx_cmdpeg_ready(ha, 0) != QLA_SUCCESS) {
                printk("%s: Error during card handshake!\n", __func__);
                return QLA_ERROR;
        }
@@ -1593,11 +1667,10 @@ qla4_8xxx_start_firmware(struct scsi_qla_host *ha, uint32_t image_start)
        ha->link_width = (lnk >> 4) & 0x3f;
 
        /* Synchronize with Receive peg */
-       return qla4_8xxx_rcvpeg_ready(ha);
+       return qla4_82xx_rcvpeg_ready(ha);
 }
 
-static int
-qla4_8xxx_try_start_fw(struct scsi_qla_host *ha)
+int qla4_82xx_try_start_fw(struct scsi_qla_host *ha)
 {
        int rval = QLA_ERROR;
 
@@ -1615,7 +1688,7 @@ qla4_8xxx_try_start_fw(struct scsi_qla_host *ha)
 
        ql4_printk(KERN_INFO, ha,
            "FW: Attempting to load firmware from flash...\n");
-       rval = qla4_8xxx_start_firmware(ha, ha->hw.flt_region_fw);
+       rval = qla4_82xx_start_firmware(ha, ha->hw.flt_region_fw);
 
        if (rval != QLA_SUCCESS) {
                ql4_printk(KERN_ERR, ha, "FW: Load firmware from flash"
@@ -1626,9 +1699,9 @@ qla4_8xxx_try_start_fw(struct scsi_qla_host *ha)
        return rval;
 }
 
-static void qla4_8xxx_rom_lock_recovery(struct scsi_qla_host *ha)
+void qla4_82xx_rom_lock_recovery(struct scsi_qla_host *ha)
 {
-       if (qla4_8xxx_rom_lock(ha)) {
+       if (qla4_82xx_rom_lock(ha)) {
                /* Someone else is holding the lock. */
                dev_info(&ha->pdev->dev, "Resetting rom_lock\n");
        }
@@ -1638,25 +1711,25 @@ static void qla4_8xxx_rom_lock_recovery(struct scsi_qla_host *ha)
         * else died while holding it.
         * In either case, unlock.
         */
-       qla4_8xxx_rom_unlock(ha);
+       qla4_82xx_rom_unlock(ha);
 }
 
 static void qla4_8xxx_minidump_process_rdcrb(struct scsi_qla_host *ha,
-                               struct qla82xx_minidump_entry_hdr *entry_hdr,
+                               struct qla8xxx_minidump_entry_hdr *entry_hdr,
                                uint32_t **d_ptr)
 {
        uint32_t r_addr, r_stride, loop_cnt, i, r_value;
-       struct qla82xx_minidump_entry_crb *crb_hdr;
+       struct qla8xxx_minidump_entry_crb *crb_hdr;
        uint32_t *data_ptr = *d_ptr;
 
        DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
-       crb_hdr = (struct qla82xx_minidump_entry_crb *)entry_hdr;
+       crb_hdr = (struct qla8xxx_minidump_entry_crb *)entry_hdr;
        r_addr = crb_hdr->addr;
        r_stride = crb_hdr->crb_strd.addr_stride;
        loop_cnt = crb_hdr->op_count;
 
        for (i = 0; i < loop_cnt; i++) {
-               r_value = qla4_8xxx_md_rw_32(ha, r_addr, 0, 0);
+               ha->isp_ops->rd_reg_indirect(ha, r_addr, &r_value);
                *data_ptr++ = cpu_to_le32(r_addr);
                *data_ptr++ = cpu_to_le32(r_value);
                r_addr += r_stride;
@@ -1665,19 +1738,19 @@ static void qla4_8xxx_minidump_process_rdcrb(struct scsi_qla_host *ha,
 }
 
 static int qla4_8xxx_minidump_process_l2tag(struct scsi_qla_host *ha,
-                                struct qla82xx_minidump_entry_hdr *entry_hdr,
+                                struct qla8xxx_minidump_entry_hdr *entry_hdr,
                                 uint32_t **d_ptr)
 {
        uint32_t addr, r_addr, c_addr, t_r_addr;
        uint32_t i, k, loop_count, t_value, r_cnt, r_value;
        unsigned long p_wait, w_time, p_mask;
        uint32_t c_value_w, c_value_r;
-       struct qla82xx_minidump_entry_cache *cache_hdr;
+       struct qla8xxx_minidump_entry_cache *cache_hdr;
        int rval = QLA_ERROR;
        uint32_t *data_ptr = *d_ptr;
 
        DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
-       cache_hdr = (struct qla82xx_minidump_entry_cache *)entry_hdr;
+       cache_hdr = (struct qla8xxx_minidump_entry_cache *)entry_hdr;
 
        loop_count = cache_hdr->op_count;
        r_addr = cache_hdr->read_addr;
@@ -1691,16 +1764,16 @@ static int qla4_8xxx_minidump_process_l2tag(struct scsi_qla_host *ha,
        p_mask = cache_hdr->cache_ctrl.poll_mask;
 
        for (i = 0; i < loop_count; i++) {
-               qla4_8xxx_md_rw_32(ha, t_r_addr, t_value, 1);
+               ha->isp_ops->wr_reg_indirect(ha, t_r_addr, t_value);
 
                if (c_value_w)
-                       qla4_8xxx_md_rw_32(ha, c_addr, c_value_w, 1);
+                       ha->isp_ops->wr_reg_indirect(ha, c_addr, c_value_w);
 
                if (p_mask) {
                        w_time = jiffies + p_wait;
                        do {
-                               c_value_r = qla4_8xxx_md_rw_32(ha, c_addr,
-                                                               0, 0);
+                               ha->isp_ops->rd_reg_indirect(ha, c_addr,
+                                                            &c_value_r);
                                if ((c_value_r & p_mask) == 0) {
                                        break;
                                } else if (time_after_eq(jiffies, w_time)) {
@@ -1712,7 +1785,7 @@ static int qla4_8xxx_minidump_process_l2tag(struct scsi_qla_host *ha,
 
                addr = r_addr;
                for (k = 0; k < r_cnt; k++) {
-                       r_value = qla4_8xxx_md_rw_32(ha, addr, 0, 0);
+                       ha->isp_ops->rd_reg_indirect(ha, addr, &r_value);
                        *data_ptr++ = cpu_to_le32(r_value);
                        addr += cache_hdr->read_ctrl.read_addr_stride;
                }
@@ -1724,9 +1797,9 @@ static int qla4_8xxx_minidump_process_l2tag(struct scsi_qla_host *ha,
 }
 
 static int qla4_8xxx_minidump_process_control(struct scsi_qla_host *ha,
-                               struct qla82xx_minidump_entry_hdr *entry_hdr)
+                               struct qla8xxx_minidump_entry_hdr *entry_hdr)
 {
-       struct qla82xx_minidump_entry_crb *crb_entry;
+       struct qla8xxx_minidump_entry_crb *crb_entry;
        uint32_t read_value, opcode, poll_time, addr, index, rval = QLA_SUCCESS;
        uint32_t crb_addr;
        unsigned long wtime;
@@ -1736,58 +1809,59 @@ static int qla4_8xxx_minidump_process_control(struct scsi_qla_host *ha,
        DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
        tmplt_hdr = (struct qla4_8xxx_minidump_template_hdr *)
                                                ha->fw_dump_tmplt_hdr;
-       crb_entry = (struct qla82xx_minidump_entry_crb *)entry_hdr;
+       crb_entry = (struct qla8xxx_minidump_entry_crb *)entry_hdr;
 
        crb_addr = crb_entry->addr;
        for (i = 0; i < crb_entry->op_count; i++) {
                opcode = crb_entry->crb_ctrl.opcode;
-               if (opcode & QLA82XX_DBG_OPCODE_WR) {
-                       qla4_8xxx_md_rw_32(ha, crb_addr,
-                                          crb_entry->value_1, 1);
-                       opcode &= ~QLA82XX_DBG_OPCODE_WR;
+               if (opcode & QLA8XXX_DBG_OPCODE_WR) {
+                       ha->isp_ops->wr_reg_indirect(ha, crb_addr,
+                                                    crb_entry->value_1);
+                       opcode &= ~QLA8XXX_DBG_OPCODE_WR;
                }
-               if (opcode & QLA82XX_DBG_OPCODE_RW) {
-                       read_value = qla4_8xxx_md_rw_32(ha, crb_addr, 0, 0);
-                       qla4_8xxx_md_rw_32(ha, crb_addr, read_value, 1);
-                       opcode &= ~QLA82XX_DBG_OPCODE_RW;
+               if (opcode & QLA8XXX_DBG_OPCODE_RW) {
+                       ha->isp_ops->rd_reg_indirect(ha, crb_addr, &read_value);
+                       ha->isp_ops->wr_reg_indirect(ha, crb_addr, read_value);
+                       opcode &= ~QLA8XXX_DBG_OPCODE_RW;
                }
-               if (opcode & QLA82XX_DBG_OPCODE_AND) {
-                       read_value = qla4_8xxx_md_rw_32(ha, crb_addr, 0, 0);
+               if (opcode & QLA8XXX_DBG_OPCODE_AND) {
+                       ha->isp_ops->rd_reg_indirect(ha, crb_addr, &read_value);
                        read_value &= crb_entry->value_2;
-                       opcode &= ~QLA82XX_DBG_OPCODE_AND;
-                       if (opcode & QLA82XX_DBG_OPCODE_OR) {
+                       opcode &= ~QLA8XXX_DBG_OPCODE_AND;
+                       if (opcode & QLA8XXX_DBG_OPCODE_OR) {
                                read_value |= crb_entry->value_3;
-                               opcode &= ~QLA82XX_DBG_OPCODE_OR;
+                               opcode &= ~QLA8XXX_DBG_OPCODE_OR;
                        }
-                       qla4_8xxx_md_rw_32(ha, crb_addr, read_value, 1);
+                       ha->isp_ops->wr_reg_indirect(ha, crb_addr, read_value);
                }
-               if (opcode & QLA82XX_DBG_OPCODE_OR) {
-                       read_value = qla4_8xxx_md_rw_32(ha, crb_addr, 0, 0);
+               if (opcode & QLA8XXX_DBG_OPCODE_OR) {
+                       ha->isp_ops->rd_reg_indirect(ha, crb_addr, &read_value);
                        read_value |= crb_entry->value_3;
-                       qla4_8xxx_md_rw_32(ha, crb_addr, read_value, 1);
-                       opcode &= ~QLA82XX_DBG_OPCODE_OR;
+                       ha->isp_ops->wr_reg_indirect(ha, crb_addr, read_value);
+                       opcode &= ~QLA8XXX_DBG_OPCODE_OR;
                }
-               if (opcode & QLA82XX_DBG_OPCODE_POLL) {
+               if (opcode & QLA8XXX_DBG_OPCODE_POLL) {
                        poll_time = crb_entry->crb_strd.poll_timeout;
                        wtime = jiffies + poll_time;
-                       read_value = qla4_8xxx_md_rw_32(ha, crb_addr, 0, 0);
+                       ha->isp_ops->rd_reg_indirect(ha, crb_addr, &read_value);
 
                        do {
                                if ((read_value & crb_entry->value_2) ==
-                                   crb_entry->value_1)
+                                   crb_entry->value_1) {
                                        break;
-                               else if (time_after_eq(jiffies, wtime)) {
+                               else if (time_after_eq(jiffies, wtime)) {
                                        /* capturing dump failed */
                                        rval = QLA_ERROR;
                                        break;
-                               } else
-                                       read_value = qla4_8xxx_md_rw_32(ha,
-                                                               crb_addr, 0, 0);
+                               } else {
+                                       ha->isp_ops->rd_reg_indirect(ha,
+                                                       crb_addr, &read_value);
+                               }
                        } while (1);
-                       opcode &= ~QLA82XX_DBG_OPCODE_POLL;
+                       opcode &= ~QLA8XXX_DBG_OPCODE_POLL;
                }
 
-               if (opcode & QLA82XX_DBG_OPCODE_RDSTATE) {
+               if (opcode & QLA8XXX_DBG_OPCODE_RDSTATE) {
                        if (crb_entry->crb_strd.state_index_a) {
                                index = crb_entry->crb_strd.state_index_a;
                                addr = tmplt_hdr->saved_state_array[index];
@@ -1795,13 +1869,13 @@ static int qla4_8xxx_minidump_process_control(struct scsi_qla_host *ha,
                                addr = crb_addr;
                        }
 
-                       read_value = qla4_8xxx_md_rw_32(ha, addr, 0, 0);
+                       ha->isp_ops->rd_reg_indirect(ha, addr, &read_value);
                        index = crb_entry->crb_ctrl.state_index_v;
                        tmplt_hdr->saved_state_array[index] = read_value;
-                       opcode &= ~QLA82XX_DBG_OPCODE_RDSTATE;
+                       opcode &= ~QLA8XXX_DBG_OPCODE_RDSTATE;
                }
 
-               if (opcode & QLA82XX_DBG_OPCODE_WRSTATE) {
+               if (opcode & QLA8XXX_DBG_OPCODE_WRSTATE) {
                        if (crb_entry->crb_strd.state_index_a) {
                                index = crb_entry->crb_strd.state_index_a;
                                addr = tmplt_hdr->saved_state_array[index];
@@ -1817,11 +1891,11 @@ static int qla4_8xxx_minidump_process_control(struct scsi_qla_host *ha,
                                read_value = crb_entry->value_1;
                        }
 
-                       qla4_8xxx_md_rw_32(ha, addr, read_value, 1);
-                       opcode &= ~QLA82XX_DBG_OPCODE_WRSTATE;
+                       ha->isp_ops->wr_reg_indirect(ha, addr, read_value);
+                       opcode &= ~QLA8XXX_DBG_OPCODE_WRSTATE;
                }
 
-               if (opcode & QLA82XX_DBG_OPCODE_MDSTATE) {
+               if (opcode & QLA8XXX_DBG_OPCODE_MDSTATE) {
                        index = crb_entry->crb_ctrl.state_index_v;
                        read_value = tmplt_hdr->saved_state_array[index];
                        read_value <<= crb_entry->crb_ctrl.shl;
@@ -1831,7 +1905,7 @@ static int qla4_8xxx_minidump_process_control(struct scsi_qla_host *ha,
                        read_value |= crb_entry->value_3;
                        read_value += crb_entry->value_1;
                        tmplt_hdr->saved_state_array[index] = read_value;
-                       opcode &= ~QLA82XX_DBG_OPCODE_MDSTATE;
+                       opcode &= ~QLA8XXX_DBG_OPCODE_MDSTATE;
                }
                crb_addr += crb_entry->crb_strd.addr_stride;
        }
@@ -1840,15 +1914,15 @@ static int qla4_8xxx_minidump_process_control(struct scsi_qla_host *ha,
 }
 
 static void qla4_8xxx_minidump_process_rdocm(struct scsi_qla_host *ha,
-                               struct qla82xx_minidump_entry_hdr *entry_hdr,
+                               struct qla8xxx_minidump_entry_hdr *entry_hdr,
                                uint32_t **d_ptr)
 {
        uint32_t r_addr, r_stride, loop_cnt, i, r_value;
-       struct qla82xx_minidump_entry_rdocm *ocm_hdr;
+       struct qla8xxx_minidump_entry_rdocm *ocm_hdr;
        uint32_t *data_ptr = *d_ptr;
 
        DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
-       ocm_hdr = (struct qla82xx_minidump_entry_rdocm *)entry_hdr;
+       ocm_hdr = (struct qla8xxx_minidump_entry_rdocm *)entry_hdr;
        r_addr = ocm_hdr->read_addr;
        r_stride = ocm_hdr->read_addr_stride;
        loop_cnt = ocm_hdr->op_count;
@@ -1863,20 +1937,20 @@ static void qla4_8xxx_minidump_process_rdocm(struct scsi_qla_host *ha,
                r_addr += r_stride;
        }
        DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s datacount: 0x%lx\n",
-                         __func__, (loop_cnt * sizeof(uint32_t))));
+               __func__, (long unsigned int) (loop_cnt * sizeof(uint32_t))));
        *d_ptr = data_ptr;
 }
 
 static void qla4_8xxx_minidump_process_rdmux(struct scsi_qla_host *ha,
-                               struct qla82xx_minidump_entry_hdr *entry_hdr,
+                               struct qla8xxx_minidump_entry_hdr *entry_hdr,
                                uint32_t **d_ptr)
 {
        uint32_t r_addr, s_stride, s_addr, s_value, loop_cnt, i, r_value;
-       struct qla82xx_minidump_entry_mux *mux_hdr;
+       struct qla8xxx_minidump_entry_mux *mux_hdr;
        uint32_t *data_ptr = *d_ptr;
 
        DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
-       mux_hdr = (struct qla82xx_minidump_entry_mux *)entry_hdr;
+       mux_hdr = (struct qla8xxx_minidump_entry_mux *)entry_hdr;
        r_addr = mux_hdr->read_addr;
        s_addr = mux_hdr->select_addr;
        s_stride = mux_hdr->select_value_stride;
@@ -1884,8 +1958,8 @@ static void qla4_8xxx_minidump_process_rdmux(struct scsi_qla_host *ha,
        loop_cnt = mux_hdr->op_count;
 
        for (i = 0; i < loop_cnt; i++) {
-               qla4_8xxx_md_rw_32(ha, s_addr, s_value, 1);
-               r_value = qla4_8xxx_md_rw_32(ha, r_addr, 0, 0);
+               ha->isp_ops->wr_reg_indirect(ha, s_addr, s_value);
+               ha->isp_ops->rd_reg_indirect(ha, r_addr, &r_value);
                *data_ptr++ = cpu_to_le32(s_value);
                *data_ptr++ = cpu_to_le32(r_value);
                s_value += s_stride;
@@ -1894,16 +1968,16 @@ static void qla4_8xxx_minidump_process_rdmux(struct scsi_qla_host *ha,
 }
 
 static void qla4_8xxx_minidump_process_l1cache(struct scsi_qla_host *ha,
-                               struct qla82xx_minidump_entry_hdr *entry_hdr,
+                               struct qla8xxx_minidump_entry_hdr *entry_hdr,
                                uint32_t **d_ptr)
 {
        uint32_t addr, r_addr, c_addr, t_r_addr;
        uint32_t i, k, loop_count, t_value, r_cnt, r_value;
        uint32_t c_value_w;
-       struct qla82xx_minidump_entry_cache *cache_hdr;
+       struct qla8xxx_minidump_entry_cache *cache_hdr;
        uint32_t *data_ptr = *d_ptr;
 
-       cache_hdr = (struct qla82xx_minidump_entry_cache *)entry_hdr;
+       cache_hdr = (struct qla8xxx_minidump_entry_cache *)entry_hdr;
        loop_count = cache_hdr->op_count;
        r_addr = cache_hdr->read_addr;
        c_addr = cache_hdr->control_addr;
@@ -1914,11 +1988,11 @@ static void qla4_8xxx_minidump_process_l1cache(struct scsi_qla_host *ha,
        r_cnt = cache_hdr->read_ctrl.read_addr_cnt;
 
        for (i = 0; i < loop_count; i++) {
-               qla4_8xxx_md_rw_32(ha, t_r_addr, t_value, 1);
-               qla4_8xxx_md_rw_32(ha, c_addr, c_value_w, 1);
+               ha->isp_ops->wr_reg_indirect(ha, t_r_addr, t_value);
+               ha->isp_ops->wr_reg_indirect(ha, c_addr, c_value_w);
                addr = r_addr;
                for (k = 0; k < r_cnt; k++) {
-                       r_value = qla4_8xxx_md_rw_32(ha, addr, 0, 0);
+                       ha->isp_ops->rd_reg_indirect(ha, addr, &r_value);
                        *data_ptr++ = cpu_to_le32(r_value);
                        addr += cache_hdr->read_ctrl.read_addr_stride;
                }
@@ -1928,27 +2002,27 @@ static void qla4_8xxx_minidump_process_l1cache(struct scsi_qla_host *ha,
 }
 
 static void qla4_8xxx_minidump_process_queue(struct scsi_qla_host *ha,
-                               struct qla82xx_minidump_entry_hdr *entry_hdr,
+                               struct qla8xxx_minidump_entry_hdr *entry_hdr,
                                uint32_t **d_ptr)
 {
        uint32_t s_addr, r_addr;
        uint32_t r_stride, r_value, r_cnt, qid = 0;
        uint32_t i, k, loop_cnt;
-       struct qla82xx_minidump_entry_queue *q_hdr;
+       struct qla8xxx_minidump_entry_queue *q_hdr;
        uint32_t *data_ptr = *d_ptr;
 
        DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
-       q_hdr = (struct qla82xx_minidump_entry_queue *)entry_hdr;
+       q_hdr = (struct qla8xxx_minidump_entry_queue *)entry_hdr;
        s_addr = q_hdr->select_addr;
        r_cnt = q_hdr->rd_strd.read_addr_cnt;
        r_stride = q_hdr->rd_strd.read_addr_stride;
        loop_cnt = q_hdr->op_count;
 
        for (i = 0; i < loop_cnt; i++) {
-               qla4_8xxx_md_rw_32(ha, s_addr, qid, 1);
+               ha->isp_ops->wr_reg_indirect(ha, s_addr, qid);
                r_addr = q_hdr->read_addr;
                for (k = 0; k < r_cnt; k++) {
-                       r_value = qla4_8xxx_md_rw_32(ha, r_addr, 0, 0);
+                       ha->isp_ops->rd_reg_indirect(ha, r_addr, &r_value);
                        *data_ptr++ = cpu_to_le32(r_value);
                        r_addr += r_stride;
                }
@@ -1960,17 +2034,17 @@ static void qla4_8xxx_minidump_process_queue(struct scsi_qla_host *ha,
 #define MD_DIRECT_ROM_WINDOW           0x42110030
 #define MD_DIRECT_ROM_READ_BASE                0x42150000
 
-static void qla4_8xxx_minidump_process_rdrom(struct scsi_qla_host *ha,
-                               struct qla82xx_minidump_entry_hdr *entry_hdr,
+static void qla4_82xx_minidump_process_rdrom(struct scsi_qla_host *ha,
+                               struct qla8xxx_minidump_entry_hdr *entry_hdr,
                                uint32_t **d_ptr)
 {
        uint32_t r_addr, r_value;
        uint32_t i, loop_cnt;
-       struct qla82xx_minidump_entry_rdrom *rom_hdr;
+       struct qla8xxx_minidump_entry_rdrom *rom_hdr;
        uint32_t *data_ptr = *d_ptr;
 
        DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
-       rom_hdr = (struct qla82xx_minidump_entry_rdrom *)entry_hdr;
+       rom_hdr = (struct qla8xxx_minidump_entry_rdrom *)entry_hdr;
        r_addr = rom_hdr->read_addr;
        loop_cnt = rom_hdr->read_data_size/sizeof(uint32_t);
 
@@ -1979,11 +2053,11 @@ static void qla4_8xxx_minidump_process_rdrom(struct scsi_qla_host *ha,
                           __func__, r_addr, loop_cnt));
 
        for (i = 0; i < loop_cnt; i++) {
-               qla4_8xxx_md_rw_32(ha, MD_DIRECT_ROM_WINDOW,
-                                  (r_addr & 0xFFFF0000), 1);
-               r_value = qla4_8xxx_md_rw_32(ha,
-                                            MD_DIRECT_ROM_READ_BASE +
-                                            (r_addr & 0x0000FFFF), 0, 0);
+               ha->isp_ops->wr_reg_indirect(ha, MD_DIRECT_ROM_WINDOW,
+                                            (r_addr & 0xFFFF0000));
+               ha->isp_ops->rd_reg_indirect(ha,
+                               MD_DIRECT_ROM_READ_BASE + (r_addr & 0x0000FFFF),
+                               &r_value);
                *data_ptr++ = cpu_to_le32(r_value);
                r_addr += sizeof(uint32_t);
        }
@@ -1995,17 +2069,17 @@ static void qla4_8xxx_minidump_process_rdrom(struct scsi_qla_host *ha,
 #define MD_MIU_TEST_AGT_ADDR_HI                0x41000098
 
 static int qla4_8xxx_minidump_process_rdmem(struct scsi_qla_host *ha,
-                               struct qla82xx_minidump_entry_hdr *entry_hdr,
+                               struct qla8xxx_minidump_entry_hdr *entry_hdr,
                                uint32_t **d_ptr)
 {
        uint32_t r_addr, r_value, r_data;
        uint32_t i, j, loop_cnt;
-       struct qla82xx_minidump_entry_rdmem *m_hdr;
+       struct qla8xxx_minidump_entry_rdmem *m_hdr;
        unsigned long flags;
        uint32_t *data_ptr = *d_ptr;
 
        DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
-       m_hdr = (struct qla82xx_minidump_entry_rdmem *)entry_hdr;
+       m_hdr = (struct qla8xxx_minidump_entry_rdmem *)entry_hdr;
        r_addr = m_hdr->read_addr;
        loop_cnt = m_hdr->read_data_size/16;
 
@@ -2033,17 +2107,19 @@ static int qla4_8xxx_minidump_process_rdmem(struct scsi_qla_host *ha,
 
        write_lock_irqsave(&ha->hw_lock, flags);
        for (i = 0; i < loop_cnt; i++) {
-               qla4_8xxx_md_rw_32(ha, MD_MIU_TEST_AGT_ADDR_LO, r_addr, 1);
+               ha->isp_ops->wr_reg_indirect(ha, MD_MIU_TEST_AGT_ADDR_LO,
+                                            r_addr);
                r_value = 0;
-               qla4_8xxx_md_rw_32(ha, MD_MIU_TEST_AGT_ADDR_HI, r_value, 1);
+               ha->isp_ops->wr_reg_indirect(ha, MD_MIU_TEST_AGT_ADDR_HI,
+                                            r_value);
                r_value = MIU_TA_CTL_ENABLE;
-               qla4_8xxx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL, r_value, 1);
-               r_value = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE;
-               qla4_8xxx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL, r_value, 1);
+               ha->isp_ops->wr_reg_indirect(ha, MD_MIU_TEST_AGT_CTRL, r_value);
+               r_value = MIU_TA_CTL_START_ENABLE;
+               ha->isp_ops->wr_reg_indirect(ha, MD_MIU_TEST_AGT_CTRL, r_value);
 
                for (j = 0; j < MAX_CTL_CHECK; j++) {
-                       r_value = qla4_8xxx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL,
-                                                    0, 0);
+                       ha->isp_ops->rd_reg_indirect(ha, MD_MIU_TEST_AGT_CTRL,
+                                                    &r_value);
                        if ((r_value & MIU_TA_CTL_BUSY) == 0)
                                break;
                }
@@ -2057,9 +2133,9 @@ static int qla4_8xxx_minidump_process_rdmem(struct scsi_qla_host *ha,
                }
 
                for (j = 0; j < 4; j++) {
-                       r_data = qla4_8xxx_md_rw_32(ha,
-                                                   MD_MIU_TEST_AGT_RDDATA[j],
-                                                   0, 0);
+                       ha->isp_ops->rd_reg_indirect(ha,
+                                                    MD_MIU_TEST_AGT_RDDATA[j],
+                                                    &r_data);
                        *data_ptr++ = cpu_to_le32(r_data);
                }
 
@@ -2074,25 +2150,215 @@ static int qla4_8xxx_minidump_process_rdmem(struct scsi_qla_host *ha,
        return QLA_SUCCESS;
 }
 
-static void ql4_8xxx_mark_entry_skipped(struct scsi_qla_host *ha,
-                               struct qla82xx_minidump_entry_hdr *entry_hdr,
+static void qla4_8xxx_mark_entry_skipped(struct scsi_qla_host *ha,
+                               struct qla8xxx_minidump_entry_hdr *entry_hdr,
                                int index)
 {
-       entry_hdr->d_ctrl.driver_flags |= QLA82XX_DBG_SKIPPED_FLAG;
+       entry_hdr->d_ctrl.driver_flags |= QLA8XXX_DBG_SKIPPED_FLAG;
        DEBUG2(ql4_printk(KERN_INFO, ha,
                          "scsi(%ld): Skipping entry[%d]: ETYPE[0x%x]-ELEVEL[0x%x]\n",
                          ha->host_no, index, entry_hdr->entry_type,
                          entry_hdr->d_ctrl.entry_capture_mask));
 }
 
+/* ISP83xx functions to process new minidump entries... */
+static uint32_t qla83xx_minidump_process_pollrd(struct scsi_qla_host *ha,
+                               struct qla8xxx_minidump_entry_hdr *entry_hdr,
+                               uint32_t **d_ptr)
+{
+       uint32_t r_addr, s_addr, s_value, r_value, poll_wait, poll_mask;
+       uint16_t s_stride, i;
+       uint32_t *data_ptr = *d_ptr;
+       uint32_t rval = QLA_SUCCESS;
+       struct qla83xx_minidump_entry_pollrd *pollrd_hdr;
+
+       pollrd_hdr = (struct qla83xx_minidump_entry_pollrd *)entry_hdr;
+       s_addr = le32_to_cpu(pollrd_hdr->select_addr);
+       r_addr = le32_to_cpu(pollrd_hdr->read_addr);
+       s_value = le32_to_cpu(pollrd_hdr->select_value);
+       s_stride = le32_to_cpu(pollrd_hdr->select_value_stride);
+
+       poll_wait = le32_to_cpu(pollrd_hdr->poll_wait);
+       poll_mask = le32_to_cpu(pollrd_hdr->poll_mask);
+
+       for (i = 0; i < le32_to_cpu(pollrd_hdr->op_count); i++) {
+               ha->isp_ops->wr_reg_indirect(ha, s_addr, s_value);
+               poll_wait = le32_to_cpu(pollrd_hdr->poll_wait);
+               while (1) {
+                       ha->isp_ops->rd_reg_indirect(ha, s_addr, &r_value);
+
+                       if ((r_value & poll_mask) != 0) {
+                               break;
+                       } else {
+                               msleep(1);
+                               if (--poll_wait == 0) {
+                                       ql4_printk(KERN_ERR, ha, "%s: TIMEOUT\n",
+                                                  __func__);
+                                       rval = QLA_ERROR;
+                                       goto exit_process_pollrd;
+                               }
+                       }
+               }
+               ha->isp_ops->rd_reg_indirect(ha, r_addr, &r_value);
+               *data_ptr++ = cpu_to_le32(s_value);
+               *data_ptr++ = cpu_to_le32(r_value);
+               s_value += s_stride;
+       }
+
+       *d_ptr = data_ptr;
+
+exit_process_pollrd:
+       return rval;
+}
+
+static void qla83xx_minidump_process_rdmux2(struct scsi_qla_host *ha,
+                               struct qla8xxx_minidump_entry_hdr *entry_hdr,
+                               uint32_t **d_ptr)
+{
+       uint32_t sel_val1, sel_val2, t_sel_val, data, i;
+       uint32_t sel_addr1, sel_addr2, sel_val_mask, read_addr;
+       struct qla83xx_minidump_entry_rdmux2 *rdmux2_hdr;
+       uint32_t *data_ptr = *d_ptr;
+
+       rdmux2_hdr = (struct qla83xx_minidump_entry_rdmux2 *)entry_hdr;
+       sel_val1 = le32_to_cpu(rdmux2_hdr->select_value_1);
+       sel_val2 = le32_to_cpu(rdmux2_hdr->select_value_2);
+       sel_addr1 = le32_to_cpu(rdmux2_hdr->select_addr_1);
+       sel_addr2 = le32_to_cpu(rdmux2_hdr->select_addr_2);
+       sel_val_mask = le32_to_cpu(rdmux2_hdr->select_value_mask);
+       read_addr = le32_to_cpu(rdmux2_hdr->read_addr);
+
+       for (i = 0; i < rdmux2_hdr->op_count; i++) {
+               ha->isp_ops->wr_reg_indirect(ha, sel_addr1, sel_val1);
+               t_sel_val = sel_val1 & sel_val_mask;
+               *data_ptr++ = cpu_to_le32(t_sel_val);
+
+               ha->isp_ops->wr_reg_indirect(ha, sel_addr2, t_sel_val);
+               ha->isp_ops->rd_reg_indirect(ha, read_addr, &data);
+
+               *data_ptr++ = cpu_to_le32(data);
+
+               ha->isp_ops->wr_reg_indirect(ha, sel_addr1, sel_val2);
+               t_sel_val = sel_val2 & sel_val_mask;
+               *data_ptr++ = cpu_to_le32(t_sel_val);
+
+               ha->isp_ops->wr_reg_indirect(ha, sel_addr2, t_sel_val);
+               ha->isp_ops->rd_reg_indirect(ha, read_addr, &data);
+
+               *data_ptr++ = cpu_to_le32(data);
+
+               sel_val1 += rdmux2_hdr->select_value_stride;
+               sel_val2 += rdmux2_hdr->select_value_stride;
+       }
+
+       *d_ptr = data_ptr;
+}
+
+static uint32_t qla83xx_minidump_process_pollrdmwr(struct scsi_qla_host *ha,
+                               struct qla8xxx_minidump_entry_hdr *entry_hdr,
+                               uint32_t **d_ptr)
+{
+       uint32_t poll_wait, poll_mask, r_value, data;
+       uint32_t addr_1, addr_2, value_1, value_2;
+       uint32_t *data_ptr = *d_ptr;
+       uint32_t rval = QLA_SUCCESS;
+       struct qla83xx_minidump_entry_pollrdmwr *poll_hdr;
+
+       poll_hdr = (struct qla83xx_minidump_entry_pollrdmwr *)entry_hdr;
+       addr_1 = le32_to_cpu(poll_hdr->addr_1);
+       addr_2 = le32_to_cpu(poll_hdr->addr_2);
+       value_1 = le32_to_cpu(poll_hdr->value_1);
+       value_2 = le32_to_cpu(poll_hdr->value_2);
+       poll_mask = le32_to_cpu(poll_hdr->poll_mask);
+
+       ha->isp_ops->wr_reg_indirect(ha, addr_1, value_1);
+
+       poll_wait = le32_to_cpu(poll_hdr->poll_wait);
+       while (1) {
+               ha->isp_ops->rd_reg_indirect(ha, addr_1, &r_value);
+
+               if ((r_value & poll_mask) != 0) {
+                       break;
+               } else {
+                       msleep(1);
+                       if (--poll_wait == 0) {
+                               ql4_printk(KERN_ERR, ha, "%s: TIMEOUT_1\n",
+                                          __func__);
+                               rval = QLA_ERROR;
+                               goto exit_process_pollrdmwr;
+                       }
+               }
+       }
+
+       ha->isp_ops->rd_reg_indirect(ha, addr_2, &data);
+       data &= le32_to_cpu(poll_hdr->modify_mask);
+       ha->isp_ops->wr_reg_indirect(ha, addr_2, data);
+       ha->isp_ops->wr_reg_indirect(ha, addr_1, value_2);
+
+       poll_wait = le32_to_cpu(poll_hdr->poll_wait);
+       while (1) {
+               ha->isp_ops->rd_reg_indirect(ha, addr_1, &r_value);
+
+               if ((r_value & poll_mask) != 0) {
+                       break;
+               } else {
+                       msleep(1);
+                       if (--poll_wait == 0) {
+                               ql4_printk(KERN_ERR, ha, "%s: TIMEOUT_2\n",
+                                          __func__);
+                               rval = QLA_ERROR;
+                               goto exit_process_pollrdmwr;
+                       }
+               }
+       }
+
+       *data_ptr++ = cpu_to_le32(addr_2);
+       *data_ptr++ = cpu_to_le32(data);
+       *d_ptr = data_ptr;
+
+exit_process_pollrdmwr:
+       return rval;
+}
+
+static uint32_t qla4_83xx_minidump_process_rdrom(struct scsi_qla_host *ha,
+                               struct qla8xxx_minidump_entry_hdr *entry_hdr,
+                               uint32_t **d_ptr)
+{
+       uint32_t fl_addr, u32_count, rval;
+       struct qla8xxx_minidump_entry_rdrom *rom_hdr;
+       uint32_t *data_ptr = *d_ptr;
+
+       rom_hdr = (struct qla8xxx_minidump_entry_rdrom *)entry_hdr;
+       fl_addr = le32_to_cpu(rom_hdr->read_addr);
+       u32_count = le32_to_cpu(rom_hdr->read_data_size)/sizeof(uint32_t);
+
+       DEBUG2(ql4_printk(KERN_INFO, ha, "[%s]: fl_addr: 0x%x, count: 0x%x\n",
+                         __func__, fl_addr, u32_count));
+
+       rval = qla4_83xx_lockless_flash_read_u32(ha, fl_addr,
+                                                (u8 *)(data_ptr), u32_count);
+
+       if (rval == QLA_ERROR) {
+               ql4_printk(KERN_ERR, ha, "%s: Flash Read Error,Count=%d\n",
+                          __func__, u32_count);
+               goto exit_process_rdrom;
+       }
+
+       data_ptr += u32_count;
+       *d_ptr = data_ptr;
+
+exit_process_rdrom:
+       return rval;
+}
+
 /**
- * qla82xx_collect_md_data - Retrieve firmware minidump data.
+ * qla4_8xxx_collect_md_data - Retrieve firmware minidump data.
  * @ha: pointer to adapter structure
  **/
 static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha)
 {
        int num_entry_hdr = 0;
-       struct qla82xx_minidump_entry_hdr *entry_hdr;
+       struct qla8xxx_minidump_entry_hdr *entry_hdr;
        struct qla4_8xxx_minidump_template_hdr *tmplt_hdr;
        uint32_t *data_ptr;
        uint32_t data_collected = 0;
@@ -2128,10 +2394,14 @@ static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha)
        timestamp = (u32)(jiffies_to_msecs(now) / 1000);
        tmplt_hdr->driver_timestamp = timestamp;
 
-       entry_hdr = (struct qla82xx_minidump_entry_hdr *)
+       entry_hdr = (struct qla8xxx_minidump_entry_hdr *)
                                        (((uint8_t *)ha->fw_dump_tmplt_hdr) +
                                         tmplt_hdr->first_entry_offset);
 
+       if (is_qla8032(ha))
+               tmplt_hdr->saved_state_array[QLA83XX_SS_OCM_WNDREG_INDEX] =
+                                       tmplt_hdr->ocm_window_reg[ha->func_num];
+
        /* Walk through the entry headers - validate/perform required action */
        for (i = 0; i < num_entry_hdr; i++) {
                if (data_collected >= ha->fw_dump_size) {
@@ -2144,7 +2414,7 @@ static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha)
                if (!(entry_hdr->d_ctrl.entry_capture_mask &
                      ha->fw_dump_capture_mask)) {
                        entry_hdr->d_ctrl.driver_flags |=
-                                               QLA82XX_DBG_SKIPPED_FLAG;
+                                               QLA8XXX_DBG_SKIPPED_FLAG;
                        goto skip_nxt_entry;
                }
 
@@ -2157,65 +2427,105 @@ static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha)
                 * debug data
                 */
                switch (entry_hdr->entry_type) {
-               case QLA82XX_RDEND:
-                       ql4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+               case QLA8XXX_RDEND:
+                       qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
                        break;
-               case QLA82XX_CNTRL:
+               case QLA8XXX_CNTRL:
                        rval = qla4_8xxx_minidump_process_control(ha,
                                                                  entry_hdr);
                        if (rval != QLA_SUCCESS) {
-                               ql4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+                               qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
                                goto md_failed;
                        }
                        break;
-               case QLA82XX_RDCRB:
+               case QLA8XXX_RDCRB:
                        qla4_8xxx_minidump_process_rdcrb(ha, entry_hdr,
                                                         &data_ptr);
                        break;
-               case QLA82XX_RDMEM:
+               case QLA8XXX_RDMEM:
                        rval = qla4_8xxx_minidump_process_rdmem(ha, entry_hdr,
                                                                &data_ptr);
                        if (rval != QLA_SUCCESS) {
-                               ql4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+                               qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
                                goto md_failed;
                        }
                        break;
-               case QLA82XX_BOARD:
-               case QLA82XX_RDROM:
-                       qla4_8xxx_minidump_process_rdrom(ha, entry_hdr,
-                                                        &data_ptr);
+               case QLA8XXX_BOARD:
+               case QLA8XXX_RDROM:
+                       if (is_qla8022(ha)) {
+                               qla4_82xx_minidump_process_rdrom(ha, entry_hdr,
+                                                                &data_ptr);
+                       } else if (is_qla8032(ha)) {
+                               rval = qla4_83xx_minidump_process_rdrom(ha,
+                                                                   entry_hdr,
+                                                                   &data_ptr);
+                               if (rval != QLA_SUCCESS)
+                                       qla4_8xxx_mark_entry_skipped(ha,
+                                                                    entry_hdr,
+                                                                    i);
+                       }
                        break;
-               case QLA82XX_L2DTG:
-               case QLA82XX_L2ITG:
-               case QLA82XX_L2DAT:
-               case QLA82XX_L2INS:
+               case QLA8XXX_L2DTG:
+               case QLA8XXX_L2ITG:
+               case QLA8XXX_L2DAT:
+               case QLA8XXX_L2INS:
                        rval = qla4_8xxx_minidump_process_l2tag(ha, entry_hdr,
                                                                &data_ptr);
                        if (rval != QLA_SUCCESS) {
-                               ql4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+                               qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
                                goto md_failed;
                        }
                        break;
-               case QLA82XX_L1DAT:
-               case QLA82XX_L1INS:
+               case QLA8XXX_L1DTG:
+               case QLA8XXX_L1ITG:
+               case QLA8XXX_L1DAT:
+               case QLA8XXX_L1INS:
                        qla4_8xxx_minidump_process_l1cache(ha, entry_hdr,
                                                           &data_ptr);
                        break;
-               case QLA82XX_RDOCM:
+               case QLA8XXX_RDOCM:
                        qla4_8xxx_minidump_process_rdocm(ha, entry_hdr,
                                                         &data_ptr);
                        break;
-               case QLA82XX_RDMUX:
+               case QLA8XXX_RDMUX:
                        qla4_8xxx_minidump_process_rdmux(ha, entry_hdr,
                                                         &data_ptr);
                        break;
-               case QLA82XX_QUEUE:
+               case QLA8XXX_QUEUE:
                        qla4_8xxx_minidump_process_queue(ha, entry_hdr,
                                                         &data_ptr);
                        break;
-               case QLA82XX_RDNOP:
+               case QLA83XX_POLLRD:
+                       if (!is_qla8032(ha)) {
+                               qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+                               break;
+                       }
+                       rval = qla83xx_minidump_process_pollrd(ha, entry_hdr,
+                                                              &data_ptr);
+                       if (rval != QLA_SUCCESS)
+                               qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+                       break;
+               case QLA83XX_RDMUX2:
+                       if (!is_qla8032(ha)) {
+                               qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+                               break;
+                       }
+                       qla83xx_minidump_process_rdmux2(ha, entry_hdr,
+                                                       &data_ptr);
+                       break;
+               case QLA83XX_POLLRDMWR:
+                       if (!is_qla8032(ha)) {
+                               qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+                               break;
+                       }
+                       rval = qla83xx_minidump_process_pollrdmwr(ha, entry_hdr,
+                                                                 &data_ptr);
+                       if (rval != QLA_SUCCESS)
+                               qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+                       break;
+               case QLA8XXX_RDNOP:
                default:
-                       ql4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+                       qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
                        break;
                }
 
@@ -2224,7 +2534,7 @@ static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha)
                                                ha->fw_dump_tmplt_size));
 skip_nxt_entry:
                /*  next entry in the template */
-               entry_hdr = (struct qla82xx_minidump_entry_hdr *)
+               entry_hdr = (struct qla8xxx_minidump_entry_hdr *)
                                (((uint8_t *)entry_hdr) +
                                 entry_hdr->entry_size);
        }
@@ -2264,33 +2574,45 @@ static void qla4_8xxx_uevent_emit(struct scsi_qla_host *ha, u32 code)
        kobject_uevent_env(&(&ha->pdev->dev)->kobj, KOBJ_CHANGE, envp);
 }
 
+void qla4_8xxx_get_minidump(struct scsi_qla_host *ha)
+{
+       if (ql4xenablemd && test_bit(AF_FW_RECOVERY, &ha->flags) &&
+           !test_bit(AF_82XX_FW_DUMPED, &ha->flags)) {
+               if (!qla4_8xxx_collect_md_data(ha)) {
+                       qla4_8xxx_uevent_emit(ha, QL4_UEVENT_CODE_FW_DUMP);
+                       set_bit(AF_82XX_FW_DUMPED, &ha->flags);
+               } else {
+                       ql4_printk(KERN_INFO, ha, "%s: Unable to collect minidump\n",
+                                  __func__);
+               }
+       }
+}
+
 /**
  * qla4_8xxx_device_bootstrap - Initialize device, set DEV_READY, start fw
  * @ha: pointer to adapter structure
  *
  * Note: IDC lock must be held upon entry
  **/
-static int
-qla4_8xxx_device_bootstrap(struct scsi_qla_host *ha)
+int qla4_8xxx_device_bootstrap(struct scsi_qla_host *ha)
 {
        int rval = QLA_ERROR;
        int i, timeout;
-       uint32_t old_count, count;
+       uint32_t old_count, count, idc_ctrl;
        int need_reset = 0, peg_stuck = 1;
 
-       need_reset = qla4_8xxx_need_reset(ha);
-
-       old_count = qla4_8xxx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
+       need_reset = ha->isp_ops->need_reset(ha);
+       old_count = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_ALIVE_COUNTER);
 
        for (i = 0; i < 10; i++) {
                timeout = msleep_interruptible(200);
                if (timeout) {
-                       qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
-                          QLA82XX_DEV_FAILED);
+                       qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
+                                           QLA8XXX_DEV_FAILED);
                        return rval;
                }
 
-               count = qla4_8xxx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
+               count = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_ALIVE_COUNTER);
                if (count != old_count)
                        peg_stuck = 0;
        }
@@ -2298,13 +2620,13 @@ qla4_8xxx_device_bootstrap(struct scsi_qla_host *ha)
        if (need_reset) {
                /* We are trying to perform a recovery here. */
                if (peg_stuck)
-                       qla4_8xxx_rom_lock_recovery(ha);
+                       ha->isp_ops->rom_lock_recovery(ha);
                goto dev_initialize;
        } else  {
                /* Start of day for this ha context. */
                if (peg_stuck) {
                        /* Either we are the first or recovery in progress. */
-                       qla4_8xxx_rom_lock_recovery(ha);
+                       ha->isp_ops->rom_lock_recovery(ha);
                        goto dev_initialize;
                } else {
                        /* Firmware already running. */
@@ -2316,46 +2638,53 @@ qla4_8xxx_device_bootstrap(struct scsi_qla_host *ha)
 dev_initialize:
        /* set to DEV_INITIALIZING */
        ql4_printk(KERN_INFO, ha, "HW State: INITIALIZING\n");
-       qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_INITIALIZING);
+       qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
+                           QLA8XXX_DEV_INITIALIZING);
 
-       /* Driver that sets device state to initializating sets IDC version */
-       qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION, QLA82XX_IDC_VERSION);
-
-       qla4_8xxx_idc_unlock(ha);
-       if (ql4xenablemd && test_bit(AF_FW_RECOVERY, &ha->flags) &&
-           !test_and_set_bit(AF_82XX_FW_DUMPED, &ha->flags)) {
-               if (!qla4_8xxx_collect_md_data(ha)) {
-                       qla4_8xxx_uevent_emit(ha, QL4_UEVENT_CODE_FW_DUMP);
-               } else {
-                       ql4_printk(KERN_INFO, ha, "Unable to collect minidump\n");
-                       clear_bit(AF_82XX_FW_DUMPED, &ha->flags);
+       /*
+        * For ISP8324, if IDC_CTRL GRACEFUL_RESET_BIT1 is set, reset it after
+        * device goes to INIT state.
+        */
+       if (is_qla8032(ha)) {
+               idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
+               if (idc_ctrl & GRACEFUL_RESET_BIT1) {
+                       qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL,
+                                        (idc_ctrl & ~GRACEFUL_RESET_BIT1));
+                       set_bit(AF_83XX_NO_FW_DUMP, &ha->flags);
                }
        }
-       rval = qla4_8xxx_try_start_fw(ha);
-       qla4_8xxx_idc_lock(ha);
+
+       ha->isp_ops->idc_unlock(ha);
+
+       if (is_qla8022(ha))
+               qla4_8xxx_get_minidump(ha);
+
+       rval = ha->isp_ops->restart_firmware(ha);
+       ha->isp_ops->idc_lock(ha);
 
        if (rval != QLA_SUCCESS) {
                ql4_printk(KERN_INFO, ha, "HW State: FAILED\n");
                qla4_8xxx_clear_drv_active(ha);
-               qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_FAILED);
+               qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
+                                   QLA8XXX_DEV_FAILED);
                return rval;
        }
 
 dev_ready:
        ql4_printk(KERN_INFO, ha, "HW State: READY\n");
-       qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_READY);
+       qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, QLA8XXX_DEV_READY);
 
        return rval;
 }
 
 /**
- * qla4_8xxx_need_reset_handler - Code to start reset sequence
+ * qla4_82xx_need_reset_handler - Code to start reset sequence
  * @ha: pointer to adapter structure
  *
  * Note: IDC lock must be held upon entry
  **/
 static void
-qla4_8xxx_need_reset_handler(struct scsi_qla_host *ha)
+qla4_82xx_need_reset_handler(struct scsi_qla_host *ha)
 {
        uint32_t dev_state, drv_state, drv_active;
        uint32_t active_mask = 0xFFFFFFFF;
@@ -2365,12 +2694,12 @@ qla4_8xxx_need_reset_handler(struct scsi_qla_host *ha)
                "Performing ISP error recovery\n");
 
        if (test_and_clear_bit(AF_ONLINE, &ha->flags)) {
-               qla4_8xxx_idc_unlock(ha);
+               qla4_82xx_idc_unlock(ha);
                ha->isp_ops->disable_intrs(ha);
-               qla4_8xxx_idc_lock(ha);
+               qla4_82xx_idc_lock(ha);
        }
 
-       if (!test_bit(AF_82XX_RST_OWNER, &ha->flags)) {
+       if (!test_bit(AF_8XXX_RST_OWNER, &ha->flags)) {
                DEBUG2(ql4_printk(KERN_INFO, ha,
                                  "%s(%ld): reset acknowledged\n",
                                  __func__, ha->host_no));
@@ -2382,8 +2711,8 @@ qla4_8xxx_need_reset_handler(struct scsi_qla_host *ha)
        /* wait for 10 seconds for reset ack from all functions */
        reset_timeout = jiffies + (ha->nx_reset_timeout * HZ);
 
-       drv_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
-       drv_active = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
+       drv_state = qla4_82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
+       drv_active = qla4_82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
 
        ql4_printk(KERN_INFO, ha,
                "%s(%ld): drv_state = 0x%x, drv_active = 0x%x\n",
@@ -2401,31 +2730,31 @@ qla4_8xxx_need_reset_handler(struct scsi_qla_host *ha)
                 * When reset_owner times out, check which functions
                 * acked/did not ack
                 */
-               if (test_bit(AF_82XX_RST_OWNER, &ha->flags)) {
+               if (test_bit(AF_8XXX_RST_OWNER, &ha->flags)) {
                        ql4_printk(KERN_INFO, ha,
                                   "%s(%ld): drv_state = 0x%x, drv_active = 0x%x\n",
                                   __func__, ha->host_no, drv_state,
                                   drv_active);
                }
-               qla4_8xxx_idc_unlock(ha);
+               qla4_82xx_idc_unlock(ha);
                msleep(1000);
-               qla4_8xxx_idc_lock(ha);
+               qla4_82xx_idc_lock(ha);
 
-               drv_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
-               drv_active = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
+               drv_state = qla4_82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
+               drv_active = qla4_82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
        }
 
        /* Clear RESET OWNER as we are not going to use it any further */
-       clear_bit(AF_82XX_RST_OWNER, &ha->flags);
+       clear_bit(AF_8XXX_RST_OWNER, &ha->flags);
 
-       dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+       dev_state = qla4_82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
        ql4_printk(KERN_INFO, ha, "Device state is 0x%x = %s\n", dev_state,
                   dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");
 
        /* Force to DEV_COLD unless someone else is starting a reset */
-       if (dev_state != QLA82XX_DEV_INITIALIZING) {
+       if (dev_state != QLA8XXX_DEV_INITIALIZING) {
                ql4_printk(KERN_INFO, ha, "HW State: COLD/RE-INIT\n");
-               qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_COLD);
+               qla4_82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_COLD);
                qla4_8xxx_set_rst_ready(ha);
        }
 }
@@ -2437,9 +2766,104 @@ qla4_8xxx_need_reset_handler(struct scsi_qla_host *ha)
 void
 qla4_8xxx_need_qsnt_handler(struct scsi_qla_host *ha)
 {
-       qla4_8xxx_idc_lock(ha);
+       ha->isp_ops->idc_lock(ha);
        qla4_8xxx_set_qsnt_ready(ha);
-       qla4_8xxx_idc_unlock(ha);
+       ha->isp_ops->idc_unlock(ha);
+}
+
+static void qla4_82xx_set_idc_ver(struct scsi_qla_host *ha)
+{
+       int idc_ver;
+       uint32_t drv_active;
+
+       drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);
+       if (drv_active == (1 << (ha->func_num * 4))) {
+               qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_IDC_VERSION,
+                                   QLA82XX_IDC_VERSION);
+               ql4_printk(KERN_INFO, ha,
+                          "%s: IDC version updated to %d\n", __func__,
+                          QLA82XX_IDC_VERSION);
+       } else {
+               idc_ver = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_IDC_VERSION);
+               if (QLA82XX_IDC_VERSION != idc_ver) {
+                       ql4_printk(KERN_INFO, ha,
+                                  "%s: qla4xxx driver IDC version %d is not compatible with IDC version %d of other drivers!\n",
+                                  __func__, QLA82XX_IDC_VERSION, idc_ver);
+               }
+       }
+}
+
+static int qla4_83xx_set_idc_ver(struct scsi_qla_host *ha)
+{
+       int idc_ver;
+       uint32_t drv_active;
+       int rval = QLA_SUCCESS;
+
+       drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);
+       if (drv_active == (1 << ha->func_num)) {
+               idc_ver = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_IDC_VERSION);
+               idc_ver &= (~0xFF);
+               idc_ver |= QLA83XX_IDC_VER_MAJ_VALUE;
+               qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_IDC_VERSION, idc_ver);
+               ql4_printk(KERN_INFO, ha,
+                          "%s: IDC version updated to %d\n", __func__,
+                          idc_ver);
+       } else {
+               idc_ver = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_IDC_VERSION);
+               idc_ver &= 0xFF;
+               if (QLA83XX_IDC_VER_MAJ_VALUE != idc_ver) {
+                       ql4_printk(KERN_INFO, ha,
+                                  "%s: qla4xxx driver IDC version %d is not compatible with IDC version %d of other drivers!\n",
+                                  __func__, QLA83XX_IDC_VER_MAJ_VALUE,
+                                  idc_ver);
+                       rval = QLA_ERROR;
+                       goto exit_set_idc_ver;
+               }
+       }
+
+       /* Update IDC_MINOR_VERSION */
+       idc_ver = qla4_83xx_rd_reg(ha, QLA83XX_CRB_IDC_VER_MINOR);
+       idc_ver &= ~(0x03 << (ha->func_num * 2));
+       idc_ver |= (QLA83XX_IDC_VER_MIN_VALUE << (ha->func_num * 2));
+       qla4_83xx_wr_reg(ha, QLA83XX_CRB_IDC_VER_MINOR, idc_ver);
+
+exit_set_idc_ver:
+       return rval;
+}
+
+int qla4_8xxx_update_idc_reg(struct scsi_qla_host *ha)
+{
+       uint32_t drv_active;
+       int rval = QLA_SUCCESS;
+
+       if (test_bit(AF_INIT_DONE, &ha->flags))
+               goto exit_update_idc_reg;
+
+       ha->isp_ops->idc_lock(ha);
+       qla4_8xxx_set_drv_active(ha);
+
+       /*
+        * If we are the first driver to load and
+        * ql4xdontresethba is not set, clear IDC_CTRL BIT0.
+        */
+       if (is_qla8032(ha)) {
+               drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);
+               if ((drv_active == (1 << ha->func_num)) && !ql4xdontresethba)
+                       qla4_83xx_clear_idc_dontreset(ha);
+       }
+
+       if (is_qla8022(ha)) {
+               qla4_82xx_set_idc_ver(ha);
+       } else if (is_qla8032(ha)) {
+               rval = qla4_83xx_set_idc_ver(ha);
+               if (rval == QLA_ERROR)
+                       qla4_8xxx_clear_drv_active(ha);
+       }
+
+       ha->isp_ops->idc_unlock(ha);
+
+exit_update_idc_reg:
+       return rval;
 }
 
 /**
@@ -2454,13 +2878,11 @@ int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha)
        int rval = QLA_SUCCESS;
        unsigned long dev_init_timeout;
 
-       if (!test_bit(AF_INIT_DONE, &ha->flags)) {
-               qla4_8xxx_idc_lock(ha);
-               qla4_8xxx_set_drv_active(ha);
-               qla4_8xxx_idc_unlock(ha);
-       }
+       rval = qla4_8xxx_update_idc_reg(ha);
+       if (rval == QLA_ERROR)
+               goto exit_state_handler;
 
-       dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+       dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE);
        DEBUG2(ql4_printk(KERN_INFO, ha, "Device state is 0x%x = %s\n",
                          dev_state, dev_state < MAX_STATES ?
                          qdev_state[dev_state] : "Unknown"));
@@ -2468,7 +2890,7 @@ int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha)
        /* wait for 30 seconds for device to go ready */
        dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ);
 
-       qla4_8xxx_idc_lock(ha);
+       ha->isp_ops->idc_lock(ha);
        while (1) {
 
                if (time_after_eq(jiffies, dev_init_timeout)) {
@@ -2477,65 +2899,75 @@ int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha)
                                   DRIVER_NAME,
                                   dev_state, dev_state < MAX_STATES ?
                                   qdev_state[dev_state] : "Unknown");
-                       qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
-                               QLA82XX_DEV_FAILED);
+                       qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
+                                           QLA8XXX_DEV_FAILED);
                }
 
-               dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+               dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE);
                ql4_printk(KERN_INFO, ha, "Device state is 0x%x = %s\n",
                           dev_state, dev_state < MAX_STATES ?
                           qdev_state[dev_state] : "Unknown");
 
                /* NOTE: Make sure idc unlocked upon exit of switch statement */
                switch (dev_state) {
-               case QLA82XX_DEV_READY:
+               case QLA8XXX_DEV_READY:
                        goto exit;
-               case QLA82XX_DEV_COLD:
+               case QLA8XXX_DEV_COLD:
                        rval = qla4_8xxx_device_bootstrap(ha);
                        goto exit;
-               case QLA82XX_DEV_INITIALIZING:
-                       qla4_8xxx_idc_unlock(ha);
+               case QLA8XXX_DEV_INITIALIZING:
+                       ha->isp_ops->idc_unlock(ha);
                        msleep(1000);
-                       qla4_8xxx_idc_lock(ha);
+                       ha->isp_ops->idc_lock(ha);
                        break;
-               case QLA82XX_DEV_NEED_RESET:
-                       if (!ql4xdontresethba) {
-                               qla4_8xxx_need_reset_handler(ha);
-                               /* Update timeout value after need
-                                * reset handler */
-                               dev_init_timeout = jiffies +
-                                       (ha->nx_dev_init_timeout * HZ);
-                       } else {
-                               qla4_8xxx_idc_unlock(ha);
-                               msleep(1000);
-                               qla4_8xxx_idc_lock(ha);
+               case QLA8XXX_DEV_NEED_RESET:
+                       /*
+                        * For ISP8324, if NEED_RESET is set by any driver,
+                        * it should be honored, irrespective of IDC_CTRL
+                        * DONTRESET_BIT0
+                        */
+                       if (is_qla8032(ha)) {
+                               qla4_83xx_need_reset_handler(ha);
+                       } else if (is_qla8022(ha)) {
+                               if (!ql4xdontresethba) {
+                                       qla4_82xx_need_reset_handler(ha);
+                                       /* Update timeout value after need
+                                        * reset handler */
+                                       dev_init_timeout = jiffies +
+                                               (ha->nx_dev_init_timeout * HZ);
+                               } else {
+                                       ha->isp_ops->idc_unlock(ha);
+                                       msleep(1000);
+                                       ha->isp_ops->idc_lock(ha);
+                               }
                        }
                        break;
-               case QLA82XX_DEV_NEED_QUIESCENT:
+               case QLA8XXX_DEV_NEED_QUIESCENT:
                        /* idc locked/unlocked in handler */
                        qla4_8xxx_need_qsnt_handler(ha);
                        break;
-               case QLA82XX_DEV_QUIESCENT:
-                       qla4_8xxx_idc_unlock(ha);
+               case QLA8XXX_DEV_QUIESCENT:
+                       ha->isp_ops->idc_unlock(ha);
                        msleep(1000);
-                       qla4_8xxx_idc_lock(ha);
+                       ha->isp_ops->idc_lock(ha);
                        break;
-               case QLA82XX_DEV_FAILED:
-                       qla4_8xxx_idc_unlock(ha);
+               case QLA8XXX_DEV_FAILED:
+                       ha->isp_ops->idc_unlock(ha);
                        qla4xxx_dead_adapter_cleanup(ha);
                        rval = QLA_ERROR;
-                       qla4_8xxx_idc_lock(ha);
+                       ha->isp_ops->idc_lock(ha);
                        goto exit;
                default:
-                       qla4_8xxx_idc_unlock(ha);
+                       ha->isp_ops->idc_unlock(ha);
                        qla4xxx_dead_adapter_cleanup(ha);
                        rval = QLA_ERROR;
-                       qla4_8xxx_idc_lock(ha);
+                       ha->isp_ops->idc_lock(ha);
                        goto exit;
                }
        }
 exit:
-       qla4_8xxx_idc_unlock(ha);
+       ha->isp_ops->idc_unlock(ha);
+exit_state_handler:
        return rval;
 }
 
@@ -2544,8 +2976,13 @@ int qla4_8xxx_load_risc(struct scsi_qla_host *ha)
        int retval;
 
        /* clear the interrupt */
-       writel(0, &ha->qla4_8xxx_reg->host_int);
-       readl(&ha->qla4_8xxx_reg->host_int);
+       if (is_qla8032(ha)) {
+               writel(0, &ha->qla4_83xx_reg->risc_intr);
+               readl(&ha->qla4_83xx_reg->risc_intr);
+       } else if (is_qla8022(ha)) {
+               writel(0, &ha->qla4_82xx_reg->host_int);
+               readl(&ha->qla4_82xx_reg->host_int);
+       }
 
        retval = qla4_8xxx_device_state_handler(ha);
 
@@ -2579,13 +3016,13 @@ flash_data_addr(struct ql82xx_hw_data *hw, uint32_t faddr)
 }
 
 static uint32_t *
-qla4_8xxx_read_flash_data(struct scsi_qla_host *ha, uint32_t *dwptr,
+qla4_82xx_read_flash_data(struct scsi_qla_host *ha, uint32_t *dwptr,
     uint32_t faddr, uint32_t length)
 {
        uint32_t i;
        uint32_t val;
        int loops = 0;
-       while ((qla4_8xxx_rom_lock(ha) != 0) && (loops < 50000)) {
+       while ((qla4_82xx_rom_lock(ha) != 0) && (loops < 50000)) {
                udelay(100);
                cond_resched();
                loops++;
@@ -2597,7 +3034,7 @@ qla4_8xxx_read_flash_data(struct scsi_qla_host *ha, uint32_t *dwptr,
 
        /* Dword reads to flash. */
        for (i = 0; i < length/4; i++, faddr += 4) {
-               if (qla4_8xxx_do_rom_fast_read(ha, faddr, &val)) {
+               if (qla4_82xx_do_rom_fast_read(ha, faddr, &val)) {
                        ql4_printk(KERN_WARNING, ha,
                            "Do ROM fast read failed\n");
                        goto done_read;
@@ -2606,7 +3043,7 @@ qla4_8xxx_read_flash_data(struct scsi_qla_host *ha, uint32_t *dwptr,
        }
 
 done_read:
-       qla4_8xxx_rom_unlock(ha);
+       qla4_82xx_rom_unlock(ha);
        return dwptr;
 }
 
@@ -2614,10 +3051,10 @@ done_read:
  * Address and length are byte address
  **/
 static uint8_t *
-qla4_8xxx_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
+qla4_82xx_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
                uint32_t offset, uint32_t length)
 {
-       qla4_8xxx_read_flash_data(ha, (uint32_t *)buf, offset, length);
+       qla4_82xx_read_flash_data(ha, (uint32_t *)buf, offset, length);
        return buf;
 }
 
@@ -2644,7 +3081,7 @@ qla4_8xxx_get_flt_info(struct scsi_qla_host *ha, uint32_t flt_addr)
        const char *loc, *locations[] = { "DEF", "FLT" };
        uint16_t *wptr;
        uint16_t cnt, chksum;
-       uint32_t start;
+       uint32_t start, status;
        struct qla_flt_header *flt;
        struct qla_flt_region *region;
        struct ql82xx_hw_data *hw = &ha->hw;
@@ -2653,8 +3090,18 @@ qla4_8xxx_get_flt_info(struct scsi_qla_host *ha, uint32_t flt_addr)
        wptr = (uint16_t *)ha->request_ring;
        flt = (struct qla_flt_header *)ha->request_ring;
        region = (struct qla_flt_region *)&flt[1];
-       qla4_8xxx_read_optrom_data(ha, (uint8_t *)ha->request_ring,
-                       flt_addr << 2, OPTROM_BURST_SIZE);
+
+       if (is_qla8022(ha)) {
+               qla4_82xx_read_optrom_data(ha, (uint8_t *)ha->request_ring,
+                                          flt_addr << 2, OPTROM_BURST_SIZE);
+       } else if (is_qla8032(ha)) {
+               status = qla4_83xx_flash_read_u32(ha, flt_addr << 2,
+                                                 (uint8_t *)ha->request_ring,
+                                                 0x400);
+               if (status != QLA_SUCCESS)
+                       goto no_flash_data;
+       }
+
        if (*wptr == __constant_cpu_to_le16(0xffff))
                goto no_flash_data;
        if (flt->version != __constant_cpu_to_le16(1)) {
@@ -2730,7 +3177,7 @@ done:
 }
 
 static void
-qla4_8xxx_get_fdt_info(struct scsi_qla_host *ha)
+qla4_82xx_get_fdt_info(struct scsi_qla_host *ha)
 {
 #define FLASH_BLK_SIZE_4K       0x1000
 #define FLASH_BLK_SIZE_32K      0x8000
@@ -2748,7 +3195,7 @@ qla4_8xxx_get_fdt_info(struct scsi_qla_host *ha)
 
        wptr = (uint16_t *)ha->request_ring;
        fdt = (struct qla_fdt_layout *)ha->request_ring;
-       qla4_8xxx_read_optrom_data(ha, (uint8_t *)ha->request_ring,
+       qla4_82xx_read_optrom_data(ha, (uint8_t *)ha->request_ring,
            hw->flt_region_fdt << 2, OPTROM_BURST_SIZE);
 
        if (*wptr == __constant_cpu_to_le16(0xffff))
@@ -2797,7 +3244,7 @@ done:
 }
 
 static void
-qla4_8xxx_get_idc_param(struct scsi_qla_host *ha)
+qla4_82xx_get_idc_param(struct scsi_qla_host *ha)
 {
 #define QLA82XX_IDC_PARAM_ADDR      0x003e885c
        uint32_t *wptr;
@@ -2805,7 +3252,7 @@ qla4_8xxx_get_idc_param(struct scsi_qla_host *ha)
        if (!is_qla8022(ha))
                return;
        wptr = (uint32_t *)ha->request_ring;
-       qla4_8xxx_read_optrom_data(ha, (uint8_t *)ha->request_ring,
+       qla4_82xx_read_optrom_data(ha, (uint8_t *)ha->request_ring,
                        QLA82XX_IDC_PARAM_ADDR , 8);
 
        if (*wptr == __constant_cpu_to_le32(0xffffffff)) {
@@ -2823,6 +3270,39 @@ qla4_8xxx_get_idc_param(struct scsi_qla_host *ha)
        return;
 }
 
+void qla4_82xx_queue_mbox_cmd(struct scsi_qla_host *ha, uint32_t *mbx_cmd,
+                             int in_count)
+{
+       int i;
+
+       /* Load all mailbox registers, except mailbox 0. */
+       for (i = 1; i < in_count; i++)
+               writel(mbx_cmd[i], &ha->qla4_82xx_reg->mailbox_in[i]);
+
+       /* Wakeup firmware  */
+       writel(mbx_cmd[0], &ha->qla4_82xx_reg->mailbox_in[0]);
+       readl(&ha->qla4_82xx_reg->mailbox_in[0]);
+       writel(HINT_MBX_INT_PENDING, &ha->qla4_82xx_reg->hint);
+       readl(&ha->qla4_82xx_reg->hint);
+}
+
+void qla4_82xx_process_mbox_intr(struct scsi_qla_host *ha, int out_count)
+{
+       int intr_status;
+
+       intr_status = readl(&ha->qla4_82xx_reg->host_int);
+       if (intr_status & ISRX_82XX_RISC_INT) {
+               ha->mbox_status_count = out_count;
+               intr_status = readl(&ha->qla4_82xx_reg->host_status);
+               ha->isp_ops->interrupt_service_routine(ha, intr_status);
+
+               if (test_bit(AF_INTERRUPTS_ON, &ha->flags) &&
+                   test_bit(AF_INTx_ENABLED, &ha->flags))
+                       qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg,
+                                       0xfbff);
+       }
+}
+
 int
 qla4_8xxx_get_flash_info(struct scsi_qla_host *ha)
 {
@@ -2834,8 +3314,12 @@ qla4_8xxx_get_flash_info(struct scsi_qla_host *ha)
                return ret;
 
        qla4_8xxx_get_flt_info(ha, flt_addr);
-       qla4_8xxx_get_fdt_info(ha);
-       qla4_8xxx_get_idc_param(ha);
+       if (is_qla8022(ha)) {
+               qla4_82xx_get_fdt_info(ha);
+               qla4_82xx_get_idc_param(ha);
+       } else if (is_qla8032(ha)) {
+               qla4_83xx_get_idc_param(ha);
+       }
 
        return QLA_SUCCESS;
 }
@@ -2869,36 +3353,36 @@ qla4_8xxx_stop_firmware(struct scsi_qla_host *ha)
 }
 
 /**
- * qla4_8xxx_isp_reset - Resets ISP and aborts all outstanding commands.
+ * qla4_82xx_isp_reset - Resets ISP and aborts all outstanding commands.
  * @ha: pointer to host adapter structure.
  **/
 int
-qla4_8xxx_isp_reset(struct scsi_qla_host *ha)
+qla4_82xx_isp_reset(struct scsi_qla_host *ha)
 {
        int rval;
        uint32_t dev_state;
 
-       qla4_8xxx_idc_lock(ha);
-       dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+       qla4_82xx_idc_lock(ha);
+       dev_state = qla4_82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
 
-       if (dev_state == QLA82XX_DEV_READY) {
+       if (dev_state == QLA8XXX_DEV_READY) {
                ql4_printk(KERN_INFO, ha, "HW State: NEED RESET\n");
-               qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
-                   QLA82XX_DEV_NEED_RESET);
-               set_bit(AF_82XX_RST_OWNER, &ha->flags);
+               qla4_82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
+                   QLA8XXX_DEV_NEED_RESET);
+               set_bit(AF_8XXX_RST_OWNER, &ha->flags);
        } else
                ql4_printk(KERN_INFO, ha, "HW State: DEVICE INITIALIZING\n");
 
-       qla4_8xxx_idc_unlock(ha);
+       qla4_82xx_idc_unlock(ha);
 
        rval = qla4_8xxx_device_state_handler(ha);
 
-       qla4_8xxx_idc_lock(ha);
+       qla4_82xx_idc_lock(ha);
        qla4_8xxx_clear_rst_ready(ha);
-       qla4_8xxx_idc_unlock(ha);
+       qla4_82xx_idc_unlock(ha);
 
        if (rval == QLA_SUCCESS) {
-               ql4_printk(KERN_INFO, ha, "Clearing AF_RECOVERY in qla4_8xxx_isp_reset\n");
+               ql4_printk(KERN_INFO, ha, "Clearing AF_RECOVERY in qla4_82xx_isp_reset\n");
                clear_bit(AF_FW_RECOVERY, &ha->flags);
        }
 
@@ -2979,8 +3463,7 @@ exit_validate_mac82:
 
 /* Interrupt handling helpers. */
 
-static int
-qla4_8xxx_mbx_intr_enable(struct scsi_qla_host *ha)
+int qla4_8xxx_mbx_intr_enable(struct scsi_qla_host *ha)
 {
        uint32_t mbox_cmd[MBOX_REG_COUNT];
        uint32_t mbox_sts[MBOX_REG_COUNT];
@@ -3001,8 +3484,7 @@ qla4_8xxx_mbx_intr_enable(struct scsi_qla_host *ha)
        return QLA_SUCCESS;
 }
 
-static int
-qla4_8xxx_mbx_intr_disable(struct scsi_qla_host *ha)
+int qla4_8xxx_mbx_intr_disable(struct scsi_qla_host *ha)
 {
        uint32_t mbox_cmd[MBOX_REG_COUNT];
        uint32_t mbox_sts[MBOX_REG_COUNT];
@@ -3025,26 +3507,26 @@ qla4_8xxx_mbx_intr_disable(struct scsi_qla_host *ha)
 }
 
 void
-qla4_8xxx_enable_intrs(struct scsi_qla_host *ha)
+qla4_82xx_enable_intrs(struct scsi_qla_host *ha)
 {
        qla4_8xxx_mbx_intr_enable(ha);
 
        spin_lock_irq(&ha->hardware_lock);
        /* BIT 10 - reset */
-       qla4_8xxx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
+       qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
        spin_unlock_irq(&ha->hardware_lock);
        set_bit(AF_INTERRUPTS_ON, &ha->flags);
 }
 
 void
-qla4_8xxx_disable_intrs(struct scsi_qla_host *ha)
+qla4_82xx_disable_intrs(struct scsi_qla_host *ha)
 {
        if (test_and_clear_bit(AF_INTERRUPTS_ON, &ha->flags))
                qla4_8xxx_mbx_intr_disable(ha);
 
        spin_lock_irq(&ha->hardware_lock);
        /* BIT 10 - set */
-       qla4_8xxx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0x0400);
+       qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0x0400);
        spin_unlock_irq(&ha->hardware_lock);
 }
 
index 30258479f100370400590278f725d6da470b49a1..9dc0bbfe50d5e220e1500de3e554859f62a42fde 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic iSCSI HBA Driver
- * Copyright (c)  2003-2010 QLogic Corporation
+ * Copyright (c)  2003-2012 QLogic Corporation
  *
  * See LICENSE.qla4xxx for copyright and licensing details.
  */
@@ -25,6 +25,8 @@
 #define CRB_RCVPEG_STATE               QLA82XX_REG(0x13c)
 #define CRB_DMA_SHIFT                  QLA82XX_REG(0xcc)
 #define CRB_TEMP_STATE                 QLA82XX_REG(0x1b4)
+#define CRB_CMDPEG_CHECK_RETRY_COUNT   60
+#define CRB_CMDPEG_CHECK_DELAY         500
 
 #define qla82xx_get_temp_val(x)                ((x) >> 16)
 #define qla82xx_get_temp_state(x)      ((x) & 0xffff)
@@ -490,8 +492,8 @@ enum {
  * Base addresses of major components on-chip.
  * ====================== BASE ADDRESSES ON-CHIP ======================
  */
-#define QLA82XX_ADDR_DDR_NET           (0x0000000000000000ULL)
-#define QLA82XX_ADDR_DDR_NET_MAX       (0x000000000fffffffULL)
+#define QLA8XXX_ADDR_DDR_NET           (0x0000000000000000ULL)
+#define QLA8XXX_ADDR_DDR_NET_MAX       (0x000000000fffffffULL)
 
 /* Imbus address bit used to indicate a host address. This bit is
  * eliminated by the pcie bar and bar select before presentation
@@ -500,14 +502,15 @@ enum {
 #define QLA82XX_P2_ADDR_PCIE   (0x0000000800000000ULL)
 #define QLA82XX_P3_ADDR_PCIE   (0x0000008000000000ULL)
 #define QLA82XX_ADDR_PCIE_MAX  (0x0000000FFFFFFFFFULL)
-#define QLA82XX_ADDR_OCM0      (0x0000000200000000ULL)
-#define QLA82XX_ADDR_OCM0_MAX  (0x00000002000fffffULL)
-#define QLA82XX_ADDR_OCM1      (0x0000000200400000ULL)
-#define QLA82XX_ADDR_OCM1_MAX  (0x00000002004fffffULL)
-#define QLA82XX_ADDR_QDR_NET   (0x0000000300000000ULL)
+#define QLA8XXX_ADDR_OCM0      (0x0000000200000000ULL)
+#define QLA8XXX_ADDR_OCM0_MAX  (0x00000002000fffffULL)
+#define QLA8XXX_ADDR_OCM1      (0x0000000200400000ULL)
+#define QLA8XXX_ADDR_OCM1_MAX  (0x00000002004fffffULL)
+#define QLA8XXX_ADDR_QDR_NET   (0x0000000300000000ULL)
 
 #define QLA82XX_P2_ADDR_QDR_NET_MAX    (0x00000003001fffffULL)
 #define QLA82XX_P3_ADDR_QDR_NET_MAX    (0x0000000303ffffffULL)
+#define QLA8XXX_ADDR_QDR_NET_MAX       (0x0000000307ffffffULL)
 
 #define QLA82XX_PCI_CRBSPACE           (unsigned long)0x06000000
 #define QLA82XX_PCI_DIRECT_CRB         (unsigned long)0x04400000
@@ -517,6 +520,10 @@ enum {
 #define QLA82XX_PCI_QDR_NET            (unsigned long)0x04000000
 #define QLA82XX_PCI_QDR_NET_MAX                (unsigned long)0x043fffff
 
+/*  PCI Windowing for DDR regions.  */
+#define QLA8XXX_ADDR_IN_RANGE(addr, low, high)            \
+       (((addr) <= (high)) && ((addr) >= (low)))
+
 /*
  *   Register offsets for MN
  */
@@ -540,6 +547,11 @@ enum {
 #define MIU_TA_CTL_WRITE       4
 #define MIU_TA_CTL_BUSY                8
 
+#define MIU_TA_CTL_WRITE_ENABLE                (MIU_TA_CTL_WRITE | MIU_TA_CTL_ENABLE)
+#define MIU_TA_CTL_WRITE_START         (MIU_TA_CTL_WRITE | MIU_TA_CTL_ENABLE |\
+                                        MIU_TA_CTL_START)
+#define MIU_TA_CTL_START_ENABLE                (MIU_TA_CTL_START | MIU_TA_CTL_ENABLE)
+
 /*CAM RAM */
 # define QLA82XX_CAM_RAM_BASE  (QLA82XX_CRB_CAM + 0x02000)
 # define QLA82XX_CAM_RAM(reg)  (QLA82XX_CAM_RAM_BASE + (reg))
@@ -565,20 +577,53 @@ enum {
 /* Driver Coexistence Defines */
 #define QLA82XX_CRB_DRV_ACTIVE         (QLA82XX_CAM_RAM(0x138))
 #define QLA82XX_CRB_DEV_STATE          (QLA82XX_CAM_RAM(0x140))
-#define QLA82XX_CRB_DEV_PART_INFO      (QLA82XX_CAM_RAM(0x14c))
-#define QLA82XX_CRB_DRV_IDC_VERSION    (QLA82XX_CAM_RAM(0x174))
 #define QLA82XX_CRB_DRV_STATE          (QLA82XX_CAM_RAM(0x144))
 #define QLA82XX_CRB_DRV_SCRATCH                (QLA82XX_CAM_RAM(0x148))
 #define QLA82XX_CRB_DEV_PART_INFO      (QLA82XX_CAM_RAM(0x14c))
+#define QLA82XX_CRB_DRV_IDC_VERSION    (QLA82XX_CAM_RAM(0x174))
+
+enum qla_regs {
+       QLA8XXX_PEG_HALT_STATUS1 = 0,
+       QLA8XXX_PEG_HALT_STATUS2,
+       QLA8XXX_PEG_ALIVE_COUNTER,
+       QLA8XXX_CRB_DRV_ACTIVE,
+       QLA8XXX_CRB_DEV_STATE,
+       QLA8XXX_CRB_DRV_STATE,
+       QLA8XXX_CRB_DRV_SCRATCH,
+       QLA8XXX_CRB_DEV_PART_INFO,
+       QLA8XXX_CRB_DRV_IDC_VERSION,
+       QLA8XXX_FW_VERSION_MAJOR,
+       QLA8XXX_FW_VERSION_MINOR,
+       QLA8XXX_FW_VERSION_SUB,
+       QLA8XXX_CRB_CMDPEG_STATE,
+       QLA8XXX_CRB_TEMP_STATE,
+};
+
+static const uint32_t qla4_82xx_reg_tbl[] = {
+       QLA82XX_PEG_HALT_STATUS1,
+       QLA82XX_PEG_HALT_STATUS2,
+       QLA82XX_PEG_ALIVE_COUNTER,
+       QLA82XX_CRB_DRV_ACTIVE,
+       QLA82XX_CRB_DEV_STATE,
+       QLA82XX_CRB_DRV_STATE,
+       QLA82XX_CRB_DRV_SCRATCH,
+       QLA82XX_CRB_DEV_PART_INFO,
+       QLA82XX_CRB_DRV_IDC_VERSION,
+       QLA82XX_FW_VERSION_MAJOR,
+       QLA82XX_FW_VERSION_MINOR,
+       QLA82XX_FW_VERSION_SUB,
+       CRB_CMDPEG_STATE,
+       CRB_TEMP_STATE,
+};
 
 /* Every driver should use these Device State */
-#define QLA82XX_DEV_COLD               1
-#define QLA82XX_DEV_INITIALIZING       2
-#define QLA82XX_DEV_READY              3
-#define QLA82XX_DEV_NEED_RESET         4
-#define QLA82XX_DEV_NEED_QUIESCENT     5
-#define QLA82XX_DEV_FAILED             6
-#define QLA82XX_DEV_QUIESCENT          7
+#define QLA8XXX_DEV_COLD               1
+#define QLA8XXX_DEV_INITIALIZING       2
+#define QLA8XXX_DEV_READY              3
+#define QLA8XXX_DEV_NEED_RESET         4
+#define QLA8XXX_DEV_NEED_QUIESCENT     5
+#define QLA8XXX_DEV_FAILED             6
+#define QLA8XXX_DEV_QUIESCENT          7
 #define MAX_STATES                     8 /* Increment if new state added */
 
 #define QLA82XX_IDC_VERSION            0x1
@@ -795,47 +840,51 @@ struct crb_addr_pair {
 /* Minidump related */
 
 /* Entry Type Defines */
-#define QLA82XX_RDNOP  0
-#define QLA82XX_RDCRB  1
-#define QLA82XX_RDMUX  2
-#define QLA82XX_QUEUE  3
-#define QLA82XX_BOARD  4
-#define QLA82XX_RDOCM  6
-#define QLA82XX_PREGS  7
-#define QLA82XX_L1DTG  8
-#define QLA82XX_L1ITG  9
-#define QLA82XX_L1DAT  11
-#define QLA82XX_L1INS  12
-#define QLA82XX_L2DTG  21
-#define QLA82XX_L2ITG  22
-#define QLA82XX_L2DAT  23
-#define QLA82XX_L2INS  24
-#define QLA82XX_RDROM  71
-#define QLA82XX_RDMEM  72
-#define QLA82XX_CNTRL  98
-#define QLA82XX_RDEND  255
+#define QLA8XXX_RDNOP  0
+#define QLA8XXX_RDCRB  1
+#define QLA8XXX_RDMUX  2
+#define QLA8XXX_QUEUE  3
+#define QLA8XXX_BOARD  4
+#define QLA8XXX_RDOCM  6
+#define QLA8XXX_PREGS  7
+#define QLA8XXX_L1DTG  8
+#define QLA8XXX_L1ITG  9
+#define QLA8XXX_L1DAT  11
+#define QLA8XXX_L1INS  12
+#define QLA8XXX_L2DTG  21
+#define QLA8XXX_L2ITG  22
+#define QLA8XXX_L2DAT  23
+#define QLA8XXX_L2INS  24
+#define QLA83XX_POLLRD 35
+#define QLA83XX_RDMUX2 36
+#define QLA83XX_POLLRDMWR  37
+#define QLA8XXX_RDROM  71
+#define QLA8XXX_RDMEM  72
+#define QLA8XXX_CNTRL  98
+#define QLA83XX_TLHDR  99
+#define QLA8XXX_RDEND  255
 
 /* Opcodes for Control Entries.
  * These Flags are bit fields.
  */
-#define QLA82XX_DBG_OPCODE_WR          0x01
-#define QLA82XX_DBG_OPCODE_RW          0x02
-#define QLA82XX_DBG_OPCODE_AND         0x04
-#define QLA82XX_DBG_OPCODE_OR          0x08
-#define QLA82XX_DBG_OPCODE_POLL                0x10
-#define QLA82XX_DBG_OPCODE_RDSTATE     0x20
-#define QLA82XX_DBG_OPCODE_WRSTATE     0x40
-#define QLA82XX_DBG_OPCODE_MDSTATE     0x80
+#define QLA8XXX_DBG_OPCODE_WR          0x01
+#define QLA8XXX_DBG_OPCODE_RW          0x02
+#define QLA8XXX_DBG_OPCODE_AND         0x04
+#define QLA8XXX_DBG_OPCODE_OR          0x08
+#define QLA8XXX_DBG_OPCODE_POLL                0x10
+#define QLA8XXX_DBG_OPCODE_RDSTATE     0x20
+#define QLA8XXX_DBG_OPCODE_WRSTATE     0x40
+#define QLA8XXX_DBG_OPCODE_MDSTATE     0x80
 
 /* Driver Flags */
-#define QLA82XX_DBG_SKIPPED_FLAG       0x80 /* driver skipped this entry  */
-#define QLA82XX_DBG_SIZE_ERR_FLAG      0x40 /* Entry vs Capture size
+#define QLA8XXX_DBG_SKIPPED_FLAG       0x80 /* driver skipped this entry  */
+#define QLA8XXX_DBG_SIZE_ERR_FLAG      0x40 /* Entry vs Capture size
                                              * mismatch */
 
 /* Driver_code is for driver to write some info about the entry
  * currently not used.
  */
-struct qla82xx_minidump_entry_hdr {
+struct qla8xxx_minidump_entry_hdr {
        uint32_t entry_type;
        uint32_t entry_size;
        uint32_t entry_capture_size;
@@ -848,8 +897,8 @@ struct qla82xx_minidump_entry_hdr {
 };
 
 /*  Read CRB entry header */
-struct qla82xx_minidump_entry_crb {
-       struct qla82xx_minidump_entry_hdr h;
+struct qla8xxx_minidump_entry_crb {
+       struct qla8xxx_minidump_entry_hdr h;
        uint32_t addr;
        struct {
                uint8_t addr_stride;
@@ -871,8 +920,8 @@ struct qla82xx_minidump_entry_crb {
        uint32_t value_3;
 };
 
-struct qla82xx_minidump_entry_cache {
-       struct qla82xx_minidump_entry_hdr h;
+struct qla8xxx_minidump_entry_cache {
+       struct qla8xxx_minidump_entry_hdr h;
        uint32_t tag_reg_addr;
        struct {
                uint16_t tag_value_stride;
@@ -895,8 +944,8 @@ struct qla82xx_minidump_entry_cache {
 };
 
 /* Read OCM */
-struct qla82xx_minidump_entry_rdocm {
-       struct qla82xx_minidump_entry_hdr h;
+struct qla8xxx_minidump_entry_rdocm {
+       struct qla8xxx_minidump_entry_hdr h;
        uint32_t rsvd_0;
        uint32_t rsvd_1;
        uint32_t data_size;
@@ -908,24 +957,24 @@ struct qla82xx_minidump_entry_rdocm {
 };
 
 /* Read Memory */
-struct qla82xx_minidump_entry_rdmem {
-       struct qla82xx_minidump_entry_hdr h;
+struct qla8xxx_minidump_entry_rdmem {
+       struct qla8xxx_minidump_entry_hdr h;
        uint32_t rsvd[6];
        uint32_t read_addr;
        uint32_t read_data_size;
 };
 
 /* Read ROM */
-struct qla82xx_minidump_entry_rdrom {
-       struct qla82xx_minidump_entry_hdr h;
+struct qla8xxx_minidump_entry_rdrom {
+       struct qla8xxx_minidump_entry_hdr h;
        uint32_t rsvd[6];
        uint32_t read_addr;
        uint32_t read_data_size;
 };
 
 /* Mux entry */
-struct qla82xx_minidump_entry_mux {
-       struct qla82xx_minidump_entry_hdr h;
+struct qla8xxx_minidump_entry_mux {
+       struct qla8xxx_minidump_entry_hdr h;
        uint32_t select_addr;
        uint32_t rsvd_0;
        uint32_t data_size;
@@ -937,8 +986,8 @@ struct qla82xx_minidump_entry_mux {
 };
 
 /* Queue entry */
-struct qla82xx_minidump_entry_queue {
-       struct qla82xx_minidump_entry_hdr h;
+struct qla8xxx_minidump_entry_queue {
+       struct qla8xxx_minidump_entry_hdr h;
        uint32_t select_addr;
        struct {
                uint16_t queue_id_stride;
@@ -956,23 +1005,6 @@ struct qla82xx_minidump_entry_queue {
        } rd_strd;
 };
 
-#define QLA82XX_MINIDUMP_OCM0_SIZE             (256 * 1024)
-#define QLA82XX_MINIDUMP_L1C_SIZE              (256 * 1024)
-#define QLA82XX_MINIDUMP_L2C_SIZE              1572864
-#define QLA82XX_MINIDUMP_COMMON_STR_SIZE       0
-#define QLA82XX_MINIDUMP_FCOE_STR_SIZE         0
-#define QLA82XX_MINIDUMP_MEM_SIZE              0
-#define QLA82XX_MAX_ENTRY_HDR                  4
-
-struct qla82xx_minidump {
-       uint32_t md_ocm0_data[QLA82XX_MINIDUMP_OCM0_SIZE];
-       uint32_t md_l1c_data[QLA82XX_MINIDUMP_L1C_SIZE];
-       uint32_t md_l2c_data[QLA82XX_MINIDUMP_L2C_SIZE];
-       uint32_t md_cs_data[QLA82XX_MINIDUMP_COMMON_STR_SIZE];
-       uint32_t md_fcoes_data[QLA82XX_MINIDUMP_FCOE_STR_SIZE];
-       uint32_t md_mem_data[QLA82XX_MINIDUMP_MEM_SIZE];
-};
-
 #define MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE       0x129
 #define RQST_TMPLT_SIZE                                0x0
 #define RQST_TMPLT                             0x1
@@ -982,6 +1014,16 @@ struct qla82xx_minidump {
 #define MD_MIU_TEST_AGT_ADDR_LO                        0x41000094
 #define MD_MIU_TEST_AGT_ADDR_HI                        0x41000098
 
+#define MD_MIU_TEST_AGT_WRDATA_LO              0x410000A0
+#define MD_MIU_TEST_AGT_WRDATA_HI              0x410000A4
+#define MD_MIU_TEST_AGT_WRDATA_ULO             0x410000B0
+#define MD_MIU_TEST_AGT_WRDATA_UHI             0x410000B4
+
+#define MD_MIU_TEST_AGT_RDDATA_LO              0x410000A8
+#define MD_MIU_TEST_AGT_RDDATA_HI              0x410000AC
+#define MD_MIU_TEST_AGT_RDDATA_ULO             0x410000B8
+#define MD_MIU_TEST_AGT_RDDATA_UHI             0x410000BC
+
 static const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8,
                                0x410000AC, 0x410000B8, 0x410000BC };
 #endif
index 79243b76d17ef9c7a72ed404dabf2cbeb1f941f5..fbc546e893ac2bcad8ea7a42d430096ea2905429 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic iSCSI HBA Driver
- * Copyright (c)  2003-2010 QLogic Corporation
+ * Copyright (c)  2003-2012 QLogic Corporation
  *
  * See LICENSE.qla4xxx for copyright and licensing details.
  */
@@ -18,6 +18,7 @@
 #include "ql4_glbl.h"
 #include "ql4_dbg.h"
 #include "ql4_inline.h"
+#include "ql4_83xx.h"
 
 /*
  * Driver version
@@ -160,7 +161,7 @@ static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd);
 static int qla4xxx_slave_alloc(struct scsi_device *device);
 static int qla4xxx_slave_configure(struct scsi_device *device);
 static void qla4xxx_slave_destroy(struct scsi_device *sdev);
-static umode_t ql4_attr_is_visible(int param_type, int param);
+static umode_t qla4_attr_is_visible(int param_type, int param);
 static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type);
 static int qla4xxx_change_queue_depth(struct scsi_device *sdev, int qdepth,
                                      int reason);
@@ -203,7 +204,7 @@ static struct iscsi_transport qla4xxx_iscsi_transport = {
                                  CAP_DATA_PATH_OFFLOAD | CAP_HDRDGST |
                                  CAP_DATADGST | CAP_LOGIN_OFFLOAD |
                                  CAP_MULTI_R2T,
-       .attr_is_visible        = ql4_attr_is_visible,
+       .attr_is_visible        = qla4_attr_is_visible,
        .create_session         = qla4xxx_session_create,
        .destroy_session        = qla4xxx_session_destroy,
        .start_conn             = qla4xxx_conn_start,
@@ -315,7 +316,7 @@ exit_send_ping:
        return rval;
 }
 
-static umode_t ql4_attr_is_visible(int param_type, int param)
+static umode_t qla4_attr_is_visible(int param_type, int param)
 {
        switch (param_type) {
        case ISCSI_HOST_PARAM:
@@ -1366,7 +1367,7 @@ static int qla4xxx_conn_get_param(struct iscsi_cls_conn *cls_conn,
 
        conn = cls_conn->dd_data;
        qla_conn = conn->dd_data;
-       dst_addr = &qla_conn->qla_ep->dst_addr;
+       dst_addr = (struct sockaddr *)&qla_conn->qla_ep->dst_addr;
 
        switch (param) {
        case ISCSI_PARAM_CONN_PORT:
@@ -2315,8 +2316,17 @@ static void qla4xxx_mem_free(struct scsi_qla_host *ha)
                if (ha->nx_pcibase)
                        iounmap(
                            (struct device_reg_82xx __iomem *)ha->nx_pcibase);
-       } else if (ha->reg)
+       } else if (is_qla8032(ha)) {
+               if (ha->nx_pcibase)
+                       iounmap(
+                           (struct device_reg_83xx __iomem *)ha->nx_pcibase);
+       } else if (ha->reg) {
                iounmap(ha->reg);
+       }
+
+       if (ha->reset_tmplt.buff)
+               vfree(ha->reset_tmplt.buff);
+
        pci_release_regions(ha->pdev);
 }
 
@@ -2420,7 +2430,7 @@ static int qla4_8xxx_check_temp(struct scsi_qla_host *ha)
        uint32_t temp, temp_state, temp_val;
        int status = QLA_SUCCESS;
 
-       temp = qla4_8xxx_rd_32(ha, CRB_TEMP_STATE);
+       temp = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_TEMP_STATE);
 
        temp_state = qla82xx_get_temp_state(temp);
        temp_val = qla82xx_get_temp_val(temp);
@@ -2456,7 +2466,8 @@ static int qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
        uint32_t fw_heartbeat_counter;
        int status = QLA_SUCCESS;
 
-       fw_heartbeat_counter = qla4_8xxx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
+       fw_heartbeat_counter = qla4_8xxx_rd_direct(ha,
+                                                  QLA8XXX_PEG_ALIVE_COUNTER);
        /* If PEG_ALIVE_COUNTER is 0xffffffff, AER/EEH is in progress, ignore */
        if (fw_heartbeat_counter == 0xffffffff) {
                DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Device in frozen "
@@ -2470,28 +2481,7 @@ static int qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
                /* FW not alive after 2 seconds */
                if (ha->seconds_since_last_heartbeat == 2) {
                        ha->seconds_since_last_heartbeat = 0;
-
-                       ql4_printk(KERN_INFO, ha,
-                                  "scsi(%ld): %s, Dumping hw/fw registers:\n "
-                                  " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2:"
-                                  " 0x%x,\n PEG_NET_0_PC: 0x%x, PEG_NET_1_PC:"
-                                  " 0x%x,\n PEG_NET_2_PC: 0x%x, PEG_NET_3_PC:"
-                                  " 0x%x,\n PEG_NET_4_PC: 0x%x\n",
-                                  ha->host_no, __func__,
-                                  qla4_8xxx_rd_32(ha,
-                                                  QLA82XX_PEG_HALT_STATUS1),
-                                  qla4_8xxx_rd_32(ha,
-                                                  QLA82XX_PEG_HALT_STATUS2),
-                                  qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_0 +
-                                                  0x3c),
-                                  qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_1 +
-                                                  0x3c),
-                                  qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_2 +
-                                                  0x3c),
-                                  qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_3 +
-                                                  0x3c),
-                                  qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_4 +
-                                                  0x3c));
+                       qla4_8xxx_dump_peg_reg(ha);
                        status = QLA_ERROR;
                }
        } else
@@ -2501,6 +2491,48 @@ static int qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
        return status;
 }
 
+static void qla4_8xxx_process_fw_error(struct scsi_qla_host *ha)
+{
+       uint32_t halt_status;
+       int halt_status_unrecoverable = 0;
+
+       halt_status = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_HALT_STATUS1);
+
+       if (is_qla8022(ha)) {
+               ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n",
+                          __func__);
+               qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
+                               CRB_NIU_XG_PAUSE_CTL_P0 |
+                               CRB_NIU_XG_PAUSE_CTL_P1);
+
+               if (QLA82XX_FWERROR_CODE(halt_status) == 0x67)
+                       ql4_printk(KERN_ERR, ha, "%s: Firmware aborted with error code 0x00006700. Device is being reset\n",
+                                  __func__);
+               if (halt_status & HALT_STATUS_UNRECOVERABLE)
+                       halt_status_unrecoverable = 1;
+       } else if (is_qla8032(ha)) {
+               if (halt_status & QLA83XX_HALT_STATUS_FW_RESET)
+                       ql4_printk(KERN_ERR, ha, "%s: Firmware error detected device is being reset\n",
+                                  __func__);
+               else if (halt_status & QLA83XX_HALT_STATUS_UNRECOVERABLE)
+                       halt_status_unrecoverable = 1;
+       }
+
+       /*
+        * Since we cannot change dev_state in interrupt context,
+        * set appropriate DPC flag then wakeup DPC
+        */
+       if (halt_status_unrecoverable) {
+               set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags);
+       } else {
+               ql4_printk(KERN_INFO, ha, "%s: detect abort needed!\n",
+                          __func__);
+               set_bit(DPC_RESET_HA, &ha->dpc_flags);
+       }
+       qla4xxx_mailbox_premature_completion(ha);
+       qla4xxx_wake_dpc(ha);
+}
+
 /**
  * qla4_8xxx_watchdog - Poll dev state
  * @ha: Pointer to host adapter structure.
@@ -2509,31 +2541,33 @@ static int qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
  **/
 void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
 {
-       uint32_t dev_state, halt_status;
+       uint32_t dev_state;
 
        /* don't poll if reset is going on */
        if (!(test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
            test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
            test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags))) {
-               dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+               dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE);
 
                if (qla4_8xxx_check_temp(ha)) {
-                       ql4_printk(KERN_INFO, ha, "disabling pause"
-                                  " transmit on port 0 & 1.\n");
-                       qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
-                                       CRB_NIU_XG_PAUSE_CTL_P0 |
-                                       CRB_NIU_XG_PAUSE_CTL_P1);
+                       if (is_qla8022(ha)) {
+                               ql4_printk(KERN_INFO, ha, "disabling pause transmit on port 0 & 1.\n");
+                               qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
+                                               CRB_NIU_XG_PAUSE_CTL_P0 |
+                                               CRB_NIU_XG_PAUSE_CTL_P1);
+                       }
                        set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags);
                        qla4xxx_wake_dpc(ha);
-               } else if (dev_state == QLA82XX_DEV_NEED_RESET &&
-                   !test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
-                       if (!ql4xdontresethba) {
+               } else if (dev_state == QLA8XXX_DEV_NEED_RESET &&
+                          !test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
+                       if (is_qla8032(ha) ||
+                           (is_qla8022(ha) && !ql4xdontresethba)) {
                                ql4_printk(KERN_INFO, ha, "%s: HW State: "
                                    "NEED RESET!\n", __func__);
                                set_bit(DPC_RESET_HA, &ha->dpc_flags);
                                qla4xxx_wake_dpc(ha);
                        }
-               } else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT &&
+               } else if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT &&
                    !test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) {
                        ql4_printk(KERN_INFO, ha, "%s: HW State: NEED QUIES!\n",
                            __func__);
@@ -2541,36 +2575,8 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
                        qla4xxx_wake_dpc(ha);
                } else  {
                        /* Check firmware health */
-                       if (qla4_8xxx_check_fw_alive(ha)) {
-                               ql4_printk(KERN_INFO, ha, "disabling pause"
-                                          " transmit on port 0 & 1.\n");
-                               qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
-                                               CRB_NIU_XG_PAUSE_CTL_P0 |
-                                               CRB_NIU_XG_PAUSE_CTL_P1);
-                               halt_status = qla4_8xxx_rd_32(ha,
-                                               QLA82XX_PEG_HALT_STATUS1);
-
-                               if (QLA82XX_FWERROR_CODE(halt_status) == 0x67)
-                                       ql4_printk(KERN_ERR, ha, "%s:"
-                                                  " Firmware aborted with"
-                                                  " error code 0x00006700."
-                                                  " Device is being reset\n",
-                                                  __func__);
-
-                               /* Since we cannot change dev_state in interrupt
-                                * context, set appropriate DPC flag then wakeup
-                                * DPC */
-                               if (halt_status & HALT_STATUS_UNRECOVERABLE)
-                                       set_bit(DPC_HA_UNRECOVERABLE,
-                                               &ha->dpc_flags);
-                               else {
-                                       ql4_printk(KERN_INFO, ha, "%s: detect "
-                                                  "abort needed!\n", __func__);
-                                       set_bit(DPC_RESET_HA, &ha->dpc_flags);
-                               }
-                               qla4xxx_mailbox_premature_completion(ha);
-                               qla4xxx_wake_dpc(ha);
-                       }
+                       if (qla4_8xxx_check_fw_alive(ha))
+                               qla4_8xxx_process_fw_error(ha);
                }
        }
 }
@@ -2652,11 +2658,10 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
        if (!pci_channel_offline(ha->pdev))
                pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
 
-       if (is_qla8022(ha)) {
+       if (is_qla80XX(ha))
                qla4_8xxx_watchdog(ha);
-       }
 
-       if (!is_qla8022(ha)) {
+       if (is_qla40XX(ha)) {
                /* Check for heartbeat interval. */
                if (ha->firmware_options & FWOPT_HEARTBEAT_ENABLE &&
                    ha->heartbeat_interval != 0) {
@@ -2941,6 +2946,14 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
 
        set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
 
+       if (is_qla8032(ha) &&
+           !test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) {
+               ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n",
+                          __func__);
+               /* disable pause frame for ISP83xx */
+               qla4_83xx_disable_pause(ha);
+       }
+
        iscsi_host_for_each_session(ha->host, qla4xxx_fail_session);
 
        if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
@@ -2953,9 +2966,9 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
                goto recover_ha_init_adapter;
        }
 
-       /* For the ISP-82xx adapter, issue a stop_firmware if invoked
+       /* For the ISP-8xxx adapter, issue a stop_firmware if invoked
         * from eh_host_reset or ioctl module */
-       if (is_qla8022(ha) && !reset_chip &&
+       if (is_qla80XX(ha) && !reset_chip &&
            test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) {
 
                DEBUG2(ql4_printk(KERN_INFO, ha,
@@ -2978,13 +2991,13 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
        }
 
        /* Issue full chip reset if recovering from a catastrophic error,
-        * or if stop_firmware fails for ISP-82xx.
+        * or if stop_firmware fails for ISP-8xxx.
         * This is the default case for ISP-4xxx */
-       if (!is_qla8022(ha) || reset_chip) {
-               if (!is_qla8022(ha))
+       if (is_qla40XX(ha) || reset_chip) {
+               if (is_qla40XX(ha))
                        goto chip_reset;
 
-               /* Check if 82XX firmware is alive or not
+               /* Check if 8XXX firmware is alive or not
                 * We may have arrived here from NEED_RESET
                 * detection only */
                if (test_bit(AF_FW_RECOVERY, &ha->flags))
@@ -3000,10 +3013,10 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        schedule_timeout(HZ);
                }
-
+chip_reset:
                if (!test_bit(AF_FW_RECOVERY, &ha->flags))
                        qla4xxx_cmd_wait(ha);
-chip_reset:
+
                qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
                qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
                DEBUG2(ql4_printk(KERN_INFO, ha,
@@ -3021,7 +3034,7 @@ recover_ha_init_adapter:
                /* For ISP-4xxx, force function 1 to always initialize
                 * before function 3 to prevent both funcions from
                 * stepping on top of the other */
-               if (!is_qla8022(ha) && (ha->mac_index == 3))
+               if (is_qla40XX(ha) && (ha->mac_index == 3))
                        ssleep(6);
 
                /* NOTE: AF_ONLINE flag set upon successful completion of
@@ -3039,11 +3052,12 @@ recover_ha_init_adapter:
                 * Since we don't want to block the DPC for too long
                 * with multiple resets in the same thread,
                 * utilize DPC to retry */
-               if (is_qla8022(ha)) {
-                       qla4_8xxx_idc_lock(ha);
-                       dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
-                       qla4_8xxx_idc_unlock(ha);
-                       if (dev_state == QLA82XX_DEV_FAILED) {
+               if (is_qla80XX(ha)) {
+                       ha->isp_ops->idc_lock(ha);
+                       dev_state = qla4_8xxx_rd_direct(ha,
+                                                       QLA8XXX_CRB_DEV_STATE);
+                       ha->isp_ops->idc_unlock(ha);
+                       if (dev_state == QLA8XXX_DEV_FAILED) {
                                ql4_printk(KERN_INFO, ha, "%s: don't retry "
                                           "recover adapter. H/W is in Failed "
                                           "state\n", __func__);
@@ -3168,6 +3182,7 @@ int qla4xxx_unblock_ddb(struct iscsi_cls_session *cls_session)
        struct iscsi_session *sess;
        struct ddb_entry *ddb_entry;
        struct scsi_qla_host *ha;
+       int status = QLA_SUCCESS;
 
        sess = cls_session->dd_data;
        ddb_entry = sess->dd_data;
@@ -3175,11 +3190,20 @@ int qla4xxx_unblock_ddb(struct iscsi_cls_session *cls_session)
        ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
                   " unblock user space session\n", ha->host_no, __func__,
                   ddb_entry->fw_ddb_index);
-       iscsi_conn_start(ddb_entry->conn);
-       iscsi_conn_login_event(ddb_entry->conn,
-                              ISCSI_CONN_STATE_LOGGED_IN);
 
-       return QLA_SUCCESS;
+       if (!iscsi_is_session_online(cls_session)) {
+               iscsi_conn_start(ddb_entry->conn);
+               iscsi_conn_login_event(ddb_entry->conn,
+                                      ISCSI_CONN_STATE_LOGGED_IN);
+       } else {
+               ql4_printk(KERN_INFO, ha,
+                          "scsi%ld: %s: ddb[%d] session [%d] already logged in\n",
+                          ha->host_no, __func__, ddb_entry->fw_ddb_index,
+                          cls_session->sid);
+               status = QLA_ERROR;
+       }
+
+       return status;
 }
 
 static void qla4xxx_relogin_all_devices(struct scsi_qla_host *ha)
@@ -3373,15 +3397,26 @@ static void qla4xxx_do_dpc(struct work_struct *work)
        /* post events to application */
        qla4xxx_do_work(ha);
 
-       if (is_qla8022(ha)) {
+       if (is_qla80XX(ha)) {
                if (test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags)) {
-                       qla4_8xxx_idc_lock(ha);
-                       qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
-                           QLA82XX_DEV_FAILED);
-                       qla4_8xxx_idc_unlock(ha);
+                       if (is_qla8032(ha)) {
+                               ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n",
+                                          __func__);
+                               /* disable pause frame for ISP83xx */
+                               qla4_83xx_disable_pause(ha);
+                       }
+
+                       ha->isp_ops->idc_lock(ha);
+                       qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
+                                           QLA8XXX_DEV_FAILED);
+                       ha->isp_ops->idc_unlock(ha);
                        ql4_printk(KERN_INFO, ha, "HW State: FAILED\n");
                        qla4_8xxx_device_state_handler(ha);
                }
+
+               if (test_and_clear_bit(DPC_POST_IDC_ACK, &ha->dpc_flags))
+                       qla4_83xx_post_idc_ack(ha);
+
                if (test_and_clear_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) {
                        qla4_8xxx_need_qsnt_handler(ha);
                }
@@ -3391,7 +3426,8 @@ static void qla4xxx_do_dpc(struct work_struct *work)
            (test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
            test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
            test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))) {
-               if (ql4xdontresethba) {
+               if ((is_qla8022(ha) && ql4xdontresethba) ||
+                   (is_qla8032(ha) && qla4_83xx_idc_dontreset(ha))) {
                        DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
                            ha->host_no, __func__));
                        clear_bit(DPC_RESET_HA, &ha->dpc_flags);
@@ -3477,6 +3513,18 @@ static void qla4xxx_free_adapter(struct scsi_qla_host *ha)
                ha->isp_ops->disable_intrs(ha);
        }
 
+       if (is_qla40XX(ha)) {
+               writel(set_rmask(CSR_SCSI_PROCESSOR_INTR),
+                      &ha->reg->ctrl_status);
+               readl(&ha->reg->ctrl_status);
+       } else if (is_qla8022(ha)) {
+               writel(0, &ha->qla4_82xx_reg->host_int);
+               readl(&ha->qla4_82xx_reg->host_int);
+       } else if (is_qla8032(ha)) {
+               writel(0, &ha->qla4_83xx_reg->risc_intr);
+               readl(&ha->qla4_83xx_reg->risc_intr);
+       }
+
        /* Remove timer thread, if present */
        if (ha->timer_active)
                qla4xxx_stop_timer(ha);
@@ -3492,10 +3540,10 @@ static void qla4xxx_free_adapter(struct scsi_qla_host *ha)
        /* Put firmware in known state */
        ha->isp_ops->reset_firmware(ha);
 
-       if (is_qla8022(ha)) {
-               qla4_8xxx_idc_lock(ha);
+       if (is_qla80XX(ha)) {
+               ha->isp_ops->idc_lock(ha);
                qla4_8xxx_clear_drv_active(ha);
-               qla4_8xxx_idc_unlock(ha);
+               ha->isp_ops->idc_unlock(ha);
        }
 
        /* Detach interrupts */
@@ -3542,16 +3590,20 @@ int qla4_8xxx_iospace_config(struct scsi_qla_host *ha)
        /* Mapping of IO base pointer, door bell read and write pointer */
 
        /* mapping of IO base pointer */
-       ha->qla4_8xxx_reg =
-           (struct device_reg_82xx  __iomem *)((uint8_t *)ha->nx_pcibase +
-           0xbc000 + (ha->pdev->devfn << 11));
+       if (is_qla8022(ha)) {
+               ha->qla4_82xx_reg = (struct device_reg_82xx  __iomem *)
+                                   ((uint8_t *)ha->nx_pcibase + 0xbc000 +
+                                    (ha->pdev->devfn << 11));
+               ha->nx_db_wr_ptr = (ha->pdev->devfn == 4 ? QLA82XX_CAM_RAM_DB1 :
+                                   QLA82XX_CAM_RAM_DB2);
+       } else if (is_qla8032(ha)) {
+               ha->qla4_83xx_reg = (struct device_reg_83xx __iomem *)
+                                   ((uint8_t *)ha->nx_pcibase);
+       }
 
        db_base = pci_resource_start(pdev, 4);  /* doorbell is on bar 4 */
        db_len = pci_resource_len(pdev, 4);
 
-       ha->nx_db_wr_ptr = (ha->pdev->devfn == 4 ? QLA82XX_CAM_RAM_DB1 :
-           QLA82XX_CAM_RAM_DB2);
-
        return 0;
 iospace_error_exit:
        return -ENOMEM;
@@ -3639,23 +3691,64 @@ static struct isp_operations qla4xxx_isp_ops = {
        .rd_shdw_req_q_out      = qla4xxx_rd_shdw_req_q_out,
        .rd_shdw_rsp_q_in       = qla4xxx_rd_shdw_rsp_q_in,
        .get_sys_info           = qla4xxx_get_sys_info,
+       .queue_mailbox_command  = qla4xxx_queue_mbox_cmd,
+       .process_mailbox_interrupt = qla4xxx_process_mbox_intr,
 };
 
-static struct isp_operations qla4_8xxx_isp_ops = {
+static struct isp_operations qla4_82xx_isp_ops = {
        .iospace_config         = qla4_8xxx_iospace_config,
        .pci_config             = qla4_8xxx_pci_config,
-       .disable_intrs          = qla4_8xxx_disable_intrs,
-       .enable_intrs           = qla4_8xxx_enable_intrs,
+       .disable_intrs          = qla4_82xx_disable_intrs,
+       .enable_intrs           = qla4_82xx_enable_intrs,
        .start_firmware         = qla4_8xxx_load_risc,
-       .intr_handler           = qla4_8xxx_intr_handler,
-       .interrupt_service_routine = qla4_8xxx_interrupt_service_routine,
-       .reset_chip             = qla4_8xxx_isp_reset,
+       .restart_firmware       = qla4_82xx_try_start_fw,
+       .intr_handler           = qla4_82xx_intr_handler,
+       .interrupt_service_routine = qla4_82xx_interrupt_service_routine,
+       .need_reset             = qla4_8xxx_need_reset,
+       .reset_chip             = qla4_82xx_isp_reset,
        .reset_firmware         = qla4_8xxx_stop_firmware,
-       .queue_iocb             = qla4_8xxx_queue_iocb,
-       .complete_iocb          = qla4_8xxx_complete_iocb,
-       .rd_shdw_req_q_out      = qla4_8xxx_rd_shdw_req_q_out,
-       .rd_shdw_rsp_q_in       = qla4_8xxx_rd_shdw_rsp_q_in,
+       .queue_iocb             = qla4_82xx_queue_iocb,
+       .complete_iocb          = qla4_82xx_complete_iocb,
+       .rd_shdw_req_q_out      = qla4_82xx_rd_shdw_req_q_out,
+       .rd_shdw_rsp_q_in       = qla4_82xx_rd_shdw_rsp_q_in,
        .get_sys_info           = qla4_8xxx_get_sys_info,
+       .rd_reg_direct          = qla4_82xx_rd_32,
+       .wr_reg_direct          = qla4_82xx_wr_32,
+       .rd_reg_indirect        = qla4_82xx_md_rd_32,
+       .wr_reg_indirect        = qla4_82xx_md_wr_32,
+       .idc_lock               = qla4_82xx_idc_lock,
+       .idc_unlock             = qla4_82xx_idc_unlock,
+       .rom_lock_recovery      = qla4_82xx_rom_lock_recovery,
+       .queue_mailbox_command  = qla4_82xx_queue_mbox_cmd,
+       .process_mailbox_interrupt = qla4_82xx_process_mbox_intr,
+};
+
+static struct isp_operations qla4_83xx_isp_ops = {
+       .iospace_config         = qla4_8xxx_iospace_config,
+       .pci_config             = qla4_8xxx_pci_config,
+       .disable_intrs          = qla4_83xx_disable_intrs,
+       .enable_intrs           = qla4_83xx_enable_intrs,
+       .start_firmware         = qla4_8xxx_load_risc,
+       .restart_firmware       = qla4_83xx_start_firmware,
+       .intr_handler           = qla4_83xx_intr_handler,
+       .interrupt_service_routine = qla4_83xx_interrupt_service_routine,
+       .need_reset             = qla4_8xxx_need_reset,
+       .reset_chip             = qla4_83xx_isp_reset,
+       .reset_firmware         = qla4_8xxx_stop_firmware,
+       .queue_iocb             = qla4_83xx_queue_iocb,
+       .complete_iocb          = qla4_83xx_complete_iocb,
+       .rd_shdw_req_q_out      = qla4_83xx_rd_shdw_req_q_out,
+       .rd_shdw_rsp_q_in       = qla4_83xx_rd_shdw_rsp_q_in,
+       .get_sys_info           = qla4_8xxx_get_sys_info,
+       .rd_reg_direct          = qla4_83xx_rd_reg,
+       .wr_reg_direct          = qla4_83xx_wr_reg,
+       .rd_reg_indirect        = qla4_83xx_rd_reg_indirect,
+       .wr_reg_indirect        = qla4_83xx_wr_reg_indirect,
+       .idc_lock               = qla4_83xx_drv_lock,
+       .idc_unlock             = qla4_83xx_drv_unlock,
+       .rom_lock_recovery      = qla4_83xx_rom_lock_recovery,
+       .queue_mailbox_command  = qla4_83xx_queue_mbox_cmd,
+       .process_mailbox_interrupt = qla4_83xx_process_mbox_intr,
 };
 
 uint16_t qla4xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
@@ -3663,9 +3756,14 @@ uint16_t qla4xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
        return (uint16_t)le32_to_cpu(ha->shadow_regs->req_q_out);
 }
 
-uint16_t qla4_8xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
+uint16_t qla4_82xx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
 {
-       return (uint16_t)le32_to_cpu(readl(&ha->qla4_8xxx_reg->req_q_out));
+       return (uint16_t)le32_to_cpu(readl(&ha->qla4_82xx_reg->req_q_out));
+}
+
+uint16_t qla4_83xx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
+{
+       return (uint16_t)le32_to_cpu(readl(&ha->qla4_83xx_reg->req_q_out));
 }
 
 uint16_t qla4xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
@@ -3673,9 +3771,14 @@ uint16_t qla4xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
        return (uint16_t)le32_to_cpu(ha->shadow_regs->rsp_q_in);
 }
 
-uint16_t qla4_8xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
+uint16_t qla4_82xx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
 {
-       return (uint16_t)le32_to_cpu(readl(&ha->qla4_8xxx_reg->rsp_q_in));
+       return (uint16_t)le32_to_cpu(readl(&ha->qla4_82xx_reg->rsp_q_in));
+}
+
+uint16_t qla4_83xx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
+{
+       return (uint16_t)le32_to_cpu(readl(&ha->qla4_83xx_reg->rsp_q_in));
 }
 
 static ssize_t qla4xxx_show_boot_eth_info(void *data, int type, char *buf)
@@ -5050,30 +5153,36 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
        ha->pdev = pdev;
        ha->host = host;
        ha->host_no = host->host_no;
+       ha->func_num = PCI_FUNC(ha->pdev->devfn);
 
        pci_enable_pcie_error_reporting(pdev);
 
        /* Setup Runtime configurable options */
        if (is_qla8022(ha)) {
-               ha->isp_ops = &qla4_8xxx_isp_ops;
-               rwlock_init(&ha->hw_lock);
+               ha->isp_ops = &qla4_82xx_isp_ops;
+               ha->reg_tbl = (uint32_t *) qla4_82xx_reg_tbl;
                ha->qdr_sn_window = -1;
                ha->ddr_mn_window = -1;
                ha->curr_window = 255;
-               ha->func_num = PCI_FUNC(ha->pdev->devfn);
                nx_legacy_intr = &legacy_intr[ha->func_num];
                ha->nx_legacy_intr.int_vec_bit = nx_legacy_intr->int_vec_bit;
                ha->nx_legacy_intr.tgt_status_reg =
                        nx_legacy_intr->tgt_status_reg;
                ha->nx_legacy_intr.tgt_mask_reg = nx_legacy_intr->tgt_mask_reg;
                ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg;
+       } else if (is_qla8032(ha)) {
+               ha->isp_ops = &qla4_83xx_isp_ops;
+               ha->reg_tbl = (uint32_t *)qla4_83xx_reg_tbl;
        } else {
                ha->isp_ops = &qla4xxx_isp_ops;
        }
 
-       /* Set EEH reset type to fundamental if required by hba */
-       if (is_qla8022(ha))
+       if (is_qla80XX(ha)) {
+               rwlock_init(&ha->hw_lock);
+               ha->pf_bit = ha->func_num << 16;
+               /* Set EEH reset type to fundamental if required by hba */
                pdev->needs_freset = 1;
+       }
 
        /* Configure PCI I/O space. */
        ret = ha->isp_ops->iospace_config(ha);
@@ -5094,6 +5203,7 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
        init_completion(&ha->disable_acb_comp);
 
        spin_lock_init(&ha->hardware_lock);
+       spin_lock_init(&ha->work_lock);
 
        /* Initialize work list */
        INIT_LIST_HEAD(&ha->work_list);
@@ -5128,8 +5238,20 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
        if (ret)
                goto probe_failed;
 
-       if (is_qla8022(ha))
-               (void) qla4_8xxx_get_flash_info(ha);
+       if (is_qla80XX(ha))
+               qla4_8xxx_get_flash_info(ha);
+
+       if (is_qla8032(ha)) {
+               qla4_83xx_read_reset_template(ha);
+               /*
+                * NOTE: If ql4dontresethba==1, set IDC_CTRL DONTRESET_BIT0.
+                * If DONTRESET_BIT0 is set, drivers should not set dev_state
+                * to NEED_RESET. But if NEED_RESET is set, drivers should
+                * honor the reset.
+                */
+               if (ql4xdontresethba == 1)
+                       qla4_83xx_set_idc_dontreset(ha);
+       }
 
        /*
         * Initialize the Host adapter request/response queues and
@@ -5137,14 +5259,20 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
         * NOTE: interrupts enabled upon successful completion
         */
        status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
+
+       /* Don't retry adapter initialization if IRQ allocation failed */
+       if (!test_bit(AF_IRQ_ATTACHED, &ha->flags))
+               goto skip_retry_init;
+
        while ((!test_bit(AF_ONLINE, &ha->flags)) &&
            init_retry_count++ < MAX_INIT_RETRIES) {
 
-               if (is_qla8022(ha)) {
-                       qla4_8xxx_idc_lock(ha);
-                       dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
-                       qla4_8xxx_idc_unlock(ha);
-                       if (dev_state == QLA82XX_DEV_FAILED) {
+               if (is_qla80XX(ha)) {
+                       ha->isp_ops->idc_lock(ha);
+                       dev_state = qla4_8xxx_rd_direct(ha,
+                                                       QLA8XXX_CRB_DEV_STATE);
+                       ha->isp_ops->idc_unlock(ha);
+                       if (dev_state == QLA8XXX_DEV_FAILED) {
                                ql4_printk(KERN_WARNING, ha, "%s: don't retry "
                                    "initialize adapter. H/W is in failed state\n",
                                    __func__);
@@ -5160,16 +5288,18 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
                status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
        }
 
+skip_retry_init:
        if (!test_bit(AF_ONLINE, &ha->flags)) {
                ql4_printk(KERN_WARNING, ha, "Failed to initialize adapter\n");
 
-               if (is_qla8022(ha) && ql4xdontresethba) {
+               if ((is_qla8022(ha) && ql4xdontresethba) ||
+                   (is_qla8032(ha) && qla4_83xx_idc_dontreset(ha))) {
                        /* Put the device in failed state. */
                        DEBUG2(printk(KERN_ERR "HW STATE: FAILED\n"));
-                       qla4_8xxx_idc_lock(ha);
-                       qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
-                           QLA82XX_DEV_FAILED);
-                       qla4_8xxx_idc_unlock(ha);
+                       ha->isp_ops->idc_lock(ha);
+                       qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
+                                           QLA8XXX_DEV_FAILED);
+                       ha->isp_ops->idc_unlock(ha);
                }
                ret = -ENODEV;
                goto remove_host;
@@ -5195,12 +5325,13 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
                goto remove_host;
        }
 
-       /* For ISP-82XX, request_irqs is called in qla4_8xxx_load_risc
+       /*
+        * For ISP-8XXX, request_irqs is called in qla4_8xxx_load_risc
         * (which is called indirectly by qla4xxx_initialize_adapter),
         * so that irqs will be registered after crbinit but before
         * mbx_intr_enable.
         */
-       if (!is_qla8022(ha)) {
+       if (is_qla40XX(ha)) {
                ret = qla4xxx_request_irqs(ha);
                if (ret) {
                        ql4_printk(KERN_WARNING, ha, "Failed to reserve "
@@ -5226,6 +5357,10 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
               ha->host_no, ha->firmware_version[0], ha->firmware_version[1],
               ha->patch_number, ha->build_number);
 
+       /* Set the driver version */
+       if (is_qla80XX(ha))
+               qla4_8xxx_set_param(ha, SET_DRVR_VERSION);
+
        if (qla4xxx_setup_boot_info(ha))
                ql4_printk(KERN_ERR, ha,
                           "%s: No iSCSI boot target configured\n", __func__);
@@ -5333,9 +5468,16 @@ static void __devexit qla4xxx_remove_adapter(struct pci_dev *pdev)
 {
        struct scsi_qla_host *ha;
 
+       /*
+        * If the PCI device is disabled then it means probe_adapter had
+        * failed and resources already cleaned up on probe_adapter exit.
+        */
+       if (!pci_is_enabled(pdev))
+               return;
+
        ha = pci_get_drvdata(pdev);
 
-       if (!is_qla8022(ha))
+       if (is_qla40XX(ha))
                qla4xxx_prevent_other_port_reinit(ha);
 
        /* destroy iface from sysfs */
@@ -5755,7 +5897,16 @@ static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd)
 
        ha = to_qla_host(cmd->device->host);
 
-       if (ql4xdontresethba) {
+       if (is_qla8032(ha) && ql4xdontresethba)
+               qla4_83xx_set_idc_dontreset(ha);
+
+       /*
+        * For ISP8324, if IDC_CTRL DONTRESET_BIT0 is set by other
+        * protocol drivers, we should not set device_state to
+        * NEED_RESET
+        */
+       if (ql4xdontresethba ||
+           (is_qla8032(ha) && qla4_83xx_idc_dontreset(ha))) {
                DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
                     ha->host_no, __func__));
 
@@ -5779,7 +5930,7 @@ static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd)
        }
 
        if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
-               if (is_qla8022(ha))
+               if (is_qla80XX(ha))
                        set_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
                else
                        set_bit(DPC_RESET_HA, &ha->dpc_flags);
@@ -5874,7 +6025,7 @@ static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type)
                break;
        case SCSI_FIRMWARE_RESET:
                if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
-                       if (is_qla8022(ha))
+                       if (is_qla80XX(ha))
                                /* set firmware context reset */
                                set_bit(DPC_RESET_HA_FW_CONTEXT,
                                        &ha->dpc_flags);
@@ -6013,32 +6164,43 @@ static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
                    "0x%x is the owner\n", ha->host_no, __func__,
                    ha->pdev->devfn);
 
-               qla4_8xxx_idc_lock(ha);
-               qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
-                   QLA82XX_DEV_COLD);
-
-               qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION,
-                   QLA82XX_IDC_VERSION);
+               ha->isp_ops->idc_lock(ha);
+               qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
+                                   QLA8XXX_DEV_COLD);
+               ha->isp_ops->idc_unlock(ha);
+
+               rval = qla4_8xxx_update_idc_reg(ha);
+               if (rval == QLA_ERROR) {
+                       ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: FAILED\n",
+                                  ha->host_no, __func__);
+                       ha->isp_ops->idc_lock(ha);
+                       qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
+                                           QLA8XXX_DEV_FAILED);
+                       ha->isp_ops->idc_unlock(ha);
+                       goto exit_error_recovery;
+               }
 
-               qla4_8xxx_idc_unlock(ha);
                clear_bit(AF_FW_RECOVERY, &ha->flags);
                rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
-               qla4_8xxx_idc_lock(ha);
 
                if (rval != QLA_SUCCESS) {
                        ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: "
                            "FAILED\n", ha->host_no, __func__);
+                       ha->isp_ops->idc_lock(ha);
                        qla4_8xxx_clear_drv_active(ha);
-                       qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
-                           QLA82XX_DEV_FAILED);
+                       qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
+                                           QLA8XXX_DEV_FAILED);
+                       ha->isp_ops->idc_unlock(ha);
                } else {
                        ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: "
                            "READY\n", ha->host_no, __func__);
-                       qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
-                           QLA82XX_DEV_READY);
+                       ha->isp_ops->idc_lock(ha);
+                       qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
+                                           QLA8XXX_DEV_READY);
                        /* Clear driver state register */
-                       qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_STATE, 0);
+                       qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_STATE, 0);
                        qla4_8xxx_set_drv_active(ha);
+                       ha->isp_ops->idc_unlock(ha);
                        ret = qla4xxx_request_irqs(ha);
                        if (ret) {
                                ql4_printk(KERN_WARNING, ha, "Failed to "
@@ -6050,13 +6212,12 @@ static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
                                rval = QLA_SUCCESS;
                        }
                }
-               qla4_8xxx_idc_unlock(ha);
        } else {
                ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn 0x%x is not "
                    "the reset owner\n", ha->host_no, __func__,
                    ha->pdev->devfn);
-               if ((qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE) ==
-                   QLA82XX_DEV_READY)) {
+               if ((qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE) ==
+                    QLA8XXX_DEV_READY)) {
                        clear_bit(AF_FW_RECOVERY, &ha->flags);
                        rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
                        if (rval == QLA_SUCCESS) {
@@ -6071,11 +6232,12 @@ static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
                                        rval = QLA_SUCCESS;
                                }
                        }
-                       qla4_8xxx_idc_lock(ha);
+                       ha->isp_ops->idc_lock(ha);
                        qla4_8xxx_set_drv_active(ha);
-                       qla4_8xxx_idc_unlock(ha);
+                       ha->isp_ops->idc_unlock(ha);
                }
        }
+exit_error_recovery:
        clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
        return rval;
 }
@@ -6114,7 +6276,7 @@ qla4xxx_pci_slot_reset(struct pci_dev *pdev)
 
        ha->isp_ops->disable_intrs(ha);
 
-       if (is_qla8022(ha)) {
+       if (is_qla80XX(ha)) {
                if (qla4_8xxx_error_recovery(ha) == QLA_SUCCESS) {
                        ret = PCI_ERS_RESULT_RECOVERED;
                        goto exit_slot_reset;
@@ -6180,6 +6342,12 @@ static struct pci_device_id qla4xxx_pci_tbl[] = {
                .subvendor      = PCI_ANY_ID,
                .subdevice      = PCI_ANY_ID,
        },
+       {
+               .vendor         = PCI_VENDOR_ID_QLOGIC,
+               .device         = PCI_DEVICE_ID_QLOGIC_ISP8324,
+               .subvendor      = PCI_ANY_ID,
+               .subdevice      = PCI_ANY_ID,
+       },
        {0, 0},
 };
 MODULE_DEVICE_TABLE(pci, qla4xxx_pci_tbl);
index 725034f4252c72a4603683ec7cb87aba1035b155..f6df2ea91ab5e84772adfc5b73853176a2073ad1 100644 (file)
@@ -1,8 +1,8 @@
 /*
  * QLogic iSCSI HBA Driver
- * Copyright (c)  2003-2010 QLogic Corporation
+ * Copyright (c)  2003-2012 QLogic Corporation
  *
  * See LICENSE.qla4xxx for copyright and licensing details.
  */
 
-#define QLA4XXX_DRIVER_VERSION "5.02.00-k18"
+#define QLA4XXX_DRIVER_VERSION "5.03.00-k1"
index 182d5a57ab7468a6ddf5c6bfb684bf1396cb7f7b..57fbd5a3d4e2dae3b29ec3337c3493eb4cf815c3 100644 (file)
@@ -109,6 +109,7 @@ static const char * scsi_debug_version_date = "20100324";
 #define DEF_OPT_BLKS 64
 #define DEF_PHYSBLK_EXP 0
 #define DEF_PTYPE   0
+#define DEF_REMOVABLE false
 #define DEF_SCSI_LEVEL   5    /* INQUIRY, byte2 [5->SPC-3] */
 #define DEF_SECTOR_SIZE 512
 #define DEF_UNMAP_ALIGNMENT 0
@@ -193,11 +194,11 @@ static unsigned int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY;
 static unsigned int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
 static unsigned int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
 static unsigned int scsi_debug_write_same_length = DEF_WRITESAME_LENGTH;
+static bool scsi_debug_removable = DEF_REMOVABLE;
 
 static int scsi_debug_cmnd_count = 0;
 
 #define DEV_READONLY(TGT)      (0)
-#define DEV_REMOVEABLE(TGT)    (0)
 
 static unsigned int sdebug_store_sectors;
 static sector_t sdebug_capacity;       /* in sectors */
@@ -919,7 +920,7 @@ static int resp_inquiry(struct scsi_cmnd * scp, int target,
                return ret;
        }
        /* drops through here for a standard inquiry */
-       arr[1] = DEV_REMOVEABLE(target) ? 0x80 : 0;     /* Removable disk */
+       arr[1] = scsi_debug_removable ? 0x80 : 0;       /* Removable disk */
        arr[2] = scsi_debug_scsi_level;
        arr[3] = 2;    /* response_data_format==2 */
        arr[4] = SDEBUG_LONG_INQ_SZ - 5;
@@ -1211,7 +1212,7 @@ static int resp_format_pg(unsigned char * p, int pcontrol, int target)
        p[11] = sdebug_sectors_per & 0xff;
        p[12] = (scsi_debug_sector_size >> 8) & 0xff;
        p[13] = scsi_debug_sector_size & 0xff;
-       if (DEV_REMOVEABLE(target))
+       if (scsi_debug_removable)
                p[20] |= 0x20; /* should agree with INQUIRY */
        if (1 == pcontrol)
                memset(p + 2, 0, sizeof(format_pg) - 2);
@@ -2754,6 +2755,7 @@ module_param_named(opt_blks, scsi_debug_opt_blks, int, S_IRUGO);
 module_param_named(opts, scsi_debug_opts, int, S_IRUGO | S_IWUSR);
 module_param_named(physblk_exp, scsi_debug_physblk_exp, int, S_IRUGO);
 module_param_named(ptype, scsi_debug_ptype, int, S_IRUGO | S_IWUSR);
+module_param_named(removable, scsi_debug_removable, bool, S_IRUGO | S_IWUSR);
 module_param_named(scsi_level, scsi_debug_scsi_level, int, S_IRUGO);
 module_param_named(sector_size, scsi_debug_sector_size, int, S_IRUGO);
 module_param_named(unmap_alignment, scsi_debug_unmap_alignment, int, S_IRUGO);
@@ -2796,6 +2798,7 @@ MODULE_PARM_DESC(opt_blks, "optimal transfer length in block (def=64)");
 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
+MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=5[SPC-3])");
 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
@@ -3205,6 +3208,25 @@ static ssize_t sdebug_map_show(struct device_driver *ddp, char *buf)
 }
 DRIVER_ATTR(map, S_IRUGO, sdebug_map_show, NULL);
 
+static ssize_t sdebug_removable_show(struct device_driver *ddp,
+                                    char *buf)
+{
+       return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_removable ? 1 : 0);
+}
+static ssize_t sdebug_removable_store(struct device_driver *ddp,
+                                     const char *buf, size_t count)
+{
+       int n;
+
+       if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
+               scsi_debug_removable = (n > 0);
+               return count;
+       }
+       return -EINVAL;
+}
+DRIVER_ATTR(removable, S_IRUGO | S_IWUSR, sdebug_removable_show,
+           sdebug_removable_store);
+
 
 /* Note: The following function creates attribute files in the
    /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
@@ -3230,6 +3252,7 @@ static int do_create_driverfs_files(void)
        ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_tgts);
        ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_ptype);
        ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_opts);
+       ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_removable);
        ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_scsi_level);
        ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_virtual_gb);
        ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_vpd_use_hostno);
@@ -3255,6 +3278,7 @@ static void do_remove_driverfs_files(void)
        driver_remove_file(&sdebug_driverfs_driver, &driver_attr_scsi_level);
        driver_remove_file(&sdebug_driverfs_driver, &driver_attr_opts);
        driver_remove_file(&sdebug_driverfs_driver, &driver_attr_ptype);
+       driver_remove_file(&sdebug_driverfs_driver, &driver_attr_removable);
        driver_remove_file(&sdebug_driverfs_driver, &driver_attr_num_tgts);
        driver_remove_file(&sdebug_driverfs_driver, &driver_attr_num_parts);
        driver_remove_file(&sdebug_driverfs_driver, &driver_attr_no_uld);
index cf8dfab9489fc9e0ff035c5b3699e95fff435171..43fca9170bf26e7a119e3674965b77b296887e05 100644 (file)
@@ -172,6 +172,7 @@ static struct {
        {"HITACHI", "DF400", "*", BLIST_REPORTLUN2},
        {"HITACHI", "DF500", "*", BLIST_REPORTLUN2},
        {"HITACHI", "DISK-SUBSYSTEM", "*", BLIST_REPORTLUN2},
+       {"HITACHI", "HUS1530", "*", BLIST_NO_DIF},
        {"HITACHI", "OPEN-", "*", BLIST_REPORTLUN2},
        {"HITACHI", "OP-C-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
        {"HITACHI", "3380-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
index faa790fba1347fc61b0869015e2a28bad4f113f2..da36a3a81a9ee2206f753a04d4fbbe91b883e00a 100644 (file)
@@ -2473,7 +2473,8 @@ scsi_internal_device_unblock(struct scsi_device *sdev,
         * Try to transition the scsi device to SDEV_RUNNING or one of the
         * offlined states and goose the device queue if successful.
         */
-       if (sdev->sdev_state == SDEV_BLOCK)
+       if ((sdev->sdev_state == SDEV_BLOCK) ||
+           (sdev->sdev_state == SDEV_TRANSPORT_OFFLINE))
                sdev->sdev_state = new_state;
        else if (sdev->sdev_state == SDEV_CREATED_BLOCK) {
                if (new_state == SDEV_TRANSPORT_OFFLINE ||
index 8818dd681c194a445ea937c6f1e4b0dd70efff5f..65123a21b97ec17ffb695a3e09dd06556bc5b0a8 100644 (file)
 struct sock *scsi_nl_sock = NULL;
 EXPORT_SYMBOL_GPL(scsi_nl_sock);
 
-static DEFINE_SPINLOCK(scsi_nl_lock);
-static struct list_head scsi_nl_drivers;
-
-static u32     scsi_nl_state;
-#define STATE_EHANDLER_BSY             0x00000001
-
-struct scsi_nl_transport {
-       int (*msg_handler)(struct sk_buff *);
-       void (*event_handler)(struct notifier_block *, unsigned long, void *);
-       unsigned int refcnt;
-       int flags;
-};
-
-/* flags values (bit flags) */
-#define HANDLER_DELETING               0x1
-
-static struct scsi_nl_transport transports[SCSI_NL_MAX_TRANSPORTS] =
-       { {NULL, }, };
-
-
-struct scsi_nl_drvr {
-       struct list_head next;
-       int (*dmsg_handler)(struct Scsi_Host *shost, void *payload,
-                                u32 len, u32 pid);
-       void (*devt_handler)(struct notifier_block *nb,
-                                unsigned long event, void *notify_ptr);
-       struct scsi_host_template *hostt;
-       u64 vendor_id;
-       unsigned int refcnt;
-       int flags;
-};
-
-
-
 /**
  * scsi_nl_rcv_msg - Receive message handler.
  * @skb:               socket receive buffer
@@ -81,7 +47,6 @@ scsi_nl_rcv_msg(struct sk_buff *skb)
 {
        struct nlmsghdr *nlh;
        struct scsi_nl_hdr *hdr;
-       unsigned long flags;
        u32 rlen;
        int err, tport;
 
@@ -126,22 +91,24 @@ scsi_nl_rcv_msg(struct sk_buff *skb)
                /*
                 * Deliver message to the appropriate transport
                 */
-               spin_lock_irqsave(&scsi_nl_lock, flags);
-
                tport = hdr->transport;
-               if ((tport < SCSI_NL_MAX_TRANSPORTS) &&
-                   !(transports[tport].flags & HANDLER_DELETING) &&
-                   (transports[tport].msg_handler)) {
-                       transports[tport].refcnt++;
-                       spin_unlock_irqrestore(&scsi_nl_lock, flags);
-                       err = transports[tport].msg_handler(skb);
-                       spin_lock_irqsave(&scsi_nl_lock, flags);
-                       transports[tport].refcnt--;
-               } else
+               if (tport == SCSI_NL_TRANSPORT) {
+                       switch (hdr->msgtype) {
+                       case SCSI_NL_SHOST_VENDOR:
+                               /* Locate the driver that corresponds to the message */
+                               err = -ESRCH;
+                               break;
+                       default:
+                               err = -EBADR;
+                               break;
+                       }
+                       if (err)
+                               printk(KERN_WARNING "%s: Msgtype %d failed - err %d\n",
+                                      __func__, hdr->msgtype, err);
+               }
+               else
                        err = -ENOENT;
 
-               spin_unlock_irqrestore(&scsi_nl_lock, flags);
-
 next_msg:
                if ((err) || (nlh->nlmsg_flags & NLM_F_ACK))
                        netlink_ack(skb, nlh, err);
@@ -150,333 +117,6 @@ next_msg:
        }
 }
 
-
-/**
- * scsi_nl_rcv_event - Event handler for a netlink socket.
- * @this:              event notifier block
- * @event:             event type
- * @ptr:               event payload
- *
- **/
-static int
-scsi_nl_rcv_event(struct notifier_block *this, unsigned long event, void *ptr)
-{
-       struct netlink_notify *n = ptr;
-       struct scsi_nl_drvr *driver;
-       unsigned long flags;
-       int tport;
-
-       if (n->protocol != NETLINK_SCSITRANSPORT)
-               return NOTIFY_DONE;
-
-       spin_lock_irqsave(&scsi_nl_lock, flags);
-       scsi_nl_state |= STATE_EHANDLER_BSY;
-
-       /*
-        * Pass event on to any transports that may be listening
-        */
-       for (tport = 0; tport < SCSI_NL_MAX_TRANSPORTS; tport++) {
-               if (!(transports[tport].flags & HANDLER_DELETING) &&
-                   (transports[tport].event_handler)) {
-                       spin_unlock_irqrestore(&scsi_nl_lock, flags);
-                       transports[tport].event_handler(this, event, ptr);
-                       spin_lock_irqsave(&scsi_nl_lock, flags);
-               }
-       }
-
-       /*
-        * Pass event on to any drivers that may be listening
-        */
-       list_for_each_entry(driver, &scsi_nl_drivers, next) {
-               if (!(driver->flags & HANDLER_DELETING) &&
-                   (driver->devt_handler)) {
-                       spin_unlock_irqrestore(&scsi_nl_lock, flags);
-                       driver->devt_handler(this, event, ptr);
-                       spin_lock_irqsave(&scsi_nl_lock, flags);
-               }
-       }
-
-       scsi_nl_state &= ~STATE_EHANDLER_BSY;
-       spin_unlock_irqrestore(&scsi_nl_lock, flags);
-
-       return NOTIFY_DONE;
-}
-
-static struct notifier_block scsi_netlink_notifier = {
-       .notifier_call  = scsi_nl_rcv_event,
-};
-
-
-/*
- * GENERIC SCSI transport receive and event handlers
- */
-
-/**
- * scsi_generic_msg_handler - receive message handler for GENERIC transport messages
- * @skb:               socket receive buffer
- **/
-static int
-scsi_generic_msg_handler(struct sk_buff *skb)
-{
-       struct nlmsghdr *nlh = nlmsg_hdr(skb);
-       struct scsi_nl_hdr *snlh = NLMSG_DATA(nlh);
-       struct scsi_nl_drvr *driver;
-       struct Scsi_Host *shost;
-       unsigned long flags;
-       int err = 0, match, pid;
-
-       pid = NETLINK_CREDS(skb)->pid;
-
-       switch (snlh->msgtype) {
-       case SCSI_NL_SHOST_VENDOR:
-               {
-               struct scsi_nl_host_vendor_msg *msg = NLMSG_DATA(nlh);
-
-               /* Locate the driver that corresponds to the message */
-               spin_lock_irqsave(&scsi_nl_lock, flags);
-               match = 0;
-               list_for_each_entry(driver, &scsi_nl_drivers, next) {
-                       if (driver->vendor_id == msg->vendor_id) {
-                               match = 1;
-                               break;
-                       }
-               }
-
-               if ((!match) || (!driver->dmsg_handler)) {
-                       spin_unlock_irqrestore(&scsi_nl_lock, flags);
-                       err = -ESRCH;
-                       goto rcv_exit;
-               }
-
-               if (driver->flags & HANDLER_DELETING) {
-                       spin_unlock_irqrestore(&scsi_nl_lock, flags);
-                       err = -ESHUTDOWN;
-                       goto rcv_exit;
-               }
-
-               driver->refcnt++;
-               spin_unlock_irqrestore(&scsi_nl_lock, flags);
-
-
-               /* if successful, scsi_host_lookup takes a shost reference */
-               shost = scsi_host_lookup(msg->host_no);
-               if (!shost) {
-                       err = -ENODEV;
-                       goto driver_exit;
-               }
-
-               /* is this host owned by the vendor ? */
-               if (shost->hostt != driver->hostt) {
-                       err = -EINVAL;
-                       goto vendormsg_put;
-               }
-
-               /* pass message on to the driver */
-               err = driver->dmsg_handler(shost, (void *)&msg[1],
-                                        msg->vmsg_datalen, pid);
-
-vendormsg_put:
-               /* release reference by scsi_host_lookup */
-               scsi_host_put(shost);
-
-driver_exit:
-               /* release our own reference on the registration object */
-               spin_lock_irqsave(&scsi_nl_lock, flags);
-               driver->refcnt--;
-               spin_unlock_irqrestore(&scsi_nl_lock, flags);
-               break;
-               }
-
-       default:
-               err = -EBADR;
-               break;
-       }
-
-rcv_exit:
-       if (err)
-               printk(KERN_WARNING "%s: Msgtype %d failed - err %d\n",
-                        __func__, snlh->msgtype, err);
-       return err;
-}
-
-
-/**
- * scsi_nl_add_transport -
- *    Registers message and event handlers for a transport. Enables
- *    receipt of netlink messages and events to a transport.
- *
- * @tport:             transport registering handlers
- * @msg_handler:       receive message handler callback
- * @event_handler:     receive event handler callback
- **/
-int
-scsi_nl_add_transport(u8 tport,
-       int (*msg_handler)(struct sk_buff *),
-       void (*event_handler)(struct notifier_block *, unsigned long, void *))
-{
-       unsigned long flags;
-       int err = 0;
-
-       if (tport >= SCSI_NL_MAX_TRANSPORTS)
-               return -EINVAL;
-
-       spin_lock_irqsave(&scsi_nl_lock, flags);
-
-       if (scsi_nl_state & STATE_EHANDLER_BSY) {
-               spin_unlock_irqrestore(&scsi_nl_lock, flags);
-               msleep(1);
-               spin_lock_irqsave(&scsi_nl_lock, flags);
-       }
-
-       if (transports[tport].msg_handler || transports[tport].event_handler) {
-               err = -EALREADY;
-               goto register_out;
-       }
-
-       transports[tport].msg_handler = msg_handler;
-       transports[tport].event_handler = event_handler;
-       transports[tport].flags = 0;
-       transports[tport].refcnt = 0;
-
-register_out:
-       spin_unlock_irqrestore(&scsi_nl_lock, flags);
-
-       return err;
-}
-EXPORT_SYMBOL_GPL(scsi_nl_add_transport);
-
-
-/**
- * scsi_nl_remove_transport -
- *    Disable transport receiption of messages and events
- *
- * @tport:             transport deregistering handlers
- *
- **/
-void
-scsi_nl_remove_transport(u8 tport)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&scsi_nl_lock, flags);
-       if (scsi_nl_state & STATE_EHANDLER_BSY) {
-               spin_unlock_irqrestore(&scsi_nl_lock, flags);
-               msleep(1);
-               spin_lock_irqsave(&scsi_nl_lock, flags);
-       }
-
-       if (tport < SCSI_NL_MAX_TRANSPORTS) {
-               transports[tport].flags |= HANDLER_DELETING;
-
-               while (transports[tport].refcnt != 0) {
-                       spin_unlock_irqrestore(&scsi_nl_lock, flags);
-                       schedule_timeout_uninterruptible(HZ/4);
-                       spin_lock_irqsave(&scsi_nl_lock, flags);
-               }
-               transports[tport].msg_handler = NULL;
-               transports[tport].event_handler = NULL;
-               transports[tport].flags = 0;
-       }
-
-       spin_unlock_irqrestore(&scsi_nl_lock, flags);
-
-       return;
-}
-EXPORT_SYMBOL_GPL(scsi_nl_remove_transport);
-
-
-/**
- * scsi_nl_add_driver -
- *    A driver is registering its interfaces for SCSI netlink messages
- *
- * @vendor_id:          A unique identification value for the driver.
- * @hostt:             address of the driver's host template. Used
- *                     to verify an shost is bound to the driver
- * @nlmsg_handler:     receive message handler callback
- * @nlevt_handler:     receive event handler callback
- *
- * Returns:
- *   0 on Success
- *   error result otherwise
- **/
-int
-scsi_nl_add_driver(u64 vendor_id, struct scsi_host_template *hostt,
-       int (*nlmsg_handler)(struct Scsi_Host *shost, void *payload,
-                                u32 len, u32 pid),
-       void (*nlevt_handler)(struct notifier_block *nb,
-                                unsigned long event, void *notify_ptr))
-{
-       struct scsi_nl_drvr *driver;
-       unsigned long flags;
-
-       driver = kzalloc(sizeof(*driver), GFP_KERNEL);
-       if (unlikely(!driver)) {
-               printk(KERN_ERR "%s: allocation failure\n", __func__);
-               return -ENOMEM;
-       }
-
-       driver->dmsg_handler = nlmsg_handler;
-       driver->devt_handler = nlevt_handler;
-       driver->hostt = hostt;
-       driver->vendor_id = vendor_id;
-
-       spin_lock_irqsave(&scsi_nl_lock, flags);
-       if (scsi_nl_state & STATE_EHANDLER_BSY) {
-               spin_unlock_irqrestore(&scsi_nl_lock, flags);
-               msleep(1);
-               spin_lock_irqsave(&scsi_nl_lock, flags);
-       }
-       list_add_tail(&driver->next, &scsi_nl_drivers);
-       spin_unlock_irqrestore(&scsi_nl_lock, flags);
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(scsi_nl_add_driver);
-
-
-/**
- * scsi_nl_remove_driver -
- *    An driver is unregistering with the SCSI netlink messages
- *
- * @vendor_id:          The unique identification value for the driver.
- **/
-void
-scsi_nl_remove_driver(u64 vendor_id)
-{
-       struct scsi_nl_drvr *driver;
-       unsigned long flags;
-
-       spin_lock_irqsave(&scsi_nl_lock, flags);
-       if (scsi_nl_state & STATE_EHANDLER_BSY) {
-               spin_unlock_irqrestore(&scsi_nl_lock, flags);
-               msleep(1);
-               spin_lock_irqsave(&scsi_nl_lock, flags);
-       }
-
-       list_for_each_entry(driver, &scsi_nl_drivers, next) {
-               if (driver->vendor_id == vendor_id) {
-                       driver->flags |= HANDLER_DELETING;
-                       while (driver->refcnt != 0) {
-                               spin_unlock_irqrestore(&scsi_nl_lock, flags);
-                               schedule_timeout_uninterruptible(HZ/4);
-                               spin_lock_irqsave(&scsi_nl_lock, flags);
-                       }
-                       list_del(&driver->next);
-                       kfree(driver);
-                       spin_unlock_irqrestore(&scsi_nl_lock, flags);
-                       return;
-               }
-       }
-
-       spin_unlock_irqrestore(&scsi_nl_lock, flags);
-
-       printk(KERN_ERR "%s: removal of driver failed - vendor_id 0x%llx\n",
-              __func__, (unsigned long long)vendor_id);
-       return;
-}
-EXPORT_SYMBOL_GPL(scsi_nl_remove_driver);
-
-
 /**
  * scsi_netlink_init - Called by SCSI subsystem to initialize
  *     the SCSI transport netlink interface
@@ -485,36 +125,19 @@ EXPORT_SYMBOL_GPL(scsi_nl_remove_driver);
 void
 scsi_netlink_init(void)
 {
-       int error;
        struct netlink_kernel_cfg cfg = {
                .input  = scsi_nl_rcv_msg,
                .groups = SCSI_NL_GRP_CNT,
        };
 
-       INIT_LIST_HEAD(&scsi_nl_drivers);
-
-       error = netlink_register_notifier(&scsi_netlink_notifier);
-       if (error) {
-               printk(KERN_ERR "%s: register of event handler failed - %d\n",
-                               __func__, error);
-               return;
-       }
-
        scsi_nl_sock = netlink_kernel_create(&init_net, NETLINK_SCSITRANSPORT,
-                                            THIS_MODULE, &cfg);
+                                            &cfg);
        if (!scsi_nl_sock) {
                printk(KERN_ERR "%s: register of receive handler failed\n",
                                __func__);
-               netlink_unregister_notifier(&scsi_netlink_notifier);
                return;
        }
 
-       /* Register the entry points for the generic SCSI transport */
-       error = scsi_nl_add_transport(SCSI_NL_TRANSPORT,
-                               scsi_generic_msg_handler, NULL);
-       if (error)
-               printk(KERN_ERR "%s: register of GENERIC transport handler"
-                               "  failed - %d\n", __func__, error);
        return;
 }
 
@@ -526,158 +149,10 @@ scsi_netlink_init(void)
 void
 scsi_netlink_exit(void)
 {
-       scsi_nl_remove_transport(SCSI_NL_TRANSPORT);
-
        if (scsi_nl_sock) {
                netlink_kernel_release(scsi_nl_sock);
-               netlink_unregister_notifier(&scsi_netlink_notifier);
        }
 
        return;
 }
 
-
-/*
- * Exported Interfaces
- */
-
-/**
- * scsi_nl_send_transport_msg -
- *    Generic function to send a single message from a SCSI transport to
- *    a single process
- *
- * @pid:               receiving pid
- * @hdr:               message payload
- *
- **/
-void
-scsi_nl_send_transport_msg(u32 pid, struct scsi_nl_hdr *hdr)
-{
-       struct sk_buff *skb;
-       struct nlmsghdr *nlh;
-       const char *fn;
-       char *datab;
-       u32 len, skblen;
-       int err;
-
-       if (!scsi_nl_sock) {
-               err = -ENOENT;
-               fn = "netlink socket";
-               goto msg_fail;
-       }
-
-       len = NLMSG_SPACE(hdr->msglen);
-       skblen = NLMSG_SPACE(len);
-
-       skb = alloc_skb(skblen, GFP_KERNEL);
-       if (!skb) {
-               err = -ENOBUFS;
-               fn = "alloc_skb";
-               goto msg_fail;
-       }
-
-       nlh = nlmsg_put(skb, pid, 0, SCSI_TRANSPORT_MSG, len - sizeof(*nlh), 0);
-       if (!nlh) {
-               err = -ENOBUFS;
-               fn = "nlmsg_put";
-               goto msg_fail_skb;
-       }
-       datab = NLMSG_DATA(nlh);
-       memcpy(datab, hdr, hdr->msglen);
-
-       err = nlmsg_unicast(scsi_nl_sock, skb, pid);
-       if (err < 0) {
-               fn = "nlmsg_unicast";
-               /* nlmsg_unicast already kfree_skb'd */
-               goto msg_fail;
-       }
-
-       return;
-
-msg_fail_skb:
-       kfree_skb(skb);
-msg_fail:
-       printk(KERN_WARNING
-               "%s: Dropped Message : pid %d Transport %d, msgtype x%x, "
-               "msglen %d: %s : err %d\n",
-               __func__, pid, hdr->transport, hdr->msgtype, hdr->msglen,
-               fn, err);
-       return;
-}
-EXPORT_SYMBOL_GPL(scsi_nl_send_transport_msg);
-
-
-/**
- * scsi_nl_send_vendor_msg - called to send a shost vendor unique message
- *                      to a specific process id.
- *
- * @pid:               process id of the receiver
- * @host_no:           host # sending the message
- * @vendor_id:         unique identifier for the driver's vendor
- * @data_len:          amount, in bytes, of vendor unique payload data
- * @data_buf:          pointer to vendor unique data buffer
- *
- * Returns:
- *   0 on successful return
- *   otherwise, failing error code
- *
- * Notes:
- *     This routine assumes no locks are held on entry.
- */
-int
-scsi_nl_send_vendor_msg(u32 pid, unsigned short host_no, u64 vendor_id,
-                        char *data_buf, u32 data_len)
-{
-       struct sk_buff *skb;
-       struct nlmsghdr *nlh;
-       struct scsi_nl_host_vendor_msg *msg;
-       u32 len, skblen;
-       int err;
-
-       if (!scsi_nl_sock) {
-               err = -ENOENT;
-               goto send_vendor_fail;
-       }
-
-       len = SCSI_NL_MSGALIGN(sizeof(*msg) + data_len);
-       skblen = NLMSG_SPACE(len);
-
-       skb = alloc_skb(skblen, GFP_KERNEL);
-       if (!skb) {
-               err = -ENOBUFS;
-               goto send_vendor_fail;
-       }
-
-       nlh = nlmsg_put(skb, 0, 0, SCSI_TRANSPORT_MSG,
-                               skblen - sizeof(*nlh), 0);
-       if (!nlh) {
-               err = -ENOBUFS;
-               goto send_vendor_fail_skb;
-       }
-       msg = NLMSG_DATA(nlh);
-
-       INIT_SCSI_NL_HDR(&msg->snlh, SCSI_NL_TRANSPORT,
-                               SCSI_NL_SHOST_VENDOR, len);
-       msg->vendor_id = vendor_id;
-       msg->host_no = host_no;
-       msg->vmsg_datalen = data_len;   /* bytes */
-       memcpy(&msg[1], data_buf, data_len);
-
-       err = nlmsg_unicast(scsi_nl_sock, skb, pid);
-       if (err)
-               /* nlmsg_multicast already kfree_skb'd */
-               goto send_vendor_fail;
-
-       return 0;
-
-send_vendor_fail_skb:
-       kfree_skb(skb);
-send_vendor_fail:
-       printk(KERN_WARNING
-               "%s: Dropped SCSI Msg : host %d vendor_unique - err %d\n",
-               __func__, host_no, err);
-       return err;
-}
-EXPORT_SYMBOL(scsi_nl_send_vendor_msg);
-
-
index d947ffc20ceba301eaaf45973fee97dfba7fb7f7..3e58b2245f1fe2526aba4f55aef2a789ce74700d 100644 (file)
@@ -921,6 +921,9 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
        if (*bflags & BLIST_RETRY_HWERROR)
                sdev->retry_hwerror = 1;
 
+       if (*bflags & BLIST_NO_DIF)
+               sdev->no_dif = 1;
+
        transport_configure_device(&sdev->sdev_gendev);
 
        if (sdev->host->hostt->slave_configure) {
index 093d4f6a54d2f1126a1d34775100e58f81801c55..ce5224c92edae64061dd7c336d588838132a2e57 100644 (file)
@@ -1031,33 +1031,31 @@ static void __scsi_remove_target(struct scsi_target *starget)
 void scsi_remove_target(struct device *dev)
 {
        struct Scsi_Host *shost = dev_to_shost(dev->parent);
-       struct scsi_target *starget, *found;
+       struct scsi_target *starget, *last = NULL;
        unsigned long flags;
 
- restart:
-       found = NULL;
+       /* remove targets being careful to lookup next entry before
+        * deleting the last
+        */
        spin_lock_irqsave(shost->host_lock, flags);
        list_for_each_entry(starget, &shost->__targets, siblings) {
                if (starget->state == STARGET_DEL)
                        continue;
                if (starget->dev.parent == dev || &starget->dev == dev) {
-                       found = starget;
-                       found->reap_ref++;
-                       break;
+                       /* assuming new targets arrive at the end */
+                       starget->reap_ref++;
+                       spin_unlock_irqrestore(shost->host_lock, flags);
+                       if (last)
+                               scsi_target_reap(last);
+                       last = starget;
+                       __scsi_remove_target(starget);
+                       spin_lock_irqsave(shost->host_lock, flags);
                }
        }
        spin_unlock_irqrestore(shost->host_lock, flags);
 
-       if (found) {
-               __scsi_remove_target(found);
-               scsi_target_reap(found);
-               /* in the case where @dev has multiple starget children,
-                * continue removing.
-                *
-                * FIXME: does such a case exist?
-                */
-               goto restart;
-       }
+       if (last)
+               scsi_target_reap(last);
 }
 EXPORT_SYMBOL(scsi_remove_target);
 
index fa1dfaa83e32986061586c4fcb2f6f8e9e23eaf9..31969f2e13ceff07e2304dd0cca84b27c655fa22 100644 (file)
@@ -2119,7 +2119,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
        switch (nlh->nlmsg_type) {
        case ISCSI_UEVENT_CREATE_SESSION:
                err = iscsi_if_create_session(priv, ep, ev,
-                                             NETLINK_CB(skb).pid,
+                                             NETLINK_CB(skb).portid,
                                              ev->u.c_session.initial_cmdsn,
                                              ev->u.c_session.cmds_max,
                                              ev->u.c_session.queue_depth);
@@ -2132,7 +2132,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
                }
 
                err = iscsi_if_create_session(priv, ep, ev,
-                                       NETLINK_CB(skb).pid,
+                                       NETLINK_CB(skb).portid,
                                        ev->u.c_bound_session.initial_cmdsn,
                                        ev->u.c_bound_session.cmds_max,
                                        ev->u.c_bound_session.queue_depth);
@@ -2969,8 +2969,7 @@ static __init int iscsi_transport_init(void)
        if (err)
                goto unregister_conn_class;
 
-       nls = netlink_kernel_create(&init_net, NETLINK_ISCSI,
-                                   THIS_MODULE, &cfg);
+       nls = netlink_kernel_create(&init_net, NETLINK_ISCSI, &cfg);
        if (!nls) {
                err = -ENOBUFS;
                goto unregister_session_class;
index 4df73e52a4f9fb285a886bbe9e313227e9f92c23..12f6fdfc11474a9363ae6c641959832d2c1d6440 100644 (file)
@@ -261,6 +261,28 @@ sd_show_protection_type(struct device *dev, struct device_attribute *attr,
        return snprintf(buf, 20, "%u\n", sdkp->protection_type);
 }
 
+static ssize_t
+sd_store_protection_type(struct device *dev, struct device_attribute *attr,
+                        const char *buf, size_t count)
+{
+       struct scsi_disk *sdkp = to_scsi_disk(dev);
+       unsigned int val;
+       int err;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return -EACCES;
+
+       err = kstrtouint(buf, 10, &val);
+
+       if (err)
+               return err;
+
+       if (val >= 0 && val <= SD_DIF_TYPE3_PROTECTION)
+               sdkp->protection_type = val;
+
+       return count;
+}
+
 static ssize_t
 sd_show_protection_mode(struct device *dev, struct device_attribute *attr,
                        char *buf)
@@ -381,7 +403,8 @@ static struct device_attribute sd_disk_attrs[] = {
               sd_store_allow_restart),
        __ATTR(manage_start_stop, S_IRUGO|S_IWUSR, sd_show_manage_start_stop,
               sd_store_manage_start_stop),
-       __ATTR(protection_type, S_IRUGO, sd_show_protection_type, NULL),
+       __ATTR(protection_type, S_IRUGO|S_IWUSR, sd_show_protection_type,
+              sd_store_protection_type),
        __ATTR(protection_mode, S_IRUGO, sd_show_protection_mode, NULL),
        __ATTR(app_tag_own, S_IRUGO, sd_show_app_tag_own, NULL),
        __ATTR(thin_provisioning, S_IRUGO, sd_show_thin_provisioning, NULL),
@@ -804,9 +827,8 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
                SCpnt->cmnd[0] = WRITE_6;
                SCpnt->sc_data_direction = DMA_TO_DEVICE;
 
-               if (blk_integrity_rq(rq) &&
-                   sd_dif_prepare(rq, block, sdp->sector_size) == -EIO)
-                       goto out;
+               if (blk_integrity_rq(rq))
+                       sd_dif_prepare(rq, block, sdp->sector_size);
 
        } else if (rq_data_dir(rq) == READ) {
                SCpnt->cmnd[0] = READ_6;
@@ -1671,34 +1693,42 @@ sd_spinup_disk(struct scsi_disk *sdkp)
 /*
  * Determine whether disk supports Data Integrity Field.
  */
-static void sd_read_protection_type(struct scsi_disk *sdkp, unsigned char *buffer)
+static int sd_read_protection_type(struct scsi_disk *sdkp, unsigned char *buffer)
 {
        struct scsi_device *sdp = sdkp->device;
        u8 type;
+       int ret = 0;
 
        if (scsi_device_protection(sdp) == 0 || (buffer[12] & 1) == 0)
-               return;
+               return ret;
 
        type = ((buffer[12] >> 1) & 7) + 1; /* P_TYPE 0 = Type 1 */
 
-       if (type == sdkp->protection_type || !sdkp->first_scan)
-               return;
+       if (type > SD_DIF_TYPE3_PROTECTION)
+               ret = -ENODEV;
+       else if (scsi_host_dif_capable(sdp->host, type))
+               ret = 1;
+
+       if (sdkp->first_scan || type != sdkp->protection_type)
+               switch (ret) {
+               case -ENODEV:
+                       sd_printk(KERN_ERR, sdkp, "formatted with unsupported" \
+                                 " protection type %u. Disabling disk!\n",
+                                 type);
+                       break;
+               case 1:
+                       sd_printk(KERN_NOTICE, sdkp,
+                                 "Enabling DIF Type %u protection\n", type);
+                       break;
+               case 0:
+                       sd_printk(KERN_NOTICE, sdkp,
+                                 "Disabling DIF Type %u protection\n", type);
+                       break;
+               }
 
        sdkp->protection_type = type;
 
-       if (type > SD_DIF_TYPE3_PROTECTION) {
-               sd_printk(KERN_ERR, sdkp, "formatted with unsupported " \
-                         "protection type %u. Disabling disk!\n", type);
-               sdkp->capacity = 0;
-               return;
-       }
-
-       if (scsi_host_dif_capable(sdp->host, type))
-               sd_printk(KERN_NOTICE, sdkp,
-                         "Enabling DIF Type %u protection\n", type);
-       else
-               sd_printk(KERN_NOTICE, sdkp,
-                         "Disabling DIF Type %u protection\n", type);
+       return ret;
 }
 
 static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp,
@@ -1794,7 +1824,10 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
        sector_size = get_unaligned_be32(&buffer[8]);
        lba = get_unaligned_be64(&buffer[0]);
 
-       sd_read_protection_type(sdkp, buffer);
+       if (sd_read_protection_type(sdkp, buffer) < 0) {
+               sdkp->capacity = 0;
+               return -ENODEV;
+       }
 
        if ((sizeof(sdkp->capacity) == 4) && (lba >= 0xffffffffULL)) {
                sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
@@ -2632,7 +2665,8 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
        }
 
        add_disk(gd);
-       sd_dif_config_host(sdkp);
+       if (sdkp->capacity)
+               sd_dif_config_host(sdkp);
 
        sd_revalidate_disk(gd);
 
index f703f4827b6f290c6272c5e4b6474a73a4f66b0a..47c52a6d733c0f757b4ef42236e89fba172b857c 100644 (file)
@@ -156,7 +156,7 @@ struct sd_dif_tuple {
 #ifdef CONFIG_BLK_DEV_INTEGRITY
 
 extern void sd_dif_config_host(struct scsi_disk *);
-extern int sd_dif_prepare(struct request *rq, sector_t, unsigned int);
+extern void sd_dif_prepare(struct request *rq, sector_t, unsigned int);
 extern void sd_dif_complete(struct scsi_cmnd *, unsigned int);
 
 #else /* CONFIG_BLK_DEV_INTEGRITY */
index e52d5bc42bc47330d2ff9b658c6eec33a9143da5..04998f36e5071bdda94ff225f39b0ca3616eeb7b 100644 (file)
@@ -366,7 +366,8 @@ void sd_dif_config_host(struct scsi_disk *sdkp)
  *
  * Type 3 does not have a reference tag so no remapping is required.
  */
-int sd_dif_prepare(struct request *rq, sector_t hw_sector, unsigned int sector_sz)
+void sd_dif_prepare(struct request *rq, sector_t hw_sector,
+                   unsigned int sector_sz)
 {
        const int tuple_sz = sizeof(struct sd_dif_tuple);
        struct bio *bio;
@@ -378,7 +379,7 @@ int sd_dif_prepare(struct request *rq, sector_t hw_sector, unsigned int sector_s
        sdkp = rq->bio->bi_bdev->bd_disk->private_data;
 
        if (sdkp->protection_type == SD_DIF_TYPE3_PROTECTION)
-               return 0;
+               return;
 
        phys = hw_sector & 0xffffffff;
 
@@ -397,10 +398,9 @@ int sd_dif_prepare(struct request *rq, sector_t hw_sector, unsigned int sector_s
 
                        for (j = 0 ; j < iv->bv_len ; j += tuple_sz, sdt++) {
 
-                               if (be32_to_cpu(sdt->ref_tag) != virt)
-                                       goto error;
+                               if (be32_to_cpu(sdt->ref_tag) == virt)
+                                       sdt->ref_tag = cpu_to_be32(phys);
 
-                               sdt->ref_tag = cpu_to_be32(phys);
                                virt++;
                                phys++;
                        }
@@ -410,16 +410,6 @@ int sd_dif_prepare(struct request *rq, sector_t hw_sector, unsigned int sector_s
 
                bio->bi_flags |= (1 << BIO_MAPPED_INTEGRITY);
        }
-
-       return 0;
-
-error:
-       kunmap_atomic(sdt);
-       sd_printk(KERN_ERR, sdkp, "%s: virt %u, phys %u, ref %u, app %4x\n",
-                 __func__, virt, phys, be32_to_cpu(sdt->ref_tag),
-                 be16_to_cpu(sdt->app_tag));
-
-       return -EILSEQ;
 }
 
 /*
@@ -463,10 +453,7 @@ void sd_dif_complete(struct scsi_cmnd *scmd, unsigned int good_bytes)
                                        return;
                                }
 
-                               if (be32_to_cpu(sdt->ref_tag) != phys &&
-                                   sdt->app_tag != 0xffff)
-                                       sdt->ref_tag = 0xffffffff; /* Bad ref */
-                               else
+                               if (be32_to_cpu(sdt->ref_tag) == phys)
                                        sdt->ref_tag = cpu_to_be32(virt);
 
                                virt++;
index e41998cb098ebbea2a6243bda198cc463f857a35..98156a97c47259aaaa01df37accbcc8c86e7c27f 100644 (file)
@@ -37,6 +37,7 @@ static const char *verstr = "20101219";
 #include <linux/blkdev.h>
 #include <linux/moduleparam.h>
 #include <linux/cdev.h>
+#include <linux/idr.h>
 #include <linux/delay.h>
 #include <linux/mutex.h>
 
@@ -74,17 +75,14 @@ static const char *verstr = "20101219";
 #include "st_options.h"
 #include "st.h"
 
-static DEFINE_MUTEX(st_mutex);
 static int buffer_kbs;
 static int max_sg_segs;
 static int try_direct_io = TRY_DIRECT_IO;
 static int try_rdio = 1;
 static int try_wdio = 1;
 
-static int st_dev_max;
-static int st_nr_dev;
-
-static struct class *st_sysfs_class;
+static struct class st_sysfs_class;
+static struct device_attribute st_dev_attrs[];
 
 MODULE_AUTHOR("Kai Makisara");
 MODULE_DESCRIPTION("SCSI tape (st) driver");
@@ -173,13 +171,9 @@ static int debugging = DEBUG;
    24 bits) */
 #define SET_DENS_AND_BLK 0x10001
 
-static DEFINE_RWLOCK(st_dev_arr_lock);
-
 static int st_fixed_buffer_size = ST_FIXED_BUFFER_SIZE;
 static int st_max_sg_segs = ST_MAX_SG;
 
-static struct scsi_tape **scsi_tapes = NULL;
-
 static int modes_defined;
 
 static int enlarge_buffer(struct st_buffer *, int, int);
@@ -198,7 +192,6 @@ static int st_remove(struct device *);
 
 static int do_create_sysfs_files(void);
 static void do_remove_sysfs_files(void);
-static int do_create_class_files(struct scsi_tape *, int, int);
 
 static struct scsi_driver st_template = {
        .owner                  = THIS_MODULE,
@@ -221,6 +214,10 @@ static void scsi_tape_release(struct kref *);
 #define to_scsi_tape(obj) container_of(obj, struct scsi_tape, kref)
 
 static DEFINE_MUTEX(st_ref_mutex);
+static DEFINE_SPINLOCK(st_index_lock);
+static DEFINE_SPINLOCK(st_use_lock);
+static DEFINE_IDR(st_index_idr);
+
 
 \f
 #include "osst_detect.h"
@@ -238,10 +235,9 @@ static struct scsi_tape *scsi_tape_get(int dev)
        struct scsi_tape *STp = NULL;
 
        mutex_lock(&st_ref_mutex);
-       write_lock(&st_dev_arr_lock);
+       spin_lock(&st_index_lock);
 
-       if (dev < st_dev_max && scsi_tapes != NULL)
-               STp = scsi_tapes[dev];
+       STp = idr_find(&st_index_idr, dev);
        if (!STp) goto out;
 
        kref_get(&STp->kref);
@@ -258,7 +254,7 @@ out_put:
        kref_put(&STp->kref, scsi_tape_release);
        STp = NULL;
 out:
-       write_unlock(&st_dev_arr_lock);
+       spin_unlock(&st_index_lock);
        mutex_unlock(&st_ref_mutex);
        return STp;
 }
@@ -1188,7 +1184,6 @@ static int st_open(struct inode *inode, struct file *filp)
        int dev = TAPE_NR(inode);
        char *name;
 
-       mutex_lock(&st_mutex);
        /*
         * We really want to do nonseekable_open(inode, filp); here, but some
         * versions of tar incorrectly call lseek on tapes and bail out if that
@@ -1197,24 +1192,22 @@ static int st_open(struct inode *inode, struct file *filp)
        filp->f_mode &= ~(FMODE_PREAD | FMODE_PWRITE);
 
        if (!(STp = scsi_tape_get(dev))) {
-               mutex_unlock(&st_mutex);
                return -ENXIO;
        }
 
-       write_lock(&st_dev_arr_lock);
        filp->private_data = STp;
        name = tape_name(STp);
 
+       spin_lock(&st_use_lock);
        if (STp->in_use) {
-               write_unlock(&st_dev_arr_lock);
+               spin_unlock(&st_use_lock);
                scsi_tape_put(STp);
-               mutex_unlock(&st_mutex);
                DEB( printk(ST_DEB_MSG "%s: Device already in use.\n", name); )
                return (-EBUSY);
        }
 
        STp->in_use = 1;
-       write_unlock(&st_dev_arr_lock);
+       spin_unlock(&st_use_lock);
        STp->rew_at_close = STp->autorew_dev = (iminor(inode) & 0x80) == 0;
 
        if (scsi_autopm_get_device(STp->device) < 0) {
@@ -1262,16 +1255,16 @@ static int st_open(struct inode *inode, struct file *filp)
                        retval = (-EIO);
                goto err_out;
        }
-       mutex_unlock(&st_mutex);
        return 0;
 
  err_out:
        normalize_buffer(STp->buffer);
+       spin_lock(&st_use_lock);
        STp->in_use = 0;
+       spin_unlock(&st_use_lock);
        scsi_tape_put(STp);
        if (resumed)
                scsi_autopm_put_device(STp->device);
-       mutex_unlock(&st_mutex);
        return retval;
 
 }
@@ -1403,9 +1396,9 @@ static int st_release(struct inode *inode, struct file *filp)
                do_door_lock(STp, 0);
 
        normalize_buffer(STp->buffer);
-       write_lock(&st_dev_arr_lock);
+       spin_lock(&st_use_lock);
        STp->in_use = 0;
-       write_unlock(&st_dev_arr_lock);
+       spin_unlock(&st_use_lock);
        scsi_autopm_put_device(STp->device);
        scsi_tape_put(STp);
 
@@ -3992,16 +3985,98 @@ static const struct file_operations st_fops =
        .llseek =       noop_llseek,
 };
 
+static int create_one_cdev(struct scsi_tape *tape, int mode, int rew)
+{
+       int i, error;
+       dev_t cdev_devno;
+       struct cdev *cdev;
+       struct device *dev;
+       struct st_modedef *STm = &(tape->modes[mode]);
+       char name[10];
+       int dev_num = tape->index;
+
+       cdev_devno = MKDEV(SCSI_TAPE_MAJOR, TAPE_MINOR(dev_num, mode, rew));
+
+       cdev = cdev_alloc();
+       if (!cdev) {
+               pr_err("st%d: out of memory. Device not attached.\n", dev_num);
+               error = -ENOMEM;
+               goto out;
+       }
+       cdev->owner = THIS_MODULE;
+       cdev->ops = &st_fops;
+
+       error = cdev_add(cdev, cdev_devno, 1);
+       if (error) {
+               pr_err("st%d: Can't add %s-rewind mode %d\n", dev_num,
+                      rew ? "non" : "auto", mode);
+               pr_err("st%d: Device not attached.\n", dev_num);
+               goto out_free;
+       }
+       STm->cdevs[rew] = cdev;
+
+       i = mode << (4 - ST_NBR_MODE_BITS);
+       snprintf(name, 10, "%s%s%s", rew ? "n" : "",
+                tape->disk->disk_name, st_formats[i]);
+
+       dev = device_create(&st_sysfs_class, &tape->device->sdev_gendev,
+                           cdev_devno, &tape->modes[mode], "%s", name);
+       if (IS_ERR(dev)) {
+               pr_err("st%d: device_create failed\n", dev_num);
+               error = PTR_ERR(dev);
+               goto out_free;
+       }
+
+       STm->devs[rew] = dev;
+
+       return 0;
+out_free:
+       cdev_del(STm->cdevs[rew]);
+       STm->cdevs[rew] = NULL;
+out:
+       return error;
+}
+
+static int create_cdevs(struct scsi_tape *tape)
+{
+       int mode, error;
+       for (mode = 0; mode < ST_NBR_MODES; ++mode) {
+               error = create_one_cdev(tape, mode, 0);
+               if (error)
+                       return error;
+               error = create_one_cdev(tape, mode, 1);
+               if (error)
+                       return error;
+       }
+
+       return sysfs_create_link(&tape->device->sdev_gendev.kobj,
+                                &tape->modes[0].devs[0]->kobj, "tape");
+}
+
+static void remove_cdevs(struct scsi_tape *tape)
+{
+       int mode, rew;
+       sysfs_remove_link(&tape->device->sdev_gendev.kobj, "tape");
+       for (mode = 0; mode < ST_NBR_MODES; mode++) {
+               struct st_modedef *STm = &(tape->modes[mode]);
+               for (rew = 0; rew < 2; rew++) {
+                       if (STm->cdevs[rew])
+                               cdev_del(STm->cdevs[rew]);
+                       if (STm->devs[rew])
+                               device_unregister(STm->devs[rew]);
+               }
+       }
+}
+
 static int st_probe(struct device *dev)
 {
        struct scsi_device *SDp = to_scsi_device(dev);
        struct gendisk *disk = NULL;
-       struct cdev *cdev = NULL;
        struct scsi_tape *tpnt = NULL;
        struct st_modedef *STm;
        struct st_partstat *STps;
        struct st_buffer *buffer;
-       int i, j, mode, dev_num, error;
+       int i, dev_num, error;
        char *stp;
 
        if (SDp->type != TYPE_TAPE)
@@ -4028,58 +4103,16 @@ static int st_probe(struct device *dev)
                goto out_buffer_free;
        }
 
-       write_lock(&st_dev_arr_lock);
-       if (st_nr_dev >= st_dev_max) {
-               struct scsi_tape **tmp_da;
-               int tmp_dev_max;
-
-               tmp_dev_max = max(st_nr_dev * 2, 8);
-               if (tmp_dev_max > ST_MAX_TAPES)
-                       tmp_dev_max = ST_MAX_TAPES;
-               if (tmp_dev_max <= st_nr_dev) {
-                       write_unlock(&st_dev_arr_lock);
-                       printk(KERN_ERR "st: Too many tape devices (max. %d).\n",
-                              ST_MAX_TAPES);
-                       goto out_put_disk;
-               }
-
-               tmp_da = kzalloc(tmp_dev_max * sizeof(struct scsi_tape *), GFP_ATOMIC);
-               if (tmp_da == NULL) {
-                       write_unlock(&st_dev_arr_lock);
-                       printk(KERN_ERR "st: Can't extend device array.\n");
-                       goto out_put_disk;
-               }
-
-               if (scsi_tapes != NULL) {
-                       memcpy(tmp_da, scsi_tapes,
-                              st_dev_max * sizeof(struct scsi_tape *));
-                       kfree(scsi_tapes);
-               }
-               scsi_tapes = tmp_da;
-
-               st_dev_max = tmp_dev_max;
-       }
-
-       for (i = 0; i < st_dev_max; i++)
-               if (scsi_tapes[i] == NULL)
-                       break;
-       if (i >= st_dev_max)
-               panic("scsi_devices corrupt (st)");
-
        tpnt = kzalloc(sizeof(struct scsi_tape), GFP_ATOMIC);
        if (tpnt == NULL) {
-               write_unlock(&st_dev_arr_lock);
                printk(KERN_ERR "st: Can't allocate device descriptor.\n");
                goto out_put_disk;
        }
        kref_init(&tpnt->kref);
        tpnt->disk = disk;
-       sprintf(disk->disk_name, "st%d", i);
        disk->private_data = &tpnt->driver;
        disk->queue = SDp->request_queue;
        tpnt->driver = &st_template;
-       scsi_tapes[i] = tpnt;
-       dev_num = i;
 
        tpnt->device = SDp;
        if (SDp->scsi_level <= 2)
@@ -4125,6 +4158,7 @@ static int st_probe(struct device *dev)
                STm->default_compression = ST_DONT_TOUCH;
                STm->default_blksize = (-1);    /* No forced size */
                STm->default_density = (-1);    /* No forced density */
+               STm->tape = tpnt;
        }
 
        for (i = 0; i < ST_NBR_PARTITIONS; i++) {
@@ -4144,38 +4178,34 @@ static int st_probe(struct device *dev)
            tpnt->blksize_changed = 0;
        mutex_init(&tpnt->lock);
 
-       st_nr_dev++;
-       write_unlock(&st_dev_arr_lock);
+       if (!idr_pre_get(&st_index_idr, GFP_KERNEL)) {
+               pr_warn("st: idr expansion failed\n");
+               error = -ENOMEM;
+               goto out_put_disk;
+       }
 
-       for (mode = 0; mode < ST_NBR_MODES; ++mode) {
-               STm = &(tpnt->modes[mode]);
-               for (j=0; j < 2; j++) {
-                       cdev = cdev_alloc();
-                       if (!cdev) {
-                               printk(KERN_ERR
-                                      "st%d: out of memory. Device not attached.\n",
-                                      dev_num);
-                               goto out_free_tape;
-                       }
-                       cdev->owner = THIS_MODULE;
-                       cdev->ops = &st_fops;
-
-                       error = cdev_add(cdev,
-                                        MKDEV(SCSI_TAPE_MAJOR, TAPE_MINOR(dev_num, mode, j)),
-                                        1);
-                       if (error) {
-                               printk(KERN_ERR "st%d: Can't add %s-rewind mode %d\n",
-                                      dev_num, j ? "non" : "auto", mode);
-                               printk(KERN_ERR "st%d: Device not attached.\n", dev_num);
-                               goto out_free_tape;
-                       }
-                       STm->cdevs[j] = cdev;
+       spin_lock(&st_index_lock);
+       error = idr_get_new(&st_index_idr, tpnt, &dev_num);
+       spin_unlock(&st_index_lock);
+       if (error) {
+               pr_warn("st: idr allocation failed: %d\n", error);
+               goto out_put_disk;
+       }
 
-               }
-               error = do_create_class_files(tpnt, dev_num, mode);
-               if (error)
-                       goto out_free_tape;
+       if (dev_num > ST_MAX_TAPES) {
+               pr_err("st: Too many tape devices (max. %d).\n", ST_MAX_TAPES);
+               goto out_put_index;
        }
+
+       tpnt->index = dev_num;
+       sprintf(disk->disk_name, "st%d", dev_num);
+
+       dev_set_drvdata(dev, tpnt);
+
+
+       error = create_cdevs(tpnt);
+       if (error)
+               goto out_remove_devs;
        scsi_autopm_put_device(SDp);
 
        sdev_printk(KERN_NOTICE, SDp,
@@ -4186,28 +4216,12 @@ static int st_probe(struct device *dev)
 
        return 0;
 
-out_free_tape:
-       for (mode=0; mode < ST_NBR_MODES; mode++) {
-               STm = &(tpnt->modes[mode]);
-               sysfs_remove_link(&tpnt->device->sdev_gendev.kobj,
-                                 "tape");
-               for (j=0; j < 2; j++) {
-                       if (STm->cdevs[j]) {
-                               if (cdev == STm->cdevs[j])
-                                       cdev = NULL;
-                                       device_destroy(st_sysfs_class,
-                                                      MKDEV(SCSI_TAPE_MAJOR,
-                                                            TAPE_MINOR(i, mode, j)));
-                               cdev_del(STm->cdevs[j]);
-                       }
-               }
-       }
-       if (cdev)
-               cdev_del(cdev);
-       write_lock(&st_dev_arr_lock);
-       scsi_tapes[dev_num] = NULL;
-       st_nr_dev--;
-       write_unlock(&st_dev_arr_lock);
+out_remove_devs:
+       remove_cdevs(tpnt);
+out_put_index:
+       spin_lock(&st_index_lock);
+       idr_remove(&st_index_idr, dev_num);
+       spin_unlock(&st_index_lock);
 out_put_disk:
        put_disk(disk);
        kfree(tpnt);
@@ -4220,38 +4234,18 @@ out:
 
 static int st_remove(struct device *dev)
 {
-       struct scsi_device *SDp = to_scsi_device(dev);
-       struct scsi_tape *tpnt;
-       int i, j, mode;
-
-       scsi_autopm_get_device(SDp);
-       write_lock(&st_dev_arr_lock);
-       for (i = 0; i < st_dev_max; i++) {
-               tpnt = scsi_tapes[i];
-               if (tpnt != NULL && tpnt->device == SDp) {
-                       scsi_tapes[i] = NULL;
-                       st_nr_dev--;
-                       write_unlock(&st_dev_arr_lock);
-                       sysfs_remove_link(&tpnt->device->sdev_gendev.kobj,
-                                         "tape");
-                       for (mode = 0; mode < ST_NBR_MODES; ++mode) {
-                               for (j=0; j < 2; j++) {
-                                       device_destroy(st_sysfs_class,
-                                                      MKDEV(SCSI_TAPE_MAJOR,
-                                                            TAPE_MINOR(i, mode, j)));
-                                       cdev_del(tpnt->modes[mode].cdevs[j]);
-                                       tpnt->modes[mode].cdevs[j] = NULL;
-                               }
-                       }
+       struct scsi_tape *tpnt = dev_get_drvdata(dev);
+       int index = tpnt->index;
 
-                       mutex_lock(&st_ref_mutex);
-                       kref_put(&tpnt->kref, scsi_tape_release);
-                       mutex_unlock(&st_ref_mutex);
-                       return 0;
-               }
-       }
+       scsi_autopm_get_device(to_scsi_device(dev));
+       remove_cdevs(tpnt);
 
-       write_unlock(&st_dev_arr_lock);
+       mutex_lock(&st_ref_mutex);
+       kref_put(&tpnt->kref, scsi_tape_release);
+       mutex_unlock(&st_ref_mutex);
+       spin_lock(&st_index_lock);
+       idr_remove(&st_index_idr, index);
+       spin_unlock(&st_index_lock);
        return 0;
 }
 
@@ -4283,6 +4277,11 @@ static void scsi_tape_release(struct kref *kref)
        return;
 }
 
+static struct class st_sysfs_class = {
+       .name = "scsi_tape",
+       .dev_attrs = st_dev_attrs,
+};
+
 static int __init init_st(void)
 {
        int err;
@@ -4292,10 +4291,10 @@ static int __init init_st(void)
        printk(KERN_INFO "st: Version %s, fixed bufsize %d, s/g segs %d\n",
                verstr, st_fixed_buffer_size, st_max_sg_segs);
 
-       st_sysfs_class = class_create(THIS_MODULE, "scsi_tape");
-       if (IS_ERR(st_sysfs_class)) {
-               printk(KERN_ERR "Unable create sysfs class for SCSI tapes\n");
-               return PTR_ERR(st_sysfs_class);
+       err = class_register(&st_sysfs_class);
+       if (err) {
+               pr_err("Unable register sysfs class for SCSI tapes\n");
+               return err;
        }
 
        err = register_chrdev_region(MKDEV(SCSI_TAPE_MAJOR, 0),
@@ -4322,7 +4321,7 @@ err_chrdev:
        unregister_chrdev_region(MKDEV(SCSI_TAPE_MAJOR, 0),
                                 ST_MAX_TAPE_ENTRIES);
 err_class:
-       class_destroy(st_sysfs_class);
+       class_unregister(&st_sysfs_class);
        return err;
 }
 
@@ -4332,8 +4331,7 @@ static void __exit exit_st(void)
        scsi_unregister_driver(&st_template.gendrv);
        unregister_chrdev_region(MKDEV(SCSI_TAPE_MAJOR, 0),
                                 ST_MAX_TAPE_ENTRIES);
-       class_destroy(st_sysfs_class);
-       kfree(scsi_tapes);
+       class_unregister(&st_sysfs_class);
        printk(KERN_INFO "st: Unloaded.\n");
 }
 
@@ -4405,10 +4403,9 @@ static void do_remove_sysfs_files(void)
        driver_remove_file(sysfs, &driver_attr_try_direct_io);
 }
 
-
 /* The sysfs simple class interface */
 static ssize_t
-st_defined_show(struct device *dev, struct device_attribute *attr, char *buf)
+defined_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
        struct st_modedef *STm = dev_get_drvdata(dev);
        ssize_t l = 0;
@@ -4417,10 +4414,9 @@ st_defined_show(struct device *dev, struct device_attribute *attr, char *buf)
        return l;
 }
 
-DEVICE_ATTR(defined, S_IRUGO, st_defined_show, NULL);
-
 static ssize_t
-st_defblk_show(struct device *dev, struct device_attribute *attr, char *buf)
+default_blksize_show(struct device *dev, struct device_attribute *attr,
+                    char *buf)
 {
        struct st_modedef *STm = dev_get_drvdata(dev);
        ssize_t l = 0;
@@ -4429,10 +4425,10 @@ st_defblk_show(struct device *dev, struct device_attribute *attr, char *buf)
        return l;
 }
 
-DEVICE_ATTR(default_blksize, S_IRUGO, st_defblk_show, NULL);
 
 static ssize_t
-st_defdensity_show(struct device *dev, struct device_attribute *attr, char *buf)
+default_density_show(struct device *dev, struct device_attribute *attr,
+                    char *buf)
 {
        struct st_modedef *STm = dev_get_drvdata(dev);
        ssize_t l = 0;
@@ -4443,11 +4439,9 @@ st_defdensity_show(struct device *dev, struct device_attribute *attr, char *buf)
        return l;
 }
 
-DEVICE_ATTR(default_density, S_IRUGO, st_defdensity_show, NULL);
-
 static ssize_t
-st_defcompression_show(struct device *dev, struct device_attribute *attr,
-                      char *buf)
+default_compression_show(struct device *dev, struct device_attribute *attr,
+                        char *buf)
 {
        struct st_modedef *STm = dev_get_drvdata(dev);
        ssize_t l = 0;
@@ -4456,28 +4450,14 @@ st_defcompression_show(struct device *dev, struct device_attribute *attr,
        return l;
 }
 
-DEVICE_ATTR(default_compression, S_IRUGO, st_defcompression_show, NULL);
-
 static ssize_t
-st_options_show(struct device *dev, struct device_attribute *attr, char *buf)
+options_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
        struct st_modedef *STm = dev_get_drvdata(dev);
-       struct scsi_tape *STp;
-       int i, j, options;
+       struct scsi_tape *STp = STm->tape;
+       int options;
        ssize_t l = 0;
 
-       for (i=0; i < st_dev_max; i++) {
-               for (j=0; j < ST_NBR_MODES; j++)
-                       if (&scsi_tapes[i]->modes[j] == STm)
-                               break;
-               if (j < ST_NBR_MODES)
-                       break;
-       }
-       if (i == st_dev_max)
-               return 0;  /* should never happen */
-
-       STp = scsi_tapes[i];
-
        options = STm->do_buffer_writes ? MT_ST_BUFFER_WRITES : 0;
        options |= STm->do_async_writes ? MT_ST_ASYNC_WRITES : 0;
        options |= STm->do_read_ahead ? MT_ST_READ_AHEAD : 0;
@@ -4498,66 +4478,14 @@ st_options_show(struct device *dev, struct device_attribute *attr, char *buf)
        return l;
 }
 
-DEVICE_ATTR(options, S_IRUGO, st_options_show, NULL);
-
-static int do_create_class_files(struct scsi_tape *STp, int dev_num, int mode)
-{
-       int i, rew, error;
-       char name[10];
-       struct device *st_class_member;
-
-       for (rew=0; rew < 2; rew++) {
-               /* Make sure that the minor numbers corresponding to the four
-                  first modes always get the same names */
-               i = mode << (4 - ST_NBR_MODE_BITS);
-               snprintf(name, 10, "%s%s%s", rew ? "n" : "",
-                        STp->disk->disk_name, st_formats[i]);
-               st_class_member =
-                       device_create(st_sysfs_class, &STp->device->sdev_gendev,
-                                     MKDEV(SCSI_TAPE_MAJOR,
-                                           TAPE_MINOR(dev_num, mode, rew)),
-                                     &STp->modes[mode], "%s", name);
-               if (IS_ERR(st_class_member)) {
-                       printk(KERN_WARNING "st%d: device_create failed\n",
-                              dev_num);
-                       error = PTR_ERR(st_class_member);
-                       goto out;
-               }
-
-               error = device_create_file(st_class_member,
-                                          &dev_attr_defined);
-               if (error) goto out;
-               error = device_create_file(st_class_member,
-                                          &dev_attr_default_blksize);
-               if (error) goto out;
-               error = device_create_file(st_class_member,
-                                          &dev_attr_default_density);
-               if (error) goto out;
-               error = device_create_file(st_class_member,
-                                          &dev_attr_default_compression);
-               if (error) goto out;
-               error = device_create_file(st_class_member,
-                                          &dev_attr_options);
-               if (error) goto out;
-
-               if (mode == 0 && rew == 0) {
-                       error = sysfs_create_link(&STp->device->sdev_gendev.kobj,
-                                                 &st_class_member->kobj,
-                                                 "tape");
-                       if (error) {
-                               printk(KERN_ERR
-                                      "st%d: Can't create sysfs link from SCSI device.\n",
-                                      dev_num);
-                               goto out;
-                       }
-               }
-       }
-
-       return 0;
-
-out:
-       return error;
-}
+static struct device_attribute st_dev_attrs[] = {
+       __ATTR_RO(defined),
+       __ATTR_RO(default_blksize),
+       __ATTR_RO(default_density),
+       __ATTR_RO(default_compression),
+       __ATTR_RO(options),
+       __ATTR_NULL,
+};
 
 /* The following functions may be useful for a larger audience. */
 static int sgl_map_user_pages(struct st_buffer *STbp,
index b548923785eda33ab21e297bb494f8b7729909de..f3eee0f9f40c01631ad3e62cab4d7c4c5c557f56 100644 (file)
@@ -66,6 +66,8 @@ struct st_modedef {
        unsigned char default_compression;      /* 0 = don't touch, etc */
        short default_density;  /* Forced density, -1 = no value */
        int default_blksize;    /* Forced blocksize, -1 = no value */
+       struct scsi_tape *tape;
+       struct device *devs[2];  /* Auto-rewind and non-rewind devices */
        struct cdev *cdevs[2];  /* Auto-rewind and non-rewind devices */
 };
 
@@ -76,7 +78,7 @@ struct st_modedef {
 #define ST_MODE_SHIFT (7 - ST_NBR_MODE_BITS)
 #define ST_MODE_MASK ((ST_NBR_MODES - 1) << ST_MODE_SHIFT)
 
-#define ST_MAX_TAPES 128
+#define ST_MAX_TAPES (1 << (20 - (ST_NBR_MODE_BITS + 1)))
 #define ST_MAX_TAPE_ENTRIES  (ST_MAX_TAPES << (ST_NBR_MODE_BITS + 1))
 
 /* The status related to each partition */
@@ -99,6 +101,7 @@ struct scsi_tape {
        struct mutex lock;      /* For serialization */
        struct completion wait; /* For SCSI commands */
        struct st_buffer *buffer;
+       int index;
 
        /* Drive characteristics */
        unsigned char omit_blklims;
index 2d198a01a41024a338b405c78b19d34d6829caf0..ecc31a1f73fccb3852591df2fe6f79ff5e7de416 100644 (file)
@@ -325,6 +325,12 @@ config SPI_S3C64XX
        help
          SPI driver for Samsung S3C64XX and newer SoCs.
 
+config SPI_SC18IS602
+       tristate "NXP SC18IS602/602B/603 I2C to SPI bridge"
+       depends on I2C
+       help
+         SPI driver for NXP SC18IS602/602B/603 I2C to SPI bridge.
+
 config SPI_SH_MSIOF
        tristate "SuperH MSIOF SPI controller"
        depends on SUPERH && HAVE_CLK
@@ -364,11 +370,12 @@ config SPI_STMP3XXX
        help
          SPI driver for Freescale STMP37xx/378x SoC SSP interface
 
-config SPI_TEGRA
-       tristate "Nvidia Tegra SPI controller"
-       depends on ARCH_TEGRA && TEGRA20_APB_DMA
+config SPI_MXS
+       tristate "Freescale MXS SPI controller"
+       depends on ARCH_MXS
+       select STMP_DEVICE
        help
-         SPI driver for NVidia Tegra SoCs
+         SPI driver for Freescale MXS devices.
 
 config SPI_TI_SSP
        tristate "TI Sequencer Serial Port - SPI Support"
index 3920dcf4c7400e2c64bfb82803316f5f27535a63..22fd3a7251bcbd5833ea80506fb9eeaa6f38b104 100644 (file)
@@ -36,6 +36,7 @@ obj-$(CONFIG_SPI_LM70_LLP)            += spi-lm70llp.o
 obj-$(CONFIG_SPI_MPC512x_PSC)          += spi-mpc512x-psc.o
 obj-$(CONFIG_SPI_MPC52xx_PSC)          += spi-mpc52xx-psc.o
 obj-$(CONFIG_SPI_MPC52xx)              += spi-mpc52xx.o
+obj-$(CONFIG_SPI_MXS)                  += spi-mxs.o
 obj-$(CONFIG_SPI_NUC900)               += spi-nuc900.o
 obj-$(CONFIG_SPI_OC_TINY)              += spi-oc-tiny.o
 obj-$(CONFIG_SPI_OMAP_UWIRE)           += spi-omap-uwire.o
@@ -51,13 +52,13 @@ obj-$(CONFIG_SPI_S3C24XX)           += spi-s3c24xx-hw.o
 spi-s3c24xx-hw-y                       := spi-s3c24xx.o
 spi-s3c24xx-hw-$(CONFIG_SPI_S3C24XX_FIQ) += spi-s3c24xx-fiq.o
 obj-$(CONFIG_SPI_S3C64XX)              += spi-s3c64xx.o
+obj-$(CONFIG_SPI_SC18IS602)            += spi-sc18is602.o
 obj-$(CONFIG_SPI_SH)                   += spi-sh.o
 obj-$(CONFIG_SPI_SH_HSPI)              += spi-sh-hspi.o
 obj-$(CONFIG_SPI_SH_MSIOF)             += spi-sh-msiof.o
 obj-$(CONFIG_SPI_SH_SCI)               += spi-sh-sci.o
 obj-$(CONFIG_SPI_SIRF)         += spi-sirf.o
 obj-$(CONFIG_SPI_STMP3XXX)             += spi-stmp.o
-obj-$(CONFIG_SPI_TEGRA)                        += spi-tegra.o
 obj-$(CONFIG_SPI_TI_SSP)               += spi-ti-ssp.o
 obj-$(CONFIG_SPI_TLE62X0)              += spi-tle62x0.o
 obj-$(CONFIG_SPI_TOPCLIFF_PCH)         += spi-topcliff-pch.o
index c00d00e96ee43784d8cd37715258f760a54bcbf8..f1fec2a19d101b110ec3da178299400afb2a1e29 100644 (file)
@@ -307,8 +307,6 @@ static const struct of_device_id altera_spi_match[] = {
        {},
 };
 MODULE_DEVICE_TABLE(of, altera_spi_match);
-#else /* CONFIG_OF */
-#define altera_spi_match NULL
 #endif /* CONFIG_OF */
 
 static struct platform_driver altera_spi_driver = {
@@ -318,7 +316,7 @@ static struct platform_driver altera_spi_driver = {
                .name = DRV_NAME,
                .owner = THIS_MODULE,
                .pm = NULL,
-               .of_match_table = altera_spi_match,
+               .of_match_table = of_match_ptr(altera_spi_match),
        },
 };
 module_platform_driver(altera_spi_driver);
index 0b56cfc71fabe28968d1f6d2a299bc5c281861e9..a2b50c516b318b38f13d131bd665c65c7d7f4d25 100644 (file)
@@ -22,6 +22,8 @@
 #include <linux/init.h>
 #include <linux/platform_device.h>
 #include <linux/gpio.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
 
 #include <linux/spi/spi.h>
 #include <linux/spi/spi_bitbang.h>
@@ -46,6 +48,7 @@ struct spi_gpio {
        struct spi_bitbang              bitbang;
        struct spi_gpio_platform_data   pdata;
        struct platform_device          *pdev;
+       int                             cs_gpios[];
 };
 
 /*----------------------------------------------------------------------*/
@@ -89,15 +92,21 @@ struct spi_gpio {
 
 /*----------------------------------------------------------------------*/
 
-static inline const struct spi_gpio_platform_data * __pure
-spi_to_pdata(const struct spi_device *spi)
+static inline struct spi_gpio * __pure
+spi_to_spi_gpio(const struct spi_device *spi)
 {
        const struct spi_bitbang        *bang;
-       const struct spi_gpio           *spi_gpio;
+       struct spi_gpio                 *spi_gpio;
 
        bang = spi_master_get_devdata(spi->master);
        spi_gpio = container_of(bang, struct spi_gpio, bitbang);
-       return &spi_gpio->pdata;
+       return spi_gpio;
+}
+
+static inline struct spi_gpio_platform_data * __pure
+spi_to_pdata(const struct spi_device *spi)
+{
+       return &spi_to_spi_gpio(spi)->pdata;
 }
 
 /* this is #defined to avoid unused-variable warnings when inlining */
@@ -210,7 +219,8 @@ static u32 spi_gpio_spec_txrx_word_mode3(struct spi_device *spi,
 
 static void spi_gpio_chipselect(struct spi_device *spi, int is_active)
 {
-       unsigned long cs = (unsigned long) spi->controller_data;
+       struct spi_gpio *spi_gpio = spi_to_spi_gpio(spi);
+       unsigned int cs = spi_gpio->cs_gpios[spi->chip_select];
 
        /* set initial clock polarity */
        if (is_active)
@@ -224,12 +234,27 @@ static void spi_gpio_chipselect(struct spi_device *spi, int is_active)
 
 static int spi_gpio_setup(struct spi_device *spi)
 {
-       unsigned long   cs = (unsigned long) spi->controller_data;
-       int             status = 0;
+       unsigned int            cs;
+       int                     status = 0;
+       struct spi_gpio         *spi_gpio = spi_to_spi_gpio(spi);
+       struct device_node      *np = spi->master->dev.of_node;
 
        if (spi->bits_per_word > 32)
                return -EINVAL;
 
+       if (np) {
+               /*
+                * In DT environments, the CS GPIOs have already been
+                * initialized from the "cs-gpios" property of the node.
+                */
+               cs = spi_gpio->cs_gpios[spi->chip_select];
+       } else {
+               /*
+                * ... otherwise, take it from spi->controller_data
+                */
+               cs = (unsigned int) spi->controller_data;
+       }
+
        if (!spi->controller_state) {
                if (cs != SPI_GPIO_NO_CHIPSELECT) {
                        status = gpio_request(cs, dev_name(&spi->dev));
@@ -239,8 +264,12 @@ static int spi_gpio_setup(struct spi_device *spi)
                                        !(spi->mode & SPI_CS_HIGH));
                }
        }
-       if (!status)
+       if (!status) {
                status = spi_bitbang_setup(spi);
+               /* in case it was initialized from static board data */
+               spi_gpio->cs_gpios[spi->chip_select] = cs;
+       }
+
        if (status) {
                if (!spi->controller_state && cs != SPI_GPIO_NO_CHIPSELECT)
                        gpio_free(cs);
@@ -250,7 +279,8 @@ static int spi_gpio_setup(struct spi_device *spi)
 
 static void spi_gpio_cleanup(struct spi_device *spi)
 {
-       unsigned long   cs = (unsigned long) spi->controller_data;
+       struct spi_gpio *spi_gpio = spi_to_spi_gpio(spi);
+       unsigned int cs = spi_gpio->cs_gpios[spi->chip_select];
 
        if (cs != SPI_GPIO_NO_CHIPSELECT)
                gpio_free(cs);
@@ -313,6 +343,55 @@ done:
        return value;
 }
 
+#ifdef CONFIG_OF
+static struct of_device_id spi_gpio_dt_ids[] = {
+       { .compatible = "spi-gpio" },
+       {}
+};
+MODULE_DEVICE_TABLE(of, spi_gpio_dt_ids);
+
+static int spi_gpio_probe_dt(struct platform_device *pdev)
+{
+       int ret;
+       u32 tmp;
+       struct spi_gpio_platform_data   *pdata;
+       struct device_node *np = pdev->dev.of_node;
+       const struct of_device_id *of_id =
+                       of_match_device(spi_gpio_dt_ids, &pdev->dev);
+
+       if (!of_id)
+               return 0;
+
+       pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+       if (!pdata)
+               return -ENOMEM;
+
+       pdata->sck = of_get_named_gpio(np, "gpio-sck", 0);
+       pdata->miso = of_get_named_gpio(np, "gpio-miso", 0);
+       pdata->mosi = of_get_named_gpio(np, "gpio-mosi", 0);
+
+       ret = of_property_read_u32(np, "num-chipselects", &tmp);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "num-chipselects property not found\n");
+               goto error_free;
+       }
+
+       pdata->num_chipselect = tmp;
+       pdev->dev.platform_data = pdata;
+
+       return 1;
+
+error_free:
+       devm_kfree(&pdev->dev, pdata);
+       return ret;
+}
+#else
+static inline int spi_gpio_probe_dt(struct platform_device *pdev)
+{
+       return 0;
+}
+#endif
+
 static int __devinit spi_gpio_probe(struct platform_device *pdev)
 {
        int                             status;
@@ -320,6 +399,13 @@ static int __devinit spi_gpio_probe(struct platform_device *pdev)
        struct spi_gpio                 *spi_gpio;
        struct spi_gpio_platform_data   *pdata;
        u16 master_flags = 0;
+       bool use_of = false;
+
+       status = spi_gpio_probe_dt(pdev);
+       if (status < 0)
+               return status;
+       if (status > 0)
+               use_of = true;
 
        pdata = pdev->dev.platform_data;
 #ifdef GENERIC_BITBANG
@@ -331,7 +417,8 @@ static int __devinit spi_gpio_probe(struct platform_device *pdev)
        if (status < 0)
                return status;
 
-       master = spi_alloc_master(&pdev->dev, sizeof *spi_gpio);
+       master = spi_alloc_master(&pdev->dev, sizeof(*spi_gpio) +
+                                       (sizeof(int) * SPI_N_CHIPSEL));
        if (!master) {
                status = -ENOMEM;
                goto gpio_free;
@@ -348,6 +435,23 @@ static int __devinit spi_gpio_probe(struct platform_device *pdev)
        master->num_chipselect = SPI_N_CHIPSEL;
        master->setup = spi_gpio_setup;
        master->cleanup = spi_gpio_cleanup;
+#ifdef CONFIG_OF
+       master->dev.of_node = pdev->dev.of_node;
+
+       if (use_of) {
+               int i;
+               struct device_node *np = pdev->dev.of_node;
+
+               /*
+                * In DT environments, take the CS GPIO from the "cs-gpios"
+                * property of the node.
+                */
+
+               for (i = 0; i < SPI_N_CHIPSEL; i++)
+                       spi_gpio->cs_gpios[i] =
+                               of_get_named_gpio(np, "cs-gpios", i);
+       }
+#endif
 
        spi_gpio->bitbang.master = spi_master_get(master);
        spi_gpio->bitbang.chipselect = spi_gpio_chipselect;
@@ -408,8 +512,11 @@ static int __devexit spi_gpio_remove(struct platform_device *pdev)
 MODULE_ALIAS("platform:" DRIVER_NAME);
 
 static struct platform_driver spi_gpio_driver = {
-       .driver.name    = DRIVER_NAME,
-       .driver.owner   = THIS_MODULE,
+       .driver = {
+               .name   = DRIVER_NAME,
+               .owner  = THIS_MODULE,
+               .of_match_table = of_match_ptr(spi_gpio_dt_ids),
+       },
        .probe          = spi_gpio_probe,
        .remove         = __devexit_p(spi_gpio_remove),
 };
index de7ebb65e5356addb5678b039d49b087445f40a5..c9a0d8467de6a5b275c0bcd735e2c0e155fb3b7c 100644 (file)
@@ -197,6 +197,7 @@ static unsigned int spi_imx_clkdiv_2(unsigned int fin,
 #define MX51_ECSPI_CONFIG_SCLKPOL(cs)  (1 << ((cs) +  4))
 #define MX51_ECSPI_CONFIG_SBBCTRL(cs)  (1 << ((cs) +  8))
 #define MX51_ECSPI_CONFIG_SSBPOL(cs)   (1 << ((cs) + 12))
+#define MX51_ECSPI_CONFIG_SCLKCTL(cs)  (1 << ((cs) + 20))
 
 #define MX51_ECSPI_INT         0x10
 #define MX51_ECSPI_INT_TEEN            (1 <<  0)
@@ -287,9 +288,10 @@ static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx,
        if (config->mode & SPI_CPHA)
                cfg |= MX51_ECSPI_CONFIG_SCLKPHA(config->cs);
 
-       if (config->mode & SPI_CPOL)
+       if (config->mode & SPI_CPOL) {
                cfg |= MX51_ECSPI_CONFIG_SCLKPOL(config->cs);
-
+               cfg |= MX51_ECSPI_CONFIG_SCLKCTL(config->cs);
+       }
        if (config->mode & SPI_CS_HIGH)
                cfg |= MX51_ECSPI_CONFIG_SSBPOL(config->cs);
 
index 4c63f772780adaf64e1412af167c791123eecb13..0a1e39e94d06a0292f4ebb60eb4f36b1a374ad6d 100644 (file)
@@ -494,7 +494,7 @@ free_master:
 
 static int __devexit mpc512x_psc_spi_do_remove(struct device *dev)
 {
-       struct spi_master *master = dev_get_drvdata(dev);
+       struct spi_master *master = spi_master_get(dev_get_drvdata(dev));
        struct mpc512x_psc_spi *mps = spi_master_get_devdata(master);
 
        flush_workqueue(mps->workqueue);
@@ -503,6 +503,7 @@ static int __devexit mpc512x_psc_spi_do_remove(struct device *dev)
        free_irq(mps->irq, mps);
        if (mps->psc)
                iounmap(mps->psc);
+       spi_master_put(master);
 
        return 0;
 }
index 66047156d90dbc8d1bd9980492050629ad1f40d1..bd47d262d53faa252003a6e707104a6459e7c6ce 100644 (file)
@@ -481,7 +481,7 @@ static int __devinit mpc52xx_psc_spi_of_probe(struct platform_device *op)
 
 static int __devexit mpc52xx_psc_spi_of_remove(struct platform_device *op)
 {
-       struct spi_master *master = dev_get_drvdata(&op->dev);
+       struct spi_master *master = spi_master_get(dev_get_drvdata(&op->dev));
        struct mpc52xx_psc_spi *mps = spi_master_get_devdata(master);
 
        flush_workqueue(mps->workqueue);
@@ -490,6 +490,7 @@ static int __devexit mpc52xx_psc_spi_of_remove(struct platform_device *op)
        free_irq(mps->irq, mps);
        if (mps->psc)
                iounmap(mps->psc);
+       spi_master_put(master);
 
        return 0;
 }
index cb3a3830b0a53ef42d4f02d86bd16b6c5e3d7a16..045410650212cf95bf431e63fb5d4951447a4209 100644 (file)
@@ -454,7 +454,7 @@ static int __devinit mpc52xx_spi_probe(struct platform_device *op)
                                GFP_KERNEL);
                if (!ms->gpio_cs) {
                        rc = -ENOMEM;
-                       goto err_alloc;
+                       goto err_alloc_gpio;
                }
 
                for (i = 0; i < ms->gpio_cs_count; i++) {
@@ -514,12 +514,13 @@ static int __devinit mpc52xx_spi_probe(struct platform_device *op)
 
  err_register:
        dev_err(&ms->master->dev, "initialization failed\n");
-       spi_master_put(master);
  err_gpio:
        while (i-- > 0)
                gpio_free(ms->gpio_cs[i]);
 
        kfree(ms->gpio_cs);
+ err_alloc_gpio:
+       spi_master_put(master);
  err_alloc:
  err_init:
        iounmap(regs);
@@ -528,7 +529,7 @@ static int __devinit mpc52xx_spi_probe(struct platform_device *op)
 
 static int __devexit mpc52xx_spi_remove(struct platform_device *op)
 {
-       struct spi_master *master = dev_get_drvdata(&op->dev);
+       struct spi_master *master = spi_master_get(dev_get_drvdata(&op->dev));
        struct mpc52xx_spi *ms = spi_master_get_devdata(master);
        int i;
 
@@ -540,8 +541,8 @@ static int __devexit mpc52xx_spi_remove(struct platform_device *op)
 
        kfree(ms->gpio_cs);
        spi_unregister_master(master);
-       spi_master_put(master);
        iounmap(ms->regs);
+       spi_master_put(master);
 
        return 0;
 }
diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c
new file mode 100644 (file)
index 0000000..edf1360
--- /dev/null
@@ -0,0 +1,674 @@
+/*
+ * Freescale MXS SPI master driver
+ *
+ * Copyright 2012 DENX Software Engineering, GmbH.
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ * Copyright 2008 Embedded Alley Solutions, Inc All Rights Reserved.
+ *
+ * Rework and transition to new API by:
+ * Marek Vasut <marex@denx.de>
+ *
+ * Based on previous attempt by:
+ * Fabio Estevam <fabio.estevam@freescale.com>
+ *
+ * Based on code from U-Boot bootloader by:
+ * Marek Vasut <marex@denx.de>
+ *
+ * Based on spi-stmp.c, which is:
+ * Author: Dmitry Pervushin <dimka@embeddedalley.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/highmem.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/completion.h>
+#include <linux/gpio.h>
+#include <linux/regulator/consumer.h>
+#include <linux/module.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/stmp_device.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/mxs-spi.h>
+
+#define DRIVER_NAME            "mxs-spi"
+
+/* Use 10S timeout for very long transfers, it should suffice. */
+#define SSP_TIMEOUT            10000
+
+#define SG_MAXLEN              0xff00
+
+struct mxs_spi {
+       struct mxs_ssp          ssp;
+       struct completion       c;
+};
+
+static int mxs_spi_setup_transfer(struct spi_device *dev,
+                               struct spi_transfer *t)
+{
+       struct mxs_spi *spi = spi_master_get_devdata(dev->master);
+       struct mxs_ssp *ssp = &spi->ssp;
+       uint8_t bits_per_word;
+       uint32_t hz = 0;
+
+       bits_per_word = dev->bits_per_word;
+       if (t && t->bits_per_word)
+               bits_per_word = t->bits_per_word;
+
+       if (bits_per_word != 8) {
+               dev_err(&dev->dev, "%s, unsupported bits_per_word=%d\n",
+                                       __func__, bits_per_word);
+               return -EINVAL;
+       }
+
+       hz = dev->max_speed_hz;
+       if (t && t->speed_hz)
+               hz = min(hz, t->speed_hz);
+       if (hz == 0) {
+               dev_err(&dev->dev, "Cannot continue with zero clock\n");
+               return -EINVAL;
+       }
+
+       mxs_ssp_set_clk_rate(ssp, hz);
+
+       writel(BF_SSP_CTRL1_SSP_MODE(BV_SSP_CTRL1_SSP_MODE__SPI) |
+                    BF_SSP_CTRL1_WORD_LENGTH
+                    (BV_SSP_CTRL1_WORD_LENGTH__EIGHT_BITS) |
+                    ((dev->mode & SPI_CPOL) ? BM_SSP_CTRL1_POLARITY : 0) |
+                    ((dev->mode & SPI_CPHA) ? BM_SSP_CTRL1_PHASE : 0),
+                    ssp->base + HW_SSP_CTRL1(ssp));
+
+       writel(0x0, ssp->base + HW_SSP_CMD0);
+       writel(0x0, ssp->base + HW_SSP_CMD1);
+
+       return 0;
+}
+
+static int mxs_spi_setup(struct spi_device *dev)
+{
+       int err = 0;
+
+       if (!dev->bits_per_word)
+               dev->bits_per_word = 8;
+
+       if (dev->mode & ~(SPI_CPOL | SPI_CPHA))
+               return -EINVAL;
+
+       err = mxs_spi_setup_transfer(dev, NULL);
+       if (err) {
+               dev_err(&dev->dev,
+                       "Failed to setup transfer, error = %d\n", err);
+       }
+
+       return err;
+}
+
+static uint32_t mxs_spi_cs_to_reg(unsigned cs)
+{
+       uint32_t select = 0;
+
+       /*
+        * i.MX28 Datasheet: 17.10.1: HW_SSP_CTRL0
+        *
+        * The bits BM_SSP_CTRL0_WAIT_FOR_CMD and BM_SSP_CTRL0_WAIT_FOR_IRQ
+        * in HW_SSP_CTRL0 register do have multiple usage, please refer to
+        * the datasheet for further details. In SPI mode, they are used to
+        * toggle the chip-select lines (nCS pins).
+        */
+       if (cs & 1)
+               select |= BM_SSP_CTRL0_WAIT_FOR_CMD;
+       if (cs & 2)
+               select |= BM_SSP_CTRL0_WAIT_FOR_IRQ;
+
+       return select;
+}
+
+static void mxs_spi_set_cs(struct mxs_spi *spi, unsigned cs)
+{
+       const uint32_t mask =
+               BM_SSP_CTRL0_WAIT_FOR_CMD | BM_SSP_CTRL0_WAIT_FOR_IRQ;
+       uint32_t select;
+       struct mxs_ssp *ssp = &spi->ssp;
+
+       writel(mask, ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
+       select = mxs_spi_cs_to_reg(cs);
+       writel(select, ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
+}
+
+static inline void mxs_spi_enable(struct mxs_spi *spi)
+{
+       struct mxs_ssp *ssp = &spi->ssp;
+
+       writel(BM_SSP_CTRL0_LOCK_CS,
+               ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
+       writel(BM_SSP_CTRL0_IGNORE_CRC,
+               ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
+}
+
+static inline void mxs_spi_disable(struct mxs_spi *spi)
+{
+       struct mxs_ssp *ssp = &spi->ssp;
+
+       writel(BM_SSP_CTRL0_LOCK_CS,
+               ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
+       writel(BM_SSP_CTRL0_IGNORE_CRC,
+               ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
+}
+
+static int mxs_ssp_wait(struct mxs_spi *spi, int offset, int mask, bool set)
+{
+       const unsigned long timeout = jiffies + msecs_to_jiffies(SSP_TIMEOUT);
+       struct mxs_ssp *ssp = &spi->ssp;
+       uint32_t reg;
+
+       do {
+               reg = readl_relaxed(ssp->base + offset);
+
+               if (!set)
+                       reg = ~reg;
+
+               reg &= mask;
+
+               if (reg == mask)
+                       return 0;
+       } while (time_before(jiffies, timeout));
+
+       return -ETIMEDOUT;
+}
+
+static void mxs_ssp_dma_irq_callback(void *param)
+{
+       struct mxs_spi *spi = param;
+       complete(&spi->c);
+}
+
+static irqreturn_t mxs_ssp_irq_handler(int irq, void *dev_id)
+{
+       struct mxs_ssp *ssp = dev_id;
+       dev_err(ssp->dev, "%s[%i] CTRL1=%08x STATUS=%08x\n",
+               __func__, __LINE__,
+               readl(ssp->base + HW_SSP_CTRL1(ssp)),
+               readl(ssp->base + HW_SSP_STATUS(ssp)));
+       return IRQ_HANDLED;
+}
+
+static int mxs_spi_txrx_dma(struct mxs_spi *spi, int cs,
+                           unsigned char *buf, int len,
+                           int *first, int *last, int write)
+{
+       struct mxs_ssp *ssp = &spi->ssp;
+       struct dma_async_tx_descriptor *desc = NULL;
+       const bool vmalloced_buf = is_vmalloc_addr(buf);
+       const int desc_len = vmalloced_buf ? PAGE_SIZE : SG_MAXLEN;
+       const int sgs = DIV_ROUND_UP(len, desc_len);
+       int sg_count;
+       int min, ret;
+       uint32_t ctrl0;
+       struct page *vm_page;
+       void *sg_buf;
+       struct {
+               uint32_t                pio[4];
+               struct scatterlist      sg;
+       } *dma_xfer;
+
+       if (!len)
+               return -EINVAL;
+
+       dma_xfer = kzalloc(sizeof(*dma_xfer) * sgs, GFP_KERNEL);
+       if (!dma_xfer)
+               return -ENOMEM;
+
+       INIT_COMPLETION(spi->c);
+
+       ctrl0 = readl(ssp->base + HW_SSP_CTRL0);
+       ctrl0 |= BM_SSP_CTRL0_DATA_XFER | mxs_spi_cs_to_reg(cs);
+
+       if (*first)
+               ctrl0 |= BM_SSP_CTRL0_LOCK_CS;
+       if (!write)
+               ctrl0 |= BM_SSP_CTRL0_READ;
+
+       /* Queue the DMA data transfer. */
+       for (sg_count = 0; sg_count < sgs; sg_count++) {
+               min = min(len, desc_len);
+
+               /* Prepare the transfer descriptor. */
+               if ((sg_count + 1 == sgs) && *last)
+                       ctrl0 |= BM_SSP_CTRL0_IGNORE_CRC;
+
+               if (ssp->devid == IMX23_SSP)
+                       ctrl0 |= min;
+
+               dma_xfer[sg_count].pio[0] = ctrl0;
+               dma_xfer[sg_count].pio[3] = min;
+
+               if (vmalloced_buf) {
+                       vm_page = vmalloc_to_page(buf);
+                       if (!vm_page) {
+                               ret = -ENOMEM;
+                               goto err_vmalloc;
+                       }
+                       sg_buf = page_address(vm_page) +
+                               ((size_t)buf & ~PAGE_MASK);
+               } else {
+                       sg_buf = buf;
+               }
+
+               sg_init_one(&dma_xfer[sg_count].sg, sg_buf, min);
+               ret = dma_map_sg(ssp->dev, &dma_xfer[sg_count].sg, 1,
+                       write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+
+               len -= min;
+               buf += min;
+
+               /* Queue the PIO register write transfer. */
+               desc = dmaengine_prep_slave_sg(ssp->dmach,
+                               (struct scatterlist *)dma_xfer[sg_count].pio,
+                               (ssp->devid == IMX23_SSP) ? 1 : 4,
+                               DMA_TRANS_NONE,
+                               sg_count ? DMA_PREP_INTERRUPT : 0);
+               if (!desc) {
+                       dev_err(ssp->dev,
+                               "Failed to get PIO reg. write descriptor.\n");
+                       ret = -EINVAL;
+                       goto err_mapped;
+               }
+
+               desc = dmaengine_prep_slave_sg(ssp->dmach,
+                               &dma_xfer[sg_count].sg, 1,
+                               write ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
+                               DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+
+               if (!desc) {
+                       dev_err(ssp->dev,
+                               "Failed to get DMA data write descriptor.\n");
+                       ret = -EINVAL;
+                       goto err_mapped;
+               }
+       }
+
+       /*
+        * The last descriptor must have this callback,
+        * to finish the DMA transaction.
+        */
+       desc->callback = mxs_ssp_dma_irq_callback;
+       desc->callback_param = spi;
+
+       /* Start the transfer. */
+       dmaengine_submit(desc);
+       dma_async_issue_pending(ssp->dmach);
+
+       ret = wait_for_completion_timeout(&spi->c,
+                               msecs_to_jiffies(SSP_TIMEOUT));
+       if (!ret) {
+               dev_err(ssp->dev, "DMA transfer timeout\n");
+               ret = -ETIMEDOUT;
+               goto err_vmalloc;
+       }
+
+       ret = 0;
+
+err_vmalloc:
+       while (--sg_count >= 0) {
+err_mapped:
+               dma_unmap_sg(ssp->dev, &dma_xfer[sg_count].sg, 1,
+                       write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+       }
+
+       kfree(dma_xfer);
+
+       return ret;
+}
+
+static int mxs_spi_txrx_pio(struct mxs_spi *spi, int cs,
+                           unsigned char *buf, int len,
+                           int *first, int *last, int write)
+{
+       struct mxs_ssp *ssp = &spi->ssp;
+
+       if (*first)
+               mxs_spi_enable(spi);
+
+       mxs_spi_set_cs(spi, cs);
+
+       while (len--) {
+               if (*last && len == 0)
+                       mxs_spi_disable(spi);
+
+               if (ssp->devid == IMX23_SSP) {
+                       writel(BM_SSP_CTRL0_XFER_COUNT,
+                               ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
+                       writel(1,
+                               ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
+               } else {
+                       writel(1, ssp->base + HW_SSP_XFER_SIZE);
+               }
+
+               if (write)
+                       writel(BM_SSP_CTRL0_READ,
+                               ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
+               else
+                       writel(BM_SSP_CTRL0_READ,
+                               ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
+
+               writel(BM_SSP_CTRL0_RUN,
+                               ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
+
+               if (mxs_ssp_wait(spi, HW_SSP_CTRL0, BM_SSP_CTRL0_RUN, 1))
+                       return -ETIMEDOUT;
+
+               if (write)
+                       writel(*buf, ssp->base + HW_SSP_DATA(ssp));
+
+               writel(BM_SSP_CTRL0_DATA_XFER,
+                            ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
+
+               if (!write) {
+                       if (mxs_ssp_wait(spi, HW_SSP_STATUS(ssp),
+                                               BM_SSP_STATUS_FIFO_EMPTY, 0))
+                               return -ETIMEDOUT;
+
+                       *buf = (readl(ssp->base + HW_SSP_DATA(ssp)) & 0xff);
+               }
+
+               if (mxs_ssp_wait(spi, HW_SSP_CTRL0, BM_SSP_CTRL0_RUN, 0))
+                       return -ETIMEDOUT;
+
+               buf++;
+       }
+
+       if (len <= 0)
+               return 0;
+
+       return -ETIMEDOUT;
+}
+
+static int mxs_spi_transfer_one(struct spi_master *master,
+                               struct spi_message *m)
+{
+       struct mxs_spi *spi = spi_master_get_devdata(master);
+       struct mxs_ssp *ssp = &spi->ssp;
+       int first, last;
+       struct spi_transfer *t, *tmp_t;
+       int status = 0;
+       int cs;
+
+       first = last = 0;
+
+       cs = m->spi->chip_select;
+
+       list_for_each_entry_safe(t, tmp_t, &m->transfers, transfer_list) {
+
+               status = mxs_spi_setup_transfer(m->spi, t);
+               if (status)
+                       break;
+
+               if (&t->transfer_list == m->transfers.next)
+                       first = 1;
+               if (&t->transfer_list == m->transfers.prev)
+                       last = 1;
+               if ((t->rx_buf && t->tx_buf) || (t->rx_dma && t->tx_dma)) {
+                       dev_err(ssp->dev,
+                               "Cannot send and receive simultaneously\n");
+                       status = -EINVAL;
+                       break;
+               }
+
+               /*
+                * Small blocks can be transfered via PIO.
+                * Measured by empiric means:
+                *
+                * dd if=/dev/mtdblock0 of=/dev/null bs=1024k count=1
+                *
+                * DMA only: 2.164808 seconds, 473.0KB/s
+                * Combined: 1.676276 seconds, 610.9KB/s
+                */
+               if (t->len < 32) {
+                       writel(BM_SSP_CTRL1_DMA_ENABLE,
+                               ssp->base + HW_SSP_CTRL1(ssp) +
+                               STMP_OFFSET_REG_CLR);
+
+                       if (t->tx_buf)
+                               status = mxs_spi_txrx_pio(spi, cs,
+                                               (void *)t->tx_buf,
+                                               t->len, &first, &last, 1);
+                       if (t->rx_buf)
+                               status = mxs_spi_txrx_pio(spi, cs,
+                                               t->rx_buf, t->len,
+                                               &first, &last, 0);
+               } else {
+                       writel(BM_SSP_CTRL1_DMA_ENABLE,
+                               ssp->base + HW_SSP_CTRL1(ssp) +
+                               STMP_OFFSET_REG_SET);
+
+                       if (t->tx_buf)
+                               status = mxs_spi_txrx_dma(spi, cs,
+                                               (void *)t->tx_buf, t->len,
+                                               &first, &last, 1);
+                       if (t->rx_buf)
+                               status = mxs_spi_txrx_dma(spi, cs,
+                                               t->rx_buf, t->len,
+                                               &first, &last, 0);
+               }
+
+               if (status) {
+                       stmp_reset_block(ssp->base);
+                       break;
+               }
+
+               m->actual_length += t->len;
+               first = last = 0;
+       }
+
+       m->status = status;
+       spi_finalize_current_message(master);
+
+       return status;
+}
+
+static bool mxs_ssp_dma_filter(struct dma_chan *chan, void *param)
+{
+       struct mxs_ssp *ssp = param;
+
+       if (!mxs_dma_is_apbh(chan))
+               return false;
+
+       if (chan->chan_id != ssp->dma_channel)
+               return false;
+
+       chan->private = &ssp->dma_data;
+
+       return true;
+}
+
+static const struct of_device_id mxs_spi_dt_ids[] = {
+       { .compatible = "fsl,imx23-spi", .data = (void *) IMX23_SSP, },
+       { .compatible = "fsl,imx28-spi", .data = (void *) IMX28_SSP, },
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mxs_spi_dt_ids);
+
+static int __devinit mxs_spi_probe(struct platform_device *pdev)
+{
+       const struct of_device_id *of_id =
+                       of_match_device(mxs_spi_dt_ids, &pdev->dev);
+       struct device_node *np = pdev->dev.of_node;
+       struct spi_master *master;
+       struct mxs_spi *spi;
+       struct mxs_ssp *ssp;
+       struct resource *iores, *dmares;
+       struct pinctrl *pinctrl;
+       struct clk *clk;
+       void __iomem *base;
+       int devid, dma_channel, clk_freq;
+       int ret = 0, irq_err, irq_dma;
+       dma_cap_mask_t mask;
+
+       /*
+        * Default clock speed for the SPI core. 160MHz seems to
+        * work reasonably well with most SPI flashes, so use this
+        * as a default. Override with "clock-frequency" DT prop.
+        */
+       const int clk_freq_default = 160000000;
+
+       iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       irq_err = platform_get_irq(pdev, 0);
+       irq_dma = platform_get_irq(pdev, 1);
+       if (!iores || irq_err < 0 || irq_dma < 0)
+               return -EINVAL;
+
+       base = devm_request_and_ioremap(&pdev->dev, iores);
+       if (!base)
+               return -EADDRNOTAVAIL;
+
+       pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
+       if (IS_ERR(pinctrl))
+               return PTR_ERR(pinctrl);
+
+       clk = devm_clk_get(&pdev->dev, NULL);
+       if (IS_ERR(clk))
+               return PTR_ERR(clk);
+
+       if (np) {
+               devid = (enum mxs_ssp_id) of_id->data;
+               /*
+                * TODO: This is a temporary solution and should be changed
+                * to use generic DMA binding later when the helpers get in.
+                */
+               ret = of_property_read_u32(np, "fsl,ssp-dma-channel",
+                                          &dma_channel);
+               if (ret) {
+                       dev_err(&pdev->dev,
+                               "Failed to get DMA channel\n");
+                       return -EINVAL;
+               }
+
+               ret = of_property_read_u32(np, "clock-frequency",
+                                          &clk_freq);
+               if (ret)
+                       clk_freq = clk_freq_default;
+       } else {
+               dmares = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+               if (!dmares)
+                       return -EINVAL;
+               devid = pdev->id_entry->driver_data;
+               dma_channel = dmares->start;
+               clk_freq = clk_freq_default;
+       }
+
+       master = spi_alloc_master(&pdev->dev, sizeof(*spi));
+       if (!master)
+               return -ENOMEM;
+
+       master->transfer_one_message = mxs_spi_transfer_one;
+       master->setup = mxs_spi_setup;
+       master->mode_bits = SPI_CPOL | SPI_CPHA;
+       master->num_chipselect = 3;
+       master->dev.of_node = np;
+       master->flags = SPI_MASTER_HALF_DUPLEX;
+
+       spi = spi_master_get_devdata(master);
+       ssp = &spi->ssp;
+       ssp->dev = &pdev->dev;
+       ssp->clk = clk;
+       ssp->base = base;
+       ssp->devid = devid;
+       ssp->dma_channel = dma_channel;
+
+       init_completion(&spi->c);
+
+       ret = devm_request_irq(&pdev->dev, irq_err, mxs_ssp_irq_handler, 0,
+                              DRIVER_NAME, ssp);
+       if (ret)
+               goto out_master_free;
+
+       dma_cap_zero(mask);
+       dma_cap_set(DMA_SLAVE, mask);
+       ssp->dma_data.chan_irq = irq_dma;
+       ssp->dmach = dma_request_channel(mask, mxs_ssp_dma_filter, ssp);
+       if (!ssp->dmach) {
+               dev_err(ssp->dev, "Failed to request DMA\n");
+               ret = -ENODEV; goto out_master_free;
+       }
+
+       clk_prepare_enable(ssp->clk);
+       clk_set_rate(ssp->clk, clk_freq);
+       ssp->clk_rate = clk_get_rate(ssp->clk) / 1000;
+
+       stmp_reset_block(ssp->base);
+
+       platform_set_drvdata(pdev, master);
+
+       ret = spi_register_master(master);
+       if (ret) {
+               dev_err(&pdev->dev, "Cannot register SPI master, %d\n", ret);
+               goto out_free_dma;
+       }
+
+       return 0;
+
+out_free_dma:
+       dma_release_channel(ssp->dmach);
+       clk_disable_unprepare(ssp->clk);
+out_master_free:
+       spi_master_put(master);
+       return ret;
+}
+
+static int __devexit mxs_spi_remove(struct platform_device *pdev)
+{
+       struct spi_master *master;
+       struct mxs_spi *spi;
+       struct mxs_ssp *ssp;
+
+       master = spi_master_get(platform_get_drvdata(pdev));
+       spi = spi_master_get_devdata(master);
+       ssp = &spi->ssp;
+
+       spi_unregister_master(master);
+
+       dma_release_channel(ssp->dmach);
+
+       clk_disable_unprepare(ssp->clk);
+
+       spi_master_put(master);
+
+       return 0;
+}
+
+static struct platform_driver mxs_spi_driver = {
+       .probe  = mxs_spi_probe,
+       .remove = __devexit_p(mxs_spi_remove),
+       .driver = {
+               .name   = DRIVER_NAME,
+               .owner  = THIS_MODULE,
+               .of_match_table = mxs_spi_dt_ids,
+       },
+};
+
+module_platform_driver(mxs_spi_driver);
+
+MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
+MODULE_DESCRIPTION("MXS SPI master driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:mxs-spi");
index 5d59a69a90640aa172ee53aaf3c4a62e294b3f38..474e2174e08a5576d8ec432fd7a52e49b8baf782 100644 (file)
@@ -38,6 +38,8 @@
 #include <linux/pm_runtime.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/err.h>
 
 #include <linux/spi/spi.h>
 
@@ -140,13 +142,6 @@ struct omap2_mcspi_cs {
        u32                     chconf0;
 };
 
-#define MOD_REG_BIT(val, mask, set) do { \
-       if (set) \
-               val |= mask; \
-       else \
-               val &= ~mask; \
-} while (0)
-
 static inline void mcspi_write_reg(struct spi_master *master,
                int idx, u32 val)
 {
@@ -205,7 +200,11 @@ static void omap2_mcspi_set_dma_req(const struct spi_device *spi,
        else
                rw = OMAP2_MCSPI_CHCONF_DMAW;
 
-       MOD_REG_BIT(l, rw, enable);
+       if (enable)
+               l |= rw;
+       else
+               l &= ~rw;
+
        mcspi_write_chconf0(spi, l);
 }
 
@@ -224,7 +223,11 @@ static void omap2_mcspi_force_cs(struct spi_device *spi, int cs_active)
        u32 l;
 
        l = mcspi_cached_chconf0(spi);
-       MOD_REG_BIT(l, OMAP2_MCSPI_CHCONF_FORCE, cs_active);
+       if (cs_active)
+               l |= OMAP2_MCSPI_CHCONF_FORCE;
+       else
+               l &= ~OMAP2_MCSPI_CHCONF_FORCE;
+
        mcspi_write_chconf0(spi, l);
 }
 
@@ -239,9 +242,8 @@ static void omap2_mcspi_set_master_mode(struct spi_master *master)
         * to single-channel master mode
         */
        l = mcspi_read_reg(master, OMAP2_MCSPI_MODULCTRL);
-       MOD_REG_BIT(l, OMAP2_MCSPI_MODULCTRL_STEST, 0);
-       MOD_REG_BIT(l, OMAP2_MCSPI_MODULCTRL_MS, 0);
-       MOD_REG_BIT(l, OMAP2_MCSPI_MODULCTRL_SINGLE, 1);
+       l &= ~(OMAP2_MCSPI_MODULCTRL_STEST | OMAP2_MCSPI_MODULCTRL_MS);
+       l |= OMAP2_MCSPI_MODULCTRL_SINGLE;
        mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, l);
 
        ctx->modulctrl = l;
@@ -260,16 +262,6 @@ static void omap2_mcspi_restore_ctx(struct omap2_mcspi *mcspi)
        list_for_each_entry(cs, &ctx->cs, node)
                __raw_writel(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0);
 }
-static void omap2_mcspi_disable_clocks(struct omap2_mcspi *mcspi)
-{
-       pm_runtime_mark_last_busy(mcspi->dev);
-       pm_runtime_put_autosuspend(mcspi->dev);
-}
-
-static int omap2_mcspi_enable_clocks(struct omap2_mcspi *mcspi)
-{
-       return pm_runtime_get_sync(mcspi->dev);
-}
 
 static int omap2_prepare_transfer(struct spi_master *master)
 {
@@ -325,49 +317,27 @@ static void omap2_mcspi_tx_callback(void *data)
        omap2_mcspi_set_dma_req(spi, 0, 0);
 }
 
-static unsigned
-omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
+static void omap2_mcspi_tx_dma(struct spi_device *spi,
+                               struct spi_transfer *xfer,
+                               struct dma_slave_config cfg)
 {
        struct omap2_mcspi      *mcspi;
-       struct omap2_mcspi_cs   *cs = spi->controller_state;
        struct omap2_mcspi_dma  *mcspi_dma;
        unsigned int            count;
-       int                     word_len, element_count;
-       int                     elements = 0;
-       u32                     l;
        u8                      * rx;
        const u8                * tx;
        void __iomem            *chstat_reg;
-       struct dma_slave_config cfg;
-       enum dma_slave_buswidth width;
-       unsigned es;
+       struct omap2_mcspi_cs   *cs = spi->controller_state;
 
        mcspi = spi_master_get_devdata(spi->master);
        mcspi_dma = &mcspi->dma_channels[spi->chip_select];
-       l = mcspi_cached_chconf0(spi);
+       count = xfer->len;
 
+       rx = xfer->rx_buf;
+       tx = xfer->tx_buf;
        chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0;
 
-       if (cs->word_len <= 8) {
-               width = DMA_SLAVE_BUSWIDTH_1_BYTE;
-               es = 1;
-       } else if (cs->word_len <= 16) {
-               width = DMA_SLAVE_BUSWIDTH_2_BYTES;
-               es = 2;
-       } else {
-               width = DMA_SLAVE_BUSWIDTH_4_BYTES;
-               es = 4;
-       }
-
-       memset(&cfg, 0, sizeof(cfg));
-       cfg.src_addr = cs->phys + OMAP2_MCSPI_RX0;
-       cfg.dst_addr = cs->phys + OMAP2_MCSPI_TX0;
-       cfg.src_addr_width = width;
-       cfg.dst_addr_width = width;
-       cfg.src_maxburst = 1;
-       cfg.dst_maxburst = 1;
-
-       if (xfer->tx_buf && mcspi_dma->dma_tx) {
+       if (mcspi_dma->dma_tx) {
                struct dma_async_tx_descriptor *tx;
                struct scatterlist sg;
 
@@ -378,7 +348,7 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
                sg_dma_len(&sg) = xfer->len;
 
                tx = dmaengine_prep_slave_sg(mcspi_dma->dma_tx, &sg, 1,
-                       DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+               DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
                if (tx) {
                        tx->callback = omap2_mcspi_tx_callback;
                        tx->callback_param = spi;
@@ -387,8 +357,50 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
                        /* FIXME: fall back to PIO? */
                }
        }
+       dma_async_issue_pending(mcspi_dma->dma_tx);
+       omap2_mcspi_set_dma_req(spi, 0, 1);
+
+       wait_for_completion(&mcspi_dma->dma_tx_completion);
+       dma_unmap_single(mcspi->dev, xfer->tx_dma, count,
+                        DMA_TO_DEVICE);
+
+       /* for TX_ONLY mode, be sure all words have shifted out */
+       if (rx == NULL) {
+               if (mcspi_wait_for_reg_bit(chstat_reg,
+                                       OMAP2_MCSPI_CHSTAT_TXS) < 0)
+                       dev_err(&spi->dev, "TXS timed out\n");
+               else if (mcspi_wait_for_reg_bit(chstat_reg,
+                                       OMAP2_MCSPI_CHSTAT_EOT) < 0)
+                       dev_err(&spi->dev, "EOT timed out\n");
+       }
+}
+
+static unsigned
+omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
+                               struct dma_slave_config cfg,
+                               unsigned es)
+{
+       struct omap2_mcspi      *mcspi;
+       struct omap2_mcspi_dma  *mcspi_dma;
+       unsigned int            count;
+       u32                     l;
+       int                     elements = 0;
+       int                     word_len, element_count;
+       struct omap2_mcspi_cs   *cs = spi->controller_state;
+       mcspi = spi_master_get_devdata(spi->master);
+       mcspi_dma = &mcspi->dma_channels[spi->chip_select];
+       count = xfer->len;
+       word_len = cs->word_len;
+       l = mcspi_cached_chconf0(spi);
 
-       if (xfer->rx_buf && mcspi_dma->dma_rx) {
+       if (word_len <= 8)
+               element_count = count;
+       else if (word_len <= 16)
+               element_count = count >> 1;
+       else /* word_len <= 32 */
+               element_count = count >> 2;
+
+       if (mcspi_dma->dma_rx) {
                struct dma_async_tx_descriptor *tx;
                struct scatterlist sg;
                size_t len = xfer->len - es;
@@ -403,108 +415,120 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
                sg_dma_len(&sg) = len;
 
                tx = dmaengine_prep_slave_sg(mcspi_dma->dma_rx, &sg, 1,
-                       DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+                               DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT |
+                               DMA_CTRL_ACK);
                if (tx) {
                        tx->callback = omap2_mcspi_rx_callback;
                        tx->callback_param = spi;
                        dmaengine_submit(tx);
                } else {
-                       /* FIXME: fall back to PIO? */
-               }
-       }
-
-       count = xfer->len;
-       word_len = cs->word_len;
-
-       rx = xfer->rx_buf;
-       tx = xfer->tx_buf;
-
-       if (word_len <= 8) {
-               element_count = count;
-       } else if (word_len <= 16) {
-               element_count = count >> 1;
-       } else /* word_len <= 32 */ {
-               element_count = count >> 2;
-       }
-
-       if (tx != NULL) {
-               dma_async_issue_pending(mcspi_dma->dma_tx);
-               omap2_mcspi_set_dma_req(spi, 0, 1);
-       }
-
-       if (rx != NULL) {
-               dma_async_issue_pending(mcspi_dma->dma_rx);
-               omap2_mcspi_set_dma_req(spi, 1, 1);
-       }
-
-       if (tx != NULL) {
-               wait_for_completion(&mcspi_dma->dma_tx_completion);
-               dma_unmap_single(mcspi->dev, xfer->tx_dma, count,
-                                DMA_TO_DEVICE);
-
-               /* for TX_ONLY mode, be sure all words have shifted out */
-               if (rx == NULL) {
-                       if (mcspi_wait_for_reg_bit(chstat_reg,
-                                               OMAP2_MCSPI_CHSTAT_TXS) < 0)
-                               dev_err(&spi->dev, "TXS timed out\n");
-                       else if (mcspi_wait_for_reg_bit(chstat_reg,
-                                               OMAP2_MCSPI_CHSTAT_EOT) < 0)
-                               dev_err(&spi->dev, "EOT timed out\n");
+                               /* FIXME: fall back to PIO? */
                }
        }
 
-       if (rx != NULL) {
-               wait_for_completion(&mcspi_dma->dma_rx_completion);
-               dma_unmap_single(mcspi->dev, xfer->rx_dma, count,
-                                DMA_FROM_DEVICE);
-               omap2_mcspi_set_enable(spi, 0);
+       dma_async_issue_pending(mcspi_dma->dma_rx);
+       omap2_mcspi_set_dma_req(spi, 1, 1);
 
-               elements = element_count - 1;
+       wait_for_completion(&mcspi_dma->dma_rx_completion);
+       dma_unmap_single(mcspi->dev, xfer->rx_dma, count,
+                        DMA_FROM_DEVICE);
+       omap2_mcspi_set_enable(spi, 0);
 
-               if (l & OMAP2_MCSPI_CHCONF_TURBO) {
-                       elements--;
+       elements = element_count - 1;
 
-                       if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0)
-                                  & OMAP2_MCSPI_CHSTAT_RXS)) {
-                               u32 w;
-
-                               w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0);
-                               if (word_len <= 8)
-                                       ((u8 *)xfer->rx_buf)[elements++] = w;
-                               else if (word_len <= 16)
-                                       ((u16 *)xfer->rx_buf)[elements++] = w;
-                               else /* word_len <= 32 */
-                                       ((u32 *)xfer->rx_buf)[elements++] = w;
-                       } else {
-                               dev_err(&spi->dev,
-                                       "DMA RX penultimate word empty");
-                               count -= (word_len <= 8)  ? 2 :
-                                       (word_len <= 16) ? 4 :
-                                       /* word_len <= 32 */ 8;
-                               omap2_mcspi_set_enable(spi, 1);
-                               return count;
-                       }
-               }
+       if (l & OMAP2_MCSPI_CHCONF_TURBO) {
+               elements--;
 
                if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0)
-                               & OMAP2_MCSPI_CHSTAT_RXS)) {
+                                  & OMAP2_MCSPI_CHSTAT_RXS)) {
                        u32 w;
 
                        w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0);
                        if (word_len <= 8)
-                               ((u8 *)xfer->rx_buf)[elements] = w;
+                               ((u8 *)xfer->rx_buf)[elements++] = w;
                        else if (word_len <= 16)
-                               ((u16 *)xfer->rx_buf)[elements] = w;
+                               ((u16 *)xfer->rx_buf)[elements++] = w;
                        else /* word_len <= 32 */
-                               ((u32 *)xfer->rx_buf)[elements] = w;
+                               ((u32 *)xfer->rx_buf)[elements++] = w;
                } else {
-                       dev_err(&spi->dev, "DMA RX last word empty");
-                       count -= (word_len <= 8)  ? 1 :
-                                (word_len <= 16) ? 2 :
-                              /* word_len <= 32 */ 4;
+                       dev_err(&spi->dev, "DMA RX penultimate word empty");
+                       count -= (word_len <= 8)  ? 2 :
+                               (word_len <= 16) ? 4 :
+                               /* word_len <= 32 */ 8;
+                       omap2_mcspi_set_enable(spi, 1);
+                       return count;
                }
-               omap2_mcspi_set_enable(spi, 1);
        }
+       if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0)
+                               & OMAP2_MCSPI_CHSTAT_RXS)) {
+               u32 w;
+
+               w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0);
+               if (word_len <= 8)
+                       ((u8 *)xfer->rx_buf)[elements] = w;
+               else if (word_len <= 16)
+                       ((u16 *)xfer->rx_buf)[elements] = w;
+               else /* word_len <= 32 */
+                       ((u32 *)xfer->rx_buf)[elements] = w;
+       } else {
+               dev_err(&spi->dev, "DMA RX last word empty");
+               count -= (word_len <= 8)  ? 1 :
+                        (word_len <= 16) ? 2 :
+                      /* word_len <= 32 */ 4;
+       }
+       omap2_mcspi_set_enable(spi, 1);
+       return count;
+}
+
+static unsigned
+omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
+{
+       struct omap2_mcspi      *mcspi;
+       struct omap2_mcspi_cs   *cs = spi->controller_state;
+       struct omap2_mcspi_dma  *mcspi_dma;
+       unsigned int            count;
+       u32                     l;
+       u8                      *rx;
+       const u8                *tx;
+       struct dma_slave_config cfg;
+       enum dma_slave_buswidth width;
+       unsigned es;
+
+       mcspi = spi_master_get_devdata(spi->master);
+       mcspi_dma = &mcspi->dma_channels[spi->chip_select];
+       l = mcspi_cached_chconf0(spi);
+
+
+       if (cs->word_len <= 8) {
+               width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+               es = 1;
+       } else if (cs->word_len <= 16) {
+               width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+               es = 2;
+       } else {
+               width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+               es = 4;
+       }
+
+       memset(&cfg, 0, sizeof(cfg));
+       cfg.src_addr = cs->phys + OMAP2_MCSPI_RX0;
+       cfg.dst_addr = cs->phys + OMAP2_MCSPI_TX0;
+       cfg.src_addr_width = width;
+       cfg.dst_addr_width = width;
+       cfg.src_maxburst = 1;
+       cfg.dst_maxburst = 1;
+
+       rx = xfer->rx_buf;
+       tx = xfer->tx_buf;
+
+       count = xfer->len;
+
+       if (tx != NULL)
+               omap2_mcspi_tx_dma(spi, xfer, cfg);
+
+       if (rx != NULL)
+               return omap2_mcspi_rx_dma(spi, xfer, cfg, es);
+
        return count;
 }
 
@@ -848,12 +872,13 @@ static int omap2_mcspi_setup(struct spi_device *spi)
                        return ret;
        }
 
-       ret = omap2_mcspi_enable_clocks(mcspi);
+       ret = pm_runtime_get_sync(mcspi->dev);
        if (ret < 0)
                return ret;
 
        ret = omap2_mcspi_setup_transfer(spi, NULL);
-       omap2_mcspi_disable_clocks(mcspi);
+       pm_runtime_mark_last_busy(mcspi->dev);
+       pm_runtime_put_autosuspend(mcspi->dev);
 
        return ret;
 }
@@ -1067,7 +1092,7 @@ static int __devinit omap2_mcspi_master_setup(struct omap2_mcspi *mcspi)
        struct omap2_mcspi_regs *ctx = &mcspi->ctx;
        int                     ret = 0;
 
-       ret = omap2_mcspi_enable_clocks(mcspi);
+       ret = pm_runtime_get_sync(mcspi->dev);
        if (ret < 0)
                return ret;
 
@@ -1076,7 +1101,8 @@ static int __devinit omap2_mcspi_master_setup(struct omap2_mcspi *mcspi)
        ctx->wakeupenable = OMAP2_MCSPI_WAKEUPENABLE_WKEN;
 
        omap2_mcspi_set_master_mode(master);
-       omap2_mcspi_disable_clocks(mcspi);
+       pm_runtime_mark_last_busy(mcspi->dev);
+       pm_runtime_put_autosuspend(mcspi->dev);
        return 0;
 }
 
@@ -1124,6 +1150,7 @@ static int __devinit omap2_mcspi_probe(struct platform_device *pdev)
        static int              bus_num = 1;
        struct device_node      *node = pdev->dev.of_node;
        const struct of_device_id *match;
+       struct pinctrl *pinctrl;
 
        master = spi_alloc_master(&pdev->dev, sizeof *mcspi);
        if (master == NULL) {
@@ -1219,6 +1246,11 @@ static int __devinit omap2_mcspi_probe(struct platform_device *pdev)
        if (status < 0)
                goto dma_chnl_free;
 
+       pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
+       if (IS_ERR(pinctrl))
+               dev_warn(&pdev->dev,
+                       "pins are not configured from the driver\n");
+
        pm_runtime_use_autosuspend(&pdev->dev);
        pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
        pm_runtime_enable(&pdev->dev);
@@ -1238,7 +1270,6 @@ dma_chnl_free:
        kfree(mcspi->dma_channels);
 free_master:
        spi_master_put(master);
-       platform_set_drvdata(pdev, NULL);
        return status;
 }
 
@@ -1252,12 +1283,11 @@ static int __devexit omap2_mcspi_remove(struct platform_device *pdev)
        mcspi = spi_master_get_devdata(master);
        dma_channels = mcspi->dma_channels;
 
-       omap2_mcspi_disable_clocks(mcspi);
+       pm_runtime_put_sync(mcspi->dev);
        pm_runtime_disable(&pdev->dev);
 
        spi_unregister_master(master);
        kfree(dma_channels);
-       platform_set_drvdata(pdev, NULL);
 
        return 0;
 }
@@ -1278,20 +1308,21 @@ static int omap2_mcspi_resume(struct device *dev)
        struct omap2_mcspi_regs *ctx = &mcspi->ctx;
        struct omap2_mcspi_cs   *cs;
 
-       omap2_mcspi_enable_clocks(mcspi);
+       pm_runtime_get_sync(mcspi->dev);
        list_for_each_entry(cs, &ctx->cs, node) {
                if ((cs->chconf0 & OMAP2_MCSPI_CHCONF_FORCE) == 0) {
                        /*
                         * We need to toggle CS state for OMAP take this
                         * change in account.
                         */
-                       MOD_REG_BIT(cs->chconf0, OMAP2_MCSPI_CHCONF_FORCE, 1);
+                       cs->chconf0 |= OMAP2_MCSPI_CHCONF_FORCE;
                        __raw_writel(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0);
-                       MOD_REG_BIT(cs->chconf0, OMAP2_MCSPI_CHCONF_FORCE, 0);
+                       cs->chconf0 &= ~OMAP2_MCSPI_CHCONF_FORCE;
                        __raw_writel(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0);
                }
        }
-       omap2_mcspi_disable_clocks(mcspi);
+       pm_runtime_mark_last_busy(mcspi->dev);
+       pm_runtime_put_autosuspend(mcspi->dev);
        return 0;
 }
 #else
index 9b0caddce5037b5b347615969ae9c6bd22eaf8f7..b17c09cf0a054c271cbd2f9e4c0841a0364d2706 100644 (file)
 #define ORION_SPI_CLK_PRESCALE_MASK    0x1F
 
 struct orion_spi {
-       struct work_struct      work;
-
-       /* Lock access to transfer list.        */
-       spinlock_t              lock;
-
-       struct list_head        msg_queue;
        struct spi_master       *master;
        void __iomem            *base;
        unsigned int            max_speed;
@@ -49,8 +43,6 @@ struct orion_spi {
        struct clk              *clk;
 };
 
-static struct workqueue_struct *orion_spi_wq;
-
 static inline void __iomem *spi_reg(struct orion_spi *orion_spi, u32 reg)
 {
        return orion_spi->base + reg;
@@ -277,73 +269,78 @@ out:
 }
 
 
-static void orion_spi_work(struct work_struct *work)
+static int orion_spi_transfer_one_message(struct spi_master *master,
+                                          struct spi_message *m)
 {
-       struct orion_spi *orion_spi =
-               container_of(work, struct orion_spi, work);
-
-       spin_lock_irq(&orion_spi->lock);
-       while (!list_empty(&orion_spi->msg_queue)) {
-               struct spi_message *m;
-               struct spi_device *spi;
-               struct spi_transfer *t = NULL;
-               int par_override = 0;
-               int status = 0;
-               int cs_active = 0;
-
-               m = container_of(orion_spi->msg_queue.next, struct spi_message,
-                                queue);
+       struct orion_spi *orion_spi = spi_master_get_devdata(master);
+       struct spi_device *spi = m->spi;
+       struct spi_transfer *t = NULL;
+       int par_override = 0;
+       int status = 0;
+       int cs_active = 0;
 
-               list_del_init(&m->queue);
-               spin_unlock_irq(&orion_spi->lock);
+       /* Load defaults */
+       status = orion_spi_setup_transfer(spi, NULL);
 
-               spi = m->spi;
+       if (status < 0)
+               goto msg_done;
 
-               /* Load defaults */
-               status = orion_spi_setup_transfer(spi, NULL);
+       list_for_each_entry(t, &m->transfers, transfer_list) {
+               /* make sure buffer length is even when working in 16
+                * bit mode*/
+               if ((t->bits_per_word == 16) && (t->len & 1)) {
+                       dev_err(&spi->dev,
+                               "message rejected : "
+                               "odd data length %d while in 16 bit mode\n",
+                               t->len);
+                       status = -EIO;
+                       goto msg_done;
+               }
 
-               if (status < 0)
+               if (t->speed_hz && t->speed_hz < orion_spi->min_speed) {
+                       dev_err(&spi->dev,
+                               "message rejected : "
+                               "device min speed (%d Hz) exceeds "
+                               "required transfer speed (%d Hz)\n",
+                               orion_spi->min_speed, t->speed_hz);
+                       status = -EIO;
                        goto msg_done;
+               }
 
-               list_for_each_entry(t, &m->transfers, transfer_list) {
-                       if (par_override || t->speed_hz || t->bits_per_word) {
-                               par_override = 1;
-                               status = orion_spi_setup_transfer(spi, t);
-                               if (status < 0)
-                                       break;
-                               if (!t->speed_hz && !t->bits_per_word)
-                                       par_override = 0;
-                       }
-
-                       if (!cs_active) {
-                               orion_spi_set_cs(orion_spi, 1);
-                               cs_active = 1;
-                       }
-
-                       if (t->len)
-                               m->actual_length +=
-                                       orion_spi_write_read(spi, t);
-
-                       if (t->delay_usecs)
-                               udelay(t->delay_usecs);
-
-                       if (t->cs_change) {
-                               orion_spi_set_cs(orion_spi, 0);
-                               cs_active = 0;
-                       }
+               if (par_override || t->speed_hz || t->bits_per_word) {
+                       par_override = 1;
+                       status = orion_spi_setup_transfer(spi, t);
+                       if (status < 0)
+                               break;
+                       if (!t->speed_hz && !t->bits_per_word)
+                               par_override = 0;
                }
 
-msg_done:
-               if (cs_active)
-                       orion_spi_set_cs(orion_spi, 0);
+               if (!cs_active) {
+                       orion_spi_set_cs(orion_spi, 1);
+                       cs_active = 1;
+               }
 
-               m->status = status;
-               m->complete(m->context);
+               if (t->len)
+                       m->actual_length += orion_spi_write_read(spi, t);
 
-               spin_lock_irq(&orion_spi->lock);
+               if (t->delay_usecs)
+                       udelay(t->delay_usecs);
+
+               if (t->cs_change) {
+                       orion_spi_set_cs(orion_spi, 0);
+                       cs_active = 0;
+               }
        }
 
-       spin_unlock_irq(&orion_spi->lock);
+msg_done:
+       if (cs_active)
+               orion_spi_set_cs(orion_spi, 0);
+
+       m->status = status;
+       spi_finalize_current_message(master);
+
+       return 0;
 }
 
 static int __init orion_spi_reset(struct orion_spi *orion_spi)
@@ -376,75 +373,6 @@ static int orion_spi_setup(struct spi_device *spi)
        return 0;
 }
 
-static int orion_spi_transfer(struct spi_device *spi, struct spi_message *m)
-{
-       struct orion_spi *orion_spi;
-       struct spi_transfer *t = NULL;
-       unsigned long flags;
-
-       m->actual_length = 0;
-       m->status = 0;
-
-       /* reject invalid messages and transfers */
-       if (list_empty(&m->transfers) || !m->complete)
-               return -EINVAL;
-
-       orion_spi = spi_master_get_devdata(spi->master);
-
-       list_for_each_entry(t, &m->transfers, transfer_list) {
-               unsigned int bits_per_word = spi->bits_per_word;
-
-               if (t->tx_buf == NULL && t->rx_buf == NULL && t->len) {
-                       dev_err(&spi->dev,
-                               "message rejected : "
-                               "invalid transfer data buffers\n");
-                       goto msg_rejected;
-               }
-
-               if (t->bits_per_word)
-                       bits_per_word = t->bits_per_word;
-
-               if ((bits_per_word != 8) && (bits_per_word != 16)) {
-                       dev_err(&spi->dev,
-                               "message rejected : "
-                               "invalid transfer bits_per_word (%d bits)\n",
-                               bits_per_word);
-                       goto msg_rejected;
-               }
-               /*make sure buffer length is even when working in 16 bit mode*/
-               if ((t->bits_per_word == 16) && (t->len & 1)) {
-                       dev_err(&spi->dev,
-                               "message rejected : "
-                               "odd data length (%d) while in 16 bit mode\n",
-                               t->len);
-                       goto msg_rejected;
-               }
-
-               if (t->speed_hz && t->speed_hz < orion_spi->min_speed) {
-                       dev_err(&spi->dev,
-                               "message rejected : "
-                               "device min speed (%d Hz) exceeds "
-                               "required transfer speed (%d Hz)\n",
-                               orion_spi->min_speed, t->speed_hz);
-                       goto msg_rejected;
-               }
-       }
-
-
-       spin_lock_irqsave(&orion_spi->lock, flags);
-       list_add_tail(&m->queue, &orion_spi->msg_queue);
-       queue_work(orion_spi_wq, &orion_spi->work);
-       spin_unlock_irqrestore(&orion_spi->lock, flags);
-
-       return 0;
-msg_rejected:
-       /* Message rejected and not queued */
-       m->status = -EINVAL;
-       if (m->complete)
-               m->complete(m->context);
-       return -EINVAL;
-}
-
 static int __init orion_spi_probe(struct platform_device *pdev)
 {
        struct spi_master *master;
@@ -474,7 +402,7 @@ static int __init orion_spi_probe(struct platform_device *pdev)
        master->mode_bits = 0;
 
        master->setup = orion_spi_setup;
-       master->transfer = orion_spi_transfer;
+       master->transfer_one_message = orion_spi_transfer_one_message;
        master->num_chipselect = ORION_NUM_CHIPSELECTS;
 
        dev_set_drvdata(&pdev->dev, master);
@@ -507,11 +435,6 @@ static int __init orion_spi_probe(struct platform_device *pdev)
        }
        spi->base = ioremap(r->start, SZ_1K);
 
-       INIT_WORK(&spi->work, orion_spi_work);
-
-       spin_lock_init(&spi->lock);
-       INIT_LIST_HEAD(&spi->msg_queue);
-
        if (orion_spi_reset(spi) < 0)
                goto out_rel_mem;
 
@@ -536,14 +459,12 @@ out:
 static int __exit orion_spi_remove(struct platform_device *pdev)
 {
        struct spi_master *master;
-       struct orion_spi *spi;
        struct resource *r;
+       struct orion_spi *spi;
 
        master = dev_get_drvdata(&pdev->dev);
        spi = spi_master_get_devdata(master);
 
-       cancel_work_sync(&spi->work);
-
        clk_disable_unprepare(spi->clk);
        clk_put(spi->clk);
 
@@ -574,21 +495,13 @@ static struct platform_driver orion_spi_driver = {
 
 static int __init orion_spi_init(void)
 {
-       orion_spi_wq = create_singlethread_workqueue(
-                               orion_spi_driver.driver.name);
-       if (orion_spi_wq == NULL)
-               return -ENOMEM;
-
        return platform_driver_probe(&orion_spi_driver, orion_spi_probe);
 }
 module_init(orion_spi_init);
 
 static void __exit orion_spi_exit(void)
 {
-       flush_workqueue(orion_spi_wq);
        platform_driver_unregister(&orion_spi_driver);
-
-       destroy_workqueue(orion_spi_wq);
 }
 module_exit(orion_spi_exit);
 
index 6abbe23c39b4751a3da3fb4d940912771c0fbfbe..919464102d33afb2d493ace07c81c80403d1d51d 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * A driver for the ARM PL022 PrimeCell SSP/SPI bus master.
  *
- * Copyright (C) 2008-2009 ST-Ericsson AB
+ * Copyright (C) 2008-2012 ST-Ericsson AB
  * Copyright (C) 2006 STMicroelectronics Pvt. Ltd.
  *
  * Author: Linus Walleij <linus.walleij@stericsson.com>
@@ -40,6 +40,9 @@
 #include <linux/dma-mapping.h>
 #include <linux/scatterlist.h>
 #include <linux/pm_runtime.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <linux/pinctrl/consumer.h>
 
 /*
  * This macro is used to define some register default values.
@@ -356,6 +359,8 @@ struct vendor_data {
  * @sgt_rx: scattertable for the RX transfer
  * @sgt_tx: scattertable for the TX transfer
  * @dummypage: a dummy page used for driving data on the bus with DMA
+ * @cur_cs: current chip select (gpio)
+ * @chipselects: list of chipselects (gpios)
  */
 struct pl022 {
        struct amba_device              *adev;
@@ -363,6 +368,10 @@ struct pl022 {
        resource_size_t                 phybase;
        void __iomem                    *virtbase;
        struct clk                      *clk;
+       /* Two optional pin states - default & sleep */
+       struct pinctrl                  *pinctrl;
+       struct pinctrl_state            *pins_default;
+       struct pinctrl_state            *pins_sleep;
        struct spi_master               *master;
        struct pl022_ssp_controller     *master_info;
        /* Message per-transfer pump */
@@ -389,6 +398,8 @@ struct pl022 {
        char                            *dummypage;
        bool                            dma_running;
 #endif
+       int cur_cs;
+       int *chipselects;
 };
 
 /**
@@ -433,6 +444,14 @@ static void null_cs_control(u32 command)
        pr_debug("pl022: dummy chip select control, CS=0x%x\n", command);
 }
 
+static void pl022_cs_control(struct pl022 *pl022, u32 command)
+{
+       if (gpio_is_valid(pl022->cur_cs))
+               gpio_set_value(pl022->cur_cs, command);
+       else
+               pl022->cur_chip->cs_control(command);
+}
+
 /**
  * giveback - current spi_message is over, schedule next message and call
  * callback of this message. Assumes that caller already
@@ -479,7 +498,7 @@ static void giveback(struct pl022 *pl022)
                if (next_msg && next_msg->spi != pl022->cur_msg->spi)
                        next_msg = NULL;
                if (!next_msg || pl022->cur_msg->state == STATE_ERROR)
-                       pl022->cur_chip->cs_control(SSP_CHIP_DESELECT);
+                       pl022_cs_control(pl022, SSP_CHIP_DESELECT);
                else
                        pl022->next_msg_cs_active = true;
 
@@ -818,8 +837,7 @@ static void dma_callback(void *data)
        /* Update total bytes transferred */
        msg->actual_length += pl022->cur_transfer->len;
        if (pl022->cur_transfer->cs_change)
-               pl022->cur_chip->
-                       cs_control(SSP_CHIP_DESELECT);
+               pl022_cs_control(pl022, SSP_CHIP_DESELECT);
 
        /* Move to next transfer */
        msg->state = next_transfer(pl022);
@@ -1252,8 +1270,7 @@ static irqreturn_t pl022_interrupt_handler(int irq, void *dev_id)
                /* Update total bytes transferred */
                msg->actual_length += pl022->cur_transfer->len;
                if (pl022->cur_transfer->cs_change)
-                       pl022->cur_chip->
-                               cs_control(SSP_CHIP_DESELECT);
+                       pl022_cs_control(pl022, SSP_CHIP_DESELECT);
                /* Move to next transfer */
                msg->state = next_transfer(pl022);
                tasklet_schedule(&pl022->pump_transfers);
@@ -1338,7 +1355,7 @@ static void pump_transfers(unsigned long data)
 
                /* Reselect chip select only if cs_change was requested */
                if (previous->cs_change)
-                       pl022->cur_chip->cs_control(SSP_CHIP_SELECT);
+                       pl022_cs_control(pl022, SSP_CHIP_SELECT);
        } else {
                /* STATE_START */
                message->state = STATE_RUNNING;
@@ -1377,7 +1394,7 @@ static void do_interrupt_dma_transfer(struct pl022 *pl022)
 
        /* Enable target chip, if not already active */
        if (!pl022->next_msg_cs_active)
-               pl022->cur_chip->cs_control(SSP_CHIP_SELECT);
+               pl022_cs_control(pl022, SSP_CHIP_SELECT);
 
        if (set_up_next_transfer(pl022, pl022->cur_transfer)) {
                /* Error path */
@@ -1429,12 +1446,12 @@ static void do_polling_transfer(struct pl022 *pl022)
                        if (previous->delay_usecs)
                                udelay(previous->delay_usecs);
                        if (previous->cs_change)
-                               pl022->cur_chip->cs_control(SSP_CHIP_SELECT);
+                               pl022_cs_control(pl022, SSP_CHIP_SELECT);
                } else {
                        /* STATE_START */
                        message->state = STATE_RUNNING;
                        if (!pl022->next_msg_cs_active)
-                               pl022->cur_chip->cs_control(SSP_CHIP_SELECT);
+                               pl022_cs_control(pl022, SSP_CHIP_SELECT);
                }
 
                /* Configuration Changing Per Transfer */
@@ -1466,7 +1483,7 @@ static void do_polling_transfer(struct pl022 *pl022)
                /* Update total byte transferred */
                message->actual_length += pl022->cur_transfer->len;
                if (pl022->cur_transfer->cs_change)
-                       pl022->cur_chip->cs_control(SSP_CHIP_DESELECT);
+                       pl022_cs_control(pl022, SSP_CHIP_DESELECT);
                /* Move to next transfer */
                message->state = next_transfer(pl022);
        }
@@ -1495,6 +1512,7 @@ static int pl022_transfer_one_message(struct spi_master *master,
 
        /* Setup the SPI using the per chip configuration */
        pl022->cur_chip = spi_get_ctldata(msg->spi);
+       pl022->cur_cs = pl022->chipselects[msg->spi->chip_select];
 
        restore_state(pl022);
        flush(pl022);
@@ -1766,12 +1784,14 @@ static const struct pl022_config_chip pl022_default_chip_info = {
 static int pl022_setup(struct spi_device *spi)
 {
        struct pl022_config_chip const *chip_info;
+       struct pl022_config_chip chip_info_dt;
        struct chip_data *chip;
        struct ssp_clock_params clk_freq = { .cpsdvsr = 0, .scr = 0};
        int status = 0;
        struct pl022 *pl022 = spi_master_get_devdata(spi->master);
        unsigned int bits = spi->bits_per_word;
        u32 tmp;
+       struct device_node *np = spi->dev.of_node;
 
        if (!spi->max_speed_hz)
                return -EINVAL;
@@ -1794,10 +1814,32 @@ static int pl022_setup(struct spi_device *spi)
        chip_info = spi->controller_data;
 
        if (chip_info == NULL) {
-               chip_info = &pl022_default_chip_info;
-               /* spi_board_info.controller_data not is supplied */
-               dev_dbg(&spi->dev,
-                       "using default controller_data settings\n");
+               if (np) {
+                       chip_info_dt = pl022_default_chip_info;
+
+                       chip_info_dt.hierarchy = SSP_MASTER;
+                       of_property_read_u32(np, "pl022,interface",
+                               &chip_info_dt.iface);
+                       of_property_read_u32(np, "pl022,com-mode",
+                               &chip_info_dt.com_mode);
+                       of_property_read_u32(np, "pl022,rx-level-trig",
+                               &chip_info_dt.rx_lev_trig);
+                       of_property_read_u32(np, "pl022,tx-level-trig",
+                               &chip_info_dt.tx_lev_trig);
+                       of_property_read_u32(np, "pl022,ctrl-len",
+                               &chip_info_dt.ctrl_len);
+                       of_property_read_u32(np, "pl022,wait-state",
+                               &chip_info_dt.wait_state);
+                       of_property_read_u32(np, "pl022,duplex",
+                               &chip_info_dt.duplex);
+
+                       chip_info = &chip_info_dt;
+               } else {
+                       chip_info = &pl022_default_chip_info;
+                       /* spi_board_info.controller_data is not supplied */
+                       dev_dbg(&spi->dev,
+                               "using default controller_data settings\n");
+               }
        } else
                dev_dbg(&spi->dev,
                        "using user supplied controller_data settings\n");
@@ -1840,8 +1882,9 @@ static int pl022_setup(struct spi_device *spi)
        chip->xfer_type = chip_info->com_mode;
        if (!chip_info->cs_control) {
                chip->cs_control = null_cs_control;
-               dev_warn(&spi->dev,
-                        "chip select function is NULL for this chip\n");
+               if (!gpio_is_valid(pl022->chipselects[spi->chip_select]))
+                       dev_warn(&spi->dev,
+                                "invalid chip select\n");
        } else
                chip->cs_control = chip_info->cs_control;
 
@@ -1986,6 +2029,34 @@ static void pl022_cleanup(struct spi_device *spi)
        kfree(chip);
 }
 
+static struct pl022_ssp_controller *
+pl022_platform_data_dt_get(struct device *dev)
+{
+       struct device_node *np = dev->of_node;
+       struct pl022_ssp_controller *pd;
+       u32 tmp;
+
+       if (!np) {
+               dev_err(dev, "no dt node defined\n");
+               return NULL;
+       }
+
+       pd = devm_kzalloc(dev, sizeof(struct pl022_ssp_controller), GFP_KERNEL);
+       if (!pd) {
+               dev_err(dev, "cannot allocate platform data memory\n");
+               return NULL;
+       }
+
+       pd->bus_id = -1;
+       of_property_read_u32(np, "num-cs", &tmp);
+       pd->num_chipselect = tmp;
+       of_property_read_u32(np, "pl022,autosuspend-delay",
+                            &pd->autosuspend_delay);
+       pd->rt = of_property_read_bool(np, "pl022,rt");
+
+       return pd;
+}
+
 static int __devinit
 pl022_probe(struct amba_device *adev, const struct amba_id *id)
 {
@@ -1993,22 +2064,31 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id)
        struct pl022_ssp_controller *platform_info = adev->dev.platform_data;
        struct spi_master *master;
        struct pl022 *pl022 = NULL;     /*Data for this driver */
-       int status = 0;
+       struct device_node *np = adev->dev.of_node;
+       int status = 0, i, num_cs;
 
        dev_info(&adev->dev,
                 "ARM PL022 driver, device ID: 0x%08x\n", adev->periphid);
-       if (platform_info == NULL) {
-               dev_err(&adev->dev, "probe - no platform data supplied\n");
-               status = -ENODEV;
-               goto err_no_pdata;
+       if (!platform_info && IS_ENABLED(CONFIG_OF))
+               platform_info = pl022_platform_data_dt_get(dev);
+
+       if (!platform_info) {
+               dev_err(dev, "probe: no platform data defined\n");
+               return -ENODEV;
+       }
+
+       if (platform_info->num_chipselect) {
+               num_cs = platform_info->num_chipselect;
+       } else {
+               dev_err(dev, "probe: no chip select defined\n");
+               return -ENODEV;
        }
 
        /* Allocate master with space for data */
        master = spi_alloc_master(dev, sizeof(struct pl022));
        if (master == NULL) {
                dev_err(&adev->dev, "probe - cannot alloc SPI master\n");
-               status = -ENOMEM;
-               goto err_no_master;
+               return -ENOMEM;
        }
 
        pl022 = spi_master_get_devdata(master);
@@ -2016,19 +2096,71 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id)
        pl022->master_info = platform_info;
        pl022->adev = adev;
        pl022->vendor = id->data;
+       pl022->chipselects = devm_kzalloc(dev, num_cs * sizeof(int),
+                                         GFP_KERNEL);
+
+       pl022->pinctrl = devm_pinctrl_get(dev);
+       if (IS_ERR(pl022->pinctrl)) {
+               status = PTR_ERR(pl022->pinctrl);
+               goto err_no_pinctrl;
+       }
+
+       pl022->pins_default = pinctrl_lookup_state(pl022->pinctrl,
+                                                PINCTRL_STATE_DEFAULT);
+       /* enable pins to be muxed in and configured */
+       if (!IS_ERR(pl022->pins_default)) {
+               status = pinctrl_select_state(pl022->pinctrl,
+                               pl022->pins_default);
+               if (status)
+                       dev_err(dev, "could not set default pins\n");
+       } else
+               dev_err(dev, "could not get default pinstate\n");
+
+       pl022->pins_sleep = pinctrl_lookup_state(pl022->pinctrl,
+                                              PINCTRL_STATE_SLEEP);
+       if (IS_ERR(pl022->pins_sleep))
+               dev_dbg(dev, "could not get sleep pinstate\n");
 
        /*
         * Bus Number Which has been Assigned to this SSP controller
         * on this board
         */
        master->bus_num = platform_info->bus_id;
-       master->num_chipselect = platform_info->num_chipselect;
+       master->num_chipselect = num_cs;
        master->cleanup = pl022_cleanup;
        master->setup = pl022_setup;
        master->prepare_transfer_hardware = pl022_prepare_transfer_hardware;
        master->transfer_one_message = pl022_transfer_one_message;
        master->unprepare_transfer_hardware = pl022_unprepare_transfer_hardware;
        master->rt = platform_info->rt;
+       master->dev.of_node = dev->of_node;
+
+       if (platform_info->num_chipselect && platform_info->chipselects) {
+               for (i = 0; i < num_cs; i++)
+                       pl022->chipselects[i] = platform_info->chipselects[i];
+       } else if (IS_ENABLED(CONFIG_OF)) {
+               for (i = 0; i < num_cs; i++) {
+                       int cs_gpio = of_get_named_gpio(np, "cs-gpios", i);
+
+                       if (cs_gpio == -EPROBE_DEFER) {
+                               status = -EPROBE_DEFER;
+                               goto err_no_gpio;
+                       }
+
+                       pl022->chipselects[i] = cs_gpio;
+
+                       if (gpio_is_valid(cs_gpio)) {
+                               if (devm_gpio_request(dev, cs_gpio, "ssp-pl022"))
+                                       dev_err(&adev->dev,
+                                               "could not request %d gpio\n",
+                                               cs_gpio);
+                               else if (gpio_direction_output(cs_gpio, 1))
+                                       dev_err(&adev->dev,
+                                               "could set gpio %d as output\n",
+                                               cs_gpio);
+                       }
+               }
+       }
 
        /*
         * Supports mode 0-3, loopback, and active low CS. Transfers are
@@ -2045,7 +2177,8 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id)
                goto err_no_ioregion;
 
        pl022->phybase = adev->res.start;
-       pl022->virtbase = ioremap(adev->res.start, resource_size(&adev->res));
+       pl022->virtbase = devm_ioremap(dev, adev->res.start,
+                                      resource_size(&adev->res));
        if (pl022->virtbase == NULL) {
                status = -ENOMEM;
                goto err_no_ioremap;
@@ -2055,7 +2188,7 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id)
 
        pm_runtime_resume(dev);
 
-       pl022->clk = clk_get(&adev->dev, NULL);
+       pl022->clk = devm_clk_get(&adev->dev, NULL);
        if (IS_ERR(pl022->clk)) {
                status = PTR_ERR(pl022->clk);
                dev_err(&adev->dev, "could not retrieve SSP/SPI bus clock\n");
@@ -2083,8 +2216,8 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id)
               SSP_CR1(pl022->virtbase));
        load_ssp_default_config(pl022);
 
-       status = request_irq(adev->irq[0], pl022_interrupt_handler, 0, "pl022",
-                            pl022);
+       status = devm_request_irq(dev, adev->irq[0], pl022_interrupt_handler,
+                                 0, "pl022", pl022);
        if (status < 0) {
                dev_err(&adev->dev, "probe - cannot get IRQ (%d)\n", status);
                goto err_no_irq;
@@ -2124,22 +2257,18 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id)
  err_spi_register:
        if (platform_info->enable_dma)
                pl022_dma_remove(pl022);
-
-       free_irq(adev->irq[0], pl022);
  err_no_irq:
        clk_disable(pl022->clk);
  err_no_clk_en:
        clk_unprepare(pl022->clk);
  err_clk_prep:
-       clk_put(pl022->clk);
  err_no_clk:
-       iounmap(pl022->virtbase);
  err_no_ioremap:
        amba_release_regions(adev);
  err_no_ioregion:
+ err_no_gpio:
+ err_no_pinctrl:
        spi_master_put(master);
- err_no_master:
- err_no_pdata:
        return status;
 }
 
@@ -2161,20 +2290,55 @@ pl022_remove(struct amba_device *adev)
        if (pl022->master_info->enable_dma)
                pl022_dma_remove(pl022);
 
-       free_irq(adev->irq[0], pl022);
        clk_disable(pl022->clk);
        clk_unprepare(pl022->clk);
-       clk_put(pl022->clk);
        pm_runtime_disable(&adev->dev);
-       iounmap(pl022->virtbase);
        amba_release_regions(adev);
        tasklet_disable(&pl022->pump_transfers);
        spi_unregister_master(pl022->master);
-       spi_master_put(pl022->master);
        amba_set_drvdata(adev, NULL);
        return 0;
 }
 
+#if defined(CONFIG_SUSPEND) || defined(CONFIG_PM_RUNTIME)
+/*
+ * These two functions are used from both suspend/resume and
+ * the runtime counterparts to handle external resources like
+ * clocks, pins and regulators when going to sleep.
+ */
+static void pl022_suspend_resources(struct pl022 *pl022)
+{
+       int ret;
+
+       clk_disable(pl022->clk);
+
+       /* Optionally let pins go into sleep states */
+       if (!IS_ERR(pl022->pins_sleep)) {
+               ret = pinctrl_select_state(pl022->pinctrl,
+                                          pl022->pins_sleep);
+               if (ret)
+                       dev_err(&pl022->adev->dev,
+                               "could not set pins to sleep state\n");
+       }
+}
+
+static void pl022_resume_resources(struct pl022 *pl022)
+{
+       int ret;
+
+       /* Optionally enable pins to be muxed in and configured */
+       if (!IS_ERR(pl022->pins_default)) {
+               ret = pinctrl_select_state(pl022->pinctrl,
+                                          pl022->pins_default);
+               if (ret)
+                       dev_err(&pl022->adev->dev,
+                               "could not set default pins\n");
+       }
+
+       clk_enable(pl022->clk);
+}
+#endif
+
 #ifdef CONFIG_SUSPEND
 static int pl022_suspend(struct device *dev)
 {
@@ -2186,6 +2350,7 @@ static int pl022_suspend(struct device *dev)
                dev_warn(dev, "cannot suspend master\n");
                return ret;
        }
+       pl022_suspend_resources(pl022);
 
        dev_dbg(dev, "suspended\n");
        return 0;
@@ -2196,6 +2361,8 @@ static int pl022_resume(struct device *dev)
        struct pl022 *pl022 = dev_get_drvdata(dev);
        int ret;
 
+       pl022_resume_resources(pl022);
+
        /* Start the queue running */
        ret = spi_master_resume(pl022->master);
        if (ret)
@@ -2212,8 +2379,7 @@ static int pl022_runtime_suspend(struct device *dev)
 {
        struct pl022 *pl022 = dev_get_drvdata(dev);
 
-       clk_disable(pl022->clk);
-
+       pl022_suspend_resources(pl022);
        return 0;
 }
 
@@ -2221,8 +2387,7 @@ static int pl022_runtime_resume(struct device *dev)
 {
        struct pl022 *pl022 = dev_get_drvdata(dev);
 
-       clk_enable(pl022->clk);
-
+       pl022_resume_resources(pl022);
        return 0;
 }
 #endif
index 8ee7d790ce49424d4595321f6abc068bfcf7eb02..a2a080b7f42b2d9cb515b940301509466a92c7af 100644 (file)
@@ -611,6 +611,7 @@ static int __devinit s3c24xx_spi_probe(struct platform_device *pdev)
        if (!pdata->set_cs) {
                if (pdata->pin_cs < 0) {
                        dev_err(&pdev->dev, "No chipselect pin\n");
+                       err = -EINVAL;
                        goto err_register;
                }
 
index 0e2a02228d5e5be55a59b79c306b6a4719e804b2..1a81c90a4a71dc69ce4ec459342a98af3d26b872 100644 (file)
@@ -835,9 +835,7 @@ static struct s3c64xx_spi_csinfo *s3c64xx_get_slave_ctrldata(
                return ERR_PTR(-EINVAL);
        }
 
-       for_each_child_of_node(slave_np, data_np)
-               if (!strcmp(data_np->name, "controller-data"))
-                       break;
+       data_np = of_get_child_by_name(slave_np, "controller-data");
        if (!data_np) {
                dev_err(&spi->dev, "child node 'controller-data' not found\n");
                return ERR_PTR(-EINVAL);
@@ -847,6 +845,7 @@ static struct s3c64xx_spi_csinfo *s3c64xx_get_slave_ctrldata(
        if (!cs) {
                dev_err(&spi->dev, "could not allocate memory for controller"
                                        " data\n");
+               of_node_put(data_np);
                return ERR_PTR(-ENOMEM);
        }
 
@@ -855,11 +854,13 @@ static struct s3c64xx_spi_csinfo *s3c64xx_get_slave_ctrldata(
                dev_err(&spi->dev, "chip select gpio is not specified or "
                                        "invalid\n");
                kfree(cs);
+               of_node_put(data_np);
                return ERR_PTR(-EINVAL);
        }
 
        of_property_read_u32(data_np, "samsung,spi-feedback-delay", &fb_delay);
        cs->fb_delay = fb_delay;
+       of_node_put(data_np);
        return cs;
 }
 
@@ -976,7 +977,8 @@ err_msgq:
        spi_set_ctldata(spi, NULL);
 
 err_gpio_req:
-       kfree(cs);
+       if (spi->dev.of_node)
+               kfree(cs);
 
        return err;
 }
@@ -1409,7 +1411,7 @@ static int s3c64xx_spi_remove(struct platform_device *pdev)
 #ifdef CONFIG_PM
 static int s3c64xx_spi_suspend(struct device *dev)
 {
-       struct spi_master *master = spi_master_get(dev_get_drvdata(dev));
+       struct spi_master *master = dev_get_drvdata(dev);
        struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
 
        spi_master_suspend(master);
@@ -1428,7 +1430,7 @@ static int s3c64xx_spi_suspend(struct device *dev)
 
 static int s3c64xx_spi_resume(struct device *dev)
 {
-       struct spi_master *master = spi_master_get(dev_get_drvdata(dev));
+       struct spi_master *master = dev_get_drvdata(dev);
        struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
        struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
 
@@ -1452,7 +1454,7 @@ static int s3c64xx_spi_resume(struct device *dev)
 #ifdef CONFIG_PM_RUNTIME
 static int s3c64xx_spi_runtime_suspend(struct device *dev)
 {
-       struct spi_master *master = spi_master_get(dev_get_drvdata(dev));
+       struct spi_master *master = dev_get_drvdata(dev);
        struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
 
        clk_disable(sdd->clk);
@@ -1463,7 +1465,7 @@ static int s3c64xx_spi_runtime_suspend(struct device *dev)
 
 static int s3c64xx_spi_runtime_resume(struct device *dev)
 {
-       struct spi_master *master = spi_master_get(dev_get_drvdata(dev));
+       struct spi_master *master = dev_get_drvdata(dev);
        struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
 
        clk_enable(sdd->src_clk);
diff --git a/drivers/spi/spi-sc18is602.c b/drivers/spi/spi-sc18is602.c
new file mode 100644 (file)
index 0000000..9eda21d
--- /dev/null
@@ -0,0 +1,364 @@
+/*
+ * NXP SC18IS602/603 SPI driver
+ *
+ * Copyright (C) Guenter Roeck <linux@roeck-us.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/platform_data/sc18is602.h>
+
+enum chips { sc18is602, sc18is602b, sc18is603 };
+
+#define SC18IS602_BUFSIZ               200
+#define SC18IS602_CLOCK                        7372000
+
+#define SC18IS602_MODE_CPHA            BIT(2)
+#define SC18IS602_MODE_CPOL            BIT(3)
+#define SC18IS602_MODE_LSB_FIRST       BIT(5)
+#define SC18IS602_MODE_CLOCK_DIV_4     0x0
+#define SC18IS602_MODE_CLOCK_DIV_16    0x1
+#define SC18IS602_MODE_CLOCK_DIV_64    0x2
+#define SC18IS602_MODE_CLOCK_DIV_128   0x3
+
+struct sc18is602 {
+       struct spi_master       *master;
+       struct device           *dev;
+       u8                      ctrl;
+       u32                     freq;
+       u32                     speed;
+
+       /* I2C data */
+       struct i2c_client       *client;
+       enum chips              id;
+       u8                      buffer[SC18IS602_BUFSIZ + 1];
+       int                     tlen;   /* Data queued for tx in buffer */
+       int                     rindex; /* Receive data index in buffer */
+};
+
+static int sc18is602_wait_ready(struct sc18is602 *hw, int len)
+{
+       int i, err;
+       int usecs = 1000000 * len / hw->speed + 1;
+       u8 dummy[1];
+
+       for (i = 0; i < 10; i++) {
+               err = i2c_master_recv(hw->client, dummy, 1);
+               if (err >= 0)
+                       return 0;
+               usleep_range(usecs, usecs * 2);
+       }
+       return -ETIMEDOUT;
+}
+
+static int sc18is602_txrx(struct sc18is602 *hw, struct spi_message *msg,
+                         struct spi_transfer *t, bool do_transfer)
+{
+       unsigned int len = t->len;
+       int ret;
+
+       if (hw->tlen == 0) {
+               /* First byte (I2C command) is chip select */
+               hw->buffer[0] = 1 << msg->spi->chip_select;
+               hw->tlen = 1;
+               hw->rindex = 0;
+       }
+       /*
+        * We can not immediately send data to the chip, since each I2C message
+        * resembles a full SPI message (from CS active to CS inactive).
+        * Enqueue messages up to the first read or until do_transfer is true.
+        */
+       if (t->tx_buf) {
+               memcpy(&hw->buffer[hw->tlen], t->tx_buf, len);
+               hw->tlen += len;
+               if (t->rx_buf)
+                       do_transfer = true;
+               else
+                       hw->rindex = hw->tlen - 1;
+       } else if (t->rx_buf) {
+               /*
+                * For receive-only transfers we still need to perform a dummy
+                * write to receive data from the SPI chip.
+                * Read data starts at the end of transmit data (minus 1 to
+                * account for CS).
+                */
+               hw->rindex = hw->tlen - 1;
+               memset(&hw->buffer[hw->tlen], 0, len);
+               hw->tlen += len;
+               do_transfer = true;
+       }
+
+       if (do_transfer && hw->tlen > 1) {
+               ret = sc18is602_wait_ready(hw, SC18IS602_BUFSIZ);
+               if (ret < 0)
+                       return ret;
+               ret = i2c_master_send(hw->client, hw->buffer, hw->tlen);
+               if (ret < 0)
+                       return ret;
+               if (ret != hw->tlen)
+                       return -EIO;
+
+               if (t->rx_buf) {
+                       int rlen = hw->rindex + len;
+
+                       ret = sc18is602_wait_ready(hw, hw->tlen);
+                       if (ret < 0)
+                               return ret;
+                       ret = i2c_master_recv(hw->client, hw->buffer, rlen);
+                       if (ret < 0)
+                               return ret;
+                       if (ret != rlen)
+                               return -EIO;
+                       memcpy(t->rx_buf, &hw->buffer[hw->rindex], len);
+               }
+               hw->tlen = 0;
+       }
+       return len;
+}
+
+static int sc18is602_setup_transfer(struct sc18is602 *hw, u32 hz, u8 mode)
+{
+       u8 ctrl = 0;
+       int ret;
+
+       if (mode & SPI_CPHA)
+               ctrl |= SC18IS602_MODE_CPHA;
+       if (mode & SPI_CPOL)
+               ctrl |= SC18IS602_MODE_CPOL;
+       if (mode & SPI_LSB_FIRST)
+               ctrl |= SC18IS602_MODE_LSB_FIRST;
+
+       /* Find the closest clock speed */
+       if (hz >= hw->freq / 4) {
+               ctrl |= SC18IS602_MODE_CLOCK_DIV_4;
+               hw->speed = hw->freq / 4;
+       } else if (hz >= hw->freq / 16) {
+               ctrl |= SC18IS602_MODE_CLOCK_DIV_16;
+               hw->speed = hw->freq / 16;
+       } else if (hz >= hw->freq / 64) {
+               ctrl |= SC18IS602_MODE_CLOCK_DIV_64;
+               hw->speed = hw->freq / 64;
+       } else {
+               ctrl |= SC18IS602_MODE_CLOCK_DIV_128;
+               hw->speed = hw->freq / 128;
+       }
+
+       /*
+        * Don't do anything if the control value did not change. The initial
+        * value of 0xff for hw->ctrl ensures that the correct mode will be set
+        * with the first call to this function.
+        */
+       if (ctrl == hw->ctrl)
+               return 0;
+
+       ret = i2c_smbus_write_byte_data(hw->client, 0xf0, ctrl);
+       if (ret < 0)
+               return ret;
+
+       hw->ctrl = ctrl;
+
+       return 0;
+}
+
+static int sc18is602_check_transfer(struct spi_device *spi,
+                                   struct spi_transfer *t, int tlen)
+{
+       int bpw;
+       uint32_t hz;
+
+       if (t && t->len + tlen > SC18IS602_BUFSIZ)
+               return -EINVAL;
+
+       bpw = spi->bits_per_word;
+       if (t && t->bits_per_word)
+               bpw = t->bits_per_word;
+       if (bpw != 8)
+               return -EINVAL;
+
+       hz = spi->max_speed_hz;
+       if (t && t->speed_hz)
+               hz = t->speed_hz;
+       if (hz == 0)
+               return -EINVAL;
+
+       return 0;
+}
+
+static int sc18is602_transfer_one(struct spi_master *master,
+                                 struct spi_message *m)
+{
+       struct sc18is602 *hw = spi_master_get_devdata(master);
+       struct spi_device *spi = m->spi;
+       struct spi_transfer *t;
+       int status = 0;
+
+       /* SC18IS602 does not support CS2 */
+       if (hw->id == sc18is602 && spi->chip_select == 2) {
+               status = -ENXIO;
+               goto error;
+       }
+
+       hw->tlen = 0;
+       list_for_each_entry(t, &m->transfers, transfer_list) {
+               u32 hz = t->speed_hz ? : spi->max_speed_hz;
+               bool do_transfer;
+
+               status = sc18is602_check_transfer(spi, t, hw->tlen);
+               if (status < 0)
+                       break;
+
+               status = sc18is602_setup_transfer(hw, hz, spi->mode);
+               if (status < 0)
+                       break;
+
+               do_transfer = t->cs_change || list_is_last(&t->transfer_list,
+                                                          &m->transfers);
+
+               if (t->len) {
+                       status = sc18is602_txrx(hw, m, t, do_transfer);
+                       if (status < 0)
+                               break;
+                       m->actual_length += status;
+               }
+               status = 0;
+
+               if (t->delay_usecs)
+                       udelay(t->delay_usecs);
+       }
+error:
+       m->status = status;
+       spi_finalize_current_message(master);
+
+       return status;
+}
+
+static int sc18is602_setup(struct spi_device *spi)
+{
+       if (!spi->bits_per_word)
+               spi->bits_per_word = 8;
+
+       if (spi->mode & ~(SPI_CPHA | SPI_CPOL | SPI_LSB_FIRST))
+               return -EINVAL;
+
+       return sc18is602_check_transfer(spi, NULL, 0);
+}
+
+static int sc18is602_probe(struct i2c_client *client,
+                          const struct i2c_device_id *id)
+{
+       struct device *dev = &client->dev;
+       struct device_node *np = dev->of_node;
+       struct sc18is602_platform_data *pdata = dev_get_platdata(dev);
+       struct sc18is602 *hw;
+       struct spi_master *master;
+       int error;
+
+       if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C |
+                                    I2C_FUNC_SMBUS_WRITE_BYTE_DATA))
+               return -EINVAL;
+
+       master = spi_alloc_master(dev, sizeof(struct sc18is602));
+       if (!master)
+               return -ENOMEM;
+
+       hw = spi_master_get_devdata(master);
+       i2c_set_clientdata(client, hw);
+
+       hw->master = master;
+       hw->client = client;
+       hw->dev = dev;
+       hw->ctrl = 0xff;
+
+       hw->id = id->driver_data;
+
+       switch (hw->id) {
+       case sc18is602:
+       case sc18is602b:
+               master->num_chipselect = 4;
+               hw->freq = SC18IS602_CLOCK;
+               break;
+       case sc18is603:
+               master->num_chipselect = 2;
+               if (pdata) {
+                       hw->freq = pdata->clock_frequency;
+               } else {
+                       const __be32 *val;
+                       int len;
+
+                       val = of_get_property(np, "clock-frequency", &len);
+                       if (val && len >= sizeof(__be32))
+                               hw->freq = be32_to_cpup(val);
+               }
+               if (!hw->freq)
+                       hw->freq = SC18IS602_CLOCK;
+               break;
+       }
+       master->bus_num = client->adapter->nr;
+       master->mode_bits = SPI_CPHA | SPI_CPOL | SPI_LSB_FIRST;
+       master->setup = sc18is602_setup;
+       master->transfer_one_message = sc18is602_transfer_one;
+       master->dev.of_node = np;
+
+       error = spi_register_master(master);
+       if (error)
+               goto error_reg;
+
+       return 0;
+
+error_reg:
+       spi_master_put(master);
+       return error;
+}
+
+static int sc18is602_remove(struct i2c_client *client)
+{
+       struct sc18is602 *hw = i2c_get_clientdata(client);
+       struct spi_master *master = hw->master;
+
+       spi_unregister_master(master);
+
+       return 0;
+}
+
+static const struct i2c_device_id sc18is602_id[] = {
+       { "sc18is602", sc18is602 },
+       { "sc18is602b", sc18is602b },
+       { "sc18is603", sc18is603 },
+       { }
+};
+MODULE_DEVICE_TABLE(i2c, sc18is602_id);
+
+static struct i2c_driver sc18is602_driver = {
+       .driver = {
+               .name = "sc18is602",
+       },
+       .probe = sc18is602_probe,
+       .remove = sc18is602_remove,
+       .id_table = sc18is602_id,
+};
+
+module_i2c_driver(sc18is602_driver);
+
+MODULE_DESCRIPTION("SC18IC602/603 SPI Master Driver");
+MODULE_AUTHOR("Guenter Roeck");
+MODULE_LICENSE("GPL");
index 934138c7b3d3c34f94ce4aebe0a33b8b32a02034..796c077ef439dacd3e22fd85415724f72316276d 100644 (file)
@@ -283,7 +283,7 @@ static int __devinit hspi_probe(struct platform_device *pdev)
        ret = spi_register_master(master);
        if (ret < 0) {
                dev_err(&pdev->dev, "spi_register_master error.\n");
-               goto error2;
+               goto error1;
        }
 
        pm_runtime_enable(&pdev->dev);
@@ -292,8 +292,6 @@ static int __devinit hspi_probe(struct platform_device *pdev)
 
        return 0;
 
- error2:
-       devm_iounmap(hspi->dev, hspi->addr);
  error1:
        clk_put(clk);
  error0:
@@ -310,7 +308,6 @@ static int __devexit hspi_remove(struct platform_device *pdev)
 
        clk_put(hspi->clk);
        spi_unregister_master(hspi->master);
-       devm_iounmap(hspi->dev, hspi->addr);
 
        return 0;
 }
index 58e38528532393f89c0b8688469ad3af8a982b21..911e904b3c84db0052eb9d4f028e99d3f5f8c017 100644 (file)
@@ -594,9 +594,7 @@ static int __devexit stmp_spi_remove(struct platform_device *dev)
        struct stmp_spi *ss;
        struct spi_master *master;
 
-       master = platform_get_drvdata(dev);
-       if (master == NULL)
-               goto out0;
+       master = spi_master_get(platform_get_drvdata(dev));
        ss = spi_master_get_devdata(master);
 
        spi_unregister_master(master);
@@ -609,8 +607,6 @@ static int __devexit stmp_spi_remove(struct platform_device *dev)
        destroy_workqueue(ss->workqueue);
        iounmap(ss->regs);
        spi_master_put(master);
-       platform_set_drvdata(dev, NULL);
-out0:
        return 0;
 }
 
diff --git a/drivers/spi/spi-tegra.c b/drivers/spi/spi-tegra.c
deleted file mode 100644 (file)
index 488d9b6..0000000
+++ /dev/null
@@ -1,647 +0,0 @@
-/*
- * Driver for Nvidia TEGRA spi controller.
- *
- * Copyright (C) 2010 Google, Inc.
- *
- * Author:
- *     Erik Gilling <konkers@android.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/err.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <linux/dma-mapping.h>
-#include <linux/dmapool.h>
-#include <linux/clk.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-
-#include <linux/spi/spi.h>
-#include <linux/dmaengine.h>
-
-#include <mach/dma.h>
-
-#define SLINK_COMMAND          0x000
-#define   SLINK_BIT_LENGTH(x)          (((x) & 0x1f) << 0)
-#define   SLINK_WORD_SIZE(x)           (((x) & 0x1f) << 5)
-#define   SLINK_BOTH_EN                        (1 << 10)
-#define   SLINK_CS_SW                  (1 << 11)
-#define   SLINK_CS_VALUE               (1 << 12)
-#define   SLINK_CS_POLARITY            (1 << 13)
-#define   SLINK_IDLE_SDA_DRIVE_LOW     (0 << 16)
-#define   SLINK_IDLE_SDA_DRIVE_HIGH    (1 << 16)
-#define   SLINK_IDLE_SDA_PULL_LOW      (2 << 16)
-#define   SLINK_IDLE_SDA_PULL_HIGH     (3 << 16)
-#define   SLINK_IDLE_SDA_MASK          (3 << 16)
-#define   SLINK_CS_POLARITY1           (1 << 20)
-#define   SLINK_CK_SDA                 (1 << 21)
-#define   SLINK_CS_POLARITY2           (1 << 22)
-#define   SLINK_CS_POLARITY3           (1 << 23)
-#define   SLINK_IDLE_SCLK_DRIVE_LOW    (0 << 24)
-#define   SLINK_IDLE_SCLK_DRIVE_HIGH   (1 << 24)
-#define   SLINK_IDLE_SCLK_PULL_LOW     (2 << 24)
-#define   SLINK_IDLE_SCLK_PULL_HIGH    (3 << 24)
-#define   SLINK_IDLE_SCLK_MASK         (3 << 24)
-#define   SLINK_M_S                    (1 << 28)
-#define   SLINK_WAIT                   (1 << 29)
-#define   SLINK_GO                     (1 << 30)
-#define   SLINK_ENB                    (1 << 31)
-
-#define SLINK_COMMAND2         0x004
-#define   SLINK_LSBFE                  (1 << 0)
-#define   SLINK_SSOE                   (1 << 1)
-#define   SLINK_SPIE                   (1 << 4)
-#define   SLINK_BIDIROE                        (1 << 6)
-#define   SLINK_MODFEN                 (1 << 7)
-#define   SLINK_INT_SIZE(x)            (((x) & 0x1f) << 8)
-#define   SLINK_CS_ACTIVE_BETWEEN      (1 << 17)
-#define   SLINK_SS_EN_CS(x)            (((x) & 0x3) << 18)
-#define   SLINK_SS_SETUP(x)            (((x) & 0x3) << 20)
-#define   SLINK_FIFO_REFILLS_0         (0 << 22)
-#define   SLINK_FIFO_REFILLS_1         (1 << 22)
-#define   SLINK_FIFO_REFILLS_2         (2 << 22)
-#define   SLINK_FIFO_REFILLS_3         (3 << 22)
-#define   SLINK_FIFO_REFILLS_MASK      (3 << 22)
-#define   SLINK_WAIT_PACK_INT(x)       (((x) & 0x7) << 26)
-#define   SLINK_SPC0                   (1 << 29)
-#define   SLINK_TXEN                   (1 << 30)
-#define   SLINK_RXEN                   (1 << 31)
-
-#define SLINK_STATUS           0x008
-#define   SLINK_COUNT(val)             (((val) >> 0) & 0x1f)
-#define   SLINK_WORD(val)              (((val) >> 5) & 0x1f)
-#define   SLINK_BLK_CNT(val)           (((val) >> 0) & 0xffff)
-#define   SLINK_MODF                   (1 << 16)
-#define   SLINK_RX_UNF                 (1 << 18)
-#define   SLINK_TX_OVF                 (1 << 19)
-#define   SLINK_TX_FULL                        (1 << 20)
-#define   SLINK_TX_EMPTY               (1 << 21)
-#define   SLINK_RX_FULL                        (1 << 22)
-#define   SLINK_RX_EMPTY               (1 << 23)
-#define   SLINK_TX_UNF                 (1 << 24)
-#define   SLINK_RX_OVF                 (1 << 25)
-#define   SLINK_TX_FLUSH               (1 << 26)
-#define   SLINK_RX_FLUSH               (1 << 27)
-#define   SLINK_SCLK                   (1 << 28)
-#define   SLINK_ERR                    (1 << 29)
-#define   SLINK_RDY                    (1 << 30)
-#define   SLINK_BSY                    (1 << 31)
-
-#define SLINK_MAS_DATA         0x010
-#define SLINK_SLAVE_DATA       0x014
-
-#define SLINK_DMA_CTL          0x018
-#define   SLINK_DMA_BLOCK_SIZE(x)      (((x) & 0xffff) << 0)
-#define   SLINK_TX_TRIG_1              (0 << 16)
-#define   SLINK_TX_TRIG_4              (1 << 16)
-#define   SLINK_TX_TRIG_8              (2 << 16)
-#define   SLINK_TX_TRIG_16             (3 << 16)
-#define   SLINK_TX_TRIG_MASK           (3 << 16)
-#define   SLINK_RX_TRIG_1              (0 << 18)
-#define   SLINK_RX_TRIG_4              (1 << 18)
-#define   SLINK_RX_TRIG_8              (2 << 18)
-#define   SLINK_RX_TRIG_16             (3 << 18)
-#define   SLINK_RX_TRIG_MASK           (3 << 18)
-#define   SLINK_PACKED                 (1 << 20)
-#define   SLINK_PACK_SIZE_4            (0 << 21)
-#define   SLINK_PACK_SIZE_8            (1 << 21)
-#define   SLINK_PACK_SIZE_16           (2 << 21)
-#define   SLINK_PACK_SIZE_32           (3 << 21)
-#define   SLINK_PACK_SIZE_MASK         (3 << 21)
-#define   SLINK_IE_TXC                 (1 << 26)
-#define   SLINK_IE_RXC                 (1 << 27)
-#define   SLINK_DMA_EN                 (1 << 31)
-
-#define SLINK_STATUS2          0x01c
-#define   SLINK_TX_FIFO_EMPTY_COUNT(val)       (((val) & 0x3f) >> 0)
-#define   SLINK_RX_FIFO_FULL_COUNT(val)                (((val) & 0x3f) >> 16)
-
-#define SLINK_TX_FIFO          0x100
-#define SLINK_RX_FIFO          0x180
-
-static const unsigned long spi_tegra_req_sels[] = {
-       TEGRA_DMA_REQ_SEL_SL2B1,
-       TEGRA_DMA_REQ_SEL_SL2B2,
-       TEGRA_DMA_REQ_SEL_SL2B3,
-       TEGRA_DMA_REQ_SEL_SL2B4,
-};
-
-#define BB_LEN                 32
-
-struct spi_tegra_data {
-       struct spi_master       *master;
-       struct platform_device  *pdev;
-       spinlock_t              lock;
-
-       struct clk              *clk;
-       void __iomem            *base;
-       unsigned long           phys;
-
-       u32                     cur_speed;
-
-       struct list_head        queue;
-       struct spi_transfer     *cur;
-       unsigned                cur_pos;
-       unsigned                cur_len;
-       unsigned                cur_bytes_per_word;
-
-       /* The tegra spi controller has a bug which causes the first word
-        * in PIO transactions to be garbage.  Since packed DMA transactions
-        * require transfers to be 4 byte aligned we need a bounce buffer
-        * for the generic case.
-        */
-       int                     dma_req_len;
-       struct dma_chan         *rx_dma;
-       struct dma_slave_config sconfig;
-       struct dma_async_tx_descriptor  *rx_dma_desc;
-       dma_cookie_t            rx_cookie;
-       u32                     *rx_bb;
-       dma_addr_t              rx_bb_phys;
-};
-
-static void tegra_spi_rx_dma_complete(void *args);
-static inline unsigned long spi_tegra_readl(struct spi_tegra_data *tspi,
-                                           unsigned long reg)
-{
-       return readl(tspi->base + reg);
-}
-
-static inline void spi_tegra_writel(struct spi_tegra_data *tspi,
-                                   unsigned long val,
-                                   unsigned long reg)
-{
-       writel(val, tspi->base + reg);
-}
-
-static void spi_tegra_go(struct spi_tegra_data *tspi)
-{
-       unsigned long val;
-
-       wmb();
-
-       val = spi_tegra_readl(tspi, SLINK_DMA_CTL);
-       val &= ~SLINK_DMA_BLOCK_SIZE(~0) & ~SLINK_DMA_EN;
-       val |= SLINK_DMA_BLOCK_SIZE(tspi->dma_req_len / 4 - 1);
-       spi_tegra_writel(tspi, val, SLINK_DMA_CTL);
-       tspi->rx_dma_desc = dmaengine_prep_slave_single(tspi->rx_dma,
-                               tspi->rx_bb_phys, tspi->dma_req_len,
-                               DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
-       if (!tspi->rx_dma_desc) {
-               dev_err(&tspi->pdev->dev, "dmaengine slave prep failed\n");
-               return;
-       }
-       tspi->rx_dma_desc->callback = tegra_spi_rx_dma_complete;
-       tspi->rx_dma_desc->callback_param = tspi;
-       tspi->rx_cookie = dmaengine_submit(tspi->rx_dma_desc);
-       dma_async_issue_pending(tspi->rx_dma);
-
-       val |= SLINK_DMA_EN;
-       spi_tegra_writel(tspi, val, SLINK_DMA_CTL);
-}
-
-static unsigned spi_tegra_fill_tx_fifo(struct spi_tegra_data *tspi,
-                                 struct spi_transfer *t)
-{
-       unsigned len = min(t->len - tspi->cur_pos, BB_LEN *
-                          tspi->cur_bytes_per_word);
-       u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_pos;
-       int i, j;
-       unsigned long val;
-
-       val = spi_tegra_readl(tspi, SLINK_COMMAND);
-       val &= ~SLINK_WORD_SIZE(~0);
-       val |= SLINK_WORD_SIZE(len / tspi->cur_bytes_per_word - 1);
-       spi_tegra_writel(tspi, val, SLINK_COMMAND);
-
-       for (i = 0; i < len; i += tspi->cur_bytes_per_word) {
-               val = 0;
-               for (j = 0; j < tspi->cur_bytes_per_word; j++)
-                       val |= tx_buf[i + j] << j * 8;
-
-               spi_tegra_writel(tspi, val, SLINK_TX_FIFO);
-       }
-
-       tspi->dma_req_len = len / tspi->cur_bytes_per_word * 4;
-
-       return len;
-}
-
-static unsigned spi_tegra_drain_rx_fifo(struct spi_tegra_data *tspi,
-                                 struct spi_transfer *t)
-{
-       unsigned len = tspi->cur_len;
-       u8 *rx_buf = (u8 *)t->rx_buf + tspi->cur_pos;
-       int i, j;
-       unsigned long val;
-
-       for (i = 0; i < len; i += tspi->cur_bytes_per_word) {
-               val = tspi->rx_bb[i / tspi->cur_bytes_per_word];
-               for (j = 0; j < tspi->cur_bytes_per_word; j++)
-                       rx_buf[i + j] = (val >> (j * 8)) & 0xff;
-       }
-
-       return len;
-}
-
-static void spi_tegra_start_transfer(struct spi_device *spi,
-                                   struct spi_transfer *t)
-{
-       struct spi_tegra_data *tspi = spi_master_get_devdata(spi->master);
-       u32 speed;
-       u8 bits_per_word;
-       unsigned long val;
-
-       speed = t->speed_hz ? t->speed_hz : spi->max_speed_hz;
-       bits_per_word = t->bits_per_word ? t->bits_per_word  :
-               spi->bits_per_word;
-
-       tspi->cur_bytes_per_word = (bits_per_word - 1) / 8 + 1;
-
-       if (speed != tspi->cur_speed)
-               clk_set_rate(tspi->clk, speed);
-
-       if (tspi->cur_speed == 0)
-               clk_prepare_enable(tspi->clk);
-
-       tspi->cur_speed = speed;
-
-       val = spi_tegra_readl(tspi, SLINK_COMMAND2);
-       val &= ~SLINK_SS_EN_CS(~0) | SLINK_RXEN | SLINK_TXEN;
-       if (t->rx_buf)
-               val |= SLINK_RXEN;
-       if (t->tx_buf)
-               val |= SLINK_TXEN;
-       val |= SLINK_SS_EN_CS(spi->chip_select);
-       val |= SLINK_SPIE;
-       spi_tegra_writel(tspi, val, SLINK_COMMAND2);
-
-       val = spi_tegra_readl(tspi, SLINK_COMMAND);
-       val &= ~SLINK_BIT_LENGTH(~0);
-       val |= SLINK_BIT_LENGTH(bits_per_word - 1);
-
-       /* FIXME: should probably control CS manually so that we can be sure
-        * it does not go low between transfer and to support delay_usecs
-        * correctly.
-        */
-       val &= ~SLINK_IDLE_SCLK_MASK & ~SLINK_CK_SDA & ~SLINK_CS_SW;
-
-       if (spi->mode & SPI_CPHA)
-               val |= SLINK_CK_SDA;
-
-       if (spi->mode & SPI_CPOL)
-               val |= SLINK_IDLE_SCLK_DRIVE_HIGH;
-       else
-               val |= SLINK_IDLE_SCLK_DRIVE_LOW;
-
-       val |= SLINK_M_S;
-
-       spi_tegra_writel(tspi, val, SLINK_COMMAND);
-
-       spi_tegra_writel(tspi, SLINK_RX_FLUSH | SLINK_TX_FLUSH, SLINK_STATUS);
-
-       tspi->cur = t;
-       tspi->cur_pos = 0;
-       tspi->cur_len = spi_tegra_fill_tx_fifo(tspi, t);
-
-       spi_tegra_go(tspi);
-}
-
-static void spi_tegra_start_message(struct spi_device *spi,
-                                   struct spi_message *m)
-{
-       struct spi_transfer *t;
-
-       m->actual_length = 0;
-       m->status = 0;
-
-       t = list_first_entry(&m->transfers, struct spi_transfer, transfer_list);
-       spi_tegra_start_transfer(spi, t);
-}
-
-static void handle_spi_rx_dma_complete(struct spi_tegra_data *tspi)
-{
-       unsigned long flags;
-       struct spi_message *m;
-       struct spi_device *spi;
-       int timeout = 0;
-       unsigned long val;
-
-       /* the SPI controller may come back with both the BSY and RDY bits
-        * set.  In this case we need to wait for the BSY bit to clear so
-        * that we are sure the DMA is finished.  1000 reads was empirically
-        * determined to be long enough.
-        */
-       while (timeout++ < 1000) {
-               if (!(spi_tegra_readl(tspi, SLINK_STATUS) & SLINK_BSY))
-                       break;
-       }
-
-       spin_lock_irqsave(&tspi->lock, flags);
-
-       val = spi_tegra_readl(tspi, SLINK_STATUS);
-       val |= SLINK_RDY;
-       spi_tegra_writel(tspi, val, SLINK_STATUS);
-
-       m = list_first_entry(&tspi->queue, struct spi_message, queue);
-
-       if (timeout >= 1000)
-               m->status = -EIO;
-
-       spi = m->state;
-
-       tspi->cur_pos += spi_tegra_drain_rx_fifo(tspi, tspi->cur);
-       m->actual_length += tspi->cur_pos;
-
-       if (tspi->cur_pos < tspi->cur->len) {
-               tspi->cur_len = spi_tegra_fill_tx_fifo(tspi, tspi->cur);
-               spi_tegra_go(tspi);
-       } else if (!list_is_last(&tspi->cur->transfer_list,
-                                &m->transfers)) {
-               tspi->cur =  list_first_entry(&tspi->cur->transfer_list,
-                                             struct spi_transfer,
-                                             transfer_list);
-               spi_tegra_start_transfer(spi, tspi->cur);
-       } else {
-               list_del(&m->queue);
-
-               m->complete(m->context);
-
-               if (!list_empty(&tspi->queue)) {
-                       m = list_first_entry(&tspi->queue, struct spi_message,
-                                            queue);
-                       spi = m->state;
-                       spi_tegra_start_message(spi, m);
-               } else {
-                       clk_disable_unprepare(tspi->clk);
-                       tspi->cur_speed = 0;
-               }
-       }
-
-       spin_unlock_irqrestore(&tspi->lock, flags);
-}
-
-static void tegra_spi_rx_dma_complete(void *args)
-{
-       struct spi_tegra_data *tspi = args;
-       handle_spi_rx_dma_complete(tspi);
-}
-
-static int spi_tegra_setup(struct spi_device *spi)
-{
-       struct spi_tegra_data *tspi = spi_master_get_devdata(spi->master);
-       unsigned long cs_bit;
-       unsigned long val;
-       unsigned long flags;
-
-       dev_dbg(&spi->dev, "setup %d bpw, %scpol, %scpha, %dHz\n",
-               spi->bits_per_word,
-               spi->mode & SPI_CPOL ? "" : "~",
-               spi->mode & SPI_CPHA ? "" : "~",
-               spi->max_speed_hz);
-
-
-       switch (spi->chip_select) {
-       case 0:
-               cs_bit = SLINK_CS_POLARITY;
-               break;
-
-       case 1:
-               cs_bit = SLINK_CS_POLARITY1;
-               break;
-
-       case 2:
-               cs_bit = SLINK_CS_POLARITY2;
-               break;
-
-       case 4:
-               cs_bit = SLINK_CS_POLARITY3;
-               break;
-
-       default:
-               return -EINVAL;
-       }
-
-       spin_lock_irqsave(&tspi->lock, flags);
-
-       val = spi_tegra_readl(tspi, SLINK_COMMAND);
-       if (spi->mode & SPI_CS_HIGH)
-               val |= cs_bit;
-       else
-               val &= ~cs_bit;
-       spi_tegra_writel(tspi, val, SLINK_COMMAND);
-
-       spin_unlock_irqrestore(&tspi->lock, flags);
-
-       return 0;
-}
-
-static int spi_tegra_transfer(struct spi_device *spi, struct spi_message *m)
-{
-       struct spi_tegra_data *tspi = spi_master_get_devdata(spi->master);
-       struct spi_transfer *t;
-       unsigned long flags;
-       int was_empty;
-
-       if (list_empty(&m->transfers) || !m->complete)
-               return -EINVAL;
-
-       list_for_each_entry(t, &m->transfers, transfer_list) {
-               if (t->bits_per_word < 0 || t->bits_per_word > 32)
-                       return -EINVAL;
-
-               if (t->len == 0)
-                       return -EINVAL;
-
-               if (!t->rx_buf && !t->tx_buf)
-                       return -EINVAL;
-       }
-
-       m->state = spi;
-
-       spin_lock_irqsave(&tspi->lock, flags);
-       was_empty = list_empty(&tspi->queue);
-       list_add_tail(&m->queue, &tspi->queue);
-
-       if (was_empty)
-               spi_tegra_start_message(spi, m);
-
-       spin_unlock_irqrestore(&tspi->lock, flags);
-
-       return 0;
-}
-
-static int __devinit spi_tegra_probe(struct platform_device *pdev)
-{
-       struct spi_master       *master;
-       struct spi_tegra_data   *tspi;
-       struct resource         *r;
-       int ret;
-       dma_cap_mask_t mask;
-
-       master = spi_alloc_master(&pdev->dev, sizeof *tspi);
-       if (master == NULL) {
-               dev_err(&pdev->dev, "master allocation failed\n");
-               return -ENOMEM;
-       }
-
-       /* the spi->mode bits understood by this driver: */
-       master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
-
-       master->bus_num = pdev->id;
-
-       master->setup = spi_tegra_setup;
-       master->transfer = spi_tegra_transfer;
-       master->num_chipselect = 4;
-
-       dev_set_drvdata(&pdev->dev, master);
-       tspi = spi_master_get_devdata(master);
-       tspi->master = master;
-       tspi->pdev = pdev;
-       spin_lock_init(&tspi->lock);
-
-       r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (r == NULL) {
-               ret = -ENODEV;
-               goto err0;
-       }
-
-       if (!request_mem_region(r->start, resource_size(r),
-                               dev_name(&pdev->dev))) {
-               ret = -EBUSY;
-               goto err0;
-       }
-
-       tspi->phys = r->start;
-       tspi->base = ioremap(r->start, resource_size(r));
-       if (!tspi->base) {
-               dev_err(&pdev->dev, "can't ioremap iomem\n");
-               ret = -ENOMEM;
-               goto err1;
-       }
-
-       tspi->clk = clk_get(&pdev->dev, NULL);
-       if (IS_ERR(tspi->clk)) {
-               dev_err(&pdev->dev, "can not get clock\n");
-               ret = PTR_ERR(tspi->clk);
-               goto err2;
-       }
-
-       INIT_LIST_HEAD(&tspi->queue);
-
-       dma_cap_zero(mask);
-       dma_cap_set(DMA_SLAVE, mask);
-       tspi->rx_dma = dma_request_channel(mask, NULL, NULL);
-       if (!tspi->rx_dma) {
-               dev_err(&pdev->dev, "can not allocate rx dma channel\n");
-               ret = -ENODEV;
-               goto err3;
-       }
-
-       tspi->rx_bb = dma_alloc_coherent(&pdev->dev, sizeof(u32) * BB_LEN,
-                                        &tspi->rx_bb_phys, GFP_KERNEL);
-       if (!tspi->rx_bb) {
-               dev_err(&pdev->dev, "can not allocate rx bounce buffer\n");
-               ret = -ENOMEM;
-               goto err4;
-       }
-
-       /* Dmaengine Dma slave config */
-       tspi->sconfig.src_addr = tspi->phys + SLINK_RX_FIFO;
-       tspi->sconfig.dst_addr = tspi->phys + SLINK_RX_FIFO;
-       tspi->sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
-       tspi->sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
-       tspi->sconfig.slave_id = spi_tegra_req_sels[pdev->id];
-       tspi->sconfig.src_maxburst = 1;
-       tspi->sconfig.dst_maxburst = 1;
-       ret = dmaengine_device_control(tspi->rx_dma,
-                       DMA_SLAVE_CONFIG, (unsigned long) &tspi->sconfig);
-       if (ret < 0) {
-               dev_err(&pdev->dev, "can not do slave configure for dma %d\n",
-                       ret);
-               goto err4;
-       }
-
-       master->dev.of_node = pdev->dev.of_node;
-       ret = spi_register_master(master);
-
-       if (ret < 0)
-               goto err5;
-
-       return ret;
-
-err5:
-       dma_free_coherent(&pdev->dev, sizeof(u32) * BB_LEN,
-                         tspi->rx_bb, tspi->rx_bb_phys);
-err4:
-       dma_release_channel(tspi->rx_dma);
-err3:
-       clk_put(tspi->clk);
-err2:
-       iounmap(tspi->base);
-err1:
-       release_mem_region(r->start, resource_size(r));
-err0:
-       spi_master_put(master);
-       return ret;
-}
-
-static int __devexit spi_tegra_remove(struct platform_device *pdev)
-{
-       struct spi_master       *master;
-       struct spi_tegra_data   *tspi;
-       struct resource         *r;
-
-       master = dev_get_drvdata(&pdev->dev);
-       tspi = spi_master_get_devdata(master);
-
-       spi_unregister_master(master);
-       dma_release_channel(tspi->rx_dma);
-       dma_free_coherent(&pdev->dev, sizeof(u32) * BB_LEN,
-                         tspi->rx_bb, tspi->rx_bb_phys);
-
-       clk_put(tspi->clk);
-       iounmap(tspi->base);
-
-       r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       release_mem_region(r->start, resource_size(r));
-
-       return 0;
-}
-
-MODULE_ALIAS("platform:spi_tegra");
-
-#ifdef CONFIG_OF
-static struct of_device_id spi_tegra_of_match_table[] __devinitdata = {
-       { .compatible = "nvidia,tegra20-spi", },
-       {}
-};
-MODULE_DEVICE_TABLE(of, spi_tegra_of_match_table);
-#else /* CONFIG_OF */
-#define spi_tegra_of_match_table NULL
-#endif /* CONFIG_OF */
-
-static struct platform_driver spi_tegra_driver = {
-       .driver = {
-               .name =         "spi_tegra",
-               .owner =        THIS_MODULE,
-               .of_match_table = spi_tegra_of_match_table,
-       },
-       .probe =        spi_tegra_probe,
-       .remove =       __devexit_p(spi_tegra_remove),
-};
-module_platform_driver(spi_tegra_driver);
-
-MODULE_LICENSE("GPL");
index 0ce5c12aab5520d3761f33c7e3cf34530aabd8e2..24421024deaf6d6bd19b3da4063ba932f5a3ddb8 100644 (file)
@@ -316,18 +316,7 @@ static struct spi_driver tle62x0_driver = {
        .remove         = __devexit_p(tle62x0_remove),
 };
 
-static __init int tle62x0_init(void)
-{
-       return spi_register_driver(&tle62x0_driver);
-}
-
-static __exit void tle62x0_exit(void)
-{
-       spi_unregister_driver(&tle62x0_driver);
-}
-
-module_init(tle62x0_init);
-module_exit(tle62x0_exit);
+module_spi_driver(tle62x0_driver);
 
 MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
 MODULE_DESCRIPTION("TLE62x0 SPI driver");
index 1284c9b74653f28992335ff87f84467b5d9223aa..135f7406f4bfc3e34e37d39f76577e6089288bef 100644 (file)
@@ -1536,8 +1536,6 @@ static int __devexit pch_spi_pd_remove(struct platform_device *plat_dev)
 
        pci_iounmap(board_dat->pdev, data->io_remap_addr);
        spi_unregister_master(data->master);
-       spi_master_put(data->master);
-       platform_set_drvdata(plat_dev, NULL);
 
        return 0;
 }
index 7e2ddc042f5bff19954bc4d61200a36bf6523609..c6250867a95d4cb0de8ec92e498b995aaf84d99f 100644 (file)
@@ -190,16 +190,30 @@ static void ssb_mips_flash_detect(struct ssb_mipscore *mcore)
 {
        struct ssb_bus *bus = mcore->dev->bus;
 
-       mcore->flash_buswidth = 2;
-       if (bus->chipco.dev) {
-               mcore->flash_window = 0x1c000000;
-               mcore->flash_window_size = 0x02000000;
+       /* When there is no chipcommon on the bus there is 4MB flash */
+       if (!bus->chipco.dev) {
+               mcore->flash_buswidth = 2;
+               mcore->flash_window = SSB_FLASH1;
+               mcore->flash_window_size = SSB_FLASH1_SZ;
+               return;
+       }
+
+       /* There is ChipCommon, so use it to read info about flash */
+       switch (bus->chipco.capabilities & SSB_CHIPCO_CAP_FLASHT) {
+       case SSB_CHIPCO_FLASHT_STSER:
+       case SSB_CHIPCO_FLASHT_ATSER:
+               pr_err("Serial flash not supported\n");
+               break;
+       case SSB_CHIPCO_FLASHT_PARA:
+               pr_debug("Found parallel flash\n");
+               mcore->flash_window = SSB_FLASH2;
+               mcore->flash_window_size = SSB_FLASH2_SZ;
                if ((ssb_read32(bus->chipco.dev, SSB_CHIPCO_FLASH_CFG)
                               & SSB_CHIPCO_CFG_DS16) == 0)
                        mcore->flash_buswidth = 1;
-       } else {
-               mcore->flash_window = 0x1fc00000;
-               mcore->flash_window_size = 0x00400000;
+               else
+                       mcore->flash_buswidth = 2;
+               break;
        }
 }
 
index a807129c7b5a93979e8dc52de5d167d98a327940..7b0ba92e7e46730d1d5c72137c9f1b2b9ddc3201 100644 (file)
@@ -47,7 +47,7 @@ static HLIST_HEAD(binder_dead_nodes);
 static struct dentry *binder_debugfs_dir_entry_root;
 static struct dentry *binder_debugfs_dir_entry_proc;
 static struct binder_node *binder_context_mgr_node;
-static uid_t binder_context_mgr_uid = -1;
+static kuid_t binder_context_mgr_uid = INVALID_UID;
 static int binder_last_id;
 static struct workqueue_struct *binder_deferred_workqueue;
 
@@ -356,77 +356,28 @@ struct binder_transaction {
        unsigned int    flags;
        long    priority;
        long    saved_priority;
-       uid_t   sender_euid;
+       kuid_t  sender_euid;
 };
 
 static void
 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
 
-/*
- * copied from get_unused_fd_flags
- */
 static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
 {
        struct files_struct *files = proc->files;
-       int fd, error;
-       struct fdtable *fdt;
        unsigned long rlim_cur;
        unsigned long irqs;
 
        if (files == NULL)
                return -ESRCH;
 
-       error = -EMFILE;
-       spin_lock(&files->file_lock);
-
-repeat:
-       fdt = files_fdtable(files);
-       fd = find_next_zero_bit(fdt->open_fds, fdt->max_fds, files->next_fd);
-
-       /*
-        * N.B. For clone tasks sharing a files structure, this test
-        * will limit the total number of files that can be opened.
-        */
-       rlim_cur = 0;
-       if (lock_task_sighand(proc->tsk, &irqs)) {
-               rlim_cur = proc->tsk->signal->rlim[RLIMIT_NOFILE].rlim_cur;
-               unlock_task_sighand(proc->tsk, &irqs);
-       }
-       if (fd >= rlim_cur)
-               goto out;
-
-       /* Do we need to expand the fd array or fd set?  */
-       error = expand_files(files, fd);
-       if (error < 0)
-               goto out;
-
-       if (error) {
-               /*
-                * If we needed to expand the fs array we
-                * might have blocked - try again.
-                */
-               error = -EMFILE;
-               goto repeat;
-       }
-
-       __set_open_fd(fd, fdt);
-       if (flags & O_CLOEXEC)
-               __set_close_on_exec(fd, fdt);
-       else
-               __clear_close_on_exec(fd, fdt);
-       files->next_fd = fd + 1;
-
-       /* Sanity check */
-       if (fdt->fd[fd] != NULL) {
-               pr_warn("get_unused_fd: slot %d not NULL!\n", fd);
-               fdt->fd[fd] = NULL;
-       }
+       if (!lock_task_sighand(proc->tsk, &irqs))
+               return -EMFILE;
 
-       error = fd;
+       rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
+       unlock_task_sighand(proc->tsk, &irqs);
 
-out:
-       spin_unlock(&files->file_lock);
-       return error;
+       return __alloc_fd(files, 0, rlim_cur, flags);
 }
 
 /*
@@ -435,28 +386,8 @@ out:
 static void task_fd_install(
        struct binder_proc *proc, unsigned int fd, struct file *file)
 {
-       struct files_struct *files = proc->files;
-       struct fdtable *fdt;
-
-       if (files == NULL)
-               return;
-
-       spin_lock(&files->file_lock);
-       fdt = files_fdtable(files);
-       BUG_ON(fdt->fd[fd] != NULL);
-       rcu_assign_pointer(fdt->fd[fd], file);
-       spin_unlock(&files->file_lock);
-}
-
-/*
- * copied from __put_unused_fd in open.c
- */
-static void __put_unused_fd(struct files_struct *files, unsigned int fd)
-{
-       struct fdtable *fdt = files_fdtable(files);
-       __clear_open_fd(fd, fdt);
-       if (fd < files->next_fd)
-               files->next_fd = fd;
+       if (proc->files)
+               __fd_install(proc->files, fd, file);
 }
 
 /*
@@ -464,27 +395,12 @@ static void __put_unused_fd(struct files_struct *files, unsigned int fd)
  */
 static long task_close_fd(struct binder_proc *proc, unsigned int fd)
 {
-       struct file *filp;
-       struct files_struct *files = proc->files;
-       struct fdtable *fdt;
        int retval;
 
-       if (files == NULL)
+       if (proc->files == NULL)
                return -ESRCH;
 
-       spin_lock(&files->file_lock);
-       fdt = files_fdtable(files);
-       if (fd >= fdt->max_fds)
-               goto out_unlock;
-       filp = fdt->fd[fd];
-       if (!filp)
-               goto out_unlock;
-       rcu_assign_pointer(fdt->fd[fd], NULL);
-       __clear_close_on_exec(fd, fdt);
-       __put_unused_fd(files, fd);
-       spin_unlock(&files->file_lock);
-       retval = filp_close(filp, files);
-
+       retval = __close_fd(proc->files, fd);
        /* can't restart close syscall because file table entry was cleared */
        if (unlikely(retval == -ERESTARTSYS ||
                     retval == -ERESTARTNOINTR ||
@@ -493,10 +409,6 @@ static long task_close_fd(struct binder_proc *proc, unsigned int fd)
                retval = -EINTR;
 
        return retval;
-
-out_unlock:
-       spin_unlock(&files->file_lock);
-       return -EBADF;
 }
 
 static void binder_set_nice(long nice)
@@ -2427,7 +2339,7 @@ retry:
                }
                tr.code = t->code;
                tr.flags = t->flags;
-               tr.sender_euid = t->sender_euid;
+               tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
 
                if (t->from) {
                        struct task_struct *sender = t->from->proc->tsk;
@@ -2705,12 +2617,12 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
                        ret = -EBUSY;
                        goto err;
                }
-               if (binder_context_mgr_uid != -1) {
-                       if (binder_context_mgr_uid != current->cred->euid) {
+               if (uid_valid(binder_context_mgr_uid)) {
+                       if (!uid_eq(binder_context_mgr_uid, current->cred->euid)) {
                                pr_err("binder: BINDER_SET_"
                                       "CONTEXT_MGR bad uid %d != %d\n",
-                                      current->cred->euid,
-                                      binder_context_mgr_uid);
+                                      from_kuid(&init_user_ns, current->cred->euid),
+                                      from_kuid(&init_user_ns, binder_context_mgr_uid));
                                ret = -EPERM;
                                goto err;
                        }
@@ -2793,6 +2705,9 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
        const char *failure_string;
        struct binder_buffer *buffer;
 
+       if (proc->tsk != current)
+               return -EINVAL;
+
        if ((vma->vm_end - vma->vm_start) > SZ_4M)
                vma->vm_end = vma->vm_start + SZ_4M;
 
@@ -2857,7 +2772,7 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
        binder_insert_free_buffer(proc, buffer);
        proc->free_async_space = proc->buffer_size / 2;
        barrier();
-       proc->files = get_files_struct(proc->tsk);
+       proc->files = get_files_struct(current);
        proc->vma = vma;
        proc->vma_vm_mm = vma->vm_mm;
 
index 1154a99dc8dbbe482e8970bf0e5f9d519f87af3e..d0dabcf015a9b50bc453547477dbf9bd2ab6c3d2 100644 (file)
@@ -827,7 +827,7 @@ void gether_cleanup(void)
                return;
 
        unregister_netdev(the_dev->net);
-       flush_work_sync(&the_dev->work);
+       flush_work(&the_dev->work);
        free_netdev(the_dev->net);
 
        the_dev = NULL;
index 3abb31df8f28289a2cdad7a5c41c1ab3063123e2..20d0aec52e72fded49622158d11926a7edc6af13 100644 (file)
@@ -95,7 +95,7 @@ struct sock *netlink_init(int unit, void (*cb)(struct net_device *dev, u16 type,
        init_MUTEX(&netlink_mutex);
 #endif
 
-       sock = netlink_kernel_create(&init_net, unit, THIS_MODULE, &cfg);
+       sock = netlink_kernel_create(&init_net, unit, &cfg);
 
        if (sock)
                rcv_cb = cb;
@@ -135,7 +135,7 @@ int netlink_send(struct sock *sock, int group, u16 type, void *msg, int len)
        }
        memcpy(nlmsg_data(nlh), msg, len);
 
-       NETLINK_CB(skb).pid = 0;
+       NETLINK_CB(skb).portid = 0;
        NETLINK_CB(skb).dst_group = 0;
 
        ret = netlink_broadcast(sock, skb, 0, group+1, GFP_ATOMIC);
index 24d8eebc1d10f2d35c4beb8d4b22849d101bb38c..094fdc366f30a21d767107303b902294cd26358f 100644 (file)
@@ -264,7 +264,7 @@ int nvec_write_async(struct nvec_chip *nvec, const unsigned char *data,
        list_add_tail(&msg->node, &nvec->tx_data);
        spin_unlock_irqrestore(&nvec->tx_lock, flags);
 
-       queue_work(system_nrt_wq, &nvec->tx_work);
+       schedule_work(&nvec->tx_work);
 
        return 0;
 }
@@ -471,7 +471,7 @@ static void nvec_rx_completed(struct nvec_chip *nvec)
        if (!nvec_msg_is_event(nvec->rx))
                complete(&nvec->ec_transfer);
 
-       queue_work(system_nrt_wq, &nvec->rx_work);
+       schedule_work(&nvec->rx_work);
 }
 
 /**
index c8287438e0dc437475e71bcb456fc0de1d0352ed..3434e6ec01426677ff576de960ffbf65440af8d6 100644 (file)
@@ -592,9 +592,8 @@ int omap_gem_mmap_obj(struct drm_gem_object *obj,
                 * in particular in the case of mmap'd dmabufs)
                 */
                fput(vma->vm_file);
-               get_file(obj->filp);
                vma->vm_pgoff = 0;
-               vma->vm_file  = obj->filp;
+               vma->vm_file  = get_file(obj->filp);
 
                vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
        }
index 0ca857ac473e91e3171c0963f85a4be214b64947..48aa1361903e3b5c4f7ae49e38112e30f0e05dac 100644 (file)
@@ -119,7 +119,9 @@ static void wbsoft_configure_filter(struct ieee80211_hw *dev,
        *total_flags = new_flags;
 }
 
-static void wbsoft_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
+static void wbsoft_tx(struct ieee80211_hw *dev,
+                     struct ieee80211_tx_control *control,
+                     struct sk_buff *skb)
 {
        struct wbsoft_priv *priv = dev->priv;
 
index 2ab31e4f02cc741ee5d7b1c603a9158b34f335c4..67789b8345d25cb7a5fad998863aab3c9d999689 100644 (file)
@@ -694,17 +694,14 @@ thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz)
 static void thermal_zone_device_set_polling(struct thermal_zone_device *tz,
                                            int delay)
 {
-       cancel_delayed_work(&(tz->poll_queue));
-
-       if (!delay)
-               return;
-
        if (delay > 1000)
-               queue_delayed_work(system_freezable_wq, &(tz->poll_queue),
-                                     round_jiffies(msecs_to_jiffies(delay)));
+               mod_delayed_work(system_freezable_wq, &tz->poll_queue,
+                                round_jiffies(msecs_to_jiffies(delay)));
+       else if (delay)
+               mod_delayed_work(system_freezable_wq, &tz->poll_queue,
+                                msecs_to_jiffies(delay));
        else
-               queue_delayed_work(system_freezable_wq, &(tz->poll_queue),
-                                     msecs_to_jiffies(delay));
+               cancel_delayed_work(&tz->poll_queue);
 }
 
 static void thermal_zone_device_passive(struct thermal_zone_device *tz,
index 1e456dca4f60be02835d7503436fe8f26450c118..2944ff88fdc0b2ea9705850e080a755aca440a8d 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/console.h>
 #include <linux/delay.h>
 #include <linux/err.h>
+#include <linux/irq.h>
 #include <linux/init.h>
 #include <linux/types.h>
 #include <linux/list.h>
@@ -35,6 +36,7 @@
 #include <xen/page.h>
 #include <xen/events.h>
 #include <xen/interface/io/console.h>
+#include <xen/interface/sched.h>
 #include <xen/hvc-console.h>
 #include <xen/xenbus.h>
 
index 0083bc1f63f43a37c8cc5f531da88f6a24729952..5b95b4f28cf347e56cbdc3b73a249b0feae56630 100644 (file)
@@ -765,7 +765,7 @@ static void hvsi_flush_output(struct hvsi_struct *hp)
 
        /* 'writer' could still be pending if it didn't see n_outbuf = 0 yet */
        cancel_delayed_work_sync(&hp->writer);
-       flush_work_sync(&hp->handshaker);
+       flush_work(&hp->handshaker);
 
        /*
         * it's also possible that our timeout expired and hvsi_write_worker
index 0aeb5a38d2963bcc6bc4e010b3c2376a788aa55b..b4ba0670dc547d8f332de158d9ea8295d5a10c8a 100644 (file)
@@ -1729,7 +1729,7 @@ void ipwireless_hardware_free(struct ipw_hardware *hw)
 
        ipwireless_stop_interrupts(hw);
 
-       flush_work_sync(&hw->work_rx);
+       flush_work(&hw->work_rx);
 
        for (i = 0; i < NL_NUM_OF_ADDRESSES; i++)
                if (hw->packet_assembler[i] != NULL)
index d2af155dec8b91e1f915feb8b6c9f8be26f9e815..57102e66165a99e392b3c5675e68774507440aee 100644 (file)
@@ -435,8 +435,8 @@ void ipwireless_network_free(struct ipw_network *network)
        network->shutting_down = 1;
 
        ipwireless_ppp_close(network);
-       flush_work_sync(&network->work_go_online);
-       flush_work_sync(&network->work_go_offline);
+       flush_work(&network->work_go_online);
+       flush_work(&network->work_go_offline);
 
        ipwireless_stop_interrupts(network->hardware);
        ipwireless_associate_network(network->hardware, NULL);
index 3f63d834cbc93ee03f65afdf0b00463438daa007..c0b334327d9313e71b099b2ff0667671be417c50 100644 (file)
@@ -122,7 +122,7 @@ static void kgdboc_unregister_kbd(void)
                        i--;
                }
        }
-       flush_work_sync(&kgdboc_restore_input_work);
+       flush_work(&kgdboc_restore_input_work);
 }
 #else /* ! CONFIG_KDB_KEYBOARD */
 #define kgdboc_register_kbd(x) 0
index ccc2f35adff1dd8c27d86676461e6c094f7e68aa..6ede6fd92b4cd00e939dca479e0f3e3a0a0d923e 100644 (file)
@@ -1227,7 +1227,7 @@ static int serial_omap_suspend(struct device *dev)
        struct uart_omap_port *up = dev_get_drvdata(dev);
 
        uart_suspend_port(&serial_omap_reg, &up->port);
-       flush_work_sync(&up->qos_work);
+       flush_work(&up->qos_work);
 
        return 0;
 }
index 7c5866920622911ab608b4da5485ccadb487bedb..b0b39b823ccf16ec2e0b82264721a0be7e54005f 100644 (file)
@@ -61,7 +61,7 @@ static void tty_audit_buf_put(struct tty_audit_buf *buf)
 }
 
 static void tty_audit_log(const char *description, struct task_struct *tsk,
-                         uid_t loginuid, unsigned sessionid, int major,
+                         kuid_t loginuid, unsigned sessionid, int major,
                          int minor, unsigned char *data, size_t size)
 {
        struct audit_buffer *ab;
@@ -69,11 +69,14 @@ static void tty_audit_log(const char *description, struct task_struct *tsk,
        ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_TTY);
        if (ab) {
                char name[sizeof(tsk->comm)];
-               uid_t uid = task_uid(tsk);
+               kuid_t uid = task_uid(tsk);
 
                audit_log_format(ab, "%s pid=%u uid=%u auid=%u ses=%u "
                                 "major=%d minor=%d comm=", description,
-                                tsk->pid, uid, loginuid, sessionid,
+                                tsk->pid,
+                                from_kuid(&init_user_ns, uid),
+                                from_kuid(&init_user_ns, loginuid),
+                                sessionid,
                                 major, minor);
                get_task_comm(name, tsk);
                audit_log_untrustedstring(ab, name);
@@ -89,7 +92,7 @@ static void tty_audit_log(const char *description, struct task_struct *tsk,
  *     Generate an audit message from the contents of @buf, which is owned by
  *     @tsk with @loginuid.  @buf->mutex must be locked.
  */
-static void tty_audit_buf_push(struct task_struct *tsk, uid_t loginuid,
+static void tty_audit_buf_push(struct task_struct *tsk, kuid_t loginuid,
                               unsigned int sessionid,
                               struct tty_audit_buf *buf)
 {
@@ -112,7 +115,7 @@ static void tty_audit_buf_push(struct task_struct *tsk, uid_t loginuid,
  */
 static void tty_audit_buf_push_current(struct tty_audit_buf *buf)
 {
-       uid_t auid = audit_get_loginuid(current);
+       kuid_t auid = audit_get_loginuid(current);
        unsigned int sessionid = audit_get_sessionid(current);
        tty_audit_buf_push(current, auid, sessionid, buf);
 }
@@ -179,7 +182,7 @@ void tty_audit_tiocsti(struct tty_struct *tty, char ch)
        }
 
        if (should_audit && audit_enabled) {
-               uid_t auid;
+               kuid_t auid;
                unsigned int sessionid;
 
                auid = audit_get_loginuid(current);
@@ -199,7 +202,7 @@ void tty_audit_tiocsti(struct tty_struct *tty, char ch)
  * reference to the tty audit buffer if available.
  * Flush the buffer or return an appropriate error code.
  */
-int tty_audit_push_task(struct task_struct *tsk, uid_t loginuid, u32 sessionid)
+int tty_audit_push_task(struct task_struct *tsk, kuid_t loginuid, u32 sessionid)
 {
        struct tty_audit_buf *buf = ERR_PTR(-EPERM);
        unsigned long flags;
index 8a5a8b064616a05eb0eefe517c86b2cc9b43f2f6..2ea176b2280e60f201aefec32af4beb8d7892702 100644 (file)
@@ -1166,10 +1166,8 @@ ssize_t redirected_tty_write(struct file *file, const char __user *buf,
        struct file *p = NULL;
 
        spin_lock(&redirect_lock);
-       if (redirect) {
-               get_file(redirect);
-               p = redirect;
-       }
+       if (redirect)
+               p = get_file(redirect);
        spin_unlock(&redirect_lock);
 
        if (p) {
@@ -2264,8 +2262,7 @@ static int tioccons(struct file *file)
                spin_unlock(&redirect_lock);
                return -EBUSY;
        }
-       get_file(file);
-       redirect = file;
+       redirect = get_file(file);
        spin_unlock(&redirect_lock);
        return 0;
 }
@@ -2809,6 +2806,13 @@ static long tty_compat_ioctl(struct file *file, unsigned int cmd,
 }
 #endif
 
+static int this_tty(const void *t, struct file *file, unsigned fd)
+{
+       if (likely(file->f_op->read != tty_read))
+               return 0;
+       return file_tty(file) != t ? 0 : fd + 1;
+}
+
 /*
  * This implements the "Secure Attention Key" ---  the idea is to
  * prevent trojan horses by killing all processes associated with this
@@ -2836,8 +2840,6 @@ void __do_SAK(struct tty_struct *tty)
        struct task_struct *g, *p;
        struct pid *session;
        int             i;
-       struct file     *filp;
-       struct fdtable *fdt;
 
        if (!tty)
                return;
@@ -2867,27 +2869,12 @@ void __do_SAK(struct tty_struct *tty)
                        continue;
                }
                task_lock(p);
-               if (p->files) {
-                       /*
-                        * We don't take a ref to the file, so we must
-                        * hold ->file_lock instead.
-                        */
-                       spin_lock(&p->files->file_lock);
-                       fdt = files_fdtable(p->files);
-                       for (i = 0; i < fdt->max_fds; i++) {
-                               filp = fcheck_files(p->files, i);
-                               if (!filp)
-                                       continue;
-                               if (filp->f_op->read == tty_read &&
-                                   file_tty(filp) == tty) {
-                                       printk(KERN_NOTICE "SAK: killed process %d"
-                                           " (%s): fd#%d opened to the tty\n",
-                                           task_pid_nr(p), p->comm, i);
-                                       force_sig(SIGKILL, p);
-                                       break;
-                               }
-                       }
-                       spin_unlock(&p->files->file_lock);
+               i = iterate_fd(p->files, 0, this_tty, tty);
+               if (i != 0) {
+                       printk(KERN_NOTICE "SAK: killed process %d"
+                           " (%s): fd#%d opened to the tty\n",
+                                   task_pid_nr(p), p->comm, i - 1);
+                       force_sig(SIGKILL, p);
                }
                task_unlock(p);
        } while_each_thread(g, p);
index 4d7b56268c79408a35b2a3990990778b9359f071..0f2a2c5e704c3560755069f1c102f6714e0ae4b6 100644 (file)
@@ -523,9 +523,9 @@ static int tty_ldisc_halt(struct tty_struct *tty)
  */
 static void tty_ldisc_flush_works(struct tty_struct *tty)
 {
-       flush_work_sync(&tty->hangup_work);
-       flush_work_sync(&tty->SAK_work);
-       flush_work_sync(&tty->buf.work);
+       flush_work(&tty->hangup_work);
+       flush_work(&tty->SAK_work);
+       flush_work(&tty->buf.work);
 }
 
 /**
index 975e9c6691d619c4b96a8103b71e97075c1a9a14..807627b36cc855dae7228daffbe7ce392b694f7f 100644 (file)
@@ -718,7 +718,7 @@ static void speedtch_atm_stop(struct usbatm_data *usbatm, struct atm_dev *atm_de
        del_timer_sync(&instance->resubmit_timer);
        usb_free_urb(int_urb);
 
-       flush_work_sync(&instance->status_check_work);
+       flush_work(&instance->status_check_work);
 }
 
 static int speedtch_pre_reset(struct usb_interface *intf)
index e1f8b2c973fe1e7df698e918328fd5bc421906e6..defff43950bc11a6ae49ee6d0c80f446e31701c9 100644 (file)
@@ -2262,7 +2262,7 @@ static void uea_stop(struct uea_softc *sc)
        usb_free_urb(sc->urb_int);
 
        /* flush the work item, when no one can schedule it */
-       flush_work_sync(&sc->task);
+       flush_work(&sc->task);
 
        release_firmware(sc->dsp_firm);
        uea_leaves(INS_TO_USBDEV(sc));
index 829aba75a6dfef28f1ce79055f5b73d6df883c68..64c4ec10d1fcbe9db29220c5e79e33c9116de17c 100644 (file)
@@ -224,8 +224,8 @@ struct ffs_data {
        /* File permissions, written once when fs is mounted */
        struct ffs_file_perms {
                umode_t                         mode;
-               uid_t                           uid;
-               gid_t                           gid;
+               kuid_t                          uid;
+               kgid_t                          gid;
        }                               file_perms;
 
        /*
@@ -340,7 +340,7 @@ ffs_sb_create_file(struct super_block *sb, const char *name, void *data,
 
 static int ffs_mutex_lock(struct mutex *mutex, unsigned nonblock)
        __attribute__((warn_unused_result, nonnull));
-static char *ffs_prepare_buffer(const char * __user buf, size_t len)
+static char *ffs_prepare_buffer(const char __user *buf, size_t len)
        __attribute__((warn_unused_result, nonnull));
 
 
@@ -1147,10 +1147,20 @@ static int ffs_fs_parse_opts(struct ffs_sb_fill_data *data, char *opts)
                        break;
 
                case 3:
-                       if (!memcmp(opts, "uid", 3))
-                               data->perms.uid = value;
+                       if (!memcmp(opts, "uid", 3)) {
+                               data->perms.uid = make_kuid(current_user_ns(), value);
+                               if (!uid_valid(data->perms.uid)) {
+                                       pr_err("%s: unmapped value: %lu\n", opts, value);
+                                       return -EINVAL;
+                               }
+                       }
-                       else if (!memcmp(opts, "gid", 3))
-                               data->perms.gid = value;
+                       else if (!memcmp(opts, "gid", 3)) {
+                               data->perms.gid = make_kgid(current_user_ns(), value);
+                               if (!gid_valid(data->perms.gid)) {
+                                       pr_err("%s: unmapped value: %lu\n", opts, value);
+                                       return -EINVAL;
+                               }
+                       }
                        else
                                goto invalid;
                        break;
@@ -1179,8 +1188,8 @@ ffs_fs_mount(struct file_system_type *t, int flags,
        struct ffs_sb_fill_data data = {
                .perms = {
                        .mode = S_IFREG | 0600,
-                       .uid = 0,
-                       .gid = 0
+                       .uid = GLOBAL_ROOT_UID,
+                       .gid = GLOBAL_ROOT_GID,
                },
                .root_mode = S_IFDIR | 0500,
        };
@@ -2436,7 +2445,7 @@ static int ffs_mutex_lock(struct mutex *mutex, unsigned nonblock)
                : mutex_lock_interruptible(mutex);
 }
 
-static char *ffs_prepare_buffer(const char * __user buf, size_t len)
+static char *ffs_prepare_buffer(const char __user *buf, size_t len)
 {
        char *data;
 
index 4bb6d53f2de3ff1bc3b97c05a28a8705a133e7a0..76494cabf4e46e28cb2771b1fdd3566b8e6cc5b2 100644 (file)
@@ -1985,8 +1985,8 @@ gadgetfs_make_inode (struct super_block *sb,
        if (inode) {
                inode->i_ino = get_next_ino();
                inode->i_mode = mode;
-               inode->i_uid = default_uid;
-               inode->i_gid = default_gid;
+               inode->i_uid = make_kuid(&init_user_ns, default_uid);
+               inode->i_gid = make_kgid(&init_user_ns, default_gid);
                inode->i_atime = inode->i_mtime = inode->i_ctime
                                = CURRENT_TIME;
                inode->i_private = data;
index b9c46900c2c16bac8577876b42cfa52953ce0846..6458764994efe5d2523ca0cfded7af6a0fd91432 100644 (file)
@@ -834,7 +834,7 @@ void gether_cleanup(void)
                return;
 
        unregister_netdev(the_dev->net);
-       flush_work_sync(&the_dev->work);
+       flush_work(&the_dev->work);
        free_netdev(the_dev->net);
 
        the_dev = NULL;
index 6780010e9c3cf77ca88d4e2647e9eab312ae1b3b..4a1d64d92338e7653035b054decf6e714909a41f 100644 (file)
@@ -893,7 +893,7 @@ static void ohci_stop (struct usb_hcd *hcd)
        ohci_dump (ohci, 1);
 
        if (quirk_nec(ohci))
-               flush_work_sync(&ohci->nec_work);
+               flush_work(&ohci->nec_work);
 
        ohci_usb_reset (ohci);
        ohci_writel (ohci, OHCI_INTR_MIE, &ohci->regs->intrdisable);
index 81f1f9a0be8f160d55b6dd3decce5cf00a448b1a..ceee2119bffa46bf4f25e79c9686712e75129b9d 100644 (file)
@@ -1230,7 +1230,7 @@ static int __exit isp1301_remove(struct i2c_client *i2c)
        isp->timer.data = 0;
        set_bit(WORK_STOP, &isp->todo);
        del_timer_sync(&isp->timer);
-       flush_work_sync(&isp->work);
+       flush_work(&isp->work);
 
        put_device(&i2c->dev);
        the_transceiver = NULL;
index 17830c9c7cc623b7b26cdac7568aca508f7251e7..56097c6d072d0797ecc9c13c9d2d231aa3e9afa8 100644 (file)
@@ -1014,7 +1014,7 @@ static void vfio_group_try_dissolve_container(struct vfio_group *group)
 
 static int vfio_group_set_container(struct vfio_group *group, int container_fd)
 {
-       struct file *filep;
+       struct fd f;
        struct vfio_container *container;
        struct vfio_iommu_driver *driver;
        int ret = 0;
@@ -1022,17 +1022,17 @@ static int vfio_group_set_container(struct vfio_group *group, int container_fd)
        if (atomic_read(&group->container_users))
                return -EINVAL;
 
-       filep = fget(container_fd);
-       if (!filep)
+       f = fdget(container_fd);
+       if (!f.file)
                return -EBADF;
 
        /* Sanity check, is this really our fd? */
-       if (filep->f_op != &vfio_fops) {
-               fput(filep);
+       if (f.file->f_op != &vfio_fops) {
+               fdput(f);
                return -EINVAL;
        }
 
-       container = filep->private_data;
+       container = f.file->private_data;
        WARN_ON(!container); /* fget ensures we don't race vfio_release */
 
        mutex_lock(&container->group_lock);
@@ -1054,8 +1054,7 @@ static int vfio_group_set_container(struct vfio_group *group, int container_fd)
 
 unlock_out:
        mutex_unlock(&container->group_lock);
-       fput(filep);
-
+       fdput(f);
        return ret;
 }
 
index ef82a0d18489365e64a7ab0958bf01f654cfc8b1..99ac2cb08b43bcb3b570969d752bff25126f298d 100644 (file)
@@ -636,8 +636,8 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
 
 static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
 {
-       struct file *eventfp, *filep = NULL,
-                   *pollstart = NULL, *pollstop = NULL;
+       struct file *eventfp, *filep = NULL;
+       bool pollstart = false, pollstop = false;
        struct eventfd_ctx *ctx = NULL;
        u32 __user *idxp = argp;
        struct vhost_virtqueue *vq;
@@ -763,8 +763,8 @@ static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
                        break;
                }
                if (eventfp != vq->kick) {
-                       pollstop = filep = vq->kick;
-                       pollstart = vq->kick = eventfp;
+                       pollstop = (filep = vq->kick) != NULL;
+                       pollstart = (vq->kick = eventfp) != NULL;
                } else
                        filep = eventfp;
                break;
index d1f881e8030ed42828c690dcab66fe7bb3f99cb6..2e0f3bab61143410971a45f3686f0796823bdd21 100644 (file)
@@ -257,19 +257,17 @@ int get_img(struct mdp_img *img, struct fb_info *info,
            unsigned long *start, unsigned long *len,
            struct file **filep)
 {
-       int put_needed, ret = 0;
-       struct file *file;
-
-       file = fget_light(img->memory_id, &put_needed);
-       if (file == NULL)
+       int ret = 0;
+       struct fd f = fdget(img->memory_id);
+       if (f.file == NULL)
                return -1;
 
-       if (MAJOR(file->f_dentry->d_inode->i_rdev) == FB_MAJOR) {
+       if (MAJOR(f.file->f_dentry->d_inode->i_rdev) == FB_MAJOR) {
                *start = info->fix.smem_start;
                *len = info->fix.smem_len;
        } else
                ret = -1;
-       fput_light(file, put_needed);
+       fdput(f);
 
        return ret;
 }
index 3f5acc7771da34f6f08549c7c8db27b40d102af9..6b5e6e0e202f253cada3b80ddf5d14db9b4b0bd7 100644 (file)
@@ -906,7 +906,7 @@ static int taal_probe(struct omap_dss_device *dssdev)
                r = -ENOMEM;
                goto err_wq;
        }
-       INIT_DELAYED_WORK_DEFERRABLE(&td->esd_work, taal_esd_work);
+       INIT_DEFERRABLE_WORK(&td->esd_work, taal_esd_work);
        INIT_DELAYED_WORK(&td->ulps_work, taal_ulps_work);
 
        dev_set_drvdata(&dssdev->dev, td);
@@ -962,8 +962,8 @@ static int taal_probe(struct omap_dss_device *dssdev)
                        goto err_irq;
                }
 
-               INIT_DELAYED_WORK_DEFERRABLE(&td->te_timeout_work,
-                                       taal_te_timeout_work_callback);
+               INIT_DEFERRABLE_WORK(&td->te_timeout_work,
+                                    taal_te_timeout_work_callback);
 
                dev_dbg(&dssdev->dev, "Using GPIO TE\n");
        }
index b07e8864f82fd4f034f5b59cf2009850bb3ec6d1..05ee04667af1bdca019c8d4be15b78ce3d421b0f 100644 (file)
@@ -4306,7 +4306,7 @@ static void dsi_framedone_irq_callback(void *data, u32 mask)
         * and is sending the data.
         */
 
-       __cancel_delayed_work(&dsi->framedone_timeout_work);
+       cancel_delayed_work(&dsi->framedone_timeout_work);
 
        dsi_handle_framedone(dsidev, 0);
 }
@@ -4863,8 +4863,8 @@ static int __init omap_dsihw_probe(struct platform_device *dsidev)
        mutex_init(&dsi->lock);
        sema_init(&dsi->bus_lock, 1);
 
-       INIT_DELAYED_WORK_DEFERRABLE(&dsi->framedone_timeout_work,
-                       dsi_framedone_timeout_work_callback);
+       INIT_DEFERRABLE_WORK(&dsi->framedone_timeout_work,
+                            dsi_framedone_timeout_work_callback);
 
 #ifdef DSI_CATCH_MISSING_TE
        init_timer(&dsi->te_timer);
index 7595581d032cc9d9c5a7f04ee12b861dc76051ee..c60d1629c91601f366fef151a7d45f51736b1378 100644 (file)
@@ -373,11 +373,22 @@ static void unmask_evtchn(int port)
 {
        struct shared_info *s = HYPERVISOR_shared_info;
        unsigned int cpu = get_cpu();
+       int do_hypercall = 0, evtchn_pending = 0;
 
        BUG_ON(!irqs_disabled());
 
-       /* Slow path (hypercall) if this is a non-local port. */
-       if (unlikely(cpu != cpu_from_evtchn(port))) {
+       if (unlikely((cpu != cpu_from_evtchn(port))))
+               do_hypercall = 1;
+       else
+               evtchn_pending = sync_test_bit(port, &s->evtchn_pending[0]);
+
+       if (unlikely(evtchn_pending && xen_hvm_domain()))
+               do_hypercall = 1;
+
+       /* Slow path (hypercall) if this is a non-local port or if this is
+        * an hvm domain and an event is pending (hvm domains don't have
+        * their own implementation of irq_enable). */
+       if (do_hypercall) {
                struct evtchn_unmask unmask = { .port = port };
                (void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
        } else {
@@ -390,7 +401,7 @@ static void unmask_evtchn(int port)
                 * 'hw_resend_irq'. Just like a real IO-APIC we 'lose
                 * the interrupt edge' if the channel is masked.
                 */
-               if (sync_test_bit(port, &s->evtchn_pending[0]) &&
+               if (evtchn_pending &&
                    !sync_test_and_set_bit(port / BITS_PER_LONG,
                                           &vcpu_info->evtchn_pending_sel))
                        vcpu_info->evtchn_upcall_pending = 1;
@@ -831,6 +842,7 @@ int bind_evtchn_to_irq(unsigned int evtchn)
                struct irq_info *info = info_for_irq(irq);
                WARN_ON(info == NULL || info->type != IRQT_EVTCHN);
        }
+       irq_clear_status_flags(irq, IRQ_NOREQUEST|IRQ_NOAUTOEN);
 
 out:
        mutex_unlock(&irq_mapping_update_lock);
index 7f1241608489de05a33e957a77145f4faa605f0e..5df9fd847b2eebbc87fe668e136d1c0684a7905c 100644 (file)
@@ -446,7 +446,7 @@ static void mn_release(struct mmu_notifier *mn,
        spin_unlock(&priv->lock);
 }
 
-struct mmu_notifier_ops gntdev_mmu_ops = {
+static struct mmu_notifier_ops gntdev_mmu_ops = {
        .release                = mn_release,
        .invalidate_page        = mn_invl_page,
        .invalidate_range_start = mn_invl_range_start,
index 006726688baf4f0a535896dd38c9a3ea2198c617..b2b0a375b3484b162b0c2ebc6cd09d5584f8ec60 100644 (file)
@@ -38,6 +38,7 @@
 #include <linux/vmalloc.h>
 #include <linux/uaccess.h>
 #include <linux/io.h>
+#include <linux/delay.h>
 #include <linux/hardirq.h>
 
 #include <xen/xen.h>
@@ -47,6 +48,7 @@
 #include <xen/interface/memory.h>
 #include <xen/hvc-console.h>
 #include <asm/xen/hypercall.h>
+#include <asm/xen/interface.h>
 
 #include <asm/pgtable.h>
 #include <asm/sync_bitops.h>
@@ -285,10 +287,9 @@ int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
 }
 EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access);
 
-void gnttab_update_subpage_entry_v2(grant_ref_t ref, domid_t domid,
-                                   unsigned long frame, int flags,
-                                   unsigned page_off,
-                                   unsigned length)
+static void gnttab_update_subpage_entry_v2(grant_ref_t ref, domid_t domid,
+                                          unsigned long frame, int flags,
+                                          unsigned page_off, unsigned length)
 {
        gnttab_shared.v2[ref].sub_page.frame = frame;
        gnttab_shared.v2[ref].sub_page.page_off = page_off;
@@ -345,9 +346,9 @@ bool gnttab_subpage_grants_available(void)
 }
 EXPORT_SYMBOL_GPL(gnttab_subpage_grants_available);
 
-void gnttab_update_trans_entry_v2(grant_ref_t ref, domid_t domid,
-                                 int flags, domid_t trans_domid,
-                                 grant_ref_t trans_gref)
+static void gnttab_update_trans_entry_v2(grant_ref_t ref, domid_t domid,
+                                        int flags, domid_t trans_domid,
+                                        grant_ref_t trans_gref)
 {
        gnttab_shared.v2[ref].transitive.trans_domid = trans_domid;
        gnttab_shared.v2[ref].transitive.gref = trans_gref;
@@ -823,6 +824,52 @@ unsigned int gnttab_max_grant_frames(void)
 }
 EXPORT_SYMBOL_GPL(gnttab_max_grant_frames);
 
+/* Handling of paged out grant targets (GNTST_eagain) */
+#define MAX_DELAY 256
+static inline void
+gnttab_retry_eagain_gop(unsigned int cmd, void *gop, int16_t *status,
+                                               const char *func)
+{
+       unsigned delay = 1;
+
+       do {
+               BUG_ON(HYPERVISOR_grant_table_op(cmd, gop, 1));
+               if (*status == GNTST_eagain)
+                       msleep(delay++);
+       } while ((*status == GNTST_eagain) && (delay < MAX_DELAY));
+
+       if (delay >= MAX_DELAY) {
+               printk(KERN_ERR "%s: %s eagain grant\n", func, current->comm);
+               *status = GNTST_bad_page;
+       }
+}
+
+void gnttab_batch_map(struct gnttab_map_grant_ref *batch, unsigned count)
+{
+       struct gnttab_map_grant_ref *op;
+
+       if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, batch, count))
+               BUG();
+       for (op = batch; op < batch + count; op++)
+               if (op->status == GNTST_eagain)
+                       gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, op,
+                                               &op->status, __func__);
+}
+EXPORT_SYMBOL_GPL(gnttab_batch_map);
+
+void gnttab_batch_copy(struct gnttab_copy *batch, unsigned count)
+{
+       struct gnttab_copy *op;
+
+       if (HYPERVISOR_grant_table_op(GNTTABOP_copy, batch, count))
+               BUG();
+       for (op = batch; op < batch + count; op++)
+               if (op->status == GNTST_eagain)
+                       gnttab_retry_eagain_gop(GNTTABOP_copy, op,
+                                               &op->status, __func__);
+}
+EXPORT_SYMBOL_GPL(gnttab_batch_copy);
+
 int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
                    struct gnttab_map_grant_ref *kmap_ops,
                    struct page **pages, unsigned int count)
@@ -836,6 +883,12 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
        if (ret)
                return ret;
 
+       /* Retry eagain maps */
+       for (i = 0; i < count; i++)
+               if (map_ops[i].status == GNTST_eagain)
+                       gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, map_ops + i,
+                                               &map_ops[i].status, __func__);
+
        if (xen_feature(XENFEAT_auto_translated_physmap))
                return ret;
 
index ccee0f16bcf8c10e994855a6efadff99b5b6bda1..ef6389580b8c78dd30a8c7738098c51b6ef3133a 100644 (file)
@@ -76,7 +76,7 @@ static void free_page_list(struct list_head *pages)
  */
 static int gather_array(struct list_head *pagelist,
                        unsigned nelem, size_t size,
-                       void __user *data)
+                       const void __user *data)
 {
        unsigned pageidx;
        void *pagedata;
@@ -246,61 +246,117 @@ struct mmap_batch_state {
        domid_t domain;
        unsigned long va;
        struct vm_area_struct *vma;
-       int err;
-
-       xen_pfn_t __user *user;
+       /* A tristate:
+        *      0 for no errors
+        *      1 if at least one error has happened (and no
+        *          -ENOENT errors have happened)
+        *      -ENOENT if at least 1 -ENOENT has happened.
+        */
+       int global_error;
+       /* An array for individual errors */
+       int *err;
+
+       /* User-space mfn array to store errors in the second pass for V1. */
+       xen_pfn_t __user *user_mfn;
 };
 
 static int mmap_batch_fn(void *data, void *state)
 {
        xen_pfn_t *mfnp = data;
        struct mmap_batch_state *st = state;
+       int ret;
+
+       ret = xen_remap_domain_mfn_range(st->vma, st->va & PAGE_MASK, *mfnp, 1,
+                                        st->vma->vm_page_prot, st->domain);
 
-       if (xen_remap_domain_mfn_range(st->vma, st->va & PAGE_MASK, *mfnp, 1,
-                                      st->vma->vm_page_prot, st->domain) < 0) {
-               *mfnp |= 0xf0000000U;
-               st->err++;
+       /* Store error code for second pass. */
+       *(st->err++) = ret;
+
+       /* And see if it affects the global_error. */
+       if (ret < 0) {
+               if (ret == -ENOENT)
+                       st->global_error = -ENOENT;
+               else {
+                       /* Record that at least one error has happened. */
+                       if (st->global_error == 0)
+                               st->global_error = 1;
+               }
        }
        st->va += PAGE_SIZE;
 
        return 0;
 }
 
-static int mmap_return_errors(void *data, void *state)
+static int mmap_return_errors_v1(void *data, void *state)
 {
        xen_pfn_t *mfnp = data;
        struct mmap_batch_state *st = state;
-
-       return put_user(*mfnp, st->user++);
+       int err = *(st->err++);
+
+       /*
+        * V1 encodes the error codes in the 32bit top nibble of the
+        * mfn (with its known limitations vis-a-vis 64 bit callers).
+        */
+       *mfnp |= (err == -ENOENT) ?
+                               PRIVCMD_MMAPBATCH_PAGED_ERROR :
+                               PRIVCMD_MMAPBATCH_MFN_ERROR;
+       return __put_user(*mfnp, st->user_mfn++);
 }
 
 static struct vm_operations_struct privcmd_vm_ops;
 
-static long privcmd_ioctl_mmap_batch(void __user *udata)
+static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
 {
        int ret;
-       struct privcmd_mmapbatch m;
+       struct privcmd_mmapbatch_v2 m;
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long nr_pages;
        LIST_HEAD(pagelist);
+       int *err_array = NULL;
        struct mmap_batch_state state;
 
        if (!xen_initial_domain())
                return -EPERM;
 
-       if (copy_from_user(&m, udata, sizeof(m)))
-               return -EFAULT;
+       switch (version) {
+       case 1:
+               if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch)))
+                       return -EFAULT;
+               /* Returns per-frame error in m.arr. */
+               m.err = NULL;
+               if (!access_ok(VERIFY_WRITE, m.arr, m.num * sizeof(*m.arr)))
+                       return -EFAULT;
+               break;
+       case 2:
+               if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch_v2)))
+                       return -EFAULT;
+               /* Returns per-frame error code in m.err. */
+               if (!access_ok(VERIFY_WRITE, m.err, m.num * (sizeof(*m.err))))
+                       return -EFAULT;
+               break;
+       default:
+               return -EINVAL;
+       }
 
        nr_pages = m.num;
        if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT)))
                return -EINVAL;
 
-       ret = gather_array(&pagelist, m.num, sizeof(xen_pfn_t),
-                          m.arr);
+       ret = gather_array(&pagelist, m.num, sizeof(xen_pfn_t), m.arr);
 
-       if (ret || list_empty(&pagelist))
+       if (ret)
                goto out;
+       if (list_empty(&pagelist)) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       err_array = kcalloc(m.num, sizeof(int), GFP_KERNEL);
+       if (err_array == NULL) {
+               ret = -ENOMEM;
+               goto out;
+       }
 
        down_write(&mm->mmap_sem);
 
@@ -315,24 +371,37 @@ static long privcmd_ioctl_mmap_batch(void __user *udata)
                goto out;
        }
 
-       state.domain = m.dom;
-       state.vma = vma;
-       state.va = m.addr;
-       state.err = 0;
+       state.domain        = m.dom;
+       state.vma           = vma;
+       state.va            = m.addr;
+       state.global_error  = 0;
+       state.err           = err_array;
 
-       ret = traverse_pages(m.num, sizeof(xen_pfn_t),
-                            &pagelist, mmap_batch_fn, &state);
+       /* mmap_batch_fn guarantees ret == 0 */
+       BUG_ON(traverse_pages(m.num, sizeof(xen_pfn_t),
+                            &pagelist, mmap_batch_fn, &state));
 
        up_write(&mm->mmap_sem);
 
-       if (state.err > 0) {
-               state.user = m.arr;
+       if (state.global_error && (version == 1)) {
+               /* Write back errors in second pass. */
+               state.user_mfn = (xen_pfn_t *)m.arr;
+               state.err      = err_array;
                ret = traverse_pages(m.num, sizeof(xen_pfn_t),
-                              &pagelist,
-                              mmap_return_errors, &state);
+                                    &pagelist, mmap_return_errors_v1, &state);
+       } else if (version == 2) {
+               ret = __copy_to_user(m.err, err_array, m.num * sizeof(int));
+               if (ret)
+                       ret = -EFAULT;
        }
 
+       /* If we have not had any EFAULT-like global errors then set the global
+        * error to -ENOENT if necessary. */
+       if ((ret == 0) && (state.global_error == -ENOENT))
+               ret = -ENOENT;
+
 out:
+       kfree(err_array);
        free_page_list(&pagelist);
 
        return ret;
@@ -354,7 +423,11 @@ static long privcmd_ioctl(struct file *file,
                break;
 
        case IOCTL_PRIVCMD_MMAPBATCH:
-               ret = privcmd_ioctl_mmap_batch(udata);
+               ret = privcmd_ioctl_mmap_batch(udata, 1);
+               break;
+
+       case IOCTL_PRIVCMD_MMAPBATCH_V2:
+               ret = privcmd_ioctl_mmap_batch(udata, 2);
                break;
 
        default:
@@ -380,10 +453,6 @@ static struct vm_operations_struct privcmd_vm_ops = {
 
 static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
 {
-       /* Unsupported for auto-translate guests. */
-       if (xen_feature(XENFEAT_auto_translated_physmap))
-               return -ENOSYS;
-
        /* DONTCOPY is essential for Xen because copy_page_range doesn't know
         * how to recreate these mappings */
        vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY | VM_PFNMAP;
index 4d519488d3045355672b8f48e3c1e2eb4ac64864..58db6df866ef3338f6fc787acd552fe03d0a0b69 100644 (file)
@@ -52,7 +52,7 @@ static unsigned long xen_io_tlb_nslabs;
  * Quick lookup value of the bus address of the IOTLB.
  */
 
-u64 start_dma_addr;
+static u64 start_dma_addr;
 
 static dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
 {
@@ -144,31 +144,72 @@ xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
        } while (i < nslabs);
        return 0;
 }
+static unsigned long xen_set_nslabs(unsigned long nr_tbl)
+{
+       if (!nr_tbl) {
+               xen_io_tlb_nslabs = (64 * 1024 * 1024 >> IO_TLB_SHIFT);
+               xen_io_tlb_nslabs = ALIGN(xen_io_tlb_nslabs, IO_TLB_SEGSIZE);
+       } else
+               xen_io_tlb_nslabs = nr_tbl;
 
-void __init xen_swiotlb_init(int verbose)
+       return xen_io_tlb_nslabs << IO_TLB_SHIFT;
+}
+
+enum xen_swiotlb_err {
+       XEN_SWIOTLB_UNKNOWN = 0,
+       XEN_SWIOTLB_ENOMEM,
+       XEN_SWIOTLB_EFIXUP
+};
+
+static const char *xen_swiotlb_error(enum xen_swiotlb_err err)
+{
+       switch (err) {
+       case XEN_SWIOTLB_ENOMEM:
+               return "Cannot allocate Xen-SWIOTLB buffer\n";
+       case XEN_SWIOTLB_EFIXUP:
+               return "Failed to get contiguous memory for DMA from Xen!\n"\
+                   "You either: don't have the permissions, do not have"\
+                   " enough free memory under 4GB, or the hypervisor memory"\
+                   " is too fragmented!";
+       default:
+               break;
+       }
+       return "";
+}
+int __ref xen_swiotlb_init(int verbose, bool early)
 {
-       unsigned long bytes;
+       unsigned long bytes, order;
        int rc = -ENOMEM;
-       unsigned long nr_tbl;
-       char *m = NULL;
+       enum xen_swiotlb_err m_ret = XEN_SWIOTLB_UNKNOWN;
        unsigned int repeat = 3;
 
-       nr_tbl = swiotlb_nr_tbl();
-       if (nr_tbl)
-               xen_io_tlb_nslabs = nr_tbl;
-       else {
-               xen_io_tlb_nslabs = (64 * 1024 * 1024 >> IO_TLB_SHIFT);
-               xen_io_tlb_nslabs = ALIGN(xen_io_tlb_nslabs, IO_TLB_SEGSIZE);
-       }
+       xen_io_tlb_nslabs = swiotlb_nr_tbl();
 retry:
-       bytes = xen_io_tlb_nslabs << IO_TLB_SHIFT;
-
+       bytes = xen_set_nslabs(xen_io_tlb_nslabs);
+       order = get_order(xen_io_tlb_nslabs << IO_TLB_SHIFT);
        /*
         * Get IO TLB memory from any location.
         */
-       xen_io_tlb_start = alloc_bootmem_pages(PAGE_ALIGN(bytes));
+       if (early)
+               xen_io_tlb_start = alloc_bootmem_pages(PAGE_ALIGN(bytes));
+       else {
+#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
+#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
+               while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
+                       xen_io_tlb_start = (void *)__get_free_pages(__GFP_NOWARN, order);
+                       if (xen_io_tlb_start)
+                               break;
+                       order--;
+               }
+               if (order != get_order(bytes)) {
+                       pr_warn("Warning: only able to allocate %ld MB "
+                               "for software IO TLB\n", (PAGE_SIZE << order) >> 20);
+                       xen_io_tlb_nslabs = SLABS_PER_PAGE << order;
+                       bytes = xen_io_tlb_nslabs << IO_TLB_SHIFT;
+               }
+       }
        if (!xen_io_tlb_start) {
-               m = "Cannot allocate Xen-SWIOTLB buffer!\n";
+               m_ret = XEN_SWIOTLB_ENOMEM;
                goto error;
        }
        xen_io_tlb_end = xen_io_tlb_start + bytes;
@@ -179,17 +220,22 @@ retry:
                               bytes,
                               xen_io_tlb_nslabs);
        if (rc) {
-               free_bootmem(__pa(xen_io_tlb_start), PAGE_ALIGN(bytes));
-               m = "Failed to get contiguous memory for DMA from Xen!\n"\
-                   "You either: don't have the permissions, do not have"\
-                   " enough free memory under 4GB, or the hypervisor memory"\
-                   "is too fragmented!";
+               if (early)
+                       free_bootmem(__pa(xen_io_tlb_start), PAGE_ALIGN(bytes));
+               else {
+                       free_pages((unsigned long)xen_io_tlb_start, order);
+                       xen_io_tlb_start = NULL;
+               }
+               m_ret = XEN_SWIOTLB_EFIXUP;
                goto error;
        }
        start_dma_addr = xen_virt_to_bus(xen_io_tlb_start);
-       swiotlb_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs, verbose);
-
-       return;
+       if (early) {
+               swiotlb_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs, verbose);
+               rc = 0;
+       } else
+               rc = swiotlb_late_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs);
+       return rc;
 error:
        if (repeat--) {
                xen_io_tlb_nslabs = max(1024UL, /* Min is 2MB */
@@ -198,10 +244,13 @@ error:
                      (xen_io_tlb_nslabs << IO_TLB_SHIFT) >> 20);
                goto retry;
        }
-       xen_raw_printk("%s (rc:%d)", m, rc);
-       panic("%s (rc:%d)", m, rc);
+       pr_err("%s (rc:%d)", xen_swiotlb_error(m_ret), rc);
+       if (early)
+               panic("%s (rc:%d)", xen_swiotlb_error(m_ret), rc);
+       else
+               free_pages((unsigned long)xen_io_tlb_start, order);
+       return rc;
 }
-
 void *
 xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
                           dma_addr_t *dma_handle, gfp_t flags,
@@ -466,14 +515,6 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
 }
 EXPORT_SYMBOL_GPL(xen_swiotlb_map_sg_attrs);
 
-int
-xen_swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
-                  enum dma_data_direction dir)
-{
-       return xen_swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL);
-}
-EXPORT_SYMBOL_GPL(xen_swiotlb_map_sg);
-
 /*
  * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
  * concerning calls here are the same as for swiotlb_unmap_page() above.
@@ -494,14 +535,6 @@ xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
 }
 EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_sg_attrs);
 
-void
-xen_swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
-                    enum dma_data_direction dir)
-{
-       return xen_swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL);
-}
-EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_sg);
-
 /*
  * Make physical memory consistent for a set of streaming mode DMA translations
  * after a transfer.
index fdb6d229c9bbf5e59198189f95e70502116a6ca2..5e5ad7e2885832f64f2131aa43aead50548cae0a 100644 (file)
@@ -114,7 +114,7 @@ static void xen_sysfs_version_destroy(void)
 
 /* UUID */
 
-static ssize_t uuid_show(struct hyp_sysfs_attr *attr, char *buffer)
+static ssize_t uuid_show_fallback(struct hyp_sysfs_attr *attr, char *buffer)
 {
        char *vm, *val;
        int ret;
@@ -135,6 +135,17 @@ static ssize_t uuid_show(struct hyp_sysfs_attr *attr, char *buffer)
        return ret;
 }
 
+static ssize_t uuid_show(struct hyp_sysfs_attr *attr, char *buffer)
+{
+       xen_domain_handle_t uuid;
+       int ret;
+       ret = HYPERVISOR_xen_version(XENVER_guest_handle, uuid);
+       if (ret)
+               return uuid_show_fallback(attr, buffer);
+       ret = sprintf(buffer, "%pU\n", uuid);
+       return ret;
+}
+
 HYPERVISOR_ATTR_RO(uuid);
 
 static int __init xen_sysfs_uuid_init(void)
index 89f264c67420c2448f9fe029e8193fa438369ae8..144564e5eb29e1ef59200fd079647656195280c8 100644 (file)
@@ -21,6 +21,7 @@
 #include <asm/xen/hypercall.h>
 #include <asm/xen/page.h>
 #include <asm/xen/hypervisor.h>
+#include <xen/tmem.h>
 
 #define TMEM_CONTROL               0
 #define TMEM_NEW_POOL              1
index b590ee067fcd3d3755efc35499f7bdee4151f41d..316df65163cfa5d6485a256edf2cdab24e8805d6 100644 (file)
@@ -98,7 +98,6 @@ static int push_cxx_to_hypervisor(struct acpi_processor *_pr)
 
                dst_cx->type = cx->type;
                dst_cx->latency = cx->latency;
-               dst_cx->power = cx->power;
 
                dst_cx->dpcnt = 0;
                set_xen_guest_handle(dst_cx->dp, NULL);
index 92ff01dbeb1036880dd747dae11d813a305dd569..961d664e2d2faacde4272fa83023919c6ebcf3ca 100644 (file)
@@ -362,6 +362,7 @@ static int __devinit pcistub_init_device(struct pci_dev *dev)
        else {
                dev_dbg(&dev->dev, "reseting (FLR, D3, etc) the device\n");
                __pci_reset_function_locked(dev);
+               pci_restore_state(dev);
        }
        /* Now disable the device (this also ensures some private device
         * data is setup before we export)
@@ -681,14 +682,14 @@ static pci_ers_result_t xen_pcibk_slot_reset(struct pci_dev *dev)
                dev_err(&dev->dev, DRV_NAME " device is not connected or owned"
                        " by HVM, kill it\n");
                kill_domain_by_device(psdev);
-               goto release;
+               goto end;
        }
 
        if (!test_bit(_XEN_PCIB_AERHANDLER,
                (unsigned long *)&psdev->pdev->sh_info->flags)) {
                dev_err(&dev->dev,
                        "guest with no AER driver should have been killed\n");
-               goto release;
+               goto end;
        }
        result = common_process(psdev, 1, XEN_PCI_OP_aer_slotreset, result);
 
@@ -698,9 +699,9 @@ static pci_ers_result_t xen_pcibk_slot_reset(struct pci_dev *dev)
                        "No AER slot_reset service or disconnected!\n");
                kill_domain_by_device(psdev);
        }
-release:
-       pcistub_device_put(psdev);
 end:
+       if (psdev)
+               pcistub_device_put(psdev);
        up_write(&pcistub_sem);
        return result;
 
@@ -739,14 +740,14 @@ static pci_ers_result_t xen_pcibk_mmio_enabled(struct pci_dev *dev)
                dev_err(&dev->dev, DRV_NAME " device is not connected or owned"
                        " by HVM, kill it\n");
                kill_domain_by_device(psdev);
-               goto release;
+               goto end;
        }
 
        if (!test_bit(_XEN_PCIB_AERHANDLER,
                (unsigned long *)&psdev->pdev->sh_info->flags)) {
                dev_err(&dev->dev,
                        "guest with no AER driver should have been killed\n");
-               goto release;
+               goto end;
        }
        result = common_process(psdev, 1, XEN_PCI_OP_aer_mmio, result);
 
@@ -756,9 +757,9 @@ static pci_ers_result_t xen_pcibk_mmio_enabled(struct pci_dev *dev)
                        "No AER mmio_enabled service or disconnected!\n");
                kill_domain_by_device(psdev);
        }
-release:
-       pcistub_device_put(psdev);
 end:
+       if (psdev)
+               pcistub_device_put(psdev);
        up_write(&pcistub_sem);
        return result;
 }
@@ -797,7 +798,7 @@ static pci_ers_result_t xen_pcibk_error_detected(struct pci_dev *dev,
                dev_err(&dev->dev, DRV_NAME " device is not connected or owned"
                        " by HVM, kill it\n");
                kill_domain_by_device(psdev);
-               goto release;
+               goto end;
        }
 
        /*Guest owns the device yet no aer handler regiested, kill guest*/
@@ -805,7 +806,7 @@ static pci_ers_result_t xen_pcibk_error_detected(struct pci_dev *dev,
                (unsigned long *)&psdev->pdev->sh_info->flags)) {
                dev_dbg(&dev->dev, "guest may have no aer driver, kill it\n");
                kill_domain_by_device(psdev);
-               goto release;
+               goto end;
        }
        result = common_process(psdev, error, XEN_PCI_OP_aer_detected, result);
 
@@ -815,9 +816,9 @@ static pci_ers_result_t xen_pcibk_error_detected(struct pci_dev *dev,
                        "No AER error_detected service or disconnected!\n");
                kill_domain_by_device(psdev);
        }
-release:
-       pcistub_device_put(psdev);
 end:
+       if (psdev)
+               pcistub_device_put(psdev);
        up_write(&pcistub_sem);
        return result;
 }
@@ -851,7 +852,7 @@ static void xen_pcibk_error_resume(struct pci_dev *dev)
                dev_err(&dev->dev, DRV_NAME " device is not connected or owned"
                        " by HVM, kill it\n");
                kill_domain_by_device(psdev);
-               goto release;
+               goto end;
        }
 
        if (!test_bit(_XEN_PCIB_AERHANDLER,
@@ -859,13 +860,13 @@ static void xen_pcibk_error_resume(struct pci_dev *dev)
                dev_err(&dev->dev,
                        "guest with no AER driver should have been killed\n");
                kill_domain_by_device(psdev);
-               goto release;
+               goto end;
        }
        common_process(psdev, 1, XEN_PCI_OP_aer_resume,
                       PCI_ERS_RESULT_RECOVERED);
-release:
-       pcistub_device_put(psdev);
 end:
+       if (psdev)
+               pcistub_device_put(psdev);
        up_write(&pcistub_sem);
        return;
 }
@@ -897,17 +898,41 @@ static inline int str_to_slot(const char *buf, int *domain, int *bus,
                              int *slot, int *func)
 {
        int err;
+       char wc = '*';
 
        err = sscanf(buf, " %x:%x:%x.%x", domain, bus, slot, func);
-       if (err == 4)
+       switch (err) {
+       case 3:
+               *func = -1;
+               err = sscanf(buf, " %x:%x:%x.%c", domain, bus, slot, &wc);
+               break;
+       case 2:
+               *slot = *func = -1;
+               err = sscanf(buf, " %x:%x:*.%c", domain, bus, &wc);
+               if (err >= 2)
+                       ++err;
+               break;
+       }
+       if (err == 4 && wc == '*')
                return 0;
        else if (err < 0)
                return -EINVAL;
 
        /* try again without domain */
        *domain = 0;
+       wc = '*';
        err = sscanf(buf, " %x:%x.%x", bus, slot, func);
-       if (err == 3)
+       switch (err) {
+       case 2:
+               *func = -1;
+               err = sscanf(buf, " %x:%x.%c", bus, slot, &wc);
+               break;
+       case 1:
+               *slot = *func = -1;
+               err = sscanf(buf, " %x:*.%c", bus, &wc) + 1;
+               break;
+       }
+       if (err == 3 && wc == '*')
                return 0;
 
        return -EINVAL;
@@ -930,6 +955,19 @@ static int pcistub_device_id_add(int domain, int bus, int slot, int func)
 {
        struct pcistub_device_id *pci_dev_id;
        unsigned long flags;
+       int rc = 0;
+
+       if (slot < 0) {
+               for (slot = 0; !rc && slot < 32; ++slot)
+                       rc = pcistub_device_id_add(domain, bus, slot, func);
+               return rc;
+       }
+
+       if (func < 0) {
+               for (func = 0; !rc && func < 8; ++func)
+                       rc = pcistub_device_id_add(domain, bus, slot, func);
+               return rc;
+       }
 
        pci_dev_id = kmalloc(sizeof(*pci_dev_id), GFP_KERNEL);
        if (!pci_dev_id)
@@ -952,15 +990,15 @@ static int pcistub_device_id_add(int domain, int bus, int slot, int func)
 static int pcistub_device_id_remove(int domain, int bus, int slot, int func)
 {
        struct pcistub_device_id *pci_dev_id, *t;
-       int devfn = PCI_DEVFN(slot, func);
        int err = -ENOENT;
        unsigned long flags;
 
        spin_lock_irqsave(&device_ids_lock, flags);
        list_for_each_entry_safe(pci_dev_id, t, &pcistub_device_ids,
                                 slot_list) {
-               if (pci_dev_id->domain == domain
-                   && pci_dev_id->bus == bus && pci_dev_id->devfn == devfn) {
+               if (pci_dev_id->domain == domain && pci_dev_id->bus == bus
+                   && (slot < 0 || PCI_SLOT(pci_dev_id->devfn) == slot)
+                   && (func < 0 || PCI_FUNC(pci_dev_id->devfn) == func)) {
                        /* Don't break; here because it's possible the same
                         * slot could be in the list more than once
                         */
@@ -987,7 +1025,7 @@ static int pcistub_reg_add(int domain, int bus, int slot, int func, int reg,
        struct config_field *field;
 
        psdev = pcistub_device_find(domain, bus, slot, func);
-       if (!psdev || !psdev->dev) {
+       if (!psdev) {
                err = -ENODEV;
                goto out;
        }
@@ -1011,6 +1049,8 @@ static int pcistub_reg_add(int domain, int bus, int slot, int func, int reg,
        if (err)
                kfree(field);
 out:
+       if (psdev)
+               pcistub_device_put(psdev);
        return err;
 }
 
@@ -1115,10 +1155,9 @@ static ssize_t pcistub_irq_handler_switch(struct device_driver *drv,
 
        err = str_to_slot(buf, &domain, &bus, &slot, &func);
        if (err)
-               goto out;
+               return err;
 
        psdev = pcistub_device_find(domain, bus, slot, func);
-
        if (!psdev)
                goto out;
 
@@ -1134,6 +1173,8 @@ static ssize_t pcistub_irq_handler_switch(struct device_driver *drv,
        if (dev_data->isr_on)
                dev_data->ack_intr = 1;
 out:
+       if (psdev)
+               pcistub_device_put(psdev);
        if (!err)
                err = count;
        return err;
@@ -1216,15 +1257,16 @@ static ssize_t permissive_add(struct device_driver *drv, const char *buf,
        err = str_to_slot(buf, &domain, &bus, &slot, &func);
        if (err)
                goto out;
+       if (slot < 0 || func < 0) {
+               err = -EINVAL;
+               goto out;
+       }
        psdev = pcistub_device_find(domain, bus, slot, func);
        if (!psdev) {
                err = -ENODEV;
                goto out;
        }
-       if (!psdev->dev) {
-               err = -ENODEV;
-               goto release;
-       }
+
        dev_data = pci_get_drvdata(psdev->dev);
        /* the driver data for a device should never be null at this point */
        if (!dev_data) {
@@ -1297,17 +1339,51 @@ static int __init pcistub_init(void)
 
        if (pci_devs_to_hide && *pci_devs_to_hide) {
                do {
+                       char wc = '*';
+
                        parsed = 0;
 
                        err = sscanf(pci_devs_to_hide + pos,
                                     " (%x:%x:%x.%x) %n",
                                     &domain, &bus, &slot, &func, &parsed);
-                       if (err != 4) {
+                       switch (err) {
+                       case 3:
+                               func = -1;
+                               err = sscanf(pci_devs_to_hide + pos,
+                                            " (%x:%x:%x.%c) %n",
+                                            &domain, &bus, &slot, &wc,
+                                            &parsed);
+                               break;
+                       case 2:
+                               slot = func = -1;
+                               err = sscanf(pci_devs_to_hide + pos,
+                                            " (%x:%x:*.%c) %n",
+                                            &domain, &bus, &wc, &parsed) + 1;
+                               break;
+                       }
+
+                       if (err != 4 || wc != '*') {
                                domain = 0;
+                               wc = '*';
                                err = sscanf(pci_devs_to_hide + pos,
                                             " (%x:%x.%x) %n",
                                             &bus, &slot, &func, &parsed);
-                               if (err != 3)
+                               switch (err) {
+                               case 2:
+                                       func = -1;
+                                       err = sscanf(pci_devs_to_hide + pos,
+                                                    " (%x:%x.%c) %n",
+                                                    &bus, &slot, &wc,
+                                                    &parsed);
+                                       break;
+                               case 1:
+                                       slot = func = -1;
+                                       err = sscanf(pci_devs_to_hide + pos,
+                                                    " (%x:*.%c) %n",
+                                                    &bus, &wc, &parsed) + 1;
+                                       break;
+                               }
+                               if (err != 3 || wc != '*')
                                        goto parse_error;
                        }
 
index b3e146edb51d947b3bb7d22b63e52fac32adcbc3..bcf3ba4a6ec1543a7500ef0de36269dec1ff728d 100644 (file)
@@ -490,8 +490,7 @@ static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev,
 
        op.host_addr = arbitrary_virt_to_machine(pte).maddr;
 
-       if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
-               BUG();
+       gnttab_batch_map(&op, 1);
 
        if (op.status != GNTST_okay) {
                free_vm_area(area);
@@ -572,8 +571,7 @@ int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref,
        gnttab_set_map_op(&op, (unsigned long)vaddr, GNTMAP_host_map, gnt_ref,
                          dev->otherend_id);
 
-       if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
-               BUG();
+       gnttab_batch_map(&op, 1);
 
        if (op.status != GNTST_okay) {
                xenbus_dev_fatal(dev, op.status,
index 52fe7ad076669ce40992d0d423c26c37f1568819..c5aa55c5d371e495e1a5c4a39051225d5fc27dd7 100644 (file)
@@ -224,7 +224,7 @@ int xb_init_comms(void)
                int err;
                err = bind_evtchn_to_irqhandler(xen_store_evtchn, wake_waiting,
                                                0, "xenbus", &xb_waitq);
-               if (err <= 0) {
+               if (err < 0) {
                        printk(KERN_ERR "XENBUS request irq failed %i\n", err);
                        return err;
                }
index be738c43104bea15dfca7422055d2f0cd79ecf9f..d730008007624b4227780457a96c77f8662c248e 100644 (file)
@@ -107,7 +107,7 @@ static int xenbus_backend_mmap(struct file *file, struct vm_area_struct *vma)
        return 0;
 }
 
-const struct file_operations xenbus_backend_fops = {
+static const struct file_operations xenbus_backend_fops = {
        .open = xenbus_backend_open,
        .mmap = xenbus_backend_mmap,
        .unlocked_ioctl = xenbus_backend_ioctl,
index b793723e724de767ad7cf9abac808548d78a21ba..038b71dbf03c980fd9024284f2e5cca1bfb3db95 100644 (file)
@@ -324,8 +324,8 @@ static int cmp_dev(struct device *dev, void *data)
        return 0;
 }
 
-struct xenbus_device *xenbus_device_find(const char *nodename,
-                                        struct bus_type *bus)
+static struct xenbus_device *xenbus_device_find(const char *nodename,
+                                               struct bus_type *bus)
 {
        struct xb_find_info info = { .dev = NULL, .nodename = nodename };
 
@@ -719,17 +719,47 @@ static int __init xenstored_local_init(void)
        return err;
 }
 
+enum xenstore_init {
+       UNKNOWN,
+       PV,
+       HVM,
+       LOCAL,
+};
 static int __init xenbus_init(void)
 {
        int err = 0;
+       enum xenstore_init usage = UNKNOWN;
+       uint64_t v = 0;
 
        if (!xen_domain())
                return -ENODEV;
 
        xenbus_ring_ops_init();
 
-       if (xen_hvm_domain()) {
-               uint64_t v = 0;
+       if (xen_pv_domain())
+               usage = PV;
+       if (xen_hvm_domain())
+               usage = HVM;
+       if (xen_hvm_domain() && xen_initial_domain())
+               usage = LOCAL;
+       if (xen_pv_domain() && !xen_start_info->store_evtchn)
+               usage = LOCAL;
+       if (xen_pv_domain() && xen_start_info->store_evtchn)
+               xenstored_ready = 1;
+
+       switch (usage) {
+       case LOCAL:
+               err = xenstored_local_init();
+               if (err)
+                       goto out_error;
+               xen_store_interface = mfn_to_virt(xen_store_mfn);
+               break;
+       case PV:
+               xen_store_evtchn = xen_start_info->store_evtchn;
+               xen_store_mfn = xen_start_info->store_mfn;
+               xen_store_interface = mfn_to_virt(xen_store_mfn);
+               break;
+       case HVM:
                err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v);
                if (err)
                        goto out_error;
@@ -738,18 +768,12 @@ static int __init xenbus_init(void)
                if (err)
                        goto out_error;
                xen_store_mfn = (unsigned long)v;
-               xen_store_interface = ioremap(xen_store_mfn << PAGE_SHIFT, PAGE_SIZE);
-       } else {
-               xen_store_evtchn = xen_start_info->store_evtchn;
-               xen_store_mfn = xen_start_info->store_mfn;
-               if (xen_store_evtchn)
-                       xenstored_ready = 1;
-               else {
-                       err = xenstored_local_init();
-                       if (err)
-                               goto out_error;
-               }
-               xen_store_interface = mfn_to_virt(xen_store_mfn);
+               xen_store_interface =
+                       ioremap(xen_store_mfn << PAGE_SHIFT, PAGE_SIZE);
+               break;
+       default:
+               pr_warn("Xenstore state unknown\n");
+               break;
        }
 
        /* Initialize the interface to xenstore. */
index a31b54d488398675fc01eaa2878f0a21ec49fe46..3159a37d966d57755b6bd6d0d2a19379309c7e6e 100644 (file)
@@ -21,6 +21,7 @@
 #include <xen/xenbus.h>
 #include <xen/events.h>
 #include <xen/page.h>
+#include <xen/xen.h>
 
 #include <xen/platform_pci.h>
 
index bce15cf4a8df1708a5237a12e7223ca56b093e8f..131dec04794ee3ee1af70c2fd3f1dd2220774070 100644 (file)
@@ -44,6 +44,7 @@
 #include <linux/rwsem.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
+#include <asm/xen/hypervisor.h>
 #include <xen/xenbus.h>
 #include <xen/xen.h>
 #include "xenbus_comms.h"
@@ -622,7 +623,7 @@ static void xs_reset_watches(void)
 {
        int err, supported = 0;
 
-       if (!xen_hvm_domain())
+       if (!xen_hvm_domain() || xen_initial_domain())
                return;
 
        err = xenbus_scanf(XBT_NIL, "control",
index a84b53c01436334b68688dca58e75bd54462b023..459b9ac45cf5e267ca6d4ae3abc1cc895e8d9127 100644 (file)
@@ -30,7 +30,8 @@ static struct inode *xenfs_make_inode(struct super_block *sb, int mode)
 
        if (ret) {
                ret->i_mode = mode;
-               ret->i_uid = ret->i_gid = 0;
+               ret->i_uid = GLOBAL_ROOT_UID;
+               ret->i_gid = GLOBAL_ROOT_GID;
                ret->i_blocks = 0;
                ret->i_atime = ret->i_mtime = ret->i_ctime = CURRENT_TIME;
        }
index fdc9ff045ef8c3073519115b0e383bd65e97c333..eeb14030d8a24e67f113d65f1da1578d04b3de89 100644 (file)
@@ -42,7 +42,6 @@ fw-shipped-$(CONFIG_BNX2) += bnx2/bnx2-mips-09-6.2.1a.fw \
 fw-shipped-$(CONFIG_CASSINI) += sun/cassini.bin
 fw-shipped-$(CONFIG_CHELSIO_T3) += cxgb3/t3b_psram-1.1.0.bin \
                                   cxgb3/t3c_psram-1.1.0.bin \
-                                  cxgb3/t3fw-7.10.0.bin \
                                   cxgb3/ael2005_opt_edc.bin \
                                   cxgb3/ael2005_twx_edc.bin \
                                   cxgb3/ael2020_twx_edc.bin
diff --git a/firmware/cxgb3/t3fw-7.10.0.bin.ihex b/firmware/cxgb3/t3fw-7.10.0.bin.ihex
deleted file mode 100644 (file)
index 96399d8..0000000
+++ /dev/null
@@ -1,1935 +0,0 @@
-:1000000060007400200380002003700000001000D6
-:1000100000002000E100028400070000E1000288E7
-:1000200000010000E0000000E00000A0010000006E
-:1000300044444440E3000183200200002001E0002A
-:100040002001FF101FFFD0001FFFC000E300043C91
-:100050000200000020006C841FFFC2A020006CCCB6
-:100060001FFFC2A420006D0C1FFFC2A820006D80DE
-:100070001FFFC2AC200003C0C00000E43100EA3121
-:1000800000A13100A03103020002ED306E2A05000C
-:10009000ED3100020002160012FFDBC03014FFDA5F
-:1000A000D30FD30FD30F03431F244C107249F0D347
-:1000B0000FD30FD30F12FFD5230A00240A00D30F4A
-:1000C000D30FD30F03431F244C107249F0D30FD327
-:1000D0000FD30F14FFCE03421F14FFCB03421F1296
-:1000E000FFCCC0302D37302D37342D37382D373CED
-:1000F000233D017233ED00020012FFC4C0302F37E0
-:10010000002F37102F37202F3730233D017233ED6A
-:1001100000020012FFBEC0302737002737102737F4
-:1001200020273730233D017233ED03020012FFB95F
-:1001300013FFBA0C0200932012FFB913FFB90C028F
-:1001400000932012FFB8C0319320822012FFB71312
-:10015000FFB7932012FFB715FFB316FFB6C030D715
-:100160002005660160001B00000000000000000088
-:10017000043605000200D30FD30F05330C6E3B1479
-:100180000747140704437631E604360505330C6F40
-:100190003BED00020012FFA615FFA3230A00D720A3
-:1001A000070443043E0505330C0747146F3BF00377
-:1001B000020012FFA1C03014FFA1D30FD30FD30F41
-:1001C0009340B4447249F2D30FD30FD30F14FF9B63
-:1001D000834014FF9B834012FF9B230A0014FF9A65
-:1001E000D30FD30FD30F9340B4447249F2D30FD33C
-:1001F0000FD30F14FF95834012FF95C92F832084DE
-:10020000218522BC22743B0F8650B4559630B433FE
-:100210007433F463FFE60000653FE1655FDE12FFC3
-:100220007C230A0028374028374428374828374C91
-:10023000233D017233ED03020000020012FF7AC079
-:1002400032032E0503020012FF7813FF819320C0B2
-:1002500011014931004831010200C00014FF7E0441
-:10026000D23115FF7D945014FF7D04D33115FF7CEE
-:10027000945014FF7C04D43115FF7C24560014FFE5
-:100280007B04D53115FF7B24560010FF7A03000054
-:10029000000000000000000000000000000000005E
-:1002A000000000000000000000000000000000004E
-:1002B000000000000000000000000000000000003E
-:1002C000000000000000000000000000000000002E
-:1002D000000000000000000000000000000000001E
-:1002E000000000000000000000000000000000000E
-:1002F00000000000000000000000000000000000FE
-:1003000000000000000000000000000000000000ED
-:1003100000000000000000000000000000000000DD
-:1003200000000000000000000000000000000000CD
-:1003300000000000000000000000000000000000BD
-:1003400000000000000000000000000000000000AD
-:10035000000000000000000000000000000000009D
-:10036000000000000000000000000000000000008D
-:10037000000000000000000000000000000000007D
-:10038000000000000000000000000000000000006D
-:10039000000000000000000000000000000000005D
-:1003A000000000000000000000000000000000004D
-:1003B000000000000000000000000000000000003D
-:1003C000000000000000000000000000000000002D
-:1003D000000000000000000000000000000000001D
-:1003E000000000000000000000000000000000000D
-:1003F00000000000000000000000000000000000FD
-:1004000000000000000000000000000000000000EC
-:1004100000000000000000000000000000000000DC
-:1004200063FFFC000000000000000000000000006E
-:100430000000000000000000000000001FFC0000A1
-:100440001FFC0000E30005C81FFC00001FFC0000AB
-:10045000E30005C81FFC00001FFC0000E30005C806
-:100460001FFFC0001FFFC000E30005C81FFFC00042
-:100470001FFFC018E30005C81FFFC0181FFFC018EA
-:10048000E30005E01FFFC0181FFFC294E30005E072
-:100490001FFFC2941FFFC294E300085C1FFFC2A0AD
-:1004A0001FFFC59CE300085C200000002000016ADB
-:1004B000E3000B582000018020000180E3000CC401
-:1004C0002000020020000203E3000CC42000021CF4
-:1004D00020000220E3000CC8200002202000022699
-:1004E000E3000CCC2000023C20000240E3000CD4CE
-:1004F0002000024020000249E3000CD82000024CFA
-:1005000020000250E3000CE42000025020000259B9
-:10051000E3000CE82000025C20000260E3000CF421
-:100520002000026020000269E3000CF82000026C49
-:1005300020000270E3000D04200002702000027908
-:10054000E3000D082000028C2000028CE3000D1453
-:100550002000029020000293E3000D14200002AC62
-:10056000200002B0E3000D18200002D0200002F2AB
-:10057000E3000D1C200003B0200003B0E3000D4099
-:10058000200003B0200003B0E3000D40200003B0C2
-:10059000200003B0E3000D40200003B0200003B0B2
-:1005A000E3000D40200003B020006EA4E3000D40E6
-:1005B00020006EA420006EA4E30078340000000048
-:1005C00000000000000000001FFC00001FFC0000F5
-:1005D0001FFFC5A01FFFC69020006EA820006EA8B8
-:1005E000DEFFFE000000080CDEADBEEF1FFFC2B054
-:1005F0001FFCFE001FFFC0A41FFFC5D0300000007D
-:10060000003FFFFF8040000010000000080FFFFFC8
-:100610001FFFC27D000FFFFF804FFFFF8000000023
-:1006200000000880B000000560500000600000007D
-:1006300040000011350000004100000010000001E2
-:100640002000000000001000400000000500000035
-:10065000800000190400000000000800E100020012
-:1006600010000005806000007000000020000009FC
-:10067000001FF8008000001EA0000000F80000002D
-:1006800007FFFFFF080000001800000001008001C4
-:10069000420000001FFFC22D1FFFC0EC00010080C0
-:1006A000604000001A0000000C0000001000000A6A
-:1006B000000030000001000080000018FC00000075
-:1006C0008000000100004000600008008000001C65
-:1006D0008000001A030000008000040004030403EB
-:1006E00050000003FFFFBFFF1FFFC3E400000FFF28
-:1006F000FFFFF000000016D00000FFF7A50000008B
-:100700001FFFC4C01FFFC4710001000800000B20C0
-:10071000202FFF801FFFC46500002C00FFFEFFF8A4
-:1007200000FFFFFF1FFFC58800002000FFFFDFFF65
-:100730000000FFEF010011001FFFC3E21FFFC5A073
-:10074000FFFFEFFF0000FFFB1FFFC6501FFFBEB003
-:10075000FFFFF7FF1FFFC0740000FFFD1FFFC64033
-:100760000001FBD01FFFC5C01FFFC6801FFFC5A132
-:10077000E0FFFE001FFFC5B0000080001FFFC54C5A
-:100780001FFFC5C41FFFC0781FFFC4E41FFCFFD8B4
-:10079000000100817FFFFFFFE1000600000027103D
-:1007A0001FFCFE301FFCFE701FFFC5481FFFC56009
-:1007B0000003D0901FFFC5742B5063802B507980AD
-:1007C0002B5090802B50A6801FFFC4790100110F81
-:1007D000202FFE0020300080202FFF000000FFFFB0
-:1007E0000001FFF82B50B2002B50B208000100109E
-:1007F0002B50B1802B50B2802B50BA000001001159
-:100800002B50BD282B50BC802B50BDA020300000A9
-:10081000DFFFFE005000000200C0000002000000E8
-:10082000FFFFF7F41FFFC07C000FF800044000003A
-:10083000001000000C4000001C400000E00000A080
-:100840001FFFC5501FFD00081FFFC5641FFFC578AF
-:100850001FFFC58CE1000690E10006EC00000000DF
-:100860000000000000000000000000000100000087
-:100870000000000000000000000000002010004008
-:10088000201000402010004020140080200C0000A8
-:10089000200C0000200C00002010004020140080DC
-:1008A0002014008020140080201800C0201C0100AB
-:1008B000201C0100201C010020200140201800C045
-:1008C000201800C0201800C0201C0100201800C003
-:1008D000201800C0201800C0201C0100202001406A
-:1008E00020200140202001402020094020200940F4
-:1008F000202009402020094020240980FFFFFFFF1D
-:10090000FFFFFFFFFFFFFFFF0000000000000000EF
-:1009100000000000000000000000000020005588DA
-:1009200020005458200055882000558820005394FA
-:100930002000539420005394200051D4200051D41F
-:10094000200051CC2000513820004FE020004DC045
-:1009500020004B94000000000000000020005558CB
-:1009600020005424200054C8200054C82000527C89
-:100970002000527C2000527C2000527C2000527CBF
-:10098000200051C42000527C20004F0020004D70F8
-:1009900020004B40000000000000000020000BF091
-:1009A00020003ADC200004C02000473020000BE883
-:1009B000200041F4200003F0200046F020004B1CF2
-:1009C00020003F0020003E1C20003A58200038E85C
-:1009D00020003658200031B820003C7820002DD06F
-:1009E0002000286420006828200023F0200020D068
-:1009F0002000207C20001D68200018602000158841
-:100A000020000E5420000C3420001134200013204C
-:100A1000200043EC20003EB420000BF8200004C06E
-:100A200000000000000000000000000000000000C6
-:100A300000000000000000000000000000000000B6
-:100A400000000000000000000000000000000000A6
-:100A50000000000000000000000000000000000096
-:100A60000000000000000000000000000000000086
-:100A70000000000000000000000000000000000076
-:100A80000000000000000000000000000000000066
-:100A90000000000000000000000000000000000056
-:100AA0003264000000000000326400006400640052
-:100AB00064006400640064006400640000000000DE
-:100AC0000000000000000000000000000000000026
-:100AD0000000000000000000000000000000000016
-:100AE0000000000000000000000000000000000006
-:100AF00000000000000000000000000000000000F6
-:100B000000000000000010000000000000000000D5
-:100B100000000000000000000000000000001000C5
-:100B200000000000000000000000000000000000C5
-:100B300000432380000000000000000000000000CF
-:100B400000000000000000000000000000000000A5
-:100B50000000000000000000005C94015D94025E53
-:100B600094035F94004300000000000000000000B8
-:100B70000000000000000000000000000000000075
-:100B80000000000000000000000000000000000065
-:100B90000000000000000000005C90015D90025E1B
-:100BA00090035F9000530000000000000000000070
-:100BB0000000000000000000000000000000000035
-:100BC0000000000000000000000000000000000025
-:100BD0000000000000000000009C94001D90019D9A
-:100BE00094029E94039F94040894050994060A9421
-:100BF000070B94004300000000000000000000000C
-:100C000000000000000000000000000000000000E4
-:100C10000000000000000000009C90019D90029EDA
-:100C200090071D90039F90047890057990067A9024
-:100C3000077B90005300000000000000000000004F
-:100C400000000000000000000000000000000000A4
-:100C5000000000000000000000DC94001D9001DD99
-:100C60009402DE9403DF940404940505940606942C
-:100C70000707940808940909940A0A940B0B940036
-:100C80004300000000000000000000000000000021
-:100C9000000000000000000000DC9001DD9002DE9A
-:100CA000900B1D9003DF9004B49005B59006B690AC
-:100CB00007B79008B89009B9900ABA900BBB90009A
-:100CC0005300000063FFFC0020006C6010FFFF0A6F
-:100CD0000000000020006C8400D23110FFFE0A00EA
-:100CE0000000000020006CCC00D33110FFFE0A0091
-:100CF0000000000020006D0C00D43110FFFE0A003F
-:100D00000000000020006D8000D53110FFFE0A00B9
-:100D10000000000063FFFC00E00000A012FFF7826B
-:100D200020028257C82163FFFC12FFF303E830045E
-:100D3000EE3005C03093209421952263FFFC000023
-:100D40001FFFD000000400201FFFC5A01FFFC6909A
-:100D5000200A0011FFFB13FFFB03E631010200161E
-:100D6000FFFA17FFFAD30F776B069060B4667763CC
-:100D7000F85415F3541AA50F140063FFF90000008E
-:100D80006C1004C020D10F006C1004C0C71AEF060D
-:100D9000D830BC2BD72085720D4211837105450BCD
-:100DA000957202330C2376017B3B04233D0893713B
-:100DB000A32D12EEFE19EEFEA2767D632C2E0A0004
-:100DC000088202280A01038E380E0E42C8EE29A6B8
-:100DD0007E6D4A0500208800308C8271D10FC0F0F2
-:100DE000028F387FC0EA63FFE400C0F1C050037E89
-:100DF0000CA2EE0E3D1208820203F538050542CB27
-:100E00005729A67E2FDC100F4F366DFA050020887B
-:100E100000308CBC75C03008E208280A0105833810
-:100E2000030342C93E29A67E0D480CD30F6D8A05E7
-:100E300000208800B08C8271D10FC05008F5387541
-:100E4000C0C163FFBBC06002863876C0DA63FFD4DE
-:100E50006C101216EED8C1F9C1E8C1C72B221E28AA
-:100E6000221DC0D07B81352920060BB702299CFAB0
-:100E7000655008282072288CFF2824726491642A07
-:100E8000B0000CA80C64816F0EA90C6492BB7FA10A
-:100E90003FC1CE7CA13669AC336000370029200603
-:100EA000D7D0299CFACC57282072288CFF2824728E
-:100EB0006491392AD0000CA80C6481680EA90C64D6
-:100EC000931F7FA10BC1CE7CA10268AC06C020D1CC
-:100ED0000F2D25028A32C0900A6E5065E5B529248F
-:100EE00067090F4765F5B12C200C1FEEB30CCE112E
-:100EF000AFEE29E286B44879830260058219EEAF2D
-:100F000009C90A2992A36890078F2009FF0C65F58B
-:100F10006E2FE28564F56865559628221D7B810554
-:100F2000D9B060000200C0908B9417EEA50B881416
-:100F300087740B0B47A87718EEA309BB100877023C
-:100F400097F018EEA117EEA208A8010B8802074738
-:100F5000021BEE9E97F10B880298F22790232B90AC
-:100F60002204781006BB1007471208BB0228902104
-:100F70000777100C88100788020B880217EE968BF3
-:100F80003307BB0187340B880298F3979997F48B4A
-:100F90009587399BF588968B3898F688979BF897B4
-:100FA000F998F717EE8D28E28507C7082D74CF084A
-:100FB000480B28E68565550F2B221E28221D7B89AC
-:100FC000022B0A0064BF052CB00728B000DA200607
-:100FD000880A28824CC0D10B8000DBA065AFE76394
-:100FE000FEEA0000292072659E946004E72A2072C0
-:100FF00065AEBF6004DE00002EB0032C2067D4E095
-:1010000065C1058A328C330AFF500C4554BC5564C7
-:10101000F4EB19EE72882A09A90109880C64821F71
-:10102000C0926000DD2ED0032A2067D4E065A0D8EE
-:101030008A328B330AFC500B4554BC5564C4BE192C
-:10104000EE67882A09A9017989D50BEA5064A4E3DF
-:101050000CEE11C0F02F16132E16168AE78CE82A14
-:1010600016128EE9DFC0AAEA7EAB01B1CF0BA85001
-:101070006583468837DBC0AE89991E789B022BCCEE
-:10108000012B161B29120E2B0A0029161A7FC307E3
-:101090007FC9027EAB01C0B165B49D8B352F0A00BC
-:1010A0002A0A007AC30564C3CB2F0A0165F4892B91
-:1010B00012162B1619005104C0C100CC1A2CCCFFFB
-:1010C0002C16170CFC132C16182B121A2A121BDCC8
-:1010D000505819B6C0D0C0902E5CF42C12172812AC
-:1010E000182F121B2A121A08FF010CAA01883407B4
-:1010F0004C0AAB8B2812192BC6162F86082A860994
-:101100002E74102924672E70038975B1EA2A74039E
-:10111000B09909490C659DB42B20672D250265B354
-:10112000FA2B221E2C221D7BC901C0B064BD9D2C50
-:10113000B00728B000DA2006880A28824CC0D10BFC
-:101140008000DBA065AFE763FD8289BAB199659045
-:101150009788341CEE2398BA8F331EEE1C0F4F5421
-:101160002FB42C8D2A8A320EDD020CAC017DC966AB
-:101170000A49516F92608A3375A65B2CB0130AED51
-:10118000510DCD010D0D410C0C417DC9492EB01200
-:10119000B0EE65E3C6C0D08E378CB88A368FB97C86
-:1011A000A3077AC9027EFB01C0D1CED988350AAD2A
-:1011B000020E8E0878EB022DAC0189B7DAC0AF9B26
-:1011C00079BB01B1CADCB0C0B07DA3077AD9027C7B
-:1011D000EB01C0B164B161C091292467C020D10F77
-:1011E00000008ADAB1AA64A0C02C20672D25026510
-:1011F000C3111DEDF68A321EEDFB0DAD010EDD0CA7
-:1012000065D28A0A4E516FE202600281C0902924A1
-:1012100067090F4765F2F828221D7B89022B0A0017
-:1012200064BCA92CB00728B000DA2006880A2882FE
-:101230004CC0D10B8000DBA065AFE763FC8E0000E3
-:101240000CE9506492ED0CEF11C080281611AFBF6D
-:101250002F16198EF88BF7DAE08FF92B1610ABFBEF
-:101260007FBB01B1EA0CA8506580D68837DCE0AFBF
-:1012700089991C789B022CEC012C161B29120C2C32
-:101280000A0029161A7AE3077AE9027FBB01C0C176
-:1012900065C2A58B352C0A002A0A007AE30564E1B1
-:1012A000CA2C0A0164CE0D60028E88341BEDCD98E5
-:1012B000DA8F331EEDC60F4F542FD42C8C2A8A326E
-:1012C0000ECC020BAB010CBB0C65BF0A0A49516E78
-:1012D000920263FF018A330AAB5064BEF92CD0132B
-:1012E0000AEE510ECE010E0E410C0C410ECC0C65D7
-:1012F000CEE42FD012B0FF65F26EC0B08E378CD81E
-:101300008A362FD2097CA3077AC9027EFB01C0B1BD
-:1013100065BEC38835DBA0AE8E78EB01B1AB89D753
-:10132000DAC0AF9D79DB01B1CAC0C07BA3077AB92F
-:10133000027DEB01C0C165CE9DC090292467C0200D
-:10134000D10F88378C3698140CE90C29161408F83C
-:101350000C981D78FB07281214B088281614891DD4
-:101360009F159B16C0F02B121429161A2B161B8BD7
-:10137000147AE30B7AE90688158E1678EB01C0F132
-:1013800065F1BA29121A2F12118A352E121B9A1AD8
-:10139000AFEE2F1210C0A0AF9F79FB01B1EE9F11ED
-:1013A000881AC0F098107AE30A7EA9052A12017AF9
-:1013B0008B01C0F164F08160018389368B37991706
-:1013C0000BE80C981F09C90C29161578EB07281291
-:1013D00015B088281615D9C09A199E188A1F2E1282
-:1013E000152A161A2E161BDAC0C0E08C177F930B35
-:1013F0007FA90688188F1978FB01C0E165E13E29B5
-:10140000121A2F12138A352E121B9A1BAFEE2F12AF
-:1014100012C0A0AF9F79FB01B1EE9F13881BC0F0F3
-:1014200098127AE30A7EA9052A12037A8B01C0F189
-:1014300065F10A2E12162E16192A121B005104C02D
-:10144000E100EE1AB0EE2E16170EFF132F16180F2E
-:10145000CC01ACAA2F121A0EBC01ACFC7FCB01B19F
-:10146000AA2A161B2C161A63FC5E00007FB30263C7
-:10147000FE3163FE2B7EB30263FC3063FC2A000066
-:101480006450C0DA20DBC058168AC020D10FC0914A
-:1014900063FD7A00C09163FA44DA20DB70C0D12E7C
-:1014A0000A80C09A2924682C7007581575D2A0D1DB
-:1014B0000F03470B18ED4DDB70A8287873022B7DC6
-:1014C000F8D9B063FA6100002A2C74DB40580EEEA4
-:1014D00063FAE4000029221D2D25027B9901C0B08A
-:1014E000C9B62CB00728B000DA2006880A28824C3A
-:1014F000C0D10B8000DBA065AFE7C020D10FC09149
-:1015000063FBFF00022A0258024C0AA202060000F6
-:10151000022A025802490AA202060000DB70DA2001
-:10152000C0D12E0A80C09E2924682C7007581554FB
-:10153000C020D10FC09463FBC9C09663FBC4C096A2
-:1015400063FBBF002A2C74DB30DC405BFE0FDBA0AA
-:10155000C2A02AB4002C200C63FF27008D358CB765
-:101560007DCB0263FDD263FC6D8F358ED77FEB029E
-:1015700063FDC563FC6000006C1004C020D10F0047
-:101580006C1004C020D10F006C10042B221E2822E6
-:101590001DC0A0C0942924062A25027B8901DBA056
-:1015A000C9B913ED04DA2028B0002CB00703880A6B
-:1015B00028824CC0D10B8000DBA065AFE7C020D1F2
-:1015C0000F0000006C10042C20062A210268C805B8
-:1015D00028CCF965812E0A094C6591048F30C1B879
-:1015E0000F8F147FB00528212365812716ECF3297E
-:1015F000629E6F98026000F819ECEF2992266890BD
-:10160000078A2009AA0C65A0E72A629D64A0E12B45
-:10161000200C0CB911A6992D92866FD9026000DBBF
-:101620001DECE70DBD0A2DD2A368D0078E200DEE6C
-:101630000C65E0C7279285C0E06470BF1DECEC68C4
-:10164000434E1CECEB8A2B0CAA029A708920089955
-:10165000110D99029971882A98748F329F752821EB
-:1016600004088811987718ECDC0CBF11A6FF2DF246
-:1016700085A8B82E84CF2DDC282DF685C85A2A2CB3
-:1016800074DB40580E81D2A0D10FC020D10F0000D2
-:101690000029CCF96490B12C20668931B1CC0C0CB6
-:1016A000472C24666EC60260008509F85065807F6D
-:1016B0001CECD18A2B0F08400B881008AA020CAA38
-:1016C000029A7089200899110D99029971883398AE
-:1016D000738C329C728A2A9A748934997563FF7D5F
-:1016E00000CC57DA20DB30DC4058155FC020D10F2A
-:1016F00000DA20C0B65815EE63FFE500DA20581571
-:10170000EC63FFDC00DA20DB30DC40DD5058167A79
-:10171000D2A0D10FC858DA20DB305814C72A2102D2
-:1017200065AFBDC09409A90229250263FFB200007C
-:101730002B21045814731DECADC0E02E24668F30AD
-:101740002B200C0F8F1463FF66292138C088798302
-:101750001F8C310CFC5064CF562B2104C0C0581490
-:10176000681DECA2C0E08F302B200C0F8F1463FF9C
-:101770003E2C20662B2104B1CC0C0C472C2466583F
-:1017800014601DEC9AC0E02E24668F302B200C0FC5
-:101790008F1463FF1A0000006C1004C0B7C0A116BC
-:1017A000EC9615EC88D720D840B822C04005350209
-:1017B0009671957002A438040442C94B1AEC7B1947
-:1017C000EC7C29A67EC140D30F6D4A0500808800BD
-:1017D000208C220A88A272D10FC05008A53875B09B
-:1017E000E363FFD76C10069313941129200665520A
-:1017F00088C0716898052A9CF965A29816EC6F2933
-:1018000021028A1309094C6590CD8AA00A6A512ADF
-:10181000ACFD65A0C2CC5FDB30DA208C115815120C
-:10182000C0519A13C7BF9BA98E132EE20968E060CE
-:101830002F629E1DEC606FF8026000842DD2266836
-:10184000D0052F22007DF9782C629DC79064C0706E
-:101850009C108A132B200C2AA0200CBD11A6DD0A97
-:101860004F14BFA809880129D286AF88288C09792E
-:101870008B591FEC520FBF0A2FF2A368F0052822E4
-:10188000007F894729D285D4906590756000430018
-:10189000002B200C1FEC4A0CBD11A6DD29D2860FAF
-:1018A000BF0A6E96102FF2A368F00488207F890586
-:1018B00029D285659165DA2058157DC95C6001FFE4
-:1018C00000DA20C0B658157A60000C00C09063FFA3
-:1018D000B50000DA205815766551E48D138C11DBC4
-:1018E000D08DD0022A020D6D515813E39A1364A1D2
-:1018F000CEC75F8FA195A9C0510F0F479F1163FEFF
-:10190000FD00C091C0F12820062C2066288CF9A784
-:10191000CC0C0C472C24666FC6098D138DD170DE5C
-:1019200002290A00099D02648159C9D38A102B211A
-:10193000045813F38A13C0B02B24662EA2092AA0E0
-:10194000200E28141CEC298D1315EC1DC1700A778C
-:101950003685562DDC28AC2C9C12DED0A8557CD3C5
-:10196000022EDDF8D3E0DA40055B02DC305BFF8A53
-:10197000D4A028200CB455C0D02B0A882F0A800C84
-:101980008C11A6CC29C285AF3FAB9929C6851CEC2A
-:1019900012DEF0AC882D84CF28120229120378F3CE
-:1019A000022EFDF8289020D3E007880CC1700808AB
-:1019B00047289420087736657FAB891313EC10898C
-:1019C00090C0F47797491BEC0EC1CA2821048513F7
-:1019D000099E4006EE11875304881185520E880235
-:1019E0000C88029BA09FA18F2B9DA598A497A795DB
-:1019F000A603FF029FA22C200C1EEBF7AECE0CCC50
-:101A00001106CC082BC2852DE4CF2BBC202BC6851C
-:101A10002A2C748B11580D9CD2A0D10F28203DC0C8
-:101A2000E07C877F2E24670E0A4765A07B1AEBF5C2
-:101A300088201EEBE38F138EE48FF40888110A8848
-:101A4000020F8F14AFEE1FEBF098910FEE029E90F5
-:101A50001EEBEFC0801AEBE02CD285AABAB8CC28D6
-:101A6000A4CF2CD6852C21022F20720ECC02B1FFE0
-:101A70002F24722C2502C020D10F871387700707EF
-:101A80004763FD6E282138C099798B0263FE9ADD89
-:101A9000F063FE9500DA20DB308C11DD505815968E
-:101AA000D2A0D10FC0E163FF7A8B138C11DD50C03F
-:101AB000AA2E0A802A2468DA205813F1D2A0D10F66
-:101AC000C020D10F6C1006292102C0D07597102AB2
-:101AD00032047FA70A8B357FBF052D25020DD90261
-:101AE000090C4C65C18216EBB41EEBB228629EC095
-:101AF000FA78F30260018829E2266890078A2009B3
-:101B0000AA0C65A17A2A629DDFA064A1772B200C24
-:101B10000CBC11A6CC29C286C08C79830260015707
-:101B200019EBA709B90A2992A368900788200988A8
-:101B30000C65814327C2851CEBA964713A89310980
-:101B40008B140CBB016FB11D2C20669F10B1CC0C07
-:101B50000C472C24666EC60260014009FF5065F1F7
-:101B60003A8A102AAC188934C0C47F973C18EBA974
-:101B70001BEBA88F359C719B708B209D7408BB025A
-:101B80009B72C08298751BEBA40F08409B730F8853
-:101B90001198777FF70B2F2102284A0008FF022FA8
-:101BA0002502C0B4600004000000C0B07E97048F1E
-:101BB000362F25227D970488372825217C9736C02B
-:101BC000F1C0900AF9382F3C200909426490861927
-:101BD000EB7618EB7728967E00F08800A08C00F05A
-:101BE0008800A08C00F08800A08C2A629D2DE4A2C1
-:101BF0002AAC182A669D89307797388F338A321835
-:101C0000EB8007BE0B2C2104B4BB04CC1198E0C0C0
-:101C10008498E1882B9DE59AE69FE71AEB78099F67
-:101C20004006FF110FCC020A880298E2C1FC0FCCDB
-:101C3000022CE604C9B82C200C1EEB670CCA11AEAE
-:101C4000CC06AA0829A2852DC4CF09B90B29A685DF
-:101C5000CF5CC020D10FC081C0900F8938C0877978
-:101C6000880263FF7263FF6600CC57DA20DB30DC4A
-:101C7000405813FDC020D10FDA2058148D63FFE8BF
-:101C8000C0A063FE82DA20C0B658148963FFD90071
-:101C9000DB402A2C74580CFCD2A0D10F8A102B21C7
-:101CA000045813171EEB44C0D02D246663FEB10008
-:101CB0006C1006D62019EB3F1EEB4128610217EB92
-:101CC0003E08084C65805F8A300A6A5169A3572B29
-:101CD000729E6EB83F2A922668A0048C607AC9343E
-:101CE0002A729D2C4CFECAAB2B600CB64F0CBD115A
-:101CF000A7DD28D2860EBE0A78FB269C112EE2A311
-:101D00002C160068E0052F62007EF91522D285CFDF
-:101D10002560000D00DA60C0B6581465C85A60012D
-:101D20000F00DA60581462655106DC40DB308D30FC
-:101D3000DA600D6D515812D0D3A064A0F384A1C015
-:101D40005104044763FF6D00C0B02C60668931B157
-:101D5000CC0C0C472C64666FC60270960A2B61048B
-:101D60005812E7C0B02B64666550B42A3C10C0E737
-:101D7000DC20C0D1C0F002DF380F0F4264F09019B0
-:101D8000EB0A18EB0B28967E8D106DDA0500A08803
-:101D900000C08CC0A089301DEB1A77975388328C15
-:101DA000108F3302CE0BC02492E12261049DE00427
-:101DB00022118D6B9BE59FE798E61FEB1009984079
-:101DC0000688110822020FDD02C18D9DE208220261
-:101DD00092E4B4C22E600C1FEB000CE811A7882C13
-:101DE0008285AFEE0C220B2BE4CF228685D2A0D1C8
-:101DF0000F28600CD2A08C1119EAF80C8D11A9885B
-:101E0000A7DD2ED2852B84CF0ECC0B2CD685D10FFF
-:101E1000C0F00ADF387FE80263FF6C63FF600000F8
-:101E20002A6C74C0B2DC20DD405812C5C0B063FF1C
-:101E300063C020D10F0000006C10042920062A2264
-:101E40001EC0392C221D232468C0307AC107DDA0B2
-:101E5000600004000000C0D06E9738C08F2E0A804A
-:101E60002B2014C0962924060EBB022E21022B24FF
-:101E7000147E8004232502DE307AC10EC8ABDBD08D
-:101E8000DA202C0A00580B062E21020E0F4CC8FE39
-:101E90006000690068956528210208084C65805C2F
-:101EA0001AEAC61EEAC42BA29EC09A7B9B5E2BE256
-:101EB0002668B0048C207BC95329A29D1FEAC16407
-:101EC000904A9390C0C31DEAD52B21049D9608BB70
-:101ED000110CBB029B979B911CEAD2C08523E4A204
-:101EE0002BA29D2824068DFA282102B0DD2BBC30C0
-:101EF0002BA69D9DFA0C8802282502C8D2C020D1AD
-:101F00000F8EF912EAC82E2689C020D10FDA20C020
-:101F1000B65813E7C020D10F6C10062A2006941083
-:101F200068A80528ACF965825029210209094C6589
-:101F3000920ACC5FDB30DA208C1058134BC051D39F
-:101F4000A0C7AF9A3AC0D01CEA9D14EAA31EEA9C2F
-:101F50008F3A16EA99B1FB64B13128629E6F88020C
-:101F60006001ED294C332992266890078A2009AA3E
-:101F70000C65A1DC2A629DC08E64A1D42B200C0CC0
-:101F8000B7110677082972867983026001CD0CB9F2
-:101F90000A2992A36890082C220009CC0C65C1BBC9
-:101FA0002772856471B5282006288CF96481E52C98
-:101FB00020668931B1CC0C0C472C24666EC60260B9
-:101FC00001A109F85065819B2A21048CE488361E02
-:101FD000EA7D088914A9CC08084709881019EA92F3
-:101FE0000ECC029C7099718C2A1EEA9008CC020ECD
-:101FF000CC029C722E302C293013283012049910F8
-:102000000688100CEE109F740EAE0209880208EECE
-:10201000029E738C3704AA119C758938C0F4997696
-:102020008839C0C1987718EA828E359C7B9E780EDD
-:102030008E1408EE029E7A8E301CEA7177E73088A3
-:102040003289339C7C9F7D0E9C4006CC118F2B29BE
-:1020500076132D76112876120CAA0218EA68C1C9E7
-:102060000CAA022A761008FF029F7EC0AA60000117
-:10207000C0A6A4BC0CB911A6992892852DC4CF087E
-:10208000A80B289685655100C020D10F2B200C0C81
-:10209000B7110677082A72860CB90A6FA902600187
-:1020A000182992A36890082A220009AA0C65A109A0
-:1020B0002A728564A1032C203D0C2C4064C08C8CBA
-:1020C000350C8C1464C0848FE57CF37F8C360C8CCB
-:1020D0001464C0777CF374283013C0FC78F86CC0AB
-:1020E00090292467090C4765C0D719EA4718EA45C3
-:1020F0008F208C3508FF110C8C1408FF0288E49F98
-:10210000A1AC8C09CC029CA08C369FA30C8C14AC87
-:102110008809880298A218EA3DA4BC2F72852DC4B4
-:10212000CF2FFC102F76852F210229207208FF0265
-:10213000B2992924722F2502C020D10F00CC57DA82
-:1021400020DB308C105812C8C020D10FC09163FF23
-:102150008FDA20C0B658135663FFE100DA20581317
-:102160005463FFD82B21045811E61EEA152B200CCE
-:10217000C0D02D24668F3A63FE4DDA20DB30DC4080
-:10218000DD505813DDD2A0D10F2A2C748B10580BC0
-:10219000BED2A0D10F292138C08879832E8C310C72
-:1021A000FC5064CE222B2104C0C05811D5C0D01ED3
-:1021B000EA048F3A2B200C63FE0DDA2058133C639F
-:1021C000FF7ADA205BFF1CD2A0D10F002C20662BF7
-:1021D0002104B1CC0C0C472C24665811C91EE9F817
-:1021E0002B200CC0D02D24668F3A63FDDA0000004E
-:1021F0006C10089514C061C1B0D9402A203DC04080
-:102200000BAA010A64382A200629160568A8052C9D
-:10221000ACF965C33F1DE9EA6440052F120464F27E
-:10222000A02621021EE9E606064C6562E615E9E2F3
-:102230006440D98A352930039A130A990C6490CCEA
-:102240002C200C8B139C100CCC11A5CC9C112CC2F7
-:1022500086B4BB7CB3026002D78F100EFE0A2EE25A
-:10226000A368E0098620D30F0E660C6562C2881150
-:102270002882856482BA891364905EDA80D9308CB2
-:10228000201EE9E01FE9E11DE9CE8B138DD4D4B007
-:102290007FB718B88A293C10853608C6110E660229
-:1022A0009681058514A5D50F550295800418146DE7
-:1022B0008927889608CB110888140EBB02A8D82954
-:1022C0009C200F88029BA198A088929BA308881449
-:1022D000A8D80F880298A22AAC1019E9CCC0C08FE8
-:1022E000131EE9BD86118D10286285AEDD08FF0B37
-:1022F0002CD4CF2821022F66858B352A207209889D
-:1023000002ABAA2825022A2472C020D10F29529E8E
-:1023100018E9A96F980260020B28822668800829B4
-:10232000220008990C6591FC2A529DC1CE9A126434
-:10233000A1F22B200C2620060CB8110588082D824E
-:10234000860EBE0A7DC3026002052EE2A368E00885
-:102350002F22000EFF0C65F1F6288285D780DE80E3
-:102360006482009816266CF96462012C206688311C
-:102370002CCC010C0C472C24666EC6026001BC08F4
-:10238000FD5065D1B61DE9AB1CE98F19E9962A21EC
-:10239000048B2D2830102F211D0C88100BFB090AEF
-:1023A00088020988020CBB026441529B709D71989F
-:1023B00072C04D8D35D9E064D06ED730DBD0D830C7
-:1023C0007FD714273C10BCE92632168C3996E69C40
-:1023D000E78A37B4382AE6080B131464304A2A8295
-:1023E0001686799A9696978C778A7D9C982B821779
-:1023F0002C7C209A9A2A9C189B99867BB03B298C2E
-:10240000086DB9218BC996A52692162AAC18B899E1
-:102410009BA196A08BC786CD9BA22B921596A49BC1
-:10242000A386CB2CCC2026A605C0346BD4200D3B34
-:102430000C0DD8090E880A7FB705C0909988BC8812
-:10244000C0900B1A126DAA069988998B288C18C017
-:10245000D01BE97A1CE97916E96EB1FF2A211C2309
-:10246000E6130F0F4F26E6122F251D7FA906C0F099
-:10247000C08028251D05F6111AE9678F202BE61567
-:102480002CE6162DE61726E6180AFA022AE6142983
-:102490002006299CF96490F829200C8D14C0801A1C
-:1024A000E94E0C9C11AA99A5CCDA202BC285289460
-:1024B000CF0B4B0B2BC685C0B08C155811BBD2A0CF
-:1024C000D10F8A356FA546D8308BD56DA90C8A8679
-:1024D0000A8A14CBA77AB335288C10C080282467C9
-:1024E000080B4765B10BDA20DB302C12055811DEE2
-:1024F000D3A0C0C1C0D02DA4039C1463FD22863696
-:102500006461059B709D719872C04D63FEA4C0818B
-:1025100063FFC9008814CC87DA20DB308C15581192
-:10252000D2C020D10FDA20C0B658126163FFE40098
-:1025300000DA208B1058125E63FFD8009E178A12B3
-:102540002B21045810EF8E17C09029246663FE34A7
-:10255000C08063FE06DA20DB308C15DD505812E6B1
-:10256000D2A0D10FDA2058125263FFA7002B2138D6
-:10257000C0A87BAB026001048C310CFC5064CE041B
-:102580008A122B2104C0C098175810DD8E1763FDE6
-:10259000F32D21382DDCFF0D0D4F2D253865DEF78D
-:1025A00028206A7F87050826416460A3C09016E949
-:1025B000141CE9232A200723E61BB1AA0CFD0226DE
-:1025C000E61A2B200A29E61D2DE61E0CBB022BE67F
-:1025D0001C8B260A0A472BE6208B282AE53E2BE691
-:1025E000212924072820062A2064688346B44463EE
-:1025F000FEA5DB30DA208C158D142E0A80C08E28C3
-:10260000246858111FD2A0D10F2E7C4819E8ED2A5A
-:1026100032162B76129D712D761328761489960A20
-:102620002A14AA990C9902997069ED71C14663FD4B
-:102630008100000064AFB51DE8E22C20168DD20A9F
-:10264000CC0C00D10400CC1AACBC9C2963FF9D00CB
-:102650002B21046EB81E2C2066B8CC0C0C472C2401
-:1026600066C9C09E178A125810A68E17C0348F20D4
-:10267000C0D02D2466C06826240663FF2E8A122B44
-:1026800021042C20669817B1CC0C0C472C246658DA
-:10269000109C8E178716C0D02D246663FCE68D35FE
-:1026A000C08064D04AD9E0DC30DBE0DF301AE8E5F6
-:1026B000B188B4FF16E8E584C92D9DFF87C82CCCEE
-:1026C0001027D63006460127D6320A440117E8DF24
-:1026D00024D631A74727D63324F21596B794B68D62
-:1026E000C3BCBB9DB58D35299C107D83C22F211D98
-:1026F000C14663FD330000006C1006292006289CAB
-:10270000F86582BF2921022B200C09094C6590E154
-:1027100016E8AA0CBA11A6AA2DA2862C0A127DC30D
-:102720000260028C19E8A609B90A2992A3689007E9
-:102730008C2009CC0C65C27829A2856492722D6226
-:102740009E1AE89C6FD80260026E2AA22629160102
-:1027500068A0082B22000ABB0C65B25C29629DC1EF
-:102760008C6492542A21200A806099102C203CC746
-:10277000EF000F3E010B3EB1BD0FDB390BBB098FE4
-:10278000260DBD112DDC1C0D0D410EDD038E27B174
-:10279000DD0D0D410FEE0C0DBB0B2BBC1C0BB7025E
-:1027A0007EC71C2C21257BCB162D1AFC0CBA0C0DD8
-:1027B000A16000093E01073EB1780987390B770A0D
-:1027C00077EB0260020A2C2123282121B1CC0C0CCA
-:1027D0004F2C25237C8B29B0CD2D2523C855DA20FD
-:1027E000DB30581095292102CC96C0E80E9E022EAF
-:1027F0002502CC57DA20DB30DC4058111BC020D139
-:102800000F2C20668931B1CC0C0C472C24666EC687
-:10281000026001D309FD5065D1CD2F0A012E301180
-:1028200029221464E01128221B090C4400C1040071
-:10283000FA1A0A880228261B2E3010C0A0C0B094B5
-:102840001295131CE85F88302CC022088D147787FE
-:1028500004C0F10CFA38C041C0F225203CC0840805
-:1028600058010F5F010F4B3805354007BB10C0F012
-:10287000084F3808FF100FBB0228ECFEC0F0084FCD
-:1028800038842B0BA8100AFF102A21200F88020B76
-:10289000880208440218E86E8F1108440228212596
-:1028A0000A2A140828140488110A88022A21049488
-:1028B000F08B2004E41008BB1104BB02C04A04BB27
-:1028C000029BF1842A08AB110BEB0294F40A541119
-:1028D0000B44020555100D1B4094F707BB100B5518
-:1028E00002085502C08195F68433C05094F3B19428
-:1028F0008B3295F898F99BF2C080C1BC24261499BC
-:10290000FA9BF598FB853895FC843A94FD8B3B9BAC
-:10291000FE883998FF853525F6108436851324F610
-:10292000118B3784122BF612C0B064C07E893077C9
-:1029300097438D3288332E30108F111CE83109995E
-:10294000400699112CF614C0C42CF6158C2B2DF6CC
-:102950001A28F61B2BF61904A81109880208EE02A2
-:1029600019E827C18008EE0209C90229F6162EF6D9
-:1029700018C09E600001C09A2F200C18E8170CFEAA
-:1029800011A8FFA6EE2DE2852BF4CF0D9D0B2DE6B1
-:1029900085C87F8A268929A7AA9A260A990C090937
-:1029A00048292525655050C020D10F00C09A63FFEB
-:1029B000C6DA2058113F63FE38DA20C0B658113C01
-:1029C00063FE2E0068973C2B9CFD64BE24C020D182
-:1029D0000FDA20DB705810F8C0C0C0D10ADA390A0B
-:1029E000DC3865CDE063FE098A102B2104580FC442
-:1029F000C0B02B246663FE21DB402A2C745809A248
-:102A0000D2A0D10FDA20580FC963FCF76C1004C0B4
-:102A100020D10F006C1004290A801EE80E1FE80E5A
-:102A20001CE7E60C2B11ACBB2C2CFC2DB2850FCC7B
-:102A3000029ED19CD0C051C07013E80A14E8091856
-:102A4000E8072AB285A82804240A234691A986B853
-:102A5000AA2AB685A98827849F25649FD10F0000E4
-:102A60006C100AD630283010292006288CF9648290
-:102A70009B68980B2A9CF965A1B2022A02580FABF9
-:102A800089371BE7CFC89164520E2A21020A0C4CE9
-:102A900065C2588D3019E7C874D7052E212365E229
-:102AA0009E2F929E1AE7C46FF8026002532AA22654
-:102AB00068A0082C22000ACC0C65C2442A929D64AE
-:102AC000A23E9A151FE7BE8D67C1E6C8DD2B6206E0
-:102AD00018E7BC64B0052880217B8B432B200C18A1
-:102AE000E7B60CBC11A8CC29C28679EB460FBE0A0A
-:102AF0002EE2A368E0052F22007EF9372CC2859CC8
-:102B00001864C2332B212F87660B7B360B790C6F31
-:102B10009D266ED2462C203D7BC740CE5560001EC0
-:102B20002A200CC1B28C205811229A1864A2458D1B
-:102B30006763FFCFC0C063FFC5D7B063FFD300C0DA
-:102B4000E06000022E60030EDB0C6EB20EDC700C37
-:102B5000EA11AA6A2AAC20580199D7A0DA20DB70C2
-:102B6000C1C82D21205810BC8C268B279A160CBB6F
-:102B70000C7AB3348F18896399F3886298F28E6562
-:102B80009EF82D60108A189D1768D729C0D09DA97E
-:102B90002C22182B22139CAB9BAA97A58E667E73C2
-:102BA00002600097CF5860001FDA208B1658108201
-:102BB00065A13863FFBDC081C0908F18C0A29AF98B
-:102BC00099FB98FA97F563FFD2DB30DA20DC4058A6
-:102BD0001026C051D6A0C0C02BA0102CA4039B1758
-:102BE0002C1208022A02066B02DF702D60038E177A
-:102BF0009D149E100CDD11C0E0AD6D2DDC20580140
-:102C0000188C148B16ACAC2C64038A268929ABAAC9
-:102C10000A990C9A26886609094829252507880CEF
-:102C200098662F2218A7FF2F261863FE96DA20DB5E
-:102C300030DC40DD50581130D2A0D10FC0302C20F4
-:102C4000668961B1CC0C0C472C24666EC60260000C
-:102C5000D2C03009FD5065D0CA8E6764E0696470E7
-:102C600066DB608C18DF70DA202D60038E170CDDB8
-:102C7000119E10AD6D2DDC201EE7755800F923263E
-:102C800018DA208B16DC402F2213DD50B1FF2F26DF
-:102C900013580FC5D2A0D10F0028203D0848406529
-:102CA0008DE76F953EDA308DB56D990C8CA80C8C44
-:102CB00014CACF7CD32D2AAC10C090292467090DEB
-:102CC0004764DDC5600092002C1208066B022D6C73
-:102CD00020077F028E17DA209E101EE75C58007DC9
-:102CE00063FF9A00C09163FFD1000000655081DA54
-:102CF00020DB60DC40580FDCC020C0F02FA403D1E3
-:102D00000FDA20C0B658106A63FFE000006F95022A
-:102D100063FD6CDA20DB30DC40DD50C4E0580F5836
-:102D2000D2A0D10F8A152B2104580EF52324662832
-:102D30006010981763FF2100DA2058105D63FFAB25
-:102D4000C858DB30DA20580F3C2A210265AF9CC0FE
-:102D50009409A90229250263FF91DB30DC40DD5094
-:102D6000C0A32E0A802A2468DA20580F45D2A0D1A9
-:102D70000FC020D10FDA202B200C58107263FF6B8C
-:102D80006C1004282006C062288CF8658125C0508C
-:102D9000C7DF2B221BC0E12A206B29212300A104BD
-:102DA000B099292523B1AA00EC1A0BC4010A0A44E0
-:102DB0002A246B04E4390DCC030CBB012B261B64C5
-:102DC000406929200C1BE6FC0C9A110BAA082FA2C3
-:102DD000861BE6FA6FF9026000B60B9B0A2BB2A3C2
-:102DE00068B0082C22000BCC0C65C0A42BA2851D5A
-:102DF000E71E64B09B8C2B2421040DCC029CB08870
-:102E000020C0C50888110C880298B1882A0844118E
-:102E100098B48F3494B79FB5C0401EE6EF2DA285BD
-:102E20000E9E0825E4CF2DDC282DA6852921020938
-:102E3000094C68941A689820C9402A210265A00BA1
-:102E40002A221E2B221D7AB10265A079C020D10F43
-:102E50002C212365CFDE6000082E21212D21237E29
-:102E6000DBD52B221E2F221D2525027BF901C0B0A8
-:102E700064BFC413E6D02CB00728B000DA20038862
-:102E80000A28824CC0D10B8000DBA065AFE763FF4E
-:102E9000A62A2C74C0B02C0A02580E2F1CE6F49CF3
-:102EA000A08B2008BB1106BB029BA1893499A263A9
-:102EB000FF790000262468DA20DB30DC40DD505842
-:102EC000108ED2A0D10FDA202B200C580FF9C02081
-:102ED000D10F00006C1006073D14C080DC30DB40D1
-:102EE000DA20C047C02123BC3003283808084277C5
-:102EF0004001B1DD64815A1EE6AC19E6AD29E67EDB
-:102F0000D30F6DDA0500508800308CC0E0C020255A
-:102F1000A03C14E6ABB6D38FC0C0D00F87142440BA
-:102F2000220F8940941077F704C081048238C0F1E1
-:102F30000B2810C044C02204540104FD3802520181
-:102F400002FE3808DD10821C07EE100E6E020EDD48
-:102F500002242CFEC0E004FE380AEE100E88020D9A
-:102F600088028DAB1EE69B08D8020E880298B0C07E
-:102F7000E80428100E5E0184A025A125084411084C
-:102F80004402052514045511043402C0810E8E3903
-:102F900094B18FAA84109FB475660C26A11FC0F24D
-:102FA000062614600009000026A120C0F20626149F
-:102FB0000565020F770107873905E61007781008C5
-:102FC000660206550295B625A1040AE611085811B5
-:102FD00008280208660296B7C060644056649053A1
-:102FE000067E11C0F489C288C30B340B96459847FE
-:102FF000994618E6829F410459110E99021FE680F6
-:10300000020E4708D80298420E99029F40C1E00E76
-:10301000990299442FA00CB4380CF91114E66F1ED4
-:10302000E666A4FFAE992E928526F4CF0E880B2873
-:103030009685D10F2BA00C1FE6601CE6670CBE1115
-:10304000ACBBAFEE2DE28526B4CF0D3D0B2DE68552
-:10305000D10FC08005283878480263FEA263FE962F
-:103060006C1006C0C06570F18830C03008871477D6
-:103070008712C0B0C0A619E652299022C030CC9762
-:10308000C031600003C0B0C0A6C0E0C091C0D4C0D1
-:103090008225203C0B3F109712831CC070085801FA
-:1030A0000D5D01089738C0800B98380777100488A9
-:1030B00010086802087702C0800D98382D3CFE0881
-:1030C00088100D9E388D2B0AEE1008EE0207EE02D6
-:1030D0000CB8100FDD02053B400EDD029D4089203B
-:1030E000043D100899110D99022D210409A9020827
-:1030F000DD119941872A05B9100D3D020ABB110D5A
-:10310000BB02087702974428212587120828140457
-:103110008811071E4007EE100E99027566092621D8
-:103120001F062614600006002621200626140868C3
-:10313000029B47098802984629200CD2C0C0800C07
-:103140009E111BE6251FE61CAB99AFEE2DE28528EC
-:1031500094CF0DAD0B2DE685D10FDD40C0A6C0B0DC
-:103160008E51CAE0B2AAB1BB2DDC108F500E78365A
-:10317000981008770C9FD898D989538F5299119934
-:10318000DB9FDA7E8309B1CC255C10C97763FFCF62
-:1031900088108D1108E70C9751AD8DD7F078DB01C1
-:1031A000B1F79D5397528830C03008871408884083
-:1031B000648ED565BEC963FEBC0000006C1004D7E8
-:1031C00020B03A8820C0308221CAA0742B1E2972F8
-:1031D000046D080FC980C9918575B133A2527A3B3D
-:1031E0000B742B0863FFE900649FECD10FD240D130
-:1031F0000F0000006C100AD6302E3027D950DA406C
-:1032000015E5F02430269A1529160464E00264932B
-:10321000732920062A9CF865A3CE2A2102270A04D6
-:103220000A0B4C65B3978C3074C7052D212365D4E8
-:10323000A0C0A62B0A032C2200580F3664A3B9178E
-:10324000E5DE8E389A1664E3BA2F6027285021C92C
-:10325000F37E8311C2B08C202A200C580F55D7A0C2
-:10326000CDA16004A200C2B08C202A200C580F29E6
-:10327000D7A064A4862F212E8B680FBF360FB90C00
-:103280006F9D54296027D5B06E920528203D7B8F15
-:103290004CDA20DB50C1C42D211F580EEF8B269A2B
-:1032A000189A1989272AAC380B990C7A9353896399
-:1032B000C08099738F6298789F728E659E798D67B2
-:1032C0009D7B8C6695759C7A8E687E53026000B1FA
-:1032D0008B1465B050600038DBF063FFA5008A14E2
-:1032E000C9A92E60030E9B0C6EB2A5DC500CEA112E
-:1032F000AA6A2AAC285BFFB1D5A063FF93C0E06344
-:10330000FFE2DA208B18580EAC65A2B163FF9E0075
-:1033100000DA20DB308C15580E54D6A0C0C0C0D1C6
-:103320002D16042CA403DC70DA20DB60DF502D6046
-:1033300003C0E09E109D171EE5B90CDD110D6D0850
-:103340002DDC285BFF478E668F678817AF5FA8A8C4
-:1033500028640375FB01B1EE8A189E669F67892673
-:103360008829AA9909880C99268E6808084805EECC
-:103370000C28252515E5939E6865EECC63FEE600D6
-:103380000000C9432F21232B21212FFC010F0F4FB8
-:103390002F25237FBB026003142C20668961B1CCEA
-:1033A0000C0C472C24666EC60260022809FD50658D
-:1033B000D22264E1B62E602764E1B0DC70DF50DA1F
-:1033C00020DB601EE5AB2D6003C08098100CDD1182
-:1033D000AD6D2DDC285BFF22644181C0442B0A00C7
-:1033E0008C202A200C580ECB0AA70265A00FC0B073
-:1033F0002C22002A200C580EC7D7A064AFEFDA2089
-:10340000C1BCC1C82D21208F188E268929AFEE9E00
-:10341000260E990C090948292525580E8FC090C001
-:1034200050C0C288609A191EE566C0A12EE022082D
-:103430008F14778704C0810E8938C0800B93102DBC
-:10344000203C2921200CDC0104DB010929140BA8F4
-:10345000380CA5380D3D401CE57E8B2B08881007E5
-:1034600055100855020533022821250F154003BBCE
-:10347000020CBB0207551005D3100828140ADD11F1
-:103480000488110988020533022921040833029BAC
-:1034900070C0808A201BE57708AA110BAA029A71D6
-:1034A000C0A1852A9376957408931103DD020ADD85
-:1034B000029D778C63C1DC9C738B6298789A799BB0
-:1034C00072232214C0C0B1352526149C7B9D7593B0
-:1034D0007A2B621A9B7C2A621C9A7D28621D987E38
-:1034E00025621B957F2362172376102D62182D7697
-:1034F000112C62192C761264E0B98E6077E73DC01A
-:10350000FE13E53E1DE53FC1818A628B6304951180
-:103510000E9C4006CC110C5502247615085502C0AD
-:10352000802D76148D2B2B761B2A761A287619255A
-:10353000761803DD022D76166000030000C0FA2E17
-:10354000200C19E52518E51CA9E90CEE11A8EEC020
-:10355000802DE2852894CF0DFD0B2DE685DA208B9A
-:10356000198C158D14580D90D2A0D10FDC70DF503E
-:10357000DB602D6C28C0A01EE53E9A10DA205BFEB1
-:103580005563FE53002B203D0B4B4065BC826FE51D
-:1035900027DA308F556DE90C8EAA0E8E14C9E87E9D
-:1035A000F3162AAC10C090292467090F4764FC6009
-:1035B00060015F00C0FA63FF85C09163FFE8881473
-:1035C000658168DA20DB608C15580DA7C020C0909B
-:1035D00029A403D10F8A162B2104580CC9C0A02A94
-:1035E00024668E6863FDCA00002B9CF965B0FDDA85
-:1035F00020580CCE63FC220000DA20C0B6580E2CF6
-:1036000063FFBA002B200C0CBE11A7EE2DE286C181
-:10361000C27DC30260011819E4E909B90A2992A31D
-:103620006890082A220009AA0C65A10326E2856495
-:1036300060FD2C20668931B1CC0C0C472C24666FC0
-:10364000C60270960C8A162B2104580CADC0D02DE2
-:1036500024668E3077E74D1CE4E91BE4E98F32885D
-:1036600033C0A42D21040E994006991104DD1109DF
-:10367000DD029A61C19009DD029B60C0908B2B9D99
-:10368000649F66986799650CBB029B6228200C1AA0
-:10369000E4D2AA8A0C8811A7882F828529A4CF2F6B
-:1036A000FC202F86858A1465A0A6C020D10FB0FC0F
-:1036B0008B142C2523C8B7022A02066B02580CDE95
-:1036C0002A210265AEF7C0D80DAD022D250263FE9A
-:1036D000EC008E14C8E8DA20DB30580CD72A21021F
-:1036E00065AEDA07AF022F250263FED100DA20DBD8
-:1036F000308C158D14580E80D2A0D10FDA202B20DB
-:103700000C580DEB63FEB600DA202B200C580E0D82
-:1037100063FEAADA20DB308C152D12042E0A8028D5
-:103720000A00282468580CD663FAE500C020D10F9F
-:10373000DA20580DDF8914CD92DA20DB308C155851
-:103740000D4ADBA0C020C0A02AB403D10FC020D1F5
-:103750000F2A2C748B1558064CD2A0D10F000000F4
-:103760006C100E28210224160108084C6583A91F3D
-:10377000E49229F29E6F98026003AD1EE48E29E266
-:10378000266890082A220009AA0C65A39B24F29DB2
-:103790006443952A31160A4B412B240BB4BB0B0B07
-:1037A000472B240C0CB611AF66286286C1CC78C3B7
-:1037B0000260037F19E48209B90A2992A36890077D
-:1037C0008C2009CC0C65C36B276285647365293135
-:1037D00009C0D02D24668C3599139C2A88369C14F8
-:1037E000982B8E3798159E169E2C8C38C0E10C5C59
-:1037F000149C179C2D88392925042E251D28251C4D
-:103800002C3028C0822C243C2930290C0C4708C8B5
-:103810000129243D29311598189912090841089960
-:103820000C299CEC29251F7EC725921C8212282A70
-:1038300000082060991B01023E00093EB128098260
-:1038400039891B0E221102990C821C29251F821C0A
-:10385000941D951E24211F15E4880451609A10C1FF
-:10386000802B1610252014961F05054301063E00E7
-:103870000D3EB16B0DB6398B3C2D9CFC08663606AF
-:10388000441C893D2E26132E26142E26152E246B1D
-:1038900025241406D61CC05025261825261B2524B1
-:1038A000672524682832112525232525242525254B
-:1038B00025252C2925222D25202B252124252E26A2
-:1038C000252F14E46F16E46D1BE45298192D211C6A
-:1038D000C08498719B70892095759577957F967CAB
-:1038E000967E98799B7894731BE46714E4680C388F
-:1038F000400288100C064015E464016610947D9B1C
-:1039000074841D1BE444086602957B18E431851E0F
-:103910000B99029972997A0866022B121096768694
-:103920001F6FD2026001C8C0A0991A6D080AB1AA1F
-:1039300000A10400E81A7D8B0263FFEE891AC0E043
-:10394000961F1DE43E2B1610951E941D28203D2920
-:10395000761A297612C040C051C0B22D76130806DF
-:10396000408D170B8801065E380AEE101BE44A08EA
-:103970005438B0A609661188140B44102B761B042A
-:10398000EE028B1614E44308DA1406EE020D8810DA
-:103990002A761E86131AE41C04EE020D66110866D0
-:1039A000022E76160D14141EE41A0D44110BD814B1
-:1039B0000866020A44022E76182E76102476172600
-:1039C000761FC084287619287611C76F0C24400F03
-:1039D00044111CE3FB26761D26761C2676152676DA
-:1039E000148A262676242676252976222E762028E5
-:1039F00076218E1888150DB91016E4278BC70D880F
-:103A0000110E5E39ADBB851904EE022676230988B6
-:103A100002861F89102876260A04480544110505E8
-:103A2000480E551105440204EE02851E841D2E76B3
-:103A3000272820069B2D29246A2E31172B12102EA1
-:103A40002538CC83C0D02D2407C0D7090840648016
-:103A50008E9A290928416480AA64E0B42D2406C006
-:103A60009809E9362D0AA02A628501C404ADAA2D61
-:103A700021042A668508DD11883F8E3E2732100812
-:103A8000EA1800C40408E8180088110ECE5308771D
-:103A900002C08308DD029D4118E401090D4E9840E3
-:103AA00088209A4397449D4517E3FE1DE3CB058884
-:103AB0001108EE02ADBDC08007EE029E4228D4CFB1
-:103AC0002AF29D87CA2AAC18B1772AF69D1AE3B963
-:103AD00097CA28A4A268711C655060C020D10F004D
-:103AE0002D2406C080C09809E9360E893863FF731B
-:103AF000C0A063FE481BE3CB1AE3EB2AB68963FF41
-:103B0000D600000065EF54C098C0D82D240663FF8E
-:103B1000522D2406C09063FF4ACC57DA20DB308C4C
-:103B200011580C51C020D10F00DA20C0B6580CE05B
-:103B300063FFE500DA20580CDE63FFDC2A2C748B6F
-:103B400011580551D2A0D10F6C10062820068A33D7
-:103B50006F8202600161C05013E39729210216E3CE
-:103B600096699204252502D9502C20159A2814E331
-:103B7000948F2627200B0AFE0C0477092B712064F2
-:103B8000E1398E428D436FBC0260016F00E104B0E9
-:103B9000C800881A08A80808D80298272B200668A9
-:103BA000B32ECE972B221E2C221D0111027BC901A0
-:103BB000C0B064B0172CB00728B000DA2003880A20
-:103BC00028824CC0D10B8000DBA065AFE7C020D1BC
-:103BD0000F2D206464DFCA8B29C0F10BAB0C66BFCC
-:103BE000C02B200C0CBC11A6CC28C2862E0A08784B
-:103BF000EB611EE3720EBE0A2EE2A368E0052822E6
-:103C0000007E894F29C2851EE37E6490461FE38CA7
-:103C10009E90C084989128200A95930F88029892CC
-:103C20008E200FEE029E942F200788262F950A984B
-:103C3000969A972E200625240768E3432921022A15
-:103C4000C2851DE3652AAC20ADBD25D4CF2AC6852B
-:103C500063FF4E002E2065CBEDC082282465C9F697
-:103C600005E4310002002A62821BE36D2941020B48
-:103C7000AA022A668209E43129210263FF23000097
-:103C800064DFB88F422E201600F1040DEE0C00EE1A
-:103C90001AAEAE9E2963FFA38A202B3221B1AA9AC5
-:103CA000B0293221283223B4992936217989A92BC8
-:103CB00032222B362163FFA0C020D10F9F2725245D
-:103CC00015ACB82875202B2006C0C12EBCFE64E0C0
-:103CD000AB68B7772DBCFD65DEC72D2064C0F064EE
-:103CE000D0868E290EAE0C66E089C0F128205A28B5
-:103CF0008CFE08CF3865FEE863FF580000E00493AF
-:103D000010C0810AF30C038339C78F08D80308A8B1
-:103D10000108F80C080819A83303C80CA8B82875BE
-:103D200020030B472B24158310CBB700E104B0BC54
-:103D300000CC1AACAC0CDC029C27659E5EC0B20BBA
-:103D4000990209094F29250263FE50002D206A0DB2
-:103D50002D4165DF7EDA20C0B0580CA864AF18C0D2
-:103D6000F163FEEF9F2763FFD02E221F65EE3263C3
-:103D7000FF79000028221F658E2763FF6E25240629
-:103D800029210263FE1B00006C10066571332B4C69
-:103D900018C0C7293C18C0A1C08009A8380808422B
-:103DA0006481101CE3011AE3022AC67E2A5CFDD35B
-:103DB0000F6DAA0500B08800908C8940C0A00988CA
-:103DC000471FE32B080B47094C50090D5304DD1026
-:103DD000B4CC04CC100D5D029D310CBB029B30882D
-:103DE000438E2098350FEE029E328D26D850A6DDE8
-:103DF0009D268E40C0900E5E5064E0971CE3111E1D
-:103E0000E300038B0BC0F49FB19EB02D200A99B341
-:103E10000CDD029DB28F200CFF029FB48E262D2058
-:103E2000079EB68C282DB50A9CB72924072F20069B
-:103E30002B206469F339CBB61DE2E22320168DD224
-:103E40000B330C00D10400331AB48DA3C393292281
-:103E5000200C13E2E11FE2D80C2E11AFEEA32229B1
-:103E600024CF2FE285D2A00FDD0B2DE685D10F00E8
-:103E70002E200CB48C0CEB111FE2D81DE2CFAFEE5C
-:103E8000ADBB22B28529E4CF02C20B22B685D2A0F7
-:103E9000D10F00002E200C1CE2C81FE2CF0CEB114A
-:103EA000AFEEACBB22B28529E4CF02820B22B685ED
-:103EB000D2A0D10FC0D00BAD387DC80263FEEC6339
-:103EC000FEE08E40272C747BEE12DA70C0B32C3CDF
-:103ED00018DD50580A9B8940C08063FEE3066E02DD
-:103EE000022A02DB30DC40DD505800049A10DB501F
-:103EF000DA70580465881063FEF700006C100692B3
-:103F0000121EE2B98C40AE2D0C8C472E3C1804CA10
-:103F10000BD9A07DA30229ADF875C302600084C04F
-:103F2000B0C023C0A09D106D0844B89F0EB80A8D84
-:103F3000900EB70BB8770D6D36ADAA9D800D660C4F
-:103F4000D8F000808800708C879068B124B2227706
-:103F5000D3278891C0D0CB879890279C1000708879
-:103F600000F08C9D91CB6FC08108BB0375CB36638D
-:103F7000FFB4B1222EEC1863FFD485920D770C8626
-:103F8000939790A6D67D6B01B1559693959260005C
-:103F900016B3CC2D9C188810D9D078D3C729DDF85A
-:103FA00063FFC100C0238A421BE2C000CD322D4412
-:103FB000029B3092318942854379A1051EE2BC0EF5
-:103FC000550187121BE2AB897095350B9902993226
-:103FD00088420A880C98428676A6A696768F44AFC9
-:103FE000AF9F44D10F0000006C10089311D63088A9
-:103FF00030C0910863510808470598389812282165
-:1040000002293CFD08084C6581656591628A630A56
-:104010002B5065B18B0A6F142E0AFF7CA60A2C2048
-:104020005ACCC42D0A022D245A7FE0026002158961
-:104030002888261FE29F09880C65820F2E200B0F0F
-:10404000EE0B2DE0FE2EE0FF08DD110EDD021EE27C
-:1040500099AEDD1EE2991CE2990EDD010DCC37C14F
-:1040600080084837B88DB488981089601AE2557B6B
-:1040700096218B622AA0219C147BA3179D132A20D2
-:104080000C8B108C20580BCA8C148D13DBA0CEAC7B
-:104090006001C4002E200C1BE2480CEA110BAA0898
-:1040A0002BA2861FE2467BDB3B0FEF0A2FF2A368B1
-:1040B000F0052822007F892C2BA28564B0AA876294
-:1040C0008826DE700C7936097A0C6FAD1C8F279B21
-:1040D0001508FF0C77F3197E7B729D139C149B15BA
-:1040E000CF56600025C0B063FFD0D79063FFDD00DE
-:1040F000009D139C14DA20DB70580B2F8B158C1449
-:104100008D1365A06A8E6263FFCC00DA208B11DC10
-:1041100040580AD5D6A08B15C051DE70DA20DC607D
-:10412000DD405BFF768D138C14D9A02E200C1BE292
-:10413000221FE2290CEA11AFEFC0E0ABAA2BA28547
-:104140002EF4CF0B990B29A68563FF1D00DA20DC26
-:1041500060DD40DE708912282007DF50A9882824FE
-:10416000075BFF09D2A0D10F00DBE0DA20580B502B
-:104170006550EF2A20140A3A4065A0EBDB60DC4072
-:10418000DD30022A025809BCD6A064A0D584A183E0
-:10419000A00404470305479512036351C05163FE11
-:1041A0005C2C2006D30F28CCFD6480A568C704C012
-:1041B000932924062C2006C0B18D641FE2019D279F
-:1041C0009D289D298FF29D2600F10400BB1A00F066
-:1041D00004B0BE0EDD01C0F0ADBB8D652F24070D10
-:1041E0000E5E01EE11AEBB2E0AFEB0BB0B0B190E1C
-:1041F000BB36C0E20B0B470EBB372B241618E1F978
-:104200000A09450D0B422B240B29240AB4BE2E2487
-:104210000C7D88572920162FCCFDB09D0A5C520DCD
-:10422000CC362C246465FDEC0C0C4764CDE618E11B
-:10423000E48E2888820C9F0C00810400FF1AAFEEE8
-:104240009E2963FDCF1CE21163FE13001CE20B6389
-:10425000FE0C8D6563FFA500DA202B200C580B396E
-:10426000645F0FC020D10F00C020D10FC09329245C
-:1042700016C09363FFA000006C1004C06017E1CD6E
-:104280001DE1D0C3812931012A300829240A78A1EF
-:1042900008C3B27BA172D260D10FC0C16550512654
-:1042A00025022AD0202F200B290AFB2B20142E2098
-:1042B0001526241509BB010DFF0928F1202B241414
-:1042C000A8EE2EF52064A0A92B221E28221D011184
-:1042D000027B8901DB6064B0172CB00728B000DADC
-:1042E0002007880A28824CC0D10B8000DBA065AF74
-:1042F000E7DB30DC40DD50DA205800DE29210209FE
-:104300000B4CCAB2D2A0D10F00CC5A2C30087BC1C2
-:10431000372ED02064E02D022A02033B02DC40DD70
-:10432000505800D4D2A0D10F2B2014B0BB2B241492
-:104330000B0F4164F0797CB7CAC0C10C9C022C25DC
-:1043400002D2A0D10FC020D10F2E200669E2C126D3
-:1043500024062B221E2F221D29200B2820150D9903
-:10436000092A9120262415AA882895207BF14960E6
-:104370000048B0BB2B24140B0A4164A0627CB70236
-:104380002C25022B221E2C221DD30F7BC901C0B06D
-:10439000C9B62CB00728B000DA2007880A28824C5A
-:1043A000C0D10B8000DBA065AFE7C020D10F0000BB
-:1043B000262406D2A0D10F0000DB601DE18164BF7E
-:1043C0004F2CB00728B000DA2007880A28824CC09A
-:1043D000D10B8000DBA065AFE71DE17963FF310001
-:1043E00026240663FF9C00006C1004282006260A81
-:1043F000046F856364502A2920147D9724022A02C1
-:10440000DB30DC40DD50580019292102090A4CC874
-:10441000A2C020D10FC0B10B9B022B2502C020D11E
-:104420000F00022A02033B022C0A015800D1C9AA3C
-:10443000DA20DB30DC40580A0C29A011D3A07E978B
-:10444000082C0AFD0C9C012CA411C0512D2014062F
-:10445000DD022D241463FFA4DA20DB30DC40DD50C4
-:10446000C0E0580987D2A0D10F0000006C100616DA
-:10447000E1521CE152655157C0E117E14E2821027B
-:104480002D220008084C6580932B32000B695129BE
-:104490009CFD6590872A629E6EA84C2A722668A0B1
-:1044A000027AD9432A629DCBAD7CBE502B200C0CE6
-:1044B000BD11A6DD28D2862F4C0478FB160CBF0A4E
-:1044C0002FF2A368F0052822007F89072DD285D31B
-:1044D0000F65D0742A210419E17AD30F7A9B2EDA62
-:1044E00020580883600035002D21041BE1757DBB39
-:1044F00024DA20C0B658087ECA546001030B2B5042
-:104500002B240BB4BB0B0B472B240C63FFA0DA202E
-:10451000580A67600006DA20C0B6580A656550E0A0
-:10452000DC40DB302D3200022A020D6D515808D2DA
-:104530001CE123D3A064A0C8C05184A18EA00404B0
-:10454000470E0E4763FF3500002B2104C08B8931D5
-:10455000C070DF7009F950098F386EB8172C2066CB
-:10456000AECC0C0C472C24667CFB099D105808E44B
-:104570008D1027246694D11EE126B8DC9ED06550AC
-:1045800056C0D7B83AC0B1C0F00CBF380F0F42CBFD
-:10459000F119E10518E10728967EB04BD30F6DBAEB
-:1045A0000500A08800C08C2C200CC0201DE10B0C45
-:1045B000CF11A6FF2EF285ADCC27C4CF0E4E0B2E09
-:1045C000F685D10FC0800AB83878D0CD63FFC1001E
-:1045D0008E300E0E4763FEA12A2C742B0A01044D67
-:1045E000025808D72F200C12E0FC0CF911A699A252
-:1045F000FF27F4CF289285D2A008480B289685D1B2
-:104600000FC020D10F0000006C1004C060CB55DB40
-:1046100030DC40055D02022A025BFF942921020979
-:10462000084CC882D2A0D10F2B2014B0BB2B24146D
-:104630000B0C41CBC57DB7EBC0C10C9C022C2502F5
-:10464000D2A0D10F0000022A02033B02066C02C076
-:10465000D0C7F72E201428310126250228240A0F5E
-:10466000EE012E241458010E63FFA300262406D267
-:10467000A0D10F006C1006282102D62008084C6536
-:10468000809D2B200C12E0CC0CB811A2882A8286C7
-:10469000B5497A930260009719E0C909B90A2992CD
-:1046A000A36890082A620009AA0C65A08228828566
-:1046B0001CE0D46480799C80B887B14B9B819B10AF
-:1046C000655074C0A7D970280A01C0D0078D380D75
-:1046D0000D42CBDE1FE0B51EE0B62EF67ED830D3FD
-:1046E0000F6D4A0500808800908C2E3008C0A00015
-:1046F000EE322E740028600C19E0B80C8D11A2DD8A
-:10470000A988C0202CD2852284CFD2A00CBC0B2C2F
-:10471000D685D10FC0F0038F387FA0C063FFB400EF
-:10472000CC582A6C74DB30DC4058080BC020D10F09
-:10473000DA605809DF63FFE7DD402A6C74C0B0DC43
-:104740007058087F2E30088B1000EE322E7400282F
-:10475000600C19E0A10C8D11A2DDA988C0202CD21B
-:10476000852284CFD2A00CBC0B2CD685D10F0000A3
-:104770006C1004292014282006B19929241468817A
-:1047800024C0AF2C0A012B21022C24067BA004C0DC
-:10479000D02D2502022A02033B02044C02C0D0584D
-:1047A00000C0D2A0D10FC020D10F00006C1004298E
-:1047B0003101C2B429240A2A3011C28378A16C7B4A
-:1047C000A1696450472C2006C0686FC562CA572D86
-:1047D00020147CD722DA20DB30DC40DD505BFFA5E3
-:1047E000292102090E4CC8E2C020D10FC0F10F9F51
-:1047F000022F2502C020D10FDA20DB30C0C05BFFC2
-:10480000DC28201406880228241463FFC7292015F9
-:104810001BE06C2A200BC0C09C240BAA092BA120F2
-:104820002C2415AB9929A52063FF9900C020D10F36
-:10483000DA20DB30DC40DD50C0E0580891D2A0D156
-:104840000F0000006C1004CB5513E06725221F0DEC
-:10485000461106550CA32326221E25261F06440BAF
-:1048600024261E734B1DC852D240D10F280A80C087
-:104870004024261FA82828261E28261DD240D10FF6
-:10488000C020D10F244DF824261E63FFD80000005D
-:104890006C1004D620282006C0706E85026000D4FB
-:1048A0001DE04E19E04612E0442A8CFC64A1302B36
-:1048B0006102B44C0B0B4C65B0A22B600C8A600CEF
-:1048C000B8110288082E828609B90A7EC3026000E8
-:1048D0009A2992A368900509AA0C65A08E28828562
-:1048E000648088B8891BE04A94819B80655155C0DB
-:1048F000B7B8382A0A01C0C009AC380C0C4264C0F1
-:10490000421FE0291EE02B2EF67EB04AD30F6DAA7F
-:104910000500808800908CC0A029600C0C9C11A21E
-:10492000CC2BC285AD990B4B0B2BC6852860062777
-:1049300094CF6881222D6015D2A0C9D2C0E22E6426
-:1049400006D10F00C0F008AF387FB0BD63FFB100E3
-:10495000276406D2A0D10F00D2A0D10F00CC57DA25
-:1049600060DB30DC405808C0C020D10FDA60580945
-:104970005063FFE80028221E29221DD30F789901D9
-:10498000C080C1D6C1C11BE018C122AB6B6480429C
-:1049900078913F2A80000CAE0C64E0BB02AF0C643F
-:1049A000F0B52EACEC64E0AF0DAF0C64F0A92EAC0A
-:1049B000E864E0A32FACE764F09D2EACE664E097DA
-:1049C0002F800708F80BDA807B83022A8DF8D8A0A5
-:1049D00065AFBC28612308D739D97060007B00001F
-:1049E0002B600C0CB811A2882C82862A0A087CAB9A
-:1049F0007E09BA0A2AA2A368A0052C62007AC96FB0
-:104A00002A828564A0691FDFFE276504C0E3C0C455
-:104A10002E64069CA11CE02B9FA02E600A97A30C7D
-:104A2000EE029EA28F600CFF029FA42E60147AEF0C
-:104A30004627A417ADBC2F828527C4CF2FFC202F7B
-:104A4000868563FE692A6C74C0B1DC90DD4058072E
-:104A5000BC1DDFE163FEC100D9A0DA60DB30C2D04B
-:104A6000C1E0DC4009DE39DD50580805D2A0D10F85
-:104A7000DA6058090F63FEE4290A0129A4170DBF63
-:104A8000082E828527F4CF2EEC202E868564500BCD
-:104A90002A6C74DB4058017CD2A0D10FC020D10F0A
-:104AA0006C10062B221E28221D93107B8901C0B09A
-:104AB000C0C9C03BC1F20406401DDFCBC0E2C074D8
-:104AC0000747010E4E01AD2D9E11C0402E0A146401
-:104AD000B06E6D084428221D7B81652AB0007EA13E
-:104AE0003B7FA1477B51207CA14968A91768AA1484
-:104AF00073A111C09F79A10CC18B78A107C1AE2908
-:104B00000A1E29B4007CA12B2AB0070BAB0BDAB02C
-:104B10007DB3022ABDF8DBA0CAA563FFB428B0109C
-:104B200089116987BB649FB863FFDC00647FB4634D
-:104B3000FFD50000646FD0C041C1AE2AB40063FF4E
-:104B4000C62B2102CEBE2A221D2B221E7AB12A8C10
-:104B5000107CB1217AB901C0B0C9B913DF96DA204F
-:104B600028B0002CB00703880A28824CC0D10B80E3
-:104B700000DBA065AFE7D240D10F8910659FD463F9
-:104B8000FFF300006C1008C0D0C8598C30292102F6
-:104B90000C0C4760000C8E300E1E5065E19E2921E2
-:104BA00002C0C116DF85090B4C65B0908A300A6ED1
-:104BB0005168E3026000852F629E1BDF7E6EF85312
-:104BC0002BB22668B0052E22007BE94727629DB7ED
-:104BD00048CB7F97102B200CB04E0CBF11A6FF299D
-:104BE000F2869E12798B4117DF7507B70A2772A3E9
-:104BF000687004882077893029F285DF90D7906526
-:104C000090652A210419DFAE7A9B22DA205806B873
-:104C1000600029002C21041BDFAA7CBB18DA20C00D
-:104C2000B65806B3C95860014CC09063FFCCDA2077
-:104C300058089F600006DA20C0B658089D655135B7
-:104C4000DC40DB308D30DA200D6D5158070BC0D0C1
-:104C5000D3A064A120292102C05184A18CA0040406
-:104C6000470C0C4763FF3E00C09B8831DBD008F83F
-:104C700050089B3828210498116E8823282066ACA0
-:104C80008C0C0C472C24667CBB159F139E148A1039
-:104C90008B1158071B8E148F13C0D02D24668A30B9
-:104CA000C092C1C81BDF5B7FA6099BF099F12CF471
-:104CB0000827FC106550A4B83ADF70C051C08007C7
-:104CC000583808084264806718DF3819DF392986A8
-:104CD0007E6A420AD30F6DE90500A08800F08CC0FF
-:104CE000A08930B4E37F9628C0F207E90B2C940822
-:104CF0009B909F912F200C12DF380CF811A6882969
-:104D00008285A2FF2DF4CFD2A009330B238685D153
-:104D10000F22200C891218DF300C2B11A6BBA82201
-:104D20002D24CF2CB285D2A00C990B29B685D10F9A
-:104D3000C087C0900A593879809663FF8ADB30DAE1
-:104D400020C0C1C0D05BFF56292102C0D02A9CFEE2
-:104D500065AE4D2D2502C09063FE45009E142A2CA1
-:104D600074C0B1DC70DD405806F68E14C0D01BDF75
-:104D700028C1C863FF6AC020D10F00006C1006284C
-:104D8000210217DF0D08084C65824929729E6F9831
-:104D90000260025019DF082A922668A0078B200AB9
-:104DA000BB0C65B23F2A729DC0CB64A2371DDF04E5
-:104DB000C0602B3008C0F164B0712E0AFFB0B86437
-:104DC00081512DBCFE64D0F364505C2A2C74044BDA
-:104DD000025800AD0AA2020600000000001ADF0817
-:104DE0002C20076EBB0260022218DEFE13DF081BB8
-:104DF000DF36C0E229200A9AD09ED1ABCB039902BC
-:104E000099D223B08026B480B13308330293D318EB
-:104E1000DEF20CFD11A7DD2CD285A8F82684CF0C7C
-:104E2000EC0B2CD685655FA2C020D10F2B21048806
-:104E300031DE6008F85008CE386EB8102C2066B10C
-:104E4000CC0C0C472C24667CEB026001AF2E30109A
-:104E50002930112C301300993200CB3264E1452AFD
-:104E600030141EDF1A00AA3278CF050E9C092BC41D
-:104E70007F1CDF1766A0050E98092A8480B4A71846
-:104E8000DF15C76F009104AC9CDBC000AE1A00F3C5
-:104E90001A6EC1048BD00BCB0C1CDF0F08B81C069C
-:104EA0003303AC882A848B2CD03627848C03CC0126
-:104EB0000ECC022CD4365801AD63FF0B2F200C0C06
-:104EC000FB11A7BB2DB286C0987D9302600121190A
-:104ED000DEBB09F90A2992A36890082D220009DD9A
-:104EE0000C65D10C2DB285DE6064D10488312B2194
-:104EF0000408F85008CE386FB80263FEDF2C206635
-:104F0000B1CC0C0C472C24667CE30263FECE9D10D2
-:104F100060013100293108292504283014B0886443
-:104F200080A62B31092B240AC0812B30162FD423C5
-:104F30002B240BB4BC2C240C8D378B36292504DE96
-:104F4000D00D8E39DCB00B8C390ECC0264CE7808D3
-:104F50009C1101C4048F380DBE1800C4040DB8188C
-:104F600000881108FF02C08308CC0218DECC9CA187
-:104F700098A018DECB8C209EA39FA405CC110BCF4C
-:104F800053C1E09EA50CFF0208FF029FA218DE8914
-:104F90002624662C729D2684A22CCC182C769D6328
-:104FA000FE250000002D30121CDECD00DA3278DF45
-:104FB000050C9E0B2AE47F66B0050C9F0B2BF4803A
-:104FC0002A301100AA3263FEEC2E240A2B31099BF1
-:104FD0002B63FF5300CC57DA20DB30DC405807222C
-:104FE000C020D10F00DA20C0B65807B163FFE5003A
-:104FF00000DBF0DA205807AE63FFD9000058064006
-:105000001DDE70C0F126246663FE41008B20280A55
-:10501000FFB1CE23200A2C21040E0E472E24077840
-:1050200031359AD02CD50A96D319DEA62ED416C0C7
-:105030008398D1C0E309B80298D409390299D226DD
-:10504000240763FDC958062E8D102624662B2104E3
-:105050002F200C63FD86000008B81119DE6808EEE9
-:1050600002882B9ED59AD0C0EF09880298D204C935
-:10507000110E990299D4C0E49ED163FFC1000000D3
-:105080006C1004C020D10F006C100485210D381164
-:1050900014DE478622A42408660C962205330B935F
-:1050A00021743B13C862D230D10FC030BC29992182
-:1050B00099209322D230D10F233DF8932163FFE34F
-:1050C0006C100AD620941817DE3CD930B8389819DD
-:1050D0009914655256C0E1D2E02E61021DDE390EF0
-:1050E0000E4C65E1628F308E190F6F512FFCFD65FC
-:1050F000F1558EE129D0230E8F5077E66B8F181E65
-:10510000DE78B0FF0FF4110F1F146590CE18DE7516
-:105110008C60A8CCC0B119DE2728600B09CC0B0D20
-:10512000880929812028811E2A0A0009880C08BACA
-:10513000381BDE6B0CA90A2992947B9B0260008CC1
-:105140002B600C94160CBD11A7DD29D286B84879C6
-:1051500083026000D219DE1909B80A2882A39817C1
-:105160006880026000A36000A51ADE5F84180AEE62
-:1051700001CA981BDE108C192BB0008CC06EB313C3
-:105180001DDE0D0C1C520DCC0B2DC295C0A17EDB7B
-:10519000AE6000380C0C5360000900000018DE51AE
-:1051A0008C60A8CCC0B119DE0328600B09CC0B0DB4
-:1051B000880929812028811E2A0A0009880C08BA3A
-:1051C000380CA90A2992947E930263FF72DA60C0B8
-:1051D000BA58073764507360026A00001ADDF68C13
-:1051E000192AA0008CC06EA31A18DDF20C1C5208FC
-:1051F000CC0B18DE3B2BC295C0A178B30263FF3FF6
-:1052000063FFC9000C0C5363FF0989607899182962
-:10521000D285C9922B729E1DDDE76EB8232DD22652
-:10522000991369D00B60000DDA60580721600017F0
-:105230000088607D890A9A1A29729D9C129915CF5F
-:1052400095DA60C0B658071A6551F98D148C18DBD1
-:10525000D08DD0066A020D6D51580587D3A09A14DF
-:1052600064A1E182A085A1B8AF9F1905054702029C
-:10527000479518C05163FE602B6104C08B8931C013
-:10528000A009F950098A386EB81F2C6066A2CC0CB0
-:105290000C472C64667CAB119F119E1B8A15580528
-:1052A000988E1B8F11C0A02A64669F1164F0E58957
-:1052B0001388190FFD022E0A006DD9172F810300E4
-:1052C000908DAEFE0080889F9200908C008088B800
-:1052D0009900908C65514E8A10851A8B301FDDC85D
-:1052E000881229600708580A2C82942D61040ECC7C
-:1052F0000C2C86946FDB3C1CDDF4AC9C29C0800B2D
-:105300005D50A29909094729C48065D0DA2E600C46
-:10531000C0D01FDDB10CE811AFEEA7882282852D29
-:10532000E4CF02420B228685D2A0D10F8E300E0E22
-:105330004763FDA2A29C0C0C472C64077AB6CD8B68
-:10534000602E600A280AFF08E80C64810E18DDDD73
-:1053500083168213B33902330B2C34162D350AC051
-:105360002392319F30C020923308B20208E80292A3
-:10537000349832C0802864072B600CD2A01CDD96C4
-:105380000CBE11A7EE2DE285ACBB28B4CF0D9D0B52
-:105390002DE685D10F8B1888138D30B88C0D8F4773
-:1053A0000D4950B4990499100D0D5F04DD1009FFEB
-:1053B000029F800DBB029B8165508D851AB83AC053
-:1053C000F1C0800CF83808084264806B1BDD771947
-:1053D000DD7829B67E8D18B0DD6DDA0500A0880075
-:1053E000C08CC0A063FEF30082138B161DDD8828DD
-:1053F000600AC0E02EC4800D880202B20B99239F80
-:1054000020C0D298229D2122600CB2BB0C2D11A786
-:10541000DD28D28508BB0B18DD702BD685A8222E7F
-:1054200024CFD2A0D10F9E1B851A2A6C748B185BD7
-:10543000FF168E1B63FEA300C087C0900AF938795F
-:10544000809263FF86C020D10F9E1B2A6C74C0B16E
-:105450008D1858053B8E1B851A63FE7E886B821360
-:10546000891608BE110ECE0202920B9E25B4991E1B
-:10547000DD639F200E88029822C0EF04D8110E88A9
-:10548000029824C0E49E21C080D2A02B600C286426
-:10549000071CDD510CBE11A7EE2DE285ACBB28B474
-:1054A000CF0D9D0B2DE685D10F0000006C1004C0C0
-:1054B00020D10F006C10048633C071C03060000131
-:1054C000B13300310400741A0462017460F1D10F29
-:1054D0006C1004022A02033B025BFFF61CDD391B41
-:1054E000DD83C79F88B009A903098A019AB0798032
-:1054F0001EC0F00FE4311DDD300002002BD2821EF1
-:10550000DD7C2AC1020EBB022BD6820AE431D10F08
-:1055100028C102C19009880208084F28C50208E482
-:1055200031D10F006C1004C0C00CE43112DD251A1B
-:10553000DD2200020029A28218DD701BDD6E26210B
-:10554000020B990108660129A68226250206E4318C
-:1055500014DD6B15DD66236A9023261685502426FC
-:1055600015252617222C50D10F0000006C1008D6EC
-:10557000102B0A64291AB41ADD0F0D23111CDD103B
-:105580000F2511B81898130E551118DD5DAC55A8EC
-:1055900038AA332C80FF2A80FEA933288D01298068
-:1055A0000108AA112880000CAA02088811098802A3
-:1055B00008AA1C288C0828160458086814DD010A5B
-:1055C000A70224411A2A30802B120407AA2858085F
-:1055D00063B1338B13B4559A6004AC28B4662C566F
-:1055E0002B7B69E016DD3A9412C050C0D017DCF472
-:1055F0009D15D370D4102F60802E60829F169E1749
-:10560000881672891A8D128C402A607F0DCC282B47
-:105610003A200CAA28580851C0B10ABE372E354886
-:105620008F1772F91A8D128C402A60810DCC282BAD
-:105630003A200CAA28580849C0B10ABE372E354A6C
-:10564000B233B444B1556952B6B466C0508F15B880
-:1056500077D370B2FF9F156EF899D10F6C1004C00C
-:1056600021D10F006C1004270A001CDCD31FDCE4DE
-:105670001EDCE71DDCD01ADD141BDD22C02824B09F
-:10568000006D2A75AA48288080C09164806100411D
-:105690000415DCCBC03125503600361A06550105FD
-:1056A00095390C56110C66082962966E974D0D5966
-:1056B0000A29922468900812DD0602420872993B7A
-:1056C00023629512DCC8CB349F300282020E440262
-:1056D000C092993194329233AD52246295C0902495
-:1056E0004C1024669524B0002924A0AA42292480C5
-:1056F000B177B14404044224B400D10FD10FD10FCB
-:105700006C10041ADCAC2AA00058021C5BFFD50206
-:105710002A02033B025BFFD11BDCAAC9A12CB10208
-:10572000C0D40DCC020C0C4F2CB5020CE431D10FBF
-:10573000C0A00AE43118DCA00002002F828219DC2C
-:10574000B32EB10209FF022F86820EE431D10F0081
-:105750006C1004C02002E43114DC9A16DC970002BD
-:1057600000226282234102732F0603E431C020D15C
-:105770000F19DCE61ADCE52841020A2A0109880132
-:105780002A668228450208E43115DCDC12DCE125BA
-:105790004621D10F6C1004292006289CF96480A0B2
-:1057A0002A9CFD65A0968A288D262F0A087AD9049E
-:1057B0002B221FC8BD2C206464C0812E22090EAE8E
-:1057C0000C66E0782B200C1EDC7C0CBC11AECC28C7
-:1057D000C28619DC7A78F3026000AD09B90A299211
-:1057E000A36890082E220009EE0C65E09B29C28573
-:1057F0001FDC846490929F90C0E41FDC919E9128EE
-:10580000200AC0E09E930F8802989288200F880299
-:1058100098942F20079A979D962F950A2E24072853
-:10582000200629206468833328C28512DC6B288C0B
-:1058300020A2B22E24CF28C685C020D10FC020D1EF
-:105840000F2A206A0111020A2A4165AF52DA20C0EC
-:10585000B05805EA64AFE5C021D10F00649FC81FAE
-:10586000DC582D20168FF209DD0C00F10400DD1A42
-:10587000ADAD9D2912DC5928C285A2B22E24CF28B5
-:105880008C2028C685C020D10FC021D10F00000078
-:105890006C1004260A001BDC9F15DC4928206517C4
-:1058A000DC46288CFE6480940C4D110DBD082CD272
-:1058B000F52BD2F42ED2F77CB13DB4BB2BD6F47BC2
-:1058C000E9052BD2F62BD6F47CB92C2AD2F62AD6AF
-:1058D000F52AD6F406E4310002002872822AFAFF83
-:1058E000004104290A012F510200991A0A9903095B
-:1058F00088012876820FE4312624652BD2F48E5C51
-:105900002CD2F5B0EE9E5C7BCB1629D2F62FD2F7C7
-:105910000CB80C09FF0C08FF0C0F2F14C8F960001D
-:10592000320BCA0C0A2A14CEA92B5102C0C20CBBDE
-:10593000020B0B4F2B55020BE431D10F00DB30DA99
-:10594000205BFF941BDC7464AF5D0C4D11ADBD6337
-:10595000FFA8000006E4310002002F728218DC303C
-:105960002E510208FF022F76820EE431D10F000083
-:105970006C1004C03003E43116DC1015DC11000299
-:105980000024628274472118DC64875C084801287F
-:105990006682CD7319DC620C2A11AA99229283299E
-:1059A00092847291038220CC292B51020BE431C0E6
-:1059B00020D10F001FDC5B2E51020FEE012E55028D
-:1059C0000EE431B02DB17C9C5C12DC5608DD112D4B
-:1059D000561DD10F6C10061BDBF71EDBF922B00041
-:1059E0001ADC526F23721DDC39C04818DC511FDCF1
-:1059F0004FDC10D5C083F000808600508A6D4A4F7E
-:105A00000F35110D34092440800B560A296294B1D8
-:105A1000330E55092251480F44110C440A8740099E
-:105A2000A80C02883622514907883608770CA899B5
-:105A30002966949740296295874109A80C02883607
-:105A400007883608770CA899296695974103034281
-:105A5000B13808084298F0D10F1CDC3613DC372728
-:105A6000B0002332B5647057C091C0D016DC351534
-:105A7000DC33C0402AC00003884328C4006D793C51
-:105A8000004104B14400971A7780148E502FB295CC
-:105A90002DB695AFEE2EED2006EE369E5060001826
-:105AA00077A00983509D5023B69560000223B295DC
-:105AB000223D2006223622B695B455B8BBD10F0040
-:105AC00003884328C400D10F6C1004C04004E431A3
-:105AD00015DC1D000200885013DC1CCB815BFFBD70
-:105AE0001CDC1B0C2D11ADCC2BC2822AC28394501E
-:105AF0007BAB142EC28429C2850ABD0C0E990C0DF5
-:105B0000990C0929146000050BA90C092914993076
-:105B100015DBAC2A51020AE4312A2CFC58004B2B2D
-:105B200032000AA2022BBCFF9B30CCB6C8A4D2A084
-:105B3000D10F000004E4311EDBA00002002DE28240
-:105B40002FBAFF2C51020FDD012DE6820CE431D17A
-:105B50000F0000006C1004D10F0000006C1004C096
-:105B600020D10F006C100413DBFAC0D103230923EA
-:105B7000318FC0A06F340260008D19DB8F1BDB906A
-:105B800017DBF30C2811A8772672832572822CFA72
-:105B9000FF76514788502E7285255C0425768275E4
-:105BA000E9052572842576827659292E72842E760F
-:105BB000822E76830AE431000200239282002104BF
-:105BC0002FB10200D61A0C66030633012396820F0A
-:105BD000E43126728325728260000200D8A07659D3
-:105BE000220AE43100020023928200210400D21A2A
-:105BF0002FB1020C22030232012296820FE431D22D
-:105C000080D10F00D280D10FC020D10F6C1004DBE7
-:105C100030862015DB68280A00282502DA2028B003
-:105C2000002CB00705880A28824C2D0A010B800041
-:105C3000DBA065AFE61ADB610A4A0A29A2A3C7BF47
-:105C4000769101D10F2BA6A3D10F00006C1004C0D8
-:105C5000D1C7CF1BDB5B19DB5817DB560C2811A80B
-:105C60007786758574C0A076516288508E77B4555A
-:105C7000957475E903857695747659278F769F75A7
-:105C80009F740AE431000200239282B42E2FB102E5
-:105C900000E10400D61A0C66030633012396820F36
-:105CA000E431867583747639280AE4310002002EC7
-:105CB0009282B42200210424B10200DF1A0CFF03F7
-:105CC0000FEE012E968204E431D280D10FD8A07657
-:105CD00051D6D280D10F00006C1004290A801EDB3F
-:105CE0005D1FDB5D1CDB350C2B11ACBB2C2CFC2DA4
-:105CF000B2850FCC029ED19CD0C051C07013DB592D
-:105D000014DB5818DB562AB285A82804240A234637
-:105D100091A986B8AA2AB685A98827849F25649F59
-:105D2000D10F00006C100419DB8B0C2A11A9A98972
-:105D300090C484798B761BDB79ABAC2AC2832CC2EE
-:105D4000847AC1688AA02BBC30D3A064A05E0B2BE0
-:105D50000A2CB2A319DB4268C0071DDB7FD30F7D7D
-:105D6000C94AA929299D0129901F68913270A6036B
-:105D7000D3A0CA9E689210C7AF2AB6A32A2CFC5B98
-:105D8000FFB3D230D10F000013DB7503A3018C31B8
-:105D90001DDB130C8C140DCC012CB6A363FFDC00AF
-:105DA000C020D10FDA205BFFCCC020D10FC020D1A2
-:105DB0000F0000006C1004DB30C0D019DAFEDA20CE
-:105DC00028300022300708481209880A28824CDC53
-:105DD000200B80001BDAF90C4A11ABAA29A2840916
-:105DE000290B29A684D10F006C1004C04118DAF2E7
-:105DF00017DAF40C2611A727277038A866256286C3
-:105E0000007104A35500441A75414822628415DBD1
-:105E10001502320BC922882117DAF10884140744CD
-:105E200001754905C834C020D10FD10F0809471D9D
-:105E3000DB4AC0B28E201FDADF0E0E43AFEC2BC45C
-:105E4000A00FEE0A2DE6242A6284C0200A990B29AD
-:105E50006684D10FC020D10F6C1004DB30C0D01885
-:105E6000DAD5DA2025300022300708580A28824C7B
-:105E7000DC200B80008931709E121BDACF0C4A1196
-:105E8000ABAA29A28409290B29A684D10F09C952DA
-:105E900068532600910418DACAC0A12F811600AAFF
-:105EA0001A0AFF022F85161EDAC40C4D11AEDD2C26
-:105EB000D2840C2C0B2CD684D10FC0811FDAC1B830
-:105EC0009A0A0A472EF11600A10400881A08EE0269
-:105ED0002EF5161DDAB90C4C11ADCC2BC2840B2B50
-:105EE0000B2BC684D10F00006C1004DB30C0D0191E
-:105EF000DAB1DA2028300022300709880A28824CDB
-:105F0000DC200B80001CDAAC0C4B11ACBB2AB28439
-:105F10000A2A0B2AB684D10F6C1004C04118DAA6E5
-:105F200016DAA80C2711A626266038A87225228624
-:105F3000006104A35500441A7541082222840232EC
-:105F40000BD10F00C020D10F6C100415DB050249E6
-:105F5000142956112452120208430F8811C07300ED
-:105F6000810400361A008104C78F00771A0877036E
-:105F7000074401064402245612D10F006C10066E2D
-:105F800023026000AC6420A7C0A0851013DADD16E0
-:105F9000DAF4C040A6AA2BA2AE0B19416490666841
-:105FA000915D68925268933C2AA2AA283C7F288C73
-:105FB0007F0A0A4D2980012880002AACF208881146
-:105FC0000988027589462B3D0129B0002BB00108D4
-:105FD00099110B99027A9934B8332A2A00B1447284
-:105FE00049B160004A7FBF0715DADF63FFB90000DF
-:105FF000253AE863FFB10000253AE863FFA90000F5
-:10600000250A6463FFA1C05A63FF9C0000705F080B
-:106010002534FF058C142C34FE70AF0B0A8D142E22
-:106020003D012AE4012DE400DA405BFD5063FFA747
-:10603000D10FD10F6C10041ADA6219DA5F1CDACAB8
-:106040001BDACBC080C07160000D00000022A438B4
-:10605000B1AA299C107B915F26928679C2156E6247
-:1060600062C0206D080AB12200210400741A764B28
-:10607000DB63FFEE2292850D6311032514645FCF6D
-:10608000D650032D436DD9039820B4220644146DD5
-:106090004922982098219822982398249825982678
-:1060A000982798289829982A982B982C982D982EDC
-:1060B000982F222C4063FF971EDA4027E68027E6C0
-:1060C00081D10F00C02063FF830000006C1004C06A
-:1060D00062C04112DA3B1ADA3713DA522AA00023DF
-:1060E000322D19DA9F2BACFE2992AE6EA30260000E
-:1060F0008E090E402D1AC2C2CD0EDC392C251A6431
-:10610000B0895BFF9E15DA9A1ADA952B3AE80A3ABB
-:10611000015805922B211A0ABB28D3A09B50580581
-:10612000A92B52000ABB082A0A005805A815DA91C3
-:106130002D21022C3AE80C3C2804DD022D25029C7E
-:10614000505805A08B50AABBC0A15805A01CDA8AE4
-:106150002D21020C3C2806DD0213DA882D25029C35
-:10616000305805988B30AABBC0A25805982A210246
-:10617000C0B40BAA020A0A4F2A25025805ACD10F57
-:10618000242423C3CC2C251A63FF760018DA801C44
-:10619000DA7C19DA7D1BDA7B17DA4F85202E0AFDAF
-:1061A0001FDA7C2D203624F47A24F47E24F4820E27
-:1061B000DD0124F4862E0AF707552806DD02C07596
-:1061C0000EDD01050506AB5BA959C0E8AC5C24C433
-:1061D000AB0EDD0227C4AC2E0ADFA85527B4EC0EA7
-:1061E000DD0124B4EBC2E027942C0EDD0224942BB5
-:1061F0002E0A800D0D4627546C24546B0EDD022DA3
-:10620000243663FEFC0000006C10042A0A302B0ABE
-:10621000035BFF4D12DA53C390292616C3A1C0B306
-:10622000C08A2826175BFF48C03CC3B12B26161A2C
-:10623000D9E42AA02023261764A079C3A2C0B15BA9
-:10624000FF42C3A2C0B15BFF40C3C22C2616C2AF3F
-:10625000C0B12326175BFF3CC28F282616C0FE2F35
-:106260002617C2E22E26162A0AA1C0B1C0D82D26B2
-:10627000175BFF352A0AA12A2616C3A6C0B3C1920E
-:106280002926175BFF31C3C62C2616C1B32A0AA2E2
-:106290002B2617C0B35BFF2C290AA2292616C1851D
-:1062A000282617C2FB2F2616C0E72E26171DDA391F
-:1062B0002D2610D10FC3A2C0B35BFF2363FF820062
-:1062C0006C10041CDA031BD9ED18DA3317DA341614
-:1062D000DA3415DA34C0E0C0D414D9FF1FD9B9C0FC
-:1062E000288FF06D2A36DAC0D9C07C5B020FC90C4A
-:1062F0001CD9F90C9C28A8C3A6C22A36802A25845A
-:10630000A4C2A7CC2D248C2B248A2B24872E248B4B
-:10631000B1BB2E369F2C369E2C369DB1AC1CD9D7E6
-:106320001BDA22C0286D2A33DAC0D9C07C5B020F89
-:10633000C90C1CD9E80C9C28A8C3A6C22A36802BFD
-:106340002584A4C2B1BBA7CC2D248C2E248B2A2457
-:106350008A2E369F2C369E2C369DB1ACC07919D929
-:10636000D81BDA1413DA121ADA1218DA1314D9D97C
-:1063700016DA1304F42812DA1204660C040506A2D5
-:1063800052A858AA5AA3539B3029A50027848AC033
-:1063900091C0A52A848C29848B17DA0B18DA0AA7F6
-:1063A0005726361D26361E2E361F16DA0813DA0833
-:1063B000A65504330C2826C82E75002D54AC2E5437
-:1063C000AB2E54AA2326E62326E52E26E7D10F007E
-:1063D0006C100613D99417D9E224723D2232937FB0
-:1063E0002F0B6D08052832937F8F0263FFF3C0C423
-:1063F000C0B01AD973C051D94004593929A4206EAC
-:1064000044020BB502C3281ED96EDDB025E4220577
-:106410002D392DE421C0501ED9EF19D9DF18D9DF4D
-:1064200016D9E11DD9ED94102A724517D9AB6DA983
-:106430004BD450B3557A5B17DF50756B071FD9608B
-:106440008FF00F5F0C12D9A302F228AE2222D68160
-:10645000D54013D9A0746B0715D95A855005450C42
-:10646000035328B145A73FA832A93322369D2236CF
-:106470009E2436802B369F2BF48B2CF48C14D969F8
-:1064800024424DC030041414C84C6D0806B13304C6
-:106490001414C84263FFF20015D947C44000310408
-:1064A0001AD948C0D193A200DD1AC138B0DD9DA32E
-:1064B00018D95D2B824D29824E29A5202882537A36
-:1064C000871E2C54008E106FE45D12D93D2F2121C0
-:1064D0002321202F251F04330C23252023251ED103
-:1064E0000FC06218D99F88807E87D98910265400F2
-:1064F0006F94191BD9332AB1200A1A1404AA0C2A42
-:10650000B5202AB5212AB51E2AB51FD10F1BD92CBB
-:106510002AB1200A1A1403AA0C2AB5202AB5212A66
-:10652000B51E2AB51FD10F001CD9262BC1212DC1A4
-:10653000202BC51F03DD0C2DC5202DC51ED10F003E
-:106540006C100619D91F14D98612D93615D9A3C7CC
-:106550003FC0E02E56A82E56A92E56AA2E56AB2383
-:10656000262918D946DB101CD99DC0D42A42452DB6
-:1065700016012C160000B0890A880C98905BFF94D5
-:106580002C22E318D90F0C5C149C842B22E48C84FD
-:10659000B1BB0B5B140CBB0C9B852A22E50A5A1479
-:1065A0002A86062922CD0959142986072F22892FE8
-:1065B00086095BFF435BFF1423463BC1B01ED90035
-:1065C0001DD9602AE1022D463A0BAA020A0A4F2A77
-:1065D000E5025804965BFEBD5BFE96C050C0B01647
-:1065E000D8F614D8FE17D96FC0C0C73E93122C2618
-:1065F0002DC0306000440000007F9F0FB155091985
-:1066000014659FF4C0500AA9027FA7EF18D8EADAF0
-:106610005008580A28822C2B0A000B8000005104D5
-:10662000D2A0C091C7AF00991A0A99039912CE3827
-:1066300064206BD3202B20072516032C12022A621C
-:10664000827CA86318D8DC01110208580A28822C21
-:10665000DA500B8000D2A0643FD58A310A8A140434
-:10666000AA01C82A2B22010B8B1404BB017BA9456C
-:10667000DDA07A7B081DD8D22DD2000DAD0CDB3009
-:1066800019D8CD1AD91488130ADA28DC801DD951FB
-:1066900009880A28823C0DAA080B8000652F93D335
-:1066A00020C0B063FF9400007FAF34B155005004A8
-:1066B0000A091963FF42DAB07B7B081AD8C12AA203
-:1066C000000ABA0C1BD9048C310BAB280C8A141CA1
-:1066D000D941ACBB1CD94104AA012BC68163FF8FF1
-:1066E000645F60C050C0B0C7CE9C1263FF5500000D
-:1066F0006C100427221EC08008E4311BD8AF0002B2
-:10670000002AB28219D8AF003104C06100661A298C
-:1067100091020A6A022AB68209E43115D90C0C38B2
-:1067200011A8532832822432842A8CFC7841102903
-:1067300021022A368297A0096902292502D10F0079
-:106740002B21022C32850B6B022CCCFC2C36829731
-:10675000C02B2502D10F00006C1004C0E71DD89299
-:106760001CD8940D4911D7208B228A200B4B0BD2B9
-:10677000A007A80C9B72288CF4C8346F8E026000AE
-:10678000A31FD88AA298AF7B78B334C93DC081C01B
-:10679000F0028F380F0F42C9FA2CD67ED5206D4AF1
-:1067A0000500308800508C887008980878B16DD248
-:1067B000A09870D10FC0F0038F387FE0DE63FFD860
-:1067C000027B0CAFBB0B990C643047D830C0F1C0D2
-:1067D0005002F5380505426450792CD67E0B3612EE
-:1067E0002F6C100F4F366DFA0500808800208C0644
-:1067F000440CC081C05003B208237C0C03853805CB
-:10680000054264505A2CD67ED30F6D4A050020886D
-:1068100000308CD2A0A798BC889870D10FD2A0BCB1
-:10682000799970D10FD2302BAD08C0F1C0500BF563
-:1068300038050542CB542CD67E083F14260A100F8B
-:10684000660C0646366D6A0500208800B08C8270A2
-:1068500063FF2D00C05003F53875E08063FF7A00B8
-:10686000C06002863876E09F63FF9900C05003F550
-:106870003875E0C463FFBE006C1004D62068520F68
-:10688000695324DA20DB30DC405800F7D2A0D10F66
-:10689000DA20DB30DC405800F49A2424240EC02196
-:1068A00022640FC020D10F00B83BB04C2A2C748951
-:1068B000242D200E2E200FA4DDB1EE2E240FB0DDEE
-:1068C0002D240E2890072D9003A488B088B1DD2DCB
-:1068D00094032894075BFFA069511DC0E082242A1D
-:1068E000600F18D8BF2A240329600E8F202924079F
-:1068F00008FF029F209E64D10FC020D10F0000002E
-:106900006C1004942319D8B7C0B3083A110BAA022B
-:10691000992019D8299A2116D827C05028929D2548
-:1069200064A2288C1828969DD10F00006C100428B2
-:106930002066C038232406B788282466D10F0000BB
-:106940006C10060D3C111AD819D820035B0C862256
-:106950000D55118221AA8902320B928105630C9395
-:10696000820C550C792B54CB531CD8111DD80FC059
-:10697000F7A256C031C0A0043A380A0A42769343BF
-:10698000044302C9AB2CD67ED30F6DBA0500208814
-:1069900000308C8281A25272917D92818382C83EA6
-:1069A000D10FC071C06002763876F0DB63FFD5008E
-:1069B000C020BC89998199809282D10F222DF892B2
-:1069C0008163FFA219D7FA02860CA9669611D940F5
-:1069D000063612961006BB0C64A0442CD67E8A1094
-:1069E000D30F6DAA0500208800908CBC828311C053
-:1069F000E0A433240A01034E380E0E42CAEC2CD612
-:106A00007E6DBA0500208800308C821102520CA2E3
-:106A100082BC22928163FF83BC82928163FF7C00EF
-:106A2000C06002363876F0B563FFAF00C070024731
-:106A30003877F0CC63FFC6006C100414D7EBC1525A
-:106A4000A424CA3128221D73811C292102659016B5
-:106A50002A300075A912022A02033B022C3007C01B
-:106A6000D25801D5653FDCD10F2B300703BB0B0B90
-:106A7000BA0274B3022ABDF8D3A063FFC4000000B9
-:106A80006C1004292006C0706E9741292102C08F26
-:106A90002A2014C0B62B240606AA022A24147980C0
-:106AA000022725022A221E2C221D7AC10EC8ABDA2B
-:106AB00020DB302C0A00033D025BF7F96450892D7E
-:106AC00021020D0D4CC9D3C020D10F00002E9CFB1C
-:106AD00064E0962F21020F0F4C65F0A51AD7B71E60
-:106AE000D7B529A29EC08A798B712BE22668B004A3
-:106AF0008C207BC96629A29D1FD7B264905D9790B8
-:106B0000C0C31DD7C62B21049D9608BB110CBB0228
-:106B10009B919B971CD7C3C08527E4A22BA29D28DD
-:106B200024068DFA282102B0DD2BBC302BA69D9DBA
-:106B3000FA0C8802282502C8D2C020D10F8EF91283
-:106B4000D7B92E2689C020D10F283000688938DABD
-:106B500020DB30DC4058004463FF6300022A022B34
-:106B60000A065800D3220A00D10F655010293000C0
-:106B7000689924022A02033B02DC4058003BC020F3
-:106B8000D10FD270D10F00002A2C74033B02044CA9
-:106B9000025BFEF163FF2700DB30DC402A2C745BD4
-:106BA000FEEEC020D10F00006C1004C83F8926887B
-:106BB00029A399992609880C080848282525CC522C
-:106BC000C020D10FDB402A2C745BF92FD2A0D10F4B
-:106BD0006C1004D820D73082220D451105220C926A
-:106BE0008264207407420B13D771D420A3837323CC
-:106BF00002242DF8858074514CBC82C0906D08161B
-:106C000000408800708C773903D720C0918680744B
-:106C10003901D42074610263FFE2CA98C097C04171
-:106C20001BD7F2C0A00B8B0C0B4A380A0A42C9AA28
-:106C30001DD75E1CD75F2CD67EC140D30F6D4A0591
-:106C400000208800308C9780D270D10FBC8FC0E0BC
-:106C50000F4E387E90E263FFD6BC8292819280C054
-:106C6000209282D10F0000006C1006C0D71CD74EB6
-:106C70001BD7500D4911D7202E221F28221D0E4E42
-:106C80000BD280078A0C2E761F2AAC80C8346FAED8
-:106C9000026000CB2F0A801AD754A29EAA7A7EA344
-:106CA0003FC93FC0E1C05002E538050542CA552B37
-:106CB000C67EDB20D30F6D4A0500308800B08C2ED5
-:106CC000721DAE9E0EA50C645086D2802E761DC01D
-:106CD00091298403D10FC05003E53875D0D363FFE9
-:106CE000CD15D741027E0CA5EE643051C0A1250A16
-:106CF0000002A538033A020505426450922BC67E75
-:106D00000E35129510255C10054536D30F6D5A05CA
-:106D100000A08800208CC0A1A3E2C05023FA800309
-:106D2000730C03A538AF730505426450722BC67E01
-:106D3000851005450C6D5A0500208800308CD280E6
-:106D4000C0A10E9B0CAB7BAFBB2B761D2A8403D15D
-:106D50000FD280C0C1AF7D2D761D2C8403D10F00D2
-:106D6000D2302E8D08C0F1C0500EF538050542CB4B
-:106D7000592BC67E0A3F14C1600F660C064636D3F7
-:106D80000F6D6A0500208800E08C22721D63FF03EE
-:106D9000C061C05003653875D80263FF6263FF5C51
-:106DA000C05002A53875D08763FF8100C06003F62C
-:106DB0003876D0BF63FFB9006C10042A2015292053
-:106DC0001614D6FF0A990CCB9D2E200B04ED092B2F
-:106DD000D1208F2809BC36ACAA0CBB0C2BD5200ABD
-:106DE0000A472A2415CAAF8B438942B0A8009104F0
-:106DF00000881AA8FF0FBB029B278F260FB80C78BC
-:106E00003B1AC020D10F0000292102C0A20A99021A
-:106E1000292502C021D10F008B2763FFDC2BD12055
-:106E20000CAA0C0A0A472A2415ACBB2BD520C9AEE4
-:106E30008B438C288F42B0AD00F10400DD1AADCC3D
-:106E40000CBB029B27DA20B7EB580019C021D10FE9
-:106E50009F2763FFEF0000006C100428203C643083
-:106E60004705306000073E01053EB156076539050C
-:106E70004928C77FA933030641076603B1660606A2
-:106E800041A6337E871E222125291AFC732B150269
-:106E9000380C09816000063E01023EB124064239E9
-:106EA00003220AD10FD230D10FC05163FFC00000BE
-:106EB0006C100427221EC08008E4311DD6BF0002DA
-:106EC000002CD2821BD6BF003104C06100661A2B91
-:106ED000B1020C6C022CD6820BE43119D7440C3A67
-:106EE00011AA932832829780253282243284B455A5
-:106EF00025368275410A292102096902292502D114
-:106F00000F2A21022B32830A6A022B36822A25029B
-:106F1000D10F00006C100418D6A80C2711087708B0
-:106F2000267286253C04765B1315D6A405220A2218
-:106F300022A3682002742904227285D10FC020D1B7
-:106F40000F0000006C100419D6A727221EC080096C
-:106F5000770208E4311DD6980002002CD2821BD69D
-:106F600098003104C06100661A2BB1020C6C022C2F
-:106F7000D6820BE43119D71D0C3A11AA932832821C
-:106F80009780253282243284B45525368275410B90
-:106F90002A21020A6A022A2502D10F002B21022C83
-:106FA00032830B6B022C36822B2502D10F0000009E
-:106FB0006C10041BD6810C2A11ABAA29A286B43806
-:106FC000798B221BD67E19D6A50B2B0A2BB2A309CF
-:106FD000290868B00274B90D299D0129901F6E928D
-:106FE0000822A285D10FC020D10FC892C020D10F96
-:106FF000DA205BEE88C020D10F0000006C10041472
-:10700000D66E28429E19D66B6F88026000BA29920C
-:10701000266890078A2009AA0C65A0AC2A429DC068
-:10702000DC64A0A42B200C19D6650CBC11A4CC2EBA
-:10703000C28609B90A7ED30260009A2992A3689099
-:10704000078D2009DD0C65D08C25C2856450862D06
-:107050002104C0306ED80D2C2066B8CC0C0C472C07
-:10706000246665C07B1CD6E218D66B1AD66219D688
-:10707000731DD667C0E49E519D508F209357935542
-:1070800099539A569A5408FF021AD6839F5288261B
-:107090009F5A9E599D58935E9C5D935C9A5B08082D
-:1070A00048058811985FC0D81FD64C0CB911A49917
-:1070B000289285AFBF23F4CF288C402896858E2652
-:1070C0002D24069E29C020D10FCA33DA20C0B65B1A
-:1070D000FF78C72FD10FC93ADA205BFF75C72FD1D0
-:1070E0000FDBD05BFE072324662B200C63FF7500AB
-:1070F000C72FD10FC72FD10F6C1004C85B292006F2
-:1071000068941C689607C020D10FC020D10FDA20E8
-:10711000DB30DC40DD502E0A005BFE59D2A0D10FDF
-:107120002E200C18D6250CEF11A8FF29F286C08856
-:10713000798B791AD6220AEA0A2AA2A368A0048BBC
-:10714000207AB96823F2856430621BD62C290A8024
-:107150002C20682820672D21040B881104DD1108DC
-:10716000DD020DCC02C0842D4A100DCC021DD624A8
-:1071700098319D308A2B99379C340BAA02C0C09C51
-:10718000359C369A322A2C74DB4028F285C0D328ED
-:107190008C2028F6852C25042D24061FD60FDD40D3
-:1071A000AFEE2CE4CF5BFDE6D2A0D10F00DA20DBFE
-:1071B000E05BFF3FC020D10F6C100AD6302A2006BA
-:1071C00024160128ACF86583862B2122C0F22A21DF
-:1071D00024CC572AAC010A0A4F2A25247ABB026024
-:1071E000037F2C21020C0C4C65C3192E22158D3205
-:1071F000C0910EDD0C65D39088381ED5EF64836B8B
-:107200008C37C0B8C0960CB9399914B49A9A120D3B
-:10721000991199138F6718D5EAC9FB2880217F83BC
-:10722000168B142C22002A200C5BFF61D4A064A3CF
-:10723000B38F6760002800002B200C89120CBA1154
-:10724000AEAA2CA2861DD5DD7C9B3E0DBD0A2DD29B
-:10725000A368D00488207D893024A28564436427F4
-:10726000212E07F73607F90C6F9D01D7F0DA20DBE6
-:1072700070C1C42D211F5BFEF889268827DDA00977
-:10728000880C7A8B179A10600006C04063FFCC0010
-:1072900000DA208B105BFEC88D1065A267C0E09EEF
-:1072A000488C649C498B658A669B4A9A4B97458FAC
-:1072B000677F7302600120CD529D10DA20DB302CF5
-:1072C00012015BFE698D10C051D6A08FA7C0C08A85
-:1072D00068974D9A4C8869896A984E994F8E6A8A48
-:1072E00069AE7E77EB01B1AA9E6A9A698B60C0A0F5
-:1072F0000B8E1477B701C0A1C091C08493159D1760
-:107300009516C0D025203CC030085801089338C0DD
-:1073100082083310085B010535400B9D3807DD10EE
-:107320000BAB100E19402A211F07991003DD020D27
-:10733000BB020553100933020A55112921250A2AD7
-:10734000140929140499110A99020933028A2B2974
-:1073500021040BAA021BD6270899110955020855CA
-:10736000020BAA029A408920881408991109880200
-:1073700019D5A61DD62109880298418B2A934695D6
-:107380004783150DBB0285168D179B448A65896658
-:10739000AACAA97C77CB01B1AA07FB0C9C669A65A7
-:1073A00088268E29AD87972607EE0C0E0E482E25CF
-:1073B000259B672B200C87131ED5800CB911AE9925
-:1073C000289285A78828968517D584C090A7BB29C1
-:1073D000B4CF871863FE3C008C60C0E0C091C0F061
-:1073E000C034C0B82A210428203C08AA110B8B0104
-:1073F000038301039F380B9B39C03208FF100388B9
-:1074000001089E380C881407EE100FEE0203880165
-:1074100008983905BF1029211F0ABB1107881008D9
-:10742000FF020BAA0218D57809291403AA022B21FE
-:107430002583200B2B1404BB110833110FBB020B47
-:1074400099028B148F2A0B33020833028B2B647042
-:10745000868868974D984C8769886A9341994697C2
-:107460004E984FC07077C701C0719A4718D5E30B8B
-:107470007C100CEC0208F802984418D5E00CBC0211
-:1074800008CC029C402A200C295CFEC0801FD54AF3
-:107490001CD5520CAE112B2124ACAAAFEEB0BB8F81
-:1074A000132CE28528A4CFAFCC2CE6852A22152BFD
-:1074B0002524B1AA2A26156490DBC9D28F262E2254
-:1074C000090DFF082F26060FEE0C0E0E482E25255F
-:1074D0006550E4C020D10F00C07093419F4499468D
-:1074E0009A4777C70A1CD5362CC022C0810C873832
-:1074F0001CD5C40B781008E80208B8020C88029862
-:107500004063FF8000CC57DA20DB608C115BFDD636
-:10751000292102689806689403C020D10F2B221EEF
-:10752000C0A029221D2A25027B9901C0B064BFE8B2
-:1075300013D5212CB00728B000DA2003880A28824E
-:107540004CC0D10B8000DBA065AFE763FFCA000031
-:1075500068A779DA20DB30DC40DD505BFEE7D2A0A3
-:10756000D10FC16DC19D29252C60000429252CD681
-:10757000902624672F2468DA20DB308C11DD502E12
-:107580000A805BFD3FD2A0D10FC168C1A82A252C7B
-:1075900063FFDD000000C8DF8C268B29ADCC9C2664
-:1075A0000CBB0C0B0B482B25252A2C74DB602C12F2
-:1075B000015BFD87D2A0D10F2A2C748B115BF6B230
-:1075C000D2A0D10FDA205BFE3A63FF3800DA20C088
-:1075D000B15BFE8A64ABF1655F352D2124B1DD2DF1
-:1075E000252463FF1FDA202B200C5BFE5663FF145B
-:1075F00012D5858220028257C82163FFFC12D581F3
-:1076000003E83004EE3005B13093209421952263D5
-:10761000FFFC000010D57D910092019302940311AC
-:10762000D554821001EA30A21101F031C04004E4C7
-:107630001600020011D5768210234A00032202921E
-:107640001011D540C021921004E4318403830282DA
-:1076500001810000D23001230000000010D56D919F
-:107660000092019302940311D543821001EA30A2E3
-:107670001101F131C04004E41600020011D564820A
-:107680001013D4E7032202921004E431840383022E
-:107690008201810000D330013300000010D55E91DB
-:1076A00000810165104981026510448103CF1F925A
-:1076B000019302940311D531821001EA30A2110125
-:1076C000F231C04004E41600020011D550821013BC
-:1076D000D4CF032202921004E43184038302820196
-:1076E000C010910391029101810000D43001430048
-:1076F00012D500C03028374028374428374828376B
-:107700004C233D017233ED03020063FFFC000000D7
-:1077100010D542910092019302940311D54082103A
-:10772000921011D4F28310032202921011D53D124F
-:10773000D5049210C04004E41600020011D5348232
-:107740001013D4EB032202921004E4318403830269
-:107750008201810000D53001530000006C10026EE0
-:10776000322FD620056F04043F04745B2A05440CB5
-:1077700000410400331A220A006D490D73630403AB
-:10778000660CB1220F2211031314736302222C0121
-:10779000D10FC83BD10F000073630CC021D10F0083
-:1077A0000000000044495630C020D10F6C10020088
-:1077B00040046B4C07032318020219D10F0203196E
-:1077C000C020D10F6C100202EA30D10F6C1002CC35
-:1077D0002503F03160000F006F220503F1316000D6
-:1077E000056F230503F231000200D10F6C1002CCAB
-:1077F0002502F030D10F00006F220402F130D10FCA
-:107800006F230402F230D10FC020D10F6C1002227E
-:107810000A20230A006D280E2837402837442837CD
-:107820004828374C233D01030200D10F6C1002029F
-:10783000E431D10F0A0000004368656C73696F2062
-:1078400046572044454255473D3020284275696CD3
-:1078500074204D6F6E204D61722020382031373AF0
-:1078600032383A3135205053542032303130206F85
-:107870006E20636C656F70617472612E61736963F1
-:1078800064657369676E6572732E636F6D3A2F68F6
-:107890006F6D652F66656C69782F772F66775F3718
-:1078A0002E392D6977617270292C205665727369A3
-:1078B0006F6E2054337878203030372E30612E3080
-:1078C00030202D20313030373061303010070A0041
-:0478D0000BDFE8756D
-:00000001FF
index 9a1d426307510135444da722883e570fe52410b9..15b6791662011ed9c1269095c4c80d84c461c958 100644 (file)
@@ -37,7 +37,7 @@ static struct posix_acl *__v9fs_get_acl(struct p9_fid *fid, char *name)
                        return ERR_PTR(-ENOMEM);
                size = v9fs_fid_xattr_get(fid, name, value, size);
                if (size > 0) {
-                       acl = posix_acl_from_xattr(value, size);
+                       acl = posix_acl_from_xattr(&init_user_ns, value, size);
                        if (IS_ERR(acl))
                                goto err_out;
                }
@@ -131,7 +131,7 @@ static int v9fs_set_acl(struct dentry *dentry, int type, struct posix_acl *acl)
        buffer = kmalloc(size, GFP_KERNEL);
        if (!buffer)
                return -ENOMEM;
-       retval = posix_acl_to_xattr(acl, buffer, size);
+       retval = posix_acl_to_xattr(&init_user_ns, acl, buffer, size);
        if (retval < 0)
                goto err_free_out;
        switch (type) {
@@ -251,7 +251,7 @@ static int v9fs_xattr_get_acl(struct dentry *dentry, const char *name,
                return PTR_ERR(acl);
        if (acl == NULL)
                return -ENODATA;
-       error = posix_acl_to_xattr(acl, buffer, size);
+       error = posix_acl_to_xattr(&init_user_ns, acl, buffer, size);
        posix_acl_release(acl);
 
        return error;
@@ -304,7 +304,7 @@ static int v9fs_xattr_set_acl(struct dentry *dentry, const char *name,
                return -EPERM;
        if (value) {
                /* update the cached acl value */
-               acl = posix_acl_from_xattr(value, size);
+               acl = posix_acl_from_xattr(&init_user_ns, value, size);
                if (IS_ERR(acl))
                        return PTR_ERR(acl);
                else if (acl) {
index b85efa77394989bf8dafabb3eb01e5dfa2c64d7d..392c5dac1981be79b4aff040e552b48c78c5751f 100644 (file)
@@ -560,6 +560,11 @@ static int v9fs_init_inode_cache(void)
  */
 static void v9fs_destroy_inode_cache(void)
 {
+       /*
+        * Make sure all delayed rcu free inodes are flushed before we
+        * destroy cache.
+        */
+       rcu_barrier();
        kmem_cache_destroy(v9fs_inode_cache);
 }
 
index 2fb977934673812c52e2aa9e7ed0a392e5181a34..8938f8250320ecbde9ab3502141178ddd7492ad3 100644 (file)
@@ -11,7 +11,7 @@ obj-y :=      open.o read_write.o file_table.o super.o \
                attr.o bad_inode.o file.o filesystems.o namespace.o \
                seq_file.o xattr.o libfs.o fs-writeback.o \
                pnode.o drop_caches.o splice.o sync.o utimes.o \
-               stack.o fs_struct.o statfs.o
+               stack.o fs_struct.o statfs.o coredump.o
 
 ifeq ($(CONFIG_BLOCK),y)
 obj-y +=       buffer.o bio.o block_dev.o direct-io.o mpage.o ioprio.o
index 718ac1f440c6729c334b800375d853a5bd107366..585adafb0cc270f9e0b023b2543e1151fa83e3b4 100644 (file)
@@ -46,8 +46,8 @@ struct adfs_sb_info {
        struct adfs_discmap *s_map;     /* bh list containing map                */
        struct adfs_dir_ops *s_dir;     /* directory operations                  */
 
-       uid_t           s_uid;          /* owner uid                             */
-       gid_t           s_gid;          /* owner gid                             */
+       kuid_t          s_uid;          /* owner uid                             */
+       kgid_t          s_gid;          /* owner gid                             */
        umode_t         s_owner_mask;   /* ADFS owner perm -> unix perm          */
        umode_t         s_other_mask;   /* ADFS other perm -> unix perm          */
        int             s_ftsuffix;     /* ,xyz hex filetype suffix option */
index 1dab6a174d6a8c213d6b58bed1f1869ed6189cd4..e9bad5093a3f92f45761a534e38cae37661fddea 100644 (file)
@@ -304,8 +304,8 @@ adfs_notify_change(struct dentry *dentry, struct iattr *attr)
         * we can't change the UID or GID of any file -
         * we have a global UID/GID in the superblock
         */
-       if ((ia_valid & ATTR_UID && attr->ia_uid != ADFS_SB(sb)->s_uid) ||
-           (ia_valid & ATTR_GID && attr->ia_gid != ADFS_SB(sb)->s_gid))
+       if ((ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, ADFS_SB(sb)->s_uid)) ||
+           (ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, ADFS_SB(sb)->s_gid)))
                error = -EPERM;
 
        if (error)
index bdaec92353c2cc0e5e876115ee7a474e45c0ba4b..d5712293579376a89a99befc8eb59e5ecded02bb 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/seq_file.h>
 #include <linux/slab.h>
 #include <linux/statfs.h>
+#include <linux/user_namespace.h>
 #include "adfs.h"
 #include "dir_f.h"
 #include "dir_fplus.h"
@@ -130,10 +131,10 @@ static int adfs_show_options(struct seq_file *seq, struct dentry *root)
 {
        struct adfs_sb_info *asb = ADFS_SB(root->d_sb);
 
-       if (asb->s_uid != 0)
-               seq_printf(seq, ",uid=%u", asb->s_uid);
-       if (asb->s_gid != 0)
-               seq_printf(seq, ",gid=%u", asb->s_gid);
+       if (!uid_eq(asb->s_uid, GLOBAL_ROOT_UID))
+               seq_printf(seq, ",uid=%u", from_kuid_munged(&init_user_ns, asb->s_uid));
+       if (!gid_eq(asb->s_gid, GLOBAL_ROOT_GID))
+               seq_printf(seq, ",gid=%u", from_kgid_munged(&init_user_ns, asb->s_gid));
        if (asb->s_owner_mask != ADFS_DEFAULT_OWNER_MASK)
                seq_printf(seq, ",ownmask=%o", asb->s_owner_mask);
        if (asb->s_other_mask != ADFS_DEFAULT_OTHER_MASK)
@@ -175,12 +176,16 @@ static int parse_options(struct super_block *sb, char *options)
                case Opt_uid:
                        if (match_int(args, &option))
                                return -EINVAL;
-                       asb->s_uid = option;
+                       asb->s_uid = make_kuid(current_user_ns(), option);
+                       if (!uid_valid(asb->s_uid))
+                               return -EINVAL;
                        break;
                case Opt_gid:
                        if (match_int(args, &option))
                                return -EINVAL;
-                       asb->s_gid = option;
+                       asb->s_gid = make_kgid(current_user_ns(), option);
+                       if (!gid_valid(asb->s_gid))
+                               return -EINVAL;
                        break;
                case Opt_ownmask:
                        if (match_octal(args, &option))
@@ -275,6 +280,11 @@ static int init_inodecache(void)
 
 static void destroy_inodecache(void)
 {
+       /*
+        * Make sure all delayed rcu free inodes are flushed before we
+        * destroy cache.
+        */
+       rcu_barrier();
        kmem_cache_destroy(adfs_inode_cachep);
 }
 
@@ -369,8 +379,8 @@ static int adfs_fill_super(struct super_block *sb, void *data, int silent)
        sb->s_fs_info = asb;
 
        /* set default options */
-       asb->s_uid = 0;
-       asb->s_gid = 0;
+       asb->s_uid = GLOBAL_ROOT_UID;
+       asb->s_gid = GLOBAL_ROOT_GID;
        asb->s_owner_mask = ADFS_DEFAULT_OWNER_MASK;
        asb->s_other_mask = ADFS_DEFAULT_OTHER_MASK;
        asb->s_ftsuffix = 0;
index 6e216419f340de5219a684db6a70f99643784ac5..3952121f2f28beee731618ae915a433f16e16fa0 100644 (file)
@@ -88,8 +88,8 @@ struct affs_sb_info {
        u32 s_root_block;               /* FFS root block number. */
        int s_hashsize;                 /* Size of hash table. */
        unsigned long s_flags;          /* See below. */
-       uid_t s_uid;                    /* uid to override */
-       gid_t s_gid;                    /* gid to override */
+       kuid_t s_uid;                   /* uid to override */
+       kgid_t s_gid;                   /* gid to override */
        umode_t s_mode;                 /* mode to override */
        struct buffer_head *s_root_bh;  /* Cached root block. */
        struct mutex s_bmlock;          /* Protects bitmap access. */
index 8bc4a59f4e7ec896b6dfb29f4b252efbbc0ee933..15c4842682292784ce095d85fb01a52615f7326a 100644 (file)
@@ -80,17 +80,17 @@ struct inode *affs_iget(struct super_block *sb, unsigned long ino)
        if (id == 0 || sbi->s_flags & SF_SETUID)
                inode->i_uid = sbi->s_uid;
        else if (id == 0xFFFF && sbi->s_flags & SF_MUFS)
-               inode->i_uid = 0;
+               i_uid_write(inode, 0);
        else
-               inode->i_uid = id;
+               i_uid_write(inode, id);
 
        id = be16_to_cpu(tail->gid);
        if (id == 0 || sbi->s_flags & SF_SETGID)
                inode->i_gid = sbi->s_gid;
        else if (id == 0xFFFF && sbi->s_flags & SF_MUFS)
-               inode->i_gid = 0;
+               i_gid_write(inode, 0);
        else
-               inode->i_gid = id;
+               i_gid_write(inode, id);
 
        switch (be32_to_cpu(tail->stype)) {
        case ST_ROOT:
@@ -193,13 +193,13 @@ affs_write_inode(struct inode *inode, struct writeback_control *wbc)
                tail->size = cpu_to_be32(inode->i_size);
                secs_to_datestamp(inode->i_mtime.tv_sec,&tail->change);
                if (!(inode->i_ino == AFFS_SB(sb)->s_root_block)) {
-                       uid = inode->i_uid;
-                       gid = inode->i_gid;
+                       uid = i_uid_read(inode);
+                       gid = i_gid_read(inode);
                        if (AFFS_SB(sb)->s_flags & SF_MUFS) {
-                               if (inode->i_uid == 0 || inode->i_uid == 0xFFFF)
-                                       uid = inode->i_uid ^ ~0;
-                               if (inode->i_gid == 0 || inode->i_gid == 0xFFFF)
-                                       gid = inode->i_gid ^ ~0;
+                               if (uid == 0 || uid == 0xFFFF)
+                                       uid = uid ^ ~0;
+                               if (gid == 0 || gid == 0xFFFF)
+                                       gid = gid ^ ~0;
                        }
                        if (!(AFFS_SB(sb)->s_flags & SF_SETUID))
                                tail->uid = cpu_to_be16(uid);
index c70f1e5fc0247a616d44e60c2681bbf8fc14c747..b84dc7352502df2976438435b2523f98d5d55966 100644 (file)
@@ -147,6 +147,11 @@ static int init_inodecache(void)
 
 static void destroy_inodecache(void)
 {
+       /*
+        * Make sure all delayed rcu free inodes are flushed before we
+        * destroy cache.
+        */
+       rcu_barrier();
        kmem_cache_destroy(affs_inode_cachep);
 }
 
@@ -188,7 +193,7 @@ static const match_table_t tokens = {
 };
 
 static int
-parse_options(char *options, uid_t *uid, gid_t *gid, int *mode, int *reserved, s32 *root,
+parse_options(char *options, kuid_t *uid, kgid_t *gid, int *mode, int *reserved, s32 *root,
                int *blocksize, char **prefix, char *volume, unsigned long *mount_opts)
 {
        char *p;
@@ -253,13 +258,17 @@ parse_options(char *options, uid_t *uid, gid_t *gid, int *mode, int *reserved, s
                case Opt_setgid:
                        if (match_int(&args[0], &option))
                                return 0;
-                       *gid = option;
+                       *gid = make_kgid(current_user_ns(), option);
+                       if (!gid_valid(*gid))
+                               return 0;
                        *mount_opts |= SF_SETGID;
                        break;
                case Opt_setuid:
                        if (match_int(&args[0], &option))
                                return 0;
-                       *uid = option;
+                       *uid = make_kuid(current_user_ns(), option);
+                       if (!uid_valid(*uid))
+                               return 0;
                        *mount_opts |= SF_SETUID;
                        break;
                case Opt_verbose:
@@ -301,8 +310,8 @@ static int affs_fill_super(struct super_block *sb, void *data, int silent)
        int                      num_bm;
        int                      i, j;
        s32                      key;
-       uid_t                    uid;
-       gid_t                    gid;
+       kuid_t                   uid;
+       kgid_t                   gid;
        int                      reserved;
        unsigned long            mount_flags;
        int                      tmp_flags;     /* fix remount prototype... */
@@ -527,8 +536,8 @@ affs_remount(struct super_block *sb, int *flags, char *data)
 {
        struct affs_sb_info     *sbi = AFFS_SB(sb);
        int                      blocksize;
-       uid_t                    uid;
-       gid_t                    gid;
+       kuid_t                   uid;
+       kgid_t                   gid;
        int                      mode;
        int                      reserved;
        int                      root_block;
@@ -551,7 +560,7 @@ affs_remount(struct super_block *sb, int *flags, char *data)
                return -EINVAL;
        }
 
-       flush_delayed_work_sync(&sbi->sb_work);
+       flush_delayed_work(&sbi->sb_work);
        replace_mount_options(sb, new_opts);
 
        sbi->s_flags = mount_flags;
index 587ef5123cd8166f7906ccbeee768eaf3da8fd84..7ef637d7f3a56f22f1b9c4197cd6c94a483ef85b 100644 (file)
@@ -351,9 +351,7 @@ void afs_dispatch_give_up_callbacks(struct work_struct *work)
  */
 void afs_flush_callback_breaks(struct afs_server *server)
 {
-       cancel_delayed_work(&server->cb_break_work);
-       queue_delayed_work(afs_callback_update_worker,
-                          &server->cb_break_work, 0);
+       mod_delayed_work(afs_callback_update_worker, &server->cb_break_work, 0);
 }
 
 #if 0
index d59b7516e943ba2617f84e26088a2da42857d4e9..f342acf3547d0d06148335df01ab922939876b02 100644 (file)
@@ -285,12 +285,7 @@ static void afs_reap_server(struct work_struct *work)
                expiry = server->time_of_death + afs_server_timeout;
                if (expiry > now) {
                        delay = (expiry - now) * HZ;
-                       if (!queue_delayed_work(afs_wq, &afs_server_reaper,
-                                               delay)) {
-                               cancel_delayed_work(&afs_server_reaper);
-                               queue_delayed_work(afs_wq, &afs_server_reaper,
-                                                  delay);
-                       }
+                       mod_delayed_work(afs_wq, &afs_server_reaper, delay);
                        break;
                }
 
@@ -323,6 +318,5 @@ static void afs_reap_server(struct work_struct *work)
 void __exit afs_purge_servers(void)
 {
        afs_server_timeout = 0;
-       cancel_delayed_work(&afs_server_reaper);
-       queue_delayed_work(afs_wq, &afs_server_reaper, 0);
+       mod_delayed_work(afs_wq, &afs_server_reaper, 0);
 }
index df8c6047c2a12c41e5cd2935066e9b0397226d9a..43165009428da56c51b47ae9d8bb4b97310c485f 100644 (file)
@@ -123,6 +123,11 @@ void __exit afs_fs_exit(void)
                BUG();
        }
 
+       /*
+        * Make sure all delayed rcu free inodes are flushed before we
+        * destroy cache.
+        */
+       rcu_barrier();
        kmem_cache_destroy(afs_inode_cachep);
        _leave("");
 }
index 431984d2e372cfcb18984e46366011b12f72e9ee..57bcb1596530892e91ebf39fd12abfee0fe2dead 100644 (file)
@@ -561,12 +561,7 @@ static void afs_vlocation_reaper(struct work_struct *work)
                if (expiry > now) {
                        delay = (expiry - now) * HZ;
                        _debug("delay %lu", delay);
-                       if (!queue_delayed_work(afs_wq, &afs_vlocation_reap,
-                                               delay)) {
-                               cancel_delayed_work(&afs_vlocation_reap);
-                               queue_delayed_work(afs_wq, &afs_vlocation_reap,
-                                                  delay);
-                       }
+                       mod_delayed_work(afs_wq, &afs_vlocation_reap, delay);
                        break;
                }
 
@@ -614,13 +609,10 @@ void afs_vlocation_purge(void)
        spin_lock(&afs_vlocation_updates_lock);
        list_del_init(&afs_vlocation_updates);
        spin_unlock(&afs_vlocation_updates_lock);
-       cancel_delayed_work(&afs_vlocation_update);
-       queue_delayed_work(afs_vlocation_update_worker,
-                          &afs_vlocation_update, 0);
+       mod_delayed_work(afs_vlocation_update_worker, &afs_vlocation_update, 0);
        destroy_workqueue(afs_vlocation_update_worker);
 
-       cancel_delayed_work(&afs_vlocation_reap);
-       queue_delayed_work(afs_wq, &afs_vlocation_reap, 0);
+       mod_delayed_work(afs_wq, &afs_vlocation_reap, 0);
 }
 
 /*
index 29e38a1f7f77a15100f1d20b3e51507470273633..cce7df53b694373b6288795f14b07eb91c0894fc 100644 (file)
--- a/fs/attr.c
+++ b/fs/attr.c
@@ -14,6 +14,7 @@
 #include <linux/fcntl.h>
 #include <linux/security.h>
 #include <linux/evm.h>
+#include <linux/ima.h>
 
 /**
  * inode_change_ok - check if attribute changes to an inode are allowed
@@ -247,6 +248,7 @@ int notify_change(struct dentry * dentry, struct iattr * attr)
 
        if (!error) {
                fsnotify_change(dentry, ia_valid);
+               ima_inode_post_setattr(dentry);
                evm_inode_post_setattr(dentry, ia_valid);
        }
 
index abf645c1703bab0036dcb76db1acb570590a3cd5..a16214109d31ef8c7f0897eb6389b16872a2392d 100644 (file)
@@ -221,20 +221,6 @@ static int test_by_type(struct path *path, void *p)
        return ino && ino->sbi->type & *(unsigned *)p;
 }
 
-static void autofs_dev_ioctl_fd_install(unsigned int fd, struct file *file)
-{
-       struct files_struct *files = current->files;
-       struct fdtable *fdt;
-
-       spin_lock(&files->file_lock);
-       fdt = files_fdtable(files);
-       BUG_ON(fdt->fd[fd] != NULL);
-       rcu_assign_pointer(fdt->fd[fd], file);
-       __set_close_on_exec(fd, fdt);
-       spin_unlock(&files->file_lock);
-}
-
-
 /*
  * Open a file descriptor on the autofs mount point corresponding
  * to the given path and device number (aka. new_encode_dev(sb->s_dev)).
@@ -243,7 +229,7 @@ static int autofs_dev_ioctl_open_mountpoint(const char *name, dev_t devid)
 {
        int err, fd;
 
-       fd = get_unused_fd();
+       fd = get_unused_fd_flags(O_CLOEXEC);
        if (likely(fd >= 0)) {
                struct file *filp;
                struct path path;
@@ -264,7 +250,7 @@ static int autofs_dev_ioctl_open_mountpoint(const char *name, dev_t devid)
                        goto out;
                }
 
-               autofs_dev_ioctl_fd_install(fd, filp);
+               fd_install(fd, filp);
        }
 
        return fd;
index da8876d38a7b7e3a50101f02817cbbbc460cec20..dce436e595c19275cc7f84c79596b41605ab5c59 100644 (file)
@@ -175,8 +175,7 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
                return;
        }
 
-       pipe = sbi->pipe;
-       get_file(pipe);
+       pipe = get_file(sbi->pipe);
 
        mutex_unlock(&sbi->wq_mutex);
 
index d9a40abda6b700e6fd0c6fcf5cd65b0f97cfb193..b26642839156835ee3abf2652ebadbacf50804b1 100644 (file)
@@ -20,8 +20,8 @@ typedef u64 befs_blocknr_t;
  */
 
 typedef struct befs_mount_options {
-       gid_t gid;
-       uid_t uid;
+       kgid_t gid;
+       kuid_t uid;
        int use_gid;
        int use_uid;
        int debug;
index cf7f3c67c8b7848e6e8e55b83c3c76bde007f60a..2b3bda8d5e6838619222359de38370baedfce7a4 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/vfs.h>
 #include <linux/parser.h>
 #include <linux/namei.h>
+#include <linux/sched.h>
 
 #include "befs.h"
 #include "btree.h"
@@ -352,9 +353,11 @@ static struct inode *befs_iget(struct super_block *sb, unsigned long ino)
         */   
 
        inode->i_uid = befs_sb->mount_opts.use_uid ?
-           befs_sb->mount_opts.uid : (uid_t) fs32_to_cpu(sb, raw_inode->uid);
+               befs_sb->mount_opts.uid :
+               make_kuid(&init_user_ns, fs32_to_cpu(sb, raw_inode->uid));
        inode->i_gid = befs_sb->mount_opts.use_gid ?
-           befs_sb->mount_opts.gid : (gid_t) fs32_to_cpu(sb, raw_inode->gid);
+               befs_sb->mount_opts.gid :
+               make_kgid(&init_user_ns, fs32_to_cpu(sb, raw_inode->gid));
 
        set_nlink(inode, 1);
 
@@ -454,6 +457,11 @@ befs_init_inodecache(void)
 static void
 befs_destroy_inodecache(void)
 {
+       /*
+        * Make sure all delayed rcu free inodes are flushed before we
+        * destroy cache.
+        */
+       rcu_barrier();
        kmem_cache_destroy(befs_inode_cachep);
 }
 
@@ -674,10 +682,12 @@ parse_options(char *options, befs_mount_options * opts)
        char *p;
        substring_t args[MAX_OPT_ARGS];
        int option;
+       kuid_t uid;
+       kgid_t gid;
 
        /* Initialize options */
-       opts->uid = 0;
-       opts->gid = 0;
+       opts->uid = GLOBAL_ROOT_UID;
+       opts->gid = GLOBAL_ROOT_GID;
        opts->use_uid = 0;
        opts->use_gid = 0;
        opts->iocharset = NULL;
@@ -696,23 +706,29 @@ parse_options(char *options, befs_mount_options * opts)
                case Opt_uid:
                        if (match_int(&args[0], &option))
                                return 0;
-                       if (option < 0) {
+                       uid = INVALID_UID;
+                       if (option >= 0)
+                               uid = make_kuid(current_user_ns(), option);
+                       if (!uid_valid(uid)) {
                                printk(KERN_ERR "BeFS: Invalid uid %d, "
                                                "using default\n", option);
                                break;
                        }
-                       opts->uid = option;
+                       opts->uid = uid;
                        opts->use_uid = 1;
                        break;
                case Opt_gid:
                        if (match_int(&args[0], &option))
                                return 0;
-                       if (option < 0) {
+                       gid = INVALID_GID;
+                       if (option >= 0)
+                               gid = make_kgid(current_user_ns(), option);
+                       if (!gid_valid(gid)) {
                                printk(KERN_ERR "BeFS: Invalid gid %d, "
                                                "using default\n", option);
                                break;
                        }
-                       opts->gid = option;
+                       opts->gid = gid;
                        opts->use_gid = 1;
                        break;
                case Opt_charset:
index 9870417c26e7c43852f98b3d641445b5a94c40b1..737aaa3f709062a9d8aec826c26e956b18eb2bcf 100644 (file)
@@ -76,8 +76,8 @@ struct inode *bfs_iget(struct super_block *sb, unsigned long ino)
        BFS_I(inode)->i_sblock =  le32_to_cpu(di->i_sblock);
        BFS_I(inode)->i_eblock =  le32_to_cpu(di->i_eblock);
        BFS_I(inode)->i_dsk_ino = le16_to_cpu(di->i_ino);
-       inode->i_uid =  le32_to_cpu(di->i_uid);
-       inode->i_gid =  le32_to_cpu(di->i_gid);
+       i_uid_write(inode, le32_to_cpu(di->i_uid));
+       i_gid_write(inode,  le32_to_cpu(di->i_gid));
        set_nlink(inode, le32_to_cpu(di->i_nlink));
        inode->i_size = BFS_FILESIZE(di);
        inode->i_blocks = BFS_FILEBLOCKS(di);
@@ -139,8 +139,8 @@ static int bfs_write_inode(struct inode *inode, struct writeback_control *wbc)
 
        di->i_ino = cpu_to_le16(ino);
        di->i_mode = cpu_to_le32(inode->i_mode);
-       di->i_uid = cpu_to_le32(inode->i_uid);
-       di->i_gid = cpu_to_le32(inode->i_gid);
+       di->i_uid = cpu_to_le32(i_uid_read(inode));
+       di->i_gid = cpu_to_le32(i_gid_read(inode));
        di->i_nlink = cpu_to_le32(inode->i_nlink);
        di->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
        di->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);
@@ -280,6 +280,11 @@ static int init_inodecache(void)
 
 static void destroy_inodecache(void)
 {
+       /*
+        * Make sure all delayed rcu free inodes are flushed before we
+        * destroy cache.
+        */
+       rcu_barrier();
        kmem_cache_destroy(bfs_inode_cachep);
 }
 
index 1b52956afe33ab07889c3963ce2c41b32133483b..0225fddf49b7d9e25ae2a8361b631ba30c34a496 100644 (file)
@@ -1696,30 +1696,19 @@ static int elf_note_info_init(struct elf_note_info *info)
                return 0;
        info->psinfo = kmalloc(sizeof(*info->psinfo), GFP_KERNEL);
        if (!info->psinfo)
-               goto notes_free;
+               return 0;
        info->prstatus = kmalloc(sizeof(*info->prstatus), GFP_KERNEL);
        if (!info->prstatus)
-               goto psinfo_free;
+               return 0;
        info->fpu = kmalloc(sizeof(*info->fpu), GFP_KERNEL);
        if (!info->fpu)
-               goto prstatus_free;
+               return 0;
 #ifdef ELF_CORE_COPY_XFPREGS
        info->xfpu = kmalloc(sizeof(*info->xfpu), GFP_KERNEL);
        if (!info->xfpu)
-               goto fpu_free;
+               return 0;
 #endif
        return 1;
-#ifdef ELF_CORE_COPY_XFPREGS
- fpu_free:
-       kfree(info->fpu);
-#endif
- prstatus_free:
-       kfree(info->prstatus);
- psinfo_free:
-       kfree(info->psinfo);
- notes_free:
-       kfree(info->notes);
-       return 0;
 }
 
 static int fill_note_info(struct elfhdr *elf, int phdrs,
index 761e2cd8fed16e6046951e50504b8bb9e7acd3e4..0c16e3dbfd5681ea67d4cf2f349cb33083a46d2d 100644 (file)
@@ -61,7 +61,7 @@ struct posix_acl *btrfs_get_acl(struct inode *inode, int type)
                size = __btrfs_getxattr(inode, name, value, size);
        }
        if (size > 0) {
-               acl = posix_acl_from_xattr(value, size);
+               acl = posix_acl_from_xattr(&init_user_ns, value, size);
        } else if (size == -ENOENT || size == -ENODATA || size == 0) {
                /* FIXME, who returns -ENOENT?  I think nobody */
                acl = NULL;
@@ -91,7 +91,7 @@ static int btrfs_xattr_acl_get(struct dentry *dentry, const char *name,
                return PTR_ERR(acl);
        if (acl == NULL)
                return -ENODATA;
-       ret = posix_acl_to_xattr(acl, value, size);
+       ret = posix_acl_to_xattr(&init_user_ns, acl, value, size);
        posix_acl_release(acl);
 
        return ret;
@@ -141,7 +141,7 @@ static int btrfs_set_acl(struct btrfs_trans_handle *trans,
                        goto out;
                }
 
-               ret = posix_acl_to_xattr(acl, value, size);
+               ret = posix_acl_to_xattr(&init_user_ns, acl, value, size);
                if (ret < 0)
                        goto out;
        }
@@ -169,7 +169,7 @@ static int btrfs_xattr_acl_set(struct dentry *dentry, const char *name,
                return -EOPNOTSUPP;
 
        if (value) {
-               acl = posix_acl_from_xattr(value, size);
+               acl = posix_acl_from_xattr(&init_user_ns, value, size);
                if (IS_ERR(acl))
                        return PTR_ERR(acl);
 
index 07d5eeb1e6f1df1f8ae2ddf94218b1f45298aeda..52c85e2b95d0f7efa9cbd105bab093eca90e1b9c 100644 (file)
@@ -1715,8 +1715,8 @@ static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
                                  struct btrfs_inode_item *inode_item,
                                  struct inode *inode)
 {
-       btrfs_set_stack_inode_uid(inode_item, inode->i_uid);
-       btrfs_set_stack_inode_gid(inode_item, inode->i_gid);
+       btrfs_set_stack_inode_uid(inode_item, i_uid_read(inode));
+       btrfs_set_stack_inode_gid(inode_item, i_gid_read(inode));
        btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
        btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
        btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
@@ -1764,8 +1764,8 @@ int btrfs_fill_inode(struct inode *inode, u32 *rdev)
 
        inode_item = &delayed_node->inode_item;
 
-       inode->i_uid = btrfs_stack_inode_uid(inode_item);
-       inode->i_gid = btrfs_stack_inode_gid(inode_item);
+       i_uid_write(inode, btrfs_stack_inode_uid(inode_item));
+       i_gid_write(inode, btrfs_stack_inode_gid(inode_item));
        btrfs_i_size_write(inode, btrfs_stack_inode_size(inode_item));
        inode->i_mode = btrfs_stack_inode_mode(inode_item);
        set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
index 4c878476bb91ce0985dabc25464622442aaca54a..b08ea4717e9d70ef7967fe33c1658b669eee16ac 100644 (file)
@@ -107,6 +107,12 @@ void extent_io_exit(void)
                list_del(&eb->leak_list);
                kmem_cache_free(extent_buffer_cache, eb);
        }
+
+       /*
+        * Make sure all delayed rcu free are flushed before we
+        * destroy caches.
+        */
+       rcu_barrier();
        if (extent_state_cache)
                kmem_cache_destroy(extent_state_cache);
        if (extent_buffer_cache)
index 316b07a866d246fbd9ae1880112f3387d66bdef0..a6ed6944e50c42eb67a659c1eda6a5f17650d97e 100644 (file)
@@ -2572,8 +2572,8 @@ static void btrfs_read_locked_inode(struct inode *inode)
                                    struct btrfs_inode_item);
        inode->i_mode = btrfs_inode_mode(leaf, inode_item);
        set_nlink(inode, btrfs_inode_nlink(leaf, inode_item));
-       inode->i_uid = btrfs_inode_uid(leaf, inode_item);
-       inode->i_gid = btrfs_inode_gid(leaf, inode_item);
+       i_uid_write(inode, btrfs_inode_uid(leaf, inode_item));
+       i_gid_write(inode, btrfs_inode_gid(leaf, inode_item));
        btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item));
 
        tspec = btrfs_inode_atime(inode_item);
@@ -2651,8 +2651,8 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,
                            struct btrfs_inode_item *item,
                            struct inode *inode)
 {
-       btrfs_set_inode_uid(leaf, item, inode->i_uid);
-       btrfs_set_inode_gid(leaf, item, inode->i_gid);
+       btrfs_set_inode_uid(leaf, item, i_uid_read(inode));
+       btrfs_set_inode_gid(leaf, item, i_gid_read(inode));
        btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size);
        btrfs_set_inode_mode(leaf, item, inode->i_mode);
        btrfs_set_inode_nlink(leaf, item, inode->i_nlink);
@@ -7076,6 +7076,11 @@ static void init_once(void *foo)
 
 void btrfs_destroy_cachep(void)
 {
+       /*
+        * Make sure all delayed rcu free inodes are flushed before we
+        * destroy cache.
+        */
+       rcu_barrier();
        if (btrfs_inode_cachep)
                kmem_cache_destroy(btrfs_inode_cachep);
        if (btrfs_trans_handle_cachep)
index 9df50fa8a0781ba387553297fbe442ed964e671e..47127c1bd290b5eb91023e8877176dbf3cfb7e58 100644 (file)
@@ -575,13 +575,13 @@ fail:
 */
 static inline int btrfs_check_sticky(struct inode *dir, struct inode *inode)
 {
-       uid_t fsuid = current_fsuid();
+       kuid_t fsuid = current_fsuid();
 
        if (!(dir->i_mode & S_ISVTX))
                return 0;
-       if (inode->i_uid == fsuid)
+       if (uid_eq(inode->i_uid, fsuid))
                return 0;
-       if (dir->i_uid == fsuid)
+       if (uid_eq(dir->i_uid, fsuid))
                return 0;
        return !capable(CAP_FOWNER);
 }
@@ -1397,7 +1397,6 @@ static noinline int btrfs_ioctl_snap_create_transid(struct file *file,
                                u64 *transid, bool readonly,
                                struct btrfs_qgroup_inherit **inherit)
 {
-       struct file *src_file;
        int namelen;
        int ret = 0;
 
@@ -1421,25 +1420,24 @@ static noinline int btrfs_ioctl_snap_create_transid(struct file *file,
                ret = btrfs_mksubvol(&file->f_path, name, namelen,
                                     NULL, transid, readonly, inherit);
        } else {
+               struct fd src = fdget(fd);
                struct inode *src_inode;
-               src_file = fget(fd);
-               if (!src_file) {
+               if (!src.file) {
                        ret = -EINVAL;
                        goto out_drop_write;
                }
 
-               src_inode = src_file->f_path.dentry->d_inode;
+               src_inode = src.file->f_path.dentry->d_inode;
                if (src_inode->i_sb != file->f_path.dentry->d_inode->i_sb) {
                        printk(KERN_INFO "btrfs: Snapshot src from "
                               "another FS\n");
                        ret = -EINVAL;
-                       fput(src_file);
-                       goto out_drop_write;
+               } else {
+                       ret = btrfs_mksubvol(&file->f_path, name, namelen,
+                                            BTRFS_I(src_inode)->root,
+                                            transid, readonly, inherit);
                }
-               ret = btrfs_mksubvol(&file->f_path, name, namelen,
-                                    BTRFS_I(src_inode)->root,
-                                    transid, readonly, inherit);
-               fput(src_file);
+               fdput(src);
        }
 out_drop_write:
        mnt_drop_write_file(file);
@@ -2341,7 +2339,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
 {
        struct inode *inode = fdentry(file)->d_inode;
        struct btrfs_root *root = BTRFS_I(inode)->root;
-       struct file *src_file;
+       struct fd src_file;
        struct inode *src;
        struct btrfs_trans_handle *trans;
        struct btrfs_path *path;
@@ -2376,24 +2374,24 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
        if (ret)
                return ret;
 
-       src_file = fget(srcfd);
-       if (!src_file) {
+       src_file = fdget(srcfd);
+       if (!src_file.file) {
                ret = -EBADF;
                goto out_drop_write;
        }
 
        ret = -EXDEV;
-       if (src_file->f_path.mnt != file->f_path.mnt)
+       if (src_file.file->f_path.mnt != file->f_path.mnt)
                goto out_fput;
 
-       src = src_file->f_dentry->d_inode;
+       src = src_file.file->f_dentry->d_inode;
 
        ret = -EINVAL;
        if (src == inode)
                goto out_fput;
 
        /* the src must be open for reading */
-       if (!(src_file->f_mode & FMODE_READ))
+       if (!(src_file.file->f_mode & FMODE_READ))
                goto out_fput;
 
        /* don't make the dst file partly checksummed */
@@ -2724,7 +2722,7 @@ out_unlock:
        vfree(buf);
        btrfs_free_path(path);
 out_fput:
-       fput(src_file);
+       fdput(src_file);
 out_drop_write:
        mnt_drop_write_file(file);
        return ret;
index 48a4882d8ad5955eaa0be2b940e35f0b3b2a7f6f..a955669519a265bbfb5f13de4723c87932f8047e 100644 (file)
@@ -68,7 +68,7 @@ struct reada_extent {
        u32                     blocksize;
        int                     err;
        struct list_head        extctl;
-       struct kref             refcnt;
+       int                     refcnt;
        spinlock_t              lock;
        struct reada_zone       *zones[BTRFS_MAX_MIRRORS];
        int                     nzones;
@@ -126,7 +126,7 @@ static int __readahead_hook(struct btrfs_root *root, struct extent_buffer *eb,
        spin_lock(&fs_info->reada_lock);
        re = radix_tree_lookup(&fs_info->reada_tree, index);
        if (re)
-               kref_get(&re->refcnt);
+               re->refcnt++;
        spin_unlock(&fs_info->reada_lock);
 
        if (!re)
@@ -336,7 +336,7 @@ static struct reada_extent *reada_find_extent(struct btrfs_root *root,
        spin_lock(&fs_info->reada_lock);
        re = radix_tree_lookup(&fs_info->reada_tree, index);
        if (re)
-               kref_get(&re->refcnt);
+               re->refcnt++;
        spin_unlock(&fs_info->reada_lock);
 
        if (re)
@@ -352,7 +352,7 @@ static struct reada_extent *reada_find_extent(struct btrfs_root *root,
        re->top = *top;
        INIT_LIST_HEAD(&re->extctl);
        spin_lock_init(&re->lock);
-       kref_init(&re->refcnt);
+       re->refcnt = 1;
 
        /*
         * map block
@@ -398,7 +398,7 @@ static struct reada_extent *reada_find_extent(struct btrfs_root *root,
        if (ret == -EEXIST) {
                re_exist = radix_tree_lookup(&fs_info->reada_tree, index);
                BUG_ON(!re_exist);
-               kref_get(&re_exist->refcnt);
+               re_exist->refcnt++;
                spin_unlock(&fs_info->reada_lock);
                goto error;
        }
@@ -465,10 +465,6 @@ error:
        return re_exist;
 }
 
-static void reada_kref_dummy(struct kref *kr)
-{
-}
-
 static void reada_extent_put(struct btrfs_fs_info *fs_info,
                             struct reada_extent *re)
 {
@@ -476,7 +472,7 @@ static void reada_extent_put(struct btrfs_fs_info *fs_info,
        unsigned long index = re->logical >> PAGE_CACHE_SHIFT;
 
        spin_lock(&fs_info->reada_lock);
-       if (!kref_put(&re->refcnt, reada_kref_dummy)) {
+       if (--re->refcnt) {
                spin_unlock(&fs_info->reada_lock);
                return;
        }
@@ -671,7 +667,7 @@ static int reada_start_machine_dev(struct btrfs_fs_info *fs_info,
                return 0;
        }
        dev->reada_next = re->logical + re->blocksize;
-       kref_get(&re->refcnt);
+       re->refcnt++;
 
        spin_unlock(&fs_info->reada_lock);
 
index 4b5762ef7c2bf87ecb937a70a55ae20b100d77c9..ba95eea201bf1ef8e809159d6b41019fb9169752 100644 (file)
@@ -1104,7 +1104,7 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
                                pr_err("fill_trace bad get_inode "
                                       "%llx.%llx\n", vino.ino, vino.snap);
                                err = PTR_ERR(in);
-                               d_delete(dn);
+                               d_drop(dn);
                                goto done;
                        }
                        dn = splice_dentry(dn, in, &have_lease, true);
@@ -1277,7 +1277,7 @@ retry_lookup:
                        in = ceph_get_inode(parent->d_sb, vino);
                        if (IS_ERR(in)) {
                                dout("new_inode badness\n");
-                               d_delete(dn);
+                               d_drop(dn);
                                dput(dn);
                                err = PTR_ERR(in);
                                goto out;
index b982239f38f91dfab38fcc093e600d5b11e5c632..3a42d9326378d5aa3b2a18b9358a142bdcac51db 100644 (file)
@@ -603,6 +603,11 @@ bad_cap:
 
 static void destroy_caches(void)
 {
+       /*
+        * Make sure all delayed rcu free inodes are flushed before we
+        * destroy cache.
+        */
+       rcu_barrier();
        kmem_cache_destroy(ceph_inode_cachep);
        kmem_cache_destroy(ceph_cap_cachep);
        kmem_cache_destroy(ceph_dentry_cachep);
index a41044a310836cf28c65b9b8db7ab65659a54971..e7931cc55d0c96f7a4edde39d5fbd78fa24f82af 100644 (file)
@@ -968,6 +968,11 @@ cifs_init_inodecache(void)
 static void
 cifs_destroy_inodecache(void)
 {
+       /*
+        * Make sure all delayed rcu free inodes are flushed before we
+        * destroy cache.
+        */
+       rcu_barrier();
        kmem_cache_destroy(cifs_inode_cachep);
 }
 
index f1813120d753e23ce153e700dd62d4d0d871d779..be2aa49094877c22d326b0466d9cfc32bb6ed94b 100644 (file)
@@ -85,6 +85,11 @@ int coda_init_inodecache(void)
 
 void coda_destroy_inodecache(void)
 {
+       /*
+        * Make sure all delayed rcu free inodes are flushed before we
+        * destroy cache.
+        */
+       rcu_barrier();
        kmem_cache_destroy(coda_inode_cachep);
 }
 
@@ -107,43 +112,41 @@ static const struct super_operations coda_super_operations =
 
 static int get_device_index(struct coda_mount_data *data)
 {
-       struct file *file;
+       struct fd f;
        struct inode *inode;
        int idx;
 
-       if(data == NULL) {
+       if (data == NULL) {
                printk("coda_read_super: Bad mount data\n");
                return -1;
        }
 
-       if(data->version != CODA_MOUNT_VERSION) {
+       if (data->version != CODA_MOUNT_VERSION) {
                printk("coda_read_super: Bad mount version\n");
                return -1;
        }
 
-       file = fget(data->fd);
-       inode = NULL;
-       if(file)
-               inode = file->f_path.dentry->d_inode;
-       
-       if(!inode || !S_ISCHR(inode->i_mode) ||
-          imajor(inode) != CODA_PSDEV_MAJOR) {
-               if(file)
-                       fput(file);
-
-               printk("coda_read_super: Bad file\n");
-               return -1;
+       f = fdget(data->fd);
+       if (!f.file)
+               goto Ebadf;
+       inode = f.file->f_path.dentry->d_inode;
+       if (!S_ISCHR(inode->i_mode) || imajor(inode) != CODA_PSDEV_MAJOR) {
+               fdput(f);
+               goto Ebadf;
        }
 
        idx = iminor(inode);
-       fput(file);
+       fdput(f);
 
-       if(idx < 0 || idx >= MAX_CODADEVS) {
+       if (idx < 0 || idx >= MAX_CODADEVS) {
                printk("coda_read_super: Bad minor number\n");
                return -1;
        }
 
        return idx;
+Ebadf:
+       printk("coda_read_super: Bad file\n");
+       return -1;
 }
 
 static int coda_fill_super(struct super_block *sb, void *data, int silent)
index 1bdb350ea5d345fc2842c75ad7cc2e5065c81c16..b7a24d0ca30df9d82beee3866b0dc30861946c0f 100644 (file)
@@ -870,22 +870,20 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
        struct compat_old_linux_dirent __user *dirent, unsigned int count)
 {
        int error;
-       struct file *file;
-       int fput_needed;
+       struct fd f = fdget(fd);
        struct compat_readdir_callback buf;
 
-       file = fget_light(fd, &fput_needed);
-       if (!file)
+       if (!f.file)
                return -EBADF;
 
        buf.result = 0;
        buf.dirent = dirent;
 
-       error = vfs_readdir(file, compat_fillonedir, &buf);
+       error = vfs_readdir(f.file, compat_fillonedir, &buf);
        if (buf.result)
                error = buf.result;
 
-       fput_light(file, fput_needed);
+       fdput(f);
        return error;
 }
 
@@ -949,17 +947,16 @@ efault:
 asmlinkage long compat_sys_getdents(unsigned int fd,
                struct compat_linux_dirent __user *dirent, unsigned int count)
 {
-       struct file * file;
+       struct fd f;
        struct compat_linux_dirent __user * lastdirent;
        struct compat_getdents_callback buf;
-       int fput_needed;
        int error;
 
        if (!access_ok(VERIFY_WRITE, dirent, count))
                return -EFAULT;
 
-       file = fget_light(fd, &fput_needed);
-       if (!file)
+       f = fdget(fd);
+       if (!f.file)
                return -EBADF;
 
        buf.current_dir = dirent;
@@ -967,17 +964,17 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
        buf.count = count;
        buf.error = 0;
 
-       error = vfs_readdir(file, compat_filldir, &buf);
+       error = vfs_readdir(f.file, compat_filldir, &buf);
        if (error >= 0)
                error = buf.error;
        lastdirent = buf.previous;
        if (lastdirent) {
-               if (put_user(file->f_pos, &lastdirent->d_off))
+               if (put_user(f.file->f_pos, &lastdirent->d_off))
                        error = -EFAULT;
                else
                        error = count - buf.count;
        }
-       fput_light(file, fput_needed);
+       fdput(f);
        return error;
 }
 
@@ -1035,17 +1032,16 @@ efault:
 asmlinkage long compat_sys_getdents64(unsigned int fd,
                struct linux_dirent64 __user * dirent, unsigned int count)
 {
-       struct file * file;
+       struct fd f;
        struct linux_dirent64 __user * lastdirent;
        struct compat_getdents_callback64 buf;
-       int fput_needed;
        int error;
 
        if (!access_ok(VERIFY_WRITE, dirent, count))
                return -EFAULT;
 
-       file = fget_light(fd, &fput_needed);
-       if (!file)
+       f = fdget(fd);
+       if (!f.file)
                return -EBADF;
 
        buf.current_dir = dirent;
@@ -1053,18 +1049,18 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
        buf.count = count;
        buf.error = 0;
 
-       error = vfs_readdir(file, compat_filldir64, &buf);
+       error = vfs_readdir(f.file, compat_filldir64, &buf);
        if (error >= 0)
                error = buf.error;
        lastdirent = buf.previous;
        if (lastdirent) {
-               typeof(lastdirent->d_off) d_off = file->f_pos;
+               typeof(lastdirent->d_off) d_off = f.file->f_pos;
                if (__put_user_unaligned(d_off, &lastdirent->d_off))
                        error = -EFAULT;
                else
                        error = count - buf.count;
        }
-       fput_light(file, fput_needed);
+       fdput(f);
        return error;
 }
 #endif /* ! __ARCH_OMIT_COMPAT_SYS_GETDENTS64 */
@@ -1152,18 +1148,16 @@ asmlinkage ssize_t
 compat_sys_readv(unsigned long fd, const struct compat_iovec __user *vec,
                 unsigned long vlen)
 {
-       struct file *file;
-       int fput_needed;
+       struct fd f = fdget(fd);
        ssize_t ret;
        loff_t pos;
 
-       file = fget_light(fd, &fput_needed);
-       if (!file)
+       if (!f.file)
                return -EBADF;
-       pos = file->f_pos;
-       ret = compat_readv(file, vec, vlen, &pos);
-       file->f_pos = pos;
-       fput_light(file, fput_needed);
+       pos = f.file->f_pos;
+       ret = compat_readv(f.file, vec, vlen, &pos);
+       f.file->f_pos = pos;
+       fdput(f);
        return ret;
 }
 
@@ -1171,19 +1165,18 @@ asmlinkage ssize_t
 compat_sys_preadv64(unsigned long fd, const struct compat_iovec __user *vec,
                    unsigned long vlen, loff_t pos)
 {
-       struct file *file;
-       int fput_needed;
+       struct fd f;
        ssize_t ret;
 
        if (pos < 0)
                return -EINVAL;
-       file = fget_light(fd, &fput_needed);
-       if (!file)
+       f = fdget(fd);
+       if (!f.file)
                return -EBADF;
        ret = -ESPIPE;
-       if (file->f_mode & FMODE_PREAD)
-               ret = compat_readv(file, vec, vlen, &pos);
-       fput_light(file, fput_needed);
+       if (f.file->f_mode & FMODE_PREAD)
+               ret = compat_readv(f.file, vec, vlen, &pos);
+       fdput(f);
        return ret;
 }
 
@@ -1221,18 +1214,16 @@ asmlinkage ssize_t
 compat_sys_writev(unsigned long fd, const struct compat_iovec __user *vec,
                  unsigned long vlen)
 {
-       struct file *file;
-       int fput_needed;
+       struct fd f = fdget(fd);
        ssize_t ret;
        loff_t pos;
 
-       file = fget_light(fd, &fput_needed);
-       if (!file)
+       if (!f.file)
                return -EBADF;
-       pos = file->f_pos;
-       ret = compat_writev(file, vec, vlen, &pos);
-       file->f_pos = pos;
-       fput_light(file, fput_needed);
+       pos = f.file->f_pos;
+       ret = compat_writev(f.file, vec, vlen, &pos);
+       f.file->f_pos = pos;
+       fdput(f);
        return ret;
 }
 
@@ -1240,19 +1231,18 @@ asmlinkage ssize_t
 compat_sys_pwritev64(unsigned long fd, const struct compat_iovec __user *vec,
                     unsigned long vlen, loff_t pos)
 {
-       struct file *file;
-       int fput_needed;
+       struct fd f;
        ssize_t ret;
 
        if (pos < 0)
                return -EINVAL;
-       file = fget_light(fd, &fput_needed);
-       if (!file)
+       f = fdget(fd);
+       if (!f.file)
                return -EBADF;
        ret = -ESPIPE;
-       if (file->f_mode & FMODE_PWRITE)
-               ret = compat_writev(file, vec, vlen, &pos);
-       fput_light(file, fput_needed);
+       if (f.file->f_mode & FMODE_PWRITE)
+               ret = compat_writev(f.file, vec, vlen, &pos);
+       fdput(f);
        return ret;
 }
 
@@ -1802,3 +1792,25 @@ compat_sys_open_by_handle_at(int mountdirfd,
        return do_handle_open(mountdirfd, handle, flags);
 }
 #endif
+
+#ifdef __ARCH_WANT_COMPAT_SYS_SENDFILE
+asmlinkage long compat_sys_sendfile(int out_fd, int in_fd,
+                                   compat_off_t __user *offset, compat_size_t count)
+{
+       loff_t pos;
+       off_t off;
+       ssize_t ret;
+
+       if (offset) {
+               if (unlikely(get_user(off, offset)))
+                       return -EFAULT;
+               pos = off;
+               ret = do_sendfile(out_fd, in_fd, &pos, count, MAX_NON_LFS);
+               if (unlikely(put_user(pos, offset)))
+                       return -EFAULT;
+               return ret;
+       }
+
+       return do_sendfile(out_fd, in_fd, NULL, count, 0);
+}
+#endif /* __ARCH_WANT_COMPAT_SYS_SENDFILE */
index 59f8db4a39a78338b2c7a148122a04e11e026caf..f5054025f9da7c154b067db092cc44f8b953f835 100644 (file)
@@ -903,6 +903,8 @@ COMPATIBLE_IOCTL(KDGKBSENT)
 COMPATIBLE_IOCTL(KDSKBSENT)
 COMPATIBLE_IOCTL(KDGKBDIACR)
 COMPATIBLE_IOCTL(KDSKBDIACR)
+COMPATIBLE_IOCTL(KDGKBDIACRUC)
+COMPATIBLE_IOCTL(KDSKBDIACRUC)
 COMPATIBLE_IOCTL(KDKBDREP)
 COMPATIBLE_IOCTL(KDGKBLED)
 COMPATIBLE_IOCTL(KDGETLED)
@@ -1537,16 +1539,13 @@ static int compat_ioctl_check_table(unsigned int xcmd)
 asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
                                unsigned long arg)
 {
-       struct file *filp;
+       struct fd f = fdget(fd);
        int error = -EBADF;
-       int fput_needed;
-
-       filp = fget_light(fd, &fput_needed);
-       if (!filp)
+       if (!f.file)
                goto out;
 
        /* RED-PEN how should LSM module know it's handling 32bit? */
-       error = security_file_ioctl(filp, cmd, arg);
+       error = security_file_ioctl(f.file, cmd, arg);
        if (error)
                goto out_fput;
 
@@ -1566,30 +1565,30 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
 #if defined(CONFIG_IA64) || defined(CONFIG_X86_64)
        case FS_IOC_RESVSP_32:
        case FS_IOC_RESVSP64_32:
-               error = compat_ioctl_preallocate(filp, compat_ptr(arg));
+               error = compat_ioctl_preallocate(f.file, compat_ptr(arg));
                goto out_fput;
 #else
        case FS_IOC_RESVSP:
        case FS_IOC_RESVSP64:
-               error = ioctl_preallocate(filp, compat_ptr(arg));
+               error = ioctl_preallocate(f.file, compat_ptr(arg));
                goto out_fput;
 #endif
 
        case FIBMAP:
        case FIGETBSZ:
        case FIONREAD:
-               if (S_ISREG(filp->f_path.dentry->d_inode->i_mode))
+               if (S_ISREG(f.file->f_path.dentry->d_inode->i_mode))
                        break;
                /*FALL THROUGH*/
 
        default:
-               if (filp->f_op && filp->f_op->compat_ioctl) {
-                       error = filp->f_op->compat_ioctl(filp, cmd, arg);
+               if (f.file->f_op && f.file->f_op->compat_ioctl) {
+                       error = f.file->f_op->compat_ioctl(f.file, cmd, arg);
                        if (error != -ENOIOCTLCMD)
                                goto out_fput;
                }
 
-               if (!filp->f_op || !filp->f_op->unlocked_ioctl)
+               if (!f.file->f_op || !f.file->f_op->unlocked_ioctl)
                        goto do_ioctl;
                break;
        }
@@ -1597,7 +1596,7 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
        if (compat_ioctl_check_table(XFORM(cmd)))
                goto found_handler;
 
-       error = do_ioctl_trans(fd, cmd, arg, filp);
+       error = do_ioctl_trans(fd, cmd, arg, f.file);
        if (error == -ENOIOCTLCMD)
                error = -ENOTTY;
 
@@ -1606,9 +1605,9 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
  found_handler:
        arg = (unsigned long)compat_ptr(arg);
  do_ioctl:
-       error = do_vfs_ioctl(filp, fd, cmd, arg);
+       error = do_vfs_ioctl(f.file, fd, cmd, arg);
  out_fput:
-       fput_light(filp, fput_needed);
+       fdput(f);
  out:
        return error;
 }
index 0074362d9f7faa9518ca2572ee14e8f02946c444..a9d35b0e06cf0be204919aff888f8feff0393724 100644 (file)
@@ -79,8 +79,8 @@ int configfs_setattr(struct dentry * dentry, struct iattr * iattr)
                        return -ENOMEM;
                /* assign default attributes */
                sd_iattr->ia_mode = sd->s_mode;
-               sd_iattr->ia_uid = 0;
-               sd_iattr->ia_gid = 0;
+               sd_iattr->ia_uid = GLOBAL_ROOT_UID;
+               sd_iattr->ia_gid = GLOBAL_ROOT_GID;
                sd_iattr->ia_atime = sd_iattr->ia_mtime = sd_iattr->ia_ctime = CURRENT_TIME;
                sd->s_iattr = sd_iattr;
        }
diff --git a/fs/coredump.c b/fs/coredump.c
new file mode 100644 (file)
index 0000000..f045bba
--- /dev/null
@@ -0,0 +1,686 @@
+#include <linux/slab.h>
+#include <linux/file.h>
+#include <linux/fdtable.h>
+#include <linux/mm.h>
+#include <linux/stat.h>
+#include <linux/fcntl.h>
+#include <linux/swap.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/pagemap.h>
+#include <linux/perf_event.h>
+#include <linux/highmem.h>
+#include <linux/spinlock.h>
+#include <linux/key.h>
+#include <linux/personality.h>
+#include <linux/binfmts.h>
+#include <linux/utsname.h>
+#include <linux/pid_namespace.h>
+#include <linux/module.h>
+#include <linux/namei.h>
+#include <linux/mount.h>
+#include <linux/security.h>
+#include <linux/syscalls.h>
+#include <linux/tsacct_kern.h>
+#include <linux/cn_proc.h>
+#include <linux/audit.h>
+#include <linux/tracehook.h>
+#include <linux/kmod.h>
+#include <linux/fsnotify.h>
+#include <linux/fs_struct.h>
+#include <linux/pipe_fs_i.h>
+#include <linux/oom.h>
+#include <linux/compat.h>
+
+#include <asm/uaccess.h>
+#include <asm/mmu_context.h>
+#include <asm/tlb.h>
+#include <asm/exec.h>
+
+#include <trace/events/task.h>
+#include "internal.h"
+
+#include <trace/events/sched.h>
+
+int core_uses_pid;
+char core_pattern[CORENAME_MAX_SIZE] = "core";
+unsigned int core_pipe_limit;
+
+struct core_name {
+       char *corename;
+       int used, size;
+};
+static atomic_t call_count = ATOMIC_INIT(1);
+
+/* The maximal length of core_pattern is also specified in sysctl.c */
+
+static int expand_corename(struct core_name *cn)
+{
+       char *old_corename = cn->corename;
+
+       cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
+       cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
+
+       if (!cn->corename) {
+               kfree(old_corename);
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+static int cn_printf(struct core_name *cn, const char *fmt, ...)
+{
+       char *cur;
+       int need;
+       int ret;
+       va_list arg;
+
+       va_start(arg, fmt);
+       need = vsnprintf(NULL, 0, fmt, arg);
+       va_end(arg);
+
+       if (likely(need < cn->size - cn->used - 1))
+               goto out_printf;
+
+       ret = expand_corename(cn);
+       if (ret)
+               goto expand_fail;
+
+out_printf:
+       cur = cn->corename + cn->used;
+       va_start(arg, fmt);
+       vsnprintf(cur, need + 1, fmt, arg);
+       va_end(arg);
+       cn->used += need;
+       return 0;
+
+expand_fail:
+       return ret;
+}
+
+static void cn_escape(char *str)
+{
+       for (; *str; str++)
+               if (*str == '/')
+                       *str = '!';
+}
+
+static int cn_print_exe_file(struct core_name *cn)
+{
+       struct file *exe_file;
+       char *pathbuf, *path;
+       int ret;
+
+       exe_file = get_mm_exe_file(current->mm);
+       if (!exe_file) {
+               char *commstart = cn->corename + cn->used;
+               ret = cn_printf(cn, "%s (path unknown)", current->comm);
+               cn_escape(commstart);
+               return ret;
+       }
+
+       pathbuf = kmalloc(PATH_MAX, GFP_TEMPORARY);
+       if (!pathbuf) {
+               ret = -ENOMEM;
+               goto put_exe_file;
+       }
+
+       path = d_path(&exe_file->f_path, pathbuf, PATH_MAX);
+       if (IS_ERR(path)) {
+               ret = PTR_ERR(path);
+               goto free_buf;
+       }
+
+       cn_escape(path);
+
+       ret = cn_printf(cn, "%s", path);
+
+free_buf:
+       kfree(pathbuf);
+put_exe_file:
+       fput(exe_file);
+       return ret;
+}
+
+/* format_corename will inspect the pattern parameter, and output a
+ * name into corename, which must have space for at least
+ * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.
+ */
+static int format_corename(struct core_name *cn, long signr)
+{
+       const struct cred *cred = current_cred();
+       const char *pat_ptr = core_pattern;
+       int ispipe = (*pat_ptr == '|');
+       int pid_in_pattern = 0;
+       int err = 0;
+
+       cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
+       cn->corename = kmalloc(cn->size, GFP_KERNEL);
+       cn->used = 0;
+
+       if (!cn->corename)
+               return -ENOMEM;
+
+       /* Repeat as long as we have more pattern to process and more output
+          space */
+       while (*pat_ptr) {
+               if (*pat_ptr != '%') {
+                       if (*pat_ptr == 0)
+                               goto out;
+                       err = cn_printf(cn, "%c", *pat_ptr++);
+               } else {
+                       switch (*++pat_ptr) {
+                       /* single % at the end, drop that */
+                       case 0:
+                               goto out;
+                       /* Double percent, output one percent */
+                       case '%':
+                               err = cn_printf(cn, "%c", '%');
+                               break;
+                       /* pid */
+                       case 'p':
+                               pid_in_pattern = 1;
+                               err = cn_printf(cn, "%d",
+                                             task_tgid_vnr(current));
+                               break;
+                       /* uid */
+                       case 'u':
+                               err = cn_printf(cn, "%d", cred->uid);
+                               break;
+                       /* gid */
+                       case 'g':
+                               err = cn_printf(cn, "%d", cred->gid);
+                               break;
+                       /* signal that caused the coredump */
+                       case 's':
+                               err = cn_printf(cn, "%ld", signr);
+                               break;
+                       /* UNIX time of coredump */
+                       case 't': {
+                               struct timeval tv;
+                               do_gettimeofday(&tv);
+                               err = cn_printf(cn, "%lu", tv.tv_sec);
+                               break;
+                       }
+                       /* hostname */
+                       case 'h': {
+                               char *namestart = cn->corename + cn->used;
+                               down_read(&uts_sem);
+                               err = cn_printf(cn, "%s",
+                                             utsname()->nodename);
+                               up_read(&uts_sem);
+                               cn_escape(namestart);
+                               break;
+                       }
+                       /* executable */
+                       case 'e': {
+                               char *commstart = cn->corename + cn->used;
+                               err = cn_printf(cn, "%s", current->comm);
+                               cn_escape(commstart);
+                               break;
+                       }
+                       case 'E':
+                               err = cn_print_exe_file(cn);
+                               break;
+                       /* core limit size */
+                       case 'c':
+                               err = cn_printf(cn, "%lu",
+                                             rlimit(RLIMIT_CORE));
+                               break;
+                       default:
+                               break;
+                       }
+                       ++pat_ptr;
+               }
+
+               if (err)
+                       return err;
+       }
+
+       /* Backward compatibility with core_uses_pid:
+        *
+        * If core_pattern does not include a %p (as is the default)
+        * and core_uses_pid is set, then .%pid will be appended to
+        * the filename. Do not do this for piped commands. */
+       if (!ispipe && !pid_in_pattern && core_uses_pid) {
+               err = cn_printf(cn, ".%d", task_tgid_vnr(current));
+               if (err)
+                       return err;
+       }
+out:
+       return ispipe;
+}
+
+static int zap_process(struct task_struct *start, int exit_code)
+{
+       struct task_struct *t;
+       int nr = 0;
+
+       start->signal->flags = SIGNAL_GROUP_EXIT;
+       start->signal->group_exit_code = exit_code;
+       start->signal->group_stop_count = 0;
+
+       t = start;
+       do {
+               task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
+               if (t != current && t->mm) {
+                       sigaddset(&t->pending.signal, SIGKILL);
+                       signal_wake_up(t, 1);
+                       nr++;
+               }
+       } while_each_thread(start, t);
+
+       return nr;
+}
+
+static inline int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
+                               struct core_state *core_state, int exit_code)
+{
+       struct task_struct *g, *p;
+       unsigned long flags;
+       int nr = -EAGAIN;
+
+       spin_lock_irq(&tsk->sighand->siglock);
+       if (!signal_group_exit(tsk->signal)) {
+               mm->core_state = core_state;
+               nr = zap_process(tsk, exit_code);
+       }
+       spin_unlock_irq(&tsk->sighand->siglock);
+       if (unlikely(nr < 0))
+               return nr;
+
+       if (atomic_read(&mm->mm_users) == nr + 1)
+               goto done;
+       /*
+        * We should find and kill all tasks which use this mm, and we should
+        * count them correctly into ->nr_threads. We don't take tasklist
+        * lock, but this is safe wrt:
+        *
+        * fork:
+        *      None of sub-threads can fork after zap_process(leader). All
+        *      processes which were created before this point should be
+        *      visible to zap_threads() because copy_process() adds the new
+        *      process to the tail of init_task.tasks list, and lock/unlock
+        *      of ->siglock provides a memory barrier.
+        *
+        * do_exit:
+        *      The caller holds mm->mmap_sem. This means that the task which
+        *      uses this mm can't pass exit_mm(), so it can't exit or clear
+        *      its ->mm.
+        *
+        * de_thread:
+        *      It does list_replace_rcu(&leader->tasks, &current->tasks),
+        *      we must see either old or new leader, this does not matter.
+        *      However, it can change p->sighand, so lock_task_sighand(p)
+        *      must be used. Since p->mm != NULL and we hold ->mmap_sem
+        *      it can't fail.
+        *
+        *      Note also that "g" can be the old leader with ->mm == NULL
+        *      and already unhashed and thus removed from ->thread_group.
+        *      This is OK, __unhash_process()->list_del_rcu() does not
+        *      clear the ->next pointer, we will find the new leader via
+        *      next_thread().
+        */
+       rcu_read_lock();
+       for_each_process(g) {
+               if (g == tsk->group_leader)
+                       continue;
+               if (g->flags & PF_KTHREAD)
+                       continue;
+               p = g;
+               do {
+                       if (p->mm) {
+                               if (unlikely(p->mm == mm)) {
+                                       lock_task_sighand(p, &flags);
+                                       nr += zap_process(p, exit_code);
+                                       unlock_task_sighand(p, &flags);
+                               }
+                               break;
+                       }
+               } while_each_thread(g, p);
+       }
+       rcu_read_unlock();
+done:
+       atomic_set(&core_state->nr_threads, nr);
+       return nr;
+}
+
+static int coredump_wait(int exit_code, struct core_state *core_state)
+{
+       struct task_struct *tsk = current;
+       struct mm_struct *mm = tsk->mm;
+       int core_waiters = -EBUSY;
+
+       init_completion(&core_state->startup);
+       core_state->dumper.task = tsk;
+       core_state->dumper.next = NULL;
+
+       down_write(&mm->mmap_sem);
+       if (!mm->core_state)
+               core_waiters = zap_threads(tsk, mm, core_state, exit_code);
+       up_write(&mm->mmap_sem);
+
+       if (core_waiters > 0) {
+               struct core_thread *ptr;
+
+               wait_for_completion(&core_state->startup);
+               /*
+                * Wait for all the threads to become inactive, so that
+                * all the thread context (extended register state, like
+                * fpu etc) gets copied to the memory.
+                */
+               ptr = core_state->dumper.next;
+               while (ptr != NULL) {
+                       wait_task_inactive(ptr->task, 0);
+                       ptr = ptr->next;
+               }
+       }
+
+       return core_waiters;
+}
+
+static void coredump_finish(struct mm_struct *mm)
+{
+       struct core_thread *curr, *next;
+       struct task_struct *task;
+
+       next = mm->core_state->dumper.next;
+       while ((curr = next) != NULL) {
+               next = curr->next;
+               task = curr->task;
+               /*
+                * see exit_mm(), curr->task must not see
+                * ->task == NULL before we read ->next.
+                */
+               smp_mb();
+               curr->task = NULL;
+               wake_up_process(task);
+       }
+
+       mm->core_state = NULL;
+}
+
+static void wait_for_dump_helpers(struct file *file)
+{
+       struct pipe_inode_info *pipe;
+
+       pipe = file->f_path.dentry->d_inode->i_pipe;
+
+       pipe_lock(pipe);
+       pipe->readers++;
+       pipe->writers--;
+
+       while ((pipe->readers > 1) && (!signal_pending(current))) {
+               wake_up_interruptible_sync(&pipe->wait);
+               kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
+               pipe_wait(pipe);
+       }
+
+       pipe->readers--;
+       pipe->writers++;
+       pipe_unlock(pipe);
+
+}
+
+/*
+ * umh_pipe_setup
+ * helper function to customize the process used
+ * to collect the core in userspace.  Specifically
+ * it sets up a pipe and installs it as fd 0 (stdin)
+ * for the process.  Returns 0 on success, or
+ * PTR_ERR on failure.
+ * Note that it also sets the core limit to 1.  This
+ * is a special value that we use to trap recursive
+ * core dumps
+ */
+static int umh_pipe_setup(struct subprocess_info *info, struct cred *new)
+{
+       struct file *files[2];
+       struct coredump_params *cp = (struct coredump_params *)info->data;
+       int err = create_pipe_files(files, 0);
+       if (err)
+               return err;
+
+       cp->file = files[1];
+
+       replace_fd(0, files[0], 0);
+       /* and disallow core files too */
+       current->signal->rlim[RLIMIT_CORE] = (struct rlimit){1, 1};
+
+       return 0;
+}
+
+void do_coredump(long signr, int exit_code, struct pt_regs *regs)
+{
+       struct core_state core_state;
+       struct core_name cn;
+       struct mm_struct *mm = current->mm;
+       struct linux_binfmt * binfmt;
+       const struct cred *old_cred;
+       struct cred *cred;
+       int retval = 0;
+       int flag = 0;
+       int ispipe;
+       struct files_struct *displaced;
+       bool need_nonrelative = false;
+       static atomic_t core_dump_count = ATOMIC_INIT(0);
+       struct coredump_params cprm = {
+               .signr = signr,
+               .regs = regs,
+               .limit = rlimit(RLIMIT_CORE),
+               /*
+                * We must use the same mm->flags while dumping core to avoid
+                * inconsistency of bit flags, since this flag is not protected
+                * by any locks.
+                */
+               .mm_flags = mm->flags,
+       };
+
+       audit_core_dumps(signr);
+
+       binfmt = mm->binfmt;
+       if (!binfmt || !binfmt->core_dump)
+               goto fail;
+       if (!__get_dumpable(cprm.mm_flags))
+               goto fail;
+
+       cred = prepare_creds();
+       if (!cred)
+               goto fail;
+       /*
+        * We cannot trust fsuid as being the "true" uid of the process
+        * nor do we know its entire history. We only know it was tainted
+        * so we dump it as root in mode 2, and only into a controlled
+        * environment (pipe handler or fully qualified path).
+        */
+       if (__get_dumpable(cprm.mm_flags) == SUID_DUMPABLE_SAFE) {
+               /* Setuid core dump mode */
+               flag = O_EXCL;          /* Stop rewrite attacks */
+               cred->fsuid = GLOBAL_ROOT_UID;  /* Dump root private */
+               need_nonrelative = true;
+       }
+
+       retval = coredump_wait(exit_code, &core_state);
+       if (retval < 0)
+               goto fail_creds;
+
+       old_cred = override_creds(cred);
+
+       /*
+        * Clear any false indication of pending signals that might
+        * be seen by the filesystem code called to write the core file.
+        */
+       clear_thread_flag(TIF_SIGPENDING);
+
+       ispipe = format_corename(&cn, signr);
+
+       if (ispipe) {
+               int dump_count;
+               char **helper_argv;
+
+               if (ispipe < 0) {
+                       printk(KERN_WARNING "format_corename failed\n");
+                       printk(KERN_WARNING "Aborting core\n");
+                       goto fail_corename;
+               }
+
+               if (cprm.limit == 1) {
+                       /* See umh_pipe_setup() which sets RLIMIT_CORE = 1.
+                        *
+                        * Normally core limits are irrelevant to pipes, since
+                        * we're not writing to the file system, but we use
+                        * cprm.limit of 1 here as a special value, this is a
+                        * consistent way to catch recursive crashes.
+                        * We can still crash if the core_pattern binary sets
+                        * RLIM_CORE = !1, but it runs as root, and can do
+                        * lots of stupid things.
+                        *
+                        * Note that we use task_tgid_vnr here to grab the pid
+                        * of the process group leader.  That way we get the
+                        * right pid if a thread in a multi-threaded
+                        * core_pattern process dies.
+                        */
+                       printk(KERN_WARNING
+                               "Process %d(%s) has RLIMIT_CORE set to 1\n",
+                               task_tgid_vnr(current), current->comm);
+                       printk(KERN_WARNING "Aborting core\n");
+                       goto fail_unlock;
+               }
+               cprm.limit = RLIM_INFINITY;
+
+               dump_count = atomic_inc_return(&core_dump_count);
+               if (core_pipe_limit && (core_pipe_limit < dump_count)) {
+                       printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
+                              task_tgid_vnr(current), current->comm);
+                       printk(KERN_WARNING "Skipping core dump\n");
+                       goto fail_dropcount;
+               }
+
+               helper_argv = argv_split(GFP_KERNEL, cn.corename+1, NULL);
+               if (!helper_argv) {
+                       printk(KERN_WARNING "%s failed to allocate memory\n",
+                              __func__);
+                       goto fail_dropcount;
+               }
+
+               retval = call_usermodehelper_fns(helper_argv[0], helper_argv,
+                                       NULL, UMH_WAIT_EXEC, umh_pipe_setup,
+                                       NULL, &cprm);
+               argv_free(helper_argv);
+               if (retval) {
+                       printk(KERN_INFO "Core dump to %s pipe failed\n",
+                              cn.corename);
+                       goto close_fail;
+               }
+       } else {
+               struct inode *inode;
+
+               if (cprm.limit < binfmt->min_coredump)
+                       goto fail_unlock;
+
+               if (need_nonrelative && cn.corename[0] != '/') {
+                       printk(KERN_WARNING "Pid %d(%s) can only dump core "\
+                               "to fully qualified path!\n",
+                               task_tgid_vnr(current), current->comm);
+                       printk(KERN_WARNING "Skipping core dump\n");
+                       goto fail_unlock;
+               }
+
+               cprm.file = filp_open(cn.corename,
+                                O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag,
+                                0600);
+               if (IS_ERR(cprm.file))
+                       goto fail_unlock;
+
+               inode = cprm.file->f_path.dentry->d_inode;
+               if (inode->i_nlink > 1)
+                       goto close_fail;
+               if (d_unhashed(cprm.file->f_path.dentry))
+                       goto close_fail;
+               /*
+                * AK: actually i see no reason to not allow this for named
+                * pipes etc, but keep the previous behaviour for now.
+                */
+               if (!S_ISREG(inode->i_mode))
+                       goto close_fail;
+               /*
+                * Don't allow local users to get cute and trick others to coredump
+                * into their pre-created files.
+                */
+               if (!uid_eq(inode->i_uid, current_fsuid()))
+                       goto close_fail;
+               if (!cprm.file->f_op || !cprm.file->f_op->write)
+                       goto close_fail;
+               if (do_truncate(cprm.file->f_path.dentry, 0, 0, cprm.file))
+                       goto close_fail;
+       }
+
+       /* get us an unshared descriptor table; almost always a no-op */
+       retval = unshare_files(&displaced);
+       if (retval)
+               goto close_fail;
+       if (displaced)
+               put_files_struct(displaced);
+       retval = binfmt->core_dump(&cprm);
+       if (retval)
+               current->signal->group_exit_code |= 0x80;
+
+       if (ispipe && core_pipe_limit)
+               wait_for_dump_helpers(cprm.file);
+close_fail:
+       if (cprm.file)
+               filp_close(cprm.file, NULL);
+fail_dropcount:
+       if (ispipe)
+               atomic_dec(&core_dump_count);
+fail_unlock:
+       kfree(cn.corename);
+fail_corename:
+       coredump_finish(mm);
+       revert_creds(old_cred);
+fail_creds:
+       put_cred(cred);
+fail:
+       return;
+}
+
+/*
+ * Core dumping helper functions.  These are the only things you should
+ * do on a core-file: use only these functions to write out all the
+ * necessary info.
+ */
+int dump_write(struct file *file, const void *addr, int nr)
+{
+       return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr;
+}
+EXPORT_SYMBOL(dump_write);
+
+int dump_seek(struct file *file, loff_t off)
+{
+       int ret = 1;
+
+       if (file->f_op->llseek && file->f_op->llseek != no_llseek) {
+               if (file->f_op->llseek(file, off, SEEK_CUR) < 0)
+                       return 0;
+       } else {
+               char *buf = (char *)get_zeroed_page(GFP_KERNEL);
+
+               if (!buf)
+                       return 0;
+               while (off > 0) {
+                       unsigned long n = off;
+
+                       if (n > PAGE_SIZE)
+                               n = PAGE_SIZE;
+                       if (!dump_write(file, buf, n)) {
+                               ret = 0;
+                               break;
+                       }
+                       off -= n;
+               }
+               free_page((unsigned long)buf);
+       }
+       return ret;
+}
+EXPORT_SYMBOL(dump_seek);
index 28cca01ca9c9c2b29b20d16581c430d900101a78..c6c3f91ecf069b22f2098e7ac0f3158b62a7b05e 100644 (file)
@@ -90,8 +90,8 @@ static struct inode *get_cramfs_inode(struct super_block *sb,
        }
 
        inode->i_mode = cramfs_inode->mode;
-       inode->i_uid = cramfs_inode->uid;
-       inode->i_gid = cramfs_inode->gid;
+       i_uid_write(inode, cramfs_inode->uid);
+       i_gid_write(inode, cramfs_inode->gid);
 
        /* if the lower 2 bits are zero, the inode contains data */
        if (!(inode->i_ino & 3)) {
index 693f95bf1caeb8769517aa7046702f86b6494fa7..3a463d0c4fe830d5c33c9dcf9ebd05e5caf5fb43 100644 (file)
@@ -2113,7 +2113,7 @@ again:
        inode = dentry->d_inode;
        isdir = S_ISDIR(inode->i_mode);
        if (dentry->d_count == 1) {
-               if (inode && !spin_trylock(&inode->i_lock)) {
+               if (!spin_trylock(&inode->i_lock)) {
                        spin_unlock(&dentry->d_lock);
                        cpu_relax();
                        goto again;
index 6393fd61d5c4dedc19574cdee6eb136d1340fddb..b607d92cdf2445ab8c1e364a70f939ce21e22c6a 100644 (file)
@@ -128,8 +128,8 @@ static inline int debugfs_positive(struct dentry *dentry)
 }
 
 struct debugfs_mount_opts {
-       uid_t uid;
-       gid_t gid;
+       kuid_t uid;
+       kgid_t gid;
        umode_t mode;
 };
 
@@ -156,6 +156,8 @@ static int debugfs_parse_options(char *data, struct debugfs_mount_opts *opts)
        substring_t args[MAX_OPT_ARGS];
        int option;
        int token;
+       kuid_t uid;
+       kgid_t gid;
        char *p;
 
        opts->mode = DEBUGFS_DEFAULT_MODE;
@@ -169,12 +171,18 @@ static int debugfs_parse_options(char *data, struct debugfs_mount_opts *opts)
                case Opt_uid:
                        if (match_int(&args[0], &option))
                                return -EINVAL;
-                       opts->uid = option;
+                       uid = make_kuid(current_user_ns(), option);
+                       if (!uid_valid(uid))
+                               return -EINVAL;
+                       opts->uid = uid;
                        break;
                case Opt_gid:
                        if (match_octal(&args[0], &option))
                                return -EINVAL;
-                       opts->gid = option;
+                       gid = make_kgid(current_user_ns(), option);
+                       if (!gid_valid(gid))
+                               return -EINVAL;
+                       opts->gid = gid;
                        break;
                case Opt_mode:
                        if (match_octal(&args[0], &option))
@@ -226,10 +234,12 @@ static int debugfs_show_options(struct seq_file *m, struct dentry *root)
        struct debugfs_fs_info *fsi = root->d_sb->s_fs_info;
        struct debugfs_mount_opts *opts = &fsi->mount_opts;
 
-       if (opts->uid != 0)
-               seq_printf(m, ",uid=%u", opts->uid);
-       if (opts->gid != 0)
-               seq_printf(m, ",gid=%u", opts->gid);
+       if (!uid_eq(opts->uid, GLOBAL_ROOT_UID))
+               seq_printf(m, ",uid=%u",
+                          from_kuid_munged(&init_user_ns, opts->uid));
+       if (!gid_eq(opts->gid, GLOBAL_ROOT_GID))
+               seq_printf(m, ",gid=%u",
+                          from_kgid_munged(&init_user_ns, opts->gid));
        if (opts->mode != DEBUGFS_DEFAULT_MODE)
                seq_printf(m, ",mode=%o", opts->mode);
 
index ef17e0169da187ed209164a7881befcf00eec97f..60a327863b1122e246b79bf91ecdf23136eccac9 100644 (file)
@@ -14,7 +14,7 @@
 #include "dlm_internal.h"
 
 static uint32_t dlm_nl_seqnum;
-static uint32_t listener_nlpid;
+static uint32_t listener_nlportid;
 
 static struct genl_family family = {
        .id             = GENL_ID_GENERATE,
@@ -64,13 +64,13 @@ static int send_data(struct sk_buff *skb)
                return rv;
        }
 
-       return genlmsg_unicast(&init_net, skb, listener_nlpid);
+       return genlmsg_unicast(&init_net, skb, listener_nlportid);
 }
 
 static int user_cmd(struct sk_buff *skb, struct genl_info *info)
 {
-       listener_nlpid = info->snd_pid;
-       printk("user_cmd nlpid %u\n", listener_nlpid);
+       listener_nlportid = info->snd_portid;
+       printk("user_cmd nlpid %u\n", listener_nlportid);
        return 0;
 }
 
index 9b627c15010a3af35e1f2ec85ccafc2b18d97d44..4e0886c9e5c476059346f5a0d520257418ce03ba 100644 (file)
@@ -545,11 +545,12 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags
                goto out_free;
        }
 
-       if (check_ruid && path.dentry->d_inode->i_uid != current_uid()) {
+       if (check_ruid && !uid_eq(path.dentry->d_inode->i_uid, current_uid())) {
                rc = -EPERM;
                printk(KERN_ERR "Mount of device (uid: %d) not owned by "
                       "requested user (uid: %d)\n",
-                      path.dentry->d_inode->i_uid, current_uid());
+                       i_uid_read(path.dentry->d_inode),
+                       from_kuid(&init_user_ns, current_uid()));
                goto out_free;
        }
 
@@ -710,6 +711,12 @@ static void ecryptfs_free_kmem_caches(void)
 {
        int i;
 
+       /*
+        * Make sure all delayed rcu free inodes are flushed before we
+        * destroy cache.
+        */
+       rcu_barrier();
+
        for (i = 0; i < ARRAY_SIZE(ecryptfs_cache_infos); i++) {
                struct ecryptfs_cache_info *info;
 
index b29bb8bfa8d929bb4dde499a6fdc9618eaf56492..5fa2471796c2d9b2174a0f39fed60ef2fc562a07 100644 (file)
@@ -33,7 +33,7 @@ static struct hlist_head *ecryptfs_daemon_hash;
 struct mutex ecryptfs_daemon_hash_mux;
 static int ecryptfs_hash_bits;
 #define ecryptfs_current_euid_hash(uid) \
-               hash_long((unsigned long)current_euid(), ecryptfs_hash_bits)
+       hash_long((unsigned long)from_kuid(&init_user_ns, current_euid()), ecryptfs_hash_bits)
 
 static u32 ecryptfs_msg_counter;
 static struct ecryptfs_msg_ctx *ecryptfs_msg_ctx_arr;
@@ -121,8 +121,7 @@ int ecryptfs_find_daemon_by_euid(struct ecryptfs_daemon **daemon)
        hlist_for_each_entry(*daemon, elem,
                            &ecryptfs_daemon_hash[ecryptfs_current_euid_hash()],
                            euid_chain) {
-               if ((*daemon)->file->f_cred->euid == current_euid() &&
-                   (*daemon)->file->f_cred->user_ns == current_user_ns()) {
+               if (uid_eq((*daemon)->file->f_cred->euid, current_euid())) {
                        rc = 0;
                        goto out;
                }
index bc84f365d75c3a04516c0f58e57039ca3db8bbb6..f3913eb2c47482739681f51551cd6a325e6fc40d 100644 (file)
@@ -97,8 +97,8 @@ struct inode *efs_iget(struct super_block *super, unsigned long ino)
     
        inode->i_mode  = be16_to_cpu(efs_inode->di_mode);
        set_nlink(inode, be16_to_cpu(efs_inode->di_nlink));
-       inode->i_uid   = (uid_t)be16_to_cpu(efs_inode->di_uid);
-       inode->i_gid   = (gid_t)be16_to_cpu(efs_inode->di_gid);
+       i_uid_write(inode, (uid_t)be16_to_cpu(efs_inode->di_uid));
+       i_gid_write(inode, (gid_t)be16_to_cpu(efs_inode->di_gid));
        inode->i_size  = be32_to_cpu(efs_inode->di_size);
        inode->i_atime.tv_sec = be32_to_cpu(efs_inode->di_atime);
        inode->i_mtime.tv_sec = be32_to_cpu(efs_inode->di_mtime);
index e755ec746c6967ed12e9ea036ddcd83214770d48..2002431ef9a0ff238d838b9d4c2f8bb9760007fa 100644 (file)
@@ -96,6 +96,11 @@ static int init_inodecache(void)
 
 static void destroy_inodecache(void)
 {
+       /*
+        * Make sure all delayed rcu free inodes are flushed before we
+        * destroy cache.
+        */
+       rcu_barrier();
        kmem_cache_destroy(efs_inode_cachep);
 }
 
index eedec84c1809173eb4c627815ad70122de66eeb3..cd96649bfe62da9e408dbd629f56f6452c139bc9 100644 (file)
@@ -1810,7 +1810,7 @@ SYSCALL_DEFINE4(epoll_wait, int, epfd, struct epoll_event __user *, events,
                int, maxevents, int, timeout)
 {
        int error;
-       struct file *file;
+       struct fd f;
        struct eventpoll *ep;
 
        /* The maximum number of event must be greater than zero */
@@ -1818,38 +1818,33 @@ SYSCALL_DEFINE4(epoll_wait, int, epfd, struct epoll_event __user *, events,
                return -EINVAL;
 
        /* Verify that the area passed by the user is writeable */
-       if (!access_ok(VERIFY_WRITE, events, maxevents * sizeof(struct epoll_event))) {
-               error = -EFAULT;
-               goto error_return;
-       }
+       if (!access_ok(VERIFY_WRITE, events, maxevents * sizeof(struct epoll_event)))
+               return -EFAULT;
 
        /* Get the "struct file *" for the eventpoll file */
-       error = -EBADF;
-       file = fget(epfd);
-       if (!file)
-               goto error_return;
+       f = fdget(epfd);
+       if (!f.file)
+               return -EBADF;
 
        /*
         * We have to check that the file structure underneath the fd
         * the user passed to us _is_ an eventpoll file.
         */
        error = -EINVAL;
-       if (!is_file_epoll(file))
+       if (!is_file_epoll(f.file))
                goto error_fput;
 
        /*
         * At this point it is safe to assume that the "private_data" contains
         * our own data structure.
         */
-       ep = file->private_data;
+       ep = f.file->private_data;
 
        /* Time to fish for events ... */
        error = ep_poll(ep, events, maxevents, timeout);
 
 error_fput:
-       fput(file);
-error_return:
-
+       fdput(f);
        return error;
 }
 
index 574cf4de4ec38ba2c64115ca85d78e80f0e31dd2..48fb26ef8a1b3daebb49a74c9ae09ba83c7f7a14 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
 
 #include <trace/events/sched.h>
 
-int core_uses_pid;
-char core_pattern[CORENAME_MAX_SIZE] = "core";
-unsigned int core_pipe_limit;
 int suid_dumpable = 0;
 
-struct core_name {
-       char *corename;
-       int used, size;
-};
-static atomic_t call_count = ATOMIC_INIT(1);
-
-/* The maximal length of core_pattern is also specified in sysctl.c */
-
 static LIST_HEAD(formats);
 static DEFINE_RWLOCK(binfmt_lock);
 
@@ -1006,40 +995,6 @@ no_thread_group:
        return 0;
 }
 
-/*
- * These functions flushes out all traces of the currently running executable
- * so that a new one can be started
- */
-static void flush_old_files(struct files_struct * files)
-{
-       long j = -1;
-       struct fdtable *fdt;
-
-       spin_lock(&files->file_lock);
-       for (;;) {
-               unsigned long set, i;
-
-               j++;
-               i = j * BITS_PER_LONG;
-               fdt = files_fdtable(files);
-               if (i >= fdt->max_fds)
-                       break;
-               set = fdt->close_on_exec[j];
-               if (!set)
-                       continue;
-               fdt->close_on_exec[j] = 0;
-               spin_unlock(&files->file_lock);
-               for ( ; set ; i++,set >>= 1) {
-                       if (set & 1) {
-                               sys_close(i);
-                       }
-               }
-               spin_lock(&files->file_lock);
-
-       }
-       spin_unlock(&files->file_lock);
-}
-
 char *get_task_comm(char *buf, struct task_struct *tsk)
 {
        /* buf must be at least sizeof(tsk->comm) in size */
@@ -1050,6 +1005,11 @@ char *get_task_comm(char *buf, struct task_struct *tsk)
 }
 EXPORT_SYMBOL_GPL(get_task_comm);
 
+/*
+ * These functions flush out all traces of the currently running executable
+ * so that a new one can be started
+ */
+
 void set_task_comm(struct task_struct *tsk, char *buf)
 {
        task_lock(tsk);
@@ -1171,7 +1131,7 @@ void setup_new_exec(struct linux_binprm * bprm)
        current->self_exec_id++;
                        
        flush_signal_handlers(current, 0);
-       flush_old_files(current->files);
+       do_close_on_exec(current->files);
 }
 EXPORT_SYMBOL(setup_new_exec);
 
@@ -1632,353 +1592,6 @@ void set_binfmt(struct linux_binfmt *new)
 
 EXPORT_SYMBOL(set_binfmt);
 
-static int expand_corename(struct core_name *cn)
-{
-       char *old_corename = cn->corename;
-
-       cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
-       cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
-
-       if (!cn->corename) {
-               kfree(old_corename);
-               return -ENOMEM;
-       }
-
-       return 0;
-}
-
-static int cn_printf(struct core_name *cn, const char *fmt, ...)
-{
-       char *cur;
-       int need;
-       int ret;
-       va_list arg;
-
-       va_start(arg, fmt);
-       need = vsnprintf(NULL, 0, fmt, arg);
-       va_end(arg);
-
-       if (likely(need < cn->size - cn->used - 1))
-               goto out_printf;
-
-       ret = expand_corename(cn);
-       if (ret)
-               goto expand_fail;
-
-out_printf:
-       cur = cn->corename + cn->used;
-       va_start(arg, fmt);
-       vsnprintf(cur, need + 1, fmt, arg);
-       va_end(arg);
-       cn->used += need;
-       return 0;
-
-expand_fail:
-       return ret;
-}
-
-static void cn_escape(char *str)
-{
-       for (; *str; str++)
-               if (*str == '/')
-                       *str = '!';
-}
-
-static int cn_print_exe_file(struct core_name *cn)
-{
-       struct file *exe_file;
-       char *pathbuf, *path;
-       int ret;
-
-       exe_file = get_mm_exe_file(current->mm);
-       if (!exe_file) {
-               char *commstart = cn->corename + cn->used;
-               ret = cn_printf(cn, "%s (path unknown)", current->comm);
-               cn_escape(commstart);
-               return ret;
-       }
-
-       pathbuf = kmalloc(PATH_MAX, GFP_TEMPORARY);
-       if (!pathbuf) {
-               ret = -ENOMEM;
-               goto put_exe_file;
-       }
-
-       path = d_path(&exe_file->f_path, pathbuf, PATH_MAX);
-       if (IS_ERR(path)) {
-               ret = PTR_ERR(path);
-               goto free_buf;
-       }
-
-       cn_escape(path);
-
-       ret = cn_printf(cn, "%s", path);
-
-free_buf:
-       kfree(pathbuf);
-put_exe_file:
-       fput(exe_file);
-       return ret;
-}
-
-/* format_corename will inspect the pattern parameter, and output a
- * name into corename, which must have space for at least
- * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.
- */
-static int format_corename(struct core_name *cn, long signr)
-{
-       const struct cred *cred = current_cred();
-       const char *pat_ptr = core_pattern;
-       int ispipe = (*pat_ptr == '|');
-       int pid_in_pattern = 0;
-       int err = 0;
-
-       cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
-       cn->corename = kmalloc(cn->size, GFP_KERNEL);
-       cn->used = 0;
-
-       if (!cn->corename)
-               return -ENOMEM;
-
-       /* Repeat as long as we have more pattern to process and more output
-          space */
-       while (*pat_ptr) {
-               if (*pat_ptr != '%') {
-                       if (*pat_ptr == 0)
-                               goto out;
-                       err = cn_printf(cn, "%c", *pat_ptr++);
-               } else {
-                       switch (*++pat_ptr) {
-                       /* single % at the end, drop that */
-                       case 0:
-                               goto out;
-                       /* Double percent, output one percent */
-                       case '%':
-                               err = cn_printf(cn, "%c", '%');
-                               break;
-                       /* pid */
-                       case 'p':
-                               pid_in_pattern = 1;
-                               err = cn_printf(cn, "%d",
-                                             task_tgid_vnr(current));
-                               break;
-                       /* uid */
-                       case 'u':
-                               err = cn_printf(cn, "%d", cred->uid);
-                               break;
-                       /* gid */
-                       case 'g':
-                               err = cn_printf(cn, "%d", cred->gid);
-                               break;
-                       /* signal that caused the coredump */
-                       case 's':
-                               err = cn_printf(cn, "%ld", signr);
-                               break;
-                       /* UNIX time of coredump */
-                       case 't': {
-                               struct timeval tv;
-                               do_gettimeofday(&tv);
-                               err = cn_printf(cn, "%lu", tv.tv_sec);
-                               break;
-                       }
-                       /* hostname */
-                       case 'h': {
-                               char *namestart = cn->corename + cn->used;
-                               down_read(&uts_sem);
-                               err = cn_printf(cn, "%s",
-                                             utsname()->nodename);
-                               up_read(&uts_sem);
-                               cn_escape(namestart);
-                               break;
-                       }
-                       /* executable */
-                       case 'e': {
-                               char *commstart = cn->corename + cn->used;
-                               err = cn_printf(cn, "%s", current->comm);
-                               cn_escape(commstart);
-                               break;
-                       }
-                       case 'E':
-                               err = cn_print_exe_file(cn);
-                               break;
-                       /* core limit size */
-                       case 'c':
-                               err = cn_printf(cn, "%lu",
-                                             rlimit(RLIMIT_CORE));
-                               break;
-                       default:
-                               break;
-                       }
-                       ++pat_ptr;
-               }
-
-               if (err)
-                       return err;
-       }
-
-       /* Backward compatibility with core_uses_pid:
-        *
-        * If core_pattern does not include a %p (as is the default)
-        * and core_uses_pid is set, then .%pid will be appended to
-        * the filename. Do not do this for piped commands. */
-       if (!ispipe && !pid_in_pattern && core_uses_pid) {
-               err = cn_printf(cn, ".%d", task_tgid_vnr(current));
-               if (err)
-                       return err;
-       }
-out:
-       return ispipe;
-}
-
-static int zap_process(struct task_struct *start, int exit_code)
-{
-       struct task_struct *t;
-       int nr = 0;
-
-       start->signal->flags = SIGNAL_GROUP_EXIT;
-       start->signal->group_exit_code = exit_code;
-       start->signal->group_stop_count = 0;
-
-       t = start;
-       do {
-               task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
-               if (t != current && t->mm) {
-                       sigaddset(&t->pending.signal, SIGKILL);
-                       signal_wake_up(t, 1);
-                       nr++;
-               }
-       } while_each_thread(start, t);
-
-       return nr;
-}
-
-static inline int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
-                               struct core_state *core_state, int exit_code)
-{
-       struct task_struct *g, *p;
-       unsigned long flags;
-       int nr = -EAGAIN;
-
-       spin_lock_irq(&tsk->sighand->siglock);
-       if (!signal_group_exit(tsk->signal)) {
-               mm->core_state = core_state;
-               nr = zap_process(tsk, exit_code);
-       }
-       spin_unlock_irq(&tsk->sighand->siglock);
-       if (unlikely(nr < 0))
-               return nr;
-
-       if (atomic_read(&mm->mm_users) == nr + 1)
-               goto done;
-       /*
-        * We should find and kill all tasks which use this mm, and we should
-        * count them correctly into ->nr_threads. We don't take tasklist
-        * lock, but this is safe wrt:
-        *
-        * fork:
-        *      None of sub-threads can fork after zap_process(leader). All
-        *      processes which were created before this point should be
-        *      visible to zap_threads() because copy_process() adds the new
-        *      process to the tail of init_task.tasks list, and lock/unlock
-        *      of ->siglock provides a memory barrier.
-        *
-        * do_exit:
-        *      The caller holds mm->mmap_sem. This means that the task which
-        *      uses this mm can't pass exit_mm(), so it can't exit or clear
-        *      its ->mm.
-        *
-        * de_thread:
-        *      It does list_replace_rcu(&leader->tasks, &current->tasks),
-        *      we must see either old or new leader, this does not matter.
-        *      However, it can change p->sighand, so lock_task_sighand(p)
-        *      must be used. Since p->mm != NULL and we hold ->mmap_sem
-        *      it can't fail.
-        *
-        *      Note also that "g" can be the old leader with ->mm == NULL
-        *      and already unhashed and thus removed from ->thread_group.
-        *      This is OK, __unhash_process()->list_del_rcu() does not
-        *      clear the ->next pointer, we will find the new leader via
-        *      next_thread().
-        */
-       rcu_read_lock();
-       for_each_process(g) {
-               if (g == tsk->group_leader)
-                       continue;
-               if (g->flags & PF_KTHREAD)
-                       continue;
-               p = g;
-               do {
-                       if (p->mm) {
-                               if (unlikely(p->mm == mm)) {
-                                       lock_task_sighand(p, &flags);
-                                       nr += zap_process(p, exit_code);
-                                       unlock_task_sighand(p, &flags);
-                               }
-                               break;
-                       }
-               } while_each_thread(g, p);
-       }
-       rcu_read_unlock();
-done:
-       atomic_set(&core_state->nr_threads, nr);
-       return nr;
-}
-
-static int coredump_wait(int exit_code, struct core_state *core_state)
-{
-       struct task_struct *tsk = current;
-       struct mm_struct *mm = tsk->mm;
-       int core_waiters = -EBUSY;
-
-       init_completion(&core_state->startup);
-       core_state->dumper.task = tsk;
-       core_state->dumper.next = NULL;
-
-       down_write(&mm->mmap_sem);
-       if (!mm->core_state)
-               core_waiters = zap_threads(tsk, mm, core_state, exit_code);
-       up_write(&mm->mmap_sem);
-
-       if (core_waiters > 0) {
-               struct core_thread *ptr;
-
-               wait_for_completion(&core_state->startup);
-               /*
-                * Wait for all the threads to become inactive, so that
-                * all the thread context (extended register state, like
-                * fpu etc) gets copied to the memory.
-                */
-               ptr = core_state->dumper.next;
-               while (ptr != NULL) {
-                       wait_task_inactive(ptr->task, 0);
-                       ptr = ptr->next;
-               }
-       }
-
-       return core_waiters;
-}
-
-static void coredump_finish(struct mm_struct *mm)
-{
-       struct core_thread *curr, *next;
-       struct task_struct *task;
-
-       next = mm->core_state->dumper.next;
-       while ((curr = next) != NULL) {
-               next = curr->next;
-               task = curr->task;
-               /*
-                * see exit_mm(), curr->task must not see
-                * ->task == NULL before we read ->next.
-                */
-               smp_mb();
-               curr->task = NULL;
-               wake_up_process(task);
-       }
-
-       mm->core_state = NULL;
-}
-
 /*
  * set_dumpable converts traditional three-value dumpable to two flags and
  * stores them into mm->flags.  It modifies lower two bits of mm->flags, but
@@ -2020,7 +1633,7 @@ void set_dumpable(struct mm_struct *mm, int value)
        }
 }
 
-static int __get_dumpable(unsigned long mm_flags)
+int __get_dumpable(unsigned long mm_flags)
 {
        int ret;
 
@@ -2032,290 +1645,3 @@ int get_dumpable(struct mm_struct *mm)
 {
        return __get_dumpable(mm->flags);
 }
-
-static void wait_for_dump_helpers(struct file *file)
-{
-       struct pipe_inode_info *pipe;
-
-       pipe = file->f_path.dentry->d_inode->i_pipe;
-
-       pipe_lock(pipe);
-       pipe->readers++;
-       pipe->writers--;
-
-       while ((pipe->readers > 1) && (!signal_pending(current))) {
-               wake_up_interruptible_sync(&pipe->wait);
-               kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
-               pipe_wait(pipe);
-       }
-
-       pipe->readers--;
-       pipe->writers++;
-       pipe_unlock(pipe);
-
-}
-
-
-/*
- * umh_pipe_setup
- * helper function to customize the process used
- * to collect the core in userspace.  Specifically
- * it sets up a pipe and installs it as fd 0 (stdin)
- * for the process.  Returns 0 on success, or
- * PTR_ERR on failure.
- * Note that it also sets the core limit to 1.  This
- * is a special value that we use to trap recursive
- * core dumps
- */
-static int umh_pipe_setup(struct subprocess_info *info, struct cred *new)
-{
-       struct file *files[2];
-       struct fdtable *fdt;
-       struct coredump_params *cp = (struct coredump_params *)info->data;
-       struct files_struct *cf = current->files;
-       int err = create_pipe_files(files, 0);
-       if (err)
-               return err;
-
-       cp->file = files[1];
-
-       sys_close(0);
-       fd_install(0, files[0]);
-       spin_lock(&cf->file_lock);
-       fdt = files_fdtable(cf);
-       __set_open_fd(0, fdt);
-       __clear_close_on_exec(0, fdt);
-       spin_unlock(&cf->file_lock);
-
-       /* and disallow core files too */
-       current->signal->rlim[RLIMIT_CORE] = (struct rlimit){1, 1};
-
-       return 0;
-}
-
-void do_coredump(long signr, int exit_code, struct pt_regs *regs)
-{
-       struct core_state core_state;
-       struct core_name cn;
-       struct mm_struct *mm = current->mm;
-       struct linux_binfmt * binfmt;
-       const struct cred *old_cred;
-       struct cred *cred;
-       int retval = 0;
-       int flag = 0;
-       int ispipe;
-       bool need_nonrelative = false;
-       static atomic_t core_dump_count = ATOMIC_INIT(0);
-       struct coredump_params cprm = {
-               .signr = signr,
-               .regs = regs,
-               .limit = rlimit(RLIMIT_CORE),
-               /*
-                * We must use the same mm->flags while dumping core to avoid
-                * inconsistency of bit flags, since this flag is not protected
-                * by any locks.
-                */
-               .mm_flags = mm->flags,
-       };
-
-       audit_core_dumps(signr);
-
-       binfmt = mm->binfmt;
-       if (!binfmt || !binfmt->core_dump)
-               goto fail;
-       if (!__get_dumpable(cprm.mm_flags))
-               goto fail;
-
-       cred = prepare_creds();
-       if (!cred)
-               goto fail;
-       /*
-        * We cannot trust fsuid as being the "true" uid of the process
-        * nor do we know its entire history. We only know it was tainted
-        * so we dump it as root in mode 2, and only into a controlled
-        * environment (pipe handler or fully qualified path).
-        */
-       if (__get_dumpable(cprm.mm_flags) == SUID_DUMPABLE_SAFE) {
-               /* Setuid core dump mode */
-               flag = O_EXCL;          /* Stop rewrite attacks */
-               cred->fsuid = GLOBAL_ROOT_UID;  /* Dump root private */
-               need_nonrelative = true;
-       }
-
-       retval = coredump_wait(exit_code, &core_state);
-       if (retval < 0)
-               goto fail_creds;
-
-       old_cred = override_creds(cred);
-
-       /*
-        * Clear any false indication of pending signals that might
-        * be seen by the filesystem code called to write the core file.
-        */
-       clear_thread_flag(TIF_SIGPENDING);
-
-       ispipe = format_corename(&cn, signr);
-
-       if (ispipe) {
-               int dump_count;
-               char **helper_argv;
-
-               if (ispipe < 0) {
-                       printk(KERN_WARNING "format_corename failed\n");
-                       printk(KERN_WARNING "Aborting core\n");
-                       goto fail_corename;
-               }
-
-               if (cprm.limit == 1) {
-                       /* See umh_pipe_setup() which sets RLIMIT_CORE = 1.
-                        *
-                        * Normally core limits are irrelevant to pipes, since
-                        * we're not writing to the file system, but we use
-                        * cprm.limit of 1 here as a speacial value, this is a
-                        * consistent way to catch recursive crashes.
-                        * We can still crash if the core_pattern binary sets
-                        * RLIM_CORE = !1, but it runs as root, and can do
-                        * lots of stupid things.
-                        *
-                        * Note that we use task_tgid_vnr here to grab the pid
-                        * of the process group leader.  That way we get the
-                        * right pid if a thread in a multi-threaded
-                        * core_pattern process dies.
-                        */
-                       printk(KERN_WARNING
-                               "Process %d(%s) has RLIMIT_CORE set to 1\n",
-                               task_tgid_vnr(current), current->comm);
-                       printk(KERN_WARNING "Aborting core\n");
-                       goto fail_unlock;
-               }
-               cprm.limit = RLIM_INFINITY;
-
-               dump_count = atomic_inc_return(&core_dump_count);
-               if (core_pipe_limit && (core_pipe_limit < dump_count)) {
-                       printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
-                              task_tgid_vnr(current), current->comm);
-                       printk(KERN_WARNING "Skipping core dump\n");
-                       goto fail_dropcount;
-               }
-
-               helper_argv = argv_split(GFP_KERNEL, cn.corename+1, NULL);
-               if (!helper_argv) {
-                       printk(KERN_WARNING "%s failed to allocate memory\n",
-                              __func__);
-                       goto fail_dropcount;
-               }
-
-               retval = call_usermodehelper_fns(helper_argv[0], helper_argv,
-                                       NULL, UMH_WAIT_EXEC, umh_pipe_setup,
-                                       NULL, &cprm);
-               argv_free(helper_argv);
-               if (retval) {
-                       printk(KERN_INFO "Core dump to %s pipe failed\n",
-                              cn.corename);
-                       goto close_fail;
-               }
-       } else {
-               struct inode *inode;
-
-               if (cprm.limit < binfmt->min_coredump)
-                       goto fail_unlock;
-
-               if (need_nonrelative && cn.corename[0] != '/') {
-                       printk(KERN_WARNING "Pid %d(%s) can only dump core "\
-                               "to fully qualified path!\n",
-                               task_tgid_vnr(current), current->comm);
-                       printk(KERN_WARNING "Skipping core dump\n");
-                       goto fail_unlock;
-               }
-
-               cprm.file = filp_open(cn.corename,
-                                O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag,
-                                0600);
-               if (IS_ERR(cprm.file))
-                       goto fail_unlock;
-
-               inode = cprm.file->f_path.dentry->d_inode;
-               if (inode->i_nlink > 1)
-                       goto close_fail;
-               if (d_unhashed(cprm.file->f_path.dentry))
-                       goto close_fail;
-               /*
-                * AK: actually i see no reason to not allow this for named
-                * pipes etc, but keep the previous behaviour for now.
-                */
-               if (!S_ISREG(inode->i_mode))
-                       goto close_fail;
-               /*
-                * Dont allow local users get cute and trick others to coredump
-                * into their pre-created files.
-                */
-               if (!uid_eq(inode->i_uid, current_fsuid()))
-                       goto close_fail;
-               if (!cprm.file->f_op || !cprm.file->f_op->write)
-                       goto close_fail;
-               if (do_truncate(cprm.file->f_path.dentry, 0, 0, cprm.file))
-                       goto close_fail;
-       }
-
-       retval = binfmt->core_dump(&cprm);
-       if (retval)
-               current->signal->group_exit_code |= 0x80;
-
-       if (ispipe && core_pipe_limit)
-               wait_for_dump_helpers(cprm.file);
-close_fail:
-       if (cprm.file)
-               filp_close(cprm.file, NULL);
-fail_dropcount:
-       if (ispipe)
-               atomic_dec(&core_dump_count);
-fail_unlock:
-       kfree(cn.corename);
-fail_corename:
-       coredump_finish(mm);
-       revert_creds(old_cred);
-fail_creds:
-       put_cred(cred);
-fail:
-       return;
-}
-
-/*
- * Core dumping helper functions.  These are the only things you should
- * do on a core-file: use only these functions to write out all the
- * necessary info.
- */
-int dump_write(struct file *file, const void *addr, int nr)
-{
-       return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr;
-}
-EXPORT_SYMBOL(dump_write);
-
-int dump_seek(struct file *file, loff_t off)
-{
-       int ret = 1;
-
-       if (file->f_op->llseek && file->f_op->llseek != no_llseek) {
-               if (file->f_op->llseek(file, off, SEEK_CUR) < 0)
-                       return 0;
-       } else {
-               char *buf = (char *)get_zeroed_page(GFP_KERNEL);
-
-               if (!buf)
-                       return 0;
-               while (off > 0) {
-                       unsigned long n = off;
-
-                       if (n > PAGE_SIZE)
-                               n = PAGE_SIZE;
-                       if (!dump_write(file, buf, n)) {
-                               ret = 0;
-                               break;
-                       }
-                       off -= n;
-               }
-               free_page((unsigned long)buf);
-       }
-       return ret;
-}
-EXPORT_SYMBOL(dump_seek);
index 1562c27a2fab27f700825e0090e4d98492fa2fbd..b5618104775187787415b67c1d99658f45f8b3d7 100644 (file)
@@ -1172,8 +1172,8 @@ struct inode *exofs_iget(struct super_block *sb, unsigned long ino)
 
        /* copy stuff from on-disk struct to in-memory struct */
        inode->i_mode = le16_to_cpu(fcb.i_mode);
-       inode->i_uid = le32_to_cpu(fcb.i_uid);
-       inode->i_gid = le32_to_cpu(fcb.i_gid);
+       i_uid_write(inode, le32_to_cpu(fcb.i_uid));
+       i_gid_write(inode, le32_to_cpu(fcb.i_gid));
        set_nlink(inode, le16_to_cpu(fcb.i_links_count));
        inode->i_ctime.tv_sec = (signed)le32_to_cpu(fcb.i_ctime);
        inode->i_atime.tv_sec = (signed)le32_to_cpu(fcb.i_atime);
@@ -1385,8 +1385,8 @@ static int exofs_update_inode(struct inode *inode, int do_sync)
        fcb = &args->fcb;
 
        fcb->i_mode = cpu_to_le16(inode->i_mode);
-       fcb->i_uid = cpu_to_le32(inode->i_uid);
-       fcb->i_gid = cpu_to_le32(inode->i_gid);
+       fcb->i_uid = cpu_to_le32(i_uid_read(inode));
+       fcb->i_gid = cpu_to_le32(i_gid_read(inode));
        fcb->i_links_count = cpu_to_le16(inode->i_nlink);
        fcb->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
        fcb->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
index dde41a75c7c8dbd36597272a13f5c6f3487507d3..59e3bbfac0b17af51d9101b0d35dc6414ad53187 100644 (file)
@@ -206,6 +206,11 @@ static int init_inodecache(void)
  */
 static void destroy_inodecache(void)
 {
+       /*
+        * Make sure all delayed rcu free inodes are flushed before we
+        * destroy cache.
+        */
+       rcu_barrier();
        kmem_cache_destroy(exofs_inode_cachep);
 }
 
index 35d6a3cfd9ff3a5c6562adb46fc614595ff4c718..110b6b371a4edc353fc1904bcc79299580087184 100644 (file)
@@ -53,16 +53,23 @@ ext2_acl_from_disk(const void *value, size_t size)
                        case ACL_OTHER:
                                value = (char *)value +
                                        sizeof(ext2_acl_entry_short);
-                               acl->a_entries[n].e_id = ACL_UNDEFINED_ID;
                                break;
 
                        case ACL_USER:
+                               value = (char *)value + sizeof(ext2_acl_entry);
+                               if ((char *)value > end)
+                                       goto fail;
+                               acl->a_entries[n].e_uid =
+                                       make_kuid(&init_user_ns,
+                                                 le32_to_cpu(entry->e_id));
+                               break;
                        case ACL_GROUP:
                                value = (char *)value + sizeof(ext2_acl_entry);
                                if ((char *)value > end)
                                        goto fail;
-                               acl->a_entries[n].e_id =
-                                       le32_to_cpu(entry->e_id);
+                               acl->a_entries[n].e_gid =
+                                       make_kgid(&init_user_ns,
+                                                 le32_to_cpu(entry->e_id));
                                break;
 
                        default:
@@ -96,14 +103,19 @@ ext2_acl_to_disk(const struct posix_acl *acl, size_t *size)
        ext_acl->a_version = cpu_to_le32(EXT2_ACL_VERSION);
        e = (char *)ext_acl + sizeof(ext2_acl_header);
        for (n=0; n < acl->a_count; n++) {
+               const struct posix_acl_entry *acl_e = &acl->a_entries[n];
                ext2_acl_entry *entry = (ext2_acl_entry *)e;
-               entry->e_tag  = cpu_to_le16(acl->a_entries[n].e_tag);
-               entry->e_perm = cpu_to_le16(acl->a_entries[n].e_perm);
-               switch(acl->a_entries[n].e_tag) {
+               entry->e_tag  = cpu_to_le16(acl_e->e_tag);
+               entry->e_perm = cpu_to_le16(acl_e->e_perm);
+               switch(acl_e->e_tag) {
                        case ACL_USER:
+                               entry->e_id = cpu_to_le32(
+                                       from_kuid(&init_user_ns, acl_e->e_uid));
+                               e += sizeof(ext2_acl_entry);
+                               break;
                        case ACL_GROUP:
-                               entry->e_id =
-                                       cpu_to_le32(acl->a_entries[n].e_id);
+                               entry->e_id = cpu_to_le32(
+                                       from_kgid(&init_user_ns, acl_e->e_gid));
                                e += sizeof(ext2_acl_entry);
                                break;
 
@@ -350,7 +362,7 @@ ext2_xattr_get_acl(struct dentry *dentry, const char *name, void *buffer,
                return PTR_ERR(acl);
        if (acl == NULL)
                return -ENODATA;
-       error = posix_acl_to_xattr(acl, buffer, size);
+       error = posix_acl_to_xattr(&init_user_ns, acl, buffer, size);
        posix_acl_release(acl);
 
        return error;
@@ -371,7 +383,7 @@ ext2_xattr_set_acl(struct dentry *dentry, const char *name, const void *value,
                return -EPERM;
 
        if (value) {
-               acl = posix_acl_from_xattr(value, size);
+               acl = posix_acl_from_xattr(&init_user_ns, value, size);
                if (IS_ERR(acl))
                        return PTR_ERR(acl);
                else if (acl) {
index af74d9e27b71b0cf569cac645e362c174c44389e..6c205d0c565b2595fe837eab5cf789d995df2d18 100644 (file)
@@ -206,6 +206,11 @@ static int init_inodecache(void)
 
 static void destroy_inodecache(void)
 {
+       /*
+        * Make sure all delayed rcu free inodes are flushed before we
+        * destroy cache.
+        */
+       rcu_barrier();
        kmem_cache_destroy(ext2_inode_cachep);
 }
 
index c76832c8d19229d6c868878ca3f7250394f10f72..dbb5ad59a7fc3c22380ce30b9034f8942bec967b 100644 (file)
@@ -48,16 +48,23 @@ ext3_acl_from_disk(const void *value, size_t size)
                        case ACL_OTHER:
                                value = (char *)value +
                                        sizeof(ext3_acl_entry_short);
-                               acl->a_entries[n].e_id = ACL_UNDEFINED_ID;
                                break;
 
                        case ACL_USER:
+                               value = (char *)value + sizeof(ext3_acl_entry);
+                               if ((char *)value > end)
+                                       goto fail;
+                               acl->a_entries[n].e_uid =
+                                       make_kuid(&init_user_ns,
+                                                 le32_to_cpu(entry->e_id));
+                               break;
                        case ACL_GROUP:
                                value = (char *)value + sizeof(ext3_acl_entry);
                                if ((char *)value > end)
                                        goto fail;
-                               acl->a_entries[n].e_id =
-                                       le32_to_cpu(entry->e_id);
+                               acl->a_entries[n].e_gid =
+                                       make_kgid(&init_user_ns,
+                                                 le32_to_cpu(entry->e_id));
                                break;
 
                        default:
@@ -91,14 +98,19 @@ ext3_acl_to_disk(const struct posix_acl *acl, size_t *size)
        ext_acl->a_version = cpu_to_le32(EXT3_ACL_VERSION);
        e = (char *)ext_acl + sizeof(ext3_acl_header);
        for (n=0; n < acl->a_count; n++) {
+               const struct posix_acl_entry *acl_e = &acl->a_entries[n];
                ext3_acl_entry *entry = (ext3_acl_entry *)e;
-               entry->e_tag  = cpu_to_le16(acl->a_entries[n].e_tag);
-               entry->e_perm = cpu_to_le16(acl->a_entries[n].e_perm);
-               switch(acl->a_entries[n].e_tag) {
+               entry->e_tag  = cpu_to_le16(acl_e->e_tag);
+               entry->e_perm = cpu_to_le16(acl_e->e_perm);
+               switch(acl_e->e_tag) {
                        case ACL_USER:
+                               entry->e_id = cpu_to_le32(
+                                       from_kuid(&init_user_ns, acl_e->e_uid));
+                               e += sizeof(ext3_acl_entry);
+                               break;
                        case ACL_GROUP:
-                               entry->e_id =
-                                       cpu_to_le32(acl->a_entries[n].e_id);
+                               entry->e_id = cpu_to_le32(
+                                       from_kgid(&init_user_ns, acl_e->e_gid));
                                e += sizeof(ext3_acl_entry);
                                break;
 
@@ -369,7 +381,7 @@ ext3_xattr_get_acl(struct dentry *dentry, const char *name, void *buffer,
                return PTR_ERR(acl);
        if (acl == NULL)
                return -ENODATA;
-       error = posix_acl_to_xattr(acl, buffer, size);
+       error = posix_acl_to_xattr(&init_user_ns, acl, buffer, size);
        posix_acl_release(acl);
 
        return error;
@@ -392,7 +404,7 @@ ext3_xattr_set_acl(struct dentry *dentry, const char *name, const void *value,
                return -EPERM;
 
        if (value) {
-               acl = posix_acl_from_xattr(value, size);
+               acl = posix_acl_from_xattr(&init_user_ns, value, size);
                if (IS_ERR(acl))
                        return PTR_ERR(acl);
                else if (acl) {
index 8c892e93d8e7b6f2ff727709619eedde0e880812..bd29894c8fbcf8e4bf669cc2ab0d64fe20bcb327 100644 (file)
@@ -532,6 +532,11 @@ static int init_inodecache(void)
 
 static void destroy_inodecache(void)
 {
+       /*
+        * Make sure all delayed rcu free inodes are flushed before we
+        * destroy cache.
+        */
+       rcu_barrier();
        kmem_cache_destroy(ext3_inode_cachep);
 }
 
@@ -2803,7 +2808,7 @@ static int ext3_statfs (struct dentry * dentry, struct kstatfs * buf)
 
 static inline struct inode *dquot_to_inode(struct dquot *dquot)
 {
-       return sb_dqopt(dquot->dq_sb)->files[dquot->dq_type];
+       return sb_dqopt(dquot->dq_sb)->files[dquot->dq_id.type];
 }
 
 static int ext3_write_dquot(struct dquot *dquot)
index a5c29bb3b835d7c528d0b9f65ce3436e8b7f9181..d3c5b88fd89f22f9061b4905b756445936e7076b 100644 (file)
@@ -55,16 +55,23 @@ ext4_acl_from_disk(const void *value, size_t size)
                case ACL_OTHER:
                        value = (char *)value +
                                sizeof(ext4_acl_entry_short);
-                       acl->a_entries[n].e_id = ACL_UNDEFINED_ID;
                        break;
 
                case ACL_USER:
+                       value = (char *)value + sizeof(ext4_acl_entry);
+                       if ((char *)value > end)
+                               goto fail;
+                       acl->a_entries[n].e_uid =
+                               make_kuid(&init_user_ns,
+                                         le32_to_cpu(entry->e_id));
+                       break;
                case ACL_GROUP:
                        value = (char *)value + sizeof(ext4_acl_entry);
                        if ((char *)value > end)
                                goto fail;
-                       acl->a_entries[n].e_id =
-                               le32_to_cpu(entry->e_id);
+                       acl->a_entries[n].e_gid =
+                               make_kgid(&init_user_ns,
+                                         le32_to_cpu(entry->e_id));
                        break;
 
                default:
@@ -98,13 +105,19 @@ ext4_acl_to_disk(const struct posix_acl *acl, size_t *size)
        ext_acl->a_version = cpu_to_le32(EXT4_ACL_VERSION);
        e = (char *)ext_acl + sizeof(ext4_acl_header);
        for (n = 0; n < acl->a_count; n++) {
+               const struct posix_acl_entry *acl_e = &acl->a_entries[n];
                ext4_acl_entry *entry = (ext4_acl_entry *)e;
-               entry->e_tag  = cpu_to_le16(acl->a_entries[n].e_tag);
-               entry->e_perm = cpu_to_le16(acl->a_entries[n].e_perm);
-               switch (acl->a_entries[n].e_tag) {
+               entry->e_tag  = cpu_to_le16(acl_e->e_tag);
+               entry->e_perm = cpu_to_le16(acl_e->e_perm);
+               switch (acl_e->e_tag) {
                case ACL_USER:
+                       entry->e_id = cpu_to_le32(
+                               from_kuid(&init_user_ns, acl_e->e_uid));
+                       e += sizeof(ext4_acl_entry);
+                       break;
                case ACL_GROUP:
-                       entry->e_id = cpu_to_le32(acl->a_entries[n].e_id);
+                       entry->e_id = cpu_to_le32(
+                               from_kgid(&init_user_ns, acl_e->e_gid));
                        e += sizeof(ext4_acl_entry);
                        break;
 
@@ -374,7 +387,7 @@ ext4_xattr_get_acl(struct dentry *dentry, const char *name, void *buffer,
                return PTR_ERR(acl);
        if (acl == NULL)
                return -ENODATA;
-       error = posix_acl_to_xattr(acl, buffer, size);
+       error = posix_acl_to_xattr(&init_user_ns, acl, buffer, size);
        posix_acl_release(acl);
 
        return error;
@@ -397,7 +410,7 @@ ext4_xattr_set_acl(struct dentry *dentry, const char *name, const void *value,
                return -EPERM;
 
        if (value) {
-               acl = posix_acl_from_xattr(value, size);
+               acl = posix_acl_from_xattr(&init_user_ns, value, size);
                if (IS_ERR(acl))
                        return PTR_ERR(acl);
                else if (acl) {
index 7f7dad7876035e474b3c9ad7054344f6f6e7d045..5439d6a56e991391d1e57682bf713738bf28b3e2 100644 (file)
@@ -233,7 +233,7 @@ group_extend_out:
 
        case EXT4_IOC_MOVE_EXT: {
                struct move_extent me;
-               struct file *donor_filp;
+               struct fd donor;
                int err;
 
                if (!(filp->f_mode & FMODE_READ) ||
@@ -245,11 +245,11 @@ group_extend_out:
                        return -EFAULT;
                me.moved_len = 0;
 
-               donor_filp = fget(me.donor_fd);
-               if (!donor_filp)
+               donor = fdget(me.donor_fd);
+               if (!donor.file)
                        return -EBADF;
 
-               if (!(donor_filp->f_mode & FMODE_WRITE)) {
+               if (!(donor.file->f_mode & FMODE_WRITE)) {
                        err = -EBADF;
                        goto mext_out;
                }
@@ -258,14 +258,15 @@ group_extend_out:
                               EXT4_FEATURE_RO_COMPAT_BIGALLOC)) {
                        ext4_msg(sb, KERN_ERR,
                                 "Online defrag not supported with bigalloc");
-                       return -EOPNOTSUPP;
+                       err = -EOPNOTSUPP;
+                       goto mext_out;
                }
 
                err = mnt_want_write_file(filp);
                if (err)
                        goto mext_out;
 
-               err = ext4_move_extents(filp, donor_filp, me.orig_start,
+               err = ext4_move_extents(filp, donor.file, me.orig_start,
                                        me.donor_start, me.len, &me.moved_len);
                mnt_drop_write_file(filp);
 
@@ -273,7 +274,7 @@ group_extend_out:
                                 &me, sizeof(me)))
                        err = -EFAULT;
 mext_out:
-               fput(donor_filp);
+               fdput(donor);
                return err;
        }
 
index c6e0cb3d1f4a9e3730aea4904037eb0e9d4dd6d8..69c55d4e4626552fd3652da4816ac9d97eefabf5 100644 (file)
@@ -1019,6 +1019,11 @@ static int init_inodecache(void)
 
 static void destroy_inodecache(void)
 {
+       /*
+        * Make sure all delayed rcu free inodes are flushed before we
+        * destroy cache.
+        */
+       rcu_barrier();
        kmem_cache_destroy(ext4_inode_cachep);
 }
 
@@ -4791,7 +4796,7 @@ static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf)
 
 static inline struct inode *dquot_to_inode(struct dquot *dquot)
 {
-       return sb_dqopt(dquot->dq_sb)->files[dquot->dq_type];
+       return sb_dqopt(dquot->dq_sb)->files[dquot->dq_id.type];
 }
 
 static int ext4_write_dquot(struct dquot *dquot)
index 2deeeb86f331c8ad70d56300f1b5d4d5b155fe7b..7d8e0dcac5d5602fdd0c351047ef38980d0fb8b7 100644 (file)
@@ -23,8 +23,8 @@
 #define FAT_ERRORS_RO          3      /* remount r/o on error */
 
 struct fat_mount_options {
-       uid_t fs_uid;
-       gid_t fs_gid;
+       kuid_t fs_uid;
+       kgid_t fs_gid;
        unsigned short fs_fmask;
        unsigned short fs_dmask;
        unsigned short codepage;  /* Codepage for shortname conversions */
index e007b8bd8e5ec1d93f29c65f877e083d3ec10b6e..a62e0ecbe2dbbfbfeadc465f9dfff3608b61b108 100644 (file)
@@ -352,7 +352,7 @@ static int fat_allow_set_time(struct msdos_sb_info *sbi, struct inode *inode)
 {
        umode_t allow_utime = sbi->options.allow_utime;
 
-       if (current_fsuid() != inode->i_uid) {
+       if (!uid_eq(current_fsuid(), inode->i_uid)) {
                if (in_group_p(inode->i_gid))
                        allow_utime >>= 3;
                if (allow_utime & MAY_WRITE)
@@ -407,9 +407,9 @@ int fat_setattr(struct dentry *dentry, struct iattr *attr)
        }
 
        if (((attr->ia_valid & ATTR_UID) &&
-            (attr->ia_uid != sbi->options.fs_uid)) ||
+            (!uid_eq(attr->ia_uid, sbi->options.fs_uid))) ||
            ((attr->ia_valid & ATTR_GID) &&
-            (attr->ia_gid != sbi->options.fs_gid)) ||
+            (!gid_eq(attr->ia_gid, sbi->options.fs_gid))) ||
            ((attr->ia_valid & ATTR_MODE) &&
             (attr->ia_mode & ~FAT_VALID_MODE)))
                error = -EPERM;
index 05e897fe9866c49fb96fa09459240bf7a550586c..4e5a6ac54ebd9deec1e8bfeb430eb3c7608dc78d 100644 (file)
@@ -521,6 +521,11 @@ static int __init fat_init_inodecache(void)
 
 static void __exit fat_destroy_inodecache(void)
 {
+       /*
+        * Make sure all delayed rcu free inodes are flushed before we
+        * destroy cache.
+        */
+       rcu_barrier();
        kmem_cache_destroy(fat_inode_cachep);
 }
 
@@ -791,10 +796,12 @@ static int fat_show_options(struct seq_file *m, struct dentry *root)
        struct fat_mount_options *opts = &sbi->options;
        int isvfat = opts->isvfat;
 
-       if (opts->fs_uid != 0)
-               seq_printf(m, ",uid=%u", opts->fs_uid);
-       if (opts->fs_gid != 0)
-               seq_printf(m, ",gid=%u", opts->fs_gid);
+       if (!uid_eq(opts->fs_uid, GLOBAL_ROOT_UID))
+               seq_printf(m, ",uid=%u",
+                               from_kuid_munged(&init_user_ns, opts->fs_uid));
+       if (!gid_eq(opts->fs_gid, GLOBAL_ROOT_GID))
+               seq_printf(m, ",gid=%u",
+                               from_kgid_munged(&init_user_ns, opts->fs_gid));
        seq_printf(m, ",fmask=%04o", opts->fs_fmask);
        seq_printf(m, ",dmask=%04o", opts->fs_dmask);
        if (opts->allow_utime)
@@ -1037,12 +1044,16 @@ static int parse_options(struct super_block *sb, char *options, int is_vfat,
                case Opt_uid:
                        if (match_int(&args[0], &option))
                                return 0;
-                       opts->fs_uid = option;
+                       opts->fs_uid = make_kuid(current_user_ns(), option);
+                       if (!uid_valid(opts->fs_uid))
+                               return 0;
                        break;
                case Opt_gid:
                        if (match_int(&args[0], &option))
                                return 0;
-                       opts->fs_gid = option;
+                       opts->fs_gid = make_kgid(current_user_ns(), option);
+                       if (!gid_valid(opts->fs_gid))
+                               return 0;
                        break;
                case Opt_umask:
                        if (match_octal(&args[0], &option))
index 887b5ba8c9b56be800d4d3f7a799fc21f1c7892c..8f704291d4ed812d724536f9396485abddde2089 100644 (file)
 #include <asm/siginfo.h>
 #include <asm/uaccess.h>
 
-void set_close_on_exec(unsigned int fd, int flag)
-{
-       struct files_struct *files = current->files;
-       struct fdtable *fdt;
-       spin_lock(&files->file_lock);
-       fdt = files_fdtable(files);
-       if (flag)
-               __set_close_on_exec(fd, fdt);
-       else
-               __clear_close_on_exec(fd, fdt);
-       spin_unlock(&files->file_lock);
-}
-
-static bool get_close_on_exec(unsigned int fd)
-{
-       struct files_struct *files = current->files;
-       struct fdtable *fdt;
-       bool res;
-       rcu_read_lock();
-       fdt = files_fdtable(files);
-       res = close_on_exec(fd, fdt);
-       rcu_read_unlock();
-       return res;
-}
-
-SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
-{
-       int err = -EBADF;
-       struct file * file, *tofree;
-       struct files_struct * files = current->files;
-       struct fdtable *fdt;
-
-       if ((flags & ~O_CLOEXEC) != 0)
-               return -EINVAL;
-
-       if (unlikely(oldfd == newfd))
-               return -EINVAL;
-
-       spin_lock(&files->file_lock);
-       err = expand_files(files, newfd);
-       file = fcheck(oldfd);
-       if (unlikely(!file))
-               goto Ebadf;
-       if (unlikely(err < 0)) {
-               if (err == -EMFILE)
-                       goto Ebadf;
-               goto out_unlock;
-       }
-       /*
-        * We need to detect attempts to do dup2() over allocated but still
-        * not finished descriptor.  NB: OpenBSD avoids that at the price of
-        * extra work in their equivalent of fget() - they insert struct
-        * file immediately after grabbing descriptor, mark it larval if
-        * more work (e.g. actual opening) is needed and make sure that
-        * fget() treats larval files as absent.  Potentially interesting,
-        * but while extra work in fget() is trivial, locking implications
-        * and amount of surgery on open()-related paths in VFS are not.
-        * FreeBSD fails with -EBADF in the same situation, NetBSD "solution"
-        * deadlocks in rather amusing ways, AFAICS.  All of that is out of
-        * scope of POSIX or SUS, since neither considers shared descriptor
-        * tables and this condition does not arise without those.
-        */
-       err = -EBUSY;
-       fdt = files_fdtable(files);
-       tofree = fdt->fd[newfd];
-       if (!tofree && fd_is_open(newfd, fdt))
-               goto out_unlock;
-       get_file(file);
-       rcu_assign_pointer(fdt->fd[newfd], file);
-       __set_open_fd(newfd, fdt);
-       if (flags & O_CLOEXEC)
-               __set_close_on_exec(newfd, fdt);
-       else
-               __clear_close_on_exec(newfd, fdt);
-       spin_unlock(&files->file_lock);
-
-       if (tofree)
-               filp_close(tofree, files);
-
-       return newfd;
-
-Ebadf:
-       err = -EBADF;
-out_unlock:
-       spin_unlock(&files->file_lock);
-       return err;
-}
-
-SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd)
-{
-       if (unlikely(newfd == oldfd)) { /* corner case */
-               struct files_struct *files = current->files;
-               int retval = oldfd;
-
-               rcu_read_lock();
-               if (!fcheck_files(files, oldfd))
-                       retval = -EBADF;
-               rcu_read_unlock();
-               return retval;
-       }
-       return sys_dup3(oldfd, newfd, 0);
-}
-
-SYSCALL_DEFINE1(dup, unsigned int, fildes)
-{
-       int ret = -EBADF;
-       struct file *file = fget_raw(fildes);
-
-       if (file) {
-               ret = get_unused_fd();
-               if (ret >= 0)
-                       fd_install(ret, file);
-               else
-                       fput(file);
-       }
-       return ret;
-}
-
 #define SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | O_DIRECT | O_NOATIME)
 
 static int setfl(int fd, struct file * filp, unsigned long arg)
@@ -267,7 +149,7 @@ pid_t f_getown(struct file *filp)
 
 static int f_setown_ex(struct file *filp, unsigned long arg)
 {
-       struct f_owner_ex * __user owner_p = (void * __user)arg;
+       struct f_owner_ex __user *owner_p = (void __user *)arg;
        struct f_owner_ex owner;
        struct pid *pid;
        int type;
@@ -307,7 +189,7 @@ static int f_setown_ex(struct file *filp, unsigned long arg)
 
 static int f_getown_ex(struct file *filp, unsigned long arg)
 {
-       struct f_owner_ex * __user owner_p = (void * __user)arg;
+       struct f_owner_ex __user *owner_p = (void __user *)arg;
        struct f_owner_ex owner;
        int ret = 0;
 
@@ -345,7 +227,7 @@ static int f_getown_ex(struct file *filp, unsigned long arg)
 static int f_getowner_uids(struct file *filp, unsigned long arg)
 {
        struct user_namespace *user_ns = current_user_ns();
-       uid_t * __user dst = (void * __user)arg;
+       uid_t __user *dst = (void __user *)arg;
        uid_t src[2];
        int err;
 
@@ -373,14 +255,10 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
 
        switch (cmd) {
        case F_DUPFD:
+               err = f_dupfd(arg, filp, 0);
+               break;
        case F_DUPFD_CLOEXEC:
-               if (arg >= rlimit(RLIMIT_NOFILE))
-                       break;
-               err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
-               if (err >= 0) {
-                       get_file(filp);
-                       fd_install(err, filp);
-               }
+               err = f_dupfd(arg, filp, FD_CLOEXEC);
                break;
        case F_GETFD:
                err = get_close_on_exec(fd) ? FD_CLOEXEC : 0;
@@ -470,25 +348,23 @@ static int check_fcntl_cmd(unsigned cmd)
 
 SYSCALL_DEFINE3(fcntl, unsigned int, fd, unsigned int, cmd, unsigned long, arg)
 {      
-       struct file *filp;
-       int fput_needed;
+       struct fd f = fdget_raw(fd);
        long err = -EBADF;
 
-       filp = fget_raw_light(fd, &fput_needed);
-       if (!filp)
+       if (!f.file)
                goto out;
 
-       if (unlikely(filp->f_mode & FMODE_PATH)) {
+       if (unlikely(f.file->f_mode & FMODE_PATH)) {
                if (!check_fcntl_cmd(cmd))
                        goto out1;
        }
 
-       err = security_file_fcntl(filp, cmd, arg);
+       err = security_file_fcntl(f.file, cmd, arg);
        if (!err)
-               err = do_fcntl(fd, cmd, arg, filp);
+               err = do_fcntl(fd, cmd, arg, f.file);
 
 out1:
-       fput_light(filp, fput_needed);
+       fdput(f);
 out:
        return err;
 }
@@ -497,38 +373,36 @@ out:
 SYSCALL_DEFINE3(fcntl64, unsigned int, fd, unsigned int, cmd,
                unsigned long, arg)
 {      
-       struct file * filp;
+       struct fd f = fdget_raw(fd);
        long err = -EBADF;
-       int fput_needed;
 
-       filp = fget_raw_light(fd, &fput_needed);
-       if (!filp)
+       if (!f.file)
                goto out;
 
-       if (unlikely(filp->f_mode & FMODE_PATH)) {
+       if (unlikely(f.file->f_mode & FMODE_PATH)) {
                if (!check_fcntl_cmd(cmd))
                        goto out1;
        }
 
-       err = security_file_fcntl(filp, cmd, arg);
+       err = security_file_fcntl(f.file, cmd, arg);
        if (err)
                goto out1;
        
        switch (cmd) {
                case F_GETLK64:
-                       err = fcntl_getlk64(filp, (struct flock64 __user *) arg);
+                       err = fcntl_getlk64(f.file, (struct flock64 __user *) arg);
                        break;
                case F_SETLK64:
                case F_SETLKW64:
-                       err = fcntl_setlk64(fd, filp, cmd,
+                       err = fcntl_setlk64(fd, f.file, cmd,
                                        (struct flock64 __user *) arg);
                        break;
                default:
-                       err = do_fcntl(fd, cmd, arg, filp);
+                       err = do_fcntl(fd, cmd, arg, f.file);
                        break;
        }
 out1:
-       fput_light(filp, fput_needed);
+       fdput(f);
 out:
        return err;
 }
index a48e4a139be150d01aed47a4e387a14db9e731cb..f775bfdd6e4a7f3e0700b8ea556261ad8c13e3ba 100644 (file)
@@ -113,24 +113,21 @@ SYSCALL_DEFINE5(name_to_handle_at, int, dfd, const char __user *, name,
 
 static struct vfsmount *get_vfsmount_from_fd(int fd)
 {
-       struct path path;
+       struct vfsmount *mnt;
 
        if (fd == AT_FDCWD) {
                struct fs_struct *fs = current->fs;
                spin_lock(&fs->lock);
-               path = fs->pwd;
-               mntget(path.mnt);
+               mnt = mntget(fs->pwd.mnt);
                spin_unlock(&fs->lock);
        } else {
-               int fput_needed;
-               struct file *file = fget_light(fd, &fput_needed);
-               if (!file)
+               struct fd f = fdget(fd);
+               if (!f.file)
                        return ERR_PTR(-EBADF);
-               path = file->f_path;
-               mntget(path.mnt);
-               fput_light(file, fput_needed);
+               mnt = mntget(f.file->f_path.mnt);
+               fdput(f);
        }
-       return path.mnt;
+       return mnt;
 }
 
 static int vfs_dentry_acceptable(void *context, struct dentry *dentry)
index ba3f6053025cf44915ebd5ca42605d3281429d33..0f1bda4bebfaa77280a1234a2b8e19edb4b8d94b 100644 (file)
--- a/fs/file.c
+++ b/fs/file.c
@@ -6,6 +6,7 @@
  *  Manage the dynamic fd arrays in the process files_struct.
  */
 
+#include <linux/syscalls.h>
 #include <linux/export.h>
 #include <linux/fs.h>
 #include <linux/mm.h>
@@ -84,22 +85,14 @@ static void free_fdtable_work(struct work_struct *work)
        }
 }
 
-void free_fdtable_rcu(struct rcu_head *rcu)
+static void free_fdtable_rcu(struct rcu_head *rcu)
 {
        struct fdtable *fdt = container_of(rcu, struct fdtable, rcu);
        struct fdtable_defer *fddef;
 
        BUG_ON(!fdt);
+       BUG_ON(fdt->max_fds <= NR_OPEN_DEFAULT);
 
-       if (fdt->max_fds <= NR_OPEN_DEFAULT) {
-               /*
-                * This fdtable is embedded in the files structure and that
-                * structure itself is getting destroyed.
-                */
-               kmem_cache_free(files_cachep,
-                               container_of(fdt, struct files_struct, fdtab));
-               return;
-       }
        if (!is_vmalloc_addr(fdt->fd) && !is_vmalloc_addr(fdt->open_fds)) {
                kfree(fdt->fd);
                kfree(fdt->open_fds);
@@ -229,7 +222,7 @@ static int expand_fdtable(struct files_struct *files, int nr)
                copy_fdtable(new_fdt, cur_fdt);
                rcu_assign_pointer(files->fdt, new_fdt);
                if (cur_fdt->max_fds > NR_OPEN_DEFAULT)
-                       free_fdtable(cur_fdt);
+                       call_rcu(&cur_fdt->rcu, free_fdtable_rcu);
        } else {
                /* Somebody else expanded, so undo our attempt */
                __free_fdtable(new_fdt);
@@ -245,19 +238,12 @@ static int expand_fdtable(struct files_struct *files, int nr)
  * expanded and execution may have blocked.
  * The files->file_lock should be held on entry, and will be held on exit.
  */
-int expand_files(struct files_struct *files, int nr)
+static int expand_files(struct files_struct *files, int nr)
 {
        struct fdtable *fdt;
 
        fdt = files_fdtable(files);
 
-       /*
-        * N.B. For clone tasks sharing a files structure, this test
-        * will limit the total number of files that can be opened.
-        */
-       if (nr >= rlimit(RLIMIT_NOFILE))
-               return -EMFILE;
-
        /* Do we need to expand? */
        if (nr < fdt->max_fds)
                return 0;
@@ -270,6 +256,26 @@ int expand_files(struct files_struct *files, int nr)
        return expand_fdtable(files, nr);
 }
 
+static inline void __set_close_on_exec(int fd, struct fdtable *fdt)
+{
+       __set_bit(fd, fdt->close_on_exec);
+}
+
+static inline void __clear_close_on_exec(int fd, struct fdtable *fdt)
+{
+       __clear_bit(fd, fdt->close_on_exec);
+}
+
+static inline void __set_open_fd(int fd, struct fdtable *fdt)
+{
+       __set_bit(fd, fdt->open_fds);
+}
+
+static inline void __clear_open_fd(int fd, struct fdtable *fdt)
+{
+       __clear_bit(fd, fdt->open_fds);
+}
+
 static int count_open_files(struct fdtable *fdt)
 {
        int size = fdt->max_fds;
@@ -395,6 +401,95 @@ out:
        return NULL;
 }
 
+static void close_files(struct files_struct * files)
+{
+       int i, j;
+       struct fdtable *fdt;
+
+       j = 0;
+
+       /*
+        * It is safe to dereference the fd table without RCU or
+        * ->file_lock because this is the last reference to the
+        * files structure.  But use RCU to shut RCU-lockdep up.
+        */
+       rcu_read_lock();
+       fdt = files_fdtable(files);
+       rcu_read_unlock();
+       for (;;) {
+               unsigned long set;
+               i = j * BITS_PER_LONG;
+               if (i >= fdt->max_fds)
+                       break;
+               set = fdt->open_fds[j++];
+               while (set) {
+                       if (set & 1) {
+                               struct file * file = xchg(&fdt->fd[i], NULL);
+                               if (file) {
+                                       filp_close(file, files);
+                                       cond_resched();
+                               }
+                       }
+                       i++;
+                       set >>= 1;
+               }
+       }
+}
+
+struct files_struct *get_files_struct(struct task_struct *task)
+{
+       struct files_struct *files;
+
+       task_lock(task);
+       files = task->files;
+       if (files)
+               atomic_inc(&files->count);
+       task_unlock(task);
+
+       return files;
+}
+
+void put_files_struct(struct files_struct *files)
+{
+       struct fdtable *fdt;
+
+       if (atomic_dec_and_test(&files->count)) {
+               close_files(files);
+               /* not really needed, since nobody can see us */
+               rcu_read_lock();
+               fdt = files_fdtable(files);
+               rcu_read_unlock();
+               /* free the arrays if they are not embedded */
+               if (fdt != &files->fdtab)
+                       __free_fdtable(fdt);
+               kmem_cache_free(files_cachep, files);
+       }
+}
+
+void reset_files_struct(struct files_struct *files)
+{
+       struct task_struct *tsk = current;
+       struct files_struct *old;
+
+       old = tsk->files;
+       task_lock(tsk);
+       tsk->files = files;
+       task_unlock(tsk);
+       put_files_struct(old);
+}
+
+void exit_files(struct task_struct *tsk)
+{
+       struct files_struct * files = tsk->files;
+
+       if (files) {
+               task_lock(tsk);
+               tsk->files = NULL;
+               task_unlock(tsk);
+               put_files_struct(files);
+       }
+}
+
 static void __devinit fdtable_defer_list_init(int cpu)
 {
        struct fdtable_defer *fddef = &per_cpu(fdtable_defer_list, cpu);
@@ -424,12 +519,18 @@ struct files_struct init_files = {
        .file_lock      = __SPIN_LOCK_UNLOCKED(init_task.file_lock),
 };
 
+void daemonize_descriptors(void)
+{
+       atomic_inc(&init_files.count);
+       reset_files_struct(&init_files);
+}
+
 /*
  * allocate a file descriptor, mark it busy.
  */
-int alloc_fd(unsigned start, unsigned flags)
+int __alloc_fd(struct files_struct *files,
+              unsigned start, unsigned end, unsigned flags)
 {
-       struct files_struct *files = current->files;
        unsigned int fd;
        int error;
        struct fdtable *fdt;
@@ -444,6 +545,14 @@ repeat:
        if (fd < fdt->max_fds)
                fd = find_next_zero_bit(fdt->open_fds, fdt->max_fds, fd);
 
+       /*
+        * N.B. For clone tasks sharing a files structure, this test
+        * will limit the total number of files that can be opened.
+        */
+       error = -EMFILE;
+       if (fd >= end)
+               goto out;
+
        error = expand_files(files, fd);
        if (error < 0)
                goto out;
@@ -477,8 +586,424 @@ out:
        return error;
 }
 
-int get_unused_fd(void)
+static int alloc_fd(unsigned start, unsigned flags)
+{
+       return __alloc_fd(current->files, start, rlimit(RLIMIT_NOFILE), flags);
+}
+
+int get_unused_fd_flags(unsigned flags)
+{
+       return __alloc_fd(current->files, 0, rlimit(RLIMIT_NOFILE), flags);
+}
+EXPORT_SYMBOL(get_unused_fd_flags);
+
+static void __put_unused_fd(struct files_struct *files, unsigned int fd)
+{
+       struct fdtable *fdt = files_fdtable(files);
+       __clear_open_fd(fd, fdt);
+       if (fd < files->next_fd)
+               files->next_fd = fd;
+}
+
+void put_unused_fd(unsigned int fd)
+{
+       struct files_struct *files = current->files;
+       spin_lock(&files->file_lock);
+       __put_unused_fd(files, fd);
+       spin_unlock(&files->file_lock);
+}
+
+EXPORT_SYMBOL(put_unused_fd);
+
+/*
+ * Install a file pointer in the fd array.
+ *
+ * The VFS is full of places where we drop the files lock between
+ * setting the open_fds bitmap and installing the file in the file
+ * array.  At any such point, we are vulnerable to a dup2() race
+ * installing a file in the array before us.  We need to detect this and
+ * fput() the struct file we are about to overwrite in this case.
+ *
+ * It should never happen - if we allow dup2() do it, _really_ bad things
+ * will follow.
+ *
+ * NOTE: __fd_install() variant is really, really low-level; don't
+ * use it unless you are forced to by truly lousy API shoved down
+ * your throat.  'files' *MUST* be either current->files or obtained
+ * by get_files_struct(current) done by whoever had given it to you,
+ * or really bad things will happen.  Normally you want to use
+ * fd_install() instead.
+ */
+
+void __fd_install(struct files_struct *files, unsigned int fd,
+               struct file *file)
+{
+       struct fdtable *fdt;
+       spin_lock(&files->file_lock);
+       fdt = files_fdtable(files);
+       BUG_ON(fdt->fd[fd] != NULL);
+       rcu_assign_pointer(fdt->fd[fd], file);
+       spin_unlock(&files->file_lock);
+}
+
+void fd_install(unsigned int fd, struct file *file)
 {
-       return alloc_fd(0, 0);
+       __fd_install(current->files, fd, file);
+}
+
+EXPORT_SYMBOL(fd_install);
+
+/*
+ * The same warnings as for __alloc_fd()/__fd_install() apply here...
+ */
+int __close_fd(struct files_struct *files, unsigned fd)
+{
+       struct file *file;
+       struct fdtable *fdt;
+
+       spin_lock(&files->file_lock);
+       fdt = files_fdtable(files);
+       if (fd >= fdt->max_fds)
+               goto out_unlock;
+       file = fdt->fd[fd];
+       if (!file)
+               goto out_unlock;
+       rcu_assign_pointer(fdt->fd[fd], NULL);
+       __clear_close_on_exec(fd, fdt);
+       __put_unused_fd(files, fd);
+       spin_unlock(&files->file_lock);
+       return filp_close(file, files);
+
+out_unlock:
+       spin_unlock(&files->file_lock);
+       return -EBADF;
+}
+
+void do_close_on_exec(struct files_struct *files)
+{
+       unsigned i;
+       struct fdtable *fdt;
+
+       /* exec unshares first */
+       BUG_ON(atomic_read(&files->count) != 1);
+       spin_lock(&files->file_lock);
+       for (i = 0; ; i++) {
+               unsigned long set;
+               unsigned fd = i * BITS_PER_LONG;
+               fdt = files_fdtable(files);
+               if (fd >= fdt->max_fds)
+                       break;
+               set = fdt->close_on_exec[i];
+               if (!set)
+                       continue;
+               fdt->close_on_exec[i] = 0;
+               for ( ; set ; fd++, set >>= 1) {
+                       struct file *file;
+                       if (!(set & 1))
+                               continue;
+                       file = fdt->fd[fd];
+                       if (!file)
+                               continue;
+                       rcu_assign_pointer(fdt->fd[fd], NULL);
+                       __put_unused_fd(files, fd);
+                       spin_unlock(&files->file_lock);
+                       filp_close(file, files);
+                       cond_resched();
+                       spin_lock(&files->file_lock);
+               }
+
+       }
+       spin_unlock(&files->file_lock);
+}
+
+struct file *fget(unsigned int fd)
+{
+       struct file *file;
+       struct files_struct *files = current->files;
+
+       rcu_read_lock();
+       file = fcheck_files(files, fd);
+       if (file) {
+               /* File object ref couldn't be taken */
+               if (file->f_mode & FMODE_PATH ||
+                   !atomic_long_inc_not_zero(&file->f_count))
+                       file = NULL;
+       }
+       rcu_read_unlock();
+
+       return file;
+}
+
+EXPORT_SYMBOL(fget);
+
+struct file *fget_raw(unsigned int fd)
+{
+       struct file *file;
+       struct files_struct *files = current->files;
+
+       rcu_read_lock();
+       file = fcheck_files(files, fd);
+       if (file) {
+               /* File object ref couldn't be taken */
+               if (!atomic_long_inc_not_zero(&file->f_count))
+                       file = NULL;
+       }
+       rcu_read_unlock();
+
+       return file;
+}
+
+EXPORT_SYMBOL(fget_raw);
+
+/*
+ * Lightweight file lookup - no refcnt increment if fd table isn't shared.
+ *
+ * You can use this instead of fget if you satisfy all of the following
+ * conditions:
+ * 1) You must call fput_light before exiting the syscall and returning control
+ *    to userspace (i.e. you cannot remember the returned struct file * after
+ *    returning to userspace).
+ * 2) You must not call filp_close on the returned struct file * in between
+ *    calls to fget_light and fput_light.
+ * 3) You must not clone the current task in between the calls to fget_light
+ *    and fput_light.
+ *
+ * The fput_needed flag returned by fget_light should be passed to the
+ * corresponding fput_light.
+ */
+struct file *fget_light(unsigned int fd, int *fput_needed)
+{
+       struct file *file;
+       struct files_struct *files = current->files;
+
+       *fput_needed = 0;
+       if (atomic_read(&files->count) == 1) {
+               file = fcheck_files(files, fd);
+               if (file && (file->f_mode & FMODE_PATH))
+                       file = NULL;
+       } else {
+               rcu_read_lock();
+               file = fcheck_files(files, fd);
+               if (file) {
+                       if (!(file->f_mode & FMODE_PATH) &&
+                           atomic_long_inc_not_zero(&file->f_count))
+                               *fput_needed = 1;
+                       else
+                               /* Didn't get the reference, someone's freed */
+                               file = NULL;
+               }
+               rcu_read_unlock();
+       }
+
+       return file;
+}
+EXPORT_SYMBOL(fget_light);
+
+struct file *fget_raw_light(unsigned int fd, int *fput_needed)
+{
+       struct file *file;
+       struct files_struct *files = current->files;
+
+       *fput_needed = 0;
+       if (atomic_read(&files->count) == 1) {
+               file = fcheck_files(files, fd);
+       } else {
+               rcu_read_lock();
+               file = fcheck_files(files, fd);
+               if (file) {
+                       if (atomic_long_inc_not_zero(&file->f_count))
+                               *fput_needed = 1;
+                       else
+                               /* Didn't get the reference, someone's freed */
+                               file = NULL;
+               }
+               rcu_read_unlock();
+       }
+
+       return file;
+}
+
+void set_close_on_exec(unsigned int fd, int flag)
+{
+       struct files_struct *files = current->files;
+       struct fdtable *fdt;
+       spin_lock(&files->file_lock);
+       fdt = files_fdtable(files);
+       if (flag)
+               __set_close_on_exec(fd, fdt);
+       else
+               __clear_close_on_exec(fd, fdt);
+       spin_unlock(&files->file_lock);
+}
+
+bool get_close_on_exec(unsigned int fd)
+{
+       struct files_struct *files = current->files;
+       struct fdtable *fdt;
+       bool res;
+       rcu_read_lock();
+       fdt = files_fdtable(files);
+       res = close_on_exec(fd, fdt);
+       rcu_read_unlock();
+       return res;
+}
+
+static int do_dup2(struct files_struct *files,
+       struct file *file, unsigned fd, unsigned flags)
+{
+       struct file *tofree;
+       struct fdtable *fdt;
+
+       /*
+        * We need to detect attempts to do dup2() over allocated but still
+        * not finished descriptor.  NB: OpenBSD avoids that at the price of
+        * extra work in their equivalent of fget() - they insert struct
+        * file immediately after grabbing descriptor, mark it larval if
+        * more work (e.g. actual opening) is needed and make sure that
+        * fget() treats larval files as absent.  Potentially interesting,
+        * but while extra work in fget() is trivial, locking implications
+        * and amount of surgery on open()-related paths in VFS are not.
+        * FreeBSD fails with -EBADF in the same situation, NetBSD "solution"
+        * deadlocks in rather amusing ways, AFAICS.  All of that is out of
+        * scope of POSIX or SUS, since neither considers shared descriptor
+        * tables and this condition does not arise without those.
+        */
+       fdt = files_fdtable(files);
+       tofree = fdt->fd[fd];
+       if (!tofree && fd_is_open(fd, fdt))
+               goto Ebusy;
+       get_file(file);
+       rcu_assign_pointer(fdt->fd[fd], file);
+       __set_open_fd(fd, fdt);
+       if (flags & O_CLOEXEC)
+               __set_close_on_exec(fd, fdt);
+       else
+               __clear_close_on_exec(fd, fdt);
+       spin_unlock(&files->file_lock);
+
+       if (tofree)
+               filp_close(tofree, files);
+
+       return fd;
+
+Ebusy:
+       spin_unlock(&files->file_lock);
+       return -EBUSY;
+}
+
+int replace_fd(unsigned fd, struct file *file, unsigned flags)
+{
+       int err;
+       struct files_struct *files = current->files;
+
+       if (!file)
+               return __close_fd(files, fd);
+
+       if (fd >= rlimit(RLIMIT_NOFILE))
+               return -EMFILE;
+
+       spin_lock(&files->file_lock);
+       err = expand_files(files, fd);
+       if (unlikely(err < 0))
+               goto out_unlock;
+       return do_dup2(files, file, fd, flags);
+
+out_unlock:
+       spin_unlock(&files->file_lock);
+       return err;
+}
+
+SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
+{
+       int err = -EBADF;
+       struct file *file;
+       struct files_struct *files = current->files;
+
+       if ((flags & ~O_CLOEXEC) != 0)
+               return -EINVAL;
+
+       if (newfd >= rlimit(RLIMIT_NOFILE))
+               return -EMFILE;
+
+       spin_lock(&files->file_lock);
+       err = expand_files(files, newfd);
+       file = fcheck(oldfd);
+       if (unlikely(!file))
+               goto Ebadf;
+       if (unlikely(err < 0)) {
+               if (err == -EMFILE)
+                       goto Ebadf;
+               goto out_unlock;
+       }
+       return do_dup2(files, file, newfd, flags);
+
+Ebadf:
+       err = -EBADF;
+out_unlock:
+       spin_unlock(&files->file_lock);
+       return err;
+}
+
+SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd)
+{
+       if (unlikely(newfd == oldfd)) { /* corner case */
+               struct files_struct *files = current->files;
+               int retval = oldfd;
+
+               rcu_read_lock();
+               if (!fcheck_files(files, oldfd))
+                       retval = -EBADF;
+               rcu_read_unlock();
+               return retval;
+       }
+       return sys_dup3(oldfd, newfd, 0);
+}
+
+SYSCALL_DEFINE1(dup, unsigned int, fildes)
+{
+       int ret = -EBADF;
+       struct file *file = fget_raw(fildes);
+
+       if (file) {
+               ret = get_unused_fd();
+               if (ret >= 0)
+                       fd_install(ret, file);
+               else
+                       fput(file);
+       }
+       return ret;
+}
+
+int f_dupfd(unsigned int from, struct file *file, unsigned flags)
+{
+       int err;
+       if (from >= rlimit(RLIMIT_NOFILE))
+               return -EINVAL;
+       err = alloc_fd(from, flags);
+       if (err >= 0) {
+               get_file(file);
+               fd_install(err, file);
+       }
+       return err;
+}
+
+int iterate_fd(struct files_struct *files, unsigned n,
+               int (*f)(const void *, struct file *, unsigned),
+               const void *p)
+{
+       struct fdtable *fdt;
+       struct file *file;
+       int res = 0;
+       if (!files)
+               return 0;
+       spin_lock(&files->file_lock);
+       fdt = files_fdtable(files);
+       while (!res && n < fdt->max_fds) {
+               file = rcu_dereference_check_fdtable(files, fdt->fd[n++]);
+               if (file)
+                       res = f(p, file, n);
+       }
+       spin_unlock(&files->file_lock);
+       return res;
 }
-EXPORT_SYMBOL(get_unused_fd);
+EXPORT_SYMBOL(iterate_fd);
index 701985e4ccda4fc5afc05977f511bd6ca11ba3af..dac67923330f76eff9f4f961d73e5a3b1f1053db 100644 (file)
@@ -243,10 +243,10 @@ static void __fput(struct file *file)
                if (file->f_op && file->f_op->fasync)
                        file->f_op->fasync(-1, file, 0);
        }
+       ima_file_free(file);
        if (file->f_op && file->f_op->release)
                file->f_op->release(inode, file);
        security_file_free(file);
-       ima_file_free(file);
        if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL &&
                     !(file->f_mode & FMODE_PATH))) {
                cdev_put(inode->i_cdev);
@@ -339,112 +339,6 @@ void __fput_sync(struct file *file)
 
 EXPORT_SYMBOL(fput);
 
-struct file *fget(unsigned int fd)
-{
-       struct file *file;
-       struct files_struct *files = current->files;
-
-       rcu_read_lock();
-       file = fcheck_files(files, fd);
-       if (file) {
-               /* File object ref couldn't be taken */
-               if (file->f_mode & FMODE_PATH ||
-                   !atomic_long_inc_not_zero(&file->f_count))
-                       file = NULL;
-       }
-       rcu_read_unlock();
-
-       return file;
-}
-
-EXPORT_SYMBOL(fget);
-
-struct file *fget_raw(unsigned int fd)
-{
-       struct file *file;
-       struct files_struct *files = current->files;
-
-       rcu_read_lock();
-       file = fcheck_files(files, fd);
-       if (file) {
-               /* File object ref couldn't be taken */
-               if (!atomic_long_inc_not_zero(&file->f_count))
-                       file = NULL;
-       }
-       rcu_read_unlock();
-
-       return file;
-}
-
-EXPORT_SYMBOL(fget_raw);
-
-/*
- * Lightweight file lookup - no refcnt increment if fd table isn't shared.
- *
- * You can use this instead of fget if you satisfy all of the following
- * conditions:
- * 1) You must call fput_light before exiting the syscall and returning control
- *    to userspace (i.e. you cannot remember the returned struct file * after
- *    returning to userspace).
- * 2) You must not call filp_close on the returned struct file * in between
- *    calls to fget_light and fput_light.
- * 3) You must not clone the current task in between the calls to fget_light
- *    and fput_light.
- *
- * The fput_needed flag returned by fget_light should be passed to the
- * corresponding fput_light.
- */
-struct file *fget_light(unsigned int fd, int *fput_needed)
-{
-       struct file *file;
-       struct files_struct *files = current->files;
-
-       *fput_needed = 0;
-       if (atomic_read(&files->count) == 1) {
-               file = fcheck_files(files, fd);
-               if (file && (file->f_mode & FMODE_PATH))
-                       file = NULL;
-       } else {
-               rcu_read_lock();
-               file = fcheck_files(files, fd);
-               if (file) {
-                       if (!(file->f_mode & FMODE_PATH) &&
-                           atomic_long_inc_not_zero(&file->f_count))
-                               *fput_needed = 1;
-                       else
-                               /* Didn't get the reference, someone's freed */
-                               file = NULL;
-               }
-               rcu_read_unlock();
-       }
-
-       return file;
-}
-
-struct file *fget_raw_light(unsigned int fd, int *fput_needed)
-{
-       struct file *file;
-       struct files_struct *files = current->files;
-
-       *fput_needed = 0;
-       if (atomic_read(&files->count) == 1) {
-               file = fcheck_files(files, fd);
-       } else {
-               rcu_read_lock();
-               file = fcheck_files(files, fd);
-               if (file) {
-                       if (atomic_long_inc_not_zero(&file->f_count))
-                               *fput_needed = 1;
-                       else
-                               /* Didn't get the reference, someone's freed */
-                               file = NULL;
-               }
-               rcu_read_unlock();
-       }
-
-       return file;
-}
-
 void put_filp(struct file *file)
 {
        if (atomic_long_dec_and_test(&file->f_count)) {
index ef67c95f12d42511cf91892b8afcf5caddea678c..f47df72cef170d1005466d6cc7edcb1c9352b597 100644 (file)
@@ -224,8 +224,8 @@ vxfs_iinit(struct inode *ip, struct vxfs_inode_info *vip)
 {
 
        ip->i_mode = vxfs_transmod(vip);
-       ip->i_uid = (uid_t)vip->vii_uid;
-       ip->i_gid = (gid_t)vip->vii_gid;
+       i_uid_write(ip, (uid_t)vip->vii_uid);
+       i_gid_write(ip, (gid_t)vip->vii_gid);
 
        set_nlink(ip, vip->vii_nlink);
        ip->i_size = vip->vii_size;
index d4fabd26084ed8340e772e3699faa2e9e239da91..fed2c8afb3a9f401945ca5d8a226a4df5ebb4c9f 100644 (file)
@@ -279,6 +279,11 @@ static void __exit
 vxfs_cleanup(void)
 {
        unregister_filesystem(&vxfs_fs_type);
+       /*
+        * Make sure all delayed rcu free inodes are flushed before we
+        * destroy cache.
+        */
+       rcu_barrier();
        kmem_cache_destroy(vxfs_inode_cachep);
 }
 
index f4246cfc8d876db6ac39a6ef058b144c73d503af..8c23fa7a91e65cb46ad3907432e4418c7f96ad2d 100644 (file)
@@ -148,8 +148,7 @@ static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
                if (ff->reserved_req) {
                        req = ff->reserved_req;
                        ff->reserved_req = NULL;
-                       get_file(file);
-                       req->stolen_file = file;
+                       req->stolen_file = get_file(file);
                }
                spin_unlock(&fc->lock);
        } while (!req);
index fca222dabe3ccc4a791e894d325bdc4e4f78b7f3..f0eda124cffb7714daf51c475dcabc8c955056e0 100644 (file)
@@ -1197,6 +1197,12 @@ static void fuse_fs_cleanup(void)
 {
        unregister_filesystem(&fuse_fs_type);
        unregister_fuseblk();
+
+       /*
+        * Make sure all delayed rcu free inodes are flushed before we
+        * destroy cache.
+        */
+       rcu_barrier();
        kmem_cache_destroy(fuse_inode_cachep);
 }
 
index d0dddaceac594888c96388b5ed82f7ab33bc9846..b3f3676796d31a6870a771ed6952977cfc16d448 100644 (file)
@@ -56,7 +56,7 @@ generic_acl_get(struct dentry *dentry, const char *name, void *buffer,
        acl = get_cached_acl(dentry->d_inode, type);
        if (!acl)
                return -ENODATA;
-       error = posix_acl_to_xattr(acl, buffer, size);
+       error = posix_acl_to_xattr(&init_user_ns, acl, buffer, size);
        posix_acl_release(acl);
 
        return error;
@@ -77,7 +77,7 @@ generic_acl_set(struct dentry *dentry, const char *name, const void *value,
        if (!inode_owner_or_capable(inode))
                return -EPERM;
        if (value) {
-               acl = posix_acl_from_xattr(value, size);
+               acl = posix_acl_from_xattr(&init_user_ns, value, size);
                if (IS_ERR(acl))
                        return PTR_ERR(acl);
        }
index bd4a5892c93ca18791c0032bdb017aeb6357ca98..f850020ad906a68cc715602e024f1a9bd18c1b88 100644 (file)
@@ -63,7 +63,7 @@ struct posix_acl *gfs2_get_acl(struct inode *inode, int type)
        if (len == 0)
                return NULL;
 
-       acl = posix_acl_from_xattr(data, len);
+       acl = posix_acl_from_xattr(&init_user_ns, data, len);
        kfree(data);
        return acl;
 }
@@ -88,13 +88,13 @@ static int gfs2_acl_set(struct inode *inode, int type, struct posix_acl *acl)
        const char *name = gfs2_acl_name(type);
 
        BUG_ON(name == NULL);
-       len = posix_acl_to_xattr(acl, NULL, 0);
+       len = posix_acl_to_xattr(&init_user_ns, acl, NULL, 0);
        if (len == 0)
                return 0;
        data = kmalloc(len, GFP_NOFS);
        if (data == NULL)
                return -ENOMEM;
-       error = posix_acl_to_xattr(acl, data, len);
+       error = posix_acl_to_xattr(&init_user_ns, acl, data, len);
        if (error < 0)
                goto out;
        error = __gfs2_xattr_set(inode, name, data, len, 0, GFS2_EATYPE_SYS);
@@ -166,12 +166,12 @@ int gfs2_acl_chmod(struct gfs2_inode *ip, struct iattr *attr)
        if (error)
                return error;
 
-       len = posix_acl_to_xattr(acl, NULL, 0);
+       len = posix_acl_to_xattr(&init_user_ns, acl, NULL, 0);
        data = kmalloc(len, GFP_NOFS);
        error = -ENOMEM;
        if (data == NULL)
                goto out;
-       posix_acl_to_xattr(acl, data, len);
+       posix_acl_to_xattr(&init_user_ns, acl, data, len);
        error = gfs2_xattr_acl_chmod(ip, attr, data);
        kfree(data);
        set_cached_acl(&ip->i_inode, ACL_TYPE_ACCESS, acl);
@@ -212,7 +212,7 @@ static int gfs2_xattr_system_get(struct dentry *dentry, const char *name,
        if (acl == NULL)
                return -ENODATA;
 
-       error = posix_acl_to_xattr(acl, buffer, size);
+       error = posix_acl_to_xattr(&init_user_ns, acl, buffer, size);
        posix_acl_release(acl);
 
        return error;
@@ -245,7 +245,7 @@ static int gfs2_xattr_system_set(struct dentry *dentry, const char *name,
        if (!value)
                goto set_acl;
 
-       acl = posix_acl_from_xattr(value, size);
+       acl = posix_acl_from_xattr(&init_user_ns, value, size);
        if (!acl) {
                /*
                 * acl_set_file(3) may request that we set default ACLs with
index 4a38db739ca0a5e725a40fd39d9ebc4ff96ea34b..0fb6539b0c8cc0d64bbce7ff047eae815f1bafe3 100644 (file)
@@ -1289,7 +1289,7 @@ static void gdlm_unmount(struct gfs2_sbd *sdp)
        spin_lock(&ls->ls_recover_spin);
        set_bit(DFL_UNMOUNT, &ls->ls_recover_flags);
        spin_unlock(&ls->ls_recover_spin);
-       flush_delayed_work_sync(&sdp->sd_control_work);
+       flush_delayed_work(&sdp->sd_control_work);
 
        /* mounted_lock and control_lock will be purged in dlm recovery */
 release:
index 4021deca61ef3b14558d361823802e8c1ba64fb1..40c4b0d42fa8fea10b73102b4fd7d155ebf8daa0 100644 (file)
@@ -1071,8 +1071,10 @@ int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid)
 
                if (be64_to_cpu(qd->qd_qb.qb_limit) && (s64)be64_to_cpu(qd->qd_qb.qb_limit) < value) {
                        print_message(qd, "exceeded");
-                       quota_send_warning(test_bit(QDF_USER, &qd->qd_flags) ?
-                                          USRQUOTA : GRPQUOTA, qd->qd_id,
+                       quota_send_warning(make_kqid(&init_user_ns,
+                                                    test_bit(QDF_USER, &qd->qd_flags) ?
+                                                    USRQUOTA : GRPQUOTA,
+                                                    qd->qd_id),
                                           sdp->sd_vfs->s_dev, QUOTA_NL_BHARDWARN);
 
                        error = -EDQUOT;
@@ -1082,8 +1084,10 @@ int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid)
                           time_after_eq(jiffies, qd->qd_last_warn +
                                         gfs2_tune_get(sdp,
                                                gt_quota_warn_period) * HZ)) {
-                       quota_send_warning(test_bit(QDF_USER, &qd->qd_flags) ?
-                                          USRQUOTA : GRPQUOTA, qd->qd_id,
+                       quota_send_warning(make_kqid(&init_user_ns,
+                                                    test_bit(QDF_USER, &qd->qd_flags) ?
+                                                    USRQUOTA : GRPQUOTA,
+                                                    qd->qd_id),
                                           sdp->sd_vfs->s_dev, QUOTA_NL_BSOFTWARN);
                        error = print_message(qd, "warning");
                        qd->qd_last_warn = jiffies;
@@ -1470,7 +1474,7 @@ static int gfs2_quota_get_xstate(struct super_block *sb,
        return 0;
 }
 
-static int gfs2_get_dqblk(struct super_block *sb, int type, qid_t id,
+static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
                          struct fs_disk_quota *fdq)
 {
        struct gfs2_sbd *sdp = sb->s_fs_info;
@@ -1478,20 +1482,21 @@ static int gfs2_get_dqblk(struct super_block *sb, int type, qid_t id,
        struct gfs2_quota_data *qd;
        struct gfs2_holder q_gh;
        int error;
+       int type;
 
        memset(fdq, 0, sizeof(struct fs_disk_quota));
 
        if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
                return -ESRCH; /* Crazy XFS error code */
 
-       if (type == USRQUOTA)
+       if (qid.type == USRQUOTA)
                type = QUOTA_USER;
-       else if (type == GRPQUOTA)
+       else if (qid.type == GRPQUOTA)
                type = QUOTA_GROUP;
        else
                return -EINVAL;
 
-       error = qd_get(sdp, type, id, &qd);
+       error = qd_get(sdp, type, from_kqid(&init_user_ns, qid), &qd);
        if (error)
                return error;
        error = do_glock(qd, FORCE, &q_gh);
@@ -1501,7 +1506,7 @@ static int gfs2_get_dqblk(struct super_block *sb, int type, qid_t id,
        qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
        fdq->d_version = FS_DQUOT_VERSION;
        fdq->d_flags = (type == QUOTA_USER) ? FS_USER_QUOTA : FS_GROUP_QUOTA;
-       fdq->d_id = id;
+       fdq->d_id = from_kqid(&init_user_ns, qid);
        fdq->d_blk_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_fsb2bb_shift;
        fdq->d_blk_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_fsb2bb_shift;
        fdq->d_bcount = be64_to_cpu(qlvb->qb_value) << sdp->sd_fsb2bb_shift;
@@ -1515,7 +1520,7 @@ out:
 /* GFS2 only supports a subset of the XFS fields */
 #define GFS2_FIELDMASK (FS_DQ_BSOFT|FS_DQ_BHARD|FS_DQ_BCOUNT)
 
-static int gfs2_set_dqblk(struct super_block *sb, int type, qid_t id,
+static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
                          struct fs_disk_quota *fdq)
 {
        struct gfs2_sbd *sdp = sb->s_fs_info;
@@ -1527,11 +1532,12 @@ static int gfs2_set_dqblk(struct super_block *sb, int type, qid_t id,
        int alloc_required;
        loff_t offset;
        int error;
+       int type;
 
        if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
                return -ESRCH; /* Crazy XFS error code */
 
-       switch(type) {
+       switch(qid.type) {
        case USRQUOTA:
                type = QUOTA_USER;
                if (fdq->d_flags != FS_USER_QUOTA)
@@ -1548,10 +1554,10 @@ static int gfs2_set_dqblk(struct super_block *sb, int type, qid_t id,
 
        if (fdq->d_fieldmask & ~GFS2_FIELDMASK)
                return -EINVAL;
-       if (fdq->d_id != id)
+       if (fdq->d_id != from_kqid(&init_user_ns, qid))
                return -EINVAL;
 
-       error = qd_get(sdp, type, id, &qd);
+       error = qd_get(sdp, type, from_kqid(&init_user_ns, qid), &qd);
        if (error)
                return error;
 
index a8d90f2f576cd01bcf627f6d0b6c69e42037e170..bc737261f234872ad2df43411e6e74296b26b61c 100644 (file)
@@ -1579,7 +1579,7 @@ out:
        clear_inode(inode);
        gfs2_dir_hash_inval(ip);
        ip->i_gl->gl_object = NULL;
-       flush_delayed_work_sync(&ip->i_gl->gl_work);
+       flush_delayed_work(&ip->i_gl->gl_work);
        gfs2_glock_add_to_lru(ip->i_gl);
        gfs2_glock_put(ip->i_gl);
        ip->i_gl = NULL;
index 8275175acf6eaaf8606b940d6f05c96d7ae08a5c..693df9fe52b2a8dc7cda4d675749941c0fe4b760 100644 (file)
@@ -134,8 +134,8 @@ struct hfs_sb_info {
                                                   permissions on all files */
        umode_t s_dir_umask;                    /* The umask applied to the
                                                   permissions on all dirs */
-       uid_t s_uid;                            /* The uid of all files */
-       gid_t s_gid;                            /* The gid of all files */
+       kuid_t s_uid;                           /* The uid of all files */
+       kgid_t s_gid;                           /* The gid of all files */
 
        int session, part;
        struct nls_table *nls_io, *nls_disk;
index ee1bc55677f1486c6f27a2549d8f6ce60c1581de..0b35903219bc1056d4c1c3260ec75d7b68a1b269 100644 (file)
@@ -594,9 +594,9 @@ int hfs_inode_setattr(struct dentry *dentry, struct iattr * attr)
 
        /* no uig/gid changes and limit which mode bits can be set */
        if (((attr->ia_valid & ATTR_UID) &&
-            (attr->ia_uid != hsb->s_uid)) ||
+            (!uid_eq(attr->ia_uid, hsb->s_uid))) ||
            ((attr->ia_valid & ATTR_GID) &&
-            (attr->ia_gid != hsb->s_gid)) ||
+            (!gid_eq(attr->ia_gid, hsb->s_gid))) ||
            ((attr->ia_valid & ATTR_MODE) &&
             ((S_ISDIR(inode->i_mode) &&
               (attr->ia_mode != inode->i_mode)) ||
@@ -644,7 +644,7 @@ static int hfs_file_fsync(struct file *filp, loff_t start, loff_t end,
 
        /* sync the superblock to buffers */
        sb = inode->i_sb;
-       flush_delayed_work_sync(&HFS_SB(sb)->mdb_work);
+       flush_delayed_work(&HFS_SB(sb)->mdb_work);
        /* .. finally sync the buffers to disk */
        err = sync_blockdev(sb->s_bdev);
        if (!ret)
index 4eb873e0c07b137c0225f30b20fdf78bfb0c0006..e93ddaadfd1e445fa89e976c9692f2afa302094c 100644 (file)
@@ -138,7 +138,9 @@ static int hfs_show_options(struct seq_file *seq, struct dentry *root)
                seq_printf(seq, ",creator=%.4s", (char *)&sbi->s_creator);
        if (sbi->s_type != cpu_to_be32(0x3f3f3f3f))
                seq_printf(seq, ",type=%.4s", (char *)&sbi->s_type);
-       seq_printf(seq, ",uid=%u,gid=%u", sbi->s_uid, sbi->s_gid);
+       seq_printf(seq, ",uid=%u,gid=%u",
+                       from_kuid_munged(&init_user_ns, sbi->s_uid),
+                       from_kgid_munged(&init_user_ns, sbi->s_gid));
        if (sbi->s_file_umask != 0133)
                seq_printf(seq, ",file_umask=%o", sbi->s_file_umask);
        if (sbi->s_dir_umask != 0022)
@@ -254,14 +256,22 @@ static int parse_options(char *options, struct hfs_sb_info *hsb)
                                printk(KERN_ERR "hfs: uid requires an argument\n");
                                return 0;
                        }
-                       hsb->s_uid = (uid_t)tmp;
+                       hsb->s_uid = make_kuid(current_user_ns(), (uid_t)tmp);
+                       if (!uid_valid(hsb->s_uid)) {
+                               printk(KERN_ERR "hfs: invalid uid %d\n", tmp);
+                               return 0;
+                       }
                        break;
                case opt_gid:
                        if (match_int(&args[0], &tmp)) {
                                printk(KERN_ERR "hfs: gid requires an argument\n");
                                return 0;
                        }
-                       hsb->s_gid = (gid_t)tmp;
+                       hsb->s_gid = make_kgid(current_user_ns(), (gid_t)tmp);
+                       if (!gid_valid(hsb->s_gid)) {
+                               printk(KERN_ERR "hfs: invalid gid %d\n", tmp);
+                               return 0;
+                       }
                        break;
                case opt_umask:
                        if (match_octal(&args[0], &tmp)) {
@@ -482,6 +492,12 @@ static int __init init_hfs_fs(void)
 static void __exit exit_hfs_fs(void)
 {
        unregister_filesystem(&hfs_fs_type);
+
+       /*
+        * Make sure all delayed rcu free inodes are flushed before we
+        * destroy cache.
+        */
+       rcu_barrier();
        kmem_cache_destroy(hfs_inode_cachep);
 }
 
index ec2a9c23f0c9a58b66a517aceeeab8092efaf11b..798d9c4c5e71ce8ab50c4d0cdd4597f753b5d814 100644 (file)
@@ -80,8 +80,8 @@ void hfsplus_cat_set_perms(struct inode *inode, struct hfsplus_perm *perms)
 
        perms->userflags = HFSPLUS_I(inode)->userflags;
        perms->mode = cpu_to_be16(inode->i_mode);
-       perms->owner = cpu_to_be32(inode->i_uid);
-       perms->group = cpu_to_be32(inode->i_gid);
+       perms->owner = cpu_to_be32(i_uid_read(inode));
+       perms->group = cpu_to_be32(i_gid_read(inode));
 
        if (S_ISREG(inode->i_mode))
                perms->dev = cpu_to_be32(inode->i_nlink);
index 558dbb463a4e835632437a5bc427900c304a262f..c571de224b154638e9925fb4a2588b253ceffe63 100644 (file)
@@ -149,8 +149,8 @@ struct hfsplus_sb_info {
        u32 type;
 
        umode_t umask;
-       uid_t uid;
-       gid_t gid;
+       kuid_t uid;
+       kgid_t gid;
 
        int part, session;
        unsigned long flags;
index 3d8b4a675ba0d84a0b33d190b337eb1cc84a8fae..2172aa5976f5e9a6288c38a3f4c964ec13930c4c 100644 (file)
@@ -233,12 +233,12 @@ static void hfsplus_get_perms(struct inode *inode,
 
        mode = be16_to_cpu(perms->mode);
 
-       inode->i_uid = be32_to_cpu(perms->owner);
-       if (!inode->i_uid && !mode)
+       i_uid_write(inode, be32_to_cpu(perms->owner));
+       if (!i_uid_read(inode) && !mode)
                inode->i_uid = sbi->uid;
 
-       inode->i_gid = be32_to_cpu(perms->group);
-       if (!inode->i_gid && !mode)
+       i_gid_write(inode, be32_to_cpu(perms->group));
+       if (!i_gid_read(inode) && !mode)
                inode->i_gid = sbi->gid;
 
        if (dir) {
index 06fa5618600c86c18b033b27a89bea1a91fbeaaf..ed257c6716156b4cedc066153675bac0aa7a8d76 100644 (file)
@@ -135,14 +135,22 @@ int hfsplus_parse_options(char *input, struct hfsplus_sb_info *sbi)
                                printk(KERN_ERR "hfs: uid requires an argument\n");
                                return 0;
                        }
-                       sbi->uid = (uid_t)tmp;
+                       sbi->uid = make_kuid(current_user_ns(), (uid_t)tmp);
+                       if (!uid_valid(sbi->uid)) {
+                               printk(KERN_ERR "hfs: invalid uid specified\n");
+                               return 0;
+                       }
                        break;
                case opt_gid:
                        if (match_int(&args[0], &tmp)) {
                                printk(KERN_ERR "hfs: gid requires an argument\n");
                                return 0;
                        }
-                       sbi->gid = (gid_t)tmp;
+                       sbi->gid = make_kgid(current_user_ns(), (gid_t)tmp);
+                       if (!gid_valid(sbi->gid)) {
+                               printk(KERN_ERR "hfs: invalid gid specified\n");
+                               return 0;
+                       }
                        break;
                case opt_part:
                        if (match_int(&args[0], &sbi->part)) {
@@ -215,7 +223,8 @@ int hfsplus_show_options(struct seq_file *seq, struct dentry *root)
        if (sbi->type != HFSPLUS_DEF_CR_TYPE)
                seq_printf(seq, ",type=%.4s", (char *)&sbi->type);
        seq_printf(seq, ",umask=%o,uid=%u,gid=%u", sbi->umask,
-               sbi->uid, sbi->gid);
+                       from_kuid_munged(&init_user_ns, sbi->uid),
+                       from_kgid_munged(&init_user_ns, sbi->gid));
        if (sbi->part >= 0)
                seq_printf(seq, ",part=%u", sbi->part);
        if (sbi->session >= 0)
index fdafb2d71654740776bd1b77931b955da350ecad..811a84d2d9643677832219a0b960c6d42109480e 100644 (file)
@@ -635,6 +635,12 @@ static int __init init_hfsplus_fs(void)
 static void __exit exit_hfsplus_fs(void)
 {
        unregister_filesystem(&hfsplus_fs_type);
+
+       /*
+        * Make sure all delayed rcu free inodes are flushed before we
+        * destroy cache.
+        */
+       rcu_barrier();
        kmem_cache_destroy(hfsplus_inode_cachep);
 }
 
index 124146543aa76ec87187955cf237f6a04e441ad4..6c9f3a9d5e211adcadb8e93c80bbd7b12d4460db 100644 (file)
@@ -542,8 +542,8 @@ static int read_name(struct inode *ino, char *name)
        ino->i_ino = st.ino;
        ino->i_mode = st.mode;
        set_nlink(ino, st.nlink);
-       ino->i_uid = st.uid;
-       ino->i_gid = st.gid;
+       i_uid_write(ino, st.uid);
+       i_gid_write(ino, st.gid);
        ino->i_atime = st.atime;
        ino->i_mtime = st.mtime;
        ino->i_ctime = st.ctime;
@@ -808,11 +808,11 @@ int hostfs_setattr(struct dentry *dentry, struct iattr *attr)
        }
        if (attr->ia_valid & ATTR_UID) {
                attrs.ia_valid |= HOSTFS_ATTR_UID;
-               attrs.ia_uid = attr->ia_uid;
+               attrs.ia_uid = from_kuid(&init_user_ns, attr->ia_uid);
        }
        if (attr->ia_valid & ATTR_GID) {
                attrs.ia_valid |= HOSTFS_ATTR_GID;
-               attrs.ia_gid = attr->ia_gid;
+               attrs.ia_gid = from_kgid(&init_user_ns, attr->ia_gid);
        }
        if (attr->ia_valid & ATTR_SIZE) {
                attrs.ia_valid |= HOSTFS_ATTR_SIZE;
index ac1ead194db5ebf2ea8d5f1ef2102033615d08a9..7102aaecc24414e6af90e8b7465d3fec02c3b3fd 100644 (file)
@@ -63,8 +63,8 @@ struct hpfs_sb_info {
        unsigned sb_dmap;               /* sector number of dnode bit map */
        unsigned sb_n_free;             /* free blocks for statfs, or -1 */
        unsigned sb_n_free_dnodes;      /* free dnodes for statfs, or -1 */
-       uid_t sb_uid;                   /* uid from mount options */
-       gid_t sb_gid;                   /* gid from mount options */
+       kuid_t sb_uid;                  /* uid from mount options */
+       kgid_t sb_gid;                  /* gid from mount options */
        umode_t sb_mode;                /* mode from mount options */
        unsigned sb_eas : 2;            /* eas: 0-ignore, 1-ro, 2-rw */
        unsigned sb_err : 2;            /* on errs: 0-cont, 1-ro, 2-panic */
index ed671e0ea78443b35bb6d1dd3eabc64bd7559c1f..804a9a842cbc1cdf2191efa88eb522821cbcca55 100644 (file)
@@ -7,6 +7,7 @@
  */
 
 #include <linux/slab.h>
+#include <linux/user_namespace.h>
 #include "hpfs_fn.h"
 
 void hpfs_init_inode(struct inode *i)
@@ -60,14 +61,14 @@ void hpfs_read_inode(struct inode *i)
        if (hpfs_sb(i->i_sb)->sb_eas) {
                if ((ea = hpfs_get_ea(i->i_sb, fnode, "UID", &ea_size))) {
                        if (ea_size == 2) {
-                               i->i_uid = le16_to_cpu(*(__le16*)ea);
+                               i_uid_write(i, le16_to_cpu(*(__le16*)ea));
                                hpfs_inode->i_ea_uid = 1;
                        }
                        kfree(ea);
                }
                if ((ea = hpfs_get_ea(i->i_sb, fnode, "GID", &ea_size))) {
                        if (ea_size == 2) {
-                               i->i_gid = le16_to_cpu(*(__le16*)ea);
+                               i_gid_write(i, le16_to_cpu(*(__le16*)ea));
                                hpfs_inode->i_ea_gid = 1;
                        }
                        kfree(ea);
@@ -149,13 +150,13 @@ static void hpfs_write_inode_ea(struct inode *i, struct fnode *fnode)
                hpfs_error(i->i_sb, "fnode %08x has some unknown HPFS386 stuctures", i->i_ino);
        } else*/ if (hpfs_sb(i->i_sb)->sb_eas >= 2) {
                __le32 ea;
-               if ((i->i_uid != hpfs_sb(i->i_sb)->sb_uid) || hpfs_inode->i_ea_uid) {
-                       ea = cpu_to_le32(i->i_uid);
+               if (!uid_eq(i->i_uid, hpfs_sb(i->i_sb)->sb_uid) || hpfs_inode->i_ea_uid) {
+                       ea = cpu_to_le32(i_uid_read(i));
                        hpfs_set_ea(i, fnode, "UID", (char*)&ea, 2);
                        hpfs_inode->i_ea_uid = 1;
                }
-               if ((i->i_gid != hpfs_sb(i->i_sb)->sb_gid) || hpfs_inode->i_ea_gid) {
-                       ea = cpu_to_le32(i->i_gid);
+               if (!gid_eq(i->i_gid, hpfs_sb(i->i_sb)->sb_gid) || hpfs_inode->i_ea_gid) {
+                       ea = cpu_to_le32(i_gid_read(i));
                        hpfs_set_ea(i, fnode, "GID", (char *)&ea, 2);
                        hpfs_inode->i_ea_gid = 1;
                }
@@ -261,9 +262,11 @@ int hpfs_setattr(struct dentry *dentry, struct iattr *attr)
        hpfs_lock(inode->i_sb);
        if (inode->i_ino == hpfs_sb(inode->i_sb)->sb_root)
                goto out_unlock;
-       if ((attr->ia_valid & ATTR_UID) && attr->ia_uid >= 0x10000)
+       if ((attr->ia_valid & ATTR_UID) &&
+           from_kuid(&init_user_ns, attr->ia_uid) >= 0x10000)
                goto out_unlock;
-       if ((attr->ia_valid & ATTR_GID) && attr->ia_gid >= 0x10000)
+       if ((attr->ia_valid & ATTR_GID) &&
+           from_kgid(&init_user_ns, attr->ia_gid) >= 0x10000)
                goto out_unlock;
        if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size > inode->i_size)
                goto out_unlock;
index bc9082482f6841c21a0bb520d4333fae14efcb9a..345713d2f8f31ee4590af09147457b8839081840 100644 (file)
@@ -91,8 +91,8 @@ static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
        inc_nlink(dir);
        insert_inode_hash(result);
 
-       if (result->i_uid != current_fsuid() ||
-           result->i_gid != current_fsgid() ||
+       if (!uid_eq(result->i_uid, current_fsuid()) ||
+           !gid_eq(result->i_gid, current_fsgid()) ||
            result->i_mode != (mode | S_IFDIR)) {
                result->i_uid = current_fsuid();
                result->i_gid = current_fsgid();
@@ -179,8 +179,8 @@ static int hpfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, b
 
        insert_inode_hash(result);
 
-       if (result->i_uid != current_fsuid() ||
-           result->i_gid != current_fsgid() ||
+       if (!uid_eq(result->i_uid, current_fsuid()) ||
+           !gid_eq(result->i_gid, current_fsgid()) ||
            result->i_mode != (mode | S_IFREG)) {
                result->i_uid = current_fsuid();
                result->i_gid = current_fsgid();
index 706a12c083ea726a7a268d647ae266b02a3a2ca7..bc28bf077a6abe92b4041be6f6057cd6815fd57b 100644 (file)
@@ -210,6 +210,11 @@ static int init_inodecache(void)
 
 static void destroy_inodecache(void)
 {
+       /*
+        * Make sure all delayed rcu free inodes are flushed before we
+        * destroy cache.
+        */
+       rcu_barrier();
        kmem_cache_destroy(hpfs_inode_cachep);
 }
 
@@ -251,7 +256,7 @@ static const match_table_t tokens = {
        {Opt_err, NULL},
 };
 
-static int parse_opts(char *opts, uid_t *uid, gid_t *gid, umode_t *umask,
+static int parse_opts(char *opts, kuid_t *uid, kgid_t *gid, umode_t *umask,
                      int *lowercase, int *eas, int *chk, int *errs,
                      int *chkdsk, int *timeshift)
 {
@@ -276,12 +281,16 @@ static int parse_opts(char *opts, uid_t *uid, gid_t *gid, umode_t *umask,
                case Opt_uid:
                        if (match_int(args, &option))
                                return 0;
-                       *uid = option;
+                       *uid = make_kuid(current_user_ns(), option);
+                       if (!uid_valid(*uid))
+                               return 0;
                        break;
                case Opt_gid:
                        if (match_int(args, &option))
                                return 0;
-                       *gid = option;
+                       *gid = make_kgid(current_user_ns(), option);
+                       if (!gid_valid(*gid))
+                               return 0;
                        break;
                case Opt_umask:
                        if (match_octal(args, &option))
@@ -378,8 +387,8 @@ HPFS filesystem options:\n\
 
 static int hpfs_remount_fs(struct super_block *s, int *flags, char *data)
 {
-       uid_t uid;
-       gid_t gid;
+       kuid_t uid;
+       kgid_t gid;
        umode_t umask;
        int lowercase, eas, chk, errs, chkdsk, timeshift;
        int o;
@@ -455,8 +464,8 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent)
        struct hpfs_sb_info *sbi;
        struct inode *root;
 
-       uid_t uid;
-       gid_t gid;
+       kuid_t uid;
+       kgid_t gid;
        umode_t umask;
        int lowercase, eas, chk, errs, chkdsk, timeshift;
 
index 8349a899912e5c47ca26c66df1be66c684bdce4d..9460120a5170213cc7bf1e3e6d79db4674b63571 100644 (file)
@@ -42,8 +42,8 @@ static const struct inode_operations hugetlbfs_dir_inode_operations;
 static const struct inode_operations hugetlbfs_inode_operations;
 
 struct hugetlbfs_config {
-       uid_t   uid;
-       gid_t   gid;
+       kuid_t   uid;
+       kgid_t   gid;
        umode_t mode;
        long    nr_blocks;
        long    nr_inodes;
@@ -785,13 +785,17 @@ hugetlbfs_parse_options(char *options, struct hugetlbfs_config *pconfig)
                case Opt_uid:
                        if (match_int(&args[0], &option))
                                goto bad_val;
-                       pconfig->uid = option;
+                       pconfig->uid = make_kuid(current_user_ns(), option);
+                       if (!uid_valid(pconfig->uid))
+                               goto bad_val;
                        break;
 
                case Opt_gid:
                        if (match_int(&args[0], &option))
                                goto bad_val;
-                       pconfig->gid = option;
+                       pconfig->gid = make_kgid(current_user_ns(), option);
+                       if (!gid_valid(pconfig->gid))
+                               goto bad_val;
                        break;
 
                case Opt_mode:
@@ -924,7 +928,9 @@ static struct vfsmount *hugetlbfs_vfsmount;
 
 static int can_do_hugetlb_shm(void)
 {
-       return capable(CAP_IPC_LOCK) || in_group_p(sysctl_hugetlb_shm_group);
+       kgid_t shm_group;
+       shm_group = make_kgid(&init_user_ns, sysctl_hugetlb_shm_group);
+       return capable(CAP_IPC_LOCK) || in_group_p(shm_group);
 }
 
 struct file *hugetlb_file_setup(const char *name, unsigned long addr,
@@ -1042,6 +1048,11 @@ static int __init init_hugetlbfs_fs(void)
 
 static void __exit exit_hugetlbfs_fs(void)
 {
+       /*
+        * Make sure all delayed rcu free inodes are flushed before we
+        * destroy cache.
+        */
+       rcu_barrier();
        kmem_cache_destroy(hugetlbfs_inode_cachep);
        kern_unmount(hugetlbfs_vfsmount);
        unregister_filesystem(&hugetlbfs_fs_type);
index 29167bebe874049fb847e1f19c2ae220f4ce91f0..3bdad6d1f26844b29c5911396876742f23088140 100644 (file)
@@ -603,21 +603,14 @@ int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd,
 
 SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd, unsigned long, arg)
 {
-       struct file *filp;
-       int error = -EBADF;
-       int fput_needed;
-
-       filp = fget_light(fd, &fput_needed);
-       if (!filp)
-               goto out;
-
-       error = security_file_ioctl(filp, cmd, arg);
-       if (error)
-               goto out_fput;
-
-       error = do_vfs_ioctl(filp, fd, cmd, arg);
- out_fput:
-       fput_light(filp, fput_needed);
- out:
+       int error;
+       struct fd f = fdget(fd);
+
+       if (!f.file)
+               return -EBADF;
+       error = security_file_ioctl(f.file, cmd, arg);
+       if (!error)
+               error = do_vfs_ioctl(f.file, fd, cmd, arg);
+       fdput(f);
        return error;
 }
index 29037c365ba4f4807eca405680def2c78353f966..67ce52507d7dec7f2f009624979ffa4910f8ea36 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/cdrom.h>
 #include <linux/parser.h>
 #include <linux/mpage.h>
+#include <linux/user_namespace.h>
 
 #include "isofs.h"
 #include "zisofs.h"
@@ -114,6 +115,11 @@ static int init_inodecache(void)
 
 static void destroy_inodecache(void)
 {
+       /*
+        * Make sure all delayed rcu free inodes are flushed before we
+        * destroy cache.
+        */
+       rcu_barrier();
        kmem_cache_destroy(isofs_inode_cachep);
 }
 
@@ -171,8 +177,8 @@ struct iso9660_options{
        unsigned int blocksize;
        umode_t fmode;
        umode_t dmode;
-       gid_t gid;
-       uid_t uid;
+       kgid_t gid;
+       kuid_t uid;
        char *iocharset;
        /* LVE */
        s32 session;
@@ -383,8 +389,8 @@ static int parse_options(char *options, struct iso9660_options *popt)
        popt->fmode = popt->dmode = ISOFS_INVALID_MODE;
        popt->uid_set = 0;
        popt->gid_set = 0;
-       popt->gid = 0;
-       popt->uid = 0;
+       popt->gid = GLOBAL_ROOT_GID;
+       popt->uid = GLOBAL_ROOT_UID;
        popt->iocharset = NULL;
        popt->utf8 = 0;
        popt->overriderockperm = 0;
@@ -460,13 +466,17 @@ static int parse_options(char *options, struct iso9660_options *popt)
                case Opt_uid:
                        if (match_int(&args[0], &option))
                                return 0;
-                       popt->uid = option;
+                       popt->uid = make_kuid(current_user_ns(), option);
+                       if (!uid_valid(popt->uid))
+                               return 0;
                        popt->uid_set = 1;
                        break;
                case Opt_gid:
                        if (match_int(&args[0], &option))
                                return 0;
-                       popt->gid = option;
+                       popt->gid = make_kgid(current_user_ns(), option);
+                       if (!gid_valid(popt->gid))
+                               return 0;
                        popt->gid_set = 1;
                        break;
                case Opt_mode:
index 3620ad1ea9bcfc31cf2814ad98df9896cbd8bba9..99167238518d61a30c4a5e6bfc838291d6206a5d 100644 (file)
@@ -52,8 +52,8 @@ struct isofs_sb_info {
 
        umode_t s_fmode;
        umode_t s_dmode;
-       gid_t s_gid;
-       uid_t s_uid;
+       kgid_t s_gid;
+       kuid_t s_uid;
        struct nls_table *s_nls_iocharset; /* Native language support table */
 };
 
index 70e79d0c756a9418155239f18f68d55323809e08..c0bf42472e408fd16911cee33f3d9079943aa46a 100644 (file)
@@ -364,8 +364,8 @@ repeat:
                case SIG('P', 'X'):
                        inode->i_mode = isonum_733(rr->u.PX.mode);
                        set_nlink(inode, isonum_733(rr->u.PX.n_links));
-                       inode->i_uid = isonum_733(rr->u.PX.uid);
-                       inode->i_gid = isonum_733(rr->u.PX.gid);
+                       i_uid_write(inode, isonum_733(rr->u.PX.uid));
+                       i_gid_write(inode, isonum_733(rr->u.PX.gid));
                        break;
                case SIG('P', 'N'):
                        {
index 922f146e42354fb34d7ed68f225901b44e5db77d..223283c301116f3d8d88cb70cf30aa030a54c456 100644 (file)
@@ -94,15 +94,23 @@ static struct posix_acl *jffs2_acl_from_medium(void *value, size_t size)
                        case ACL_MASK:
                        case ACL_OTHER:
                                value += sizeof(struct jffs2_acl_entry_short);
-                               acl->a_entries[i].e_id = ACL_UNDEFINED_ID;
                                break;
 
                        case ACL_USER:
+                               value += sizeof(struct jffs2_acl_entry);
+                               if (value > end)
+                                       goto fail;
+                               acl->a_entries[i].e_uid =
+                                       make_kuid(&init_user_ns,
+                                                 je32_to_cpu(entry->e_id));
+                               break;
                        case ACL_GROUP:
                                value += sizeof(struct jffs2_acl_entry);
                                if (value > end)
                                        goto fail;
-                               acl->a_entries[i].e_id = je32_to_cpu(entry->e_id);
+                               acl->a_entries[i].e_gid =
+                                       make_kgid(&init_user_ns,
+                                                 je32_to_cpu(entry->e_id));
                                break;
 
                        default:
@@ -131,13 +139,19 @@ static void *jffs2_acl_to_medium(const struct posix_acl *acl, size_t *size)
        header->a_version = cpu_to_je32(JFFS2_ACL_VERSION);
        e = header + 1;
        for (i=0; i < acl->a_count; i++) {
+               const struct posix_acl_entry *acl_e = &acl->a_entries[i];
                entry = e;
-               entry->e_tag = cpu_to_je16(acl->a_entries[i].e_tag);
-               entry->e_perm = cpu_to_je16(acl->a_entries[i].e_perm);
-               switch(acl->a_entries[i].e_tag) {
+               entry->e_tag = cpu_to_je16(acl_e->e_tag);
+               entry->e_perm = cpu_to_je16(acl_e->e_perm);
+               switch(acl_e->e_tag) {
                        case ACL_USER:
+                               entry->e_id = cpu_to_je32(
+                                       from_kuid(&init_user_ns, acl_e->e_uid));
+                               e += sizeof(struct jffs2_acl_entry);
+                               break;
                        case ACL_GROUP:
-                               entry->e_id = cpu_to_je32(acl->a_entries[i].e_id);
+                               entry->e_id = cpu_to_je32(
+                                       from_kgid(&init_user_ns, acl_e->e_gid));
                                e += sizeof(struct jffs2_acl_entry);
                                break;
 
@@ -363,7 +377,7 @@ static int jffs2_acl_getxattr(struct dentry *dentry, const char *name,
                return PTR_ERR(acl);
        if (!acl)
                return -ENODATA;
-       rc = posix_acl_to_xattr(acl, buffer, size);
+       rc = posix_acl_to_xattr(&init_user_ns, acl, buffer, size);
        posix_acl_release(acl);
 
        return rc;
@@ -381,7 +395,7 @@ static int jffs2_acl_setxattr(struct dentry *dentry, const char *name,
                return -EPERM;
 
        if (value) {
-               acl = posix_acl_from_xattr(value, size);
+               acl = posix_acl_from_xattr(&init_user_ns, value, size);
                if (IS_ERR(acl))
                        return PTR_ERR(acl);
                if (acl) {
index db3889ba8818dd473f37ba3b9e0b888e48eef708..60ef3fb707ffbfc6a77f1257efc82544cf0f9489 100644 (file)
@@ -175,8 +175,8 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
                ri.ino = cpu_to_je32(f->inocache->ino);
                ri.version = cpu_to_je32(++f->highest_version);
                ri.mode = cpu_to_jemode(inode->i_mode);
-               ri.uid = cpu_to_je16(inode->i_uid);
-               ri.gid = cpu_to_je16(inode->i_gid);
+               ri.uid = cpu_to_je16(i_uid_read(inode));
+               ri.gid = cpu_to_je16(i_gid_read(inode));
                ri.isize = cpu_to_je32(max((uint32_t)inode->i_size, pageofs));
                ri.atime = ri.ctime = ri.mtime = cpu_to_je32(get_seconds());
                ri.offset = cpu_to_je32(inode->i_size);
@@ -283,8 +283,8 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping,
        /* Set the fields that the generic jffs2_write_inode_range() code can't find */
        ri->ino = cpu_to_je32(inode->i_ino);
        ri->mode = cpu_to_jemode(inode->i_mode);
-       ri->uid = cpu_to_je16(inode->i_uid);
-       ri->gid = cpu_to_je16(inode->i_gid);
+       ri->uid = cpu_to_je16(i_uid_read(inode));
+       ri->gid = cpu_to_je16(i_gid_read(inode));
        ri->isize = cpu_to_je32((uint32_t)inode->i_size);
        ri->atime = ri->ctime = ri->mtime = cpu_to_je32(get_seconds());
 
index 3d3092eda8119faa47818fded504aa6a328511e7..fe3c0527545f3b96d495f422a220339b0373e620 100644 (file)
@@ -99,8 +99,10 @@ int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
        ri->ino = cpu_to_je32(inode->i_ino);
        ri->version = cpu_to_je32(++f->highest_version);
 
-       ri->uid = cpu_to_je16((ivalid & ATTR_UID)?iattr->ia_uid:inode->i_uid);
-       ri->gid = cpu_to_je16((ivalid & ATTR_GID)?iattr->ia_gid:inode->i_gid);
+       ri->uid = cpu_to_je16((ivalid & ATTR_UID)?
+               from_kuid(&init_user_ns, iattr->ia_uid):i_uid_read(inode));
+       ri->gid = cpu_to_je16((ivalid & ATTR_GID)?
+               from_kgid(&init_user_ns, iattr->ia_gid):i_gid_read(inode));
 
        if (ivalid & ATTR_MODE)
                ri->mode = cpu_to_jemode(iattr->ia_mode);
@@ -147,8 +149,8 @@ int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
        inode->i_ctime = ITIME(je32_to_cpu(ri->ctime));
        inode->i_mtime = ITIME(je32_to_cpu(ri->mtime));
        inode->i_mode = jemode_to_cpu(ri->mode);
-       inode->i_uid = je16_to_cpu(ri->uid);
-       inode->i_gid = je16_to_cpu(ri->gid);
+       i_uid_write(inode, je16_to_cpu(ri->uid));
+       i_gid_write(inode, je16_to_cpu(ri->gid));
 
 
        old_metadata = f->metadata;
@@ -276,8 +278,8 @@ struct inode *jffs2_iget(struct super_block *sb, unsigned long ino)
                return ERR_PTR(ret);
        }
        inode->i_mode = jemode_to_cpu(latest_node.mode);
-       inode->i_uid = je16_to_cpu(latest_node.uid);
-       inode->i_gid = je16_to_cpu(latest_node.gid);
+       i_uid_write(inode, je16_to_cpu(latest_node.uid));
+       i_gid_write(inode, je16_to_cpu(latest_node.gid));
        inode->i_size = je32_to_cpu(latest_node.isize);
        inode->i_atime = ITIME(je32_to_cpu(latest_node.atime));
        inode->i_mtime = ITIME(je32_to_cpu(latest_node.mtime));
@@ -440,14 +442,14 @@ struct inode *jffs2_new_inode (struct inode *dir_i, umode_t mode, struct jffs2_r
 
        memset(ri, 0, sizeof(*ri));
        /* Set OS-specific defaults for new inodes */
-       ri->uid = cpu_to_je16(current_fsuid());
+       ri->uid = cpu_to_je16(from_kuid(&init_user_ns, current_fsuid()));
 
        if (dir_i->i_mode & S_ISGID) {
-               ri->gid = cpu_to_je16(dir_i->i_gid);
+               ri->gid = cpu_to_je16(i_gid_read(dir_i));
                if (S_ISDIR(mode))
                        mode |= S_ISGID;
        } else {
-               ri->gid = cpu_to_je16(current_fsgid());
+               ri->gid = cpu_to_je16(from_kgid(&init_user_ns, current_fsgid()));
        }
 
        /* POSIX ACLs have to be processed now, at least partly.
@@ -467,8 +469,8 @@ struct inode *jffs2_new_inode (struct inode *dir_i, umode_t mode, struct jffs2_r
        set_nlink(inode, 1);
        inode->i_ino = je32_to_cpu(ri->ino);
        inode->i_mode = jemode_to_cpu(ri->mode);
-       inode->i_gid = je16_to_cpu(ri->gid);
-       inode->i_uid = je16_to_cpu(ri->uid);
+       i_gid_write(inode, je16_to_cpu(ri->gid));
+       i_uid_write(inode, je16_to_cpu(ri->uid));
        inode->i_atime = inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC;
        ri->atime = ri->mtime = ri->ctime = cpu_to_je32(I_SEC(inode->i_mtime));
 
index bcd983d7e7f99e7e295decc1d26092d464a14d9f..d200a9b8fd5efc86e71113bcb8090d0c87674a90 100644 (file)
@@ -27,8 +27,8 @@ struct kvec;
 
 #define JFFS2_F_I_SIZE(f) (OFNI_EDONI_2SFFJ(f)->i_size)
 #define JFFS2_F_I_MODE(f) (OFNI_EDONI_2SFFJ(f)->i_mode)
-#define JFFS2_F_I_UID(f) (OFNI_EDONI_2SFFJ(f)->i_uid)
-#define JFFS2_F_I_GID(f) (OFNI_EDONI_2SFFJ(f)->i_gid)
+#define JFFS2_F_I_UID(f) (i_uid_read(OFNI_EDONI_2SFFJ(f)))
+#define JFFS2_F_I_GID(f) (i_gid_read(OFNI_EDONI_2SFFJ(f)))
 #define JFFS2_F_I_RDEV(f) (OFNI_EDONI_2SFFJ(f)->i_rdev)
 
 #define ITIME(sec) ((struct timespec){sec, 0})
index 61ea41389f90d91d8b3ab6a6a39cd580720f1950..ff487954cd96f1447b254b28a41fcdbd82b61b55 100644 (file)
@@ -418,6 +418,12 @@ static void __exit exit_jffs2_fs(void)
        unregister_filesystem(&jffs2_fs_type);
        jffs2_destroy_slab_caches();
        jffs2_compressors_exit();
+
+       /*
+        * Make sure all delayed rcu free inodes are flushed before we
+        * destroy cache.
+        */
+       rcu_barrier();
        kmem_cache_destroy(jffs2_inode_cachep);
 }
 
index a58fa72d7e59e511cc849eb01cee92fa5afb1320..d20d4737b3ef917e76a440e196efe09c4cf6ce10 100644 (file)
@@ -6,7 +6,7 @@ obj-$(CONFIG_JFS_FS) += jfs.o
 
 jfs-y    := super.o file.o inode.o namei.o jfs_mount.o jfs_umount.o \
            jfs_xtree.o jfs_imap.o jfs_debug.o jfs_dmap.o \
-           jfs_unicode.o jfs_dtree.o jfs_inode.o \
+           jfs_unicode.o jfs_dtree.o jfs_inode.o jfs_discard.o \
            jfs_extent.o symlink.o jfs_metapage.o \
            jfs_logmgr.o jfs_txnmgr.o jfs_uniupr.o \
            resize.o xattr.o ioctl.o
index 45559dc3ea2f59297f57c523a8eac2b3f40c06c6..d254d6d3599565fbca59621cf840d105d9cd4971 100644 (file)
@@ -64,7 +64,7 @@ struct posix_acl *jfs_get_acl(struct inode *inode, int type)
                else
                        acl = ERR_PTR(size);
        } else {
-               acl = posix_acl_from_xattr(value, size);
+               acl = posix_acl_from_xattr(&init_user_ns, value, size);
        }
        kfree(value);
        if (!IS_ERR(acl))
@@ -100,7 +100,7 @@ static int jfs_set_acl(tid_t tid, struct inode *inode, int type,
                value = kmalloc(size, GFP_KERNEL);
                if (!value)
                        return -ENOMEM;
-               rc = posix_acl_to_xattr(acl, value, size);
+               rc = posix_acl_to_xattr(&init_user_ns, acl, value, size);
                if (rc < 0)
                        goto out;
        }
index 844f9460cb11344dc65253c79ef5ec0baf6fe576..9d3afd157f9908dc3278965efe353dcf42694092 100644 (file)
@@ -108,8 +108,8 @@ int jfs_setattr(struct dentry *dentry, struct iattr *iattr)
 
        if (is_quota_modification(inode, iattr))
                dquot_initialize(inode);
-       if ((iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) ||
-           (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)) {
+       if ((iattr->ia_valid & ATTR_UID && !uid_eq(iattr->ia_uid, inode->i_uid)) ||
+           (iattr->ia_valid & ATTR_GID && !gid_eq(iattr->ia_gid, inode->i_gid))) {
                rc = dquot_transfer(inode, iattr);
                if (rc)
                        return rc;
index f19d1e04a374b39e9dc97779f9539d5598f032dd..bc555ff417e9534cc639bc3ff0c05c2351838177 100644 (file)
 #include <linux/mount.h>
 #include <linux/time.h>
 #include <linux/sched.h>
+#include <linux/blkdev.h>
 #include <asm/current.h>
 #include <asm/uaccess.h>
 
+#include "jfs_filsys.h"
+#include "jfs_debug.h"
 #include "jfs_incore.h"
 #include "jfs_dinode.h"
 #include "jfs_inode.h"
-
+#include "jfs_dmap.h"
+#include "jfs_discard.h"
 
 static struct {
        long jfs_flag;
@@ -123,6 +127,40 @@ setflags_out:
                mnt_drop_write_file(filp);
                return err;
        }
+
+       case FITRIM:
+       {
+               struct super_block *sb = inode->i_sb;
+               struct request_queue *q = bdev_get_queue(sb->s_bdev);
+               struct fstrim_range range;
+               s64 ret = 0;
+
+               if (!capable(CAP_SYS_ADMIN))
+                       return -EPERM;
+
+               if (!blk_queue_discard(q)) {
+                       jfs_warn("FITRIM not supported on device");
+                       return -EOPNOTSUPP;
+               }
+
+               if (copy_from_user(&range, (struct fstrim_range __user *)arg,
+                   sizeof(range)))
+                       return -EFAULT;
+
+               range.minlen = max_t(unsigned int, range.minlen,
+                       q->limits.discard_granularity);
+
+               ret = jfs_ioc_trim(inode, &range);
+               if (ret < 0)
+                       return ret;
+
+               if (copy_to_user((struct fstrim_range __user *)arg, &range,
+                   sizeof(range)))
+                       return -EFAULT;
+
+               return 0;
+       }
+
        default:
                return -ENOTTY;
        }
@@ -142,6 +180,9 @@ long jfs_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
        case JFS_IOC_SETFLAGS32:
                cmd = JFS_IOC_SETFLAGS;
                break;
+       case FITRIM:
+               cmd = FITRIM;
+               break;
        }
        return jfs_ioctl(filp, cmd, arg);
 }
diff --git a/fs/jfs/jfs_discard.c b/fs/jfs/jfs_discard.c
new file mode 100644 (file)
index 0000000..9947563
--- /dev/null
@@ -0,0 +1,117 @@
+/*
+ *   Copyright (C) Tino Reichardt, 2012
+ *
+ *   This program is free software;  you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; either version 2 of the License, or
+ *   (at your option) any later version.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program;  if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/blkdev.h>
+
+#include "jfs_incore.h"
+#include "jfs_superblock.h"
+#include "jfs_discard.h"
+#include "jfs_dmap.h"
+#include "jfs_debug.h"
+
+
+/*
+ * NAME:       jfs_issue_discard()
+ *
+ * FUNCTION:   TRIM the specified block range on device, if supported
+ *
+ * PARAMETERS:
+ *     ip      - pointer to in-core inode
+ *     blkno   - starting block number to be trimmed (0..N)
+ *     nblocks - number of blocks to be trimmed
+ *
+ * RETURN VALUES:
+ *     none
+ *
+ * serialization: IREAD_LOCK(ipbmap) held on entry/exit;
+ */
+void jfs_issue_discard(struct inode *ip, u64 blkno, u64 nblocks)
+{
+       struct super_block *sb = ip->i_sb;
+       int r = 0;
+
+       /* Best-effort TRIM: a failure is only logged, never propagated,
+        * because discard is a hint to the device, not a data operation.
+        * Note: 'ip' is used solely to reach the superblock here. */
+       r = sb_issue_discard(sb, blkno, nblocks, GFP_NOFS, 0);
+       if (unlikely(r != 0)) {
+               jfs_err("JFS: sb_issue_discard" \
+                       "(%p, %llu, %llu, GFP_NOFS, 0) = %d => failed!\n",
+                       sb, (unsigned long long)blkno,
+                       (unsigned long long)nblocks, r);
+       }
+
+       /* Trace every attempt (including successes) at info level. */
+       jfs_info("JFS: sb_issue_discard" \
+               "(%p, %llu, %llu, GFP_NOFS, 0) = %d\n",
+               sb, (unsigned long long)blkno,
+               (unsigned long long)nblocks, r);
+
+       return;
+}
+
+/*
+ * NAME:       jfs_ioc_trim()
+ *
+ * FUNCTION:   attempt to discard (TRIM) all free blocks from the
+ *              filesystem.
+ *
+ * PARAMETERS:
+ *     ip      - pointer to in-core inode;
+ *     range   - the range, given by user space
+ *
+ * RETURN VALUES:
+ *     0       - success
+ *     -EIO    - i/o error
+ */
+int jfs_ioc_trim(struct inode *ip, struct fstrim_range *range)
+{
+       struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
+       struct bmap *bmp = JFS_SBI(ip->i_sb)->bmap;
+       struct super_block *sb = ipbmap->i_sb;
+       int agno, agno_end;
+       s64 start, end, minlen;
+       u64 trimmed = 0;
+
+       /**
+        * convert byte values to block size of filesystem:
+        * start:       First Byte to trim
+        * len:         number of Bytes to trim from start
+        * minlen:      minimum extent length in Bytes
+        */
+       start = range->start >> sb->s_blocksize_bits;
+       if (start < 0)
+               start = 0;
+       /* Clamp the requested range to the block map; a range past the
+        * end of the fs is silently truncated rather than rejected. */
+       end = start + (range->len >> sb->s_blocksize_bits) - 1;
+       if (end >= bmp->db_mapsize)
+               end = bmp->db_mapsize - 1;
+       /* A sub-block minlen rounds up to one block so the AG walk below
+        * always has a positive minimum extent. */
+       minlen = range->minlen >> sb->s_blocksize_bits;
+       if (minlen <= 0)
+               minlen = 1;
+
+       /**
+        * we trim all ag's within the range
+        */
+       agno = BLKTOAG(start, JFS_SBI(ip->i_sb));
+       agno_end = BLKTOAG(end, JFS_SBI(ip->i_sb));
+       while (agno <= agno_end) {
+               /* dbDiscardAG returns blocks actually trimmed; per-AG
+                * failures surface only as a smaller total, not an errno. */
+               trimmed += dbDiscardAG(ip, agno, minlen);
+               agno++;
+       }
+       /* Report back to user space in bytes, as FITRIM expects. */
+       range->len = trimmed << sb->s_blocksize_bits;
+
+       return 0;
+}
diff --git a/fs/jfs/jfs_discard.h b/fs/jfs/jfs_discard.h
new file mode 100644 (file)
index 0000000..40d1ee6
--- /dev/null
@@ -0,0 +1,26 @@
+/*
+ *   Copyright (C) Tino Reichardt, 2012
+ *
+ *   This program is free software;  you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; either version 2 of the License, or
+ *   (at your option) any later version.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program;  if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef _H_JFS_DISCARD
+#define _H_JFS_DISCARD
+
+/* Forward declaration only; callers pass a pointer, so the full
+ * definition from <linux/fs.h> is not needed here. */
+struct fstrim_range;
+
+/* Issue a best-effort TRIM for [blkno, blkno+nblocks) on ip's device. */
+extern void jfs_issue_discard(struct inode *ip, u64 blkno, u64 nblocks);
+/* FITRIM backend: trim all free extents in the given byte range. */
+extern int jfs_ioc_trim(struct inode *ip, struct fstrim_range *range);
+
+#endif /* _H_JFS_DISCARD */
index 9cbd11a3f804d23e2fb005db0c1f286c4e7f94db..9a55f53be5ff6a38678a8d52253b14c7e8004941 100644 (file)
@@ -1,5 +1,6 @@
 /*
  *   Copyright (C) International Business Machines Corp., 2000-2004
+ *   Portions Copyright (C) Tino Reichardt, 2012
  *
  *   This program is free software;  you can redistribute it and/or modify
  *   it under the terms of the GNU General Public License as published by
@@ -25,6 +26,7 @@
 #include "jfs_lock.h"
 #include "jfs_metapage.h"
 #include "jfs_debug.h"
+#include "jfs_discard.h"
 
 /*
  *     SERIALIZATION of the Block Allocation Map.
@@ -104,7 +106,6 @@ static int dbFreeBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
 static int dbFreeDmap(struct bmap * bmp, struct dmap * dp, s64 blkno,
                      int nblocks);
 static int dbMaxBud(u8 * cp);
-s64 dbMapFileSizeToMapSize(struct inode *ipbmap);
 static int blkstol2(s64 nb);
 
 static int cntlz(u32 value);
@@ -145,7 +146,6 @@ static const s8 budtab[256] = {
        2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, -1
 };
 
-
 /*
  * NAME:       dbMount()
  *
@@ -310,7 +310,6 @@ int dbSync(struct inode *ipbmap)
        return (0);
 }
 
-
 /*
  * NAME:       dbFree()
  *
@@ -337,6 +336,7 @@ int dbFree(struct inode *ip, s64 blkno, s64 nblocks)
        s64 lblkno, rem;
        struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
        struct bmap *bmp = JFS_SBI(ip->i_sb)->bmap;
+       struct super_block *sb = ipbmap->i_sb;
 
        IREAD_LOCK(ipbmap, RDWRLOCK_DMAP);
 
@@ -351,6 +351,13 @@ int dbFree(struct inode *ip, s64 blkno, s64 nblocks)
                return -EIO;
        }
 
+       /**
+        * TRIM the blocks, when mounted with discard option
+        */
+       if (JFS_SBI(sb)->flag & JFS_DISCARD)
+               if (JFS_SBI(sb)->minblks_trim <= nblocks)
+                       jfs_issue_discard(ipbmap, blkno, nblocks);
+
        /*
         * free the blocks a dmap at a time.
         */
@@ -1095,7 +1102,6 @@ static int dbExtend(struct inode *ip, s64 blkno, s64 nblocks, s64 addnblocks)
                /* we were not successful */
                release_metapage(mp);
 
-
        return (rc);
 }
 
@@ -1589,6 +1595,118 @@ static int dbAllocAny(struct bmap * bmp, s64 nblocks, int l2nb, s64 * results)
 }
 
 
+/*
+ * NAME:       dbDiscardAG()
+ *
+ * FUNCTION:   attempt to discard (TRIM) all free blocks of specific AG
+ *
+ *             algorithm:
+ *             1) allocate blocks, as large as possible and save them
+ *                while holding IWRITE_LOCK on ipbmap
+ *             2) trim all these saved block/length values
+ *             3) mark the blocks free again
+ *
+ *             benefit:
+ *             - we work only on one ag at some time, minimizing how long we
+ *               need to lock ipbmap
+ *             - reading / writing the fs is possible most time, even on
+ *               trimming
+ *
+ *             downside:
+ *             - we write two times to the dmapctl and dmap pages
+ *             - but for me, this seems the best way, better ideas?
+ *             /TR 2012
+ *
+ * PARAMETERS:
+ *     ip      - pointer to in-core inode
+ *     agno    - ag to trim
+ *     minlen  - minimum value of contiguous blocks
+ *
+ * RETURN VALUES:
+ *     s64     - actual number of blocks trimmed
+ */
+s64 dbDiscardAG(struct inode *ip, int agno, s64 minlen)
+{
+       struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
+       struct bmap *bmp = JFS_SBI(ip->i_sb)->bmap;
+       s64 nblocks, blkno;
+       u64 trimmed = 0;
+       int rc, l2nb;
+       struct super_block *sb = ipbmap->i_sb;
+
+       /* Scratch list of extents temporarily allocated for trimming;
+        * each entry is discarded and then freed back in pass two. */
+       struct range2trim {
+               u64 blkno;
+               u64 nblocks;
+       } *totrim, *tt;
+
+       /* max blkno / nblocks pairs to trim */
+       int count = 0, range_cnt;
+       u64 max_ranges;
+
+       /* prevent others from writing new stuff here, while trimming */
+       IWRITE_LOCK(ipbmap, RDWRLOCK_DMAP);
+
+       /* Upper bound on extents: free blocks / minlen, capped at 32K
+        * entries (+1 for the zero-length terminator written below). */
+       nblocks = bmp->db_agfree[agno];
+       max_ranges = nblocks;
+       do_div(max_ranges, minlen);
+       range_cnt = min_t(u64, max_ranges + 1, 32 * 1024);
+       totrim = kmalloc(sizeof(struct range2trim) * range_cnt, GFP_NOFS);
+       if (totrim == NULL) {
+               jfs_error(bmp->db_ipbmap->i_sb,
+                         "dbDiscardAG: no memory for trim array");
+               IWRITE_UNLOCK(ipbmap);
+               return 0;
+       }
+
+       /* Pass 1 (under IWRITE_LOCK): grab the largest free extents we
+        * can so nobody else can allocate them while we trim. */
+       tt = totrim;
+       while (nblocks >= minlen) {
+               l2nb = BLKSTOL2(nblocks);
+
+               /* 0 = okay, -EIO = fatal, -ENOSPC -> try smaller block */
+               rc = dbAllocAG(bmp, agno, nblocks, l2nb, &blkno);
+               if (rc == 0) {
+                       tt->blkno = blkno;
+                       tt->nblocks = nblocks;
+                       tt++; count++;
+
+                       /* the whole ag is free, trim now */
+                       if (bmp->db_agfree[agno] == 0)
+                               break;
+
+                       /* give a hint for the next while */
+                       nblocks = bmp->db_agfree[agno];
+                       continue;
+               } else if (rc == -ENOSPC) {
+                       /* search for next smaller log2 block */
+                       l2nb = BLKSTOL2(nblocks) - 1;
+                       nblocks = 1 << l2nb;
+               } else {
+                       /* Trim any already allocated blocks */
+                       jfs_error(bmp->db_ipbmap->i_sb,
+                               "dbDiscardAG: -EIO");
+                       break;
+               }
+
+               /* check, if our trim array is full */
+               if (unlikely(count >= range_cnt - 1))
+                       break;
+       }
+       /* Pass 2 runs unlocked: readers/writers may proceed while the
+        * (potentially slow) device discards are in flight. */
+       IWRITE_UNLOCK(ipbmap);
+
+       tt->nblocks = 0; /* mark the current end */
+       for (tt = totrim; tt->nblocks != 0; tt++) {
+               /* when mounted with online discard, dbFree() will
+                * call jffs_issue_discard() itself */
+               if (!(JFS_SBI(sb)->flag & JFS_DISCARD))
+                       jfs_issue_discard(ip, tt->blkno, tt->nblocks);
+               dbFree(ip, tt->blkno, tt->nblocks);
+               trimmed += tt->nblocks;
+       }
+       kfree(totrim);
+
+       return trimmed;
+}
+
 /*
  * NAME:       dbFindCtl()
  *
index 6dcb906c55d847647257aa0dbb4b776c125642b5..562b9a7e4311f5b025ad4efcb54f1fe88e609a78 100644 (file)
@@ -311,4 +311,6 @@ extern int dbAllocBottomUp(struct inode *ip, s64 blkno, s64 nblocks);
 extern int dbExtendFS(struct inode *ipbmap, s64 blkno, s64 nblocks);
 extern void dbFinalizeBmap(struct inode *ipbmap);
 extern s64 dbMapFileSizeToMapSize(struct inode *ipbmap);
+extern s64 dbDiscardAG(struct inode *ip, int agno, s64 minlen);
+
 #endif                         /* _H_JFS_DMAP */
index b3f5463fbe5233a4c5cd45a411bf10bea3843d4a..b67d64671bb407cabf7f5f6a91cc3ac4d595c892 100644 (file)
@@ -45,6 +45,9 @@
 /* mount time flag to disable journaling to disk */
 #define JFS_NOINTEGRITY 0x00000040
 
+/* mount time flag to enable TRIM to ssd disks */
+#define JFS_DISCARD     0x00000080
+
 /* commit option */
 #define        JFS_COMMIT      0x00000f00      /* commit option mask */
 #define        JFS_GROUPCOMMIT 0x00000100      /* group (of 1) commit */
index 1b6f15f191b36b133c4179bfd64fb6593f047ea7..6ba4006e011b84eedb3e42de213e20d6ae34ce0b 100644 (file)
@@ -3078,15 +3078,15 @@ static int copy_from_dinode(struct dinode * dip, struct inode *ip)
        }
        set_nlink(ip, le32_to_cpu(dip->di_nlink));
 
-       jfs_ip->saved_uid = le32_to_cpu(dip->di_uid);
-       if (sbi->uid == -1)
+       jfs_ip->saved_uid = make_kuid(&init_user_ns, le32_to_cpu(dip->di_uid));
+       if (!uid_valid(sbi->uid))
                ip->i_uid = jfs_ip->saved_uid;
        else {
                ip->i_uid = sbi->uid;
        }
 
-       jfs_ip->saved_gid = le32_to_cpu(dip->di_gid);
-       if (sbi->gid == -1)
+       jfs_ip->saved_gid = make_kgid(&init_user_ns, le32_to_cpu(dip->di_gid));
+       if (!gid_valid(sbi->gid))
                ip->i_gid = jfs_ip->saved_gid;
        else {
                ip->i_gid = sbi->gid;
@@ -3150,14 +3150,16 @@ static void copy_to_dinode(struct dinode * dip, struct inode *ip)
        dip->di_size = cpu_to_le64(ip->i_size);
        dip->di_nblocks = cpu_to_le64(PBLK2LBLK(ip->i_sb, ip->i_blocks));
        dip->di_nlink = cpu_to_le32(ip->i_nlink);
-       if (sbi->uid == -1)
-               dip->di_uid = cpu_to_le32(ip->i_uid);
+       if (!uid_valid(sbi->uid))
+               dip->di_uid = cpu_to_le32(i_uid_read(ip));
        else
-               dip->di_uid = cpu_to_le32(jfs_ip->saved_uid);
-       if (sbi->gid == -1)
-               dip->di_gid = cpu_to_le32(ip->i_gid);
+               dip->di_uid =cpu_to_le32(from_kuid(&init_user_ns,
+                                                  jfs_ip->saved_uid));
+       if (!gid_valid(sbi->gid))
+               dip->di_gid = cpu_to_le32(i_gid_read(ip));
        else
-               dip->di_gid = cpu_to_le32(jfs_ip->saved_gid);
+               dip->di_gid = cpu_to_le32(from_kgid(&init_user_ns,
+                                                   jfs_ip->saved_gid));
        jfs_get_inode_flags(jfs_ip);
        /*
         * mode2 is only needed for storing the higher order bits.
index 584a4a1a6e81478a2e212b2f20ec08400159e946..cf47f09e8ac8ab692df3aa14f9420f79005d1725 100644 (file)
@@ -38,8 +38,8 @@
 struct jfs_inode_info {
        int     fileset;        /* fileset number (always 16)*/
        uint    mode2;          /* jfs-specific mode            */
-       uint    saved_uid;      /* saved for uid mount option */
-       uint    saved_gid;      /* saved for gid mount option */
+       kuid_t  saved_uid;      /* saved for uid mount option */
+       kgid_t  saved_gid;      /* saved for gid mount option */
        pxd_t   ixpxd;          /* inode extent descriptor      */
        dxd_t   acl;            /* dxd describing acl   */
        dxd_t   ea;             /* dxd describing ea    */
@@ -192,9 +192,10 @@ struct jfs_sb_info {
        uint            state;          /* mount/recovery state */
        unsigned long   flag;           /* mount time flags */
        uint            p_state;        /* state prior to going no integrity */
-       uint            uid;            /* uid to override on-disk uid */
-       uint            gid;            /* gid to override on-disk gid */
+       kuid_t          uid;            /* uid to override on-disk uid */
+       kgid_t          gid;            /* gid to override on-disk gid */
        uint            umask;          /* umask to override on-disk umask */
+       uint            minblks_trim;   /* minimum blocks, for online trim */
 };
 
 /* jfs_sb_info commit_state */
index bb8b661bcc5055aeb63796b1bbcc47c309608019..5fcc02eaa64ca2af23ba4bba6431647fa04deb4e 100644 (file)
@@ -2977,12 +2977,9 @@ int jfs_sync(void *arg)
                                 * put back on the anon_list.
                                 */
 
-                               /* Take off anon_list */
-                               list_del(&jfs_ip->anon_inode_list);
-
-                               /* Put on anon_list2 */
-                               list_add(&jfs_ip->anon_inode_list,
-                                        &TxAnchor.anon_list2);
+                               /* Move from anon_list to anon_list2 */
+                               list_move(&jfs_ip->anon_inode_list,
+                                         &TxAnchor.anon_list2);
 
                                TXN_UNLOCK();
                                iput(ip);
index c55c7452d2857c1f0f59042f5b6ffd1de9eba8b6..1a543be09c793bb150efe07f73b92c5b85b4b3d1 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/slab.h>
 #include <asm/uaccess.h>
 #include <linux/seq_file.h>
+#include <linux/blkdev.h>
 
 #include "jfs_incore.h"
 #include "jfs_filsys.h"
@@ -100,7 +101,7 @@ void jfs_error(struct super_block *sb, const char * function, ...)
        vsnprintf(error_buf, sizeof(error_buf), function, args);
        va_end(args);
 
-       printk(KERN_ERR "ERROR: (device %s): %s\n", sb->s_id, error_buf);
+       pr_err("ERROR: (device %s): %s\n", sb->s_id, error_buf);
 
        jfs_handle_error(sb);
 }
@@ -197,7 +198,8 @@ static void jfs_put_super(struct super_block *sb)
 enum {
        Opt_integrity, Opt_nointegrity, Opt_iocharset, Opt_resize,
        Opt_resize_nosize, Opt_errors, Opt_ignore, Opt_err, Opt_quota,
-       Opt_usrquota, Opt_grpquota, Opt_uid, Opt_gid, Opt_umask
+       Opt_usrquota, Opt_grpquota, Opt_uid, Opt_gid, Opt_umask,
+       Opt_discard, Opt_nodiscard, Opt_discard_minblk
 };
 
 static const match_table_t tokens = {
@@ -214,6 +216,9 @@ static const match_table_t tokens = {
        {Opt_uid, "uid=%u"},
        {Opt_gid, "gid=%u"},
        {Opt_umask, "umask=%u"},
+       {Opt_discard, "discard"},
+       {Opt_nodiscard, "nodiscard"},
+       {Opt_discard_minblk, "discard=%u"},
        {Opt_err, NULL}
 };
 
@@ -255,8 +260,7 @@ static int parse_options(char *options, struct super_block *sb, s64 *newLVSize,
                        else {
                                nls_map = load_nls(args[0].from);
                                if (!nls_map) {
-                                       printk(KERN_ERR
-                                              "JFS: charset not found\n");
+                                       pr_err("JFS: charset not found\n");
                                        goto cleanup;
                                }
                        }
@@ -272,8 +276,7 @@ static int parse_options(char *options, struct super_block *sb, s64 *newLVSize,
                        *newLVSize = sb->s_bdev->bd_inode->i_size >>
                                sb->s_blocksize_bits;
                        if (*newLVSize == 0)
-                               printk(KERN_ERR
-                                      "JFS: Cannot determine volume size\n");
+                               pr_err("JFS: Cannot determine volume size\n");
                        break;
                }
                case Opt_errors:
@@ -294,8 +297,7 @@ static int parse_options(char *options, struct super_block *sb, s64 *newLVSize,
                                *flag &= ~JFS_ERR_REMOUNT_RO;
                                *flag |= JFS_ERR_PANIC;
                        } else {
-                               printk(KERN_ERR
-                                      "JFS: %s is an invalid error handler\n",
+                               pr_err("JFS: %s is an invalid error handler\n",
                                       errors);
                                goto cleanup;
                        }
@@ -314,33 +316,76 @@ static int parse_options(char *options, struct super_block *sb, s64 *newLVSize,
                case Opt_usrquota:
                case Opt_grpquota:
                case Opt_quota:
-                       printk(KERN_ERR
-                              "JFS: quota operations not supported\n");
+                       pr_err("JFS: quota operations not supported\n");
                        break;
 #endif
                case Opt_uid:
                {
                        char *uid = args[0].from;
-                       sbi->uid = simple_strtoul(uid, &uid, 0);
+                       uid_t val = simple_strtoul(uid, &uid, 0);
+                       sbi->uid = make_kuid(current_user_ns(), val);
+                       if (!uid_valid(sbi->uid))
+                               goto cleanup;
                        break;
                }
+
                case Opt_gid:
                {
                        char *gid = args[0].from;
-                       sbi->gid = simple_strtoul(gid, &gid, 0);
+                       gid_t val = simple_strtoul(gid, &gid, 0);
+                       sbi->gid = make_kgid(current_user_ns(), val);
+                       if (!gid_valid(sbi->gid))
+                               goto cleanup;
                        break;
                }
+
                case Opt_umask:
                {
                        char *umask = args[0].from;
                        sbi->umask = simple_strtoul(umask, &umask, 8);
                        if (sbi->umask & ~0777) {
-                               printk(KERN_ERR
-                                      "JFS: Invalid value of umask\n");
+                               pr_err("JFS: Invalid value of umask\n");
                                goto cleanup;
                        }
                        break;
                }
+
+               case Opt_discard:
+               {
+                       struct request_queue *q = bdev_get_queue(sb->s_bdev);
+                       /* if set to 1, even copying files will cause
+                        * trimming :O
+                        * -> user has more control over the online trimming
+                        */
+                       sbi->minblks_trim = 64;
+                       if (blk_queue_discard(q)) {
+                               *flag |= JFS_DISCARD;
+                       } else {
+                               pr_err("JFS: discard option " \
+                                       "not supported on device\n");
+                       }
+                       break;
+               }
+
+               case Opt_nodiscard:
+                       *flag &= ~JFS_DISCARD;
+                       break;
+
+               case Opt_discard_minblk:
+               {
+                       struct request_queue *q = bdev_get_queue(sb->s_bdev);
+                       char *minblks_trim = args[0].from;
+                       if (blk_queue_discard(q)) {
+                               *flag |= JFS_DISCARD;
+                               sbi->minblks_trim = simple_strtoull(
+                                       minblks_trim, &minblks_trim, 0);
+                       } else {
+                               pr_err("JFS: discard option " \
+                                       "not supported on device\n");
+                       }
+                       break;
+               }
+
                default:
                        printk("jfs: Unrecognized mount option \"%s\" "
                                        " or missing value\n", p);
@@ -374,8 +419,8 @@ static int jfs_remount(struct super_block *sb, int *flags, char *data)
 
        if (newLVSize) {
                if (sb->s_flags & MS_RDONLY) {
-                       printk(KERN_ERR
-                 "JFS: resize requires volume to be mounted read-write\n");
+                       pr_err("JFS: resize requires volume" \
+                               " to be mounted read-write\n");
                        return -EROFS;
                }
                rc = jfs_extendfs(sb, newLVSize, 0);
@@ -443,7 +488,9 @@ static int jfs_fill_super(struct super_block *sb, void *data, int silent)
        sb->s_fs_info = sbi;
        sb->s_max_links = JFS_LINK_MAX;
        sbi->sb = sb;
-       sbi->uid = sbi->gid = sbi->umask = -1;
+       sbi->uid = INVALID_UID;
+       sbi->gid = INVALID_GID;
+       sbi->umask = -1;
 
        /* initialize the mount flag and determine the default error handler */
        flag = JFS_ERR_REMOUNT_RO;
@@ -457,7 +504,7 @@ static int jfs_fill_super(struct super_block *sb, void *data, int silent)
 #endif
 
        if (newLVSize) {
-               printk(KERN_ERR "resize option for remount only\n");
+               pr_err("resize option for remount only\n");
                goto out_kfree;
        }
 
@@ -617,14 +664,16 @@ static int jfs_show_options(struct seq_file *seq, struct dentry *root)
 {
        struct jfs_sb_info *sbi = JFS_SBI(root->d_sb);
 
-       if (sbi->uid != -1)
-               seq_printf(seq, ",uid=%d", sbi->uid);
-       if (sbi->gid != -1)
-               seq_printf(seq, ",gid=%d", sbi->gid);
+       if (uid_valid(sbi->uid))
+               seq_printf(seq, ",uid=%d", from_kuid(&init_user_ns, sbi->uid));
+       if (gid_valid(sbi->gid))
+               seq_printf(seq, ",gid=%d", from_kgid(&init_user_ns, sbi->gid));
        if (sbi->umask != -1)
                seq_printf(seq, ",umask=%03o", sbi->umask);
        if (sbi->flag & JFS_NOINTEGRITY)
                seq_puts(seq, ",nointegrity");
+       if (sbi->flag & JFS_DISCARD)
+               seq_printf(seq, ",discard=%u", sbi->minblks_trim);
        if (sbi->nls_tab)
                seq_printf(seq, ",iocharset=%s", sbi->nls_tab->charset);
        if (sbi->flag & JFS_ERR_CONTINUE)
@@ -903,6 +952,12 @@ static void __exit exit_jfs_fs(void)
        jfs_proc_clean();
 #endif
        unregister_filesystem(&jfs_fs_type);
+
+       /*
+        * Make sure all delayed rcu free inodes are flushed before we
+        * destroy cache.
+        */
+       rcu_barrier();
        kmem_cache_destroy(jfs_inode_cachep);
 }
 
index 26683e15b3ac9f23991110687023e0f6e3bb0db0..42d67f9757bf641d316b07e78a3fb1a698f76409 100644 (file)
@@ -685,7 +685,7 @@ static int can_set_system_xattr(struct inode *inode, const char *name,
         * POSIX_ACL_XATTR_ACCESS is tied to i_mode
         */
        if (strcmp(name, POSIX_ACL_XATTR_ACCESS) == 0) {
-               acl = posix_acl_from_xattr(value, value_len);
+               acl = posix_acl_from_xattr(&init_user_ns, value, value_len);
                if (IS_ERR(acl)) {
                        rc = PTR_ERR(acl);
                        printk(KERN_ERR "posix_acl_from_xattr returned %d\n",
@@ -710,7 +710,7 @@ static int can_set_system_xattr(struct inode *inode, const char *name,
 
                return 0;
        } else if (strcmp(name, POSIX_ACL_XATTR_DEFAULT) == 0) {
-               acl = posix_acl_from_xattr(value, value_len);
+               acl = posix_acl_from_xattr(&init_user_ns, value, value_len);
                if (IS_ERR(acl)) {
                        rc = PTR_ERR(acl);
                        printk(KERN_ERR "posix_acl_from_xattr returned %d\n",
index 7e81bfc751644b0cfb70c9c4291ab6e7fb644ea3..abc7dc6c490b6dc7ca6e9119c585dc10b1a2d3d9 100644 (file)
@@ -1625,15 +1625,13 @@ EXPORT_SYMBOL(flock_lock_file_wait);
  */
 SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
 {
-       struct file *filp;
-       int fput_needed;
+       struct fd f = fdget(fd);
        struct file_lock *lock;
        int can_sleep, unlock;
        int error;
 
        error = -EBADF;
-       filp = fget_light(fd, &fput_needed);
-       if (!filp)
+       if (!f.file)
                goto out;
 
        can_sleep = !(cmd & LOCK_NB);
@@ -1641,31 +1639,31 @@ SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
        unlock = (cmd == LOCK_UN);
 
        if (!unlock && !(cmd & LOCK_MAND) &&
-           !(filp->f_mode & (FMODE_READ|FMODE_WRITE)))
+           !(f.file->f_mode & (FMODE_READ|FMODE_WRITE)))
                goto out_putf;
 
-       error = flock_make_lock(filp, &lock, cmd);
+       error = flock_make_lock(f.file, &lock, cmd);
        if (error)
                goto out_putf;
        if (can_sleep)
                lock->fl_flags |= FL_SLEEP;
 
-       error = security_file_lock(filp, lock->fl_type);
+       error = security_file_lock(f.file, lock->fl_type);
        if (error)
                goto out_free;
 
-       if (filp->f_op && filp->f_op->flock)
-               error = filp->f_op->flock(filp,
+       if (f.file->f_op && f.file->f_op->flock)
+               error = f.file->f_op->flock(f.file,
                                          (can_sleep) ? F_SETLKW : F_SETLK,
                                          lock);
        else
-               error = flock_lock_file_wait(filp, lock);
+               error = flock_lock_file_wait(f.file, lock);
 
  out_free:
        locks_free_lock(lock);
 
  out_putf:
-       fput_light(filp, fput_needed);
+       fdput(f);
  out:
        return error;
 }
index 6984562738d36bc4142a3e0556730ae9e3bf3a57..adb90116d36b1b2b0f0328a58cae9cdf0dde1794 100644 (file)
@@ -208,8 +208,8 @@ static void logfs_init_inode(struct super_block *sb, struct inode *inode)
        li->li_height   = 0;
        li->li_used_bytes = 0;
        li->li_block    = NULL;
-       inode->i_uid    = 0;
-       inode->i_gid    = 0;
+       i_uid_write(inode, 0);
+       i_gid_write(inode, 0);
        inode->i_size   = 0;
        inode->i_blocks = 0;
        inode->i_ctime  = CURRENT_TIME;
@@ -417,5 +417,10 @@ int logfs_init_inode_cache(void)
 
 void logfs_destroy_inode_cache(void)
 {
+       /*
+        * Make sure all delayed rcu free inodes are flushed before we
+        * destroy cache.
+        */
+       rcu_barrier();
        kmem_cache_destroy(logfs_inode_cache);
 }
index 5be0abef603d4f82af9e59aaca639118e280476b..e1a3b6bf63244237215824021d87f553bf4482a6 100644 (file)
@@ -119,8 +119,8 @@ static void logfs_disk_to_inode(struct logfs_disk_inode *di, struct inode*inode)
        inode->i_mode   = be16_to_cpu(di->di_mode);
        li->li_height   = di->di_height;
        li->li_flags    = be32_to_cpu(di->di_flags);
-       inode->i_uid    = be32_to_cpu(di->di_uid);
-       inode->i_gid    = be32_to_cpu(di->di_gid);
+       i_uid_write(inode, be32_to_cpu(di->di_uid));
+       i_gid_write(inode, be32_to_cpu(di->di_gid));
        inode->i_size   = be64_to_cpu(di->di_size);
        logfs_set_blocks(inode, be64_to_cpu(di->di_used_bytes));
        inode->i_atime  = be64_to_timespec(di->di_atime);
@@ -156,8 +156,8 @@ static void logfs_inode_to_disk(struct inode *inode, struct logfs_disk_inode*di)
        di->di_height   = li->li_height;
        di->di_pad      = 0;
        di->di_flags    = cpu_to_be32(li->li_flags);
-       di->di_uid      = cpu_to_be32(inode->i_uid);
-       di->di_gid      = cpu_to_be32(inode->i_gid);
+       di->di_uid      = cpu_to_be32(i_uid_read(inode));
+       di->di_gid      = cpu_to_be32(i_gid_read(inode));
        di->di_size     = cpu_to_be64(i_size_read(inode));
        di->di_used_bytes = cpu_to_be64(li->li_used_bytes);
        di->di_atime    = timespec_to_be64(inode->i_atime);
index 2a503ad020d5da4bd1de5442e306f5b98a0b02d9..4fc5f8ab1c44a47534e1b2b9a0bb338aea451dc7 100644 (file)
@@ -100,6 +100,11 @@ static int init_inodecache(void)
 
 static void destroy_inodecache(void)
 {
+       /*
+        * Make sure all delayed rcu free inodes are flushed before we
+        * destroy cache.
+        */
+       rcu_barrier();
        kmem_cache_destroy(minix_inode_cachep);
 }
 
@@ -460,8 +465,8 @@ static struct inode *V1_minix_iget(struct inode *inode)
                return ERR_PTR(-EIO);
        }
        inode->i_mode = raw_inode->i_mode;
-       inode->i_uid = (uid_t)raw_inode->i_uid;
-       inode->i_gid = (gid_t)raw_inode->i_gid;
+       i_uid_write(inode, raw_inode->i_uid);
+       i_gid_write(inode, raw_inode->i_gid);
        set_nlink(inode, raw_inode->i_nlinks);
        inode->i_size = raw_inode->i_size;
        inode->i_mtime.tv_sec = inode->i_atime.tv_sec = inode->i_ctime.tv_sec = raw_inode->i_time;
@@ -493,8 +498,8 @@ static struct inode *V2_minix_iget(struct inode *inode)
                return ERR_PTR(-EIO);
        }
        inode->i_mode = raw_inode->i_mode;
-       inode->i_uid = (uid_t)raw_inode->i_uid;
-       inode->i_gid = (gid_t)raw_inode->i_gid;
+       i_uid_write(inode, raw_inode->i_uid);
+       i_gid_write(inode, raw_inode->i_gid);
        set_nlink(inode, raw_inode->i_nlinks);
        inode->i_size = raw_inode->i_size;
        inode->i_mtime.tv_sec = raw_inode->i_mtime;
@@ -545,8 +550,8 @@ static struct buffer_head * V1_minix_update_inode(struct inode * inode)
        if (!raw_inode)
                return NULL;
        raw_inode->i_mode = inode->i_mode;
-       raw_inode->i_uid = fs_high2lowuid(inode->i_uid);
-       raw_inode->i_gid = fs_high2lowgid(inode->i_gid);
+       raw_inode->i_uid = fs_high2lowuid(i_uid_read(inode));
+       raw_inode->i_gid = fs_high2lowgid(i_gid_read(inode));
        raw_inode->i_nlinks = inode->i_nlink;
        raw_inode->i_size = inode->i_size;
        raw_inode->i_time = inode->i_mtime.tv_sec;
@@ -572,8 +577,8 @@ static struct buffer_head * V2_minix_update_inode(struct inode * inode)
        if (!raw_inode)
                return NULL;
        raw_inode->i_mode = inode->i_mode;
-       raw_inode->i_uid = fs_high2lowuid(inode->i_uid);
-       raw_inode->i_gid = fs_high2lowgid(inode->i_gid);
+       raw_inode->i_uid = fs_high2lowuid(i_uid_read(inode));
+       raw_inode->i_gid = fs_high2lowgid(i_gid_read(inode));
        raw_inode->i_nlinks = inode->i_nlink;
        raw_inode->i_size = inode->i_size;
        raw_inode->i_mtime = inode->i_mtime.tv_sec;
index dd1ed1b8e98efe048683e81bd1244c483160ad55..aa30d19e9edd75acf05780cad24eadbf37fba910 100644 (file)
@@ -680,7 +680,7 @@ static inline int may_follow_link(struct path *link, struct nameidata *nd)
 
        /* Allowed if owner and follower match. */
        inode = link->dentry->d_inode;
-       if (current_cred()->fsuid == inode->i_uid)
+       if (uid_eq(current_cred()->fsuid, inode->i_uid))
                return 0;
 
        /* Allowed if parent directory not sticky and world-writable. */
@@ -689,7 +689,7 @@ static inline int may_follow_link(struct path *link, struct nameidata *nd)
                return 0;
 
        /* Allowed if parent directory and link owner match. */
-       if (parent->i_uid == inode->i_uid)
+       if (uid_eq(parent->i_uid, inode->i_uid))
                return 0;
 
        path_put_conditional(link, nd);
@@ -759,7 +759,7 @@ static int may_linkat(struct path *link)
        /* Source inode owner (or CAP_FOWNER) can hardlink all they like,
         * otherwise, it must be a safe source.
         */
-       if (cred->fsuid == inode->i_uid || safe_hardlink_source(inode) ||
+       if (uid_eq(cred->fsuid, inode->i_uid) || safe_hardlink_source(inode) ||
            capable(CAP_FOWNER))
                return 0;
 
@@ -1797,8 +1797,6 @@ static int path_init(int dfd, const char *name, unsigned int flags,
                     struct nameidata *nd, struct file **fp)
 {
        int retval = 0;
-       int fput_needed;
-       struct file *file;
 
        nd->last_type = LAST_ROOT; /* if there are only slashes... */
        nd->flags = flags | LOOKUP_JUMPED;
@@ -1850,44 +1848,41 @@ static int path_init(int dfd, const char *name, unsigned int flags,
                        get_fs_pwd(current->fs, &nd->path);
                }
        } else {
+               struct fd f = fdget_raw(dfd);
                struct dentry *dentry;
 
-               file = fget_raw_light(dfd, &fput_needed);
-               retval = -EBADF;
-               if (!file)
-                       goto out_fail;
+               if (!f.file)
+                       return -EBADF;
 
-               dentry = file->f_path.dentry;
+               dentry = f.file->f_path.dentry;
 
                if (*name) {
-                       retval = -ENOTDIR;
-                       if (!S_ISDIR(dentry->d_inode->i_mode))
-                               goto fput_fail;
+                       if (!S_ISDIR(dentry->d_inode->i_mode)) {
+                               fdput(f);
+                               return -ENOTDIR;
+                       }
 
                        retval = inode_permission(dentry->d_inode, MAY_EXEC);
-                       if (retval)
-                               goto fput_fail;
+                       if (retval) {
+                               fdput(f);
+                               return retval;
+                       }
                }
 
-               nd->path = file->f_path;
+               nd->path = f.file->f_path;
                if (flags & LOOKUP_RCU) {
-                       if (fput_needed)
-                               *fp = file;
+                       if (f.need_put)
+                               *fp = f.file;
                        nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
                        lock_rcu_walk();
                } else {
-                       path_get(&file->f_path);
-                       fput_light(file, fput_needed);
+                       path_get(&nd->path);
+                       fdput(f);
                }
        }
 
        nd->inode = nd->path.dentry->d_inode;
        return 0;
-
-fput_fail:
-       fput_light(file, fput_needed);
-out_fail:
-       return retval;
 }
 
 static inline int lookup_last(struct nameidata *nd, struct path *path)
@@ -3971,7 +3966,7 @@ EXPORT_SYMBOL(user_path_at);
 EXPORT_SYMBOL(follow_down_one);
 EXPORT_SYMBOL(follow_down);
 EXPORT_SYMBOL(follow_up);
-EXPORT_SYMBOL(get_write_access); /* binfmt_aout */
+EXPORT_SYMBOL(get_write_access); /* nfsd */
 EXPORT_SYMBOL(getname);
 EXPORT_SYMBOL(lock_rename);
 EXPORT_SYMBOL(lookup_one_len);
index 333df07ae3bd2387e0425fd2523de5e95b05667b..d7e9fe77188a6869073bae3daa8c5a731d2a1edb 100644 (file)
@@ -89,6 +89,11 @@ static int init_inodecache(void)
 
 static void destroy_inodecache(void)
 {
+       /*
+        * Make sure all delayed rcu free inodes are flushed before we
+        * destroy cache.
+        */
+       rcu_barrier();
        kmem_cache_destroy(ncp_inode_cachep);
 }
 
@@ -314,11 +319,11 @@ static void ncp_stop_tasks(struct ncp_server *server) {
        release_sock(sk);
        del_timer_sync(&server->timeout_tm);
 
-       flush_work_sync(&server->rcv.tq);
+       flush_work(&server->rcv.tq);
        if (sk->sk_socket->type == SOCK_STREAM)
-               flush_work_sync(&server->tx.tq);
+               flush_work(&server->tx.tq);
        else
-               flush_work_sync(&server->timeout_tq);
+               flush_work(&server->timeout_tq);
 }
 
 static int  ncp_show_options(struct seq_file *seq, struct dentry *root)
index 9b47610338f59f03f6b4fdc0280d6aa61c266d4f..e4c716d374a86b16352f539a1cdd364c652986ad 100644 (file)
@@ -1571,6 +1571,11 @@ static int __init nfs_init_inodecache(void)
 
 static void nfs_destroy_inodecache(void)
 {
+       /*
+        * Make sure all delayed rcu free inodes are flushed before we
+        * destroy cache.
+        */
+       rcu_barrier();
        kmem_cache_destroy(nfs_inode_cachep);
 }
 
index e4498dc351a834fcc35722645abfa50eaefa80fd..4a1aafba6a20030532ba589ac9db021b10c21ceb 100644 (file)
@@ -70,7 +70,7 @@ ssize_t nfs3_getxattr(struct dentry *dentry, const char *name,
                if (type == ACL_TYPE_ACCESS && acl->a_count == 0)
                        error = -ENODATA;
                else
-                       error = posix_acl_to_xattr(acl, buffer, size);
+                       error = posix_acl_to_xattr(&init_user_ns, acl, buffer, size);
                posix_acl_release(acl);
        } else
                error = -ENODATA;
@@ -92,7 +92,7 @@ int nfs3_setxattr(struct dentry *dentry, const char *name,
        else
                return -EOPNOTSUPP;
 
-       acl = posix_acl_from_xattr(value, size);
+       acl = posix_acl_from_xattr(&init_user_ns, value, size);
        if (IS_ERR(acl))
                return PTR_ERR(acl);
        error = nfs3_proc_setacl(inode, type, acl);
index 6930bec91bca22a8f8f7cf0548dcfe9cc6964762..1720d32ffa545670398d16e077a094aae781528b 100644 (file)
@@ -117,8 +117,7 @@ nfs4_schedule_state_renewal(struct nfs_client *clp)
                timeout = 5 * HZ;
        dprintk("%s: requeueing work. Lease period = %ld\n",
                        __func__, (timeout + HZ - 1) / HZ);
-       cancel_delayed_work(&clp->cl_renewd);
-       schedule_delayed_work(&clp->cl_renewd, timeout);
+       mod_delayed_work(system_wq, &clp->cl_renewd, timeout);
        set_bit(NFS_CS_RENEWD, &clp->cl_res_state);
        spin_unlock(&clp->cl_lock);
 }
index cc894eda385a48d0ecb98f2687ef566815648d75..48a1bad373342b10b689927faf002cf444043b22 100644 (file)
@@ -2837,8 +2837,7 @@ static int nfs4_setlease(struct nfs4_delegation *dp, int flag)
                return -ENOMEM;
        }
        fp->fi_lease = fl;
-       fp->fi_deleg_file = fl->fl_file;
-       get_file(fp->fi_deleg_file);
+       fp->fi_deleg_file = get_file(fl->fl_file);
        atomic_set(&fp->fi_delegees, 1);
        list_add(&dp->dl_perfile, &fp->fi_delegations);
        return 0;
index a9269f142cc481ec451c681397f520c1a1cb97f7..3f67b8e122515302709e606ef38881ba01aae581 100644 (file)
@@ -480,7 +480,7 @@ set_nfsv4_acl_one(struct dentry *dentry, struct posix_acl *pacl, char *key)
        if (buf == NULL)
                goto out;
 
-       len = posix_acl_to_xattr(pacl, buf, buflen);
+       len = posix_acl_to_xattr(&init_user_ns, pacl, buf, buflen);
        if (len < 0) {
                error = len;
                goto out;
@@ -549,7 +549,7 @@ _get_posix_acl(struct dentry *dentry, char *key)
        if (buflen <= 0)
                return ERR_PTR(buflen);
 
-       pacl = posix_acl_from_xattr(buf, buflen);
+       pacl = posix_acl_from_xattr(&init_user_ns, buf, buflen);
        kfree(buf);
        return pacl;
 }
@@ -2264,7 +2264,7 @@ nfsd_get_posix_acl(struct svc_fh *fhp, int type)
        if (size < 0)
                return ERR_PTR(size);
 
-       acl = posix_acl_from_xattr(value, size);
+       acl = posix_acl_from_xattr(&init_user_ns, value, size);
        kfree(value);
        return acl;
 }
@@ -2297,7 +2297,7 @@ nfsd_set_posix_acl(struct svc_fh *fhp, int type, struct posix_acl *acl)
                value = kmalloc(size, GFP_KERNEL);
                if (!value)
                        return -ENOMEM;
-               error = posix_acl_to_xattr(acl, value, size);
+               error = posix_acl_to_xattr(&init_user_ns, acl, value, size);
                if (error < 0)
                        goto getout;
                size = error;
index 6e2c3db976b2a8d4b2d5bb6fdd877660fb86b0b7..4d31d2cca7fdc3eaff7643cb67f620bce1cfd069 100644 (file)
@@ -401,8 +401,8 @@ int nilfs_read_inode_common(struct inode *inode,
        int err;
 
        inode->i_mode = le16_to_cpu(raw_inode->i_mode);
-       inode->i_uid = (uid_t)le32_to_cpu(raw_inode->i_uid);
-       inode->i_gid = (gid_t)le32_to_cpu(raw_inode->i_gid);
+       i_uid_write(inode, le32_to_cpu(raw_inode->i_uid));
+       i_gid_write(inode, le32_to_cpu(raw_inode->i_gid));
        set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
        inode->i_size = le64_to_cpu(raw_inode->i_size);
        inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
@@ -590,8 +590,8 @@ void nilfs_write_inode_common(struct inode *inode,
        struct nilfs_inode_info *ii = NILFS_I(inode);
 
        raw_inode->i_mode = cpu_to_le16(inode->i_mode);
-       raw_inode->i_uid = cpu_to_le32(inode->i_uid);
-       raw_inode->i_gid = cpu_to_le32(inode->i_gid);
+       raw_inode->i_uid = cpu_to_le32(i_uid_read(inode));
+       raw_inode->i_gid = cpu_to_le32(i_gid_read(inode));
        raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
        raw_inode->i_size = cpu_to_le64(inode->i_size);
        raw_inode->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
index 6a10812711c1d37bca6660530cd6c34cbb7b30fb..3c991dc84f2f2df6f8c9dccd922d8edbf1ff32a5 100644 (file)
@@ -1382,6 +1382,12 @@ static void nilfs_segbuf_init_once(void *obj)
 
 static void nilfs_destroy_cachep(void)
 {
+       /*
+        * Make sure all delayed rcu free inodes are flushed before we
+        * destroy cache.
+        */
+       rcu_barrier();
+
        if (nilfs_inode_cachep)
                kmem_cache_destroy(nilfs_inode_cachep);
        if (nilfs_transaction_cachep)
index d43803669739df471e8e832ced4377f6f75ab015..721d692fa8d4a20dd5bf593d3f732d8b3f913261 100644 (file)
@@ -58,7 +58,9 @@ static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
        return fsnotify_remove_notify_event(group);
 }
 
-static int create_fd(struct fsnotify_group *group, struct fsnotify_event *event)
+static int create_fd(struct fsnotify_group *group,
+                       struct fsnotify_event *event,
+                       struct file **file)
 {
        int client_fd;
        struct file *new_file;
@@ -98,7 +100,7 @@ static int create_fd(struct fsnotify_group *group, struct fsnotify_event *event)
                put_unused_fd(client_fd);
                client_fd = PTR_ERR(new_file);
        } else {
-               fd_install(client_fd, new_file);
+               *file = new_file;
        }
 
        return client_fd;
@@ -106,13 +108,15 @@ static int create_fd(struct fsnotify_group *group, struct fsnotify_event *event)
 
 static int fill_event_metadata(struct fsnotify_group *group,
                                   struct fanotify_event_metadata *metadata,
-                                  struct fsnotify_event *event)
+                                  struct fsnotify_event *event,
+                                  struct file **file)
 {
        int ret = 0;
 
        pr_debug("%s: group=%p metadata=%p event=%p\n", __func__,
                 group, metadata, event);
 
+       *file = NULL;
        metadata->event_len = FAN_EVENT_METADATA_LEN;
        metadata->metadata_len = FAN_EVENT_METADATA_LEN;
        metadata->vers = FANOTIFY_METADATA_VERSION;
@@ -121,7 +125,7 @@ static int fill_event_metadata(struct fsnotify_group *group,
        if (unlikely(event->mask & FAN_Q_OVERFLOW))
                metadata->fd = FAN_NOFD;
        else {
-               metadata->fd = create_fd(group, event);
+               metadata->fd = create_fd(group, event, file);
                if (metadata->fd < 0)
                        ret = metadata->fd;
        }
@@ -220,25 +224,6 @@ static int prepare_for_access_response(struct fsnotify_group *group,
        return 0;
 }
 
-static void remove_access_response(struct fsnotify_group *group,
-                                  struct fsnotify_event *event,
-                                  __s32 fd)
-{
-       struct fanotify_response_event *re;
-
-       if (!(event->mask & FAN_ALL_PERM_EVENTS))
-               return;
-
-       re = dequeue_re(group, fd);
-       if (!re)
-               return;
-
-       BUG_ON(re->event != event);
-
-       kmem_cache_free(fanotify_response_event_cache, re);
-
-       return;
-}
 #else
 static int prepare_for_access_response(struct fsnotify_group *group,
                                       struct fsnotify_event *event,
@@ -247,12 +232,6 @@ static int prepare_for_access_response(struct fsnotify_group *group,
        return 0;
 }
 
-static void remove_access_response(struct fsnotify_group *group,
-                                  struct fsnotify_event *event,
-                                  __s32 fd)
-{
-       return;
-}
 #endif
 
 static ssize_t copy_event_to_user(struct fsnotify_group *group,
@@ -260,31 +239,33 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
                                  char __user *buf)
 {
        struct fanotify_event_metadata fanotify_event_metadata;
+       struct file *f;
        int fd, ret;
 
        pr_debug("%s: group=%p event=%p\n", __func__, group, event);
 
-       ret = fill_event_metadata(group, &fanotify_event_metadata, event);
+       ret = fill_event_metadata(group, &fanotify_event_metadata, event, &f);
        if (ret < 0)
                goto out;
 
        fd = fanotify_event_metadata.fd;
-       ret = prepare_for_access_response(group, event, fd);
-       if (ret)
-               goto out_close_fd;
-
        ret = -EFAULT;
        if (copy_to_user(buf, &fanotify_event_metadata,
                         fanotify_event_metadata.event_len))
-               goto out_kill_access_response;
+               goto out_close_fd;
 
+       ret = prepare_for_access_response(group, event, fd);
+       if (ret)
+               goto out_close_fd;
+
+       fd_install(fd, f);
        return fanotify_event_metadata.event_len;
 
-out_kill_access_response:
-       remove_access_response(group, event, fd);
 out_close_fd:
-       if (fd != FAN_NOFD)
-               sys_close(fd);
+       if (fd != FAN_NOFD) {
+               put_unused_fd(fd);
+               fput(f);
+       }
 out:
 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
        if (event->mask & FAN_ALL_PERM_EVENTS) {
@@ -470,24 +451,22 @@ static int fanotify_find_path(int dfd, const char __user *filename,
                 dfd, filename, flags);
 
        if (filename == NULL) {
-               struct file *file;
-               int fput_needed;
+               struct fd f = fdget(dfd);
 
                ret = -EBADF;
-               file = fget_light(dfd, &fput_needed);
-               if (!file)
+               if (!f.file)
                        goto out;
 
                ret = -ENOTDIR;
                if ((flags & FAN_MARK_ONLYDIR) &&
-                   !(S_ISDIR(file->f_path.dentry->d_inode->i_mode))) {
-                       fput_light(file, fput_needed);
+                   !(S_ISDIR(f.file->f_path.dentry->d_inode->i_mode))) {
+                       fdput(f);
                        goto out;
                }
 
-               *path = file->f_path;
+               *path = f.file->f_path;
                path_get(path);
-               fput_light(file, fput_needed);
+               fdput(f);
        } else {
                unsigned int lookup_flags = 0;
 
@@ -767,9 +746,9 @@ SYSCALL_DEFINE(fanotify_mark)(int fanotify_fd, unsigned int flags,
        struct inode *inode = NULL;
        struct vfsmount *mnt = NULL;
        struct fsnotify_group *group;
-       struct file *filp;
+       struct fd f;
        struct path path;
-       int ret, fput_needed;
+       int ret;
 
        pr_debug("%s: fanotify_fd=%d flags=%x dfd=%d pathname=%p mask=%llx\n",
                 __func__, fanotify_fd, flags, dfd, pathname, mask);
@@ -803,15 +782,15 @@ SYSCALL_DEFINE(fanotify_mark)(int fanotify_fd, unsigned int flags,
 #endif
                return -EINVAL;
 
-       filp = fget_light(fanotify_fd, &fput_needed);
-       if (unlikely(!filp))
+       f = fdget(fanotify_fd);
+       if (unlikely(!f.file))
                return -EBADF;
 
        /* verify that this is indeed an fanotify instance */
        ret = -EINVAL;
-       if (unlikely(filp->f_op != &fanotify_fops))
+       if (unlikely(f.file->f_op != &fanotify_fops))
                goto fput_and_out;
-       group = filp->private_data;
+       group = f.file->private_data;
 
        /*
         * group->priority == FS_PRIO_0 == FAN_CLASS_NOTIF.  These are not
@@ -858,7 +837,7 @@ SYSCALL_DEFINE(fanotify_mark)(int fanotify_fd, unsigned int flags,
 
        path_put(&path);
 fput_and_out:
-       fput_light(filp, fput_needed);
+       fdput(f);
        return ret;
 }
 
index 8445fbc8985cae9c7357f59f97fface6e482815b..c311dda054a31efc1cb6d7e1f75f938857a5b604 100644 (file)
@@ -757,16 +757,16 @@ SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
        struct fsnotify_group *group;
        struct inode *inode;
        struct path path;
-       struct file *filp;
-       int ret, fput_needed;
+       struct fd f;
+       int ret;
        unsigned flags = 0;
 
-       filp = fget_light(fd, &fput_needed);
-       if (unlikely(!filp))
+       f = fdget(fd);
+       if (unlikely(!f.file))
                return -EBADF;
 
        /* verify that this is indeed an inotify instance */
-       if (unlikely(filp->f_op != &inotify_fops)) {
+       if (unlikely(f.file->f_op != &inotify_fops)) {
                ret = -EINVAL;
                goto fput_and_out;
        }
@@ -782,13 +782,13 @@ SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
 
        /* inode held in place by reference to path; group by fget on fd */
        inode = path.dentry->d_inode;
-       group = filp->private_data;
+       group = f.file->private_data;
 
        /* create/update an inode mark */
        ret = inotify_update_watch(group, inode, mask);
        path_put(&path);
 fput_and_out:
-       fput_light(filp, fput_needed);
+       fdput(f);
        return ret;
 }
 
@@ -796,19 +796,19 @@ SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd)
 {
        struct fsnotify_group *group;
        struct inotify_inode_mark *i_mark;
-       struct file *filp;
-       int ret = 0, fput_needed;
+       struct fd f;
+       int ret = 0;
 
-       filp = fget_light(fd, &fput_needed);
-       if (unlikely(!filp))
+       f = fdget(fd);
+       if (unlikely(!f.file))
                return -EBADF;
 
        /* verify that this is indeed an inotify instance */
        ret = -EINVAL;
-       if (unlikely(filp->f_op != &inotify_fops))
+       if (unlikely(f.file->f_op != &inotify_fops))
                goto out;
 
-       group = filp->private_data;
+       group = f.file->private_data;
 
        ret = -EINVAL;
        i_mark = inotify_idr_find(group, wd);
@@ -823,7 +823,7 @@ SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd)
        fsnotify_put_mark(&i_mark->fsn_mark);
 
 out:
-       fput_light(filp, fput_needed);
+       fdput(f);
        return ret;
 }
 
index c6dbd3db6ca8817fba7495a5456ce479e2351b03..1d27331e6fc945ee1ddba591d47ddf1ef1bafbd1 100644 (file)
@@ -2124,7 +2124,8 @@ int ntfs_read_inode_mount(struct inode *vi)
                         * ntfs_read_inode() will have set up the default ones.
                         */
                        /* Set uid and gid to root. */
-                       vi->i_uid = vi->i_gid = 0;
+                       vi->i_uid = GLOBAL_ROOT_UID;
+                       vi->i_gid = GLOBAL_ROOT_GID;
                        /* Regular file. No access for anyone. */
                        vi->i_mode = S_IFREG;
                        /* No VFS initiated operations allowed for $MFT. */
@@ -2312,8 +2313,8 @@ int ntfs_show_options(struct seq_file *sf, struct dentry *root)
        ntfs_volume *vol = NTFS_SB(root->d_sb);
        int i;
 
-       seq_printf(sf, ",uid=%i", vol->uid);
-       seq_printf(sf, ",gid=%i", vol->gid);
+       seq_printf(sf, ",uid=%i", from_kuid_munged(&init_user_ns, vol->uid));
+       seq_printf(sf, ",gid=%i", from_kgid_munged(&init_user_ns, vol->gid));
        if (vol->fmask == vol->dmask)
                seq_printf(sf, ",umask=0%o", vol->fmask);
        else {
index 2bc149d6a784e74ba485216d888ccd95f12e9516..4a8289f8b16c87495e8e6b94fccc6f83c5643c7e 100644 (file)
@@ -102,8 +102,8 @@ static bool parse_options(ntfs_volume *vol, char *opt)
        char *p, *v, *ov;
        static char *utf8 = "utf8";
        int errors = 0, sloppy = 0;
-       uid_t uid = (uid_t)-1;
-       gid_t gid = (gid_t)-1;
+       kuid_t uid = INVALID_UID;
+       kgid_t gid = INVALID_GID;
        umode_t fmask = (umode_t)-1, dmask = (umode_t)-1;
        int mft_zone_multiplier = -1, on_errors = -1;
        int show_sys_files = -1, case_sensitive = -1, disable_sparse = -1;
@@ -128,6 +128,30 @@ static bool parse_options(ntfs_volume *vol, char *opt)
                if (*v)                                                 \
                        goto needs_val;                                 \
        }
+#define NTFS_GETOPT_UID(option, variable)                              \
+       if (!strcmp(p, option)) {                                       \
+               uid_t uid_value;                                        \
+               if (!v || !*v)                                          \
+                       goto needs_arg;                                 \
+               uid_value = simple_strtoul(ov = v, &v, 0);              \
+               if (*v)                                                 \
+                       goto needs_val;                                 \
+               variable = make_kuid(current_user_ns(), uid_value);     \
+               if (!uid_valid(variable))                               \
+                       goto needs_val;                                 \
+       }
+#define NTFS_GETOPT_GID(option, variable)                              \
+       if (!strcmp(p, option)) {                                       \
+               gid_t gid_value;                                        \
+               if (!v || !*v)                                          \
+                       goto needs_arg;                                 \
+               gid_value = simple_strtoul(ov = v, &v, 0);              \
+               if (*v)                                                 \
+                       goto needs_val;                                 \
+               variable = make_kgid(current_user_ns(), gid_value);     \
+               if (!gid_valid(variable))                               \
+                       goto needs_val;                                 \
+       }
 #define NTFS_GETOPT_OCTAL(option, variable)                            \
        if (!strcmp(p, option)) {                                       \
                if (!v || !*v)                                          \
@@ -165,8 +189,8 @@ static bool parse_options(ntfs_volume *vol, char *opt)
        while ((p = strsep(&opt, ","))) {
                if ((v = strchr(p, '=')))
                        *v++ = 0;
-               NTFS_GETOPT("uid", uid)
-               else NTFS_GETOPT("gid", gid)
+               NTFS_GETOPT_UID("uid", uid)
+               else NTFS_GETOPT_GID("gid", gid)
                else NTFS_GETOPT_OCTAL("umask", fmask = dmask)
                else NTFS_GETOPT_OCTAL("fmask", fmask)
                else NTFS_GETOPT_OCTAL("dmask", dmask)
@@ -283,9 +307,9 @@ no_mount_options:
                vol->on_errors = on_errors;
        if (!vol->on_errors || vol->on_errors == ON_ERRORS_RECOVER)
                vol->on_errors |= ON_ERRORS_CONTINUE;
-       if (uid != (uid_t)-1)
+       if (uid_valid(uid))
                vol->uid = uid;
-       if (gid != (gid_t)-1)
+       if (gid_valid(gid))
                vol->gid = gid;
        if (fmask != (umode_t)-1)
                vol->fmask = fmask;
@@ -1023,7 +1047,8 @@ static bool load_and_init_mft_mirror(ntfs_volume *vol)
         * ntfs_read_inode() will have set up the default ones.
         */
        /* Set uid and gid to root. */
-       tmp_ino->i_uid = tmp_ino->i_gid = 0;
+       tmp_ino->i_uid = GLOBAL_ROOT_UID;
+       tmp_ino->i_gid = GLOBAL_ROOT_GID;
        /* Regular file.  No access for anyone. */
        tmp_ino->i_mode = S_IFREG;
        /* No VFS initiated operations allowed for $MFTMirr. */
@@ -3168,6 +3193,12 @@ static void __exit exit_ntfs_fs(void)
        ntfs_debug("Unregistering NTFS driver.");
 
        unregister_filesystem(&ntfs_fs_type);
+
+       /*
+        * Make sure all delayed rcu free inodes are flushed before we
+        * destroy cache.
+        */
+       rcu_barrier();
        kmem_cache_destroy(ntfs_big_inode_cache);
        kmem_cache_destroy(ntfs_inode_cache);
        kmem_cache_destroy(ntfs_name_cache);
index 15e3ba8d521a3ba1e9095374ebea75becab48ebd..4f579b02bc760a50fc8ed602c1dc819402a2fe5a 100644 (file)
@@ -25,6 +25,7 @@
 #define _LINUX_NTFS_VOLUME_H
 
 #include <linux/rwsem.h>
+#include <linux/uidgid.h>
 
 #include "types.h"
 #include "layout.h"
@@ -46,8 +47,8 @@ typedef struct {
                                           sized blocks on the device. */
        /* Configuration provided by user at mount time. */
        unsigned long flags;            /* Miscellaneous flags, see below. */
-       uid_t uid;                      /* uid that files will be mounted as. */
-       gid_t gid;                      /* gid that files will be mounted as. */
+       kuid_t uid;                     /* uid that files will be mounted as. */
+       kgid_t gid;                     /* gid that files will be mounted as. */
        umode_t fmask;                  /* The mask for file permissions. */
        umode_t dmask;                  /* The mask for directory
                                           permissions. */
index a7219075b4deddcd7881a9b3a5c1586f731d36f2..260b16281fc349b216444c4c56b04acf8e04996c 100644 (file)
@@ -452,7 +452,7 @@ static int ocfs2_xattr_get_acl(struct dentry *dentry, const char *name,
                return PTR_ERR(acl);
        if (acl == NULL)
                return -ENODATA;
-       ret = posix_acl_to_xattr(acl, buffer, size);
+       ret = posix_acl_to_xattr(&init_user_ns, acl, buffer, size);
        posix_acl_release(acl);
 
        return ret;
@@ -475,7 +475,7 @@ static int ocfs2_xattr_set_acl(struct dentry *dentry, const char *name,
                return -EPERM;
 
        if (value) {
-               acl = posix_acl_from_xattr(value, size);
+               acl = posix_acl_from_xattr(&init_user_ns, value, size);
                if (IS_ERR(acl))
                        return PTR_ERR(acl);
                else if (acl) {
index a4e855e3690e6ab844d37788b71649975321cb19..f7c648d7d6bf1b0ecbc88cb6edaf79553eade538 100644 (file)
@@ -1746,8 +1746,8 @@ static ssize_t o2hb_region_dev_write(struct o2hb_region *reg,
        long fd;
        int sectsize;
        char *p = (char *)page;
-       struct file *filp = NULL;
-       struct inode *inode = NULL;
+       struct fd f;
+       struct inode *inode;
        ssize_t ret = -EINVAL;
        int live_threshold;
 
@@ -1766,26 +1766,26 @@ static ssize_t o2hb_region_dev_write(struct o2hb_region *reg,
        if (fd < 0 || fd >= INT_MAX)
                goto out;
 
-       filp = fget(fd);
-       if (filp == NULL)
+       f = fdget(fd);
+       if (f.file == NULL)
                goto out;
 
        if (reg->hr_blocks == 0 || reg->hr_start_block == 0 ||
            reg->hr_block_bytes == 0)
-               goto out;
+               goto out2;
 
-       inode = igrab(filp->f_mapping->host);
+       inode = igrab(f.file->f_mapping->host);
        if (inode == NULL)
-               goto out;
+               goto out2;
 
        if (!S_ISBLK(inode->i_mode))
-               goto out;
+               goto out3;
 
-       reg->hr_bdev = I_BDEV(filp->f_mapping->host);
+       reg->hr_bdev = I_BDEV(f.file->f_mapping->host);
        ret = blkdev_get(reg->hr_bdev, FMODE_WRITE | FMODE_READ, NULL);
        if (ret) {
                reg->hr_bdev = NULL;
-               goto out;
+               goto out3;
        }
        inode = NULL;
 
@@ -1797,7 +1797,7 @@ static ssize_t o2hb_region_dev_write(struct o2hb_region *reg,
                     "blocksize %u incorrect for device, expected %d",
                     reg->hr_block_bytes, sectsize);
                ret = -EINVAL;
-               goto out;
+               goto out3;
        }
 
        o2hb_init_region_params(reg);
@@ -1811,13 +1811,13 @@ static ssize_t o2hb_region_dev_write(struct o2hb_region *reg,
        ret = o2hb_map_slot_data(reg);
        if (ret) {
                mlog_errno(ret);
-               goto out;
+               goto out3;
        }
 
        ret = o2hb_populate_slot_data(reg);
        if (ret) {
                mlog_errno(ret);
-               goto out;
+               goto out3;
        }
 
        INIT_DELAYED_WORK(&reg->hr_write_timeout_work, o2hb_write_timeout);
@@ -1847,7 +1847,7 @@ static ssize_t o2hb_region_dev_write(struct o2hb_region *reg,
        if (IS_ERR(hb_task)) {
                ret = PTR_ERR(hb_task);
                mlog_errno(ret);
-               goto out;
+               goto out3;
        }
 
        spin_lock(&o2hb_live_lock);
@@ -1863,7 +1863,7 @@ static ssize_t o2hb_region_dev_write(struct o2hb_region *reg,
 
        if (reg->hr_aborted_start) {
                ret = -EIO;
-               goto out;
+               goto out3;
        }
 
        /* Ok, we were woken.  Make sure it wasn't by drop_item() */
@@ -1882,11 +1882,11 @@ static ssize_t o2hb_region_dev_write(struct o2hb_region *reg,
                printk(KERN_NOTICE "o2hb: Heartbeat started on region %s (%s)\n",
                       config_item_name(&reg->hr_item), reg->hr_dev_name);
 
+out3:
+       iput(inode);
+out2:
+       fdput(f);
 out:
-       if (filp)
-               fput(filp);
-       if (inode)
-               iput(inode);
        if (ret < 0) {
                if (reg->hr_bdev) {
                        blkdev_put(reg->hr_bdev, FMODE_READ|FMODE_WRITE);
index 8f9cea1597af0994d4dfac6994159a870cdcd76c..c19897d0fe142a5a9926da9552572daf78982938 100644 (file)
@@ -327,5 +327,5 @@ void o2quo_exit(void)
 {
        struct o2quo_state *qs = &o2quo_state;
 
-       flush_work_sync(&qs->qs_work);
+       flush_work(&qs->qs_work);
 }
index 83b6f98e0665433bda36a2f1d4f34a1ce7bec4cd..16b712d260d4fcb628fa67e13f1684b9239698d6 100644 (file)
@@ -691,6 +691,11 @@ static void __exit exit_dlmfs_fs(void)
        flush_workqueue(user_dlm_worker);
        destroy_workqueue(user_dlm_worker);
 
+       /*
+        * Make sure all delayed rcu free inodes are flushed before we
+        * destroy cache.
+        */
+       rcu_barrier();
        kmem_cache_destroy(dlmfs_inode_cache);
 
        bdi_destroy(&dlmfs_backing_dev_info);
index 46a1f6d7510405bf14a0dbc837d2585cfd64e75d..5a4ee77cec518ab6108514008681ffd74d562bdc 100644 (file)
@@ -1184,8 +1184,7 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
                if (attr->ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid
                    && OCFS2_HAS_RO_COMPAT_FEATURE(sb,
                    OCFS2_FEATURE_RO_COMPAT_USRQUOTA)) {
-                       transfer_to[USRQUOTA] = dqget(sb, attr->ia_uid,
-                                                     USRQUOTA);
+                       transfer_to[USRQUOTA] = dqget(sb, make_kqid_uid(attr->ia_uid));
                        if (!transfer_to[USRQUOTA]) {
                                status = -ESRCH;
                                goto bail_unlock;
@@ -1194,8 +1193,7 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
                if (attr->ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid
                    && OCFS2_HAS_RO_COMPAT_FEATURE(sb,
                    OCFS2_FEATURE_RO_COMPAT_GRPQUOTA)) {
-                       transfer_to[GRPQUOTA] = dqget(sb, attr->ia_gid,
-                                                     GRPQUOTA);
+                       transfer_to[GRPQUOTA] = dqget(sb, make_kqid_gid(attr->ia_gid));
                        if (!transfer_to[GRPQUOTA]) {
                                status = -ESRCH;
                                goto bail_unlock;
index 0a86e302655f3384435ab4843fad50b6bf1a7cae..332a281f217ed021dee419722bd4dd5ca83de9a9 100644 (file)
@@ -95,7 +95,7 @@ static void ocfs2_global_mem2diskdqb(void *dp, struct dquot *dquot)
        struct ocfs2_global_disk_dqblk *d = dp;
        struct mem_dqblk *m = &dquot->dq_dqb;
 
-       d->dqb_id = cpu_to_le32(dquot->dq_id);
+       d->dqb_id = cpu_to_le32(from_kqid(&init_user_ns, dquot->dq_id));
        d->dqb_use_count = cpu_to_le32(OCFS2_DQUOT(dquot)->dq_use_count);
        d->dqb_ihardlimit = cpu_to_le64(m->dqb_ihardlimit);
        d->dqb_isoftlimit = cpu_to_le64(m->dqb_isoftlimit);
@@ -112,11 +112,14 @@ static int ocfs2_global_is_id(void *dp, struct dquot *dquot)
 {
        struct ocfs2_global_disk_dqblk *d = dp;
        struct ocfs2_mem_dqinfo *oinfo =
-                       sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;
+                       sb_dqinfo(dquot->dq_sb, dquot->dq_id.type)->dqi_priv;
 
        if (qtree_entry_unused(&oinfo->dqi_gi, dp))
                return 0;
-       return le32_to_cpu(d->dqb_id) == dquot->dq_id;
+
+       return qid_eq(make_kqid(&init_user_ns, dquot->dq_id.type,
+                               le32_to_cpu(d->dqb_id)),
+                     dquot->dq_id);
 }
 
 struct qtree_fmt_operations ocfs2_global_ops = {
@@ -475,7 +478,7 @@ int __ocfs2_sync_dquot(struct dquot *dquot, int freeing)
 {
        int err, err2;
        struct super_block *sb = dquot->dq_sb;
-       int type = dquot->dq_type;
+       int type = dquot->dq_id.type;
        struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv;
        struct ocfs2_global_disk_dqblk dqblk;
        s64 spacechange, inodechange;
@@ -504,7 +507,8 @@ int __ocfs2_sync_dquot(struct dquot *dquot, int freeing)
        olditime = dquot->dq_dqb.dqb_itime;
        oldbtime = dquot->dq_dqb.dqb_btime;
        ocfs2_global_disk2memdqb(dquot, &dqblk);
-       trace_ocfs2_sync_dquot(dquot->dq_id, dquot->dq_dqb.dqb_curspace,
+       trace_ocfs2_sync_dquot(from_kqid(&init_user_ns, dquot->dq_id),
+                              dquot->dq_dqb.dqb_curspace,
                               (long long)spacechange,
                               dquot->dq_dqb.dqb_curinodes,
                               (long long)inodechange);
@@ -555,8 +559,8 @@ int __ocfs2_sync_dquot(struct dquot *dquot, int freeing)
        err = ocfs2_qinfo_lock(info, freeing);
        if (err < 0) {
                mlog(ML_ERROR, "Failed to lock quota info, losing quota write"
-                              " (type=%d, id=%u)\n", dquot->dq_type,
-                              (unsigned)dquot->dq_id);
+                              " (type=%d, id=%u)\n", dquot->dq_id.type,
+                              (unsigned)from_kqid(&init_user_ns, dquot->dq_id));
                goto out;
        }
        if (freeing)
@@ -591,9 +595,10 @@ static int ocfs2_sync_dquot_helper(struct dquot *dquot, unsigned long type)
        struct ocfs2_super *osb = OCFS2_SB(sb);
        int status = 0;
 
-       trace_ocfs2_sync_dquot_helper(dquot->dq_id, dquot->dq_type,
+       trace_ocfs2_sync_dquot_helper(from_kqid(&init_user_ns, dquot->dq_id),
+                                     dquot->dq_id.type,
                                      type, sb->s_id);
-       if (type != dquot->dq_type)
+       if (type != dquot->dq_id.type)
                goto out;
        status = ocfs2_lock_global_qf(oinfo, 1);
        if (status < 0)
@@ -643,7 +648,8 @@ static int ocfs2_write_dquot(struct dquot *dquot)
        struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb);
        int status = 0;
 
-       trace_ocfs2_write_dquot(dquot->dq_id, dquot->dq_type);
+       trace_ocfs2_write_dquot(from_kqid(&init_user_ns, dquot->dq_id),
+                               dquot->dq_id.type);
 
        handle = ocfs2_start_trans(osb, OCFS2_QWRITE_CREDITS);
        if (IS_ERR(handle)) {
@@ -677,11 +683,12 @@ static int ocfs2_release_dquot(struct dquot *dquot)
 {
        handle_t *handle;
        struct ocfs2_mem_dqinfo *oinfo =
-                       sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;
+                       sb_dqinfo(dquot->dq_sb, dquot->dq_id.type)->dqi_priv;
        struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb);
        int status = 0;
 
-       trace_ocfs2_release_dquot(dquot->dq_id, dquot->dq_type);
+       trace_ocfs2_release_dquot(from_kqid(&init_user_ns, dquot->dq_id),
+                                 dquot->dq_id.type);
 
        mutex_lock(&dquot->dq_lock);
        /* Check whether we are not racing with some other dqget() */
@@ -691,7 +698,7 @@ static int ocfs2_release_dquot(struct dquot *dquot)
        if (status < 0)
                goto out;
        handle = ocfs2_start_trans(osb,
-               ocfs2_calc_qdel_credits(dquot->dq_sb, dquot->dq_type));
+               ocfs2_calc_qdel_credits(dquot->dq_sb, dquot->dq_id.type));
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                mlog_errno(status);
@@ -733,13 +740,14 @@ static int ocfs2_acquire_dquot(struct dquot *dquot)
        int ex = 0;
        struct super_block *sb = dquot->dq_sb;
        struct ocfs2_super *osb = OCFS2_SB(sb);
-       int type = dquot->dq_type;
+       int type = dquot->dq_id.type;
        struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv;
        struct inode *gqinode = info->dqi_gqinode;
        int need_alloc = ocfs2_global_qinit_alloc(sb, type);
        handle_t *handle;
 
-       trace_ocfs2_acquire_dquot(dquot->dq_id, type);
+       trace_ocfs2_acquire_dquot(from_kqid(&init_user_ns, dquot->dq_id),
+                                 type);
        mutex_lock(&dquot->dq_lock);
        /*
         * We need an exclusive lock, because we're going to update use count
@@ -821,12 +829,13 @@ static int ocfs2_mark_dquot_dirty(struct dquot *dquot)
        int sync = 0;
        int status;
        struct super_block *sb = dquot->dq_sb;
-       int type = dquot->dq_type;
+       int type = dquot->dq_id.type;
        struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
        handle_t *handle;
        struct ocfs2_super *osb = OCFS2_SB(sb);
 
-       trace_ocfs2_mark_dquot_dirty(dquot->dq_id, type);
+       trace_ocfs2_mark_dquot_dirty(from_kqid(&init_user_ns, dquot->dq_id),
+                                    type);
 
        /* In case user set some limits, sync dquot immediately to global
         * quota file so that information propagates quicker */
index f100bf70a9066ed1b917b8ec0451c27231840f92..27fe7ee4874cbac39381b90694e0ce7c0c11ce66 100644 (file)
@@ -501,7 +501,9 @@ static int ocfs2_recover_local_quota_file(struct inode *lqinode,
                        }
                        dqblk = (struct ocfs2_local_disk_dqblk *)(qbh->b_data +
                                ol_dqblk_block_off(sb, chunk, bit));
-                       dquot = dqget(sb, le64_to_cpu(dqblk->dqb_id), type);
+                       dquot = dqget(sb,
+                                     make_kqid(&init_user_ns, type,
+                                               le64_to_cpu(dqblk->dqb_id)));
                        if (!dquot) {
                                status = -EIO;
                                mlog(ML_ERROR, "Failed to get quota structure "
@@ -881,7 +883,8 @@ static void olq_set_dquot(struct buffer_head *bh, void *private)
        dqblk = (struct ocfs2_local_disk_dqblk *)(bh->b_data
                + ol_dqblk_block_offset(sb, od->dq_local_off));
 
-       dqblk->dqb_id = cpu_to_le64(od->dq_dquot.dq_id);
+       dqblk->dqb_id = cpu_to_le64(from_kqid(&init_user_ns,
+                                             od->dq_dquot.dq_id));
        spin_lock(&dq_data_lock);
        dqblk->dqb_spacemod = cpu_to_le64(od->dq_dquot.dq_dqb.dqb_curspace -
                                          od->dq_origspace);
@@ -891,7 +894,7 @@ static void olq_set_dquot(struct buffer_head *bh, void *private)
        trace_olq_set_dquot(
                (unsigned long long)le64_to_cpu(dqblk->dqb_spacemod),
                (unsigned long long)le64_to_cpu(dqblk->dqb_inodemod),
-               od->dq_dquot.dq_id);
+               from_kqid(&init_user_ns, od->dq_dquot.dq_id));
 }
 
 /* Write dquot to local quota file */
@@ -900,7 +903,7 @@ int ocfs2_local_write_dquot(struct dquot *dquot)
        struct super_block *sb = dquot->dq_sb;
        struct ocfs2_dquot *od = OCFS2_DQUOT(dquot);
        struct buffer_head *bh;
-       struct inode *lqinode = sb_dqopt(sb)->files[dquot->dq_type];
+       struct inode *lqinode = sb_dqopt(sb)->files[dquot->dq_id.type];
        int status;
 
        status = ocfs2_read_quota_phys_block(lqinode, od->dq_local_phys_blk,
@@ -1221,7 +1224,7 @@ static void olq_alloc_dquot(struct buffer_head *bh, void *private)
 int ocfs2_create_local_dquot(struct dquot *dquot)
 {
        struct super_block *sb = dquot->dq_sb;
-       int type = dquot->dq_type;
+       int type = dquot->dq_id.type;
        struct inode *lqinode = sb_dqopt(sb)->files[type];
        struct ocfs2_quota_chunk *chunk;
        struct ocfs2_dquot *od = OCFS2_DQUOT(dquot);
@@ -1275,7 +1278,7 @@ out:
 int ocfs2_local_release_dquot(handle_t *handle, struct dquot *dquot)
 {
        int status;
-       int type = dquot->dq_type;
+       int type = dquot->dq_id.type;
        struct ocfs2_dquot *od = OCFS2_DQUOT(dquot);
        struct super_block *sb = dquot->dq_sb;
        struct ocfs2_local_disk_chunk *dchunk;
index 68f4541c2db98b26a3aba49234c721e56d56e3c3..0e91ec22a940ea1488bab00191b8f54356578bd5 100644 (file)
@@ -1818,6 +1818,11 @@ static int ocfs2_initialize_mem_caches(void)
 
 static void ocfs2_free_mem_caches(void)
 {
+       /*
+        * Make sure all delayed rcu free inodes are flushed before we
+        * destroy cache.
+        */
+       rcu_barrier();
        if (ocfs2_inode_cachep)
                kmem_cache_destroy(ocfs2_inode_cachep);
        ocfs2_inode_cachep = NULL;
index e6213b3725d1b60cdc0b66a40c406df1f5d401c2..25d715c7c87abdd373918da6a42489332c88e3c0 100644 (file)
@@ -391,12 +391,16 @@ static int parse_options(char *options, struct omfs_sb_info *sbi)
                case Opt_uid:
                        if (match_int(&args[0], &option))
                                return 0;
-                       sbi->s_uid = option;
+                       sbi->s_uid = make_kuid(current_user_ns(), option);
+                       if (!uid_valid(sbi->s_uid))
+                               return 0;
                        break;
                case Opt_gid:
                        if (match_int(&args[0], &option))
                                return 0;
-                       sbi->s_gid = option;
+                       sbi->s_gid = make_kgid(current_user_ns(), option);
+                       if (!gid_valid(sbi->s_gid))
+                               return 0;
                        break;
                case Opt_umask:
                        if (match_octal(&args[0], &option))
index 8941f12c6b01b49286733407b9e1918d968e22aa..f0f8bc75e6094fa014ab76f46bcba1f73afe3808 100644 (file)
@@ -19,8 +19,8 @@ struct omfs_sb_info {
        unsigned long **s_imap;
        int s_imap_size;
        struct mutex s_bitmap_lock;
-       int s_uid;
-       int s_gid;
+       kuid_t s_uid;
+       kgid_t s_gid;
        int s_dmask;
        int s_fmask;
 };
index e1f2cdb91a4dc494473986f1b0c8b91f23614a43..44da0feeca2c94c980c2723bda63b1fcf74caedc 100644 (file)
--- a/fs/open.c
+++ b/fs/open.c
@@ -132,27 +132,27 @@ SYSCALL_DEFINE2(truncate, const char __user *, path, long, length)
 
 static long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
 {
-       struct inode * inode;
+       struct inode *inode;
        struct dentry *dentry;
-       struct file * file;
+       struct fd f;
        int error;
 
        error = -EINVAL;
        if (length < 0)
                goto out;
        error = -EBADF;
-       file = fget(fd);
-       if (!file)
+       f = fdget(fd);
+       if (!f.file)
                goto out;
 
        /* explicitly opened as large or we are on 64-bit box */
-       if (file->f_flags & O_LARGEFILE)
+       if (f.file->f_flags & O_LARGEFILE)
                small = 0;
 
-       dentry = file->f_path.dentry;
+       dentry = f.file->f_path.dentry;
        inode = dentry->d_inode;
        error = -EINVAL;
-       if (!S_ISREG(inode->i_mode) || !(file->f_mode & FMODE_WRITE))
+       if (!S_ISREG(inode->i_mode) || !(f.file->f_mode & FMODE_WRITE))
                goto out_putf;
 
        error = -EINVAL;
@@ -165,14 +165,14 @@ static long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
                goto out_putf;
 
        sb_start_write(inode->i_sb);
-       error = locks_verify_truncate(inode, file, length);
+       error = locks_verify_truncate(inode, f.file, length);
        if (!error)
-               error = security_path_truncate(&file->f_path);
+               error = security_path_truncate(&f.file->f_path);
        if (!error)
-               error = do_truncate(dentry, length, ATTR_MTIME|ATTR_CTIME, file);
+               error = do_truncate(dentry, length, ATTR_MTIME|ATTR_CTIME, f.file);
        sb_end_write(inode->i_sb);
 out_putf:
-       fput(file);
+       fdput(f);
 out:
        return error;
 }
@@ -276,15 +276,13 @@ int do_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
 
 SYSCALL_DEFINE(fallocate)(int fd, int mode, loff_t offset, loff_t len)
 {
-       struct file *file;
+       struct fd f = fdget(fd);
        int error = -EBADF;
 
-       file = fget(fd);
-       if (file) {
-               error = do_fallocate(file, mode, offset, len);
-               fput(file);
+       if (f.file) {
+               error = do_fallocate(f.file, mode, offset, len);
+               fdput(f);
        }
-
        return error;
 }
 
@@ -400,16 +398,15 @@ out:
 
 SYSCALL_DEFINE1(fchdir, unsigned int, fd)
 {
-       struct file *file;
+       struct fd f = fdget_raw(fd);
        struct inode *inode;
-       int error, fput_needed;
+       int error = -EBADF;
 
        error = -EBADF;
-       file = fget_raw_light(fd, &fput_needed);
-       if (!file)
+       if (!f.file)
                goto out;
 
-       inode = file->f_path.dentry->d_inode;
+       inode = f.file->f_path.dentry->d_inode;
 
        error = -ENOTDIR;
        if (!S_ISDIR(inode->i_mode))
@@ -417,9 +414,9 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
 
        error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
        if (!error)
-               set_fs_pwd(current->fs, &file->f_path);
+               set_fs_pwd(current->fs, &f.file->f_path);
 out_putf:
-       fput_light(file, fput_needed);
+       fdput(f);
 out:
        return error;
 }
@@ -534,7 +531,7 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
                newattrs.ia_valid |=
                        ATTR_KILL_SUID | ATTR_KILL_SGID | ATTR_KILL_PRIV;
        mutex_lock(&inode->i_mutex);
-       error = security_path_chown(path, user, group);
+       error = security_path_chown(path, uid, gid);
        if (!error)
                error = notify_change(path->dentry, &newattrs);
        mutex_unlock(&inode->i_mutex);
@@ -582,23 +579,20 @@ SYSCALL_DEFINE3(lchown, const char __user *, filename, uid_t, user, gid_t, group
 
 SYSCALL_DEFINE3(fchown, unsigned int, fd, uid_t, user, gid_t, group)
 {
-       struct file * file;
+       struct fd f = fdget(fd);
        int error = -EBADF;
-       struct dentry * dentry;
 
-       file = fget(fd);
-       if (!file)
+       if (!f.file)
                goto out;
 
-       error = mnt_want_write_file(file);
+       error = mnt_want_write_file(f.file);
        if (error)
                goto out_fput;
-       dentry = file->f_path.dentry;
-       audit_inode(NULL, dentry);
-       error = chown_common(&file->f_path, user, group);
-       mnt_drop_write_file(file);
+       audit_inode(NULL, f.file->f_path.dentry);
+       error = chown_common(&f.file->f_path, user, group);
+       mnt_drop_write_file(f.file);
 out_fput:
-       fput(file);
+       fdput(f);
 out:
        return error;
 }
@@ -803,50 +797,6 @@ struct file *dentry_open(const struct path *path, int flags,
 }
 EXPORT_SYMBOL(dentry_open);
 
-static void __put_unused_fd(struct files_struct *files, unsigned int fd)
-{
-       struct fdtable *fdt = files_fdtable(files);
-       __clear_open_fd(fd, fdt);
-       if (fd < files->next_fd)
-               files->next_fd = fd;
-}
-
-void put_unused_fd(unsigned int fd)
-{
-       struct files_struct *files = current->files;
-       spin_lock(&files->file_lock);
-       __put_unused_fd(files, fd);
-       spin_unlock(&files->file_lock);
-}
-
-EXPORT_SYMBOL(put_unused_fd);
-
-/*
- * Install a file pointer in the fd array.
- *
- * The VFS is full of places where we drop the files lock between
- * setting the open_fds bitmap and installing the file in the file
- * array.  At any such point, we are vulnerable to a dup2() race
- * installing a file in the array before us.  We need to detect this and
- * fput() the struct file we are about to overwrite in this case.
- *
- * It should never happen - if we allow dup2() do it, _really_ bad things
- * will follow.
- */
-
-void fd_install(unsigned int fd, struct file *file)
-{
-       struct files_struct *files = current->files;
-       struct fdtable *fdt;
-       spin_lock(&files->file_lock);
-       fdt = files_fdtable(files);
-       BUG_ON(fdt->fd[fd] != NULL);
-       rcu_assign_pointer(fdt->fd[fd], file);
-       spin_unlock(&files->file_lock);
-}
-
-EXPORT_SYMBOL(fd_install);
-
 static inline int build_open_flags(int flags, umode_t mode, struct open_flags *op)
 {
        int lookup_flags = 0;
@@ -858,7 +808,7 @@ static inline int build_open_flags(int flags, umode_t mode, struct open_flags *o
                op->mode = 0;
 
        /* Must never be set by userspace */
-       flags &= ~FMODE_NONOTIFY;
+       flags &= ~FMODE_NONOTIFY & ~O_CLOEXEC;
 
        /*
         * O_SYNC is implemented as __O_SYNC|O_DSYNC.  As many places only
@@ -1038,23 +988,7 @@ EXPORT_SYMBOL(filp_close);
  */
 SYSCALL_DEFINE1(close, unsigned int, fd)
 {
-       struct file * filp;
-       struct files_struct *files = current->files;
-       struct fdtable *fdt;
-       int retval;
-
-       spin_lock(&files->file_lock);
-       fdt = files_fdtable(files);
-       if (fd >= fdt->max_fds)
-               goto out_unlock;
-       filp = fdt->fd[fd];
-       if (!filp)
-               goto out_unlock;
-       rcu_assign_pointer(fdt->fd[fd], NULL);
-       __clear_close_on_exec(fd, fdt);
-       __put_unused_fd(files, fd);
-       spin_unlock(&files->file_lock);
-       retval = filp_close(filp, files);
+       int retval = __close_fd(current->files, fd);
 
        /* can't restart close syscall because file table entry was cleared */
        if (unlikely(retval == -ERESTARTSYS ||
@@ -1064,10 +998,6 @@ SYSCALL_DEFINE1(close, unsigned int, fd)
                retval = -EINTR;
 
        return retval;
-
-out_unlock:
-       spin_unlock(&files->file_lock);
-       return -EBADF;
 }
 EXPORT_SYMBOL(sys_close);
 
index 4a3477949bca6da23a13161708e0ee7168fdb654..2ad080faca34e38d2ac8c5aed09a33135cae7d29 100644 (file)
@@ -463,6 +463,11 @@ static int __init init_openprom_fs(void)
 static void __exit exit_openprom_fs(void)
 {
        unregister_filesystem(&openprom_fs_type);
+       /*
+        * Make sure all delayed rcu free inodes are flushed before we
+        * destroy cache.
+        */
+       rcu_barrier();
        kmem_cache_destroy(op_inode_cachep);
 }
 
index 8d85d7068c1e8a0f028a7a72dba9c04c24e3ee91..bd3479db4b624e4f9e9d5fcdbf95dc46acb9545f 100644 (file)
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -1064,9 +1064,8 @@ err_inode:
        return err;
 }
 
-int do_pipe_flags(int *fd, int flags)
+static int __do_pipe_flags(int *fd, struct file **files, int flags)
 {
-       struct file *files[2];
        int error;
        int fdw, fdr;
 
@@ -1088,11 +1087,8 @@ int do_pipe_flags(int *fd, int flags)
        fdw = error;
 
        audit_fd_pair(fdr, fdw);
-       fd_install(fdr, files[0]);
-       fd_install(fdw, files[1]);
        fd[0] = fdr;
        fd[1] = fdw;
-
        return 0;
 
  err_fdr:
@@ -1103,21 +1099,38 @@ int do_pipe_flags(int *fd, int flags)
        return error;
 }
 
+int do_pipe_flags(int *fd, int flags)
+{
+       struct file *files[2];
+       int error = __do_pipe_flags(fd, files, flags);
+       if (!error) {
+               fd_install(fd[0], files[0]);
+               fd_install(fd[1], files[1]);
+       }
+       return error;
+}
+
 /*
  * sys_pipe() is the normal C calling standard for creating
  * a pipe. It's not the way Unix traditionally does this, though.
  */
 SYSCALL_DEFINE2(pipe2, int __user *, fildes, int, flags)
 {
+       struct file *files[2];
        int fd[2];
        int error;
 
-       error = do_pipe_flags(fd, flags);
+       error = __do_pipe_flags(fd, files, flags);
        if (!error) {
-               if (copy_to_user(fildes, fd, sizeof(fd))) {
-                       sys_close(fd[0]);
-                       sys_close(fd[1]);
+               if (unlikely(copy_to_user(fildes, fd, sizeof(fd)))) {
+                       fput(files[0]);
+                       fput(files[1]);
+                       put_unused_fd(fd[0]);
+                       put_unused_fd(fd[1]);
                        error = -EFAULT;
+               } else {
+                       fd_install(fd[0], files[0]);
+                       fd_install(fd[1], files[1]);
                }
        }
        return error;
index 5e325a42e33d03bb91d32edeb212ac28fb979ec2..8bd2135b7f82ce181445f8e4804d4b226dca4891 100644 (file)
@@ -78,7 +78,8 @@ posix_acl_valid(const struct posix_acl *acl)
 {
        const struct posix_acl_entry *pa, *pe;
        int state = ACL_USER_OBJ;
-       unsigned int id = 0;  /* keep gcc happy */
+       kuid_t prev_uid = INVALID_UID;
+       kgid_t prev_gid = INVALID_GID;
        int needs_mask = 0;
 
        FOREACH_ACL_ENTRY(pa, acl, pe) {
@@ -87,7 +88,6 @@ posix_acl_valid(const struct posix_acl *acl)
                switch (pa->e_tag) {
                        case ACL_USER_OBJ:
                                if (state == ACL_USER_OBJ) {
-                                       id = 0;
                                        state = ACL_USER;
                                        break;
                                }
@@ -96,16 +96,17 @@ posix_acl_valid(const struct posix_acl *acl)
                        case ACL_USER:
                                if (state != ACL_USER)
                                        return -EINVAL;
-                               if (pa->e_id == ACL_UNDEFINED_ID ||
-                                   pa->e_id < id)
+                               if (!uid_valid(pa->e_uid))
                                        return -EINVAL;
-                               id = pa->e_id + 1;
+                               if (uid_valid(prev_uid) &&
+                                   uid_lte(pa->e_uid, prev_uid))
+                                       return -EINVAL;
+                               prev_uid = pa->e_uid;
                                needs_mask = 1;
                                break;
 
                        case ACL_GROUP_OBJ:
                                if (state == ACL_USER) {
-                                       id = 0;
                                        state = ACL_GROUP;
                                        break;
                                }
@@ -114,10 +115,12 @@ posix_acl_valid(const struct posix_acl *acl)
                        case ACL_GROUP:
                                if (state != ACL_GROUP)
                                        return -EINVAL;
-                               if (pa->e_id == ACL_UNDEFINED_ID ||
-                                   pa->e_id < id)
+                               if (!gid_valid(pa->e_gid))
+                                       return -EINVAL;
+                               if (gid_valid(prev_gid) &&
+                                   gid_lte(pa->e_gid, prev_gid))
                                        return -EINVAL;
-                               id = pa->e_id + 1;
+                               prev_gid = pa->e_gid;
                                needs_mask = 1;
                                break;
 
@@ -195,15 +198,12 @@ posix_acl_from_mode(umode_t mode, gfp_t flags)
                return ERR_PTR(-ENOMEM);
 
        acl->a_entries[0].e_tag  = ACL_USER_OBJ;
-       acl->a_entries[0].e_id   = ACL_UNDEFINED_ID;
        acl->a_entries[0].e_perm = (mode & S_IRWXU) >> 6;
 
        acl->a_entries[1].e_tag  = ACL_GROUP_OBJ;
-       acl->a_entries[1].e_id   = ACL_UNDEFINED_ID;
        acl->a_entries[1].e_perm = (mode & S_IRWXG) >> 3;
 
        acl->a_entries[2].e_tag  = ACL_OTHER;
-       acl->a_entries[2].e_id   = ACL_UNDEFINED_ID;
        acl->a_entries[2].e_perm = (mode & S_IRWXO);
        return acl;
 }
@@ -224,11 +224,11 @@ posix_acl_permission(struct inode *inode, const struct posix_acl *acl, int want)
                 switch(pa->e_tag) {
                         case ACL_USER_OBJ:
                                /* (May have been checked already) */
-                               if (inode->i_uid == current_fsuid())
+                               if (uid_eq(inode->i_uid, current_fsuid()))
                                         goto check_perm;
                                 break;
                         case ACL_USER:
-                               if (pa->e_id == current_fsuid())
+                               if (uid_eq(pa->e_uid, current_fsuid()))
                                         goto mask;
                                break;
                         case ACL_GROUP_OBJ:
@@ -239,7 +239,7 @@ posix_acl_permission(struct inode *inode, const struct posix_acl *acl, int want)
                                 }
                                break;
                         case ACL_GROUP:
-                                if (in_group_p(pa->e_id)) {
+                               if (in_group_p(pa->e_gid)) {
                                        found = 1;
                                        if ((pa->e_perm & want) == want)
                                                goto mask;
index c1c729335924803f92e5530d4197b1eb3bd4907c..99349efbbc2b53781afd3e321a9a52cd92441882 100644 (file)
@@ -8,7 +8,7 @@ proc-y                  := nommu.o task_nommu.o
 proc-$(CONFIG_MMU)     := mmu.o task_mmu.o
 
 proc-y       += inode.o root.o base.o generic.o array.o \
-               proc_tty.o
+               proc_tty.o fd.o
 proc-y += cmdline.o
 proc-y += consoles.o
 proc-y += cpuinfo.o
index 1b6c84cbdb732e5684ccaa823548b8780cf1c16d..d295af993677dda290242adb5e0187deef1bcb7c 100644 (file)
@@ -90,6 +90,7 @@
 #endif
 #include <trace/events/oom.h>
 #include "internal.h"
+#include "fd.h"
 
 /* NOTE:
  *     Implementing inode permission operations in /proc is almost
@@ -136,8 +137,6 @@ struct pid_entry {
                NULL, &proc_single_file_operations,     \
                { .proc_show = show } )
 
-static int proc_fd_permission(struct inode *inode, int mask);
-
 /*
  * Count the number of hardlinks for the pid_entry table, excluding the .
  * and .. links.
@@ -1089,7 +1088,8 @@ static ssize_t proc_loginuid_read(struct file * file, char __user * buf,
        if (!task)
                return -ESRCH;
        length = scnprintf(tmpbuf, TMPBUFLEN, "%u",
-                               audit_get_loginuid(task));
+                          from_kuid(file->f_cred->user_ns,
+                                    audit_get_loginuid(task)));
        put_task_struct(task);
        return simple_read_from_buffer(buf, count, ppos, tmpbuf, length);
 }
@@ -1101,6 +1101,7 @@ static ssize_t proc_loginuid_write(struct file * file, const char __user * buf,
        char *page, *tmp;
        ssize_t length;
        uid_t loginuid;
+       kuid_t kloginuid;
 
        rcu_read_lock();
        if (current != pid_task(proc_pid(inode), PIDTYPE_PID)) {
@@ -1130,7 +1131,13 @@ static ssize_t proc_loginuid_write(struct file * file, const char __user * buf,
                goto out_free_page;
 
        }
-       length = audit_set_loginuid(loginuid);
+       kloginuid = make_kuid(file->f_cred->user_ns, loginuid);
+       if (!uid_valid(kloginuid)) {
+               length = -EINVAL;
+               goto out_free_page;
+       }
+
+       length = audit_set_loginuid(kloginuid);
        if (likely(length == 0))
                length = count;
 
@@ -1492,7 +1499,7 @@ out:
        return error;
 }
 
-static const struct inode_operations proc_pid_link_inode_operations = {
+const struct inode_operations proc_pid_link_inode_operations = {
        .readlink       = proc_pid_readlink,
        .follow_link    = proc_pid_follow_link,
        .setattr        = proc_setattr,
@@ -1501,21 +1508,6 @@ static const struct inode_operations proc_pid_link_inode_operations = {
 
 /* building an inode */
 
-static int task_dumpable(struct task_struct *task)
-{
-       int dumpable = 0;
-       struct mm_struct *mm;
-
-       task_lock(task);
-       mm = task->mm;
-       if (mm)
-               dumpable = get_dumpable(mm);
-       task_unlock(task);
-       if(dumpable == 1)
-               return 1;
-       return 0;
-}
-
 struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *task)
 {
        struct inode * inode;
@@ -1641,15 +1633,6 @@ int pid_revalidate(struct dentry *dentry, unsigned int flags)
        return 0;
 }
 
-static int pid_delete_dentry(const struct dentry * dentry)
-{
-       /* Is the task we represent dead?
-        * If so, then don't put the dentry on the lru list,
-        * kill it immediately.
-        */
-       return !proc_pid(dentry->d_inode)->tasks[PIDTYPE_PID].first;
-}
-
 const struct dentry_operations pid_dentry_operations =
 {
        .d_revalidate   = pid_revalidate,
@@ -1712,289 +1695,6 @@ end_instantiate:
        return filldir(dirent, name, len, filp->f_pos, ino, type);
 }
 
-static unsigned name_to_int(struct dentry *dentry)
-{
-       const char *name = dentry->d_name.name;
-       int len = dentry->d_name.len;
-       unsigned n = 0;
-
-       if (len > 1 && *name == '0')
-               goto out;
-       while (len-- > 0) {
-               unsigned c = *name++ - '0';
-               if (c > 9)
-                       goto out;
-               if (n >= (~0U-9)/10)
-                       goto out;
-               n *= 10;
-               n += c;
-       }
-       return n;
-out:
-       return ~0U;
-}
-
-#define PROC_FDINFO_MAX 64
-
-static int proc_fd_info(struct inode *inode, struct path *path, char *info)
-{
-       struct task_struct *task = get_proc_task(inode);
-       struct files_struct *files = NULL;
-       struct file *file;
-       int fd = proc_fd(inode);
-
-       if (task) {
-               files = get_files_struct(task);
-               put_task_struct(task);
-       }
-       if (files) {
-               /*
-                * We are not taking a ref to the file structure, so we must
-                * hold ->file_lock.
-                */
-               spin_lock(&files->file_lock);
-               file = fcheck_files(files, fd);
-               if (file) {
-                       unsigned int f_flags;
-                       struct fdtable *fdt;
-
-                       fdt = files_fdtable(files);
-                       f_flags = file->f_flags & ~O_CLOEXEC;
-                       if (close_on_exec(fd, fdt))
-                               f_flags |= O_CLOEXEC;
-
-                       if (path) {
-                               *path = file->f_path;
-                               path_get(&file->f_path);
-                       }
-                       if (info)
-                               snprintf(info, PROC_FDINFO_MAX,
-                                        "pos:\t%lli\n"
-                                        "flags:\t0%o\n",
-                                        (long long) file->f_pos,
-                                        f_flags);
-                       spin_unlock(&files->file_lock);
-                       put_files_struct(files);
-                       return 0;
-               }
-               spin_unlock(&files->file_lock);
-               put_files_struct(files);
-       }
-       return -ENOENT;
-}
-
-static int proc_fd_link(struct dentry *dentry, struct path *path)
-{
-       return proc_fd_info(dentry->d_inode, path, NULL);
-}
-
-static int tid_fd_revalidate(struct dentry *dentry, unsigned int flags)
-{
-       struct inode *inode;
-       struct task_struct *task;
-       int fd;
-       struct files_struct *files;
-       const struct cred *cred;
-
-       if (flags & LOOKUP_RCU)
-               return -ECHILD;
-
-       inode = dentry->d_inode;
-       task = get_proc_task(inode);
-       fd = proc_fd(inode);
-
-       if (task) {
-               files = get_files_struct(task);
-               if (files) {
-                       struct file *file;
-                       rcu_read_lock();
-                       file = fcheck_files(files, fd);
-                       if (file) {
-                               unsigned f_mode = file->f_mode;
-
-                               rcu_read_unlock();
-                               put_files_struct(files);
-
-                               if (task_dumpable(task)) {
-                                       rcu_read_lock();
-                                       cred = __task_cred(task);
-                                       inode->i_uid = cred->euid;
-                                       inode->i_gid = cred->egid;
-                                       rcu_read_unlock();
-                               } else {
-                                       inode->i_uid = GLOBAL_ROOT_UID;
-                                       inode->i_gid = GLOBAL_ROOT_GID;
-                               }
-
-                               if (S_ISLNK(inode->i_mode)) {
-                                       unsigned i_mode = S_IFLNK;
-                                       if (f_mode & FMODE_READ)
-                                               i_mode |= S_IRUSR | S_IXUSR;
-                                       if (f_mode & FMODE_WRITE)
-                                               i_mode |= S_IWUSR | S_IXUSR;
-                                       inode->i_mode = i_mode;
-                               }
-
-                               security_task_to_inode(task, inode);
-                               put_task_struct(task);
-                               return 1;
-                       }
-                       rcu_read_unlock();
-                       put_files_struct(files);
-               }
-               put_task_struct(task);
-       }
-       d_drop(dentry);
-       return 0;
-}
-
-static const struct dentry_operations tid_fd_dentry_operations =
-{
-       .d_revalidate   = tid_fd_revalidate,
-       .d_delete       = pid_delete_dentry,
-};
-
-static struct dentry *proc_fd_instantiate(struct inode *dir,
-       struct dentry *dentry, struct task_struct *task, const void *ptr)
-{
-       unsigned fd = (unsigned long)ptr;
-       struct inode *inode;
-       struct proc_inode *ei;
-       struct dentry *error = ERR_PTR(-ENOENT);
-
-       inode = proc_pid_make_inode(dir->i_sb, task);
-       if (!inode)
-               goto out;
-       ei = PROC_I(inode);
-       ei->fd = fd;
-
-       inode->i_mode = S_IFLNK;
-       inode->i_op = &proc_pid_link_inode_operations;
-       inode->i_size = 64;
-       ei->op.proc_get_link = proc_fd_link;
-       d_set_d_op(dentry, &tid_fd_dentry_operations);
-       d_add(dentry, inode);
-       /* Close the race of the process dying before we return the dentry */
-       if (tid_fd_revalidate(dentry, 0))
-               error = NULL;
-
- out:
-       return error;
-}
-
-static struct dentry *proc_lookupfd_common(struct inode *dir,
-                                          struct dentry *dentry,
-                                          instantiate_t instantiate)
-{
-       struct task_struct *task = get_proc_task(dir);
-       unsigned fd = name_to_int(dentry);
-       struct dentry *result = ERR_PTR(-ENOENT);
-
-       if (!task)
-               goto out_no_task;
-       if (fd == ~0U)
-               goto out;
-
-       result = instantiate(dir, dentry, task, (void *)(unsigned long)fd);
-out:
-       put_task_struct(task);
-out_no_task:
-       return result;
-}
-
-static int proc_readfd_common(struct file * filp, void * dirent,
-                             filldir_t filldir, instantiate_t instantiate)
-{
-       struct dentry *dentry = filp->f_path.dentry;
-       struct inode *inode = dentry->d_inode;
-       struct task_struct *p = get_proc_task(inode);
-       unsigned int fd, ino;
-       int retval;
-       struct files_struct * files;
-
-       retval = -ENOENT;
-       if (!p)
-               goto out_no_task;
-       retval = 0;
-
-       fd = filp->f_pos;
-       switch (fd) {
-               case 0:
-                       if (filldir(dirent, ".", 1, 0, inode->i_ino, DT_DIR) < 0)
-                               goto out;
-                       filp->f_pos++;
-               case 1:
-                       ino = parent_ino(dentry);
-                       if (filldir(dirent, "..", 2, 1, ino, DT_DIR) < 0)
-                               goto out;
-                       filp->f_pos++;
-               default:
-                       files = get_files_struct(p);
-                       if (!files)
-                               goto out;
-                       rcu_read_lock();
-                       for (fd = filp->f_pos-2;
-                            fd < files_fdtable(files)->max_fds;
-                            fd++, filp->f_pos++) {
-                               char name[PROC_NUMBUF];
-                               int len;
-                               int rv;
-
-                               if (!fcheck_files(files, fd))
-                                       continue;
-                               rcu_read_unlock();
-
-                               len = snprintf(name, sizeof(name), "%d", fd);
-                               rv = proc_fill_cache(filp, dirent, filldir,
-                                                    name, len, instantiate, p,
-                                                    (void *)(unsigned long)fd);
-                               if (rv < 0)
-                                       goto out_fd_loop;
-                               rcu_read_lock();
-                       }
-                       rcu_read_unlock();
-out_fd_loop:
-                       put_files_struct(files);
-       }
-out:
-       put_task_struct(p);
-out_no_task:
-       return retval;
-}
-
-static struct dentry *proc_lookupfd(struct inode *dir, struct dentry *dentry,
-                                   unsigned int flags)
-{
-       return proc_lookupfd_common(dir, dentry, proc_fd_instantiate);
-}
-
-static int proc_readfd(struct file *filp, void *dirent, filldir_t filldir)
-{
-       return proc_readfd_common(filp, dirent, filldir, proc_fd_instantiate);
-}
-
-static ssize_t proc_fdinfo_read(struct file *file, char __user *buf,
-                                     size_t len, loff_t *ppos)
-{
-       char tmp[PROC_FDINFO_MAX];
-       int err = proc_fd_info(file->f_path.dentry->d_inode, NULL, tmp);
-       if (!err)
-               err = simple_read_from_buffer(buf, len, ppos, tmp, strlen(tmp));
-       return err;
-}
-
-static const struct file_operations proc_fdinfo_file_operations = {
-       .open           = nonseekable_open,
-       .read           = proc_fdinfo_read,
-       .llseek         = no_llseek,
-};
-
-static const struct file_operations proc_fd_operations = {
-       .read           = generic_read_dir,
-       .readdir        = proc_readfd,
-       .llseek         = default_llseek,
-};
-
 #ifdef CONFIG_CHECKPOINT_RESTORE
 
 /*
@@ -2113,7 +1813,7 @@ out:
 }
 
 struct map_files_info {
-       struct file     *file;
+       fmode_t         mode;
        unsigned long   len;
        unsigned char   name[4*sizeof(long)+2]; /* max: %lx-%lx\0 */
 };
@@ -2122,13 +1822,10 @@ static struct dentry *
 proc_map_files_instantiate(struct inode *dir, struct dentry *dentry,
                           struct task_struct *task, const void *ptr)
 {
-       const struct file *file = ptr;
+       fmode_t mode = (fmode_t)(unsigned long)ptr;
        struct proc_inode *ei;
        struct inode *inode;
 
-       if (!file)
-               return ERR_PTR(-ENOENT);
-
        inode = proc_pid_make_inode(dir->i_sb, task);
        if (!inode)
                return ERR_PTR(-ENOENT);
@@ -2140,9 +1837,9 @@ proc_map_files_instantiate(struct inode *dir, struct dentry *dentry,
        inode->i_size = 64;
        inode->i_mode = S_IFLNK;
 
-       if (file->f_mode & FMODE_READ)
+       if (mode & FMODE_READ)
                inode->i_mode |= S_IRUSR;
-       if (file->f_mode & FMODE_WRITE)
+       if (mode & FMODE_WRITE)
                inode->i_mode |= S_IWUSR;
 
        d_set_d_op(dentry, &tid_map_files_dentry_operations);
@@ -2186,7 +1883,8 @@ static struct dentry *proc_map_files_lookup(struct inode *dir,
        if (!vma)
                goto out_no_vma;
 
-       result = proc_map_files_instantiate(dir, dentry, task, vma->vm_file);
+       result = proc_map_files_instantiate(dir, dentry, task,
+                       (void *)(unsigned long)vma->vm_file->f_mode);
 
 out_no_vma:
        up_read(&mm->mmap_sem);
@@ -2287,8 +1985,7 @@ proc_map_files_readdir(struct file *filp, void *dirent, filldir_t filldir)
                                if (++pos <= filp->f_pos)
                                        continue;
 
-                               get_file(vma->vm_file);
-                               info.file = vma->vm_file;
+                               info.mode = vma->vm_file->f_mode;
                                info.len = snprintf(info.name,
                                                sizeof(info.name), "%lx-%lx",
                                                vma->vm_start, vma->vm_end);
@@ -2303,19 +2000,11 @@ proc_map_files_readdir(struct file *filp, void *dirent, filldir_t filldir)
                        ret = proc_fill_cache(filp, dirent, filldir,
                                              p->name, p->len,
                                              proc_map_files_instantiate,
-                                             task, p->file);
+                                             task,
+                                             (void *)(unsigned long)p->mode);
                        if (ret)
                                break;
                        filp->f_pos++;
-                       fput(p->file);
-               }
-               for (; i < nr_files; i++) {
-                       /*
-                        * In case of error don't forget
-                        * to put rest of file refs.
-                        */
-                       p = flex_array_get(fa, i);
-                       fput(p->file);
                }
                if (fa)
                        flex_array_free(fa);
@@ -2337,82 +2026,6 @@ static const struct file_operations proc_map_files_operations = {
 
 #endif /* CONFIG_CHECKPOINT_RESTORE */
 
-/*
- * /proc/pid/fd needs a special permission handler so that a process can still
- * access /proc/self/fd after it has executed a setuid().
- */
-static int proc_fd_permission(struct inode *inode, int mask)
-{
-       int rv = generic_permission(inode, mask);
-       if (rv == 0)
-               return 0;
-       if (task_pid(current) == proc_pid(inode))
-               rv = 0;
-       return rv;
-}
-
-/*
- * proc directories can do almost nothing..
- */
-static const struct inode_operations proc_fd_inode_operations = {
-       .lookup         = proc_lookupfd,
-       .permission     = proc_fd_permission,
-       .setattr        = proc_setattr,
-};
-
-static struct dentry *proc_fdinfo_instantiate(struct inode *dir,
-       struct dentry *dentry, struct task_struct *task, const void *ptr)
-{
-       unsigned fd = (unsigned long)ptr;
-       struct inode *inode;
-       struct proc_inode *ei;
-       struct dentry *error = ERR_PTR(-ENOENT);
-
-       inode = proc_pid_make_inode(dir->i_sb, task);
-       if (!inode)
-               goto out;
-       ei = PROC_I(inode);
-       ei->fd = fd;
-       inode->i_mode = S_IFREG | S_IRUSR;
-       inode->i_fop = &proc_fdinfo_file_operations;
-       d_set_d_op(dentry, &tid_fd_dentry_operations);
-       d_add(dentry, inode);
-       /* Close the race of the process dying before we return the dentry */
-       if (tid_fd_revalidate(dentry, 0))
-               error = NULL;
-
- out:
-       return error;
-}
-
-static struct dentry *proc_lookupfdinfo(struct inode *dir,
-                                       struct dentry *dentry,
-                                       unsigned int flags)
-{
-       return proc_lookupfd_common(dir, dentry, proc_fdinfo_instantiate);
-}
-
-static int proc_readfdinfo(struct file *filp, void *dirent, filldir_t filldir)
-{
-       return proc_readfd_common(filp, dirent, filldir,
-                                 proc_fdinfo_instantiate);
-}
-
-static const struct file_operations proc_fdinfo_operations = {
-       .read           = generic_read_dir,
-       .readdir        = proc_readfdinfo,
-       .llseek         = default_llseek,
-};
-
-/*
- * proc directories can do almost nothing..
- */
-static const struct inode_operations proc_fdinfo_inode_operations = {
-       .lookup         = proc_lookupfdinfo,
-       .setattr        = proc_setattr,
-};
-
-
 static struct dentry *proc_pident_instantiate(struct inode *dir,
        struct dentry *dentry, struct task_struct *task, const void *ptr)
 {
@@ -2983,6 +2596,11 @@ static int proc_gid_map_open(struct inode *inode, struct file *file)
        return proc_id_map_open(inode, file, &proc_gid_seq_operations);
 }
 
+static int proc_projid_map_open(struct inode *inode, struct file *file)
+{
+       return proc_id_map_open(inode, file, &proc_projid_seq_operations);
+}
+
 static const struct file_operations proc_uid_map_operations = {
        .open           = proc_uid_map_open,
        .write          = proc_uid_map_write,
@@ -2998,6 +2616,14 @@ static const struct file_operations proc_gid_map_operations = {
        .llseek         = seq_lseek,
        .release        = proc_id_map_release,
 };
+
+static const struct file_operations proc_projid_map_operations = {
+       .open           = proc_projid_map_open,
+       .write          = proc_projid_map_write,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = proc_id_map_release,
+};
 #endif /* CONFIG_USER_NS */
 
 static int proc_pid_personality(struct seq_file *m, struct pid_namespace *ns,
@@ -3105,6 +2731,7 @@ static const struct pid_entry tgid_base_stuff[] = {
 #ifdef CONFIG_USER_NS
        REG("uid_map",    S_IRUGO|S_IWUSR, proc_uid_map_operations),
        REG("gid_map",    S_IRUGO|S_IWUSR, proc_gid_map_operations),
+       REG("projid_map", S_IRUGO|S_IWUSR, proc_projid_map_operations),
 #endif
 };
 
@@ -3468,6 +3095,7 @@ static const struct pid_entry tid_base_stuff[] = {
 #ifdef CONFIG_USER_NS
        REG("uid_map",    S_IRUGO|S_IWUSR, proc_uid_map_operations),
        REG("gid_map",    S_IRUGO|S_IWUSR, proc_gid_map_operations),
+       REG("projid_map", S_IRUGO|S_IWUSR, proc_projid_map_operations),
 #endif
 };
 
diff --git a/fs/proc/fd.c b/fs/proc/fd.c
new file mode 100644 (file)
index 0000000..f28a875
--- /dev/null
@@ -0,0 +1,367 @@
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/dcache.h>
+#include <linux/path.h>
+#include <linux/fdtable.h>
+#include <linux/namei.h>
+#include <linux/pid.h>
+#include <linux/security.h>
+#include <linux/file.h>
+#include <linux/seq_file.h>
+
+#include <linux/proc_fs.h>
+
+#include "internal.h"
+#include "fd.h"
+
+static int seq_show(struct seq_file *m, void *v)
+{
+       struct files_struct *files = NULL;
+       int f_flags = 0, ret = -ENOENT;
+       struct file *file = NULL;
+       struct task_struct *task;
+
+       task = get_proc_task(m->private);
+       if (!task)
+               return -ENOENT;
+
+       files = get_files_struct(task);
+       put_task_struct(task);
+
+       if (files) {
+               int fd = proc_fd(m->private);
+
+               spin_lock(&files->file_lock);
+               file = fcheck_files(files, fd);
+               if (file) {
+                       struct fdtable *fdt = files_fdtable(files);
+
+                       f_flags = file->f_flags;
+                       if (close_on_exec(fd, fdt))
+                               f_flags |= O_CLOEXEC;
+
+                       get_file(file);
+                       ret = 0;
+               }
+               spin_unlock(&files->file_lock);
+               put_files_struct(files);
+       }
+
+       if (!ret) {
+                seq_printf(m, "pos:\t%lli\nflags:\t0%o\n",
+                          (long long)file->f_pos, f_flags);
+               fput(file);
+       }
+
+       return ret;
+}
+
+static int seq_fdinfo_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, seq_show, inode);
+}
+
+static const struct file_operations proc_fdinfo_file_operations = {
+       .open           = seq_fdinfo_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
+static int tid_fd_revalidate(struct dentry *dentry, unsigned int flags)
+{
+       struct files_struct *files;
+       struct task_struct *task;
+       const struct cred *cred;
+       struct inode *inode;
+       int fd;
+
+       if (flags & LOOKUP_RCU)
+               return -ECHILD;
+
+       inode = dentry->d_inode;
+       task = get_proc_task(inode);
+       fd = proc_fd(inode);
+
+       if (task) {
+               files = get_files_struct(task);
+               if (files) {
+                       struct file *file;
+
+                       rcu_read_lock();
+                       file = fcheck_files(files, fd);
+                       if (file) {
+                               unsigned f_mode = file->f_mode;
+
+                               rcu_read_unlock();
+                               put_files_struct(files);
+
+                               if (task_dumpable(task)) {
+                                       rcu_read_lock();
+                                       cred = __task_cred(task);
+                                       inode->i_uid = cred->euid;
+                                       inode->i_gid = cred->egid;
+                                       rcu_read_unlock();
+                               } else {
+                                       inode->i_uid = GLOBAL_ROOT_UID;
+                                       inode->i_gid = GLOBAL_ROOT_GID;
+                               }
+
+                               if (S_ISLNK(inode->i_mode)) {
+                                       unsigned i_mode = S_IFLNK;
+                                       if (f_mode & FMODE_READ)
+                                               i_mode |= S_IRUSR | S_IXUSR;
+                                       if (f_mode & FMODE_WRITE)
+                                               i_mode |= S_IWUSR | S_IXUSR;
+                                       inode->i_mode = i_mode;
+                               }
+
+                               security_task_to_inode(task, inode);
+                               put_task_struct(task);
+                               return 1;
+                       }
+                       rcu_read_unlock();
+                       put_files_struct(files);
+               }
+               put_task_struct(task);
+       }
+
+       d_drop(dentry);
+       return 0;
+}
+
+static const struct dentry_operations tid_fd_dentry_operations = {
+       .d_revalidate   = tid_fd_revalidate,
+       .d_delete       = pid_delete_dentry,
+};
+
+static int proc_fd_link(struct dentry *dentry, struct path *path)
+{
+       struct files_struct *files = NULL;
+       struct task_struct *task;
+       int ret = -ENOENT;
+
+       task = get_proc_task(dentry->d_inode);
+       if (task) {
+               files = get_files_struct(task);
+               put_task_struct(task);
+       }
+
+       if (files) {
+               int fd = proc_fd(dentry->d_inode);
+               struct file *fd_file;
+
+               spin_lock(&files->file_lock);
+               fd_file = fcheck_files(files, fd);
+               if (fd_file) {
+                       *path = fd_file->f_path;
+                       path_get(&fd_file->f_path);
+                       ret = 0;
+               }
+               spin_unlock(&files->file_lock);
+               put_files_struct(files);
+       }
+
+       return ret;
+}
+
+static struct dentry *
+proc_fd_instantiate(struct inode *dir, struct dentry *dentry,
+                   struct task_struct *task, const void *ptr)
+{
+       struct dentry *error = ERR_PTR(-ENOENT);
+       unsigned fd = (unsigned long)ptr;
+       struct proc_inode *ei;
+       struct inode *inode;
+
+       inode = proc_pid_make_inode(dir->i_sb, task);
+       if (!inode)
+               goto out;
+
+       ei = PROC_I(inode);
+       ei->fd = fd;
+
+       inode->i_mode = S_IFLNK;
+       inode->i_op = &proc_pid_link_inode_operations;
+       inode->i_size = 64;
+
+       ei->op.proc_get_link = proc_fd_link;
+
+       d_set_d_op(dentry, &tid_fd_dentry_operations);
+       d_add(dentry, inode);
+
+       /* Close the race of the process dying before we return the dentry */
+       if (tid_fd_revalidate(dentry, 0))
+               error = NULL;
+ out:
+       return error;
+}
+
+static struct dentry *proc_lookupfd_common(struct inode *dir,
+                                          struct dentry *dentry,
+                                          instantiate_t instantiate)
+{
+       struct task_struct *task = get_proc_task(dir);
+       struct dentry *result = ERR_PTR(-ENOENT);
+       unsigned fd = name_to_int(dentry);
+
+       if (!task)
+               goto out_no_task;
+       if (fd == ~0U)
+               goto out;
+
+       result = instantiate(dir, dentry, task, (void *)(unsigned long)fd);
+out:
+       put_task_struct(task);
+out_no_task:
+       return result;
+}
+
+static int proc_readfd_common(struct file * filp, void * dirent,
+                             filldir_t filldir, instantiate_t instantiate)
+{
+       struct dentry *dentry = filp->f_path.dentry;
+       struct inode *inode = dentry->d_inode;
+       struct task_struct *p = get_proc_task(inode);
+       struct files_struct *files;
+       unsigned int fd, ino;
+       int retval;
+
+       retval = -ENOENT;
+       if (!p)
+               goto out_no_task;
+       retval = 0;
+
+       fd = filp->f_pos;
+       switch (fd) {
+               case 0:
+                       if (filldir(dirent, ".", 1, 0, inode->i_ino, DT_DIR) < 0)
+                               goto out;
+                       filp->f_pos++;
+               case 1:
+                       ino = parent_ino(dentry);
+                       if (filldir(dirent, "..", 2, 1, ino, DT_DIR) < 0)
+                               goto out;
+                       filp->f_pos++;
+               default:
+                       files = get_files_struct(p);
+                       if (!files)
+                               goto out;
+                       rcu_read_lock();
+                       for (fd = filp->f_pos - 2;
+                            fd < files_fdtable(files)->max_fds;
+                            fd++, filp->f_pos++) {
+                               char name[PROC_NUMBUF];
+                               int len;
+                               int rv;
+
+                               if (!fcheck_files(files, fd))
+                                       continue;
+                               rcu_read_unlock();
+
+                               len = snprintf(name, sizeof(name), "%d", fd);
+                               rv = proc_fill_cache(filp, dirent, filldir,
+                                                    name, len, instantiate, p,
+                                                    (void *)(unsigned long)fd);
+                               if (rv < 0)
+                                       goto out_fd_loop;
+                               rcu_read_lock();
+                       }
+                       rcu_read_unlock();
+out_fd_loop:
+                       put_files_struct(files);
+       }
+out:
+       put_task_struct(p);
+out_no_task:
+       return retval;
+}
+
+static int proc_readfd(struct file *filp, void *dirent, filldir_t filldir)
+{
+       return proc_readfd_common(filp, dirent, filldir, proc_fd_instantiate);
+}
+
+const struct file_operations proc_fd_operations = {
+       .read           = generic_read_dir,
+       .readdir        = proc_readfd,
+       .llseek         = default_llseek,
+};
+
+static struct dentry *proc_lookupfd(struct inode *dir, struct dentry *dentry,
+                                   unsigned int flags)
+{
+       return proc_lookupfd_common(dir, dentry, proc_fd_instantiate);
+}
+
+/*
+ * /proc/pid/fd needs a special permission handler so that a process can still
+ * access /proc/self/fd after it has executed a setuid().
+ */
+int proc_fd_permission(struct inode *inode, int mask)
+{
+       int rv = generic_permission(inode, mask);
+       if (rv == 0)
+               return 0;
+       if (task_pid(current) == proc_pid(inode))
+               rv = 0;
+       return rv;
+}
+
+const struct inode_operations proc_fd_inode_operations = {
+       .lookup         = proc_lookupfd,
+       .permission     = proc_fd_permission,
+       .setattr        = proc_setattr,
+};
+
+static struct dentry *
+proc_fdinfo_instantiate(struct inode *dir, struct dentry *dentry,
+                       struct task_struct *task, const void *ptr)
+{
+       struct dentry *error = ERR_PTR(-ENOENT);
+       unsigned fd = (unsigned long)ptr;
+       struct proc_inode *ei;
+       struct inode *inode;
+
+       inode = proc_pid_make_inode(dir->i_sb, task);
+       if (!inode)
+               goto out;
+
+       ei = PROC_I(inode);
+       ei->fd = fd;
+
+       inode->i_mode = S_IFREG | S_IRUSR;
+       inode->i_fop = &proc_fdinfo_file_operations;
+
+       d_set_d_op(dentry, &tid_fd_dentry_operations);
+       d_add(dentry, inode);
+
+       /* Close the race of the process dying before we return the dentry */
+       if (tid_fd_revalidate(dentry, 0))
+               error = NULL;
+ out:
+       return error;
+}
+
+static struct dentry *
+proc_lookupfdinfo(struct inode *dir, struct dentry *dentry, unsigned int flags)
+{
+       return proc_lookupfd_common(dir, dentry, proc_fdinfo_instantiate);
+}
+
+static int proc_readfdinfo(struct file *filp, void *dirent, filldir_t filldir)
+{
+       return proc_readfd_common(filp, dirent, filldir,
+                                 proc_fdinfo_instantiate);
+}
+
+const struct inode_operations proc_fdinfo_inode_operations = {
+       .lookup         = proc_lookupfdinfo,
+       .setattr        = proc_setattr,
+};
+
+const struct file_operations proc_fdinfo_operations = {
+       .read           = generic_read_dir,
+       .readdir        = proc_readfdinfo,
+       .llseek         = default_llseek,
+};
diff --git a/fs/proc/fd.h b/fs/proc/fd.h
new file mode 100644 (file)
index 0000000..cbb1d47
--- /dev/null
@@ -0,0 +1,14 @@
+#ifndef __PROCFS_FD_H__
+#define __PROCFS_FD_H__
+
+#include <linux/fs.h>
+
+extern const struct file_operations proc_fd_operations;
+extern const struct inode_operations proc_fd_inode_operations;
+
+extern const struct file_operations proc_fdinfo_operations;
+extern const struct inode_operations proc_fdinfo_inode_operations;
+
+extern int proc_fd_permission(struct inode *inode, int mask);
+
+#endif /* __PROCFS_FD_H__ */
index e1167a1c9126ee5f6a91971fec50ff3eaa79cce4..67925a7bd8cb757f56db940efcff1fe9c7ef112a 100644 (file)
@@ -9,6 +9,7 @@
  * 2 of the License, or (at your option) any later version.
  */
 
+#include <linux/sched.h>
 #include <linux/proc_fs.h>
 struct  ctl_table_header;
 
@@ -65,6 +66,7 @@ extern const struct file_operations proc_clear_refs_operations;
 extern const struct file_operations proc_pagemap_operations;
 extern const struct file_operations proc_net_operations;
 extern const struct inode_operations proc_net_inode_operations;
+extern const struct inode_operations proc_pid_link_inode_operations;
 
 struct proc_maps_private {
        struct pid *pid;
@@ -91,6 +93,52 @@ static inline int proc_fd(struct inode *inode)
        return PROC_I(inode)->fd;
 }
 
+static inline int task_dumpable(struct task_struct *task)
+{
+       int dumpable = 0;
+       struct mm_struct *mm;
+
+       task_lock(task);
+       mm = task->mm;
+       if (mm)
+               dumpable = get_dumpable(mm);
+       task_unlock(task);
+       if(dumpable == 1)
+               return 1;
+       return 0;
+}
+
+static inline int pid_delete_dentry(const struct dentry * dentry)
+{
+       /* Is the task we represent dead?
+        * If so, then don't put the dentry on the lru list,
+        * kill it immediately.
+        */
+       return !proc_pid(dentry->d_inode)->tasks[PIDTYPE_PID].first;
+}
+
+static inline unsigned name_to_int(struct dentry *dentry)
+{
+       const char *name = dentry->d_name.name;
+       int len = dentry->d_name.len;
+       unsigned n = 0;
+
+       if (len > 1 && *name == '0')
+               goto out;
+       while (len-- > 0) {
+               unsigned c = *name++ - '0';
+               if (c > 9)
+                       goto out;
+               if (n >= (~0U-9)/10)
+                       goto out;
+               n *= 10;
+               n += c;
+       }
+       return n;
+out:
+       return ~0U;
+}
+
 struct dentry *proc_lookup_de(struct proc_dir_entry *de, struct inode *ino,
                struct dentry *dentry);
 int proc_readdir_de(struct proc_dir_entry *de, struct file *filp, void *dirent,
index 552e994e3aa156dff4f6aa43fa87e3885ee44513..43098bb5723af2890acaaa8d2f3a86a37fc7b811 100644 (file)
@@ -312,8 +312,8 @@ struct inode *qnx4_iget(struct super_block *sb, unsigned long ino)
            (ino % QNX4_INODES_PER_BLOCK);
 
        inode->i_mode    = le16_to_cpu(raw_inode->di_mode);
-       inode->i_uid     = (uid_t)le16_to_cpu(raw_inode->di_uid);
-       inode->i_gid     = (gid_t)le16_to_cpu(raw_inode->di_gid);
+       i_uid_write(inode, (uid_t)le16_to_cpu(raw_inode->di_uid));
+       i_gid_write(inode, (gid_t)le16_to_cpu(raw_inode->di_gid));
        set_nlink(inode, le16_to_cpu(raw_inode->di_nlink));
        inode->i_size    = le32_to_cpu(raw_inode->di_size);
        inode->i_mtime.tv_sec   = le32_to_cpu(raw_inode->di_mtime);
@@ -391,6 +391,11 @@ static int init_inodecache(void)
 
 static void destroy_inodecache(void)
 {
+       /*
+        * Make sure all delayed rcu free inodes are flushed before we
+        * destroy cache.
+        */
+       rcu_barrier();
        kmem_cache_destroy(qnx4_inode_cachep);
 }
 
index 2049c814bda475a7e02e9e4411d54cc74f304660..b6addf560483f0877078946050d90efb1f7d2344 100644 (file)
@@ -574,8 +574,8 @@ struct inode *qnx6_iget(struct super_block *sb, unsigned ino)
        raw_inode = ((struct qnx6_inode_entry *)page_address(page)) + offs;
 
        inode->i_mode    = fs16_to_cpu(sbi, raw_inode->di_mode);
-       inode->i_uid     = (uid_t)fs32_to_cpu(sbi, raw_inode->di_uid);
-       inode->i_gid     = (gid_t)fs32_to_cpu(sbi, raw_inode->di_gid);
+       i_uid_write(inode, (uid_t)fs32_to_cpu(sbi, raw_inode->di_uid));
+       i_gid_write(inode, (gid_t)fs32_to_cpu(sbi, raw_inode->di_gid));
        inode->i_size    = fs64_to_cpu(sbi, raw_inode->di_size);
        inode->i_mtime.tv_sec   = fs32_to_cpu(sbi, raw_inode->di_mtime);
        inode->i_mtime.tv_nsec = 0;
@@ -651,6 +651,11 @@ static int init_inodecache(void)
 
 static void destroy_inodecache(void)
 {
+       /*
+        * Make sure all delayed rcu free inodes are flushed before we
+        * destroy cache.
+        */
+       rcu_barrier();
        kmem_cache_destroy(qnx6_inode_cachep);
 }
 
index 5f9e9e276af09c837b4396b84e724f155908068d..c66c37cdaa396ea860cdee00f17d4148887c078b 100644 (file)
@@ -2,6 +2,6 @@ obj-$(CONFIG_QUOTA)             += dquot.o
 obj-$(CONFIG_QFMT_V1)          += quota_v1.o
 obj-$(CONFIG_QFMT_V2)          += quota_v2.o
 obj-$(CONFIG_QUOTA_TREE)       += quota_tree.o
-obj-$(CONFIG_QUOTACTL)         += quota.o
+obj-$(CONFIG_QUOTACTL)         += quota.o kqid.o
 obj-$(CONFIG_QUOTACTL_COMPAT)  += compat.o
 obj-$(CONFIG_QUOTA_NETLINK_INTERFACE)  += netlink.o
index c495a3055e2a3be9b5e471afebdf53ac00c6fe51..557a9c20a2154856c1e1595e2d0e457d9487aaad 100644 (file)
@@ -253,8 +253,10 @@ static qsize_t inode_get_rsv_space(struct inode *inode);
 static void __dquot_initialize(struct inode *inode, int type);
 
 static inline unsigned int
-hashfn(const struct super_block *sb, unsigned int id, int type)
+hashfn(const struct super_block *sb, struct kqid qid)
 {
+       unsigned int id = from_kqid(&init_user_ns, qid);
+       int type = qid.type;
        unsigned long tmp;
 
        tmp = (((unsigned long)sb>>L1_CACHE_SHIFT) ^ id) * (MAXQUOTAS - type);
@@ -267,7 +269,7 @@ hashfn(const struct super_block *sb, unsigned int id, int type)
 static inline void insert_dquot_hash(struct dquot *dquot)
 {
        struct hlist_head *head;
-       head = dquot_hash + hashfn(dquot->dq_sb, dquot->dq_id, dquot->dq_type);
+       head = dquot_hash + hashfn(dquot->dq_sb, dquot->dq_id);
        hlist_add_head(&dquot->dq_hash, head);
 }
 
@@ -277,15 +279,14 @@ static inline void remove_dquot_hash(struct dquot *dquot)
 }
 
 static struct dquot *find_dquot(unsigned int hashent, struct super_block *sb,
-                               unsigned int id, int type)
+                               struct kqid qid)
 {
        struct hlist_node *node;
        struct dquot *dquot;
 
        hlist_for_each (node, dquot_hash+hashent) {
                dquot = hlist_entry(node, struct dquot, dq_hash);
-               if (dquot->dq_sb == sb && dquot->dq_id == id &&
-                   dquot->dq_type == type)
+               if (dquot->dq_sb == sb && qid_eq(dquot->dq_id, qid))
                        return dquot;
        }
        return NULL;
@@ -351,7 +352,7 @@ int dquot_mark_dquot_dirty(struct dquot *dquot)
        spin_lock(&dq_list_lock);
        if (!test_and_set_bit(DQ_MOD_B, &dquot->dq_flags)) {
                list_add(&dquot->dq_dirty, &sb_dqopt(dquot->dq_sb)->
-                               info[dquot->dq_type].dqi_dirty_list);
+                               info[dquot->dq_id.type].dqi_dirty_list);
                ret = 0;
        }
        spin_unlock(&dq_list_lock);
@@ -410,17 +411,17 @@ int dquot_acquire(struct dquot *dquot)
        mutex_lock(&dquot->dq_lock);
        mutex_lock(&dqopt->dqio_mutex);
        if (!test_bit(DQ_READ_B, &dquot->dq_flags))
-               ret = dqopt->ops[dquot->dq_type]->read_dqblk(dquot);
+               ret = dqopt->ops[dquot->dq_id.type]->read_dqblk(dquot);
        if (ret < 0)
                goto out_iolock;
        set_bit(DQ_READ_B, &dquot->dq_flags);
        /* Instantiate dquot if needed */
        if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && !dquot->dq_off) {
-               ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot);
+               ret = dqopt->ops[dquot->dq_id.type]->commit_dqblk(dquot);
                /* Write the info if needed */
-               if (info_dirty(&dqopt->info[dquot->dq_type])) {
-                       ret2 = dqopt->ops[dquot->dq_type]->write_file_info(
-                                               dquot->dq_sb, dquot->dq_type);
+               if (info_dirty(&dqopt->info[dquot->dq_id.type])) {
+                       ret2 = dqopt->ops[dquot->dq_id.type]->write_file_info(
+                                       dquot->dq_sb, dquot->dq_id.type);
                }
                if (ret < 0)
                        goto out_iolock;
@@ -455,7 +456,7 @@ int dquot_commit(struct dquot *dquot)
        /* Inactive dquot can be only if there was error during read/init
         * => we have better not writing it */
        if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags))
-               ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot);
+               ret = dqopt->ops[dquot->dq_id.type]->commit_dqblk(dquot);
        else
                ret = -EIO;
 out_sem:
@@ -477,12 +478,12 @@ int dquot_release(struct dquot *dquot)
        if (atomic_read(&dquot->dq_count) > 1)
                goto out_dqlock;
        mutex_lock(&dqopt->dqio_mutex);
-       if (dqopt->ops[dquot->dq_type]->release_dqblk) {
-               ret = dqopt->ops[dquot->dq_type]->release_dqblk(dquot);
+       if (dqopt->ops[dquot->dq_id.type]->release_dqblk) {
+               ret = dqopt->ops[dquot->dq_id.type]->release_dqblk(dquot);
                /* Write the info */
-               if (info_dirty(&dqopt->info[dquot->dq_type])) {
-                       ret2 = dqopt->ops[dquot->dq_type]->write_file_info(
-                                               dquot->dq_sb, dquot->dq_type);
+               if (info_dirty(&dqopt->info[dquot->dq_id.type])) {
+                       ret2 = dqopt->ops[dquot->dq_id.type]->write_file_info(
+                                               dquot->dq_sb, dquot->dq_id.type);
                }
                if (ret >= 0)
                        ret = ret2;
@@ -521,7 +522,7 @@ restart:
        list_for_each_entry_safe(dquot, tmp, &inuse_list, dq_inuse) {
                if (dquot->dq_sb != sb)
                        continue;
-               if (dquot->dq_type != type)
+               if (dquot->dq_id.type != type)
                        continue;
                /* Wait for dquot users */
                if (atomic_read(&dquot->dq_count)) {
@@ -741,7 +742,8 @@ void dqput(struct dquot *dquot)
 #ifdef CONFIG_QUOTA_DEBUG
        if (!atomic_read(&dquot->dq_count)) {
                quota_error(dquot->dq_sb, "trying to free free dquot of %s %d",
-                           quotatypes[dquot->dq_type], dquot->dq_id);
+                           quotatypes[dquot->dq_id.type],
+                           from_kqid(&init_user_ns, dquot->dq_id));
                BUG();
        }
 #endif
@@ -752,7 +754,7 @@ we_slept:
                /* We have more than one user... nothing to do */
                atomic_dec(&dquot->dq_count);
                /* Releasing dquot during quotaoff phase? */
-               if (!sb_has_quota_active(dquot->dq_sb, dquot->dq_type) &&
+               if (!sb_has_quota_active(dquot->dq_sb, dquot->dq_id.type) &&
                    atomic_read(&dquot->dq_count) == 1)
                        wake_up(&dquot->dq_wait_unused);
                spin_unlock(&dq_list_lock);
@@ -815,7 +817,7 @@ static struct dquot *get_empty_dquot(struct super_block *sb, int type)
        INIT_LIST_HEAD(&dquot->dq_dirty);
        init_waitqueue_head(&dquot->dq_wait_unused);
        dquot->dq_sb = sb;
-       dquot->dq_type = type;
+       dquot->dq_id = make_kqid_invalid(type);
        atomic_set(&dquot->dq_count, 1);
 
        return dquot;
@@ -829,35 +831,35 @@ static struct dquot *get_empty_dquot(struct super_block *sb, int type)
  *   a) checking for quota flags under dq_list_lock and
  *   b) getting a reference to dquot before we release dq_list_lock
  */
-struct dquot *dqget(struct super_block *sb, unsigned int id, int type)
+struct dquot *dqget(struct super_block *sb, struct kqid qid)
 {
-       unsigned int hashent = hashfn(sb, id, type);
+       unsigned int hashent = hashfn(sb, qid);
        struct dquot *dquot = NULL, *empty = NULL;
 
-        if (!sb_has_quota_active(sb, type))
+        if (!sb_has_quota_active(sb, qid.type))
                return NULL;
 we_slept:
        spin_lock(&dq_list_lock);
        spin_lock(&dq_state_lock);
-       if (!sb_has_quota_active(sb, type)) {
+       if (!sb_has_quota_active(sb, qid.type)) {
                spin_unlock(&dq_state_lock);
                spin_unlock(&dq_list_lock);
                goto out;
        }
        spin_unlock(&dq_state_lock);
 
-       dquot = find_dquot(hashent, sb, id, type);
+       dquot = find_dquot(hashent, sb, qid);
        if (!dquot) {
                if (!empty) {
                        spin_unlock(&dq_list_lock);
-                       empty = get_empty_dquot(sb, type);
+                       empty = get_empty_dquot(sb, qid.type);
                        if (!empty)
                                schedule();     /* Try to wait for a moment... */
                        goto we_slept;
                }
                dquot = empty;
                empty = NULL;
-               dquot->dq_id = id;
+               dquot->dq_id = qid;
                /* all dquots go on the inuse_list */
                put_inuse(dquot);
                /* hash it first so it can be found */
@@ -1129,8 +1131,7 @@ static void dquot_decr_space(struct dquot *dquot, qsize_t number)
 
 struct dquot_warn {
        struct super_block *w_sb;
-       qid_t w_dq_id;
-       short w_dq_type;
+       struct kqid w_dq_id;
        short w_type;
 };
 
@@ -1154,11 +1155,11 @@ static int need_print_warning(struct dquot_warn *warn)
        if (!flag_print_warnings)
                return 0;
 
-       switch (warn->w_dq_type) {
+       switch (warn->w_dq_id.type) {
                case USRQUOTA:
-                       return current_fsuid() == warn->w_dq_id;
+                       return uid_eq(current_fsuid(), warn->w_dq_id.uid);
                case GRPQUOTA:
-                       return in_group_p(warn->w_dq_id);
+                       return in_group_p(warn->w_dq_id.gid);
        }
        return 0;
 }
@@ -1184,7 +1185,7 @@ static void print_warning(struct dquot_warn *warn)
                tty_write_message(tty, ": warning, ");
        else
                tty_write_message(tty, ": write failed, ");
-       tty_write_message(tty, quotatypes[warn->w_dq_type]);
+       tty_write_message(tty, quotatypes[warn->w_dq_id.type]);
        switch (warntype) {
                case QUOTA_NL_IHARDWARN:
                        msg = " file limit reached.\r\n";
@@ -1218,7 +1219,6 @@ static void prepare_warning(struct dquot_warn *warn, struct dquot *dquot,
        warn->w_type = warntype;
        warn->w_sb = dquot->dq_sb;
        warn->w_dq_id = dquot->dq_id;
-       warn->w_dq_type = dquot->dq_type;
 }
 
 /*
@@ -1236,14 +1236,14 @@ static void flush_warnings(struct dquot_warn *warn)
 #ifdef CONFIG_PRINT_QUOTA_WARNING
                print_warning(&warn[i]);
 #endif
-               quota_send_warning(warn[i].w_dq_type, warn[i].w_dq_id,
+               quota_send_warning(warn[i].w_dq_id,
                                   warn[i].w_sb->s_dev, warn[i].w_type);
        }
 }
 
 static int ignore_hardlimit(struct dquot *dquot)
 {
-       struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_type];
+       struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type];
 
        return capable(CAP_SYS_RESOURCE) &&
               (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD ||
@@ -1256,7 +1256,7 @@ static int check_idq(struct dquot *dquot, qsize_t inodes,
 {
        qsize_t newinodes = dquot->dq_dqb.dqb_curinodes + inodes;
 
-       if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type) ||
+       if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_id.type) ||
            test_bit(DQ_FAKE_B, &dquot->dq_flags))
                return 0;
 
@@ -1281,7 +1281,7 @@ static int check_idq(struct dquot *dquot, qsize_t inodes,
            dquot->dq_dqb.dqb_itime == 0) {
                prepare_warning(warn, dquot, QUOTA_NL_ISOFTWARN);
                dquot->dq_dqb.dqb_itime = get_seconds() +
-                   sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_igrace;
+                   sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type].dqi_igrace;
        }
 
        return 0;
@@ -1294,7 +1294,7 @@ static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc,
        qsize_t tspace;
        struct super_block *sb = dquot->dq_sb;
 
-       if (!sb_has_quota_limits_enabled(sb, dquot->dq_type) ||
+       if (!sb_has_quota_limits_enabled(sb, dquot->dq_id.type) ||
            test_bit(DQ_FAKE_B, &dquot->dq_flags))
                return 0;
 
@@ -1325,7 +1325,7 @@ static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc,
                if (!prealloc) {
                        prepare_warning(warn, dquot, QUOTA_NL_BSOFTWARN);
                        dquot->dq_dqb.dqb_btime = get_seconds() +
-                           sb_dqopt(sb)->info[dquot->dq_type].dqi_bgrace;
+                           sb_dqopt(sb)->info[dquot->dq_id.type].dqi_bgrace;
                }
                else
                        /*
@@ -1344,7 +1344,7 @@ static int info_idq_free(struct dquot *dquot, qsize_t inodes)
 
        if (test_bit(DQ_FAKE_B, &dquot->dq_flags) ||
            dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit ||
-           !sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type))
+           !sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_id.type))
                return QUOTA_NL_NOWARN;
 
        newinodes = dquot->dq_dqb.dqb_curinodes - inodes;
@@ -1390,7 +1390,6 @@ static int dquot_active(const struct inode *inode)
  */
 static void __dquot_initialize(struct inode *inode, int type)
 {
-       unsigned int id = 0;
        int cnt;
        struct dquot *got[MAXQUOTAS];
        struct super_block *sb = inode->i_sb;
@@ -1403,18 +1402,19 @@ static void __dquot_initialize(struct inode *inode, int type)
 
        /* First get references to structures we might need. */
        for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+               struct kqid qid;
                got[cnt] = NULL;
                if (type != -1 && cnt != type)
                        continue;
                switch (cnt) {
                case USRQUOTA:
-                       id = inode->i_uid;
+                       qid = make_kqid_uid(inode->i_uid);
                        break;
                case GRPQUOTA:
-                       id = inode->i_gid;
+                       qid = make_kqid_gid(inode->i_gid);
                        break;
                }
-               got[cnt] = dqget(sb, id, cnt);
+               got[cnt] = dqget(sb, qid);
        }
 
        down_write(&sb_dqopt(sb)->dqptr_sem);
@@ -1897,10 +1897,10 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr)
        if (!dquot_active(inode))
                return 0;
 
-       if (iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid)
-               transfer_to[USRQUOTA] = dqget(sb, iattr->ia_uid, USRQUOTA);
-       if (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)
-               transfer_to[GRPQUOTA] = dqget(sb, iattr->ia_gid, GRPQUOTA);
+       if (iattr->ia_valid & ATTR_UID && !uid_eq(iattr->ia_uid, inode->i_uid))
+               transfer_to[USRQUOTA] = dqget(sb, make_kqid_uid(iattr->ia_uid));
+       if (iattr->ia_valid & ATTR_GID && !gid_eq(iattr->ia_gid, inode->i_gid))
+               transfer_to[GRPQUOTA] = dqget(sb, make_kqid_gid(iattr->ia_gid));
 
        ret = __dquot_transfer(inode, transfer_to);
        dqput_all(transfer_to);
@@ -2360,9 +2360,9 @@ static void do_get_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
 
        memset(di, 0, sizeof(*di));
        di->d_version = FS_DQUOT_VERSION;
-       di->d_flags = dquot->dq_type == USRQUOTA ?
+       di->d_flags = dquot->dq_id.type == USRQUOTA ?
                        FS_USER_QUOTA : FS_GROUP_QUOTA;
-       di->d_id = dquot->dq_id;
+       di->d_id = from_kqid_munged(current_user_ns(), dquot->dq_id);
 
        spin_lock(&dq_data_lock);
        di->d_blk_hardlimit = stoqb(dm->dqb_bhardlimit);
@@ -2376,12 +2376,12 @@ static void do_get_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
        spin_unlock(&dq_data_lock);
 }
 
-int dquot_get_dqblk(struct super_block *sb, int type, qid_t id,
+int dquot_get_dqblk(struct super_block *sb, struct kqid qid,
                    struct fs_disk_quota *di)
 {
        struct dquot *dquot;
 
-       dquot = dqget(sb, id, type);
+       dquot = dqget(sb, qid);
        if (!dquot)
                return -ESRCH;
        do_get_dqblk(dquot, di);
@@ -2401,7 +2401,7 @@ static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
 {
        struct mem_dqblk *dm = &dquot->dq_dqb;
        int check_blim = 0, check_ilim = 0;
-       struct mem_dqinfo *dqi = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_type];
+       struct mem_dqinfo *dqi = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type];
 
        if (di->d_fieldmask & ~VFS_FS_DQ_MASK)
                return -EINVAL;
@@ -2488,13 +2488,13 @@ static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
        return 0;
 }
 
-int dquot_set_dqblk(struct super_block *sb, int type, qid_t id,
+int dquot_set_dqblk(struct super_block *sb, struct kqid qid,
                  struct fs_disk_quota *di)
 {
        struct dquot *dquot;
        int rc;
 
-       dquot = dqget(sb, id, type);
+       dquot = dqget(sb, qid);
        if (!dquot) {
                rc = -ESRCH;
                goto out;
diff --git a/fs/quota/kqid.c b/fs/quota/kqid.c
new file mode 100644 (file)
index 0000000..2f97b0e
--- /dev/null
@@ -0,0 +1,132 @@
+#include <linux/fs.h>
+#include <linux/quota.h>
+#include <linux/export.h>
+
+/**
+ *     qid_eq - Test to see if two kqid values are the same
+ *     @left: A qid value
+ *     @right: Another qid value
+ *
+ *     Return true if the two qid values are equal and false otherwise.
+ */
+bool qid_eq(struct kqid left, struct kqid right)
+{
+       if (left.type != right.type)
+               return false;
+       switch(left.type) {
+       case USRQUOTA:
+               return uid_eq(left.uid, right.uid);
+       case GRPQUOTA:
+               return gid_eq(left.gid, right.gid);
+       case PRJQUOTA:
+               return projid_eq(left.projid, right.projid);
+       default:
+               BUG();
+       }
+}
+EXPORT_SYMBOL(qid_eq);
+
+/**
+ *     qid_lt - Test to see if one qid value is less than another
+ *     @left: The possibly lesser qid value
+ *     @right: The possibly greater qid value
+ *
+ *     Return true if left is less than right and false otherwise.
+ */
+bool qid_lt(struct kqid left, struct kqid right)
+{
+       if (left.type < right.type)
+               return true;
+       if (left.type > right.type)
+               return false;
+       switch (left.type) {
+       case USRQUOTA:
+               return uid_lt(left.uid, right.uid);
+       case GRPQUOTA:
+               return gid_lt(left.gid, right.gid);
+       case PRJQUOTA:
+               return projid_lt(left.projid, right.projid);
+       default:
+               BUG();
+       }
+}
+EXPORT_SYMBOL(qid_lt);
+
+/**
+ *     from_kqid - Create a qid from a kqid user-namespace pair.
+ *     @targ: The user namespace we want a qid in.
+ *     @kqid: The kernel internal quota identifier to start with.
+ *
+ *     Map @kqid into the user-namespace specified by @targ and
+ *     return the resulting qid.
+ *
+ *     There is always a mapping into the initial user_namespace.
+ *
+ *     If @kqid has no mapping in @targ (qid_t)-1 is returned.
+ */
+qid_t from_kqid(struct user_namespace *targ, struct kqid kqid)
+{
+       switch (kqid.type) {
+       case USRQUOTA:
+               return from_kuid(targ, kqid.uid);
+       case GRPQUOTA:
+               return from_kgid(targ, kqid.gid);
+       case PRJQUOTA:
+               return from_kprojid(targ, kqid.projid);
+       default:
+               BUG();
+       }
+}
+EXPORT_SYMBOL(from_kqid);
+
+/**
+ *     from_kqid_munged - Create a qid from a kqid user-namespace pair.
+ *     @targ: The user namespace we want a qid in.
+ *     @kqid: The kernel internal quota identifier to start with.
+ *
+ *     Map @kqid into the user-namespace specified by @targ and
+ *     return the resulting qid.
+ *
+ *     There is always a mapping into the initial user_namespace.
+ *
+ *     Unlike from_kqid, from_kqid_munged never fails and always
+ *     returns a valid qid.  This makes from_kqid_munged
+ *     appropriate for use in places where failing to provide
+ *     a qid_t is not a good option.
+ *
+ *     If @kqid has no mapping in @targ the kqid.type specific
+ *     overflow identifier is returned.
+ */
+qid_t from_kqid_munged(struct user_namespace *targ, struct kqid kqid)
+{
+       switch (kqid.type) {
+       case USRQUOTA:
+               return from_kuid_munged(targ, kqid.uid);
+       case GRPQUOTA:
+               return from_kgid_munged(targ, kqid.gid);
+       case PRJQUOTA:
+               return from_kprojid_munged(targ, kqid.projid);
+       default:
+               BUG();
+       }
+}
+EXPORT_SYMBOL(from_kqid_munged);
+
+/**
+ *     qid_valid - Report if a valid value is stored in a kqid.
+ *     @qid: The kernel internal quota identifier to test.
+ */
+bool qid_valid(struct kqid qid)
+{
+       switch (qid.type) {
+       case USRQUOTA:
+               return uid_valid(qid.uid);
+       case GRPQUOTA:
+               return gid_valid(qid.gid);
+       case PRJQUOTA:
+               return projid_valid(qid.projid);
+       default:
+               BUG();
+       }
+}
+EXPORT_SYMBOL(qid_valid);
index d67908b407d99dbad1f3f91a1e0c561cbab60768..16e8abb7709ba04fb1bcc1520573acf568e68eb9 100644 (file)
@@ -30,7 +30,7 @@ static struct genl_family quota_genl_family = {
  *
  */
 
-void quota_send_warning(short type, unsigned int id, dev_t dev,
+void quota_send_warning(struct kqid qid, dev_t dev,
                        const char warntype)
 {
        static atomic_t seq;
@@ -56,10 +56,11 @@ void quota_send_warning(short type, unsigned int id, dev_t dev,
                  "VFS: Cannot store netlink header in quota warning.\n");
                goto err_out;
        }
-       ret = nla_put_u32(skb, QUOTA_NL_A_QTYPE, type);
+       ret = nla_put_u32(skb, QUOTA_NL_A_QTYPE, qid.type);
        if (ret)
                goto attr_err_out;
-       ret = nla_put_u64(skb, QUOTA_NL_A_EXCESS_ID, id);
+       ret = nla_put_u64(skb, QUOTA_NL_A_EXCESS_ID,
+                         from_kqid_munged(&init_user_ns, qid));
        if (ret)
                goto attr_err_out;
        ret = nla_put_u32(skb, QUOTA_NL_A_WARNING, warntype);
@@ -71,7 +72,8 @@ void quota_send_warning(short type, unsigned int id, dev_t dev,
        ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MINOR, MINOR(dev));
        if (ret)
                goto attr_err_out;
-       ret = nla_put_u64(skb, QUOTA_NL_A_CAUSED_ID, current_uid());
+       ret = nla_put_u64(skb, QUOTA_NL_A_CAUSED_ID,
+                         from_kuid_munged(&init_user_ns, current_uid()));
        if (ret)
                goto attr_err_out;
        genlmsg_end(skb, msg_head);
index 6f155788cbc6ed498126cfca9edff974f19b4a93..ff0135d6bc51a5d8ddbe1eb16f845a2b9d37c009 100644 (file)
@@ -32,8 +32,8 @@ static int check_quotactl_permission(struct super_block *sb, int type, int cmd,
        /* allow to query information for dquots we "own" */
        case Q_GETQUOTA:
        case Q_XGETQUOTA:
-               if ((type == USRQUOTA && current_euid() == id) ||
-                   (type == GRPQUOTA && in_egroup_p(id)))
+               if ((type == USRQUOTA && uid_eq(current_euid(), make_kuid(current_user_ns(), id))) ||
+                   (type == GRPQUOTA && in_egroup_p(make_kgid(current_user_ns(), id))))
                        break;
                /*FALLTHROUGH*/
        default:
@@ -130,13 +130,17 @@ static void copy_to_if_dqblk(struct if_dqblk *dst, struct fs_disk_quota *src)
 static int quota_getquota(struct super_block *sb, int type, qid_t id,
                          void __user *addr)
 {
+       struct kqid qid;
        struct fs_disk_quota fdq;
        struct if_dqblk idq;
        int ret;
 
        if (!sb->s_qcop->get_dqblk)
                return -ENOSYS;
-       ret = sb->s_qcop->get_dqblk(sb, type, id, &fdq);
+       qid = make_kqid(current_user_ns(), type, id);
+       if (!qid_valid(qid))
+               return -EINVAL;
+       ret = sb->s_qcop->get_dqblk(sb, qid, &fdq);
        if (ret)
                return ret;
        copy_to_if_dqblk(&idq, &fdq);
@@ -176,13 +180,17 @@ static int quota_setquota(struct super_block *sb, int type, qid_t id,
 {
        struct fs_disk_quota fdq;
        struct if_dqblk idq;
+       struct kqid qid;
 
        if (copy_from_user(&idq, addr, sizeof(idq)))
                return -EFAULT;
        if (!sb->s_qcop->set_dqblk)
                return -ENOSYS;
+       qid = make_kqid(current_user_ns(), type, id);
+       if (!qid_valid(qid))
+               return -EINVAL;
        copy_from_if_dqblk(&fdq, &idq);
-       return sb->s_qcop->set_dqblk(sb, type, id, &fdq);
+       return sb->s_qcop->set_dqblk(sb, qid, &fdq);
 }
 
 static int quota_setxstate(struct super_block *sb, int cmd, void __user *addr)
@@ -213,23 +221,31 @@ static int quota_setxquota(struct super_block *sb, int type, qid_t id,
                           void __user *addr)
 {
        struct fs_disk_quota fdq;
+       struct kqid qid;
 
        if (copy_from_user(&fdq, addr, sizeof(fdq)))
                return -EFAULT;
        if (!sb->s_qcop->set_dqblk)
                return -ENOSYS;
-       return sb->s_qcop->set_dqblk(sb, type, id, &fdq);
+       qid = make_kqid(current_user_ns(), type, id);
+       if (!qid_valid(qid))
+               return -EINVAL;
+       return sb->s_qcop->set_dqblk(sb, qid, &fdq);
 }
 
 static int quota_getxquota(struct super_block *sb, int type, qid_t id,
                           void __user *addr)
 {
        struct fs_disk_quota fdq;
+       struct kqid qid;
        int ret;
 
        if (!sb->s_qcop->get_dqblk)
                return -ENOSYS;
-       ret = sb->s_qcop->get_dqblk(sb, type, id, &fdq);
+       qid = make_kqid(current_user_ns(), type, id);
+       if (!qid_valid(qid))
+               return -EINVAL;
+       ret = sb->s_qcop->get_dqblk(sb, qid, &fdq);
        if (!ret && copy_to_user(addr, &fdq, sizeof(fdq)))
                return -EFAULT;
        return ret;
index e41c1becf09607f55382f23908918df8040be57e..d65877fbe8f4a389100527ce426c5e22f8fe1e88 100644 (file)
@@ -22,9 +22,10 @@ MODULE_LICENSE("GPL");
 
 #define __QUOTA_QT_PARANOIA
 
-static int get_index(struct qtree_mem_dqinfo *info, qid_t id, int depth)
+static int get_index(struct qtree_mem_dqinfo *info, struct kqid qid, int depth)
 {
        unsigned int epb = info->dqi_usable_bs >> 2;
+       qid_t id = from_kqid(&init_user_ns, qid);
 
        depth = info->dqi_qtree_depth - depth - 1;
        while (depth--)
@@ -244,7 +245,7 @@ static uint find_free_dqentry(struct qtree_mem_dqinfo *info,
                /* This is enough as the block is already zeroed and the entry
                 * list is empty... */
                info->dqi_free_entry = blk;
-               mark_info_dirty(dquot->dq_sb, dquot->dq_type);
+               mark_info_dirty(dquot->dq_sb, dquot->dq_id.type);
        }
        /* Block will be full? */
        if (le16_to_cpu(dh->dqdh_entries) + 1 >= qtree_dqstr_in_blk(info)) {
@@ -357,7 +358,7 @@ static inline int dq_insert_tree(struct qtree_mem_dqinfo *info,
  */
 int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
 {
-       int type = dquot->dq_type;
+       int type = dquot->dq_id.type;
        struct super_block *sb = dquot->dq_sb;
        ssize_t ret;
        char *ddquot = getdqbuf(info->dqi_entry_size);
@@ -538,8 +539,9 @@ static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info,
                ddquot += info->dqi_entry_size;
        }
        if (i == qtree_dqstr_in_blk(info)) {
-               quota_error(dquot->dq_sb, "Quota for id %u referenced "
-                           "but not present", dquot->dq_id);
+               quota_error(dquot->dq_sb,
+                           "Quota for id %u referenced but not present",
+                           from_kqid(&init_user_ns, dquot->dq_id));
                ret = -EIO;
                goto out_buf;
        } else {
@@ -589,7 +591,7 @@ static inline loff_t find_dqentry(struct qtree_mem_dqinfo *info,
 
 int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
 {
-       int type = dquot->dq_type;
+       int type = dquot->dq_id.type;
        struct super_block *sb = dquot->dq_sb;
        loff_t offset;
        char *ddquot;
@@ -607,8 +609,10 @@ int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
                offset = find_dqentry(info, dquot);
                if (offset <= 0) {      /* Entry not present? */
                        if (offset < 0)
-                               quota_error(sb, "Can't read quota structure "
-                                           "for id %u", dquot->dq_id);
+                               quota_error(sb,"Can't read quota structure "
+                                           "for id %u",
+                                           from_kqid(&init_user_ns,
+                                                     dquot->dq_id));
                        dquot->dq_off = 0;
                        set_bit(DQ_FAKE_B, &dquot->dq_flags);
                        memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk));
@@ -626,7 +630,7 @@ int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
                if (ret >= 0)
                        ret = -EIO;
                quota_error(sb, "Error while reading quota structure for id %u",
-                           dquot->dq_id);
+                           from_kqid(&init_user_ns, dquot->dq_id));
                set_bit(DQ_FAKE_B, &dquot->dq_flags);
                memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk));
                kfree(ddquot);
index 34b37a67bb16c6c936bd725e5fa79e76ffb2ddc0..469c6848b322dd9f6b8aab6933b9cc75d8ee18c9 100644 (file)
@@ -54,7 +54,7 @@ static void v1_mem2disk_dqblk(struct v1_disk_dqblk *d, struct mem_dqblk *m)
 
 static int v1_read_dqblk(struct dquot *dquot)
 {
-       int type = dquot->dq_type;
+       int type = dquot->dq_id.type;
        struct v1_disk_dqblk dqblk;
 
        if (!sb_dqopt(dquot->dq_sb)->files[type])
@@ -63,7 +63,8 @@ static int v1_read_dqblk(struct dquot *dquot)
        /* Set structure to 0s in case read fails/is after end of file */
        memset(&dqblk, 0, sizeof(struct v1_disk_dqblk));
        dquot->dq_sb->s_op->quota_read(dquot->dq_sb, type, (char *)&dqblk,
-                       sizeof(struct v1_disk_dqblk), v1_dqoff(dquot->dq_id));
+                       sizeof(struct v1_disk_dqblk),
+                       v1_dqoff(from_kqid(&init_user_ns, dquot->dq_id)));
 
        v1_disk2mem_dqblk(&dquot->dq_dqb, &dqblk);
        if (dquot->dq_dqb.dqb_bhardlimit == 0 &&
@@ -78,12 +79,13 @@ static int v1_read_dqblk(struct dquot *dquot)
 
 static int v1_commit_dqblk(struct dquot *dquot)
 {
-       short type = dquot->dq_type;
+       short type = dquot->dq_id.type;
        ssize_t ret;
        struct v1_disk_dqblk dqblk;
 
        v1_mem2disk_dqblk(&dqblk, &dquot->dq_dqb);
-       if (dquot->dq_id == 0) {
+       if (((type == USRQUOTA) && uid_eq(dquot->dq_id.uid, GLOBAL_ROOT_UID)) ||
+           ((type == GRPQUOTA) && gid_eq(dquot->dq_id.gid, GLOBAL_ROOT_GID))) {
                dqblk.dqb_btime =
                        sb_dqopt(dquot->dq_sb)->info[type].dqi_bgrace;
                dqblk.dqb_itime =
@@ -93,7 +95,7 @@ static int v1_commit_dqblk(struct dquot *dquot)
        if (sb_dqopt(dquot->dq_sb)->files[type])
                ret = dquot->dq_sb->s_op->quota_write(dquot->dq_sb, type,
                        (char *)&dqblk, sizeof(struct v1_disk_dqblk),
-                       v1_dqoff(dquot->dq_id));
+                       v1_dqoff(from_kqid(&init_user_ns, dquot->dq_id)));
        if (ret != sizeof(struct v1_disk_dqblk)) {
                quota_error(dquot->dq_sb, "dquota write failed");
                if (ret >= 0)
index f1ab3604db5a4972d427b64d762df9c02a47f9de..02751ec695c596eb68fc558ab0514c39ab0943c8 100644 (file)
@@ -196,7 +196,7 @@ static void v2r0_mem2diskdqb(void *dp, struct dquot *dquot)
        struct v2r0_disk_dqblk *d = dp;
        struct mem_dqblk *m = &dquot->dq_dqb;
        struct qtree_mem_dqinfo *info =
-                       sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;
+                       sb_dqinfo(dquot->dq_sb, dquot->dq_id.type)->dqi_priv;
 
        d->dqb_ihardlimit = cpu_to_le32(m->dqb_ihardlimit);
        d->dqb_isoftlimit = cpu_to_le32(m->dqb_isoftlimit);
@@ -206,7 +206,7 @@ static void v2r0_mem2diskdqb(void *dp, struct dquot *dquot)
        d->dqb_bsoftlimit = cpu_to_le32(v2_stoqb(m->dqb_bsoftlimit));
        d->dqb_curspace = cpu_to_le64(m->dqb_curspace);
        d->dqb_btime = cpu_to_le64(m->dqb_btime);
-       d->dqb_id = cpu_to_le32(dquot->dq_id);
+       d->dqb_id = cpu_to_le32(from_kqid(&init_user_ns, dquot->dq_id));
        if (qtree_entry_unused(info, dp))
                d->dqb_itime = cpu_to_le64(1);
 }
@@ -215,11 +215,13 @@ static int v2r0_is_id(void *dp, struct dquot *dquot)
 {
        struct v2r0_disk_dqblk *d = dp;
        struct qtree_mem_dqinfo *info =
-                       sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;
+                       sb_dqinfo(dquot->dq_sb, dquot->dq_id.type)->dqi_priv;
 
        if (qtree_entry_unused(info, dp))
                return 0;
-       return le32_to_cpu(d->dqb_id) == dquot->dq_id;
+       return qid_eq(make_kqid(&init_user_ns, dquot->dq_id.type,
+                               le32_to_cpu(d->dqb_id)),
+                     dquot->dq_id);
 }
 
 static void v2r1_disk2memdqb(struct dquot *dquot, void *dp)
@@ -247,7 +249,7 @@ static void v2r1_mem2diskdqb(void *dp, struct dquot *dquot)
        struct v2r1_disk_dqblk *d = dp;
        struct mem_dqblk *m = &dquot->dq_dqb;
        struct qtree_mem_dqinfo *info =
-                       sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;
+                       sb_dqinfo(dquot->dq_sb, dquot->dq_id.type)->dqi_priv;
 
        d->dqb_ihardlimit = cpu_to_le64(m->dqb_ihardlimit);
        d->dqb_isoftlimit = cpu_to_le64(m->dqb_isoftlimit);
@@ -257,7 +259,7 @@ static void v2r1_mem2diskdqb(void *dp, struct dquot *dquot)
        d->dqb_bsoftlimit = cpu_to_le64(v2_stoqb(m->dqb_bsoftlimit));
        d->dqb_curspace = cpu_to_le64(m->dqb_curspace);
        d->dqb_btime = cpu_to_le64(m->dqb_btime);
-       d->dqb_id = cpu_to_le32(dquot->dq_id);
+       d->dqb_id = cpu_to_le32(from_kqid(&init_user_ns, dquot->dq_id));
        if (qtree_entry_unused(info, dp))
                d->dqb_itime = cpu_to_le64(1);
 }
@@ -266,26 +268,28 @@ static int v2r1_is_id(void *dp, struct dquot *dquot)
 {
        struct v2r1_disk_dqblk *d = dp;
        struct qtree_mem_dqinfo *info =
-                       sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;
+                       sb_dqinfo(dquot->dq_sb, dquot->dq_id.type)->dqi_priv;
 
        if (qtree_entry_unused(info, dp))
                return 0;
-       return le32_to_cpu(d->dqb_id) == dquot->dq_id;
+       return qid_eq(make_kqid(&init_user_ns, dquot->dq_id.type,
+                               le32_to_cpu(d->dqb_id)),
+                     dquot->dq_id);
 }
 
 static int v2_read_dquot(struct dquot *dquot)
 {
-       return qtree_read_dquot(sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv, dquot);
+       return qtree_read_dquot(sb_dqinfo(dquot->dq_sb, dquot->dq_id.type)->dqi_priv, dquot);
 }
 
 static int v2_write_dquot(struct dquot *dquot)
 {
-       return qtree_write_dquot(sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv, dquot);
+       return qtree_write_dquot(sb_dqinfo(dquot->dq_sb, dquot->dq_id.type)->dqi_priv, dquot);
 }
 
 static int v2_release_dquot(struct dquot *dquot)
 {
-       return qtree_release_dquot(sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv, dquot);
+       return qtree_release_dquot(sb_dqinfo(dquot->dq_sb, dquot->dq_id.type)->dqi_priv, dquot);
 }
 
 static int v2_free_file_info(struct super_block *sb, int type)
index 1adfb691e4f152444d8b7a080c1159057bbed9ad..d06534857e9ed1e83563085f2ffdcfff4dc4ac7e 100644 (file)
@@ -232,23 +232,18 @@ EXPORT_SYMBOL(vfs_llseek);
 SYSCALL_DEFINE3(lseek, unsigned int, fd, off_t, offset, unsigned int, origin)
 {
        off_t retval;
-       struct file * file;
-       int fput_needed;
-
-       retval = -EBADF;
-       file = fget_light(fd, &fput_needed);
-       if (!file)
-               goto bad;
+       struct fd f = fdget(fd);
+       if (!f.file)
+               return -EBADF;
 
        retval = -EINVAL;
        if (origin <= SEEK_MAX) {
-               loff_t res = vfs_llseek(file, offset, origin);
+               loff_t res = vfs_llseek(f.file, offset, origin);
                retval = res;
                if (res != (loff_t)retval)
                        retval = -EOVERFLOW;    /* LFS: should only happen on 32 bit platforms */
        }
-       fput_light(file, fput_needed);
-bad:
+       fdput(f);
        return retval;
 }
 
@@ -258,20 +253,17 @@ SYSCALL_DEFINE5(llseek, unsigned int, fd, unsigned long, offset_high,
                unsigned int, origin)
 {
        int retval;
-       struct file * file;
+       struct fd f = fdget(fd);
        loff_t offset;
-       int fput_needed;
 
-       retval = -EBADF;
-       file = fget_light(fd, &fput_needed);
-       if (!file)
-               goto bad;
+       if (!f.file)
+               return -EBADF;
 
        retval = -EINVAL;
        if (origin > SEEK_MAX)
                goto out_putf;
 
-       offset = vfs_llseek(file, ((loff_t) offset_high << 32) | offset_low,
+       offset = vfs_llseek(f.file, ((loff_t) offset_high << 32) | offset_low,
                        origin);
 
        retval = (int)offset;
@@ -281,8 +273,7 @@ SYSCALL_DEFINE5(llseek, unsigned int, fd, unsigned long, offset_high,
                        retval = 0;
        }
 out_putf:
-       fput_light(file, fput_needed);
-bad:
+       fdput(f);
        return retval;
 }
 #endif
@@ -461,34 +452,29 @@ static inline void file_pos_write(struct file *file, loff_t pos)
 
 SYSCALL_DEFINE3(read, unsigned int, fd, char __user *, buf, size_t, count)
 {
-       struct file *file;
+       struct fd f = fdget(fd);
        ssize_t ret = -EBADF;
-       int fput_needed;
 
-       file = fget_light(fd, &fput_needed);
-       if (file) {
-               loff_t pos = file_pos_read(file);
-               ret = vfs_read(file, buf, count, &pos);
-               file_pos_write(file, pos);
-               fput_light(file, fput_needed);
+       if (f.file) {
+               loff_t pos = file_pos_read(f.file);
+               ret = vfs_read(f.file, buf, count, &pos);
+               file_pos_write(f.file, pos);
+               fdput(f);
        }
-
        return ret;
 }
 
 SYSCALL_DEFINE3(write, unsigned int, fd, const char __user *, buf,
                size_t, count)
 {
-       struct file *file;
+       struct fd f = fdget(fd);
        ssize_t ret = -EBADF;
-       int fput_needed;
 
-       file = fget_light(fd, &fput_needed);
-       if (file) {
-               loff_t pos = file_pos_read(file);
-               ret = vfs_write(file, buf, count, &pos);
-               file_pos_write(file, pos);
-               fput_light(file, fput_needed);
+       if (f.file) {
+               loff_t pos = file_pos_read(f.file);
+               ret = vfs_write(f.file, buf, count, &pos);
+               file_pos_write(f.file, pos);
+               fdput(f);
        }
 
        return ret;
@@ -497,19 +483,18 @@ SYSCALL_DEFINE3(write, unsigned int, fd, const char __user *, buf,
 SYSCALL_DEFINE(pread64)(unsigned int fd, char __user *buf,
                        size_t count, loff_t pos)
 {
-       struct file *file;
+       struct fd f;
        ssize_t ret = -EBADF;
-       int fput_needed;
 
        if (pos < 0)
                return -EINVAL;
 
-       file = fget_light(fd, &fput_needed);
-       if (file) {
+       f = fdget(fd);
+       if (f.file) {
                ret = -ESPIPE;
-               if (file->f_mode & FMODE_PREAD)
-                       ret = vfs_read(file, buf, count, &pos);
-               fput_light(file, fput_needed);
+               if (f.file->f_mode & FMODE_PREAD)
+                       ret = vfs_read(f.file, buf, count, &pos);
+               fdput(f);
        }
 
        return ret;
@@ -526,19 +511,18 @@ SYSCALL_ALIAS(sys_pread64, SyS_pread64);
 SYSCALL_DEFINE(pwrite64)(unsigned int fd, const char __user *buf,
                         size_t count, loff_t pos)
 {
-       struct file *file;
+       struct fd f;
        ssize_t ret = -EBADF;
-       int fput_needed;
 
        if (pos < 0)
                return -EINVAL;
 
-       file = fget_light(fd, &fput_needed);
-       if (file) {
+       f = fdget(fd);
+       if (f.file) {
                ret = -ESPIPE;
-               if (file->f_mode & FMODE_PWRITE)  
-                       ret = vfs_write(file, buf, count, &pos);
-               fput_light(file, fput_needed);
+               if (f.file->f_mode & FMODE_PWRITE)  
+                       ret = vfs_write(f.file, buf, count, &pos);
+               fdput(f);
        }
 
        return ret;
@@ -789,16 +773,14 @@ EXPORT_SYMBOL(vfs_writev);
 SYSCALL_DEFINE3(readv, unsigned long, fd, const struct iovec __user *, vec,
                unsigned long, vlen)
 {
-       struct file *file;
+       struct fd f = fdget(fd);
        ssize_t ret = -EBADF;
-       int fput_needed;
 
-       file = fget_light(fd, &fput_needed);
-       if (file) {
-               loff_t pos = file_pos_read(file);
-               ret = vfs_readv(file, vec, vlen, &pos);
-               file_pos_write(file, pos);
-               fput_light(file, fput_needed);
+       if (f.file) {
+               loff_t pos = file_pos_read(f.file);
+               ret = vfs_readv(f.file, vec, vlen, &pos);
+               file_pos_write(f.file, pos);
+               fdput(f);
        }
 
        if (ret > 0)
@@ -810,16 +792,14 @@ SYSCALL_DEFINE3(readv, unsigned long, fd, const struct iovec __user *, vec,
 SYSCALL_DEFINE3(writev, unsigned long, fd, const struct iovec __user *, vec,
                unsigned long, vlen)
 {
-       struct file *file;
+       struct fd f = fdget(fd);
        ssize_t ret = -EBADF;
-       int fput_needed;
 
-       file = fget_light(fd, &fput_needed);
-       if (file) {
-               loff_t pos = file_pos_read(file);
-               ret = vfs_writev(file, vec, vlen, &pos);
-               file_pos_write(file, pos);
-               fput_light(file, fput_needed);
+       if (f.file) {
+               loff_t pos = file_pos_read(f.file);
+               ret = vfs_writev(f.file, vec, vlen, &pos);
+               file_pos_write(f.file, pos);
+               fdput(f);
        }
 
        if (ret > 0)
@@ -838,19 +818,18 @@ SYSCALL_DEFINE5(preadv, unsigned long, fd, const struct iovec __user *, vec,
                unsigned long, vlen, unsigned long, pos_l, unsigned long, pos_h)
 {
        loff_t pos = pos_from_hilo(pos_h, pos_l);
-       struct file *file;
+       struct fd f;
        ssize_t ret = -EBADF;
-       int fput_needed;
 
        if (pos < 0)
                return -EINVAL;
 
-       file = fget_light(fd, &fput_needed);
-       if (file) {
+       f = fdget(fd);
+       if (f.file) {
                ret = -ESPIPE;
-               if (file->f_mode & FMODE_PREAD)
-                       ret = vfs_readv(file, vec, vlen, &pos);
-               fput_light(file, fput_needed);
+               if (f.file->f_mode & FMODE_PREAD)
+                       ret = vfs_readv(f.file, vec, vlen, &pos);
+               fdput(f);
        }
 
        if (ret > 0)
@@ -863,19 +842,18 @@ SYSCALL_DEFINE5(pwritev, unsigned long, fd, const struct iovec __user *, vec,
                unsigned long, vlen, unsigned long, pos_l, unsigned long, pos_h)
 {
        loff_t pos = pos_from_hilo(pos_h, pos_l);
-       struct file *file;
+       struct fd f;
        ssize_t ret = -EBADF;
-       int fput_needed;
 
        if (pos < 0)
                return -EINVAL;
 
-       file = fget_light(fd, &fput_needed);
-       if (file) {
+       f = fdget(fd);
+       if (f.file) {
                ret = -ESPIPE;
-               if (file->f_mode & FMODE_PWRITE)
-                       ret = vfs_writev(file, vec, vlen, &pos);
-               fput_light(file, fput_needed);
+               if (f.file->f_mode & FMODE_PWRITE)
+                       ret = vfs_writev(f.file, vec, vlen, &pos);
+               fdput(f);
        }
 
        if (ret > 0)
@@ -884,31 +862,31 @@ SYSCALL_DEFINE5(pwritev, unsigned long, fd, const struct iovec __user *, vec,
        return ret;
 }
 
-static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos,
-                          size_t count, loff_t max)
+ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos, size_t count,
+                   loff_t max)
 {
-       struct file * in_file, * out_file;
-       struct inode * in_inode, * out_inode;
+       struct fd in, out;
+       struct inode *in_inode, *out_inode;
        loff_t pos;
        ssize_t retval;
-       int fput_needed_in, fput_needed_out, fl;
+       int fl;
 
        /*
         * Get input file, and verify that it is ok..
         */
        retval = -EBADF;
-       in_file = fget_light(in_fd, &fput_needed_in);
-       if (!in_file)
+       in = fdget(in_fd);
+       if (!in.file)
                goto out;
-       if (!(in_file->f_mode & FMODE_READ))
+       if (!(in.file->f_mode & FMODE_READ))
                goto fput_in;
        retval = -ESPIPE;
        if (!ppos)
-               ppos = &in_file->f_pos;
+               ppos = &in.file->f_pos;
        else
-               if (!(in_file->f_mode & FMODE_PREAD))
+               if (!(in.file->f_mode & FMODE_PREAD))
                        goto fput_in;
-       retval = rw_verify_area(READ, in_file, ppos, count);
+       retval = rw_verify_area(READ, in.file, ppos, count);
        if (retval < 0)
                goto fput_in;
        count = retval;
@@ -917,15 +895,15 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos,
         * Get output file, and verify that it is ok..
         */
        retval = -EBADF;
-       out_file = fget_light(out_fd, &fput_needed_out);
-       if (!out_file)
+       out = fdget(out_fd);
+       if (!out.file)
                goto fput_in;
-       if (!(out_file->f_mode & FMODE_WRITE))
+       if (!(out.file->f_mode & FMODE_WRITE))
                goto fput_out;
        retval = -EINVAL;
-       in_inode = in_file->f_path.dentry->d_inode;
-       out_inode = out_file->f_path.dentry->d_inode;
-       retval = rw_verify_area(WRITE, out_file, &out_file->f_pos, count);
+       in_inode = in.file->f_path.dentry->d_inode;
+       out_inode = out.file->f_path.dentry->d_inode;
+       retval = rw_verify_area(WRITE, out.file, &out.file->f_pos, count);
        if (retval < 0)
                goto fput_out;
        count = retval;
@@ -949,10 +927,10 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos,
         * and the application is arguably buggy if it doesn't expect
         * EAGAIN on a non-blocking file descriptor.
         */
-       if (in_file->f_flags & O_NONBLOCK)
+       if (in.file->f_flags & O_NONBLOCK)
                fl = SPLICE_F_NONBLOCK;
 #endif
-       retval = do_splice_direct(in_file, ppos, out_file, count, fl);
+       retval = do_splice_direct(in.file, ppos, out.file, count, fl);
 
        if (retval > 0) {
                add_rchar(current, retval);
@@ -965,9 +943,9 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos,
                retval = -EOVERFLOW;
 
 fput_out:
-       fput_light(out_file, fput_needed_out);
+       fdput(out);
 fput_in:
-       fput_light(in_file, fput_needed_in);
+       fdput(in);
 out:
        return retval;
 }
index d07b954c6e0c39afc06f99b40eb5bbcb2f0cc60b..d3e00ef674203930b0a0d190fb6b10a0d12b6032 100644 (file)
@@ -12,3 +12,5 @@ ssize_t do_sync_readv_writev(struct file *filp, const struct iovec *iov,
                unsigned long nr_segs, size_t len, loff_t *ppos, iov_fn_t fn);
 ssize_t do_loop_readv_writev(struct file *filp, struct iovec *iov,
                unsigned long nr_segs, loff_t *ppos, io_fn_t fn);
+ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos, size_t count,
+                   loff_t max);
index 39e3370d79cf1e6399843137e2d64165baf49a03..5e69ef533b77bcda920dc79be3de68c8a714d990 100644 (file)
@@ -106,22 +106,20 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
                struct old_linux_dirent __user *, dirent, unsigned int, count)
 {
        int error;
-       struct file * file;
+       struct fd f = fdget(fd);
        struct readdir_callback buf;
-       int fput_needed;
 
-       file = fget_light(fd, &fput_needed);
-       if (!file)
+       if (!f.file)
                return -EBADF;
 
        buf.result = 0;
        buf.dirent = dirent;
 
-       error = vfs_readdir(file, fillonedir, &buf);
+       error = vfs_readdir(f.file, fillonedir, &buf);
        if (buf.result)
                error = buf.result;
 
-       fput_light(file, fput_needed);
+       fdput(f);
        return error;
 }
 
@@ -191,17 +189,16 @@ efault:
 SYSCALL_DEFINE3(getdents, unsigned int, fd,
                struct linux_dirent __user *, dirent, unsigned int, count)
 {
-       struct file * file;
+       struct fd f;
        struct linux_dirent __user * lastdirent;
        struct getdents_callback buf;
-       int fput_needed;
        int error;
 
        if (!access_ok(VERIFY_WRITE, dirent, count))
                return -EFAULT;
 
-       file = fget_light(fd, &fput_needed);
-       if (!file)
+       f = fdget(fd);
+       if (!f.file)
                return -EBADF;
 
        buf.current_dir = dirent;
@@ -209,17 +206,17 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
        buf.count = count;
        buf.error = 0;
 
-       error = vfs_readdir(file, filldir, &buf);
+       error = vfs_readdir(f.file, filldir, &buf);
        if (error >= 0)
                error = buf.error;
        lastdirent = buf.previous;
        if (lastdirent) {
-               if (put_user(file->f_pos, &lastdirent->d_off))
+               if (put_user(f.file->f_pos, &lastdirent->d_off))
                        error = -EFAULT;
                else
                        error = count - buf.count;
        }
-       fput_light(file, fput_needed);
+       fdput(f);
        return error;
 }
 
@@ -272,17 +269,16 @@ efault:
 SYSCALL_DEFINE3(getdents64, unsigned int, fd,
                struct linux_dirent64 __user *, dirent, unsigned int, count)
 {
-       struct file * file;
+       struct fd f;
        struct linux_dirent64 __user * lastdirent;
        struct getdents_callback64 buf;
-       int fput_needed;
        int error;
 
        if (!access_ok(VERIFY_WRITE, dirent, count))
                return -EFAULT;
 
-       file = fget_light(fd, &fput_needed);
-       if (!file)
+       f = fdget(fd);
+       if (!f.file)
                return -EBADF;
 
        buf.current_dir = dirent;
@@ -290,17 +286,17 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
        buf.count = count;
        buf.error = 0;
 
-       error = vfs_readdir(file, filldir64, &buf);
+       error = vfs_readdir(f.file, filldir64, &buf);
        if (error >= 0)
                error = buf.error;
        lastdirent = buf.previous;
        if (lastdirent) {
-               typeof(lastdirent->d_off) d_off = file->f_pos;
+               typeof(lastdirent->d_off) d_off = f.file->f_pos;
                if (__put_user(d_off, &lastdirent->d_off))
                        error = -EFAULT;
                else
                        error = count - buf.count;
        }
-       fput_light(file, fput_needed);
+       fdput(f);
        return error;
 }
index 855da58db1456b94d43715bb4a28bbc5f982a8d7..46485557cdc63b037994c05e82735605bbb4069a 100644 (file)
@@ -1155,8 +1155,8 @@ static void init_inode(struct inode *inode, struct treepath *path)
                set_inode_sd_version(inode, STAT_DATA_V1);
                inode->i_mode = sd_v1_mode(sd);
                set_nlink(inode, sd_v1_nlink(sd));
-               inode->i_uid = sd_v1_uid(sd);
-               inode->i_gid = sd_v1_gid(sd);
+               i_uid_write(inode, sd_v1_uid(sd));
+               i_gid_write(inode, sd_v1_gid(sd));
                inode->i_size = sd_v1_size(sd);
                inode->i_atime.tv_sec = sd_v1_atime(sd);
                inode->i_mtime.tv_sec = sd_v1_mtime(sd);
@@ -1200,9 +1200,9 @@ static void init_inode(struct inode *inode, struct treepath *path)
 
                inode->i_mode = sd_v2_mode(sd);
                set_nlink(inode, sd_v2_nlink(sd));
-               inode->i_uid = sd_v2_uid(sd);
+               i_uid_write(inode, sd_v2_uid(sd));
                inode->i_size = sd_v2_size(sd);
-               inode->i_gid = sd_v2_gid(sd);
+               i_gid_write(inode, sd_v2_gid(sd));
                inode->i_mtime.tv_sec = sd_v2_mtime(sd);
                inode->i_atime.tv_sec = sd_v2_atime(sd);
                inode->i_ctime.tv_sec = sd_v2_ctime(sd);
@@ -1258,9 +1258,9 @@ static void inode2sd(void *sd, struct inode *inode, loff_t size)
 
        set_sd_v2_mode(sd_v2, inode->i_mode);
        set_sd_v2_nlink(sd_v2, inode->i_nlink);
-       set_sd_v2_uid(sd_v2, inode->i_uid);
+       set_sd_v2_uid(sd_v2, i_uid_read(inode));
        set_sd_v2_size(sd_v2, size);
-       set_sd_v2_gid(sd_v2, inode->i_gid);
+       set_sd_v2_gid(sd_v2, i_gid_read(inode));
        set_sd_v2_mtime(sd_v2, inode->i_mtime.tv_sec);
        set_sd_v2_atime(sd_v2, inode->i_atime.tv_sec);
        set_sd_v2_ctime(sd_v2, inode->i_ctime.tv_sec);
@@ -1280,8 +1280,8 @@ static void inode2sd_v1(void *sd, struct inode *inode, loff_t size)
        struct stat_data_v1 *sd_v1 = (struct stat_data_v1 *)sd;
 
        set_sd_v1_mode(sd_v1, inode->i_mode);
-       set_sd_v1_uid(sd_v1, inode->i_uid);
-       set_sd_v1_gid(sd_v1, inode->i_gid);
+       set_sd_v1_uid(sd_v1, i_uid_read(inode));
+       set_sd_v1_gid(sd_v1, i_gid_read(inode));
        set_sd_v1_nlink(sd_v1, inode->i_nlink);
        set_sd_v1_size(sd_v1, size);
        set_sd_v1_atime(sd_v1, inode->i_atime.tv_sec);
@@ -1869,7 +1869,7 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
                goto out_bad_inode;
        }
        if (old_format_only(sb)) {
-               if (inode->i_uid & ~0xffff || inode->i_gid & ~0xffff) {
+               if (i_uid_read(inode) & ~0xffff || i_gid_read(inode) & ~0xffff) {
                        pathrelse(&path_to_key);
                        /* i_uid or i_gid is too big to be stored in stat data v3.5 */
                        err = -EINVAL;
@@ -3140,16 +3140,16 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
                }
        }
 
-       if ((((attr->ia_valid & ATTR_UID) && (attr->ia_uid & ~0xffff)) ||
-            ((attr->ia_valid & ATTR_GID) && (attr->ia_gid & ~0xffff))) &&
+       if ((((attr->ia_valid & ATTR_UID) && (from_kuid(&init_user_ns, attr->ia_uid) & ~0xffff)) ||
+            ((attr->ia_valid & ATTR_GID) && (from_kgid(&init_user_ns, attr->ia_gid) & ~0xffff))) &&
            (get_inode_sd_version(inode) == STAT_DATA_V1)) {
                /* stat data of format v3.5 has 16 bit uid and gid */
                error = -EINVAL;
                goto out;
        }
 
-       if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
-           (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
+       if ((ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) ||
+           (ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) {
                struct reiserfs_transaction_handle th;
                int jbegin_count =
                    2 *
index 7a37dabf5a968b7c8977c2028665f58554987912..1078ae179993bb12f105d39fb1d847eb84c12414 100644 (file)
@@ -608,6 +608,11 @@ static int init_inodecache(void)
 
 static void destroy_inodecache(void)
 {
+       /*
+        * Make sure all delayed rcu free inodes are flushed before we
+        * destroy cache.
+        */
+       rcu_barrier();
        kmem_cache_destroy(reiserfs_inode_cachep);
 }
 
index 44474f9b990db532124d29460983858465186e33..d7c01ef64edab42ed59d022a2cf36f917e1935c0 100644 (file)
@@ -30,7 +30,7 @@ posix_acl_set(struct dentry *dentry, const char *name, const void *value,
                return -EPERM;
 
        if (value) {
-               acl = posix_acl_from_xattr(value, size);
+               acl = posix_acl_from_xattr(&init_user_ns, value, size);
                if (IS_ERR(acl)) {
                        return PTR_ERR(acl);
                } else if (acl) {
@@ -77,7 +77,7 @@ posix_acl_get(struct dentry *dentry, const char *name, void *buffer,
                return PTR_ERR(acl);
        if (acl == NULL)
                return -ENODATA;
-       error = posix_acl_to_xattr(acl, buffer, size);
+       error = posix_acl_to_xattr(&init_user_ns, acl, buffer, size);
        posix_acl_release(acl);
 
        return error;
@@ -121,15 +121,23 @@ static struct posix_acl *posix_acl_from_disk(const void *value, size_t size)
                case ACL_OTHER:
                        value = (char *)value +
                            sizeof(reiserfs_acl_entry_short);
-                       acl->a_entries[n].e_id = ACL_UNDEFINED_ID;
                        break;
 
                case ACL_USER:
+                       value = (char *)value + sizeof(reiserfs_acl_entry);
+                       if ((char *)value > end)
+                               goto fail;
+                       acl->a_entries[n].e_uid =
+                               make_kuid(&init_user_ns,
+                                         le32_to_cpu(entry->e_id));
+                       break;
                case ACL_GROUP:
                        value = (char *)value + sizeof(reiserfs_acl_entry);
                        if ((char *)value > end)
                                goto fail;
-                       acl->a_entries[n].e_id = le32_to_cpu(entry->e_id);
+                       acl->a_entries[n].e_gid =
+                               make_kgid(&init_user_ns,
+                                         le32_to_cpu(entry->e_id));
                        break;
 
                default:
@@ -164,13 +172,19 @@ static void *posix_acl_to_disk(const struct posix_acl *acl, size_t * size)
        ext_acl->a_version = cpu_to_le32(REISERFS_ACL_VERSION);
        e = (char *)ext_acl + sizeof(reiserfs_acl_header);
        for (n = 0; n < acl->a_count; n++) {
+               const struct posix_acl_entry *acl_e = &acl->a_entries[n];
                reiserfs_acl_entry *entry = (reiserfs_acl_entry *) e;
                entry->e_tag = cpu_to_le16(acl->a_entries[n].e_tag);
                entry->e_perm = cpu_to_le16(acl->a_entries[n].e_perm);
                switch (acl->a_entries[n].e_tag) {
                case ACL_USER:
+                       entry->e_id = cpu_to_le32(
+                               from_kuid(&init_user_ns, acl_e->e_uid));
+                       e += sizeof(reiserfs_acl_entry);
+                       break;
                case ACL_GROUP:
-                       entry->e_id = cpu_to_le32(acl->a_entries[n].e_id);
+                       entry->e_id = cpu_to_le32(
+                               from_kgid(&init_user_ns, acl_e->e_gid));
                        e += sizeof(reiserfs_acl_entry);
                        break;
 
index 77c5f21739837753efdfed09e806e32d1376a219..fd7c5f60b46b84f9cdcba814c2c5e803ef02daa2 100644 (file)
@@ -648,6 +648,11 @@ error_register:
 static void __exit exit_romfs_fs(void)
 {
        unregister_filesystem(&romfs_fs_type);
+       /*
+        * Make sure all delayed rcu free inodes are flushed before we
+        * destroy cache.
+        */
+       rcu_barrier();
        kmem_cache_destroy(romfs_inode_cachep);
 }
 
index db14c781335e32c16d0d773c5ca7da53e5f771a4..2ef72d9650365c71586592a9c456785b7094a59b 100644 (file)
@@ -220,8 +220,7 @@ static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
        struct poll_table_entry *entry = poll_get_entry(pwq);
        if (!entry)
                return;
-       get_file(filp);
-       entry->filp = filp;
+       entry->filp = get_file(filp);
        entry->wait_address = wait_address;
        entry->key = p->_key;
        init_waitqueue_func_entry(&entry->wait, pollwake);
@@ -429,8 +428,6 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
                for (i = 0; i < n; ++rinp, ++routp, ++rexp) {
                        unsigned long in, out, ex, all_bits, bit = 1, mask, j;
                        unsigned long res_in = 0, res_out = 0, res_ex = 0;
-                       const struct file_operations *f_op = NULL;
-                       struct file *file = NULL;
 
                        in = *inp++; out = *outp++; ex = *exp++;
                        all_bits = in | out | ex;
@@ -440,20 +437,21 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
                        }
 
                        for (j = 0; j < BITS_PER_LONG; ++j, ++i, bit <<= 1) {
-                               int fput_needed;
+                               struct fd f;
                                if (i >= n)
                                        break;
                                if (!(bit & all_bits))
                                        continue;
-                               file = fget_light(i, &fput_needed);
-                               if (file) {
-                                       f_op = file->f_op;
+                               f = fdget(i);
+                               if (f.file) {
+                                       const struct file_operations *f_op;
+                                       f_op = f.file->f_op;
                                        mask = DEFAULT_POLLMASK;
                                        if (f_op && f_op->poll) {
                                                wait_key_set(wait, in, out, bit);
-                                               mask = (*f_op->poll)(file, wait);
+                                               mask = (*f_op->poll)(f.file, wait);
                                        }
-                                       fput_light(file, fput_needed);
+                                       fdput(f);
                                        if ((mask & POLLIN_SET) && (in & bit)) {
                                                res_in |= bit;
                                                retval++;
@@ -726,20 +724,17 @@ static inline unsigned int do_pollfd(struct pollfd *pollfd, poll_table *pwait)
        mask = 0;
        fd = pollfd->fd;
        if (fd >= 0) {
-               int fput_needed;
-               struct file * file;
-
-               file = fget_light(fd, &fput_needed);
+               struct fd f = fdget(fd);
                mask = POLLNVAL;
-               if (file != NULL) {
+               if (f.file) {
                        mask = DEFAULT_POLLMASK;
-                       if (file->f_op && file->f_op->poll) {
+                       if (f.file->f_op && f.file->f_op->poll) {
                                pwait->_key = pollfd->events|POLLERR|POLLHUP;
-                               mask = file->f_op->poll(file, pwait);
+                               mask = f.file->f_op->poll(f.file, pwait);
                        }
                        /* Mask out unneeded events. */
                        mask &= pollfd->events | POLLERR | POLLHUP;
-                       fput_light(file, fput_needed);
+                       fdput(f);
                }
        }
        pollfd->revents = mask;
index 14cf9de1dbe1751645ea1734d20808c9df6c398e..99dffab4c4e43a900121f7b8c1b8cb3e7ac147c5 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/export.h>
 #include <linux/seq_file.h>
 #include <linux/slab.h>
+#include <linux/cred.h>
 
 #include <asm/uaccess.h>
 #include <asm/page.h>
@@ -56,6 +57,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
        memset(p, 0, sizeof(*p));
        mutex_init(&p->lock);
        p->op = op;
+#ifdef CONFIG_USER_NS
+       p->user_ns = file->f_cred->user_ns;
+#endif
 
        /*
         * Wrappers around seq_open(e.g. swaps_open) need to be
index 9f35a37173de0de1f7fbd8d80ca8ad39b50e3782..8bee4e57091183fbe98a0af52915b66f69821d2c 100644 (file)
@@ -269,13 +269,12 @@ SYSCALL_DEFINE4(signalfd4, int, ufd, sigset_t __user *, user_mask,
                if (ufd < 0)
                        kfree(ctx);
        } else {
-               int fput_needed;
-               struct file *file = fget_light(ufd, &fput_needed);
-               if (!file)
+               struct fd f = fdget(ufd);
+               if (!f.file)
                        return -EBADF;
-               ctx = file->private_data;
-               if (file->f_op != &signalfd_fops) {
-                       fput_light(file, fput_needed);
+               ctx = f.file->private_data;
+               if (f.file->f_op != &signalfd_fops) {
+                       fdput(f);
                        return -EINVAL;
                }
                spin_lock_irq(&current->sighand->siglock);
@@ -283,7 +282,7 @@ SYSCALL_DEFINE4(signalfd4, int, ufd, sigset_t __user *, user_mask,
                spin_unlock_irq(&current->sighand->siglock);
 
                wake_up(&current->sighand->signalfd_wqh);
-               fput_light(file, fput_needed);
+               fdput(f);
        }
 
        return ufd;
index 41514dd89462d73d4d1c7b8a68e431d5cabc08a9..13e5b4776e7aade28b0f2161b4eb9a27a8030f31 100644 (file)
@@ -1666,9 +1666,8 @@ static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
 SYSCALL_DEFINE4(vmsplice, int, fd, const struct iovec __user *, iov,
                unsigned long, nr_segs, unsigned int, flags)
 {
-       struct file *file;
+       struct fd f;
        long error;
-       int fput;
 
        if (unlikely(nr_segs > UIO_MAXIOV))
                return -EINVAL;
@@ -1676,14 +1675,14 @@ SYSCALL_DEFINE4(vmsplice, int, fd, const struct iovec __user *, iov,
                return 0;
 
        error = -EBADF;
-       file = fget_light(fd, &fput);
-       if (file) {
-               if (file->f_mode & FMODE_WRITE)
-                       error = vmsplice_to_pipe(file, iov, nr_segs, flags);
-               else if (file->f_mode & FMODE_READ)
-                       error = vmsplice_to_user(file, iov, nr_segs, flags);
-
-               fput_light(file, fput);
+       f = fdget(fd);
+       if (f.file) {
+               if (f.file->f_mode & FMODE_WRITE)
+                       error = vmsplice_to_pipe(f.file, iov, nr_segs, flags);
+               else if (f.file->f_mode & FMODE_READ)
+                       error = vmsplice_to_user(f.file, iov, nr_segs, flags);
+
+               fdput(f);
        }
 
        return error;
@@ -1693,30 +1692,27 @@ SYSCALL_DEFINE6(splice, int, fd_in, loff_t __user *, off_in,
                int, fd_out, loff_t __user *, off_out,
                size_t, len, unsigned int, flags)
 {
+       struct fd in, out;
        long error;
-       struct file *in, *out;
-       int fput_in, fput_out;
 
        if (unlikely(!len))
                return 0;
 
        error = -EBADF;
-       in = fget_light(fd_in, &fput_in);
-       if (in) {
-               if (in->f_mode & FMODE_READ) {
-                       out = fget_light(fd_out, &fput_out);
-                       if (out) {
-                               if (out->f_mode & FMODE_WRITE)
-                                       error = do_splice(in, off_in,
-                                                         out, off_out,
+       in = fdget(fd_in);
+       if (in.file) {
+               if (in.file->f_mode & FMODE_READ) {
+                       out = fdget(fd_out);
+                       if (out.file) {
+                               if (out.file->f_mode & FMODE_WRITE)
+                                       error = do_splice(in.file, off_in,
+                                                         out.file, off_out,
                                                          len, flags);
-                               fput_light(out, fput_out);
+                               fdput(out);
                        }
                }
-
-               fput_light(in, fput_in);
+               fdput(in);
        }
-
        return error;
 }
 
@@ -2027,26 +2023,25 @@ static long do_tee(struct file *in, struct file *out, size_t len,
 
 SYSCALL_DEFINE4(tee, int, fdin, int, fdout, size_t, len, unsigned int, flags)
 {
-       struct file *in;
-       int error, fput_in;
+       struct fd in;
+       int error;
 
        if (unlikely(!len))
                return 0;
 
        error = -EBADF;
-       in = fget_light(fdin, &fput_in);
-       if (in) {
-               if (in->f_mode & FMODE_READ) {
-                       int fput_out;
-                       struct file *out = fget_light(fdout, &fput_out);
-
-                       if (out) {
-                               if (out->f_mode & FMODE_WRITE)
-                                       error = do_tee(in, out, len, flags);
-                               fput_light(out, fput_out);
+       in = fdget(fdin);
+       if (in.file) {
+               if (in.file->f_mode & FMODE_READ) {
+                       struct fd out = fdget(fdout);
+                       if (out.file) {
+                               if (out.file->f_mode & FMODE_WRITE)
+                                       error = do_tee(in.file, out.file,
+                                                       len, flags);
+                               fdput(out);
                        }
                }
-               fput_light(in, fput_in);
+               fdput(in);
        }
 
        return error;
index 81afbccfa8432ba49a644a16c2c0a0526da31ea5..a1ce5ce60632e892fc0f17859e99963eb1a67235 100644 (file)
 static int squashfs_new_inode(struct super_block *sb, struct inode *inode,
                                struct squashfs_base_inode *sqsh_ino)
 {
+       uid_t i_uid;
+       gid_t i_gid;
        int err;
 
-       err = squashfs_get_id(sb, le16_to_cpu(sqsh_ino->uid), &inode->i_uid);
+       err = squashfs_get_id(sb, le16_to_cpu(sqsh_ino->uid), &i_uid);
        if (err)
                return err;
 
-       err = squashfs_get_id(sb, le16_to_cpu(sqsh_ino->guid), &inode->i_gid);
+       err = squashfs_get_id(sb, le16_to_cpu(sqsh_ino->guid), &i_gid);
        if (err)
                return err;
 
+       i_uid_write(inode, i_uid);
+       i_gid_write(inode, i_gid);
        inode->i_ino = le32_to_cpu(sqsh_ino->inode_number);
        inode->i_mtime.tv_sec = le32_to_cpu(sqsh_ino->mtime);
        inode->i_atime.tv_sec = inode->i_mtime.tv_sec;
index 29cd014ed3a17a776db73b4c1e492f44d78cba4e..260e3928d4f52bb94076e0025f5fd8bef08bb1c3 100644 (file)
@@ -425,6 +425,11 @@ static int __init init_inodecache(void)
 
 static void destroy_inodecache(void)
 {
+       /*
+        * Make sure all delayed rcu free inodes are flushed before we
+        * destroy cache.
+        */
+       rcu_barrier();
        kmem_cache_destroy(squashfs_inode_cachep);
 }
 
index 208039eec6c79b3123de706942d596afc5a6a239..eae494630a36507e03defe8bdf836ee6b337b4ee 100644 (file)
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -57,13 +57,13 @@ EXPORT_SYMBOL(vfs_getattr);
 
 int vfs_fstat(unsigned int fd, struct kstat *stat)
 {
-       int fput_needed;
-       struct file *f = fget_raw_light(fd, &fput_needed);
+       struct fd f = fdget_raw(fd);
        int error = -EBADF;
 
-       if (f) {
-               error = vfs_getattr(f->f_path.mnt, f->f_path.dentry, stat);
-               fput_light(f, fput_needed);
+       if (f.file) {
+               error = vfs_getattr(f.file->f_path.mnt, f.file->f_path.dentry,
+                                   stat);
+               fdput(f);
        }
        return error;
 }
index 95ad5c0e586c9f64fe492e141387b5092956d553..f8e832e6f0a2220d0e16163b230ad0f710ee4132 100644 (file)
@@ -87,12 +87,11 @@ int user_statfs(const char __user *pathname, struct kstatfs *st)
 
 int fd_statfs(int fd, struct kstatfs *st)
 {
-       int fput_needed;
-       struct file *file = fget_light(fd, &fput_needed);
+       struct fd f = fdget(fd);
        int error = -EBADF;
-       if (file) {
-               error = vfs_statfs(&file->f_path, st);
-               fput_light(file, fput_needed);
+       if (f.file) {
+               error = vfs_statfs(&f.file->f_path, st);
+               fdput(f);
        }
        return error;
 }
index 0902cfa6a12efd21e4ebd52a39333b7f9d6270eb..5fdf7ff32c4e283e74373bbabe00f9e165d5aa41 100644 (file)
@@ -307,12 +307,6 @@ void deactivate_locked_super(struct super_block *s)
 
                /* caches are now gone, we can safely kill the shrinker now */
                unregister_shrinker(&s->s_shrink);
-
-               /*
-                * We need to call rcu_barrier so all the delayed rcu free
-                * inodes are flushed before we release the fs module.
-                */
-               rcu_barrier();
                put_filesystem(fs);
                put_super(s);
        } else {
index eb8722dc556f5b567c40438919c6ef1ddeef084d..14eefeb44636bd1e24240ae09fe0dd046ed013fa 100644 (file)
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -148,21 +148,19 @@ void emergency_sync(void)
  */
 SYSCALL_DEFINE1(syncfs, int, fd)
 {
-       struct file *file;
+       struct fd f = fdget(fd);
        struct super_block *sb;
        int ret;
-       int fput_needed;
 
-       file = fget_light(fd, &fput_needed);
-       if (!file)
+       if (!f.file)
                return -EBADF;
-       sb = file->f_dentry->d_sb;
+       sb = f.file->f_dentry->d_sb;
 
        down_read(&sb->s_umount);
        ret = sync_filesystem(sb);
        up_read(&sb->s_umount);
 
-       fput_light(file, fput_needed);
+       fdput(f);
        return ret;
 }
 
@@ -201,14 +199,12 @@ EXPORT_SYMBOL(vfs_fsync);
 
 static int do_fsync(unsigned int fd, int datasync)
 {
-       struct file *file;
+       struct fd f = fdget(fd);
        int ret = -EBADF;
-       int fput_needed;
 
-       file = fget_light(fd, &fput_needed);
-       if (file) {
-               ret = vfs_fsync(file, datasync);
-               fput_light(file, fput_needed);
+       if (f.file) {
+               ret = vfs_fsync(f.file, datasync);
+               fdput(f);
        }
        return ret;
 }
@@ -291,10 +287,9 @@ SYSCALL_DEFINE(sync_file_range)(int fd, loff_t offset, loff_t nbytes,
                                unsigned int flags)
 {
        int ret;
-       struct file *file;
+       struct fd f;
        struct address_space *mapping;
        loff_t endbyte;                 /* inclusive */
-       int fput_needed;
        umode_t i_mode;
 
        ret = -EINVAL;
@@ -333,17 +328,17 @@ SYSCALL_DEFINE(sync_file_range)(int fd, loff_t offset, loff_t nbytes,
                endbyte--;              /* inclusive */
 
        ret = -EBADF;
-       file = fget_light(fd, &fput_needed);
-       if (!file)
+       f = fdget(fd);
+       if (!f.file)
                goto out;
 
-       i_mode = file->f_path.dentry->d_inode->i_mode;
+       i_mode = f.file->f_path.dentry->d_inode->i_mode;
        ret = -ESPIPE;
        if (!S_ISREG(i_mode) && !S_ISBLK(i_mode) && !S_ISDIR(i_mode) &&
                        !S_ISLNK(i_mode))
                goto out_put;
 
-       mapping = file->f_mapping;
+       mapping = f.file->f_mapping;
        if (!mapping) {
                ret = -EINVAL;
                goto out_put;
@@ -366,7 +361,7 @@ SYSCALL_DEFINE(sync_file_range)(int fd, loff_t offset, loff_t nbytes,
                ret = filemap_fdatawait_range(mapping, offset, endbyte);
 
 out_put:
-       fput_light(file, fput_needed);
+       fdput(f);
 out:
        return ret;
 }
index 80e1e2b18df17f3537050cd7557955a1c1cd6b49..d33e506c1eacabcebfd354f6ceedf52a3111a6d7 100644 (file)
@@ -202,8 +202,8 @@ struct inode *sysv_iget(struct super_block *sb, unsigned int ino)
        }
        /* SystemV FS: kludge permissions if ino==SYSV_ROOT_INO ?? */
        inode->i_mode = fs16_to_cpu(sbi, raw_inode->i_mode);
-       inode->i_uid = (uid_t)fs16_to_cpu(sbi, raw_inode->i_uid);
-       inode->i_gid = (gid_t)fs16_to_cpu(sbi, raw_inode->i_gid);
+       i_uid_write(inode, (uid_t)fs16_to_cpu(sbi, raw_inode->i_uid));
+       i_gid_write(inode, (gid_t)fs16_to_cpu(sbi, raw_inode->i_gid));
        set_nlink(inode, fs16_to_cpu(sbi, raw_inode->i_nlink));
        inode->i_size = fs32_to_cpu(sbi, raw_inode->i_size);
        inode->i_atime.tv_sec = fs32_to_cpu(sbi, raw_inode->i_atime);
@@ -256,8 +256,8 @@ static int __sysv_write_inode(struct inode *inode, int wait)
        }
 
        raw_inode->i_mode = cpu_to_fs16(sbi, inode->i_mode);
-       raw_inode->i_uid = cpu_to_fs16(sbi, fs_high2lowuid(inode->i_uid));
-       raw_inode->i_gid = cpu_to_fs16(sbi, fs_high2lowgid(inode->i_gid));
+       raw_inode->i_uid = cpu_to_fs16(sbi, fs_high2lowuid(i_uid_read(inode)));
+       raw_inode->i_gid = cpu_to_fs16(sbi, fs_high2lowgid(i_gid_read(inode)));
        raw_inode->i_nlink = cpu_to_fs16(sbi, inode->i_nlink);
        raw_inode->i_size = cpu_to_fs32(sbi, inode->i_size);
        raw_inode->i_atime = cpu_to_fs32(sbi, inode->i_atime.tv_sec);
@@ -360,5 +360,10 @@ int __init sysv_init_icache(void)
 
 void sysv_destroy_icache(void)
 {
+       /*
+        * Make sure all delayed rcu free inodes are flushed before we
+        * destroy cache.
+        */
+       rcu_barrier();
        kmem_cache_destroy(sysv_inode_cachep);
 }
index dffeb3795af1d4204f8554447dbb2d2c33992429..d03822bbf1909080ff201acc372de185d10d50d5 100644 (file)
@@ -234,19 +234,17 @@ static const struct file_operations timerfd_fops = {
        .llseek         = noop_llseek,
 };
 
-static struct file *timerfd_fget(int fd)
+static int timerfd_fget(int fd, struct fd *p)
 {
-       struct file *file;
-
-       file = fget(fd);
-       if (!file)
-               return ERR_PTR(-EBADF);
-       if (file->f_op != &timerfd_fops) {
-               fput(file);
-               return ERR_PTR(-EINVAL);
+       struct fd f = fdget(fd);
+       if (!f.file)
+               return -EBADF;
+       if (f.file->f_op != &timerfd_fops) {
+               fdput(f);
+               return -EINVAL;
        }
-
-       return file;
+       *p = f;
+       return 0;
 }
 
 SYSCALL_DEFINE2(timerfd_create, int, clockid, int, flags)
@@ -284,7 +282,7 @@ SYSCALL_DEFINE4(timerfd_settime, int, ufd, int, flags,
                const struct itimerspec __user *, utmr,
                struct itimerspec __user *, otmr)
 {
-       struct file *file;
+       struct fd f;
        struct timerfd_ctx *ctx;
        struct itimerspec ktmr, kotmr;
        int ret;
@@ -297,10 +295,10 @@ SYSCALL_DEFINE4(timerfd_settime, int, ufd, int, flags,
            !timespec_valid(&ktmr.it_interval))
                return -EINVAL;
 
-       file = timerfd_fget(ufd);
-       if (IS_ERR(file))
-               return PTR_ERR(file);
-       ctx = file->private_data;
+       ret = timerfd_fget(ufd, &f);
+       if (ret)
+               return ret;
+       ctx = f.file->private_data;
 
        timerfd_setup_cancel(ctx, flags);
 
@@ -334,7 +332,7 @@ SYSCALL_DEFINE4(timerfd_settime, int, ufd, int, flags,
        ret = timerfd_setup(ctx, flags, &ktmr);
 
        spin_unlock_irq(&ctx->wqh.lock);
-       fput(file);
+       fdput(f);
        if (otmr && copy_to_user(otmr, &kotmr, sizeof(kotmr)))
                return -EFAULT;
 
@@ -343,14 +341,13 @@ SYSCALL_DEFINE4(timerfd_settime, int, ufd, int, flags,
 
 SYSCALL_DEFINE2(timerfd_gettime, int, ufd, struct itimerspec __user *, otmr)
 {
-       struct file *file;
+       struct fd f;
        struct timerfd_ctx *ctx;
        struct itimerspec kotmr;
-
-       file = timerfd_fget(ufd);
-       if (IS_ERR(file))
-               return PTR_ERR(file);
-       ctx = file->private_data;
+       int ret = timerfd_fget(ufd, &f);
+       if (ret)
+               return ret;
+       ctx = f.file->private_data;
 
        spin_lock_irq(&ctx->wqh.lock);
        if (ctx->expired && ctx->tintv.tv64) {
@@ -362,7 +359,7 @@ SYSCALL_DEFINE2(timerfd_gettime, int, ufd, struct itimerspec __user *, otmr)
        kotmr.it_value = ktime_to_timespec(timerfd_get_remaining(ctx));
        kotmr.it_interval = ktime_to_timespec(ctx->tintv);
        spin_unlock_irq(&ctx->wqh.lock);
-       fput(file);
+       fdput(f);
 
        return copy_to_user(otmr, &kotmr, sizeof(kotmr)) ? -EFAULT: 0;
 }
index bc4f94b28706517e8216585b46847ea5a1ce9cc1..e8e01d74dc0563a86d7185369afd842362f792a4 100644 (file)
@@ -272,8 +272,8 @@ long long ubifs_calc_available(const struct ubifs_info *c, int min_idx_lebs)
  */
 static int can_use_rp(struct ubifs_info *c)
 {
-       if (current_fsuid() == c->rp_uid || capable(CAP_SYS_RESOURCE) ||
-           (c->rp_gid != 0 && in_group_p(c->rp_gid)))
+       if (uid_eq(current_fsuid(), c->rp_uid) || capable(CAP_SYS_RESOURCE) ||
+           (!gid_eq(c->rp_gid, GLOBAL_ROOT_GID) && in_group_p(c->rp_gid)))
                return 1;
        return 0;
 }
@@ -342,9 +342,8 @@ static int do_budget_space(struct ubifs_info *c)
        lebs = c->lst.empty_lebs + c->freeable_cnt + c->idx_gc_cnt -
               c->lst.taken_empty_lebs;
        if (unlikely(rsvd_idx_lebs > lebs)) {
-               dbg_budg("out of indexing space: min_idx_lebs %d (old %d), "
-                        "rsvd_idx_lebs %d", min_idx_lebs, c->bi.min_idx_lebs,
-                        rsvd_idx_lebs);
+               dbg_budg("out of indexing space: min_idx_lebs %d (old %d), rsvd_idx_lebs %d",
+                        min_idx_lebs, c->bi.min_idx_lebs, rsvd_idx_lebs);
                return -ENOSPC;
        }
 
index 8eda717cb99b82adeadc6d86fc07425ef6463e5d..ff8229340cd537286fb612efa7041fd125378617 100644 (file)
@@ -293,8 +293,8 @@ int ubifs_bg_thread(void *info)
        int err;
        struct ubifs_info *c = info;
 
-       dbg_msg("background thread \"%s\" started, PID %d",
-               c->bgt_name, current->pid);
+       ubifs_msg("background thread \"%s\" started, PID %d",
+                 c->bgt_name, current->pid);
        set_freezable();
 
        while (1) {
@@ -328,7 +328,7 @@ int ubifs_bg_thread(void *info)
                cond_resched();
        }
 
-       dbg_msg("background thread \"%s\" stops", c->bgt_name);
+       ubifs_msg("background thread \"%s\" stops", c->bgt_name);
        return 0;
 }
 
@@ -514,7 +514,7 @@ struct idx_node {
        struct list_head list;
        int iip;
        union ubifs_key upper_key;
-       struct ubifs_idx_node idx __attribute__((aligned(8)));
+       struct ubifs_idx_node idx __aligned(8);
 };
 
 /**
index 11e4132f314acfcabc610cf97f4c4dd9e52b768d..2bfa0953335d240c2a5fbc902b1daef34bfa2407 100644 (file)
@@ -112,8 +112,7 @@ void ubifs_compress(const void *in_buf, int in_len, void *out_buf, int *out_len,
        if (compr->comp_mutex)
                mutex_unlock(compr->comp_mutex);
        if (unlikely(err)) {
-               ubifs_warn("cannot compress %d bytes, compressor %s, "
-                          "error %d, leave data uncompressed",
+               ubifs_warn("cannot compress %d bytes, compressor %s, error %d, leave data uncompressed",
                           in_len, compr->name, err);
                 goto no_compr;
        }
@@ -176,8 +175,8 @@ int ubifs_decompress(const void *in_buf, int in_len, void *out_buf,
        if (compr->decomp_mutex)
                mutex_unlock(compr->decomp_mutex);
        if (err)
-               ubifs_err("cannot decompress %d bytes, compressor %s, "
-                         "error %d", in_len, compr->name, err);
+               ubifs_err("cannot decompress %d bytes, compressor %s, error %d",
+                         in_len, compr->name, err);
 
        return err;
 }
index bb3167257aabfed6d16452eed551e577c95b706d..62911637e12f3123742ad0e96cc1cc0829f7f2e3 100644 (file)
@@ -219,15 +219,15 @@ const char *dbg_jhead(int jhead)
 
 static void dump_ch(const struct ubifs_ch *ch)
 {
-       printk(KERN_ERR "\tmagic          %#x\n", le32_to_cpu(ch->magic));
-       printk(KERN_ERR "\tcrc            %#x\n", le32_to_cpu(ch->crc));
-       printk(KERN_ERR "\tnode_type      %d (%s)\n", ch->node_type,
+       pr_err("\tmagic          %#x\n", le32_to_cpu(ch->magic));
+       pr_err("\tcrc            %#x\n", le32_to_cpu(ch->crc));
+       pr_err("\tnode_type      %d (%s)\n", ch->node_type,
               dbg_ntype(ch->node_type));
-       printk(KERN_ERR "\tgroup_type     %d (%s)\n", ch->group_type,
+       pr_err("\tgroup_type     %d (%s)\n", ch->group_type,
               dbg_gtype(ch->group_type));
-       printk(KERN_ERR "\tsqnum          %llu\n",
+       pr_err("\tsqnum          %llu\n",
               (unsigned long long)le64_to_cpu(ch->sqnum));
-       printk(KERN_ERR "\tlen            %u\n", le32_to_cpu(ch->len));
+       pr_err("\tlen            %u\n", le32_to_cpu(ch->len));
 }
 
 void ubifs_dump_inode(struct ubifs_info *c, const struct inode *inode)
@@ -238,43 +238,43 @@ void ubifs_dump_inode(struct ubifs_info *c, const struct inode *inode)
        struct ubifs_dent_node *dent, *pdent = NULL;
        int count = 2;
 
-       printk(KERN_ERR "Dump in-memory inode:");
-       printk(KERN_ERR "\tinode          %lu\n", inode->i_ino);
-       printk(KERN_ERR "\tsize           %llu\n",
+       pr_err("Dump in-memory inode:");
+       pr_err("\tinode          %lu\n", inode->i_ino);
+       pr_err("\tsize           %llu\n",
               (unsigned long long)i_size_read(inode));
-       printk(KERN_ERR "\tnlink          %u\n", inode->i_nlink);
-       printk(KERN_ERR "\tuid            %u\n", (unsigned int)inode->i_uid);
-       printk(KERN_ERR "\tgid            %u\n", (unsigned int)inode->i_gid);
-       printk(KERN_ERR "\tatime          %u.%u\n",
+       pr_err("\tnlink          %u\n", inode->i_nlink);
+       pr_err("\tuid            %u\n", (unsigned int)i_uid_read(inode));
+       pr_err("\tgid            %u\n", (unsigned int)i_gid_read(inode));
+       pr_err("\tatime          %u.%u\n",
               (unsigned int)inode->i_atime.tv_sec,
               (unsigned int)inode->i_atime.tv_nsec);
-       printk(KERN_ERR "\tmtime          %u.%u\n",
+       pr_err("\tmtime          %u.%u\n",
               (unsigned int)inode->i_mtime.tv_sec,
               (unsigned int)inode->i_mtime.tv_nsec);
-       printk(KERN_ERR "\tctime          %u.%u\n",
+       pr_err("\tctime          %u.%u\n",
               (unsigned int)inode->i_ctime.tv_sec,
               (unsigned int)inode->i_ctime.tv_nsec);
-       printk(KERN_ERR "\tcreat_sqnum    %llu\n", ui->creat_sqnum);
-       printk(KERN_ERR "\txattr_size     %u\n", ui->xattr_size);
-       printk(KERN_ERR "\txattr_cnt      %u\n", ui->xattr_cnt);
-       printk(KERN_ERR "\txattr_names    %u\n", ui->xattr_names);
-       printk(KERN_ERR "\tdirty          %u\n", ui->dirty);
-       printk(KERN_ERR "\txattr          %u\n", ui->xattr);
-       printk(KERN_ERR "\tbulk_read      %u\n", ui->xattr);
-       printk(KERN_ERR "\tsynced_i_size  %llu\n",
+       pr_err("\tcreat_sqnum    %llu\n", ui->creat_sqnum);
+       pr_err("\txattr_size     %u\n", ui->xattr_size);
+       pr_err("\txattr_cnt      %u\n", ui->xattr_cnt);
+       pr_err("\txattr_names    %u\n", ui->xattr_names);
+       pr_err("\tdirty          %u\n", ui->dirty);
+       pr_err("\txattr          %u\n", ui->xattr);
+       pr_err("\tbulk_read      %u\n", ui->xattr);
+       pr_err("\tsynced_i_size  %llu\n",
               (unsigned long long)ui->synced_i_size);
-       printk(KERN_ERR "\tui_size        %llu\n",
+       pr_err("\tui_size        %llu\n",
               (unsigned long long)ui->ui_size);
-       printk(KERN_ERR "\tflags          %d\n", ui->flags);
-       printk(KERN_ERR "\tcompr_type     %d\n", ui->compr_type);
-       printk(KERN_ERR "\tlast_page_read %lu\n", ui->last_page_read);
-       printk(KERN_ERR "\tread_in_a_row  %lu\n", ui->read_in_a_row);
-       printk(KERN_ERR "\tdata_len       %d\n", ui->data_len);
+       pr_err("\tflags          %d\n", ui->flags);
+       pr_err("\tcompr_type     %d\n", ui->compr_type);
+       pr_err("\tlast_page_read %lu\n", ui->last_page_read);
+       pr_err("\tread_in_a_row  %lu\n", ui->read_in_a_row);
+       pr_err("\tdata_len       %d\n", ui->data_len);
 
        if (!S_ISDIR(inode->i_mode))
                return;
 
-       printk(KERN_ERR "List of directory entries:\n");
+       pr_err("List of directory entries:\n");
        ubifs_assert(!mutex_is_locked(&c->tnc_mutex));
 
        lowest_dent_key(c, &key, inode->i_ino);
@@ -282,11 +282,11 @@ void ubifs_dump_inode(struct ubifs_info *c, const struct inode *inode)
                dent = ubifs_tnc_next_ent(c, &key, &nm);
                if (IS_ERR(dent)) {
                        if (PTR_ERR(dent) != -ENOENT)
-                               printk(KERN_ERR "error %ld\n", PTR_ERR(dent));
+                               pr_err("error %ld\n", PTR_ERR(dent));
                        break;
                }
 
-               printk(KERN_ERR "\t%d: %s (%s)\n",
+               pr_err("\t%d: %s (%s)\n",
                       count++, dent->name, get_dent_type(dent->type));
 
                nm.name = dent->name;
@@ -305,12 +305,9 @@ void ubifs_dump_node(const struct ubifs_info *c, const void *node)
        const struct ubifs_ch *ch = node;
        char key_buf[DBG_KEY_BUF_LEN];
 
-       if (dbg_is_tst_rcvry(c))
-               return;
-
        /* If the magic is incorrect, just hexdump the first bytes */
        if (le32_to_cpu(ch->magic) != UBIFS_NODE_MAGIC) {
-               printk(KERN_ERR "Not a node, first %zu bytes:", UBIFS_CH_SZ);
+               pr_err("Not a node, first %zu bytes:", UBIFS_CH_SZ);
                print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 32, 1,
                               (void *)node, UBIFS_CH_SZ, 1);
                return;
@@ -324,8 +321,7 @@ void ubifs_dump_node(const struct ubifs_info *c, const void *node)
        {
                const struct ubifs_pad_node *pad = node;
 
-               printk(KERN_ERR "\tpad_len        %u\n",
-                      le32_to_cpu(pad->pad_len));
+               pr_err("\tpad_len        %u\n", le32_to_cpu(pad->pad_len));
                break;
        }
        case UBIFS_SB_NODE:
@@ -333,112 +329,77 @@ void ubifs_dump_node(const struct ubifs_info *c, const void *node)
                const struct ubifs_sb_node *sup = node;
                unsigned int sup_flags = le32_to_cpu(sup->flags);
 
-               printk(KERN_ERR "\tkey_hash       %d (%s)\n",
+               pr_err("\tkey_hash       %d (%s)\n",
                       (int)sup->key_hash, get_key_hash(sup->key_hash));
-               printk(KERN_ERR "\tkey_fmt        %d (%s)\n",
+               pr_err("\tkey_fmt        %d (%s)\n",
                       (int)sup->key_fmt, get_key_fmt(sup->key_fmt));
-               printk(KERN_ERR "\tflags          %#x\n", sup_flags);
-               printk(KERN_ERR "\t  big_lpt      %u\n",
+               pr_err("\tflags          %#x\n", sup_flags);
+               pr_err("\t  big_lpt      %u\n",
                       !!(sup_flags & UBIFS_FLG_BIGLPT));
-               printk(KERN_ERR "\t  space_fixup  %u\n",
+               pr_err("\t  space_fixup  %u\n",
                       !!(sup_flags & UBIFS_FLG_SPACE_FIXUP));
-               printk(KERN_ERR "\tmin_io_size    %u\n",
-                      le32_to_cpu(sup->min_io_size));
-               printk(KERN_ERR "\tleb_size       %u\n",
-                      le32_to_cpu(sup->leb_size));
-               printk(KERN_ERR "\tleb_cnt        %u\n",
-                      le32_to_cpu(sup->leb_cnt));
-               printk(KERN_ERR "\tmax_leb_cnt    %u\n",
-                      le32_to_cpu(sup->max_leb_cnt));
-               printk(KERN_ERR "\tmax_bud_bytes  %llu\n",
+               pr_err("\tmin_io_size    %u\n", le32_to_cpu(sup->min_io_size));
+               pr_err("\tleb_size       %u\n", le32_to_cpu(sup->leb_size));
+               pr_err("\tleb_cnt        %u\n", le32_to_cpu(sup->leb_cnt));
+               pr_err("\tmax_leb_cnt    %u\n", le32_to_cpu(sup->max_leb_cnt));
+               pr_err("\tmax_bud_bytes  %llu\n",
                       (unsigned long long)le64_to_cpu(sup->max_bud_bytes));
-               printk(KERN_ERR "\tlog_lebs       %u\n",
-                      le32_to_cpu(sup->log_lebs));
-               printk(KERN_ERR "\tlpt_lebs       %u\n",
-                      le32_to_cpu(sup->lpt_lebs));
-               printk(KERN_ERR "\torph_lebs      %u\n",
-                      le32_to_cpu(sup->orph_lebs));
-               printk(KERN_ERR "\tjhead_cnt      %u\n",
-                      le32_to_cpu(sup->jhead_cnt));
-               printk(KERN_ERR "\tfanout         %u\n",
-                      le32_to_cpu(sup->fanout));
-               printk(KERN_ERR "\tlsave_cnt      %u\n",
-                      le32_to_cpu(sup->lsave_cnt));
-               printk(KERN_ERR "\tdefault_compr  %u\n",
+               pr_err("\tlog_lebs       %u\n", le32_to_cpu(sup->log_lebs));
+               pr_err("\tlpt_lebs       %u\n", le32_to_cpu(sup->lpt_lebs));
+               pr_err("\torph_lebs      %u\n", le32_to_cpu(sup->orph_lebs));
+               pr_err("\tjhead_cnt      %u\n", le32_to_cpu(sup->jhead_cnt));
+               pr_err("\tfanout         %u\n", le32_to_cpu(sup->fanout));
+               pr_err("\tlsave_cnt      %u\n", le32_to_cpu(sup->lsave_cnt));
+               pr_err("\tdefault_compr  %u\n",
                       (int)le16_to_cpu(sup->default_compr));
-               printk(KERN_ERR "\trp_size        %llu\n",
+               pr_err("\trp_size        %llu\n",
                       (unsigned long long)le64_to_cpu(sup->rp_size));
-               printk(KERN_ERR "\trp_uid         %u\n",
-                      le32_to_cpu(sup->rp_uid));
-               printk(KERN_ERR "\trp_gid         %u\n",
-                      le32_to_cpu(sup->rp_gid));
-               printk(KERN_ERR "\tfmt_version    %u\n",
-                      le32_to_cpu(sup->fmt_version));
-               printk(KERN_ERR "\ttime_gran      %u\n",
-                      le32_to_cpu(sup->time_gran));
-               printk(KERN_ERR "\tUUID           %pUB\n",
-                      sup->uuid);
+               pr_err("\trp_uid         %u\n", le32_to_cpu(sup->rp_uid));
+               pr_err("\trp_gid         %u\n", le32_to_cpu(sup->rp_gid));
+               pr_err("\tfmt_version    %u\n", le32_to_cpu(sup->fmt_version));
+               pr_err("\ttime_gran      %u\n", le32_to_cpu(sup->time_gran));
+               pr_err("\tUUID           %pUB\n", sup->uuid);
                break;
        }
        case UBIFS_MST_NODE:
        {
                const struct ubifs_mst_node *mst = node;
 
-               printk(KERN_ERR "\thighest_inum   %llu\n",
+               pr_err("\thighest_inum   %llu\n",
                       (unsigned long long)le64_to_cpu(mst->highest_inum));
-               printk(KERN_ERR "\tcommit number  %llu\n",
+               pr_err("\tcommit number  %llu\n",
                       (unsigned long long)le64_to_cpu(mst->cmt_no));
-               printk(KERN_ERR "\tflags          %#x\n",
-                      le32_to_cpu(mst->flags));
-               printk(KERN_ERR "\tlog_lnum       %u\n",
-                      le32_to_cpu(mst->log_lnum));
-               printk(KERN_ERR "\troot_lnum      %u\n",
-                      le32_to_cpu(mst->root_lnum));
-               printk(KERN_ERR "\troot_offs      %u\n",
-                      le32_to_cpu(mst->root_offs));
-               printk(KERN_ERR "\troot_len       %u\n",
-                      le32_to_cpu(mst->root_len));
-               printk(KERN_ERR "\tgc_lnum        %u\n",
-                      le32_to_cpu(mst->gc_lnum));
-               printk(KERN_ERR "\tihead_lnum     %u\n",
-                      le32_to_cpu(mst->ihead_lnum));
-               printk(KERN_ERR "\tihead_offs     %u\n",
-                      le32_to_cpu(mst->ihead_offs));
-               printk(KERN_ERR "\tindex_size     %llu\n",
+               pr_err("\tflags          %#x\n", le32_to_cpu(mst->flags));
+               pr_err("\tlog_lnum       %u\n", le32_to_cpu(mst->log_lnum));
+               pr_err("\troot_lnum      %u\n", le32_to_cpu(mst->root_lnum));
+               pr_err("\troot_offs      %u\n", le32_to_cpu(mst->root_offs));
+               pr_err("\troot_len       %u\n", le32_to_cpu(mst->root_len));
+               pr_err("\tgc_lnum        %u\n", le32_to_cpu(mst->gc_lnum));
+               pr_err("\tihead_lnum     %u\n", le32_to_cpu(mst->ihead_lnum));
+               pr_err("\tihead_offs     %u\n", le32_to_cpu(mst->ihead_offs));
+               pr_err("\tindex_size     %llu\n",
                       (unsigned long long)le64_to_cpu(mst->index_size));
-               printk(KERN_ERR "\tlpt_lnum       %u\n",
-                      le32_to_cpu(mst->lpt_lnum));
-               printk(KERN_ERR "\tlpt_offs       %u\n",
-                      le32_to_cpu(mst->lpt_offs));
-               printk(KERN_ERR "\tnhead_lnum     %u\n",
-                      le32_to_cpu(mst->nhead_lnum));
-               printk(KERN_ERR "\tnhead_offs     %u\n",
-                      le32_to_cpu(mst->nhead_offs));
-               printk(KERN_ERR "\tltab_lnum      %u\n",
-                      le32_to_cpu(mst->ltab_lnum));
-               printk(KERN_ERR "\tltab_offs      %u\n",
-                      le32_to_cpu(mst->ltab_offs));
-               printk(KERN_ERR "\tlsave_lnum     %u\n",
-                      le32_to_cpu(mst->lsave_lnum));
-               printk(KERN_ERR "\tlsave_offs     %u\n",
-                      le32_to_cpu(mst->lsave_offs));
-               printk(KERN_ERR "\tlscan_lnum     %u\n",
-                      le32_to_cpu(mst->lscan_lnum));
-               printk(KERN_ERR "\tleb_cnt        %u\n",
-                      le32_to_cpu(mst->leb_cnt));
-               printk(KERN_ERR "\tempty_lebs     %u\n",
-                      le32_to_cpu(mst->empty_lebs));
-               printk(KERN_ERR "\tidx_lebs       %u\n",
-                      le32_to_cpu(mst->idx_lebs));
-               printk(KERN_ERR "\ttotal_free     %llu\n",
+               pr_err("\tlpt_lnum       %u\n", le32_to_cpu(mst->lpt_lnum));
+               pr_err("\tlpt_offs       %u\n", le32_to_cpu(mst->lpt_offs));
+               pr_err("\tnhead_lnum     %u\n", le32_to_cpu(mst->nhead_lnum));
+               pr_err("\tnhead_offs     %u\n", le32_to_cpu(mst->nhead_offs));
+               pr_err("\tltab_lnum      %u\n", le32_to_cpu(mst->ltab_lnum));
+               pr_err("\tltab_offs      %u\n", le32_to_cpu(mst->ltab_offs));
+               pr_err("\tlsave_lnum     %u\n", le32_to_cpu(mst->lsave_lnum));
+               pr_err("\tlsave_offs     %u\n", le32_to_cpu(mst->lsave_offs));
+               pr_err("\tlscan_lnum     %u\n", le32_to_cpu(mst->lscan_lnum));
+               pr_err("\tleb_cnt        %u\n", le32_to_cpu(mst->leb_cnt));
+               pr_err("\tempty_lebs     %u\n", le32_to_cpu(mst->empty_lebs));
+               pr_err("\tidx_lebs       %u\n", le32_to_cpu(mst->idx_lebs));
+               pr_err("\ttotal_free     %llu\n",
                       (unsigned long long)le64_to_cpu(mst->total_free));
-               printk(KERN_ERR "\ttotal_dirty    %llu\n",
+               pr_err("\ttotal_dirty    %llu\n",
                       (unsigned long long)le64_to_cpu(mst->total_dirty));
-               printk(KERN_ERR "\ttotal_used     %llu\n",
+               pr_err("\ttotal_used     %llu\n",
                       (unsigned long long)le64_to_cpu(mst->total_used));
-               printk(KERN_ERR "\ttotal_dead     %llu\n",
+               pr_err("\ttotal_dead     %llu\n",
                       (unsigned long long)le64_to_cpu(mst->total_dead));
-               printk(KERN_ERR "\ttotal_dark     %llu\n",
+               pr_err("\ttotal_dark     %llu\n",
                       (unsigned long long)le64_to_cpu(mst->total_dark));
                break;
        }
@@ -446,12 +407,9 @@ void ubifs_dump_node(const struct ubifs_info *c, const void *node)
        {
                const struct ubifs_ref_node *ref = node;
 
-               printk(KERN_ERR "\tlnum           %u\n",
-                      le32_to_cpu(ref->lnum));
-               printk(KERN_ERR "\toffs           %u\n",
-                      le32_to_cpu(ref->offs));
-               printk(KERN_ERR "\tjhead          %u\n",
-                      le32_to_cpu(ref->jhead));
+               pr_err("\tlnum           %u\n", le32_to_cpu(ref->lnum));
+               pr_err("\toffs           %u\n", le32_to_cpu(ref->offs));
+               pr_err("\tjhead          %u\n", le32_to_cpu(ref->jhead));
                break;
        }
        case UBIFS_INO_NODE:
@@ -459,41 +417,32 @@ void ubifs_dump_node(const struct ubifs_info *c, const void *node)
                const struct ubifs_ino_node *ino = node;
 
                key_read(c, &ino->key, &key);
-               printk(KERN_ERR "\tkey            %s\n",
+               pr_err("\tkey            %s\n",
                       dbg_snprintf_key(c, &key, key_buf, DBG_KEY_BUF_LEN));
-               printk(KERN_ERR "\tcreat_sqnum    %llu\n",
+               pr_err("\tcreat_sqnum    %llu\n",
                       (unsigned long long)le64_to_cpu(ino->creat_sqnum));
-               printk(KERN_ERR "\tsize           %llu\n",
+               pr_err("\tsize           %llu\n",
                       (unsigned long long)le64_to_cpu(ino->size));
-               printk(KERN_ERR "\tnlink          %u\n",
-                      le32_to_cpu(ino->nlink));
-               printk(KERN_ERR "\tatime          %lld.%u\n",
+               pr_err("\tnlink          %u\n", le32_to_cpu(ino->nlink));
+               pr_err("\tatime          %lld.%u\n",
                       (long long)le64_to_cpu(ino->atime_sec),
                       le32_to_cpu(ino->atime_nsec));
-               printk(KERN_ERR "\tmtime          %lld.%u\n",
+               pr_err("\tmtime          %lld.%u\n",
                       (long long)le64_to_cpu(ino->mtime_sec),
                       le32_to_cpu(ino->mtime_nsec));
-               printk(KERN_ERR "\tctime          %lld.%u\n",
+               pr_err("\tctime          %lld.%u\n",
                       (long long)le64_to_cpu(ino->ctime_sec),
                       le32_to_cpu(ino->ctime_nsec));
-               printk(KERN_ERR "\tuid            %u\n",
-                      le32_to_cpu(ino->uid));
-               printk(KERN_ERR "\tgid            %u\n",
-                      le32_to_cpu(ino->gid));
-               printk(KERN_ERR "\tmode           %u\n",
-                      le32_to_cpu(ino->mode));
-               printk(KERN_ERR "\tflags          %#x\n",
-                      le32_to_cpu(ino->flags));
-               printk(KERN_ERR "\txattr_cnt      %u\n",
-                      le32_to_cpu(ino->xattr_cnt));
-               printk(KERN_ERR "\txattr_size     %u\n",
-                      le32_to_cpu(ino->xattr_size));
-               printk(KERN_ERR "\txattr_names    %u\n",
-                      le32_to_cpu(ino->xattr_names));
-               printk(KERN_ERR "\tcompr_type     %#x\n",
+               pr_err("\tuid            %u\n", le32_to_cpu(ino->uid));
+               pr_err("\tgid            %u\n", le32_to_cpu(ino->gid));
+               pr_err("\tmode           %u\n", le32_to_cpu(ino->mode));
+               pr_err("\tflags          %#x\n", le32_to_cpu(ino->flags));
+               pr_err("\txattr_cnt      %u\n", le32_to_cpu(ino->xattr_cnt));
+               pr_err("\txattr_size     %u\n", le32_to_cpu(ino->xattr_size));
+               pr_err("\txattr_names    %u\n", le32_to_cpu(ino->xattr_names));
+               pr_err("\tcompr_type     %#x\n",
                       (int)le16_to_cpu(ino->compr_type));
-               printk(KERN_ERR "\tdata len       %u\n",
-                      le32_to_cpu(ino->data_len));
+               pr_err("\tdata len       %u\n", le32_to_cpu(ino->data_len));
                break;
        }
        case UBIFS_DENT_NODE:
@@ -503,22 +452,21 @@ void ubifs_dump_node(const struct ubifs_info *c, const void *node)
                int nlen = le16_to_cpu(dent->nlen);
 
                key_read(c, &dent->key, &key);
-               printk(KERN_ERR "\tkey            %s\n",
+               pr_err("\tkey            %s\n",
                       dbg_snprintf_key(c, &key, key_buf, DBG_KEY_BUF_LEN));
-               printk(KERN_ERR "\tinum           %llu\n",
+               pr_err("\tinum           %llu\n",
                       (unsigned long long)le64_to_cpu(dent->inum));
-               printk(KERN_ERR "\ttype           %d\n", (int)dent->type);
-               printk(KERN_ERR "\tnlen           %d\n", nlen);
-               printk(KERN_ERR "\tname           ");
+               pr_err("\ttype           %d\n", (int)dent->type);
+               pr_err("\tnlen           %d\n", nlen);
+               pr_err("\tname           ");
 
                if (nlen > UBIFS_MAX_NLEN)
-                       printk(KERN_ERR "(bad name length, not printing, "
-                                         "bad or corrupted node)");
+                       pr_err("(bad name length, not printing, bad or corrupted node)");
                else {
                        for (i = 0; i < nlen && dent->name[i]; i++)
-                               printk(KERN_CONT "%c", dent->name[i]);
+                               pr_cont("%c", dent->name[i]);
                }
-               printk(KERN_CONT "\n");
+               pr_cont("\n");
 
                break;
        }
@@ -528,15 +476,13 @@ void ubifs_dump_node(const struct ubifs_info *c, const void *node)
                int dlen = le32_to_cpu(ch->len) - UBIFS_DATA_NODE_SZ;
 
                key_read(c, &dn->key, &key);
-               printk(KERN_ERR "\tkey            %s\n",
+               pr_err("\tkey            %s\n",
                       dbg_snprintf_key(c, &key, key_buf, DBG_KEY_BUF_LEN));
-               printk(KERN_ERR "\tsize           %u\n",
-                      le32_to_cpu(dn->size));
-               printk(KERN_ERR "\tcompr_typ      %d\n",
+               pr_err("\tsize           %u\n", le32_to_cpu(dn->size));
+               pr_err("\tcompr_typ      %d\n",
                       (int)le16_to_cpu(dn->compr_type));
-               printk(KERN_ERR "\tdata size      %d\n",
-                      dlen);
-               printk(KERN_ERR "\tdata:\n");
+               pr_err("\tdata size      %d\n", dlen);
+               pr_err("\tdata:\n");
                print_hex_dump(KERN_ERR, "\t", DUMP_PREFIX_OFFSET, 32, 1,
                               (void *)&dn->data, dlen, 0);
                break;
@@ -545,11 +491,10 @@ void ubifs_dump_node(const struct ubifs_info *c, const void *node)
        {
                const struct ubifs_trun_node *trun = node;
 
-               printk(KERN_ERR "\tinum           %u\n",
-                      le32_to_cpu(trun->inum));
-               printk(KERN_ERR "\told_size       %llu\n",
+               pr_err("\tinum           %u\n", le32_to_cpu(trun->inum));
+               pr_err("\told_size       %llu\n",
                       (unsigned long long)le64_to_cpu(trun->old_size));
-               printk(KERN_ERR "\tnew_size       %llu\n",
+               pr_err("\tnew_size       %llu\n",
                       (unsigned long long)le64_to_cpu(trun->new_size));
                break;
        }
@@ -558,17 +503,16 @@ void ubifs_dump_node(const struct ubifs_info *c, const void *node)
                const struct ubifs_idx_node *idx = node;
 
                n = le16_to_cpu(idx->child_cnt);
-               printk(KERN_ERR "\tchild_cnt      %d\n", n);
-               printk(KERN_ERR "\tlevel          %d\n",
-                      (int)le16_to_cpu(idx->level));
-               printk(KERN_ERR "\tBranches:\n");
+               pr_err("\tchild_cnt      %d\n", n);
+               pr_err("\tlevel          %d\n", (int)le16_to_cpu(idx->level));
+               pr_err("\tBranches:\n");
 
                for (i = 0; i < n && i < c->fanout - 1; i++) {
                        const struct ubifs_branch *br;
 
                        br = ubifs_idx_branch(c, idx, i);
                        key_read(c, &br->key, &key);
-                       printk(KERN_ERR "\t%d: LEB %d:%d len %d key %s\n",
+                       pr_err("\t%d: LEB %d:%d len %d key %s\n",
                               i, le32_to_cpu(br->lnum), le32_to_cpu(br->offs),
                               le32_to_cpu(br->len),
                               dbg_snprintf_key(c, &key, key_buf,
@@ -582,20 +526,20 @@ void ubifs_dump_node(const struct ubifs_info *c, const void *node)
        {
                const struct ubifs_orph_node *orph = node;
 
-               printk(KERN_ERR "\tcommit number  %llu\n",
+               pr_err("\tcommit number  %llu\n",
                       (unsigned long long)
                                le64_to_cpu(orph->cmt_no) & LLONG_MAX);
-               printk(KERN_ERR "\tlast node flag %llu\n",
+               pr_err("\tlast node flag %llu\n",
                       (unsigned long long)(le64_to_cpu(orph->cmt_no)) >> 63);
                n = (le32_to_cpu(ch->len) - UBIFS_ORPH_NODE_SZ) >> 3;
-               printk(KERN_ERR "\t%d orphan inode numbers:\n", n);
+               pr_err("\t%d orphan inode numbers:\n", n);
                for (i = 0; i < n; i++)
-                       printk(KERN_ERR "\t  ino %llu\n",
+                       pr_err("\t  ino %llu\n",
                               (unsigned long long)le64_to_cpu(orph->inos[i]));
                break;
        }
        default:
-               printk(KERN_ERR "node type %d was not recognized\n",
+               pr_err("node type %d was not recognized\n",
                       (int)ch->node_type);
        }
        spin_unlock(&dbg_lock);
@@ -604,16 +548,16 @@ void ubifs_dump_node(const struct ubifs_info *c, const void *node)
 void ubifs_dump_budget_req(const struct ubifs_budget_req *req)
 {
        spin_lock(&dbg_lock);
-       printk(KERN_ERR "Budgeting request: new_ino %d, dirtied_ino %d\n",
+       pr_err("Budgeting request: new_ino %d, dirtied_ino %d\n",
               req->new_ino, req->dirtied_ino);
-       printk(KERN_ERR "\tnew_ino_d   %d, dirtied_ino_d %d\n",
+       pr_err("\tnew_ino_d   %d, dirtied_ino_d %d\n",
               req->new_ino_d, req->dirtied_ino_d);
-       printk(KERN_ERR "\tnew_page    %d, dirtied_page %d\n",
+       pr_err("\tnew_page    %d, dirtied_page %d\n",
               req->new_page, req->dirtied_page);
-       printk(KERN_ERR "\tnew_dent    %d, mod_dent     %d\n",
+       pr_err("\tnew_dent    %d, mod_dent     %d\n",
               req->new_dent, req->mod_dent);
-       printk(KERN_ERR "\tidx_growth  %d\n", req->idx_growth);
-       printk(KERN_ERR "\tdata_growth %d dd_growth     %d\n",
+       pr_err("\tidx_growth  %d\n", req->idx_growth);
+       pr_err("\tdata_growth %d dd_growth     %d\n",
               req->data_growth, req->dd_growth);
        spin_unlock(&dbg_lock);
 }
@@ -621,14 +565,12 @@ void ubifs_dump_budget_req(const struct ubifs_budget_req *req)
 void ubifs_dump_lstats(const struct ubifs_lp_stats *lst)
 {
        spin_lock(&dbg_lock);
-       printk(KERN_ERR "(pid %d) Lprops statistics: empty_lebs %d, "
-              "idx_lebs  %d\n", current->pid, lst->empty_lebs, lst->idx_lebs);
-       printk(KERN_ERR "\ttaken_empty_lebs %d, total_free %lld, "
-              "total_dirty %lld\n", lst->taken_empty_lebs, lst->total_free,
-              lst->total_dirty);
-       printk(KERN_ERR "\ttotal_used %lld, total_dark %lld, "
-              "total_dead %lld\n", lst->total_used, lst->total_dark,
-              lst->total_dead);
+       pr_err("(pid %d) Lprops statistics: empty_lebs %d, idx_lebs  %d\n",
+              current->pid, lst->empty_lebs, lst->idx_lebs);
+       pr_err("\ttaken_empty_lebs %d, total_free %lld, total_dirty %lld\n",
+              lst->taken_empty_lebs, lst->total_free, lst->total_dirty);
+       pr_err("\ttotal_used %lld, total_dark %lld, total_dead %lld\n",
+              lst->total_used, lst->total_dark, lst->total_dead);
        spin_unlock(&dbg_lock);
 }
 
@@ -642,21 +584,17 @@ void ubifs_dump_budg(struct ubifs_info *c, const struct ubifs_budg_info *bi)
 
        spin_lock(&c->space_lock);
        spin_lock(&dbg_lock);
-       printk(KERN_ERR "(pid %d) Budgeting info: data budget sum %lld, "
-              "total budget sum %lld\n", current->pid,
-              bi->data_growth + bi->dd_growth,
+       pr_err("(pid %d) Budgeting info: data budget sum %lld, total budget sum %lld\n",
+              current->pid, bi->data_growth + bi->dd_growth,
               bi->data_growth + bi->dd_growth + bi->idx_growth);
-       printk(KERN_ERR "\tbudg_data_growth %lld, budg_dd_growth %lld, "
-              "budg_idx_growth %lld\n", bi->data_growth, bi->dd_growth,
-              bi->idx_growth);
-       printk(KERN_ERR "\tmin_idx_lebs %d, old_idx_sz %llu, "
-              "uncommitted_idx %lld\n", bi->min_idx_lebs, bi->old_idx_sz,
-              bi->uncommitted_idx);
-       printk(KERN_ERR "\tpage_budget %d, inode_budget %d, dent_budget %d\n",
+       pr_err("\tbudg_data_growth %lld, budg_dd_growth %lld, budg_idx_growth %lld\n",
+              bi->data_growth, bi->dd_growth, bi->idx_growth);
+       pr_err("\tmin_idx_lebs %d, old_idx_sz %llu, uncommitted_idx %lld\n",
+              bi->min_idx_lebs, bi->old_idx_sz, bi->uncommitted_idx);
+       pr_err("\tpage_budget %d, inode_budget %d, dent_budget %d\n",
               bi->page_budget, bi->inode_budget, bi->dent_budget);
-       printk(KERN_ERR "\tnospace %u, nospace_rp %u\n",
-              bi->nospace, bi->nospace_rp);
-       printk(KERN_ERR "\tdark_wm %d, dead_wm %d, max_idx_node_sz %d\n",
+       pr_err("\tnospace %u, nospace_rp %u\n", bi->nospace, bi->nospace_rp);
+       pr_err("\tdark_wm %d, dead_wm %d, max_idx_node_sz %d\n",
               c->dark_wm, c->dead_wm, c->max_idx_node_sz);
 
        if (bi != &c->bi)
@@ -667,38 +605,37 @@ void ubifs_dump_budg(struct ubifs_info *c, const struct ubifs_budg_info *bi)
                 */
                goto out_unlock;
 
-       printk(KERN_ERR "\tfreeable_cnt %d, calc_idx_sz %lld, idx_gc_cnt %d\n",
+       pr_err("\tfreeable_cnt %d, calc_idx_sz %lld, idx_gc_cnt %d\n",
               c->freeable_cnt, c->calc_idx_sz, c->idx_gc_cnt);
-       printk(KERN_ERR "\tdirty_pg_cnt %ld, dirty_zn_cnt %ld, "
-              "clean_zn_cnt %ld\n", atomic_long_read(&c->dirty_pg_cnt),
+       pr_err("\tdirty_pg_cnt %ld, dirty_zn_cnt %ld, clean_zn_cnt %ld\n",
+              atomic_long_read(&c->dirty_pg_cnt),
               atomic_long_read(&c->dirty_zn_cnt),
               atomic_long_read(&c->clean_zn_cnt));
-       printk(KERN_ERR "\tgc_lnum %d, ihead_lnum %d\n",
-              c->gc_lnum, c->ihead_lnum);
+       pr_err("\tgc_lnum %d, ihead_lnum %d\n", c->gc_lnum, c->ihead_lnum);
 
        /* If we are in R/O mode, journal heads do not exist */
        if (c->jheads)
                for (i = 0; i < c->jhead_cnt; i++)
-                       printk(KERN_ERR "\tjhead %s\t LEB %d\n",
+                       pr_err("\tjhead %s\t LEB %d\n",
                               dbg_jhead(c->jheads[i].wbuf.jhead),
                               c->jheads[i].wbuf.lnum);
        for (rb = rb_first(&c->buds); rb; rb = rb_next(rb)) {
                bud = rb_entry(rb, struct ubifs_bud, rb);
-               printk(KERN_ERR "\tbud LEB %d\n", bud->lnum);
+               pr_err("\tbud LEB %d\n", bud->lnum);
        }
        list_for_each_entry(bud, &c->old_buds, list)
-               printk(KERN_ERR "\told bud LEB %d\n", bud->lnum);
+               pr_err("\told bud LEB %d\n", bud->lnum);
        list_for_each_entry(idx_gc, &c->idx_gc, list)
-               printk(KERN_ERR "\tGC'ed idx LEB %d unmap %d\n",
+               pr_err("\tGC'ed idx LEB %d unmap %d\n",
                       idx_gc->lnum, idx_gc->unmap);
-       printk(KERN_ERR "\tcommit state %d\n", c->cmt_state);
+       pr_err("\tcommit state %d\n", c->cmt_state);
 
        /* Print budgeting predictions */
        available = ubifs_calc_available(c, c->bi.min_idx_lebs);
        outstanding = c->bi.data_growth + c->bi.dd_growth;
        free = ubifs_get_free_space_nolock(c);
-       printk(KERN_ERR "Budgeting predictions:\n");
-       printk(KERN_ERR "\tavailable: %lld, outstanding %lld, free %lld\n",
+       pr_err("Budgeting predictions:\n");
+       pr_err("\tavailable: %lld, outstanding %lld, free %lld\n",
               available, outstanding, free);
 out_unlock:
        spin_unlock(&dbg_lock);
@@ -718,21 +655,19 @@ void ubifs_dump_lprop(const struct ubifs_info *c, const struct ubifs_lprops *lp)
                dark = ubifs_calc_dark(c, spc);
 
        if (lp->flags & LPROPS_INDEX)
-               printk(KERN_ERR "LEB %-7d free %-8d dirty %-8d used %-8d "
-                      "free + dirty %-8d flags %#x (", lp->lnum, lp->free,
-                      lp->dirty, c->leb_size - spc, spc, lp->flags);
+               pr_err("LEB %-7d free %-8d dirty %-8d used %-8d free + dirty %-8d flags %#x (",
+                      lp->lnum, lp->free, lp->dirty, c->leb_size - spc, spc,
+                      lp->flags);
        else
-               printk(KERN_ERR "LEB %-7d free %-8d dirty %-8d used %-8d "
-                      "free + dirty %-8d dark %-4d dead %-4d nodes fit %-3d "
-                      "flags %#-4x (", lp->lnum, lp->free, lp->dirty,
-                      c->leb_size - spc, spc, dark, dead,
-                      (int)(spc / UBIFS_MAX_NODE_SZ), lp->flags);
+               pr_err("LEB %-7d free %-8d dirty %-8d used %-8d free + dirty %-8d dark %-4d dead %-4d nodes fit %-3d flags %#-4x (",
+                      lp->lnum, lp->free, lp->dirty, c->leb_size - spc, spc,
+                      dark, dead, (int)(spc / UBIFS_MAX_NODE_SZ), lp->flags);
 
        if (lp->flags & LPROPS_TAKEN) {
                if (lp->flags & LPROPS_INDEX)
-                       printk(KERN_CONT "index, taken");
+                       pr_cont("index, taken");
                else
-                       printk(KERN_CONT "taken");
+                       pr_cont("taken");
        } else {
                const char *s;
 
@@ -769,7 +704,7 @@ void ubifs_dump_lprop(const struct ubifs_info *c, const struct ubifs_lprops *lp)
                                break;
                        }
                }
-               printk(KERN_CONT "%s", s);
+               pr_cont("%s", s);
        }
 
        for (rb = rb_first((struct rb_root *)&c->buds); rb; rb = rb_next(rb)) {
@@ -784,19 +719,18 @@ void ubifs_dump_lprop(const struct ubifs_info *c, const struct ubifs_lprops *lp)
                                 */
                                if (c->jheads &&
                                    lp->lnum == c->jheads[i].wbuf.lnum) {
-                                       printk(KERN_CONT ", jhead %s",
-                                              dbg_jhead(i));
+                                       pr_cont(", jhead %s", dbg_jhead(i));
                                        head = 1;
                                }
                        }
                        if (!head)
-                               printk(KERN_CONT ", bud of jhead %s",
+                               pr_cont(", bud of jhead %s",
                                       dbg_jhead(bud->jhead));
                }
        }
        if (lp->lnum == c->gc_lnum)
-               printk(KERN_CONT ", GC LEB");
-       printk(KERN_CONT ")\n");
+               pr_cont(", GC LEB");
+       pr_cont(")\n");
 }
 
 void ubifs_dump_lprops(struct ubifs_info *c)
@@ -805,8 +739,7 @@ void ubifs_dump_lprops(struct ubifs_info *c)
        struct ubifs_lprops lp;
        struct ubifs_lp_stats lst;
 
-       printk(KERN_ERR "(pid %d) start dumping LEB properties\n",
-              current->pid);
+       pr_err("(pid %d) start dumping LEB properties\n", current->pid);
        ubifs_get_lp_stats(c, &lst);
        ubifs_dump_lstats(&lst);
 
@@ -817,8 +750,7 @@ void ubifs_dump_lprops(struct ubifs_info *c)
 
                ubifs_dump_lprop(c, &lp);
        }
-       printk(KERN_ERR "(pid %d) finish dumping LEB properties\n",
-              current->pid);
+       pr_err("(pid %d) finish dumping LEB properties\n", current->pid);
 }
 
 void ubifs_dump_lpt_info(struct ubifs_info *c)
@@ -826,37 +758,36 @@ void ubifs_dump_lpt_info(struct ubifs_info *c)
        int i;
 
        spin_lock(&dbg_lock);
-       printk(KERN_ERR "(pid %d) dumping LPT information\n", current->pid);
-       printk(KERN_ERR "\tlpt_sz:        %lld\n", c->lpt_sz);
-       printk(KERN_ERR "\tpnode_sz:      %d\n", c->pnode_sz);
-       printk(KERN_ERR "\tnnode_sz:      %d\n", c->nnode_sz);
-       printk(KERN_ERR "\tltab_sz:       %d\n", c->ltab_sz);
-       printk(KERN_ERR "\tlsave_sz:      %d\n", c->lsave_sz);
-       printk(KERN_ERR "\tbig_lpt:       %d\n", c->big_lpt);
-       printk(KERN_ERR "\tlpt_hght:      %d\n", c->lpt_hght);
-       printk(KERN_ERR "\tpnode_cnt:     %d\n", c->pnode_cnt);
-       printk(KERN_ERR "\tnnode_cnt:     %d\n", c->nnode_cnt);
-       printk(KERN_ERR "\tdirty_pn_cnt:  %d\n", c->dirty_pn_cnt);
-       printk(KERN_ERR "\tdirty_nn_cnt:  %d\n", c->dirty_nn_cnt);
-       printk(KERN_ERR "\tlsave_cnt:     %d\n", c->lsave_cnt);
-       printk(KERN_ERR "\tspace_bits:    %d\n", c->space_bits);
-       printk(KERN_ERR "\tlpt_lnum_bits: %d\n", c->lpt_lnum_bits);
-       printk(KERN_ERR "\tlpt_offs_bits: %d\n", c->lpt_offs_bits);
-       printk(KERN_ERR "\tlpt_spc_bits:  %d\n", c->lpt_spc_bits);
-       printk(KERN_ERR "\tpcnt_bits:     %d\n", c->pcnt_bits);
-       printk(KERN_ERR "\tlnum_bits:     %d\n", c->lnum_bits);
-       printk(KERN_ERR "\tLPT root is at %d:%d\n", c->lpt_lnum, c->lpt_offs);
-       printk(KERN_ERR "\tLPT head is at %d:%d\n",
+       pr_err("(pid %d) dumping LPT information\n", current->pid);
+       pr_err("\tlpt_sz:        %lld\n", c->lpt_sz);
+       pr_err("\tpnode_sz:      %d\n", c->pnode_sz);
+       pr_err("\tnnode_sz:      %d\n", c->nnode_sz);
+       pr_err("\tltab_sz:       %d\n", c->ltab_sz);
+       pr_err("\tlsave_sz:      %d\n", c->lsave_sz);
+       pr_err("\tbig_lpt:       %d\n", c->big_lpt);
+       pr_err("\tlpt_hght:      %d\n", c->lpt_hght);
+       pr_err("\tpnode_cnt:     %d\n", c->pnode_cnt);
+       pr_err("\tnnode_cnt:     %d\n", c->nnode_cnt);
+       pr_err("\tdirty_pn_cnt:  %d\n", c->dirty_pn_cnt);
+       pr_err("\tdirty_nn_cnt:  %d\n", c->dirty_nn_cnt);
+       pr_err("\tlsave_cnt:     %d\n", c->lsave_cnt);
+       pr_err("\tspace_bits:    %d\n", c->space_bits);
+       pr_err("\tlpt_lnum_bits: %d\n", c->lpt_lnum_bits);
+       pr_err("\tlpt_offs_bits: %d\n", c->lpt_offs_bits);
+       pr_err("\tlpt_spc_bits:  %d\n", c->lpt_spc_bits);
+       pr_err("\tpcnt_bits:     %d\n", c->pcnt_bits);
+       pr_err("\tlnum_bits:     %d\n", c->lnum_bits);
+       pr_err("\tLPT root is at %d:%d\n", c->lpt_lnum, c->lpt_offs);
+       pr_err("\tLPT head is at %d:%d\n",
               c->nhead_lnum, c->nhead_offs);
-       printk(KERN_ERR "\tLPT ltab is at %d:%d\n",
-              c->ltab_lnum, c->ltab_offs);
+       pr_err("\tLPT ltab is at %d:%d\n", c->ltab_lnum, c->ltab_offs);
        if (c->big_lpt)
-               printk(KERN_ERR "\tLPT lsave is at %d:%d\n",
+               pr_err("\tLPT lsave is at %d:%d\n",
                       c->lsave_lnum, c->lsave_offs);
        for (i = 0; i < c->lpt_lebs; i++)
-               printk(KERN_ERR "\tLPT LEB %d free %d dirty %d tgc %d "
-                      "cmt %d\n", i + c->lpt_first, c->ltab[i].free,
-                      c->ltab[i].dirty, c->ltab[i].tgc, c->ltab[i].cmt);
+               pr_err("\tLPT LEB %d free %d dirty %d tgc %d cmt %d\n",
+                      i + c->lpt_first, c->ltab[i].free, c->ltab[i].dirty,
+                      c->ltab[i].tgc, c->ltab[i].cmt);
        spin_unlock(&dbg_lock);
 }
 
@@ -865,13 +796,13 @@ void ubifs_dump_sleb(const struct ubifs_info *c,
 {
        struct ubifs_scan_node *snod;
 
-       printk(KERN_ERR "(pid %d) start dumping scanned data from LEB %d:%d\n",
+       pr_err("(pid %d) start dumping scanned data from LEB %d:%d\n",
               current->pid, sleb->lnum, offs);
 
        list_for_each_entry(snod, &sleb->nodes, list) {
                cond_resched();
-               printk(KERN_ERR "Dumping node at LEB %d:%d len %d\n", sleb->lnum,
-                      snod->offs, snod->len);
+               pr_err("Dumping node at LEB %d:%d len %d\n",
+                      sleb->lnum, snod->offs, snod->len);
                ubifs_dump_node(c, snod->node);
        }
 }
@@ -882,11 +813,7 @@ void ubifs_dump_leb(const struct ubifs_info *c, int lnum)
        struct ubifs_scan_node *snod;
        void *buf;
 
-       if (dbg_is_tst_rcvry(c))
-               return;
-
-       printk(KERN_ERR "(pid %d) start dumping LEB %d\n",
-              current->pid, lnum);
+       pr_err("(pid %d) start dumping LEB %d\n", current->pid, lnum);
 
        buf = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL);
        if (!buf) {
@@ -900,18 +827,17 @@ void ubifs_dump_leb(const struct ubifs_info *c, int lnum)
                goto out;
        }
 
-       printk(KERN_ERR "LEB %d has %d nodes ending at %d\n", lnum,
+       pr_err("LEB %d has %d nodes ending at %d\n", lnum,
               sleb->nodes_cnt, sleb->endpt);
 
        list_for_each_entry(snod, &sleb->nodes, list) {
                cond_resched();
-               printk(KERN_ERR "Dumping node at LEB %d:%d len %d\n", lnum,
+               pr_err("Dumping node at LEB %d:%d len %d\n", lnum,
                       snod->offs, snod->len);
                ubifs_dump_node(c, snod->node);
        }
 
-       printk(KERN_ERR "(pid %d) finish dumping LEB %d\n",
-              current->pid, lnum);
+       pr_err("(pid %d) finish dumping LEB %d\n", current->pid, lnum);
        ubifs_scan_destroy(sleb);
 
 out:
@@ -932,33 +858,28 @@ void ubifs_dump_znode(const struct ubifs_info *c,
        else
                zbr = &c->zroot;
 
-       printk(KERN_ERR "znode %p, LEB %d:%d len %d parent %p iip %d level %d"
-              " child_cnt %d flags %lx\n", znode, zbr->lnum, zbr->offs,
-              zbr->len, znode->parent, znode->iip, znode->level,
-              znode->child_cnt, znode->flags);
+       pr_err("znode %p, LEB %d:%d len %d parent %p iip %d level %d child_cnt %d flags %lx\n",
+              znode, zbr->lnum, zbr->offs, zbr->len, znode->parent, znode->iip,
+              znode->level, znode->child_cnt, znode->flags);
 
        if (znode->child_cnt <= 0 || znode->child_cnt > c->fanout) {
                spin_unlock(&dbg_lock);
                return;
        }
 
-       printk(KERN_ERR "zbranches:\n");
+       pr_err("zbranches:\n");
        for (n = 0; n < znode->child_cnt; n++) {
                zbr = &znode->zbranch[n];
                if (znode->level > 0)
-                       printk(KERN_ERR "\t%d: znode %p LEB %d:%d len %d key "
-                                         "%s\n", n, zbr->znode, zbr->lnum,
-                                         zbr->offs, zbr->len,
-                                         dbg_snprintf_key(c, &zbr->key,
-                                                          key_buf,
-                                                          DBG_KEY_BUF_LEN));
+                       pr_err("\t%d: znode %p LEB %d:%d len %d key %s\n",
+                              n, zbr->znode, zbr->lnum, zbr->offs, zbr->len,
+                              dbg_snprintf_key(c, &zbr->key, key_buf,
+                                               DBG_KEY_BUF_LEN));
                else
-                       printk(KERN_ERR "\t%d: LNC %p LEB %d:%d len %d key "
-                                         "%s\n", n, zbr->znode, zbr->lnum,
-                                         zbr->offs, zbr->len,
-                                         dbg_snprintf_key(c, &zbr->key,
-                                                          key_buf,
-                                                          DBG_KEY_BUF_LEN));
+                       pr_err("\t%d: LNC %p LEB %d:%d len %d key %s\n",
+                              n, zbr->znode, zbr->lnum, zbr->offs, zbr->len,
+                              dbg_snprintf_key(c, &zbr->key, key_buf,
+                                               DBG_KEY_BUF_LEN));
        }
        spin_unlock(&dbg_lock);
 }
@@ -967,16 +888,16 @@ void ubifs_dump_heap(struct ubifs_info *c, struct ubifs_lpt_heap *heap, int cat)
 {
        int i;
 
-       printk(KERN_ERR "(pid %d) start dumping heap cat %d (%d elements)\n",
+       pr_err("(pid %d) start dumping heap cat %d (%d elements)\n",
               current->pid, cat, heap->cnt);
        for (i = 0; i < heap->cnt; i++) {
                struct ubifs_lprops *lprops = heap->arr[i];
 
-               printk(KERN_ERR "\t%d. LEB %d hpos %d free %d dirty %d "
-                      "flags %d\n", i, lprops->lnum, lprops->hpos,
-                      lprops->free, lprops->dirty, lprops->flags);
+               pr_err("\t%d. LEB %d hpos %d free %d dirty %d flags %d\n",
+                      i, lprops->lnum, lprops->hpos, lprops->free,
+                      lprops->dirty, lprops->flags);
        }
-       printk(KERN_ERR "(pid %d) finish dumping heap\n", current->pid);
+       pr_err("(pid %d) finish dumping heap\n", current->pid);
 }
 
 void ubifs_dump_pnode(struct ubifs_info *c, struct ubifs_pnode *pnode,
@@ -984,15 +905,15 @@ void ubifs_dump_pnode(struct ubifs_info *c, struct ubifs_pnode *pnode,
 {
        int i;
 
-       printk(KERN_ERR "(pid %d) dumping pnode:\n", current->pid);
-       printk(KERN_ERR "\taddress %zx parent %zx cnext %zx\n",
+       pr_err("(pid %d) dumping pnode:\n", current->pid);
+       pr_err("\taddress %zx parent %zx cnext %zx\n",
               (size_t)pnode, (size_t)parent, (size_t)pnode->cnext);
-       printk(KERN_ERR "\tflags %lu iip %d level %d num %d\n",
+       pr_err("\tflags %lu iip %d level %d num %d\n",
               pnode->flags, iip, pnode->level, pnode->num);
        for (i = 0; i < UBIFS_LPT_FANOUT; i++) {
                struct ubifs_lprops *lp = &pnode->lprops[i];
 
-               printk(KERN_ERR "\t%d: free %d dirty %d flags %d lnum %d\n",
+               pr_err("\t%d: free %d dirty %d flags %d lnum %d\n",
                       i, lp->free, lp->dirty, lp->flags, lp->lnum);
        }
 }
@@ -1002,20 +923,20 @@ void ubifs_dump_tnc(struct ubifs_info *c)
        struct ubifs_znode *znode;
        int level;
 
-       printk(KERN_ERR "\n");
-       printk(KERN_ERR "(pid %d) start dumping TNC tree\n", current->pid);
+       pr_err("\n");
+       pr_err("(pid %d) start dumping TNC tree\n", current->pid);
        znode = ubifs_tnc_levelorder_next(c->zroot.znode, NULL);
        level = znode->level;
-       printk(KERN_ERR "== Level %d ==\n", level);
+       pr_err("== Level %d ==\n", level);
        while (znode) {
                if (level != znode->level) {
                        level = znode->level;
-                       printk(KERN_ERR "== Level %d ==\n", level);
+                       pr_err("== Level %d ==\n", level);
                }
                ubifs_dump_znode(c, znode);
                znode = ubifs_tnc_levelorder_next(c->zroot.znode, znode);
        }
-       printk(KERN_ERR "(pid %d) finish dumping TNC tree\n", current->pid);
+       pr_err("(pid %d) finish dumping TNC tree\n", current->pid);
 }
 
 static int dump_znode(struct ubifs_info *c, struct ubifs_znode *znode,
@@ -1154,8 +1075,8 @@ int dbg_check_synced_i_size(const struct ubifs_info *c, struct inode *inode)
        mutex_lock(&ui->ui_mutex);
        spin_lock(&ui->ui_lock);
        if (ui->ui_size != ui->synced_i_size && !ui->dirty) {
-               ubifs_err("ui_size is %lld, synced_i_size is %lld, but inode "
-                         "is clean", ui->ui_size, ui->synced_i_size);
+               ubifs_err("ui_size is %lld, synced_i_size is %lld, but inode is clean",
+                         ui->ui_size, ui->synced_i_size);
                ubifs_err("i_ino %lu, i_mode %#x, i_size %lld", inode->i_ino,
                          inode->i_mode, i_size_read(inode));
                dump_stack();
@@ -1217,17 +1138,16 @@ int dbg_check_dir(struct ubifs_info *c, const struct inode *dir)
        kfree(pdent);
 
        if (i_size_read(dir) != size) {
-               ubifs_err("directory inode %lu has size %llu, "
-                         "but calculated size is %llu", dir->i_ino,
-                         (unsigned long long)i_size_read(dir),
+               ubifs_err("directory inode %lu has size %llu, but calculated size is %llu",
+                         dir->i_ino, (unsigned long long)i_size_read(dir),
                          (unsigned long long)size);
                ubifs_dump_inode(c, dir);
                dump_stack();
                return -EINVAL;
        }
        if (dir->i_nlink != nlink) {
-               ubifs_err("directory inode %lu has nlink %u, but calculated "
-                         "nlink is %u", dir->i_ino, dir->i_nlink, nlink);
+               ubifs_err("directory inode %lu has nlink %u, but calculated nlink is %u",
+                         dir->i_ino, dir->i_nlink, nlink);
                ubifs_dump_inode(c, dir);
                dump_stack();
                return -EINVAL;
@@ -1686,8 +1606,8 @@ int dbg_walk_index(struct ubifs_info *c, dbg_leaf_callback leaf_cb,
                if (znode_cb) {
                        err = znode_cb(c, znode, priv);
                        if (err) {
-                               ubifs_err("znode checking function returned "
-                                         "error %d", err);
+                               ubifs_err("znode checking function returned error %d",
+                                         err);
                                ubifs_dump_znode(c, znode);
                                goto out_dump;
                        }
@@ -1697,9 +1617,7 @@ int dbg_walk_index(struct ubifs_info *c, dbg_leaf_callback leaf_cb,
                                zbr = &znode->zbranch[idx];
                                err = leaf_cb(c, zbr, priv);
                                if (err) {
-                                       ubifs_err("leaf checking function "
-                                                 "returned error %d, for leaf "
-                                                 "at LEB %d:%d",
+                                       ubifs_err("leaf checking function returned error %d, for leaf at LEB %d:%d",
                                                  err, zbr->lnum, zbr->offs);
                                        goto out_dump;
                                }
@@ -1807,8 +1725,8 @@ int dbg_check_idx_size(struct ubifs_info *c, long long idx_size)
        }
 
        if (calc != idx_size) {
-               ubifs_err("index size check failed: calculated size is %lld, "
-                         "should be %lld", calc, idx_size);
+               ubifs_err("index size check failed: calculated size is %lld, should be %lld",
+                         calc, idx_size);
                dump_stack();
                return -EINVAL;
        }
@@ -2120,8 +2038,7 @@ static int check_leaf(struct ubifs_info *c, struct ubifs_zbranch *zbr,
                fscki = read_add_inode(c, priv, inum);
                if (IS_ERR(fscki)) {
                        err = PTR_ERR(fscki);
-                       ubifs_err("error %d while processing data node and "
-                                 "trying to find inode node %lu",
+                       ubifs_err("error %d while processing data node and trying to find inode node %lu",
                                  err, (unsigned long)inum);
                        goto out_dump;
                }
@@ -2131,9 +2048,8 @@ static int check_leaf(struct ubifs_info *c, struct ubifs_zbranch *zbr,
                blk_offs <<= UBIFS_BLOCK_SHIFT;
                blk_offs += le32_to_cpu(dn->size);
                if (blk_offs > fscki->size) {
-                       ubifs_err("data node at LEB %d:%d is not within inode "
-                                 "size %lld", zbr->lnum, zbr->offs,
-                                 fscki->size);
+                       ubifs_err("data node at LEB %d:%d is not within inode size %lld",
+                                 zbr->lnum, zbr->offs, fscki->size);
                        err = -EINVAL;
                        goto out_dump;
                }
@@ -2154,8 +2070,7 @@ static int check_leaf(struct ubifs_info *c, struct ubifs_zbranch *zbr,
                fscki = read_add_inode(c, priv, inum);
                if (IS_ERR(fscki)) {
                        err = PTR_ERR(fscki);
-                       ubifs_err("error %d while processing entry node and "
-                                 "trying to find inode node %lu",
+                       ubifs_err("error %d while processing entry node and trying to find inode node %lu",
                                  err, (unsigned long)inum);
                        goto out_dump;
                }
@@ -2167,8 +2082,7 @@ static int check_leaf(struct ubifs_info *c, struct ubifs_zbranch *zbr,
                fscki1 = read_add_inode(c, priv, inum);
                if (IS_ERR(fscki1)) {
                        err = PTR_ERR(fscki1);
-                       ubifs_err("error %d while processing entry node and "
-                                 "trying to find parent inode node %lu",
+                       ubifs_err("error %d while processing entry node and trying to find parent inode node %lu",
                                  err, (unsigned long)inum);
                        goto out_dump;
                }
@@ -2258,61 +2172,52 @@ static int check_inodes(struct ubifs_info *c, struct fsck_data *fsckd)
                         */
                        if (fscki->inum != UBIFS_ROOT_INO &&
                            fscki->references != 1) {
-                               ubifs_err("directory inode %lu has %d "
-                                         "direntries which refer it, but "
-                                         "should be 1",
+                               ubifs_err("directory inode %lu has %d direntries which refer it, but should be 1",
                                          (unsigned long)fscki->inum,
                                          fscki->references);
                                goto out_dump;
                        }
                        if (fscki->inum == UBIFS_ROOT_INO &&
                            fscki->references != 0) {
-                               ubifs_err("root inode %lu has non-zero (%d) "
-                                         "direntries which refer it",
+                               ubifs_err("root inode %lu has non-zero (%d) direntries which refer it",
                                          (unsigned long)fscki->inum,
                                          fscki->references);
                                goto out_dump;
                        }
                        if (fscki->calc_sz != fscki->size) {
-                               ubifs_err("directory inode %lu size is %lld, "
-                                         "but calculated size is %lld",
+                               ubifs_err("directory inode %lu size is %lld, but calculated size is %lld",
                                          (unsigned long)fscki->inum,
                                          fscki->size, fscki->calc_sz);
                                goto out_dump;
                        }
                        if (fscki->calc_cnt != fscki->nlink) {
-                               ubifs_err("directory inode %lu nlink is %d, "
-                                         "but calculated nlink is %d",
+                               ubifs_err("directory inode %lu nlink is %d, but calculated nlink is %d",
                                          (unsigned long)fscki->inum,
                                          fscki->nlink, fscki->calc_cnt);
                                goto out_dump;
                        }
                } else {
                        if (fscki->references != fscki->nlink) {
-                               ubifs_err("inode %lu nlink is %d, but "
-                                         "calculated nlink is %d",
+                               ubifs_err("inode %lu nlink is %d, but calculated nlink is %d",
                                          (unsigned long)fscki->inum,
                                          fscki->nlink, fscki->references);
                                goto out_dump;
                        }
                }
                if (fscki->xattr_sz != fscki->calc_xsz) {
-                       ubifs_err("inode %lu has xattr size %u, but "
-                                 "calculated size is %lld",
+                       ubifs_err("inode %lu has xattr size %u, but calculated size is %lld",
                                  (unsigned long)fscki->inum, fscki->xattr_sz,
                                  fscki->calc_xsz);
                        goto out_dump;
                }
                if (fscki->xattr_cnt != fscki->calc_xcnt) {
-                       ubifs_err("inode %lu has %u xattrs, but "
-                                 "calculated count is %lld",
+                       ubifs_err("inode %lu has %u xattrs, but calculated count is %lld",
                                  (unsigned long)fscki->inum,
                                  fscki->xattr_cnt, fscki->calc_xcnt);
                        goto out_dump;
                }
                if (fscki->xattr_nms != fscki->calc_xnms) {
-                       ubifs_err("inode %lu has xattr names' size %u, but "
-                                 "calculated names' size is %lld",
+                       ubifs_err("inode %lu has xattr names' size %u, but calculated names' size is %lld",
                                  (unsigned long)fscki->inum, fscki->xattr_nms,
                                  fscki->calc_xnms);
                        goto out_dump;
@@ -2652,20 +2557,18 @@ static int power_cut_emulated(struct ubifs_info *c, int lnum, int write)
        return 1;
 }
 
-static void cut_data(const void *buf, unsigned int len)
+static int corrupt_data(const struct ubifs_info *c, const void *buf,
+                       unsigned int len)
 {
        unsigned int from, to, i, ffs = chance(1, 2);
        unsigned char *p = (void *)buf;
 
        from = random32() % (len + 1);
-       if (chance(1, 2))
-               to = random32() % (len - from + 1);
-       else
-               to = len;
+       /* Corruption may only span one max. write unit */
+       to = min(len, ALIGN(from, c->max_write_size));
 
-       if (from < to)
-               ubifs_warn("filled bytes %u-%u with %s", from, to - 1,
-                          ffs ? "0xFFs" : "random data");
+       ubifs_warn("filled bytes %u-%u with %s", from, to - 1,
+                  ffs ? "0xFFs" : "random data");
 
        if (ffs)
                for (i = from; i < to; i++)
@@ -2673,6 +2576,8 @@ static void cut_data(const void *buf, unsigned int len)
        else
                for (i = from; i < to; i++)
                        p[i] = random32() % 0x100;
+
+       return to;
 }
 
 int dbg_leb_write(struct ubifs_info *c, int lnum, const void *buf,
@@ -2685,7 +2590,9 @@ int dbg_leb_write(struct ubifs_info *c, int lnum, const void *buf,
 
        failing = power_cut_emulated(c, lnum, 1);
        if (failing)
-               cut_data(buf, len);
+               len = corrupt_data(c, buf, len);
+       ubifs_warn("actually write %d bytes to LEB %d:%d (the buffer was corrupted)",
+                  len, lnum, offs);
        err = ubi_leb_write(c->ubi, lnum, buf, offs, len);
        if (err)
                return err;
index 760de723dadb4928eec6f9c4729245a40dcce5de..e03d5179769ada74c6b1cb2c4aa2805ffa89dbdd 100644 (file)
@@ -150,7 +150,7 @@ struct ubifs_global_debug_info {
 
 #define ubifs_assert(expr) do {                                                \
        if (unlikely(!(expr))) {                                               \
-               printk(KERN_CRIT "UBIFS assert failed in %s at %u (pid %d)\n", \
+               pr_crit("UBIFS assert failed in %s at %u (pid %d)\n",          \
                       __func__, __LINE__, current->pid);                      \
                dump_stack();                                                  \
        }                                                                      \
@@ -159,26 +159,23 @@ struct ubifs_global_debug_info {
 #define ubifs_assert_cmt_locked(c) do {                                        \
        if (unlikely(down_write_trylock(&(c)->commit_sem))) {                  \
                up_write(&(c)->commit_sem);                                    \
-               printk(KERN_CRIT "commit lock is not locked!\n");              \
+               pr_crit("commit lock is not locked!\n");                       \
                ubifs_assert(0);                                               \
        }                                                                      \
 } while (0)
 
 #define ubifs_dbg_msg(type, fmt, ...) \
-       pr_debug("UBIFS DBG " type ": " fmt "\n", ##__VA_ARGS__)
+       pr_debug("UBIFS DBG " type " (pid %d): " fmt "\n", current->pid,       \
+                ##__VA_ARGS__)
 
 #define DBG_KEY_BUF_LEN 48
 #define ubifs_dbg_msg_key(type, key, fmt, ...) do {                            \
        char __tmp_key_buf[DBG_KEY_BUF_LEN];                                   \
-       pr_debug("UBIFS DBG " type ": " fmt "%s\n", ##__VA_ARGS__,             \
+       pr_debug("UBIFS DBG " type " (pid %d): " fmt "%s\n", current->pid,     \
+                ##__VA_ARGS__,                                                \
                 dbg_snprintf_key(c, key, __tmp_key_buf, DBG_KEY_BUF_LEN));    \
 } while (0)
 
-/* Just a debugging messages not related to any specific UBIFS subsystem */
-#define dbg_msg(fmt, ...)                                                      \
-       printk(KERN_DEBUG "UBIFS DBG (pid %d): %s: " fmt "\n", current->pid,   \
-              __func__, ##__VA_ARGS__)
-
 /* General messages */
 #define dbg_gen(fmt, ...)   ubifs_dbg_msg("gen", fmt, ##__VA_ARGS__)
 /* Additional journal messages */
index c95681cf1b71cfe7c99f70d5d88eb2f54c7804c0..e271fba1651baf645e00f62659cc80825625ca71 100644 (file)
@@ -980,8 +980,8 @@ static int ubifs_rename(struct inode *old_dir, struct dentry *old_dentry,
         * separately.
         */
 
-       dbg_gen("dent '%.*s' ino %lu in dir ino %lu to dent '%.*s' in "
-               "dir ino %lu", old_dentry->d_name.len, old_dentry->d_name.name,
+       dbg_gen("dent '%.*s' ino %lu in dir ino %lu to dent '%.*s' in dir ino %lu",
+               old_dentry->d_name.len, old_dentry->d_name.name,
                old_inode->i_ino, old_dir->i_ino, new_dentry->d_name.len,
                new_dentry->d_name.name, new_dir->i_ino);
        ubifs_assert(mutex_is_locked(&old_dir->i_mutex));
index 7bd6e72afd1136a3141e42227553ba5daa5ffcba..ff48c5a853092e92cd6b4ef77362694ce9ba8cb5 100644 (file)
@@ -1486,8 +1486,8 @@ static int ubifs_vm_page_mkwrite(struct vm_area_struct *vma,
        err = ubifs_budget_space(c, &req);
        if (unlikely(err)) {
                if (err == -ENOSPC)
-                       ubifs_warn("out of space for mmapped file "
-                                  "(inode number %lu)", inode->i_ino);
+                       ubifs_warn("out of space for mmapped file (inode number %lu)",
+                                  inode->i_ino);
                return VM_FAULT_SIGBUS;
        }
 
index 04dd6f47635e075b783fa750e83db5e94df409b8..76ca53cd3eeec52c53ab064221a6519d8c70c80c 100644 (file)
@@ -714,9 +714,9 @@ int ubifs_garbage_collect(struct ubifs_info *c, int anyway)
                        break;
                }
 
-               dbg_gc("found LEB %d: free %d, dirty %d, sum %d "
-                      "(min. space %d)", lp.lnum, lp.free, lp.dirty,
-                      lp.free + lp.dirty, min_space);
+               dbg_gc("found LEB %d: free %d, dirty %d, sum %d (min. space %d)",
+                      lp.lnum, lp.free, lp.dirty, lp.free + lp.dirty,
+                      min_space);
 
                space_before = c->leb_size - wbuf->offs - wbuf->used;
                if (wbuf->lnum == -1)
index 12c0f154ca83ec1e4eef8942f23f0ddb251cc2fc..afaad07f3b29f4465ba2b3e0702a947f7b60c071 100644 (file)
@@ -469,8 +469,8 @@ static void pack_inode(struct ubifs_info *c, struct ubifs_ino_node *ino,
        ino->ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
        ino->mtime_sec  = cpu_to_le64(inode->i_mtime.tv_sec);
        ino->mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
-       ino->uid   = cpu_to_le32(inode->i_uid);
-       ino->gid   = cpu_to_le32(inode->i_gid);
+       ino->uid   = cpu_to_le32(i_uid_read(inode));
+       ino->gid   = cpu_to_le32(i_gid_read(inode));
        ino->mode  = cpu_to_le32(inode->i_mode);
        ino->flags = cpu_to_le32(ui->flags);
        ino->size  = cpu_to_le64(ui->ui_size);
index c80b15d6c8de0a2cc49bd6a591fd482e7b30b456..36bd4efd0819e96ee299acd030f24fa7113e4fb2 100644 (file)
@@ -315,17 +315,15 @@ static void remove_buds(struct ubifs_info *c)
                         * heads (non-closed buds).
                         */
                        c->cmt_bud_bytes += wbuf->offs - bud->start;
-                       dbg_log("preserve %d:%d, jhead %s, bud bytes %d, "
-                               "cmt_bud_bytes %lld", bud->lnum, bud->start,
-                               dbg_jhead(bud->jhead), wbuf->offs - bud->start,
-                               c->cmt_bud_bytes);
+                       dbg_log("preserve %d:%d, jhead %s, bud bytes %d, cmt_bud_bytes %lld",
+                               bud->lnum, bud->start, dbg_jhead(bud->jhead),
+                               wbuf->offs - bud->start, c->cmt_bud_bytes);
                        bud->start = wbuf->offs;
                } else {
                        c->cmt_bud_bytes += c->leb_size - bud->start;
-                       dbg_log("remove %d:%d, jhead %s, bud bytes %d, "
-                               "cmt_bud_bytes %lld", bud->lnum, bud->start,
-                               dbg_jhead(bud->jhead), c->leb_size - bud->start,
-                               c->cmt_bud_bytes);
+                       dbg_log("remove %d:%d, jhead %s, bud bytes %d, cmt_bud_bytes %lld",
+                               bud->lnum, bud->start, dbg_jhead(bud->jhead),
+                               c->leb_size - bud->start, c->cmt_bud_bytes);
                        rb_erase(p1, &c->buds);
                        /*
                         * If the commit does not finish, the recovery will need
index 86eb8e533249c4977b6a3dbdcf91bb2f334a519a..e5a2a35a46dcc1fc3fb9178704cec947458bda2e 100644 (file)
@@ -867,15 +867,15 @@ int dbg_check_cats(struct ubifs_info *c)
 
        list_for_each_entry(lprops, &c->empty_list, list) {
                if (lprops->free != c->leb_size) {
-                       ubifs_err("non-empty LEB %d on empty list "
-                                 "(free %d dirty %d flags %d)", lprops->lnum,
-                                 lprops->free, lprops->dirty, lprops->flags);
+                       ubifs_err("non-empty LEB %d on empty list (free %d dirty %d flags %d)",
+                                 lprops->lnum, lprops->free, lprops->dirty,
+                                 lprops->flags);
                        return -EINVAL;
                }
                if (lprops->flags & LPROPS_TAKEN) {
-                       ubifs_err("taken LEB %d on empty list "
-                                 "(free %d dirty %d flags %d)", lprops->lnum,
-                                 lprops->free, lprops->dirty, lprops->flags);
+                       ubifs_err("taken LEB %d on empty list (free %d dirty %d flags %d)",
+                                 lprops->lnum, lprops->free, lprops->dirty,
+                                 lprops->flags);
                        return -EINVAL;
                }
        }
@@ -883,15 +883,15 @@ int dbg_check_cats(struct ubifs_info *c)
        i = 0;
        list_for_each_entry(lprops, &c->freeable_list, list) {
                if (lprops->free + lprops->dirty != c->leb_size) {
-                       ubifs_err("non-freeable LEB %d on freeable list "
-                                 "(free %d dirty %d flags %d)", lprops->lnum,
-                                 lprops->free, lprops->dirty, lprops->flags);
+                       ubifs_err("non-freeable LEB %d on freeable list (free %d dirty %d flags %d)",
+                                 lprops->lnum, lprops->free, lprops->dirty,
+                                 lprops->flags);
                        return -EINVAL;
                }
                if (lprops->flags & LPROPS_TAKEN) {
-                       ubifs_err("taken LEB %d on freeable list "
-                                 "(free %d dirty %d flags %d)", lprops->lnum,
-                                 lprops->free, lprops->dirty, lprops->flags);
+                       ubifs_err("taken LEB %d on freeable list (free %d dirty %d flags %d)",
+                                 lprops->lnum, lprops->free, lprops->dirty,
+                                 lprops->flags);
                        return -EINVAL;
                }
                i += 1;
@@ -913,21 +913,21 @@ int dbg_check_cats(struct ubifs_info *c)
 
        list_for_each_entry(lprops, &c->frdi_idx_list, list) {
                if (lprops->free + lprops->dirty != c->leb_size) {
-                       ubifs_err("non-freeable LEB %d on frdi_idx list "
-                                 "(free %d dirty %d flags %d)", lprops->lnum,
-                                 lprops->free, lprops->dirty, lprops->flags);
+                       ubifs_err("non-freeable LEB %d on frdi_idx list (free %d dirty %d flags %d)",
+                                 lprops->lnum, lprops->free, lprops->dirty,
+                                 lprops->flags);
                        return -EINVAL;
                }
                if (lprops->flags & LPROPS_TAKEN) {
-                       ubifs_err("taken LEB %d on frdi_idx list "
-                                 "(free %d dirty %d flags %d)", lprops->lnum,
-                                 lprops->free, lprops->dirty, lprops->flags);
+                       ubifs_err("taken LEB %d on frdi_idx list (free %d dirty %d flags %d)",
+                                 lprops->lnum, lprops->free, lprops->dirty,
+                                 lprops->flags);
                        return -EINVAL;
                }
                if (!(lprops->flags & LPROPS_INDEX)) {
-                       ubifs_err("non-index LEB %d on frdi_idx list "
-                                 "(free %d dirty %d flags %d)", lprops->lnum,
-                                 lprops->free, lprops->dirty, lprops->flags);
+                       ubifs_err("non-index LEB %d on frdi_idx list (free %d dirty %d flags %d)",
+                                 lprops->lnum, lprops->free, lprops->dirty,
+                                 lprops->flags);
                        return -EINVAL;
                }
        }
@@ -982,9 +982,9 @@ void dbg_check_heap(struct ubifs_info *c, struct ubifs_lpt_heap *heap, int cat,
                        goto out;
                }
                if (lprops != lp) {
-                       dbg_msg("lprops %zx lp %zx lprops->lnum %d lp->lnum %d",
-                               (size_t)lprops, (size_t)lp, lprops->lnum,
-                               lp->lnum);
+                       ubifs_err("lprops %zx lp %zx lprops->lnum %d lp->lnum %d",
+                                 (size_t)lprops, (size_t)lp, lprops->lnum,
+                                 lp->lnum);
                        err = 4;
                        goto out;
                }
@@ -1002,7 +1002,7 @@ void dbg_check_heap(struct ubifs_info *c, struct ubifs_lpt_heap *heap, int cat,
        }
 out:
        if (err) {
-               dbg_msg("failed cat %d hpos %d err %d", cat, i, err);
+               ubifs_err("failed cat %d hpos %d err %d", cat, i, err);
                dump_stack();
                ubifs_dump_heap(c, heap, cat);
        }
@@ -1153,8 +1153,8 @@ static int scan_check_cb(struct ubifs_info *c,
 
        if (free > c->leb_size || free < 0 || dirty > c->leb_size ||
            dirty < 0) {
-               ubifs_err("bad calculated accounting for LEB %d: "
-                         "free %d, dirty %d", lnum, free, dirty);
+               ubifs_err("bad calculated accounting for LEB %d: free %d, dirty %d",
+                         lnum, free, dirty);
                goto out_destroy;
        }
 
@@ -1200,8 +1200,7 @@ static int scan_check_cb(struct ubifs_info *c,
                        /* Free but not unmapped LEB, it's fine */
                        is_idx = 0;
                else {
-                       ubifs_err("indexing node without indexing "
-                                 "flag");
+                       ubifs_err("indexing node without indexing flag");
                        goto out_print;
                }
        }
@@ -1236,8 +1235,7 @@ static int scan_check_cb(struct ubifs_info *c,
        return LPT_SCAN_CONTINUE;
 
 out_print:
-       ubifs_err("bad accounting of LEB %d: free %d, dirty %d flags %#x, "
-                 "should be free %d, dirty %d",
+       ubifs_err("bad accounting of LEB %d: free %d, dirty %d flags %#x, should be free %d, dirty %d",
                  lnum, lp->free, lp->dirty, lp->flags, free, dirty);
        ubifs_dump_leb(c, lnum);
 out_destroy:
@@ -1290,12 +1288,10 @@ int dbg_check_lprops(struct ubifs_info *c)
            lst.total_dirty != c->lst.total_dirty ||
            lst.total_used != c->lst.total_used) {
                ubifs_err("bad overall accounting");
-               ubifs_err("calculated: empty_lebs %d, idx_lebs %d, "
-                         "total_free %lld, total_dirty %lld, total_used %lld",
+               ubifs_err("calculated: empty_lebs %d, idx_lebs %d, total_free %lld, total_dirty %lld, total_used %lld",
                          lst.empty_lebs, lst.idx_lebs, lst.total_free,
                          lst.total_dirty, lst.total_used);
-               ubifs_err("read from lprops: empty_lebs %d, idx_lebs %d, "
-                         "total_free %lld, total_dirty %lld, total_used %lld",
+               ubifs_err("read from lprops: empty_lebs %d, idx_lebs %d, total_free %lld, total_dirty %lld, total_used %lld",
                          c->lst.empty_lebs, c->lst.idx_lebs, c->lst.total_free,
                          c->lst.total_dirty, c->lst.total_used);
                err = -EINVAL;
index 8640920766ed461896add49f69b9db767aa9569c..d46b19ec1815eb1532d4d8f848b610b4437bb63a 100644 (file)
@@ -1311,7 +1311,7 @@ out:
        ubifs_err("error %d reading pnode at %d:%d", err, lnum, offs);
        ubifs_dump_pnode(c, pnode, parent, iip);
        dump_stack();
-       dbg_msg("calc num: %d", calc_pnode_num_from_parent(c, parent, iip));
+       ubifs_err("calc num: %d", calc_pnode_num_from_parent(c, parent, iip));
        kfree(pnode);
        return err;
 }
@@ -2237,8 +2237,7 @@ int dbg_check_lpt_nodes(struct ubifs_info *c, struct ubifs_cnode *cnode,
                        /* cnode is a nnode */
                        num = calc_nnode_num(row, col);
                        if (cnode->num != num) {
-                               ubifs_err("nnode num %d expected %d "
-                                         "parent num %d iip %d",
+                               ubifs_err("nnode num %d expected %d parent num %d iip %d",
                                          cnode->num, num,
                                          (nnode ? nnode->num : 0), cnode->iip);
                                return -EINVAL;
index 4fa70734e6e77f2b2e5862d862f501e4f7f5b010..9daaeef675dd5460ed7a93a57d51f40d9d926637 100644 (file)
@@ -320,8 +320,8 @@ static int layout_cnodes(struct ubifs_info *c)
        return 0;
 
 no_space:
-       ubifs_err("LPT out of space at LEB %d:%d needing %d, done_ltab %d, "
-                 "done_lsave %d", lnum, offs, len, done_ltab, done_lsave);
+       ubifs_err("LPT out of space at LEB %d:%d needing %d, done_ltab %d, done_lsave %d",
+                 lnum, offs, len, done_ltab, done_lsave);
        ubifs_dump_lpt_info(c);
        ubifs_dump_lpt_lebs(c);
        dump_stack();
@@ -545,8 +545,8 @@ static int write_cnodes(struct ubifs_info *c)
        return 0;
 
 no_space:
-       ubifs_err("LPT out of space mismatch at LEB %d:%d needing %d, done_ltab "
-                 "%d, done_lsave %d", lnum, offs, len, done_ltab, done_lsave);
+       ubifs_err("LPT out of space mismatch at LEB %d:%d needing %d, done_ltab %d, done_lsave %d",
+                 lnum, offs, len, done_ltab, done_lsave);
        ubifs_dump_lpt_info(c);
        ubifs_dump_lpt_lebs(c);
        dump_stack();
@@ -1662,21 +1662,19 @@ static int dbg_check_ltab_lnum(struct ubifs_info *c, int lnum)
                                continue;
                        }
                        if (!dbg_is_all_ff(p, len)) {
-                               dbg_msg("invalid empty space in LEB %d at %d",
-                                       lnum, c->leb_size - len);
+                               ubifs_err("invalid empty space in LEB %d at %d",
+                                         lnum, c->leb_size - len);
                                err = -EINVAL;
                        }
                        i = lnum - c->lpt_first;
                        if (len != c->ltab[i].free) {
-                               dbg_msg("invalid free space in LEB %d "
-                                       "(free %d, expected %d)",
-                                       lnum, len, c->ltab[i].free);
+                               ubifs_err("invalid free space in LEB %d (free %d, expected %d)",
+                                         lnum, len, c->ltab[i].free);
                                err = -EINVAL;
                        }
                        if (dirty != c->ltab[i].dirty) {
-                               dbg_msg("invalid dirty space in LEB %d "
-                                       "(dirty %d, expected %d)",
-                                       lnum, dirty, c->ltab[i].dirty);
+                               ubifs_err("invalid dirty space in LEB %d (dirty %d, expected %d)",
+                                         lnum, dirty, c->ltab[i].dirty);
                                err = -EINVAL;
                        }
                        goto out;
@@ -1888,8 +1886,7 @@ static void dump_lpt_leb(const struct ubifs_info *c, int lnum)
        int err, len = c->leb_size, node_type, node_num, node_len, offs;
        void *buf, *p;
 
-       printk(KERN_DEBUG "(pid %d) start dumping LEB %d\n",
-              current->pid, lnum);
+       pr_err("(pid %d) start dumping LEB %d\n", current->pid, lnum);
        buf = p = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL);
        if (!buf) {
                ubifs_err("cannot allocate memory to dump LPT");
@@ -1907,14 +1904,14 @@ static void dump_lpt_leb(const struct ubifs_info *c, int lnum)
 
                        pad_len = get_pad_len(c, p, len);
                        if (pad_len) {
-                               printk(KERN_DEBUG "LEB %d:%d, pad %d bytes\n",
+                               pr_err("LEB %d:%d, pad %d bytes\n",
                                       lnum, offs, pad_len);
                                p += pad_len;
                                len -= pad_len;
                                continue;
                        }
                        if (len)
-                               printk(KERN_DEBUG "LEB %d:%d, free %d bytes\n",
+                               pr_err("LEB %d:%d, free %d bytes\n",
                                       lnum, offs, len);
                        break;
                }
@@ -1925,11 +1922,10 @@ static void dump_lpt_leb(const struct ubifs_info *c, int lnum)
                {
                        node_len = c->pnode_sz;
                        if (c->big_lpt)
-                               printk(KERN_DEBUG "LEB %d:%d, pnode num %d\n",
+                               pr_err("LEB %d:%d, pnode num %d\n",
                                       lnum, offs, node_num);
                        else
-                               printk(KERN_DEBUG "LEB %d:%d, pnode\n",
-                                      lnum, offs);
+                               pr_err("LEB %d:%d, pnode\n", lnum, offs);
                        break;
                }
                case UBIFS_LPT_NNODE:
@@ -1939,29 +1935,28 @@ static void dump_lpt_leb(const struct ubifs_info *c, int lnum)
 
                        node_len = c->nnode_sz;
                        if (c->big_lpt)
-                               printk(KERN_DEBUG "LEB %d:%d, nnode num %d, ",
+                               pr_err("LEB %d:%d, nnode num %d, ",
                                       lnum, offs, node_num);
                        else
-                               printk(KERN_DEBUG "LEB %d:%d, nnode, ",
+                               pr_err("LEB %d:%d, nnode, ",
                                       lnum, offs);
                        err = ubifs_unpack_nnode(c, p, &nnode);
                        for (i = 0; i < UBIFS_LPT_FANOUT; i++) {
-                               printk(KERN_CONT "%d:%d", nnode.nbranch[i].lnum,
+                               pr_cont("%d:%d", nnode.nbranch[i].lnum,
                                       nnode.nbranch[i].offs);
                                if (i != UBIFS_LPT_FANOUT - 1)
-                                       printk(KERN_CONT ", ");
+                                       pr_cont(", ");
                        }
-                       printk(KERN_CONT "\n");
+                       pr_cont("\n");
                        break;
                }
                case UBIFS_LPT_LTAB:
                        node_len = c->ltab_sz;
-                       printk(KERN_DEBUG "LEB %d:%d, ltab\n",
-                              lnum, offs);
+                       pr_err("LEB %d:%d, ltab\n", lnum, offs);
                        break;
                case UBIFS_LPT_LSAVE:
                        node_len = c->lsave_sz;
-                       printk(KERN_DEBUG "LEB %d:%d, lsave len\n", lnum, offs);
+                       pr_err("LEB %d:%d, lsave len\n", lnum, offs);
                        break;
                default:
                        ubifs_err("LPT node type %d not recognized", node_type);
@@ -1972,8 +1967,7 @@ static void dump_lpt_leb(const struct ubifs_info *c, int lnum)
                len -= node_len;
        }
 
-       printk(KERN_DEBUG "(pid %d) finish dumping LEB %d\n",
-              current->pid, lnum);
+       pr_err("(pid %d) finish dumping LEB %d\n", current->pid, lnum);
 out:
        vfree(buf);
        return;
@@ -1990,12 +1984,10 @@ void ubifs_dump_lpt_lebs(const struct ubifs_info *c)
 {
        int i;
 
-       printk(KERN_DEBUG "(pid %d) start dumping all LPT LEBs\n",
-              current->pid);
+       pr_err("(pid %d) start dumping all LPT LEBs\n", current->pid);
        for (i = 0; i < c->lpt_lebs; i++)
                dump_lpt_leb(c, i + c->lpt_first);
-       printk(KERN_DEBUG "(pid %d) finish dumping all LPT LEBs\n",
-              current->pid);
+       pr_err("(pid %d) finish dumping all LPT LEBs\n", current->pid);
 }
 
 /**
index cebf17ea045824a052e024a0e0e954c0b5a0c9e9..769701ccb5c9bf5809a3f09b17ade8397a4dbfb2 100644 (file)
@@ -562,8 +562,8 @@ static int do_kill_orphans(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
 
        list_for_each_entry(snod, &sleb->nodes, list) {
                if (snod->type != UBIFS_ORPH_NODE) {
-                       ubifs_err("invalid node type %d in orphan area at "
-                                 "%d:%d", snod->type, sleb->lnum, snod->offs);
+                       ubifs_err("invalid node type %d in orphan area at %d:%d",
+                                 snod->type, sleb->lnum, snod->offs);
                        ubifs_dump_node(c, snod->node);
                        return -EINVAL;
                }
@@ -589,8 +589,7 @@ static int do_kill_orphans(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
                         * number. That makes this orphan node, out of date.
                         */
                        if (!first) {
-                               ubifs_err("out of order commit number %llu in "
-                                         "orphan node at %d:%d",
+                               ubifs_err("out of order commit number %llu in orphan node at %d:%d",
                                          cmt_no, sleb->lnum, snod->offs);
                                ubifs_dump_node(c, snod->node);
                                return -EINVAL;
index edeec499c048ba1d93375796b5fd4f439f38dd81..065096e36ed9733a14f96090decb3131c0e0d184 100644 (file)
@@ -609,7 +609,8 @@ static void drop_last_node(struct ubifs_scan_leb *sleb, int *offs)
                snod = list_entry(sleb->nodes.prev, struct ubifs_scan_node,
                                  list);
 
-               dbg_rcvry("dropping last node at %d:%d", sleb->lnum, snod->offs);
+               dbg_rcvry("dropping last node at %d:%d",
+                         sleb->lnum, snod->offs);
                *offs = snod->offs;
                list_del(&snod->list);
                kfree(snod);
@@ -702,8 +703,8 @@ struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum,
                         * See header comment for this file for more
                         * explanations about the reasons we have this check.
                         */
-                       ubifs_err("corrupt empty space LEB %d:%d, corruption "
-                                 "starts at %d", lnum, offs, corruption);
+                       ubifs_err("corrupt empty space LEB %d:%d, corruption starts at %d",
+                                 lnum, offs, corruption);
                        /* Make sure we dump interesting non-0xFF data */
                        offs += corruption;
                        buf += corruption;
@@ -899,8 +900,8 @@ struct ubifs_scan_leb *ubifs_recover_log_leb(struct ubifs_info *c, int lnum,
                                }
                        }
                        if (snod->sqnum > cs_sqnum) {
-                               ubifs_err("unrecoverable log corruption "
-                                         "in LEB %d", lnum);
+                               ubifs_err("unrecoverable log corruption in LEB %d",
+                                         lnum);
                                ubifs_scan_destroy(sleb);
                                return ERR_PTR(-EUCLEAN);
                        }
index 94d78fc5d4e0dc11e6ea4db670f43853f569eabb..3187925e9879b29ad2530426fa4834eb9b1028fa 100644 (file)
@@ -141,9 +141,9 @@ static int set_bud_lprops(struct ubifs_info *c, struct bud_entry *b)
                 * during the replay.
                 */
                if (dirty != 0)
-                       dbg_msg("LEB %d lp: %d free %d dirty "
-                               "replay: %d free %d dirty", b->bud->lnum,
-                               lp->free, lp->dirty, b->free, b->dirty);
+                       dbg_mnt("LEB %d lp: %d free %d dirty replay: %d free %d dirty",
+                               b->bud->lnum, lp->free, lp->dirty, b->free,
+                               b->dirty);
        }
        lp = ubifs_change_lp(c, lp, b->free, dirty + b->dirty,
                             lp->flags | LPROPS_TAKEN, 0);
@@ -677,7 +677,8 @@ static int replay_bud(struct ubifs_info *c, struct bud_entry *b)
 
        b->dirty = sleb->endpt - offs - used;
        b->free = c->leb_size - sleb->endpt;
-       dbg_mnt("bud LEB %d replied: dirty %d, free %d", lnum, b->dirty, b->free);
+       dbg_mnt("bud LEB %d replied: dirty %d, free %d",
+               lnum, b->dirty, b->free);
 
 out:
        ubifs_scan_destroy(sleb);
@@ -865,8 +866,7 @@ static int replay_log_leb(struct ubifs_info *c, int lnum, int offs, void *sbuf)
                        goto out_dump;
                }
                if (le64_to_cpu(node->cmt_no) != c->cmt_no) {
-                       ubifs_err("first CS node at LEB %d:%d has wrong "
-                                 "commit number %llu expected %llu",
+                       ubifs_err("first CS node at LEB %d:%d has wrong commit number %llu expected %llu",
                                  lnum, offs,
                                  (unsigned long long)le64_to_cpu(node->cmt_no),
                                  c->cmt_no);
@@ -1058,8 +1058,8 @@ int ubifs_replay_journal(struct ubifs_info *c)
        c->bi.uncommitted_idx *= c->max_idx_node_sz;
 
        ubifs_assert(c->bud_bytes <= c->max_bud_bytes || c->need_recovery);
-       dbg_mnt("finished, log head LEB %d:%d, max_sqnum %llu, "
-               "highest_inum %lu", c->lhead_lnum, c->lhead_offs, c->max_sqnum,
+       dbg_mnt("finished, log head LEB %d:%d, max_sqnum %llu, highest_inum %lu",
+               c->lhead_lnum, c->lhead_offs, c->max_sqnum,
                (unsigned long)c->highest_inum);
 out:
        destroy_replay_list(c);
index 15e2fc5aa60bd4790a86c828aaea6995b17e1737..4c37607a958e037f7242b2f01ef9d69dd836512b 100644 (file)
@@ -391,9 +391,8 @@ static int validate_sb(struct ubifs_info *c, struct ubifs_sb_node *sup)
        min_leb_cnt += c->lpt_lebs + c->orph_lebs + c->jhead_cnt + 6;
 
        if (c->leb_cnt < min_leb_cnt || c->leb_cnt > c->vi.size) {
-               ubifs_err("bad LEB count: %d in superblock, %d on UBI volume, "
-                         "%d minimum required", c->leb_cnt, c->vi.size,
-                         min_leb_cnt);
+               ubifs_err("bad LEB count: %d in superblock, %d on UBI volume, %d minimum required",
+                         c->leb_cnt, c->vi.size, min_leb_cnt);
                goto failed;
        }
 
@@ -411,15 +410,14 @@ static int validate_sb(struct ubifs_info *c, struct ubifs_sb_node *sup)
 
        max_bytes = (long long)c->leb_size * UBIFS_MIN_BUD_LEBS;
        if (c->max_bud_bytes < max_bytes) {
-               ubifs_err("too small journal (%lld bytes), must be at least "
-                         "%lld bytes",  c->max_bud_bytes, max_bytes);
+               ubifs_err("too small journal (%lld bytes), must be at least %lld bytes",
+                         c->max_bud_bytes, max_bytes);
                goto failed;
        }
 
        max_bytes = (long long)c->leb_size * c->main_lebs;
        if (c->max_bud_bytes > max_bytes) {
-               ubifs_err("too large journal size (%lld bytes), only %lld bytes"
-                         "available in the main area",
+               ubifs_err("too large journal size (%lld bytes), only %lld bytes available in the main area",
                          c->max_bud_bytes, max_bytes);
                goto failed;
        }
@@ -549,10 +547,9 @@ int ubifs_read_superblock(struct ubifs_info *c)
                ubifs_assert(!c->ro_media || c->ro_mount);
                if (!c->ro_mount ||
                    c->ro_compat_version > UBIFS_RO_COMPAT_VERSION) {
-                       ubifs_err("on-flash format version is w%d/r%d, but "
-                                 "software only supports up to version "
-                                 "w%d/r%d", c->fmt_version,
-                                 c->ro_compat_version, UBIFS_FORMAT_VERSION,
+                       ubifs_err("on-flash format version is w%d/r%d, but software only supports up to version w%d/r%d",
+                                 c->fmt_version, c->ro_compat_version,
+                                 UBIFS_FORMAT_VERSION,
                                  UBIFS_RO_COMPAT_VERSION);
                        if (c->ro_compat_version <= UBIFS_RO_COMPAT_VERSION) {
                                ubifs_msg("only R/O mounting is possible");
@@ -611,8 +608,8 @@ int ubifs_read_superblock(struct ubifs_info *c)
        c->fanout        = le32_to_cpu(sup->fanout);
        c->lsave_cnt     = le32_to_cpu(sup->lsave_cnt);
        c->rp_size       = le64_to_cpu(sup->rp_size);
-       c->rp_uid        = le32_to_cpu(sup->rp_uid);
-       c->rp_gid        = le32_to_cpu(sup->rp_gid);
+       c->rp_uid        = make_kuid(&init_user_ns, le32_to_cpu(sup->rp_uid));
+       c->rp_gid        = make_kgid(&init_user_ns, le32_to_cpu(sup->rp_gid));
        sup_flags        = le32_to_cpu(sup->flags);
        if (!c->mount_opts.override_compr)
                c->default_compr = le16_to_cpu(sup->default_compr);
index 7c40e6025fd601be4022c39e9dded8788befbac7..58aa05df2bb66848513e3a11718049dc9cddd1f1 100644 (file)
@@ -75,7 +75,7 @@ int ubifs_scan_a_node(const struct ubifs_info *c, void *buf, int len, int lnum,
        magic = le32_to_cpu(ch->magic);
 
        if (magic == 0xFFFFFFFF) {
-               dbg_scan("hit empty space");
+               dbg_scan("hit empty space at LEB %d:%d", lnum, offs);
                return SCANNED_EMPTY_SPACE;
        }
 
@@ -85,7 +85,8 @@ int ubifs_scan_a_node(const struct ubifs_info *c, void *buf, int len, int lnum,
        if (len < UBIFS_CH_SZ)
                return SCANNED_GARBAGE;
 
-       dbg_scan("scanning %s", dbg_ntype(ch->node_type));
+       dbg_scan("scanning %s at LEB %d:%d",
+                dbg_ntype(ch->node_type), lnum, offs);
 
        if (ubifs_check_node(c, buf, lnum, offs, quiet, 1))
                return SCANNED_A_CORRUPT_NODE;
@@ -114,8 +115,8 @@ int ubifs_scan_a_node(const struct ubifs_info *c, void *buf, int len, int lnum,
                        return SCANNED_A_BAD_PAD_NODE;
                }
 
-               dbg_scan("%d bytes padded, offset now %d",
-                        pad_len, ALIGN(offs + node_len + pad_len, 8));
+               dbg_scan("%d bytes padded at LEB %d:%d, offset now %d", pad_len,
+                        lnum, offs, ALIGN(offs + node_len + pad_len, 8));
 
                return node_len + pad_len;
        }
@@ -150,8 +151,8 @@ struct ubifs_scan_leb *ubifs_start_scan(const struct ubifs_info *c, int lnum,
 
        err = ubifs_leb_read(c, lnum, sbuf + offs, offs, c->leb_size - offs, 0);
        if (err && err != -EBADMSG) {
-               ubifs_err("cannot read %d bytes from LEB %d:%d,"
-                         " error %d", c->leb_size - offs, lnum, offs, err);
+               ubifs_err("cannot read %d bytes from LEB %d:%d, error %d",
+                         c->leb_size - offs, lnum, offs, err);
                kfree(sleb);
                return ERR_PTR(err);
        }
@@ -240,8 +241,6 @@ void ubifs_scanned_corruption(const struct ubifs_info *c, int lnum, int offs,
        int len;
 
        ubifs_err("corruption at LEB %d:%d", lnum, offs);
-       if (dbg_is_tst_rcvry(c))
-               return;
        len = c->leb_size - offs;
        if (len > 8192)
                len = 8192;
index 71a197f0f93d24c0cc33527d4f2d69c8c3d51f7b..ddc0f6ae65e9992af3dc689ad61021e86c5e40a1 100644 (file)
@@ -89,9 +89,8 @@ static int validate_inode(struct ubifs_info *c, const struct inode *inode)
                return 5;
 
        if (!ubifs_compr_present(ui->compr_type)) {
-               ubifs_warn("inode %lu uses '%s' compression, but it was not "
-                          "compiled in", inode->i_ino,
-                          ubifs_compr_name(ui->compr_type));
+               ubifs_warn("inode %lu uses '%s' compression, but it was not compiled in",
+                          inode->i_ino, ubifs_compr_name(ui->compr_type));
        }
 
        err = dbg_check_dir(c, inode);
@@ -130,8 +129,8 @@ struct inode *ubifs_iget(struct super_block *sb, unsigned long inum)
 
        inode->i_flags |= (S_NOCMTIME | S_NOATIME);
        set_nlink(inode, le32_to_cpu(ino->nlink));
-       inode->i_uid   = le32_to_cpu(ino->uid);
-       inode->i_gid   = le32_to_cpu(ino->gid);
+       i_uid_write(inode, le32_to_cpu(ino->uid));
+       i_gid_write(inode, le32_to_cpu(ino->gid));
        inode->i_atime.tv_sec  = (int64_t)le64_to_cpu(ino->atime_sec);
        inode->i_atime.tv_nsec = le32_to_cpu(ino->atime_nsec);
        inode->i_mtime.tv_sec  = (int64_t)le64_to_cpu(ino->mtime_sec);
@@ -1061,8 +1060,8 @@ static int ubifs_parse_options(struct ubifs_info *c, char *options,
 
                        flag = parse_standard_option(p);
                        if (!flag) {
-                               ubifs_err("unrecognized mount option \"%s\" "
-                                         "or missing value", p);
+                               ubifs_err("unrecognized mount option \"%s\" or missing value",
+                                         p);
                                return -EINVAL;
                        }
                        sb->s_flags |= flag;
@@ -1124,8 +1123,8 @@ again:
                }
 
                /* Just disable bulk-read */
-               ubifs_warn("Cannot allocate %d bytes of memory for bulk-read, "
-                          "disabling it", c->max_bu_buf_len);
+               ubifs_warn("cannot allocate %d bytes of memory for bulk-read, disabling it",
+                          c->max_bu_buf_len);
                c->mount_opts.bulk_read = 1;
                c->bulk_read = 0;
                return;
@@ -1161,7 +1160,7 @@ static int check_free_space(struct ubifs_info *c)
 static int mount_ubifs(struct ubifs_info *c)
 {
        int err;
-       long long x;
+       long long x, y;
        size_t sz;
 
        c->ro_mount = !!(c->vfs_sb->s_flags & MS_RDONLY);
@@ -1411,75 +1410,69 @@ static int mount_ubifs(struct ubifs_info *c)
 
        c->mounting = 0;
 
-       ubifs_msg("mounted UBI device %d, volume %d, name \"%s\"",
-                 c->vi.ubi_num, c->vi.vol_id, c->vi.name);
-       if (c->ro_mount)
-               ubifs_msg("mounted read-only");
+       ubifs_msg("mounted UBI device %d, volume %d, name \"%s\"%s",
+                 c->vi.ubi_num, c->vi.vol_id, c->vi.name,
+                 c->ro_mount ? ", R/O mode" : NULL);
        x = (long long)c->main_lebs * c->leb_size;
-       ubifs_msg("file system size:   %lld bytes (%lld KiB, %lld MiB, %d "
-                 "LEBs)", x, x >> 10, x >> 20, c->main_lebs);
-       x = (long long)c->log_lebs * c->leb_size + c->max_bud_bytes;
-       ubifs_msg("journal size:       %lld bytes (%lld KiB, %lld MiB, %d "
-                 "LEBs)", x, x >> 10, x >> 20, c->log_lebs + c->max_bud_cnt);
-       ubifs_msg("media format:       w%d/r%d (latest is w%d/r%d)",
+       y = (long long)c->log_lebs * c->leb_size + c->max_bud_bytes;
+       ubifs_msg("LEB size: %d bytes (%d KiB), min./max. I/O unit sizes: %d bytes/%d bytes",
+                 c->leb_size, c->leb_size >> 10, c->min_io_size,
+                 c->max_write_size);
+       ubifs_msg("FS size: %lld bytes (%lld MiB, %d LEBs), journal size %lld bytes (%lld MiB, %d LEBs)",
+                 x, x >> 20, c->main_lebs,
+                 y, y >> 20, c->log_lebs + c->max_bud_cnt);
+       ubifs_msg("reserved for root: %llu bytes (%llu KiB)",
+                 c->report_rp_size, c->report_rp_size >> 10);
+       ubifs_msg("media format: w%d/r%d (latest is w%d/r%d), UUID %pUB%s",
                  c->fmt_version, c->ro_compat_version,
-                 UBIFS_FORMAT_VERSION, UBIFS_RO_COMPAT_VERSION);
-       ubifs_msg("default compressor: %s", ubifs_compr_name(c->default_compr));
-       ubifs_msg("reserved for root:  %llu bytes (%llu KiB)",
-               c->report_rp_size, c->report_rp_size >> 10);
-
-       dbg_msg("compiled on:         " __DATE__ " at " __TIME__);
-       dbg_msg("min. I/O unit size:  %d bytes", c->min_io_size);
-       dbg_msg("max. write size:     %d bytes", c->max_write_size);
-       dbg_msg("LEB size:            %d bytes (%d KiB)",
-               c->leb_size, c->leb_size >> 10);
-       dbg_msg("data journal heads:  %d",
+                 UBIFS_FORMAT_VERSION, UBIFS_RO_COMPAT_VERSION, c->uuid,
+                 c->big_lpt ? ", big LPT model" : ", small LPT model");
+
+       dbg_gen("default compressor:  %s", ubifs_compr_name(c->default_compr));
+       dbg_gen("data journal heads:  %d",
                c->jhead_cnt - NONDATA_JHEADS_CNT);
-       dbg_msg("UUID:                %pUB", c->uuid);
-       dbg_msg("big_lpt              %d", c->big_lpt);
-       dbg_msg("log LEBs:            %d (%d - %d)",
+       dbg_gen("log LEBs:            %d (%d - %d)",
                c->log_lebs, UBIFS_LOG_LNUM, c->log_last);
-       dbg_msg("LPT area LEBs:       %d (%d - %d)",
+       dbg_gen("LPT area LEBs:       %d (%d - %d)",
                c->lpt_lebs, c->lpt_first, c->lpt_last);
-       dbg_msg("orphan area LEBs:    %d (%d - %d)",
+       dbg_gen("orphan area LEBs:    %d (%d - %d)",
                c->orph_lebs, c->orph_first, c->orph_last);
-       dbg_msg("main area LEBs:      %d (%d - %d)",
+       dbg_gen("main area LEBs:      %d (%d - %d)",
                c->main_lebs, c->main_first, c->leb_cnt - 1);
-       dbg_msg("index LEBs:          %d", c->lst.idx_lebs);
-       dbg_msg("total index bytes:   %lld (%lld KiB, %lld MiB)",
+       dbg_gen("index LEBs:          %d", c->lst.idx_lebs);
+       dbg_gen("total index bytes:   %lld (%lld KiB, %lld MiB)",
                c->bi.old_idx_sz, c->bi.old_idx_sz >> 10,
                c->bi.old_idx_sz >> 20);
-       dbg_msg("key hash type:       %d", c->key_hash_type);
-       dbg_msg("tree fanout:         %d", c->fanout);
-       dbg_msg("reserved GC LEB:     %d", c->gc_lnum);
-       dbg_msg("first main LEB:      %d", c->main_first);
-       dbg_msg("max. znode size      %d", c->max_znode_sz);
-       dbg_msg("max. index node size %d", c->max_idx_node_sz);
-       dbg_msg("node sizes:          data %zu, inode %zu, dentry %zu",
+       dbg_gen("key hash type:       %d", c->key_hash_type);
+       dbg_gen("tree fanout:         %d", c->fanout);
+       dbg_gen("reserved GC LEB:     %d", c->gc_lnum);
+       dbg_gen("max. znode size      %d", c->max_znode_sz);
+       dbg_gen("max. index node size %d", c->max_idx_node_sz);
+       dbg_gen("node sizes:          data %zu, inode %zu, dentry %zu",
                UBIFS_DATA_NODE_SZ, UBIFS_INO_NODE_SZ, UBIFS_DENT_NODE_SZ);
-       dbg_msg("node sizes:          trun %zu, sb %zu, master %zu",
+       dbg_gen("node sizes:          trun %zu, sb %zu, master %zu",
                UBIFS_TRUN_NODE_SZ, UBIFS_SB_NODE_SZ, UBIFS_MST_NODE_SZ);
-       dbg_msg("node sizes:          ref %zu, cmt. start %zu, orph %zu",
+       dbg_gen("node sizes:          ref %zu, cmt. start %zu, orph %zu",
                UBIFS_REF_NODE_SZ, UBIFS_CS_NODE_SZ, UBIFS_ORPH_NODE_SZ);
-       dbg_msg("max. node sizes:     data %zu, inode %zu dentry %zu, idx %d",
+       dbg_gen("max. node sizes:     data %zu, inode %zu dentry %zu, idx %d",
                UBIFS_MAX_DATA_NODE_SZ, UBIFS_MAX_INO_NODE_SZ,
                UBIFS_MAX_DENT_NODE_SZ, ubifs_idx_node_sz(c, c->fanout));
-       dbg_msg("dead watermark:      %d", c->dead_wm);
-       dbg_msg("dark watermark:      %d", c->dark_wm);
-       dbg_msg("LEB overhead:        %d", c->leb_overhead);
+       dbg_gen("dead watermark:      %d", c->dead_wm);
+       dbg_gen("dark watermark:      %d", c->dark_wm);
+       dbg_gen("LEB overhead:        %d", c->leb_overhead);
        x = (long long)c->main_lebs * c->dark_wm;
-       dbg_msg("max. dark space:     %lld (%lld KiB, %lld MiB)",
+       dbg_gen("max. dark space:     %lld (%lld KiB, %lld MiB)",
                x, x >> 10, x >> 20);
-       dbg_msg("maximum bud bytes:   %lld (%lld KiB, %lld MiB)",
+       dbg_gen("maximum bud bytes:   %lld (%lld KiB, %lld MiB)",
                c->max_bud_bytes, c->max_bud_bytes >> 10,
                c->max_bud_bytes >> 20);
-       dbg_msg("BG commit bud bytes: %lld (%lld KiB, %lld MiB)",
+       dbg_gen("BG commit bud bytes: %lld (%lld KiB, %lld MiB)",
                c->bg_bud_bytes, c->bg_bud_bytes >> 10,
                c->bg_bud_bytes >> 20);
-       dbg_msg("current bud bytes    %lld (%lld KiB, %lld MiB)",
+       dbg_gen("current bud bytes    %lld (%lld KiB, %lld MiB)",
                c->bud_bytes, c->bud_bytes >> 10, c->bud_bytes >> 20);
-       dbg_msg("max. seq. number:    %llu", c->max_sqnum);
-       dbg_msg("commit number:       %llu", c->cmt_no);
+       dbg_gen("max. seq. number:    %llu", c->max_sqnum);
+       dbg_gen("commit number:       %llu", c->cmt_no);
 
        return 0;
 
@@ -1564,10 +1557,9 @@ static int ubifs_remount_rw(struct ubifs_info *c)
 
        if (c->rw_incompat) {
                ubifs_err("the file-system is not R/W-compatible");
-               ubifs_msg("on-flash format version is w%d/r%d, but software "
-                         "only supports up to version w%d/r%d", c->fmt_version,
-                         c->ro_compat_version, UBIFS_FORMAT_VERSION,
-                         UBIFS_RO_COMPAT_VERSION);
+               ubifs_msg("on-flash format version is w%d/r%d, but software only supports up to version w%d/r%d",
+                         c->fmt_version, c->ro_compat_version,
+                         UBIFS_FORMAT_VERSION, UBIFS_RO_COMPAT_VERSION);
                return -EROFS;
        }
 
@@ -1828,8 +1820,8 @@ static void ubifs_put_super(struct super_block *sb)
                                 * next mount, so we just print a message and
                                 * continue to unmount normally.
                                 */
-                               ubifs_err("failed to write master node, "
-                                         "error %d", err);
+                               ubifs_err("failed to write master node, error %d",
+                                         err);
                } else {
                        for (i = 0; i < c->jhead_cnt; i++)
                                /* Make sure write-buffer timers are canceled */
@@ -2248,8 +2240,7 @@ static int __init ubifs_init(void)
         * UBIFS_BLOCK_SIZE. It is assumed that both are powers of 2.
         */
        if (PAGE_CACHE_SIZE < UBIFS_BLOCK_SIZE) {
-               ubifs_err("VFS page cache size is %u bytes, but UBIFS requires"
-                         " at least 4096 bytes",
+               ubifs_err("VFS page cache size is %u bytes, but UBIFS requires at least 4096 bytes",
                          (unsigned int)PAGE_CACHE_SIZE);
                return -EINVAL;
        }
@@ -2298,6 +2289,12 @@ static void __exit ubifs_exit(void)
        dbg_debugfs_exit();
        ubifs_compressors_exit();
        unregister_shrinker(&ubifs_shrinker_info);
+
+       /*
+        * Make sure all delayed rcu free inodes are flushed before we
+        * destroy cache.
+        */
+       rcu_barrier();
        kmem_cache_destroy(ubifs_inode_slab);
        unregister_filesystem(&ubifs_fs_type);
 }
index d38ac7f9654bf96fb3edcc7db19e7441aef97320..f6bf8995c7b1833a7dd25dadc80d482b00141e22 100644 (file)
@@ -328,8 +328,8 @@ static int read_znode(struct ubifs_info *c, int lnum, int offs, int len,
                case UBIFS_XENT_KEY:
                        break;
                default:
-                       dbg_msg("bad key type at slot %d: %d",
-                               i, key_type(c, &zbr->key));
+                       ubifs_err("bad key type at slot %d: %d",
+                                 i, key_type(c, &zbr->key));
                        err = 3;
                        goto out_dump;
                }
index 1e5a08623d112193d2d96d277d590b62b545279c..5486346d0a3f9ec63ce10f9296ee0fbdec8732f1 100644 (file)
 #define UBIFS_VERSION 1
 
 /* Normal UBIFS messages */
-#define ubifs_msg(fmt, ...) \
-               printk(KERN_NOTICE "UBIFS: " fmt "\n", ##__VA_ARGS__)
+#define ubifs_msg(fmt, ...) pr_notice("UBIFS: " fmt "\n", ##__VA_ARGS__)
 /* UBIFS error messages */
-#define ubifs_err(fmt, ...)                                                  \
-       printk(KERN_ERR "UBIFS error (pid %d): %s: " fmt "\n", current->pid, \
+#define ubifs_err(fmt, ...)                                         \
+       pr_err("UBIFS error (pid %d): %s: " fmt "\n", current->pid, \
               __func__, ##__VA_ARGS__)
 /* UBIFS warning messages */
-#define ubifs_warn(fmt, ...)                                         \
-       printk(KERN_WARNING "UBIFS warning (pid %d): %s: " fmt "\n", \
-              current->pid, __func__, ##__VA_ARGS__)
+#define ubifs_warn(fmt, ...)                                        \
+       pr_warn("UBIFS warning (pid %d): %s: " fmt "\n",            \
+               current->pid, __func__, ##__VA_ARGS__)
 
 /* UBIFS file system VFS magic number */
 #define UBIFS_SUPER_MAGIC 0x24051905
@@ -1426,8 +1425,8 @@ struct ubifs_info {
 
        long long rp_size;
        long long report_rp_size;
-       uid_t rp_uid;
-       gid_t rp_gid;
+       kuid_t rp_uid;
+       kgid_t rp_gid;
 
        /* The below fields are used only during mounting and re-mounting */
        unsigned int empty:1;
index aa233469b3c1a0deb1e3ef60b8039f5dd2e1cd55..287ef9f587b7eac3f731ddc47995d51b74577d33 100644 (file)
@@ -1312,14 +1312,14 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
        }
 
        read_lock(&sbi->s_cred_lock);
-       inode->i_uid = le32_to_cpu(fe->uid);
-       if (inode->i_uid == -1 ||
+       i_uid_write(inode, le32_to_cpu(fe->uid));
+       if (!uid_valid(inode->i_uid) ||
            UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_IGNORE) ||
            UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_SET))
                inode->i_uid = UDF_SB(inode->i_sb)->s_uid;
 
-       inode->i_gid = le32_to_cpu(fe->gid);
-       if (inode->i_gid == -1 ||
+       i_gid_write(inode, le32_to_cpu(fe->gid));
+       if (!gid_valid(inode->i_gid) ||
            UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_IGNORE) ||
            UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_SET))
                inode->i_gid = UDF_SB(inode->i_sb)->s_gid;
@@ -1542,12 +1542,12 @@ static int udf_update_inode(struct inode *inode, int do_sync)
        if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_FORGET))
                fe->uid = cpu_to_le32(-1);
        else
-               fe->uid = cpu_to_le32(inode->i_uid);
+               fe->uid = cpu_to_le32(i_uid_read(inode));
 
        if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_FORGET))
                fe->gid = cpu_to_le32(-1);
        else
-               fe->gid = cpu_to_le32(inode->i_gid);
+               fe->gid = cpu_to_le32(i_gid_read(inode));
 
        udfperms = ((inode->i_mode & S_IRWXO)) |
                   ((inode->i_mode & S_IRWXG) << 2) |
index 18fc038a438da4b6bbf58fa73c23c27ecd0cb721..d44fb568abe1a945c014a03c88648cd847906cc0 100644 (file)
@@ -171,6 +171,11 @@ static int init_inodecache(void)
 
 static void destroy_inodecache(void)
 {
+       /*
+        * Make sure all delayed rcu free inodes are flushed before we
+        * destroy cache.
+        */
+       rcu_barrier();
        kmem_cache_destroy(udf_inode_cachep);
 }
 
@@ -199,8 +204,8 @@ struct udf_options {
        unsigned int rootdir;
        unsigned int flags;
        umode_t umask;
-       gid_t gid;
-       uid_t uid;
+       kgid_t gid;
+       kuid_t uid;
        umode_t fmode;
        umode_t dmode;
        struct nls_table *nls_map;
@@ -335,9 +340,9 @@ static int udf_show_options(struct seq_file *seq, struct dentry *root)
        if (UDF_QUERY_FLAG(sb, UDF_FLAG_GID_IGNORE))
                seq_puts(seq, ",gid=ignore");
        if (UDF_QUERY_FLAG(sb, UDF_FLAG_UID_SET))
-               seq_printf(seq, ",uid=%u", sbi->s_uid);
+               seq_printf(seq, ",uid=%u", from_kuid(&init_user_ns, sbi->s_uid));
        if (UDF_QUERY_FLAG(sb, UDF_FLAG_GID_SET))
-               seq_printf(seq, ",gid=%u", sbi->s_gid);
+               seq_printf(seq, ",gid=%u", from_kgid(&init_user_ns, sbi->s_gid));
        if (sbi->s_umask != 0)
                seq_printf(seq, ",umask=%ho", sbi->s_umask);
        if (sbi->s_fmode != UDF_INVALID_MODE)
@@ -516,13 +521,17 @@ static int udf_parse_options(char *options, struct udf_options *uopt,
                case Opt_gid:
                        if (match_int(args, &option))
                                return 0;
-                       uopt->gid = option;
+                       uopt->gid = make_kgid(current_user_ns(), option);
+                       if (!gid_valid(uopt->gid))
+                               return 0;
                        uopt->flags |= (1 << UDF_FLAG_GID_SET);
                        break;
                case Opt_uid:
                        if (match_int(args, &option))
                                return 0;
-                       uopt->uid = option;
+                       uopt->uid = make_kuid(current_user_ns(), option);
+                       if (!uid_valid(uopt->uid))
+                               return 0;
                        uopt->flags |= (1 << UDF_FLAG_UID_SET);
                        break;
                case Opt_umask:
@@ -1934,8 +1943,8 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
        struct udf_sb_info *sbi;
 
        uopt.flags = (1 << UDF_FLAG_USE_AD_IN_ICB) | (1 << UDF_FLAG_STRICT);
-       uopt.uid = -1;
-       uopt.gid = -1;
+       uopt.uid = INVALID_UID;
+       uopt.gid = INVALID_GID;
        uopt.umask = 0;
        uopt.fmode = UDF_INVALID_MODE;
        uopt.dmode = UDF_INVALID_MODE;
index 42ad69ac95769cf7e14bc0228f5a4ec67f77b9e3..5f027227f085464d33b2434a1cad64a44195c54d 100644 (file)
@@ -128,8 +128,8 @@ struct udf_sb_info {
 
        /* Default permissions */
        umode_t                 s_umask;
-       gid_t                   s_gid;
-       uid_t                   s_uid;
+       kgid_t                  s_gid;
+       kuid_t                  s_uid;
        umode_t                 s_fmode;
        umode_t                 s_dmode;
        /* Lock protecting consistency of above permission settings */
index dd7c89d8a1c1b728deb32c7dc541c4a6e136711c..eb6d0b7dc8791bc580393dd18ea621582ab8f296 100644 (file)
@@ -597,8 +597,8 @@ static int ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode)
        /*
         * Linux now has 32-bit uid and gid, so we can support EFT.
         */
-       inode->i_uid = ufs_get_inode_uid(sb, ufs_inode);
-       inode->i_gid = ufs_get_inode_gid(sb, ufs_inode);
+       i_uid_write(inode, ufs_get_inode_uid(sb, ufs_inode));
+       i_gid_write(inode, ufs_get_inode_gid(sb, ufs_inode));
 
        inode->i_size = fs64_to_cpu(sb, ufs_inode->ui_size);
        inode->i_atime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_atime.tv_sec);
@@ -645,8 +645,8 @@ static int ufs2_read_inode(struct inode *inode, struct ufs2_inode *ufs2_inode)
         /*
          * Linux now has 32-bit uid and gid, so we can support EFT.
          */
-       inode->i_uid = fs32_to_cpu(sb, ufs2_inode->ui_uid);
-       inode->i_gid = fs32_to_cpu(sb, ufs2_inode->ui_gid);
+       i_uid_write(inode, fs32_to_cpu(sb, ufs2_inode->ui_uid));
+       i_gid_write(inode, fs32_to_cpu(sb, ufs2_inode->ui_gid));
 
        inode->i_size = fs64_to_cpu(sb, ufs2_inode->ui_size);
        inode->i_atime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_atime);
@@ -745,8 +745,8 @@ static void ufs1_update_inode(struct inode *inode, struct ufs_inode *ufs_inode)
        ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode);
        ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink);
 
-       ufs_set_inode_uid(sb, ufs_inode, inode->i_uid);
-       ufs_set_inode_gid(sb, ufs_inode, inode->i_gid);
+       ufs_set_inode_uid(sb, ufs_inode, i_uid_read(inode));
+       ufs_set_inode_gid(sb, ufs_inode, i_gid_read(inode));
                
        ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size);
        ufs_inode->ui_atime.tv_sec = cpu_to_fs32(sb, inode->i_atime.tv_sec);
@@ -789,8 +789,8 @@ static void ufs2_update_inode(struct inode *inode, struct ufs2_inode *ufs_inode)
        ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode);
        ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink);
 
-       ufs_inode->ui_uid = cpu_to_fs32(sb, inode->i_uid);
-       ufs_inode->ui_gid = cpu_to_fs32(sb, inode->i_gid);
+       ufs_inode->ui_uid = cpu_to_fs32(sb, i_uid_read(inode));
+       ufs_inode->ui_gid = cpu_to_fs32(sb, i_gid_read(inode));
 
        ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size);
        ufs_inode->ui_atime = cpu_to_fs64(sb, inode->i_atime.tv_sec);
index 444927e5706b773380d9050213a8179b3d611fd9..f7cfecfe1caba90f9c6dc8d1cde2938e5128cb79 100644 (file)
@@ -1466,6 +1466,11 @@ static int init_inodecache(void)
 
 static void destroy_inodecache(void)
 {
+       /*
+        * Make sure all delayed rcu free inodes are flushed before we
+        * destroy cache.
+        */
+       rcu_barrier();
        kmem_cache_destroy(ufs_inode_cachep);
 }
 
index fa4dbe451e278eab0f52bbacc110b157a300cdad..bb0696a41735608512963ae1648a79a9d7b0bd27 100644 (file)
@@ -140,19 +140,18 @@ long do_utimes(int dfd, const char __user *filename, struct timespec *times,
                goto out;
 
        if (filename == NULL && dfd != AT_FDCWD) {
-               int fput_needed;
-               struct file *file;
+               struct fd f;
 
                if (flags & AT_SYMLINK_NOFOLLOW)
                        goto out;
 
-               file = fget_light(dfd, &fput_needed);
+               f = fdget(dfd);
                error = -EBADF;
-               if (!file)
+               if (!f.file)
                        goto out;
 
-               error = utimes_common(&file->f_path, times);
-               fput_light(file, fput_needed);
+               error = utimes_common(&f.file->f_path, times);
+               fdput(f);
        } else {
                struct path path;
                int lookup_flags = 0;
index 4d45b7189e7eaa352da0567b8fe4a2d1b738c999..1780f062dbaf1cd504f491af5962f3dfbd8db4ee 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/fsnotify.h>
 #include <linux/audit.h>
 #include <linux/vmalloc.h>
+#include <linux/posix_acl_xattr.h>
 
 #include <asm/uaccess.h>
 
@@ -295,11 +296,13 @@ vfs_removexattr(struct dentry *dentry, const char *name)
        if (error)
                return error;
 
+       mutex_lock(&inode->i_mutex);
        error = security_inode_removexattr(dentry, name);
-       if (error)
+       if (error) {
+               mutex_unlock(&inode->i_mutex);
                return error;
+       }
 
-       mutex_lock(&inode->i_mutex);
        error = inode->i_op->removexattr(dentry, name);
        mutex_unlock(&inode->i_mutex);
 
@@ -347,6 +350,9 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
                        error = -EFAULT;
                        goto out;
                }
+               if ((strcmp(kname, XATTR_NAME_POSIX_ACL_ACCESS) == 0) ||
+                   (strcmp(kname, XATTR_NAME_POSIX_ACL_DEFAULT) == 0))
+                       posix_acl_fix_xattr_from_user(kvalue, size);
        }
 
        error = vfs_setxattr(d, kname, kvalue, size, flags);
@@ -399,22 +405,20 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
 SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
                const void __user *,value, size_t, size, int, flags)
 {
-       int fput_needed;
-       struct file *f;
+       struct fd f = fdget(fd);
        struct dentry *dentry;
        int error = -EBADF;
 
-       f = fget_light(fd, &fput_needed);
-       if (!f)
+       if (!f.file)
                return error;
-       dentry = f->f_path.dentry;
+       dentry = f.file->f_path.dentry;
        audit_inode(NULL, dentry);
-       error = mnt_want_write_file(f);
+       error = mnt_want_write_file(f.file);
        if (!error) {
                error = setxattr(dentry, name, value, size, flags);
-               mnt_drop_write_file(f);
+               mnt_drop_write_file(f.file);
        }
-       fput_light(f, fput_needed);
+       fdput(f);
        return error;
 }
 
@@ -450,6 +454,9 @@ getxattr(struct dentry *d, const char __user *name, void __user *value,
 
        error = vfs_getxattr(d, kname, kvalue, size);
        if (error > 0) {
+               if ((strcmp(kname, XATTR_NAME_POSIX_ACL_ACCESS) == 0) ||
+                   (strcmp(kname, XATTR_NAME_POSIX_ACL_DEFAULT) == 0))
+                       posix_acl_fix_xattr_to_user(kvalue, size);
                if (size && copy_to_user(value, kvalue, error))
                        error = -EFAULT;
        } else if (error == -ERANGE && size >= XATTR_SIZE_MAX) {
@@ -495,16 +502,14 @@ SYSCALL_DEFINE4(lgetxattr, const char __user *, pathname,
 SYSCALL_DEFINE4(fgetxattr, int, fd, const char __user *, name,
                void __user *, value, size_t, size)
 {
-       int fput_needed;
-       struct file *f;
+       struct fd f = fdget(fd);
        ssize_t error = -EBADF;
 
-       f = fget_light(fd, &fput_needed);
-       if (!f)
+       if (!f.file)
                return error;
-       audit_inode(NULL, f->f_path.dentry);
-       error = getxattr(f->f_path.dentry, name, value, size);
-       fput_light(f, fput_needed);
+       audit_inode(NULL, f.file->f_path.dentry);
+       error = getxattr(f.file->f_path.dentry, name, value, size);
+       fdput(f);
        return error;
 }
 
@@ -576,16 +581,14 @@ SYSCALL_DEFINE3(llistxattr, const char __user *, pathname, char __user *, list,
 
 SYSCALL_DEFINE3(flistxattr, int, fd, char __user *, list, size_t, size)
 {
-       int fput_needed;
-       struct file *f;
+       struct fd f = fdget(fd);
        ssize_t error = -EBADF;
 
-       f = fget_light(fd, &fput_needed);
-       if (!f)
+       if (!f.file)
                return error;
-       audit_inode(NULL, f->f_path.dentry);
-       error = listxattr(f->f_path.dentry, list, size);
-       fput_light(f, fput_needed);
+       audit_inode(NULL, f.file->f_path.dentry);
+       error = listxattr(f.file->f_path.dentry, list, size);
+       fdput(f);
        return error;
 }
 
@@ -645,22 +648,20 @@ SYSCALL_DEFINE2(lremovexattr, const char __user *, pathname,
 
 SYSCALL_DEFINE2(fremovexattr, int, fd, const char __user *, name)
 {
-       int fput_needed;
-       struct file *f;
+       struct fd f = fdget(fd);
        struct dentry *dentry;
        int error = -EBADF;
 
-       f = fget_light(fd, &fput_needed);
-       if (!f)
+       if (!f.file)
                return error;
-       dentry = f->f_path.dentry;
+       dentry = f.file->f_path.dentry;
        audit_inode(NULL, dentry);
-       error = mnt_want_write_file(f);
+       error = mnt_want_write_file(f.file);
        if (!error) {
                error = removexattr(dentry, name);
-               mnt_drop_write_file(f);
+               mnt_drop_write_file(f.file);
        }
-       fput_light(f, fput_needed);
+       fdput(f);
        return error;
 }
 
@@ -791,3 +792,183 @@ EXPORT_SYMBOL(generic_getxattr);
 EXPORT_SYMBOL(generic_listxattr);
 EXPORT_SYMBOL(generic_setxattr);
 EXPORT_SYMBOL(generic_removexattr);
+
+/*
+ * Allocate new xattr and copy in the value; but leave the name to callers.
+ *
+ * Returns NULL on kmalloc failure or when sizeof(*new_xattr) + size
+ * wraps around size_t.
+ */
+struct simple_xattr *simple_xattr_alloc(const void *value, size_t size)
+{
+       struct simple_xattr *new_xattr;
+       size_t len;
+
+       /*
+        * wrap around?  Use '<' (not '<='): size == 0 is legitimate, as
+        * simple_xattr_set() passes size 0 for empty EAs; '<=' would make
+        * every empty-value set fail with -ENOMEM.
+        */
+       len = sizeof(*new_xattr) + size;
+       if (len < sizeof(*new_xattr))
+               return NULL;
+
+       new_xattr = kmalloc(len, GFP_KERNEL);
+       if (!new_xattr)
+               return NULL;
+
+       new_xattr->size = size;
+       memcpy(new_xattr->value, value, size);
+       return new_xattr;
+}
+
+/*
+ * xattr GET operation for in-memory/pseudo filesystems
+ *
+ * Looks up @name in @xattrs under the list spinlock.  Returns the
+ * attribute's size on success or -ENODATA if the name is not present.
+ * If @buffer is non-NULL the value is copied into it, or -ERANGE is
+ * returned when @size is too small; a NULL @buffer is a size-only query.
+ */
+int simple_xattr_get(struct simple_xattrs *xattrs, const char *name,
+                    void *buffer, size_t size)
+{
+       struct simple_xattr *xattr;
+       int ret = -ENODATA;
+
+       spin_lock(&xattrs->lock);
+       list_for_each_entry(xattr, &xattrs->head, list) {
+               if (strcmp(name, xattr->name))
+                       continue;
+
+               /* found: report size, and copy out if the caller's buffer fits */
+               ret = xattr->size;
+               if (buffer) {
+                       if (size < xattr->size)
+                               ret = -ERANGE;
+                       else
+                               memcpy(buffer, xattr->value, xattr->size);
+               }
+               break;
+       }
+       spin_unlock(&xattrs->lock);
+       return ret;
+}
+
+/*
+ * Shared worker for simple_xattr_set() and simple_xattr_remove().
+ *
+ * A NULL @value means remove.  Returns 0 on success; -ENOMEM on
+ * allocation failure; -EEXIST when %XATTR_CREATE is set and @name
+ * already exists; -ENODATA when %XATTR_REPLACE is set and it does not.
+ *
+ * NOTE(review): new_xattr is only assigned when value != NULL; the
+ * value == NULL paths assume callers never combine a NULL value with
+ * XATTR_CREATE or with a plain add (remove always passes XATTR_REPLACE)
+ * -- confirm this invariant if new callers are added.
+ */
+static int __simple_xattr_set(struct simple_xattrs *xattrs, const char *name,
+                             const void *value, size_t size, int flags)
+{
+       struct simple_xattr *xattr;
+       struct simple_xattr *uninitialized_var(new_xattr);
+       int err = 0;
+
+       /* value == NULL means remove */
+       if (value) {
+               new_xattr = simple_xattr_alloc(value, size);
+               if (!new_xattr)
+                       return -ENOMEM;
+
+               new_xattr->name = kstrdup(name, GFP_KERNEL);
+               if (!new_xattr->name) {
+                       kfree(new_xattr);
+                       return -ENOMEM;
+               }
+       }
+
+       spin_lock(&xattrs->lock);
+       list_for_each_entry(xattr, &xattrs->head, list) {
+               if (!strcmp(name, xattr->name)) {
+                       if (flags & XATTR_CREATE) {
+                               /* exists: the unused new_xattr is freed below */
+                               xattr = new_xattr;
+                               err = -EEXIST;
+                       } else if (new_xattr) {
+                               /* replace: the old entry is freed below */
+                               list_replace(&xattr->list, &new_xattr->list);
+                       } else {
+                               /* remove: the unlinked entry is freed below */
+                               list_del(&xattr->list);
+                       }
+                       goto out;
+               }
+       }
+       if (flags & XATTR_REPLACE) {
+               /* not found: the unused new_xattr is freed below */
+               xattr = new_xattr;
+               err = -ENODATA;
+       } else {
+               list_add(&new_xattr->list, &xattrs->head);
+               xattr = NULL;
+       }
+out:
+       spin_unlock(&xattrs->lock);
+       /* free whatever ended up in xattr: old entry or unused new one */
+       if (xattr) {
+               kfree(xattr->name);
+               kfree(xattr);
+       }
+       return err;
+
+}
+
+/**
+ * simple_xattr_set - xattr SET operation for in-memory/pseudo filesystems
+ * @xattrs: target simple_xattr list
+ * @name: name of the new extended attribute
+ * @value: value of the new xattr. If %NULL, will remove the attribute
+ * @size: size of the new xattr
+ * @flags: %XATTR_{CREATE|REPLACE}
+ *
+ * If %XATTR_CREATE is set, the xattr shouldn't exist already; otherwise this
+ * fails with -EEXIST.  If %XATTR_REPLACE is set, the xattr should exist;
+ * otherwise, this fails with -ENODATA.
+ *
+ * Returns 0 on success, -errno on failure.
+ */
+int simple_xattr_set(struct simple_xattrs *xattrs, const char *name,
+                    const void *value, size_t size, int flags)
+{
+       if (size == 0)
+               value = ""; /* empty EA, do not remove */
+       return __simple_xattr_set(xattrs, name, value, size, flags);
+}
+
+/*
+ * xattr REMOVE operation for in-memory/pseudo filesystems
+ *
+ * Implemented as a replace-with-nothing; returns 0 on success or
+ * -ENODATA if @name is not present.
+ */
+int simple_xattr_remove(struct simple_xattrs *xattrs, const char *name)
+{
+       return __simple_xattr_set(xattrs, name, NULL, 0, XATTR_REPLACE);
+}
+
+/* Does @name live in the "trusted." xattr namespace? */
+static bool xattr_is_trusted(const char *name)
+{
+       return !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN);
+}
+
+/*
+ * xattr LIST operation for in-memory/pseudo filesystems
+ *
+ * Copies the NUL-terminated attribute names into @buffer (when non-NULL)
+ * and returns the number of bytes used, or -ERANGE if @size is too small.
+ * A NULL @buffer performs a size-only query.  "trusted." attributes are
+ * hidden from callers without CAP_SYS_ADMIN.
+ *
+ * Note: the -ERANGE sentinel is stored in the size_t 'used' and converted
+ * back to a negative value by the ssize_t return type.
+ */
+ssize_t simple_xattr_list(struct simple_xattrs *xattrs, char *buffer,
+                         size_t size)
+{
+       bool trusted = capable(CAP_SYS_ADMIN);
+       struct simple_xattr *xattr;
+       size_t used = 0;
+
+       spin_lock(&xattrs->lock);
+       list_for_each_entry(xattr, &xattrs->head, list) {
+               size_t len;
+
+               /* skip "trusted." attributes for unprivileged callers */
+               if (!trusted && xattr_is_trusted(xattr->name))
+                       continue;
+
+               /* name plus its NUL terminator */
+               len = strlen(xattr->name) + 1;
+               used += len;
+               if (buffer) {
+                       if (size < used) {
+                               used = -ERANGE;
+                               break;
+                       }
+                       memcpy(buffer, xattr->name, len);
+                       buffer += len;
+               }
+       }
+       spin_unlock(&xattrs->lock);
+
+       return used;
+}
+
+/*
+ * Adds an extended attribute to the list
+ *
+ * Links @new_xattr into @xattrs under the list lock; no duplicate-name
+ * check is performed.
+ */
+void simple_xattr_list_add(struct simple_xattrs *xattrs,
+                          struct simple_xattr *new_xattr)
+{
+       spin_lock(&xattrs->lock);
+       list_add(&new_xattr->list, &xattrs->head);
+       spin_unlock(&xattrs->lock);
+}
index 69d06b07b169d183f126497770743cb6dedc8256..11efd830b5f5e4c078279846f335d1560eb35487 100644 (file)
@@ -9,13 +9,72 @@
 #include <linux/fs.h>
 #include <linux/posix_acl_xattr.h>
 #include <linux/gfp.h>
+#include <linux/user_namespace.h>
 
+/*
+ * Fix up the uids and gids in posix acl extended attributes in place.
+ *
+ * Each ACL_USER / ACL_GROUP entry's e_id is translated from the 'from'
+ * user namespace into the 'to' user namespace.  Other entry types, and
+ * NULL/short/wrong-version buffers, are left untouched.
+ */
+static void posix_acl_fix_xattr_userns(
+       struct user_namespace *to, struct user_namespace *from,
+       void *value, size_t size)
+{
+       posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
+       posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
+       int count;
+       kuid_t uid;
+       kgid_t gid;
+
+       if (!value)
+               return;
+       if (size < sizeof(posix_acl_xattr_header))
+               return;
+       if (header->a_version != cpu_to_le32(POSIX_ACL_XATTR_VERSION))
+               return;
+
+       count = posix_acl_xattr_count(size);
+       if (count < 0)
+               return;
+       if (count == 0)
+               return;
+
+       for (end = entry + count; entry != end; entry++) {
+               switch(le16_to_cpu(entry->e_tag)) {
+               case ACL_USER:
+                       uid = make_kuid(from, le32_to_cpu(entry->e_id));
+                       entry->e_id = cpu_to_le32(from_kuid(to, uid));
+                       break;
+               case ACL_GROUP:
+                       /*
+                        * Bug fix: this used from_kuid(to, uid), reusing the
+                        * (possibly uninitialized) uid from a previous
+                        * ACL_USER entry instead of mapping the group id.
+                        */
+                       gid = make_kgid(from, le32_to_cpu(entry->e_id));
+                       entry->e_id = cpu_to_le32(from_kgid(to, gid));
+                       break;
+               default:
+                       break;
+               }
+       }
+}
+
+/*
+ * Map uids/gids in a posix acl xattr blob from the caller's user
+ * namespace into the initial namespace.  No-op when the caller is
+ * already in init_user_ns (the common case).
+ */
+void posix_acl_fix_xattr_from_user(void *value, size_t size)
+{
+       struct user_namespace *user_ns = current_user_ns();
+       if (user_ns == &init_user_ns)
+               return;
+       posix_acl_fix_xattr_userns(&init_user_ns, user_ns, value, size);
+}
+
+/*
+ * Map uids/gids in a posix acl xattr blob from the initial user
+ * namespace into the caller's namespace.  No-op when the caller is
+ * already in init_user_ns (the common case).
+ */
+void posix_acl_fix_xattr_to_user(void *value, size_t size)
+{
+       struct user_namespace *user_ns = current_user_ns();
+       if (user_ns == &init_user_ns)
+               return;
+       posix_acl_fix_xattr_userns(user_ns, &init_user_ns, value, size);
+}
 
 /*
  * Convert from extended attribute to in-memory representation.
  */
 struct posix_acl *
-posix_acl_from_xattr(const void *value, size_t size)
+posix_acl_from_xattr(struct user_namespace *user_ns,
+                    const void *value, size_t size)
 {
        posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
        posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
@@ -50,12 +109,21 @@ posix_acl_from_xattr(const void *value, size_t size)
                        case ACL_GROUP_OBJ:
                        case ACL_MASK:
                        case ACL_OTHER:
-                               acl_e->e_id = ACL_UNDEFINED_ID;
                                break;
 
                        case ACL_USER:
+                               acl_e->e_uid =
+                                       make_kuid(user_ns,
+                                                 le32_to_cpu(entry->e_id));
+                               if (!uid_valid(acl_e->e_uid))
+                                       goto fail;
+                               break;
                        case ACL_GROUP:
-                               acl_e->e_id = le32_to_cpu(entry->e_id);
+                               acl_e->e_gid =
+                                       make_kgid(user_ns,
+                                                 le32_to_cpu(entry->e_id));
+                               if (!gid_valid(acl_e->e_gid))
+                                       goto fail;
                                break;
 
                        default:
@@ -74,7 +142,8 @@ EXPORT_SYMBOL (posix_acl_from_xattr);
  * Convert from in-memory to extended attribute representation.
  */
 int
-posix_acl_to_xattr(const struct posix_acl *acl, void *buffer, size_t size)
+posix_acl_to_xattr(struct user_namespace *user_ns, const struct posix_acl *acl,
+                  void *buffer, size_t size)
 {
        posix_acl_xattr_header *ext_acl = (posix_acl_xattr_header *)buffer;
        posix_acl_xattr_entry *ext_entry = ext_acl->a_entries;
@@ -89,9 +158,22 @@ posix_acl_to_xattr(const struct posix_acl *acl, void *buffer, size_t size)
        ext_acl->a_version = cpu_to_le32(POSIX_ACL_XATTR_VERSION);
 
        for (n=0; n < acl->a_count; n++, ext_entry++) {
-               ext_entry->e_tag  = cpu_to_le16(acl->a_entries[n].e_tag);
-               ext_entry->e_perm = cpu_to_le16(acl->a_entries[n].e_perm);
-               ext_entry->e_id   = cpu_to_le32(acl->a_entries[n].e_id);
+               const struct posix_acl_entry *acl_e = &acl->a_entries[n];
+               ext_entry->e_tag  = cpu_to_le16(acl_e->e_tag);
+               ext_entry->e_perm = cpu_to_le16(acl_e->e_perm);
+               switch(acl_e->e_tag) {
+               case ACL_USER:
+                       ext_entry->e_id =
+                               cpu_to_le32(from_kuid(user_ns, acl_e->e_uid));
+                       break;
+               case ACL_GROUP:
+                       ext_entry->e_id =
+                               cpu_to_le32(from_kgid(user_ns, acl_e->e_gid));
+                       break;
+               default:
+                       ext_entry->e_id = cpu_to_le32(ACL_UNDEFINED_ID);
+                       break;
+               }
        }
        return real_size;
 }
index ac702a6eab9b05f6e1e4b9e3dde769a06262f84b..1d32f1d5276339e96b0034e13050eaeb39dba653 100644 (file)
@@ -337,7 +337,7 @@ xfs_xattr_acl_get(struct dentry *dentry, const char *name,
        if (acl == NULL)
                return -ENODATA;
 
-       error = posix_acl_to_xattr(acl, value, size);
+       error = posix_acl_to_xattr(&init_user_ns, acl, value, size);
        posix_acl_release(acl);
 
        return error;
@@ -361,7 +361,7 @@ xfs_xattr_acl_set(struct dentry *dentry, const char *name,
        if (!value)
                goto set_acl;
 
-       acl = posix_acl_from_xattr(value, size);
+       acl = posix_acl_from_xattr(&init_user_ns, value, size);
        if (!acl) {
                /*
                 * acl_set_file(3) may request that we set default ACLs with
index e00de08dc8aca858f8715691c2e0d20046194104..b9b8646e62db7357ae882bae00f3a74036c3da81 100644 (file)
@@ -48,44 +48,44 @@ xfs_swapext(
        xfs_swapext_t   *sxp)
 {
        xfs_inode_t     *ip, *tip;
-       struct file     *file, *tmp_file;
+       struct fd       f, tmp;
        int             error = 0;
 
        /* Pull information for the target fd */
-       file = fget((int)sxp->sx_fdtarget);
-       if (!file) {
+       f = fdget((int)sxp->sx_fdtarget);
+       if (!f.file) {
                error = XFS_ERROR(EINVAL);
                goto out;
        }
 
-       if (!(file->f_mode & FMODE_WRITE) ||
-           !(file->f_mode & FMODE_READ) ||
-           (file->f_flags & O_APPEND)) {
+       if (!(f.file->f_mode & FMODE_WRITE) ||
+           !(f.file->f_mode & FMODE_READ) ||
+           (f.file->f_flags & O_APPEND)) {
                error = XFS_ERROR(EBADF);
                goto out_put_file;
        }
 
-       tmp_file = fget((int)sxp->sx_fdtmp);
-       if (!tmp_file) {
+       tmp = fdget((int)sxp->sx_fdtmp);
+       if (!tmp.file) {
                error = XFS_ERROR(EINVAL);
                goto out_put_file;
        }
 
-       if (!(tmp_file->f_mode & FMODE_WRITE) ||
-           !(tmp_file->f_mode & FMODE_READ) ||
-           (tmp_file->f_flags & O_APPEND)) {
+       if (!(tmp.file->f_mode & FMODE_WRITE) ||
+           !(tmp.file->f_mode & FMODE_READ) ||
+           (tmp.file->f_flags & O_APPEND)) {
                error = XFS_ERROR(EBADF);
                goto out_put_tmp_file;
        }
 
-       if (IS_SWAPFILE(file->f_path.dentry->d_inode) ||
-           IS_SWAPFILE(tmp_file->f_path.dentry->d_inode)) {
+       if (IS_SWAPFILE(f.file->f_path.dentry->d_inode) ||
+           IS_SWAPFILE(tmp.file->f_path.dentry->d_inode)) {
                error = XFS_ERROR(EINVAL);
                goto out_put_tmp_file;
        }
 
-       ip = XFS_I(file->f_path.dentry->d_inode);
-       tip = XFS_I(tmp_file->f_path.dentry->d_inode);
+       ip = XFS_I(f.file->f_path.dentry->d_inode);
+       tip = XFS_I(tmp.file->f_path.dentry->d_inode);
 
        if (ip->i_mount != tip->i_mount) {
                error = XFS_ERROR(EINVAL);
@@ -105,9 +105,9 @@ xfs_swapext(
        error = xfs_swap_extents(ip, tip, sxp);
 
  out_put_tmp_file:
-       fput(tmp_file);
+       fdput(tmp);
  out_put_file:
-       fput(file);
+       fdput(f);
  out:
        return error;
 }
index 56afcdb2377d57c03bce9dc40b78c991184735a1..1eaeb8be3aaea5d4a034c36949bbca34fca6ceac 100644 (file)
@@ -36,6 +36,7 @@
 
 #include <linux/dcache.h>
 #include <linux/falloc.h>
+#include <linux/pagevec.h>
 
 static const struct vm_operations_struct xfs_file_vm_ops;
 
@@ -959,17 +960,232 @@ xfs_vm_page_mkwrite(
        return block_page_mkwrite(vma, vmf, xfs_get_blocks);
 }
 
+/*
+ * This type is designed to indicate the type of offset we would like
+ * to search from page cache for either xfs_seek_data() or xfs_seek_hole().
+ */
+enum {
+       HOLE_OFF = 0,   /* search for a hole */
+       DATA_OFF,       /* search for data */
+};
+
+/*
+ * Lookup the desired type of offset from the given page.
+ *
+ * On success, return true and the offset argument will point to the
+ * start of the region that was found.  Otherwise this function will
+ * return false and keep the offset argument unchanged.
+ *
+ * The caller must hold the page lock and have verified the page has
+ * buffers (see xfs_find_get_desired_pgoff()).
+ */
+STATIC bool
+xfs_lookup_buffer_offset(
+       struct page             *page,
+       loff_t                  *offset,
+       unsigned int            type)
+{
+       loff_t                  lastoff = page_offset(page);
+       bool                    found = false;
+       struct buffer_head      *bh, *head;
+
+       /* walk every buffer_head covering this page, in file order */
+       bh = head = page_buffers(page);
+       do {
+               /*
+                * Unwritten extents that have data in the page
+                * cache covering them can be identified by the
+                * BH_Unwritten state flag.  Pages with multiple
+                * buffers might have a mix of holes, data and
+                * unwritten extents - any buffer with valid
+                * data in it should have BH_Uptodate flag set
+                * on it.
+                */
+               if (buffer_unwritten(bh) ||
+                   buffer_uptodate(bh)) {
+                       if (type == DATA_OFF)
+                               found = true;
+               } else {
+                       if (type == HOLE_OFF)
+                               found = true;
+               }
+
+               if (found) {
+                       *offset = lastoff;
+                       break;
+               }
+               lastoff += bh->b_size;
+       } while ((bh = bh->b_this_page) != head);
+
+       return found;
+}
+
+/*
+ * This routine is called to find out and return a data or hole offset
+ * from the page cache for unwritten extents according to the desired
+ * type for xfs_seek_data() or xfs_seek_hole().
+ *
+ * The argument offset is used to tell where we start to search from the
+ * page cache.  Map is used to figure out the end points of the range to
+ * lookup pages.
+ *
+ * Return true if the desired type of offset was found, and the argument
+ * offset is filled with that address.  Otherwise, return false and keep
+ * offset unchanged.
+ */
+STATIC bool
+xfs_find_get_desired_pgoff(
+       struct inode            *inode,
+       struct xfs_bmbt_irec    *map,
+       unsigned int            type,
+       loff_t                  *offset)
+{
+       struct xfs_inode        *ip = XFS_I(inode);
+       struct xfs_mount        *mp = ip->i_mount;
+       struct pagevec          pvec;
+       pgoff_t                 index;
+       pgoff_t                 end;
+       loff_t                  endoff;
+       loff_t                  startoff = *offset;
+       loff_t                  lastoff = startoff;
+       bool                    found = false;
+
+       pagevec_init(&pvec, 0);
+
+       /* page-cache indices covering [startoff, end of the mapping) */
+       index = startoff >> PAGE_CACHE_SHIFT;
+       endoff = XFS_FSB_TO_B(mp, map->br_startoff + map->br_blockcount);
+       end = endoff >> PAGE_CACHE_SHIFT;
+       do {
+               int             want;
+               unsigned        nr_pages;
+               unsigned int    i;
+
+               want = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
+               nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
+                                         want);
+               /*
+                * No page mapped into given range.  If we are searching holes
+                * and if this is the first time we got into the loop, it means
+                * that the given offset is landed in a hole, return it.
+                *
+                * If we have already stepped through some block buffers to find
+                * holes but they all contained data, the last offset is already
+                * updated and points to the end of the last mapped page; if it
+                * does not reach the endpoint of the search, there should be a
+                * hole between them.
+                */
+               if (nr_pages == 0) {
+                       /* Data search found nothing */
+                       if (type == DATA_OFF)
+                               break;
+
+                       ASSERT(type == HOLE_OFF);
+                       if (lastoff == startoff || lastoff < endoff) {
+                               found = true;
+                               *offset = lastoff;
+                       }
+                       break;
+               }
+
+               /*
+                * At least one page was found.  If this is the first pass
+                * through the loop and the first page starts beyond the
+                * given search offset, a hole was found.
+                */
+               if (type == HOLE_OFF && lastoff == startoff &&
+                   lastoff < page_offset(pvec.pages[0])) {
+                       found = true;
+                       break;
+               }
+
+               for (i = 0; i < nr_pages; i++) {
+                       struct page     *page = pvec.pages[i];
+                       loff_t          b_offset;
+
+                       /*
+                        * At this point, the page may be truncated or
+                        * invalidated (changing page->mapping to NULL),
+                        * or even swizzled back from swapper_space to tmpfs
+                        * file mapping. However, page->index will not change
+                        * because we have a reference on the page.
+                        *
+                        * Searching is done if the page index is out of range.
+                        * If the current offset has not reached the end of
+                        * the specified search range, there should be a hole
+                        * between them.
+                        */
+                       if (page->index > end) {
+                               if (type == HOLE_OFF && lastoff < endoff) {
+                                       *offset = lastoff;
+                                       found = true;
+                               }
+                               goto out;
+                       }
+
+                       lock_page(page);
+                       /*
+                        * Page truncated or invalidated (page->mapping == NULL).
+                        * We can freely skip it and proceed to check the next
+                        * page.
+                        */
+                       if (unlikely(page->mapping != inode->i_mapping)) {
+                               unlock_page(page);
+                               continue;
+                       }
+
+                       if (!page_has_buffers(page)) {
+                               unlock_page(page);
+                               continue;
+                       }
+
+                       found = xfs_lookup_buffer_offset(page, &b_offset, type);
+                       if (found) {
+                               /*
+                                * The found offset may be less than the
+                                * starting search offset on the first page
+                                * examined; never report an offset before
+                                * the requested start.
+                                */
+                               *offset = max_t(loff_t, startoff, b_offset);
+                               unlock_page(page);
+                               goto out;
+                       }
+
+                       /*
+                        * We were either searching for data and found none,
+                        * or searching for a hole and found a data buffer.
+                        * In either case the next page probably contains
+                        * what we want, so advance the last offset to it.
+                        */
+                       lastoff = page_offset(page) + PAGE_SIZE;
+                       unlock_page(page);
+               }
+
+               /*
+                * Fewer pages were returned than we asked for: the search
+                * is done.  In this case nothing was found when searching
+                * for data, but we found a hole behind the last offset.
+                */
+               if (nr_pages < want) {
+                       if (type == HOLE_OFF) {
+                               *offset = lastoff;
+                               found = true;
+                       }
+                       break;
+               }
+
+               /* continue the scan after the last page we looked at */
+               index = pvec.pages[i - 1]->index + 1;
+               pagevec_release(&pvec);
+       } while (index <= end);
+
+out:
+       pagevec_release(&pvec);
+       return found;
+}
+
 STATIC loff_t
 xfs_seek_data(
        struct file             *file,
-       loff_t                  start,
-       u32                     type)
+       loff_t                  start)
 {
        struct inode            *inode = file->f_mapping->host;
        struct xfs_inode        *ip = XFS_I(inode);
        struct xfs_mount        *mp = ip->i_mount;
-       struct xfs_bmbt_irec    map[2];
-       int                     nmap = 2;
        loff_t                  uninitialized_var(offset);
        xfs_fsize_t             isize;
        xfs_fileoff_t           fsbno;
@@ -985,36 +1201,74 @@ xfs_seek_data(
                goto out_unlock;
        }
 
-       fsbno = XFS_B_TO_FSBT(mp, start);
-
        /*
         * Try to read extents from the first block indicated
         * by fsbno to the end block of the file.
         */
+       fsbno = XFS_B_TO_FSBT(mp, start);
        end = XFS_B_TO_FSB(mp, isize);
+       for (;;) {
+               struct xfs_bmbt_irec    map[2];
+               int                     nmap = 2;
+               unsigned int            i;
 
-       error = xfs_bmapi_read(ip, fsbno, end - fsbno, map, &nmap,
-                              XFS_BMAPI_ENTIRE);
-       if (error)
-               goto out_unlock;
+               error = xfs_bmapi_read(ip, fsbno, end - fsbno, map, &nmap,
+                                      XFS_BMAPI_ENTIRE);
+               if (error)
+                       goto out_unlock;
 
-       /*
-        * Treat unwritten extent as data extent since it might
-        * contains dirty data in page cache.
-        */
-       if (map[0].br_startblock != HOLESTARTBLOCK) {
-               offset = max_t(loff_t, start,
-                              XFS_FSB_TO_B(mp, map[0].br_startoff));
-       } else {
+               /* No extents at given offset, must be beyond EOF */
+               if (nmap == 0) {
+                       error = ENXIO;
+                       goto out_unlock;
+               }
+
+               for (i = 0; i < nmap; i++) {
+                       offset = max_t(loff_t, start,
+                                      XFS_FSB_TO_B(mp, map[i].br_startoff));
+
+                       /* Landed in a data extent */
+                       if (map[i].br_startblock == DELAYSTARTBLOCK ||
+                           (map[i].br_state == XFS_EXT_NORM &&
+                            !isnullstartblock(map[i].br_startblock)))
+                               goto out;
+
+                       /*
+                        * Landed in an unwritten extent, try to search data
+                        * from page cache.
+                        */
+                       if (map[i].br_state == XFS_EXT_UNWRITTEN) {
+                               if (xfs_find_get_desired_pgoff(inode, &map[i],
+                                                       DATA_OFF, &offset))
+                                       goto out;
+                       }
+               }
+
+               /*
+                * map[0] is hole or its an unwritten extent but
+                * without data in page cache.  Probably means that
+                * we are reading after EOF if nothing in map[1].
+                */
                if (nmap == 1) {
                        error = ENXIO;
                        goto out_unlock;
                }
 
-               offset = max_t(loff_t, start,
-                              XFS_FSB_TO_B(mp, map[1].br_startoff));
+               ASSERT(i > 1);
+
+               /*
+                * Nothing was found, proceed to the next round of search
+                * if reading offset not beyond or hit EOF.
+                */
+               fsbno = map[i - 1].br_startoff + map[i - 1].br_blockcount;
+               start = XFS_FSB_TO_B(mp, fsbno);
+               if (start >= isize) {
+                       error = ENXIO;
+                       goto out_unlock;
+               }
        }
 
+out:
        if (offset != file->f_pos)
                file->f_pos = offset;
 
@@ -1029,16 +1283,15 @@ out_unlock:
 STATIC loff_t
 xfs_seek_hole(
        struct file             *file,
-       loff_t                  start,
-       u32                     type)
+       loff_t                  start)
 {
        struct inode            *inode = file->f_mapping->host;
        struct xfs_inode        *ip = XFS_I(inode);
        struct xfs_mount        *mp = ip->i_mount;
        loff_t                  uninitialized_var(offset);
-       loff_t                  holeoff;
        xfs_fsize_t             isize;
        xfs_fileoff_t           fsbno;
+       xfs_filblks_t           end;
        uint                    lock;
        int                     error;
 
@@ -1054,21 +1307,77 @@ xfs_seek_hole(
        }
 
        fsbno = XFS_B_TO_FSBT(mp, start);
-       error = xfs_bmap_first_unused(NULL, ip, 1, &fsbno, XFS_DATA_FORK);
-       if (error)
-               goto out_unlock;
+       end = XFS_B_TO_FSB(mp, isize);
+
+       for (;;) {
+               struct xfs_bmbt_irec    map[2];
+               int                     nmap = 2;
+               unsigned int            i;
+
+               error = xfs_bmapi_read(ip, fsbno, end - fsbno, map, &nmap,
+                                      XFS_BMAPI_ENTIRE);
+               if (error)
+                       goto out_unlock;
+
+               /* No extents at given offset, must be beyond EOF */
+               if (nmap == 0) {
+                       error = ENXIO;
+                       goto out_unlock;
+               }
+
+               for (i = 0; i < nmap; i++) {
+                       offset = max_t(loff_t, start,
+                                      XFS_FSB_TO_B(mp, map[i].br_startoff));
+
+                       /* Landed in a hole */
+                       if (map[i].br_startblock == HOLESTARTBLOCK)
+                               goto out;
+
+                       /*
+                        * Landed in an unwritten extent, try to search hole
+                        * from page cache.
+                        */
+                       if (map[i].br_state == XFS_EXT_UNWRITTEN) {
+                               if (xfs_find_get_desired_pgoff(inode, &map[i],
+                                                       HOLE_OFF, &offset))
+                                       goto out;
+                       }
+               }
+
+               /*
+                * map[0] contains data or its unwritten but contains
+                * data in page cache, probably means that we are
+                * reading after EOF.  We should fix offset to point
+                * to the end of the file(i.e., there is an implicit
+                * hole at the end of any file).
+                */
+               if (nmap == 1) {
+                       offset = isize;
+                       break;
+               }
+
+               ASSERT(i > 1);
 
-       holeoff = XFS_FSB_TO_B(mp, fsbno);
-       if (holeoff <= start)
-               offset = start;
-       else {
                /*
-                * xfs_bmap_first_unused() could return a value bigger than
-                * isize if there are no more holes past the supplied offset.
+                * Both mappings contains data, proceed to the next round of
+                * search if the current reading offset not beyond or hit EOF.
                 */
-               offset = min_t(loff_t, holeoff, isize);
+               fsbno = map[i - 1].br_startoff + map[i - 1].br_blockcount;
+               start = XFS_FSB_TO_B(mp, fsbno);
+               if (start >= isize) {
+                       offset = isize;
+                       break;
+               }
        }
 
+out:
+       /*
+        * At this point, we must have found a hole.  However, the returned
+        * offset may be bigger than the file size as it may be aligned to
+        * page boundary for unwritten extents, we need to deal with this
+        * situation in particular.
+        */
+       offset = min_t(loff_t, offset, isize);
        if (offset != file->f_pos)
                file->f_pos = offset;
 
@@ -1092,9 +1401,9 @@ xfs_file_llseek(
        case SEEK_SET:
                return generic_file_llseek(file, offset, origin);
        case SEEK_DATA:
-               return xfs_seek_data(file, offset, origin);
+               return xfs_seek_data(file, offset);
        case SEEK_HOLE:
-               return xfs_seek_hole(file, offset, origin);
+               return xfs_seek_hole(file, offset);
        default:
                return -EINVAL;
        }
index 5aceb3f8ecd625de029daaff00e6093a0e0213a2..445bf1aef31c16d9e6bd17a8b91e78d7f1763c05 100644 (file)
@@ -431,7 +431,7 @@ xfs_ialloc_next_ag(
 
        spin_lock(&mp->m_agirotor_lock);
        agno = mp->m_agirotor;
-       if (++mp->m_agirotor == mp->m_maxagi)
+       if (++mp->m_agirotor >= mp->m_maxagi)
                mp->m_agirotor = 0;
        spin_unlock(&mp->m_agirotor_lock);
 
index 0e0232c3b6d98039eb2b73fb556faec1b9069016..8305f2ac6773a8ee5efd1b0e24546e4194807187 100644 (file)
@@ -70,16 +70,16 @@ xfs_find_handle(
        int                     hsize;
        xfs_handle_t            handle;
        struct inode            *inode;
-       struct file             *file = NULL;
+       struct fd               f;
        struct path             path;
        int                     error;
        struct xfs_inode        *ip;
 
        if (cmd == XFS_IOC_FD_TO_HANDLE) {
-               file = fget(hreq->fd);
-               if (!file)
+               f = fdget(hreq->fd);
+               if (!f.file)
                        return -EBADF;
-               inode = file->f_path.dentry->d_inode;
+               inode = f.file->f_path.dentry->d_inode;
        } else {
                error = user_lpath((const char __user *)hreq->path, &path);
                if (error)
@@ -134,7 +134,7 @@ xfs_find_handle(
 
  out_put:
        if (cmd == XFS_IOC_FD_TO_HANDLE)
-               fput(file);
+               fdput(f);
        else
                path_put(&path);
        return error;
index 29c2f83d4147c6824f8e8b67f5a9b4968c80c92b..b2bd3a0e6376e1190e1bbb747435acdaca998bb6 100644 (file)
@@ -440,7 +440,7 @@ xfs_initialize_perag(
        xfs_agnumber_t  agcount,
        xfs_agnumber_t  *maxagi)
 {
-       xfs_agnumber_t  index, max_metadata;
+       xfs_agnumber_t  index;
        xfs_agnumber_t  first_initialised = 0;
        xfs_perag_t     *pag;
        xfs_agino_t     agino;
@@ -500,43 +500,10 @@ xfs_initialize_perag(
        else
                mp->m_flags &= ~XFS_MOUNT_32BITINODES;
 
-       if (mp->m_flags & XFS_MOUNT_32BITINODES) {
-               /*
-                * Calculate how much should be reserved for inodes to meet
-                * the max inode percentage.
-                */
-               if (mp->m_maxicount) {
-                       __uint64_t      icount;
-
-                       icount = sbp->sb_dblocks * sbp->sb_imax_pct;
-                       do_div(icount, 100);
-                       icount += sbp->sb_agblocks - 1;
-                       do_div(icount, sbp->sb_agblocks);
-                       max_metadata = icount;
-               } else {
-                       max_metadata = agcount;
-               }
-
-               for (index = 0; index < agcount; index++) {
-                       ino = XFS_AGINO_TO_INO(mp, index, agino);
-                       if (ino > XFS_MAXINUMBER_32) {
-                               index++;
-                               break;
-                       }
-
-                       pag = xfs_perag_get(mp, index);
-                       pag->pagi_inodeok = 1;
-                       if (index < max_metadata)
-                               pag->pagf_metadata = 1;
-                       xfs_perag_put(pag);
-               }
-       } else {
-               for (index = 0; index < agcount; index++) {
-                       pag = xfs_perag_get(mp, index);
-                       pag->pagi_inodeok = 1;
-                       xfs_perag_put(pag);
-               }
-       }
+       if (mp->m_flags & XFS_MOUNT_32BITINODES)
+               index = xfs_set_inode32(mp);
+       else
+               index = xfs_set_inode64(mp);
 
        if (maxagi)
                *maxagi = index;
index 05a05a7b611963f03d0249a97cb5edbc50c152a6..deee09e534dcf35de23535bfa6ce94f6ffa750a6 100644 (file)
@@ -54,12 +54,7 @@ typedef struct xfs_trans_reservations {
 #include "xfs_sync.h"
 
 struct xlog;
-struct xfs_mount_args;
 struct xfs_inode;
-struct xfs_bmbt_irec;
-struct xfs_bmap_free;
-struct xfs_extdelta;
-struct xfs_swapext;
 struct xfs_mru_cache;
 struct xfs_nameops;
 struct xfs_ail;
index fed504fc2999f44193eb688c1405d5cee500eea6..71926d6305273426c8007c7ac1ae4092767a565c 100644 (file)
@@ -97,8 +97,7 @@ xfs_fs_set_xstate(
 STATIC int
 xfs_fs_get_dqblk(
        struct super_block      *sb,
-       int                     type,
-       qid_t                   id,
+       struct kqid             qid,
        struct fs_disk_quota    *fdq)
 {
        struct xfs_mount        *mp = XFS_M(sb);
@@ -108,14 +107,14 @@ xfs_fs_get_dqblk(
        if (!XFS_IS_QUOTA_ON(mp))
                return -ESRCH;
 
-       return -xfs_qm_scall_getquota(mp, id, xfs_quota_type(type), fdq);
+       return -xfs_qm_scall_getquota(mp, from_kqid(&init_user_ns, qid),
+                                     xfs_quota_type(qid.type), fdq);
 }
 
 STATIC int
 xfs_fs_set_dqblk(
        struct super_block      *sb,
-       int                     type,
-       qid_t                   id,
+       struct kqid             qid,
        struct fs_disk_quota    *fdq)
 {
        struct xfs_mount        *mp = XFS_M(sb);
@@ -127,7 +126,8 @@ xfs_fs_set_dqblk(
        if (!XFS_IS_QUOTA_ON(mp))
                return -ESRCH;
 
-       return -xfs_qm_scall_setqlim(mp, id, xfs_quota_type(type), fdq);
+       return -xfs_qm_scall_setqlim(mp, from_kqid(&init_user_ns, qid),
+                                    xfs_quota_type(qid.type), fdq);
 }
 
 const struct quotactl_ops xfs_quotactl_operations = {
index 19e2380fb8679d2e8641e691eaeb0b200456bfb4..26a09bd7f975f1f344083034b15960e43e4663a6 100644 (file)
@@ -88,6 +88,8 @@ mempool_t *xfs_ioend_pool;
                                         * unwritten extent conversion */
 #define MNTOPT_NOBARRIER "nobarrier"   /* .. disable */
 #define MNTOPT_64BITINODE   "inode64"  /* inodes can be allocated anywhere */
+#define MNTOPT_32BITINODE   "inode32"  /* inode allocation limited to
+                                        * XFS_MAXINUMBER_32 */
 #define MNTOPT_IKEEP   "ikeep"         /* do not free empty inode clusters */
 #define MNTOPT_NOIKEEP "noikeep"       /* free empty inode clusters */
 #define MNTOPT_LARGEIO    "largeio"    /* report large I/O sizes in stat() */
@@ -120,12 +122,18 @@ mempool_t *xfs_ioend_pool;
  * in the future, too.
  */
 enum {
-       Opt_barrier, Opt_nobarrier, Opt_err
+       Opt_barrier,
+       Opt_nobarrier,
+       Opt_inode64,
+       Opt_inode32,
+       Opt_err
 };
 
 static const match_table_t tokens = {
        {Opt_barrier, "barrier"},
        {Opt_nobarrier, "nobarrier"},
+       {Opt_inode64, "inode64"},
+       {Opt_inode32, "inode32"},
        {Opt_err, NULL}
 };
 
@@ -197,7 +205,9 @@ xfs_parseargs(
         */
        mp->m_flags |= XFS_MOUNT_BARRIER;
        mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE;
+#if !XFS_BIG_INUMS
        mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
+#endif
 
        /*
         * These can be overridden by the mount option parsing.
@@ -294,6 +304,8 @@ xfs_parseargs(
                                return EINVAL;
                        }
                        dswidth = simple_strtoul(value, &eov, 10);
+               } else if (!strcmp(this_char, MNTOPT_32BITINODE)) {
+                       mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
                } else if (!strcmp(this_char, MNTOPT_64BITINODE)) {
                        mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
 #if !XFS_BIG_INUMS
@@ -492,6 +504,7 @@ xfs_showargs(
                { XFS_MOUNT_FILESTREAMS,        "," MNTOPT_FILESTREAM },
                { XFS_MOUNT_GRPID,              "," MNTOPT_GRPID },
                { XFS_MOUNT_DISCARD,            "," MNTOPT_DISCARD },
+               { XFS_MOUNT_SMALL_INUMS,        "," MNTOPT_32BITINODE },
                { 0, NULL }
        };
        static struct proc_xfs_info xfs_info_unset[] = {
@@ -591,6 +604,80 @@ xfs_max_file_offset(
        return (((__uint64_t)pagefactor) << bitshift) - 1;
 }
 
+xfs_agnumber_t
+xfs_set_inode32(struct xfs_mount *mp)
+{
+       xfs_agnumber_t  index = 0;
+       xfs_agnumber_t  maxagi = 0;
+       xfs_sb_t        *sbp = &mp->m_sb;
+       xfs_agnumber_t  max_metadata;
+       xfs_agino_t     agino = XFS_OFFBNO_TO_AGINO(mp, sbp->sb_agblocks -1, 0);
+       xfs_ino_t       ino = XFS_AGINO_TO_INO(mp, sbp->sb_agcount -1, agino);
+       xfs_perag_t     *pag;
+
+       /* Calculate how much should be reserved for inodes to meet
+        * the max inode percentage.
+        */
+       if (mp->m_maxicount) {
+               __uint64_t      icount;
+
+               icount = sbp->sb_dblocks * sbp->sb_imax_pct;
+               do_div(icount, 100);
+               icount += sbp->sb_agblocks - 1;
+               do_div(icount, sbp->sb_agblocks);
+               max_metadata = icount;
+       } else {
+               max_metadata = sbp->sb_agcount;
+       }
+
+       for (index = 0; index < sbp->sb_agcount; index++) {
+               ino = XFS_AGINO_TO_INO(mp, index, agino);
+
+               if (ino > XFS_MAXINUMBER_32) {
+                       pag = xfs_perag_get(mp, index);
+                       pag->pagi_inodeok = 0;
+                       pag->pagf_metadata = 0;
+                       xfs_perag_put(pag);
+                       continue;
+               }
+
+               pag = xfs_perag_get(mp, index);
+               pag->pagi_inodeok = 1;
+               maxagi++;
+               if (index < max_metadata)
+                       pag->pagf_metadata = 1;
+               xfs_perag_put(pag);
+       }
+       mp->m_flags |= (XFS_MOUNT_32BITINODES |
+                       XFS_MOUNT_SMALL_INUMS);
+
+       return maxagi;
+}
+
+xfs_agnumber_t
+xfs_set_inode64(struct xfs_mount *mp)
+{
+       xfs_agnumber_t index = 0;
+
+       for (index = 0; index < mp->m_sb.sb_agcount; index++) {
+               struct xfs_perag        *pag;
+
+               pag = xfs_perag_get(mp, index);
+               pag->pagi_inodeok = 1;
+               pag->pagf_metadata = 0;
+               xfs_perag_put(pag);
+       }
+
+       /* There is no need for lock protection on m_flags,
+        * the rw_semaphore of the VFS superblock is locked
+        * during mount/umount/remount operations, so this is
+        * enough to avoid concurency on the m_flags field
+        */
+       mp->m_flags &= ~(XFS_MOUNT_32BITINODES |
+                        XFS_MOUNT_SMALL_INUMS);
+       return index;
+}
+
 STATIC int
 xfs_blkdev_get(
        xfs_mount_t             *mp,
@@ -954,7 +1041,7 @@ xfs_fs_sync_fs(
                 * We schedule xfssyncd now (now that the disk is
                 * active) instead of later (when it might not be).
                 */
-               flush_delayed_work_sync(&mp->m_sync_work);
+               flush_delayed_work(&mp->m_sync_work);
        }
 
        return 0;
@@ -1056,6 +1143,12 @@ xfs_fs_remount(
                case Opt_nobarrier:
                        mp->m_flags &= ~XFS_MOUNT_BARRIER;
                        break;
+               case Opt_inode64:
+                       mp->m_maxagi = xfs_set_inode64(mp);
+                       break;
+               case Opt_inode32:
+                       mp->m_maxagi = xfs_set_inode32(mp);
+                       break;
                default:
                        /*
                         * Logically we would return an error here to prevent
@@ -1506,6 +1599,11 @@ xfs_init_zones(void)
 STATIC void
 xfs_destroy_zones(void)
 {
+       /*
+        * Make sure all delayed rcu free are flushed before we
+        * destroy caches.
+        */
+       rcu_barrier();
        kmem_zone_destroy(xfs_ili_zone);
        kmem_zone_destroy(xfs_inode_zone);
        kmem_zone_destroy(xfs_efi_zone);
index 09b0c26b2245ebd245c2d8fcf4849105d1e5d400..9de4a920ba05962a655c81a87069254ebfbefc28 100644 (file)
@@ -75,6 +75,8 @@ struct block_device;
 extern __uint64_t xfs_max_file_offset(unsigned int);
 
 extern void xfs_blkdev_issue_flush(struct xfs_buftarg *);
+extern xfs_agnumber_t xfs_set_inode32(struct xfs_mount *);
+extern xfs_agnumber_t xfs_set_inode64(struct xfs_mount *);
 
 extern const struct export_operations xfs_export_operations;
 extern const struct xattr_handler *xfs_xattr_handlers[];
index 96548176db80c1ee4557190e5d21d32d8c347de3..9500caf15acf6c355cde32cb0e692a3a58a6a990 100644 (file)
@@ -475,7 +475,7 @@ xfs_flush_inodes(
        struct xfs_mount        *mp = ip->i_mount;
 
        queue_work(xfs_syncd_wq, &mp->m_flush_work);
-       flush_work_sync(&mp->m_flush_work);
+       flush_work(&mp->m_flush_work);
 }
 
 STATIC void
index e5795dd6013ad2b34d5a208d1cf7ad448c937387..7d36ccf57f93236c5d8228c158b3ff5ba0886672 100644 (file)
@@ -37,6 +37,7 @@ struct xlog_recover;
 struct xlog_recover_item;
 struct xfs_buf_log_format;
 struct xfs_inode_log_format;
+struct xfs_bmbt_irec;
 
 DECLARE_EVENT_CLASS(xfs_attr_list_class,
        TP_PROTO(struct xfs_attr_list_context *ctx),
index bcb60542fcf18bdbcfef1633dcbaac355485e66d..0c7fa54f309e1da36012a924905e5226f147f4c4 100644 (file)
@@ -578,9 +578,11 @@ xfs_quota_warn(
        /* no warnings for project quotas - we just return ENOSPC later */
        if (dqp->dq_flags & XFS_DQ_PROJ)
                return;
-       quota_send_warning((dqp->dq_flags & XFS_DQ_USER) ? USRQUOTA : GRPQUOTA,
-                          be32_to_cpu(dqp->q_core.d_id), mp->m_super->s_dev,
-                          type);
+       quota_send_warning(make_kqid(&init_user_ns,
+                                    (dqp->dq_flags & XFS_DQ_USER) ?
+                                    USRQUOTA : GRPQUOTA,
+                                    be32_to_cpu(dqp->q_core.d_id)),
+                          mp->m_super->s_dev, type);
 }
 
 /*
index 64ec644808bcb82951f38eaa682cb536e605d7bf..555d0337ad955bbdccd6a7f5b261e9d0777462c3 100644 (file)
@@ -3,7 +3,6 @@
 
 #include <linux/kernel.h>
 #include <linux/cpu.h>
-#include <linux/cpuidle.h>
 #include <linux/thermal.h>
 #include <asm/acpi.h>
 
@@ -59,13 +58,11 @@ struct acpi_processor_cx {
        u8 entry_method;
        u8 index;
        u32 latency;
-       u32 power;
        u8 bm_sts_skip;
        char desc[ACPI_CX_DESC_LEN];
 };
 
 struct acpi_processor_power {
-       struct cpuidle_device dev;
        struct acpi_processor_cx *state;
        unsigned long bm_check_timestamp;
        u32 default_state;
@@ -325,12 +322,10 @@ extern void acpi_processor_reevaluate_tstate(struct acpi_processor *pr,
 extern const struct file_operations acpi_processor_throttling_fops;
 extern void acpi_processor_throttling_init(void);
 /* in processor_idle.c */
-int acpi_processor_power_init(struct acpi_processor *pr,
-                             struct acpi_device *device);
+int acpi_processor_power_init(struct acpi_processor *pr);
+int acpi_processor_power_exit(struct acpi_processor *pr);
 int acpi_processor_cst_has_changed(struct acpi_processor *pr);
 int acpi_processor_hotplug(struct acpi_processor *pr);
-int acpi_processor_power_exit(struct acpi_processor *pr,
-                             struct acpi_device *device);
 int acpi_processor_suspend(struct device *dev);
 int acpi_processor_resume(struct device *dev);
 extern struct cpuidle_driver acpi_idle_driver;
index 365ea09ed3b05c951b328fa95c51dbe82182566e..a9432fc6b8ba7ade0b50d85c2582e81998c301fe 100644 (file)
@@ -60,6 +60,8 @@ struct device_node;
  * @get: returns value for signal "offset"; for output signals this
  *     returns either the value actually sensed, or zero
  * @direction_output: configures signal "offset" as output, or returns error
+ * @set_debounce: optional hook for setting debounce time for specified gpio in
+ *      interrupt triggered gpio chips
  * @set: assigns output value for signal "offset"
  * @to_irq: optional hook supporting non-static gpio_to_irq() mappings;
  *     implementation may not sleep
index 0c44e4a000fdf3e01bf7d840cce700c4e37464d5..54054e41ec387f4a75016292c187da7ed2762103 100644 (file)
@@ -427,8 +427,8 @@ struct drm_prime_file_private {
 /** File private data */
 struct drm_file {
        int authenticated;
-       pid_t pid;
-       uid_t uid;
+       struct pid *pid;
+       kuid_t uid;
        drm_magic_t magic;
        unsigned long ioctl_count;
        struct list_head lhead;
index 3f4c207a93bb5ff8ea27b7eaabf5b3a29afc0e07..e149e8be9065a27d3ca56fb2aad581c8465841bc 100644 (file)
@@ -190,6 +190,7 @@ header-y += in_route.h
 header-y += sock_diag.h
 header-y += inet_diag.h
 header-y += unix_diag.h
+header-y += packet_diag.h
 header-y += inotify.h
 header-y += input.h
 header-y += ioctl.h
@@ -357,6 +358,7 @@ header-y += sysctl.h
 header-y += sysinfo.h
 header-y += taskstats.h
 header-y += tcp.h
+header-y += tcp_metrics.h
 header-y += telephony.h
 header-y += termios.h
 header-y += time.h
index fe1d7b283cb633c4be48d465f5fa572333929fe8..854b7294f6c64f77b636c38188897a9621e7bbff 100644 (file)
@@ -244,6 +244,7 @@ struct dma_chan;
  *     indicates no delay and the device will be suspended immediately.
  * @rt: indicates the controller should run the message pump with realtime
  *     priority to minimise the transfer latency on the bus.
+ * @chipselects: list of <num_chipselects> chip select gpios
  */
 struct pl022_ssp_controller {
        u16 bus_id;
@@ -254,6 +255,7 @@ struct pl022_ssp_controller {
        void *dma_tx_param;
        int autosuspend_delay;
        bool rt;
+       int *chipselects;
 };
 
 /**
index 5713d3ac381ad4f72a8ccc8193a1421286d30f6c..408da9502177bd18a3ba88e2831701e916e800aa 100644 (file)
@@ -77,6 +77,9 @@ enum {
        ATA_ID_EIDE_PIO_IORDY   = 68,
        ATA_ID_ADDITIONAL_SUPP  = 69,
        ATA_ID_QUEUE_DEPTH      = 75,
+       ATA_ID_SATA_CAPABILITY  = 76,
+       ATA_ID_SATA_CAPABILITY_2        = 77,
+       ATA_ID_FEATURE_SUPP     = 78,
        ATA_ID_MAJOR_VER        = 80,
        ATA_ID_COMMAND_SET_1    = 82,
        ATA_ID_COMMAND_SET_2    = 83,
@@ -292,6 +295,13 @@ enum {
 
        /* READ_LOG_EXT pages */
        ATA_LOG_SATA_NCQ        = 0x10,
+       ATA_LOG_SATA_ID_DEV_DATA  = 0x30,
+       ATA_LOG_SATA_SETTINGS     = 0x08,
+       ATA_LOG_DEVSLP_MDAT       = 0x30,
+       ATA_LOG_DEVSLP_MDAT_MASK  = 0x1F,
+       ATA_LOG_DEVSLP_DETO       = 0x31,
+       ATA_LOG_DEVSLP_VALID      = 0x37,
+       ATA_LOG_DEVSLP_VALID_MASK = 0x80,
 
        /* READ/WRITE LONG (obsolete) */
        ATA_CMD_READ_LONG       = 0x22,
@@ -345,6 +355,7 @@ enum {
        SATA_FPDMA_IN_ORDER     = 0x04, /* FPDMA in-order data delivery */
        SATA_AN                 = 0x05, /* Asynchronous Notification */
        SATA_SSP                = 0x06, /* Software Settings Preservation */
+       SATA_DEVSLP             = 0x09, /* Device Sleep */
 
        /* feature values for SET_MAX */
        ATA_SET_MAX_ADDR        = 0x00,
@@ -558,15 +569,17 @@ static inline int ata_is_data(u8 prot)
 #define ata_id_is_ata(id)      (((id)[ATA_ID_CONFIG] & (1 << 15)) == 0)
 #define ata_id_has_lba(id)     ((id)[ATA_ID_CAPABILITY] & (1 << 9))
 #define ata_id_has_dma(id)     ((id)[ATA_ID_CAPABILITY] & (1 << 8))
-#define ata_id_has_ncq(id)     ((id)[76] & (1 << 8))
+#define ata_id_has_ncq(id)     ((id)[ATA_ID_SATA_CAPABILITY] & (1 << 8))
 #define ata_id_queue_depth(id) (((id)[ATA_ID_QUEUE_DEPTH] & 0x1f) + 1)
 #define ata_id_removeable(id)  ((id)[ATA_ID_CONFIG] & (1 << 7))
 #define ata_id_has_atapi_AN(id)        \
-       ( (((id)[76] != 0x0000) && ((id)[76] != 0xffff)) && \
-         ((id)[78] & (1 << 5)) )
+       ((((id)[ATA_ID_SATA_CAPABILITY] != 0x0000) && \
+         ((id)[ATA_ID_SATA_CAPABILITY] != 0xffff)) && \
+        ((id)[ATA_ID_FEATURE_SUPP] & (1 << 5)))
 #define ata_id_has_fpdma_aa(id)        \
-       ( (((id)[76] != 0x0000) && ((id)[76] != 0xffff)) && \
-         ((id)[78] & (1 << 2)) )
+       ((((id)[ATA_ID_SATA_CAPABILITY] != 0x0000) && \
+         ((id)[ATA_ID_SATA_CAPABILITY] != 0xffff)) && \
+        ((id)[ATA_ID_FEATURE_SUPP] & (1 << 2)))
 #define ata_id_iordy_disable(id) ((id)[ATA_ID_CAPABILITY] & (1 << 10))
 #define ata_id_has_iordy(id) ((id)[ATA_ID_CAPABILITY] & (1 << 11))
 #define ata_id_u32(id,n)       \
@@ -578,11 +591,12 @@ static inline int ata_is_data(u8 prot)
          ((u64) (id)[(n) + 0]) )
 
 #define ata_id_cdb_intr(id)    (((id)[ATA_ID_CONFIG] & 0x60) == 0x20)
-#define ata_id_has_da(id)      ((id)[77] & (1 << 4))
+#define ata_id_has_da(id)      ((id)[ATA_ID_SATA_CAPABILITY_2] & (1 << 4))
+#define ata_id_has_devslp(id)  ((id)[ATA_ID_FEATURE_SUPP] & (1 << 8))
 
 static inline bool ata_id_has_hipm(const u16 *id)
 {
-       u16 val = id[76];
+       u16 val = id[ATA_ID_SATA_CAPABILITY];
 
        if (val == 0 || val == 0xffff)
                return false;
@@ -592,7 +606,7 @@ static inline bool ata_id_has_hipm(const u16 *id)
 
 static inline bool ata_id_has_dipm(const u16 *id)
 {
-       u16 val = id[78];
+       u16 val = id[ATA_ID_FEATURE_SUPP];
 
        if (val == 0 || val == 0xffff)
                return false;
index 36abf2aa7e680e24afe8c52b87ccf8f88707ee17..e7c836d961ea3e442c6fcf1c63534f549a956e98 100644 (file)
@@ -442,6 +442,8 @@ struct audit_krule {
 struct audit_field {
        u32                             type;
        u32                             val;
+       kuid_t                          uid;
+       kgid_t                          gid;
        u32                             op;
        char                            *lsm_str;
        void                            *lsm_rule;
@@ -525,10 +527,11 @@ static inline void audit_ptrace(struct task_struct *t)
 extern unsigned int audit_serial(void);
 extern int auditsc_get_stamp(struct audit_context *ctx,
                              struct timespec *t, unsigned int *serial);
-extern int  audit_set_loginuid(uid_t loginuid);
+extern int  audit_set_loginuid(kuid_t loginuid);
 #define audit_get_loginuid(t) ((t)->loginuid)
 #define audit_get_sessionid(t) ((t)->sessionid)
 extern void audit_log_task_context(struct audit_buffer *ab);
+extern void audit_log_task_info(struct audit_buffer *ab, struct task_struct *tsk);
 extern void __audit_ipc_obj(struct kern_ipc_perm *ipcp);
 extern void __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, umode_t mode);
 extern int __audit_bprm(struct linux_binprm *bprm);
@@ -637,9 +640,10 @@ extern int audit_signals;
 #define audit_core_dumps(i) do { ; } while (0)
 #define audit_seccomp(i,s,c) do { ; } while (0)
 #define auditsc_get_stamp(c,t,s) (0)
-#define audit_get_loginuid(t) (-1)
+#define audit_get_loginuid(t) (INVALID_UID)
 #define audit_get_sessionid(t) (-1)
 #define audit_log_task_context(b) do { ; } while (0)
+#define audit_log_task_info(b, t) do { ; } while (0)
 #define audit_ipc_obj(i) ((void)0)
 #define audit_ipc_set_perm(q,u,g,m) ((void)0)
 #define audit_bprm(p) ({ 0; })
@@ -700,10 +704,10 @@ extern void                   audit_log_secctx(struct audit_buffer *ab, u32 secid);
 extern int                 audit_update_lsm_rules(void);
 
                                /* Private API (for audit.c only) */
-extern int audit_filter_user(struct netlink_skb_parms *cb);
+extern int audit_filter_user(void);
 extern int audit_filter_type(int type);
-extern int  audit_receive_filter(int type, int pid, int uid, int seq,
-                               void *data, size_t datasz, uid_t loginuid,
+extern int  audit_receive_filter(int type, int pid, int seq,
+                               void *data, size_t datasz, kuid_t loginuid,
                                u32 sessionid, u32 sid);
 extern int audit_enabled;
 #else
index d323a4b4143c6e2d1f43b4a82e459fad157d5f04..6ba45d2b99db2e153be43225addaabdb39060072 100644 (file)
 #define  BCMA_CC_CHIPST_4706_SFLASH_TYPE       BIT(2) /* 0: 8b-p/ST-s flash, 1: 16b-p/Atmal-s flash */
 #define  BCMA_CC_CHIPST_4706_MIPS_BENDIAN      BIT(3) /* 0: little, 1: big endian */
 #define  BCMA_CC_CHIPST_4706_PCIE1_DISABLE     BIT(5) /* PCIE1 enable strap pin */
+#define  BCMA_CC_CHIPST_5357_NAND_BOOT         BIT(4) /* NAND boot, valid for CC rev 38 and/or BCM5357 */
 #define BCMA_CC_JCMD                   0x0030          /* Rev >= 10 only */
 #define  BCMA_CC_JCMD_START            0x80000000
 #define  BCMA_CC_JCMD_BUSY             0x80000000
 #define  BCMA_CC_SROM_CONTROL_SIZE_16K 0x00000004
 #define  BCMA_CC_SROM_CONTROL_SIZE_SHIFT       1
 #define  BCMA_CC_SROM_CONTROL_PRESENT  0x00000001
+/* Block 0x140 - 0x190 registers are chipset specific */
+#define BCMA_CC_4706_FLASHSCFG         0x18C           /* Flash struct configuration */
+#define  BCMA_CC_4706_FLASHSCFG_MASK   0x000000ff
+#define  BCMA_CC_4706_FLASHSCFG_SF1    0x00000001      /* 2nd serial flash present */
+#define  BCMA_CC_4706_FLASHSCFG_PF1    0x00000002      /* 2nd parallel flash present */
+#define  BCMA_CC_4706_FLASHSCFG_SF1_TYPE       0x00000004      /* 2nd serial flash type : 0 : ST, 1 : Atmel */
+#define  BCMA_CC_4706_FLASHSCFG_NF1    0x00000008      /* 2nd NAND flash present */
+#define  BCMA_CC_4706_FLASHSCFG_1ST_MADDR_SEG_MASK     0x000000f0
+#define  BCMA_CC_4706_FLASHSCFG_1ST_MADDR_SEG_4MB      0x00000010      /* 4MB */
+#define  BCMA_CC_4706_FLASHSCFG_1ST_MADDR_SEG_8MB      0x00000020      /* 8MB */
+#define  BCMA_CC_4706_FLASHSCFG_1ST_MADDR_SEG_16MB     0x00000030      /* 16MB */
+#define  BCMA_CC_4706_FLASHSCFG_1ST_MADDR_SEG_32MB     0x00000040      /* 32MB */
+#define  BCMA_CC_4706_FLASHSCFG_1ST_MADDR_SEG_64MB     0x00000050      /* 64MB */
+#define  BCMA_CC_4706_FLASHSCFG_1ST_MADDR_SEG_128MB    0x00000060      /* 128MB */
+#define  BCMA_CC_4706_FLASHSCFG_1ST_MADDR_SEG_256MB    0x00000070      /* 256MB */
+/* NAND flash registers for BCM4706 (corerev = 31) */
+#define BCMA_CC_NFLASH_CTL             0x01A0
+#define  BCMA_CC_NFLASH_CTL_ERR                0x08000000
+#define BCMA_CC_NFLASH_CONF            0x01A4
+#define BCMA_CC_NFLASH_COL_ADDR                0x01A8
+#define BCMA_CC_NFLASH_ROW_ADDR                0x01AC
+#define BCMA_CC_NFLASH_DATA            0x01B0
+#define BCMA_CC_NFLASH_WAITCNT0                0x01B4
 /* 0x1E0 is defined as shared BCMA_CLKCTLST */
 #define BCMA_CC_HW_WORKAROUND          0x01E4 /* Hardware workaround (rev >= 20) */
 #define BCMA_CC_UART0_DATA             0x0300
 #define BCMA_CC_PLLCTL_ADDR            0x0660
 #define BCMA_CC_PLLCTL_DATA            0x0664
 #define BCMA_CC_SPROM                  0x0800 /* SPROM beginning */
+/* NAND flash MLC controller registers (corerev >= 38) */
+#define BCMA_CC_NAND_REVISION          0x0C00
+#define BCMA_CC_NAND_CMD_START         0x0C04
+#define BCMA_CC_NAND_CMD_ADDR_X                0x0C08
+#define BCMA_CC_NAND_CMD_ADDR          0x0C0C
+#define BCMA_CC_NAND_CMD_END_ADDR      0x0C10
+#define BCMA_CC_NAND_CS_NAND_SELECT    0x0C14
+#define BCMA_CC_NAND_CS_NAND_XOR       0x0C18
+#define BCMA_CC_NAND_SPARE_RD0         0x0C20
+#define BCMA_CC_NAND_SPARE_RD4         0x0C24
+#define BCMA_CC_NAND_SPARE_RD8         0x0C28
+#define BCMA_CC_NAND_SPARE_RD12                0x0C2C
+#define BCMA_CC_NAND_SPARE_WR0         0x0C30
+#define BCMA_CC_NAND_SPARE_WR4         0x0C34
+#define BCMA_CC_NAND_SPARE_WR8         0x0C38
+#define BCMA_CC_NAND_SPARE_WR12                0x0C3C
+#define BCMA_CC_NAND_ACC_CONTROL       0x0C40
+#define BCMA_CC_NAND_CONFIG            0x0C48
+#define BCMA_CC_NAND_TIMING_1          0x0C50
+#define BCMA_CC_NAND_TIMING_2          0x0C54
+#define BCMA_CC_NAND_SEMAPHORE         0x0C58
+#define BCMA_CC_NAND_DEVID             0x0C60
+#define BCMA_CC_NAND_DEVID_X           0x0C64
+#define BCMA_CC_NAND_BLOCK_LOCK_STATUS 0x0C68
+#define BCMA_CC_NAND_INTFC_STATUS      0x0C6C
+#define BCMA_CC_NAND_ECC_CORR_ADDR_X   0x0C70
+#define BCMA_CC_NAND_ECC_CORR_ADDR     0x0C74
+#define BCMA_CC_NAND_ECC_UNC_ADDR_X    0x0C78
+#define BCMA_CC_NAND_ECC_UNC_ADDR      0x0C7C
+#define BCMA_CC_NAND_READ_ERROR_COUNT  0x0C80
+#define BCMA_CC_NAND_CORR_STAT_THRESHOLD       0x0C84
+#define BCMA_CC_NAND_READ_ADDR_X       0x0C90
+#define BCMA_CC_NAND_READ_ADDR         0x0C94
+#define BCMA_CC_NAND_PAGE_PROGRAM_ADDR_X       0x0C98
+#define BCMA_CC_NAND_PAGE_PROGRAM_ADDR 0x0C9C
+#define BCMA_CC_NAND_COPY_BACK_ADDR_X  0x0CA0
+#define BCMA_CC_NAND_COPY_BACK_ADDR    0x0CA4
+#define BCMA_CC_NAND_BLOCK_ERASE_ADDR_X        0x0CA8
+#define BCMA_CC_NAND_BLOCK_ERASE_ADDR  0x0CAC
+#define BCMA_CC_NAND_INV_READ_ADDR_X   0x0CB0
+#define BCMA_CC_NAND_INV_READ_ADDR     0x0CB4
+#define BCMA_CC_NAND_BLK_WR_PROTECT    0x0CC0
+#define BCMA_CC_NAND_ACC_CONTROL_CS1   0x0CD0
+#define BCMA_CC_NAND_CONFIG_CS1                0x0CD4
+#define BCMA_CC_NAND_TIMING_1_CS1      0x0CD8
+#define BCMA_CC_NAND_TIMING_2_CS1      0x0CDC
+#define BCMA_CC_NAND_SPARE_RD16                0x0D30
+#define BCMA_CC_NAND_SPARE_RD20                0x0D34
+#define BCMA_CC_NAND_SPARE_RD24                0x0D38
+#define BCMA_CC_NAND_SPARE_RD28                0x0D3C
+#define BCMA_CC_NAND_CACHE_ADDR                0x0D40
+#define BCMA_CC_NAND_CACHE_DATA                0x0D44
+#define BCMA_CC_NAND_CTRL_CONFIG       0x0D48
+#define BCMA_CC_NAND_CTRL_STATUS       0x0D4C
 
 /* Divider allocation in 4716/47162/5356 */
 #define BCMA_CC_PMU5_MAINPLL_CPU       1
 /* 4313 Chip specific ChipControl register bits */
 #define BCMA_CCTRL_4313_12MA_LED_DRIVE         0x00000007      /* 12 mA drive strengh for later 4313 */
 
+/* BCM5357 ChipControl register bits */
+#define BCMA_CHIPCTL_5357_EXTPA                        BIT(14)
+#define BCMA_CHIPCTL_5357_ANT_MUX_2O3          BIT(15)
+#define BCMA_CHIPCTL_5357_NFLASH               BIT(16)
+#define BCMA_CHIPCTL_5357_I2S_PINS_ENABLE      BIT(18)
+#define BCMA_CHIPCTL_5357_I2CSPI_PINS_ENABLE   BIT(19)
+
 /* Data for the PMU, if available.
  * Check availability with ((struct bcma_chipcommon)->capabilities & BCMA_CC_CAP_PMU)
  */
@@ -430,6 +515,26 @@ struct bcma_pflash {
        u32 window_size;
 };
 
+#ifdef CONFIG_BCMA_SFLASH
+struct bcma_sflash {
+       bool present;
+       u32 window;
+       u32 blocksize;
+       u16 numblocks;
+       u32 size;
+};
+#endif
+
+#ifdef CONFIG_BCMA_NFLASH
+struct mtd_info;
+
+struct bcma_nflash {
+       bool present;
+
+       struct mtd_info *mtd;
+};
+#endif
+
 struct bcma_serial_port {
        void *regs;
        unsigned long clockspeed;
@@ -450,6 +555,12 @@ struct bcma_drv_cc {
        struct bcma_chipcommon_pmu pmu;
 #ifdef CONFIG_BCMA_DRIVER_MIPS
        struct bcma_pflash pflash;
+#ifdef CONFIG_BCMA_SFLASH
+       struct bcma_sflash sflash;
+#endif
+#ifdef CONFIG_BCMA_NFLASH
+       struct bcma_nflash nflash;
+#endif
 
        int nr_serial_ports;
        struct bcma_serial_port serial_ports[4];
index 5a71d57196404780ab587458ac1826343fd74c1c..6c9cb93ae3de4bb3210f5037dbd3865fe18288d3 100644 (file)
 #define  BCMA_CLKCTLST_HAVEHTREQ       0x00000010 /* HT available request */
 #define  BCMA_CLKCTLST_HWCROFF         0x00000020 /* Force HW clock request off */
 #define  BCMA_CLKCTLST_EXTRESREQ       0x00000700 /* Mask of external resource requests */
+#define  BCMA_CLKCTLST_EXTRESREQ_SHIFT 8
 #define  BCMA_CLKCTLST_HAVEALP         0x00010000 /* ALP available */
 #define  BCMA_CLKCTLST_HAVEHT          0x00020000 /* HT available */
 #define  BCMA_CLKCTLST_BP_ON_ALP       0x00040000 /* RO: running on ALP clock */
 #define  BCMA_CLKCTLST_BP_ON_HT                0x00080000 /* RO: running on HT clock */
 #define  BCMA_CLKCTLST_EXTRESST                0x07000000 /* Mask of external resource status */
+#define  BCMA_CLKCTLST_EXTRESST_SHIFT  24
 /* Is there any BCM4328 on BCMA bus? */
 #define  BCMA_CLKCTLST_4328A0_HAVEHT   0x00010000 /* 4328a0 has reversed bits */
 #define  BCMA_CLKCTLST_4328A0_HAVEALP  0x00020000 /* 4328a0 has reversed bits */
@@ -83,4 +85,6 @@
                                                         * (2 ZettaBytes), high 32 bits
                                                         */
 
+#define BCMA_SFLASH                    0x1c000000
+
 #endif /* LINUX_BCMA_REGS_H_ */
index c90eaa80344017fd8a8e7555508a20949020f61f..f8a030ced0c7f39297085eb346a989aff5b5d612 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/rwsem.h>
 #include <linux/idr.h>
 #include <linux/workqueue.h>
+#include <linux/xattr.h>
 
 #ifdef CONFIG_CGROUPS
 
@@ -45,17 +46,13 @@ extern const struct file_operations proc_cgroup_operations;
 
 /* Define the enumeration of all builtin cgroup subsystems */
 #define SUBSYS(_x) _x ## _subsys_id,
+#define IS_SUBSYS_ENABLED(option) IS_ENABLED(option)
 enum cgroup_subsys_id {
 #include <linux/cgroup_subsys.h>
-       CGROUP_BUILTIN_SUBSYS_COUNT
+       CGROUP_SUBSYS_COUNT,
 };
+#undef IS_SUBSYS_ENABLED
 #undef SUBSYS
-/*
- * This define indicates the maximum number of subsystems that can be loaded
- * at once. We limit to this many since cgroupfs_root has subsys_bits to keep
- * track of all of them.
- */
-#define CGROUP_SUBSYS_COUNT (BITS_PER_BYTE*sizeof(unsigned long))
 
 /* Per-subsystem/per-cgroup state maintained by the system. */
 struct cgroup_subsys_state {
@@ -216,6 +213,9 @@ struct cgroup {
        /* List of events which userspace want to receive */
        struct list_head event_list;
        spinlock_t event_list_lock;
+
+       /* directory xattrs */
+       struct simple_xattrs xattrs;
 };
 
 /*
@@ -309,6 +309,9 @@ struct cftype {
        /* CFTYPE_* flags */
        unsigned int flags;
 
+       /* file xattrs */
+       struct simple_xattrs xattrs;
+
        int (*open)(struct inode *inode, struct file *file);
        ssize_t (*read)(struct cgroup *cgrp, struct cftype *cft,
                        struct file *file,
@@ -394,7 +397,7 @@ struct cftype {
  */
 struct cftype_set {
        struct list_head                node;   /* chained at subsys->cftsets */
-       const struct cftype             *cfts;
+       struct cftype                   *cfts;
 };
 
 struct cgroup_scanner {
@@ -406,8 +409,8 @@ struct cgroup_scanner {
        void *data;
 };
 
-int cgroup_add_cftypes(struct cgroup_subsys *ss, const struct cftype *cfts);
-int cgroup_rm_cftypes(struct cgroup_subsys *ss, const struct cftype *cfts);
+int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
+int cgroup_rm_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
 
 int cgroup_is_removed(const struct cgroup *cgrp);
 
@@ -496,6 +499,21 @@ struct cgroup_subsys {
         */
        bool __DEPRECATED_clear_css_refs;
 
+       /*
+        * If %false, this subsystem is properly hierarchical -
+        * configuration, resource accounting and restriction on a parent
+        * cgroup cover those of its children.  If %true, hierarchy support
+        * is broken in some ways - some subsystems ignore hierarchy
+        * completely while others are only implemented half-way.
+        *
+        * It's now disallowed to create nested cgroups if the subsystem is
+        * broken and cgroup core will emit a warning message on such
+        * cases.  Eventually, all subsystems will be made properly
+        * hierarchical and this will go away.
+        */
+       bool broken_hierarchy;
+       bool warned_broken_hierarchy;
+
 #define MAX_CGROUP_TYPE_NAMELEN 32
        const char *name;
 
@@ -521,7 +539,9 @@ struct cgroup_subsys {
 };
 
 #define SUBSYS(_x) extern struct cgroup_subsys _x ## _subsys;
+#define IS_SUBSYS_ENABLED(option) IS_BUILTIN(option)
 #include <linux/cgroup_subsys.h>
+#undef IS_SUBSYS_ENABLED
 #undef SUBSYS
 
 static inline struct cgroup_subsys_state *cgroup_subsys_state(
index dfae957398c333e4f4a62c969350eac9fae368b9..f204a7a9cf382139a10c77302b4f5aecf1c118be 100644 (file)
@@ -7,73 +7,73 @@
 
 /* */
 
-#ifdef CONFIG_CPUSETS
+#if IS_SUBSYS_ENABLED(CONFIG_CPUSETS)
 SUBSYS(cpuset)
 #endif
 
 /* */
 
-#ifdef CONFIG_CGROUP_DEBUG
+#if IS_SUBSYS_ENABLED(CONFIG_CGROUP_DEBUG)
 SUBSYS(debug)
 #endif
 
 /* */
 
-#ifdef CONFIG_CGROUP_SCHED
+#if IS_SUBSYS_ENABLED(CONFIG_CGROUP_SCHED)
 SUBSYS(cpu_cgroup)
 #endif
 
 /* */
 
-#ifdef CONFIG_CGROUP_CPUACCT
+#if IS_SUBSYS_ENABLED(CONFIG_CGROUP_CPUACCT)
 SUBSYS(cpuacct)
 #endif
 
 /* */
 
-#ifdef CONFIG_MEMCG
+#if IS_SUBSYS_ENABLED(CONFIG_MEMCG)
 SUBSYS(mem_cgroup)
 #endif
 
 /* */
 
-#ifdef CONFIG_CGROUP_DEVICE
+#if IS_SUBSYS_ENABLED(CONFIG_CGROUP_DEVICE)
 SUBSYS(devices)
 #endif
 
 /* */
 
-#ifdef CONFIG_CGROUP_FREEZER
+#if IS_SUBSYS_ENABLED(CONFIG_CGROUP_FREEZER)
 SUBSYS(freezer)
 #endif
 
 /* */
 
-#ifdef CONFIG_NET_CLS_CGROUP
+#if IS_SUBSYS_ENABLED(CONFIG_NET_CLS_CGROUP)
 SUBSYS(net_cls)
 #endif
 
 /* */
 
-#ifdef CONFIG_BLK_CGROUP
+#if IS_SUBSYS_ENABLED(CONFIG_BLK_CGROUP)
 SUBSYS(blkio)
 #endif
 
 /* */
 
-#ifdef CONFIG_CGROUP_PERF
+#if IS_SUBSYS_ENABLED(CONFIG_CGROUP_PERF)
 SUBSYS(perf)
 #endif
 
 /* */
 
-#ifdef CONFIG_NETPRIO_CGROUP
+#if IS_SUBSYS_ENABLED(CONFIG_NETPRIO_CGROUP)
 SUBSYS(net_prio)
 #endif
 
 /* */
 
-#ifdef CONFIG_CGROUP_HUGETLB
+#if IS_SUBSYS_ENABLED(CONFIG_CGROUP_HUGETLB)
 SUBSYS(hugetlb)
 #endif
 
index acba894374a1537b4b961eee222e9c28b53845cf..8a7096fcb01ee1354e1d46c67d5d1ff9919bc742 100644 (file)
@@ -97,6 +97,8 @@ struct clock_event_device {
        void                    (*broadcast)(const struct cpumask *mask);
        void                    (*set_mode)(enum clock_event_mode mode,
                                            struct clock_event_device *);
+       void                    (*suspend)(struct clock_event_device *);
+       void                    (*resume)(struct clock_event_device *);
        unsigned long           min_delta_ticks;
        unsigned long           max_delta_ticks;
 
@@ -156,6 +158,9 @@ clockevents_calc_mult_shift(struct clock_event_device *ce, u32 freq, u32 minsec)
                                      freq, minsec);
 }
 
+extern void clockevents_suspend(void);
+extern void clockevents_resume(void);
+
 #ifdef CONFIG_GENERIC_CLOCKEVENTS
 extern void clockevents_notify(unsigned long reason, void *arg);
 #else
@@ -164,6 +169,9 @@ extern void clockevents_notify(unsigned long reason, void *arg);
 
 #else /* CONFIG_GENERIC_CLOCKEVENTS_BUILD */
 
+static inline void clockevents_suspend(void) {}
+static inline void clockevents_resume(void) {}
+
 #define clockevents_notify(reason, arg) do { } while (0)
 
 #endif
index 09b28b7369d77e1ebedfe9a03017860df4d09fc3..fd4e29956d1c1d8069a085553bf1474e7b99ff7f 100644 (file)
@@ -590,6 +590,9 @@ asmlinkage ssize_t compat_sys_process_vm_writev(compat_pid_t pid,
                unsigned long liovcnt, const struct compat_iovec __user *rvec,
                unsigned long riovcnt, unsigned long flags);
 
+asmlinkage long compat_sys_sendfile(int out_fd, int in_fd,
+                                   compat_off_t __user *offset, compat_size_t count);
+
 #else
 
 #define is_compat_task() (0)
index af92883bb4a6cf3ed01cc4389a54295cc6cc190d..86ef6ab553b19305b5125881a05c23885e614c78 100644 (file)
@@ -776,6 +776,13 @@ static inline void pm_suspend_ignore_children(struct device *dev, bool enable)
        dev->power.ignore_children = enable;
 }
 
+static inline void dev_pm_syscore_device(struct device *dev, bool val)
+{
+#ifdef CONFIG_PM_SLEEP
+       dev->power.syscore = val;
+#endif
+}
+
 static inline void device_lock(struct device *dev)
 {
        mutex_lock(&dev->mutex);
index d426336d92d9e4bb34f5f049bc7f4f05d49303bb..b006ba0a9f4269e79d8bfae11415e8b3c8785f69 100644 (file)
@@ -150,6 +150,17 @@ static inline void eth_broadcast_addr(u8 *addr)
        memset(addr, 0xff, ETH_ALEN);
 }
 
+/**
+ * eth_zero_addr - Assign zero address
+ * @addr: Pointer to a six-byte array containing the Ethernet address
+ *
+ * Assign the zero address to the given address array.
+ */
+static inline void eth_zero_addr(u8 *addr)
+{
+       memset(addr, 0x00, ETH_ALEN);
+}
+
 /**
  * eth_hw_addr_random - Generate software assigned random Ethernet and
  * set device flag
index 21eff418091bb6d943740990cd24a7cf9d39342e..fcb4f8e60c1cbe8aa4b0f190cdd7815d25057f2c 100644 (file)
@@ -45,8 +45,10 @@ struct ethtool_cmd {
                                 * bits) in Mbps. Please use
                                 * ethtool_cmd_speed()/_set() to
                                 * access it */
-       __u8    eth_tp_mdix;
-       __u8    reserved2;
+       __u8    eth_tp_mdix;    /* twisted pair MDI-X status */
+       __u8    eth_tp_mdix_ctrl; /* twisted pair MDI-X control, when set,
+                                  * link should be renegotiated if necessary
+                                  */
        __u32   lp_advertising; /* Features the link partner advertises */
        __u32   reserved[2];
 };
@@ -1229,10 +1231,13 @@ struct ethtool_ops {
 #define AUTONEG_DISABLE                0x00
 #define AUTONEG_ENABLE         0x01
 
-/* Mode MDI or MDI-X */
-#define ETH_TP_MDI_INVALID     0x00
-#define ETH_TP_MDI             0x01
-#define ETH_TP_MDI_X           0x02
+/* MDI or MDI-X status/control - if MDI/MDI_X/AUTO is set then
+ * the driver is required to renegotiate link
+ */
+#define ETH_TP_MDI_INVALID     0x00 /* status: unknown; control: unsupported */
+#define ETH_TP_MDI             0x01 /* status: MDI;     control: force MDI */
+#define ETH_TP_MDI_X           0x02 /* status: MDI-X;   control: force MDI-X */
+#define ETH_TP_MDI_AUTO                0x03 /*                  control: auto-select */
 
 /* Wake-On-Lan options. */
 #define WAKE_PHY               (1 << 0)
index 158a41eed3140b9b2870f419b8ca3c95ed3a4763..45052aa814c899e4c88bb2d66ab8f9dc58a42d2b 100644 (file)
@@ -30,31 +30,11 @@ struct fdtable {
        struct fdtable *next;
 };
 
-static inline void __set_close_on_exec(int fd, struct fdtable *fdt)
-{
-       __set_bit(fd, fdt->close_on_exec);
-}
-
-static inline void __clear_close_on_exec(int fd, struct fdtable *fdt)
-{
-       __clear_bit(fd, fdt->close_on_exec);
-}
-
 static inline bool close_on_exec(int fd, const struct fdtable *fdt)
 {
        return test_bit(fd, fdt->close_on_exec);
 }
 
-static inline void __set_open_fd(int fd, struct fdtable *fdt)
-{
-       __set_bit(fd, fdt->open_fds);
-}
-
-static inline void __clear_open_fd(int fd, struct fdtable *fdt)
-{
-       __clear_bit(fd, fdt->open_fds);
-}
-
 static inline bool fd_is_open(int fd, const struct fdtable *fdt)
 {
        return test_bit(fd, fdt->open_fds);
@@ -93,15 +73,8 @@ struct file_operations;
 struct vfsmount;
 struct dentry;
 
-extern int expand_files(struct files_struct *, int nr);
-extern void free_fdtable_rcu(struct rcu_head *rcu);
 extern void __init files_defer_init(void);
 
-static inline void free_fdtable(struct fdtable *fdt)
-{
-       call_rcu(&fdt->rcu, free_fdtable_rcu);
-}
-
 static inline struct file * fcheck_files(struct files_struct *files, unsigned int fd)
 {
        struct file * file = NULL;
@@ -122,8 +95,20 @@ struct task_struct;
 struct files_struct *get_files_struct(struct task_struct *);
 void put_files_struct(struct files_struct *fs);
 void reset_files_struct(struct files_struct *);
+void daemonize_descriptors(void);
 int unshare_files(struct files_struct **);
 struct files_struct *dup_fd(struct files_struct *, int *);
+void do_close_on_exec(struct files_struct *);
+int iterate_fd(struct files_struct *, unsigned,
+               int (*)(const void *, struct file *, unsigned),
+               const void *);
+
+extern int __alloc_fd(struct files_struct *files,
+                     unsigned start, unsigned end, unsigned flags);
+extern void __fd_install(struct files_struct *files,
+                     unsigned int fd, struct file *file);
+extern int __close_fd(struct files_struct *files,
+                     unsigned int fd);
 
 extern struct kmem_cache *files_cachep;
 
index a22408bac0d005bca76c99e30a5d88b83e294561..cbacf4faf447a9dd2b3fd50d3dc2e4f86a120e67 100644 (file)
@@ -26,15 +26,44 @@ static inline void fput_light(struct file *file, int fput_needed)
                fput(file);
 }
 
+struct fd {
+       struct file *file;
+       int need_put;
+};
+
+static inline void fdput(struct fd fd)
+{
+       if (fd.need_put)
+               fput(fd.file);
+}
+
 extern struct file *fget(unsigned int fd);
 extern struct file *fget_light(unsigned int fd, int *fput_needed);
+
+static inline struct fd fdget(unsigned int fd)
+{
+       int b;
+       struct file *f = fget_light(fd, &b);
+       return (struct fd){f,b};
+}
+
 extern struct file *fget_raw(unsigned int fd);
 extern struct file *fget_raw_light(unsigned int fd, int *fput_needed);
+
+static inline struct fd fdget_raw(unsigned int fd)
+{
+       int b;
+       struct file *f = fget_raw_light(fd, &b);
+       return (struct fd){f,b};
+}
+
+extern int f_dupfd(unsigned int from, struct file *file, unsigned flags);
+extern int replace_fd(unsigned fd, struct file *file, unsigned flags);
 extern void set_close_on_exec(unsigned int fd, int flag);
+extern bool get_close_on_exec(unsigned int fd);
 extern void put_filp(struct file *);
-extern int alloc_fd(unsigned start, unsigned flags);
-extern int get_unused_fd(void);
-#define get_unused_fd_flags(flags) alloc_fd(0, (flags))
+extern int get_unused_fd_flags(unsigned flags);
+#define get_unused_fd() get_unused_fd_flags(0)
 extern void put_unused_fd(unsigned int fd);
 
 extern void fd_install(unsigned int fd, struct file *file);
index 82b01357af8b0672c330c648f3b01f5aa65134d2..2ded090e10f4e511461cacdbfcdc031dc1dd0b3e 100644 (file)
@@ -74,6 +74,9 @@ struct sock_fprog {   /* Required for SO_ATTACH_FILTER. */
 #define         BPF_LSH         0x60
 #define         BPF_RSH         0x70
 #define         BPF_NEG         0x80
+#define                BPF_MOD         0x90
+#define                BPF_XOR         0xa0
+
 #define         BPF_JA          0x00
 #define         BPF_JEQ         0x10
 #define         BPF_JGT         0x20
@@ -196,10 +199,14 @@ enum {
        BPF_S_ALU_MUL_K,
        BPF_S_ALU_MUL_X,
        BPF_S_ALU_DIV_X,
+       BPF_S_ALU_MOD_K,
+       BPF_S_ALU_MOD_X,
        BPF_S_ALU_AND_K,
        BPF_S_ALU_AND_X,
        BPF_S_ALU_OR_K,
        BPF_S_ALU_OR_X,
+       BPF_S_ALU_XOR_K,
+       BPF_S_ALU_XOR_X,
        BPF_S_ALU_LSH_K,
        BPF_S_ALU_LSH_X,
        BPF_S_ALU_RSH_K,
index 0e4e2eec5c1db85f4a7d4340147ddbe3263f8bc6..30442547b9e6b8b8aed1d17ecff37fb6298b18e2 100644 (file)
@@ -19,6 +19,8 @@ extern struct frontswap_ops
 extern void frontswap_shrink(unsigned long);
 extern unsigned long frontswap_curr_pages(void);
 extern void frontswap_writethrough(bool);
+#define FRONTSWAP_HAS_EXCLUSIVE_GETS
+extern void frontswap_tmem_exclusive_gets(bool);
 
 extern void __frontswap_init(unsigned type);
 extern int __frontswap_store(struct page *page);
index aa110476a95be0b1d479555346c560d45ecc0b20..ca6d8c806f470ab7d26f23337648978891f37ffd 100644 (file)
@@ -1074,7 +1074,11 @@ struct file_handle {
        unsigned char f_handle[0];
 };
 
-#define get_file(x)    atomic_long_inc(&(x)->f_count)
+static inline struct file *get_file(struct file *f)
+{
+       atomic_long_inc(&f->f_count);
+       return f;
+}
 #define fput_atomic(x) atomic_long_add_unless(&(x)->f_count, -1, 1)
 #define file_count(x)  atomic_long_read(&(x)->f_count)
 
@@ -1126,9 +1130,9 @@ static inline int file_check_writeable(struct file *filp)
 /* Page cache limit. The filesystems should put that into their s_maxbytes 
    limits, otherwise bad things can happen in VM. */ 
 #if BITS_PER_LONG==32
-#define MAX_LFS_FILESIZE       (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1) 
+#define MAX_LFS_FILESIZE       (((loff_t)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1) 
 #elif BITS_PER_LONG==64
-#define MAX_LFS_FILESIZE       0x7fffffffffffffffUL
+#define MAX_LFS_FILESIZE       ((loff_t)0x7fffffffffffffff)
 #endif
 
 #define FL_POSIX       1
index b80506bdd733ee181f202ddb6322529d42299d1b..24df9e70406ffb94fb98faf91c174e236155a639 100644 (file)
@@ -67,4 +67,14 @@ static inline unsigned long hash_ptr(const void *ptr, unsigned int bits)
 {
        return hash_long((unsigned long)ptr, bits);
 }
+
+static inline u32 hash32_ptr(const void *ptr)
+{
+       unsigned long val = (unsigned long)ptr;
+
+#if BITS_PER_LONG == 64
+       val ^= (val >> 32);
+#endif
+       return (u32)val;
+}
 #endif /* _LINUX_HASH_H */
index 0767a2a6b2f1256746220a9f772efaec82f5c9f7..781e6bd06c34bfb8ce2daff592cb91a55ae34709 100644 (file)
@@ -10,6 +10,7 @@
  * @setup: optional callback issued once the GPIOs are valid
  * @teardown: optional callback issued before the GPIOs are invalidated
  * @context: optional parameter passed to setup() and teardown()
+ * @irq: optional interrupt number
  *
  * In addition to the I2C_BOARD_INFO() state appropriate to each chip,
  * the i2c_board_info used with the pcf875x driver must provide its
@@ -39,6 +40,8 @@ struct pcf857x_platform_data {
                                        int gpio, unsigned ngpio,
                                        void *context);
        void            *context;
+
+       int             irq;
 };
 
 #endif /* __LINUX_PCF857X_H */
index e02fc682bb6850600258b622da1a80ef52260dd7..2385119f8bb016c003f5bf2b638b412a6ddde7f9 100644 (file)
@@ -1934,36 +1934,6 @@ static inline bool ieee80211_is_public_action(struct ieee80211_hdr *hdr,
        return mgmt->u.action.category == WLAN_CATEGORY_PUBLIC;
 }
 
-/**
- * ieee80211_fhss_chan_to_freq - get channel frequency
- * @channel: the FHSS channel
- *
- * Convert IEEE802.11 FHSS channel to frequency (MHz)
- * Ref IEEE 802.11-2007 section 14.6
- */
-static inline int ieee80211_fhss_chan_to_freq(int channel)
-{
-       if ((channel > 1) && (channel < 96))
-               return channel + 2400;
-       else
-               return -1;
-}
-
-/**
- * ieee80211_freq_to_fhss_chan - get channel
- * @freq: the channels frequency
- *
- * Convert frequency (MHz) to IEEE802.11 FHSS channel
- * Ref IEEE 802.11-2007 section 14.6
- */
-static inline int ieee80211_freq_to_fhss_chan(int freq)
-{
-       if ((freq > 2401) && (freq < 2496))
-               return freq - 2400;
-       else
-               return -1;
-}
-
 /**
  * ieee80211_dsss_chan_to_freq - get channel center frequency
  * @channel: the DSSS channel
@@ -2000,56 +1970,6 @@ static inline int ieee80211_freq_to_dsss_chan(int freq)
                return -1;
 }
 
-/* Convert IEEE802.11 HR DSSS channel to frequency (MHz) and back
- * Ref IEEE 802.11-2007 section 18.4.6.2
- *
- * The channels and frequencies are the same as those defined for DSSS
- */
-#define ieee80211_hr_chan_to_freq(chan) ieee80211_dsss_chan_to_freq(chan)
-#define ieee80211_freq_to_hr_chan(freq) ieee80211_freq_to_dsss_chan(freq)
-
-/* Convert IEEE802.11 ERP channel to frequency (MHz) and back
- * Ref IEEE 802.11-2007 section 19.4.2
- */
-#define ieee80211_erp_chan_to_freq(chan) ieee80211_hr_chan_to_freq(chan)
-#define ieee80211_freq_to_erp_chan(freq) ieee80211_freq_to_hr_chan(freq)
-
-/**
- * ieee80211_ofdm_chan_to_freq - get channel center frequency
- * @s_freq: starting frequency == (dotChannelStartingFactor/2) MHz
- * @channel: the OFDM channel
- *
- * Convert IEEE802.11 OFDM channel to center frequency (MHz)
- * Ref IEEE 802.11-2007 section 17.3.8.3.2
- */
-static inline int ieee80211_ofdm_chan_to_freq(int s_freq, int channel)
-{
-       if ((channel > 0) && (channel <= 200) &&
-           (s_freq >= 4000))
-               return s_freq + (channel * 5);
-       else
-               return -1;
-}
-
-/**
- * ieee80211_freq_to_ofdm_channel - get channel
- * @s_freq: starting frequency == (dotChannelStartingFactor/2) MHz
- * @freq: the frequency
- *
- * Convert frequency (MHz) to IEEE802.11 OFDM channel
- * Ref IEEE 802.11-2007 section 17.3.8.3.2
- *
- * This routine selects the channel with the closest center frequency.
- */
-static inline int ieee80211_freq_to_ofdm_chan(int s_freq, int freq)
-{
-       if ((freq > (s_freq + 2)) && (freq <= (s_freq + 1202)) &&
-           (s_freq >= 4000))
-               return (freq + 2 - s_freq) / 5;
-       else
-               return -1;
-}
-
 /**
  * ieee80211_tu_to_usec - convert time units (TU) to microseconds
  * @tu: the TUs
index f0e69c6e82083c33eb799cbba0eb2ba789699a56..9adcc29f084af485a8b674dd6c6b3b51532ce68b 100644 (file)
@@ -92,6 +92,7 @@
 #define ARPHRD_PHONET  820             /* PhoNet media type            */
 #define ARPHRD_PHONET_PIPE 821         /* PhoNet pipe header           */
 #define ARPHRD_CAIF    822             /* CAIF media type              */
+#define ARPHRD_IP6GRE  823             /* GRE over IPv6                */
 
 #define ARPHRD_VOID      0xFFFF        /* Void type, nothing is known */
 #define ARPHRD_NONE      0xFFFE        /* zero header length */
index ac173bd2ab65e40c8759eb181460b79dd588b5c6..e4dad4ddf0855b43ed30747ed565203d273acf2d 100644 (file)
@@ -272,6 +272,22 @@ enum macvlan_mode {
 
 #define MACVLAN_FLAG_NOPROMISC 1
 
+/* VXLAN section */
+enum {
+       IFLA_VXLAN_UNSPEC,
+       IFLA_VXLAN_ID,
+       IFLA_VXLAN_GROUP,
+       IFLA_VXLAN_LINK,
+       IFLA_VXLAN_LOCAL,
+       IFLA_VXLAN_TTL,
+       IFLA_VXLAN_TOS,
+       IFLA_VXLAN_LEARNING,
+       IFLA_VXLAN_AGEING,
+       IFLA_VXLAN_LIMIT,
+       __IFLA_VXLAN_MAX
+};
+#define IFLA_VXLAN_MAX (__IFLA_VXLAN_MAX - 1)
+
 /* SR-IOV virtual function management section */
 
 enum {
@@ -398,4 +414,22 @@ struct ifla_port_vsi {
        __u8 pad[3];
 };
 
+
+/* IPoIB section */
+
+enum {
+       IFLA_IPOIB_UNSPEC,
+       IFLA_IPOIB_PKEY,
+       IFLA_IPOIB_MODE,
+       IFLA_IPOIB_UMCAST,
+       __IFLA_IPOIB_MAX
+};
+
+enum {
+       IPOIB_MODE_DATAGRAM  = 0, /* using unreliable datagram QPs */
+       IPOIB_MODE_CONNECTED = 1, /* using connected QPs */
+};
+
+#define IFLA_IPOIB_MAX (__IFLA_IPOIB_MAX - 1)
+
 #endif /* _LINUX_IF_LINK_H */
index aa2e167e1ef434a696a55322bd941bdb3983bf50..6d88a7f576808cbd743ca1c4ad42d2cb5b6ba8cd 100644 (file)
@@ -67,6 +67,9 @@ struct team_port {
        struct netpoll *np;
 #endif
 
+       s32 priority; /* lower number ~ higher priority */
+       u16 queue_id;
+       struct list_head qom_list; /* node in queue override mapping list */
        long mode_priv[0];
 };
 
@@ -105,7 +108,7 @@ struct team_mode_ops {
        bool (*transmit)(struct team *team, struct sk_buff *skb);
        int (*port_enter)(struct team *team, struct team_port *port);
        void (*port_leave)(struct team *team, struct team_port *port);
-       void (*port_change_mac)(struct team *team, struct team_port *port);
+       void (*port_change_dev_addr)(struct team *team, struct team_port *port);
        void (*port_enabled)(struct team *team, struct team_port *port);
        void (*port_disabled)(struct team *team, struct team_port *port);
 };
@@ -115,6 +118,7 @@ enum team_option_type {
        TEAM_OPTION_TYPE_STRING,
        TEAM_OPTION_TYPE_BINARY,
        TEAM_OPTION_TYPE_BOOL,
+       TEAM_OPTION_TYPE_S32,
 };
 
 struct team_option_inst_info {
@@ -131,6 +135,7 @@ struct team_gsetter_ctx {
                        u32 len;
                } bin_val;
                bool bool_val;
+               s32 s32_val;
        } data;
        struct team_option_inst_info *info;
 };
@@ -182,6 +187,8 @@ struct team {
 
        const struct team_mode *mode;
        struct team_mode_ops ops;
+       bool queue_override_enabled;
+       struct list_head *qom_lists; /* array of queue override mapping lists */
        long mode_priv[TEAM_MODE_PRIV_LONGS];
 };
 
@@ -231,7 +238,7 @@ static inline struct team_port *team_get_port_by_index_rcu(struct team *team,
        return NULL;
 }
 
-extern int team_port_set_team_mac(struct team_port *port);
+extern int team_port_set_team_dev_addr(struct team_port *port);
 extern int team_options_register(struct team *team,
                                 const struct team_option *option,
                                 size_t option_count);
index 5efff60b6f56906112b5c71dffbdf47b2b22cc8d..8c5035ac31421aa1bee89a34c342de34ff63131a 100644 (file)
@@ -75,6 +75,9 @@ enum {
        IFLA_GRE_TTL,
        IFLA_GRE_TOS,
        IFLA_GRE_PMTUDISC,
+       IFLA_GRE_ENCAP_LIMIT,
+       IFLA_GRE_FLOWINFO,
+       IFLA_GRE_FLAGS,
        __IFLA_GRE_MAX,
 };
 
index a810987cb80e47cd2a344dd1e3d289a6d3b46371..e6ff12dd717baf338d2e8ef88cb524c912304ad0 100644 (file)
@@ -74,8 +74,6 @@ static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb)
 /* found in socket.c */
 extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *));
 
-struct vlan_info;
-
 static inline int is_vlan_dev(struct net_device *dev)
 {
         return dev->priv_flags & IFF_802_1Q_VLAN;
@@ -101,6 +99,8 @@ extern int vlan_vids_add_by_dev(struct net_device *dev,
                                const struct net_device *by_dev);
 extern void vlan_vids_del_by_dev(struct net_device *dev,
                                 const struct net_device *by_dev);
+
+extern bool vlan_uses_dev(const struct net_device *dev);
 #else
 static inline struct net_device *
 __vlan_find_dev_deep(struct net_device *real_dev, u16 vlan_id)
@@ -151,6 +151,11 @@ static inline void vlan_vids_del_by_dev(struct net_device *dev,
                                        const struct net_device *by_dev)
 {
 }
+
+static inline bool vlan_uses_dev(const struct net_device *dev)
+{
+       return false;
+}
 #endif
 
 /**
index 6ac8e50c6cf5453e338fec108f9dd1418046e3e9..2c7223d7e73b720de899756e3a0433a813c2640e 100644 (file)
@@ -39,5 +39,32 @@ static inline int ima_file_mmap(struct file *file, unsigned long prot)
 {
        return 0;
 }
+
 #endif /* CONFIG_IMA_H */
+
+#ifdef CONFIG_IMA_APPRAISE
+extern void ima_inode_post_setattr(struct dentry *dentry);
+extern int ima_inode_setxattr(struct dentry *dentry, const char *xattr_name,
+                      const void *xattr_value, size_t xattr_value_len);
+extern int ima_inode_removexattr(struct dentry *dentry, const char *xattr_name);
+#else
+static inline void ima_inode_post_setattr(struct dentry *dentry)
+{
+       return;
+}
+
+static inline int ima_inode_setxattr(struct dentry *dentry,
+                                    const char *xattr_name,
+                                    const void *xattr_value,
+                                    size_t xattr_value_len)
+{
+       return 0;
+}
+
+static inline int ima_inode_removexattr(struct dentry *dentry,
+                                       const char *xattr_name)
+{
+       return 0;
+}
+#endif /* CONFIG_IMA_APPRAISE_H */
 #endif /* _LINUX_IMA_H */
index f1362b5447fcbcf6935e49aec748675562eb0b12..e788c186ed3a85a933b0ce219d4f6b8b8bb84e88 100644 (file)
@@ -159,6 +159,7 @@ struct inet_diag_handler {
 struct inet_connection_sock;
 int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
                              struct sk_buff *skb, struct inet_diag_req_v2 *req,
+                             struct user_namespace *user_ns,
                              u32 pid, u32 seq, u16 nlmsg_flags,
                              const struct nlmsghdr *unlh);
 void inet_diag_dump_icsk(struct inet_hashinfo *h, struct sk_buff *skb,
index 67f9ddacb70c327e6576b47d0c103cddb752dd18..d032780d0ce50849c5441aae24025b2cc2f80cc3 100644 (file)
@@ -104,9 +104,14 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev)
 #define IN_DEV_ANDCONF(in_dev, attr) \
        (IPV4_DEVCONF_ALL(dev_net(in_dev->dev), attr) && \
         IN_DEV_CONF_GET((in_dev), attr))
-#define IN_DEV_ORCONF(in_dev, attr) \
-       (IPV4_DEVCONF_ALL(dev_net(in_dev->dev), attr) || \
+
+#define IN_DEV_NET_ORCONF(in_dev, net, attr) \
+       (IPV4_DEVCONF_ALL(net, attr) || \
         IN_DEV_CONF_GET((in_dev), attr))
+
+#define IN_DEV_ORCONF(in_dev, attr) \
+       IN_DEV_NET_ORCONF(in_dev, dev_net(in_dev->dev), attr)
+
 #define IN_DEV_MAXCONF(in_dev, attr) \
        (max(IPV4_DEVCONF_ALL(dev_net(in_dev->dev), attr), \
             IN_DEV_CONF_GET((in_dev), attr)))
@@ -133,6 +138,8 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev)
                                        IN_DEV_ORCONF((in_dev), \
                                                      PROMOTE_SECONDARIES)
 #define IN_DEV_ROUTE_LOCALNET(in_dev)  IN_DEV_ORCONF(in_dev, ROUTE_LOCALNET)
+#define IN_DEV_NET_ROUTE_LOCALNET(in_dev, net) \
+       IN_DEV_NET_ORCONF(in_dev, net, ROUTE_LOCALNET)
 
 #define IN_DEV_RX_REDIRECTS(in_dev) \
        ((IN_DEV_FORWARD(in_dev) && \
index 89f1cb1056f0f93d26162fe288a58da31b918450..6d087c5f57f79e5a22ffa9a440061b5079838f53 100644 (file)
@@ -92,7 +92,7 @@ extern struct group_info init_groups;
 
 #ifdef CONFIG_AUDITSYSCALL
 #define INIT_IDS \
-       .loginuid = -1, \
+       .loginuid = INVALID_UID, \
        .sessionid = -1,
 #else
 #define INIT_IDS
diff --git a/include/linux/input/tegra_kbc.h b/include/linux/input/tegra_kbc.h
new file mode 100644 (file)
index 0000000..a130256
--- /dev/null
@@ -0,0 +1,62 @@
+/*
+ * Platform definitions for tegra-kbc keyboard input driver
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef ASMARM_ARCH_TEGRA_KBC_H
+#define ASMARM_ARCH_TEGRA_KBC_H
+
+#include <linux/types.h>
+#include <linux/input/matrix_keypad.h>
+
+#define KBC_MAX_GPIO   24
+#define KBC_MAX_KPENT  8
+
+#define KBC_MAX_ROW    16
+#define KBC_MAX_COL    8
+#define KBC_MAX_KEY    (KBC_MAX_ROW * KBC_MAX_COL)
+
+enum tegra_pin_type {
+       PIN_CFG_IGNORE,
+       PIN_CFG_COL,
+       PIN_CFG_ROW,
+};
+
+struct tegra_kbc_pin_cfg {
+       enum tegra_pin_type type;
+       unsigned char num;
+};
+
+struct tegra_kbc_wake_key {
+       u8 row:4;
+       u8 col:4;
+};
+
+struct tegra_kbc_platform_data {
+       unsigned int debounce_cnt;
+       unsigned int repeat_cnt;
+
+       struct tegra_kbc_pin_cfg pin_cfg[KBC_MAX_GPIO];
+       const struct matrix_keymap_data *keymap_data;
+
+       u32 wakeup_key;
+       bool wakeup;
+       bool use_fn_map;
+       bool use_ghost_filter;
+};
+#endif
index a0c41256cb923d5ffb21d9f4acd20680d3fc54a0..66c5fe9550a5e47bfc2a32c67e1339eb7d4f92ae 100644 (file)
@@ -22,13 +22,14 @@ enum integrity_status {
 
 /* List of EVM protected security xattrs */
 #ifdef CONFIG_INTEGRITY
-extern int integrity_inode_alloc(struct inode *inode);
+extern struct integrity_iint_cache *integrity_inode_get(struct inode *inode);
 extern void integrity_inode_free(struct inode *inode);
 
 #else
-static inline int integrity_inode_alloc(struct inode *inode)
+static inline struct integrity_iint_cache *
+                               integrity_inode_get(struct inode *inode)
 {
-       return 0;
+       return NULL;
 }
 
 static inline void integrity_inode_free(struct inode *inode)
index bf22b03179022da13565642026213601e879fa88..48af63c9a48d25f5f6d9e2681e6eeeec5a4fef8c 100644 (file)
@@ -31,4 +31,21 @@ struct ip6_tnl_parm {
        struct in6_addr raddr;  /* remote tunnel end-point address */
 };
 
+struct ip6_tnl_parm2 {
+       char name[IFNAMSIZ];    /* name of tunnel device */
+       int link;               /* ifindex of underlying L2 interface */
+       __u8 proto;             /* tunnel protocol */
+       __u8 encap_limit;       /* encapsulation limit for tunnel */
+       __u8 hop_limit;         /* hop limit for tunnel */
+       __be32 flowinfo;        /* traffic class and flowlabel for tunnel */
+       __u32 flags;            /* tunnel flags */
+       struct in6_addr laddr;  /* local tunnel end-point address */
+       struct in6_addr raddr;  /* remote tunnel end-point address */
+
+       __be16                  i_flags;
+       __be16                  o_flags;
+       __be32                  i_key;
+       __be32                  o_key;
+};
+
 #endif
index 30e816148df42c0dd4c7fc0f71755a2a7a448215..ca833fdc3138595ed076af4ba41003e37935e5a8 100644 (file)
@@ -79,6 +79,7 @@ struct ipc_kludge {
 
 #ifdef __KERNEL__
 #include <linux/spinlock.h>
+#include <linux/uidgid.h>
 
 #define IPCMNI 32768  /* <= MAX_INT limit for ipc arrays (including sysctl changes) */
 
@@ -89,10 +90,10 @@ struct kern_ipc_perm
        int             deleted;
        int             id;
        key_t           key;
-       uid_t           uid;
-       gid_t           gid;
-       uid_t           cuid;
-       gid_t           cgid;
+       kuid_t          uid;
+       kgid_t          gid;
+       kuid_t          cuid;
+       kgid_t          cgid;
        umode_t         mode; 
        unsigned long   seq;
        void            *security;
index 879db26ec4013297fb76f16143b87ec9db52b5e4..0b94e91ed68529ef74573b4c170eb96f5afc8175 100644 (file)
@@ -256,6 +256,7 @@ struct inet6_skb_parm {
 #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
        __u16                   dsthao;
 #endif
+       __u16                   frag_max_size;
 
 #define IP6SKB_XFRM_TRANSFORMED        1
 #define IP6SKB_FORWARDED       2
index 82680541576d2f2d307ba9bfb1847681f290f49b..05e3c2c7a8cf81e2184f49a531a8158fa45639b4 100644 (file)
@@ -312,7 +312,13 @@ extern void jiffies_to_timespec(const unsigned long jiffies,
 extern unsigned long timeval_to_jiffies(const struct timeval *value);
 extern void jiffies_to_timeval(const unsigned long jiffies,
                               struct timeval *value);
+
 extern clock_t jiffies_to_clock_t(unsigned long x);
+static inline clock_t jiffies_delta_to_clock_t(long delta)
+{
+       return jiffies_to_clock_t(max(0L, delta));
+}
+
 extern unsigned long clock_t_to_jiffies(unsigned long x);
 extern u64 jiffies_64_to_clock_t(u64 x);
 extern u64 nsec_to_clock_t(u64 x);
index cef3b315ba7c2e0786e78940cc476c8e12bc89ce..2393b1c040b695d69d3a2f700300d3b4cec296c5 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/atomic.h>
 
 #ifdef __KERNEL__
+#include <linux/uidgid.h>
 
 /* key handle serial number */
 typedef int32_t key_serial_t;
@@ -137,8 +138,8 @@ struct key {
                time_t          revoked_at;     /* time at which key was revoked */
        };
        time_t                  last_used_at;   /* last time used for LRU keyring discard */
-       uid_t                   uid;
-       gid_t                   gid;
+       kuid_t                  uid;
+       kgid_t                  gid;
        key_perm_t              perm;           /* access permissions */
        unsigned short          quotalen;       /* length added to quota */
        unsigned short          datalen;        /* payload data length
@@ -193,7 +194,7 @@ struct key {
 
 extern struct key *key_alloc(struct key_type *type,
                             const char *desc,
-                            uid_t uid, gid_t gid,
+                            kuid_t uid, kgid_t gid,
                             const struct cred *cred,
                             key_perm_t perm,
                             unsigned long flags);
@@ -262,7 +263,7 @@ extern int key_link(struct key *keyring,
 extern int key_unlink(struct key *keyring,
                      struct key *key);
 
-extern struct key *keyring_alloc(const char *description, uid_t uid, gid_t gid,
+extern struct key *keyring_alloc(const char *description, kuid_t uid, kgid_t gid,
                                 const struct cred *cred,
                                 unsigned long flags,
                                 struct key *dest);
index 64f90e17e51d5a87f3aa997e664c132542ad8029..77eeeda2b6e2cbcab6cda30aaa5269885344150c 100644 (file)
@@ -162,6 +162,7 @@ enum {
        ATA_DFLAG_DETACHED      = (1 << 25),
 
        ATA_DFLAG_DA            = (1 << 26), /* device supports Device Attention */
+       ATA_DFLAG_DEVSLP        = (1 << 27), /* device supports Device Sleep */
 
        ATA_DEV_UNKNOWN         = 0,    /* unknown device */
        ATA_DEV_ATA             = 1,    /* ATA device */
@@ -184,6 +185,7 @@ enum {
        ATA_LFLAG_DISABLED      = (1 << 6), /* link is disabled */
        ATA_LFLAG_SW_ACTIVITY   = (1 << 7), /* keep activity stats */
        ATA_LFLAG_NO_LPM        = (1 << 8), /* disable LPM on this link */
+       ATA_LFLAG_RST_ONCE      = (1 << 9), /* limit recovery to one reset */
 
        /* struct ata_port flags */
        ATA_FLAG_SLAVE_POSS     = (1 << 0), /* host supports slave dev */
@@ -649,6 +651,9 @@ struct ata_device {
                u32             gscr[SATA_PMP_GSCR_DWORDS]; /* PMP GSCR block */
        };
 
+       /* Identify Device Data Log (30h), SATA Settings (page 08h) */
+       u8                      sata_settings[ATA_SECT_SIZE];
+
        /* error history */
        int                     spdn_cnt;
        /* ering is CLEAR_END, read comment above CLEAR_END */
@@ -986,8 +991,7 @@ extern int ata_host_activate(struct ata_host *host, int irq,
                             irq_handler_t irq_handler, unsigned long irq_flags,
                             struct scsi_host_template *sht);
 extern void ata_host_detach(struct ata_host *host);
-extern void ata_host_init(struct ata_host *, struct device *,
-                         unsigned long, struct ata_port_operations *);
+extern void ata_host_init(struct ata_host *, struct device *, struct ata_port_operations *);
 extern int ata_scsi_detect(struct scsi_host_template *sht);
 extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
 extern int ata_scsi_queuecmd(struct Scsi_Host *h, struct scsi_cmnd *cmd);
@@ -1012,6 +1016,17 @@ extern bool ata_link_offline(struct ata_link *link);
 #ifdef CONFIG_PM
 extern int ata_host_suspend(struct ata_host *host, pm_message_t mesg);
 extern void ata_host_resume(struct ata_host *host);
+extern int ata_sas_port_async_suspend(struct ata_port *ap, int *async);
+extern int ata_sas_port_async_resume(struct ata_port *ap, int *async);
+#else
+static inline int ata_sas_port_async_suspend(struct ata_port *ap, int *async)
+{
+       return 0;
+}
+static inline int ata_sas_port_async_resume(struct ata_port *ap, int *async)
+{
+       return 0;
+}
 #endif
 extern int ata_ratelimit(void);
 extern void ata_msleep(struct ata_port *ap, unsigned int msecs);
index 11a41a8f08eb9e98cb1105982d093c3d075d5206..9635116dd830e3634a6c6e2d06219eb36661578c 100644 (file)
@@ -44,7 +44,7 @@ struct loop_device {
        int             lo_encrypt_key_size;
        struct loop_func_table *lo_encryption;
        __u32           lo_init[2];
-       uid_t           lo_key_owner;   /* Who set the key */
+       kuid_t          lo_key_owner;   /* Who set the key */
        int             (*ioctl)(struct loop_device *, int cmd, 
                                 unsigned long arg); 
 
index 7cccafe50e7bd6629b10623240fb6b08e79b088c..6c406845f7e29d166667ebc1587fe41eb5cb2448 100644 (file)
@@ -377,5 +377,88 @@ static inline void mdio45_ethtool_gset(const struct mdio_if_info *mdio,
 extern int mdio_mii_ioctl(const struct mdio_if_info *mdio,
                          struct mii_ioctl_data *mii_data, int cmd);
 
+/**
+ * mmd_eee_cap_to_ethtool_sup_t
+ * @eee_cap: value of the MMD EEE Capability register
+ *
+ * A small helper function that translates MMD EEE Capability (3.20) bits
+ * to ethtool supported settings.
+ */
+static inline u32 mmd_eee_cap_to_ethtool_sup_t(u16 eee_cap)
+{
+       u32 supported = 0;
+
+       if (eee_cap & MDIO_EEE_100TX)
+               supported |= SUPPORTED_100baseT_Full;
+       if (eee_cap & MDIO_EEE_1000T)
+               supported |= SUPPORTED_1000baseT_Full;
+       if (eee_cap & MDIO_EEE_10GT)
+               supported |= SUPPORTED_10000baseT_Full;
+       if (eee_cap & MDIO_EEE_1000KX)
+               supported |= SUPPORTED_1000baseKX_Full;
+       if (eee_cap & MDIO_EEE_10GKX4)
+               supported |= SUPPORTED_10000baseKX4_Full;
+       if (eee_cap & MDIO_EEE_10GKR)
+               supported |= SUPPORTED_10000baseKR_Full;
+
+       return supported;
+}
+
+/**
+ * mmd_eee_adv_to_ethtool_adv_t
+ * @eee_adv: value of the MMD EEE Advertisement/Link Partner Ability registers
+ *
+ * A small helper function that translates the MMD EEE Advertisment (7.60)
+ * and MMD EEE Link Partner Ability (7.61) bits to ethtool advertisement
+ * settings.
+ */
+static inline u32 mmd_eee_adv_to_ethtool_adv_t(u16 eee_adv)
+{
+       u32 adv = 0;
+
+       if (eee_adv & MDIO_EEE_100TX)
+               adv |= ADVERTISED_100baseT_Full;
+       if (eee_adv & MDIO_EEE_1000T)
+               adv |= ADVERTISED_1000baseT_Full;
+       if (eee_adv & MDIO_EEE_10GT)
+               adv |= ADVERTISED_10000baseT_Full;
+       if (eee_adv & MDIO_EEE_1000KX)
+               adv |= ADVERTISED_1000baseKX_Full;
+       if (eee_adv & MDIO_EEE_10GKX4)
+               adv |= ADVERTISED_10000baseKX4_Full;
+       if (eee_adv & MDIO_EEE_10GKR)
+               adv |= ADVERTISED_10000baseKR_Full;
+
+       return adv;
+}
+
+/**
+ * ethtool_adv_to_mmd_eee_adv_t
+ * @adv: the ethtool advertisement settings
+ *
+ * A small helper function that translates ethtool advertisement settings
+ * to EEE advertisements for the MMD EEE Advertisement (7.60) and
+ * MMD EEE Link Partner Ability (7.61) registers.
+ */
+static inline u16 ethtool_adv_to_mmd_eee_adv_t(u32 adv)
+{
+       u16 reg = 0;
+
+       if (adv & ADVERTISED_100baseT_Full)
+               reg |= MDIO_EEE_100TX;
+       if (adv & ADVERTISED_1000baseT_Full)
+               reg |= MDIO_EEE_1000T;
+       if (adv & ADVERTISED_10000baseT_Full)
+               reg |= MDIO_EEE_10GT;
+       if (adv & ADVERTISED_1000baseKX_Full)
+               reg |= MDIO_EEE_1000KX;
+       if (adv & ADVERTISED_10000baseKX4_Full)
+               reg |= MDIO_EEE_10GKX4;
+       if (adv & ADVERTISED_10000baseKR_Full)
+               reg |= MDIO_EEE_10GKR;
+
+       return reg;
+}
+
 #endif /* __KERNEL__ */
 #endif /* __LINUX_MDIO_H__ */
index 6e1b0f973a03511b398154a5d42f3a9174b9268a..6d1acb04cd17c1babf6d477c0554156afd6d5950 100644 (file)
@@ -54,7 +54,13 @@ enum {
 };
 
 enum {
-       MLX4_MAX_PORTS          = 2
+       MLX4_PORT_CAP_IS_SM     = 1 << 1,
+       MLX4_PORT_CAP_DEV_MGMT_SUP = 1 << 19,
+};
+
+enum {
+       MLX4_MAX_PORTS          = 2,
+       MLX4_MAX_PORT_PKEYS     = 128
 };
 
 /* base qkey for use in sriov tunnel-qp/proxy-qp communication.
@@ -191,6 +197,25 @@ enum {
        MLX4_FATAL_WARNING_SUBTYPE_WARMING = 0,
 };
 
+enum slave_port_state {
+       SLAVE_PORT_DOWN = 0,
+       SLAVE_PENDING_UP,
+       SLAVE_PORT_UP,
+};
+
+enum slave_port_gen_event {
+       SLAVE_PORT_GEN_EVENT_DOWN = 0,
+       SLAVE_PORT_GEN_EVENT_UP,
+       SLAVE_PORT_GEN_EVENT_NONE,
+};
+
+enum slave_port_state_event {
+       MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN,
+       MLX4_PORT_STATE_DEV_EVENT_PORT_UP,
+       MLX4_PORT_STATE_IB_PORT_STATE_EVENT_GID_VALID,
+       MLX4_PORT_STATE_IB_EVENT_GID_INVALID,
+};
+
 enum {
        MLX4_PERM_LOCAL_READ    = 1 << 10,
        MLX4_PERM_LOCAL_WRITE   = 1 << 11,
@@ -303,6 +328,9 @@ struct mlx4_phys_caps {
        u32                     gid_phys_table_len[MLX4_MAX_PORTS + 1];
        u32                     pkey_phys_table_len[MLX4_MAX_PORTS + 1];
        u32                     num_phys_eqs;
+       u32                     base_sqpn;
+       u32                     base_proxy_sqpn;
+       u32                     base_tunnel_sqpn;
 };
 
 struct mlx4_caps {
@@ -333,9 +361,10 @@ struct mlx4_caps {
        int                     max_rq_desc_sz;
        int                     max_qp_init_rdma;
        int                     max_qp_dest_rdma;
-       int                     sqp_start;
-       u32                     base_sqpn;
-       u32                     base_tunnel_sqpn;
+       u32                     *qp0_proxy;
+       u32                     *qp1_proxy;
+       u32                     *qp0_tunnel;
+       u32                     *qp1_tunnel;
        int                     num_srqs;
        int                     max_srq_wqes;
        int                     max_srq_sge;
@@ -389,6 +418,7 @@ struct mlx4_caps {
        enum mlx4_port_type     possible_type[MLX4_MAX_PORTS + 1];
        u32                     max_counters;
        u8                      port_ib_mtu[MLX4_MAX_PORTS + 1];
+       u16                     sqp_demux;
 };
 
 struct mlx4_buf_list {
@@ -671,6 +701,10 @@ struct mlx4_init_port_param {
        for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++)     \
                if ((type) == (dev)->caps.port_mask[(port)])
 
+#define mlx4_foreach_non_ib_transport_port(port, dev)                     \
+       for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++)       \
+               if (((dev)->caps.port_mask[port] != MLX4_PORT_TYPE_IB))
+
 #define mlx4_foreach_ib_transport_port(port, dev)                         \
        for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++)       \
                if (((dev)->caps.port_mask[port] == MLX4_PORT_TYPE_IB) || \
@@ -692,7 +726,18 @@ static inline int mlx4_is_master(struct mlx4_dev *dev)
 
 static inline int mlx4_is_qp_reserved(struct mlx4_dev *dev, u32 qpn)
 {
-       return (qpn < dev->caps.sqp_start + 8);
+       return (qpn < dev->phys_caps.base_sqpn + 8 +
+               16 * MLX4_MFUNC_MAX * !!mlx4_is_master(dev));
+}
+
+static inline int mlx4_is_guest_proxy(struct mlx4_dev *dev, int slave, u32 qpn)
+{
+       int guest_proxy_base = dev->phys_caps.base_proxy_sqpn + slave * 8;
+
+       if (qpn >= guest_proxy_base && qpn < guest_proxy_base + 8)
+               return 1;
+
+       return 0;
 }
 
 static inline int mlx4_is_mfunc(struct mlx4_dev *dev)
@@ -927,6 +972,20 @@ int mlx4_flow_attach(struct mlx4_dev *dev,
                     struct mlx4_net_trans_rule *rule, u64 *reg_id);
 int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id);
 
+void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port,
+                         int i, int val);
+
 int mlx4_get_parav_qkey(struct mlx4_dev *dev, u32 qpn, u32 *qkey);
 
+int mlx4_is_slave_active(struct mlx4_dev *dev, int slave);
+int mlx4_gen_pkey_eqe(struct mlx4_dev *dev, int slave, u8 port);
+int mlx4_gen_guid_change_eqe(struct mlx4_dev *dev, int slave, u8 port);
+int mlx4_gen_slaves_port_mgt_ev(struct mlx4_dev *dev, u8 port, int attr);
+int mlx4_gen_port_state_change_eqe(struct mlx4_dev *dev, int slave, u8 port, u8 port_subtype_change);
+enum slave_port_state mlx4_get_slave_port_state(struct mlx4_dev *dev, int slave, u8 port);
+int set_and_calc_slave_port_state(struct mlx4_dev *dev, int slave, u8 port, int event, enum slave_port_gen_event *gen_event);
+
+void mlx4_put_slave_node_guid(struct mlx4_dev *dev, int slave, __be64 guid);
+__be64 mlx4_get_slave_node_guid(struct mlx4_dev *dev, int slave);
+
 #endif /* MLX4_DEVICE_H */
index d813704b963b1e3ac4d9e04911e3bc7ad09113d0..c257e1b211be813989d1948906a637e358b0910c 100644 (file)
@@ -45,6 +45,8 @@ enum mlx4_dev_event {
        MLX4_DEV_EVENT_PORT_DOWN,
        MLX4_DEV_EVENT_PORT_REINIT,
        MLX4_DEV_EVENT_PORT_MGMT_CHANGE,
+       MLX4_DEV_EVENT_SLAVE_INIT,
+       MLX4_DEV_EVENT_SLAVE_SHUTDOWN,
 };
 
 struct mlx4_interface {
index 338388ba260a14887321889ee25fb8b3d64cb9e4..4b4ad6ffef9289594a47b00ba231d8afb4d69cce 100644 (file)
@@ -126,7 +126,8 @@ struct mlx4_rss_context {
 
 struct mlx4_qp_path {
        u8                      fl;
-       u8                      reserved1[2];
+       u8                      reserved1[1];
+       u8                      disable_pkey_check;
        u8                      pkey_index;
        u8                      counter_index;
        u8                      grh_mylmc;
index 2475228c1158e6f54e4f7ce02b2607ee9f487922..1f8d24bdafdac2bc28789c777315068311368765 100644 (file)
@@ -79,9 +79,10 @@ struct mtd_part_parser {
 extern int register_mtd_parser(struct mtd_part_parser *parser);
 extern int deregister_mtd_parser(struct mtd_part_parser *parser);
 
-int mtd_is_partition(struct mtd_info *mtd);
+int mtd_is_partition(const struct mtd_info *mtd);
 int mtd_add_partition(struct mtd_info *master, char *name,
                      long long offset, long long length);
 int mtd_del_partition(struct mtd_info *master, int partno);
+uint64_t mtd_get_device_size(const struct mtd_info *mtd);
 
 #endif
index 99276c3dc89aef2f3d2014ca87b427877247d6f8..6ab31cabef7c01f947a384092caa4c08564233f7 100644 (file)
@@ -65,6 +65,7 @@ typedef enum {
 struct poll_table_struct;
 struct pipe_inode_info;
 struct inode;
+struct file;
 struct net;
 
 #define SOCK_ASYNC_NOSPACE     0
@@ -246,7 +247,7 @@ extern int               sock_sendmsg(struct socket *sock, struct msghdr *msg,
                                  size_t len);
 extern int          sock_recvmsg(struct socket *sock, struct msghdr *msg,
                                  size_t size, int flags);
-extern int          sock_map_fd(struct socket *sock, int flags);
+extern struct file  *sock_alloc_file(struct socket *sock, int flags, const char *dname);
 extern struct socket *sockfd_lookup(int fd, int *err);
 extern struct socket *sock_from_file(struct file *file, int *err);
 #define                     sockfd_put(sock) fput(sock->file)
index 5f49cc0a107e2f75eafbedffd152f03565e16a15..01646aa53b0e6c30be6de8afb406516a2f4b69a5 100644 (file)
@@ -338,18 +338,16 @@ struct napi_struct {
 
        unsigned long           state;
        int                     weight;
+       unsigned int            gro_count;
        int                     (*poll)(struct napi_struct *, int);
 #ifdef CONFIG_NETPOLL
        spinlock_t              poll_lock;
        int                     poll_owner;
 #endif
-
-       unsigned int            gro_count;
-
        struct net_device       *dev;
-       struct list_head        dev_list;
        struct sk_buff          *gro_list;
        struct sk_buff          *skb;
+       struct list_head        dev_list;
 };
 
 enum {
@@ -906,11 +904,12 @@ struct netdev_fcoe_hbainfo {
  *     feature set might be less than what was returned by ndo_fix_features()).
  *     Must return >0 or -errno if it changed dev->features itself.
  *
- * int (*ndo_fdb_add)(struct ndmsg *ndm, struct net_device *dev,
- *                   unsigned char *addr, u16 flags)
+ * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[],
+ *                   struct net_device *dev,
+ *                   const unsigned char *addr, u16 flags)
  *     Adds an FDB entry to dev for addr.
  * int (*ndo_fdb_del)(struct ndmsg *ndm, struct net_device *dev,
- *                   unsigned char *addr)
+ *                   const unsigned char *addr)
  *     Deletes the FDB entry from dev coresponding to addr.
  * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb,
  *                    struct net_device *dev, int idx)
@@ -1016,12 +1015,13 @@ struct net_device_ops {
        void                    (*ndo_neigh_destroy)(struct neighbour *n);
 
        int                     (*ndo_fdb_add)(struct ndmsg *ndm,
+                                              struct nlattr *tb[],
                                               struct net_device *dev,
-                                              unsigned char *addr,
+                                              const unsigned char *addr,
                                               u16 flags);
        int                     (*ndo_fdb_del)(struct ndmsg *ndm,
                                               struct net_device *dev,
-                                              unsigned char *addr);
+                                              const unsigned char *addr);
        int                     (*ndo_fdb_dump)(struct sk_buff *skb,
                                                struct netlink_callback *cb,
                                                struct net_device *dev,
@@ -1322,6 +1322,8 @@ struct net_device {
        /* phy device may attach itself for hardware timestamping */
        struct phy_device *phydev;
 
+       struct lock_class_key *qdisc_tx_busylock;
+
        /* group the device belongs to */
        int group;
 
@@ -1401,6 +1403,9 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev,
                f(dev, &dev->_tx[i], arg);
 }
 
+extern struct netdev_queue *netdev_pick_tx(struct net_device *dev,
+                                          struct sk_buff *skb);
+
 /*
  * Net namespace inlines
  */
@@ -1553,7 +1558,7 @@ struct packet_type {
 #define NETDEV_PRE_TYPE_CHANGE 0x000E
 #define NETDEV_POST_TYPE_CHANGE        0x000F
 #define NETDEV_POST_INIT       0x0010
-#define NETDEV_UNREGISTER_BATCH 0x0011
+#define NETDEV_UNREGISTER_FINAL 0x0011
 #define NETDEV_RELEASE         0x0012
 #define NETDEV_NOTIFY_PEERS    0x0013
 #define NETDEV_JOIN            0x0014
@@ -2227,6 +2232,7 @@ static inline void dev_hold(struct net_device *dev)
  * kind of lower layer not just hardware media.
  */
 
+extern void linkwatch_init_dev(struct net_device *dev);
 extern void linkwatch_fire_event(struct net_device *dev);
 extern void linkwatch_forget_dev(struct net_device *dev);
 
@@ -2249,8 +2255,6 @@ extern void netif_carrier_on(struct net_device *dev);
 
 extern void netif_carrier_off(struct net_device *dev);
 
-extern void netif_notify_peers(struct net_device *dev);
-
 /**
  *     netif_dormant_on - mark device as dormant.
  *     @dev: network device
@@ -2560,9 +2564,9 @@ extern void __hw_addr_flush(struct netdev_hw_addr_list *list);
 extern void __hw_addr_init(struct netdev_hw_addr_list *list);
 
 /* Functions used for device addresses handling */
-extern int dev_addr_add(struct net_device *dev, unsigned char *addr,
+extern int dev_addr_add(struct net_device *dev, const unsigned char *addr,
                        unsigned char addr_type);
-extern int dev_addr_del(struct net_device *dev, unsigned char *addr,
+extern int dev_addr_del(struct net_device *dev, const unsigned char *addr,
                        unsigned char addr_type);
 extern int dev_addr_add_multiple(struct net_device *to_dev,
                                 struct net_device *from_dev,
@@ -2574,20 +2578,20 @@ extern void dev_addr_flush(struct net_device *dev);
 extern int dev_addr_init(struct net_device *dev);
 
 /* Functions used for unicast addresses handling */
-extern int dev_uc_add(struct net_device *dev, unsigned char *addr);
-extern int dev_uc_add_excl(struct net_device *dev, unsigned char *addr);
-extern int dev_uc_del(struct net_device *dev, unsigned char *addr);
+extern int dev_uc_add(struct net_device *dev, const unsigned char *addr);
+extern int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr);
+extern int dev_uc_del(struct net_device *dev, const unsigned char *addr);
 extern int dev_uc_sync(struct net_device *to, struct net_device *from);
 extern void dev_uc_unsync(struct net_device *to, struct net_device *from);
 extern void dev_uc_flush(struct net_device *dev);
 extern void dev_uc_init(struct net_device *dev);
 
 /* Functions used for multicast addresses handling */
-extern int dev_mc_add(struct net_device *dev, unsigned char *addr);
-extern int dev_mc_add_global(struct net_device *dev, unsigned char *addr);
-extern int dev_mc_add_excl(struct net_device *dev, unsigned char *addr);
-extern int dev_mc_del(struct net_device *dev, unsigned char *addr);
-extern int dev_mc_del_global(struct net_device *dev, unsigned char *addr);
+extern int dev_mc_add(struct net_device *dev, const unsigned char *addr);
+extern int dev_mc_add_global(struct net_device *dev, const unsigned char *addr);
+extern int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr);
+extern int dev_mc_del(struct net_device *dev, const unsigned char *addr);
+extern int dev_mc_del_global(struct net_device *dev, const unsigned char *addr);
 extern int dev_mc_sync(struct net_device *to, struct net_device *from);
 extern void dev_mc_unsync(struct net_device *to, struct net_device *from);
 extern void dev_mc_flush(struct net_device *dev);
@@ -2599,8 +2603,7 @@ extern void               __dev_set_rx_mode(struct net_device *dev);
 extern int             dev_set_promiscuity(struct net_device *dev, int inc);
 extern int             dev_set_allmulti(struct net_device *dev, int inc);
 extern void            netdev_state_change(struct net_device *dev);
-extern int             netdev_bonding_change(struct net_device *dev,
-                                             unsigned long event);
+extern void            netdev_notify_peers(struct net_device *dev);
 extern void            netdev_features_change(struct net_device *dev);
 /* Load a device via the kmod */
 extern void            dev_load(struct net *net, const char *name);
index c613cf0d7884f8d3f4cf85c188a17c04f8f5e054..1dcf2a38e51f69770395db885ab2aebc39558abe 100644 (file)
@@ -342,7 +342,7 @@ extern int nf_register_afinfo(const struct nf_afinfo *afinfo);
 extern void nf_unregister_afinfo(const struct nf_afinfo *afinfo);
 
 #include <net/flow.h>
-extern void (*ip_nat_decode_session)(struct sk_buff *, struct flowi *);
+extern void (*nf_nat_decode_session_hook)(struct sk_buff *, struct flowi *);
 
 static inline void
 nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
@@ -350,13 +350,11 @@ nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
 #ifdef CONFIG_NF_NAT_NEEDED
        void (*decodefn)(struct sk_buff *, struct flowi *);
 
-       if (family == AF_INET) {
-               rcu_read_lock();
-               decodefn = rcu_dereference(ip_nat_decode_session);
-               if (decodefn)
-                       decodefn(skb, fl);
-               rcu_read_unlock();
-       }
+       rcu_read_lock();
+       decodefn = rcu_dereference(nf_nat_decode_session_hook);
+       if (decodefn)
+               decodefn(skb, fl);
+       rcu_read_unlock();
 #endif
 }
 
index 2edc64cab7395afa707edc7cca024a1b27b19e09..528697b3c1524d1e7e071986231e2d14979018c2 100644 (file)
@@ -190,6 +190,7 @@ enum ip_set_dim {
         * If changed, new revision of iptables match/target is required.
         */
        IPSET_DIM_MAX = 6,
+       IPSET_BIT_RETURN_NOMATCH = 7,
 };
 
 /* Option flags for kernel operations */
@@ -198,6 +199,7 @@ enum ip_set_kopt {
        IPSET_DIM_ONE_SRC = (1 << IPSET_DIM_ONE),
        IPSET_DIM_TWO_SRC = (1 << IPSET_DIM_TWO),
        IPSET_DIM_THREE_SRC = (1 << IPSET_DIM_THREE),
+       IPSET_RETURN_NOMATCH = (1 << IPSET_BIT_RETURN_NOMATCH),
 };
 
 #ifdef __KERNEL__
@@ -206,9 +208,15 @@ enum ip_set_kopt {
 #include <linux/netlink.h>
 #include <linux/netfilter.h>
 #include <linux/netfilter/x_tables.h>
+#include <linux/stringify.h>
 #include <linux/vmalloc.h>
 #include <net/netlink.h>
 
+#define _IP_SET_MODULE_DESC(a, b, c)           \
+       MODULE_DESCRIPTION(a " type of IP sets, revisions " b "-" c)
+#define IP_SET_MODULE_DESC(a, b, c)            \
+       _IP_SET_MODULE_DESC(a, __stringify(b), __stringify(c))
+
 /* Set features */
 enum ip_set_feature {
        IPSET_TYPE_IP_FLAG = 0,
@@ -223,6 +231,8 @@ enum ip_set_feature {
        IPSET_TYPE_NAME = (1 << IPSET_TYPE_NAME_FLAG),
        IPSET_TYPE_IFACE_FLAG = 5,
        IPSET_TYPE_IFACE = (1 << IPSET_TYPE_IFACE_FLAG),
+       IPSET_TYPE_NOMATCH_FLAG = 6,
+       IPSET_TYPE_NOMATCH = (1 << IPSET_TYPE_NOMATCH_FLAG),
        /* Strictly speaking not a feature, but a flag for dumping:
         * this settype must be dumped last */
        IPSET_DUMP_LAST_FLAG = 7,
@@ -249,7 +259,7 @@ struct ip_set_type_variant {
         *              returns negative error code,
         *                      zero for no match/success to add/delete
         *                      positive for matching element */
-       int (*kadt)(struct ip_set *set, const struct sk_buff * skb,
+       int (*kadt)(struct ip_set *set, const struct sk_buff *skb,
                    const struct xt_action_param *par,
                    enum ipset_adt adt, const struct ip_set_adt_opt *opt);
 
@@ -424,7 +434,8 @@ static inline int nla_put_ipaddr4(struct sk_buff *skb, int type, __be32 ipaddr)
        return ret;
 }
 
-static inline int nla_put_ipaddr6(struct sk_buff *skb, int type, const struct in6_addr *ipaddrptr)
+static inline int nla_put_ipaddr6(struct sk_buff *skb, int type,
+                                 const struct in6_addr *ipaddrptr)
 {
        struct nlattr *__nested = ipset_nest_start(skb, type);
        int ret;
index b114d35aea5e652c90b13ef94e1ef00f6d864c90..ef9acd3c84506fcd3525166501387445181e92c0 100644 (file)
@@ -137,50 +137,59 @@ htable_bits(u32 hashsize)
 #endif
 
 #define SET_HOST_MASK(family)  (family == AF_INET ? 32 : 128)
+#ifdef IP_SET_HASH_WITH_MULTI
+#define NETS_LENGTH(family)    (SET_HOST_MASK(family) + 1)
+#else
+#define NETS_LENGTH(family)    SET_HOST_MASK(family)
+#endif
 
 /* Network cidr size book keeping when the hash stores different
  * sized networks */
 static void
-add_cidr(struct ip_set_hash *h, u8 cidr, u8 host_mask)
+add_cidr(struct ip_set_hash *h, u8 cidr, u8 nets_length)
 {
-       u8 i;
-
-       ++h->nets[cidr-1].nets;
-
-       pr_debug("add_cidr added %u: %u\n", cidr, h->nets[cidr-1].nets);
+       int i, j;
 
-       if (h->nets[cidr-1].nets > 1)
-               return;
-
-       /* New cidr size */
-       for (i = 0; i < host_mask && h->nets[i].cidr; i++) {
-               /* Add in increasing prefix order, so larger cidr first */
-               if (h->nets[i].cidr < cidr)
-                       swap(h->nets[i].cidr, cidr);
+       /* Add in increasing prefix order, so larger cidr first */
+       for (i = 0, j = -1; i < nets_length && h->nets[i].nets; i++) {
+               if (j != -1)
+                       continue;
+               else if (h->nets[i].cidr < cidr)
+                       j = i;
+               else if (h->nets[i].cidr == cidr) {
+                       h->nets[i].nets++;
+                       return;
+               }
+       }
+       if (j != -1) {
+               for (; i > j; i--) {
+                       h->nets[i].cidr = h->nets[i - 1].cidr;
+                       h->nets[i].nets = h->nets[i - 1].nets;
+               }
        }
-       if (i < host_mask)
-               h->nets[i].cidr = cidr;
+       h->nets[i].cidr = cidr;
+       h->nets[i].nets = 1;
 }
 
 static void
-del_cidr(struct ip_set_hash *h, u8 cidr, u8 host_mask)
+del_cidr(struct ip_set_hash *h, u8 cidr, u8 nets_length)
 {
-       u8 i;
-
-       --h->nets[cidr-1].nets;
+       u8 i, j;
 
-       pr_debug("del_cidr deleted %u: %u\n", cidr, h->nets[cidr-1].nets);
+       for (i = 0; i < nets_length - 1 && h->nets[i].cidr != cidr; i++)
+               ;
+       h->nets[i].nets--;
 
-       if (h->nets[cidr-1].nets != 0)
+       if (h->nets[i].nets != 0)
                return;
 
-       /* All entries with this cidr size deleted, so cleanup h->cidr[] */
-       for (i = 0; i < host_mask - 1 && h->nets[i].cidr; i++) {
-               if (h->nets[i].cidr == cidr)
-                       h->nets[i].cidr = cidr = h->nets[i+1].cidr;
+       for (j = i; j < nets_length - 1 && h->nets[j].nets; j++) {
+               h->nets[j].cidr = h->nets[j + 1].cidr;
+               h->nets[j].nets = h->nets[j + 1].nets;
        }
-       h->nets[i - 1].cidr = 0;
 }
+#else
+#define NETS_LENGTH(family)            0
 #endif
 
 /* Destroy the hashtable part of the set */
@@ -202,14 +211,14 @@ ahash_destroy(struct htable *t)
 
 /* Calculate the actual memory size of the set data */
 static size_t
-ahash_memsize(const struct ip_set_hash *h, size_t dsize, u8 host_mask)
+ahash_memsize(const struct ip_set_hash *h, size_t dsize, u8 nets_length)
 {
        u32 i;
        struct htable *t = h->table;
        size_t memsize = sizeof(*h)
                         + sizeof(*t)
 #ifdef IP_SET_HASH_WITH_NETS
-                        + sizeof(struct ip_set_hash_nets) * host_mask
+                        + sizeof(struct ip_set_hash_nets) * nets_length
 #endif
                         + jhash_size(t->htable_bits) * sizeof(struct hbucket);
 
@@ -238,7 +247,7 @@ ip_set_hash_flush(struct ip_set *set)
        }
 #ifdef IP_SET_HASH_WITH_NETS
        memset(h->nets, 0, sizeof(struct ip_set_hash_nets)
-                          * SET_HOST_MASK(set->family));
+                          * NETS_LENGTH(set->family));
 #endif
        h->elements = 0;
 }
@@ -271,9 +280,6 @@ ip_set_hash_destroy(struct ip_set *set)
 (jhash2((u32 *)(data), HKEY_DATALEN/sizeof(u32), initval)      \
        & jhash_mask(htable_bits))
 
-#define CONCAT(a, b, c)                a##b##c
-#define TOKEN(a, b, c)         CONCAT(a, b, c)
-
 /* Type/family dependent function prototypes */
 
 #define type_pf_data_equal     TOKEN(TYPE, PF, _data_equal)
@@ -478,7 +484,7 @@ type_pf_add(struct ip_set *set, void *value, u32 timeout, u32 flags)
        }
 
 #ifdef IP_SET_HASH_WITH_NETS
-       add_cidr(h, CIDR(d->cidr), HOST_MASK);
+       add_cidr(h, CIDR(d->cidr), NETS_LENGTH(set->family));
 #endif
        h->elements++;
 out:
@@ -513,7 +519,7 @@ type_pf_del(struct ip_set *set, void *value, u32 timeout, u32 flags)
                n->pos--;
                h->elements--;
 #ifdef IP_SET_HASH_WITH_NETS
-               del_cidr(h, CIDR(d->cidr), HOST_MASK);
+               del_cidr(h, CIDR(d->cidr), NETS_LENGTH(set->family));
 #endif
                if (n->pos + AHASH_INIT_SIZE < n->size) {
                        void *tmp = kzalloc((n->size - AHASH_INIT_SIZE)
@@ -546,10 +552,10 @@ type_pf_test_cidrs(struct ip_set *set, struct type_pf_elem *d, u32 timeout)
        const struct type_pf_elem *data;
        int i, j = 0;
        u32 key, multi = 0;
-       u8 host_mask = SET_HOST_MASK(set->family);
+       u8 nets_length = NETS_LENGTH(set->family);
 
        pr_debug("test by nets\n");
-       for (; j < host_mask && h->nets[j].cidr && !multi; j++) {
+       for (; j < nets_length && h->nets[j].nets && !multi; j++) {
                type_pf_data_netmask(d, h->nets[j].cidr);
                key = HKEY(d, h->initval, t->htable_bits);
                n = hbucket(t, key);
@@ -604,7 +610,7 @@ type_pf_head(struct ip_set *set, struct sk_buff *skb)
        memsize = ahash_memsize(h, with_timeout(h->timeout)
                                        ? sizeof(struct type_pf_telem)
                                        : sizeof(struct type_pf_elem),
-                               set->family == AF_INET ? 32 : 128);
+                               NETS_LENGTH(set->family));
        read_unlock_bh(&set->lock);
 
        nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
@@ -690,7 +696,7 @@ nla_put_failure:
 }
 
 static int
-type_pf_kadt(struct ip_set *set, const struct sk_buff * skb,
+type_pf_kadt(struct ip_set *set, const struct sk_buff *skb,
             const struct xt_action_param *par,
             enum ipset_adt adt, const struct ip_set_adt_opt *opt);
 static int
@@ -783,7 +789,7 @@ type_pf_elem_tadd(struct hbucket *n, const struct type_pf_elem *value,
 
 /* Delete expired elements from the hashtable */
 static void
-type_pf_expire(struct ip_set_hash *h)
+type_pf_expire(struct ip_set_hash *h, u8 nets_length)
 {
        struct htable *t = h->table;
        struct hbucket *n;
@@ -798,7 +804,7 @@ type_pf_expire(struct ip_set_hash *h)
                        if (type_pf_data_expired(data)) {
                                pr_debug("expired %u/%u\n", i, j);
 #ifdef IP_SET_HASH_WITH_NETS
-                               del_cidr(h, CIDR(data->cidr), HOST_MASK);
+                               del_cidr(h, CIDR(data->cidr), nets_length);
 #endif
                                if (j != n->pos - 1)
                                        /* Not last one */
@@ -839,7 +845,7 @@ type_pf_tresize(struct ip_set *set, bool retried)
        if (!retried) {
                i = h->elements;
                write_lock_bh(&set->lock);
-               type_pf_expire(set->data);
+               type_pf_expire(set->data, NETS_LENGTH(set->family));
                write_unlock_bh(&set->lock);
                if (h->elements <  i)
                        return 0;
@@ -904,7 +910,7 @@ type_pf_tadd(struct ip_set *set, void *value, u32 timeout, u32 flags)
 
        if (h->elements >= h->maxelem)
                /* FIXME: when set is full, we slow down here */
-               type_pf_expire(h);
+               type_pf_expire(h, NETS_LENGTH(set->family));
        if (h->elements >= h->maxelem) {
                if (net_ratelimit())
                        pr_warning("Set %s is full, maxelem %u reached\n",
@@ -933,8 +939,8 @@ type_pf_tadd(struct ip_set *set, void *value, u32 timeout, u32 flags)
        if (j != AHASH_MAX(h) + 1) {
                data = ahash_tdata(n, j);
 #ifdef IP_SET_HASH_WITH_NETS
-               del_cidr(h, CIDR(data->cidr), HOST_MASK);
-               add_cidr(h, CIDR(d->cidr), HOST_MASK);
+               del_cidr(h, CIDR(data->cidr), NETS_LENGTH(set->family));
+               add_cidr(h, CIDR(d->cidr), NETS_LENGTH(set->family));
 #endif
                type_pf_data_copy(data, d);
                type_pf_data_timeout_set(data, timeout);
@@ -952,7 +958,7 @@ type_pf_tadd(struct ip_set *set, void *value, u32 timeout, u32 flags)
        }
 
 #ifdef IP_SET_HASH_WITH_NETS
-       add_cidr(h, CIDR(d->cidr), HOST_MASK);
+       add_cidr(h, CIDR(d->cidr), NETS_LENGTH(set->family));
 #endif
        h->elements++;
 out:
@@ -986,7 +992,7 @@ type_pf_tdel(struct ip_set *set, void *value, u32 timeout, u32 flags)
                n->pos--;
                h->elements--;
 #ifdef IP_SET_HASH_WITH_NETS
-               del_cidr(h, CIDR(d->cidr), HOST_MASK);
+               del_cidr(h, CIDR(d->cidr), NETS_LENGTH(set->family));
 #endif
                if (n->pos + AHASH_INIT_SIZE < n->size) {
                        void *tmp = kzalloc((n->size - AHASH_INIT_SIZE)
@@ -1016,9 +1022,9 @@ type_pf_ttest_cidrs(struct ip_set *set, struct type_pf_elem *d, u32 timeout)
        struct hbucket *n;
        int i, j = 0;
        u32 key, multi = 0;
-       u8 host_mask = SET_HOST_MASK(set->family);
+       u8 nets_length = NETS_LENGTH(set->family);
 
-       for (; j < host_mask && h->nets[j].cidr && !multi; j++) {
+       for (; j < nets_length && h->nets[j].nets && !multi; j++) {
                type_pf_data_netmask(d, h->nets[j].cidr);
                key = HKEY(d, h->initval, t->htable_bits);
                n = hbucket(t, key);
@@ -1147,7 +1153,7 @@ type_pf_gc(unsigned long ul_set)
 
        pr_debug("called\n");
        write_lock_bh(&set->lock);
-       type_pf_expire(h);
+       type_pf_expire(h, NETS_LENGTH(set->family));
        write_unlock_bh(&set->lock);
 
        h->gc.expires = jiffies + IPSET_GC_PERIOD(h->timeout) * HZ;
index 0bb5a6976bf380fc3e2686f1014960f1ae44eb1f..4b59a15849592333cc09f107c3407f712a5360f3 100644 (file)
@@ -4,6 +4,7 @@
 
 extern unsigned int (*nf_nat_amanda_hook)(struct sk_buff *skb,
                                          enum ip_conntrack_info ctinfo,
+                                         unsigned int protoff,
                                          unsigned int matchoff,
                                          unsigned int matchlen,
                                          struct nf_conntrack_expect *exp);
index 3e3aa08980c31f7bac7aca80ff3d473c25f4bdcc..8faf3f792d13034179d64b82e9ecc57014c6c3bb 100644 (file)
@@ -18,13 +18,17 @@ enum nf_ct_ftp_type {
 
 #define FTP_PORT       21
 
+#define NF_CT_FTP_SEQ_PICKUP   (1 << 0)
+
 #define NUM_SEQ_TO_REMEMBER 2
 /* This structure exists only once per master */
 struct nf_ct_ftp_master {
        /* Valid seq positions for cmd matching after newline */
        u_int32_t seq_aft_nl[IP_CT_DIR_MAX][NUM_SEQ_TO_REMEMBER];
        /* 0 means seq_match_aft_nl not set */
-       int seq_aft_nl_num[IP_CT_DIR_MAX];
+       u_int16_t seq_aft_nl_num[IP_CT_DIR_MAX];
+       /* pickup sequence tracking, useful for conntrackd */
+       u_int16_t flags[IP_CT_DIR_MAX];
 };
 
 struct nf_conntrack_expect;
@@ -34,6 +38,7 @@ struct nf_conntrack_expect;
 extern unsigned int (*nf_nat_ftp_hook)(struct sk_buff *skb,
                                       enum ip_conntrack_info ctinfo,
                                       enum nf_ct_ftp_type type,
+                                      unsigned int protoff,
                                       unsigned int matchoff,
                                       unsigned int matchlen,
                                       struct nf_conntrack_expect *exp);
index 26f9226ea72b18ff379e2d553ccdc00112c95617..f381020eee92835fa68ae19858e1a4ca0437138c 100644 (file)
@@ -36,12 +36,12 @@ extern void nf_conntrack_h245_expect(struct nf_conn *new,
                                     struct nf_conntrack_expect *this);
 extern void nf_conntrack_q931_expect(struct nf_conn *new,
                                     struct nf_conntrack_expect *this);
-extern int (*set_h245_addr_hook) (struct sk_buff *skb,
+extern int (*set_h245_addr_hook) (struct sk_buff *skb, unsigned int protoff,
                                  unsigned char **data, int dataoff,
                                  H245_TransportAddress *taddr,
                                  union nf_inet_addr *addr,
                                  __be16 port);
-extern int (*set_h225_addr_hook) (struct sk_buff *skb,
+extern int (*set_h225_addr_hook) (struct sk_buff *skb, unsigned int protoff,
                                  unsigned char **data, int dataoff,
                                  TransportAddress *taddr,
                                  union nf_inet_addr *addr,
@@ -49,40 +49,45 @@ extern int (*set_h225_addr_hook) (struct sk_buff *skb,
 extern int (*set_sig_addr_hook) (struct sk_buff *skb,
                                 struct nf_conn *ct,
                                 enum ip_conntrack_info ctinfo,
-                                unsigned char **data,
+                                unsigned int protoff, unsigned char **data,
                                 TransportAddress *taddr, int count);
 extern int (*set_ras_addr_hook) (struct sk_buff *skb,
                                 struct nf_conn *ct,
                                 enum ip_conntrack_info ctinfo,
-                                unsigned char **data,
+                                unsigned int protoff, unsigned char **data,
                                 TransportAddress *taddr, int count);
 extern int (*nat_rtp_rtcp_hook) (struct sk_buff *skb,
                                 struct nf_conn *ct,
                                 enum ip_conntrack_info ctinfo,
-                                unsigned char **data, int dataoff,
+                                unsigned int protoff, unsigned char **data,
+                                int dataoff,
                                 H245_TransportAddress *taddr,
                                 __be16 port, __be16 rtp_port,
                                 struct nf_conntrack_expect *rtp_exp,
                                 struct nf_conntrack_expect *rtcp_exp);
 extern int (*nat_t120_hook) (struct sk_buff *skb, struct nf_conn *ct,
                             enum ip_conntrack_info ctinfo,
+                            unsigned int protoff,
                             unsigned char **data, int dataoff,
                             H245_TransportAddress *taddr, __be16 port,
                             struct nf_conntrack_expect *exp);
 extern int (*nat_h245_hook) (struct sk_buff *skb, struct nf_conn *ct,
                             enum ip_conntrack_info ctinfo,
+                            unsigned int protoff,
                             unsigned char **data, int dataoff,
                             TransportAddress *taddr, __be16 port,
                             struct nf_conntrack_expect *exp);
 extern int (*nat_callforwarding_hook) (struct sk_buff *skb,
                                       struct nf_conn *ct,
                                       enum ip_conntrack_info ctinfo,
+                                      unsigned int protoff,
                                       unsigned char **data, int dataoff,
                                       TransportAddress *taddr,
                                       __be16 port,
                                       struct nf_conntrack_expect *exp);
 extern int (*nat_q931_hook) (struct sk_buff *skb, struct nf_conn *ct,
                             enum ip_conntrack_info ctinfo,
+                            unsigned int protoff,
                             unsigned char **data, TransportAddress *taddr,
                             int idx, __be16 port,
                             struct nf_conntrack_expect *exp);
index 36282bf71b63f3afb68fe11f467ed4ff95d7c5fd..4bb9bae671763d211a3f7efe9115db4965e5c85b 100644 (file)
@@ -7,6 +7,7 @@
 
 extern unsigned int (*nf_nat_irc_hook)(struct sk_buff *skb,
                                       enum ip_conntrack_info ctinfo,
+                                      unsigned int protoff,
                                       unsigned int matchoff,
                                       unsigned int matchlen,
                                       struct nf_conntrack_expect *exp);
index 3bbde0c3a8a62cae3a01bb060cf0ffddbf93287f..2ab2830316b730d3ca9f29572500b43371aad641 100644 (file)
@@ -303,12 +303,14 @@ struct nf_conntrack_expect;
 extern int
 (*nf_nat_pptp_hook_outbound)(struct sk_buff *skb,
                             struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+                            unsigned int protoff,
                             struct PptpControlHeader *ctlh,
                             union pptp_ctrl_union *pptpReq);
 
 extern int
 (*nf_nat_pptp_hook_inbound)(struct sk_buff *skb,
                            struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+                           unsigned int protoff,
                            struct PptpControlHeader *ctlh,
                            union pptp_ctrl_union *pptpReq);
 
index 89f2a627f3f086febbcf1a93ab0fe9f42b6eac30..387bdd02945d13555d79e0becd9cb0bc1fb5737f 100644 (file)
@@ -37,10 +37,12 @@ struct sdp_media_type {
 struct sip_handler {
        const char      *method;
        unsigned int    len;
-       int             (*request)(struct sk_buff *skb, unsigned int dataoff,
+       int             (*request)(struct sk_buff *skb, unsigned int protoff,
+                                  unsigned int dataoff,
                                   const char **dptr, unsigned int *datalen,
                                   unsigned int cseq);
-       int             (*response)(struct sk_buff *skb, unsigned int dataoff,
+       int             (*response)(struct sk_buff *skb, unsigned int protoff,
+                                   unsigned int dataoff,
                                    const char **dptr, unsigned int *datalen,
                                    unsigned int cseq, unsigned int code);
 };
@@ -97,19 +99,20 @@ enum sip_header_types {
 enum sdp_header_types {
        SDP_HDR_UNSPEC,
        SDP_HDR_VERSION,
-       SDP_HDR_OWNER_IP4,
-       SDP_HDR_CONNECTION_IP4,
-       SDP_HDR_OWNER_IP6,
-       SDP_HDR_CONNECTION_IP6,
+       SDP_HDR_OWNER,
+       SDP_HDR_CONNECTION,
        SDP_HDR_MEDIA,
 };
 
 extern unsigned int (*nf_nat_sip_hook)(struct sk_buff *skb,
+                                      unsigned int protoff,
                                       unsigned int dataoff,
                                       const char **dptr,
                                       unsigned int *datalen);
-extern void (*nf_nat_sip_seq_adjust_hook)(struct sk_buff *skb, s16 off);
+extern void (*nf_nat_sip_seq_adjust_hook)(struct sk_buff *skb,
+                                         unsigned int protoff, s16 off);
 extern unsigned int (*nf_nat_sip_expect_hook)(struct sk_buff *skb,
+                                             unsigned int protoff,
                                              unsigned int dataoff,
                                              const char **dptr,
                                              unsigned int *datalen,
@@ -117,6 +120,7 @@ extern unsigned int (*nf_nat_sip_expect_hook)(struct sk_buff *skb,
                                              unsigned int matchoff,
                                              unsigned int matchlen);
 extern unsigned int (*nf_nat_sdp_addr_hook)(struct sk_buff *skb,
+                                           unsigned int protoff,
                                            unsigned int dataoff,
                                            const char **dptr,
                                            unsigned int *datalen,
@@ -125,6 +129,7 @@ extern unsigned int (*nf_nat_sdp_addr_hook)(struct sk_buff *skb,
                                            enum sdp_header_types term,
                                            const union nf_inet_addr *addr);
 extern unsigned int (*nf_nat_sdp_port_hook)(struct sk_buff *skb,
+                                           unsigned int protoff,
                                            unsigned int dataoff,
                                            const char **dptr,
                                            unsigned int *datalen,
@@ -132,12 +137,14 @@ extern unsigned int (*nf_nat_sdp_port_hook)(struct sk_buff *skb,
                                            unsigned int matchlen,
                                            u_int16_t port);
 extern unsigned int (*nf_nat_sdp_session_hook)(struct sk_buff *skb,
+                                              unsigned int protoff,
                                               unsigned int dataoff,
                                               const char **dptr,
                                               unsigned int *datalen,
                                               unsigned int sdpoff,
                                               const union nf_inet_addr *addr);
 extern unsigned int (*nf_nat_sdp_media_hook)(struct sk_buff *skb,
+                                            unsigned int protoff,
                                             unsigned int dataoff,
                                             const char **dptr,
                                             unsigned int *datalen,
index 8df2d13730b2b75fff7262c0cf144e3a13c5f84f..bf0cc373ffb6ae02487e74ba478c024c3be3b477 100644 (file)
@@ -22,4 +22,12 @@ struct nf_nat_ipv4_multi_range_compat {
        struct nf_nat_ipv4_range        range[1];
 };
 
+struct nf_nat_range {
+       unsigned int                    flags;
+       union nf_inet_addr              min_addr;
+       union nf_inet_addr              max_addr;
+       union nf_conntrack_man_proto    min_proto;
+       union nf_conntrack_man_proto    max_proto;
+};
+
 #endif /* _NETFILTER_NF_NAT_H */
index f649f7423ca2d97210b55a8756e33a7a7fc3eeb7..43bfe3e1685b21d7dcf5bbb254a269ec941e5523 100644 (file)
@@ -142,9 +142,13 @@ enum ctattr_tstamp {
 
 enum ctattr_nat {
        CTA_NAT_UNSPEC,
-       CTA_NAT_MINIP,
-       CTA_NAT_MAXIP,
+       CTA_NAT_V4_MINIP,
+#define CTA_NAT_MINIP CTA_NAT_V4_MINIP
+       CTA_NAT_V4_MAXIP,
+#define CTA_NAT_MAXIP CTA_NAT_V4_MAXIP
        CTA_NAT_PROTO,
+       CTA_NAT_V6_MINIP,
+       CTA_NAT_V6_MAXIP,
        __CTA_NAT_MAX
 };
 #define CTA_NAT_MAX (__CTA_NAT_MAX - 1)
index 3b1c1360aedfffb906c0b90c52dfc86a5a26db25..70ec8c2bc11a6c97cdf1866aa5e9b0743938feff 100644 (file)
@@ -44,6 +44,7 @@ enum nfqnl_attr_type {
        NFQA_PAYLOAD,                   /* opaque data payload */
        NFQA_CT,                        /* nf_conntrack_netlink.h */
        NFQA_CT_INFO,                   /* enum ip_conntrack_info */
+       NFQA_CAP_LEN,                   /* __u32 length of captured packet */
 
        __NFQA_MAX
 };
index 7c37fac576c440d34c650dbc352bcceded891808..0958860193963e3b4290c0e053932edc6274ea65 100644 (file)
@@ -17,6 +17,9 @@ enum {
        /* Match against local time (instead of UTC) */
        XT_TIME_LOCAL_TZ = 1 << 0,
 
+       /* treat timestart > timestop (e.g. 23:00-01:00) as single period */
+       XT_TIME_CONTIGUOUS = 1 << 1,
+
        /* Shortcuts */
        XT_TIME_ALL_MONTHDAYS = 0xFFFFFFFE,
        XT_TIME_ALL_WEEKDAYS  = 0xFE,
@@ -24,4 +27,6 @@ enum {
        XT_TIME_MAX_DAYTIME   = 24 * 60 * 60 - 1,
 };
 
+#define XT_TIME_ALL_FLAGS (XT_TIME_LOCAL_TZ|XT_TIME_CONTIGUOUS)
+
 #endif /* _XT_TIME_H */
index e2b12801378d8bd2e239a9516aea967328fe7ca4..b962dfc695ae0e5e5787627468bdff6911dd146d 100644 (file)
@@ -79,7 +79,6 @@ enum nf_ip_hook_priorities {
 
 #ifdef __KERNEL__
 extern int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type);
-extern int ip_xfrm_me_harder(struct sk_buff *skb);
 extern __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
                                   unsigned int dataoff, u_int8_t protocol);
 #endif /*__KERNEL__*/
index bd095bc075e9f832cc8ac659034e6040e17014f7..b88c0058bf73ad02a02229606cca38e6611652dc 100644 (file)
@@ -1,6 +1,7 @@
 header-y += ip6_tables.h
 header-y += ip6t_HL.h
 header-y += ip6t_LOG.h
+header-y += ip6t_NPT.h
 header-y += ip6t_REJECT.h
 header-y += ip6t_ah.h
 header-y += ip6t_frag.h
diff --git a/include/linux/netfilter_ipv6/ip6t_NPT.h b/include/linux/netfilter_ipv6/ip6t_NPT.h
new file mode 100644 (file)
index 0000000..f763355
--- /dev/null
@@ -0,0 +1,16 @@
+#ifndef __NETFILTER_IP6T_NPT
+#define __NETFILTER_IP6T_NPT
+
+#include <linux/types.h>
+#include <linux/netfilter.h>
+
+struct ip6t_npt_tginfo {
+       union nf_inet_addr      src_pfx;
+       union nf_inet_addr      dst_pfx;
+       __u8                    src_pfx_len;
+       __u8                    dst_pfx_len;
+       /* Used internally by the kernel */
+       __sum16                 adjustment;
+};
+
+#endif /* __NETFILTER_IP6T_NPT */
index f74dd133788f7c45bbd60175a321d0c26560630c..f80c56ac4d82a822efe44f0dbbd87cfba819dd92 100644 (file)
@@ -153,6 +153,8 @@ struct nlattr {
 
 #include <linux/capability.h>
 #include <linux/skbuff.h>
+#include <linux/export.h>
+#include <net/scm.h>
 
 struct net;
 
@@ -162,9 +164,10 @@ static inline struct nlmsghdr *nlmsg_hdr(const struct sk_buff *skb)
 }
 
 struct netlink_skb_parms {
-       struct ucred            creds;          /* Skb credentials      */
-       __u32                   pid;
+       struct scm_creds        creds;          /* Skb credentials      */
+       __u32                   portid;
        __u32                   dst_group;
+       struct sock             *ssk;
 };
 
 #define NETLINK_CB(skb)                (*(struct netlink_skb_parms*)&((skb)->cb))
@@ -174,17 +177,27 @@ struct netlink_skb_parms {
 extern void netlink_table_grab(void);
 extern void netlink_table_ungrab(void);
 
+#define NL_CFG_F_NONROOT_RECV  (1 << 0)
+#define NL_CFG_F_NONROOT_SEND  (1 << 1)
+
 /* optional Netlink kernel configuration parameters */
 struct netlink_kernel_cfg {
        unsigned int    groups;
+       unsigned int    flags;
        void            (*input)(struct sk_buff *skb);
        struct mutex    *cb_mutex;
        void            (*bind)(int group);
 };
 
-extern struct sock *netlink_kernel_create(struct net *net, int unit,
-                                         struct module *module,
-                                         struct netlink_kernel_cfg *cfg);
+extern struct sock *__netlink_kernel_create(struct net *net, int unit,
+                                           struct module *module,
+                                           struct netlink_kernel_cfg *cfg);
+static inline struct sock *
+netlink_kernel_create(struct net *net, int unit, struct netlink_kernel_cfg *cfg)
+{
+       return __netlink_kernel_create(net, unit, THIS_MODULE, cfg);
+}
+
 extern void netlink_kernel_release(struct sock *sk);
 extern int __netlink_change_ngroups(struct sock *sk, unsigned int groups);
 extern int netlink_change_ngroups(struct sock *sk, unsigned int groups);
@@ -192,14 +205,14 @@ extern void __netlink_clear_multicast_users(struct sock *sk, unsigned int group)
 extern void netlink_clear_multicast_users(struct sock *sk, unsigned int group);
 extern void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err);
 extern int netlink_has_listeners(struct sock *sk, unsigned int group);
-extern int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 pid, int nonblock);
-extern int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 pid,
+extern int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 portid, int nonblock);
+extern int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 portid,
                             __u32 group, gfp_t allocation);
 extern int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb,
-       __u32 pid, __u32 group, gfp_t allocation,
+       __u32 portid, __u32 group, gfp_t allocation,
        int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data),
        void *filter_data);
-extern int netlink_set_err(struct sock *ssk, __u32 pid, __u32 group, int code);
+extern int netlink_set_err(struct sock *ssk, __u32 portid, __u32 group, int code);
 extern int netlink_register_notifier(struct notifier_block *nb);
 extern int netlink_unregister_notifier(struct notifier_block *nb);
 
@@ -240,12 +253,12 @@ struct netlink_callback {
 
 struct netlink_notify {
        struct net *net;
-       int pid;
+       int portid;
        int protocol;
 };
 
 struct nlmsghdr *
-__nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, int type, int len, int flags);
+__nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int flags);
 
 struct netlink_dump_control {
        int (*dump)(struct sk_buff *skb, struct netlink_callback *);
@@ -258,11 +271,6 @@ extern int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
                              const struct nlmsghdr *nlh,
                              struct netlink_dump_control *control);
 
-
-#define NL_NONROOT_RECV 0x1
-#define NL_NONROOT_SEND 0x2
-extern void netlink_set_nonroot(int protocol, unsigned flag);
-
 #endif /* __KERNEL__ */
 
 #endif /* __LINUX_NETLINK_H */
index 6189f27e305b551a3d47bbbc8d9935041917e246..d908d17da56d677e62a07011c852cc38cdf824cf 100644 (file)
@@ -183,4 +183,15 @@ struct sockaddr_nfc_llcp {
 
 #define NFC_HEADER_SIZE 1
 
+/**
+ * Pseudo-header info for raw socket packets
+ * First byte is the adapter index
+ * Second byte contains flags
+ *  - 0x01 - Direction (0=RX, 1=TX)
+ *  - 0x02-0x80 - Reserved
+ **/
+#define NFC_LLCP_RAW_HEADER_SIZE       2
+#define NFC_LLCP_DIRECTION_RX          0x00
+#define NFC_LLCP_DIRECTION_TX          0x01
+
 #endif /*__LINUX_NFC_H */
index 2f38788064032c9ca40bb59208ac3a0519acc4ec..7df9b500c80493e944591a2192b07253f1b42669 100644 (file)
  *     %NL80211_ATTR_IFINDEX is now on %NL80211_ATTR_WIPHY_FREQ with
  *     %NL80211_ATTR_WIPHY_CHANNEL_TYPE.
  *
+ * @NL80211_CMD_START_P2P_DEVICE: Start the given P2P Device, identified by
+ *     its %NL80211_ATTR_WDEV identifier. It must have been created with
+ *     %NL80211_CMD_NEW_INTERFACE previously. After it has been started, the
+ *     P2P Device can be used for P2P operations, e.g. remain-on-channel and
+ *     public action frame TX.
+ * @NL80211_CMD_STOP_P2P_DEVICE: Stop the given P2P Device, identified by
+ *     its %NL80211_ATTR_WDEV identifier.
+ *
+ * @NL80211_CMD_CONN_FAILED: connection request to an AP failed; used to
+ *     notify userspace that AP has rejected the connection request from a
+ *     station, due to particular reason. %NL80211_ATTR_CONN_FAILED_REASON
+ *     is used for this.
+ *
  * @NL80211_CMD_MAX: highest used command number
  * @__NL80211_CMD_AFTER_LAST: internal use
  */
@@ -708,6 +721,11 @@ enum nl80211_commands {
 
        NL80211_CMD_CH_SWITCH_NOTIFY,
 
+       NL80211_CMD_START_P2P_DEVICE,
+       NL80211_CMD_STOP_P2P_DEVICE,
+
+       NL80211_CMD_CONN_FAILED,
+
        /* add new commands above here */
 
        /* used to define NL80211_CMD_MAX below */
@@ -1251,6 +1269,10 @@ enum nl80211_commands {
  *     was used to provide the hint. For the different types of
  *     allowed user regulatory hints see nl80211_user_reg_hint_type.
  *
+ * @NL80211_ATTR_CONN_FAILED_REASON: The reason for which AP has rejected
+ *     the connection request from a station. nl80211_connect_failed_reason
+ *     enum has different reasons of connection failure.
+ *
  * @NL80211_ATTR_MAX: highest attribute number currently defined
  * @__NL80211_ATTR_AFTER_LAST: internal use
  */
@@ -1506,6 +1528,8 @@ enum nl80211_attrs {
 
        NL80211_ATTR_USER_REG_HINT_TYPE,
 
+       NL80211_ATTR_CONN_FAILED_REASON,
+
        /* add attributes here, update the policy in nl80211.c */
 
        __NL80211_ATTR_AFTER_LAST,
@@ -1575,6 +1599,10 @@ enum nl80211_attrs {
  * @NL80211_IFTYPE_MESH_POINT: mesh point
  * @NL80211_IFTYPE_P2P_CLIENT: P2P client
  * @NL80211_IFTYPE_P2P_GO: P2P group owner
+ * @NL80211_IFTYPE_P2P_DEVICE: P2P device interface type, this is not a netdev
+ *     and therefore can't be created in the normal ways, use the
+ *     %NL80211_CMD_START_P2P_DEVICE and %NL80211_CMD_STOP_P2P_DEVICE
+ *     commands to create and destroy one
  * @NL80211_IFTYPE_MAX: highest interface type number currently defined
  * @NUM_NL80211_IFTYPES: number of defined interface types
  *
@@ -1593,6 +1621,7 @@ enum nl80211_iftype {
        NL80211_IFTYPE_MESH_POINT,
        NL80211_IFTYPE_P2P_CLIENT,
        NL80211_IFTYPE_P2P_GO,
+       NL80211_IFTYPE_P2P_DEVICE,
 
        /* keep last */
        NUM_NL80211_IFTYPES,
@@ -2994,12 +3023,18 @@ enum nl80211_ap_sme_features {
  * @NL80211_FEATURE_CELL_BASE_REG_HINTS: This driver has been tested
  *     to work properly to suppport receiving regulatory hints from
  *     cellular base stations.
+ * @NL80211_FEATURE_P2P_DEVICE_NEEDS_CHANNEL: If this is set, an active
+ *     P2P Device (%NL80211_IFTYPE_P2P_DEVICE) requires its own channel
+ *     in the interface combinations, even when it's only used for scan
+ *     and remain-on-channel. This could be due to, for example, the
+ *     remain-on-channel implementation requiring a channel context.
  */
 enum nl80211_feature_flags {
-       NL80211_FEATURE_SK_TX_STATUS    = 1 << 0,
-       NL80211_FEATURE_HT_IBSS         = 1 << 1,
-       NL80211_FEATURE_INACTIVITY_TIMER = 1 << 2,
-       NL80211_FEATURE_CELL_BASE_REG_HINTS = 1 << 3,
+       NL80211_FEATURE_SK_TX_STATUS                    = 1 << 0,
+       NL80211_FEATURE_HT_IBSS                         = 1 << 1,
+       NL80211_FEATURE_INACTIVITY_TIMER                = 1 << 2,
+       NL80211_FEATURE_CELL_BASE_REG_HINTS             = 1 << 3,
+       NL80211_FEATURE_P2P_DEVICE_NEEDS_CHANNEL        = 1 << 4,
 };
 
 /**
@@ -3023,4 +3058,15 @@ enum nl80211_probe_resp_offload_support_attr {
        NL80211_PROBE_RESP_OFFLOAD_SUPPORT_80211U =     1<<3,
 };
 
+/**
+ * enum nl80211_connect_failed_reason - connection request failed reasons
+ * @NL80211_CONN_FAIL_MAX_CLIENTS: Maximum number of clients that can be
+ *     handled by the AP is reached.
+ * @NL80211_CONN_FAIL_BLOCKED_CLIENT: Client's MAC is in the AP's blocklist.
+ */
+enum nl80211_connect_failed_reason {
+       NL80211_CONN_FAIL_MAX_CLIENTS,
+       NL80211_CONN_FAIL_BLOCKED_CLIENT,
+};
+
 #endif /* __LINUX_NL80211_H */
index 1b1163225f3b8ab1eafacbb21e83ab8f86d50d42..f594c528842fece04fc6931d627f0b34bf6ca931 100644 (file)
@@ -193,6 +193,8 @@ extern struct device_node *of_get_next_child(const struct device_node *node,
 extern struct device_node *of_get_next_available_child(
        const struct device_node *node, struct device_node *prev);
 
+extern struct device_node *of_get_child_by_name(const struct device_node *node,
+                                       const char *name);
 #define for_each_child_of_node(parent, child) \
        for (child = of_get_next_child(parent, NULL); child != NULL; \
             child = of_get_next_child(parent, child))
index 01b925ad8d78c62b7fad6ac84dfb179d9bf3a763..c3cdc1025c30694eee350d9cd62609e4db40de02 100644 (file)
@@ -6,6 +6,7 @@
 
 #ifdef CONFIG_OF_ADDRESS
 extern u64 of_translate_address(struct device_node *np, const __be32 *addr);
+extern bool of_can_translate_address(struct device_node *dev);
 extern int of_address_to_resource(struct device_node *dev, int index,
                                  struct resource *r);
 extern struct device_node *of_find_matching_node_by_address(
index 912c27a0f7eeede26c7659956b11c70f7255f78e..6ef49b803efb1fee5fdb9bdc02d6b34a4d445a89 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/phy.h>
 #include <linux/of.h>
 
+#ifdef CONFIG_OF
 extern int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np);
 extern struct phy_device *of_phy_find_device(struct device_node *phy_np);
 extern struct phy_device *of_phy_connect(struct net_device *dev,
@@ -24,4 +25,36 @@ extern struct phy_device *of_phy_connect_fixed_link(struct net_device *dev,
 
 extern struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np);
 
+#else /* CONFIG_OF */
+int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
+{
+       return -ENOSYS;
+}
+
+struct phy_device *of_phy_find_device(struct device_node *phy_np)
+{
+       return NULL;
+}
+
+struct phy_device *of_phy_connect(struct net_device *dev,
+                                        struct device_node *phy_np,
+                                        void (*hndlr)(struct net_device *),
+                                        u32 flags, phy_interface_t iface)
+{
+       return NULL;
+}
+
+struct phy_device *of_phy_connect_fixed_link(struct net_device *dev,
+                                        void (*hndlr)(struct net_device *),
+                                        phy_interface_t iface)
+{
+       return NULL;
+}
+
+struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np)
+{
+       return NULL;
+}
+#endif /* CONFIG_OF */
+
 #endif /* __LINUX_OF_MDIO_H */
index 2a4e5faee904fcfd85fd54eba39901978bdeecf1..214e0ebcb84d85a80b31ee98bf4647eb25fd538c 100644 (file)
@@ -48,6 +48,14 @@ int opp_disable(struct device *dev, unsigned long freq);
 
 struct srcu_notifier_head *opp_get_notifier(struct device *dev);
 
+#ifdef CONFIG_OF
+int of_init_opp_table(struct device *dev);
+#else
+static inline int of_init_opp_table(struct device *dev)
+{
+       return -EINVAL;
+}
+#endif /* CONFIG_OF */
 #else
 static inline unsigned long opp_get_voltage(struct opp *opp)
 {
diff --git a/include/linux/packet_diag.h b/include/linux/packet_diag.h
new file mode 100644 (file)
index 0000000..93f5fa9
--- /dev/null
@@ -0,0 +1,72 @@
+#ifndef __PACKET_DIAG_H__
+#define __PACKET_DIAG_H__
+
+#include <linux/types.h>
+
+struct packet_diag_req {
+       __u8    sdiag_family;
+       __u8    sdiag_protocol;
+       __u16   pad;
+       __u32   pdiag_ino;
+       __u32   pdiag_show;
+       __u32   pdiag_cookie[2];
+};
+
+#define PACKET_SHOW_INFO       0x00000001 /* Basic packet_sk information */
+#define PACKET_SHOW_MCLIST     0x00000002 /* A set of packet_diag_mclist-s */
+#define PACKET_SHOW_RING_CFG   0x00000004 /* Rings configuration parameters */
+#define PACKET_SHOW_FANOUT     0x00000008
+
+struct packet_diag_msg {
+       __u8    pdiag_family;
+       __u8    pdiag_type;
+       __u16   pdiag_num;
+
+       __u32   pdiag_ino;
+       __u32   pdiag_cookie[2];
+};
+
+enum {
+       PACKET_DIAG_INFO,
+       PACKET_DIAG_MCLIST,
+       PACKET_DIAG_RX_RING,
+       PACKET_DIAG_TX_RING,
+       PACKET_DIAG_FANOUT,
+
+       PACKET_DIAG_MAX,
+};
+
+struct packet_diag_info {
+       __u32   pdi_index;
+       __u32   pdi_version;
+       __u32   pdi_reserve;
+       __u32   pdi_copy_thresh;
+       __u32   pdi_tstamp;
+       __u32   pdi_flags;
+
+#define PDI_RUNNING    0x1
+#define PDI_AUXDATA    0x2
+#define PDI_ORIGDEV    0x4
+#define PDI_VNETHDR    0x8
+#define PDI_LOSS       0x10
+};
+
+struct packet_diag_mclist {
+       __u32   pdmc_index;
+       __u32   pdmc_count;
+       __u16   pdmc_type;
+       __u16   pdmc_alen;
+       __u8    pdmc_addr[MAX_ADDR_LEN];
+};
+
+struct packet_diag_ring {
+       __u32   pdr_block_size;
+       __u32   pdr_block_nr;
+       __u32   pdr_frame_size;
+       __u32   pdr_frame_nr;
+       __u32   pdr_retire_tmo;
+       __u32   pdr_sizeof_priv;
+       __u32   pdr_features;
+};
+
+#endif
index 8d3c427193878dd3f5073ad6e4e0900f11bcc42b..33880f6f4e511e2951162ee05da0625bbce8dbee 100644 (file)
 #define PCI_DEVICE_ID_HP_CISSD         0x3238
 #define PCI_DEVICE_ID_HP_CISSE         0x323a
 #define PCI_DEVICE_ID_HP_CISSF         0x323b
+#define PCI_DEVICE_ID_HP_CISSH         0x323c
 #define PCI_DEVICE_ID_HP_ZX2_IOC       0x4031
 
 #define PCI_VENDOR_ID_PCTECH           0x1042
index cbb07f85079161eb5adc4b87b7e5e5951a32892b..4aad3cea69ae3034f451a0d67b406483d5d7e9e9 100644 (file)
@@ -140,7 +140,7 @@ static inline struct pinctrl * __must_check devm_pinctrl_get_select(
        s = pinctrl_lookup_state(p, name);
        if (IS_ERR(s)) {
                devm_pinctrl_put(p);
-               return ERR_PTR(PTR_ERR(s));
+               return ERR_CAST(s);
        }
 
        ret = pinctrl_select_state(p, s);
index 634608dc6c89fe8c59aeb70e7b6c473a4869085f..b5919f8e6d1ad87c618047cdcc7aa7a06324b354 100644 (file)
@@ -6,13 +6,18 @@
  * @PINCTRL_STATE_DEFAULT: the state the pinctrl handle shall be put
  *     into as default, usually this means the pins are up and ready to
  *     be used by the device driver. This state is commonly used by
- *     hogs to configure muxing and pins at boot.
+ *     hogs to configure muxing and pins at boot, and also as a state
+ *     to go into when returning from sleep and idle in
+ *     .pm_runtime_resume() or ordinary .resume() for example.
  * @PINCTRL_STATE_IDLE: the state the pinctrl handle shall be put into
- *     when the pins are idle. Could typically be set from a
- *     pm_runtime_suspend() operation.
+ *     when the pins are idle. This is a state where the system is relaxed
+ *     but not fully sleeping - some power may be on but clocks gated for
+ *     example. Could typically be set from a pm_runtime_suspend() or
+ *     pm_runtime_idle() operation.
  * @PINCTRL_STATE_SLEEP: the state the pinctrl handle shall be put into
- *     when the pins are sleeping. Could typically be set from a
- *     common suspend() function.
+ *     when the pins are sleeping. This is a state where the system is in
+ *     its lowest sleep state. Could typically be set from an
+ *     ordinary .suspend() function.
  */
 #define PINCTRL_STATE_DEFAULT "default"
 #define PINCTRL_STATE_IDLE "idle"
diff --git a/include/linux/platform_data/sc18is602.h b/include/linux/platform_data/sc18is602.h
new file mode 100644 (file)
index 0000000..997b066
--- /dev/null
@@ -0,0 +1,19 @@
+/*
+ * Platform data for NXP SC18IS602/603
+ *
+ * Copyright (C) 2012 Guenter Roeck <linux@roeck-us.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * For further information, see the Documentation/spi/sc18is602 file.
+ */
+
+/**
+ * struct sc18is602_platform_data - sc18is602 info
+ * @clock_frequency:           SC18IS603 oscillator frequency
+ */
+struct sc18is602_platform_data {
+       u32 clock_frequency;
+};
index 88f034a23f2c6a04d56c9cc07743d1e7148824b5..007e687c4f6915866b9d551bbc37cd14747f28ac 100644 (file)
@@ -510,12 +510,14 @@ struct dev_pm_info {
        bool                    is_prepared:1;  /* Owned by the PM core */
        bool                    is_suspended:1; /* Ditto */
        bool                    ignore_children:1;
+       bool                    early_init:1;   /* Owned by the PM core */
        spinlock_t              lock;
 #ifdef CONFIG_PM_SLEEP
        struct list_head        entry;
        struct completion       completion;
        struct wakeup_source    *wakeup;
        bool                    wakeup_path:1;
+       bool                    syscore:1;
 #else
        unsigned int            should_wakeup:1;
 #endif
index a7d6172922d405c950c701bf4a7d2d21bb4e0c77..7c1d252b20c08de0ec725976c67b6fc13326e2e0 100644 (file)
@@ -114,7 +114,6 @@ struct generic_pm_domain_data {
        struct mutex lock;
        unsigned int refcount;
        bool need_restore;
-       bool always_on;
 };
 
 #ifdef CONFIG_PM_GENERIC_DOMAINS
@@ -139,36 +138,32 @@ extern int __pm_genpd_of_add_device(struct device_node *genpd_node,
                                    struct device *dev,
                                    struct gpd_timing_data *td);
 
-static inline int pm_genpd_add_device(struct generic_pm_domain *genpd,
-                                     struct device *dev)
-{
-       return __pm_genpd_add_device(genpd, dev, NULL);
-}
-
-static inline int pm_genpd_of_add_device(struct device_node *genpd_node,
-                                        struct device *dev)
-{
-       return __pm_genpd_of_add_device(genpd_node, dev, NULL);
-}
+extern int __pm_genpd_name_add_device(const char *domain_name,
+                                     struct device *dev,
+                                     struct gpd_timing_data *td);
 
 extern int pm_genpd_remove_device(struct generic_pm_domain *genpd,
                                  struct device *dev);
-extern void pm_genpd_dev_always_on(struct device *dev, bool val);
 extern void pm_genpd_dev_need_restore(struct device *dev, bool val);
 extern int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
                                  struct generic_pm_domain *new_subdomain);
+extern int pm_genpd_add_subdomain_names(const char *master_name,
+                                       const char *subdomain_name);
 extern int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
                                     struct generic_pm_domain *target);
 extern int pm_genpd_add_callbacks(struct device *dev,
                                  struct gpd_dev_ops *ops,
                                  struct gpd_timing_data *td);
 extern int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td);
-extern int genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state);
-extern int genpd_detach_cpuidle(struct generic_pm_domain *genpd);
+extern int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state);
+extern int pm_genpd_name_attach_cpuidle(const char *name, int state);
+extern int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd);
+extern int pm_genpd_name_detach_cpuidle(const char *name);
 extern void pm_genpd_init(struct generic_pm_domain *genpd,
                          struct dev_power_governor *gov, bool is_off);
 
 extern int pm_genpd_poweron(struct generic_pm_domain *genpd);
+extern int pm_genpd_name_poweron(const char *domain_name);
 
 extern bool default_stop_ok(struct device *dev);
 
@@ -189,8 +184,15 @@ static inline int __pm_genpd_add_device(struct generic_pm_domain *genpd,
 {
        return -ENOSYS;
 }
-static inline int pm_genpd_add_device(struct generic_pm_domain *genpd,
-                                     struct device *dev)
+static inline int __pm_genpd_of_add_device(struct device_node *genpd_node,
+                                          struct device *dev,
+                                          struct gpd_timing_data *td)
+{
+       return -ENOSYS;
+}
+static inline int __pm_genpd_name_add_device(const char *domain_name,
+                                            struct device *dev,
+                                            struct gpd_timing_data *td)
 {
        return -ENOSYS;
 }
@@ -199,13 +201,17 @@ static inline int pm_genpd_remove_device(struct generic_pm_domain *genpd,
 {
        return -ENOSYS;
 }
-static inline void pm_genpd_dev_always_on(struct device *dev, bool val) {}
 static inline void pm_genpd_dev_need_restore(struct device *dev, bool val) {}
 static inline int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
                                         struct generic_pm_domain *new_sd)
 {
        return -ENOSYS;
 }
+static inline int pm_genpd_add_subdomain_names(const char *master_name,
+                                              const char *subdomain_name)
+{
+       return -ENOSYS;
+}
 static inline int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
                                            struct generic_pm_domain *target)
 {
@@ -221,11 +227,19 @@ static inline int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
 {
        return -ENOSYS;
 }
-static inline int genpd_attach_cpuidle(struct generic_pm_domain *genpd, int st)
+static inline int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int st)
 {
        return -ENOSYS;
 }
-static inline int genpd_detach_cpuidle(struct generic_pm_domain *genpd)
+static inline int pm_genpd_name_attach_cpuidle(const char *name, int state)
+{
+       return -ENOSYS;
+}
+static inline int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
+{
+       return -ENOSYS;
+}
+static inline int pm_genpd_name_detach_cpuidle(const char *name)
 {
        return -ENOSYS;
 }
@@ -237,6 +251,10 @@ static inline int pm_genpd_poweron(struct generic_pm_domain *genpd)
 {
        return -ENOSYS;
 }
+static inline int pm_genpd_name_poweron(const char *domain_name)
+{
+       return -ENOSYS;
+}
 static inline bool default_stop_ok(struct device *dev)
 {
        return false;
@@ -245,6 +263,24 @@ static inline bool default_stop_ok(struct device *dev)
 #define pm_domain_always_on_gov NULL
 #endif
 
+static inline int pm_genpd_add_device(struct generic_pm_domain *genpd,
+                                     struct device *dev)
+{
+       return __pm_genpd_add_device(genpd, dev, NULL);
+}
+
+static inline int pm_genpd_of_add_device(struct device_node *genpd_node,
+                                        struct device *dev)
+{
+       return __pm_genpd_of_add_device(genpd_node, dev, NULL);
+}
+
+static inline int pm_genpd_name_add_device(const char *domain_name,
+                                          struct device *dev)
+{
+       return __pm_genpd_name_add_device(domain_name, dev, NULL);
+}
+
 static inline int pm_genpd_remove_callbacks(struct device *dev)
 {
        return __pm_genpd_remove_callbacks(dev, true);
@@ -258,4 +294,20 @@ static inline void genpd_queue_power_off_work(struct generic_pm_domain *gpd) {}
 static inline void pm_genpd_poweroff_unused(void) {}
 #endif
 
+#ifdef CONFIG_PM_GENERIC_DOMAINS_SLEEP
+extern void pm_genpd_syscore_switch(struct device *dev, bool suspend);
+#else
+static inline void pm_genpd_syscore_switch(struct device *dev, bool suspend) {}
+#endif
+
+static inline void pm_genpd_syscore_poweroff(struct device *dev)
+{
+       pm_genpd_syscore_switch(dev, true);
+}
+
+static inline void pm_genpd_syscore_poweron(struct device *dev)
+{
+       pm_genpd_syscore_switch(dev, false);
+}
+
 #endif /* _LINUX_PM_DOMAIN_H */
index 11bad91c443391d82521cfa1a48153d81e0a7034..7931efe7117553d00a920cfd4aef757f72578658 100644 (file)
 struct posix_acl_entry {
        short                   e_tag;
        unsigned short          e_perm;
-       unsigned int            e_id;
+       union {
+               kuid_t          e_uid;
+               kgid_t          e_gid;
+#ifndef CONFIG_UIDGID_STRICT_TYPE_CHECKS
+               unsigned int    e_id;
+#endif
+       };
 };
 
 struct posix_acl {
index 6e53c34035cd5b77c7c3e86878a00a3b1861070e..ad93ad0f1db0b0afe072d587a6fd1507f2e35d84 100644 (file)
@@ -52,7 +52,21 @@ posix_acl_xattr_count(size_t size)
        return size / sizeof(posix_acl_xattr_entry);
 }
 
-struct posix_acl *posix_acl_from_xattr(const void *value, size_t size);
-int posix_acl_to_xattr(const struct posix_acl *acl, void *buffer, size_t size);
+#ifdef CONFIG_FS_POSIX_ACL
+void posix_acl_fix_xattr_from_user(void *value, size_t size);
+void posix_acl_fix_xattr_to_user(void *value, size_t size);
+#else
+static inline void posix_acl_fix_xattr_from_user(void *value, size_t size)
+{
+}
+static inline void posix_acl_fix_xattr_to_user(void *value, size_t size)
+{
+}
+#endif
+
+struct posix_acl *posix_acl_from_xattr(struct user_namespace *user_ns, 
+                                      const void *value, size_t size);
+int posix_acl_to_xattr(struct user_namespace *user_ns,
+                      const struct posix_acl *acl, void *buffer, size_t size);
 
 #endif /* _POSIX_ACL_XATTR_H */
index 94048547f29ad424e27013c4eaa17ca6ca28c422..0cc45ae1afd54aca12247724c73972a5ab8f6cc4 100644 (file)
@@ -116,5 +116,14 @@ static inline void pps_get_ts(struct pps_event_time *ts)
 
 #endif /* CONFIG_NTP_PPS */
 
+/* Subtract known time delay from PPS event time(s) */
+static inline void pps_sub_ts(struct pps_event_time *ts, struct timespec delta)
+{
+       ts->ts_real = timespec_sub(ts->ts_real, delta);
+#ifdef CONFIG_NTP_PPS
+       ts->ts_raw = timespec_sub(ts->ts_raw, delta);
+#endif
+}
+
 #endif /* LINUX_PPS_KERNEL_H */
 
diff --git a/include/linux/projid.h b/include/linux/projid.h
new file mode 100644 (file)
index 0000000..36517b9
--- /dev/null
@@ -0,0 +1,104 @@
+#ifndef _LINUX_PROJID_H
+#define _LINUX_PROJID_H
+
+/*
+ * A set of types for the internal kernel types representing project ids.
+ *
+ * The types defined in this header allow distinguishing which project ids in
+ * the kernel are values used by userspace and which project id values are
+ * the internal kernel values.  With the addition of user namespaces the values
+ * can be different.  Using the type system makes it possible for the compiler
+ * to detect when we overlook these differences.
+ *
+ */
+#include <linux/types.h>
+
+struct user_namespace;
+extern struct user_namespace init_user_ns;
+
+typedef __kernel_uid32_t projid_t;
+
+#ifdef CONFIG_UIDGID_STRICT_TYPE_CHECKS
+
+typedef struct {
+       projid_t val;
+} kprojid_t;
+
+static inline projid_t __kprojid_val(kprojid_t projid)
+{
+       return projid.val;
+}
+
+#define KPROJIDT_INIT(value) (kprojid_t){ value }
+
+#else
+
+typedef projid_t kprojid_t;
+
+static inline projid_t __kprojid_val(kprojid_t projid)
+{
+       return projid;
+}
+
+#define KPROJIDT_INIT(value) ((kprojid_t) value )
+
+#endif
+
+#define INVALID_PROJID KPROJIDT_INIT(-1)
+#define OVERFLOW_PROJID 65534
+
+static inline bool projid_eq(kprojid_t left, kprojid_t right)
+{
+       return __kprojid_val(left) == __kprojid_val(right);
+}
+
+static inline bool projid_lt(kprojid_t left, kprojid_t right)
+{
+       return __kprojid_val(left) < __kprojid_val(right);
+}
+
+static inline bool projid_valid(kprojid_t projid)
+{
+       return !projid_eq(projid, INVALID_PROJID);
+}
+
+#ifdef CONFIG_USER_NS
+
+extern kprojid_t make_kprojid(struct user_namespace *from, projid_t projid);
+
+extern projid_t from_kprojid(struct user_namespace *to, kprojid_t projid);
+extern projid_t from_kprojid_munged(struct user_namespace *to, kprojid_t projid);
+
+static inline bool kprojid_has_mapping(struct user_namespace *ns, kprojid_t projid)
+{
+       return from_kprojid(ns, projid) != (projid_t)-1;
+}
+
+#else
+
+static inline kprojid_t make_kprojid(struct user_namespace *from, projid_t projid)
+{
+       return KPROJIDT_INIT(projid);
+}
+
+static inline projid_t from_kprojid(struct user_namespace *to, kprojid_t kprojid)
+{
+       return __kprojid_val(kprojid);
+}
+
+static inline projid_t from_kprojid_munged(struct user_namespace *to, kprojid_t kprojid)
+{
+       projid_t projid = from_kprojid(to, kprojid);
+       if (projid == (projid_t)-1)
+               projid = OVERFLOW_PROJID;
+       return projid;
+}
+
+static inline bool kprojid_has_mapping(struct user_namespace *ns, kprojid_t projid)
+{
+       return true;
+}
+
+#endif /* CONFIG_USER_NS */
+
+#endif /* _LINUX_PROJID_H */
index 945704c2ed65307bb8ec47c39cbd24d364b40fa3..f2dc6d8fc680f7ae02596558ce4571a675fb3bce 100644 (file)
@@ -21,6 +21,8 @@
 #ifndef _PTP_CLOCK_KERNEL_H_
 #define _PTP_CLOCK_KERNEL_H_
 
+#include <linux/device.h>
+#include <linux/pps_kernel.h>
 #include <linux/ptp_clock.h>
 
 
@@ -40,7 +42,9 @@ struct ptp_clock_request {
  * struct ptp_clock_info - decribes a PTP hardware clock
  *
  * @owner:     The clock driver should set to THIS_MODULE.
- * @name:      A short name to identify the clock.
+ * @name:      A short "friendly name" to identify the clock and to
+ *             help distinguish PHY based devices from MAC based ones.
+ *             The string is not meant to be a unique id.
  * @max_adj:   The maximum possible frequency adjustment, in parts per billon.
  * @n_alarm:   The number of programmable alarms.
  * @n_ext_ts:  The number of external time stamp channels.
@@ -92,10 +96,12 @@ struct ptp_clock;
 /**
  * ptp_clock_register() - register a PTP hardware clock driver
  *
- * @info:  Structure describing the new clock.
+ * @info:   Structure describing the new clock.
+ * @parent: Pointer to the parent device of the new clock.
  */
 
-extern struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info);
+extern struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
+                                           struct device *parent);
 
 /**
  * ptp_clock_unregister() - unregister a PTP hardware clock driver
@@ -110,6 +116,7 @@ enum ptp_clock_events {
        PTP_CLOCK_ALARM,
        PTP_CLOCK_EXTTS,
        PTP_CLOCK_PPS,
+       PTP_CLOCK_PPSUSR,
 };
 
 /**
@@ -117,13 +124,17 @@ enum ptp_clock_events {
  *
  * @type:  One of the ptp_clock_events enumeration values.
  * @index: Identifies the source of the event.
- * @timestamp: When the event occured.
+ * @timestamp: When the event occurred (%PTP_CLOCK_EXTTS only).
+ * @pps_times: When the event occurred (%PTP_CLOCK_PPSUSR only).
  */
 
 struct ptp_clock_event {
        int type;
        int index;
-       u64 timestamp;
+       union {
+               u64 timestamp;
+               struct pps_event_time pps_times;
+       };
 };
 
 /**
index 597e4fdb97fe4d676b5fa0b717ae53f023893e5e..3db698aee34cd9437f8f42823f17825796597f09 100644 (file)
@@ -130,8 +130,6 @@ extern void exit_ptrace(struct task_struct *tracer);
 #define PTRACE_MODE_READ       0x01
 #define PTRACE_MODE_ATTACH     0x02
 #define PTRACE_MODE_NOAUDIT    0x04
-/* Returns 0 on success, -errno on denial. */
-extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
 /* Returns true on success, false on denial. */
 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
 
index 524ede8a160a2d85e426f277131fc803558ec748..dcd5721e626dbdba0e6e7221979fa4033d482b19 100644 (file)
@@ -181,10 +181,135 @@ enum {
 #include <linux/dqblk_v2.h>
 
 #include <linux/atomic.h>
+#include <linux/uidgid.h>
+#include <linux/projid.h>
+
+#undef USRQUOTA
+#undef GRPQUOTA
+enum quota_type {
+       USRQUOTA = 0,           /* element used for user quotas */
+       GRPQUOTA = 1,           /* element used for group quotas */
+       PRJQUOTA = 2,           /* element used for project quotas */
+};
 
 typedef __kernel_uid32_t qid_t; /* Type in which we store ids in memory */
 typedef long long qsize_t;     /* Type in which we store sizes */
 
+struct kqid {                  /* Type in which we store the quota identifier */
+       union {
+               kuid_t uid;
+               kgid_t gid;
+               kprojid_t projid;
+       };
+       enum quota_type type;  /* USRQUOTA (uid) or GRPQUOTA (gid) or PRJQUOTA (projid) */
+};
+
+extern bool qid_eq(struct kqid left, struct kqid right);
+extern bool qid_lt(struct kqid left, struct kqid right);
+extern qid_t from_kqid(struct user_namespace *to, struct kqid qid);
+extern qid_t from_kqid_munged(struct user_namespace *to, struct kqid qid);
+extern bool qid_valid(struct kqid qid);
+
+/**
+ *     make_kqid - Map a user-namespace, type, qid tuple into a kqid.
+ *     @from: User namespace that the qid is in
+ *     @type: The type of quota
+ *     @qid: Quota identifier
+ *
+ *     Maps a user-namespace, type qid tuple into a kernel internal
+ *     kqid, and returns that kqid.
+ *
+ *     When there is no mapping defined for the user-namespace, type,
+ *     qid tuple an invalid kqid is returned.  Callers are expected to
+ *     test for and handle invalid kqids being returned.
+ *     Invalid kqids may be tested for using qid_valid().
+ */
+static inline struct kqid make_kqid(struct user_namespace *from,
+                                   enum quota_type type, qid_t qid)
+{
+       struct kqid kqid;
+
+       kqid.type = type;
+       switch (type) {
+       case USRQUOTA:
+               kqid.uid = make_kuid(from, qid);
+               break;
+       case GRPQUOTA:
+               kqid.gid = make_kgid(from, qid);
+               break;
+       case PRJQUOTA:
+               kqid.projid = make_kprojid(from, qid);
+               break;
+       default:
+               BUG();
+       }
+       return kqid;
+}
+
+/**
+ *     make_kqid_invalid - Explicitly make an invalid kqid
+ *     @type: The type of quota identifier
+ *
+ *     Returns an invalid kqid with the specified type.
+ */
+static inline struct kqid make_kqid_invalid(enum quota_type type)
+{
+       struct kqid kqid;
+
+       kqid.type = type;
+       switch (type) {
+       case USRQUOTA:
+               kqid.uid = INVALID_UID;
+               break;
+       case GRPQUOTA:
+               kqid.gid = INVALID_GID;
+               break;
+       case PRJQUOTA:
+               kqid.projid = INVALID_PROJID;
+               break;
+       default:
+               BUG();
+       }
+       return kqid;
+}
+
+/**
+ *     make_kqid_uid - Make a kqid from a kuid
+ *     @uid: The kuid to make the quota identifier from
+ */
+static inline struct kqid make_kqid_uid(kuid_t uid)
+{
+       struct kqid kqid;
+       kqid.type = USRQUOTA;
+       kqid.uid = uid;
+       return kqid;
+}
+
+/**
+ *     make_kqid_gid - Make a kqid from a kgid
+ *     @gid: The kgid to make the quota identifier from
+ */
+static inline struct kqid make_kqid_gid(kgid_t gid)
+{
+       struct kqid kqid;
+       kqid.type = GRPQUOTA;
+       kqid.gid = gid;
+       return kqid;
+}
+
+/**
+ *     make_kqid_projid - Make a kqid from a projid
+ *     @projid: The kprojid to make the quota identifier from
+ */
+static inline struct kqid make_kqid_projid(kprojid_t projid)
+{
+       struct kqid kqid;
+       kqid.type = PRJQUOTA;
+       kqid.projid = projid;
+       return kqid;
+}
+
+
 extern spinlock_t dq_data_lock;
 
 /* Maximal numbers of writes for quota operation (insert/delete/update)
@@ -294,10 +419,9 @@ struct dquot {
        atomic_t dq_count;              /* Use count */
        wait_queue_head_t dq_wait_unused;       /* Wait queue for dquot to become unused */
        struct super_block *dq_sb;      /* superblock this applies to */
-       unsigned int dq_id;             /* ID this applies to (uid, gid) */
+       struct kqid dq_id;              /* ID this applies to (uid, gid, projid) */
        loff_t dq_off;                  /* Offset of dquot on disk */
        unsigned long dq_flags;         /* See DQ_* */
-       short dq_type;                  /* Type of quota */
        struct mem_dqblk dq_dqb;        /* Diskquota usage */
 };
 
@@ -336,8 +460,8 @@ struct quotactl_ops {
        int (*quota_sync)(struct super_block *, int);
        int (*get_info)(struct super_block *, int, struct if_dqinfo *);
        int (*set_info)(struct super_block *, int, struct if_dqinfo *);
-       int (*get_dqblk)(struct super_block *, int, qid_t, struct fs_disk_quota *);
-       int (*set_dqblk)(struct super_block *, int, qid_t, struct fs_disk_quota *);
+       int (*get_dqblk)(struct super_block *, struct kqid, struct fs_disk_quota *);
+       int (*set_dqblk)(struct super_block *, struct kqid, struct fs_disk_quota *);
        int (*get_xstate)(struct super_block *, struct fs_quota_stat *);
        int (*set_xstate)(struct super_block *, unsigned int, int);
 };
@@ -386,10 +510,10 @@ static inline unsigned int dquot_generic_flag(unsigned int flags, int type)
 }
 
 #ifdef CONFIG_QUOTA_NETLINK_INTERFACE
-extern void quota_send_warning(short type, unsigned int id, dev_t dev,
+extern void quota_send_warning(struct kqid qid, dev_t dev,
                               const char warntype);
 #else
-static inline void quota_send_warning(short type, unsigned int id, dev_t dev,
+static inline void quota_send_warning(struct kqid qid, dev_t dev,
                                      const char warntype)
 {
        return;
index ec6b65feaabac1fef7222dd1ad1bc2d286136983..1c50093ae656d97211374aeaf30b6998a9e0a764 100644 (file)
@@ -44,7 +44,7 @@ void inode_sub_rsv_space(struct inode *inode, qsize_t number);
 
 void dquot_initialize(struct inode *inode);
 void dquot_drop(struct inode *inode);
-struct dquot *dqget(struct super_block *sb, unsigned int id, int type);
+struct dquot *dqget(struct super_block *sb, struct kqid qid);
 void dqput(struct dquot *dquot);
 int dquot_scan_active(struct super_block *sb,
                      int (*fn)(struct dquot *dquot, unsigned long priv),
@@ -87,9 +87,9 @@ int dquot_writeback_dquots(struct super_block *sb, int type);
 int dquot_quota_sync(struct super_block *sb, int type);
 int dquot_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii);
 int dquot_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii);
-int dquot_get_dqblk(struct super_block *sb, int type, qid_t id,
+int dquot_get_dqblk(struct super_block *sb, struct kqid id,
                struct fs_disk_quota *di);
-int dquot_set_dqblk(struct super_block *sb, int type, qid_t id,
+int dquot_set_dqblk(struct super_block *sb, struct kqid id,
                struct fs_disk_quota *di);
 
 int __dquot_transfer(struct inode *inode, struct dquot **transfer_to);
index 6fdf02737e9decd563ac8616fb510d4951c11515..0ec590bb361119ea10825c211f376d902ad294be 100644 (file)
@@ -354,6 +354,37 @@ static inline bool rfkill_blocked(struct rfkill *rfkill)
 }
 #endif /* RFKILL || RFKILL_MODULE */
 
+
+#ifdef CONFIG_RFKILL_LEDS
+/**
+ * rfkill_get_led_trigger_name - Get the LED trigger name for the button's LED.
+ * This function might return a NULL pointer if registering of the
+ * LED trigger failed. Use this as "default_trigger" for the LED.
+ */
+const char *rfkill_get_led_trigger_name(struct rfkill *rfkill);
+
+/**
+ * rfkill_set_led_trigger_name -- set the LED trigger name
+ * @rfkill: rfkill struct
+ * @name: LED trigger name
+ *
+ * This function sets the LED trigger name of the radio LED
+ * trigger that rfkill creates. It is optional, but if called
+ * must be called before rfkill_register() to be effective.
+ */
+void rfkill_set_led_trigger_name(struct rfkill *rfkill, const char *name);
+#else
+static inline const char *rfkill_get_led_trigger_name(struct rfkill *rfkill)
+{
+       return NULL;
+}
+
+static inline void
+rfkill_set_led_trigger_name(struct rfkill *rfkill, const char *name)
+{
+}
+#endif
+
 #endif /* __KERNEL__ */
 
 #endif /* RFKILL_H */
index 765dffbb085ed2da1a70241658bc11daae5716e8..9d51e260bde03536c75b3fc66f034a38ea420993 100644 (file)
@@ -405,6 +405,7 @@ static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
 
 extern void set_dumpable(struct mm_struct *mm, int value);
 extern int get_dumpable(struct mm_struct *mm);
+extern int __get_dumpable(unsigned long mm_flags);
 
 /* get/set_dumpable() values */
 #define SUID_DUMPABLE_DISABLED 0
@@ -1414,7 +1415,7 @@ struct task_struct {
 
        struct audit_context *audit_context;
 #ifdef CONFIG_AUDITSYSCALL
-       uid_t loginuid;
+       kuid_t loginuid;
        unsigned int sessionid;
 #endif
        struct seccomp seccomp;
@@ -1526,6 +1527,9 @@ struct task_struct {
         * cache last used pipe for splice
         */
        struct pipe_inode_info *splice_pipe;
+
+       struct page_frag task_frag;
+
 #ifdef CONFIG_TASK_DELAY_ACCT
        struct task_delay_info *delays;
 #endif
index d143b8e01954ab14ac224d9894bf65e111ee97fb..5b50c4e1a7c25eeada9bb6756e2ebee3ad8b7473 100644 (file)
@@ -1436,7 +1436,7 @@ struct security_operations {
        int (*path_rename) (struct path *old_dir, struct dentry *old_dentry,
                            struct path *new_dir, struct dentry *new_dentry);
        int (*path_chmod) (struct path *path, umode_t mode);
-       int (*path_chown) (struct path *path, uid_t uid, gid_t gid);
+       int (*path_chown) (struct path *path, kuid_t uid, kgid_t gid);
        int (*path_chroot) (struct path *path);
 #endif
 
@@ -2831,7 +2831,7 @@ int security_path_link(struct dentry *old_dentry, struct path *new_dir,
 int security_path_rename(struct path *old_dir, struct dentry *old_dentry,
                         struct path *new_dir, struct dentry *new_dentry);
 int security_path_chmod(struct path *path, umode_t mode);
-int security_path_chown(struct path *path, uid_t uid, gid_t gid);
+int security_path_chown(struct path *path, kuid_t uid, kgid_t gid);
 int security_path_chroot(struct path *path);
 #else  /* CONFIG_SECURITY_PATH */
 static inline int security_path_unlink(struct path *dir, struct dentry *dentry)
@@ -2887,7 +2887,7 @@ static inline int security_path_chmod(struct path *path, umode_t mode)
        return 0;
 }
 
-static inline int security_path_chown(struct path *path, uid_t uid, gid_t gid)
+static inline int security_path_chown(struct path *path, kuid_t uid, kgid_t gid)
 {
        return 0;
 }
@@ -3022,5 +3022,36 @@ static inline void free_secdata(void *secdata)
 { }
 #endif /* CONFIG_SECURITY */
 
+#ifdef CONFIG_SECURITY_YAMA
+extern int yama_ptrace_access_check(struct task_struct *child,
+                                   unsigned int mode);
+extern int yama_ptrace_traceme(struct task_struct *parent);
+extern void yama_task_free(struct task_struct *task);
+extern int yama_task_prctl(int option, unsigned long arg2, unsigned long arg3,
+                          unsigned long arg4, unsigned long arg5);
+#else
+static inline int yama_ptrace_access_check(struct task_struct *child,
+                                          unsigned int mode)
+{
+       return 0;
+}
+
+static inline int yama_ptrace_traceme(struct task_struct *parent)
+{
+       return 0;
+}
+
+static inline void yama_task_free(struct task_struct *task)
+{
+}
+
+static inline int yama_task_prctl(int option, unsigned long arg2,
+                                 unsigned long arg3, unsigned long arg4,
+                                 unsigned long arg5)
+{
+       return -ENOSYS;
+}
+#endif /* CONFIG_SECURITY_YAMA */
+
 #endif /* ! __LINUX_SECURITY_H */
 
index 83c44eefe698ba97a72700b8cac63a37fb558c82..68a04a343cadecc121ae9dc2515fa4c3d83a6105 100644 (file)
@@ -13,6 +13,7 @@ struct file;
 struct path;
 struct inode;
 struct dentry;
+struct user_namespace;
 
 struct seq_file {
        char *buf;
@@ -25,6 +26,9 @@ struct seq_file {
        struct mutex lock;
        const struct seq_operations *op;
        int poll_event;
+#ifdef CONFIG_USER_NS
+       struct user_namespace *user_ns;
+#endif
        void *private;
 };
 
@@ -128,6 +132,16 @@ int seq_put_decimal_ull(struct seq_file *m, char delimiter,
 int seq_put_decimal_ll(struct seq_file *m, char delimiter,
                        long long num);
 
+static inline struct user_namespace *seq_user_ns(struct seq_file *seq)
+{
+#ifdef CONFIG_USER_NS
+       return seq->user_ns;
+#else
+       extern struct user_namespace init_user_ns;
+       return &init_user_ns;
+#endif
+}
+
 #define SEQ_START_TOKEN ((void *)1)
 /*
  * Helpers for iteration over list_head-s in seq_files
index bef2cf00b3be68f3d3ed69dd78f977e737ad84ce..30aa0dc60d75786287226bf3cbedffd9f1b266a3 100644 (file)
@@ -5,6 +5,7 @@
 #include <linux/mempolicy.h>
 #include <linux/pagemap.h>
 #include <linux/percpu_counter.h>
+#include <linux/xattr.h>
 
 /* inode in-kernel data */
 
@@ -18,7 +19,7 @@ struct shmem_inode_info {
        };
        struct shared_policy    policy;         /* NUMA memory alloc policy */
        struct list_head        swaplist;       /* chain of maybes on swap */
-       struct list_head        xattr_list;     /* list of shmem_xattr */
+       struct simple_xattrs    xattrs;         /* list of xattrs */
        struct inode            vfs_inode;
 };
 
index 7632c87da2c9fd66eab61b6c7415d5669b7aeefd..b33a3a1f205e45c747c9125a5b5920365ca2f8be 100644 (file)
@@ -846,13 +846,16 @@ static inline int skb_shared(const struct sk_buff *skb)
  *
  *     NULL is returned on a memory allocation failure.
  */
-static inline struct sk_buff *skb_share_check(struct sk_buff *skb,
-                                             gfp_t pri)
+static inline struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri)
 {
        might_sleep_if(pri & __GFP_WAIT);
        if (skb_shared(skb)) {
                struct sk_buff *nskb = skb_clone(skb, pri);
-               kfree_skb(skb);
+
+               if (likely(nskb))
+                       consume_skb(skb);
+               else
+                       kfree_skb(skb);
                skb = nskb;
        }
        return skb;
index 00bc189cb3955b57384bbcf3e458b3b593c94774..fdfba235f9f1a9bb6b8fc03958ae1982404dd125 100644 (file)
 enum
 {
        IPSTATS_MIB_NUM = 0,
+/* frequently written fields in fast path, kept in same cache line */
        IPSTATS_MIB_INPKTS,                     /* InReceives */
+       IPSTATS_MIB_INOCTETS,                   /* InOctets */
+       IPSTATS_MIB_INDELIVERS,                 /* InDelivers */
+       IPSTATS_MIB_OUTFORWDATAGRAMS,           /* OutForwDatagrams */
+       IPSTATS_MIB_OUTPKTS,                    /* OutRequests */
+       IPSTATS_MIB_OUTOCTETS,                  /* OutOctets */
+/* other fields */
        IPSTATS_MIB_INHDRERRORS,                /* InHdrErrors */
        IPSTATS_MIB_INTOOBIGERRORS,             /* InTooBigErrors */
        IPSTATS_MIB_INNOROUTES,                 /* InNoRoutes */
@@ -26,9 +33,6 @@ enum
        IPSTATS_MIB_INUNKNOWNPROTOS,            /* InUnknownProtos */
        IPSTATS_MIB_INTRUNCATEDPKTS,            /* InTruncatedPkts */
        IPSTATS_MIB_INDISCARDS,                 /* InDiscards */
-       IPSTATS_MIB_INDELIVERS,                 /* InDelivers */
-       IPSTATS_MIB_OUTFORWDATAGRAMS,           /* OutForwDatagrams */
-       IPSTATS_MIB_OUTPKTS,                    /* OutRequests */
        IPSTATS_MIB_OUTDISCARDS,                /* OutDiscards */
        IPSTATS_MIB_OUTNOROUTES,                /* OutNoRoutes */
        IPSTATS_MIB_REASMTIMEOUT,               /* ReasmTimeout */
@@ -42,8 +46,6 @@ enum
        IPSTATS_MIB_OUTMCASTPKTS,               /* OutMcastPkts */
        IPSTATS_MIB_INBCASTPKTS,                /* InBcastPkts */
        IPSTATS_MIB_OUTBCASTPKTS,               /* OutBcastPkts */
-       IPSTATS_MIB_INOCTETS,                   /* InOctets */
-       IPSTATS_MIB_OUTOCTETS,                  /* OutOctets */
        IPSTATS_MIB_INMCASTOCTETS,              /* InMcastOctets */
        IPSTATS_MIB_OUTMCASTOCTETS,             /* OutMcastOctets */
        IPSTATS_MIB_INBCASTOCTETS,              /* InBcastOctets */
@@ -239,6 +241,10 @@ enum
        LINUX_MIB_TCPCHALLENGEACK,              /* TCPChallengeACK */
        LINUX_MIB_TCPSYNCHALLENGE,              /* TCPSYNChallenge */
        LINUX_MIB_TCPFASTOPENACTIVE,            /* TCPFastOpenActive */
+       LINUX_MIB_TCPFASTOPENPASSIVE,           /* TCPFastOpenPassive*/
+       LINUX_MIB_TCPFASTOPENPASSIVEFAIL,       /* TCPFastOpenPassiveFail */
+       LINUX_MIB_TCPFASTOPENLISTENOVERFLOW,    /* TCPFastOpenListenOverflow */
+       LINUX_MIB_TCPFASTOPENCOOKIEREQD,        /* TCPFastOpenCookieReqd */
        __LINUX_MIB_MAX
 };
 
diff --git a/include/linux/spi/mxs-spi.h b/include/linux/spi/mxs-spi.h
new file mode 100644 (file)
index 0000000..61ae130
--- /dev/null
@@ -0,0 +1,150 @@
+/*
+ * include/linux/spi/mxs-spi.h
+ *
+ * Freescale i.MX233/i.MX28 SPI controller register definition
+ *
+ * Copyright 2008 Embedded Alley Solutions, Inc.
+ * Copyright 2009-2011 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef __LINUX_SPI_MXS_SPI_H__
+#define __LINUX_SPI_MXS_SPI_H__
+
+#include <linux/fsl/mxs-dma.h>
+
+#define ssp_is_old(host)       ((host)->devid == IMX23_SSP)
+
+/* SSP registers */
+#define HW_SSP_CTRL0                           0x000
+#define  BM_SSP_CTRL0_RUN                      (1 << 29)
+#define  BM_SSP_CTRL0_SDIO_IRQ_CHECK           (1 << 28)
+#define  BM_SSP_CTRL0_LOCK_CS                  (1 << 27)
+#define  BM_SSP_CTRL0_IGNORE_CRC               (1 << 26)
+#define  BM_SSP_CTRL0_READ                     (1 << 25)
+#define  BM_SSP_CTRL0_DATA_XFER                        (1 << 24)
+#define  BP_SSP_CTRL0_BUS_WIDTH                        22
+#define  BM_SSP_CTRL0_BUS_WIDTH                        (0x3 << 22)
+#define  BM_SSP_CTRL0_WAIT_FOR_IRQ             (1 << 21)
+#define  BM_SSP_CTRL0_WAIT_FOR_CMD             (1 << 20)
+#define  BM_SSP_CTRL0_LONG_RESP                        (1 << 19)
+#define  BM_SSP_CTRL0_GET_RESP                 (1 << 17)
+#define  BM_SSP_CTRL0_ENABLE                   (1 << 16)
+#define  BP_SSP_CTRL0_XFER_COUNT               0
+#define  BM_SSP_CTRL0_XFER_COUNT               0xffff
+#define HW_SSP_CMD0                            0x010
+#define  BM_SSP_CMD0_DBL_DATA_RATE_EN          (1 << 25)
+#define  BM_SSP_CMD0_SLOW_CLKING_EN            (1 << 22)
+#define  BM_SSP_CMD0_CONT_CLKING_EN            (1 << 21)
+#define  BM_SSP_CMD0_APPEND_8CYC               (1 << 20)
+#define  BP_SSP_CMD0_BLOCK_SIZE                        16
+#define  BM_SSP_CMD0_BLOCK_SIZE                        (0xf << 16)
+#define  BP_SSP_CMD0_BLOCK_COUNT               8
+#define  BM_SSP_CMD0_BLOCK_COUNT               (0xff << 8)
+#define  BP_SSP_CMD0_CMD                       0
+#define  BM_SSP_CMD0_CMD                       0xff
+#define HW_SSP_CMD1                            0x020
+#define HW_SSP_XFER_SIZE                       0x030
+#define HW_SSP_BLOCK_SIZE                      0x040
+#define  BP_SSP_BLOCK_SIZE_BLOCK_COUNT         4
+#define  BM_SSP_BLOCK_SIZE_BLOCK_COUNT         (0xffffff << 4)
+#define  BP_SSP_BLOCK_SIZE_BLOCK_SIZE          0
+#define  BM_SSP_BLOCK_SIZE_BLOCK_SIZE          0xf
+#define HW_SSP_TIMING(h)                       (ssp_is_old(h) ? 0x050 : 0x070)
+#define  BP_SSP_TIMING_TIMEOUT                 16
+#define  BM_SSP_TIMING_TIMEOUT                 (0xffff << 16)
+#define  BP_SSP_TIMING_CLOCK_DIVIDE            8
+#define  BM_SSP_TIMING_CLOCK_DIVIDE            (0xff << 8)
+#define  BF_SSP_TIMING_CLOCK_DIVIDE(v)         \
+                       (((v) << 8) & BM_SSP_TIMING_CLOCK_DIVIDE)
+#define  BP_SSP_TIMING_CLOCK_RATE              0
+#define  BM_SSP_TIMING_CLOCK_RATE              0xff
+#define BF_SSP_TIMING_CLOCK_RATE(v)            \
+                       (((v) << 0) & BM_SSP_TIMING_CLOCK_RATE)
+#define HW_SSP_CTRL1(h)                                (ssp_is_old(h) ? 0x060 : 0x080)
+#define  BM_SSP_CTRL1_SDIO_IRQ                 (1 << 31)
+#define  BM_SSP_CTRL1_SDIO_IRQ_EN              (1 << 30)
+#define  BM_SSP_CTRL1_RESP_ERR_IRQ             (1 << 29)
+#define  BM_SSP_CTRL1_RESP_ERR_IRQ_EN          (1 << 28)
+#define  BM_SSP_CTRL1_RESP_TIMEOUT_IRQ         (1 << 27)
+#define  BM_SSP_CTRL1_RESP_TIMEOUT_IRQ_EN      (1 << 26)
+#define  BM_SSP_CTRL1_DATA_TIMEOUT_IRQ         (1 << 25)
+#define  BM_SSP_CTRL1_DATA_TIMEOUT_IRQ_EN      (1 << 24)
+#define  BM_SSP_CTRL1_DATA_CRC_IRQ             (1 << 23)
+#define  BM_SSP_CTRL1_DATA_CRC_IRQ_EN          (1 << 22)
+#define  BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ                (1 << 21)
+#define  BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ_EN     (1 << 20)
+#define  BM_SSP_CTRL1_RECV_TIMEOUT_IRQ         (1 << 17)
+#define  BM_SSP_CTRL1_RECV_TIMEOUT_IRQ_EN      (1 << 16)
+#define  BM_SSP_CTRL1_FIFO_OVERRUN_IRQ         (1 << 15)
+#define  BM_SSP_CTRL1_FIFO_OVERRUN_IRQ_EN      (1 << 14)
+#define  BM_SSP_CTRL1_DMA_ENABLE               (1 << 13)
+#define  BM_SSP_CTRL1_PHASE                    (1 << 10)
+#define  BM_SSP_CTRL1_POLARITY                 (1 << 9)
+#define  BP_SSP_CTRL1_WORD_LENGTH              4
+#define  BM_SSP_CTRL1_WORD_LENGTH              (0xf << 4)
+#define  BF_SSP_CTRL1_WORD_LENGTH(v)           \
+                       (((v) << 4) & BM_SSP_CTRL1_WORD_LENGTH)
+#define  BV_SSP_CTRL1_WORD_LENGTH__FOUR_BITS   0x3
+#define  BV_SSP_CTRL1_WORD_LENGTH__EIGHT_BITS  0x7
+#define  BV_SSP_CTRL1_WORD_LENGTH__SIXTEEN_BITS        0xF
+#define  BP_SSP_CTRL1_SSP_MODE                 0
+#define  BM_SSP_CTRL1_SSP_MODE                 0xf
+#define  BF_SSP_CTRL1_SSP_MODE(v)              \
+                       (((v) << 0) & BM_SSP_CTRL1_SSP_MODE)
+#define  BV_SSP_CTRL1_SSP_MODE__SPI            0x0
+#define  BV_SSP_CTRL1_SSP_MODE__SSI            0x1
+#define  BV_SSP_CTRL1_SSP_MODE__SD_MMC         0x3
+#define  BV_SSP_CTRL1_SSP_MODE__MS             0x4
+
+#define HW_SSP_DATA(h)                         (ssp_is_old(h) ? 0x070 : 0x090)
+
+#define HW_SSP_SDRESP0(h)                      (ssp_is_old(h) ? 0x080 : 0x0a0)
+#define HW_SSP_SDRESP1(h)                      (ssp_is_old(h) ? 0x090 : 0x0b0)
+#define HW_SSP_SDRESP2(h)                      (ssp_is_old(h) ? 0x0a0 : 0x0c0)
+#define HW_SSP_SDRESP3(h)                      (ssp_is_old(h) ? 0x0b0 : 0x0d0)
+#define HW_SSP_STATUS(h)                       (ssp_is_old(h) ? 0x0c0 : 0x100)
+#define  BM_SSP_STATUS_CARD_DETECT             (1 << 28)
+#define  BM_SSP_STATUS_SDIO_IRQ                        (1 << 17)
+#define  BM_SSP_STATUS_FIFO_EMPTY              (1 << 5)
+
+#define BF_SSP(value, field)   (((value) << BP_SSP_##field) & BM_SSP_##field)
+
+#define SSP_PIO_NUM    3
+
+enum mxs_ssp_id {
+       IMX23_SSP,
+       IMX28_SSP,
+};
+
+struct mxs_ssp {
+       struct device                   *dev;
+       void __iomem                    *base;
+       struct clk                      *clk;
+       unsigned int                    clk_rate;
+       enum mxs_ssp_id                 devid;
+
+       int                             dma_channel;
+       struct dma_chan                 *dmach;
+       struct mxs_dma_data             dma_data;
+       unsigned int                    dma_dir;
+       enum dma_transfer_direction     slave_dirn;
+       u32                             ssp_pio_words[SSP_PIO_NUM];
+};
+
+void mxs_ssp_set_clk_rate(struct mxs_ssp *ssp, unsigned int rate);
+
+#endif /* __LINUX_SPI_MXS_SPI_H__ */
index 1a6b0045b06b63a616946bd421ce3f7f8e530160..c2b02a5c86ae0bb03798d9221c6199dc8be63d4b 100644 (file)
 #define SSB_CHIPCO_FLASHCTL_ST_SE      0x02D8          /* Sector Erase */
 #define SSB_CHIPCO_FLASHCTL_ST_BE      0x00C7          /* Bulk Erase */
 #define SSB_CHIPCO_FLASHCTL_ST_DP      0x00B9          /* Deep Power-down */
-#define SSB_CHIPCO_FLASHCTL_ST_RSIG    0x03AB          /* Read Electronic Signature */
+#define SSB_CHIPCO_FLASHCTL_ST_RES     0x03AB          /* Read Electronic Signature */
+#define SSB_CHIPCO_FLASHCTL_ST_CSA     0x1000          /* Keep chip select asserted */
+#define SSB_CHIPCO_FLASHCTL_ST_SSE     0x0220          /* Sub-sector Erase */
 
 /* Status register bits for ST flashes */
 #define SSB_CHIPCO_FLASHSTA_ST_WIP     0x01            /* Write In Progress */
index b69bdb1e08b674f81b372a0dcab0ed2b5ec47038..a1547ea3920d226cf56286b119c0e72d7a5b8712 100644 (file)
@@ -76,7 +76,6 @@
 /* Platfrom data for platform device structure's platform_data field */
 
 struct stmmac_mdio_bus_data {
-       int bus_id;
        int (*phy_reset)(void *priv);
        unsigned int phy_mask;
        int *irqs;
index e872526fdc5fbb371ca0e420a961aec4738fdfa2..8d08b3ed406db688c6fa8c1cfe10d921870d2e72 100644 (file)
@@ -25,6 +25,7 @@ extern int swiotlb_force;
 extern void swiotlb_init(int verbose);
 extern void swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose);
 extern unsigned long swiotlb_nr_tbl(void);
+extern int swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs);
 
 /*
  * Enumeration for sync targets
index eb125a4c30b334b63ca9d06983a125360839d90e..67c789ae719c7f6216870db74df3afc0223de66b 100644 (file)
@@ -110,6 +110,7 @@ enum {
 #define TCP_REPAIR_QUEUE       20
 #define TCP_QUEUE_SEQ          21
 #define TCP_REPAIR_OPTIONS     22
+#define TCP_FASTOPEN           23      /* Enable FastOpen on listeners */
 
 struct tcp_repair_opt {
        __u32   opt_code;
@@ -246,6 +247,7 @@ static inline unsigned int tcp_optlen(const struct sk_buff *skb)
 /* TCP Fast Open */
 #define TCP_FASTOPEN_COOKIE_MIN        4       /* Min Fast Open Cookie size in bytes */
 #define TCP_FASTOPEN_COOKIE_MAX        16      /* Max Fast Open Cookie size in bytes */
+#define TCP_FASTOPEN_COOKIE_SIZE 8     /* the size employed by this impl. */
 
 /* TCP Fast Open Cookie as stored in memory */
 struct tcp_fastopen_cookie {
@@ -312,9 +314,14 @@ struct tcp_request_sock {
        /* Only used by TCP MD5 Signature so far. */
        const struct tcp_request_sock_ops *af_specific;
 #endif
+       struct sock                     *listener; /* needed for TFO */
        u32                             rcv_isn;
        u32                             snt_isn;
        u32                             snt_synack; /* synack sent time */
+       u32                             rcv_nxt; /* the ack # by SYNACK. For
+                                                 * FastOpen it's the seq#
+                                                 * after data-in-SYN.
+                                                 */
 };
 
 static inline struct tcp_request_sock *tcp_rsk(const struct request_sock *req)
@@ -505,14 +512,18 @@ struct tcp_sock {
        struct tcp_md5sig_info  __rcu *md5sig_info;
 #endif
 
-/* TCP fastopen related information */
-       struct tcp_fastopen_request *fastopen_req;
-
        /* When the cookie options are generated and exchanged, then this
         * object holds a reference to them (cookie_values->kref).  Also
         * contains related tcp_cookie_transactions fields.
         */
        struct tcp_cookie_values  *cookie_values;
+
+/* TCP fastopen related information */
+       struct tcp_fastopen_request *fastopen_req;
+       /* fastopen_rsk points to request_sock that resulted in this big
+        * socket. Used to retransmit SYNACKs etc.
+        */
+       struct request_sock *fastopen_rsk;
 };
 
 enum tsq_flags {
@@ -552,6 +563,38 @@ static inline struct tcp_timewait_sock *tcp_twsk(const struct sock *sk)
        return (struct tcp_timewait_sock *)sk;
 }
 
+static inline bool tcp_passive_fastopen(const struct sock *sk)
+{
+       return (sk->sk_state == TCP_SYN_RECV &&
+               tcp_sk(sk)->fastopen_rsk != NULL);
+}
+
+static inline bool fastopen_cookie_present(struct tcp_fastopen_cookie *foc)
+{
+       return foc->len != -1;
+}
+
+extern void tcp_sock_destruct(struct sock *sk);
+
+static inline int fastopen_init_queue(struct sock *sk, int backlog)
+{
+       struct request_sock_queue *queue =
+           &inet_csk(sk)->icsk_accept_queue;
+
+       if (queue->fastopenq == NULL) {
+               queue->fastopenq = kzalloc(
+                   sizeof(struct fastopen_queue),
+                   sk->sk_allocation);
+               if (queue->fastopenq == NULL)
+                       return -ENOMEM;
+
+               sk->sk_destruct = tcp_sock_destruct;
+               spin_lock_init(&queue->fastopenq->lock);
+       }
+       queue->fastopenq->max_qlen = backlog;
+       return 0;
+}
+
 #endif /* __KERNEL__ */
 
 #endif /* _LINUX_TCP_H */
diff --git a/include/linux/tcp_metrics.h b/include/linux/tcp_metrics.h
new file mode 100644 (file)
index 0000000..cb5157b
--- /dev/null
@@ -0,0 +1,54 @@
+/* tcp_metrics.h - TCP Metrics Interface */
+
+#ifndef _LINUX_TCP_METRICS_H
+#define _LINUX_TCP_METRICS_H
+
+#include <linux/types.h>
+
+/* NETLINK_GENERIC related info
+ */
+#define TCP_METRICS_GENL_NAME          "tcp_metrics"
+#define TCP_METRICS_GENL_VERSION       0x1
+
+enum tcp_metric_index {
+       TCP_METRIC_RTT,
+       TCP_METRIC_RTTVAR,
+       TCP_METRIC_SSTHRESH,
+       TCP_METRIC_CWND,
+       TCP_METRIC_REORDERING,
+
+       /* Always last.  */
+       __TCP_METRIC_MAX,
+};
+
+#define TCP_METRIC_MAX (__TCP_METRIC_MAX - 1)
+
+enum {
+       TCP_METRICS_ATTR_UNSPEC,
+       TCP_METRICS_ATTR_ADDR_IPV4,             /* u32 */
+       TCP_METRICS_ATTR_ADDR_IPV6,             /* binary */
+       TCP_METRICS_ATTR_AGE,                   /* msecs */
+       TCP_METRICS_ATTR_TW_TSVAL,              /* u32, raw, rcv tsval */
+       TCP_METRICS_ATTR_TW_TS_STAMP,           /* s32, sec age */
+       TCP_METRICS_ATTR_VALS,                  /* nested +1, u32 */
+       TCP_METRICS_ATTR_FOPEN_MSS,             /* u16 */
+       TCP_METRICS_ATTR_FOPEN_SYN_DROPS,       /* u16, count of drops */
+       TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS,     /* msecs age */
+       TCP_METRICS_ATTR_FOPEN_COOKIE,          /* binary */
+
+       __TCP_METRICS_ATTR_MAX,
+};
+
+#define TCP_METRICS_ATTR_MAX   (__TCP_METRICS_ATTR_MAX - 1)
+
+enum {
+       TCP_METRICS_CMD_UNSPEC,
+       TCP_METRICS_CMD_GET,
+       TCP_METRICS_CMD_DEL,
+
+       __TCP_METRICS_CMD_MAX,
+};
+
+#define TCP_METRICS_CMD_MAX    (__TCP_METRICS_CMD_MAX - 1)
+
+#endif /* _LINUX_TCP_METRICS_H */
index c98928420100962e005aabc73ecfd9b73d395fea..0b1e3f218a36b95ff1f8163e89a365ae9d705dc6 100644 (file)
@@ -89,8 +89,8 @@
 
 #define  TIPC_CMD_GET_REMOTE_MNG    0x4003    /* tx none, rx unsigned */
 #define  TIPC_CMD_GET_MAX_PORTS     0x4004    /* tx none, rx unsigned */
-#define  TIPC_CMD_GET_MAX_PUBL      0x4005    /* tx none, rx unsigned */
-#define  TIPC_CMD_GET_MAX_SUBSCR    0x4006    /* tx none, rx unsigned */
+#define  TIPC_CMD_GET_MAX_PUBL      0x4005    /* obsoleted */
+#define  TIPC_CMD_GET_MAX_SUBSCR    0x4006    /* obsoleted */
 #define  TIPC_CMD_GET_MAX_ZONES     0x4007    /* obsoleted */
 #define  TIPC_CMD_GET_MAX_CLUSTERS  0x4008    /* obsoleted */
 #define  TIPC_CMD_GET_MAX_NODES     0x4009    /* obsoleted */
 #define  TIPC_CMD_SET_NODE_ADDR     0x8001    /* tx net_addr, rx none */
 #define  TIPC_CMD_SET_REMOTE_MNG    0x8003    /* tx unsigned, rx none */
 #define  TIPC_CMD_SET_MAX_PORTS     0x8004    /* tx unsigned, rx none */
-#define  TIPC_CMD_SET_MAX_PUBL      0x8005    /* tx unsigned, rx none */
-#define  TIPC_CMD_SET_MAX_SUBSCR    0x8006    /* tx unsigned, rx none */
+#define  TIPC_CMD_SET_MAX_PUBL      0x8005    /* obsoleted */
+#define  TIPC_CMD_SET_MAX_SUBSCR    0x8006    /* obsoleted */
 #define  TIPC_CMD_SET_MAX_ZONES     0x8007    /* obsoleted */
 #define  TIPC_CMD_SET_MAX_CLUSTERS  0x8008    /* obsoleted */
 #define  TIPC_CMD_SET_MAX_NODES     0x8009    /* obsoleted */
index fdc718abf83becac29f7dd3a01cabd8b6ad7f068..fcb627ff8d3eb71ad49ced6763a0ff061b83f62e 100644 (file)
@@ -32,6 +32,7 @@
 extern int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf);
 extern int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash);
 extern int tpm_send(u32 chip_num, void *cmd, size_t buflen);
+extern int tpm_get_random(u32 chip_num, u8 *data, size_t max);
 #else
 static inline int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf) {
        return -ENODEV;
@@ -42,5 +43,8 @@ static inline int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash) {
 static inline int tpm_send(u32 chip_num, void *cmd, size_t buflen) {
        return -ENODEV;
 }
+static inline int tpm_get_random(u32 chip_num, u8 *data, size_t max) {
+       return -ENODEV;
+}
 #endif
 #endif
index 7e50ac795b0b05b55b747fc96b07da8bcfcb6977..44893e5ec8f74c6d3bd358cb250a24c5aea25541 100644 (file)
 #include <linux/taskstats.h>
 
 #ifdef CONFIG_TASKSTATS
-extern void bacct_add_tsk(struct taskstats *stats, struct task_struct *tsk);
+extern void bacct_add_tsk(struct user_namespace *user_ns,
+                         struct pid_namespace *pid_ns,
+                         struct taskstats *stats, struct task_struct *tsk);
 #else
-static inline void bacct_add_tsk(struct taskstats *stats, struct task_struct *tsk)
+static inline void bacct_add_tsk(struct user_namespace *user_ns,
+                                struct pid_namespace *pid_ns,
+                                struct taskstats *stats, struct task_struct *tsk)
 {}
 #endif /* CONFIG_TASKSTATS */
 
index 1509b86825d8ec710713405b47d9931f465ddb89..4f6c59a5fb7941996e8062dd41efe99ec3623555 100644 (file)
@@ -575,7 +575,7 @@ extern void tty_audit_fork(struct signal_struct *sig);
 extern void tty_audit_tiocsti(struct tty_struct *tty, char ch);
 extern void tty_audit_push(struct tty_struct *tty);
 extern int tty_audit_push_task(struct task_struct *tsk,
-                              uid_t loginuid, u32 sessionid);
+                              kuid_t loginuid, u32 sessionid);
 #else
 static inline void tty_audit_add_data(struct tty_struct *tty,
                                      unsigned char *data, size_t size)
@@ -594,7 +594,7 @@ static inline void tty_audit_push(struct tty_struct *tty)
 {
 }
 static inline int tty_audit_push_task(struct task_struct *tsk,
-                                     uid_t loginuid, u32 sessionid)
+                                     kuid_t loginuid, u32 sessionid)
 {
        return 0;
 }
index 2aa2881b0df98a6faaee25917da8b9d499c533c7..c454bbe39ee78688d6c5e797236649148c909900 100644 (file)
@@ -32,6 +32,7 @@
  *             - first public version
  */
 
+#include <linux/types.h>
 #include <linux/input.h>
 
 #define UINPUT_VERSION         3
 enum uinput_state { UIST_NEW_DEVICE, UIST_SETUP_COMPLETE, UIST_CREATED };
 
 struct uinput_request {
-       int                     id;
-       int                     code;   /* UI_FF_UPLOAD, UI_FF_ERASE */
+       unsigned int            id;
+       unsigned int            code;   /* UI_FF_UPLOAD, UI_FF_ERASE */
 
        int                     retval;
        struct completion       done;
 
        union {
-               int             effect_id;
+               unsigned int    effect_id;
                struct {
                        struct ff_effect *effect;
                        struct ff_effect *old;
@@ -77,16 +78,16 @@ struct uinput_device {
 #endif /* __KERNEL__ */
 
 struct uinput_ff_upload {
-       int                     request_id;
-       int                     retval;
+       __u32                   request_id;
+       __s32                   retval;
        struct ff_effect        effect;
        struct ff_effect        old;
 };
 
 struct uinput_ff_erase {
-       int                     request_id;
-       int                     retval;
-       int                     effect_id;
+       __u32                   request_id;
+       __s32                   retval;
+       __u32                   effect_id;
 };
 
 /* ioctl */
@@ -166,11 +167,11 @@ struct uinput_ff_erase {
 struct uinput_user_dev {
        char name[UINPUT_MAX_NAME_SIZE];
        struct input_id id;
-       int ff_effects_max;
-       int absmax[ABS_CNT];
-       int absmin[ABS_CNT];
-       int absfuzz[ABS_CNT];
-       int absflat[ABS_CNT];
+       __u32 ff_effects_max;
+       __s32 absmax[ABS_CNT];
+       __s32 absmin[ABS_CNT];
+       __s32 absfuzz[ABS_CNT];
+       __s32 absflat[ABS_CNT];
 };
 #endif /* __UINPUT_H_ */
 
index 4e72922e5a751c150386e4eb9f8e961ac1051d67..95142cae446a7205e9887029a7433eff1b147d81 100644 (file)
@@ -20,6 +20,7 @@ struct uid_gid_map {  /* 64 bytes -- 1 cache line */
 struct user_namespace {
        struct uid_gid_map      uid_map;
        struct uid_gid_map      gid_map;
+       struct uid_gid_map      projid_map;
        struct kref             kref;
        struct user_namespace   *parent;
        kuid_t                  owner;
@@ -49,8 +50,10 @@ static inline void put_user_ns(struct user_namespace *ns)
 struct seq_operations;
 extern struct seq_operations proc_uid_seq_operations;
 extern struct seq_operations proc_gid_seq_operations;
+extern struct seq_operations proc_projid_seq_operations;
 extern ssize_t proc_uid_map_write(struct file *, const char __user *, size_t, loff_t *);
 extern ssize_t proc_gid_map_write(struct file *, const char __user *, size_t, loff_t *);
+extern ssize_t proc_projid_map_write(struct file *, const char __user *, size_t, loff_t *);
 #else
 
 static inline struct user_namespace *get_user_ns(struct user_namespace *ns)
index af155450cabb8a91ec33d2991a011d43437e23f6..2b58905d3504f71f71bb7636f2e195c80090c577 100644 (file)
@@ -16,6 +16,7 @@ struct workqueue_struct;
 
 struct work_struct;
 typedef void (*work_func_t)(struct work_struct *work);
+void delayed_work_timer_fn(unsigned long __data);
 
 /*
  * The first word is the work queue pointer and the flags rolled into
@@ -67,9 +68,18 @@ enum {
        WORK_STRUCT_FLAG_BITS   = WORK_STRUCT_COLOR_SHIFT +
                                  WORK_STRUCT_COLOR_BITS,
 
+       /* data contains off-queue information when !WORK_STRUCT_CWQ */
+       WORK_OFFQ_FLAG_BASE     = WORK_STRUCT_FLAG_BITS,
+
+       WORK_OFFQ_CANCELING     = (1 << WORK_OFFQ_FLAG_BASE),
+
+       WORK_OFFQ_FLAG_BITS     = 1,
+       WORK_OFFQ_CPU_SHIFT     = WORK_OFFQ_FLAG_BASE + WORK_OFFQ_FLAG_BITS,
+
+       /* convenience constants */
        WORK_STRUCT_FLAG_MASK   = (1UL << WORK_STRUCT_FLAG_BITS) - 1,
        WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK,
-       WORK_STRUCT_NO_CPU      = WORK_CPU_NONE << WORK_STRUCT_FLAG_BITS,
+       WORK_STRUCT_NO_CPU      = (unsigned long)WORK_CPU_NONE << WORK_OFFQ_CPU_SHIFT,
 
        /* bit mask for work_busy() return values */
        WORK_BUSY_PENDING       = 1 << 0,
@@ -92,6 +102,7 @@ struct work_struct {
 struct delayed_work {
        struct work_struct work;
        struct timer_list timer;
+       int cpu;
 };
 
 static inline struct delayed_work *to_delayed_work(struct work_struct *work)
@@ -115,41 +126,38 @@ struct execute_work {
 #define __WORK_INIT_LOCKDEP_MAP(n, k)
 #endif
 
-#define __WORK_INITIALIZER(n, f) {                             \
-       .data = WORK_DATA_STATIC_INIT(),                        \
-       .entry  = { &(n).entry, &(n).entry },                   \
-       .func = (f),                                            \
-       __WORK_INIT_LOCKDEP_MAP(#n, &(n))                       \
+#define __WORK_INITIALIZER(n, f) {                                     \
+       .data = WORK_DATA_STATIC_INIT(),                                \
+       .entry  = { &(n).entry, &(n).entry },                           \
+       .func = (f),                                                    \
+       __WORK_INIT_LOCKDEP_MAP(#n, &(n))                               \
        }
 
-#define __DELAYED_WORK_INITIALIZER(n, f) {                     \
-       .work = __WORK_INITIALIZER((n).work, (f)),              \
-       .timer = TIMER_INITIALIZER(NULL, 0, 0),                 \
+#define __DELAYED_WORK_INITIALIZER(n, f, tflags) {                     \
+       .work = __WORK_INITIALIZER((n).work, (f)),                      \
+       .timer = __TIMER_INITIALIZER(delayed_work_timer_fn,             \
+                                    0, (unsigned long)&(n),            \
+                                    (tflags) | TIMER_IRQSAFE),         \
        }
 
-#define __DEFERRED_WORK_INITIALIZER(n, f) {                    \
-       .work = __WORK_INITIALIZER((n).work, (f)),              \
-       .timer = TIMER_DEFERRED_INITIALIZER(NULL, 0, 0),        \
-       }
-
-#define DECLARE_WORK(n, f)                                     \
+#define DECLARE_WORK(n, f)                                             \
        struct work_struct n = __WORK_INITIALIZER(n, f)
 
-#define DECLARE_DELAYED_WORK(n, f)                             \
-       struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f)
+#define DECLARE_DELAYED_WORK(n, f)                                     \
+       struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, 0)
 
-#define DECLARE_DEFERRED_WORK(n, f)                            \
-       struct delayed_work n = __DEFERRED_WORK_INITIALIZER(n, f)
+#define DECLARE_DEFERRABLE_WORK(n, f)                                  \
+       struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, TIMER_DEFERRABLE)
 
 /*
  * initialize a work item's function pointer
  */
-#define PREPARE_WORK(_work, _func)                             \
-       do {                                                    \
-               (_work)->func = (_func);                        \
+#define PREPARE_WORK(_work, _func)                                     \
+       do {                                                            \
+               (_work)->func = (_func);                                \
        } while (0)
 
-#define PREPARE_DELAYED_WORK(_work, _func)                     \
+#define PREPARE_DELAYED_WORK(_work, _func)                             \
        PREPARE_WORK(&(_work)->work, (_func))
 
 #ifdef CONFIG_DEBUG_OBJECTS_WORK
@@ -179,7 +187,7 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
                                                                        \
                __init_work((_work), _onstack);                         \
                (_work)->data = (atomic_long_t) WORK_DATA_INIT();       \
-               lockdep_init_map(&(_work)->lockdep_map, #_work, &__key, 0);\
+               lockdep_init_map(&(_work)->lockdep_map, #_work, &__key, 0); \
                INIT_LIST_HEAD(&(_work)->entry);                        \
                PREPARE_WORK((_work), (_func));                         \
        } while (0)
@@ -193,33 +201,44 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
        } while (0)
 #endif
 
-#define INIT_WORK(_work, _func)                                        \
-       do {                                                    \
-               __INIT_WORK((_work), (_func), 0);               \
+#define INIT_WORK(_work, _func)                                                \
+       do {                                                            \
+               __INIT_WORK((_work), (_func), 0);                       \
        } while (0)
 
-#define INIT_WORK_ONSTACK(_work, _func)                                \
-       do {                                                    \
-               __INIT_WORK((_work), (_func), 1);               \
+#define INIT_WORK_ONSTACK(_work, _func)                                        \
+       do {                                                            \
+               __INIT_WORK((_work), (_func), 1);                       \
        } while (0)
 
-#define INIT_DELAYED_WORK(_work, _func)                                \
-       do {                                                    \
-               INIT_WORK(&(_work)->work, (_func));             \
-               init_timer(&(_work)->timer);                    \
+#define __INIT_DELAYED_WORK(_work, _func, _tflags)                     \
+       do {                                                            \
+               INIT_WORK(&(_work)->work, (_func));                     \
+               __setup_timer(&(_work)->timer, delayed_work_timer_fn,   \
+                             (unsigned long)(_work),                   \
+                             (_tflags) | TIMER_IRQSAFE);               \
        } while (0)
 
-#define INIT_DELAYED_WORK_ONSTACK(_work, _func)                        \
-       do {                                                    \
-               INIT_WORK_ONSTACK(&(_work)->work, (_func));     \
-               init_timer_on_stack(&(_work)->timer);           \
+#define __INIT_DELAYED_WORK_ONSTACK(_work, _func, _tflags)             \
+       do {                                                            \
+               INIT_WORK_ONSTACK(&(_work)->work, (_func));             \
+               __setup_timer_on_stack(&(_work)->timer,                 \
+                                      delayed_work_timer_fn,           \
+                                      (unsigned long)(_work),          \
+                                      (_tflags) | TIMER_IRQSAFE);      \
        } while (0)
 
-#define INIT_DELAYED_WORK_DEFERRABLE(_work, _func)             \
-       do {                                                    \
-               INIT_WORK(&(_work)->work, (_func));             \
-               init_timer_deferrable(&(_work)->timer);         \
-       } while (0)
+#define INIT_DELAYED_WORK(_work, _func)                                        \
+       __INIT_DELAYED_WORK(_work, _func, 0)
+
+#define INIT_DELAYED_WORK_ONSTACK(_work, _func)                                \
+       __INIT_DELAYED_WORK_ONSTACK(_work, _func, 0)
+
+#define INIT_DEFERRABLE_WORK(_work, _func)                             \
+       __INIT_DELAYED_WORK(_work, _func, TIMER_DEFERRABLE)
+
+#define INIT_DEFERRABLE_WORK_ONSTACK(_work, _func)                     \
+       __INIT_DELAYED_WORK_ONSTACK(_work, _func, TIMER_DEFERRABLE)
 
 /**
  * work_pending - Find out whether a work item is currently pending
@@ -278,10 +297,6 @@ enum {
  * system_long_wq is similar to system_wq but may host long running
  * works.  Queue flushing might take relatively long.
  *
- * system_nrt_wq is non-reentrant and guarantees that any given work
- * item is never executed in parallel by multiple CPUs.  Queue
- * flushing might take relatively long.
- *
  * system_unbound_wq is unbound workqueue.  Workers are not bound to
  * any specific CPU, not concurrency managed, and all queued works are
  * executed immediately as long as max_active limit is not reached and
@@ -289,16 +304,25 @@ enum {
  *
  * system_freezable_wq is equivalent to system_wq except that it's
  * freezable.
- *
- * system_nrt_freezable_wq is equivalent to system_nrt_wq except that
- * it's freezable.
  */
 extern struct workqueue_struct *system_wq;
 extern struct workqueue_struct *system_long_wq;
-extern struct workqueue_struct *system_nrt_wq;
 extern struct workqueue_struct *system_unbound_wq;
 extern struct workqueue_struct *system_freezable_wq;
-extern struct workqueue_struct *system_nrt_freezable_wq;
+
+static inline struct workqueue_struct * __deprecated __system_nrt_wq(void)
+{
+       return system_wq;
+}
+
+static inline struct workqueue_struct * __deprecated __system_nrt_freezable_wq(void)
+{
+       return system_freezable_wq;
+}
+
+/* equivalent to system_wq and system_freezable_wq, deprecated */
+#define system_nrt_wq                  __system_nrt_wq()
+#define system_nrt_freezable_wq                __system_nrt_freezable_wq()
 
 extern struct workqueue_struct *
 __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
@@ -321,22 +345,22 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
  * Pointer to the allocated workqueue on success, %NULL on failure.
  */
 #ifdef CONFIG_LOCKDEP
-#define alloc_workqueue(fmt, flags, max_active, args...)       \
-({                                                             \
-       static struct lock_class_key __key;                     \
-       const char *__lock_name;                                \
-                                                               \
-       if (__builtin_constant_p(fmt))                          \
-               __lock_name = (fmt);                            \
-       else                                                    \
-               __lock_name = #fmt;                             \
-                                                               \
-       __alloc_workqueue_key((fmt), (flags), (max_active),     \
-                             &__key, __lock_name, ##args);     \
+#define alloc_workqueue(fmt, flags, max_active, args...)               \
+({                                                                     \
+       static struct lock_class_key __key;                             \
+       const char *__lock_name;                                        \
+                                                                       \
+       if (__builtin_constant_p(fmt))                                  \
+               __lock_name = (fmt);                                    \
+       else                                                            \
+               __lock_name = #fmt;                                     \
+                                                                       \
+       __alloc_workqueue_key((fmt), (flags), (max_active),             \
+                             &__key, __lock_name, ##args);             \
 })
 #else
-#define alloc_workqueue(fmt, flags, max_active, args...)       \
-       __alloc_workqueue_key((fmt), (flags), (max_active),     \
+#define alloc_workqueue(fmt, flags, max_active, args...)               \
+       __alloc_workqueue_key((fmt), (flags), (max_active),             \
                              NULL, NULL, ##args)
 #endif
 
@@ -353,46 +377,50 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
  * RETURNS:
  * Pointer to the allocated workqueue on success, %NULL on failure.
  */
-#define alloc_ordered_workqueue(fmt, flags, args...)           \
+#define alloc_ordered_workqueue(fmt, flags, args...)                   \
        alloc_workqueue(fmt, WQ_UNBOUND | (flags), 1, ##args)
 
-#define create_workqueue(name)                                 \
+#define create_workqueue(name)                                         \
        alloc_workqueue((name), WQ_MEM_RECLAIM, 1)
-#define create_freezable_workqueue(name)                       \
+#define create_freezable_workqueue(name)                               \
        alloc_workqueue((name), WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
-#define create_singlethread_workqueue(name)                    \
+#define create_singlethread_workqueue(name)                            \
        alloc_workqueue((name), WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
 
 extern void destroy_workqueue(struct workqueue_struct *wq);
 
-extern int queue_work(struct workqueue_struct *wq, struct work_struct *work);
-extern int queue_work_on(int cpu, struct workqueue_struct *wq,
+extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
                        struct work_struct *work);
-extern int queue_delayed_work(struct workqueue_struct *wq,
+extern bool queue_work(struct workqueue_struct *wq, struct work_struct *work);
+extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
                        struct delayed_work *work, unsigned long delay);
-extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
+extern bool queue_delayed_work(struct workqueue_struct *wq,
                        struct delayed_work *work, unsigned long delay);
+extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
+                       struct delayed_work *dwork, unsigned long delay);
+extern bool mod_delayed_work(struct workqueue_struct *wq,
+                       struct delayed_work *dwork, unsigned long delay);
 
 extern void flush_workqueue(struct workqueue_struct *wq);
 extern void drain_workqueue(struct workqueue_struct *wq);
 extern void flush_scheduled_work(void);
 
-extern int schedule_work(struct work_struct *work);
-extern int schedule_work_on(int cpu, struct work_struct *work);
-extern int schedule_delayed_work(struct delayed_work *work, unsigned long delay);
-extern int schedule_delayed_work_on(int cpu, struct delayed_work *work,
-                                       unsigned long delay);
+extern bool schedule_work_on(int cpu, struct work_struct *work);
+extern bool schedule_work(struct work_struct *work);
+extern bool schedule_delayed_work_on(int cpu, struct delayed_work *work,
+                                    unsigned long delay);
+extern bool schedule_delayed_work(struct delayed_work *work,
+                                 unsigned long delay);
 extern int schedule_on_each_cpu(work_func_t func);
 extern int keventd_up(void);
 
 int execute_in_process_context(work_func_t fn, struct execute_work *);
 
 extern bool flush_work(struct work_struct *work);
-extern bool flush_work_sync(struct work_struct *work);
 extern bool cancel_work_sync(struct work_struct *work);
 
 extern bool flush_delayed_work(struct delayed_work *dwork);
-extern bool flush_delayed_work_sync(struct delayed_work *work);
+extern bool cancel_delayed_work(struct delayed_work *dwork);
 extern bool cancel_delayed_work_sync(struct delayed_work *dwork);
 
 extern void workqueue_set_max_active(struct workqueue_struct *wq,
@@ -401,28 +429,12 @@ extern bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq);
 extern unsigned int work_cpu(struct work_struct *work);
 extern unsigned int work_busy(struct work_struct *work);
 
-/*
- * Kill off a pending schedule_delayed_work().  Note that the work callback
- * function may still be running on return from cancel_delayed_work(), unless
- * it returns 1 and the work doesn't re-arm itself. Run flush_workqueue() or
- * cancel_work_sync() to wait on it.
- */
-static inline bool cancel_delayed_work(struct delayed_work *work)
-{
-       bool ret;
-
-       ret = del_timer_sync(&work->timer);
-       if (ret)
-               work_clear_pending(&work->work);
-       return ret;
-}
-
 /*
  * Like above, but uses del_timer() instead of del_timer_sync(). This means,
  * if it returns 0 the timer function may be running and the queueing is in
  * progress.
  */
-static inline bool __cancel_delayed_work(struct delayed_work *work)
+static inline bool __deprecated __cancel_delayed_work(struct delayed_work *work)
 {
        bool ret;
 
@@ -432,6 +444,18 @@ static inline bool __cancel_delayed_work(struct delayed_work *work)
        return ret;
 }
 
+/* used to be different but now identical to flush_work(), deprecated */
+static inline bool __deprecated flush_work_sync(struct work_struct *work)
+{
+       return flush_work(work);
+}
+
+/* used to be different but now identical to flush_delayed_work(), deprecated */
+static inline bool __deprecated flush_delayed_work_sync(struct delayed_work *dwork)
+{
+       return flush_delayed_work(dwork);
+}
+
 #ifndef CONFIG_SMP
 static inline long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
 {
index e5d122031542f5e3628d5a3bbbd044f0f36f51a3..cc13e1115970455e4240ff09e69d34f610cf337d 100644 (file)
@@ -33,6 +33,9 @@
 #define XATTR_EVM_SUFFIX "evm"
 #define XATTR_NAME_EVM XATTR_SECURITY_PREFIX XATTR_EVM_SUFFIX
 
+#define XATTR_IMA_SUFFIX "ima"
+#define XATTR_NAME_IMA XATTR_SECURITY_PREFIX XATTR_IMA_SUFFIX
+
 #define XATTR_SELINUX_SUFFIX "selinux"
 #define XATTR_NAME_SELINUX XATTR_SECURITY_PREFIX XATTR_SELINUX_SUFFIX
 
@@ -59,7 +62,9 @@
 
 #ifdef  __KERNEL__
 
+#include <linux/slab.h>
 #include <linux/types.h>
+#include <linux/spinlock.h>
 
 struct inode;
 struct dentry;
@@ -96,6 +101,52 @@ ssize_t vfs_getxattr_alloc(struct dentry *dentry, const char *name,
                           char **xattr_value, size_t size, gfp_t flags);
 int vfs_xattr_cmp(struct dentry *dentry, const char *xattr_name,
                  const char *value, size_t size, gfp_t flags);
+
+struct simple_xattrs {
+       struct list_head head;
+       spinlock_t lock;
+};
+
+struct simple_xattr {
+       struct list_head list;
+       char *name;
+       size_t size;
+       char value[0];
+};
+
+/*
+ * initialize the simple_xattrs structure
+ */
+static inline void simple_xattrs_init(struct simple_xattrs *xattrs)
+{
+       INIT_LIST_HEAD(&xattrs->head);
+       spin_lock_init(&xattrs->lock);
+}
+
+/*
+ * free all the xattrs
+ */
+static inline void simple_xattrs_free(struct simple_xattrs *xattrs)
+{
+       struct simple_xattr *xattr, *node;
+
+       list_for_each_entry_safe(xattr, node, &xattrs->head, list) {
+               kfree(xattr->name);
+               kfree(xattr);
+       }
+}
+
+struct simple_xattr *simple_xattr_alloc(const void *value, size_t size);
+int simple_xattr_get(struct simple_xattrs *xattrs, const char *name,
+                    void *buffer, size_t size);
+int simple_xattr_set(struct simple_xattrs *xattrs, const char *name,
+                    const void *value, size_t size, int flags);
+int simple_xattr_remove(struct simple_xattrs *xattrs, const char *name);
+ssize_t simple_xattr_list(struct simple_xattrs *xattrs, char *buffer,
+                         size_t size);
+void simple_xattr_list_add(struct simple_xattrs *xattrs,
+                          struct simple_xattr *new_xattr);
+
 #endif  /*  __KERNEL__  */
 
 #endif /* _LINUX_XATTR_H */
index 8787349fbafe2eb79bd07ca21171f0d5c001a49e..53cae1e11e57e18ee9dba30454155101995d678f 100644 (file)
@@ -222,6 +222,7 @@ enum {
  * @ubi_num: UBI device number to create
  * @mtd_num: MTD device number to attach
  * @vid_hdr_offset: VID header offset (use defaults if %0)
+ * @max_beb_per1024: maximum expected number of bad PEB per 1024 PEBs
  * @padding: reserved for future, not used, has to be zeroed
  *
  * This data structure is used to specify MTD device UBI has to attach and the
@@ -245,12 +246,25 @@ enum {
  * be 2KiB-64 bytes = 1984. Note, that this position is not even 512-bytes
  * aligned, which is OK, as UBI is clever enough to realize this is 4th
  * sub-page of the first page and add needed padding.
+ *
+ * The @max_beb_per1024 is the maximum amount of bad PEBs UBI expects on the
+ * UBI device per 1024 eraseblocks.  This value is often given in another form
+ * in the NAND datasheet (min NVB i.e. minimal number of valid blocks). The
+ * maximum expected bad eraseblocks per 1024 is then:
+ *    1024 * (1 - MinNVB / MaxNVB)
+ * Which gives 20 for most NAND devices.  This limit is used in order to derive
+ * amount of eraseblock UBI reserves for handling new bad blocks. If the device
+ * has more bad eraseblocks than this limit, UBI does not reserve any physical
+ * eraseblocks for new bad eraseblocks, but attempts to use available
+ * eraseblocks (if any). The accepted range is 0-768. If 0 is given, the
+ * default kernel value of %CONFIG_MTD_UBI_BEB_LIMIT will be used.
  */
 struct ubi_attach_req {
        __s32 ubi_num;
        __s32 mtd_num;
        __s32 vid_hdr_offset;
-       __s8 padding[12];
+       __s16 max_beb_per1024;
+       __s8 padding[10];
 };
 
 /**
index 089a09d001d12be5c7c99e32ac66614b0125bc82..9e63e76b20e7e0ce4b60aa5daf0e113b6985ae16 100644 (file)
@@ -78,7 +78,7 @@ extern struct inet6_ifaddr      *ipv6_get_ifaddr(struct net *net,
                                                 int strict);
 
 extern int                     ipv6_dev_get_saddr(struct net *net,
-                                              struct net_device *dev,
+                                              const struct net_device *dev,
                                               const struct in6_addr *daddr,
                                               unsigned int srcprefs,
                                               struct in6_addr *saddr);
index 7f7df93f37cd0d3b3259775ebf6ab23743b3c564..b630dae03411ae69694e842d75108a254c0584aa 100644 (file)
@@ -3,6 +3,7 @@
 #define _ARP_H
 
 #include <linux/if_arp.h>
+#include <linux/hash.h>
 #include <net/neighbour.h>
 
 
@@ -10,7 +11,7 @@ extern struct neigh_table arp_tbl;
 
 static inline u32 arp_hashfn(u32 key, const struct net_device *dev, u32 hash_rnd)
 {
-       u32 val = key ^ dev->ifindex;
+       u32 val = key ^ hash32_ptr(dev);
 
        return val * hash_rnd;
 }
index 5d2352154cf67f7a116309fdcbf756e79d3b608e..53539acbd81a518a3123c7a4984d6666acd47df1 100644 (file)
@@ -157,7 +157,7 @@ enum {
 typedef struct ax25_uid_assoc {
        struct hlist_node       uid_node;
        atomic_t                refcount;
-       uid_t                   uid;
+       kuid_t                  uid;
        ax25_address            call;
 } ax25_uid_assoc;
 
@@ -434,7 +434,7 @@ extern unsigned long ax25_display_timer(struct timer_list *);
 
 /* ax25_uid.c */
 extern int  ax25_uid_policy;
-extern ax25_uid_assoc *ax25_findbyuid(uid_t);
+extern ax25_uid_assoc *ax25_findbyuid(kuid_t);
 extern int __must_check ax25_uid_ioctl(int, struct sockaddr_ax25 *);
 extern const struct file_operations ax25_uid_fops;
 extern void ax25_uid_free(void);
index 565d4bee1e493bbaa145f5e3621bc27e4295727f..ede036977ae8b6debe3ee4560f7e93905bd92f9a 100644 (file)
@@ -27,6 +27,7 @@
 
 #include <linux/poll.h>
 #include <net/sock.h>
+#include <linux/seq_file.h>
 
 #ifndef AF_BLUETOOTH
 #define AF_BLUETOOTH   31
@@ -202,6 +203,10 @@ enum {
 struct bt_sock_list {
        struct hlist_head head;
        rwlock_t          lock;
+#ifdef CONFIG_PROC_FS
+        struct file_operations   fops;
+        int (* custom_seq_show)(struct seq_file *, void *);
+#endif
 };
 
 int  bt_sock_register(int proto, const struct net_proto_family *ops);
@@ -292,6 +297,11 @@ extern void hci_sock_cleanup(void);
 extern int bt_sysfs_init(void);
 extern void bt_sysfs_cleanup(void);
 
+extern int  bt_procfs_init(struct module* module, struct net *net, const char *name,
+                          struct bt_sock_list* sk_list,
+                          int (* seq_show)(struct seq_file *, void *));
+extern void bt_procfs_cleanup(struct net *net, const char *name);
+
 extern struct dentry *bt_debugfs;
 
 int l2cap_init(void);
index ccd723e0f783e34a14a382e116a72e2a0369becb..76b2b6bdcf36a281d558925a2a7a707ca2e1dddf 100644 (file)
 /* First BR/EDR Controller shall have ID = 0 */
 #define HCI_BREDR_ID   0
 
+/* AMP controller status */
+#define AMP_CTRL_POWERED_DOWN                  0x00
+#define AMP_CTRL_BLUETOOTH_ONLY                        0x01
+#define AMP_CTRL_NO_CAPACITY                   0x02
+#define AMP_CTRL_LOW_CAPACITY                  0x03
+#define AMP_CTRL_MEDIUM_CAPACITY               0x04
+#define AMP_CTRL_HIGH_CAPACITY                 0x05
+#define AMP_CTRL_FULL_CAPACITY                 0x06
+
 /* HCI device quirks */
 enum {
        HCI_QUIRK_RESET_ON_CLOSE,
@@ -293,8 +302,11 @@ enum {
 
 /* ---- HCI Error Codes ---- */
 #define HCI_ERROR_AUTH_FAILURE         0x05
+#define HCI_ERROR_CONNECTION_TIMEOUT   0x08
 #define HCI_ERROR_REJ_BAD_ADDR         0x0f
 #define HCI_ERROR_REMOTE_USER_TERM     0x13
+#define HCI_ERROR_REMOTE_LOW_RESOURCES 0x14
+#define HCI_ERROR_REMOTE_POWER_OFF     0x15
 #define HCI_ERROR_LOCAL_HOST_TERM      0x16
 #define HCI_ERROR_PAIRING_NOT_ALLOWED  0x18
 
@@ -1237,6 +1249,24 @@ struct hci_ev_simple_pair_complete {
        bdaddr_t bdaddr;
 } __packed;
 
+#define HCI_EV_USER_PASSKEY_NOTIFY     0x3b
+struct hci_ev_user_passkey_notify {
+       bdaddr_t        bdaddr;
+       __le32          passkey;
+} __packed;
+
+#define HCI_KEYPRESS_STARTED           0
+#define HCI_KEYPRESS_ENTERED           1
+#define HCI_KEYPRESS_ERASED            2
+#define HCI_KEYPRESS_CLEARED           3
+#define HCI_KEYPRESS_COMPLETED         4
+
+#define HCI_EV_KEYPRESS_NOTIFY         0x3c
+struct hci_ev_keypress_notify {
+       bdaddr_t        bdaddr;
+       __u8            type;
+} __packed;
+
 #define HCI_EV_REMOTE_HOST_FEATURES    0x3d
 struct hci_ev_remote_host_features {
        bdaddr_t bdaddr;
@@ -1295,6 +1325,8 @@ struct hci_ev_num_comp_blocks {
 } __packed;
 
 /* Low energy meta events */
+#define LE_CONN_ROLE_MASTER    0x00
+
 #define HCI_EV_LE_CONN_COMPLETE                0x01
 struct hci_ev_le_conn_complete {
        __u8     status;
index 475b8c04ba52c01530f9a17a50f2f895211c7e2b..e7d454609881a30d929ec8aa613390df157c8b43 100644 (file)
@@ -115,12 +115,6 @@ struct oob_data {
        u8 randomizer[16];
 };
 
-struct adv_entry {
-       struct list_head list;
-       bdaddr_t bdaddr;
-       u8 bdaddr_type;
-};
-
 struct le_scan_params {
        u8 type;
        u16 interval;
@@ -309,6 +303,8 @@ struct hci_conn {
        __u8            pin_length;
        __u8            enc_key_size;
        __u8            io_capability;
+       __u32           passkey_notify;
+       __u8            passkey_entered;
        __u16           disc_timeout;
        unsigned long   flags;
 
@@ -356,16 +352,16 @@ extern rwlock_t hci_cb_list_lock;
 
 /* ----- HCI interface to upper protocols ----- */
 extern int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr);
-extern int l2cap_connect_cfm(struct hci_conn *hcon, u8 status);
+extern void l2cap_connect_cfm(struct hci_conn *hcon, u8 status);
 extern int l2cap_disconn_ind(struct hci_conn *hcon);
-extern int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason);
+extern void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason);
 extern int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt);
 extern int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb,
                              u16 flags);
 
 extern int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr);
-extern int sco_connect_cfm(struct hci_conn *hcon, __u8 status);
-extern int sco_disconn_cfm(struct hci_conn *hcon, __u8 reason);
+extern void sco_connect_cfm(struct hci_conn *hcon, __u8 status);
+extern void sco_disconn_cfm(struct hci_conn *hcon, __u8 reason);
 extern int sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb);
 
 /* ----- Inquiry cache ----- */
@@ -434,15 +430,6 @@ static inline bool hci_conn_ssp_enabled(struct hci_conn *conn)
               test_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
 }
 
-static inline void hci_conn_hash_init(struct hci_dev *hdev)
-{
-       struct hci_conn_hash *h = &hdev->conn_hash;
-       INIT_LIST_HEAD(&h->list);
-       h->acl_num = 0;
-       h->sco_num = 0;
-       h->le_num = 0;
-}
-
 static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c)
 {
        struct hci_conn_hash *h = &hdev->conn_hash;
@@ -557,9 +544,7 @@ static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev,
        return NULL;
 }
 
-void hci_acl_connect(struct hci_conn *conn);
 void hci_acl_disconn(struct hci_conn *conn, __u8 reason);
-void hci_add_sco(struct hci_conn *conn, __u16 handle);
 void hci_setup_sync(struct hci_conn *conn, __u16 handle);
 void hci_sco_setup(struct hci_conn *conn, __u8 status);
 
@@ -569,7 +554,7 @@ void hci_conn_hash_flush(struct hci_dev *hdev);
 void hci_conn_check_pending(struct hci_dev *hdev);
 
 struct hci_chan *hci_chan_create(struct hci_conn *conn);
-int hci_chan_del(struct hci_chan *chan);
+void hci_chan_del(struct hci_chan *chan);
 void hci_chan_list_flush(struct hci_conn *conn);
 
 struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
@@ -587,8 +572,7 @@ void hci_conn_put_device(struct hci_conn *conn);
 
 static inline void hci_conn_hold(struct hci_conn *conn)
 {
-       BT_DBG("hcon %p refcnt %d -> %d", conn, atomic_read(&conn->refcnt),
-              atomic_read(&conn->refcnt) + 1);
+       BT_DBG("hcon %p orig refcnt %d", conn, atomic_read(&conn->refcnt));
 
        atomic_inc(&conn->refcnt);
        cancel_delayed_work(&conn->disc_work);
@@ -596,8 +580,7 @@ static inline void hci_conn_hold(struct hci_conn *conn)
 
 static inline void hci_conn_put(struct hci_conn *conn)
 {
-       BT_DBG("hcon %p refcnt %d -> %d", conn, atomic_read(&conn->refcnt),
-              atomic_read(&conn->refcnt) - 1);
+       BT_DBG("hcon %p orig refcnt %d", conn, atomic_read(&conn->refcnt));
 
        if (atomic_dec_and_test(&conn->refcnt)) {
                unsigned long timeo;
@@ -622,11 +605,17 @@ static inline void hci_conn_put(struct hci_conn *conn)
 /* ----- HCI Devices ----- */
 static inline void hci_dev_put(struct hci_dev *d)
 {
+       BT_DBG("%s orig refcnt %d", d->name,
+              atomic_read(&d->dev.kobj.kref.refcount));
+
        put_device(&d->dev);
 }
 
 static inline struct hci_dev *hci_dev_hold(struct hci_dev *d)
 {
+       BT_DBG("%s orig refcnt %d", d->name,
+              atomic_read(&d->dev.kobj.kref.refcount));
+
        get_device(&d->dev);
        return d;
 }
@@ -1012,7 +1001,7 @@ int mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
                          u8 addr_type, u32 flags, u8 *name, u8 name_len,
                          u8 *dev_class);
 int mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
-                            u8 link_type, u8 addr_type);
+                            u8 link_type, u8 addr_type, u8 reason);
 int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
                           u8 link_type, u8 addr_type, u8 status);
 int mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
@@ -1035,6 +1024,9 @@ int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
                                     u8 link_type, u8 addr_type, u8 status);
 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
                                         u8 link_type, u8 addr_type, u8 status);
+int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
+                            u8 link_type, u8 addr_type, u32 passkey,
+                            u8 entered);
 int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
                     u8 addr_type, u8 status);
 int mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status);
@@ -1056,7 +1048,7 @@ int mgmt_discovering(struct hci_dev *hdev, u8 discovering);
 int mgmt_interleaved_discovery(struct hci_dev *hdev);
 int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
 int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
-
+bool mgmt_valid_hdev(struct hci_dev *hdev);
 int mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent);
 
 /* HCI info for socket */
index a7679f8913d2e9e8b9c14a0807bedd12c607b9cc..7ed8e356425a16dc33c5afd5e4a80eaafdd6ea64 100644 (file)
@@ -433,11 +433,10 @@ struct l2cap_chan {
        struct sock *sk;
 
        struct l2cap_conn       *conn;
+       struct kref     kref;
 
        __u8            state;
 
-       atomic_t        refcnt;
-
        __le16          psm;
        __u16           dcid;
        __u16           scid;
@@ -671,20 +670,8 @@ enum {
        L2CAP_EV_RECV_FRAME,
 };
 
-static inline void l2cap_chan_hold(struct l2cap_chan *c)
-{
-       BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->refcnt));
-
-       atomic_inc(&c->refcnt);
-}
-
-static inline void l2cap_chan_put(struct l2cap_chan *c)
-{
-       BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->refcnt));
-
-       if (atomic_dec_and_test(&c->refcnt))
-               kfree(c);
-}
+void l2cap_chan_hold(struct l2cap_chan *c);
+void l2cap_chan_put(struct l2cap_chan *c);
 
 static inline void l2cap_chan_lock(struct l2cap_chan *chan)
 {
@@ -771,7 +758,6 @@ int l2cap_add_scid(struct l2cap_chan *chan,  __u16 scid);
 
 struct l2cap_chan *l2cap_chan_create(void);
 void l2cap_chan_close(struct l2cap_chan *chan, int reason);
-void l2cap_chan_destroy(struct l2cap_chan *chan);
 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
                       bdaddr_t *dst, u8 dst_type);
 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
index 4348ee8bda6993a15193a4e1e6adb640c9512e1f..22980a7c38730f53305b181e338dc29009cf62d6 100644 (file)
@@ -405,7 +405,16 @@ struct mgmt_ev_device_connected {
        __u8    eir[0];
 } __packed;
 
+#define MGMT_DEV_DISCONN_UNKNOWN       0x00
+#define MGMT_DEV_DISCONN_TIMEOUT       0x01
+#define MGMT_DEV_DISCONN_LOCAL_HOST    0x02
+#define MGMT_DEV_DISCONN_REMOTE                0x03
+
 #define MGMT_EV_DEVICE_DISCONNECTED    0x000C
+struct mgmt_ev_device_disconnected {
+       struct mgmt_addr_info addr;
+       __u8    reason;
+} __packed;
 
 #define MGMT_EV_CONNECT_FAILED         0x000D
 struct mgmt_ev_connect_failed {
@@ -469,3 +478,10 @@ struct mgmt_ev_device_unblocked {
 struct mgmt_ev_device_unpaired {
        struct mgmt_addr_info addr;
 } __packed;
+
+#define MGMT_EV_PASSKEY_NOTIFY         0x0017
+struct mgmt_ev_passkey_notify {
+       struct mgmt_addr_info addr;
+       __le32  passkey;
+       __u8    entered;
+} __packed;
index 8b27927b2a55de3dfd5f94c5a40ef3c3b886eb06..f8ba07f3e5fa19427573128f07764e8733977b29 100644 (file)
@@ -108,8 +108,8 @@ struct smp_cmd_security_req {
 #define SMP_CONFIRM_FAILED             0x04
 #define SMP_PAIRING_NOTSUPP            0x05
 #define SMP_ENC_KEY_SIZE               0x06
-#define SMP_CMD_NOTSUPP                0x07
-#define SMP_UNSPECIFIED                0x08
+#define SMP_CMD_NOTSUPP                        0x07
+#define SMP_UNSPECIFIED                        0x08
 #define SMP_REPEATED_ATTEMPTS          0x09
 
 #define SMP_MIN_ENC_KEY_SIZE           7
@@ -123,8 +123,8 @@ struct smp_chan {
        struct l2cap_conn *conn;
        u8              preq[7]; /* SMP Pairing Request */
        u8              prsp[7]; /* SMP Pairing Response */
-       u8              prnd[16]; /* SMP Pairing Random (local) */
-       u8              rrnd[16]; /* SMP Pairing Random (remote) */
+       u8              prnd[16]; /* SMP Pairing Random (local) */
+       u8              rrnd[16]; /* SMP Pairing Random (remote) */
        u8              pcnf[16]; /* SMP Pairing Confirm */
        u8              tk[16]; /* SMP Temporary Key */
        u8              enc_key_size;
index 3d254e10ff30e7ab3c5a4fee2ee0b38f1309bd94..1b49890822449df661d576219181b10c64d57a6c 100644 (file)
@@ -245,6 +245,7 @@ struct ieee80211_sta_vht_cap {
  *     rates" IE, i.e. CCK rates first, then OFDM.
  * @n_bitrates: Number of bitrates in @bitrates
  * @ht_cap: HT capabilities in this band
+ * @vht_cap: VHT capabilities in this band
  */
 struct ieee80211_supported_band {
        struct ieee80211_channel *channels;
@@ -1439,7 +1440,8 @@ struct cfg80211_gtk_rekey_data {
  * @add_virtual_intf: create a new virtual interface with the given name,
  *     must set the struct wireless_dev's iftype. Beware: You must create
  *     the new netdev in the wiphy's network namespace! Returns the struct
- *     wireless_dev, or an ERR_PTR.
+ *     wireless_dev, or an ERR_PTR. For P2P device wdevs, the driver must
+ *     also set the address member in the wdev.
  *
  * @del_virtual_intf: remove the virtual interface
  *
@@ -1578,9 +1580,7 @@ struct cfg80211_gtk_rekey_data {
  * @set_cqm_txe_config: Configure connection quality monitor TX error
  *     thresholds.
  * @sched_scan_start: Tell the driver to start a scheduled scan.
- * @sched_scan_stop: Tell the driver to stop an ongoing scheduled
- *     scan.  The driver_initiated flag specifies whether the driver
- *     itself has informed that the scan has stopped.
+ * @sched_scan_stop: Tell the driver to stop an ongoing scheduled scan.
  *
  * @mgmt_frame_register: Notify driver that a management frame type was
  *     registered. Note that this callback may not sleep, and cannot run
@@ -1618,6 +1618,9 @@ struct cfg80211_gtk_rekey_data {
  * @get_channel: Get the current operating channel for the virtual interface.
  *     For monitor interfaces, it should return %NULL unless there's a single
  *     current monitoring channel.
+ *
+ * @start_p2p_device: Start the given P2P device.
+ * @stop_p2p_device: Stop the given P2P device.
  */
 struct cfg80211_ops {
        int     (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
@@ -1625,7 +1628,7 @@ struct cfg80211_ops {
        void    (*set_wakeup)(struct wiphy *wiphy, bool enabled);
 
        struct wireless_dev * (*add_virtual_intf)(struct wiphy *wiphy,
-                                                 char *name,
+                                                 const char *name,
                                                  enum nl80211_iftype type,
                                                  u32 *flags,
                                                  struct vif_params *params);
@@ -1834,6 +1837,11 @@ struct cfg80211_ops {
                (*get_channel)(struct wiphy *wiphy,
                               struct wireless_dev *wdev,
                               enum nl80211_channel_type *type);
+
+       int     (*start_p2p_device)(struct wiphy *wiphy,
+                                   struct wireless_dev *wdev);
+       void    (*stop_p2p_device)(struct wiphy *wiphy,
+                                  struct wireless_dev *wdev);
 };
 
 /*
@@ -2397,6 +2405,8 @@ struct cfg80211_cached_keys;
  * @cleanup_work: work struct used for cleanup that can't be done directly
  * @beacon_interval: beacon interval used on this device for transmitting
  *     beacons, 0 when not valid
+ * @address: The address for this device, valid only if @netdev is %NULL
+ * @p2p_started: true if this is a P2P Device that has been started
  */
 struct wireless_dev {
        struct wiphy *wiphy;
@@ -2415,7 +2425,9 @@ struct wireless_dev {
 
        struct work_struct cleanup_work;
 
-       bool use_4addr;
+       bool use_4addr, p2p_started;
+
+       u8 address[ETH_ALEN] __aligned(sizeof(u16));
 
        /* currently used for IBSS and SME - might be rearranged later */
        u8 ssid[IEEE80211_MAX_SSID_LEN];
@@ -2445,7 +2457,7 @@ struct wireless_dev {
 
        int beacon_interval;
 
-       u32 ap_unexpected_nlpid;
+       u32 ap_unexpected_nlportid;
 
 #ifdef CONFIG_CFG80211_WEXT
        /* wext data */
@@ -2463,6 +2475,13 @@ struct wireless_dev {
 #endif
 };
 
+static inline u8 *wdev_address(struct wireless_dev *wdev)
+{
+       if (wdev->netdev)
+               return wdev->netdev->dev_addr;
+       return wdev->address;
+}
+
 /**
  * wdev_priv - return wiphy priv from wireless_dev
  *
@@ -3341,6 +3360,25 @@ void cfg80211_new_sta(struct net_device *dev, const u8 *mac_addr,
  */
 void cfg80211_del_sta(struct net_device *dev, const u8 *mac_addr, gfp_t gfp);
 
+/**
+ * cfg80211_conn_failed - connection request failed notification
+ *
+ * @dev: the netdev
+ * @mac_addr: the station's address
+ * @reason: the reason for connection failure
+ * @gfp: allocation flags
+ *
+ * Whenever a station tries to connect to an AP and if the station
+ * could not connect to the AP as the AP has rejected the connection
+ * for some reasons, this function is called.
+ *
+ * The reason for connection failure can be any of the value from
+ * nl80211_connect_failed_reason enum
+ */
+void cfg80211_conn_failed(struct net_device *dev, const u8 *mac_addr,
+                         enum nl80211_connect_failed_reason reason,
+                         gfp_t gfp);
+
 /**
  * cfg80211_rx_mgmt - notification of received, unprocessed management frame
  * @wdev: wireless device receiving the frame
@@ -3530,6 +3568,22 @@ void cfg80211_ch_switch_notify(struct net_device *dev, int freq,
  */
 u32 cfg80211_calculate_bitrate(struct rate_info *rate);
 
+/**
+ * cfg80211_unregister_wdev - remove the given wdev
+ * @wdev: struct wireless_dev to remove
+ *
+ * Call this function only for wdevs that have no netdev assigned,
+ * e.g. P2P Devices. It removes the device from the list so that
+ * it can no longer be used. It is necessary to call this function
+ * even when cfg80211 requests the removal of the interface by
+ * calling the del_virtual_intf() callback. The function must also
+ * be called when the driver wishes to unregister the wdev, e.g.
+ * when the device is unbound from the driver.
+ *
+ * Requires the RTNL to be held.
+ */
+void cfg80211_unregister_wdev(struct wireless_dev *wdev);
+
 /* Logging, debugging and troubleshooting/diagnostic helpers. */
 
 /* wiphy_printk helpers, similar to dev_printk */
index ba55d8b8c87cb5438f87fad5ae01d2aee77d59ab..600d1d705bb86f23b00a8d0feebabe6ea303934d 100644 (file)
@@ -109,6 +109,9 @@ static inline void csum_replace2(__sum16 *sum, __be16 from, __be16 to)
 struct sk_buff;
 extern void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb,
                                     __be32 from, __be32 to, int pseudohdr);
+extern void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
+                                     const __be32 *from, const __be32 *to,
+                                     int pseudohdr);
 
 static inline void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb,
                                            __be16 from, __be16 to,
index a4dc5b027bd9cc7731b7fab1146508896c0b215c..b6a6eeb3905f15d956fd1190cf6c415508cea0bf 100644 (file)
 #include <linux/hardirq.h>
 #include <linux/rcupdate.h>
 
-#ifdef CONFIG_CGROUPS
+#if IS_ENABLED(CONFIG_NET_CLS_CGROUP)
 struct cgroup_cls_state
 {
        struct cgroup_subsys_state css;
        u32 classid;
 };
 
-#ifdef CONFIG_NET_CLS_CGROUP
+extern void sock_update_classid(struct sock *sk);
+
+#if IS_BUILTIN(CONFIG_NET_CLS_CGROUP)
 static inline u32 task_cls_classid(struct task_struct *p)
 {
        int classid;
@@ -39,32 +41,33 @@ static inline u32 task_cls_classid(struct task_struct *p)
 
        return classid;
 }
-#else
-extern int net_cls_subsys_id;
-
+#elif IS_MODULE(CONFIG_NET_CLS_CGROUP)
 static inline u32 task_cls_classid(struct task_struct *p)
 {
-       int id;
+       struct cgroup_subsys_state *css;
        u32 classid = 0;
 
        if (in_interrupt())
                return 0;
 
        rcu_read_lock();
-       id = rcu_dereference_index_check(net_cls_subsys_id,
-                                        rcu_read_lock_held());
-       if (id >= 0)
-               classid = container_of(task_subsys_state(p, id),
+       css = task_subsys_state(p, net_cls_subsys_id);
+       if (css)
+               classid = container_of(css,
                                       struct cgroup_cls_state, css)->classid;
        rcu_read_unlock();
 
        return classid;
 }
 #endif
-#else
+#else /* !CGROUP_NET_CLS_CGROUP */
+static inline void sock_update_classid(struct sock *sk)
+{
+}
+
 static inline u32 task_cls_classid(struct task_struct *p)
 {
        return 0;
 }
-#endif
+#endif /* CGROUP_NET_CLS_CGROUP */
 #endif  /* _NET_CLS_CGROUP_H */
index 621e3513ef5ed2c833a16902761b3d5c1ed2ebaf..9a7881066fb316b02fdd7ed52aaf84d038ded1f6 100644 (file)
@@ -396,11 +396,15 @@ static inline void dst_confirm(struct dst_entry *dst)
 static inline int dst_neigh_output(struct dst_entry *dst, struct neighbour *n,
                                   struct sk_buff *skb)
 {
-       struct hh_cache *hh;
+       const struct hh_cache *hh;
+
+       if (dst->pending_confirm) {
+               unsigned long now = jiffies;
 
-       if (unlikely(dst->pending_confirm)) {
-               n->confirmed = jiffies;
                dst->pending_confirm = 0;
+               /* avoid dirtying neighbour */
+               if (n->confirmed != now)
+                       n->confirmed = now;
        }
 
        hh = &n->hh;
index 48905cd3884c8d687ba3f109e31f5268ee42feb1..bdfbe68c1c3b271bf20b2ef23d5d0325bda2f906 100644 (file)
@@ -65,7 +65,7 @@ struct genl_family {
 /**
  * struct genl_info - receiving information
  * @snd_seq: sending sequence number
- * @snd_pid: netlink pid of sender
+ * @snd_portid: netlink portid of sender
  * @nlhdr: netlink message header
  * @genlhdr: generic netlink message header
  * @userhdr: user specific header
@@ -75,7 +75,7 @@ struct genl_family {
  */
 struct genl_info {
        u32                     snd_seq;
-       u32                     snd_pid;
+       u32                     snd_portid;
        struct nlmsghdr *       nlhdr;
        struct genlmsghdr *     genlhdr;
        void *                  userhdr;
@@ -130,10 +130,10 @@ extern int genl_register_mc_group(struct genl_family *family,
                                  struct genl_multicast_group *grp);
 extern void genl_unregister_mc_group(struct genl_family *family,
                                     struct genl_multicast_group *grp);
-extern void genl_notify(struct sk_buff *skb, struct net *net, u32 pid,
+extern void genl_notify(struct sk_buff *skb, struct net *net, u32 portid,
                        u32 group, struct nlmsghdr *nlh, gfp_t flags);
 
-void *genlmsg_put(struct sk_buff *skb, u32 pid, u32 seq,
+void *genlmsg_put(struct sk_buff *skb, u32 portid, u32 seq,
                                struct genl_family *family, int flags, u8 cmd);
 
 /**
@@ -183,7 +183,7 @@ static inline void *genlmsg_put_reply(struct sk_buff *skb,
                                      struct genl_family *family,
                                      int flags, u8 cmd)
 {
-       return genlmsg_put(skb, info->snd_pid, info->snd_seq, family,
+       return genlmsg_put(skb, info->snd_portid, info->snd_seq, family,
                           flags, cmd);
 }
 
@@ -212,49 +212,49 @@ static inline void genlmsg_cancel(struct sk_buff *skb, void *hdr)
  * genlmsg_multicast_netns - multicast a netlink message to a specific netns
  * @net: the net namespace
  * @skb: netlink message as socket buffer
- * @pid: own netlink pid to avoid sending to yourself
+ * @portid: own netlink portid to avoid sending to yourself
  * @group: multicast group id
  * @flags: allocation flags
  */
 static inline int genlmsg_multicast_netns(struct net *net, struct sk_buff *skb,
-                                         u32 pid, unsigned int group, gfp_t flags)
+                                         u32 portid, unsigned int group, gfp_t flags)
 {
-       return nlmsg_multicast(net->genl_sock, skb, pid, group, flags);
+       return nlmsg_multicast(net->genl_sock, skb, portid, group, flags);
 }
 
 /**
  * genlmsg_multicast - multicast a netlink message to the default netns
  * @skb: netlink message as socket buffer
- * @pid: own netlink pid to avoid sending to yourself
+ * @portid: own netlink portid to avoid sending to yourself
  * @group: multicast group id
  * @flags: allocation flags
  */
-static inline int genlmsg_multicast(struct sk_buff *skb, u32 pid,
+static inline int genlmsg_multicast(struct sk_buff *skb, u32 portid,
                                    unsigned int group, gfp_t flags)
 {
-       return genlmsg_multicast_netns(&init_net, skb, pid, group, flags);
+       return genlmsg_multicast_netns(&init_net, skb, portid, group, flags);
 }
 
 /**
  * genlmsg_multicast_allns - multicast a netlink message to all net namespaces
  * @skb: netlink message as socket buffer
- * @pid: own netlink pid to avoid sending to yourself
+ * @portid: own netlink portid to avoid sending to yourself
  * @group: multicast group id
  * @flags: allocation flags
  *
  * This function must hold the RTNL or rcu_read_lock().
  */
-int genlmsg_multicast_allns(struct sk_buff *skb, u32 pid,
+int genlmsg_multicast_allns(struct sk_buff *skb, u32 portid,
                            unsigned int group, gfp_t flags);
 
 /**
  * genlmsg_unicast - unicast a netlink message
  * @skb: netlink message as socket buffer
- * @pid: netlink pid of the destination socket
+ * @portid: netlink portid of the destination socket
  */
-static inline int genlmsg_unicast(struct net *net, struct sk_buff *skb, u32 pid)
+static inline int genlmsg_unicast(struct net *net, struct sk_buff *skb, u32 portid)
 {
-       return nlmsg_unicast(net->genl_sock, skb, pid);
+       return nlmsg_unicast(net->genl_sock, skb, portid);
 }
 
 /**
@@ -264,7 +264,7 @@ static inline int genlmsg_unicast(struct net *net, struct sk_buff *skb, u32 pid)
  */
 static inline int genlmsg_reply(struct sk_buff *skb, struct genl_info *info)
 {
-       return genlmsg_unicast(genl_info_net(info), skb, info->snd_pid);
+       return genlmsg_unicast(genl_info_net(info), skb, info->snd_portid);
 }
 
 /**
diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h
new file mode 100644 (file)
index 0000000..4fd8a4b
--- /dev/null
@@ -0,0 +1,103 @@
+#ifndef _NET_GRO_CELLS_H
+#define _NET_GRO_CELLS_H
+
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/netdevice.h>
+
+struct gro_cell {
+       struct sk_buff_head     napi_skbs;
+       struct napi_struct      napi;
+} ____cacheline_aligned_in_smp;
+
+struct gro_cells {
+       unsigned int            gro_cells_mask;
+       struct gro_cell         *cells;
+};
+
+static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb)
+{
+       unsigned long flags;
+       struct gro_cell *cell = gcells->cells;
+       struct net_device *dev = skb->dev;
+
+       if (!cell || skb_cloned(skb) || !(dev->features & NETIF_F_GRO)) {
+               netif_rx(skb);
+               return;
+       }
+
+       if (skb_rx_queue_recorded(skb))
+               cell += skb_get_rx_queue(skb) & gcells->gro_cells_mask;
+
+       if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
+               atomic_long_inc(&dev->rx_dropped);
+               kfree_skb(skb);
+               return;
+       }
+
+       spin_lock_irqsave(&cell->napi_skbs.lock, flags);
+
+       __skb_queue_tail(&cell->napi_skbs, skb);
+       if (skb_queue_len(&cell->napi_skbs) == 1)
+               napi_schedule(&cell->napi);
+
+       spin_unlock_irqrestore(&cell->napi_skbs.lock, flags);
+}
+
+static inline int gro_cell_poll(struct napi_struct *napi, int budget)
+{
+       struct gro_cell *cell = container_of(napi, struct gro_cell, napi);
+       struct sk_buff *skb;
+       int work_done = 0;
+
+       while (work_done < budget) {
+               skb = skb_dequeue(&cell->napi_skbs);
+               if (!skb)
+                       break;
+
+               napi_gro_receive(napi, skb);
+               work_done++;
+       }
+
+       if (work_done < budget)
+               napi_complete(napi);
+       return work_done;
+}
+
+static inline int gro_cells_init(struct gro_cells *gcells, struct net_device *dev)
+{
+       int i;
+
+       gcells->gro_cells_mask = roundup_pow_of_two(netif_get_num_default_rss_queues()) - 1;
+       gcells->cells = kcalloc(sizeof(struct gro_cell),
+                               gcells->gro_cells_mask + 1,
+                               GFP_KERNEL);
+       if (!gcells->cells)
+               return -ENOMEM;
+
+       for (i = 0; i <= gcells->gro_cells_mask; i++) {
+               struct gro_cell *cell = gcells->cells + i;
+
+               skb_queue_head_init(&cell->napi_skbs);
+               netif_napi_add(dev, &cell->napi, gro_cell_poll, 64);
+               napi_enable(&cell->napi);
+       }
+       return 0;
+}
+
+static inline void gro_cells_destroy(struct gro_cells *gcells)
+{
+       struct gro_cell *cell = gcells->cells;
+       int i;
+
+       if (!cell)
+               return;
+       for (i = 0; i <= gcells->gro_cells_mask; i++,cell++) {
+               netif_napi_del(&cell->napi);
+               skb_queue_purge(&cell->napi_skbs);
+       }
+       kfree(gcells->cells);
+       gcells->cells = NULL;
+}
+
+#endif
index 71392545d0a110906abbe4e51b8cb367cd4fc70f..7f0df133d1197cf7c449d841d9ea81a5f3a07d70 100644 (file)
@@ -183,6 +183,9 @@ struct ieee80211_radiotap_header {
  *     Contains a bitmap of known fields/flags, the flags, and
  *     the MCS index.
  *
+ * IEEE80211_RADIOTAP_AMPDU_STATUS     u32, u16, u8, u8        unitless
+ *
+ *     Contains the AMPDU information for the subframe.
  */
 enum ieee80211_radiotap_type {
        IEEE80211_RADIOTAP_TSFT = 0,
@@ -205,6 +208,7 @@ enum ieee80211_radiotap_type {
        IEEE80211_RADIOTAP_DATA_RETRIES = 17,
 
        IEEE80211_RADIOTAP_MCS = 19,
+       IEEE80211_RADIOTAP_AMPDU_STATUS = 20,
 
        /* valid in every it_present bitmap, even vendor namespaces */
        IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE = 29,
@@ -270,6 +274,13 @@ enum ieee80211_radiotap_type {
 #define IEEE80211_RADIOTAP_MCS_FMT_GF          0x08
 #define IEEE80211_RADIOTAP_MCS_FEC_LDPC                0x10
 
+/* For IEEE80211_RADIOTAP_AMPDU_STATUS */
+#define IEEE80211_RADIOTAP_AMPDU_REPORT_ZEROLEN                0x0001
+#define IEEE80211_RADIOTAP_AMPDU_IS_ZEROLEN            0x0002
+#define IEEE80211_RADIOTAP_AMPDU_LAST_KNOWN            0x0004
+#define IEEE80211_RADIOTAP_AMPDU_IS_LAST               0x0008
+#define IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_ERR         0x0010
+#define IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_KNOWN       0x0020
 
 /* helpers */
 static inline int ieee80211_get_radiotap_len(unsigned char *data)
index 2fa14691869ca0299d86be8ed87626e297eb808d..aab73757bc4da4c4b5fce8390ec1234a86ea55c0 100644 (file)
@@ -15,6 +15,8 @@ enum {
        INET_ECN_MASK = 3,
 };
 
+extern int sysctl_tunnel_ecn_log;
+
 static inline int INET_ECN_is_ce(__u8 dsfield)
 {
        return (dsfield & INET_ECN_MASK) == INET_ECN_CE;
@@ -145,4 +147,78 @@ static inline int INET_ECN_set_ce(struct sk_buff *skb)
        return 0;
 }
 
+/*
+ * RFC 6080 4.2
+ *  To decapsulate the inner header at the tunnel egress, a compliant
+ *  tunnel egress MUST set the outgoing ECN field to the codepoint at the
+ *  intersection of the appropriate arriving inner header (row) and outer
+ *  header (column) in Figure 4
+ *
+ *      +---------+------------------------------------------------+
+ *      |Arriving |            Arriving Outer Header               |
+ *      |   Inner +---------+------------+------------+------------+
+ *      |  Header | Not-ECT | ECT(0)     | ECT(1)     |     CE     |
+ *      +---------+---------+------------+------------+------------+
+ *      | Not-ECT | Not-ECT |Not-ECT(!!!)|Not-ECT(!!!)| <drop>(!!!)|
+ *      |  ECT(0) |  ECT(0) | ECT(0)     | ECT(1)     |     CE     |
+ *      |  ECT(1) |  ECT(1) | ECT(1) (!) | ECT(1)     |     CE     |
+ *      |    CE   |      CE |     CE     |     CE(!!!)|     CE     |
+ *      +---------+---------+------------+------------+------------+
+ *
+ *             Figure 4: New IP in IP Decapsulation Behaviour
+ *
+ *  returns 0 on success
+ *          1 if something is broken and should be logged (!!! above)
+ *          2 if packet should be dropped
+ */
+static inline int INET_ECN_decapsulate(struct sk_buff *skb,
+                                      __u8 outer, __u8 inner)
+{
+       if (INET_ECN_is_not_ect(inner)) {
+               switch (outer & INET_ECN_MASK) {
+               case INET_ECN_NOT_ECT:
+                       return 0;
+               case INET_ECN_ECT_0:
+               case INET_ECN_ECT_1:
+                       return 1;
+               case INET_ECN_CE:
+                       return 2;
+               }
+       }
+
+       if (INET_ECN_is_ce(outer))
+               INET_ECN_set_ce(skb);
+
+       return 0;
+}
+
+static inline int IP_ECN_decapsulate(const struct iphdr *oiph,
+                                    struct sk_buff *skb)
+{
+       __u8 inner;
+
+       if (skb->protocol == htons(ETH_P_IP))
+               inner = ip_hdr(skb)->tos;
+       else if (skb->protocol == htons(ETH_P_IPV6))
+               inner = ipv6_get_dsfield(ipv6_hdr(skb));
+       else
+               return 0;
+
+       return INET_ECN_decapsulate(skb, oiph->tos, inner);
+}
+
+static inline int IP6_ECN_decapsulate(const struct ipv6hdr *oipv6h,
+                                     struct sk_buff *skb)
+{
+       __u8 inner;
+
+       if (skb->protocol == htons(ETH_P_IP))
+               inner = ip_hdr(skb)->tos;
+       else if (skb->protocol == htons(ETH_P_IPV6))
+               inner = ipv6_get_dsfield(ipv6_hdr(skb));
+       else
+               return 0;
+
+       return INET_ECN_decapsulate(skb, ipv6_get_dsfield(oipv6h), inner);
+}
 #endif
index 2431cf83aecafb74ca1e9464bec6f6a669046d75..32786a0447187f44e1264f4dc4c0b4721ebc3e08 100644 (file)
@@ -29,6 +29,8 @@ struct inet_frag_queue {
 #define INET_FRAG_COMPLETE     4
 #define INET_FRAG_FIRST_IN     2
 #define INET_FRAG_LAST_IN      1
+
+       u16                     max_size;
 };
 
 #define INETFRAGS_HASHSZ               64
@@ -59,7 +61,7 @@ void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f);
 void inet_frag_kill(struct inet_frag_queue *q, struct inet_frags *f);
 void inet_frag_destroy(struct inet_frag_queue *q,
                                struct inet_frags *f, int *work);
-int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f);
+int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f, bool force);
 struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
                struct inet_frags *f, void *key, unsigned int hash)
        __releases(&f->lock);
index 613cfa4016728300da88a8fc6f1f664de99da5cc..256c1ed2d69afc06cfbc8b51550c7b374a166afe 100644 (file)
@@ -101,10 +101,8 @@ struct inet_cork {
        __be32                  addr;
        struct ip_options       *opt;
        unsigned int            fragsize;
-       struct dst_entry        *dst;
        int                     length; /* Total length of all frames */
-       struct page             *page;
-       u32                     off;
+       struct dst_entry        *dst;
        u8                      tx_flags;
 };
 
index 5a5d84d3d2c6b6e3777035a631fb10e7479ab8de..0707fb9551aa4c1011c88969a42cd4482450d035 100644 (file)
@@ -42,6 +42,8 @@ struct inet_skb_parm {
 #define IPSKB_XFRM_TRANSFORMED 4
 #define IPSKB_FRAG_COMPLETE    8
 #define IPSKB_REROUTED         16
+
+       u16                     frag_max_size;
 };
 
 static inline unsigned int ip_hdrlen(const struct sk_buff *skb)
index 9fc7114159e885031550abedebf67c74f08bde76..8a2a203eb15d087c4838e80aa52e55208cbee1d5 100644 (file)
@@ -37,6 +37,7 @@ struct fib6_config {
        int             fc_ifindex;
        u32             fc_flags;
        u32             fc_protocol;
+       u32             fc_type;        /* only 8 bits are used */
 
        struct in6_addr fc_dst;
        struct in6_addr fc_src;
index 358fb86f57eb952816bc76736b7b85644dd77184..e03047f7090bb3419c2aa6f8b39f3a7ff0494f8a 100644 (file)
@@ -5,6 +5,8 @@
 #include <linux/netdevice.h>
 #include <linux/ip6_tunnel.h>
 
+#define IP6TUNNEL_ERR_TIMEO (30*HZ)
+
 /* capable of sending packets */
 #define IP6_TNL_F_CAP_XMIT 0x10000
 /* capable of receiving packets */
 /* determine capability on a per-packet basis */
 #define IP6_TNL_F_CAP_PER_PACKET 0x40000
 
-/* IPv6 tunnel */
+struct __ip6_tnl_parm {
+       char name[IFNAMSIZ];    /* name of tunnel device */
+       int link;               /* ifindex of underlying L2 interface */
+       __u8 proto;             /* tunnel protocol */
+       __u8 encap_limit;       /* encapsulation limit for tunnel */
+       __u8 hop_limit;         /* hop limit for tunnel */
+       __be32 flowinfo;        /* traffic class and flowlabel for tunnel */
+       __u32 flags;            /* tunnel flags */
+       struct in6_addr laddr;  /* local tunnel end-point address */
+       struct in6_addr raddr;  /* remote tunnel end-point address */
+
+       __be16                  i_flags;
+       __be16                  o_flags;
+       __be32                  i_key;
+       __be32                  o_key;
+};
 
+/* IPv6 tunnel */
 struct ip6_tnl {
        struct ip6_tnl __rcu *next;     /* next tunnel in list */
        struct net_device *dev; /* virtual device associated with tunnel */
-       struct ip6_tnl_parm parms;      /* tunnel configuration parameters */
+       struct __ip6_tnl_parm parms;    /* tunnel configuration parameters */
        struct flowi fl;        /* flowi template for xmit */
        struct dst_entry *dst_cache;    /* cached dst */
        u32 dst_cookie;
+
+       int err_count;
+       unsigned long err_time;
+
+       /* These fields used only by GRE */
+       __u32 i_seqno;  /* The last seen seqno  */
+       __u32 o_seqno;  /* The last output seqno */
+       int hlen;       /* Precalculated GRE header length */
+       int mlink;
 };
 
 /* Tunnel encapsulation limit destination sub-option */
@@ -31,4 +58,14 @@ struct ipv6_tlv_tnl_enc_lim {
        __u8 encap_limit;       /* tunnel encapsulation limit   */
 } __packed;
 
+struct dst_entry *ip6_tnl_dst_check(struct ip6_tnl *t);
+void ip6_tnl_dst_reset(struct ip6_tnl *t);
+void ip6_tnl_dst_store(struct ip6_tnl *t, struct dst_entry *dst);
+int ip6_tnl_rcv_ctl(struct ip6_tnl *t, const struct in6_addr *laddr,
+               const struct in6_addr *raddr);
+int ip6_tnl_xmit_ctl(struct ip6_tnl *t);
+__u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw);
+__u32 ip6_tnl_get_cap(struct ip6_tnl *t, const struct in6_addr *laddr,
+                            const struct in6_addr *raddr);
+
 #endif
index 95374d1696a163a75f8aa006bf99fe958df195aa..ee75ccdf5188cbf4eac840714a22b456d6700ad6 100644 (file)
@@ -808,8 +808,6 @@ struct netns_ipvs {
        struct list_head        rs_table[IP_VS_RTAB_SIZE];
        /* ip_vs_app */
        struct list_head        app_list;
-       /* ip_vs_ftp */
-       struct ip_vs_app        *ftp_app;
        /* ip_vs_proto */
        #define IP_VS_PROTO_TAB_SIZE    32      /* must be power of 2 */
        struct ip_vs_proto_data *proto_data_table[IP_VS_PROTO_TAB_SIZE];
@@ -890,6 +888,7 @@ struct netns_ipvs {
        unsigned int            sysctl_sync_refresh_period;
        int                     sysctl_sync_retries;
        int                     sysctl_nat_icmp_send;
+       int                     sysctl_pmtu_disc;
 
        /* ip_vs_lblc */
        int                     sysctl_lblc_expiration;
@@ -976,6 +975,11 @@ static inline int sysctl_sync_sock_size(struct netns_ipvs *ipvs)
        return ipvs->sysctl_sync_sock_size;
 }
 
+static inline int sysctl_pmtu_disc(struct netns_ipvs *ipvs)
+{
+       return ipvs->sysctl_pmtu_disc;
+}
+
 #else
 
 static inline int sysctl_sync_threshold(struct netns_ipvs *ipvs)
@@ -1018,6 +1022,11 @@ static inline int sysctl_sync_sock_size(struct netns_ipvs *ipvs)
        return 0;
 }
 
+static inline int sysctl_pmtu_disc(struct netns_ipvs *ipvs)
+{
+       return 1;
+}
+
 #endif
 
 /*
@@ -1179,7 +1188,8 @@ extern void ip_vs_service_net_cleanup(struct net *net);
  *      (from ip_vs_app.c)
  */
 #define IP_VS_APP_MAX_PORTS  8
-extern int register_ip_vs_app(struct net *net, struct ip_vs_app *app);
+extern struct ip_vs_app *register_ip_vs_app(struct net *net,
+                                           struct ip_vs_app *app);
 extern void unregister_ip_vs_app(struct net *net, struct ip_vs_app *app);
 extern int ip_vs_bind_app(struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
 extern void ip_vs_unbind_app(struct ip_vs_conn *cp);
index a93cf6d7e94b160dc2ad1ec061948c05713264ab..ddc077c51f324d74e248fada33fd152238cf5337 100644 (file)
@@ -2,6 +2,7 @@
 #define __NET_IPIP_H 1
 
 #include <linux/if_tunnel.h>
+#include <net/gro_cells.h>
 #include <net/ip.h>
 
 /* Keep error state on tunnel for 30 sec */
@@ -36,6 +37,8 @@ struct ip_tunnel {
 #endif
        struct ip_tunnel_prl_entry __rcu *prl;          /* potential router list */
        unsigned int                    prl_count;      /* # of entries in PRL */
+
+       struct gro_cells                gro_cells;
 };
 
 struct ip_tunnel_prl_entry {
index 01c34b363a34d1f7b126d55dffd8e170963528ed..979bf6c131412be9a4662d4738056feb91a26272 100644 (file)
@@ -34,6 +34,7 @@
 #define NEXTHDR_IPV6           41      /* IPv6 in IPv6 */
 #define NEXTHDR_ROUTING                43      /* Routing header. */
 #define NEXTHDR_FRAGMENT       44      /* Fragmentation/reassembly header. */
+#define NEXTHDR_GRE            47      /* GRE header. */
 #define NEXTHDR_ESP            50      /* Encapsulating security payload. */
 #define NEXTHDR_AUTH           51      /* Authentication header. */
 #define NEXTHDR_ICMP           58      /* ICMP for IPv6. */
@@ -222,7 +223,10 @@ struct ip6_flowlabel {
        struct ipv6_txoptions   *opt;
        unsigned long           linger;
        u8                      share;
-       u32                     owner;
+       union {
+               struct pid *pid;
+               kuid_t uid;
+       } owner;
        unsigned long           lastuse;
        unsigned long           expires;
        struct net              *fl_net;
@@ -267,8 +271,17 @@ struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space,
 
 extern bool ipv6_opt_accepted(const struct sock *sk, const struct sk_buff *skb);
 
-int ip6_frag_nqueues(struct net *net);
-int ip6_frag_mem(struct net *net);
+#if IS_ENABLED(CONFIG_IPV6)
+static inline int ip6_frag_nqueues(struct net *net)
+{
+       return net->ipv6.frags.nqueues;
+}
+
+static inline int ip6_frag_mem(struct net *net)
+{
+       return atomic_read(&net->ipv6.frags.mem);
+}
+#endif
 
 #define IPV6_FRAG_HIGH_THRESH  (256 * 1024)    /* 262144 */
 #define IPV6_FRAG_LOW_THRESH   (192 * 1024)    /* 196608 */
@@ -407,6 +420,25 @@ struct ip6_create_arg {
 void ip6_frag_init(struct inet_frag_queue *q, void *a);
 bool ip6_frag_match(struct inet_frag_queue *q, void *a);
 
+/*
+ *     Equivalent of ipv4 struct ip
+ */
+struct frag_queue {
+       struct inet_frag_queue  q;
+
+       __be32                  id;             /* fragment id          */
+       u32                     user;
+       struct in6_addr         saddr;
+       struct in6_addr         daddr;
+
+       int                     iif;
+       unsigned int            csum;
+       __u16                   nhoffset;
+};
+
+void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq,
+                          struct inet_frags *frags);
+
 static inline bool ipv6_addr_any(const struct in6_addr *a)
 {
 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
index f2d0fc570527baf216f34cd364c0162330dcbd28..9e7d7f08ef77c5539ea887b483b274fc0ddb91fd 100644 (file)
@@ -151,7 +151,6 @@ extern int sysctl_llc2_ack_timeout;
 extern int sysctl_llc2_busy_timeout;
 extern int sysctl_llc2_p_timeout;
 extern int sysctl_llc2_rej_timeout;
-extern int sysctl_llc_station_ack_timeout;
 #else
 #define llc_sysctl_init() (0)
 #define llc_sysctl_exit() do { } while(0)
index bb86aa6f98dd065d701d37d6ba4a18b2d55f00ae..82558c8decf86e7cf720d6b7a190455c31e60be3 100644 (file)
@@ -171,6 +171,7 @@ struct ieee80211_low_level_stats {
  * @BSS_CHANGED_IDLE: Idle changed for this BSS/interface.
  * @BSS_CHANGED_SSID: SSID changed for this BSS (AP mode)
  * @BSS_CHANGED_AP_PROBE_RESP: Probe Response changed for this BSS (AP mode)
+ * @BSS_CHANGED_PS: PS changed for this BSS (STA mode)
  */
 enum ieee80211_bss_change {
        BSS_CHANGED_ASSOC               = 1<<0,
@@ -190,6 +191,7 @@ enum ieee80211_bss_change {
        BSS_CHANGED_IDLE                = 1<<14,
        BSS_CHANGED_SSID                = 1<<15,
        BSS_CHANGED_AP_PROBE_RESP       = 1<<16,
+       BSS_CHANGED_PS                  = 1<<17,
 
        /* when adding here, make sure to change ieee80211_reconfig */
 };
@@ -266,6 +268,8 @@ enum ieee80211_rssi_event {
  * @idle: This interface is idle. There's also a global idle flag in the
  *     hardware config which may be more appropriate depending on what
  *     your driver/device needs to do.
+ * @ps: power-save mode (STA only). This flag is NOT affected by
+ *     offchannel/dynamic_ps operations.
  * @ssid: The SSID of the current vif. Only valid in AP-mode.
  * @ssid_len: Length of SSID given in @ssid.
  * @hidden_ssid: The SSID of the current vif is hidden. Only valid in AP-mode.
@@ -296,6 +300,7 @@ struct ieee80211_bss_conf {
        bool arp_filter_enabled;
        bool qos;
        bool idle;
+       bool ps;
        u8 ssid[IEEE80211_MAX_SSID_LEN];
        size_t ssid_len;
        bool hidden_ssid;
@@ -522,9 +527,6 @@ struct ieee80211_tx_rate {
  *  (2) driver internal use (if applicable)
  *  (3) TX status information - driver tells mac80211 what happened
  *
- * The TX control's sta pointer is only valid during the ->tx call,
- * it may be NULL.
- *
  * @flags: transmit info flags, defined above
  * @band: the band to transmit on (use for checking for races)
  * @hw_queue: HW queue to put the frame on, skb_get_queue_mapping() gives the AC
@@ -555,6 +557,7 @@ struct ieee80211_tx_info {
                                        struct ieee80211_tx_rate rates[
                                                IEEE80211_TX_MAX_RATES];
                                        s8 rts_cts_rate_idx;
+                                       /* 3 bytes free */
                                };
                                /* only needed before rate control */
                                unsigned long jiffies;
@@ -562,7 +565,7 @@ struct ieee80211_tx_info {
                        /* NB: vif can be NULL for injected frames */
                        struct ieee80211_vif *vif;
                        struct ieee80211_key_conf *hw_key;
-                       struct ieee80211_sta *sta;
+                       /* 8 bytes free */
                } control;
                struct {
                        struct ieee80211_tx_rate rates[IEEE80211_TX_MAX_RATES];
@@ -673,21 +676,41 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
  * @RX_FLAG_HT_GF: This frame was received in a HT-greenfield transmission, if
  *     the driver fills this value it should add %IEEE80211_RADIOTAP_MCS_HAVE_FMT
  *     to hw.radiotap_mcs_details to advertise that fact
+ * @RX_FLAG_AMPDU_DETAILS: A-MPDU details are known, in particular the reference
+ *     number (@ampdu_reference) must be populated and be a distinct number for
+ *     each A-MPDU
+ * @RX_FLAG_AMPDU_REPORT_ZEROLEN: driver reports 0-length subframes
+ * @RX_FLAG_AMPDU_IS_ZEROLEN: This is a zero-length subframe, for
+ *     monitoring purposes only
+ * @RX_FLAG_AMPDU_LAST_KNOWN: last subframe is known, should be set on all
+ *     subframes of a single A-MPDU
+ * @RX_FLAG_AMPDU_IS_LAST: this subframe is the last subframe of the A-MPDU
+ * @RX_FLAG_AMPDU_DELIM_CRC_ERROR: A delimiter CRC error has been detected
+ *     on this subframe
+ * @RX_FLAG_AMPDU_DELIM_CRC_KNOWN: The delimiter CRC field is known (the CRC
+ *     is stored in the @ampdu_delimiter_crc field)
  */
 enum mac80211_rx_flags {
-       RX_FLAG_MMIC_ERROR      = 1<<0,
-       RX_FLAG_DECRYPTED       = 1<<1,
-       RX_FLAG_MMIC_STRIPPED   = 1<<3,
-       RX_FLAG_IV_STRIPPED     = 1<<4,
-       RX_FLAG_FAILED_FCS_CRC  = 1<<5,
-       RX_FLAG_FAILED_PLCP_CRC = 1<<6,
-       RX_FLAG_MACTIME_MPDU    = 1<<7,
-       RX_FLAG_SHORTPRE        = 1<<8,
-       RX_FLAG_HT              = 1<<9,
-       RX_FLAG_40MHZ           = 1<<10,
-       RX_FLAG_SHORT_GI        = 1<<11,
-       RX_FLAG_NO_SIGNAL_VAL   = 1<<12,
-       RX_FLAG_HT_GF           = 1<<13,
+       RX_FLAG_MMIC_ERROR              = BIT(0),
+       RX_FLAG_DECRYPTED               = BIT(1),
+       RX_FLAG_MMIC_STRIPPED           = BIT(3),
+       RX_FLAG_IV_STRIPPED             = BIT(4),
+       RX_FLAG_FAILED_FCS_CRC          = BIT(5),
+       RX_FLAG_FAILED_PLCP_CRC         = BIT(6),
+       RX_FLAG_MACTIME_MPDU            = BIT(7),
+       RX_FLAG_SHORTPRE                = BIT(8),
+       RX_FLAG_HT                      = BIT(9),
+       RX_FLAG_40MHZ                   = BIT(10),
+       RX_FLAG_SHORT_GI                = BIT(11),
+       RX_FLAG_NO_SIGNAL_VAL           = BIT(12),
+       RX_FLAG_HT_GF                   = BIT(13),
+       RX_FLAG_AMPDU_DETAILS           = BIT(14),
+       RX_FLAG_AMPDU_REPORT_ZEROLEN    = BIT(15),
+       RX_FLAG_AMPDU_IS_ZEROLEN        = BIT(16),
+       RX_FLAG_AMPDU_LAST_KNOWN        = BIT(17),
+       RX_FLAG_AMPDU_IS_LAST           = BIT(18),
+       RX_FLAG_AMPDU_DELIM_CRC_ERROR   = BIT(19),
+       RX_FLAG_AMPDU_DELIM_CRC_KNOWN   = BIT(20),
 };
 
 /**
@@ -711,17 +734,22 @@ enum mac80211_rx_flags {
  *     HT rates are use (RX_FLAG_HT)
  * @flag: %RX_FLAG_*
  * @rx_flags: internal RX flags for mac80211
+ * @ampdu_reference: A-MPDU reference number, must be a different value for
+ *     each A-MPDU but the same for each subframe within one A-MPDU
+ * @ampdu_delimiter_crc: A-MPDU delimiter CRC
  */
 struct ieee80211_rx_status {
        u64 mactime;
        u32 device_timestamp;
-       u16 flag;
+       u32 ampdu_reference;
+       u32 flag;
        u16 freq;
        u8 rate_idx;
        u8 rx_flags;
        u8 band;
        u8 antenna;
        s8 signal;
+       u8 ampdu_delimiter_crc;
 };
 
 /**
@@ -945,21 +973,29 @@ static inline bool ieee80211_vif_is_mesh(struct ieee80211_vif *vif)
  *     generation in software.
  * @IEEE80211_KEY_FLAG_PAIRWISE: Set by mac80211, this flag indicates
  *     that the key is pairwise rather then a shared key.
- * @IEEE80211_KEY_FLAG_SW_MGMT: This flag should be set by the driver for a
+ * @IEEE80211_KEY_FLAG_SW_MGMT_TX: This flag should be set by the driver for a
  *     CCMP key if it requires CCMP encryption of management frames (MFP) to
  *     be done in software.
  * @IEEE80211_KEY_FLAG_PUT_IV_SPACE: This flag should be set by the driver
  *     if space should be prepared for the IV, but the IV
  *     itself should not be generated. Do not set together with
  *     @IEEE80211_KEY_FLAG_GENERATE_IV on the same key.
+ * @IEEE80211_KEY_FLAG_RX_MGMT: This key will be used to decrypt received
+ *     management frames. The flag can help drivers that have a hardware
+ *     crypto implementation that doesn't deal with management frames
+ *     properly by allowing them to not upload the keys to hardware and
+ *     fall back to software crypto. Note that this flag deals only with
+ *     RX, if your crypto engine can't deal with TX you can also set the
+ *     %IEEE80211_KEY_FLAG_SW_MGMT_TX flag to encrypt such frames in SW.
  */
 enum ieee80211_key_flags {
        IEEE80211_KEY_FLAG_WMM_STA      = 1<<0,
        IEEE80211_KEY_FLAG_GENERATE_IV  = 1<<1,
        IEEE80211_KEY_FLAG_GENERATE_MMIC= 1<<2,
        IEEE80211_KEY_FLAG_PAIRWISE     = 1<<3,
-       IEEE80211_KEY_FLAG_SW_MGMT      = 1<<4,
+       IEEE80211_KEY_FLAG_SW_MGMT_TX   = 1<<4,
        IEEE80211_KEY_FLAG_PUT_IV_SPACE = 1<<5,
+       IEEE80211_KEY_FLAG_RX_MGMT      = 1<<6,
 };
 
 /**
@@ -1073,6 +1109,16 @@ enum sta_notify_cmd {
        STA_NOTIFY_SLEEP, STA_NOTIFY_AWAKE,
 };
 
+/**
+ * struct ieee80211_tx_control - TX control data
+ *
+ * @sta: station table entry, this sta pointer may be NULL and
+ *     it is not allowed to copy the pointer, due to RCU.
+ */
+struct ieee80211_tx_control {
+       struct ieee80211_sta *sta;
+};
+
 /**
  * enum ieee80211_hw_flags - hardware flags
  *
@@ -1203,6 +1249,10 @@ enum sta_notify_cmd {
  *     queue mapping in order to use different queues (not just one per AC)
  *     for different virtual interfaces. See the doc section on HW queue
  *     control for more details.
+ *
+ * @IEEE80211_HW_P2P_DEV_ADDR_FOR_INTF: Use the P2P Device address for any
+ *     P2P Interface. This will be honoured even if more than one interface
+ *     is supported.
  */
 enum ieee80211_hw_flags {
        IEEE80211_HW_HAS_RATE_CONTROL                   = 1<<0,
@@ -1230,6 +1280,7 @@ enum ieee80211_hw_flags {
        IEEE80211_HW_AP_LINK_PS                         = 1<<22,
        IEEE80211_HW_TX_AMPDU_SETUP_IN_HW               = 1<<23,
        IEEE80211_HW_SCAN_WHILE_IDLE                    = 1<<24,
+       IEEE80211_HW_P2P_DEV_ADDR_FOR_INTF              = 1<<25,
 };
 
 /**
@@ -1884,10 +1935,14 @@ enum ieee80211_frame_release_type {
  * @IEEE80211_RC_BW_CHANGED: The bandwidth that can be used to transmit
  *     to this station changed.
  * @IEEE80211_RC_SMPS_CHANGED: The SMPS state of the station changed.
+ * @IEEE80211_RC_SUPP_RATES_CHANGED: The supported rate set of this peer
+ *     changed (in IBSS mode) due to discovering more information about
+ *     the peer.
  */
 enum ieee80211_rate_control_changed {
        IEEE80211_RC_BW_CHANGED         = BIT(0),
        IEEE80211_RC_SMPS_CHANGED       = BIT(1),
+       IEEE80211_RC_SUPP_RATES_CHANGED = BIT(2),
 };
 
 /**
@@ -2264,7 +2319,9 @@ enum ieee80211_rate_control_changed {
  *     The callback is optional and can (should!) sleep.
  */
 struct ieee80211_ops {
-       void (*tx)(struct ieee80211_hw *hw, struct sk_buff *skb);
+       void (*tx)(struct ieee80211_hw *hw,
+                  struct ieee80211_tx_control *control,
+                  struct sk_buff *skb);
        int (*start)(struct ieee80211_hw *hw);
        void (*stop)(struct ieee80211_hw *hw);
 #ifdef CONFIG_PM
index 96a3b5c03e37d965b51e9d9754af3969df6146fc..980d263765cf41059ede684d54abafb6a9e66c6c 100644 (file)
@@ -49,6 +49,7 @@ enum {
 #include <linux/types.h>
 #include <linux/if_arp.h>
 #include <linux/netdevice.h>
+#include <linux/hash.h>
 
 #include <net/neighbour.h>
 
@@ -134,7 +135,7 @@ static inline u32 ndisc_hashfn(const void *pkey, const struct net_device *dev, _
 {
        const u32 *p32 = pkey;
 
-       return (((p32[0] ^ dev->ifindex) * hash_rnd[0]) +
+       return (((p32[0] ^ hash32_ptr(dev)) * hash_rnd[0]) +
                (p32[1] * hash_rnd[1]) +
                (p32[2] * hash_rnd[2]) +
                (p32[3] * hash_rnd[3]));
index 344d8988842a527fbec3bffc674cda865f67b15c..0dab173e27da6e8e66a8eea4913f5b0230f612bd 100644 (file)
@@ -334,18 +334,22 @@ static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb)
 }
 #endif
 
-static inline int neigh_hh_output(struct hh_cache *hh, struct sk_buff *skb)
+static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb)
 {
        unsigned int seq;
        int hh_len;
 
        do {
-               int hh_alen;
-
                seq = read_seqbegin(&hh->hh_lock);
                hh_len = hh->hh_len;
-               hh_alen = HH_DATA_ALIGN(hh_len);
-               memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
+               if (likely(hh_len <= HH_DATA_MOD)) {
+                       /* this is inlined by gcc */
+                       memcpy(skb->data - HH_DATA_MOD, hh->hh_data, HH_DATA_MOD);
+               } else {
+                       int hh_alen = HH_DATA_ALIGN(hh_len);
+
+                       memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
+               }
        } while (read_seqretry(&hh->hh_lock, seq));
 
        skb_push(skb, hh_len);
index fd87963a0ea5878cecae892541b9fc9f79e64814..4faf6612ecacc05d2b00877691e440338ebb0144 100644 (file)
@@ -15,6 +15,7 @@
 #include <net/netns/packet.h>
 #include <net/netns/ipv4.h>
 #include <net/netns/ipv6.h>
+#include <net/netns/sctp.h>
 #include <net/netns/dccp.h>
 #include <net/netns/x_tables.h>
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
@@ -66,6 +67,7 @@ struct net {
        struct hlist_head       *dev_name_head;
        struct hlist_head       *dev_index_head;
        unsigned int            dev_base_seq;   /* protected by rtnl_mutex */
+       int                     ifindex;
 
        /* core fib_rules */
        struct list_head        rules_ops;
@@ -80,6 +82,9 @@ struct net {
 #if IS_ENABLED(CONFIG_IPV6)
        struct netns_ipv6       ipv6;
 #endif
+#if defined(CONFIG_IP_SCTP) || defined(CONFIG_IP_SCTP_MODULE)
+       struct netns_sctp       sctp;
+#endif
 #if defined(CONFIG_IP_DCCP) || defined(CONFIG_IP_DCCP_MODULE)
        struct netns_dccp       dccp;
 #endif
@@ -87,6 +92,9 @@ struct net {
        struct netns_xt         xt;
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
        struct netns_ct         ct;
+#endif
+#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
+       struct netns_nf_frag    nf_frag;
 #endif
        struct sock             *nfnl;
        struct sock             *nfnl_stash;
@@ -105,6 +113,13 @@ struct net {
        atomic_t                rt_genid;
 };
 
+/*
+ * ifindex generation is per-net namespace, and loopback is
+ * always the 1st device in ns (see net_dev_init), thus any
+ * loopback device should get ifindex 1
+ */
+
+#define LOOPBACK_IFINDEX       1
 
 #include <linux/seq_file_net.h>
 
index 4a045cda9c60c75a96b956b051dfbf5a6d6581b7..5654d292efd4f0883f6051610f6144552f20cb61 100644 (file)
@@ -17,7 +17,7 @@ struct nf_conntrack_ecache {
        unsigned long missed;   /* missed events */
        u16 ctmask;             /* bitmask of ct events to be delivered */
        u16 expmask;            /* bitmask of expect events to be delivered */
-       u32 pid;                /* netlink pid of destroyer */
+       u32 portid;             /* netlink portid of destroyer */
        struct timer_list timeout;
 };
 
@@ -60,7 +60,7 @@ nf_ct_ecache_ext_add(struct nf_conn *ct, u16 ctmask, u16 expmask, gfp_t gfp)
 /* This structure is passed to event handler */
 struct nf_ct_event {
        struct nf_conn *ct;
-       u32 pid;
+       u32 portid;
        int report;
 };
 
@@ -92,7 +92,7 @@ nf_conntrack_event_cache(enum ip_conntrack_events event, struct nf_conn *ct)
 static inline int
 nf_conntrack_eventmask_report(unsigned int eventmask,
                              struct nf_conn *ct,
-                             u32 pid,
+                             u32 portid,
                              int report)
 {
        int ret = 0;
@@ -112,11 +112,11 @@ nf_conntrack_eventmask_report(unsigned int eventmask,
        if (nf_ct_is_confirmed(ct) && !nf_ct_is_dying(ct)) {
                struct nf_ct_event item = {
                        .ct     = ct,
-                       .pid    = e->pid ? e->pid : pid,
+                       .portid = e->portid ? e->portid : portid,
                        .report = report
                };
                /* This is a resent of a destroy event? If so, skip missed */
-               unsigned long missed = e->pid ? 0 : e->missed;
+               unsigned long missed = e->portid ? 0 : e->missed;
 
                if (!((eventmask | missed) & e->ctmask))
                        goto out_unlock;
@@ -126,11 +126,11 @@ nf_conntrack_eventmask_report(unsigned int eventmask,
                        spin_lock_bh(&ct->lock);
                        if (ret < 0) {
                                /* This is a destroy event that has been
-                                * triggered by a process, we store the PID
+                                * triggered by a process, we store the PORTID
                                 * to include it in the retransmission. */
                                if (eventmask & (1 << IPCT_DESTROY) &&
-                                   e->pid == 0 && pid != 0)
-                                       e->pid = pid;
+                                   e->portid == 0 && portid != 0)
+                                       e->portid = portid;
                                else
                                        e->missed |= eventmask;
                        } else
@@ -145,9 +145,9 @@ out_unlock:
 
 static inline int
 nf_conntrack_event_report(enum ip_conntrack_events event, struct nf_conn *ct,
-                         u32 pid, int report)
+                         u32 portid, int report)
 {
-       return nf_conntrack_eventmask_report(1 << event, ct, pid, report);
+       return nf_conntrack_eventmask_report(1 << event, ct, portid, report);
 }
 
 static inline int
@@ -158,7 +158,7 @@ nf_conntrack_event(enum ip_conntrack_events event, struct nf_conn *ct)
 
 struct nf_exp_event {
        struct nf_conntrack_expect *exp;
-       u32 pid;
+       u32 portid;
        int report;
 };
 
@@ -172,7 +172,7 @@ extern void nf_ct_expect_unregister_notifier(struct net *net, struct nf_exp_even
 static inline void
 nf_ct_expect_event_report(enum ip_conntrack_expect_events event,
                          struct nf_conntrack_expect *exp,
-                         u32 pid,
+                         u32 portid,
                          int report)
 {
        struct net *net = nf_ct_exp_net(exp);
@@ -191,7 +191,7 @@ nf_ct_expect_event_report(enum ip_conntrack_expect_events event,
        if (e->expmask & (1 << event)) {
                struct nf_exp_event item = {
                        .exp    = exp,
-                       .pid    = pid,
+                       .portid = portid,
                        .report = report
                };
                notify->fcn(1 << event, &item);
@@ -216,20 +216,20 @@ static inline void nf_conntrack_event_cache(enum ip_conntrack_events event,
                                            struct nf_conn *ct) {}
 static inline int nf_conntrack_eventmask_report(unsigned int eventmask,
                                                struct nf_conn *ct,
-                                               u32 pid,
+                                               u32 portid,
                                                int report) { return 0; }
 static inline int nf_conntrack_event(enum ip_conntrack_events event,
                                     struct nf_conn *ct) { return 0; }
 static inline int nf_conntrack_event_report(enum ip_conntrack_events event,
                                            struct nf_conn *ct,
-                                           u32 pid,
+                                           u32 portid,
                                            int report) { return 0; }
 static inline void nf_ct_deliver_cached_events(const struct nf_conn *ct) {}
 static inline void nf_ct_expect_event(enum ip_conntrack_expect_events event,
                                      struct nf_conntrack_expect *exp) {}
 static inline void nf_ct_expect_event_report(enum ip_conntrack_expect_events e,
                                             struct nf_conntrack_expect *exp,
-                                            u32 pid,
+                                            u32 portid,
                                             int report) {}
 
 static inline int nf_conntrack_ecache_init(struct net *net)
index 983f00263243c66407ad4a1281167d9888378bd1..cc13f377a705c36c62987060d3cbe5e63eac8147 100644 (file)
@@ -43,7 +43,7 @@ struct nf_conntrack_expect {
        unsigned int class;
 
 #ifdef CONFIG_NF_NAT_NEEDED
-       __be32 saved_ip;
+       union nf_inet_addr saved_addr;
        /* This is the original per-proto part, used to map the
         * expected connection the way the recipient expects. */
        union nf_conntrack_man_proto saved_proto;
index 34ec89f8dbf90303246af81061e8746bad99e867..e41e472d08f2123a7a0917e598196ec4ebf8e5d3 100644 (file)
@@ -55,6 +55,26 @@ struct nf_conn_timeout *nf_ct_timeout_ext_add(struct nf_conn *ct,
 #endif
 };
 
+static inline unsigned int *
+nf_ct_timeout_lookup(struct net *net, struct nf_conn *ct,
+                    struct nf_conntrack_l4proto *l4proto)
+{
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
+       struct nf_conn_timeout *timeout_ext;
+       unsigned int *timeouts;
+
+       timeout_ext = nf_ct_timeout_find(ct);
+       if (timeout_ext)
+               timeouts = NF_CT_TIMEOUT_EXT_DATA(timeout_ext);
+       else
+               timeouts = l4proto->get_timeouts(net);
+
+       return timeouts;
+#else
+       return l4proto->get_timeouts(net);
+#endif
+}
+
 #ifdef CONFIG_NF_CONNTRACK_TIMEOUT
 extern int nf_conntrack_timeout_init(struct net *net);
 extern void nf_conntrack_timeout_fini(struct net *net);
index b4de990b55f123e7f2c9ad1095634743e2ad4698..bd8eea720f2ed0c3e0b61de6b03cb36035ff61cf 100644 (file)
@@ -43,14 +43,16 @@ struct nf_conn_nat {
        struct nf_conn *ct;
        union nf_conntrack_nat_help help;
 #if defined(CONFIG_IP_NF_TARGET_MASQUERADE) || \
-    defined(CONFIG_IP_NF_TARGET_MASQUERADE_MODULE)
+    defined(CONFIG_IP_NF_TARGET_MASQUERADE_MODULE) || \
+    defined(CONFIG_IP6_NF_TARGET_MASQUERADE) || \
+    defined(CONFIG_IP6_NF_TARGET_MASQUERADE_MODULE)
        int masq_index;
 #endif
 };
 
 /* Set up the info structure to map into this range. */
 extern unsigned int nf_nat_setup_info(struct nf_conn *ct,
-                                     const struct nf_nat_ipv4_range *range,
+                                     const struct nf_nat_range *range,
                                      enum nf_nat_manip_type maniptype);
 
 /* Is this tuple already taken? (not by us)*/
index b13d8d18d595b320d454b4d6b91e05d7aeea41b2..972e1e47ec79819f610cf9081be732b9131aa46a 100644 (file)
@@ -12,10 +12,7 @@ extern unsigned int nf_nat_packet(struct nf_conn *ct,
                                  unsigned int hooknum,
                                  struct sk_buff *skb);
 
-extern int nf_nat_icmp_reply_translation(struct nf_conn *ct,
-                                        enum ip_conntrack_info ctinfo,
-                                        unsigned int hooknum,
-                                        struct sk_buff *skb);
+extern int nf_xfrm_me_harder(struct sk_buff *skb, unsigned int family);
 
 static inline int nf_nat_initialized(struct nf_conn *ct,
                                     enum nf_nat_manip_type manip)
index 7d8fb7b46c442552f1d4375f3465d6dded328a4e..b4d6bfc2af034a32c1a5f9b847c91b99039cd868 100644 (file)
@@ -10,6 +10,7 @@ struct sk_buff;
 extern int __nf_nat_mangle_tcp_packet(struct sk_buff *skb,
                                      struct nf_conn *ct,
                                      enum ip_conntrack_info ctinfo,
+                                     unsigned int protoff,
                                      unsigned int match_offset,
                                      unsigned int match_len,
                                      const char *rep_buffer,
@@ -18,12 +19,13 @@ extern int __nf_nat_mangle_tcp_packet(struct sk_buff *skb,
 static inline int nf_nat_mangle_tcp_packet(struct sk_buff *skb,
                                           struct nf_conn *ct,
                                           enum ip_conntrack_info ctinfo,
+                                          unsigned int protoff,
                                           unsigned int match_offset,
                                           unsigned int match_len,
                                           const char *rep_buffer,
                                           unsigned int rep_len)
 {
-       return __nf_nat_mangle_tcp_packet(skb, ct, ctinfo,
+       return __nf_nat_mangle_tcp_packet(skb, ct, ctinfo, protoff,
                                          match_offset, match_len,
                                          rep_buffer, rep_len, true);
 }
@@ -31,6 +33,7 @@ static inline int nf_nat_mangle_tcp_packet(struct sk_buff *skb,
 extern int nf_nat_mangle_udp_packet(struct sk_buff *skb,
                                    struct nf_conn *ct,
                                    enum ip_conntrack_info ctinfo,
+                                   unsigned int protoff,
                                    unsigned int match_offset,
                                    unsigned int match_len,
                                    const char *rep_buffer,
@@ -41,10 +44,12 @@ extern void nf_nat_set_seq_adjust(struct nf_conn *ct,
                                  __be32 seq, s16 off);
 extern int nf_nat_seq_adjust(struct sk_buff *skb,
                             struct nf_conn *ct,
-                            enum ip_conntrack_info ctinfo);
+                            enum ip_conntrack_info ctinfo,
+                            unsigned int protoff);
 extern int (*nf_nat_seq_adjust_hook)(struct sk_buff *skb,
                                     struct nf_conn *ct,
-                                    enum ip_conntrack_info ctinfo);
+                                    enum ip_conntrack_info ctinfo,
+                                    unsigned int protoff);
 
 /* Setup NAT on this expected conntrack so it follows master, but goes
  * to port ct->master->saved_proto. */
diff --git a/include/net/netfilter/nf_nat_l3proto.h b/include/net/netfilter/nf_nat_l3proto.h
new file mode 100644 (file)
index 0000000..bd3b97e
--- /dev/null
@@ -0,0 +1,52 @@
+#ifndef _NF_NAT_L3PROTO_H
+#define _NF_NAT_L3PROTO_H
+
+struct nf_nat_l4proto;
+struct nf_nat_l3proto {
+       u8      l3proto;
+
+       bool    (*in_range)(const struct nf_conntrack_tuple *t,
+                           const struct nf_nat_range *range);
+
+       u32     (*secure_port)(const struct nf_conntrack_tuple *t, __be16);
+
+       bool    (*manip_pkt)(struct sk_buff *skb,
+                            unsigned int iphdroff,
+                            const struct nf_nat_l4proto *l4proto,
+                            const struct nf_conntrack_tuple *target,
+                            enum nf_nat_manip_type maniptype);
+
+       void    (*csum_update)(struct sk_buff *skb, unsigned int iphdroff,
+                              __sum16 *check,
+                              const struct nf_conntrack_tuple *t,
+                              enum nf_nat_manip_type maniptype);
+
+       void    (*csum_recalc)(struct sk_buff *skb, u8 proto,
+                              void *data, __sum16 *check,
+                              int datalen, int oldlen);
+
+       void    (*decode_session)(struct sk_buff *skb,
+                                 const struct nf_conn *ct,
+                                 enum ip_conntrack_dir dir,
+                                 unsigned long statusbit,
+                                 struct flowi *fl);
+
+       int     (*nlattr_to_range)(struct nlattr *tb[],
+                                  struct nf_nat_range *range);
+};
+
+extern int nf_nat_l3proto_register(const struct nf_nat_l3proto *);
+extern void nf_nat_l3proto_unregister(const struct nf_nat_l3proto *);
+extern const struct nf_nat_l3proto *__nf_nat_l3proto_find(u8 l3proto);
+
+extern int nf_nat_icmp_reply_translation(struct sk_buff *skb,
+                                        struct nf_conn *ct,
+                                        enum ip_conntrack_info ctinfo,
+                                        unsigned int hooknum);
+extern int nf_nat_icmpv6_reply_translation(struct sk_buff *skb,
+                                          struct nf_conn *ct,
+                                          enum ip_conntrack_info ctinfo,
+                                          unsigned int hooknum,
+                                          unsigned int hdrlen);
+
+#endif /* _NF_NAT_L3PROTO_H */
diff --git a/include/net/netfilter/nf_nat_l4proto.h b/include/net/netfilter/nf_nat_l4proto.h
new file mode 100644 (file)
index 0000000..24feb68
--- /dev/null
@@ -0,0 +1,72 @@
+/* Header for use in defining a given protocol. */
+#ifndef _NF_NAT_L4PROTO_H
+#define _NF_NAT_L4PROTO_H
+#include <net/netfilter/nf_nat.h>
+#include <linux/netfilter/nfnetlink_conntrack.h>
+
+struct nf_nat_range;
+struct nf_nat_l3proto;
+
+struct nf_nat_l4proto {
+       /* Protocol number. */
+       u8 l4proto;
+
+       /* Translate a packet to the target according to manip type.
+        * Return true if succeeded.
+        */
+       bool (*manip_pkt)(struct sk_buff *skb,
+                         const struct nf_nat_l3proto *l3proto,
+                         unsigned int iphdroff, unsigned int hdroff,
+                         const struct nf_conntrack_tuple *tuple,
+                         enum nf_nat_manip_type maniptype);
+
+       /* Is the manipable part of the tuple between min and max incl? */
+       bool (*in_range)(const struct nf_conntrack_tuple *tuple,
+                        enum nf_nat_manip_type maniptype,
+                        const union nf_conntrack_man_proto *min,
+                        const union nf_conntrack_man_proto *max);
+
+       /* Alter the per-proto part of the tuple (depending on
+        * maniptype), to give a unique tuple in the given range if
+        * possible.  Per-protocol part of tuple is initialized to the
+        * incoming packet.
+        */
+       void (*unique_tuple)(const struct nf_nat_l3proto *l3proto,
+                            struct nf_conntrack_tuple *tuple,
+                            const struct nf_nat_range *range,
+                            enum nf_nat_manip_type maniptype,
+                            const struct nf_conn *ct);
+
+       int (*nlattr_to_range)(struct nlattr *tb[],
+                              struct nf_nat_range *range);
+};
+
+/* Protocol registration. */
+extern int nf_nat_l4proto_register(u8 l3proto, const struct nf_nat_l4proto *l4proto);
+extern void nf_nat_l4proto_unregister(u8 l3proto, const struct nf_nat_l4proto *l4proto);
+
+extern const struct nf_nat_l4proto *__nf_nat_l4proto_find(u8 l3proto, u8 l4proto);
+
+/* Built-in protocols. */
+extern const struct nf_nat_l4proto nf_nat_l4proto_tcp;
+extern const struct nf_nat_l4proto nf_nat_l4proto_udp;
+extern const struct nf_nat_l4proto nf_nat_l4proto_icmp;
+extern const struct nf_nat_l4proto nf_nat_l4proto_icmpv6;
+extern const struct nf_nat_l4proto nf_nat_l4proto_unknown;
+
+extern bool nf_nat_l4proto_in_range(const struct nf_conntrack_tuple *tuple,
+                                   enum nf_nat_manip_type maniptype,
+                                   const union nf_conntrack_man_proto *min,
+                                   const union nf_conntrack_man_proto *max);
+
+extern void nf_nat_l4proto_unique_tuple(const struct nf_nat_l3proto *l3proto,
+                                       struct nf_conntrack_tuple *tuple,
+                                       const struct nf_nat_range *range,
+                                       enum nf_nat_manip_type maniptype,
+                                       const struct nf_conn *ct,
+                                       u16 *rover);
+
+extern int nf_nat_l4proto_nlattr_to_range(struct nlattr *tb[],
+                                         struct nf_nat_range *range);
+
+#endif /*_NF_NAT_L4PROTO_H*/
diff --git a/include/net/netfilter/nf_nat_protocol.h b/include/net/netfilter/nf_nat_protocol.h
deleted file mode 100644 (file)
index 7b0b511..0000000
+++ /dev/null
@@ -1,67 +0,0 @@
-/* Header for use in defining a given protocol. */
-#ifndef _NF_NAT_PROTOCOL_H
-#define _NF_NAT_PROTOCOL_H
-#include <net/netfilter/nf_nat.h>
-#include <linux/netfilter/nfnetlink_conntrack.h>
-
-struct nf_nat_ipv4_range;
-
-struct nf_nat_protocol {
-       /* Protocol number. */
-       unsigned int protonum;
-
-       /* Translate a packet to the target according to manip type.
-          Return true if succeeded. */
-       bool (*manip_pkt)(struct sk_buff *skb,
-                         unsigned int iphdroff,
-                         const struct nf_conntrack_tuple *tuple,
-                         enum nf_nat_manip_type maniptype);
-
-       /* Is the manipable part of the tuple between min and max incl? */
-       bool (*in_range)(const struct nf_conntrack_tuple *tuple,
-                        enum nf_nat_manip_type maniptype,
-                        const union nf_conntrack_man_proto *min,
-                        const union nf_conntrack_man_proto *max);
-
-       /* Alter the per-proto part of the tuple (depending on
-          maniptype), to give a unique tuple in the given range if
-          possible.  Per-protocol part of tuple is initialized to the
-          incoming packet. */
-       void (*unique_tuple)(struct nf_conntrack_tuple *tuple,
-                            const struct nf_nat_ipv4_range *range,
-                            enum nf_nat_manip_type maniptype,
-                            const struct nf_conn *ct);
-
-       int (*nlattr_to_range)(struct nlattr *tb[],
-                              struct nf_nat_ipv4_range *range);
-};
-
-/* Protocol registration. */
-extern int nf_nat_protocol_register(const struct nf_nat_protocol *proto);
-extern void nf_nat_protocol_unregister(const struct nf_nat_protocol *proto);
-
-/* Built-in protocols. */
-extern const struct nf_nat_protocol nf_nat_protocol_tcp;
-extern const struct nf_nat_protocol nf_nat_protocol_udp;
-extern const struct nf_nat_protocol nf_nat_protocol_icmp;
-extern const struct nf_nat_protocol nf_nat_unknown_protocol;
-
-extern int init_protocols(void) __init;
-extern void cleanup_protocols(void);
-extern const struct nf_nat_protocol *find_nat_proto(u_int16_t protonum);
-
-extern bool nf_nat_proto_in_range(const struct nf_conntrack_tuple *tuple,
-                                 enum nf_nat_manip_type maniptype,
-                                 const union nf_conntrack_man_proto *min,
-                                 const union nf_conntrack_man_proto *max);
-
-extern void nf_nat_proto_unique_tuple(struct nf_conntrack_tuple *tuple,
-                                     const struct nf_nat_ipv4_range *range,
-                                     enum nf_nat_manip_type maniptype,
-                                     const struct nf_conn *ct,
-                                     u_int16_t *rover);
-
-extern int nf_nat_proto_nlattr_to_range(struct nlattr *tb[],
-                                       struct nf_nat_ipv4_range *range);
-
-#endif /*_NF_NAT_PROTO_H*/
diff --git a/include/net/netfilter/nf_nat_rule.h b/include/net/netfilter/nf_nat_rule.h
deleted file mode 100644 (file)
index 2890bdc..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-#ifndef _NF_NAT_RULE_H
-#define _NF_NAT_RULE_H
-#include <net/netfilter/nf_conntrack.h>
-#include <net/netfilter/nf_nat.h>
-#include <linux/netfilter_ipv4/ip_tables.h>
-
-extern int nf_nat_rule_init(void) __init;
-extern void nf_nat_rule_cleanup(void);
-extern int nf_nat_rule_find(struct sk_buff *skb,
-                           unsigned int hooknum,
-                           const struct net_device *in,
-                           const struct net_device *out,
-                           struct nf_conn *ct);
-
-#endif /* _NF_NAT_RULE_H */
index f67440970d7e78d7177252eeafbaead56b74f6dd..2c95d55f79149173d3a1f5f27bb1213dd24a323b 100644 (file)
@@ -110,7 +110,7 @@ struct cipso_v4_doi;
 /* NetLabel audit information */
 struct netlbl_audit {
        u32 secid;
-       uid_t loginuid;
+       kuid_t loginuid;
        u32 sessionid;
 };
 
index 785f37a3b44ee80e1336d7301bd54c6ff48b7ba7..9690b0f6698a1d1b433ea572bdc4efb938f501c5 100644 (file)
  *   nla_put_u16(skb, type, value)     add u16 attribute to skb
  *   nla_put_u32(skb, type, value)     add u32 attribute to skb
  *   nla_put_u64(skb, type, value)     add u64 attribute to skb
+ *   nla_put_s8(skb, type, value)      add s8 attribute to skb
+ *   nla_put_s16(skb, type, value)     add s16 attribute to skb
+ *   nla_put_s32(skb, type, value)     add s32 attribute to skb
+ *   nla_put_s64(skb, type, value)     add s64 attribute to skb
  *   nla_put_string(skb, type, str)    add string attribute to skb
  *   nla_put_flag(skb, type)           add flag attribute to skb
  *   nla_put_msecs(skb, type, jiffies) add msecs attribute to skb
  *   nla_get_u16(nla)                  get payload for a u16 attribute
  *   nla_get_u32(nla)                  get payload for a u32 attribute
  *   nla_get_u64(nla)                  get payload for a u64 attribute
+ *   nla_get_s8(nla)                   get payload for a s8 attribute
+ *   nla_get_s16(nla)                  get payload for a s16 attribute
+ *   nla_get_s32(nla)                  get payload for a s32 attribute
+ *   nla_get_s64(nla)                  get payload for a s64 attribute
  *   nla_get_flag(nla)                 return 1 if flag is true
  *   nla_get_msecs(nla)                        get payload for a msecs attribute
  *
@@ -160,6 +168,10 @@ enum {
        NLA_NESTED_COMPAT,
        NLA_NUL_STRING,
        NLA_BINARY,
+       NLA_S8,
+       NLA_S16,
+       NLA_S32,
+       NLA_S64,
        __NLA_TYPE_MAX,
 };
 
@@ -183,6 +195,8 @@ enum {
  *    NLA_NESTED_COMPAT    Minimum length of structure payload
  *    NLA_U8, NLA_U16,
  *    NLA_U32, NLA_U64,
+ *    NLA_S8, NLA_S16,
+ *    NLA_S32, NLA_S64,
  *    NLA_MSECS            Leaving the length field zero will verify the
  *                         given type fits, using it verifies minimum length
  *                         just like "All other"
@@ -203,19 +217,19 @@ struct nla_policy {
 /**
  * struct nl_info - netlink source information
  * @nlh: Netlink message header of original request
- * @pid: Netlink PID of requesting application
+ * @portid: Netlink PORTID of requesting application
  */
 struct nl_info {
        struct nlmsghdr         *nlh;
        struct net              *nl_net;
-       u32                     pid;
+       u32                     portid;
 };
 
 extern int             netlink_rcv_skb(struct sk_buff *skb,
                                        int (*cb)(struct sk_buff *,
                                                  struct nlmsghdr *));
 extern int             nlmsg_notify(struct sock *sk, struct sk_buff *skb,
-                                    u32 pid, unsigned int group, int report,
+                                    u32 portid, unsigned int group, int report,
                                     gfp_t flags);
 
 extern int             nla_validate(const struct nlattr *head,
@@ -430,7 +444,7 @@ static inline int nlmsg_report(const struct nlmsghdr *nlh)
 /**
  * nlmsg_put - Add a new netlink message to an skb
  * @skb: socket buffer to store message in
- * @pid: netlink process id
+ * @portid: netlink process id
  * @seq: sequence number of message
  * @type: message type
  * @payload: length of message payload
@@ -439,13 +453,13 @@ static inline int nlmsg_report(const struct nlmsghdr *nlh)
  * Returns NULL if the tailroom of the skb is insufficient to store
  * the message header and payload.
  */
-static inline struct nlmsghdr *nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq,
+static inline struct nlmsghdr *nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq,
                                         int type, int payload, int flags)
 {
        if (unlikely(skb_tailroom(skb) < nlmsg_total_size(payload)))
                return NULL;
 
-       return __nlmsg_put(skb, pid, seq, type, payload, flags);
+       return __nlmsg_put(skb, portid, seq, type, payload, flags);
 }
 
 /**
@@ -464,7 +478,7 @@ static inline struct nlmsghdr *nlmsg_put_answer(struct sk_buff *skb,
                                                int type, int payload,
                                                int flags)
 {
-       return nlmsg_put(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq,
+       return nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
                         type, payload, flags);
 }
 
@@ -549,18 +563,18 @@ static inline void nlmsg_free(struct sk_buff *skb)
  * nlmsg_multicast - multicast a netlink message
  * @sk: netlink socket to spread messages to
  * @skb: netlink message as socket buffer
- * @pid: own netlink pid to avoid sending to yourself
+ * @portid: own netlink portid to avoid sending to yourself
  * @group: multicast group id
  * @flags: allocation flags
  */
 static inline int nlmsg_multicast(struct sock *sk, struct sk_buff *skb,
-                                 u32 pid, unsigned int group, gfp_t flags)
+                                 u32 portid, unsigned int group, gfp_t flags)
 {
        int err;
 
        NETLINK_CB(skb).dst_group = group;
 
-       err = netlink_broadcast(sk, skb, pid, group, flags);
+       err = netlink_broadcast(sk, skb, portid, group, flags);
        if (err > 0)
                err = 0;
 
@@ -571,13 +585,13 @@ static inline int nlmsg_multicast(struct sock *sk, struct sk_buff *skb,
  * nlmsg_unicast - unicast a netlink message
  * @sk: netlink socket to spread message to
  * @skb: netlink message as socket buffer
- * @pid: netlink pid of the destination socket
+ * @portid: netlink portid of the destination socket
  */
-static inline int nlmsg_unicast(struct sock *sk, struct sk_buff *skb, u32 pid)
+static inline int nlmsg_unicast(struct sock *sk, struct sk_buff *skb, u32 portid)
 {
        int err;
 
-       err = netlink_unicast(sk, skb, pid, MSG_DONTWAIT);
+       err = netlink_unicast(sk, skb, portid, MSG_DONTWAIT);
        if (err > 0)
                err = 0;
 
@@ -878,6 +892,50 @@ static inline int nla_put_le64(struct sk_buff *skb, int attrtype, __le64 value)
        return nla_put(skb, attrtype, sizeof(__le64), &value);
 }
 
+/**
+ * nla_put_s8 - Add a s8 netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_s8(struct sk_buff *skb, int attrtype, s8 value)
+{
+       return nla_put(skb, attrtype, sizeof(s8), &value);
+}
+
+/**
+ * nla_put_s16 - Add a s16 netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_s16(struct sk_buff *skb, int attrtype, s16 value)
+{
+       return nla_put(skb, attrtype, sizeof(s16), &value);
+}
+
+/**
+ * nla_put_s32 - Add a s32 netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_s32(struct sk_buff *skb, int attrtype, s32 value)
+{
+       return nla_put(skb, attrtype, sizeof(s32), &value);
+}
+
+/**
+ * nla_put_s64 - Add a s64 netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_s64(struct sk_buff *skb, int attrtype, s64 value)
+{
+       return nla_put(skb, attrtype, sizeof(s64), &value);
+}
+
 /**
  * nla_put_string - Add a string netlink attribute to a socket buffer
  * @skb: socket buffer to add attribute to
@@ -993,6 +1051,46 @@ static inline __be64 nla_get_be64(const struct nlattr *nla)
        return tmp;
 }
 
+/**
+ * nla_get_s32 - return payload of s32 attribute
+ * @nla: s32 netlink attribute
+ */
+static inline s32 nla_get_s32(const struct nlattr *nla)
+{
+       return *(s32 *) nla_data(nla);
+}
+
+/**
+ * nla_get_s16 - return payload of s16 attribute
+ * @nla: s16 netlink attribute
+ */
+static inline s16 nla_get_s16(const struct nlattr *nla)
+{
+       return *(s16 *) nla_data(nla);
+}
+
+/**
+ * nla_get_s8 - return payload of s8 attribute
+ * @nla: s8 netlink attribute
+ */
+static inline s8 nla_get_s8(const struct nlattr *nla)
+{
+       return *(s8 *) nla_data(nla);
+}
+
+/**
+ * nla_get_s64 - return payload of s64 attribute
+ * @nla: s64 netlink attribute
+ */
+static inline s64 nla_get_s64(const struct nlattr *nla)
+{
+       s64 tmp;
+
+       nla_memcpy(&tmp, nla, sizeof(tmp));
+
+       return tmp;
+}
+
 /**
  * nla_get_flag - return payload of flag attribute
  * @nla: flag netlink attribute
index 3aecdc7a84fb145255d6ae2c8457945d55a3c732..a1d83cc8bf859d356b9d4b437c4d03590fa25453 100644 (file)
@@ -83,6 +83,10 @@ struct netns_ct {
        int                     sysctl_auto_assign_helper;
        bool                    auto_assign_helper_warned;
        struct nf_ip_net        nf_ct_proto;
+#ifdef CONFIG_NF_NAT_NEEDED
+       struct hlist_head       *nat_bysource;
+       unsigned int            nat_htable_size;
+#endif
 #ifdef CONFIG_SYSCTL
        struct ctl_table_header *sysctl_header;
        struct ctl_table_header *acct_sysctl_header;
index eb24dbccd81e81aff5fac76c8792e93df66e7bb6..2ae2b8372cfdc1a64210218fe3824d7f7973b60c 100644 (file)
@@ -5,6 +5,7 @@
 #ifndef __NETNS_IPV4_H__
 #define __NETNS_IPV4_H__
 
+#include <linux/uidgid.h>
 #include <net/inet_frag.h>
 
 struct tcpm_hash_bucket;
@@ -51,8 +52,6 @@ struct netns_ipv4 {
        struct xt_table         *iptable_security;
 #endif
        struct xt_table         *nat_table;
-       struct hlist_head       *nat_bysource;
-       unsigned int            nat_htable_size;
 #endif
 
        int sysctl_icmp_echo_ignore_all;
@@ -62,7 +61,7 @@ struct netns_ipv4 {
        int sysctl_icmp_ratemask;
        int sysctl_icmp_errors_use_inbound_ifaddr;
 
-       unsigned int sysctl_ping_group_range[2];
+       kgid_t sysctl_ping_group_range[2];
        long sysctl_tcp_mem[3];
 
        atomic_t dev_addr_genid;
index df0a5456a3fd08b78b7213457a94e5bfd282b87e..214cb0a53359e03b476670e49eb73298545e1859 100644 (file)
@@ -42,6 +42,7 @@ struct netns_ipv6 {
 #ifdef CONFIG_SECURITY
        struct xt_table         *ip6table_security;
 #endif
+       struct xt_table         *ip6table_nat;
 #endif
        struct rt6_info         *ip6_null_entry;
        struct rt6_statistics   *rt6_stats;
@@ -70,4 +71,12 @@ struct netns_ipv6 {
 #endif
 #endif
 };
+
+#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
+struct netns_nf_frag {
+       struct netns_sysctl_ipv6 sysctl;
+       struct netns_frags      frags;
+};
+#endif
+
 #endif
index cb4e894c0f8dbe5fefb6e82e50c4ad6b524e777d..17ec2b95c062d0b0dc13d2c9702c2ab9da831682 100644 (file)
@@ -5,10 +5,10 @@
 #define __NETNS_PACKET_H__
 
 #include <linux/rculist.h>
-#include <linux/spinlock.h>
+#include <linux/mutex.h>
 
 struct netns_packet {
-       spinlock_t              sklist_lock;
+       struct mutex            sklist_lock;
        struct hlist_head       sklist;
 };
 
diff --git a/include/net/netns/sctp.h b/include/net/netns/sctp.h
new file mode 100644 (file)
index 0000000..5e5eb1f
--- /dev/null
@@ -0,0 +1,131 @@
+#ifndef __NETNS_SCTP_H__
+#define __NETNS_SCTP_H__
+
+struct sock;
+struct proc_dir_entry;
+struct sctp_mib;
+struct ctl_table_header;
+
+struct netns_sctp {
+       DEFINE_SNMP_STAT(struct sctp_mib, sctp_statistics);
+
+#ifdef CONFIG_PROC_FS
+       struct proc_dir_entry *proc_net_sctp;
+#endif
+#ifdef CONFIG_SYSCTL
+       struct ctl_table_header *sysctl_header;
+#endif
+       /* This is the global socket data structure used for responding to
+        * the Out-of-the-blue (OOTB) packets.  A control sock will be created
+        * for this socket at the initialization time.
+        */
+       struct sock *ctl_sock;
+
+       /* This is the global local address list.
+        * We actively maintain this complete list of addresses on
+        * the system by catching address add/delete events.
+        *
+        * It is a list of sctp_sockaddr_entry.
+        */
+       struct list_head local_addr_list;
+       struct list_head addr_waitq;
+       struct timer_list addr_wq_timer;
+       struct list_head auto_asconf_splist;
+       spinlock_t addr_wq_lock;
+
+       /* Lock that protects the local_addr_list writers */
+       spinlock_t local_addr_lock;
+
+       /* RFC2960 Section 14. Suggested SCTP Protocol Parameter Values
+        *
+        * The following protocol parameters are RECOMMENDED:
+        *
+        * RTO.Initial              - 3  seconds
+        * RTO.Min                  - 1  second
+        * RTO.Max                 -  60 seconds
+        * RTO.Alpha                - 1/8  (3 when converted to right shifts.)
+        * RTO.Beta                 - 1/4  (2 when converted to right shifts.)
+        */
+       unsigned int rto_initial;
+       unsigned int rto_min;
+       unsigned int rto_max;
+
+       /* Note: rto_alpha and rto_beta are really defined as inverse
+        * powers of two to facilitate integer operations.
+        */
+       int rto_alpha;
+       int rto_beta;
+
+       /* Max.Burst                - 4 */
+       int max_burst;
+
+       /* Whether Cookie Preservative is enabled(1) or not(0) */
+       int cookie_preserve_enable;
+
+       /* Valid.Cookie.Life        - 60  seconds  */
+       unsigned int valid_cookie_life;
+
+       /* Delayed SACK timeout  200ms default*/
+       unsigned int sack_timeout;
+
+       /* HB.interval              - 30 seconds  */
+       unsigned int hb_interval;
+
+       /* Association.Max.Retrans  - 10 attempts
+        * Path.Max.Retrans         - 5  attempts (per destination address)
+        * Max.Init.Retransmits     - 8  attempts
+        */
+       int max_retrans_association;
+       int max_retrans_path;
+       int max_retrans_init;
+       /* Potentially-Failed.Max.Retrans sysctl value
+        * taken from:
+        * http://tools.ietf.org/html/draft-nishida-tsvwg-sctp-failover-05
+        */
+       int pf_retrans;
+
+       /*
+        * Policy for performing sctp/socket accounting
+        * 0   - do socket level accounting, all assocs share sk_sndbuf
+        * 1   - do sctp accounting, each asoc may use sk_sndbuf bytes
+        */
+       int sndbuf_policy;
+
+       /*
+        * Policy for performing sctp/socket accounting
+        * 0   - do socket level accounting, all assocs share sk_rcvbuf
+        * 1   - do sctp accounting, each asoc may use sk_rcvbuf bytes
+        */
+       int rcvbuf_policy;
+
+       int default_auto_asconf;
+
+       /* Flag to indicate if addip is enabled. */
+       int addip_enable;
+       int addip_noauth;
+
+       /* Flag to indicate if PR-SCTP is enabled. */
+       int prsctp_enable;
+
+       /* Flag to indicate if SCTP-AUTH is enabled */
+       int auth_enable;
+
+       /*
+        * Policy to control SCTP IPv4 address scoping
+        * 0   - Disable IPv4 address scoping
+        * 1   - Enable IPv4 address scoping
+        * 2   - Selectively allow only IPv4 private addresses
+        * 3   - Selectively allow only IPv4 link local address
+        */
+       int scope_policy;
+
+       /* Threshold for rwnd update SACKS.  Receive buffer shifted this many
+        * bits is an indicator of when to send and window update SACK.
+        */
+       int rwnd_upd_shift;
+
+       /* Threshold for autoclose timeout, in seconds. */
+       unsigned long max_autoclose;
+};
+
+#endif /* __NETNS_SCTP_H__ */
index 2719dec6b5a8d32c6d36f818adc8ba006eabc02e..2760f4f4ae9b6779a7abd721b4ab428eb6c688fa 100644 (file)
 #include <linux/rcupdate.h>
 
 
+#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
 struct netprio_map {
        struct rcu_head rcu;
        u32 priomap_len;
        u32 priomap[];
 };
 
-#ifdef CONFIG_CGROUPS
-
 struct cgroup_netprio_state {
        struct cgroup_subsys_state css;
        u32 prioidx;
 };
 
-#ifndef CONFIG_NETPRIO_CGROUP
-extern int net_prio_subsys_id;
-#endif
-
 extern void sock_update_netprioidx(struct sock *sk, struct task_struct *task);
 
 #if IS_BUILTIN(CONFIG_NETPRIO_CGROUP)
@@ -56,33 +51,28 @@ static inline u32 task_netprioidx(struct task_struct *p)
 
 static inline u32 task_netprioidx(struct task_struct *p)
 {
-       struct cgroup_netprio_state *state;
-       int subsys_id;
+       struct cgroup_subsys_state *css;
        u32 idx = 0;
 
        rcu_read_lock();
-       subsys_id = rcu_dereference_index_check(net_prio_subsys_id,
-                                               rcu_read_lock_held());
-       if (subsys_id >= 0) {
-               state = container_of(task_subsys_state(p, subsys_id),
-                                    struct cgroup_netprio_state, css);
-               idx = state->prioidx;
-       }
+       css = task_subsys_state(p, net_prio_subsys_id);
+       if (css)
+               idx = container_of(css,
+                                  struct cgroup_netprio_state, css)->prioidx;
        rcu_read_unlock();
        return idx;
 }
+#endif
 
-#else
+#else /* !CONFIG_NETPRIO_CGROUP */
 
 static inline u32 task_netprioidx(struct task_struct *p)
 {
        return 0;
 }
 
-#endif /* CONFIG_NETPRIO_CGROUP */
-
-#else
 #define sock_update_netprioidx(sk, task)
-#endif
+
+#endif /* CONFIG_NETPRIO_CGROUP */
 
 #endif  /* _NET_CLS_CGROUP_H */
index f5169b04f0829aa10a438fcfb33e336b4cc26f33..e900072950cb8cf5635e4a106389e34bd5df21f1 100644 (file)
@@ -30,6 +30,11 @@ struct nfc_hci_ops {
        int (*open) (struct nfc_hci_dev *hdev);
        void (*close) (struct nfc_hci_dev *hdev);
        int (*hci_ready) (struct nfc_hci_dev *hdev);
+       /*
+        * xmit must always send the complete buffer before
+        * returning. Returned result must be 0 for success
+        * or negative for failure.
+        */
        int (*xmit) (struct nfc_hci_dev *hdev, struct sk_buff *skb);
        int (*start_poll) (struct nfc_hci_dev *hdev,
                           u32 im_protocols, u32 tm_protocols);
@@ -38,8 +43,8 @@ struct nfc_hci_ops {
        int (*complete_target_discovered) (struct nfc_hci_dev *hdev, u8 gate,
                                           struct nfc_target *target);
        int (*data_exchange) (struct nfc_hci_dev *hdev,
-                             struct nfc_target *target,
-                             struct sk_buff *skb, struct sk_buff **res_skb);
+                             struct nfc_target *target, struct sk_buff *skb,
+                             data_exchange_cb_t cb, void *cb_context);
        int (*check_presence)(struct nfc_hci_dev *hdev,
                              struct nfc_target *target);
 };
@@ -74,7 +79,6 @@ struct nfc_hci_dev {
 
        struct list_head msg_tx_queue;
 
-       struct workqueue_struct *msg_tx_wq;
        struct work_struct msg_tx_work;
 
        struct timer_list cmd_timer;
@@ -82,13 +86,14 @@ struct nfc_hci_dev {
 
        struct sk_buff_head rx_hcp_frags;
 
-       struct workqueue_struct *msg_rx_wq;
        struct work_struct msg_rx_work;
 
        struct sk_buff_head msg_rx_queue;
 
        struct nfc_hci_ops *ops;
 
+       struct nfc_llc *llc;
+
        struct nfc_hci_init_data init_data;
 
        void *clientdata;
@@ -105,12 +110,17 @@ struct nfc_hci_dev {
        u8 hw_mpw;
        u8 hw_software;
        u8 hw_bsid;
+
+       int async_cb_type;
+       data_exchange_cb_t async_cb;
+       void *async_cb_context;
 };
 
 /* hci device allocation */
 struct nfc_hci_dev *nfc_hci_allocate_device(struct nfc_hci_ops *ops,
                                            struct nfc_hci_init_data *init_data,
                                            u32 protocols,
+                                           const char *llc_name,
                                            int tx_headroom,
                                            int tx_tailroom,
                                            int max_link_payload);
@@ -202,6 +212,9 @@ int nfc_hci_set_param(struct nfc_hci_dev *hdev, u8 gate, u8 idx,
                      const u8 *param, size_t param_len);
 int nfc_hci_send_cmd(struct nfc_hci_dev *hdev, u8 gate, u8 cmd,
                     const u8 *param, size_t param_len, struct sk_buff **skb);
+int nfc_hci_send_cmd_async(struct nfc_hci_dev *hdev, u8 gate, u8 cmd,
+                          const u8 *param, size_t param_len,
+                          data_exchange_cb_t cb, void *cb_context);
 int nfc_hci_send_response(struct nfc_hci_dev *hdev, u8 gate, u8 response,
                          const u8 *param, size_t param_len);
 int nfc_hci_send_event(struct nfc_hci_dev *hdev, u8 gate, u8 event,
diff --git a/include/net/nfc/llc.h b/include/net/nfc/llc.h
new file mode 100644 (file)
index 0000000..400ab7a
--- /dev/null
@@ -0,0 +1,54 @@
+/*
+ * Link Layer Control manager public interface
+ *
+ * Copyright (C) 2012  Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __NFC_LLC_H_
+#define __NFC_LLC_H_
+
+#include <net/nfc/hci.h>
+#include <linux/skbuff.h>
+
+#define LLC_NOP_NAME "nop"
+#define LLC_SHDLC_NAME "shdlc"
+
+typedef void (*rcv_to_hci_t) (struct nfc_hci_dev *hdev, struct sk_buff *skb);
+typedef int (*xmit_to_drv_t) (struct nfc_hci_dev *hdev, struct sk_buff *skb);
+typedef void (*llc_failure_t) (struct nfc_hci_dev *hdev, int err);
+
+struct nfc_llc;
+
+struct nfc_llc *nfc_llc_allocate(const char *name, struct nfc_hci_dev *hdev,
+                                xmit_to_drv_t xmit_to_drv,
+                                rcv_to_hci_t rcv_to_hci, int tx_headroom,
+                                int tx_tailroom, llc_failure_t llc_failure);
+void nfc_llc_free(struct nfc_llc *llc);
+
+void nfc_llc_get_rx_head_tail_room(struct nfc_llc *llc, int *rx_headroom,
+                                  int *rx_tailroom);
+
+
+int nfc_llc_start(struct nfc_llc *llc);
+int nfc_llc_stop(struct nfc_llc *llc);
+void nfc_llc_rcv_from_drv(struct nfc_llc *llc, struct sk_buff *skb);
+int nfc_llc_xmit_from_hci(struct nfc_llc *llc, struct sk_buff *skb);
+
+int nfc_llc_init(void);
+void nfc_llc_exit(void);
+
+#endif /* __NFC_LLC_H_ */
index 276094b91d7ced880477730fd487f162cf5b4a6e..88785e5c6b2cf6d2c32a8af553259a9d21678025 100644 (file)
@@ -32,6 +32,7 @@
 #define NCI_MAX_NUM_MAPPING_CONFIGS                            10
 #define NCI_MAX_NUM_RF_CONFIGS                                 10
 #define NCI_MAX_NUM_CONN                                       10
+#define NCI_MAX_PARAM_LEN                                      251
 
 /* NCI Status Codes */
 #define NCI_STATUS_OK                                          0x00
 #define NCI_RF_INTERFACE_ISO_DEP                               0x02
 #define NCI_RF_INTERFACE_NFC_DEP                               0x03
 
+/* NCI Configuration Parameter Tags */
+#define NCI_PN_ATR_REQ_GEN_BYTES                               0x29
+
 /* NCI Reset types */
 #define NCI_RESET_TYPE_KEEP_CONFIG                             0x00
 #define NCI_RESET_TYPE_RESET_CONFIG                            0x01
@@ -188,6 +192,18 @@ struct nci_core_reset_cmd {
 
 #define NCI_OP_CORE_INIT_CMD           nci_opcode_pack(NCI_GID_CORE, 0x01)
 
+#define NCI_OP_CORE_SET_CONFIG_CMD     nci_opcode_pack(NCI_GID_CORE, 0x02)
+struct set_config_param {
+       __u8    id;
+       __u8    len;
+       __u8    val[NCI_MAX_PARAM_LEN];
+} __packed;
+
+struct nci_core_set_config_cmd {
+       __u8    num_params;
+       struct  set_config_param param; /* support 1 param per cmd is enough */
+} __packed;
+
 #define NCI_OP_RF_DISCOVER_MAP_CMD     nci_opcode_pack(NCI_GID_RF_MGMT, 0x00)
 struct disc_map_config {
        __u8    rf_protocol;
@@ -252,6 +268,13 @@ struct nci_core_init_rsp_2 {
        __le32  manufact_specific_info;
 } __packed;
 
+#define NCI_OP_CORE_SET_CONFIG_RSP     nci_opcode_pack(NCI_GID_CORE, 0x02)
+struct nci_core_set_config_rsp {
+       __u8    status;
+       __u8    num_params;
+       __u8    params_id[0];   /* variable size array */
+} __packed;
+
 #define NCI_OP_RF_DISCOVER_MAP_RSP     nci_opcode_pack(NCI_GID_RF_MGMT, 0x00)
 
 #define NCI_OP_RF_DISCOVER_RSP         nci_opcode_pack(NCI_GID_RF_MGMT, 0x03)
@@ -328,6 +351,11 @@ struct activation_params_nfcb_poll_iso_dep {
        __u8    attrib_res[50];
 };
 
+struct activation_params_poll_nfc_dep {
+       __u8    atr_res_len;
+       __u8    atr_res[63];
+};
+
 struct nci_rf_intf_activated_ntf {
        __u8    rf_discovery_id;
        __u8    rf_interface;
@@ -351,6 +379,7 @@ struct nci_rf_intf_activated_ntf {
        union {
                struct activation_params_nfca_poll_iso_dep nfca_poll_iso_dep;
                struct activation_params_nfcb_poll_iso_dep nfcb_poll_iso_dep;
+               struct activation_params_poll_nfc_dep poll_nfc_dep;
        } activation_params;
 
 } __packed;
index feba74027ff8bc18674c68625f8f6e5a9cd6562f..d705d867494987b30da4272fe4c1967a7884412a 100644 (file)
@@ -54,6 +54,7 @@ enum nci_state {
 /* NCI timeouts */
 #define NCI_RESET_TIMEOUT                      5000
 #define NCI_INIT_TIMEOUT                       5000
+#define NCI_SET_CONFIG_TIMEOUT                 5000
 #define NCI_RF_DISC_TIMEOUT                    5000
 #define NCI_RF_DISC_SELECT_TIMEOUT             5000
 #define NCI_RF_DEACTIVATE_TIMEOUT              30000
@@ -137,6 +138,10 @@ struct nci_dev {
        data_exchange_cb_t      data_exchange_cb;
        void                    *data_exchange_cb_context;
        struct sk_buff          *rx_data_reassembly;
+
+       /* stored during intf_activated_ntf */
+       __u8 remote_gb[NFC_MAX_GT_LEN];
+       __u8 remote_gb_len;
 };
 
 /* ----- NCI Devices ----- */
index 6431f5e3902217cba706b36f269844242f907856..f05b10682c9d9bbe0aa65579fc73bd8adf1747df 100644 (file)
@@ -72,6 +72,7 @@ struct nfc_ops {
 
 #define NFC_TARGET_IDX_ANY -1
 #define NFC_MAX_GT_LEN 48
+#define NFC_ATR_RES_GT_OFFSET 15
 
 struct nfc_target {
        u32 idx;
@@ -89,7 +90,7 @@ struct nfc_target {
 };
 
 struct nfc_genl_data {
-       u32 poll_req_pid;
+       u32 poll_req_portid;
        struct mutex genl_data_mutex;
 };
 
@@ -112,7 +113,6 @@ struct nfc_dev {
        int tx_tailroom;
 
        struct timer_list check_pres_timer;
-       struct workqueue_struct *check_pres_wq;
        struct work_struct check_pres_work;
 
        struct nfc_ops *ops;
diff --git a/include/net/nfc/shdlc.h b/include/net/nfc/shdlc.h
deleted file mode 100644 (file)
index 35e930d..0000000
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Copyright (C) 2012  Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the
- * Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-
-#ifndef __NFC_SHDLC_H
-#define __NFC_SHDLC_H
-
-struct nfc_shdlc;
-
-struct nfc_shdlc_ops {
-       int (*open) (struct nfc_shdlc *shdlc);
-       void (*close) (struct nfc_shdlc *shdlc);
-       int (*hci_ready) (struct nfc_shdlc *shdlc);
-       int (*xmit) (struct nfc_shdlc *shdlc, struct sk_buff *skb);
-       int (*start_poll) (struct nfc_shdlc *shdlc,
-                          u32 im_protocols, u32 tm_protocols);
-       int (*target_from_gate) (struct nfc_shdlc *shdlc, u8 gate,
-                                struct nfc_target *target);
-       int (*complete_target_discovered) (struct nfc_shdlc *shdlc, u8 gate,
-                                          struct nfc_target *target);
-       int (*data_exchange) (struct nfc_shdlc *shdlc,
-                             struct nfc_target *target,
-                             struct sk_buff *skb, struct sk_buff **res_skb);
-       int (*check_presence)(struct nfc_shdlc *shdlc,
-                             struct nfc_target *target);
-};
-
-enum shdlc_state {
-       SHDLC_DISCONNECTED = 0,
-       SHDLC_CONNECTING = 1,
-       SHDLC_NEGOCIATING = 2,
-       SHDLC_CONNECTED = 3
-};
-
-struct nfc_shdlc {
-       struct mutex state_mutex;
-       enum shdlc_state state;
-       int hard_fault;
-
-       struct nfc_hci_dev *hdev;
-
-       wait_queue_head_t *connect_wq;
-       int connect_tries;
-       int connect_result;
-       struct timer_list connect_timer;/* aka T3 in spec 10.6.1 */
-
-       u8 w;                           /* window size */
-       bool srej_support;
-
-       struct timer_list t1_timer;     /* send ack timeout */
-       bool t1_active;
-
-       struct timer_list t2_timer;     /* guard/retransmit timeout */
-       bool t2_active;
-
-       int ns;                         /* next seq num for send */
-       int nr;                         /* next expected seq num for receive */
-       int dnr;                        /* oldest sent unacked seq num */
-
-       struct sk_buff_head rcv_q;
-
-       struct sk_buff_head send_q;
-       bool rnr;                       /* other side is not ready to receive */
-
-       struct sk_buff_head ack_pending_q;
-
-       struct workqueue_struct *sm_wq;
-       struct work_struct sm_work;
-
-       struct nfc_shdlc_ops *ops;
-
-       int client_headroom;
-       int client_tailroom;
-
-       void *clientdata;
-};
-
-void nfc_shdlc_recv_frame(struct nfc_shdlc *shdlc, struct sk_buff *skb);
-
-struct nfc_shdlc *nfc_shdlc_allocate(struct nfc_shdlc_ops *ops,
-                                    struct nfc_hci_init_data *init_data,
-                                    u32 protocols,
-                                    int tx_headroom, int tx_tailroom,
-                                    int max_link_payload, const char *devname);
-
-void nfc_shdlc_free(struct nfc_shdlc *shdlc);
-
-void nfc_shdlc_set_clientdata(struct nfc_shdlc *shdlc, void *clientdata);
-void *nfc_shdlc_get_clientdata(struct nfc_shdlc *shdlc);
-struct nfc_hci_dev *nfc_shdlc_get_hci_dev(struct nfc_shdlc *shdlc);
-
-#endif /* __NFC_SHDLC_H */
index 4c0766e201e39b7650773e6ba8a6329d997269b4..b01d8dd9ee7ce99eb1bf702f00007ae456f28c16 100644 (file)
@@ -106,6 +106,34 @@ struct listen_sock {
        struct request_sock     *syn_table[0];
 };
 
+/*
+ * For a TCP Fast Open listener -
+ *     lock - protects the access to all the reqsk, which is co-owned by
+ *             the listener and the child socket.
+ *     qlen - pending TFO requests (still in TCP_SYN_RECV).
+ *     max_qlen - max TFO reqs allowed before TFO is disabled.
+ *
+ *     XXX (TFO) - ideally these fields can be made as part of "listen_sock"
+ *     structure above. But there is some implementation difficulty due to
+ *     listen_sock being part of request_sock_queue hence will be freed when
+ *     a listener is stopped. But TFO related fields may continue to be
+ *     accessed even after a listener is closed, until its sk_refcnt drops
+ *     to 0 implying no more outstanding TFO reqs. One solution is to keep
+ *     listen_opt around until sk_refcnt drops to 0. But there is some other
+ *     complexity that needs to be resolved. E.g., a listener can be disabled
+ *     temporarily through shutdown()->tcp_disconnect(), and re-enabled later.
+ */
+struct fastopen_queue {
+       struct request_sock     *rskq_rst_head; /* Keep track of past TFO */
+       struct request_sock     *rskq_rst_tail; /* requests that caused RST.
+                                                * This is part of the defense
+                                                * against spoofing attack.
+                                                */
+       spinlock_t      lock;
+       int             qlen;           /* # of pending (TCP_SYN_RECV) reqs */
+       int             max_qlen;       /* != 0 iff TFO is currently enabled */
+};
+
 /** struct request_sock_queue - queue of request_socks
  *
  * @rskq_accept_head - FIFO head of established children
@@ -129,6 +157,12 @@ struct request_sock_queue {
        u8                      rskq_defer_accept;
        /* 3 bytes hole, try to pack */
        struct listen_sock      *listen_opt;
+       struct fastopen_queue   *fastopenq; /* This is non-NULL iff TFO has been
+                                            * enabled on this listener. Check
+                                            * max_qlen != 0 in fastopen_queue
+                                            * to determine if TFO is enabled
+                                            * right at this moment.
+                                            */
 };
 
 extern int reqsk_queue_alloc(struct request_sock_queue *queue,
@@ -136,6 +170,8 @@ extern int reqsk_queue_alloc(struct request_sock_queue *queue,
 
 extern void __reqsk_queue_destroy(struct request_sock_queue *queue);
 extern void reqsk_queue_destroy(struct request_sock_queue *queue);
+extern void reqsk_fastopen_remove(struct sock *sk,
+                                 struct request_sock *req, bool reset);
 
 static inline struct request_sock *
        reqsk_queue_yank_acceptq(struct request_sock_queue *queue)
@@ -190,19 +226,6 @@ static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue
        return req;
 }
 
-static inline struct sock *reqsk_queue_get_child(struct request_sock_queue *queue,
-                                                struct sock *parent)
-{
-       struct request_sock *req = reqsk_queue_remove(queue);
-       struct sock *child = req->sk;
-
-       WARN_ON(child == NULL);
-
-       sk_acceptq_removed(parent);
-       __reqsk_free(req);
-       return child;
-}
-
 static inline int reqsk_queue_removed(struct request_sock_queue *queue,
                                      struct request_sock *req)
 {
index d9611e03241873fd5cb4140cf0d9c5e3ae7a2486..4616f468d5995f9a0b46d7351c5c8f7dc3a869c8 100644 (file)
@@ -188,7 +188,8 @@ struct tcf_proto_ops {
 
        unsigned long           (*get)(struct tcf_proto*, u32 handle);
        void                    (*put)(struct tcf_proto*, unsigned long);
-       int                     (*change)(struct tcf_proto*, unsigned long,
+       int                     (*change)(struct sk_buff *,
+                                       struct tcf_proto*, unsigned long,
                                        u32 handle, struct nlattr **,
                                        unsigned long *);
        int                     (*delete)(struct tcf_proto*, unsigned long);
index 7dc0854f0b3891992696002b7f75edf7a01233ab..975cca01048bee3b7da9017725654053921250ae 100644 (file)
  */
 #define SCM_MAX_FD     253
 
+struct scm_creds {
+       u32     pid;
+       kuid_t  uid;
+       kgid_t  gid;
+};
+
 struct scm_fp_list {
        short                   count;
        short                   max;
@@ -22,7 +28,7 @@ struct scm_cookie {
        struct pid              *pid;           /* Skb credentials */
        const struct cred       *cred;
        struct scm_fp_list      *fp;            /* Passed files         */
-       struct ucred            creds;          /* Skb credentials      */
+       struct scm_creds        creds;          /* Skb credentials      */
 #ifdef CONFIG_SECURITY_NETWORK
        u32                     secid;          /* Passed security ID   */
 #endif
@@ -49,7 +55,9 @@ static __inline__ void scm_set_cred(struct scm_cookie *scm,
 {
        scm->pid  = get_pid(pid);
        scm->cred = cred ? get_cred(cred) : NULL;
-       cred_to_ucred(pid, cred, &scm->creds);
+       scm->creds.pid = pid_vnr(pid);
+       scm->creds.uid = cred ? cred->euid : INVALID_UID;
+       scm->creds.gid = cred ? cred->egid : INVALID_GID;
 }
 
 static __inline__ void scm_destroy_cred(struct scm_cookie *scm)
@@ -65,7 +73,7 @@ static __inline__ void scm_destroy_cred(struct scm_cookie *scm)
 static __inline__ void scm_destroy(struct scm_cookie *scm)
 {
        scm_destroy_cred(scm);
-       if (scm && scm->fp)
+       if (scm->fp)
                __scm_destroy(scm);
 }
 
@@ -112,8 +120,15 @@ static __inline__ void scm_recv(struct socket *sock, struct msghdr *msg,
                return;
        }
 
-       if (test_bit(SOCK_PASSCRED, &sock->flags))
-               put_cmsg(msg, SOL_SOCKET, SCM_CREDENTIALS, sizeof(scm->creds), &scm->creds);
+       if (test_bit(SOCK_PASSCRED, &sock->flags)) {
+               struct user_namespace *current_ns = current_user_ns();
+               struct ucred ucreds = {
+                       .pid = scm->creds.pid,
+                       .uid = from_kuid_munged(current_ns, scm->creds.uid),
+                       .gid = from_kgid_munged(current_ns, scm->creds.gid),
+               };
+               put_cmsg(msg, SOL_SOCKET, SCM_CREDENTIALS, sizeof(ucreds), &ucreds);
+       }
 
        scm_destroy_cred(scm);
 
index ff499640528b0012fd76c09141e978688f00b0a9..9c6414f553f91f2256698323ea5d8ec72db4e868 100644 (file)
 /*
  * sctp/protocol.c
  */
-extern struct sock *sctp_get_ctl_sock(void);
-extern int sctp_copy_local_addr_list(struct sctp_bind_addr *,
+extern int sctp_copy_local_addr_list(struct net *, struct sctp_bind_addr *,
                                     sctp_scope_t, gfp_t gfp,
                                     int flags);
 extern struct sctp_pf *sctp_get_pf_specific(sa_family_t family);
 extern int sctp_register_pf(struct sctp_pf *, sa_family_t);
-extern void sctp_addr_wq_mgmt(struct sctp_sockaddr_entry *, int);
+extern void sctp_addr_wq_mgmt(struct net *, struct sctp_sockaddr_entry *, int);
 
 /*
  * sctp/socket.c
@@ -140,12 +139,12 @@ extern int sctp_asconf_mgmt(struct sctp_sock *, struct sctp_sockaddr_entry *);
 /*
  * sctp/primitive.c
  */
-int sctp_primitive_ASSOCIATE(struct sctp_association *, void *arg);
-int sctp_primitive_SHUTDOWN(struct sctp_association *, void *arg);
-int sctp_primitive_ABORT(struct sctp_association *, void *arg);
-int sctp_primitive_SEND(struct sctp_association *, void *arg);
-int sctp_primitive_REQUESTHEARTBEAT(struct sctp_association *, void *arg);
-int sctp_primitive_ASCONF(struct sctp_association *, void *arg);
+int sctp_primitive_ASSOCIATE(struct net *, struct sctp_association *, void *arg);
+int sctp_primitive_SHUTDOWN(struct net *, struct sctp_association *, void *arg);
+int sctp_primitive_ABORT(struct net *, struct sctp_association *, void *arg);
+int sctp_primitive_SEND(struct net *, struct sctp_association *, void *arg);
+int sctp_primitive_REQUESTHEARTBEAT(struct net *, struct sctp_association *, void *arg);
+int sctp_primitive_ASCONF(struct net *, struct sctp_association *, void *arg);
 
 /*
  * sctp/input.c
@@ -156,7 +155,7 @@ void sctp_hash_established(struct sctp_association *);
 void sctp_unhash_established(struct sctp_association *);
 void sctp_hash_endpoint(struct sctp_endpoint *);
 void sctp_unhash_endpoint(struct sctp_endpoint *);
-struct sock *sctp_err_lookup(int family, struct sk_buff *,
+struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *,
                             struct sctphdr *, struct sctp_association **,
                             struct sctp_transport **);
 void sctp_err_finish(struct sock *, struct sctp_association *);
@@ -173,14 +172,14 @@ void sctp_backlog_migrate(struct sctp_association *assoc,
 /*
  * sctp/proc.c
  */
-int sctp_snmp_proc_init(void);
-void sctp_snmp_proc_exit(void);
-int sctp_eps_proc_init(void);
-void sctp_eps_proc_exit(void);
-int sctp_assocs_proc_init(void);
-void sctp_assocs_proc_exit(void);
-int sctp_remaddr_proc_init(void);
-void sctp_remaddr_proc_exit(void);
+int sctp_snmp_proc_init(struct net *net);
+void sctp_snmp_proc_exit(struct net *net);
+int sctp_eps_proc_init(struct net *net);
+void sctp_eps_proc_exit(struct net *net);
+int sctp_assocs_proc_init(struct net *net);
+void sctp_assocs_proc_exit(struct net *net);
+int sctp_remaddr_proc_init(struct net *net);
+void sctp_remaddr_proc_exit(struct net *net);
 
 
 /*
@@ -222,11 +221,10 @@ extern struct kmem_cache *sctp_bucket_cachep __read_mostly;
 #define sctp_bh_unlock_sock(sk)  bh_unlock_sock(sk)
 
 /* SCTP SNMP MIB stats handlers */
-DECLARE_SNMP_STAT(struct sctp_mib, sctp_statistics);
-#define SCTP_INC_STATS(field)      SNMP_INC_STATS(sctp_statistics, field)
-#define SCTP_INC_STATS_BH(field)   SNMP_INC_STATS_BH(sctp_statistics, field)
-#define SCTP_INC_STATS_USER(field) SNMP_INC_STATS_USER(sctp_statistics, field)
-#define SCTP_DEC_STATS(field)      SNMP_DEC_STATS(sctp_statistics, field)
+#define SCTP_INC_STATS(net, field)      SNMP_INC_STATS((net)->sctp.sctp_statistics, field)
+#define SCTP_INC_STATS_BH(net, field)   SNMP_INC_STATS_BH((net)->sctp.sctp_statistics, field)
+#define SCTP_INC_STATS_USER(net, field) SNMP_INC_STATS_USER((net)->sctp.sctp_statistics, field)
+#define SCTP_DEC_STATS(net, field)      SNMP_DEC_STATS((net)->sctp.sctp_statistics, field)
 
 #endif /* !TEST_FRAME */
 
@@ -361,25 +359,29 @@ atomic_t sctp_dbg_objcnt_## name = ATOMIC_INIT(0)
 #define SCTP_DBG_OBJCNT_ENTRY(name) \
 {.label= #name, .counter= &sctp_dbg_objcnt_## name}
 
-void sctp_dbg_objcnt_init(void);
-void sctp_dbg_objcnt_exit(void);
+void sctp_dbg_objcnt_init(struct net *);
+void sctp_dbg_objcnt_exit(struct net *);
 
 #else
 
 #define SCTP_DBG_OBJCNT_INC(name)
 #define SCTP_DBG_OBJCNT_DEC(name)
 
-static inline void sctp_dbg_objcnt_init(void) { return; }
-static inline void sctp_dbg_objcnt_exit(void) { return; }
+static inline void sctp_dbg_objcnt_init(struct net *net) { return; }
+static inline void sctp_dbg_objcnt_exit(struct net *net) { return; }
 
 #endif /* CONFIG_SCTP_DBG_OBJCOUNT */
 
 #if defined CONFIG_SYSCTL
 void sctp_sysctl_register(void);
 void sctp_sysctl_unregister(void);
+int sctp_sysctl_net_register(struct net *net);
+void sctp_sysctl_net_unregister(struct net *net);
 #else
 static inline void sctp_sysctl_register(void) { return; }
 static inline void sctp_sysctl_unregister(void) { return; }
+static inline int sctp_sysctl_net_register(struct net *net) { return 0; }
+static inline void sctp_sysctl_net_unregister(struct net *net) { return; }
 #endif
 
 /* Size of Supported Address Parameter for 'x' address types. */
@@ -586,7 +588,6 @@ for (pos = chunk->subh.fwdtsn_hdr->skip;\
 
 extern struct proto sctp_prot;
 extern struct proto sctpv6_prot;
-extern struct proc_dir_entry *proc_net_sctp;
 void sctp_put_port(struct sock *sk);
 
 extern struct idr sctp_assocs_id;
@@ -632,21 +633,21 @@ static inline int sctp_sanity_check(void)
 
 /* Warning: The following hash functions assume a power of two 'size'. */
 /* This is the hash function for the SCTP port hash table. */
-static inline int sctp_phashfn(__u16 lport)
+static inline int sctp_phashfn(struct net *net, __u16 lport)
 {
-       return lport & (sctp_port_hashsize - 1);
+       return (net_hash_mix(net) + lport) & (sctp_port_hashsize - 1);
 }
 
 /* This is the hash function for the endpoint hash table. */
-static inline int sctp_ep_hashfn(__u16 lport)
+static inline int sctp_ep_hashfn(struct net *net, __u16 lport)
 {
-       return lport & (sctp_ep_hashsize - 1);
+       return (net_hash_mix(net) + lport) & (sctp_ep_hashsize - 1);
 }
 
 /* This is the hash function for the association hash table. */
-static inline int sctp_assoc_hashfn(__u16 lport, __u16 rport)
+static inline int sctp_assoc_hashfn(struct net *net, __u16 lport, __u16 rport)
 {
-       int h = (lport << 16) + rport;
+       int h = (lport << 16) + rport + net_hash_mix(net);
        h ^= h>>8;
        return h & (sctp_assoc_hashsize - 1);
 }
index 9148632b820467ff3e64ec911c63429e56272539..b5887e1677e4e421479919399b945844133595af 100644 (file)
@@ -77,7 +77,8 @@ typedef struct {
        int action;
 } sctp_sm_command_t;
 
-typedef sctp_disposition_t (sctp_state_fn_t) (const struct sctp_endpoint *,
+typedef sctp_disposition_t (sctp_state_fn_t) (struct net *,
+                                             const struct sctp_endpoint *,
                                              const struct sctp_association *,
                                              const sctp_subtype_t type,
                                              void *arg,
@@ -178,7 +179,8 @@ sctp_state_fn_t sctp_sf_autoclose_timer_expire;
 
 /* Prototypes for utility support functions.  */
 __u8 sctp_get_chunk_type(struct sctp_chunk *chunk);
-const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t,
+const sctp_sm_table_entry_t *sctp_sm_lookup_event(struct net *,
+                                           sctp_event_t,
                                            sctp_state_t,
                                            sctp_subtype_t);
 int sctp_chunk_iif(const struct sctp_chunk *);
@@ -268,7 +270,7 @@ void sctp_chunk_assign_ssn(struct sctp_chunk *);
 
 /* Prototypes for statetable processing. */
 
-int sctp_do_sm(sctp_event_t event_type, sctp_subtype_t subtype,
+int sctp_do_sm(struct net *net, sctp_event_t event_type, sctp_subtype_t subtype,
               sctp_state_t state,
                struct sctp_endpoint *,
                struct sctp_association *asoc,
index fc5e60016e37422e9408d9ca0c0b00136aaf2bb6..0fef00f5d3ce1fe65e2e6483ba1502f69615597b 100644 (file)
@@ -102,6 +102,7 @@ struct sctp_bind_bucket {
        unsigned short  fastreuse;
        struct hlist_node       node;
        struct hlist_head       owner;
+       struct net      *net;
 };
 
 struct sctp_bind_hashbucket {
@@ -118,69 +119,6 @@ struct sctp_hashbucket {
 
 /* The SCTP globals structure. */
 extern struct sctp_globals {
-       /* RFC2960 Section 14. Suggested SCTP Protocol Parameter Values
-        *
-        * The following protocol parameters are RECOMMENDED:
-        *
-        * RTO.Initial              - 3  seconds
-        * RTO.Min                  - 1  second
-        * RTO.Max                 -  60 seconds
-        * RTO.Alpha                - 1/8  (3 when converted to right shifts.)
-        * RTO.Beta                 - 1/4  (2 when converted to right shifts.)
-        */
-       unsigned int rto_initial;
-       unsigned int rto_min;
-       unsigned int rto_max;
-
-       /* Note: rto_alpha and rto_beta are really defined as inverse
-        * powers of two to facilitate integer operations.
-        */
-       int rto_alpha;
-       int rto_beta;
-
-       /* Max.Burst                - 4 */
-       int max_burst;
-
-       /* Whether Cookie Preservative is enabled(1) or not(0) */
-       int cookie_preserve_enable;
-
-       /* Valid.Cookie.Life        - 60  seconds  */
-       unsigned int valid_cookie_life;
-
-       /* Delayed SACK timeout  200ms default*/
-       unsigned int sack_timeout;
-
-       /* HB.interval              - 30 seconds  */
-       unsigned int hb_interval;
-
-       /* Association.Max.Retrans  - 10 attempts
-        * Path.Max.Retrans         - 5  attempts (per destination address)
-        * Max.Init.Retransmits     - 8  attempts
-        */
-       int max_retrans_association;
-       int max_retrans_path;
-       int max_retrans_init;
-
-       /* Potentially-Failed.Max.Retrans sysctl value
-        * taken from:
-        * http://tools.ietf.org/html/draft-nishida-tsvwg-sctp-failover-05
-        */
-       int pf_retrans;
-
-       /*
-        * Policy for preforming sctp/socket accounting
-        * 0   - do socket level accounting, all assocs share sk_sndbuf
-        * 1   - do sctp accounting, each asoc may use sk_sndbuf bytes
-        */
-       int sndbuf_policy;
-
-       /*
-        * Policy for preforming sctp/socket accounting
-        * 0   - do socket level accounting, all assocs share sk_rcvbuf
-        * 1   - do sctp accounting, each asoc may use sk_rcvbuf bytes
-        */
-       int rcvbuf_policy;
-
        /* The following variables are implementation specific.  */
 
        /* Default initialization values to be applied to new associations. */
@@ -204,70 +142,11 @@ extern struct sctp_globals {
        int port_hashsize;
        struct sctp_bind_hashbucket *port_hashtable;
 
-       /* This is the global local address list.
-        * We actively maintain this complete list of addresses on
-        * the system by catching address add/delete events.
-        *
-        * It is a list of sctp_sockaddr_entry.
-        */
-       struct list_head local_addr_list;
-       int default_auto_asconf;
-       struct list_head addr_waitq;
-       struct timer_list addr_wq_timer;
-       struct list_head auto_asconf_splist;
-       spinlock_t addr_wq_lock;
-
-       /* Lock that protects the local_addr_list writers */
-       spinlock_t addr_list_lock;
-       
-       /* Flag to indicate if addip is enabled. */
-       int addip_enable;
-       int addip_noauth_enable;
-
-       /* Flag to indicate if PR-SCTP is enabled. */
-       int prsctp_enable;
-
-       /* Flag to idicate if SCTP-AUTH is enabled */
-       int auth_enable;
-
-       /*
-        * Policy to control SCTP IPv4 address scoping
-        * 0   - Disable IPv4 address scoping
-        * 1   - Enable IPv4 address scoping
-        * 2   - Selectively allow only IPv4 private addresses
-        * 3   - Selectively allow only IPv4 link local address
-        */
-       int ipv4_scope_policy;
-
        /* Flag to indicate whether computing and verifying checksum
         * is disabled. */
         bool checksum_disable;
-
-       /* Threshold for rwnd update SACKS.  Receive buffer shifted this many
-        * bits is an indicator of when to send and window update SACK.
-        */
-       int rwnd_update_shift;
-
-       /* Threshold for autoclose timeout, in seconds. */
-       unsigned long max_autoclose;
 } sctp_globals;
 
-#define sctp_rto_initial               (sctp_globals.rto_initial)
-#define sctp_rto_min                   (sctp_globals.rto_min)
-#define sctp_rto_max                   (sctp_globals.rto_max)
-#define sctp_rto_alpha                 (sctp_globals.rto_alpha)
-#define sctp_rto_beta                  (sctp_globals.rto_beta)
-#define sctp_max_burst                 (sctp_globals.max_burst)
-#define sctp_valid_cookie_life         (sctp_globals.valid_cookie_life)
-#define sctp_cookie_preserve_enable    (sctp_globals.cookie_preserve_enable)
-#define sctp_max_retrans_association   (sctp_globals.max_retrans_association)
-#define sctp_sndbuf_policy             (sctp_globals.sndbuf_policy)
-#define sctp_rcvbuf_policy             (sctp_globals.rcvbuf_policy)
-#define sctp_max_retrans_path          (sctp_globals.max_retrans_path)
-#define sctp_pf_retrans                        (sctp_globals.pf_retrans)
-#define sctp_max_retrans_init          (sctp_globals.max_retrans_init)
-#define sctp_sack_timeout              (sctp_globals.sack_timeout)
-#define sctp_hb_interval               (sctp_globals.hb_interval)
 #define sctp_max_instreams             (sctp_globals.max_instreams)
 #define sctp_max_outstreams            (sctp_globals.max_outstreams)
 #define sctp_address_families          (sctp_globals.address_families)
@@ -277,21 +156,7 @@ extern struct sctp_globals {
 #define sctp_assoc_hashtable           (sctp_globals.assoc_hashtable)
 #define sctp_port_hashsize             (sctp_globals.port_hashsize)
 #define sctp_port_hashtable            (sctp_globals.port_hashtable)
-#define sctp_local_addr_list           (sctp_globals.local_addr_list)
-#define sctp_local_addr_lock           (sctp_globals.addr_list_lock)
-#define sctp_auto_asconf_splist                (sctp_globals.auto_asconf_splist)
-#define sctp_addr_waitq                        (sctp_globals.addr_waitq)
-#define sctp_addr_wq_timer             (sctp_globals.addr_wq_timer)
-#define sctp_addr_wq_lock              (sctp_globals.addr_wq_lock)
-#define sctp_default_auto_asconf       (sctp_globals.default_auto_asconf)
-#define sctp_scope_policy              (sctp_globals.ipv4_scope_policy)
-#define sctp_addip_enable              (sctp_globals.addip_enable)
-#define sctp_addip_noauth              (sctp_globals.addip_noauth_enable)
-#define sctp_prsctp_enable             (sctp_globals.prsctp_enable)
-#define sctp_auth_enable               (sctp_globals.auth_enable)
 #define sctp_checksum_disable          (sctp_globals.checksum_disable)
-#define sctp_rwnd_upd_shift            (sctp_globals.rwnd_update_shift)
-#define sctp_max_autoclose             (sctp_globals.max_autoclose)
 
 /* SCTP Socket type: UDP or TCP style. */
 typedef enum {
@@ -1085,7 +950,7 @@ struct sctp_transport {
        __u64 hb_nonce;
 };
 
-struct sctp_transport *sctp_transport_new(const union sctp_addr *,
+struct sctp_transport *sctp_transport_new(struct net *, const union sctp_addr *,
                                          gfp_t);
 void sctp_transport_set_owner(struct sctp_transport *,
                              struct sctp_association *);
@@ -1240,7 +1105,7 @@ struct sctp_bind_addr {
 
 void sctp_bind_addr_init(struct sctp_bind_addr *, __u16 port);
 void sctp_bind_addr_free(struct sctp_bind_addr *);
-int sctp_bind_addr_copy(struct sctp_bind_addr *dest,
+int sctp_bind_addr_copy(struct net *net, struct sctp_bind_addr *dest,
                        const struct sctp_bind_addr *src,
                        sctp_scope_t scope, gfp_t gfp,
                        int flags);
@@ -1267,7 +1132,7 @@ int sctp_raw_to_bind_addrs(struct sctp_bind_addr *bp, __u8 *raw, int len,
                           __u16 port, gfp_t gfp);
 
 sctp_scope_t sctp_scope(const union sctp_addr *);
-int sctp_in_scope(const union sctp_addr *addr, const sctp_scope_t scope);
+int sctp_in_scope(struct net *net, const union sctp_addr *addr, const sctp_scope_t scope);
 int sctp_is_any(struct sock *sk, const union sctp_addr *addr);
 int sctp_addr_is_valid(const union sctp_addr *addr);
 int sctp_is_ep_boundall(struct sock *sk);
@@ -1425,13 +1290,13 @@ struct sctp_association *sctp_endpoint_lookup_assoc(
 int sctp_endpoint_is_peeled_off(struct sctp_endpoint *,
                                const union sctp_addr *);
 struct sctp_endpoint *sctp_endpoint_is_match(struct sctp_endpoint *,
-                                       const union sctp_addr *);
-int sctp_has_association(const union sctp_addr *laddr,
+                                       struct net *, const union sctp_addr *);
+int sctp_has_association(struct net *net, const union sctp_addr *laddr,
                         const union sctp_addr *paddr);
 
-int sctp_verify_init(const struct sctp_association *asoc, sctp_cid_t,
-                    sctp_init_chunk_t *peer_init, struct sctp_chunk *chunk,
-                    struct sctp_chunk **err_chunk);
+int sctp_verify_init(struct net *net, const struct sctp_association *asoc,
+                    sctp_cid_t, sctp_init_chunk_t *peer_init,
+                    struct sctp_chunk *chunk, struct sctp_chunk **err_chunk);
 int sctp_process_init(struct sctp_association *, struct sctp_chunk *chunk,
                      const union sctp_addr *peer,
                      sctp_init_chunk_t *init, gfp_t gfp);
@@ -2013,6 +1878,7 @@ void sctp_assoc_control_transport(struct sctp_association *,
                                  sctp_transport_cmd_t, sctp_sn_error_t);
 struct sctp_transport *sctp_assoc_lookup_tsn(struct sctp_association *, __u32);
 struct sctp_transport *sctp_assoc_is_match(struct sctp_association *,
+                                          struct net *,
                                           const union sctp_addr *,
                                           const union sctp_addr *);
 void sctp_assoc_migrate(struct sctp_association *, struct sock *);
index 0147b901e79c4d6b43eef9dbe45f76eef0fc1720..71596261fa997ec7014b77f0bbee9b47b6146493 100644 (file)
@@ -154,13 +154,15 @@ struct linux_xfrm_mib {
  */
 #define SNMP_UPD_PO_STATS(mib, basefield, addend)      \
        do { \
-               this_cpu_inc(mib[0]->mibs[basefield##PKTS]);            \
-               this_cpu_add(mib[0]->mibs[basefield##OCTETS], addend);  \
+               __typeof__(*mib[0]->mibs) *ptr = mib[0]->mibs;  \
+               this_cpu_inc(ptr[basefield##PKTS]);             \
+               this_cpu_add(ptr[basefield##OCTETS], addend);   \
        } while (0)
 #define SNMP_UPD_PO_STATS_BH(mib, basefield, addend)   \
        do { \
-               __this_cpu_inc(mib[0]->mibs[basefield##PKTS]);          \
-               __this_cpu_add(mib[0]->mibs[basefield##OCTETS], addend);        \
+               __typeof__(*mib[0]->mibs) *ptr = mib[0]->mibs;  \
+               __this_cpu_inc(ptr[basefield##PKTS]);           \
+               __this_cpu_add(ptr[basefield##OCTETS], addend); \
        } while (0)
 
 
index adb7da20b5a10b2679d59734f6f46b971acf0094..c945fba4f54351475ff2efb989f77b23237f60d4 100644 (file)
@@ -247,8 +247,7 @@ struct cg_proto;
   *    @sk_stamp: time stamp of last packet received
   *    @sk_socket: Identd and reporting IO signals
   *    @sk_user_data: RPC layer private data
-  *    @sk_sndmsg_page: cached page for sendmsg
-  *    @sk_sndmsg_off: cached offset for sendmsg
+  *    @sk_frag: cached page frag
   *    @sk_peek_off: current peek_offset value
   *    @sk_send_head: front of stuff to transmit
   *    @sk_security: used by security modules
@@ -362,9 +361,8 @@ struct sock {
        ktime_t                 sk_stamp;
        struct socket           *sk_socket;
        void                    *sk_user_data;
-       struct page             *sk_sndmsg_page;
+       struct page_frag        sk_frag;
        struct sk_buff          *sk_send_head;
-       __u32                   sk_sndmsg_off;
        __s32                   sk_peek_off;
        int                     sk_write_pending;
 #ifdef CONFIG_SECURITY
@@ -606,6 +604,15 @@ static inline void sk_add_bind_node(struct sock *sk,
 #define sk_for_each_bound(__sk, node, list) \
        hlist_for_each_entry(__sk, node, list, sk_bind_node)
 
+static inline struct user_namespace *sk_user_ns(struct sock *sk)
+{
+       /* Careful only use this in a context where these parameters
+        * can not change and must all be valid, such as recvmsg from
+        * userspace.
+        */
+       return sk->sk_socket->file->f_cred->user_ns;
+}
+
 /* Sock flags */
 enum sock_flags {
        SOCK_DEAD,
@@ -1486,14 +1493,6 @@ extern void *sock_kmalloc(struct sock *sk, int size,
 extern void sock_kfree_s(struct sock *sk, void *mem, int size);
 extern void sk_send_sigurg(struct sock *sk);
 
-#ifdef CONFIG_CGROUPS
-extern void sock_update_classid(struct sock *sk);
-#else
-static inline void sock_update_classid(struct sock *sk)
-{
-}
-#endif
-
 /*
  * Functions to fill in entries in struct proto_ops when a protocol
  * does not implement a particular function.
@@ -1670,7 +1669,7 @@ static inline void sock_graft(struct sock *sk, struct socket *parent)
        write_unlock_bh(&sk->sk_callback_lock);
 }
 
-extern int sock_i_uid(struct sock *sk);
+extern kuid_t sock_i_uid(struct sock *sk);
 extern unsigned long sock_i_ino(struct sock *sk);
 
 static inline struct dst_entry *
@@ -2025,18 +2024,23 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
 
 struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
 
-static inline struct page *sk_stream_alloc_page(struct sock *sk)
+/**
+ * sk_page_frag - return an appropriate page_frag
+ * @sk: socket
+ *
+ * If socket allocation mode allows current thread to sleep, it means it's
+ * safe to use the per task page_frag instead of the per socket one.
+ */
+static inline struct page_frag *sk_page_frag(struct sock *sk)
 {
-       struct page *page = NULL;
+       if (sk->sk_allocation & __GFP_WAIT)
+               return &current->task_frag;
 
-       page = alloc_pages(sk->sk_allocation, 0);
-       if (!page) {
-               sk_enter_memory_pressure(sk);
-               sk_stream_moderate_sndbuf(sk);
-       }
-       return page;
+       return &sk->sk_frag;
 }
 
+extern bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag);
+
 /*
  *     Default write policy as shown to user space via poll/select/SIGIO
  */
@@ -2217,8 +2221,6 @@ extern int net_msg_warn;
 extern __u32 sysctl_wmem_max;
 extern __u32 sysctl_rmem_max;
 
-extern void sk_init(void);
-
 extern int sysctl_optmem_max;
 
 extern __u32 sysctl_wmem_default;
index 1f000ffe70758c0596b8c4d18230e781b88f495f..6feeccd83dd7557abd30e32c0547f483b79bd238 100644 (file)
@@ -98,11 +98,21 @@ extern void tcp_time_wait(struct sock *sk, int state, int timeo);
                                 * 15 is ~13-30min depending on RTO.
                                 */
 
-#define TCP_SYN_RETRIES         5      /* number of times to retry active opening a
-                                * connection: ~180sec is RFC minimum   */
+#define TCP_SYN_RETRIES         6      /* This is how many retries are done
+                                * when active opening a connection.
+                                * RFC1122 says the minimum retry MUST
+                                * be at least 180secs.  Nevertheless
+                                * this value is corresponding to
+                                * 63secs of retransmission with the
+                                * current initial RTO.
+                                */
 
-#define TCP_SYNACK_RETRIES 5   /* number of times to retry passive opening a
-                                * connection: ~180sec is RFC minimum   */
+#define TCP_SYNACK_RETRIES 5   /* This is how many retries are done
+                                * when passive opening a connection.
+                                * This is corresponding to 31secs of
+                                * retransmission with the current
+                                * initial RTO.
+                                */
 
 #define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
                                  * state, about 60 seconds     */
@@ -214,8 +224,24 @@ extern void tcp_time_wait(struct sock *sk, int state, int timeo);
 
 /* Bit Flags for sysctl_tcp_fastopen */
 #define        TFO_CLIENT_ENABLE       1
+#define        TFO_SERVER_ENABLE       2
 #define        TFO_CLIENT_NO_COOKIE    4       /* Data in SYN w/o cookie option */
 
+/* Process SYN data but skip cookie validation */
+#define        TFO_SERVER_COOKIE_NOT_CHKED     0x100
+/* Accept SYN data w/o any cookie option */
+#define        TFO_SERVER_COOKIE_NOT_REQD      0x200
+
+/* Force enable TFO on all listeners, i.e., not requiring the
+ * TCP_FASTOPEN socket option. SOCKOPT1/2 determine how to set max_qlen.
+ */
+#define        TFO_SERVER_WO_SOCKOPT1  0x400
+#define        TFO_SERVER_WO_SOCKOPT2  0x800
+/* Always create TFO child sockets on a TFO listener even when
+ * cookie/data not present. (For testing purpose!)
+ */
+#define        TFO_SERVER_ALWAYS       0x1000
+
 extern struct inet_timewait_death_row tcp_death_row;
 
 /* sysctl variables for tcp */
@@ -398,7 +424,8 @@ extern enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *
                                                     const struct tcphdr *th);
 extern struct sock * tcp_check_req(struct sock *sk,struct sk_buff *skb,
                                   struct request_sock *req,
-                                  struct request_sock **prev);
+                                  struct request_sock **prev,
+                                  bool fastopen);
 extern int tcp_child_process(struct sock *parent, struct sock *child,
                             struct sk_buff *skb);
 extern bool tcp_use_frto(struct sock *sk);
@@ -411,12 +438,6 @@ extern void tcp_metrics_init(void);
 extern bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst, bool paws_check);
 extern bool tcp_remember_stamp(struct sock *sk);
 extern bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw);
-extern void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
-                                  struct tcp_fastopen_cookie *cookie,
-                                  int *syn_loss, unsigned long *last_syn_loss);
-extern void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
-                                  struct tcp_fastopen_cookie *cookie,
-                                  bool syn_lost);
 extern void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst);
 extern void tcp_disable_fack(struct tcp_sock *tp);
 extern void tcp_close(struct sock *sk, long timeout);
@@ -458,7 +479,8 @@ extern int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr,
 extern int tcp_connect(struct sock *sk);
 extern struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst,
                                        struct request_sock *req,
-                                       struct request_values *rvp);
+                                       struct request_values *rvp,
+                                       struct tcp_fastopen_cookie *foc);
 extern int tcp_disconnect(struct sock *sk, int flags);
 
 void tcp_connect_init(struct sock *sk);
@@ -527,6 +549,7 @@ extern void tcp_send_delayed_ack(struct sock *sk);
 extern void tcp_cwnd_application_limited(struct sock *sk);
 extern void tcp_resume_early_retransmit(struct sock *sk);
 extern void tcp_rearm_rto(struct sock *sk);
+extern void tcp_reset(struct sock *sk);
 
 /* tcp_timer.c */
 extern void tcp_init_xmit_timers(struct sock *);
@@ -576,6 +599,7 @@ extern int tcp_mtu_to_mss(struct sock *sk, int pmtu);
 extern int tcp_mss_to_mtu(struct sock *sk, int mss);
 extern void tcp_mtup_init(struct sock *sk);
 extern void tcp_valid_rtt_meas(struct sock *sk, u32 seq_rtt);
+extern void tcp_init_buffer_space(struct sock *sk);
 
 static inline void tcp_bound_rto(const struct sock *sk)
 {
@@ -889,15 +913,21 @@ static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
        return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
 }
 
+static inline bool tcp_in_cwnd_reduction(const struct sock *sk)
+{
+       return (TCPF_CA_CWR | TCPF_CA_Recovery) &
+              (1 << inet_csk(sk)->icsk_ca_state);
+}
+
 /* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
- * The exception is rate halving phase, when cwnd is decreasing towards
+ * The exception is cwnd reduction phase, when cwnd is decreasing towards
  * ssthresh.
  */
 static inline __u32 tcp_current_ssthresh(const struct sock *sk)
 {
        const struct tcp_sock *tp = tcp_sk(sk);
 
-       if ((1 << inet_csk(sk)->icsk_ca_state) & (TCPF_CA_CWR | TCPF_CA_Recovery))
+       if (tcp_in_cwnd_reduction(sk))
                return tp->snd_ssthresh;
        else
                return max(tp->snd_ssthresh,
@@ -1094,6 +1124,8 @@ static inline void tcp_openreq_init(struct request_sock *req,
        req->rcv_wnd = 0;               /* So that tcp_send_synack() knows! */
        req->cookie_ts = 0;
        tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
+       tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
+       tcp_rsk(req)->snt_synack = 0;
        req->mss = rx_opt->mss_clamp;
        req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
        ireq->tstamp_ok = rx_opt->tstamp_ok;
@@ -1106,6 +1138,15 @@ static inline void tcp_openreq_init(struct request_sock *req,
        ireq->loc_port = tcp_hdr(skb)->dest;
 }
 
+/* Compute time elapsed between SYNACK and the ACK completing 3WHS */
+static inline void tcp_synack_rtt_meas(struct sock *sk,
+                                      struct request_sock *req)
+{
+       if (tcp_rsk(req)->snt_synack)
+               tcp_valid_rtt_meas(sk,
+                   tcp_time_stamp - tcp_rsk(req)->snt_synack);
+}
+
 extern void tcp_enter_memory_pressure(struct sock *sk);
 
 static inline int keepalive_intvl_when(const struct tcp_sock *tp)
@@ -1298,15 +1339,34 @@ extern int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff
 extern int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
                            const struct tcp_md5sig_key *key);
 
+/* From tcp_fastopen.c */
+extern void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
+                                  struct tcp_fastopen_cookie *cookie,
+                                  int *syn_loss, unsigned long *last_syn_loss);
+extern void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
+                                  struct tcp_fastopen_cookie *cookie,
+                                  bool syn_lost);
 struct tcp_fastopen_request {
        /* Fast Open cookie. Size 0 means a cookie request */
        struct tcp_fastopen_cookie      cookie;
        struct msghdr                   *data;  /* data in MSG_FASTOPEN */
        u16                             copied; /* queued in tcp_connect() */
 };
-
 void tcp_free_fastopen_req(struct tcp_sock *tp);
 
+extern struct tcp_fastopen_context __rcu *tcp_fastopen_ctx;
+int tcp_fastopen_reset_cipher(void *key, unsigned int len);
+void tcp_fastopen_cookie_gen(__be32 addr, struct tcp_fastopen_cookie *foc);
+
+#define TCP_FASTOPEN_KEY_LENGTH 16
+
+/* Fastopen key context */
+struct tcp_fastopen_context {
+       struct crypto_cipher __rcu      *tfm;
+       __u8                            key[TCP_FASTOPEN_KEY_LENGTH];
+       struct rcu_head                 rcu;
+};
+
 /* write queue abstraction */
 static inline void tcp_write_queue_purge(struct sock *sk)
 {
@@ -1510,7 +1570,8 @@ struct tcp_iter_state {
        sa_family_t             family;
        enum tcp_seq_states     state;
        struct sock             *syn_wait_sk;
-       int                     bucket, offset, sbucket, num, uid;
+       int                     bucket, offset, sbucket, num;
+       kuid_t                  uid;
        loff_t                  last_pos;
 };
 
index 639dd1316d375aeb2802c73032cc4cae6dcb8b0c..6f0ba01afe7315d443a0aad152a9df2d5a22abf6 100644 (file)
@@ -263,7 +263,7 @@ struct km_event {
        } data;
 
        u32     seq;
-       u32     pid;
+       u32     portid;
        u32     event;
        struct net *net;
 };
@@ -313,7 +313,7 @@ extern void km_state_notify(struct xfrm_state *x, const struct km_event *c);
 
 struct xfrm_tmpl;
 extern int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
-extern void km_state_expired(struct xfrm_state *x, int hard, u32 pid);
+extern void km_state_expired(struct xfrm_state *x, int hard, u32 portid);
 extern int __xfrm_state_delete(struct xfrm_state *x);
 
 struct xfrm_state_afinfo {
@@ -576,7 +576,7 @@ struct xfrm_mgr {
        struct list_head        list;
        char                    *id;
        int                     (*notify)(struct xfrm_state *x, const struct km_event *c);
-       int                     (*acquire)(struct xfrm_state *x, struct xfrm_tmpl *, struct xfrm_policy *xp, int dir);
+       int                     (*acquire)(struct xfrm_state *x, struct xfrm_tmpl *, struct xfrm_policy *xp);
        struct xfrm_policy      *(*compile_policy)(struct sock *sk, int opt, u8 *data, int len, int *dir);
        int                     (*new_mapping)(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
        int                     (*notify_policy)(struct xfrm_policy *x, int dir, const struct km_event *c);
@@ -671,7 +671,7 @@ struct xfrm_spi_skb_cb {
 /* Audit Information */
 struct xfrm_audit {
        u32     secid;
-       uid_t   loginuid;
+       kuid_t  loginuid;
        u32     sessionid;
 };
 
@@ -690,13 +690,14 @@ static inline struct audit_buffer *xfrm_audit_start(const char *op)
        return audit_buf;
 }
 
-static inline void xfrm_audit_helper_usrinfo(uid_t auid, u32 ses, u32 secid,
+static inline void xfrm_audit_helper_usrinfo(kuid_t auid, u32 ses, u32 secid,
                                             struct audit_buffer *audit_buf)
 {
        char *secctx;
        u32 secctx_len;
 
-       audit_log_format(audit_buf, " auid=%u ses=%u", auid, ses);
+       audit_log_format(audit_buf, " auid=%u ses=%u",
+                        from_kuid(&init_user_ns, auid), ses);
        if (secid != 0 &&
            security_secid_to_secctx(secid, &secctx, &secctx_len) == 0) {
                audit_log_format(audit_buf, " subj=%s", secctx);
@@ -706,13 +707,13 @@ static inline void xfrm_audit_helper_usrinfo(uid_t auid, u32 ses, u32 secid,
 }
 
 extern void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
-                                 u32 auid, u32 ses, u32 secid);
+                                 kuid_t auid, u32 ses, u32 secid);
 extern void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
-                                 u32 auid, u32 ses, u32 secid);
+                                 kuid_t auid, u32 ses, u32 secid);
 extern void xfrm_audit_state_add(struct xfrm_state *x, int result,
-                                u32 auid, u32 ses, u32 secid);
+                                kuid_t auid, u32 ses, u32 secid);
 extern void xfrm_audit_state_delete(struct xfrm_state *x, int result,
-                                   u32 auid, u32 ses, u32 secid);
+                                   kuid_t auid, u32 ses, u32 secid);
 extern void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
                                             struct sk_buff *skb);
 extern void xfrm_audit_state_replay(struct xfrm_state *x,
@@ -725,22 +726,22 @@ extern void xfrm_audit_state_icvfail(struct xfrm_state *x,
 #else
 
 static inline void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
-                                 u32 auid, u32 ses, u32 secid)
+                                 kuid_t auid, u32 ses, u32 secid)
 {
 }
 
 static inline void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
-                                 u32 auid, u32 ses, u32 secid)
+                                 kuid_t auid, u32 ses, u32 secid)
 {
 }
 
 static inline void xfrm_audit_state_add(struct xfrm_state *x, int result,
-                                u32 auid, u32 ses, u32 secid)
+                                kuid_t auid, u32 ses, u32 secid)
 {
 }
 
 static inline void xfrm_audit_state_delete(struct xfrm_state *x, int result,
-                                   u32 auid, u32 ses, u32 secid)
+                                   kuid_t auid, u32 ses, u32 secid)
 {
 }
 
@@ -1557,7 +1558,7 @@ extern int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
 #endif
 
 extern int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
-extern void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 pid);
+extern void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 portid);
 extern int km_report(struct net *net, u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr);
 
 extern void xfrm_input_init(void);
index 00a2b8ec327f7c1ce089172c2a1a92474bfee39e..ad9a3c280944ff3581ac9313a04387198f2c0fd9 100644 (file)
@@ -100,6 +100,22 @@ int ib_find_cached_pkey(struct ib_device    *device,
                        u16                  pkey,
                        u16                 *index);
 
+/**
+ * ib_find_exact_cached_pkey - Returns the PKey table index where a specified
+ *   PKey value occurs. Comparison uses the FULL 16 bits (incl membership bit)
+ * @device: The device to query.
+ * @port_num: The port number of the device to search for the PKey.
+ * @pkey: The PKey value to search for.
+ * @index: The index into the cached PKey table where the PKey was found.
+ *
+ * ib_find_exact_cached_pkey() searches the specified PKey table in
+ * the local software cache.
+ */
+int ib_find_exact_cached_pkey(struct ib_device    *device,
+                             u8                   port_num,
+                             u16                  pkey,
+                             u16                 *index);
+
 /**
  * ib_get_cached_lmc - Returns a cached lmc table entry
  * @device: The device to query.
index 07996af8265a524b60a356faa4f59cf5cf712de1..46bc045bbe1596b5273f9225a5b3cf74d097fd86 100644 (file)
@@ -614,6 +614,9 @@ enum ib_qp_type {
 enum ib_qp_create_flags {
        IB_QP_CREATE_IPOIB_UD_LSO               = 1 << 0,
        IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK   = 1 << 1,
+       /* reserve bits 26-31 for low level drivers' internal use */
+       IB_QP_CREATE_RESERVED_START             = 1 << 26,
+       IB_QP_CREATE_RESERVED_END               = 1 << 31,
 };
 
 struct ib_qp_init_attr {
index ae33706afeb06eb24976c70f7d04ea047486939a..ef937b56f9b54c44e4003d762eaa7105534077da 100644 (file)
@@ -79,7 +79,8 @@ enum phy_event {
        PHYE_OOB_DONE         = 1,
        PHYE_OOB_ERROR        = 2,
        PHYE_SPINUP_HOLD      = 3, /* hot plug SATA, no COMWAKE sent */
-       PHY_NUM_EVENTS        = 4,
+       PHYE_RESUME_TIMEOUT   = 4,
+       PHY_NUM_EVENTS        = 5,
 };
 
 enum discover_event {
@@ -87,8 +88,10 @@ enum discover_event {
        DISCE_REVALIDATE_DOMAIN = 1,
        DISCE_PORT_GONE         = 2,
        DISCE_PROBE             = 3,
-       DISCE_DESTRUCT          = 4,
-       DISC_NUM_EVENTS         = 5,
+       DISCE_SUSPEND           = 4,
+       DISCE_RESUME            = 5,
+       DISCE_DESTRUCT          = 6,
+       DISC_NUM_EVENTS         = 7,
 };
 
 /* ---------- Expander Devices ---------- */
@@ -128,7 +131,7 @@ struct ex_phy {
        u8   attached_sas_addr[SAS_ADDR_SIZE];
        u8   attached_phy_id;
 
-       u8   phy_change_count;
+       int phy_change_count;
        enum routing_attribute routing_attr;
        u8   virtual:1;
 
@@ -141,7 +144,7 @@ struct ex_phy {
 struct expander_device {
        struct list_head children;
 
-       u16    ex_change_count;
+       int    ex_change_count;
        u16    max_route_indexes;
        u8     num_phys;
 
@@ -169,6 +172,7 @@ struct sata_device {
         enum   ata_command_set command_set;
         struct smp_resp        rps_resp; /* report_phy_sata_resp */
         u8     port_no;        /* port number, if this is a PM (Port) */
+       int    pm_result;
 
        struct ata_port *ap;
        struct ata_host ata_host;
@@ -182,6 +186,7 @@ struct ssp_device {
 
 enum {
        SAS_DEV_GONE,
+       SAS_DEV_FOUND, /* device notified to lldd */
        SAS_DEV_DESTROY,
        SAS_DEV_EH_PENDING,
        SAS_DEV_LU_RESET,
@@ -273,6 +278,7 @@ struct asd_sas_port {
        enum   sas_linkrate linkrate;
 
        struct sas_work work;
+       int suspended;
 
 /* public: */
        int id;
@@ -321,6 +327,7 @@ struct asd_sas_phy {
        unsigned long phy_events_pending;
 
        int error;
+       int suspended;
 
        struct sas_phy *phy;
 
@@ -687,6 +694,9 @@ struct sas_domain_function_template {
 
 extern int sas_register_ha(struct sas_ha_struct *);
 extern int sas_unregister_ha(struct sas_ha_struct *);
+extern void sas_prep_resume_ha(struct sas_ha_struct *sas_ha);
+extern void sas_resume_ha(struct sas_ha_struct *sas_ha);
+extern void sas_suspend_ha(struct sas_ha_struct *sas_ha);
 
 int sas_set_phy_speed(struct sas_phy *phy,
                      struct sas_phy_linkrates *rates);
index 2dfbdaa0b34a8b64cf94e22ec2a5f21bdc0990a9..ff71a56546845f2e29266ecf466ecd6d7174f38a 100644 (file)
@@ -45,6 +45,8 @@ void sas_ata_eh(struct Scsi_Host *shost, struct list_head *work_q,
 void sas_ata_schedule_reset(struct domain_device *dev);
 void sas_ata_wait_eh(struct domain_device *dev);
 void sas_probe_sata(struct asd_sas_port *port);
+void sas_suspend_sata(struct asd_sas_port *port);
+void sas_resume_sata(struct asd_sas_port *port);
 void sas_ata_end_eh(struct ata_port *ap);
 #else
 
@@ -82,6 +84,14 @@ static inline void sas_probe_sata(struct asd_sas_port *port)
 {
 }
 
+static inline void sas_suspend_sata(struct asd_sas_port *port)
+{
+}
+
+static inline void sas_resume_sata(struct asd_sas_port *port)
+{
+}
+
 static inline int sas_get_ata_info(struct domain_device *dev, struct ex_phy *phy)
 {
        return 0;
index 91a4e4ff9a9bbeb28d66394299e3ef3c938e5b0c..3031b900b087011a18f4a6f321d7ac3d71c7414a 100644 (file)
@@ -26,8 +26,6 @@
  * This file intended to be included by both kernel and user space
  */
 
-#include <scsi/scsi.h>
-
 /*
  * FC Transport SGIO v4 BSG Message Support
  */
index 9895f69294fc4a65113f3368155f087e86d42807..88fae8d2015471885972da8407d0077106b527cc 100644 (file)
@@ -156,6 +156,7 @@ struct scsi_device {
        unsigned is_visible:1;  /* is the device visible in sysfs */
        unsigned can_power_off:1; /* Device supports runtime power off */
        unsigned wce_default_on:1;      /* Cache is ON by default */
+       unsigned no_dif:1;      /* T10 PI (DIF) should be disabled */
 
        DECLARE_BITMAP(supported_events, SDEV_EVT_MAXBITS); /* supported events */
        struct list_head event_list;    /* asserted events */
@@ -476,6 +477,9 @@ static inline int scsi_device_enclosure(struct scsi_device *sdev)
 
 static inline int scsi_device_protection(struct scsi_device *sdev)
 {
+       if (sdev->no_dif)
+               return 0;
+
        return sdev->scsi_level > SCSI_2 && sdev->inquiry[5] & (1<<0);
 }
 
index b4ddd3b18b4c1619fb7c7d9b4c484ed0bca89b2b..cc1f3e786ad71e863bc1fc5d0d3e2e91b884998a 100644 (file)
@@ -30,4 +30,5 @@
 #define BLIST_RETRY_HWERROR    0x400000 /* retry HARDWARE_ERROR */
 #define BLIST_MAX_512          0x800000 /* maximum 512 sector cdb length */
 #define BLIST_ATTACH_PQ3       0x1000000 /* Scan: Attach to PQ3 devices */
+#define BLIST_NO_DIF           0x2000000 /* Disable T10 PI (DIF) */
 #endif
index 5f7d5b3b1c6eaa82c73b3ffcf85321d26f1c705b..49084807eb6b37448bc659a2c0290768038d89d3 100644 (file)
@@ -873,6 +873,9 @@ static inline unsigned int scsi_host_dif_capable(struct Scsi_Host *shost, unsign
                                       SHOST_DIF_TYPE2_PROTECTION,
                                       SHOST_DIF_TYPE3_PROTECTION };
 
+       if (target_type > SHOST_DIF_TYPE3_PROTECTION)
+               return 0;
+
        return shost->prot_capabilities & cap[target_type] ? target_type : 0;
 }
 
@@ -884,6 +887,9 @@ static inline unsigned int scsi_host_dix_capable(struct Scsi_Host *shost, unsign
                                       SHOST_DIX_TYPE2_PROTECTION,
                                       SHOST_DIX_TYPE3_PROTECTION };
 
+       if (target_type > SHOST_DIX_TYPE3_PROTECTION)
+               return 0;
+
        return shost->prot_capabilities & cap[target_type];
 #endif
        return 0;
index 5cb20ccb195606b9cc0fdac6869b2789c06e1649..62b4edab15d32ca59871921433f378f1aa842602 100644 (file)
@@ -119,29 +119,5 @@ struct scsi_nl_host_vendor_msg {
        (hdr)->msglen = mlen;                                   \
        }
 
-
-#ifdef __KERNEL__
-
-#include <scsi/scsi_host.h>
-
-/* Exported Kernel Interfaces */
-int scsi_nl_add_transport(u8 tport,
-        int (*msg_handler)(struct sk_buff *),
-       void (*event_handler)(struct notifier_block *, unsigned long, void *));
-void scsi_nl_remove_transport(u8 tport);
-
-int scsi_nl_add_driver(u64 vendor_id, struct scsi_host_template *hostt,
-       int (*nlmsg_handler)(struct Scsi_Host *shost, void *payload,
-                                u32 len, u32 pid),
-       void (*nlevt_handler)(struct notifier_block *nb,
-                                unsigned long event, void *notify_ptr));
-void scsi_nl_remove_driver(u64 vendor_id);
-
-void scsi_nl_send_transport_msg(u32 pid, struct scsi_nl_hdr *hdr);
-int scsi_nl_send_vendor_msg(u32 pid, unsigned short host_no, u64 vendor_id,
-                        char *data_buf, u32 data_len);
-
-#endif /* __KERNEL__ */
-
 #endif /* SCSI_NETLINK_H */
 
index f19fff8650e93075aae1717034f29898f1199646..aecee9d112cbe6aea2532652c8c3f768bb829cce 100644 (file)
@@ -190,4 +190,16 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
                      struct gnttab_map_grant_ref *kunmap_ops,
                      struct page **pages, unsigned int count);
 
+/* Perform a batch of grant map/copy operations. Retry every batch slot
+ * for which the hypervisor returns GNTST_eagain. This is typically due
+ * to paged out target frames.
+ *
+ * Will retry for 1, 2, ... 255 ms, i.e. 256 times during 32 seconds.
+ *
+ * Return value in each and every status field of the batch guaranteed
+ * to not be GNTST_eagain.
+ */
+void gnttab_batch_map(struct gnttab_map_grant_ref *batch, unsigned count);
+void gnttab_batch_copy(struct gnttab_copy *batch, unsigned count);
+
 #endif /* __ASM_GNTTAB_H__ */
index a17d84433e6a1ab54e660fc4f25493a180ce15f2..f9f8b975ae7448bfdae47100ad7da63a34fbe39b 100644 (file)
@@ -338,7 +338,7 @@ DEFINE_GUEST_HANDLE_STRUCT(gnttab_dump_table);
 #define GNTTABOP_transfer                4
 struct gnttab_transfer {
     /* IN parameters. */
-    unsigned long mfn;
+    xen_pfn_t mfn;
     domid_t       domid;
     grant_ref_t   ref;
     /* OUT parameters. */
@@ -375,7 +375,7 @@ struct gnttab_copy {
        struct {
                union {
                        grant_ref_t ref;
-                       unsigned long   gmfn;
+                       xen_pfn_t   gmfn;
                } u;
                domid_t  domid;
                uint16_t offset;
@@ -519,7 +519,9 @@ DEFINE_GUEST_HANDLE_STRUCT(gnttab_get_version);
 #define GNTST_no_device_space  (-7) /* Out of space in I/O MMU.              */
 #define GNTST_permission_denied (-8) /* Not enough privilege for operation.  */
 #define GNTST_bad_page         (-9) /* Specified page was invalid for op.    */
-#define GNTST_bad_copy_arg    (-10) /* copy arguments cross page boundary */
+#define GNTST_bad_copy_arg    (-10) /* copy arguments cross page boundary.   */
+#define GNTST_address_too_big (-11) /* transfer page address too large.      */
+#define GNTST_eagain          (-12) /* Operation not done; try again.        */
 
 #define GNTTABOP_error_msgs {                   \
     "okay",                                     \
@@ -532,7 +534,9 @@ DEFINE_GUEST_HANDLE_STRUCT(gnttab_get_version);
     "no spare translation slot in the I/O MMU", \
     "permission denied",                        \
     "bad page",                                 \
-    "copy arguments cross page boundary"        \
+    "copy arguments cross page boundary",       \
+    "page address size too large",              \
+    "operation not done; try again"             \
 }
 
 #endif /* __XEN_PUBLIC_GRANT_TABLE_H__ */
index eac3ce1537190228ba8dc74f0398d8e6e4795572..d8e33a93ea4d4483d69ea256c5abb99b16862a8c 100644 (file)
@@ -31,7 +31,7 @@ struct xen_memory_reservation {
      *   OUT: GMFN bases of extents that were allocated
      *   (NB. This command also updates the mach_to_phys translation table)
      */
-    GUEST_HANDLE(ulong) extent_start;
+    GUEST_HANDLE(xen_pfn_t) extent_start;
 
     /* Number of extents, and size/alignment of each (2^extent_order pages). */
     unsigned long  nr_extents;
@@ -130,7 +130,7 @@ struct xen_machphys_mfn_list {
      * any large discontiguities in the machine address space, 2MB gaps in
      * the machphys table will be represented by an MFN base of zero.
      */
-    GUEST_HANDLE(ulong) extent_start;
+    GUEST_HANDLE(xen_pfn_t) extent_start;
 
     /*
      * Number of extents written to the above array. This will be smaller
@@ -163,6 +163,9 @@ struct xen_add_to_physmap {
     /* Which domain to change the mapping for. */
     domid_t domid;
 
+    /* Number of pages to go through for gmfn_range */
+    uint16_t    size;
+
     /* Source mapping space. */
 #define XENMAPSPACE_shared_info 0 /* shared info page */
 #define XENMAPSPACE_grant_table 1 /* grant table page */
@@ -172,7 +175,7 @@ struct xen_add_to_physmap {
     unsigned long idx;
 
     /* GPFN where the source mapping page should appear. */
-    unsigned long gpfn;
+    xen_pfn_t gpfn;
 };
 DEFINE_GUEST_HANDLE_STRUCT(xen_add_to_physmap);
 
index 52ff8377d3bdc793fbb504b8de038287bf777dbb..4755b5fac9c736b5fc010ae5d9a3e8c70838a232 100644 (file)
@@ -54,7 +54,7 @@ DEFINE_GUEST_HANDLE_STRUCT(xenpf_settime_t);
 #define XENPF_add_memtype         31
 struct xenpf_add_memtype {
        /* IN variables. */
-       unsigned long mfn;
+       xen_pfn_t mfn;
        uint64_t nr_mfns;
        uint32_t type;
        /* OUT variables. */
@@ -84,7 +84,7 @@ struct xenpf_read_memtype {
        /* IN variables. */
        uint32_t reg;
        /* OUT variables. */
-       unsigned long mfn;
+       xen_pfn_t mfn;
        uint64_t nr_mfns;
        uint32_t type;
 };
@@ -112,6 +112,7 @@ DEFINE_GUEST_HANDLE_STRUCT(xenpf_platform_quirk_t);
 #define XEN_FW_DISK_INFO          1 /* from int 13 AH=08/41/48 */
 #define XEN_FW_DISK_MBR_SIGNATURE 2 /* from MBR offset 0x1b8 */
 #define XEN_FW_VBEDDC_INFO        3 /* from int 10 AX=4f15 */
+#define XEN_FW_KBD_SHIFT_FLAGS    5 /* Int16, Fn02: Get keyboard shift flags. */
 struct xenpf_firmware_info {
        /* IN variables. */
        uint32_t type;
@@ -142,6 +143,8 @@ struct xenpf_firmware_info {
                        /* must refer to 128-byte buffer */
                        GUEST_HANDLE(uchar) edid;
                } vbeddc_info; /* XEN_FW_VBEDDC_INFO */
+
+               uint8_t kbd_shift_flags; /* XEN_FW_KBD_SHIFT_FLAGS */
        } u;
 };
 DEFINE_GUEST_HANDLE_STRUCT(xenpf_firmware_info_t);
index ff372a5ddfe790a82364ebb3e7bac6db8ca1575e..5f5e551cf546548a034e7423387a1a2e8e2f09db 100644 (file)
@@ -60,4 +60,7 @@ struct xen_feature_info {
 /* arg == NULL; returns host memory page size. */
 #define XENVER_pagesize 7
 
+/* arg == xen_domain_handle_t. */
+#define XENVER_guest_handle 8
+
 #endif /* __XEN_PUBLIC_VERSION_H__ */
index 0801468f9abed879eb1b9b1d52363aaeb979a2ae..886a5d80a18fdcbaa572c8350a400d59ab4455ba 100644 (file)
@@ -10,7 +10,6 @@
 #define __XEN_PUBLIC_XEN_H__
 
 #include <asm/xen/interface.h>
-#include <asm/pvclock-abi.h>
 
 /*
  * XEN "SYSTEM CALLS" (a.k.a. HYPERCALLS).
@@ -190,7 +189,7 @@ struct mmuext_op {
        unsigned int cmd;
        union {
                /* [UN]PIN_TABLE, NEW_BASEPTR, NEW_USER_BASEPTR */
-               unsigned long mfn;
+               xen_pfn_t mfn;
                /* INVLPG_LOCAL, INVLPG_ALL, SET_LDT */
                unsigned long linear_addr;
        } arg1;
@@ -430,11 +429,11 @@ struct start_info {
        unsigned long nr_pages;     /* Total pages allocated to this domain.  */
        unsigned long shared_info;  /* MACHINE address of shared info struct. */
        uint32_t flags;             /* SIF_xxx flags.                         */
-       unsigned long store_mfn;    /* MACHINE page number of shared page.    */
+       xen_pfn_t store_mfn;        /* MACHINE page number of shared page.    */
        uint32_t store_evtchn;      /* Event channel for store communication. */
        union {
                struct {
-                       unsigned long mfn;  /* MACHINE page number of console page.   */
+                       xen_pfn_t mfn;      /* MACHINE page number of console page.   */
                        uint32_t  evtchn;   /* Event channel for console page.        */
                } domU;
                struct {
@@ -455,6 +454,7 @@ struct dom0_vga_console_info {
        uint8_t video_type;
 #define XEN_VGATYPE_TEXT_MODE_3 0x03
 #define XEN_VGATYPE_VESA_LFB    0x23
+#define XEN_VGATYPE_EFI_LFB     0x70
 
        union {
                struct {
index 17857fb4d5509b24a475609e2806af53c79d5317..a85316811d79ad43493d21c1e35d51ed10aa257c 100644 (file)
@@ -35,8 +35,7 @@
 
 #include <linux/types.h>
 #include <linux/compiler.h>
-
-typedef unsigned long xen_pfn_t;
+#include <xen/interface/xen.h>
 
 struct privcmd_hypercall {
        __u64 op;
@@ -59,13 +58,33 @@ struct privcmd_mmapbatch {
        int num;     /* number of pages to populate */
        domid_t dom; /* target domain */
        __u64 addr;  /* virtual address */
-       xen_pfn_t __user *arr; /* array of mfns - top nibble set on err */
+       xen_pfn_t __user *arr; /* array of mfns - or'd with
+                                 PRIVCMD_MMAPBATCH_*_ERROR on err */
+};
+
+#define PRIVCMD_MMAPBATCH_MFN_ERROR     0xf0000000U
+#define PRIVCMD_MMAPBATCH_PAGED_ERROR   0x80000000U
+
+struct privcmd_mmapbatch_v2 {
+       unsigned int num; /* number of pages to populate */
+       domid_t dom;      /* target domain */
+       __u64 addr;       /* virtual address */
+       const xen_pfn_t __user *arr; /* array of mfns */
+       int __user *err;  /* array of error codes */
 };
 
 /*
  * @cmd: IOCTL_PRIVCMD_HYPERCALL
  * @arg: &privcmd_hypercall_t
  * Return: Value returned from execution of the specified hypercall.
+ *
+ * @cmd: IOCTL_PRIVCMD_MMAPBATCH_V2
+ * @arg: &struct privcmd_mmapbatch_v2
+ * Return: 0 on success (i.e., arg->err contains valid error codes for
+ * each frame).  On an error other than a failed frame remap, -1 is
+ * returned and errno is set to EINVAL, EFAULT etc.  As an exception,
+ * if the operation was otherwise successful but any frame failed with
+ * -ENOENT, then -1 is returned and errno is set to ENOENT.
  */
 #define IOCTL_PRIVCMD_HYPERCALL                                        \
        _IOC(_IOC_NONE, 'P', 0, sizeof(struct privcmd_hypercall))
@@ -73,5 +92,7 @@ struct privcmd_mmapbatch {
        _IOC(_IOC_NONE, 'P', 2, sizeof(struct privcmd_mmap))
 #define IOCTL_PRIVCMD_MMAPBATCH                                        \
        _IOC(_IOC_NONE, 'P', 3, sizeof(struct privcmd_mmapbatch))
+#define IOCTL_PRIVCMD_MMAPBATCH_V2                             \
+       _IOC(_IOC_NONE, 'P', 4, sizeof(struct privcmd_mmapbatch_v2))
 
 #endif /* __LINUX_PUBLIC_PRIVCMD_H__ */
index 4f4d449f00f6b98644be40b85f108667c4cb1642..de8bcc641c49ae64c6971c11b792b696216cce0a 100644 (file)
@@ -3,7 +3,7 @@
 
 #include <linux/swiotlb.h>
 
-extern void xen_swiotlb_init(int verbose);
+extern int xen_swiotlb_init(int verbose, bool early);
 
 extern void
 *xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
@@ -23,15 +23,6 @@ extern dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 extern void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
                                   size_t size, enum dma_data_direction dir,
                                   struct dma_attrs *attrs);
-/*
-extern int
-xen_swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
-                  enum dma_data_direction dir);
-
-extern void
-xen_swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
-                    enum dma_data_direction dir);
-*/
 extern int
 xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
                         int nelems, enum dma_data_direction dir,
index 73e4adfa91dca4783e53772c27a3b29e77ef704f..cb003a3c9122e5be10ec0628049f954a4e3310a9 100644 (file)
@@ -986,113 +986,24 @@ config UIDGID_CONVERTED
        bool
        default y
 
-       # List of kernel pieces that need user namespace work
-       # Features
-       depends on SYSVIPC = n
-       depends on IMA = n
-       depends on EVM = n
-       depends on KEYS = n
-       depends on AUDIT = n
-       depends on AUDITSYSCALL = n
-       depends on TASKSTATS = n
-       depends on TRACING = n
-       depends on FS_POSIX_ACL = n
-       depends on QUOTA = n
-       depends on QUOTACTL = n
-       depends on DEBUG_CREDENTIALS = n
-       depends on BSD_PROCESS_ACCT = n
-       depends on DRM = n
-       depends on PROC_EVENTS = n
-
        # Networking
-       depends on NET = n
        depends on NET_9P = n
-       depends on IPX = n
-       depends on PHONET = n
-       depends on NET_CLS_FLOW = n
-       depends on NETFILTER_XT_MATCH_OWNER = n
-       depends on NETFILTER_XT_MATCH_RECENT = n
-       depends on NETFILTER_XT_TARGET_LOG = n
-       depends on NETFILTER_NETLINK_LOG = n
-       depends on INET = n
-       depends on IPV6 = n
-       depends on IP_SCTP = n
-       depends on AF_RXRPC = n
-       depends on LLC2 = n
-       depends on NET_KEY = n
-       depends on INET_DIAG = n
-       depends on DNS_RESOLVER = n
-       depends on AX25 = n
-       depends on ATALK = n
 
        # Filesystems
-       depends on USB_DEVICEFS = n
-       depends on USB_GADGETFS = n
-       depends on USB_FUNCTIONFS = n
-       depends on DEVTMPFS = n
-       depends on XENFS = n
-
        depends on 9P_FS = n
-       depends on ADFS_FS = n
-       depends on AFFS_FS = n
        depends on AFS_FS = n
        depends on AUTOFS4_FS = n
-       depends on BEFS_FS = n
-       depends on BFS_FS = n
-       depends on BTRFS_FS = n
        depends on CEPH_FS = n
        depends on CIFS = n
        depends on CODA_FS = n
-       depends on CONFIGFS_FS = n
-       depends on CRAMFS = n
-       depends on DEBUG_FS = n
-       depends on ECRYPT_FS = n
-       depends on EFS_FS = n
-       depends on EXOFS_FS = n
-       depends on FAT_FS = n
        depends on FUSE_FS = n
        depends on GFS2_FS = n
-       depends on HFS_FS = n
-       depends on HFSPLUS_FS = n
-       depends on HPFS_FS = n
-       depends on HUGETLBFS = n
-       depends on ISO9660_FS = n
-       depends on JFFS2_FS = n
-       depends on JFS_FS = n
-       depends on LOGFS = n
-       depends on MINIX_FS = n
        depends on NCP_FS = n
        depends on NFSD = n
        depends on NFS_FS = n
-       depends on NILFS2_FS = n
-       depends on NTFS_FS = n
        depends on OCFS2_FS = n
-       depends on OMFS_FS = n
-       depends on QNX4FS_FS = n
-       depends on QNX6FS_FS = n
-       depends on REISERFS_FS = n
-       depends on SQUASHFS = n
-       depends on SYSV_FS = n
-       depends on UBIFS_FS = n
-       depends on UDF_FS = n
-       depends on UFS_FS = n
-       depends on VXFS_FS = n
        depends on XFS_FS = n
 
-       depends on !UML || HOSTFS = n
-
-       # The rare drivers that won't build
-       depends on AIRO = n
-       depends on AIRO_CS = n
-       depends on TUN = n
-       depends on INFINIBAND_QIB = n
-       depends on BLK_DEV_LOOP = n
-       depends on ANDROID_BINDER_IPC = n
-
-       # Security modules
-       depends on SECURITY_TOMOYO = n
-       depends on SECURITY_APPARMOR = n
-
 config UIDGID_STRICT_TYPE_CHECKS
        bool "Require conversions between uid/gids and their internal representation"
        depends on UIDGID_CONVERTED
index 9a08acc9e64923ea8af5bb9286d6f2ddd48c052e..6d255e535d038c167aace43bfa2d572a092e2585 100644 (file)
@@ -944,7 +944,7 @@ SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
                size_t, msg_len, unsigned int, msg_prio,
                const struct timespec __user *, u_abs_timeout)
 {
-       struct file *filp;
+       struct fd f;
        struct inode *inode;
        struct ext_wait_queue wait;
        struct ext_wait_queue *receiver;
@@ -967,21 +967,21 @@ SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
 
        audit_mq_sendrecv(mqdes, msg_len, msg_prio, timeout ? &ts : NULL);
 
-       filp = fget(mqdes);
-       if (unlikely(!filp)) {
+       f = fdget(mqdes);
+       if (unlikely(!f.file)) {
                ret = -EBADF;
                goto out;
        }
 
-       inode = filp->f_path.dentry->d_inode;
-       if (unlikely(filp->f_op != &mqueue_file_operations)) {
+       inode = f.file->f_path.dentry->d_inode;
+       if (unlikely(f.file->f_op != &mqueue_file_operations)) {
                ret = -EBADF;
                goto out_fput;
        }
        info = MQUEUE_I(inode);
-       audit_inode(NULL, filp->f_path.dentry);
+       audit_inode(NULL, f.file->f_path.dentry);
 
-       if (unlikely(!(filp->f_mode & FMODE_WRITE))) {
+       if (unlikely(!(f.file->f_mode & FMODE_WRITE))) {
                ret = -EBADF;
                goto out_fput;
        }
@@ -1023,7 +1023,7 @@ SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
        }
 
        if (info->attr.mq_curmsgs == info->attr.mq_maxmsg) {
-               if (filp->f_flags & O_NONBLOCK) {
+               if (f.file->f_flags & O_NONBLOCK) {
                        ret = -EAGAIN;
                } else {
                        wait.task = current;
@@ -1056,7 +1056,7 @@ out_free:
        if (ret)
                free_msg(msg_ptr);
 out_fput:
-       fput(filp);
+       fdput(f);
 out:
        return ret;
 }
@@ -1067,7 +1067,7 @@ SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
 {
        ssize_t ret;
        struct msg_msg *msg_ptr;
-       struct file *filp;
+       struct fd f;
        struct inode *inode;
        struct mqueue_inode_info *info;
        struct ext_wait_queue wait;
@@ -1084,21 +1084,21 @@ SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
 
        audit_mq_sendrecv(mqdes, msg_len, 0, timeout ? &ts : NULL);
 
-       filp = fget(mqdes);
-       if (unlikely(!filp)) {
+       f = fdget(mqdes);
+       if (unlikely(!f.file)) {
                ret = -EBADF;
                goto out;
        }
 
-       inode = filp->f_path.dentry->d_inode;
-       if (unlikely(filp->f_op != &mqueue_file_operations)) {
+       inode = f.file->f_path.dentry->d_inode;
+       if (unlikely(f.file->f_op != &mqueue_file_operations)) {
                ret = -EBADF;
                goto out_fput;
        }
        info = MQUEUE_I(inode);
-       audit_inode(NULL, filp->f_path.dentry);
+       audit_inode(NULL, f.file->f_path.dentry);
 
-       if (unlikely(!(filp->f_mode & FMODE_READ))) {
+       if (unlikely(!(f.file->f_mode & FMODE_READ))) {
                ret = -EBADF;
                goto out_fput;
        }
@@ -1130,7 +1130,7 @@ SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
        }
 
        if (info->attr.mq_curmsgs == 0) {
-               if (filp->f_flags & O_NONBLOCK) {
+               if (f.file->f_flags & O_NONBLOCK) {
                        spin_unlock(&info->lock);
                        ret = -EAGAIN;
                } else {
@@ -1160,7 +1160,7 @@ SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
                free_msg(msg_ptr);
        }
 out_fput:
-       fput(filp);
+       fdput(f);
 out:
        return ret;
 }
@@ -1174,7 +1174,7 @@ SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
                const struct sigevent __user *, u_notification)
 {
        int ret;
-       struct file *filp;
+       struct fd f;
        struct sock *sock;
        struct inode *inode;
        struct sigevent notification;
@@ -1220,13 +1220,13 @@ SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
                        skb_put(nc, NOTIFY_COOKIE_LEN);
                        /* and attach it to the socket */
 retry:
-                       filp = fget(notification.sigev_signo);
-                       if (!filp) {
+                       f = fdget(notification.sigev_signo);
+                       if (!f.file) {
                                ret = -EBADF;
                                goto out;
                        }
-                       sock = netlink_getsockbyfilp(filp);
-                       fput(filp);
+                       sock = netlink_getsockbyfilp(f.file);
+                       fdput(f);
                        if (IS_ERR(sock)) {
                                ret = PTR_ERR(sock);
                                sock = NULL;
@@ -1245,14 +1245,14 @@ retry:
                }
        }
 
-       filp = fget(mqdes);
-       if (!filp) {
+       f = fdget(mqdes);
+       if (!f.file) {
                ret = -EBADF;
                goto out;
        }
 
-       inode = filp->f_path.dentry->d_inode;
-       if (unlikely(filp->f_op != &mqueue_file_operations)) {
+       inode = f.file->f_path.dentry->d_inode;
+       if (unlikely(f.file->f_op != &mqueue_file_operations)) {
                ret = -EBADF;
                goto out_fput;
        }
@@ -1292,7 +1292,7 @@ retry:
        }
        spin_unlock(&info->lock);
 out_fput:
-       fput(filp);
+       fdput(f);
 out:
        if (sock) {
                netlink_detachskb(sock, nc);
@@ -1308,7 +1308,7 @@ SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
 {
        int ret;
        struct mq_attr mqstat, omqstat;
-       struct file *filp;
+       struct fd f;
        struct inode *inode;
        struct mqueue_inode_info *info;
 
@@ -1319,14 +1319,14 @@ SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
                        return -EINVAL;
        }
 
-       filp = fget(mqdes);
-       if (!filp) {
+       f = fdget(mqdes);
+       if (!f.file) {
                ret = -EBADF;
                goto out;
        }
 
-       inode = filp->f_path.dentry->d_inode;
-       if (unlikely(filp->f_op != &mqueue_file_operations)) {
+       inode = f.file->f_path.dentry->d_inode;
+       if (unlikely(f.file->f_op != &mqueue_file_operations)) {
                ret = -EBADF;
                goto out_fput;
        }
@@ -1335,15 +1335,15 @@ SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
        spin_lock(&info->lock);
 
        omqstat = info->attr;
-       omqstat.mq_flags = filp->f_flags & O_NONBLOCK;
+       omqstat.mq_flags = f.file->f_flags & O_NONBLOCK;
        if (u_mqstat) {
                audit_mq_getsetattr(mqdes, &mqstat);
-               spin_lock(&filp->f_lock);
+               spin_lock(&f.file->f_lock);
                if (mqstat.mq_flags & O_NONBLOCK)
-                       filp->f_flags |= O_NONBLOCK;
+                       f.file->f_flags |= O_NONBLOCK;
                else
-                       filp->f_flags &= ~O_NONBLOCK;
-               spin_unlock(&filp->f_lock);
+                       f.file->f_flags &= ~O_NONBLOCK;
+               spin_unlock(&f.file->f_lock);
 
                inode->i_atime = inode->i_ctime = CURRENT_TIME;
        }
@@ -1356,7 +1356,7 @@ SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
                ret = -EFAULT;
 
 out_fput:
-       fput(filp);
+       fdput(f);
 out:
        return ret;
 }
index 7385de25788a80c0d9cb51e6a9921997ae135e74..a71af5a65abf2b6a3029c4792d13594317dba8d2 100644 (file)
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -443,9 +443,12 @@ static int msgctl_down(struct ipc_namespace *ns, int msqid, int cmd,
                        goto out_unlock;
                }
 
+               err = ipc_update_perm(&msqid64.msg_perm, ipcp);
+               if (err)
+                       goto out_unlock;
+
                msq->q_qbytes = msqid64.msg_qbytes;
 
-               ipc_update_perm(&msqid64.msg_perm, ipcp);
                msq->q_ctime = get_seconds();
                /* sleeping receivers might be excluded by
                 * stricter permissions.
@@ -922,6 +925,7 @@ out:
 #ifdef CONFIG_PROC_FS
 static int sysvipc_msg_proc_show(struct seq_file *s, void *it)
 {
+       struct user_namespace *user_ns = seq_user_ns(s);
        struct msg_queue *msq = it;
 
        return seq_printf(s,
@@ -933,10 +937,10 @@ static int sysvipc_msg_proc_show(struct seq_file *s, void *it)
                        msq->q_qnum,
                        msq->q_lspid,
                        msq->q_lrpid,
-                       msq->q_perm.uid,
-                       msq->q_perm.gid,
-                       msq->q_perm.cuid,
-                       msq->q_perm.cgid,
+                       from_kuid_munged(user_ns, msq->q_perm.uid),
+                       from_kgid_munged(user_ns, msq->q_perm.gid),
+                       from_kuid_munged(user_ns, msq->q_perm.cuid),
+                       from_kgid_munged(user_ns, msq->q_perm.cgid),
                        msq->q_stime,
                        msq->q_rtime,
                        msq->q_ctime);
index 5215a81420df9b1802dd9f6c40c465f1b63a2bbd..58d31f1c1eb59920a558705b677c8db3ff80b6d9 100644 (file)
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -1104,7 +1104,9 @@ static int semctl_down(struct ipc_namespace *ns, int semid,
                freeary(ns, ipcp);
                goto out_up;
        case IPC_SET:
-               ipc_update_perm(&semid64.sem_perm, ipcp);
+               err = ipc_update_perm(&semid64.sem_perm, ipcp);
+               if (err)
+                       goto out_unlock;
                sma->sem_ctime = get_seconds();
                break;
        default:
@@ -1677,6 +1679,7 @@ void exit_sem(struct task_struct *tsk)
 #ifdef CONFIG_PROC_FS
 static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
 {
+       struct user_namespace *user_ns = seq_user_ns(s);
        struct sem_array *sma = it;
 
        return seq_printf(s,
@@ -1685,10 +1688,10 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
                          sma->sem_perm.id,
                          sma->sem_perm.mode,
                          sma->sem_nsems,
-                         sma->sem_perm.uid,
-                         sma->sem_perm.gid,
-                         sma->sem_perm.cuid,
-                         sma->sem_perm.cgid,
+                         from_kuid_munged(user_ns, sma->sem_perm.uid),
+                         from_kgid_munged(user_ns, sma->sem_perm.gid),
+                         from_kuid_munged(user_ns, sma->sem_perm.cuid),
+                         from_kgid_munged(user_ns, sma->sem_perm.cgid),
                          sma->sem_otime,
                          sma->sem_ctime);
 }
index 00faa05cf72adf914bc40a41113182da964eec31..dff40c9f73c9df09544c057f7325004726d27715 100644 (file)
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -758,7 +758,9 @@ static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
                do_shm_rmid(ns, ipcp);
                goto out_up;
        case IPC_SET:
-               ipc_update_perm(&shmid64.shm_perm, ipcp);
+               err = ipc_update_perm(&shmid64.shm_perm, ipcp);
+               if (err)
+                       goto out_unlock;
                shp->shm_ctim = get_seconds();
                break;
        default:
@@ -893,10 +895,10 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
                audit_ipc_obj(&(shp->shm_perm));
 
                if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) {
-                       uid_t euid = current_euid();
+                       kuid_t euid = current_euid();
                        err = -EPERM;
-                       if (euid != shp->shm_perm.uid &&
-                           euid != shp->shm_perm.cuid)
+                       if (!uid_eq(euid, shp->shm_perm.uid) &&
+                           !uid_eq(euid, shp->shm_perm.cuid))
                                goto out_unlock;
                        if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK))
                                goto out_unlock;
@@ -1220,6 +1222,7 @@ SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
 #ifdef CONFIG_PROC_FS
 static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
 {
+       struct user_namespace *user_ns = seq_user_ns(s);
        struct shmid_kernel *shp = it;
        unsigned long rss = 0, swp = 0;
 
@@ -1242,10 +1245,10 @@ static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
                          shp->shm_cprid,
                          shp->shm_lprid,
                          shp->shm_nattch,
-                         shp->shm_perm.uid,
-                         shp->shm_perm.gid,
-                         shp->shm_perm.cuid,
-                         shp->shm_perm.cgid,
+                         from_kuid_munged(user_ns, shp->shm_perm.uid),
+                         from_kgid_munged(user_ns, shp->shm_perm.gid),
+                         from_kuid_munged(user_ns, shp->shm_perm.cuid),
+                         from_kgid_munged(user_ns, shp->shm_perm.cgid),
                          shp->shm_atim,
                          shp->shm_dtim,
                          shp->shm_ctim,
index eb07fd356f2759c74a74206a441bf3928ba95f5b..72fd0785ac948b4c3813a2aac8f463ea41e7955f 100644 (file)
@@ -249,8 +249,8 @@ int ipc_get_maxid(struct ipc_ids *ids)
  
 int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size)
 {
-       uid_t euid;
-       gid_t egid;
+       kuid_t euid;
+       kgid_t egid;
        int id, err;
 
        if (size > IPCMNI)
@@ -606,14 +606,14 @@ void ipc_rcu_putref(void *ptr)
  
 int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flag)
 {
-       uid_t euid = current_euid();
+       kuid_t euid = current_euid();
        int requested_mode, granted_mode;
 
        audit_ipc_obj(ipcp);
        requested_mode = (flag >> 6) | (flag >> 3) | flag;
        granted_mode = ipcp->mode;
-       if (euid == ipcp->cuid ||
-           euid == ipcp->uid)
+       if (uid_eq(euid, ipcp->cuid) ||
+           uid_eq(euid, ipcp->uid))
                granted_mode >>= 6;
        else if (in_group_p(ipcp->cgid) || in_group_p(ipcp->gid))
                granted_mode >>= 3;
@@ -643,10 +643,10 @@ int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flag)
 void kernel_to_ipc64_perm (struct kern_ipc_perm *in, struct ipc64_perm *out)
 {
        out->key        = in->key;
-       out->uid        = in->uid;
-       out->gid        = in->gid;
-       out->cuid       = in->cuid;
-       out->cgid       = in->cgid;
+       out->uid        = from_kuid_munged(current_user_ns(), in->uid);
+       out->gid        = from_kgid_munged(current_user_ns(), in->gid);
+       out->cuid       = from_kuid_munged(current_user_ns(), in->cuid);
+       out->cgid       = from_kgid_munged(current_user_ns(), in->cgid);
        out->mode       = in->mode;
        out->seq        = in->seq;
 }
@@ -747,12 +747,19 @@ int ipcget(struct ipc_namespace *ns, struct ipc_ids *ids,
  * @in:  the permission given as input.
  * @out: the permission of the ipc to set.
  */
-void ipc_update_perm(struct ipc64_perm *in, struct kern_ipc_perm *out)
+int ipc_update_perm(struct ipc64_perm *in, struct kern_ipc_perm *out)
 {
-       out->uid = in->uid;
-       out->gid = in->gid;
+       kuid_t uid = make_kuid(current_user_ns(), in->uid);
+       kgid_t gid = make_kgid(current_user_ns(), in->gid);
+       if (!uid_valid(uid) || !gid_valid(gid))
+               return -EINVAL;
+
+       out->uid = uid;
+       out->gid = gid;
        out->mode = (out->mode & ~S_IRWXUGO)
                | (in->mode & S_IRWXUGO);
+
+       return 0;
 }
 
 /**
@@ -777,7 +784,7 @@ struct kern_ipc_perm *ipcctl_pre_down(struct ipc_namespace *ns,
                                      struct ipc64_perm *perm, int extra_perm)
 {
        struct kern_ipc_perm *ipcp;
-       uid_t euid;
+       kuid_t euid;
        int err;
 
        down_write(&ids->rw_mutex);
@@ -793,7 +800,7 @@ struct kern_ipc_perm *ipcctl_pre_down(struct ipc_namespace *ns,
                                         perm->gid, perm->mode);
 
        euid = current_euid();
-       if (euid == ipcp->cuid || euid == ipcp->uid  ||
+       if (uid_eq(euid, ipcp->cuid) || uid_eq(euid, ipcp->uid)  ||
            ns_capable(ns->user_ns, CAP_SYS_ADMIN))
                return ipcp;
 
index 850ef3e962cb36a13b2cdcd560f6bbccb8fcb489..c8fe2f7631e9b616177d69c8ac614e901db669d2 100644 (file)
@@ -125,7 +125,7 @@ struct kern_ipc_perm *ipc_lock(struct ipc_ids *, int);
 
 void kernel_to_ipc64_perm(struct kern_ipc_perm *in, struct ipc64_perm *out);
 void ipc64_perm_to_ipc_perm(struct ipc64_perm *in, struct ipc_perm *out);
-void ipc_update_perm(struct ipc64_perm *in, struct kern_ipc_perm *out);
+int ipc_update_perm(struct ipc64_perm *in, struct kern_ipc_perm *out);
 struct kern_ipc_perm *ipcctl_pre_down(struct ipc_namespace *ns,
                                      struct ipc_ids *ids, int id, int cmd,
                                      struct ipc64_perm *perm, int extra_perm);
index 02e6167a53b0af6bcbc166cc810cae9ab04e54f7..6cd7529c9e6a31a7702c77f38c45289f72d907b8 100644 (file)
@@ -507,8 +507,8 @@ static void do_acct_process(struct bsd_acct_struct *acct,
        do_div(elapsed, AHZ);
        ac.ac_btime = get_seconds() - elapsed;
        /* we really need to bite the bullet and change layout */
-       ac.ac_uid = orig_cred->uid;
-       ac.ac_gid = orig_cred->gid;
+       ac.ac_uid = from_kuid_munged(file->f_cred->user_ns, orig_cred->uid);
+       ac.ac_gid = from_kgid_munged(file->f_cred->user_ns, orig_cred->gid);
 #if ACCT_VERSION==2
        ac.ac_ahz = AHZ;
 #endif
index ea3b7b6191c7af3347dce055a88af200c55d1f5d..4d0ceede33194e4d29334899ce3c85dba141fb65 100644 (file)
@@ -61,6 +61,7 @@
 #include <linux/netlink.h>
 #include <linux/freezer.h>
 #include <linux/tty.h>
+#include <linux/pid_namespace.h>
 
 #include "audit.h"
 
@@ -87,11 +88,11 @@ static int  audit_failure = AUDIT_FAIL_PRINTK;
 
 /*
  * If audit records are to be written to the netlink socket, audit_pid
- * contains the pid of the auditd process and audit_nlk_pid contains
- * the pid to use to send netlink messages to that process.
+ * contains the pid of the auditd process and audit_nlk_portid contains
+ * the portid to use to send netlink messages to that process.
  */
 int            audit_pid;
-static int     audit_nlk_pid;
+static int     audit_nlk_portid;
 
 /* If audit_rate_limit is non-zero, limit the rate of sending audit records
  * to that number per second.  This prevents DoS attacks, but results in
@@ -104,7 +105,7 @@ static int  audit_backlog_wait_time = 60 * HZ;
 static int     audit_backlog_wait_overflow = 0;
 
 /* The identity of the user shutting down the audit system. */
-uid_t          audit_sig_uid = -1;
+kuid_t         audit_sig_uid = INVALID_UID;
 pid_t          audit_sig_pid = -1;
 u32            audit_sig_sid = 0;
 
@@ -264,7 +265,7 @@ void audit_log_lost(const char *message)
 }
 
 static int audit_log_config_change(char *function_name, int new, int old,
-                                  uid_t loginuid, u32 sessionid, u32 sid,
+                                  kuid_t loginuid, u32 sessionid, u32 sid,
                                   int allow_changes)
 {
        struct audit_buffer *ab;
@@ -272,7 +273,7 @@ static int audit_log_config_change(char *function_name, int new, int old,
 
        ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
        audit_log_format(ab, "%s=%d old=%d auid=%u ses=%u", function_name, new,
-                        old, loginuid, sessionid);
+                        old, from_kuid(&init_user_ns, loginuid), sessionid);
        if (sid) {
                char *ctx = NULL;
                u32 len;
@@ -292,7 +293,7 @@ static int audit_log_config_change(char *function_name, int new, int old,
 }
 
 static int audit_do_config_change(char *function_name, int *to_change,
-                                 int new, uid_t loginuid, u32 sessionid,
+                                 int new, kuid_t loginuid, u32 sessionid,
                                  u32 sid)
 {
        int allow_changes, rc = 0, old = *to_change;
@@ -319,21 +320,21 @@ static int audit_do_config_change(char *function_name, int *to_change,
        return rc;
 }
 
-static int audit_set_rate_limit(int limit, uid_t loginuid, u32 sessionid,
+static int audit_set_rate_limit(int limit, kuid_t loginuid, u32 sessionid,
                                u32 sid)
 {
        return audit_do_config_change("audit_rate_limit", &audit_rate_limit,
                                      limit, loginuid, sessionid, sid);
 }
 
-static int audit_set_backlog_limit(int limit, uid_t loginuid, u32 sessionid,
+static int audit_set_backlog_limit(int limit, kuid_t loginuid, u32 sessionid,
                                   u32 sid)
 {
        return audit_do_config_change("audit_backlog_limit", &audit_backlog_limit,
                                      limit, loginuid, sessionid, sid);
 }
 
-static int audit_set_enabled(int state, uid_t loginuid, u32 sessionid, u32 sid)
+static int audit_set_enabled(int state, kuid_t loginuid, u32 sessionid, u32 sid)
 {
        int rc;
        if (state < AUDIT_OFF || state > AUDIT_LOCKED)
@@ -348,7 +349,7 @@ static int audit_set_enabled(int state, uid_t loginuid, u32 sessionid, u32 sid)
        return rc;
 }
 
-static int audit_set_failure(int state, uid_t loginuid, u32 sessionid, u32 sid)
+static int audit_set_failure(int state, kuid_t loginuid, u32 sessionid, u32 sid)
 {
        if (state != AUDIT_FAIL_SILENT
            && state != AUDIT_FAIL_PRINTK
@@ -401,7 +402,7 @@ static void kauditd_send_skb(struct sk_buff *skb)
        int err;
        /* take a reference in case we can't send it and we want to hold it */
        skb_get(skb);
-       err = netlink_unicast(audit_sock, skb, audit_nlk_pid, 0);
+       err = netlink_unicast(audit_sock, skb, audit_nlk_portid, 0);
        if (err < 0) {
                BUG_ON(err != -ECONNREFUSED); /* Shouldn't happen */
                printk(KERN_ERR "audit: *NO* daemon at audit_pid=%d\n", audit_pid);
@@ -467,24 +468,6 @@ static int kauditd_thread(void *dummy)
        return 0;
 }
 
-static int audit_prepare_user_tty(pid_t pid, uid_t loginuid, u32 sessionid)
-{
-       struct task_struct *tsk;
-       int err;
-
-       rcu_read_lock();
-       tsk = find_task_by_vpid(pid);
-       if (!tsk) {
-               rcu_read_unlock();
-               return -ESRCH;
-       }
-       get_task_struct(tsk);
-       rcu_read_unlock();
-       err = tty_audit_push_task(tsk, loginuid, sessionid);
-       put_task_struct(tsk);
-       return err;
-}
-
 int audit_send_list(void *_dest)
 {
        struct audit_netlink_list *dest = _dest;
@@ -588,6 +571,11 @@ static int audit_netlink_ok(struct sk_buff *skb, u16 msg_type)
 {
        int err = 0;
 
+       /* Only support the initial namespaces for now. */
+       if ((current_user_ns() != &init_user_ns) ||
+           (task_active_pid_ns(current) != &init_pid_ns))
+               return -EPERM;
+
        switch (msg_type) {
        case AUDIT_GET:
        case AUDIT_LIST:
@@ -619,8 +607,7 @@ static int audit_netlink_ok(struct sk_buff *skb, u16 msg_type)
 }
 
 static int audit_log_common_recv_msg(struct audit_buffer **ab, u16 msg_type,
-                                    u32 pid, u32 uid, uid_t auid, u32 ses,
-                                    u32 sid)
+                                    kuid_t auid, u32 ses, u32 sid)
 {
        int rc = 0;
        char *ctx = NULL;
@@ -633,7 +620,9 @@ static int audit_log_common_recv_msg(struct audit_buffer **ab, u16 msg_type,
 
        *ab = audit_log_start(NULL, GFP_KERNEL, msg_type);
        audit_log_format(*ab, "pid=%d uid=%u auid=%u ses=%u",
-                        pid, uid, auid, ses);
+                        task_tgid_vnr(current),
+                        from_kuid(&init_user_ns, current_uid()),
+                        from_kuid(&init_user_ns, auid), ses);
        if (sid) {
                rc = security_secid_to_secctx(sid, &ctx, &len);
                if (rc)
@@ -649,13 +638,13 @@ static int audit_log_common_recv_msg(struct audit_buffer **ab, u16 msg_type,
 
 static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 {
-       u32                     uid, pid, seq, sid;
+       u32                     seq, sid;
        void                    *data;
        struct audit_status     *status_get, status_set;
        int                     err;
        struct audit_buffer     *ab;
        u16                     msg_type = nlh->nlmsg_type;
-       uid_t                   loginuid; /* loginuid of sender */
+       kuid_t                  loginuid; /* loginuid of sender */
        u32                     sessionid;
        struct audit_sig_info   *sig_data;
        char                    *ctx = NULL;
@@ -675,8 +664,6 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
                return err;
        }
 
-       pid  = NETLINK_CREDS(skb)->pid;
-       uid  = NETLINK_CREDS(skb)->uid;
        loginuid = audit_get_loginuid(current);
        sessionid = audit_get_sessionid(current);
        security_task_getsecid(current, &sid);
@@ -692,7 +679,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
                status_set.backlog_limit = audit_backlog_limit;
                status_set.lost          = atomic_read(&audit_lost);
                status_set.backlog       = skb_queue_len(&audit_skb_queue);
-               audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
+               audit_send_reply(NETLINK_CB(skb).portid, seq, AUDIT_GET, 0, 0,
                                 &status_set, sizeof(status_set));
                break;
        case AUDIT_SET:
@@ -720,7 +707,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
                                                        sessionid, sid, 1);
 
                        audit_pid = new_pid;
-                       audit_nlk_pid = NETLINK_CB(skb).pid;
+                       audit_nlk_portid = NETLINK_CB(skb).portid;
                }
                if (status_get->mask & AUDIT_STATUS_RATE_LIMIT) {
                        err = audit_set_rate_limit(status_get->rate_limit,
@@ -738,16 +725,16 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
                if (!audit_enabled && msg_type != AUDIT_USER_AVC)
                        return 0;
 
-               err = audit_filter_user(&NETLINK_CB(skb));
+               err = audit_filter_user();
                if (err == 1) {
                        err = 0;
                        if (msg_type == AUDIT_USER_TTY) {
-                               err = audit_prepare_user_tty(pid, loginuid,
+                               err = tty_audit_push_task(current, loginuid,
                                                             sessionid);
                                if (err)
                                        break;
                        }
-                       audit_log_common_recv_msg(&ab, msg_type, pid, uid,
+                       audit_log_common_recv_msg(&ab, msg_type,
                                                  loginuid, sessionid, sid);
 
                        if (msg_type != AUDIT_USER_TTY)
@@ -763,7 +750,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
                                        size--;
                                audit_log_n_untrustedstring(ab, data, size);
                        }
-                       audit_set_pid(ab, pid);
+                       audit_set_pid(ab, NETLINK_CB(skb).portid);
                        audit_log_end(ab);
                }
                break;
@@ -772,8 +759,8 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
                if (nlmsg_len(nlh) < sizeof(struct audit_rule))
                        return -EINVAL;
                if (audit_enabled == AUDIT_LOCKED) {
-                       audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE, pid,
-                                                 uid, loginuid, sessionid, sid);
+                       audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE,
+                                                 loginuid, sessionid, sid);
 
                        audit_log_format(ab, " audit_enabled=%d res=0",
                                         audit_enabled);
@@ -782,8 +769,8 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
                }
                /* fallthrough */
        case AUDIT_LIST:
-               err = audit_receive_filter(msg_type, NETLINK_CB(skb).pid,
-                                          uid, seq, data, nlmsg_len(nlh),
+               err = audit_receive_filter(msg_type, NETLINK_CB(skb).portid,
+                                          seq, data, nlmsg_len(nlh),
                                           loginuid, sessionid, sid);
                break;
        case AUDIT_ADD_RULE:
@@ -791,8 +778,8 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
                if (nlmsg_len(nlh) < sizeof(struct audit_rule_data))
                        return -EINVAL;
                if (audit_enabled == AUDIT_LOCKED) {
-                       audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE, pid,
-                                                 uid, loginuid, sessionid, sid);
+                       audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE,
+                                                 loginuid, sessionid, sid);
 
                        audit_log_format(ab, " audit_enabled=%d res=0",
                                         audit_enabled);
@@ -801,15 +788,15 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
                }
                /* fallthrough */
        case AUDIT_LIST_RULES:
-               err = audit_receive_filter(msg_type, NETLINK_CB(skb).pid,
-                                          uid, seq, data, nlmsg_len(nlh),
+               err = audit_receive_filter(msg_type, NETLINK_CB(skb).portid,
+                                          seq, data, nlmsg_len(nlh),
                                           loginuid, sessionid, sid);
                break;
        case AUDIT_TRIM:
                audit_trim_trees();
 
-               audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE, pid,
-                                         uid, loginuid, sessionid, sid);
+               audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE,
+                                         loginuid, sessionid, sid);
 
                audit_log_format(ab, " op=trim res=1");
                audit_log_end(ab);
@@ -840,8 +827,8 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
                /* OK, here comes... */
                err = audit_tag_tree(old, new);
 
-               audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE, pid,
-                                         uid, loginuid, sessionid, sid);
+               audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE,
+                                         loginuid, sessionid, sid);
 
                audit_log_format(ab, " op=make_equiv old=");
                audit_log_untrustedstring(ab, old);
@@ -866,53 +853,41 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
                                security_release_secctx(ctx, len);
                        return -ENOMEM;
                }
-               sig_data->uid = audit_sig_uid;
+               sig_data->uid = from_kuid(&init_user_ns, audit_sig_uid);
                sig_data->pid = audit_sig_pid;
                if (audit_sig_sid) {
                        memcpy(sig_data->ctx, ctx, len);
                        security_release_secctx(ctx, len);
                }
-               audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_SIGNAL_INFO,
+               audit_send_reply(NETLINK_CB(skb).portid, seq, AUDIT_SIGNAL_INFO,
                                0, 0, sig_data, sizeof(*sig_data) + len);
                kfree(sig_data);
                break;
        case AUDIT_TTY_GET: {
                struct audit_tty_status s;
-               struct task_struct *tsk;
-               unsigned long flags;
-
-               rcu_read_lock();
-               tsk = find_task_by_vpid(pid);
-               if (tsk && lock_task_sighand(tsk, &flags)) {
-                       s.enabled = tsk->signal->audit_tty != 0;
-                       unlock_task_sighand(tsk, &flags);
-               } else
-                       err = -ESRCH;
-               rcu_read_unlock();
-
-               if (!err)
-                       audit_send_reply(NETLINK_CB(skb).pid, seq,
-                                        AUDIT_TTY_GET, 0, 0, &s, sizeof(s));
+               struct task_struct *tsk = current;
+
+               spin_lock_irq(&tsk->sighand->siglock);
+               s.enabled = tsk->signal->audit_tty != 0;
+               spin_unlock_irq(&tsk->sighand->siglock);
+
+               audit_send_reply(NETLINK_CB(skb).portid, seq,
+                                AUDIT_TTY_GET, 0, 0, &s, sizeof(s));
                break;
        }
        case AUDIT_TTY_SET: {
                struct audit_tty_status *s;
-               struct task_struct *tsk;
-               unsigned long flags;
+               struct task_struct *tsk = current;
 
                if (nlh->nlmsg_len < sizeof(struct audit_tty_status))
                        return -EINVAL;
                s = data;
                if (s->enabled != 0 && s->enabled != 1)
                        return -EINVAL;
-               rcu_read_lock();
-               tsk = find_task_by_vpid(pid);
-               if (tsk && lock_task_sighand(tsk, &flags)) {
-                       tsk->signal->audit_tty = s->enabled != 0;
-                       unlock_task_sighand(tsk, &flags);
-               } else
-                       err = -ESRCH;
-               rcu_read_unlock();
+
+               spin_lock_irq(&tsk->sighand->siglock);
+               tsk->signal->audit_tty = s->enabled != 0;
+               spin_unlock_irq(&tsk->sighand->siglock);
                break;
        }
        default:
@@ -971,8 +946,7 @@ static int __init audit_init(void)
 
        printk(KERN_INFO "audit: initializing netlink socket (%s)\n",
               audit_default ? "enabled" : "disabled");
-       audit_sock = netlink_kernel_create(&init_net, NETLINK_AUDIT,
-                                          THIS_MODULE, &cfg);
+       audit_sock = netlink_kernel_create(&init_net, NETLINK_AUDIT, &cfg);
        if (!audit_sock)
                audit_panic("cannot initialize netlink socket");
        else
index 81676680337158e20ce6077a522b64cdfa15f59f..9eb3d79482b64ca398ed38ae4b9ab861048a17a1 100644 (file)
@@ -76,6 +76,8 @@ static inline int audit_hash_ino(u32 ino)
 
 extern int audit_match_class(int class, unsigned syscall);
 extern int audit_comparator(const u32 left, const u32 op, const u32 right);
+extern int audit_uid_comparator(kuid_t left, u32 op, kuid_t right);
+extern int audit_gid_comparator(kgid_t left, u32 op, kgid_t right);
 extern int audit_compare_dname_path(const char *dname, const char *path,
                                    int *dirlen);
 extern struct sk_buff *            audit_make_reply(int pid, int seq, int type,
@@ -144,7 +146,7 @@ extern void audit_kill_trees(struct list_head *);
 extern char *audit_unpack_string(void **, size_t *, size_t);
 
 extern pid_t audit_sig_pid;
-extern uid_t audit_sig_uid;
+extern kuid_t audit_sig_uid;
 extern u32 audit_sig_sid;
 
 #ifdef CONFIG_AUDITSYSCALL
index 3823281401b57e423f9a4ba9255b5904920e7e1d..1c22ec3d87bc6f24c2b33a4086cb460812e0387c 100644 (file)
@@ -241,7 +241,7 @@ static void audit_watch_log_rule_change(struct audit_krule *r, struct audit_watc
                struct audit_buffer *ab;
                ab = audit_log_start(NULL, GFP_NOFS, AUDIT_CONFIG_CHANGE);
                audit_log_format(ab, "auid=%u ses=%u op=",
-                                audit_get_loginuid(current),
+                                from_kuid(&init_user_ns, audit_get_loginuid(current)),
                                 audit_get_sessionid(current));
                audit_log_string(ab, op);
                audit_log_format(ab, " path=");
index a6c3f1abd206c9d9736cbe5834483e36fd1d62ff..c4bcdbaf4d4de9bdc6f22de554c4bb18c8e9ca93 100644 (file)
@@ -342,6 +342,8 @@ static struct audit_entry *audit_rule_to_entry(struct audit_rule *rule)
 
                f->type = rule->fields[i] & ~(AUDIT_NEGATE|AUDIT_OPERATORS);
                f->val = rule->values[i];
+               f->uid = INVALID_UID;
+               f->gid = INVALID_GID;
 
                err = -EINVAL;
                if (f->op == Audit_bad)
@@ -350,16 +352,32 @@ static struct audit_entry *audit_rule_to_entry(struct audit_rule *rule)
                switch(f->type) {
                default:
                        goto exit_free;
-               case AUDIT_PID:
                case AUDIT_UID:
                case AUDIT_EUID:
                case AUDIT_SUID:
                case AUDIT_FSUID:
+               case AUDIT_LOGINUID:
+                       /* bit ops not implemented for uid comparisons */
+                       if (f->op == Audit_bitmask || f->op == Audit_bittest)
+                               goto exit_free;
+
+                       f->uid = make_kuid(current_user_ns(), f->val);
+                       if (!uid_valid(f->uid))
+                               goto exit_free;
+                       break;
                case AUDIT_GID:
                case AUDIT_EGID:
                case AUDIT_SGID:
                case AUDIT_FSGID:
-               case AUDIT_LOGINUID:
+                       /* bit ops not implemented for gid comparisons */
+                       if (f->op == Audit_bitmask || f->op == Audit_bittest)
+                               goto exit_free;
+
+                       f->gid = make_kgid(current_user_ns(), f->val);
+                       if (!gid_valid(f->gid))
+                               goto exit_free;
+                       break;
+               case AUDIT_PID:
                case AUDIT_PERS:
                case AUDIT_MSGTYPE:
                case AUDIT_PPID:
@@ -437,19 +455,39 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
 
                f->type = data->fields[i];
                f->val = data->values[i];
+               f->uid = INVALID_UID;
+               f->gid = INVALID_GID;
                f->lsm_str = NULL;
                f->lsm_rule = NULL;
                switch(f->type) {
-               case AUDIT_PID:
                case AUDIT_UID:
                case AUDIT_EUID:
                case AUDIT_SUID:
                case AUDIT_FSUID:
+               case AUDIT_LOGINUID:
+               case AUDIT_OBJ_UID:
+                       /* bit ops not implemented for uid comparisons */
+                       if (f->op == Audit_bitmask || f->op == Audit_bittest)
+                               goto exit_free;
+
+                       f->uid = make_kuid(current_user_ns(), f->val);
+                       if (!uid_valid(f->uid))
+                               goto exit_free;
+                       break;
                case AUDIT_GID:
                case AUDIT_EGID:
                case AUDIT_SGID:
                case AUDIT_FSGID:
-               case AUDIT_LOGINUID:
+               case AUDIT_OBJ_GID:
+                       /* bit ops not implemented for gid comparisons */
+                       if (f->op == Audit_bitmask || f->op == Audit_bittest)
+                               goto exit_free;
+
+                       f->gid = make_kgid(current_user_ns(), f->val);
+                       if (!gid_valid(f->gid))
+                               goto exit_free;
+                       break;
+               case AUDIT_PID:
                case AUDIT_PERS:
                case AUDIT_MSGTYPE:
                case AUDIT_PPID:
@@ -461,8 +499,6 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
                case AUDIT_ARG1:
                case AUDIT_ARG2:
                case AUDIT_ARG3:
-               case AUDIT_OBJ_UID:
-               case AUDIT_OBJ_GID:
                        break;
                case AUDIT_ARCH:
                        entry->rule.arch_f = f;
@@ -707,6 +743,23 @@ static int audit_compare_rule(struct audit_krule *a, struct audit_krule *b)
                        if (strcmp(a->filterkey, b->filterkey))
                                return 1;
                        break;
+               case AUDIT_UID:
+               case AUDIT_EUID:
+               case AUDIT_SUID:
+               case AUDIT_FSUID:
+               case AUDIT_LOGINUID:
+               case AUDIT_OBJ_UID:
+                       if (!uid_eq(a->fields[i].uid, b->fields[i].uid))
+                               return 1;
+                       break;
+               case AUDIT_GID:
+               case AUDIT_EGID:
+               case AUDIT_SGID:
+               case AUDIT_FSGID:
+               case AUDIT_OBJ_GID:
+                       if (!gid_eq(a->fields[i].gid, b->fields[i].gid))
+                               return 1;
+                       break;
                default:
                        if (a->fields[i].val != b->fields[i].val)
                                return 1;
@@ -1056,7 +1109,7 @@ static void audit_list_rules(int pid, int seq, struct sk_buff_head *q)
 }
 
 /* Log rule additions and removals */
-static void audit_log_rule_change(uid_t loginuid, u32 sessionid, u32 sid,
+static void audit_log_rule_change(kuid_t loginuid, u32 sessionid, u32 sid,
                                  char *action, struct audit_krule *rule,
                                  int res)
 {
@@ -1068,7 +1121,8 @@ static void audit_log_rule_change(uid_t loginuid, u32 sessionid, u32 sid,
        ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
        if (!ab)
                return;
-       audit_log_format(ab, "auid=%u ses=%u", loginuid, sessionid);
+       audit_log_format(ab, "auid=%u ses=%u",
+                        from_kuid(&init_user_ns, loginuid), sessionid);
        if (sid) {
                char *ctx = NULL;
                u32 len;
@@ -1098,8 +1152,8 @@ static void audit_log_rule_change(uid_t loginuid, u32 sessionid, u32 sid,
  * @sessionid: sessionid for netlink audit message
  * @sid: SE Linux Security ID of sender
  */
-int audit_receive_filter(int type, int pid, int uid, int seq, void *data,
-                        size_t datasz, uid_t loginuid, u32 sessionid, u32 sid)
+int audit_receive_filter(int type, int pid, int seq, void *data,
+                        size_t datasz, kuid_t loginuid, u32 sessionid, u32 sid)
 {
        struct task_struct *tsk;
        struct audit_netlink_list *dest;
@@ -1198,6 +1252,52 @@ int audit_comparator(u32 left, u32 op, u32 right)
        }
 }
 
+int audit_uid_comparator(kuid_t left, u32 op, kuid_t right)
+{
+       switch (op) {
+       case Audit_equal:
+               return uid_eq(left, right);
+       case Audit_not_equal:
+               return !uid_eq(left, right);
+       case Audit_lt:
+               return uid_lt(left, right);
+       case Audit_le:
+               return uid_lte(left, right);
+       case Audit_gt:
+               return uid_gt(left, right);
+       case Audit_ge:
+               return uid_gte(left, right);
+       case Audit_bitmask:
+       case Audit_bittest:
+       default:
+               BUG();
+               return 0;
+       }
+}
+
+int audit_gid_comparator(kgid_t left, u32 op, kgid_t right)
+{
+       switch (op) {
+       case Audit_equal:
+               return gid_eq(left, right);
+       case Audit_not_equal:
+               return !gid_eq(left, right);
+       case Audit_lt:
+               return gid_lt(left, right);
+       case Audit_le:
+               return gid_lte(left, right);
+       case Audit_gt:
+               return gid_gt(left, right);
+       case Audit_ge:
+               return gid_gte(left, right);
+       case Audit_bitmask:
+       case Audit_bittest:
+       default:
+               BUG();
+               return 0;
+       }
+}
+
 /* Compare given dentry name with last component in given path,
  * return of 0 indicates a match. */
 int audit_compare_dname_path(const char *dname, const char *path,
@@ -1236,8 +1336,7 @@ int audit_compare_dname_path(const char *dname, const char *path,
        return strncmp(p, dname, dlen);
 }
 
-static int audit_filter_user_rules(struct netlink_skb_parms *cb,
-                                  struct audit_krule *rule,
+static int audit_filter_user_rules(struct audit_krule *rule,
                                   enum audit_state *state)
 {
        int i;
@@ -1249,17 +1348,17 @@ static int audit_filter_user_rules(struct netlink_skb_parms *cb,
 
                switch (f->type) {
                case AUDIT_PID:
-                       result = audit_comparator(cb->creds.pid, f->op, f->val);
+                       result = audit_comparator(task_pid_vnr(current), f->op, f->val);
                        break;
                case AUDIT_UID:
-                       result = audit_comparator(cb->creds.uid, f->op, f->val);
+                       result = audit_uid_comparator(current_uid(), f->op, f->uid);
                        break;
                case AUDIT_GID:
-                       result = audit_comparator(cb->creds.gid, f->op, f->val);
+                       result = audit_gid_comparator(current_gid(), f->op, f->gid);
                        break;
                case AUDIT_LOGINUID:
-                       result = audit_comparator(audit_get_loginuid(current),
-                                                 f->op, f->val);
+                       result = audit_uid_comparator(audit_get_loginuid(current),
+                                                 f->op, f->uid);
                        break;
                case AUDIT_SUBJ_USER:
                case AUDIT_SUBJ_ROLE:
@@ -1287,7 +1386,7 @@ static int audit_filter_user_rules(struct netlink_skb_parms *cb,
        return 1;
 }
 
-int audit_filter_user(struct netlink_skb_parms *cb)
+int audit_filter_user(void)
 {
        enum audit_state state = AUDIT_DISABLED;
        struct audit_entry *e;
@@ -1295,7 +1394,7 @@ int audit_filter_user(struct netlink_skb_parms *cb)
 
        rcu_read_lock();
        list_for_each_entry_rcu(e, &audit_filter_list[AUDIT_FILTER_USER], list) {
-               if (audit_filter_user_rules(cb, &e->rule, &state)) {
+               if (audit_filter_user_rules(&e->rule, &state)) {
                        if (state == AUDIT_DISABLED)
                                ret = 0;
                        break;
index 4b96415527b8664753e18cb169f0de9f391f9314..29e090cc0e46a45a415d133dd8977d5b1a839368 100644 (file)
@@ -113,8 +113,8 @@ struct audit_names {
        unsigned long   ino;
        dev_t           dev;
        umode_t         mode;
-       uid_t           uid;
-       gid_t           gid;
+       kuid_t          uid;
+       kgid_t          gid;
        dev_t           rdev;
        u32             osid;
        struct audit_cap_data fcap;
@@ -149,8 +149,8 @@ struct audit_aux_data_execve {
 struct audit_aux_data_pids {
        struct audit_aux_data   d;
        pid_t                   target_pid[AUDIT_AUX_PIDS];
-       uid_t                   target_auid[AUDIT_AUX_PIDS];
-       uid_t                   target_uid[AUDIT_AUX_PIDS];
+       kuid_t                  target_auid[AUDIT_AUX_PIDS];
+       kuid_t                  target_uid[AUDIT_AUX_PIDS];
        unsigned int            target_sessionid[AUDIT_AUX_PIDS];
        u32                     target_sid[AUDIT_AUX_PIDS];
        char                    target_comm[AUDIT_AUX_PIDS][TASK_COMM_LEN];
@@ -208,14 +208,14 @@ struct audit_context {
        size_t sockaddr_len;
                                /* Save things to print about task_struct */
        pid_t               pid, ppid;
-       uid_t               uid, euid, suid, fsuid;
-       gid_t               gid, egid, sgid, fsgid;
+       kuid_t              uid, euid, suid, fsuid;
+       kgid_t              gid, egid, sgid, fsgid;
        unsigned long       personality;
        int                 arch;
 
        pid_t               target_pid;
-       uid_t               target_auid;
-       uid_t               target_uid;
+       kuid_t              target_auid;
+       kuid_t              target_uid;
        unsigned int        target_sessionid;
        u32                 target_sid;
        char                target_comm[TASK_COMM_LEN];
@@ -231,8 +231,8 @@ struct audit_context {
                        long args[6];
                } socketcall;
                struct {
-                       uid_t                   uid;
-                       gid_t                   gid;
+                       kuid_t                  uid;
+                       kgid_t                  gid;
                        umode_t                 mode;
                        u32                     osid;
                        int                     has_perm;
@@ -464,37 +464,47 @@ static int match_tree_refs(struct audit_context *ctx, struct audit_tree *tree)
        return 0;
 }
 
-static int audit_compare_id(uid_t uid1,
-                           struct audit_names *name,
-                           unsigned long name_offset,
-                           struct audit_field *f,
-                           struct audit_context *ctx)
+static int audit_compare_uid(kuid_t uid,
+                            struct audit_names *name,
+                            struct audit_field *f,
+                            struct audit_context *ctx)
 {
        struct audit_names *n;
-       unsigned long addr;
-       uid_t uid2;
        int rc;
-
-       BUILD_BUG_ON(sizeof(uid_t) != sizeof(gid_t));
-
        if (name) {
-               addr = (unsigned long)name;
-               addr += name_offset;
-
-               uid2 = *(uid_t *)addr;
-               rc = audit_comparator(uid1, f->op, uid2);
+               rc = audit_uid_comparator(uid, f->op, name->uid);
                if (rc)
                        return rc;
        }
-
        if (ctx) {
                list_for_each_entry(n, &ctx->names_list, list) {
-                       addr = (unsigned long)n;
-                       addr += name_offset;
-
-                       uid2 = *(uid_t *)addr;
+                       rc = audit_uid_comparator(uid, f->op, n->uid);
+                       if (rc)
+                               return rc;
+               }
+       }
+       return 0;
+}
 
-                       rc = audit_comparator(uid1, f->op, uid2);
+static int audit_compare_gid(kgid_t gid,
+                            struct audit_names *name,
+                            struct audit_field *f,
+                            struct audit_context *ctx)
+{
+       struct audit_names *n;
+       int rc;
+       if (name) {
+               rc = audit_gid_comparator(gid, f->op, name->gid);
+               if (rc)
+                       return rc;
+       }
+       if (ctx) {
+               list_for_each_entry(n, &ctx->names_list, list) {
+                       rc = audit_gid_comparator(gid, f->op, n->gid);
                        if (rc)
                                return rc;
                }
@@ -511,80 +521,62 @@ static int audit_field_compare(struct task_struct *tsk,
        switch (f->val) {
        /* process to file object comparisons */
        case AUDIT_COMPARE_UID_TO_OBJ_UID:
-               return audit_compare_id(cred->uid,
-                                       name, offsetof(struct audit_names, uid),
-                                       f, ctx);
+               return audit_compare_uid(cred->uid, name, f, ctx);
        case AUDIT_COMPARE_GID_TO_OBJ_GID:
-               return audit_compare_id(cred->gid,
-                                       name, offsetof(struct audit_names, gid),
-                                       f, ctx);
+               return audit_compare_gid(cred->gid, name, f, ctx);
        case AUDIT_COMPARE_EUID_TO_OBJ_UID:
-               return audit_compare_id(cred->euid,
-                                       name, offsetof(struct audit_names, uid),
-                                       f, ctx);
+               return audit_compare_uid(cred->euid, name, f, ctx);
        case AUDIT_COMPARE_EGID_TO_OBJ_GID:
-               return audit_compare_id(cred->egid,
-                                       name, offsetof(struct audit_names, gid),
-                                       f, ctx);
+               return audit_compare_gid(cred->egid, name, f, ctx);
        case AUDIT_COMPARE_AUID_TO_OBJ_UID:
-               return audit_compare_id(tsk->loginuid,
-                                       name, offsetof(struct audit_names, uid),
-                                       f, ctx);
+               return audit_compare_uid(tsk->loginuid, name, f, ctx);
        case AUDIT_COMPARE_SUID_TO_OBJ_UID:
-               return audit_compare_id(cred->suid,
-                                       name, offsetof(struct audit_names, uid),
-                                       f, ctx);
+               return audit_compare_uid(cred->suid, name, f, ctx);
        case AUDIT_COMPARE_SGID_TO_OBJ_GID:
-               return audit_compare_id(cred->sgid,
-                                       name, offsetof(struct audit_names, gid),
-                                       f, ctx);
+               return audit_compare_gid(cred->sgid, name, f, ctx);
        case AUDIT_COMPARE_FSUID_TO_OBJ_UID:
-               return audit_compare_id(cred->fsuid,
-                                       name, offsetof(struct audit_names, uid),
-                                       f, ctx);
+               return audit_compare_uid(cred->fsuid, name, f, ctx);
        case AUDIT_COMPARE_FSGID_TO_OBJ_GID:
-               return audit_compare_id(cred->fsgid,
-                                       name, offsetof(struct audit_names, gid),
-                                       f, ctx);
+               return audit_compare_gid(cred->fsgid, name, f, ctx);
        /* uid comparisons */
        case AUDIT_COMPARE_UID_TO_AUID:
-               return audit_comparator(cred->uid, f->op, tsk->loginuid);
+               return audit_uid_comparator(cred->uid, f->op, tsk->loginuid);
        case AUDIT_COMPARE_UID_TO_EUID:
-               return audit_comparator(cred->uid, f->op, cred->euid);
+               return audit_uid_comparator(cred->uid, f->op, cred->euid);
        case AUDIT_COMPARE_UID_TO_SUID:
-               return audit_comparator(cred->uid, f->op, cred->suid);
+               return audit_uid_comparator(cred->uid, f->op, cred->suid);
        case AUDIT_COMPARE_UID_TO_FSUID:
-               return audit_comparator(cred->uid, f->op, cred->fsuid);
+               return audit_uid_comparator(cred->uid, f->op, cred->fsuid);
        /* auid comparisons */
        case AUDIT_COMPARE_AUID_TO_EUID:
-               return audit_comparator(tsk->loginuid, f->op, cred->euid);
+               return audit_uid_comparator(tsk->loginuid, f->op, cred->euid);
        case AUDIT_COMPARE_AUID_TO_SUID:
-               return audit_comparator(tsk->loginuid, f->op, cred->suid);
+               return audit_uid_comparator(tsk->loginuid, f->op, cred->suid);
        case AUDIT_COMPARE_AUID_TO_FSUID:
-               return audit_comparator(tsk->loginuid, f->op, cred->fsuid);
+               return audit_uid_comparator(tsk->loginuid, f->op, cred->fsuid);
        /* euid comparisons */
        case AUDIT_COMPARE_EUID_TO_SUID:
-               return audit_comparator(cred->euid, f->op, cred->suid);
+               return audit_uid_comparator(cred->euid, f->op, cred->suid);
        case AUDIT_COMPARE_EUID_TO_FSUID:
-               return audit_comparator(cred->euid, f->op, cred->fsuid);
+               return audit_uid_comparator(cred->euid, f->op, cred->fsuid);
        /* suid comparisons */
        case AUDIT_COMPARE_SUID_TO_FSUID:
-               return audit_comparator(cred->suid, f->op, cred->fsuid);
+               return audit_uid_comparator(cred->suid, f->op, cred->fsuid);
        /* gid comparisons */
        case AUDIT_COMPARE_GID_TO_EGID:
-               return audit_comparator(cred->gid, f->op, cred->egid);
+               return audit_gid_comparator(cred->gid, f->op, cred->egid);
        case AUDIT_COMPARE_GID_TO_SGID:
-               return audit_comparator(cred->gid, f->op, cred->sgid);
+               return audit_gid_comparator(cred->gid, f->op, cred->sgid);
        case AUDIT_COMPARE_GID_TO_FSGID:
-               return audit_comparator(cred->gid, f->op, cred->fsgid);
+               return audit_gid_comparator(cred->gid, f->op, cred->fsgid);
        /* egid comparisons */
        case AUDIT_COMPARE_EGID_TO_SGID:
-               return audit_comparator(cred->egid, f->op, cred->sgid);
+               return audit_gid_comparator(cred->egid, f->op, cred->sgid);
        case AUDIT_COMPARE_EGID_TO_FSGID:
-               return audit_comparator(cred->egid, f->op, cred->fsgid);
+               return audit_gid_comparator(cred->egid, f->op, cred->fsgid);
        /* sgid comparison */
        case AUDIT_COMPARE_SGID_TO_FSGID:
-               return audit_comparator(cred->sgid, f->op, cred->fsgid);
+               return audit_gid_comparator(cred->sgid, f->op, cred->fsgid);
        default:
                WARN(1, "Missing AUDIT_COMPARE define.  Report as a bug\n");
                return 0;
@@ -630,28 +622,28 @@ static int audit_filter_rules(struct task_struct *tsk,
                        }
                        break;
                case AUDIT_UID:
-                       result = audit_comparator(cred->uid, f->op, f->val);
+                       result = audit_uid_comparator(cred->uid, f->op, f->uid);
                        break;
                case AUDIT_EUID:
-                       result = audit_comparator(cred->euid, f->op, f->val);
+                       result = audit_uid_comparator(cred->euid, f->op, f->uid);
                        break;
                case AUDIT_SUID:
-                       result = audit_comparator(cred->suid, f->op, f->val);
+                       result = audit_uid_comparator(cred->suid, f->op, f->uid);
                        break;
                case AUDIT_FSUID:
-                       result = audit_comparator(cred->fsuid, f->op, f->val);
+                       result = audit_uid_comparator(cred->fsuid, f->op, f->uid);
                        break;
                case AUDIT_GID:
-                       result = audit_comparator(cred->gid, f->op, f->val);
+                       result = audit_gid_comparator(cred->gid, f->op, f->gid);
                        break;
                case AUDIT_EGID:
-                       result = audit_comparator(cred->egid, f->op, f->val);
+                       result = audit_gid_comparator(cred->egid, f->op, f->gid);
                        break;
                case AUDIT_SGID:
-                       result = audit_comparator(cred->sgid, f->op, f->val);
+                       result = audit_gid_comparator(cred->sgid, f->op, f->gid);
                        break;
                case AUDIT_FSGID:
-                       result = audit_comparator(cred->fsgid, f->op, f->val);
+                       result = audit_gid_comparator(cred->fsgid, f->op, f->gid);
                        break;
                case AUDIT_PERS:
                        result = audit_comparator(tsk->personality, f->op, f->val);
@@ -717,10 +709,10 @@ static int audit_filter_rules(struct task_struct *tsk,
                        break;
                case AUDIT_OBJ_UID:
                        if (name) {
-                               result = audit_comparator(name->uid, f->op, f->val);
+                               result = audit_uid_comparator(name->uid, f->op, f->uid);
                        } else if (ctx) {
                                list_for_each_entry(n, &ctx->names_list, list) {
-                                       if (audit_comparator(n->uid, f->op, f->val)) {
+                                       if (audit_uid_comparator(n->uid, f->op, f->uid)) {
                                                ++result;
                                                break;
                                        }
@@ -729,10 +721,10 @@ static int audit_filter_rules(struct task_struct *tsk,
                        break;
                case AUDIT_OBJ_GID:
                        if (name) {
-                               result = audit_comparator(name->gid, f->op, f->val);
+                               result = audit_gid_comparator(name->gid, f->op, f->gid);
                        } else if (ctx) {
                                list_for_each_entry(n, &ctx->names_list, list) {
-                                       if (audit_comparator(n->gid, f->op, f->val)) {
+                                       if (audit_gid_comparator(n->gid, f->op, f->gid)) {
                                                ++result;
                                                break;
                                        }
@@ -750,7 +742,7 @@ static int audit_filter_rules(struct task_struct *tsk,
                case AUDIT_LOGINUID:
                        result = 0;
                        if (ctx)
-                               result = audit_comparator(tsk->loginuid, f->op, f->val);
+                               result = audit_uid_comparator(tsk->loginuid, f->op, f->uid);
                        break;
                case AUDIT_SUBJ_USER:
                case AUDIT_SUBJ_ROLE:
@@ -1154,13 +1146,44 @@ error_path:
 
 EXPORT_SYMBOL(audit_log_task_context);
 
-static void audit_log_task_info(struct audit_buffer *ab, struct task_struct *tsk)
+void audit_log_task_info(struct audit_buffer *ab, struct task_struct *tsk)
 {
+       const struct cred *cred;
        char name[sizeof(tsk->comm)];
        struct mm_struct *mm = tsk->mm;
        struct vm_area_struct *vma;
+       char *tty;
+
+       if (!ab)
+               return;
 
        /* tsk == current */
+       cred = current_cred();
+
+       spin_lock_irq(&tsk->sighand->siglock);
+       if (tsk->signal && tsk->signal->tty && tsk->signal->tty->name)
+               tty = tsk->signal->tty->name;
+       else
+               tty = "(none)";
+       spin_unlock_irq(&tsk->sighand->siglock);
+
+
+       audit_log_format(ab,
+                        " ppid=%ld pid=%d auid=%u uid=%u gid=%u"
+                        " euid=%u suid=%u fsuid=%u"
+                        " egid=%u sgid=%u fsgid=%u ses=%u tty=%s",
+                        sys_getppid(),
+                        tsk->pid,
+                        from_kuid(&init_user_ns, tsk->loginuid),
+                        from_kuid(&init_user_ns, cred->uid),
+                        from_kgid(&init_user_ns, cred->gid),
+                        from_kuid(&init_user_ns, cred->euid),
+                        from_kuid(&init_user_ns, cred->suid),
+                        from_kuid(&init_user_ns, cred->fsuid),
+                        from_kgid(&init_user_ns, cred->egid),
+                        from_kgid(&init_user_ns, cred->sgid),
+                        from_kgid(&init_user_ns, cred->fsgid),
+                        tsk->sessionid, tty);
 
        get_task_comm(name, tsk);
        audit_log_format(ab, " comm=");
@@ -1183,8 +1206,10 @@ static void audit_log_task_info(struct audit_buffer *ab, struct task_struct *tsk
        audit_log_task_context(ab);
 }
 
+EXPORT_SYMBOL(audit_log_task_info);
+
 static int audit_log_pid_context(struct audit_context *context, pid_t pid,
-                                uid_t auid, uid_t uid, unsigned int sessionid,
+                                kuid_t auid, kuid_t uid, unsigned int sessionid,
                                 u32 sid, char *comm)
 {
        struct audit_buffer *ab;
@@ -1196,8 +1221,9 @@ static int audit_log_pid_context(struct audit_context *context, pid_t pid,
        if (!ab)
                return rc;
 
-       audit_log_format(ab, "opid=%d oauid=%d ouid=%d oses=%d", pid, auid,
-                        uid, sessionid);
+       audit_log_format(ab, "opid=%d oauid=%d ouid=%d oses=%d", pid,
+                        from_kuid(&init_user_ns, auid),
+                        from_kuid(&init_user_ns, uid), sessionid);
        if (security_secid_to_secctx(sid, &ctx, &len)) {
                audit_log_format(ab, " obj=(none)");
                rc = 1;
@@ -1447,7 +1473,9 @@ static void show_special(struct audit_context *context, int *call_panic)
                u32 osid = context->ipc.osid;
 
                audit_log_format(ab, "ouid=%u ogid=%u mode=%#ho",
-                        context->ipc.uid, context->ipc.gid, context->ipc.mode);
+                                from_kuid(&init_user_ns, context->ipc.uid),
+                                from_kgid(&init_user_ns, context->ipc.gid),
+                                context->ipc.mode);
                if (osid) {
                        char *ctx = NULL;
                        u32 len;
@@ -1560,8 +1588,8 @@ static void audit_log_name(struct audit_context *context, struct audit_names *n,
                                 MAJOR(n->dev),
                                 MINOR(n->dev),
                                 n->mode,
-                                n->uid,
-                                n->gid,
+                                from_kuid(&init_user_ns, n->uid),
+                                from_kgid(&init_user_ns, n->gid),
                                 MAJOR(n->rdev),
                                 MINOR(n->rdev));
        }
@@ -1585,26 +1613,12 @@ static void audit_log_name(struct audit_context *context, struct audit_names *n,
 
 static void audit_log_exit(struct audit_context *context, struct task_struct *tsk)
 {
-       const struct cred *cred;
        int i, call_panic = 0;
        struct audit_buffer *ab;
        struct audit_aux_data *aux;
-       const char *tty;
        struct audit_names *n;
 
        /* tsk == current */
-       context->pid = tsk->pid;
-       if (!context->ppid)
-               context->ppid = sys_getppid();
-       cred = current_cred();
-       context->uid   = cred->uid;
-       context->gid   = cred->gid;
-       context->euid  = cred->euid;
-       context->suid  = cred->suid;
-       context->fsuid = cred->fsuid;
-       context->egid  = cred->egid;
-       context->sgid  = cred->sgid;
-       context->fsgid = cred->fsgid;
        context->personality = tsk->personality;
 
        ab = audit_log_start(context, GFP_KERNEL, AUDIT_SYSCALL);
@@ -1619,32 +1633,13 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts
                                 (context->return_valid==AUDITSC_SUCCESS)?"yes":"no",
                                 context->return_code);
 
-       spin_lock_irq(&tsk->sighand->siglock);
-       if (tsk->signal && tsk->signal->tty && tsk->signal->tty->name)
-               tty = tsk->signal->tty->name;
-       else
-               tty = "(none)";
-       spin_unlock_irq(&tsk->sighand->siglock);
-
        audit_log_format(ab,
-                 " a0=%lx a1=%lx a2=%lx a3=%lx items=%d"
-                 " ppid=%d pid=%d auid=%u uid=%u gid=%u"
-                 " euid=%u suid=%u fsuid=%u"
-                 " egid=%u sgid=%u fsgid=%u tty=%s ses=%u",
-                 context->argv[0],
-                 context->argv[1],
-                 context->argv[2],
-                 context->argv[3],
-                 context->name_count,
-                 context->ppid,
-                 context->pid,
-                 tsk->loginuid,
-                 context->uid,
-                 context->gid,
-                 context->euid, context->suid, context->fsuid,
-                 context->egid, context->sgid, context->fsgid, tty,
-                 tsk->sessionid);
-
+                        " a0=%lx a1=%lx a2=%lx a3=%lx items=%d",
+                        context->argv[0],
+                        context->argv[1],
+                        context->argv[2],
+                        context->argv[3],
+                        context->name_count);
 
        audit_log_task_info(ab, tsk);
        audit_log_key(ab, context->filterkey);
@@ -2299,14 +2294,14 @@ static atomic_t session_id = ATOMIC_INIT(0);
  *
  * Called (set) from fs/proc/base.c::proc_loginuid_write().
  */
-int audit_set_loginuid(uid_t loginuid)
+int audit_set_loginuid(kuid_t loginuid)
 {
        struct task_struct *task = current;
        struct audit_context *context = task->audit_context;
        unsigned int sessionid;
 
 #ifdef CONFIG_AUDIT_LOGINUID_IMMUTABLE
-       if (task->loginuid != -1)
+       if (uid_valid(task->loginuid))
                return -EPERM;
 #else /* CONFIG_AUDIT_LOGINUID_IMMUTABLE */
        if (!capable(CAP_AUDIT_CONTROL))
@@ -2322,8 +2317,10 @@ int audit_set_loginuid(uid_t loginuid)
                        audit_log_format(ab, "login pid=%d uid=%u "
                                "old auid=%u new auid=%u"
                                " old ses=%u new ses=%u",
-                               task->pid, task_uid(task),
-                               task->loginuid, loginuid,
+                               task->pid,
+                               from_kuid(&init_user_ns, task_uid(task)),
+                               from_kuid(&init_user_ns, task->loginuid),
+                               from_kuid(&init_user_ns, loginuid),
                                task->sessionid, sessionid);
                        audit_log_end(ab);
                }
@@ -2546,12 +2543,12 @@ int __audit_signal_info(int sig, struct task_struct *t)
        struct audit_aux_data_pids *axp;
        struct task_struct *tsk = current;
        struct audit_context *ctx = tsk->audit_context;
-       uid_t uid = current_uid(), t_uid = task_uid(t);
+       kuid_t uid = current_uid(), t_uid = task_uid(t);
 
        if (audit_pid && t->tgid == audit_pid) {
                if (sig == SIGTERM || sig == SIGHUP || sig == SIGUSR1 || sig == SIGUSR2) {
                        audit_sig_pid = tsk->pid;
-                       if (tsk->loginuid != -1)
+                       if (uid_valid(tsk->loginuid))
                                audit_sig_uid = tsk->loginuid;
                        else
                                audit_sig_uid = uid;
@@ -2672,8 +2669,8 @@ void __audit_mmap_fd(int fd, int flags)
 
 static void audit_log_abend(struct audit_buffer *ab, char *reason, long signr)
 {
-       uid_t auid, uid;
-       gid_t gid;
+       kuid_t auid, uid;
+       kgid_t gid;
        unsigned int sessionid;
 
        auid = audit_get_loginuid(current);
@@ -2681,7 +2678,10 @@ static void audit_log_abend(struct audit_buffer *ab, char *reason, long signr)
        current_uid_gid(&uid, &gid);
 
        audit_log_format(ab, "auid=%u uid=%u gid=%u ses=%u",
-                        auid, uid, gid, sessionid);
+                        from_kuid(&init_user_ns, auid),
+                        from_kuid(&init_user_ns, uid),
+                        from_kgid(&init_user_ns, gid),
+                        sessionid);
        audit_log_task_context(ab);
        audit_log_format(ab, " pid=%d comm=", current->pid);
        audit_log_untrustedstring(ab, current->comm);
index 79818507e444aa3050c9994841258292c294bd2e..13774b3b39aac9b73e25ba34a366d5402b70df00 100644 (file)
@@ -88,11 +88,12 @@ static DEFINE_MUTEX(cgroup_root_mutex);
 
 /*
  * Generate an array of cgroup subsystem pointers. At boot time, this is
- * populated up to CGROUP_BUILTIN_SUBSYS_COUNT, and modular subsystems are
+ * populated with the built in subsystems, and modular subsystems are
  * registered after that. The mutable section of this array is protected by
  * cgroup_mutex.
  */
-#define SUBSYS(_x) &_x ## _subsys,
+#define SUBSYS(_x) [_x ## _subsys_id] = &_x ## _subsys,
+#define IS_SUBSYS_ENABLED(option) IS_BUILTIN(option)
 static struct cgroup_subsys *subsys[CGROUP_SUBSYS_COUNT] = {
 #include <linux/cgroup_subsys.h>
 };
@@ -111,13 +112,13 @@ struct cgroupfs_root {
         * The bitmask of subsystems intended to be attached to this
         * hierarchy
         */
-       unsigned long subsys_bits;
+       unsigned long subsys_mask;
 
        /* Unique id for this hierarchy. */
        int hierarchy_id;
 
        /* The bitmask of subsystems currently attached to this hierarchy */
-       unsigned long actual_subsys_bits;
+       unsigned long actual_subsys_mask;
 
        /* A list running through the attached subsystems */
        struct list_head subsys_list;
@@ -276,7 +277,8 @@ inline int cgroup_is_removed(const struct cgroup *cgrp)
 
 /* bits in struct cgroupfs_root flags field */
 enum {
-       ROOT_NOPREFIX, /* mounted subsystems have no named prefix */
+       ROOT_NOPREFIX,  /* mounted subsystems have no named prefix */
+       ROOT_XATTR,     /* supports extended attributes */
 };
 
 static int cgroup_is_releasable(const struct cgroup *cgrp)
@@ -556,7 +558,7 @@ static struct css_set *find_existing_css_set(
         * won't change, so no need for locking.
         */
        for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
-               if (root->subsys_bits & (1UL << i)) {
+               if (root->subsys_mask & (1UL << i)) {
                        /* Subsystem is in this hierarchy. So we want
                         * the subsystem state from the new
                         * cgroup */
@@ -824,7 +826,8 @@ EXPORT_SYMBOL_GPL(cgroup_unlock);
 static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode);
 static struct dentry *cgroup_lookup(struct inode *, struct dentry *, unsigned int);
 static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry);
-static int cgroup_populate_dir(struct cgroup *cgrp);
+static int cgroup_populate_dir(struct cgroup *cgrp, bool base_files,
+                              unsigned long subsys_mask);
 static const struct inode_operations cgroup_dir_inode_operations;
 static const struct file_operations proc_cgroupstats_operations;
 
@@ -912,15 +915,19 @@ static void cgroup_diput(struct dentry *dentry, struct inode *inode)
                 */
                BUG_ON(!list_empty(&cgrp->pidlists));
 
+               simple_xattrs_free(&cgrp->xattrs);
+
                kfree_rcu(cgrp, rcu_head);
        } else {
                struct cfent *cfe = __d_cfe(dentry);
                struct cgroup *cgrp = dentry->d_parent->d_fsdata;
+               struct cftype *cft = cfe->type;
 
                WARN_ONCE(!list_empty(&cfe->node) &&
                          cgrp != &cgrp->root->top_cgroup,
                          "cfe still linked for %s\n", cfe->type->name);
                kfree(cfe);
+               simple_xattrs_free(&cft->xattrs);
        }
        iput(inode);
 }
@@ -963,12 +970,29 @@ static int cgroup_rm_file(struct cgroup *cgrp, const struct cftype *cft)
        return -ENOENT;
 }
 
-static void cgroup_clear_directory(struct dentry *dir)
+/**
+ * cgroup_clear_directory - selective removal of base and subsystem files
+ * @dir: directory containing the files
+ * @base_files: true if the base files should be removed
+ * @subsys_mask: mask of the subsystem ids whose files should be removed
+ */
+static void cgroup_clear_directory(struct dentry *dir, bool base_files,
+                                  unsigned long subsys_mask)
 {
        struct cgroup *cgrp = __d_cgrp(dir);
+       struct cgroup_subsys *ss;
 
-       while (!list_empty(&cgrp->files))
-               cgroup_rm_file(cgrp, NULL);
+       for_each_subsys(cgrp->root, ss) {
+               struct cftype_set *set;
+               if (!test_bit(ss->subsys_id, &subsys_mask))
+                       continue;
+               list_for_each_entry(set, &ss->cftsets, node)
+                       cgroup_rm_file(cgrp, set->cfts);
+       }
+       if (base_files) {
+               while (!list_empty(&cgrp->files))
+                       cgroup_rm_file(cgrp, NULL);
+       }
 }
 
 /*
@@ -977,8 +1001,9 @@ static void cgroup_clear_directory(struct dentry *dir)
 static void cgroup_d_remove_dir(struct dentry *dentry)
 {
        struct dentry *parent;
+       struct cgroupfs_root *root = dentry->d_sb->s_fs_info;
 
-       cgroup_clear_directory(dentry);
+       cgroup_clear_directory(dentry, true, root->subsys_mask);
 
        parent = dentry->d_parent;
        spin_lock(&parent->d_lock);
@@ -1022,22 +1047,22 @@ void cgroup_release_and_wakeup_rmdir(struct cgroup_subsys_state *css)
  * returns an error, no reference counts are touched.
  */
 static int rebind_subsystems(struct cgroupfs_root *root,
-                             unsigned long final_bits)
+                             unsigned long final_subsys_mask)
 {
-       unsigned long added_bits, removed_bits;
+       unsigned long added_mask, removed_mask;
        struct cgroup *cgrp = &root->top_cgroup;
        int i;
 
        BUG_ON(!mutex_is_locked(&cgroup_mutex));
        BUG_ON(!mutex_is_locked(&cgroup_root_mutex));
 
-       removed_bits = root->actual_subsys_bits & ~final_bits;
-       added_bits = final_bits & ~root->actual_subsys_bits;
+       removed_mask = root->actual_subsys_mask & ~final_subsys_mask;
+       added_mask = final_subsys_mask & ~root->actual_subsys_mask;
        /* Check that any added subsystems are currently free */
        for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
                unsigned long bit = 1UL << i;
                struct cgroup_subsys *ss = subsys[i];
-               if (!(bit & added_bits))
+               if (!(bit & added_mask))
                        continue;
                /*
                 * Nobody should tell us to do a subsys that doesn't exist:
@@ -1062,7 +1087,7 @@ static int rebind_subsystems(struct cgroupfs_root *root,
        for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
                struct cgroup_subsys *ss = subsys[i];
                unsigned long bit = 1UL << i;
-               if (bit & added_bits) {
+               if (bit & added_mask) {
                        /* We're binding this subsystem to this hierarchy */
                        BUG_ON(ss == NULL);
                        BUG_ON(cgrp->subsys[i]);
@@ -1075,7 +1100,7 @@ static int rebind_subsystems(struct cgroupfs_root *root,
                        if (ss->bind)
                                ss->bind(cgrp);
                        /* refcount was already taken, and we're keeping it */
-               } else if (bit & removed_bits) {
+               } else if (bit & removed_mask) {
                        /* We're removing this subsystem */
                        BUG_ON(ss == NULL);
                        BUG_ON(cgrp->subsys[i] != dummytop->subsys[i]);
@@ -1088,7 +1113,7 @@ static int rebind_subsystems(struct cgroupfs_root *root,
                        list_move(&ss->sibling, &rootnode.subsys_list);
                        /* subsystem is now free - drop reference on module */
                        module_put(ss->module);
-               } else if (bit & final_bits) {
+               } else if (bit & final_subsys_mask) {
                        /* Subsystem state should already exist */
                        BUG_ON(ss == NULL);
                        BUG_ON(!cgrp->subsys[i]);
@@ -1105,7 +1130,7 @@ static int rebind_subsystems(struct cgroupfs_root *root,
                        BUG_ON(cgrp->subsys[i]);
                }
        }
-       root->subsys_bits = root->actual_subsys_bits = final_bits;
+       root->subsys_mask = root->actual_subsys_mask = final_subsys_mask;
        synchronize_rcu();
 
        return 0;
@@ -1121,6 +1146,8 @@ static int cgroup_show_options(struct seq_file *seq, struct dentry *dentry)
                seq_printf(seq, ",%s", ss->name);
        if (test_bit(ROOT_NOPREFIX, &root->flags))
                seq_puts(seq, ",noprefix");
+       if (test_bit(ROOT_XATTR, &root->flags))
+               seq_puts(seq, ",xattr");
        if (strlen(root->release_agent_path))
                seq_printf(seq, ",release_agent=%s", root->release_agent_path);
        if (clone_children(&root->top_cgroup))
@@ -1132,7 +1159,7 @@ static int cgroup_show_options(struct seq_file *seq, struct dentry *dentry)
 }
 
 struct cgroup_sb_opts {
-       unsigned long subsys_bits;
+       unsigned long subsys_mask;
        unsigned long flags;
        char *release_agent;
        bool clone_children;
@@ -1189,6 +1216,10 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
                        opts->clone_children = true;
                        continue;
                }
+               if (!strcmp(token, "xattr")) {
+                       set_bit(ROOT_XATTR, &opts->flags);
+                       continue;
+               }
                if (!strncmp(token, "release_agent=", 14)) {
                        /* Specifying two release agents is forbidden */
                        if (opts->release_agent)
@@ -1237,7 +1268,7 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
                        /* Mutually exclusive option 'all' + subsystem name */
                        if (all_ss)
                                return -EINVAL;
-                       set_bit(i, &opts->subsys_bits);
+                       set_bit(i, &opts->subsys_mask);
                        one_ss = true;
 
                        break;
@@ -1258,7 +1289,7 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
                                continue;
                        if (ss->disabled)
                                continue;
-                       set_bit(i, &opts->subsys_bits);
+                       set_bit(i, &opts->subsys_mask);
                }
        }
 
@@ -1270,19 +1301,19 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
         * the cpuset subsystem.
         */
        if (test_bit(ROOT_NOPREFIX, &opts->flags) &&
-           (opts->subsys_bits & mask))
+           (opts->subsys_mask & mask))
                return -EINVAL;
 
 
        /* Can't specify "none" and some subsystems */
-       if (opts->subsys_bits && opts->none)
+       if (opts->subsys_mask && opts->none)
                return -EINVAL;
 
        /*
         * We either have to specify by name or by subsystems. (So all
         * empty hierarchies must have a name).
         */
-       if (!opts->subsys_bits && !opts->name)
+       if (!opts->subsys_mask && !opts->name)
                return -EINVAL;
 
        /*
@@ -1291,10 +1322,10 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
         * take duplicate reference counts on a subsystem that's already used,
         * but rebind_subsystems handles this case.
         */
-       for (i = CGROUP_BUILTIN_SUBSYS_COUNT; i < CGROUP_SUBSYS_COUNT; i++) {
+       for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
                unsigned long bit = 1UL << i;
 
-               if (!(bit & opts->subsys_bits))
+               if (!(bit & opts->subsys_mask))
                        continue;
                if (!try_module_get(subsys[i]->module)) {
                        module_pin_failed = true;
@@ -1307,11 +1338,11 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
                 * raced with a module_delete call, and to the user this is
                 * essentially a "subsystem doesn't exist" case.
                 */
-               for (i--; i >= CGROUP_BUILTIN_SUBSYS_COUNT; i--) {
+               for (i--; i >= 0; i--) {
                        /* drop refcounts only on the ones we took */
                        unsigned long bit = 1UL << i;
 
-                       if (!(bit & opts->subsys_bits))
+                       if (!(bit & opts->subsys_mask))
                                continue;
                        module_put(subsys[i]->module);
                }
@@ -1321,13 +1352,13 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
        return 0;
 }
 
-static void drop_parsed_module_refcounts(unsigned long subsys_bits)
+static void drop_parsed_module_refcounts(unsigned long subsys_mask)
 {
        int i;
-       for (i = CGROUP_BUILTIN_SUBSYS_COUNT; i < CGROUP_SUBSYS_COUNT; i++) {
+       for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
                unsigned long bit = 1UL << i;
 
-               if (!(bit & subsys_bits))
+               if (!(bit & subsys_mask))
                        continue;
                module_put(subsys[i]->module);
        }
@@ -1339,6 +1370,7 @@ static int cgroup_remount(struct super_block *sb, int *flags, char *data)
        struct cgroupfs_root *root = sb->s_fs_info;
        struct cgroup *cgrp = &root->top_cgroup;
        struct cgroup_sb_opts opts;
+       unsigned long added_mask, removed_mask;
 
        mutex_lock(&cgrp->dentry->d_inode->i_mutex);
        mutex_lock(&cgroup_mutex);
@@ -1350,27 +1382,31 @@ static int cgroup_remount(struct super_block *sb, int *flags, char *data)
                goto out_unlock;
 
        /* See feature-removal-schedule.txt */
-       if (opts.subsys_bits != root->actual_subsys_bits || opts.release_agent)
+       if (opts.subsys_mask != root->actual_subsys_mask || opts.release_agent)
                pr_warning("cgroup: option changes via remount are deprecated (pid=%d comm=%s)\n",
                           task_tgid_nr(current), current->comm);
 
+       added_mask = opts.subsys_mask & ~root->subsys_mask;
+       removed_mask = root->subsys_mask & ~opts.subsys_mask;
+
        /* Don't allow flags or name to change at remount */
        if (opts.flags != root->flags ||
            (opts.name && strcmp(opts.name, root->name))) {
                ret = -EINVAL;
-               drop_parsed_module_refcounts(opts.subsys_bits);
+               drop_parsed_module_refcounts(opts.subsys_mask);
                goto out_unlock;
        }
 
-       ret = rebind_subsystems(root, opts.subsys_bits);
+       ret = rebind_subsystems(root, opts.subsys_mask);
        if (ret) {
-               drop_parsed_module_refcounts(opts.subsys_bits);
+               drop_parsed_module_refcounts(opts.subsys_mask);
                goto out_unlock;
        }
 
        /* clear out any existing files and repopulate subsystem files */
-       cgroup_clear_directory(cgrp->dentry);
-       cgroup_populate_dir(cgrp);
+       cgroup_clear_directory(cgrp->dentry, false, removed_mask);
+       /* re-populate subsystem files */
+       cgroup_populate_dir(cgrp, false, added_mask);
 
        if (opts.release_agent)
                strcpy(root->release_agent_path, opts.release_agent);
@@ -1401,6 +1437,7 @@ static void init_cgroup_housekeeping(struct cgroup *cgrp)
        mutex_init(&cgrp->pidlist_mutex);
        INIT_LIST_HEAD(&cgrp->event_list);
        spin_lock_init(&cgrp->event_list_lock);
+       simple_xattrs_init(&cgrp->xattrs);
 }
 
 static void init_cgroup_root(struct cgroupfs_root *root)
@@ -1455,8 +1492,8 @@ static int cgroup_test_super(struct super_block *sb, void *data)
         * If we asked for subsystems (or explicitly for no
         * subsystems) then they must match
         */
-       if ((opts->subsys_bits || opts->none)
-           && (opts->subsys_bits != root->subsys_bits))
+       if ((opts->subsys_mask || opts->none)
+           && (opts->subsys_mask != root->subsys_mask))
                return 0;
 
        return 1;
@@ -1466,7 +1503,7 @@ static struct cgroupfs_root *cgroup_root_from_opts(struct cgroup_sb_opts *opts)
 {
        struct cgroupfs_root *root;
 
-       if (!opts->subsys_bits && !opts->none)
+       if (!opts->subsys_mask && !opts->none)
                return NULL;
 
        root = kzalloc(sizeof(*root), GFP_KERNEL);
@@ -1479,7 +1516,7 @@ static struct cgroupfs_root *cgroup_root_from_opts(struct cgroup_sb_opts *opts)
        }
        init_cgroup_root(root);
 
-       root->subsys_bits = opts->subsys_bits;
+       root->subsys_mask = opts->subsys_mask;
        root->flags = opts->flags;
        if (opts->release_agent)
                strcpy(root->release_agent_path, opts->release_agent);
@@ -1511,7 +1548,7 @@ static int cgroup_set_super(struct super_block *sb, void *data)
        if (!opts->new_root)
                return -EINVAL;
 
-       BUG_ON(!opts->subsys_bits && !opts->none);
+       BUG_ON(!opts->subsys_mask && !opts->none);
 
        ret = set_anon_super(sb, NULL);
        if (ret)
@@ -1629,7 +1666,7 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
                if (ret)
                        goto unlock_drop;
 
-               ret = rebind_subsystems(root, root->subsys_bits);
+               ret = rebind_subsystems(root, root->subsys_mask);
                if (ret == -EBUSY) {
                        free_cg_links(&tmp_cg_links);
                        goto unlock_drop;
@@ -1669,7 +1706,7 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
                BUG_ON(root->number_of_cgroups != 1);
 
                cred = override_creds(&init_cred);
-               cgroup_populate_dir(root_cgrp);
+               cgroup_populate_dir(root_cgrp, true, root->subsys_mask);
                revert_creds(cred);
                mutex_unlock(&cgroup_root_mutex);
                mutex_unlock(&cgroup_mutex);
@@ -1681,7 +1718,7 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
                 */
                cgroup_drop_root(opts.new_root);
                /* no subsys rebinding, so refcounts don't change */
-               drop_parsed_module_refcounts(opts.subsys_bits);
+               drop_parsed_module_refcounts(opts.subsys_mask);
        }
 
        kfree(opts.release_agent);
@@ -1695,7 +1732,7 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
  drop_new_super:
        deactivate_locked_super(sb);
  drop_modules:
-       drop_parsed_module_refcounts(opts.subsys_bits);
+       drop_parsed_module_refcounts(opts.subsys_mask);
  out_err:
        kfree(opts.release_agent);
        kfree(opts.name);
@@ -1745,6 +1782,8 @@ static void cgroup_kill_sb(struct super_block *sb) {
        mutex_unlock(&cgroup_root_mutex);
        mutex_unlock(&cgroup_mutex);
 
+       simple_xattrs_free(&cgrp->xattrs);
+
        kill_litter_super(sb);
        cgroup_drop_root(root);
 }
@@ -2551,6 +2590,64 @@ static int cgroup_rename(struct inode *old_dir, struct dentry *old_dentry,
        return simple_rename(old_dir, old_dentry, new_dir, new_dentry);
 }
 
+static struct simple_xattrs *__d_xattrs(struct dentry *dentry)
+{
+       if (S_ISDIR(dentry->d_inode->i_mode))
+               return &__d_cgrp(dentry)->xattrs;
+       else
+               return &__d_cft(dentry)->xattrs;
+}
+
+static inline int xattr_enabled(struct dentry *dentry)
+{
+       struct cgroupfs_root *root = dentry->d_sb->s_fs_info;
+       return test_bit(ROOT_XATTR, &root->flags);
+}
+
+static bool is_valid_xattr(const char *name)
+{
+       if (!strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) ||
+           !strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN))
+               return true;
+       return false;
+}
+
+static int cgroup_setxattr(struct dentry *dentry, const char *name,
+                          const void *val, size_t size, int flags)
+{
+       if (!xattr_enabled(dentry))
+               return -EOPNOTSUPP;
+       if (!is_valid_xattr(name))
+               return -EINVAL;
+       return simple_xattr_set(__d_xattrs(dentry), name, val, size, flags);
+}
+
+static int cgroup_removexattr(struct dentry *dentry, const char *name)
+{
+       if (!xattr_enabled(dentry))
+               return -EOPNOTSUPP;
+       if (!is_valid_xattr(name))
+               return -EINVAL;
+       return simple_xattr_remove(__d_xattrs(dentry), name);
+}
+
+static ssize_t cgroup_getxattr(struct dentry *dentry, const char *name,
+                              void *buf, size_t size)
+{
+       if (!xattr_enabled(dentry))
+               return -EOPNOTSUPP;
+       if (!is_valid_xattr(name))
+               return -EINVAL;
+       return simple_xattr_get(__d_xattrs(dentry), name, buf, size);
+}
+
+static ssize_t cgroup_listxattr(struct dentry *dentry, char *buf, size_t size)
+{
+       if (!xattr_enabled(dentry))
+               return -EOPNOTSUPP;
+       return simple_xattr_list(__d_xattrs(dentry), buf, size);
+}
+
 static const struct file_operations cgroup_file_operations = {
        .read = cgroup_file_read,
        .write = cgroup_file_write,
@@ -2559,11 +2656,22 @@ static const struct file_operations cgroup_file_operations = {
        .release = cgroup_file_release,
 };
 
+static const struct inode_operations cgroup_file_inode_operations = {
+       .setxattr = cgroup_setxattr,
+       .getxattr = cgroup_getxattr,
+       .listxattr = cgroup_listxattr,
+       .removexattr = cgroup_removexattr,
+};
+
 static const struct inode_operations cgroup_dir_inode_operations = {
        .lookup = cgroup_lookup,
        .mkdir = cgroup_mkdir,
        .rmdir = cgroup_rmdir,
        .rename = cgroup_rename,
+       .setxattr = cgroup_setxattr,
+       .getxattr = cgroup_getxattr,
+       .listxattr = cgroup_listxattr,
+       .removexattr = cgroup_removexattr,
 };
 
 static struct dentry *cgroup_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
@@ -2611,6 +2719,7 @@ static int cgroup_create_file(struct dentry *dentry, umode_t mode,
        } else if (S_ISREG(mode)) {
                inode->i_size = 0;
                inode->i_fop = &cgroup_file_operations;
+               inode->i_op = &cgroup_file_inode_operations;
        }
        d_instantiate(dentry, inode);
        dget(dentry);   /* Extra count - pin the dentry in core */
@@ -2671,7 +2780,7 @@ static umode_t cgroup_file_mode(const struct cftype *cft)
 }
 
 static int cgroup_add_file(struct cgroup *cgrp, struct cgroup_subsys *subsys,
-                          const struct cftype *cft)
+                          struct cftype *cft)
 {
        struct dentry *dir = cgrp->dentry;
        struct cgroup *parent = __d_cgrp(dir);
@@ -2681,6 +2790,8 @@ static int cgroup_add_file(struct cgroup *cgrp, struct cgroup_subsys *subsys,
        umode_t mode;
        char name[MAX_CGROUP_TYPE_NAMELEN + MAX_CFTYPE_NAME + 2] = { 0 };
 
+       simple_xattrs_init(&cft->xattrs);
+
        /* does @cft->flags tell us to skip creation on @cgrp? */
        if ((cft->flags & CFTYPE_NOT_ON_ROOT) && !cgrp->parent)
                return 0;
@@ -2721,9 +2832,9 @@ out:
 }
 
 static int cgroup_addrm_files(struct cgroup *cgrp, struct cgroup_subsys *subsys,
-                             const struct cftype cfts[], bool is_add)
+                             struct cftype cfts[], bool is_add)
 {
-       const struct cftype *cft;
+       struct cftype *cft;
        int err, ret = 0;
 
        for (cft = cfts; cft->name[0] != '\0'; cft++) {
@@ -2757,7 +2868,7 @@ static void cgroup_cfts_prepare(void)
 }
 
 static void cgroup_cfts_commit(struct cgroup_subsys *ss,
-                              const struct cftype *cfts, bool is_add)
+                              struct cftype *cfts, bool is_add)
        __releases(&cgroup_mutex) __releases(&cgroup_cft_mutex)
 {
        LIST_HEAD(pending);
@@ -2808,7 +2919,7 @@ static void cgroup_cfts_commit(struct cgroup_subsys *ss,
  * function currently returns 0 as long as @cfts registration is successful
  * even if some file creation attempts on existing cgroups fail.
  */
-int cgroup_add_cftypes(struct cgroup_subsys *ss, const struct cftype *cfts)
+int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
 {
        struct cftype_set *set;
 
@@ -2838,7 +2949,7 @@ EXPORT_SYMBOL_GPL(cgroup_add_cftypes);
  * Returns 0 on successful unregistration, -ENOENT if @cfts is not
  * registered with @ss.
  */
-int cgroup_rm_cftypes(struct cgroup_subsys *ss, const struct cftype *cfts)
+int cgroup_rm_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
 {
        struct cftype_set *set;
 
@@ -3843,18 +3954,29 @@ static struct cftype files[] = {
        { }     /* terminate */
 };
 
-static int cgroup_populate_dir(struct cgroup *cgrp)
+/**
+ * cgroup_populate_dir - selective creation of files in a directory
+ * @cgrp: target cgroup
+ * @base_files: true if the base files should be added
+ * @subsys_mask: mask of the subsystem ids whose files should be added
+ */
+static int cgroup_populate_dir(struct cgroup *cgrp, bool base_files,
+                              unsigned long subsys_mask)
 {
        int err;
        struct cgroup_subsys *ss;
 
-       err = cgroup_addrm_files(cgrp, NULL, files, true);
-       if (err < 0)
-               return err;
+       if (base_files) {
+               err = cgroup_addrm_files(cgrp, NULL, files, true);
+               if (err < 0)
+                       return err;
+       }
 
        /* process cftsets of each subsystem */
        for_each_subsys(cgrp->root, ss) {
                struct cftype_set *set;
+               if (!test_bit(ss->subsys_id, &subsys_mask))
+                       continue;
 
                list_for_each_entry(set, &ss->cftsets, node)
                        cgroup_addrm_files(cgrp, ss, set->cfts, true);
@@ -3954,8 +4076,9 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
                set_bit(CGRP_CLONE_CHILDREN, &cgrp->flags);
 
        for_each_subsys(root, ss) {
-               struct cgroup_subsys_state *css = ss->create(cgrp);
+               struct cgroup_subsys_state *css;
 
+               css = ss->create(cgrp);
                if (IS_ERR(css)) {
                        err = PTR_ERR(css);
                        goto err_destroy;
@@ -3969,6 +4092,15 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
                /* At error, ->destroy() callback has to free assigned ID. */
                if (clone_children(parent) && ss->post_clone)
                        ss->post_clone(cgrp);
+
+               if (ss->broken_hierarchy && !ss->warned_broken_hierarchy &&
+                   parent->parent) {
+                       pr_warning("cgroup: %s (%d) created nested cgroup for controller \"%s\" which has incomplete hierarchy support. Nested cgroups may change behavior in the future.\n",
+                                  current->comm, current->pid, ss->name);
+                       if (!strcmp(ss->name, "memory"))
+                               pr_warning("cgroup: \"memory\" requires setting use_hierarchy to 1 on the root.\n");
+                       ss->warned_broken_hierarchy = true;
+               }
        }
 
        list_add(&cgrp->sibling, &cgrp->parent->children);
@@ -3988,7 +4120,7 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
 
        list_add_tail(&cgrp->allcg_node, &root->allcg_list);
 
-       err = cgroup_populate_dir(cgrp);
+       err = cgroup_populate_dir(cgrp, true, root->subsys_mask);
        /* If err < 0, we have a half-filled directory - oh well ;) */
 
        mutex_unlock(&cgroup_mutex);
@@ -4321,8 +4453,7 @@ int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss)
         * since cgroup_init_subsys will have already taken care of it.
         */
        if (ss->module == NULL) {
-               /* a few sanity checks */
-               BUG_ON(ss->subsys_id >= CGROUP_BUILTIN_SUBSYS_COUNT);
+               /* a sanity check */
                BUG_ON(subsys[ss->subsys_id] != ss);
                return 0;
        }
@@ -4330,24 +4461,8 @@ int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss)
        /* init base cftset */
        cgroup_init_cftsets(ss);
 
-       /*
-        * need to register a subsys id before anything else - for example,
-        * init_cgroup_css needs it.
-        */
        mutex_lock(&cgroup_mutex);
-       /* find the first empty slot in the array */
-       for (i = CGROUP_BUILTIN_SUBSYS_COUNT; i < CGROUP_SUBSYS_COUNT; i++) {
-               if (subsys[i] == NULL)
-                       break;
-       }
-       if (i == CGROUP_SUBSYS_COUNT) {
-               /* maximum number of subsystems already registered! */
-               mutex_unlock(&cgroup_mutex);
-               return -EBUSY;
-       }
-       /* assign ourselves the subsys_id */
-       ss->subsys_id = i;
-       subsys[i] = ss;
+       subsys[ss->subsys_id] = ss;
 
        /*
         * no ss->create seems to need anything important in the ss struct, so
@@ -4356,7 +4471,7 @@ int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss)
        css = ss->create(dummytop);
        if (IS_ERR(css)) {
                /* failure case - need to deassign the subsys[] slot. */
-               subsys[i] = NULL;
+               subsys[ss->subsys_id] = NULL;
                mutex_unlock(&cgroup_mutex);
                return PTR_ERR(css);
        }
@@ -4372,7 +4487,7 @@ int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss)
                if (ret) {
                        dummytop->subsys[ss->subsys_id] = NULL;
                        ss->destroy(dummytop);
-                       subsys[i] = NULL;
+                       subsys[ss->subsys_id] = NULL;
                        mutex_unlock(&cgroup_mutex);
                        return ret;
                }
@@ -4439,7 +4554,6 @@ void cgroup_unload_subsys(struct cgroup_subsys *ss)
 
        mutex_lock(&cgroup_mutex);
        /* deassign the subsys_id */
-       BUG_ON(ss->subsys_id < CGROUP_BUILTIN_SUBSYS_COUNT);
        subsys[ss->subsys_id] = NULL;
 
        /* remove subsystem from rootnode's list of subsystems */
@@ -4502,10 +4616,13 @@ int __init cgroup_init_early(void)
        for (i = 0; i < CSS_SET_TABLE_SIZE; i++)
                INIT_HLIST_HEAD(&css_set_table[i]);
 
-       /* at bootup time, we don't worry about modular subsystems */
-       for (i = 0; i < CGROUP_BUILTIN_SUBSYS_COUNT; i++) {
+       for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
                struct cgroup_subsys *ss = subsys[i];
 
+               /* at bootup time, we don't worry about modular subsystems */
+               if (!ss || ss->module)
+                       continue;
+
                BUG_ON(!ss->name);
                BUG_ON(strlen(ss->name) > MAX_CGROUP_TYPE_NAMELEN);
                BUG_ON(!ss->create);
@@ -4538,9 +4655,12 @@ int __init cgroup_init(void)
        if (err)
                return err;
 
-       /* at bootup time, we don't worry about modular subsystems */
-       for (i = 0; i < CGROUP_BUILTIN_SUBSYS_COUNT; i++) {
+       for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
                struct cgroup_subsys *ss = subsys[i];
+
+               /* at bootup time, we don't worry about modular subsystems */
+               if (!ss || ss->module)
+                       continue;
                if (!ss->early_init)
                        cgroup_init_subsys(ss);
                if (ss->use_id)
@@ -4735,13 +4855,16 @@ void cgroup_fork_callbacks(struct task_struct *child)
 {
        if (need_forkexit_callback) {
                int i;
-               /*
-                * forkexit callbacks are only supported for builtin
-                * subsystems, and the builtin section of the subsys array is
-                * immutable, so we don't need to lock the subsys array here.
-                */
-               for (i = 0; i < CGROUP_BUILTIN_SUBSYS_COUNT; i++) {
+               for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
                        struct cgroup_subsys *ss = subsys[i];
+
+                       /*
+                        * forkexit callbacks are only supported for
+                        * builtin subsystems.
+                        */
+                       if (!ss || ss->module)
+                               continue;
+
                        if (ss->fork)
                                ss->fork(child);
                }
@@ -4846,12 +4969,13 @@ void cgroup_exit(struct task_struct *tsk, int run_callbacks)
        tsk->cgroups = &init_css_set;
 
        if (run_callbacks && need_forkexit_callback) {
-               /*
-                * modular subsystems can't use callbacks, so no need to lock
-                * the subsys array
-                */
-               for (i = 0; i < CGROUP_BUILTIN_SUBSYS_COUNT; i++) {
+               for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
                        struct cgroup_subsys *ss = subsys[i];
+
+                       /* modular subsystems can't use callbacks */
+                       if (!ss || ss->module)
+                               continue;
+
                        if (ss->exit) {
                                struct cgroup *old_cgrp =
                                        rcu_dereference_raw(cg->subsys[i])->cgroup;
@@ -5037,13 +5161,17 @@ static int __init cgroup_disable(char *str)
        while ((token = strsep(&str, ",")) != NULL) {
                if (!*token)
                        continue;
-               /*
-                * cgroup_disable, being at boot time, can't know about module
-                * subsystems, so we don't worry about them.
-                */
-               for (i = 0; i < CGROUP_BUILTIN_SUBSYS_COUNT; i++) {
+               for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
                        struct cgroup_subsys *ss = subsys[i];
 
+                       /*
+                        * cgroup_disable, being at boot time, can't
+                        * know about module subsystems, so we don't
+                        * worry about them.
+                        */
+                       if (!ss || ss->module)
+                               continue;
+
                        if (!strcmp(token, ss->name)) {
                                ss->disabled = 1;
                                printk(KERN_INFO "Disabling %s control group"
index 3649fc6b3eaa9a2aa78b6a0ef2af73d1a9fd9775..b1724ce98981d25e76980193b7e3984c9f7717cd 100644 (file)
@@ -373,4 +373,12 @@ struct cgroup_subsys freezer_subsys = {
        .can_attach     = freezer_can_attach,
        .fork           = freezer_fork,
        .base_cftypes   = files,
+
+       /*
+        * freezer subsys doesn't handle hierarchy at all.  Frozen state
+        * should be inherited through the hierarchy - if a parent is
+        * frozen, all its children should be frozen.  Fix it and remove
+        * the following.
+        */
+       .broken_hierarchy = true,
 };
index de728ac50d821b9f38340534a4ba6202137d55a2..48cea3da6d052c77bdfe3b5b8e766f0a3033c28c 100644 (file)
@@ -799,9 +799,15 @@ static void dump_invalid_creds(const struct cred *cred, const char *label,
               atomic_read(&cred->usage),
               read_cred_subscribers(cred));
        printk(KERN_ERR "CRED: ->*uid = { %d,%d,%d,%d }\n",
-              cred->uid, cred->euid, cred->suid, cred->fsuid);
+               from_kuid_munged(&init_user_ns, cred->uid),
+               from_kuid_munged(&init_user_ns, cred->euid),
+               from_kuid_munged(&init_user_ns, cred->suid),
+               from_kuid_munged(&init_user_ns, cred->fsuid));
        printk(KERN_ERR "CRED: ->*gid = { %d,%d,%d,%d }\n",
-              cred->gid, cred->egid, cred->sgid, cred->fsgid);
+               from_kgid_munged(&init_user_ns, cred->gid),
+               from_kgid_munged(&init_user_ns, cred->egid),
+               from_kgid_munged(&init_user_ns, cred->sgid),
+               from_kgid_munged(&init_user_ns, cred->fsgid));
 #ifdef CONFIG_SECURITY
        printk(KERN_ERR "CRED: ->security is %p\n", cred->security);
        if ((unsigned long) cred->security >= PAGE_SIZE &&
index 7b9df353ba1b85e95b220572e5d847b82dee5541..f16f3c58f11ad008d145edc1517a57eccfb03d65 100644 (file)
@@ -468,14 +468,13 @@ static inline int perf_cgroup_connect(int fd, struct perf_event *event,
 {
        struct perf_cgroup *cgrp;
        struct cgroup_subsys_state *css;
-       struct file *file;
-       int ret = 0, fput_needed;
+       struct fd f = fdget(fd);
+       int ret = 0;
 
-       file = fget_light(fd, &fput_needed);
-       if (!file)
+       if (!f.file)
                return -EBADF;
 
-       css = cgroup_css_from_dir(file, perf_subsys_id);
+       css = cgroup_css_from_dir(f.file, perf_subsys_id);
        if (IS_ERR(css)) {
                ret = PTR_ERR(css);
                goto out;
@@ -501,7 +500,7 @@ static inline int perf_cgroup_connect(int fd, struct perf_event *event,
                ret = -EINVAL;
        }
 out:
-       fput_light(file, fput_needed);
+       fdput(f);
        return ret;
 }
 
@@ -3234,21 +3233,18 @@ unlock:
 
 static const struct file_operations perf_fops;
 
-static struct file *perf_fget_light(int fd, int *fput_needed)
+static inline int perf_fget_light(int fd, struct fd *p)
 {
-       struct file *file;
-
-       file = fget_light(fd, fput_needed);
-       if (!file)
-               return ERR_PTR(-EBADF);
+       struct fd f = fdget(fd);
+       if (!f.file)
+               return -EBADF;
 
-       if (file->f_op != &perf_fops) {
-               fput_light(file, *fput_needed);
-               *fput_needed = 0;
-               return ERR_PTR(-EBADF);
+       if (f.file->f_op != &perf_fops) {
+               fdput(f);
+               return -EBADF;
        }
-
-       return file;
+       *p = f;
+       return 0;
 }
 
 static int perf_event_set_output(struct perf_event *event,
@@ -3280,22 +3276,19 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 
        case PERF_EVENT_IOC_SET_OUTPUT:
        {
-               struct file *output_file = NULL;
-               struct perf_event *output_event = NULL;
-               int fput_needed = 0;
                int ret;
-
                if (arg != -1) {
-                       output_file = perf_fget_light(arg, &fput_needed);
-                       if (IS_ERR(output_file))
-                               return PTR_ERR(output_file);
-                       output_event = output_file->private_data;
+                       struct perf_event *output_event;
+                       struct fd output;
+                       ret = perf_fget_light(arg, &output);
+                       if (ret)
+                               return ret;
+                       output_event = output.file->private_data;
+                       ret = perf_event_set_output(event, output_event);
+                       fdput(output);
+               } else {
+                       ret = perf_event_set_output(event, NULL);
                }
-
-               ret = perf_event_set_output(event, output_event);
-               if (output_event)
-                       fput_light(output_file, fput_needed);
-
                return ret;
        }
 
@@ -6443,12 +6436,11 @@ SYSCALL_DEFINE5(perf_event_open,
        struct perf_event_attr attr;
        struct perf_event_context *ctx;
        struct file *event_file = NULL;
-       struct file *group_file = NULL;
+       struct fd group = {NULL, 0};
        struct task_struct *task = NULL;
        struct pmu *pmu;
        int event_fd;
        int move_group = 0;
-       int fput_needed = 0;
        int err;
 
        /* for future expandability... */
@@ -6478,17 +6470,15 @@ SYSCALL_DEFINE5(perf_event_open,
        if ((flags & PERF_FLAG_PID_CGROUP) && (pid == -1 || cpu == -1))
                return -EINVAL;
 
-       event_fd = get_unused_fd_flags(O_RDWR);
+       event_fd = get_unused_fd();
        if (event_fd < 0)
                return event_fd;
 
        if (group_fd != -1) {
-               group_file = perf_fget_light(group_fd, &fput_needed);
-               if (IS_ERR(group_file)) {
-                       err = PTR_ERR(group_file);
+               err = perf_fget_light(group_fd, &group);
+               if (err)
                        goto err_fd;
-               }
-               group_leader = group_file->private_data;
+               group_leader = group.file->private_data;
                if (flags & PERF_FLAG_FD_OUTPUT)
                        output_event = group_leader;
                if (flags & PERF_FLAG_FD_NO_GROUP)
@@ -6664,7 +6654,7 @@ SYSCALL_DEFINE5(perf_event_open,
         * of the group leader will find the pointer to itself in
         * perf_group_detach().
         */
-       fput_light(group_file, fput_needed);
+       fdput(group);
        fd_install(event_fd, event_file);
        return event_fd;
 
@@ -6678,7 +6668,7 @@ err_task:
        if (task)
                put_task_struct(task);
 err_group_fd:
-       fput_light(group_file, fput_needed);
+       fdput(group);
 err_fd:
        put_unused_fd(event_fd);
        return err;
@@ -7503,5 +7493,12 @@ struct cgroup_subsys perf_subsys = {
        .destroy        = perf_cgroup_destroy,
        .exit           = perf_cgroup_exit,
        .attach         = perf_cgroup_attach,
+
+       /*
+        * perf_event cgroup doesn't handle nesting correctly.
+        * ctx->nr_cgroups adjustments should be propagated through the
+        * cgroup hierarchy.  Fix it and remove the following.
+        */
+       .broken_hierarchy = true,
 };
 #endif /* CONFIG_CGROUP_PERF */
index f65345f9e5bbe2aa06b69db5b91b1c4002857c0f..346616c0092cfe3993fbcfb9b7f36d0d02986fb2 100644 (file)
@@ -457,108 +457,13 @@ void daemonize(const char *name, ...)
        /* Become as one with the init task */
 
        daemonize_fs_struct();
-       exit_files(current);
-       current->files = init_task.files;
-       atomic_inc(&current->files->count);
+       daemonize_descriptors();
 
        reparent_to_kthreadd();
 }
 
 EXPORT_SYMBOL(daemonize);
 
-static void close_files(struct files_struct * files)
-{
-       int i, j;
-       struct fdtable *fdt;
-
-       j = 0;
-
-       /*
-        * It is safe to dereference the fd table without RCU or
-        * ->file_lock because this is the last reference to the
-        * files structure.  But use RCU to shut RCU-lockdep up.
-        */
-       rcu_read_lock();
-       fdt = files_fdtable(files);
-       rcu_read_unlock();
-       for (;;) {
-               unsigned long set;
-               i = j * BITS_PER_LONG;
-               if (i >= fdt->max_fds)
-                       break;
-               set = fdt->open_fds[j++];
-               while (set) {
-                       if (set & 1) {
-                               struct file * file = xchg(&fdt->fd[i], NULL);
-                               if (file) {
-                                       filp_close(file, files);
-                                       cond_resched();
-                               }
-                       }
-                       i++;
-                       set >>= 1;
-               }
-       }
-}
-
-struct files_struct *get_files_struct(struct task_struct *task)
-{
-       struct files_struct *files;
-
-       task_lock(task);
-       files = task->files;
-       if (files)
-               atomic_inc(&files->count);
-       task_unlock(task);
-
-       return files;
-}
-
-void put_files_struct(struct files_struct *files)
-{
-       struct fdtable *fdt;
-
-       if (atomic_dec_and_test(&files->count)) {
-               close_files(files);
-               /*
-                * Free the fd and fdset arrays if we expanded them.
-                * If the fdtable was embedded, pass files for freeing
-                * at the end of the RCU grace period. Otherwise,
-                * you can free files immediately.
-                */
-               rcu_read_lock();
-               fdt = files_fdtable(files);
-               if (fdt != &files->fdtab)
-                       kmem_cache_free(files_cachep, files);
-               free_fdtable(fdt);
-               rcu_read_unlock();
-       }
-}
-
-void reset_files_struct(struct files_struct *files)
-{
-       struct task_struct *tsk = current;
-       struct files_struct *old;
-
-       old = tsk->files;
-       task_lock(tsk);
-       tsk->files = files;
-       task_unlock(tsk);
-       put_files_struct(old);
-}
-
-void exit_files(struct task_struct *tsk)
-{
-       struct files_struct * files = tsk->files;
-
-       if (files) {
-               task_lock(tsk);
-               tsk->files = NULL;
-               task_unlock(tsk);
-               put_files_struct(files);
-       }
-}
-
 #ifdef CONFIG_MM_OWNER
 /*
  * A task is exiting.   If it owned this mm, find a new owner for the mm.
@@ -1046,6 +951,9 @@ void do_exit(long code)
        if (tsk->splice_pipe)
                __free_pipe_info(tsk->splice_pipe);
 
+       if (tsk->task_frag.page)
+               put_page(tsk->task_frag.page);
+
        validate_creds_for_do_exit(tsk);
 
        preempt_disable();
index 5a0e74d89a5aa2e459e42679d6105fda07b7e8bd..a2b1efc2092809904024e33e8bf0405d0d73de12 100644 (file)
@@ -330,6 +330,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
        tsk->btrace_seq = 0;
 #endif
        tsk->splice_pipe = NULL;
+       tsk->task_frag.page = NULL;
 
        account_kernel_stack(ti, 1);
 
index e86b291ad83467d9b828691ab146b962931104d6..aebd4f5aaf41ffaf59e4bf2a12d18600705d08e9 100644 (file)
@@ -479,6 +479,7 @@ pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
        }
        return nr;
 }
+EXPORT_SYMBOL_GPL(pid_nr_ns);
 
 pid_t pid_vnr(struct pid *pid)
 {
index 6144bab8fd8eeed14327c116bea39ba0061cbdb5..478bad2745e3a8357fcc2f9a56059d167bf67e49 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/slab.h>
 #include <linux/proc_fs.h>
 #include <linux/reboot.h>
+#include <linux/export.h>
 
 #define BITS_PER_PAGE          (PAGE_SIZE*8)
 
@@ -144,6 +145,7 @@ void free_pid_ns(struct kref *kref)
        if (parent != NULL)
                put_pid_ns(parent);
 }
+EXPORT_SYMBOL_GPL(free_pid_ns);
 
 void zap_pid_ns_processes(struct pid_namespace *pid_ns)
 {
index a70518c9d82f42c57eb400208ce95dd4dd2aafaa..5dfdc9ea180b8ac497ed915c45b809fb65799261 100644 (file)
@@ -263,6 +263,10 @@ config PM_GENERIC_DOMAINS
        bool
        depends on PM
 
+config PM_GENERIC_DOMAINS_SLEEP
+       def_bool y
+       depends on PM_SLEEP && PM_GENERIC_DOMAINS
+
 config PM_GENERIC_DOMAINS_RUNTIME
        def_bool y
        depends on PM_RUNTIME && PM_GENERIC_DOMAINS
index d52359374e8501e8c1f4e8a88427468249509fb3..68197a4e8fc95e9f9bac2c609d0b69909d8961f2 100644 (file)
@@ -37,7 +37,7 @@ static struct sysrq_key_op    sysrq_poweroff_op = {
        .enable_mask    = SYSRQ_ENABLE_BOOT,
 };
 
-static int pm_sysrq_init(void)
+static int __init pm_sysrq_init(void)
 {
        register_sysrq_key('o', &sysrq_poweroff_op);
        return 0;
index 19db29f67558fef712764d78d7feccc0318ed650..87da817f9e132204add38dc73685962081de9dd8 100644 (file)
@@ -79,7 +79,7 @@ static int try_to_freeze_tasks(bool user_only)
 
                /*
                 * We need to retry, but first give the freezing tasks some
-                * time to enter the regrigerator.
+                * time to enter the refrigerator.
                 */
                msleep(10);
        }
index 6a031e684026f99507946c371544cbae34a35b48..846bd42c7ed179673846817da8bb2e69078ce673 100644 (file)
@@ -139,6 +139,7 @@ static inline int pm_qos_get_value(struct pm_qos_constraints *c)
        default:
                /* runtime check for not using enum */
                BUG();
+               return PM_QOS_DEFAULT_VALUE;
        }
 }
 
index a232bb59d93fa220e3720ce3d068b44d96ca24f3..1f5e55dda955544ca4a3f1f944e967f6b561bea2 100644 (file)
@@ -180,7 +180,8 @@ static int ptrace_has_cap(struct user_namespace *ns, unsigned int mode)
                return has_ns_capability(current, ns, CAP_SYS_PTRACE);
 }
 
-int __ptrace_may_access(struct task_struct *task, unsigned int mode)
+/* Returns 0 on success, -errno on denial. */
+static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
 {
        const struct cred *cred = current_cred(), *tcred;
 
index 2095be3318d519dede48a5fcfaa5b51883cfd139..97c465ebd8444cebc0f3a6e44ae0fdb04b552a2e 100644 (file)
@@ -379,7 +379,7 @@ void call_srcu(struct srcu_struct *sp, struct rcu_head *head,
        rcu_batch_queue(&sp->batch_queue, head);
        if (!sp->running) {
                sp->running = true;
-               queue_delayed_work(system_nrt_wq, &sp->work, 0);
+               schedule_delayed_work(&sp->work, 0);
        }
        spin_unlock_irqrestore(&sp->queue_lock, flags);
 }
@@ -631,7 +631,7 @@ static void srcu_reschedule(struct srcu_struct *sp)
        }
 
        if (pending)
-               queue_delayed_work(system_nrt_wq, &sp->work, SRCU_INTERVAL);
+               schedule_delayed_work(&sp->work, SRCU_INTERVAL);
 }
 
 /*
index 241507f23eca097871bec58f5976476f352f3d75..f9492284e5d23b76862dd87dcf458fc80a7072bb 100644 (file)
@@ -1788,15 +1788,15 @@ SYSCALL_DEFINE1(umask, int, mask)
 #ifdef CONFIG_CHECKPOINT_RESTORE
 static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
 {
-       struct file *exe_file;
+       struct fd exe;
        struct dentry *dentry;
        int err;
 
-       exe_file = fget(fd);
-       if (!exe_file)
+       exe = fdget(fd);
+       if (!exe.file)
                return -EBADF;
 
-       dentry = exe_file->f_path.dentry;
+       dentry = exe.file->f_path.dentry;
 
        /*
         * Because the original mm->exe_file points to executable file, make
@@ -1805,7 +1805,7 @@ static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
         */
        err = -EACCES;
        if (!S_ISREG(dentry->d_inode->i_mode)   ||
-           exe_file->f_path.mnt->mnt_flags & MNT_NOEXEC)
+           exe.file->f_path.mnt->mnt_flags & MNT_NOEXEC)
                goto exit;
 
        err = inode_permission(dentry->d_inode, MAY_EXEC);
@@ -1839,12 +1839,12 @@ static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
                goto exit_unlock;
 
        err = 0;
-       set_mm_exe_file(mm, exe_file);
+       set_mm_exe_file(mm, exe.file);  /* this grabs a reference to exe.file */
 exit_unlock:
        up_write(&mm->mmap_sem);
 
 exit:
-       fput(exe_file);
+       fdput(exe);
        return err;
 }
 
index d0a32796550fcdf81e40d18cda31a3b353d332d5..610f0838d555393a08de2ac740d31fa05d8bafa9 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/cgroup.h>
 #include <linux/fs.h>
 #include <linux/file.h>
+#include <linux/pid_namespace.h>
 #include <net/genetlink.h>
 #include <linux/atomic.h>
 
@@ -174,7 +175,9 @@ static void send_cpu_listeners(struct sk_buff *skb,
        up_write(&listeners->sem);
 }
 
-static void fill_stats(struct task_struct *tsk, struct taskstats *stats)
+static void fill_stats(struct user_namespace *user_ns,
+                      struct pid_namespace *pid_ns,
+                      struct task_struct *tsk, struct taskstats *stats)
 {
        memset(stats, 0, sizeof(*stats));
        /*
@@ -190,7 +193,7 @@ static void fill_stats(struct task_struct *tsk, struct taskstats *stats)
        stats->version = TASKSTATS_VERSION;
        stats->nvcsw = tsk->nvcsw;
        stats->nivcsw = tsk->nivcsw;
-       bacct_add_tsk(stats, tsk);
+       bacct_add_tsk(user_ns, pid_ns, stats, tsk);
 
        /* fill in extended acct fields */
        xacct_add_tsk(stats, tsk);
@@ -207,7 +210,7 @@ static int fill_stats_for_pid(pid_t pid, struct taskstats *stats)
        rcu_read_unlock();
        if (!tsk)
                return -ESRCH;
-       fill_stats(tsk, stats);
+       fill_stats(current_user_ns(), task_active_pid_ns(current), tsk, stats);
        put_task_struct(tsk);
        return 0;
 }
@@ -291,6 +294,12 @@ static int add_del_listener(pid_t pid, const struct cpumask *mask, int isadd)
        if (!cpumask_subset(mask, cpu_possible_mask))
                return -EINVAL;
 
+       if (current_user_ns() != &init_user_ns)
+               return -EINVAL;
+
+       if (task_active_pid_ns(current) != &init_pid_ns)
+               return -EINVAL;
+
        if (isadd == REGISTER) {
                for_each_cpu(cpu, mask) {
                        s = kmalloc_node(sizeof(struct listener),
@@ -415,16 +424,15 @@ static int cgroupstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
        struct nlattr *na;
        size_t size;
        u32 fd;
-       struct file *file;
-       int fput_needed;
+       struct fd f;
 
        na = info->attrs[CGROUPSTATS_CMD_ATTR_FD];
        if (!na)
                return -EINVAL;
 
        fd = nla_get_u32(info->attrs[CGROUPSTATS_CMD_ATTR_FD]);
-       file = fget_light(fd, &fput_needed);
-       if (!file)
+       f = fdget(fd);
+       if (!f.file)
                return 0;
 
        size = nla_total_size(sizeof(struct cgroupstats));
@@ -444,7 +452,7 @@ static int cgroupstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
        stats = nla_data(na);
        memset(stats, 0, sizeof(*stats));
 
-       rc = cgroupstats_build(stats, file->f_dentry);
+       rc = cgroupstats_build(stats, f.file->f_dentry);
        if (rc < 0) {
                nlmsg_free(rep_skb);
                goto err;
@@ -453,7 +461,7 @@ static int cgroupstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
        rc = send_reply(rep_skb, info);
 
 err:
-       fput_light(file, fput_needed);
+       fdput(f);
        return rc;
 }
 
@@ -467,7 +475,7 @@ static int cmd_attr_register_cpumask(struct genl_info *info)
        rc = parse(info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK], mask);
        if (rc < 0)
                goto out;
-       rc = add_del_listener(info->snd_pid, mask, REGISTER);
+       rc = add_del_listener(info->snd_portid, mask, REGISTER);
 out:
        free_cpumask_var(mask);
        return rc;
@@ -483,7 +491,7 @@ static int cmd_attr_deregister_cpumask(struct genl_info *info)
        rc = parse(info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK], mask);
        if (rc < 0)
                goto out;
-       rc = add_del_listener(info->snd_pid, mask, DEREGISTER);
+       rc = add_del_listener(info->snd_portid, mask, DEREGISTER);
 out:
        free_cpumask_var(mask);
        return rc;
@@ -631,11 +639,12 @@ void taskstats_exit(struct task_struct *tsk, int group_dead)
        if (rc < 0)
                return;
 
-       stats = mk_reply(rep_skb, TASKSTATS_TYPE_PID, tsk->pid);
+       stats = mk_reply(rep_skb, TASKSTATS_TYPE_PID,
+                        task_pid_nr_ns(tsk, &init_pid_ns));
        if (!stats)
                goto err;
 
-       fill_stats(tsk, stats);
+       fill_stats(&init_user_ns, &init_pid_ns, tsk, stats);
 
        /*
         * Doesn't matter if tsk is the leader or the last group member leaving
@@ -643,7 +652,8 @@ void taskstats_exit(struct task_struct *tsk, int group_dead)
        if (!is_thread_group || !group_dead)
                goto send;
 
-       stats = mk_reply(rep_skb, TASKSTATS_TYPE_TGID, tsk->tgid);
+       stats = mk_reply(rep_skb, TASKSTATS_TYPE_TGID,
+                        task_tgid_nr_ns(tsk, &init_pid_ns));
        if (!stats)
                goto err;
 
index 7e1ce012a851351c10853fe732e714afa2bb8a5c..30b6de0d977c4734eb479d7489f61e56b35afaa8 100644 (file)
@@ -397,6 +397,30 @@ void clockevents_exchange_device(struct clock_event_device *old,
        local_irq_restore(flags);
 }
 
+/**
+ * clockevents_suspend - suspend clock devices
+ */
+void clockevents_suspend(void)
+{
+       struct clock_event_device *dev;
+
+       list_for_each_entry_reverse(dev, &clockevent_devices, list)
+               if (dev->suspend)
+                       dev->suspend(dev);
+}
+
+/**
+ * clockevents_resume - resume clock devices
+ */
+void clockevents_resume(void)
+{
+       struct clock_event_device *dev;
+
+       list_for_each_entry(dev, &clockevent_devices, list)
+               if (dev->resume)
+                       dev->resume(dev);
+}
+
 #ifdef CONFIG_GENERIC_CLOCKEVENTS
 /**
  * clockevents_notify - notification about relevant events
index d3b91e75cecd0df7895033aff87f588c6ba2d4df..5ce06a3fa91e01af6a3cfc7fbbc828af2824fae8 100644 (file)
@@ -776,6 +776,7 @@ static void timekeeping_resume(void)
 
        read_persistent_clock(&ts);
 
+       clockevents_resume();
        clocksource_resume();
 
        write_seqlock_irqsave(&tk->lock, flags);
@@ -835,6 +836,7 @@ static int timekeeping_suspend(void)
 
        clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
        clocksource_suspend();
+       clockevents_suspend();
 
        return 0;
 }
index 1ec5c1dab6295d921f7c7763f799fbca5e71bc60..cdcb59450b491c2af7acf64bda54a82eaf0c16ab 100644 (file)
@@ -2061,7 +2061,8 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter)
        seq_puts(m, "#    -----------------\n");
        seq_printf(m, "#    | task: %.16s-%d "
                   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
-                  data->comm, data->pid, data->uid, data->nice,
+                  data->comm, data->pid,
+                  from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
                   data->policy, data->rt_priority);
        seq_puts(m, "#    -----------------\n");
 
index 63a2da0b9a6edf0881cc6f536b014daf11b8ab70..c15f528c1af4e4ee44b308bddf1bb4b83195d4c2 100644 (file)
@@ -147,7 +147,7 @@ struct trace_array_cpu {
        unsigned long           skipped_entries;
        cycle_t                 preempt_timestamp;
        pid_t                   pid;
-       uid_t                   uid;
+       kuid_t                  uid;
        char                    comm[TASK_COMM_LEN];
 };
 
index 23b4d784ebdd3e448ade6cb425e6ab3ba4e865fb..625df0b44690a67a1ca6d5f6e7f264fc1ac28fb5 100644 (file)
@@ -26,7 +26,9 @@
 /*
  * fill in basic accounting fields
  */
-void bacct_add_tsk(struct taskstats *stats, struct task_struct *tsk)
+void bacct_add_tsk(struct user_namespace *user_ns,
+                  struct pid_namespace *pid_ns,
+                  struct taskstats *stats, struct task_struct *tsk)
 {
        const struct cred *tcred;
        struct timespec uptime, ts;
@@ -55,13 +57,13 @@ void bacct_add_tsk(struct taskstats *stats, struct task_struct *tsk)
                stats->ac_flag |= AXSIG;
        stats->ac_nice   = task_nice(tsk);
        stats->ac_sched  = tsk->policy;
-       stats->ac_pid    = tsk->pid;
+       stats->ac_pid    = task_pid_nr_ns(tsk, pid_ns);
        rcu_read_lock();
        tcred = __task_cred(tsk);
-       stats->ac_uid    = tcred->uid;
-       stats->ac_gid    = tcred->gid;
+       stats->ac_uid    = from_kuid_munged(user_ns, tcred->uid);
+       stats->ac_gid    = from_kgid_munged(user_ns, tcred->gid);
        stats->ac_ppid   = pid_alive(tsk) ?
-                               rcu_dereference(tsk->real_parent)->tgid : 0;
+               task_tgid_nr_ns(rcu_dereference(tsk->real_parent), pid_ns) : 0;
        rcu_read_unlock();
        stats->ac_utime = cputime_to_usecs(tsk->utime);
        stats->ac_stime = cputime_to_usecs(tsk->stime);
index b815fefbe76fa4743bd3081af6c6e469dff2f4db..750acffbe9ec5b20191607040c7ec8e840158253 100644 (file)
@@ -38,6 +38,14 @@ struct user_namespace init_user_ns = {
                        .count = 4294967295U,
                },
        },
+       .projid_map = {
+               .nr_extents = 1,
+               .extent[0] = {
+                       .first = 0,
+                       .lower_first = 0,
+                       .count = 4294967295U,
+               },
+       },
        .kref = {
                .refcount       = ATOMIC_INIT(3),
        },
index 86602316422d203fffb8c530cbf44f825199a5f7..456a6b9fba34f3104bc9db64119acd9d3fdfa8df 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/fs.h>
 #include <linux/uaccess.h>
 #include <linux/ctype.h>
+#include <linux/projid.h>
 
 static struct kmem_cache *user_ns_cachep __read_mostly;
 
@@ -295,6 +296,75 @@ gid_t from_kgid_munged(struct user_namespace *targ, kgid_t kgid)
 }
 EXPORT_SYMBOL(from_kgid_munged);
 
+/**
+ *     make_kprojid - Map a user-namespace projid pair into a kprojid.
+ *     @ns:  User namespace that the projid is in
+ *     @projid: Project identifier
+ *
+ *     Maps a user-namespace uid pair into a kernel internal kuid,
+ *     and returns that kuid.
+ *
+ *     When there is no mapping defined for the user-namespace projid
+ *     pair INVALID_PROJID is returned.  Callers are expected to test
+ *     for and handle handle INVALID_PROJID being returned.  INVALID_PROJID
+ *     may be tested for using projid_valid().
+ */
+kprojid_t make_kprojid(struct user_namespace *ns, projid_t projid)
+{
+       /* Map the uid to a global kernel uid */
+       return KPROJIDT_INIT(map_id_down(&ns->projid_map, projid));
+}
+EXPORT_SYMBOL(make_kprojid);
+
+/**
+ *     from_kprojid - Create a projid from a kprojid user-namespace pair.
+ *     @targ: The user namespace we want a projid in.
+ *     @kprojid: The kernel internal project identifier to start with.
+ *
+ *     Map @kprojid into the user-namespace specified by @targ and
+ *     return the resulting projid.
+ *
+ *     There is always a mapping into the initial user_namespace.
+ *
+ *     If @kprojid has no mapping in @targ (projid_t)-1 is returned.
+ */
+projid_t from_kprojid(struct user_namespace *targ, kprojid_t kprojid)
+{
+       /* Map the uid from a global kernel uid */
+       return map_id_up(&targ->projid_map, __kprojid_val(kprojid));
+}
+EXPORT_SYMBOL(from_kprojid);
+
+/**
+ *     from_kprojid_munged - Create a projiid from a kprojid user-namespace pair.
+ *     @targ: The user namespace we want a projid in.
+ *     @kprojid: The kernel internal projid to start with.
+ *
+ *     Map @kprojid into the user-namespace specified by @targ and
+ *     return the resulting projid.
+ *
+ *     There is always a mapping into the initial user_namespace.
+ *
+ *     Unlike from_kprojid from_kprojid_munged never fails and always
+ *     returns a valid projid.  This makes from_kprojid_munged
+ *     appropriate for use in syscalls like stat and where
+ *     failing the system call and failing to provide a valid projid are
+ *     not an options.
+ *
+ *     If @kprojid has no mapping in @targ OVERFLOW_PROJID is returned.
+ */
+projid_t from_kprojid_munged(struct user_namespace *targ, kprojid_t kprojid)
+{
+       projid_t projid;
+       projid = from_kprojid(targ, kprojid);
+
+       if (projid == (projid_t) -1)
+               projid = OVERFLOW_PROJID;
+       return projid;
+}
+EXPORT_SYMBOL(from_kprojid_munged);
+
+
 static int uid_m_show(struct seq_file *seq, void *v)
 {
        struct user_namespace *ns = seq->private;
@@ -337,6 +407,27 @@ static int gid_m_show(struct seq_file *seq, void *v)
        return 0;
 }
 
+static int projid_m_show(struct seq_file *seq, void *v)
+{
+       struct user_namespace *ns = seq->private;
+       struct uid_gid_extent *extent = v;
+       struct user_namespace *lower_ns;
+       projid_t lower;
+
+       lower_ns = seq_user_ns(seq);
+       if ((lower_ns == ns) && lower_ns->parent)
+               lower_ns = lower_ns->parent;
+
+       lower = from_kprojid(lower_ns, KPROJIDT_INIT(extent->lower_first));
+
+       seq_printf(seq, "%10u %10u %10u\n",
+               extent->first,
+               lower,
+               extent->count);
+
+       return 0;
+}
+
 static void *m_start(struct seq_file *seq, loff_t *ppos, struct uid_gid_map *map)
 {
        struct uid_gid_extent *extent = NULL;
@@ -362,6 +453,13 @@ static void *gid_m_start(struct seq_file *seq, loff_t *ppos)
        return m_start(seq, ppos, &ns->gid_map);
 }
 
+static void *projid_m_start(struct seq_file *seq, loff_t *ppos)
+{
+       struct user_namespace *ns = seq->private;
+
+       return m_start(seq, ppos, &ns->projid_map);
+}
+
 static void *m_next(struct seq_file *seq, void *v, loff_t *pos)
 {
        (*pos)++;
@@ -387,6 +485,13 @@ struct seq_operations proc_gid_seq_operations = {
        .show = gid_m_show,
 };
 
+struct seq_operations proc_projid_seq_operations = {
+       .start = projid_m_start,
+       .stop = m_stop,
+       .next = m_next,
+       .show = projid_m_show,
+};
+
 static DEFINE_MUTEX(id_map_mutex);
 
 static ssize_t map_write(struct file *file, const char __user *buf,
@@ -434,7 +539,7 @@ static ssize_t map_write(struct file *file, const char __user *buf,
        /* Require the appropriate privilege CAP_SETUID or CAP_SETGID
         * over the user namespace in order to set the id mapping.
         */
-       if (!ns_capable(ns, cap_setid))
+       if (cap_valid(cap_setid) && !ns_capable(ns, cap_setid))
                goto out;
 
        /* Get a buffer */
@@ -584,9 +689,30 @@ ssize_t proc_gid_map_write(struct file *file, const char __user *buf, size_t siz
                         &ns->gid_map, &ns->parent->gid_map);
 }
 
+ssize_t proc_projid_map_write(struct file *file, const char __user *buf, size_t size, loff_t *ppos)
+{
+       struct seq_file *seq = file->private_data;
+       struct user_namespace *ns = seq->private;
+       struct user_namespace *seq_ns = seq_user_ns(seq);
+
+       if (!ns->parent)
+               return -EPERM;
+
+       if ((seq_ns != ns) && (seq_ns != ns->parent))
+               return -EPERM;
+
+       /* Anyone can set any valid project id no capability needed */
+       return map_write(file, buf, size, ppos, -1,
+                        &ns->projid_map, &ns->parent->projid_map);
+}
+
 static bool new_idmap_permitted(struct user_namespace *ns, int cap_setid,
                                struct uid_gid_map *new_map)
 {
+       /* Allow anyone to set a mapping that doesn't require privilege */
+       if (!cap_valid(cap_setid))
+               return true;
+
        /* Allow the specified ids if we have the appropriate capability
         * (CAP_SETUID or CAP_SETGID) over the parent user namespace.
         */
index 3c5a79e2134cac6f404673093a84df5c8b371d80..d951daa0ca9a81b21f6b1387b140f537b53c27c1 100644 (file)
@@ -58,7 +58,7 @@ enum {
         * be executing on any CPU.  The gcwq behaves as an unbound one.
         *
         * Note that DISASSOCIATED can be flipped only while holding
-        * managership of all pools on the gcwq to avoid changing binding
+        * assoc_mutex of all pools on the gcwq to avoid changing binding
         * state while create_worker() is in progress.
         */
        GCWQ_DISASSOCIATED      = 1 << 0,       /* cpu can't serve workers */
@@ -73,11 +73,10 @@ enum {
        WORKER_DIE              = 1 << 1,       /* die die die */
        WORKER_IDLE             = 1 << 2,       /* is idle */
        WORKER_PREP             = 1 << 3,       /* preparing to run works */
-       WORKER_REBIND           = 1 << 5,       /* mom is home, come back */
        WORKER_CPU_INTENSIVE    = 1 << 6,       /* cpu intensive */
        WORKER_UNBOUND          = 1 << 7,       /* worker is unbound */
 
-       WORKER_NOT_RUNNING      = WORKER_PREP | WORKER_REBIND | WORKER_UNBOUND |
+       WORKER_NOT_RUNNING      = WORKER_PREP | WORKER_UNBOUND |
                                  WORKER_CPU_INTENSIVE,
 
        NR_WORKER_POOLS         = 2,            /* # worker pools per gcwq */
@@ -126,7 +125,6 @@ enum {
 
 struct global_cwq;
 struct worker_pool;
-struct idle_rebind;
 
 /*
  * The poor guys doing the actual heavy lifting.  All on-duty workers
@@ -150,7 +148,6 @@ struct worker {
        int                     id;             /* I: worker id */
 
        /* for rebinding worker to CPU */
-       struct idle_rebind      *idle_rebind;   /* L: for idle worker */
        struct work_struct      rebind_work;    /* L: for busy worker */
 };
 
@@ -160,13 +157,15 @@ struct worker_pool {
 
        struct list_head        worklist;       /* L: list of pending works */
        int                     nr_workers;     /* L: total number of workers */
+
+       /* nr_idle includes the ones off idle_list for rebinding */
        int                     nr_idle;        /* L: currently idle ones */
 
        struct list_head        idle_list;      /* X: list of idle workers */
        struct timer_list       idle_timer;     /* L: worker idle timeout */
        struct timer_list       mayday_timer;   /* L: SOS timer for workers */
 
-       struct mutex            manager_mutex;  /* mutex manager should hold */
+       struct mutex            assoc_mutex;    /* protect GCWQ_DISASSOCIATED */
        struct ida              worker_ida;     /* L: for worker IDs */
 };
 
@@ -184,9 +183,8 @@ struct global_cwq {
        struct hlist_head       busy_hash[BUSY_WORKER_HASH_SIZE];
                                                /* L: hash of busy workers */
 
-       struct worker_pool      pools[2];       /* normal and highpri pools */
-
-       wait_queue_head_t       rebind_hold;    /* rebind hold wait */
+       struct worker_pool      pools[NR_WORKER_POOLS];
+                                               /* normal and highpri pools */
 } ____cacheline_aligned_in_smp;
 
 /*
@@ -269,17 +267,15 @@ struct workqueue_struct {
 };
 
 struct workqueue_struct *system_wq __read_mostly;
-struct workqueue_struct *system_long_wq __read_mostly;
-struct workqueue_struct *system_nrt_wq __read_mostly;
-struct workqueue_struct *system_unbound_wq __read_mostly;
-struct workqueue_struct *system_freezable_wq __read_mostly;
-struct workqueue_struct *system_nrt_freezable_wq __read_mostly;
 EXPORT_SYMBOL_GPL(system_wq);
+struct workqueue_struct *system_highpri_wq __read_mostly;
+EXPORT_SYMBOL_GPL(system_highpri_wq);
+struct workqueue_struct *system_long_wq __read_mostly;
 EXPORT_SYMBOL_GPL(system_long_wq);
-EXPORT_SYMBOL_GPL(system_nrt_wq);
+struct workqueue_struct *system_unbound_wq __read_mostly;
 EXPORT_SYMBOL_GPL(system_unbound_wq);
+struct workqueue_struct *system_freezable_wq __read_mostly;
 EXPORT_SYMBOL_GPL(system_freezable_wq);
-EXPORT_SYMBOL_GPL(system_nrt_freezable_wq);
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/workqueue.h>
@@ -534,18 +530,24 @@ static int work_next_color(int color)
 }
 
 /*
- * A work's data points to the cwq with WORK_STRUCT_CWQ set while the
- * work is on queue.  Once execution starts, WORK_STRUCT_CWQ is
- * cleared and the work data contains the cpu number it was last on.
+ * While queued, %WORK_STRUCT_CWQ is set and non flag bits of a work's data
+ * contain the pointer to the queued cwq.  Once execution starts, the flag
+ * is cleared and the high bits contain OFFQ flags and CPU number.
+ *
+ * set_work_cwq(), set_work_cpu_and_clear_pending(), mark_work_canceling()
+ * and clear_work_data() can be used to set the cwq, cpu or clear
+ * work->data.  These functions should only be called while the work is
+ * owned - ie. while the PENDING bit is set.
  *
- * set_work_{cwq|cpu}() and clear_work_data() can be used to set the
- * cwq, cpu or clear work->data.  These functions should only be
- * called while the work is owned - ie. while the PENDING bit is set.
+ * get_work_[g]cwq() can be used to obtain the gcwq or cwq corresponding to
+ * a work.  gcwq is available once the work has been queued anywhere after
+ * initialization until it is sync canceled.  cwq is available only while
+ * the work item is queued.
  *
- * get_work_[g]cwq() can be used to obtain the gcwq or cwq
- * corresponding to a work.  gcwq is available once the work has been
- * queued anywhere after initialization.  cwq is available only from
- * queueing until execution starts.
+ * %WORK_OFFQ_CANCELING is used to mark a work item which is being
+ * canceled.  While being canceled, a work item may have its PENDING set
+ * but stay off timer and worklist for arbitrarily long and nobody should
+ * try to steal the PENDING bit.
  */
 static inline void set_work_data(struct work_struct *work, unsigned long data,
                                 unsigned long flags)
@@ -562,13 +564,22 @@ static void set_work_cwq(struct work_struct *work,
                      WORK_STRUCT_PENDING | WORK_STRUCT_CWQ | extra_flags);
 }
 
-static void set_work_cpu(struct work_struct *work, unsigned int cpu)
+static void set_work_cpu_and_clear_pending(struct work_struct *work,
+                                          unsigned int cpu)
 {
-       set_work_data(work, cpu << WORK_STRUCT_FLAG_BITS, WORK_STRUCT_PENDING);
+       /*
+        * The following wmb is paired with the implied mb in
+        * test_and_set_bit(PENDING) and ensures all updates to @work made
+        * here are visible to and precede any updates by the next PENDING
+        * owner.
+        */
+       smp_wmb();
+       set_work_data(work, (unsigned long)cpu << WORK_OFFQ_CPU_SHIFT, 0);
 }
 
 static void clear_work_data(struct work_struct *work)
 {
+       smp_wmb();      /* see set_work_cpu_and_clear_pending() */
        set_work_data(work, WORK_STRUCT_NO_CPU, 0);
 }
 
@@ -591,7 +602,7 @@ static struct global_cwq *get_work_gcwq(struct work_struct *work)
                return ((struct cpu_workqueue_struct *)
                        (data & WORK_STRUCT_WQ_DATA_MASK))->pool->gcwq;
 
-       cpu = data >> WORK_STRUCT_FLAG_BITS;
+       cpu = data >> WORK_OFFQ_CPU_SHIFT;
        if (cpu == WORK_CPU_NONE)
                return NULL;
 
@@ -599,6 +610,22 @@ static struct global_cwq *get_work_gcwq(struct work_struct *work)
        return get_gcwq(cpu);
 }
 
+static void mark_work_canceling(struct work_struct *work)
+{
+       struct global_cwq *gcwq = get_work_gcwq(work);
+       unsigned long cpu = gcwq ? gcwq->cpu : WORK_CPU_NONE;
+
+       set_work_data(work, (cpu << WORK_OFFQ_CPU_SHIFT) | WORK_OFFQ_CANCELING,
+                     WORK_STRUCT_PENDING);
+}
+
+static bool work_is_canceling(struct work_struct *work)
+{
+       unsigned long data = atomic_long_read(&work->data);
+
+       return !(data & WORK_STRUCT_CWQ) && (data & WORK_OFFQ_CANCELING);
+}
+
 /*
  * Policy functions.  These define the policies on how the global worker
  * pools are managed.  Unless noted otherwise, these functions assume that
@@ -657,6 +684,13 @@ static bool too_many_workers(struct worker_pool *pool)
        int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
        int nr_busy = pool->nr_workers - nr_idle;
 
+       /*
+        * nr_idle and idle_list may disagree if idle rebinding is in
+        * progress.  Never return %true if idle_list is empty.
+        */
+       if (list_empty(&pool->idle_list))
+               return false;
+
        return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
 }
 
@@ -902,6 +936,206 @@ static struct worker *find_worker_executing_work(struct global_cwq *gcwq,
                                            work);
 }
 
+/**
+ * move_linked_works - move linked works to a list
+ * @work: start of series of works to be scheduled
+ * @head: target list to append @work to
+ * @nextp: out paramter for nested worklist walking
+ *
+ * Schedule linked works starting from @work to @head.  Work series to
+ * be scheduled starts at @work and includes any consecutive work with
+ * WORK_STRUCT_LINKED set in its predecessor.
+ *
+ * If @nextp is not NULL, it's updated to point to the next work of
+ * the last scheduled work.  This allows move_linked_works() to be
+ * nested inside outer list_for_each_entry_safe().
+ *
+ * CONTEXT:
+ * spin_lock_irq(gcwq->lock).
+ */
+static void move_linked_works(struct work_struct *work, struct list_head *head,
+                             struct work_struct **nextp)
+{
+       struct work_struct *n;
+
+       /*
+        * Linked worklist will always end before the end of the list,
+        * use NULL for list head.
+        */
+       list_for_each_entry_safe_from(work, n, NULL, entry) {
+               list_move_tail(&work->entry, head);
+               if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
+                       break;
+       }
+
+       /*
+        * If we're already inside safe list traversal and have moved
+        * multiple works to the scheduled queue, the next position
+        * needs to be updated.
+        */
+       if (nextp)
+               *nextp = n;
+}
+
+static void cwq_activate_delayed_work(struct work_struct *work)
+{
+       struct cpu_workqueue_struct *cwq = get_work_cwq(work);
+
+       trace_workqueue_activate_work(work);
+       move_linked_works(work, &cwq->pool->worklist, NULL);
+       __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
+       cwq->nr_active++;
+}
+
+static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
+{
+       struct work_struct *work = list_first_entry(&cwq->delayed_works,
+                                                   struct work_struct, entry);
+
+       cwq_activate_delayed_work(work);
+}
+
+/**
+ * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
+ * @cwq: cwq of interest
+ * @color: color of work which left the queue
+ *
+ * A work either has completed or is removed from pending queue,
+ * decrement nr_in_flight of its cwq and handle workqueue flushing.
+ *
+ * CONTEXT:
+ * spin_lock_irq(gcwq->lock).
+ */
+static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)
+{
+       /* ignore uncolored works */
+       if (color == WORK_NO_COLOR)
+               return;
+
+       cwq->nr_in_flight[color]--;
+
+       cwq->nr_active--;
+       if (!list_empty(&cwq->delayed_works)) {
+               /* one down, submit a delayed one */
+               if (cwq->nr_active < cwq->max_active)
+                       cwq_activate_first_delayed(cwq);
+       }
+
+       /* is flush in progress and are we at the flushing tip? */
+       if (likely(cwq->flush_color != color))
+               return;
+
+       /* are there still in-flight works? */
+       if (cwq->nr_in_flight[color])
+               return;
+
+       /* this cwq is done, clear flush_color */
+       cwq->flush_color = -1;
+
+       /*
+        * If this was the last cwq, wake up the first flusher.  It
+        * will handle the rest.
+        */
+       if (atomic_dec_and_test(&cwq->wq->nr_cwqs_to_flush))
+               complete(&cwq->wq->first_flusher->done);
+}
+
+/**
+ * try_to_grab_pending - steal work item from worklist and disable irq
+ * @work: work item to steal
+ * @is_dwork: @work is a delayed_work
+ * @flags: place to store irq state
+ *
+ * Try to grab PENDING bit of @work.  This function can handle @work in any
+ * stable state - idle, on timer or on worklist.  Return values are
+ *
+ *  1          if @work was pending and we successfully stole PENDING
+ *  0          if @work was idle and we claimed PENDING
+ *  -EAGAIN    if PENDING couldn't be grabbed at the moment, safe to busy-retry
+ *  -ENOENT    if someone else is canceling @work, this state may persist
+ *             for arbitrarily long
+ *
+ * On >= 0 return, the caller owns @work's PENDING bit.  To avoid getting
+ * interrupted while holding PENDING and @work off queue, irq must be
+ * disabled on entry.  This, combined with delayed_work->timer being
+ * irqsafe, ensures that we return -EAGAIN for finite short period of time.
+ *
+ * On successful return, >= 0, irq is disabled and the caller is
+ * responsible for releasing it using local_irq_restore(*@flags).
+ *
+ * This function is safe to call from any context including IRQ handler.
+ */
+static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
+                              unsigned long *flags)
+{
+       struct global_cwq *gcwq;
+
+       local_irq_save(*flags);
+
+       /* try to steal the timer if it exists */
+       if (is_dwork) {
+               struct delayed_work *dwork = to_delayed_work(work);
+
+               /*
+                * dwork->timer is irqsafe.  If del_timer() fails, it's
+                * guaranteed that the timer is not queued anywhere and not
+                * running on the local CPU.
+                */
+               if (likely(del_timer(&dwork->timer)))
+                       return 1;
+       }
+
+       /* try to claim PENDING the normal way */
+       if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
+               return 0;
+
+       /*
+        * The queueing is in progress, or it is already queued. Try to
+        * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
+        */
+       gcwq = get_work_gcwq(work);
+       if (!gcwq)
+               goto fail;
+
+       spin_lock(&gcwq->lock);
+       if (!list_empty(&work->entry)) {
+               /*
+                * This work is queued, but perhaps we locked the wrong gcwq.
+                * In that case we must see the new value after rmb(), see
+                * insert_work()->wmb().
+                */
+               smp_rmb();
+               if (gcwq == get_work_gcwq(work)) {
+                       debug_work_deactivate(work);
+
+                       /*
+                        * A delayed work item cannot be grabbed directly
+                        * because it might have linked NO_COLOR work items
+                        * which, if left on the delayed_list, will confuse
+                        * cwq->nr_active management later on and cause
+                        * stall.  Make sure the work item is activated
+                        * before grabbing.
+                        */
+                       if (*work_data_bits(work) & WORK_STRUCT_DELAYED)
+                               cwq_activate_delayed_work(work);
+
+                       list_del_init(&work->entry);
+                       cwq_dec_nr_in_flight(get_work_cwq(work),
+                               get_work_color(work));
+
+                       spin_unlock(&gcwq->lock);
+                       return 1;
+               }
+       }
+       spin_unlock(&gcwq->lock);
+fail:
+       local_irq_restore(*flags);
+       if (work_is_canceling(work))
+               return -ENOENT;
+       cpu_relax();
+       return -EAGAIN;
+}
+
 /**
  * insert_work - insert a work into gcwq
  * @cwq: cwq @work belongs to
@@ -982,7 +1216,15 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
        struct cpu_workqueue_struct *cwq;
        struct list_head *worklist;
        unsigned int work_flags;
-       unsigned long flags;
+       unsigned int req_cpu = cpu;
+
+       /*
+        * While a work item is PENDING && off queue, a task trying to
+        * steal the PENDING will busy-loop waiting for it to either get
+        * queued or lose PENDING.  Grabbing PENDING and queueing should
+        * happen with IRQ disabled.
+        */
+       WARN_ON_ONCE(!irqs_disabled());
 
        debug_work_activate(work);
 
@@ -995,21 +1237,22 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
        if (!(wq->flags & WQ_UNBOUND)) {
                struct global_cwq *last_gcwq;
 
-               if (unlikely(cpu == WORK_CPU_UNBOUND))
+               if (cpu == WORK_CPU_UNBOUND)
                        cpu = raw_smp_processor_id();
 
                /*
-                * It's multi cpu.  If @wq is non-reentrant and @work
-                * was previously on a different cpu, it might still
-                * be running there, in which case the work needs to
-                * be queued on that cpu to guarantee non-reentrance.
+                * It's multi cpu.  If @work was previously on a different
+                * cpu, it might still be running there, in which case the
+                * work needs to be queued on that cpu to guarantee
+                * non-reentrancy.
                 */
                gcwq = get_gcwq(cpu);
-               if (wq->flags & WQ_NON_REENTRANT &&
-                   (last_gcwq = get_work_gcwq(work)) && last_gcwq != gcwq) {
+               last_gcwq = get_work_gcwq(work);
+
+               if (last_gcwq && last_gcwq != gcwq) {
                        struct worker *worker;
 
-                       spin_lock_irqsave(&last_gcwq->lock, flags);
+                       spin_lock(&last_gcwq->lock);
 
                        worker = find_worker_executing_work(last_gcwq, work);
 
@@ -1017,22 +1260,23 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
                                gcwq = last_gcwq;
                        else {
                                /* meh... not running there, queue here */
-                               spin_unlock_irqrestore(&last_gcwq->lock, flags);
-                               spin_lock_irqsave(&gcwq->lock, flags);
+                               spin_unlock(&last_gcwq->lock);
+                               spin_lock(&gcwq->lock);
                        }
-               } else
-                       spin_lock_irqsave(&gcwq->lock, flags);
+               } else {
+                       spin_lock(&gcwq->lock);
+               }
        } else {
                gcwq = get_gcwq(WORK_CPU_UNBOUND);
-               spin_lock_irqsave(&gcwq->lock, flags);
+               spin_lock(&gcwq->lock);
        }
 
        /* gcwq determined, get cwq and queue */
        cwq = get_cwq(gcwq->cpu, wq);
-       trace_workqueue_queue_work(cpu, cwq, work);
+       trace_workqueue_queue_work(req_cpu, cwq, work);
 
        if (WARN_ON(!list_empty(&work->entry))) {
-               spin_unlock_irqrestore(&gcwq->lock, flags);
+               spin_unlock(&gcwq->lock);
                return;
        }
 
@@ -1050,79 +1294,110 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
 
        insert_work(cwq, work, worklist, work_flags);
 
-       spin_unlock_irqrestore(&gcwq->lock, flags);
+       spin_unlock(&gcwq->lock);
 }
 
 /**
- * queue_work - queue work on a workqueue
+ * queue_work_on - queue work on specific cpu
+ * @cpu: CPU number to execute work on
  * @wq: workqueue to use
  * @work: work to queue
  *
- * Returns 0 if @work was already on a queue, non-zero otherwise.
+ * Returns %false if @work was already on a queue, %true otherwise.
  *
- * We queue the work to the CPU on which it was submitted, but if the CPU dies
- * it can be processed by another CPU.
+ * We queue the work to a specific CPU, the caller must ensure it
+ * can't go away.
  */
-int queue_work(struct workqueue_struct *wq, struct work_struct *work)
+bool queue_work_on(int cpu, struct workqueue_struct *wq,
+                  struct work_struct *work)
 {
-       int ret;
+       bool ret = false;
+       unsigned long flags;
 
-       ret = queue_work_on(get_cpu(), wq, work);
-       put_cpu();
+       local_irq_save(flags);
+
+       if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
+               __queue_work(cpu, wq, work);
+               ret = true;
+       }
 
+       local_irq_restore(flags);
        return ret;
 }
-EXPORT_SYMBOL_GPL(queue_work);
+EXPORT_SYMBOL_GPL(queue_work_on);
 
 /**
- * queue_work_on - queue work on specific cpu
- * @cpu: CPU number to execute work on
+ * queue_work - queue work on a workqueue
  * @wq: workqueue to use
  * @work: work to queue
  *
- * Returns 0 if @work was already on a queue, non-zero otherwise.
+ * Returns %false if @work was already on a queue, %true otherwise.
  *
- * We queue the work to a specific CPU, the caller must ensure it
- * can't go away.
+ * We queue the work to the CPU on which it was submitted, but if the CPU dies
+ * it can be processed by another CPU.
  */
-int
-queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
+bool queue_work(struct workqueue_struct *wq, struct work_struct *work)
 {
-       int ret = 0;
-
-       if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
-               __queue_work(cpu, wq, work);
-               ret = 1;
-       }
-       return ret;
+       return queue_work_on(WORK_CPU_UNBOUND, wq, work);
 }
-EXPORT_SYMBOL_GPL(queue_work_on);
+EXPORT_SYMBOL_GPL(queue_work);
 
-static void delayed_work_timer_fn(unsigned long __data)
+void delayed_work_timer_fn(unsigned long __data)
 {
        struct delayed_work *dwork = (struct delayed_work *)__data;
        struct cpu_workqueue_struct *cwq = get_work_cwq(&dwork->work);
 
-       __queue_work(smp_processor_id(), cwq->wq, &dwork->work);
+       /* should have been called from irqsafe timer with irq already off */
+       __queue_work(dwork->cpu, cwq->wq, &dwork->work);
 }
+EXPORT_SYMBOL_GPL(delayed_work_timer_fn);
 
-/**
- * queue_delayed_work - queue work on a workqueue after delay
- * @wq: workqueue to use
- * @dwork: delayable work to queue
- * @delay: number of jiffies to wait before queueing
- *
- * Returns 0 if @work was already on a queue, non-zero otherwise.
- */
-int queue_delayed_work(struct workqueue_struct *wq,
-                       struct delayed_work *dwork, unsigned long delay)
+static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
+                               struct delayed_work *dwork, unsigned long delay)
 {
-       if (delay == 0)
-               return queue_work(wq, &dwork->work);
+       struct timer_list *timer = &dwork->timer;
+       struct work_struct *work = &dwork->work;
+       unsigned int lcpu;
+
+       WARN_ON_ONCE(timer->function != delayed_work_timer_fn ||
+                    timer->data != (unsigned long)dwork);
+       BUG_ON(timer_pending(timer));
+       BUG_ON(!list_empty(&work->entry));
+
+       timer_stats_timer_set_start_info(&dwork->timer);
 
-       return queue_delayed_work_on(-1, wq, dwork, delay);
+       /*
+        * This stores cwq for the moment, for the timer_fn.  Note that the
+        * work's gcwq is preserved to allow reentrance detection for
+        * delayed works.
+        */
+       if (!(wq->flags & WQ_UNBOUND)) {
+               struct global_cwq *gcwq = get_work_gcwq(work);
+
+               /*
+                * If we cannot get the last gcwq from @work directly,
+                * select the last CPU such that it avoids unnecessarily
+                * triggering non-reentrancy check in __queue_work().
+                */
+               lcpu = cpu;
+               if (gcwq)
+                       lcpu = gcwq->cpu;
+               if (lcpu == WORK_CPU_UNBOUND)
+                       lcpu = raw_smp_processor_id();
+       } else {
+               lcpu = WORK_CPU_UNBOUND;
+       }
+
+       set_work_cwq(work, get_cwq(lcpu, wq), 0);
+
+       dwork->cpu = cpu;
+       timer->expires = jiffies + delay;
+
+       if (unlikely(cpu != WORK_CPU_UNBOUND))
+               add_timer_on(timer, cpu);
+       else
+               add_timer(timer);
 }
-EXPORT_SYMBOL_GPL(queue_delayed_work);
 
 /**
  * queue_delayed_work_on - queue work on specific CPU after delay
@@ -1131,53 +1406,100 @@ EXPORT_SYMBOL_GPL(queue_delayed_work);
  * @dwork: work to queue
  * @delay: number of jiffies to wait before queueing
  *
- * Returns 0 if @work was already on a queue, non-zero otherwise.
+ * Returns %false if @work was already on a queue, %true otherwise.  If
+ * @delay is zero and @dwork is idle, it will be scheduled for immediate
+ * execution.
  */
-int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
-                       struct delayed_work *dwork, unsigned long delay)
+bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
+                          struct delayed_work *dwork, unsigned long delay)
 {
-       int ret = 0;
-       struct timer_list *timer = &dwork->timer;
        struct work_struct *work = &dwork->work;
+       bool ret = false;
+       unsigned long flags;
 
-       if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
-               unsigned int lcpu;
+       if (!delay)
+               return queue_work_on(cpu, wq, &dwork->work);
 
-               BUG_ON(timer_pending(timer));
-               BUG_ON(!list_empty(&work->entry));
+       /* read the comment in __queue_work() */
+       local_irq_save(flags);
 
-               timer_stats_timer_set_start_info(&dwork->timer);
+       if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
+               __queue_delayed_work(cpu, wq, dwork, delay);
+               ret = true;
+       }
 
-               /*
-                * This stores cwq for the moment, for the timer_fn.
-                * Note that the work's gcwq is preserved to allow
-                * reentrance detection for delayed works.
-                */
-               if (!(wq->flags & WQ_UNBOUND)) {
-                       struct global_cwq *gcwq = get_work_gcwq(work);
+       local_irq_restore(flags);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(queue_delayed_work_on);
 
-                       if (gcwq && gcwq->cpu != WORK_CPU_UNBOUND)
-                               lcpu = gcwq->cpu;
-                       else
-                               lcpu = raw_smp_processor_id();
-               } else
-                       lcpu = WORK_CPU_UNBOUND;
+/**
+ * queue_delayed_work - queue work on a workqueue after delay
+ * @wq: workqueue to use
+ * @dwork: delayable work to queue
+ * @delay: number of jiffies to wait before queueing
+ *
+ * Equivalent to queue_delayed_work_on() but tries to use the local CPU.
+ */
+bool queue_delayed_work(struct workqueue_struct *wq,
+                       struct delayed_work *dwork, unsigned long delay)
+{
+       return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
+}
+EXPORT_SYMBOL_GPL(queue_delayed_work);
 
-               set_work_cwq(work, get_cwq(lcpu, wq), 0);
+/**
+ * mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU
+ * @cpu: CPU number to execute work on
+ * @wq: workqueue to use
+ * @dwork: work to queue
+ * @delay: number of jiffies to wait before queueing
+ *
+ * If @dwork is idle, equivalent to queue_delayed_work_on(); otherwise,
+ * modify @dwork's timer so that it expires after @delay.  If @delay is
+ * zero, @work is guaranteed to be scheduled immediately regardless of its
+ * current state.
+ *
+ * Returns %false if @dwork was idle and queued, %true if @dwork was
+ * pending and its timer was modified.
+ *
+ * This function is safe to call from any context including IRQ handler.
+ * See try_to_grab_pending() for details.
+ */
+bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
+                        struct delayed_work *dwork, unsigned long delay)
+{
+       unsigned long flags;
+       int ret;
 
-               timer->expires = jiffies + delay;
-               timer->data = (unsigned long)dwork;
-               timer->function = delayed_work_timer_fn;
+       do {
+               ret = try_to_grab_pending(&dwork->work, true, &flags);
+       } while (unlikely(ret == -EAGAIN));
 
-               if (unlikely(cpu >= 0))
-                       add_timer_on(timer, cpu);
-               else
-                       add_timer(timer);
-               ret = 1;
+       if (likely(ret >= 0)) {
+               __queue_delayed_work(cpu, wq, dwork, delay);
+               local_irq_restore(flags);
        }
+
+       /* -ENOENT from try_to_grab_pending() becomes %true */
        return ret;
 }
-EXPORT_SYMBOL_GPL(queue_delayed_work_on);
+EXPORT_SYMBOL_GPL(mod_delayed_work_on);
+
+/**
+ * mod_delayed_work - modify delay of or queue a delayed work
+ * @wq: workqueue to use
+ * @dwork: work to queue
+ * @delay: number of jiffies to wait before queueing
+ *
+ * mod_delayed_work_on() on local CPU.
+ */
+bool mod_delayed_work(struct workqueue_struct *wq, struct delayed_work *dwork,
+                     unsigned long delay)
+{
+       return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
+}
+EXPORT_SYMBOL_GPL(mod_delayed_work);
 
 /**
  * worker_enter_idle - enter idle state
@@ -1305,37 +1627,21 @@ __acquires(&gcwq->lock)
        }
 }
 
-struct idle_rebind {
-       int                     cnt;            /* # workers to be rebound */
-       struct completion       done;           /* all workers rebound */
-};
-
 /*
- * Rebind an idle @worker to its CPU.  During CPU onlining, this has to
- * happen synchronously for idle workers.  worker_thread() will test
- * %WORKER_REBIND before leaving idle and call this function.
+ * Rebind an idle @worker to its CPU.  worker_thread() will test
+ * list_empty(@worker->entry) before leaving idle and call this function.
  */
 static void idle_worker_rebind(struct worker *worker)
 {
        struct global_cwq *gcwq = worker->pool->gcwq;
 
-       /* CPU must be online at this point */
-       WARN_ON(!worker_maybe_bind_and_lock(worker));
-       if (!--worker->idle_rebind->cnt)
-               complete(&worker->idle_rebind->done);
-       spin_unlock_irq(&worker->pool->gcwq->lock);
+       /* CPU may go down again inbetween, clear UNBOUND only on success */
+       if (worker_maybe_bind_and_lock(worker))
+               worker_clr_flags(worker, WORKER_UNBOUND);
 
-       /* we did our part, wait for rebind_workers() to finish up */
-       wait_event(gcwq->rebind_hold, !(worker->flags & WORKER_REBIND));
-
-       /*
-        * rebind_workers() shouldn't finish until all workers passed the
-        * above WORKER_REBIND wait.  Tell it when done.
-        */
-       spin_lock_irq(&worker->pool->gcwq->lock);
-       if (!--worker->idle_rebind->cnt)
-               complete(&worker->idle_rebind->done);
-       spin_unlock_irq(&worker->pool->gcwq->lock);
+       /* rebind complete, become available again */
+       list_add(&worker->entry, &worker->pool->idle_list);
+       spin_unlock_irq(&gcwq->lock);
 }
 
 /*
@@ -1349,16 +1655,8 @@ static void busy_worker_rebind_fn(struct work_struct *work)
        struct worker *worker = container_of(work, struct worker, rebind_work);
        struct global_cwq *gcwq = worker->pool->gcwq;
 
-       worker_maybe_bind_and_lock(worker);
-
-       /*
-        * %WORKER_REBIND must be cleared even if the above binding failed;
-        * otherwise, we may confuse the next CPU_UP cycle or oops / get
-        * stuck by calling idle_worker_rebind() prematurely.  If CPU went
-        * down again inbetween, %WORKER_UNBOUND would be set, so clearing
-        * %WORKER_REBIND is always safe.
-        */
-       worker_clr_flags(worker, WORKER_REBIND);
+       if (worker_maybe_bind_and_lock(worker))
+               worker_clr_flags(worker, WORKER_UNBOUND);
 
        spin_unlock_irq(&gcwq->lock);
 }
@@ -1370,123 +1668,74 @@ static void busy_worker_rebind_fn(struct work_struct *work)
  * @gcwq->cpu is coming online.  Rebind all workers to the CPU.  Rebinding
  * is different for idle and busy ones.
  *
- * The idle ones should be rebound synchronously and idle rebinding should
- * be complete before any worker starts executing work items with
- * concurrency management enabled; otherwise, scheduler may oops trying to
- * wake up non-local idle worker from wq_worker_sleeping().
+ * Idle ones will be removed from the idle_list and woken up.  They will
+ * add themselves back after completing rebind.  This ensures that the
+ * idle_list doesn't contain any unbound workers when re-bound busy workers
+ * try to perform local wake-ups for concurrency management.
  *
- * This is achieved by repeatedly requesting rebinding until all idle
- * workers are known to have been rebound under @gcwq->lock and holding all
- * idle workers from becoming busy until idle rebinding is complete.
+ * Busy workers can rebind after they finish their current work items.
+ * Queueing the rebind work item at the head of the scheduled list is
+ * enough.  Note that nr_running will be properly bumped as busy workers
+ * rebind.
  *
- * Once idle workers are rebound, busy workers can be rebound as they
- * finish executing their current work items.  Queueing the rebind work at
- * the head of their scheduled lists is enough.  Note that nr_running will
- * be properbly bumped as busy workers rebind.
- *
- * On return, all workers are guaranteed to either be bound or have rebind
- * work item scheduled.
+ * On return, all non-manager workers are scheduled for rebind - see
+ * manage_workers() for the manager special case.  Any idle worker
+ * including the manager will not appear on @idle_list until rebind is
+ * complete, making local wake-ups safe.
  */
 static void rebind_workers(struct global_cwq *gcwq)
-       __releases(&gcwq->lock) __acquires(&gcwq->lock)
 {
-       struct idle_rebind idle_rebind;
        struct worker_pool *pool;
-       struct worker *worker;
+       struct worker *worker, *n;
        struct hlist_node *pos;
        int i;
 
        lockdep_assert_held(&gcwq->lock);
 
        for_each_worker_pool(pool, gcwq)
-               lockdep_assert_held(&pool->manager_mutex);
-
-       /*
-        * Rebind idle workers.  Interlocked both ways.  We wait for
-        * workers to rebind via @idle_rebind.done.  Workers will wait for
-        * us to finish up by watching %WORKER_REBIND.
-        */
-       init_completion(&idle_rebind.done);
-retry:
-       idle_rebind.cnt = 1;
-       INIT_COMPLETION(idle_rebind.done);
+               lockdep_assert_held(&pool->assoc_mutex);
 
-       /* set REBIND and kick idle ones, we'll wait for these later */
+       /* dequeue and kick idle ones */
        for_each_worker_pool(pool, gcwq) {
-               list_for_each_entry(worker, &pool->idle_list, entry) {
-                       unsigned long worker_flags = worker->flags;
-
-                       if (worker->flags & WORKER_REBIND)
-                               continue;
-
-                       /* morph UNBOUND to REBIND atomically */
-                       worker_flags &= ~WORKER_UNBOUND;
-                       worker_flags |= WORKER_REBIND;
-                       ACCESS_ONCE(worker->flags) = worker_flags;
-
-                       idle_rebind.cnt++;
-                       worker->idle_rebind = &idle_rebind;
+               list_for_each_entry_safe(worker, n, &pool->idle_list, entry) {
+                       /*
+                        * idle workers should be off @pool->idle_list
+                        * until rebind is complete to avoid receiving
+                        * premature local wake-ups.
+                        */
+                       list_del_init(&worker->entry);
 
-                       /* worker_thread() will call idle_worker_rebind() */
+                       /*
+                        * worker_thread() will see the above dequeuing
+                        * and call idle_worker_rebind().
+                        */
                        wake_up_process(worker->task);
                }
        }
 
-       if (--idle_rebind.cnt) {
-               spin_unlock_irq(&gcwq->lock);
-               wait_for_completion(&idle_rebind.done);
-               spin_lock_irq(&gcwq->lock);
-               /* busy ones might have become idle while waiting, retry */
-               goto retry;
-       }
-
-       /* all idle workers are rebound, rebind busy workers */
+       /* rebind busy workers */
        for_each_busy_worker(worker, i, pos, gcwq) {
                struct work_struct *rebind_work = &worker->rebind_work;
-               unsigned long worker_flags = worker->flags;
-
-               /* morph UNBOUND to REBIND atomically */
-               worker_flags &= ~WORKER_UNBOUND;
-               worker_flags |= WORKER_REBIND;
-               ACCESS_ONCE(worker->flags) = worker_flags;
+               struct workqueue_struct *wq;
 
                if (test_and_set_bit(WORK_STRUCT_PENDING_BIT,
                                     work_data_bits(rebind_work)))
                        continue;
 
-               /* wq doesn't matter, use the default one */
                debug_work_activate(rebind_work);
-               insert_work(get_cwq(gcwq->cpu, system_wq), rebind_work,
-                           worker->scheduled.next,
-                           work_color_to_flags(WORK_NO_COLOR));
-       }
-
-       /*
-        * All idle workers are rebound and waiting for %WORKER_REBIND to
-        * be cleared inside idle_worker_rebind().  Clear and release.
-        * Clearing %WORKER_REBIND from this foreign context is safe
-        * because these workers are still guaranteed to be idle.
-        *
-        * We need to make sure all idle workers passed WORKER_REBIND wait
-        * in idle_worker_rebind() before returning; otherwise, workers can
-        * get stuck at the wait if hotplug cycle repeats.
-        */
-       idle_rebind.cnt = 1;
-       INIT_COMPLETION(idle_rebind.done);
-
-       for_each_worker_pool(pool, gcwq) {
-               list_for_each_entry(worker, &pool->idle_list, entry) {
-                       worker->flags &= ~WORKER_REBIND;
-                       idle_rebind.cnt++;
-               }
-       }
 
-       wake_up_all(&gcwq->rebind_hold);
+               /*
+                * wq doesn't really matter but let's keep @worker->pool
+                * and @cwq->pool consistent for sanity.
+                */
+               if (worker_pool_pri(worker->pool))
+                       wq = system_highpri_wq;
+               else
+                       wq = system_wq;
 
-       if (--idle_rebind.cnt) {
-               spin_unlock_irq(&gcwq->lock);
-               wait_for_completion(&idle_rebind.done);
-               spin_lock_irq(&gcwq->lock);
+               insert_work(get_cwq(gcwq->cpu, wq), rebind_work,
+                       worker->scheduled.next,
+                       work_color_to_flags(WORK_NO_COLOR));
        }
 }
 
@@ -1801,190 +2050,89 @@ static bool maybe_destroy_workers(struct worker_pool *pool)
                        break;
                }
 
-               destroy_worker(worker);
-               ret = true;
-       }
-
-       return ret;
-}
-
-/**
- * manage_workers - manage worker pool
- * @worker: self
- *
- * Assume the manager role and manage gcwq worker pool @worker belongs
- * to.  At any given time, there can be only zero or one manager per
- * gcwq.  The exclusion is handled automatically by this function.
- *
- * The caller can safely start processing works on false return.  On
- * true return, it's guaranteed that need_to_create_worker() is false
- * and may_start_working() is true.
- *
- * CONTEXT:
- * spin_lock_irq(gcwq->lock) which may be released and regrabbed
- * multiple times.  Does GFP_KERNEL allocations.
- *
- * RETURNS:
- * false if no action was taken and gcwq->lock stayed locked, true if
- * some action was taken.
- */
-static bool manage_workers(struct worker *worker)
-{
-       struct worker_pool *pool = worker->pool;
-       bool ret = false;
-
-       if (pool->flags & POOL_MANAGING_WORKERS)
-               return ret;
-
-       pool->flags |= POOL_MANAGING_WORKERS;
-
-       /*
-        * To simplify both worker management and CPU hotplug, hold off
-        * management while hotplug is in progress.  CPU hotplug path can't
-        * grab %POOL_MANAGING_WORKERS to achieve this because that can
-        * lead to idle worker depletion (all become busy thinking someone
-        * else is managing) which in turn can result in deadlock under
-        * extreme circumstances.  Use @pool->manager_mutex to synchronize
-        * manager against CPU hotplug.
-        *
-        * manager_mutex would always be free unless CPU hotplug is in
-        * progress.  trylock first without dropping @gcwq->lock.
-        */
-       if (unlikely(!mutex_trylock(&pool->manager_mutex))) {
-               spin_unlock_irq(&pool->gcwq->lock);
-               mutex_lock(&pool->manager_mutex);
-               /*
-                * CPU hotplug could have happened while we were waiting
-                * for manager_mutex.  Hotplug itself can't handle us
-                * because manager isn't either on idle or busy list, and
-                * @gcwq's state and ours could have deviated.
-                *
-                * As hotplug is now excluded via manager_mutex, we can
-                * simply try to bind.  It will succeed or fail depending
-                * on @gcwq's current state.  Try it and adjust
-                * %WORKER_UNBOUND accordingly.
-                */
-               if (worker_maybe_bind_and_lock(worker))
-                       worker->flags &= ~WORKER_UNBOUND;
-               else
-                       worker->flags |= WORKER_UNBOUND;
-
-               ret = true;
-       }
-
-       pool->flags &= ~POOL_MANAGE_WORKERS;
-
-       /*
-        * Destroy and then create so that may_start_working() is true
-        * on return.
-        */
-       ret |= maybe_destroy_workers(pool);
-       ret |= maybe_create_worker(pool);
-
-       pool->flags &= ~POOL_MANAGING_WORKERS;
-       mutex_unlock(&pool->manager_mutex);
-       return ret;
-}
-
-/**
- * move_linked_works - move linked works to a list
- * @work: start of series of works to be scheduled
- * @head: target list to append @work to
- * @nextp: out paramter for nested worklist walking
- *
- * Schedule linked works starting from @work to @head.  Work series to
- * be scheduled starts at @work and includes any consecutive work with
- * WORK_STRUCT_LINKED set in its predecessor.
- *
- * If @nextp is not NULL, it's updated to point to the next work of
- * the last scheduled work.  This allows move_linked_works() to be
- * nested inside outer list_for_each_entry_safe().
- *
- * CONTEXT:
- * spin_lock_irq(gcwq->lock).
- */
-static void move_linked_works(struct work_struct *work, struct list_head *head,
-                             struct work_struct **nextp)
-{
-       struct work_struct *n;
-
-       /*
-        * Linked worklist will always end before the end of the list,
-        * use NULL for list head.
-        */
-       list_for_each_entry_safe_from(work, n, NULL, entry) {
-               list_move_tail(&work->entry, head);
-               if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
-                       break;
-       }
-
-       /*
-        * If we're already inside safe list traversal and have moved
-        * multiple works to the scheduled queue, the next position
-        * needs to be updated.
-        */
-       if (nextp)
-               *nextp = n;
-}
-
-static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
-{
-       struct work_struct *work = list_first_entry(&cwq->delayed_works,
-                                                   struct work_struct, entry);
+               destroy_worker(worker);
+               ret = true;
+       }
 
-       trace_workqueue_activate_work(work);
-       move_linked_works(work, &cwq->pool->worklist, NULL);
-       __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
-       cwq->nr_active++;
+       return ret;
 }
 
 /**
- * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
- * @cwq: cwq of interest
- * @color: color of work which left the queue
- * @delayed: for a delayed work
+ * manage_workers - manage worker pool
+ * @worker: self
  *
- * A work either has completed or is removed from pending queue,
- * decrement nr_in_flight of its cwq and handle workqueue flushing.
+ * Assume the manager role and manage gcwq worker pool @worker belongs
+ * to.  At any given time, there can be only zero or one manager per
+ * gcwq.  The exclusion is handled automatically by this function.
+ *
+ * The caller can safely start processing works on false return.  On
+ * true return, it's guaranteed that need_to_create_worker() is false
+ * and may_start_working() is true.
  *
  * CONTEXT:
- * spin_lock_irq(gcwq->lock).
+ * spin_lock_irq(gcwq->lock) which may be released and regrabbed
+ * multiple times.  Does GFP_KERNEL allocations.
+ *
+ * RETURNS:
+ * false if no action was taken and gcwq->lock stayed locked, true if
+ * some action was taken.
  */
-static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color,
-                                bool delayed)
+static bool manage_workers(struct worker *worker)
 {
-       /* ignore uncolored works */
-       if (color == WORK_NO_COLOR)
-               return;
+       struct worker_pool *pool = worker->pool;
+       bool ret = false;
 
-       cwq->nr_in_flight[color]--;
+       if (pool->flags & POOL_MANAGING_WORKERS)
+               return ret;
 
-       if (!delayed) {
-               cwq->nr_active--;
-               if (!list_empty(&cwq->delayed_works)) {
-                       /* one down, submit a delayed one */
-                       if (cwq->nr_active < cwq->max_active)
-                               cwq_activate_first_delayed(cwq);
-               }
-       }
+       pool->flags |= POOL_MANAGING_WORKERS;
 
-       /* is flush in progress and are we at the flushing tip? */
-       if (likely(cwq->flush_color != color))
-               return;
+       /*
+        * To simplify both worker management and CPU hotplug, hold off
+        * management while hotplug is in progress.  CPU hotplug path can't
+        * grab %POOL_MANAGING_WORKERS to achieve this because that can
+        * lead to idle worker depletion (all become busy thinking someone
+        * else is managing) which in turn can result in deadlock under
+        * extreme circumstances.  Use @pool->assoc_mutex to synchronize
+        * manager against CPU hotplug.
+        *
+        * assoc_mutex would always be free unless CPU hotplug is in
+        * progress.  trylock first without dropping @gcwq->lock.
+        */
+       if (unlikely(!mutex_trylock(&pool->assoc_mutex))) {
+               spin_unlock_irq(&pool->gcwq->lock);
+               mutex_lock(&pool->assoc_mutex);
+               /*
+                * CPU hotplug could have happened while we were waiting
+                * for assoc_mutex.  Hotplug itself can't handle us
+                * because manager isn't either on idle or busy list, and
+                * @gcwq's state and ours could have deviated.
+                *
+                * As hotplug is now excluded via assoc_mutex, we can
+                * simply try to bind.  It will succeed or fail depending
+                * on @gcwq's current state.  Try it and adjust
+                * %WORKER_UNBOUND accordingly.
+                */
+               if (worker_maybe_bind_and_lock(worker))
+                       worker->flags &= ~WORKER_UNBOUND;
+               else
+                       worker->flags |= WORKER_UNBOUND;
 
-       /* are there still in-flight works? */
-       if (cwq->nr_in_flight[color])
-               return;
+               ret = true;
+       }
 
-       /* this cwq is done, clear flush_color */
-       cwq->flush_color = -1;
+       pool->flags &= ~POOL_MANAGE_WORKERS;
 
        /*
-        * If this was the last cwq, wake up the first flusher.  It
-        * will handle the rest.
+        * Destroy and then create so that may_start_working() is true
+        * on return.
         */
-       if (atomic_dec_and_test(&cwq->wq->nr_cwqs_to_flush))
-               complete(&cwq->wq->first_flusher->done);
+       ret |= maybe_destroy_workers(pool);
+       ret |= maybe_create_worker(pool);
+
+       pool->flags &= ~POOL_MANAGING_WORKERS;
+       mutex_unlock(&pool->assoc_mutex);
+       return ret;
 }
 
 /**
@@ -2030,7 +2178,7 @@ __acquires(&gcwq->lock)
         * necessary to avoid spurious warnings from rescuers servicing the
         * unbound or a disassociated gcwq.
         */
-       WARN_ON_ONCE(!(worker->flags & (WORKER_UNBOUND | WORKER_REBIND)) &&
+       WARN_ON_ONCE(!(worker->flags & WORKER_UNBOUND) &&
                     !(gcwq->flags & GCWQ_DISASSOCIATED) &&
                     raw_smp_processor_id() != gcwq->cpu);
 
@@ -2046,15 +2194,13 @@ __acquires(&gcwq->lock)
                return;
        }
 
-       /* claim and process */
+       /* claim and dequeue */
        debug_work_deactivate(work);
        hlist_add_head(&worker->hentry, bwh);
        worker->current_work = work;
        worker->current_cwq = cwq;
        work_color = get_work_color(work);
 
-       /* record the current cpu number in the work data and dequeue */
-       set_work_cpu(work, gcwq->cpu);
        list_del_init(&work->entry);
 
        /*
@@ -2071,9 +2217,16 @@ __acquires(&gcwq->lock)
        if ((worker->flags & WORKER_UNBOUND) && need_more_worker(pool))
                wake_up_worker(pool);
 
+       /*
+        * Record the last CPU and clear PENDING which should be the last
+        * update to @work.  Also, do this inside @gcwq->lock so that
+        * PENDING and queued state changes happen together while IRQ is
+        * disabled.
+        */
+       set_work_cpu_and_clear_pending(work, gcwq->cpu);
+
        spin_unlock_irq(&gcwq->lock);
 
-       work_clear_pending(work);
        lock_map_acquire_read(&cwq->wq->lockdep_map);
        lock_map_acquire(&lockdep_map);
        trace_workqueue_execute_start(work);
@@ -2087,11 +2240,9 @@ __acquires(&gcwq->lock)
        lock_map_release(&cwq->wq->lockdep_map);
 
        if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
-               printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
-                      "%s/0x%08x/%d\n",
-                      current->comm, preempt_count(), task_pid_nr(current));
-               printk(KERN_ERR "    last function: ");
-               print_symbol("%s\n", (unsigned long)f);
+               pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n"
+                      "     last function: %pf\n",
+                      current->comm, preempt_count(), task_pid_nr(current), f);
                debug_show_held_locks(current);
                dump_stack();
        }
@@ -2106,7 +2257,7 @@ __acquires(&gcwq->lock)
        hlist_del_init(&worker->hentry);
        worker->current_work = NULL;
        worker->current_cwq = NULL;
-       cwq_dec_nr_in_flight(cwq, work_color, false);
+       cwq_dec_nr_in_flight(cwq, work_color);
 }
 
 /**
@@ -2151,18 +2302,17 @@ static int worker_thread(void *__worker)
 woke_up:
        spin_lock_irq(&gcwq->lock);
 
-       /*
-        * DIE can be set only while idle and REBIND set while busy has
-        * @worker->rebind_work scheduled.  Checking here is enough.
-        */
-       if (unlikely(worker->flags & (WORKER_REBIND | WORKER_DIE))) {
+       /* we are off idle list if destruction or rebind is requested */
+       if (unlikely(list_empty(&worker->entry))) {
                spin_unlock_irq(&gcwq->lock);
 
+               /* if DIE is set, destruction is requested */
                if (worker->flags & WORKER_DIE) {
                        worker->task->flags &= ~PF_WQ_WORKER;
                        return 0;
                }
 
+               /* otherwise, rebind */
                idle_worker_rebind(worker);
                goto woke_up;
        }
@@ -2645,8 +2795,8 @@ reflush:
 
                if (++flush_cnt == 10 ||
                    (flush_cnt % 100 == 0 && flush_cnt <= 1000))
-                       pr_warning("workqueue %s: flush on destruction isn't complete after %u tries\n",
-                                  wq->name, flush_cnt);
+                       pr_warn("workqueue %s: flush on destruction isn't complete after %u tries\n",
+                               wq->name, flush_cnt);
                goto reflush;
        }
 
@@ -2657,8 +2807,7 @@ reflush:
 }
 EXPORT_SYMBOL_GPL(drain_workqueue);
 
-static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
-                            bool wait_executing)
+static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
 {
        struct worker *worker = NULL;
        struct global_cwq *gcwq;
@@ -2680,13 +2829,12 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
                cwq = get_work_cwq(work);
                if (unlikely(!cwq || gcwq != cwq->pool->gcwq))
                        goto already_gone;
-       } else if (wait_executing) {
+       } else {
                worker = find_worker_executing_work(gcwq, work);
                if (!worker)
                        goto already_gone;
                cwq = worker->current_cwq;
-       } else
-               goto already_gone;
+       }
 
        insert_wq_barrier(cwq, barr, work, worker);
        spin_unlock_irq(&gcwq->lock);
@@ -2713,15 +2861,8 @@ already_gone:
  * flush_work - wait for a work to finish executing the last queueing instance
  * @work: the work to flush
  *
- * Wait until @work has finished execution.  This function considers
- * only the last queueing instance of @work.  If @work has been
- * enqueued across different CPUs on a non-reentrant workqueue or on
- * multiple workqueues, @work might still be executing on return on
- * some of the CPUs from earlier queueing.
- *
- * If @work was queued only on a non-reentrant, ordered or unbound
- * workqueue, @work is guaranteed to be idle on return if it hasn't
- * been requeued since flush started.
+ * Wait until @work has finished execution.  @work is guaranteed to be idle
+ * on return if it hasn't been requeued since flush started.
  *
  * RETURNS:
  * %true if flush_work() waited for the work to finish execution,
@@ -2734,140 +2875,36 @@ bool flush_work(struct work_struct *work)
        lock_map_acquire(&work->lockdep_map);
        lock_map_release(&work->lockdep_map);
 
-       if (start_flush_work(work, &barr, true)) {
-               wait_for_completion(&barr.done);
-               destroy_work_on_stack(&barr.work);
-               return true;
-       } else
-               return false;
-}
-EXPORT_SYMBOL_GPL(flush_work);
-
-static bool wait_on_cpu_work(struct global_cwq *gcwq, struct work_struct *work)
-{
-       struct wq_barrier barr;
-       struct worker *worker;
-
-       spin_lock_irq(&gcwq->lock);
-
-       worker = find_worker_executing_work(gcwq, work);
-       if (unlikely(worker))
-               insert_wq_barrier(worker->current_cwq, &barr, work, worker);
-
-       spin_unlock_irq(&gcwq->lock);
-
-       if (unlikely(worker)) {
+       if (start_flush_work(work, &barr)) {
                wait_for_completion(&barr.done);
                destroy_work_on_stack(&barr.work);
                return true;
-       } else
+       } else {
                return false;
-}
-
-static bool wait_on_work(struct work_struct *work)
-{
-       bool ret = false;
-       int cpu;
-
-       might_sleep();
-
-       lock_map_acquire(&work->lockdep_map);
-       lock_map_release(&work->lockdep_map);
-
-       for_each_gcwq_cpu(cpu)
-               ret |= wait_on_cpu_work(get_gcwq(cpu), work);
-       return ret;
-}
-
-/**
- * flush_work_sync - wait until a work has finished execution
- * @work: the work to flush
- *
- * Wait until @work has finished execution.  On return, it's
- * guaranteed that all queueing instances of @work which happened
- * before this function is called are finished.  In other words, if
- * @work hasn't been requeued since this function was called, @work is
- * guaranteed to be idle on return.
- *
- * RETURNS:
- * %true if flush_work_sync() waited for the work to finish execution,
- * %false if it was already idle.
- */
-bool flush_work_sync(struct work_struct *work)
-{
-       struct wq_barrier barr;
-       bool pending, waited;
-
-       /* we'll wait for executions separately, queue barr only if pending */
-       pending = start_flush_work(work, &barr, false);
-
-       /* wait for executions to finish */
-       waited = wait_on_work(work);
-
-       /* wait for the pending one */
-       if (pending) {
-               wait_for_completion(&barr.done);
-               destroy_work_on_stack(&barr.work);
-       }
-
-       return pending || waited;
-}
-EXPORT_SYMBOL_GPL(flush_work_sync);
-
-/*
- * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
- * so this work can't be re-armed in any way.
- */
-static int try_to_grab_pending(struct work_struct *work)
-{
-       struct global_cwq *gcwq;
-       int ret = -1;
-
-       if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
-               return 0;
-
-       /*
-        * The queueing is in progress, or it is already queued. Try to
-        * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
-        */
-       gcwq = get_work_gcwq(work);
-       if (!gcwq)
-               return ret;
-
-       spin_lock_irq(&gcwq->lock);
-       if (!list_empty(&work->entry)) {
-               /*
-                * This work is queued, but perhaps we locked the wrong gcwq.
-                * In that case we must see the new value after rmb(), see
-                * insert_work()->wmb().
-                */
-               smp_rmb();
-               if (gcwq == get_work_gcwq(work)) {
-                       debug_work_deactivate(work);
-                       list_del_init(&work->entry);
-                       cwq_dec_nr_in_flight(get_work_cwq(work),
-                               get_work_color(work),
-                               *work_data_bits(work) & WORK_STRUCT_DELAYED);
-                       ret = 1;
-               }
        }
-       spin_unlock_irq(&gcwq->lock);
-
-       return ret;
 }
+EXPORT_SYMBOL_GPL(flush_work);
 
-static bool __cancel_work_timer(struct work_struct *work,
-                               struct timer_list* timer)
+static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
 {
+       unsigned long flags;
        int ret;
 
        do {
-               ret = (timer && likely(del_timer(timer)));
-               if (!ret)
-                       ret = try_to_grab_pending(work);
-               wait_on_work(work);
+               ret = try_to_grab_pending(work, is_dwork, &flags);
+               /*
+                * If someone else is canceling, wait for the same event it
+                * would be waiting for before retrying.
+                */
+               if (unlikely(ret == -ENOENT))
+                       flush_work(work);
        } while (unlikely(ret < 0));
 
+       /* tell other tasks trying to grab @work to back off */
+       mark_work_canceling(work);
+       local_irq_restore(flags);
+
+       flush_work(work);
        clear_work_data(work);
        return ret;
 }
@@ -2892,7 +2929,7 @@ static bool __cancel_work_timer(struct work_struct *work,
  */
 bool cancel_work_sync(struct work_struct *work)
 {
-       return __cancel_work_timer(work, NULL);
+       return __cancel_work_timer(work, false);
 }
 EXPORT_SYMBOL_GPL(cancel_work_sync);
 
@@ -2910,33 +2947,44 @@ EXPORT_SYMBOL_GPL(cancel_work_sync);
  */
 bool flush_delayed_work(struct delayed_work *dwork)
 {
+       local_irq_disable();
        if (del_timer_sync(&dwork->timer))
-               __queue_work(raw_smp_processor_id(),
+               __queue_work(dwork->cpu,
                             get_work_cwq(&dwork->work)->wq, &dwork->work);
+       local_irq_enable();
        return flush_work(&dwork->work);
 }
 EXPORT_SYMBOL(flush_delayed_work);
 
 /**
- * flush_delayed_work_sync - wait for a dwork to finish
- * @dwork: the delayed work to flush
+ * cancel_delayed_work - cancel a delayed work
+ * @dwork: delayed_work to cancel
  *
- * Delayed timer is cancelled and the pending work is queued for
- * execution immediately.  Other than timer handling, its behavior
- * is identical to flush_work_sync().
+ * Kill off a pending delayed_work.  Returns %true if @dwork was pending
+ * and canceled; %false if wasn't pending.  Note that the work callback
+ * function may still be running on return, unless it returns %true and the
+ * work doesn't re-arm itself.  Explicitly flush or use
+ * cancel_delayed_work_sync() to wait on it.
  *
- * RETURNS:
- * %true if flush_work_sync() waited for the work to finish execution,
- * %false if it was already idle.
+ * This function is safe to call from any context including IRQ handler.
  */
-bool flush_delayed_work_sync(struct delayed_work *dwork)
+bool cancel_delayed_work(struct delayed_work *dwork)
 {
-       if (del_timer_sync(&dwork->timer))
-               __queue_work(raw_smp_processor_id(),
-                            get_work_cwq(&dwork->work)->wq, &dwork->work);
-       return flush_work_sync(&dwork->work);
+       unsigned long flags;
+       int ret;
+
+       do {
+               ret = try_to_grab_pending(&dwork->work, true, &flags);
+       } while (unlikely(ret == -EAGAIN));
+
+       if (unlikely(ret < 0))
+               return false;
+
+       set_work_cpu_and_clear_pending(&dwork->work, work_cpu(&dwork->work));
+       local_irq_restore(flags);
+       return true;
 }
-EXPORT_SYMBOL(flush_delayed_work_sync);
+EXPORT_SYMBOL(cancel_delayed_work);
 
 /**
  * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish
@@ -2949,54 +2997,39 @@ EXPORT_SYMBOL(flush_delayed_work_sync);
  */
 bool cancel_delayed_work_sync(struct delayed_work *dwork)
 {
-       return __cancel_work_timer(&dwork->work, &dwork->timer);
+       return __cancel_work_timer(&dwork->work, true);
 }
 EXPORT_SYMBOL(cancel_delayed_work_sync);
 
 /**
- * schedule_work - put work task in global workqueue
- * @work: job to be done
- *
- * Returns zero if @work was already on the kernel-global workqueue and
- * non-zero otherwise.
- *
- * This puts a job in the kernel-global workqueue if it was not already
- * queued and leaves it in the same position on the kernel-global
- * workqueue otherwise.
- */
-int schedule_work(struct work_struct *work)
-{
-       return queue_work(system_wq, work);
-}
-EXPORT_SYMBOL(schedule_work);
-
-/*
  * schedule_work_on - put work task on a specific cpu
  * @cpu: cpu to put the work task on
  * @work: job to be done
  *
  * This puts a job on a specific cpu
  */
-int schedule_work_on(int cpu, struct work_struct *work)
+bool schedule_work_on(int cpu, struct work_struct *work)
 {
        return queue_work_on(cpu, system_wq, work);
 }
 EXPORT_SYMBOL(schedule_work_on);
 
 /**
- * schedule_delayed_work - put work task in global workqueue after delay
- * @dwork: job to be done
- * @delay: number of jiffies to wait or 0 for immediate execution
+ * schedule_work - put work task in global workqueue
+ * @work: job to be done
  *
- * After waiting for a given time this puts a job in the kernel-global
- * workqueue.
+ * Returns %false if @work was already on the kernel-global workqueue and
+ * %true otherwise.
+ *
+ * This puts a job in the kernel-global workqueue if it was not already
+ * queued and leaves it in the same position on the kernel-global
+ * workqueue otherwise.
  */
-int schedule_delayed_work(struct delayed_work *dwork,
-                                       unsigned long delay)
+bool schedule_work(struct work_struct *work)
 {
-       return queue_delayed_work(system_wq, dwork, delay);
+       return queue_work(system_wq, work);
 }
-EXPORT_SYMBOL(schedule_delayed_work);
+EXPORT_SYMBOL(schedule_work);
 
 /**
  * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
@@ -3007,13 +3040,27 @@ EXPORT_SYMBOL(schedule_delayed_work);
  * After waiting for a given time this puts a job in the kernel-global
  * workqueue on the specified CPU.
  */
-int schedule_delayed_work_on(int cpu,
-                       struct delayed_work *dwork, unsigned long delay)
+bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
+                             unsigned long delay)
 {
        return queue_delayed_work_on(cpu, system_wq, dwork, delay);
 }
 EXPORT_SYMBOL(schedule_delayed_work_on);
 
+/**
+ * schedule_delayed_work - put work task in global workqueue after delay
+ * @dwork: job to be done
+ * @delay: number of jiffies to wait or 0 for immediate execution
+ *
+ * After waiting for a given time this puts a job in the kernel-global
+ * workqueue.
+ */
+bool schedule_delayed_work(struct delayed_work *dwork, unsigned long delay)
+{
+       return queue_delayed_work(system_wq, dwork, delay);
+}
+EXPORT_SYMBOL(schedule_delayed_work);
+
 /**
  * schedule_on_each_cpu - execute a function synchronously on each online CPU
  * @func: the function to call
@@ -3161,9 +3208,8 @@ static int wq_clamp_max_active(int max_active, unsigned int flags,
        int lim = flags & WQ_UNBOUND ? WQ_UNBOUND_MAX_ACTIVE : WQ_MAX_ACTIVE;
 
        if (max_active < 1 || max_active > lim)
-               printk(KERN_WARNING "workqueue: max_active %d requested for %s "
-                      "is out of range, clamping between %d and %d\n",
-                      max_active, name, 1, lim);
+               pr_warn("workqueue: max_active %d requested for %s is out of range, clamping between %d and %d\n",
+                       max_active, name, 1, lim);
 
        return clamp_val(max_active, 1, lim);
 }
@@ -3318,6 +3364,26 @@ void destroy_workqueue(struct workqueue_struct *wq)
 }
 EXPORT_SYMBOL_GPL(destroy_workqueue);
 
+/**
+ * cwq_set_max_active - adjust max_active of a cwq
+ * @cwq: target cpu_workqueue_struct
+ * @max_active: new max_active value.
+ *
+ * Set @cwq->max_active to @max_active and activate delayed works if
+ * increased.
+ *
+ * CONTEXT:
+ * spin_lock_irq(gcwq->lock).
+ */
+static void cwq_set_max_active(struct cpu_workqueue_struct *cwq, int max_active)
+{
+       cwq->max_active = max_active;
+
+       while (!list_empty(&cwq->delayed_works) &&
+              cwq->nr_active < cwq->max_active)
+               cwq_activate_first_delayed(cwq);
+}
+
 /**
  * workqueue_set_max_active - adjust max_active of a workqueue
  * @wq: target workqueue
@@ -3345,7 +3411,7 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
 
                if (!(wq->flags & WQ_FREEZABLE) ||
                    !(gcwq->flags & GCWQ_FREEZING))
-                       get_cwq(gcwq->cpu, wq)->max_active = max_active;
+                       cwq_set_max_active(get_cwq(gcwq->cpu, wq), max_active);
 
                spin_unlock_irq(&gcwq->lock);
        }
@@ -3440,23 +3506,23 @@ EXPORT_SYMBOL_GPL(work_busy);
  */
 
 /* claim manager positions of all pools */
-static void gcwq_claim_management_and_lock(struct global_cwq *gcwq)
+static void gcwq_claim_assoc_and_lock(struct global_cwq *gcwq)
 {
        struct worker_pool *pool;
 
        for_each_worker_pool(pool, gcwq)
-               mutex_lock_nested(&pool->manager_mutex, pool - gcwq->pools);
+               mutex_lock_nested(&pool->assoc_mutex, pool - gcwq->pools);
        spin_lock_irq(&gcwq->lock);
 }
 
 /* release manager positions */
-static void gcwq_release_management_and_unlock(struct global_cwq *gcwq)
+static void gcwq_release_assoc_and_unlock(struct global_cwq *gcwq)
 {
        struct worker_pool *pool;
 
        spin_unlock_irq(&gcwq->lock);
        for_each_worker_pool(pool, gcwq)
-               mutex_unlock(&pool->manager_mutex);
+               mutex_unlock(&pool->assoc_mutex);
 }
 
 static void gcwq_unbind_fn(struct work_struct *work)
@@ -3469,7 +3535,7 @@ static void gcwq_unbind_fn(struct work_struct *work)
 
        BUG_ON(gcwq->cpu != smp_processor_id());
 
-       gcwq_claim_management_and_lock(gcwq);
+       gcwq_claim_assoc_and_lock(gcwq);
 
        /*
         * We've claimed all manager positions.  Make all workers unbound
@@ -3486,7 +3552,7 @@ static void gcwq_unbind_fn(struct work_struct *work)
 
        gcwq->flags |= GCWQ_DISASSOCIATED;
 
-       gcwq_release_management_and_unlock(gcwq);
+       gcwq_release_assoc_and_unlock(gcwq);
 
        /*
         * Call schedule() so that we cross rq->lock and thus can guarantee
@@ -3514,7 +3580,7 @@ static void gcwq_unbind_fn(struct work_struct *work)
  * Workqueues should be brought up before normal priority CPU notifiers.
  * This will be registered high priority CPU notifier.
  */
-static int __devinit workqueue_cpu_up_callback(struct notifier_block *nfb,
+static int __cpuinit workqueue_cpu_up_callback(struct notifier_block *nfb,
                                               unsigned long action,
                                               void *hcpu)
 {
@@ -3542,10 +3608,10 @@ static int __devinit workqueue_cpu_up_callback(struct notifier_block *nfb,
 
        case CPU_DOWN_FAILED:
        case CPU_ONLINE:
-               gcwq_claim_management_and_lock(gcwq);
+               gcwq_claim_assoc_and_lock(gcwq);
                gcwq->flags &= ~GCWQ_DISASSOCIATED;
                rebind_workers(gcwq);
-               gcwq_release_management_and_unlock(gcwq);
+               gcwq_release_assoc_and_unlock(gcwq);
                break;
        }
        return NOTIFY_OK;
@@ -3555,7 +3621,7 @@ static int __devinit workqueue_cpu_up_callback(struct notifier_block *nfb,
  * Workqueues should be brought down after normal priority CPU notifiers.
  * This will be registered as low priority CPU notifier.
  */
-static int __devinit workqueue_cpu_down_callback(struct notifier_block *nfb,
+static int __cpuinit workqueue_cpu_down_callback(struct notifier_block *nfb,
                                                 unsigned long action,
                                                 void *hcpu)
 {
@@ -3566,7 +3632,7 @@ static int __devinit workqueue_cpu_down_callback(struct notifier_block *nfb,
        case CPU_DOWN_PREPARE:
                /* unbinding should happen on the local CPU */
                INIT_WORK_ONSTACK(&unbind_work, gcwq_unbind_fn);
-               schedule_work_on(cpu, &unbind_work);
+               queue_work_on(cpu, system_highpri_wq, &unbind_work);
                flush_work(&unbind_work);
                break;
        }
@@ -3735,11 +3801,7 @@ void thaw_workqueues(void)
                                continue;
 
                        /* restore max_active and repopulate worklist */
-                       cwq->max_active = wq->saved_max_active;
-
-                       while (!list_empty(&cwq->delayed_works) &&
-                              cwq->nr_active < cwq->max_active)
-                               cwq_activate_first_delayed(cwq);
+                       cwq_set_max_active(cwq, wq->saved_max_active);
                }
 
                for_each_worker_pool(pool, gcwq)
@@ -3759,8 +3821,12 @@ static int __init init_workqueues(void)
        unsigned int cpu;
        int i;
 
+       /* make sure we have enough bits for OFFQ CPU number */
+       BUILD_BUG_ON((1LU << (BITS_PER_LONG - WORK_OFFQ_CPU_SHIFT)) <
+                    WORK_CPU_LAST);
+
        cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_UP);
-       cpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_DOWN);
+       hotcpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_DOWN);
 
        /* initialize gcwqs */
        for_each_gcwq_cpu(cpu) {
@@ -3786,11 +3852,9 @@ static int __init init_workqueues(void)
                        setup_timer(&pool->mayday_timer, gcwq_mayday_timeout,
                                    (unsigned long)pool);
 
-                       mutex_init(&pool->manager_mutex);
+                       mutex_init(&pool->assoc_mutex);
                        ida_init(&pool->worker_ida);
                }
-
-               init_waitqueue_head(&gcwq->rebind_hold);
        }
 
        /* create the initial worker */
@@ -3813,17 +3877,14 @@ static int __init init_workqueues(void)
        }
 
        system_wq = alloc_workqueue("events", 0, 0);
+       system_highpri_wq = alloc_workqueue("events_highpri", WQ_HIGHPRI, 0);
        system_long_wq = alloc_workqueue("events_long", 0, 0);
-       system_nrt_wq = alloc_workqueue("events_nrt", WQ_NON_REENTRANT, 0);
        system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
                                            WQ_UNBOUND_MAX_ACTIVE);
        system_freezable_wq = alloc_workqueue("events_freezable",
                                              WQ_FREEZABLE, 0);
-       system_nrt_freezable_wq = alloc_workqueue("events_nrt_freezable",
-                       WQ_NON_REENTRANT | WQ_FREEZABLE, 0);
-       BUG_ON(!system_wq || !system_long_wq || !system_nrt_wq ||
-              !system_unbound_wq || !system_freezable_wq ||
-               !system_nrt_freezable_wq);
+       BUG_ON(!system_wq || !system_highpri_wq || !system_long_wq ||
+              !system_unbound_wq || !system_freezable_wq);
        return 0;
 }
 early_initcall(init_workqueues);
index 0401d2916d9fa25515540c0483dd486adaddc858..52e5abbc41dbc0724e9b6664e7674b6bd680ca53 100644 (file)
@@ -375,14 +375,14 @@ static int uevent_net_init(struct net *net)
        struct uevent_sock *ue_sk;
        struct netlink_kernel_cfg cfg = {
                .groups = 1,
+               .flags  = NL_CFG_F_NONROOT_RECV,
        };
 
        ue_sk = kzalloc(sizeof(*ue_sk), GFP_KERNEL);
        if (!ue_sk)
                return -ENOMEM;
 
-       ue_sk->sk = netlink_kernel_create(net, NETLINK_KOBJECT_UEVENT,
-                                         THIS_MODULE, &cfg);
+       ue_sk->sk = netlink_kernel_create(net, NETLINK_KOBJECT_UEVENT, &cfg);
        if (!ue_sk->sk) {
                printk(KERN_ERR
                       "kobject_uevent: unable to create netlink socket!\n");
@@ -422,7 +422,6 @@ static struct pernet_operations uevent_net_ops = {
 
 static int __init kobject_uevent_init(void)
 {
-       netlink_set_nonroot(NETLINK_KOBJECT_UEVENT, NL_NONROOT_RECV);
        return register_pernet_subsys(&uevent_net_ops);
 }
 
index 4226dfeb51786f4e0a926499dca36f9b4f64d9d6..18eca7809b08894cd135519f8991e138bbc0fa2a 100644 (file)
@@ -22,6 +22,10 @@ static const u16 nla_attr_minlen[NLA_TYPE_MAX+1] = {
        [NLA_U64]       = sizeof(u64),
        [NLA_MSECS]     = sizeof(u64),
        [NLA_NESTED]    = NLA_HDRLEN,
+       [NLA_S8]        = sizeof(s8),
+       [NLA_S16]       = sizeof(s16),
+       [NLA_S32]       = sizeof(s32),
+       [NLA_S64]       = sizeof(s64),
 };
 
 static int validate_nla(const struct nlattr *nla, int maxtype,
index 45bc1f83a5ada665297bc0b9dcd3a6ad72e2ec72..f114bf6a8e1363dc812cfa4fd8e6ae66a1411575 100644 (file)
@@ -170,7 +170,7 @@ void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
  * Statically reserve bounce buffer space and initialize bounce buffer data
  * structures for the software IO TLB used to implement the DMA API.
  */
-void __init
+static void __init
 swiotlb_init_with_default_size(size_t default_size, int verbose)
 {
        unsigned long bytes;
@@ -206,8 +206,9 @@ swiotlb_init(int verbose)
 int
 swiotlb_late_init_with_default_size(size_t default_size)
 {
-       unsigned long i, bytes, req_nslabs = io_tlb_nslabs;
+       unsigned long bytes, req_nslabs = io_tlb_nslabs;
        unsigned int order;
+       int rc = 0;
 
        if (!io_tlb_nslabs) {
                io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
@@ -229,16 +230,32 @@ swiotlb_late_init_with_default_size(size_t default_size)
                order--;
        }
 
-       if (!io_tlb_start)
-               goto cleanup1;
-
+       if (!io_tlb_start) {
+               io_tlb_nslabs = req_nslabs;
+               return -ENOMEM;
+       }
        if (order != get_order(bytes)) {
                printk(KERN_WARNING "Warning: only able to allocate %ld MB "
                       "for software IO TLB\n", (PAGE_SIZE << order) >> 20);
                io_tlb_nslabs = SLABS_PER_PAGE << order;
-               bytes = io_tlb_nslabs << IO_TLB_SHIFT;
        }
+       rc = swiotlb_late_init_with_tbl(io_tlb_start, io_tlb_nslabs);
+       if (rc)
+               free_pages((unsigned long)io_tlb_start, order);
+       return rc;
+}
+
+int
+swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
+{
+       unsigned long i, bytes;
+
+       bytes = nslabs << IO_TLB_SHIFT;
+
+       io_tlb_nslabs = nslabs;
+       io_tlb_start = tlb;
        io_tlb_end = io_tlb_start + bytes;
+
        memset(io_tlb_start, 0, bytes);
 
        /*
@@ -288,10 +305,8 @@ cleanup3:
        io_tlb_list = NULL;
 cleanup2:
        io_tlb_end = NULL;
-       free_pages((unsigned long)io_tlb_start, order);
        io_tlb_start = NULL;
-cleanup1:
-       io_tlb_nslabs = req_nslabs;
+       io_tlb_nslabs = 0;
        return -ENOMEM;
 }
 
index 9b75a045dbf4bf63e250372daad3f878909ecfdd..a47f0f50c89fccb6e11a6ce99605abde1cc077d6 100644 (file)
@@ -26,7 +26,7 @@
  */
 SYSCALL_DEFINE(fadvise64_64)(int fd, loff_t offset, loff_t len, int advice)
 {
-       struct file *file = fget(fd);
+       struct fd f = fdget(fd);
        struct address_space *mapping;
        struct backing_dev_info *bdi;
        loff_t endbyte;                 /* inclusive */
@@ -35,15 +35,15 @@ SYSCALL_DEFINE(fadvise64_64)(int fd, loff_t offset, loff_t len, int advice)
        unsigned long nrpages;
        int ret = 0;
 
-       if (!file)
+       if (!f.file)
                return -EBADF;
 
-       if (S_ISFIFO(file->f_path.dentry->d_inode->i_mode)) {
+       if (S_ISFIFO(f.file->f_path.dentry->d_inode->i_mode)) {
                ret = -ESPIPE;
                goto out;
        }
 
-       mapping = file->f_mapping;
+       mapping = f.file->f_mapping;
        if (!mapping || len < 0) {
                ret = -EINVAL;
                goto out;
@@ -76,21 +76,21 @@ SYSCALL_DEFINE(fadvise64_64)(int fd, loff_t offset, loff_t len, int advice)
 
        switch (advice) {
        case POSIX_FADV_NORMAL:
-               file->f_ra.ra_pages = bdi->ra_pages;
-               spin_lock(&file->f_lock);
-               file->f_mode &= ~FMODE_RANDOM;
-               spin_unlock(&file->f_lock);
+               f.file->f_ra.ra_pages = bdi->ra_pages;
+               spin_lock(&f.file->f_lock);
+               f.file->f_mode &= ~FMODE_RANDOM;
+               spin_unlock(&f.file->f_lock);
                break;
        case POSIX_FADV_RANDOM:
-               spin_lock(&file->f_lock);
-               file->f_mode |= FMODE_RANDOM;
-               spin_unlock(&file->f_lock);
+               spin_lock(&f.file->f_lock);
+               f.file->f_mode |= FMODE_RANDOM;
+               spin_unlock(&f.file->f_lock);
                break;
        case POSIX_FADV_SEQUENTIAL:
-               file->f_ra.ra_pages = bdi->ra_pages * 2;
-               spin_lock(&file->f_lock);
-               file->f_mode &= ~FMODE_RANDOM;
-               spin_unlock(&file->f_lock);
+               f.file->f_ra.ra_pages = bdi->ra_pages * 2;
+               spin_lock(&f.file->f_lock);
+               f.file->f_mode &= ~FMODE_RANDOM;
+               spin_unlock(&f.file->f_lock);
                break;
        case POSIX_FADV_WILLNEED:
                /* First and last PARTIAL page! */
@@ -106,7 +106,7 @@ SYSCALL_DEFINE(fadvise64_64)(int fd, loff_t offset, loff_t len, int advice)
                 * Ignore return value because fadvise() shall return
                 * success even if filesystem can't retrieve a hint,
                 */
-               force_page_cache_readahead(mapping, file, start_index,
+               force_page_cache_readahead(mapping, f.file, start_index,
                                           nrpages);
                break;
        case POSIX_FADV_NOREUSE:
@@ -128,7 +128,7 @@ SYSCALL_DEFINE(fadvise64_64)(int fd, loff_t offset, loff_t len, int advice)
                ret = -EINVAL;
        }
 out:
-       fput(file);
+       fdput(f);
        return ret;
 }
 #ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
index 9ed4fd432467ee45a5310cd152fbba4399cbf722..048659c0c03d9abb429173217cf06454a471074f 100644 (file)
@@ -195,10 +195,9 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
                 */
                if (mapping_cap_account_dirty(mapping)) {
                        unsigned long addr;
-                       struct file *file = vma->vm_file;
+                       struct file *file = get_file(vma->vm_file);
 
                        flags &= MAP_NONBLOCK;
-                       get_file(file);
                        addr = mmap_region(file, start, size,
                                        flags, vma->vm_flags, pgoff);
                        fput(file);
index 6b3e71a2cd483a70c845d33136719c43739a6c68..2890e67d602621f0aaf6b516905b5340c9ef7b2c 100644 (file)
@@ -44,6 +44,13 @@ EXPORT_SYMBOL(frontswap_enabled);
  */
 static bool frontswap_writethrough_enabled __read_mostly;
 
+/*
+ * If enabled, the underlying tmem implementation is capable of doing
+ * exclusive gets, so frontswap_load, on a successful tmem_get must
+ * mark the page as no longer in frontswap AND mark it dirty.
+ */
+static bool frontswap_tmem_exclusive_gets_enabled __read_mostly;
+
 #ifdef CONFIG_DEBUG_FS
 /*
  * Counters available via /sys/kernel/debug/frontswap (if debugfs is
@@ -96,6 +103,15 @@ void frontswap_writethrough(bool enable)
 }
 EXPORT_SYMBOL(frontswap_writethrough);
 
+/*
+ * Enable/disable frontswap exclusive gets (see above).
+ */
+void frontswap_tmem_exclusive_gets(bool enable)
+{
+       frontswap_tmem_exclusive_gets_enabled = enable;
+}
+EXPORT_SYMBOL(frontswap_tmem_exclusive_gets);
+
 /*
  * Called when a swap device is swapon'd.
  */
@@ -174,8 +190,13 @@ int __frontswap_load(struct page *page)
        BUG_ON(sis == NULL);
        if (frontswap_test(sis, offset))
                ret = frontswap_ops.load(type, offset, page);
-       if (ret == 0)
+       if (ret == 0) {
                inc_frontswap_loads();
+               if (frontswap_tmem_exclusive_gets_enabled) {
+                       SetPageDirty(page);
+                       frontswap_clear(sis, offset);
+               }
+       }
        return ret;
 }
 EXPORT_SYMBOL(__frontswap_load);
@@ -263,6 +284,11 @@ static int __frontswap_unuse_pages(unsigned long total, unsigned long *unused,
        return ret;
 }
 
+/*
+ * Used to check if it's necessory and feasible to unuse pages.
+ * Return 1 when nothing to do, 0 when need to shink pages,
+ * error code when there is an error.
+ */
 static int __frontswap_shrink(unsigned long target_pages,
                                unsigned long *pages_to_unuse,
                                int *type)
@@ -275,7 +301,7 @@ static int __frontswap_shrink(unsigned long target_pages,
        if (total_pages <= target_pages) {
                /* Nothing to do */
                *pages_to_unuse = 0;
-               return 0;
+               return 1;
        }
        total_pages_to_unuse = total_pages - target_pages;
        return __frontswap_unuse_pages(total_pages_to_unuse, pages_to_unuse, type);
@@ -292,7 +318,7 @@ static int __frontswap_shrink(unsigned long target_pages,
 void frontswap_shrink(unsigned long target_pages)
 {
        unsigned long pages_to_unuse = 0;
-       int type, ret;
+       int uninitialized_var(type), ret;
 
        /*
         * we don't want to hold swap_lock while doing a very
@@ -302,7 +328,7 @@ void frontswap_shrink(unsigned long target_pages)
        spin_lock(&swap_lock);
        ret = __frontswap_shrink(target_pages, &pages_to_unuse, &type);
        spin_unlock(&swap_lock);
-       if (ret == 0 && pages_to_unuse)
+       if (ret == 0)
                try_to_unuse(type, true, pages_to_unuse);
        return;
 }
index 795e525afaba8914f3f8863de6a7299d84898ada..a72f2ffdc3d07b70925eaf2824c8a9ba7994c41c 100644 (file)
@@ -4973,6 +4973,13 @@ mem_cgroup_create(struct cgroup *cont)
        } else {
                res_counter_init(&memcg->res, NULL);
                res_counter_init(&memcg->memsw, NULL);
+               /*
+                * Deeper hierachy with use_hierarchy == false doesn't make
+                * much sense so let cgroup subsystem know about this
+                * unfortunate state in our controller.
+                */
+               if (parent && parent != root_mem_cgroup)
+                       mem_cgroup_subsys.broken_hierarchy = true;
        }
        memcg->last_scanned_node = MAX_NUMNODES;
        INIT_LIST_HEAD(&memcg->oom_notify);
index ae18a48e7e4e7944af308bbff226217ae7d1601e..872441e819141c2e93657b358fedf6204d62ac57 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1301,8 +1301,7 @@ munmap_back:
                                goto free_vma;
                        correct_wcount = 1;
                }
-               vma->vm_file = file;
-               get_file(file);
+               vma->vm_file = get_file(file);
                error = file->f_op->mmap(file, vma);
                if (error)
                        goto unmap_and_free_vma;
index d4b0c10872de59d8959262b1daac92d5d60eb80a..dee2ff89fd5816ab36893a26154314c6add2dce5 100644 (file)
@@ -1282,10 +1282,8 @@ unsigned long do_mmap_pgoff(struct file *file,
        vma->vm_pgoff = pgoff;
 
        if (file) {
-               region->vm_file = file;
-               get_file(file);
-               vma->vm_file = file;
-               get_file(file);
+               region->vm_file = get_file(file);
+               vma->vm_file = get_file(file);
                if (vm_flags & VM_EXECUTABLE) {
                        added_exe_file_vma(current->mm);
                        vma->vm_mm = current->mm;
index ea8f8fa21649d7069543e19e0e48ae38d63c175e..7963f239123689864a5bf1d9e174d595c6cf1592 100644 (file)
@@ -579,19 +579,19 @@ do_readahead(struct address_space *mapping, struct file *filp,
 SYSCALL_DEFINE(readahead)(int fd, loff_t offset, size_t count)
 {
        ssize_t ret;
-       struct file *file;
+       struct fd f;
 
        ret = -EBADF;
-       file = fget(fd);
-       if (file) {
-               if (file->f_mode & FMODE_READ) {
-                       struct address_space *mapping = file->f_mapping;
+       f = fdget(fd);
+       if (f.file) {
+               if (f.file->f_mode & FMODE_READ) {
+                       struct address_space *mapping = f.file->f_mapping;
                        pgoff_t start = offset >> PAGE_CACHE_SHIFT;
                        pgoff_t end = (offset + count - 1) >> PAGE_CACHE_SHIFT;
                        unsigned long len = end - start + 1;
-                       ret = do_readahead(mapping, file, start, len);
+                       ret = do_readahead(mapping, f.file, start, len);
                }
-               fput(file);
+               fdput(f);
        }
        return ret;
 }
index d4e184e2a38ea590350e5f31073c1ed8ad6690e0..d3752110c8c7ee29b7a0d98947366482984c18ca 100644 (file)
@@ -77,13 +77,6 @@ static struct vfsmount *shm_mnt;
 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
 #define SHORT_SYMLINK_LEN 128
 
-struct shmem_xattr {
-       struct list_head list;  /* anchored by shmem_inode_info->xattr_list */
-       char *name;             /* xattr name */
-       size_t size;
-       char value[0];
-};
-
 /*
  * shmem_fallocate and shmem_writepage communicate via inode->i_private
  * (with i_mutex making sure that it has only one user at a time):
@@ -636,7 +629,6 @@ static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
 static void shmem_evict_inode(struct inode *inode)
 {
        struct shmem_inode_info *info = SHMEM_I(inode);
-       struct shmem_xattr *xattr, *nxattr;
 
        if (inode->i_mapping->a_ops == &shmem_aops) {
                shmem_unacct_size(info->flags, inode->i_size);
@@ -650,10 +642,7 @@ static void shmem_evict_inode(struct inode *inode)
        } else
                kfree(info->symlink);
 
-       list_for_each_entry_safe(xattr, nxattr, &info->xattr_list, list) {
-               kfree(xattr->name);
-               kfree(xattr);
-       }
+       simple_xattrs_free(&info->xattrs);
        BUG_ON(inode->i_blocks);
        shmem_free_inode(inode->i_sb);
        clear_inode(inode);
@@ -1377,7 +1366,7 @@ static struct inode *shmem_get_inode(struct super_block *sb, const struct inode
                spin_lock_init(&info->lock);
                info->flags = flags & VM_NORESERVE;
                INIT_LIST_HEAD(&info->swaplist);
-               INIT_LIST_HEAD(&info->xattr_list);
+               simple_xattrs_init(&info->xattrs);
                cache_no_acl(inode);
 
                switch (mode & S_IFMT) {
@@ -2059,28 +2048,6 @@ static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *co
  * filesystem level, though.
  */
 
-/*
- * Allocate new xattr and copy in the value; but leave the name to callers.
- */
-static struct shmem_xattr *shmem_xattr_alloc(const void *value, size_t size)
-{
-       struct shmem_xattr *new_xattr;
-       size_t len;
-
-       /* wrap around? */
-       len = sizeof(*new_xattr) + size;
-       if (len <= sizeof(*new_xattr))
-               return NULL;
-
-       new_xattr = kmalloc(len, GFP_KERNEL);
-       if (!new_xattr)
-               return NULL;
-
-       new_xattr->size = size;
-       memcpy(new_xattr->value, value, size);
-       return new_xattr;
-}
-
 /*
  * Callback for security_inode_init_security() for acquiring xattrs.
  */
@@ -2090,11 +2057,11 @@ static int shmem_initxattrs(struct inode *inode,
 {
        struct shmem_inode_info *info = SHMEM_I(inode);
        const struct xattr *xattr;
-       struct shmem_xattr *new_xattr;
+       struct simple_xattr *new_xattr;
        size_t len;
 
        for (xattr = xattr_array; xattr->name != NULL; xattr++) {
-               new_xattr = shmem_xattr_alloc(xattr->value, xattr->value_len);
+               new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len);
                if (!new_xattr)
                        return -ENOMEM;
 
@@ -2111,91 +2078,12 @@ static int shmem_initxattrs(struct inode *inode,
                memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN,
                       xattr->name, len);
 
-               spin_lock(&info->lock);
-               list_add(&new_xattr->list, &info->xattr_list);
-               spin_unlock(&info->lock);
+               simple_xattr_list_add(&info->xattrs, new_xattr);
        }
 
        return 0;
 }
 
-static int shmem_xattr_get(struct dentry *dentry, const char *name,
-                          void *buffer, size_t size)
-{
-       struct shmem_inode_info *info;
-       struct shmem_xattr *xattr;
-       int ret = -ENODATA;
-
-       info = SHMEM_I(dentry->d_inode);
-
-       spin_lock(&info->lock);
-       list_for_each_entry(xattr, &info->xattr_list, list) {
-               if (strcmp(name, xattr->name))
-                       continue;
-
-               ret = xattr->size;
-               if (buffer) {
-                       if (size < xattr->size)
-                               ret = -ERANGE;
-                       else
-                               memcpy(buffer, xattr->value, xattr->size);
-               }
-               break;
-       }
-       spin_unlock(&info->lock);
-       return ret;
-}
-
-static int shmem_xattr_set(struct inode *inode, const char *name,
-                          const void *value, size_t size, int flags)
-{
-       struct shmem_inode_info *info = SHMEM_I(inode);
-       struct shmem_xattr *xattr;
-       struct shmem_xattr *new_xattr = NULL;
-       int err = 0;
-
-       /* value == NULL means remove */
-       if (value) {
-               new_xattr = shmem_xattr_alloc(value, size);
-               if (!new_xattr)
-                       return -ENOMEM;
-
-               new_xattr->name = kstrdup(name, GFP_KERNEL);
-               if (!new_xattr->name) {
-                       kfree(new_xattr);
-                       return -ENOMEM;
-               }
-       }
-
-       spin_lock(&info->lock);
-       list_for_each_entry(xattr, &info->xattr_list, list) {
-               if (!strcmp(name, xattr->name)) {
-                       if (flags & XATTR_CREATE) {
-                               xattr = new_xattr;
-                               err = -EEXIST;
-                       } else if (new_xattr) {
-                               list_replace(&xattr->list, &new_xattr->list);
-                       } else {
-                               list_del(&xattr->list);
-                       }
-                       goto out;
-               }
-       }
-       if (flags & XATTR_REPLACE) {
-               xattr = new_xattr;
-               err = -ENODATA;
-       } else {
-               list_add(&new_xattr->list, &info->xattr_list);
-               xattr = NULL;
-       }
-out:
-       spin_unlock(&info->lock);
-       if (xattr)
-               kfree(xattr->name);
-       kfree(xattr);
-       return err;
-}
-
 static const struct xattr_handler *shmem_xattr_handlers[] = {
 #ifdef CONFIG_TMPFS_POSIX_ACL
        &generic_acl_access_handler,
@@ -2226,6 +2114,7 @@ static int shmem_xattr_validate(const char *name)
 static ssize_t shmem_getxattr(struct dentry *dentry, const char *name,
                              void *buffer, size_t size)
 {
+       struct shmem_inode_info *info = SHMEM_I(dentry->d_inode);
        int err;
 
        /*
@@ -2240,12 +2129,13 @@ static ssize_t shmem_getxattr(struct dentry *dentry, const char *name,
        if (err)
                return err;
 
-       return shmem_xattr_get(dentry, name, buffer, size);
+       return simple_xattr_get(&info->xattrs, name, buffer, size);
 }
 
 static int shmem_setxattr(struct dentry *dentry, const char *name,
                          const void *value, size_t size, int flags)
 {
+       struct shmem_inode_info *info = SHMEM_I(dentry->d_inode);
        int err;
 
        /*
@@ -2260,15 +2150,12 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
        if (err)
                return err;
 
-       if (size == 0)
-               value = "";  /* empty EA, do not remove */
-
-       return shmem_xattr_set(dentry->d_inode, name, value, size, flags);
-
+       return simple_xattr_set(&info->xattrs, name, value, size, flags);
 }
 
 static int shmem_removexattr(struct dentry *dentry, const char *name)
 {
+       struct shmem_inode_info *info = SHMEM_I(dentry->d_inode);
        int err;
 
        /*
@@ -2283,45 +2170,13 @@ static int shmem_removexattr(struct dentry *dentry, const char *name)
        if (err)
                return err;
 
-       return shmem_xattr_set(dentry->d_inode, name, NULL, 0, XATTR_REPLACE);
-}
-
-static bool xattr_is_trusted(const char *name)
-{
-       return !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN);
+       return simple_xattr_remove(&info->xattrs, name);
 }
 
 static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
 {
-       bool trusted = capable(CAP_SYS_ADMIN);
-       struct shmem_xattr *xattr;
-       struct shmem_inode_info *info;
-       size_t used = 0;
-
-       info = SHMEM_I(dentry->d_inode);
-
-       spin_lock(&info->lock);
-       list_for_each_entry(xattr, &info->xattr_list, list) {
-               size_t len;
-
-               /* skip "trusted." attributes for unprivileged callers */
-               if (!trusted && xattr_is_trusted(xattr->name))
-                       continue;
-
-               len = strlen(xattr->name) + 1;
-               used += len;
-               if (buffer) {
-                       if (size < used) {
-                               used = -ERANGE;
-                               break;
-                       }
-                       memcpy(buffer, xattr->name, len);
-                       buffer += len;
-               }
-       }
-       spin_unlock(&info->lock);
-
-       return used;
+       struct shmem_inode_info *info = SHMEM_I(dentry->d_inode);
+       return simple_xattr_list(&info->xattrs, buffer, size);
 }
 #endif /* CONFIG_TMPFS_XATTR */
 
index c6854759bcf1e041d7a21781921ac45cfd2f5484..11339110271ef8296771a76f5aaebe7acf6b5230 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -900,7 +900,7 @@ static void __cpuinit start_cpu_timer(int cpu)
         */
        if (keventd_up() && reap_work->work.func == NULL) {
                init_reap_node(cpu);
-               INIT_DELAYED_WORK_DEFERRABLE(reap_work, cache_reap);
+               INIT_DEFERRABLE_WORK(reap_work, cache_reap);
                schedule_delayed_work_on(cpu, reap_work,
                                        __round_jiffies_relative(HZ, cpu));
        }
index df7a6748231d67e524e2cea0c0a663d91f5d63d2..b3e3b9d525d08b71527365ae63a59a49695e5757 100644 (file)
@@ -1157,7 +1157,7 @@ static void __cpuinit start_cpu_timer(int cpu)
 {
        struct delayed_work *work = &per_cpu(vmstat_work, cpu);
 
-       INIT_DELAYED_WORK_DEFERRABLE(work, vmstat_update);
+       INIT_DEFERRABLE_WORK(work, vmstat_update);
        schedule_delayed_work_on(cpu, work, __round_jiffies_relative(HZ, cpu));
 }
 
index 8ca533c95de0346f181c1acd067a787c20a49f62..b258da88f6756d4f3ad6d52c992f86fb5a23a93d 100644 (file)
@@ -368,3 +368,9 @@ void vlan_vids_del_by_dev(struct net_device *dev,
                vlan_vid_del(dev, vid_info->vid);
 }
 EXPORT_SYMBOL(vlan_vids_del_by_dev);
+
+bool vlan_uses_dev(const struct net_device *dev)
+{
+       return rtnl_dereference(dev->vlan_info) ? true : false;
+}
+EXPORT_SYMBOL(vlan_uses_dev);
index 6449bae157023c182de306d862a6c0a11f0b04e9..15656b8573f38b87e396998bb419f3415adb9d30 100644 (file)
@@ -793,30 +793,28 @@ static int p9_fd_open(struct p9_client *client, int rfd, int wfd)
 static int p9_socket_open(struct p9_client *client, struct socket *csocket)
 {
        struct p9_trans_fd *p;
-       int ret, fd;
+       struct file *file;
+       int ret;
 
        p = kmalloc(sizeof(struct p9_trans_fd), GFP_KERNEL);
        if (!p)
                return -ENOMEM;
 
        csocket->sk->sk_allocation = GFP_NOIO;
-       fd = sock_map_fd(csocket, 0);
-       if (fd < 0) {
+       file = sock_alloc_file(csocket, 0, NULL);
+       if (IS_ERR(file)) {
                pr_err("%s (%d): failed to map fd\n",
                       __func__, task_pid_nr(current));
                sock_release(csocket);
                kfree(p);
-               return fd;
+               return PTR_ERR(file);
        }
 
-       get_file(csocket->file);
-       get_file(csocket->file);
-       p->wr = p->rd = csocket->file;
+       get_file(file);
+       p->wr = p->rd = file;
        client->trans = p;
        client->status = Connected;
 
-       sys_close(fd);  /* still racy */
-
        p->rd->f_flags |= O_NONBLOCK;
 
        p->conn = p9_conn_create(client);
@@ -1083,7 +1081,7 @@ int p9_trans_fd_init(void)
 
 void p9_trans_fd_exit(void)
 {
-       flush_work_sync(&p9_poll_work);
+       flush_work(&p9_poll_work);
        v9fs_unregister_trans(&p9_tcp_trans);
        v9fs_unregister_trans(&p9_unix_trans);
        v9fs_unregister_trans(&p9_fd_trans);
index 245831bec09a56dd263cde753050d10208ee0200..30b48f523135be8f00025f32b887b51f0f7000c3 100644 (file)
@@ -52,6 +52,8 @@ source "net/iucv/Kconfig"
 
 config INET
        bool "TCP/IP networking"
+       select CRYPTO
+       select CRYPTO_AES
        ---help---
          These are the protocols used on the Internet and on most local
          Ethernets. It is highly recommended to say Y here (this will enlarge
index b5b1a221c242d981dff59a483bdcd496849ab157..c30f3a0717fb8a609239cd8578416eac63c61dd5 100644 (file)
@@ -183,7 +183,8 @@ static int atalk_seq_socket_show(struct seq_file *seq, void *v)
                   ntohs(at->dest_net), at->dest_node, at->dest_port,
                   sk_wmem_alloc_get(s),
                   sk_rmem_alloc_get(s),
-                  s->sk_state, SOCK_INODE(s->sk_socket)->i_uid);
+                  s->sk_state,
+                  from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)));
 out:
        return 0;
 }
index 23f45ce6f3510fb1e9b30b58b7e52fac6a456961..0447d5d0b63983b139bda2853eaa9980640dd32d 100644 (file)
@@ -432,7 +432,7 @@ int atm_dev_ioctl(unsigned int cmd, void __user *arg, int compat)
                        size = dev->ops->ioctl(dev, cmd, buf);
                }
                if (size < 0) {
-                       error = (size == -ENOIOCTLCMD ? -EINVAL : size);
+                       error = (size == -ENOIOCTLCMD ? -ENOTTY : size);
                        goto done;
                }
        }
index e3c579ba632527ed14debcb0b0b274dbd365074a..957999e43ff71001a875eb4a0c2f02e8834b26e4 100644 (file)
@@ -51,14 +51,14 @@ int ax25_uid_policy;
 
 EXPORT_SYMBOL(ax25_uid_policy);
 
-ax25_uid_assoc *ax25_findbyuid(uid_t uid)
+ax25_uid_assoc *ax25_findbyuid(kuid_t uid)
 {
        ax25_uid_assoc *ax25_uid, *res = NULL;
        struct hlist_node *node;
 
        read_lock(&ax25_uid_lock);
        ax25_uid_for_each(ax25_uid, node, &ax25_uid_list) {
-               if (ax25_uid->uid == uid) {
+               if (uid_eq(ax25_uid->uid, uid)) {
                        ax25_uid_hold(ax25_uid);
                        res = ax25_uid;
                        break;
@@ -84,7 +84,7 @@ int ax25_uid_ioctl(int cmd, struct sockaddr_ax25 *sax)
                read_lock(&ax25_uid_lock);
                ax25_uid_for_each(ax25_uid, node, &ax25_uid_list) {
                        if (ax25cmp(&sax->sax25_call, &ax25_uid->call) == 0) {
-                               res = ax25_uid->uid;
+                               res = from_kuid_munged(current_user_ns(), ax25_uid->uid);
                                break;
                        }
                }
@@ -93,9 +93,14 @@ int ax25_uid_ioctl(int cmd, struct sockaddr_ax25 *sax)
                return res;
 
        case SIOCAX25ADDUID:
+       {
+               kuid_t sax25_kuid;
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
-               user = ax25_findbyuid(sax->sax25_uid);
+               sax25_kuid = make_kuid(current_user_ns(), sax->sax25_uid);
+               if (!uid_valid(sax25_kuid))
+                       return -EINVAL;
+               user = ax25_findbyuid(sax25_kuid);
                if (user) {
                        ax25_uid_put(user);
                        return -EEXIST;
@@ -106,7 +111,7 @@ int ax25_uid_ioctl(int cmd, struct sockaddr_ax25 *sax)
                        return -ENOMEM;
 
                atomic_set(&ax25_uid->refcount, 1);
-               ax25_uid->uid  = sax->sax25_uid;
+               ax25_uid->uid  = sax25_kuid;
                ax25_uid->call = sax->sax25_call;
 
                write_lock(&ax25_uid_lock);
@@ -114,7 +119,7 @@ int ax25_uid_ioctl(int cmd, struct sockaddr_ax25 *sax)
                write_unlock(&ax25_uid_lock);
 
                return 0;
-
+       }
        case SIOCAX25DELUID:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
@@ -172,7 +177,9 @@ static int ax25_uid_seq_show(struct seq_file *seq, void *v)
                struct ax25_uid_assoc *pt;
 
                pt = hlist_entry(v, struct ax25_uid_assoc, uid_node);
-               seq_printf(seq, "%6d %s\n", pt->uid, ax2asc(buf, &pt->call));
+               seq_printf(seq, "%6d %s\n",
+                       from_kuid_munged(seq_user_ns(seq), pt->uid),
+                       ax2asc(buf, &pt->call));
        }
        return 0;
 }
index 469daabd90c7bf28572c3f9066ee2146ed590fd1..b02b75dae3a8a5dfeb015838ba5eca2ecb5d6109 100644 (file)
@@ -166,13 +166,15 @@ static void batadv_iv_ogm_send_to_if(struct batadv_forw_packet *forw_packet,
        int16_t buff_pos;
        struct batadv_ogm_packet *batadv_ogm_packet;
        struct sk_buff *skb;
+       uint8_t *packet_pos;
 
        if (hard_iface->if_status != BATADV_IF_ACTIVE)
                return;
 
        packet_num = 0;
        buff_pos = 0;
-       batadv_ogm_packet = (struct batadv_ogm_packet *)forw_packet->skb->data;
+       packet_pos = forw_packet->skb->data;
+       batadv_ogm_packet = (struct batadv_ogm_packet *)packet_pos;
 
        /* adjust all flags and log packets */
        while (batadv_iv_ogm_aggr_packet(buff_pos, forw_packet->packet_len,
@@ -181,15 +183,17 @@ static void batadv_iv_ogm_send_to_if(struct batadv_forw_packet *forw_packet,
                /* we might have aggregated direct link packets with an
                 * ordinary base packet
                 */
-               if ((forw_packet->direct_link_flags & (1 << packet_num)) &&
-                   (forw_packet->if_incoming == hard_iface))
+               if (forw_packet->direct_link_flags & BIT(packet_num) &&
+                   forw_packet->if_incoming == hard_iface)
                        batadv_ogm_packet->flags |= BATADV_DIRECTLINK;
                else
                        batadv_ogm_packet->flags &= ~BATADV_DIRECTLINK;
 
-               fwd_str = (packet_num > 0 ? "Forwarding" : (forw_packet->own ?
-                                                           "Sending own" :
-                                                           "Forwarding"));
+               if (packet_num > 0 || !forw_packet->own)
+                       fwd_str = "Forwarding";
+               else
+                       fwd_str = "Sending own";
+
                batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
                           "%s %spacket (originator %pM, seqno %u, TQ %d, TTL %d, IDF %s, ttvn %d) on interface %s [%pM]\n",
                           fwd_str, (packet_num > 0 ? "aggregated " : ""),
@@ -204,8 +208,8 @@ static void batadv_iv_ogm_send_to_if(struct batadv_forw_packet *forw_packet,
                buff_pos += BATADV_OGM_HLEN;
                buff_pos += batadv_tt_len(batadv_ogm_packet->tt_num_changes);
                packet_num++;
-               batadv_ogm_packet = (struct batadv_ogm_packet *)
-                                       (forw_packet->skb->data + buff_pos);
+               packet_pos = forw_packet->skb->data + buff_pos;
+               batadv_ogm_packet = (struct batadv_ogm_packet *)packet_pos;
        }
 
        /* create clone because function is called more than once */
@@ -227,9 +231,10 @@ static void batadv_iv_ogm_emit(struct batadv_forw_packet *forw_packet)
        struct batadv_hard_iface *primary_if = NULL;
        struct batadv_ogm_packet *batadv_ogm_packet;
        unsigned char directlink;
+       uint8_t *packet_pos;
 
-       batadv_ogm_packet = (struct batadv_ogm_packet *)
-                                               (forw_packet->skb->data);
+       packet_pos = forw_packet->skb->data;
+       batadv_ogm_packet = (struct batadv_ogm_packet *)packet_pos;
        directlink = (batadv_ogm_packet->flags & BATADV_DIRECTLINK ? 1 : 0);
 
        if (!forw_packet->if_incoming) {
@@ -454,6 +459,7 @@ static void batadv_iv_ogm_aggregate(struct batadv_forw_packet *forw_packet_aggr,
                                    int packet_len, bool direct_link)
 {
        unsigned char *skb_buff;
+       unsigned long new_direct_link_flag;
 
        skb_buff = skb_put(forw_packet_aggr->skb, packet_len);
        memcpy(skb_buff, packet_buff, packet_len);
@@ -461,9 +467,10 @@ static void batadv_iv_ogm_aggregate(struct batadv_forw_packet *forw_packet_aggr,
        forw_packet_aggr->num_packets++;
 
        /* save packet direct link flag status */
-       if (direct_link)
-               forw_packet_aggr->direct_link_flags |=
-                       (1 << forw_packet_aggr->num_packets);
+       if (direct_link) {
+               new_direct_link_flag = BIT(forw_packet_aggr->num_packets);
+               forw_packet_aggr->direct_link_flags |= new_direct_link_flag;
+       }
 }
 
 static void batadv_iv_ogm_queue_add(struct batadv_priv *bat_priv,
@@ -586,6 +593,8 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
        struct batadv_ogm_packet *batadv_ogm_packet;
        struct batadv_hard_iface *primary_if;
        int vis_server, tt_num_changes = 0;
+       uint32_t seqno;
+       uint8_t bandwidth;
 
        vis_server = atomic_read(&bat_priv->vis_mode);
        primary_if = batadv_primary_if_get_selected(bat_priv);
@@ -599,12 +608,12 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
        batadv_ogm_packet = (struct batadv_ogm_packet *)hard_iface->packet_buff;
 
        /* change sequence number to network order */
-       batadv_ogm_packet->seqno =
-                       htonl((uint32_t)atomic_read(&hard_iface->seqno));
+       seqno = (uint32_t)atomic_read(&hard_iface->seqno);
+       batadv_ogm_packet->seqno = htonl(seqno);
        atomic_inc(&hard_iface->seqno);
 
-       batadv_ogm_packet->ttvn = atomic_read(&bat_priv->ttvn);
-       batadv_ogm_packet->tt_crc = htons(bat_priv->tt_crc);
+       batadv_ogm_packet->ttvn = atomic_read(&bat_priv->tt.vn);
+       batadv_ogm_packet->tt_crc = htons(bat_priv->tt.local_crc);
        if (tt_num_changes >= 0)
                batadv_ogm_packet->tt_num_changes = tt_num_changes;
 
@@ -613,12 +622,13 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
        else
                batadv_ogm_packet->flags &= ~BATADV_VIS_SERVER;
 
-       if ((hard_iface == primary_if) &&
-           (atomic_read(&bat_priv->gw_mode) == BATADV_GW_MODE_SERVER))
-               batadv_ogm_packet->gw_flags =
-                               (uint8_t)atomic_read(&bat_priv->gw_bandwidth);
-       else
+       if (hard_iface == primary_if &&
+           atomic_read(&bat_priv->gw_mode) == BATADV_GW_MODE_SERVER) {
+               bandwidth = (uint8_t)atomic_read(&bat_priv->gw_bandwidth);
+               batadv_ogm_packet->gw_flags = bandwidth;
+       } else {
                batadv_ogm_packet->gw_flags = BATADV_NO_FLAGS;
+       }
 
        batadv_slide_own_bcast_window(hard_iface);
        batadv_iv_ogm_queue_add(bat_priv, hard_iface->packet_buff,
@@ -645,6 +655,7 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
        int if_num;
        uint8_t sum_orig, sum_neigh;
        uint8_t *neigh_addr;
+       uint8_t tq_avg;
 
        batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
                   "update_originator(): Searching and updating originator entry of received packet\n");
@@ -668,8 +679,8 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
                spin_lock_bh(&tmp_neigh_node->lq_update_lock);
                batadv_ring_buffer_set(tmp_neigh_node->tq_recv,
                                       &tmp_neigh_node->tq_index, 0);
-               tmp_neigh_node->tq_avg =
-                       batadv_ring_buffer_avg(tmp_neigh_node->tq_recv);
+               tq_avg = batadv_ring_buffer_avg(tmp_neigh_node->tq_recv);
+               tmp_neigh_node->tq_avg = tq_avg;
                spin_unlock_bh(&tmp_neigh_node->lq_update_lock);
        }
 
@@ -836,8 +847,10 @@ static int batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node,
        spin_unlock_bh(&orig_node->ogm_cnt_lock);
 
        /* pay attention to not get a value bigger than 100 % */
-       total_count = (orig_eq_count > neigh_rq_count ?
-                      neigh_rq_count : orig_eq_count);
+       if (orig_eq_count > neigh_rq_count)
+               total_count = neigh_rq_count;
+       else
+               total_count = orig_eq_count;
 
        /* if we have too few packets (too less data) we set tq_own to zero
         * if we receive too few packets it is not considered bidirectional
@@ -911,6 +924,7 @@ batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
        int set_mark, ret = -1;
        uint32_t seqno = ntohl(batadv_ogm_packet->seqno);
        uint8_t *neigh_addr;
+       uint8_t packet_count;
 
        orig_node = batadv_get_orig_node(bat_priv, batadv_ogm_packet->orig);
        if (!orig_node)
@@ -945,9 +959,9 @@ batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
                                                     tmp_neigh_node->real_bits,
                                                     seq_diff, set_mark);
 
-               tmp_neigh_node->real_packet_count =
-                       bitmap_weight(tmp_neigh_node->real_bits,
-                                     BATADV_TQ_LOCAL_WINDOW_SIZE);
+               packet_count = bitmap_weight(tmp_neigh_node->real_bits,
+                                            BATADV_TQ_LOCAL_WINDOW_SIZE);
+               tmp_neigh_node->real_packet_count = packet_count;
        }
        rcu_read_unlock();
 
@@ -1164,9 +1178,12 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
        /* if sender is a direct neighbor the sender mac equals
         * originator mac
         */
-       orig_neigh_node = (is_single_hop_neigh ?
-                          orig_node :
-                          batadv_get_orig_node(bat_priv, ethhdr->h_source));
+       if (is_single_hop_neigh)
+               orig_neigh_node = orig_node;
+       else
+               orig_neigh_node = batadv_get_orig_node(bat_priv,
+                                                      ethhdr->h_source);
+
        if (!orig_neigh_node)
                goto out;
 
@@ -1252,6 +1269,7 @@ static int batadv_iv_ogm_receive(struct sk_buff *skb,
        int buff_pos = 0, packet_len;
        unsigned char *tt_buff, *packet_buff;
        bool ret;
+       uint8_t *packet_pos;
 
        ret = batadv_check_management_packet(skb, if_incoming, BATADV_OGM_HLEN);
        if (!ret)
@@ -1282,8 +1300,8 @@ static int batadv_iv_ogm_receive(struct sk_buff *skb,
                buff_pos += BATADV_OGM_HLEN;
                buff_pos += batadv_tt_len(batadv_ogm_packet->tt_num_changes);
 
-               batadv_ogm_packet = (struct batadv_ogm_packet *)
-                                               (packet_buff + buff_pos);
+               packet_pos = packet_buff + buff_pos;
+               batadv_ogm_packet = (struct batadv_ogm_packet *)packet_pos;
        } while (batadv_iv_ogm_aggr_packet(buff_pos, packet_len,
                                           batadv_ogm_packet->tt_num_changes));
 
index 6705d35b17cef3351ed9c202f5ce2bb17df933a7..0a9084ad19a60f71a2f9f70a8879e7ada24ccf6c 100644 (file)
@@ -133,7 +133,7 @@ static void batadv_claim_free_ref(struct batadv_claim *claim)
 static struct batadv_claim *batadv_claim_hash_find(struct batadv_priv *bat_priv,
                                                   struct batadv_claim *data)
 {
-       struct batadv_hashtable *hash = bat_priv->claim_hash;
+       struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
        struct hlist_head *head;
        struct hlist_node *node;
        struct batadv_claim *claim;
@@ -174,7 +174,7 @@ static struct batadv_backbone_gw *
 batadv_backbone_hash_find(struct batadv_priv *bat_priv,
                          uint8_t *addr, short vid)
 {
-       struct batadv_hashtable *hash = bat_priv->backbone_hash;
+       struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
        struct hlist_head *head;
        struct hlist_node *node;
        struct batadv_backbone_gw search_entry, *backbone_gw;
@@ -218,7 +218,7 @@ batadv_bla_del_backbone_claims(struct batadv_backbone_gw *backbone_gw)
        int i;
        spinlock_t *list_lock;  /* protects write access to the hash lists */
 
-       hash = backbone_gw->bat_priv->claim_hash;
+       hash = backbone_gw->bat_priv->bla.claim_hash;
        if (!hash)
                return;
 
@@ -265,7 +265,7 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, uint8_t *mac,
        if (!primary_if)
                return;
 
-       memcpy(&local_claim_dest, &bat_priv->claim_dest,
+       memcpy(&local_claim_dest, &bat_priv->bla.claim_dest,
               sizeof(local_claim_dest));
        local_claim_dest.type = claimtype;
 
@@ -281,7 +281,7 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, uint8_t *mac,
                         NULL,
                         /* Ethernet SRC/HW SRC:  originator mac */
                         primary_if->net_dev->dev_addr,
-                        /* HW DST: FF:43:05:XX:00:00
+                        /* HW DST: FF:43:05:XX:YY:YY
                          * with XX   = claim type
                          * and YY:YY = group id
                          */
@@ -295,7 +295,7 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, uint8_t *mac,
 
        /* now we pretend that the client would have sent this ... */
        switch (claimtype) {
-       case BATADV_CLAIM_TYPE_ADD:
+       case BATADV_CLAIM_TYPE_CLAIM:
                /* normal claim frame
                 * set Ethernet SRC to the clients mac
                 */
@@ -303,7 +303,7 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, uint8_t *mac,
                batadv_dbg(BATADV_DBG_BLA, bat_priv,
                           "bla_send_claim(): CLAIM %pM on vid %d\n", mac, vid);
                break;
-       case BATADV_CLAIM_TYPE_DEL:
+       case BATADV_CLAIM_TYPE_UNCLAIM:
                /* unclaim frame
                 * set HW SRC to the clients mac
                 */
@@ -323,7 +323,8 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, uint8_t *mac,
                break;
        case BATADV_CLAIM_TYPE_REQUEST:
                /* request frame
-                * set HW SRC to the special mac containg the crc
+                * set HW SRC and header destination to the receiving backbone
+                * gws mac
                 */
                memcpy(hw_src, mac, ETH_ALEN);
                memcpy(ethhdr->h_dest, mac, ETH_ALEN);
@@ -339,8 +340,9 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, uint8_t *mac,
 
        skb_reset_mac_header(skb);
        skb->protocol = eth_type_trans(skb, soft_iface);
-       bat_priv->stats.rx_packets++;
-       bat_priv->stats.rx_bytes += skb->len + ETH_HLEN;
+       batadv_inc_counter(bat_priv, BATADV_CNT_RX);
+       batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
+                          skb->len + ETH_HLEN);
        soft_iface->last_rx = jiffies;
 
        netif_rx(skb);
@@ -389,7 +391,7 @@ batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, uint8_t *orig,
        /* one for the hash, one for returning */
        atomic_set(&entry->refcount, 2);
 
-       hash_added = batadv_hash_add(bat_priv->backbone_hash,
+       hash_added = batadv_hash_add(bat_priv->bla.backbone_hash,
                                     batadv_compare_backbone_gw,
                                     batadv_choose_backbone_gw, entry,
                                     &entry->hash_entry);
@@ -456,7 +458,7 @@ static void batadv_bla_answer_request(struct batadv_priv *bat_priv,
        if (!backbone_gw)
                return;
 
-       hash = bat_priv->claim_hash;
+       hash = bat_priv->bla.claim_hash;
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
 
@@ -467,7 +469,7 @@ static void batadv_bla_answer_request(struct batadv_priv *bat_priv,
                                continue;
 
                        batadv_bla_send_claim(bat_priv, claim->addr, claim->vid,
-                                             BATADV_CLAIM_TYPE_ADD);
+                                             BATADV_CLAIM_TYPE_CLAIM);
                }
                rcu_read_unlock();
        }
@@ -497,7 +499,7 @@ static void batadv_bla_send_request(struct batadv_backbone_gw *backbone_gw)
 
        /* no local broadcasts should be sent or received, for now. */
        if (!atomic_read(&backbone_gw->request_sent)) {
-               atomic_inc(&backbone_gw->bat_priv->bla_num_requests);
+               atomic_inc(&backbone_gw->bat_priv->bla.num_requests);
                atomic_set(&backbone_gw->request_sent, 1);
        }
 }
@@ -557,7 +559,7 @@ static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
                batadv_dbg(BATADV_DBG_BLA, bat_priv,
                           "bla_add_claim(): adding new entry %pM, vid %d to hash ...\n",
                           mac, vid);
-               hash_added = batadv_hash_add(bat_priv->claim_hash,
+               hash_added = batadv_hash_add(bat_priv->bla.claim_hash,
                                             batadv_compare_claim,
                                             batadv_choose_claim, claim,
                                             &claim->hash_entry);
@@ -577,8 +579,7 @@ static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
                           "bla_add_claim(): changing ownership for %pM, vid %d\n",
                           mac, vid);
 
-               claim->backbone_gw->crc ^=
-                       crc16(0, claim->addr, ETH_ALEN);
+               claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
                batadv_backbone_gw_free_ref(claim->backbone_gw);
 
        }
@@ -610,7 +611,7 @@ static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
        batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_del_claim(): %pM, vid %d\n",
                   mac, vid);
 
-       batadv_hash_remove(bat_priv->claim_hash, batadv_compare_claim,
+       batadv_hash_remove(bat_priv->bla.claim_hash, batadv_compare_claim,
                           batadv_choose_claim, claim);
        batadv_claim_free_ref(claim); /* reference from the hash is gone */
 
@@ -657,7 +658,7 @@ static int batadv_handle_announce(struct batadv_priv *bat_priv,
                 * we can allow traffic again.
                 */
                if (atomic_read(&backbone_gw->request_sent)) {
-                       atomic_dec(&backbone_gw->bat_priv->bla_num_requests);
+                       atomic_dec(&backbone_gw->bat_priv->bla.num_requests);
                        atomic_set(&backbone_gw->request_sent, 0);
                }
        }
@@ -702,7 +703,7 @@ static int batadv_handle_unclaim(struct batadv_priv *bat_priv,
        if (primary_if && batadv_compare_eth(backbone_addr,
                                             primary_if->net_dev->dev_addr))
                batadv_bla_send_claim(bat_priv, claim_addr, vid,
-                                     BATADV_CLAIM_TYPE_DEL);
+                                     BATADV_CLAIM_TYPE_UNCLAIM);
 
        backbone_gw = batadv_backbone_hash_find(bat_priv, backbone_addr, vid);
 
@@ -738,7 +739,7 @@ static int batadv_handle_claim(struct batadv_priv *bat_priv,
        batadv_bla_add_claim(bat_priv, claim_addr, vid, backbone_gw);
        if (batadv_compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
                batadv_bla_send_claim(bat_priv, claim_addr, vid,
-                                     BATADV_CLAIM_TYPE_ADD);
+                                     BATADV_CLAIM_TYPE_CLAIM);
 
        /* TODO: we could call something like tt_local_del() here. */
 
@@ -772,7 +773,7 @@ static int batadv_check_claim_group(struct batadv_priv *bat_priv,
        struct batadv_bla_claim_dst *bla_dst, *bla_dst_own;
 
        bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
-       bla_dst_own = &bat_priv->claim_dest;
+       bla_dst_own = &bat_priv->bla.claim_dest;
 
        /* check if it is a claim packet in general */
        if (memcmp(bla_dst->magic, bla_dst_own->magic,
@@ -783,12 +784,12 @@ static int batadv_check_claim_group(struct batadv_priv *bat_priv,
         * otherwise assume it is in the hw_src
         */
        switch (bla_dst->type) {
-       case BATADV_CLAIM_TYPE_ADD:
+       case BATADV_CLAIM_TYPE_CLAIM:
                backbone_addr = hw_src;
                break;
        case BATADV_CLAIM_TYPE_REQUEST:
        case BATADV_CLAIM_TYPE_ANNOUNCE:
-       case BATADV_CLAIM_TYPE_DEL:
+       case BATADV_CLAIM_TYPE_UNCLAIM:
                backbone_addr = ethhdr->h_source;
                break;
        default:
@@ -904,12 +905,12 @@ static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
 
        /* check for the different types of claim frames ... */
        switch (bla_dst->type) {
-       case BATADV_CLAIM_TYPE_ADD:
+       case BATADV_CLAIM_TYPE_CLAIM:
                if (batadv_handle_claim(bat_priv, primary_if, hw_src,
                                        ethhdr->h_source, vid))
                        return 1;
                break;
-       case BATADV_CLAIM_TYPE_DEL:
+       case BATADV_CLAIM_TYPE_UNCLAIM:
                if (batadv_handle_unclaim(bat_priv, primary_if,
                                          ethhdr->h_source, hw_src, vid))
                        return 1;
@@ -945,7 +946,7 @@ static void batadv_bla_purge_backbone_gw(struct batadv_priv *bat_priv, int now)
        spinlock_t *list_lock;  /* protects write access to the hash lists */
        int i;
 
-       hash = bat_priv->backbone_hash;
+       hash = bat_priv->bla.backbone_hash;
        if (!hash)
                return;
 
@@ -969,7 +970,7 @@ static void batadv_bla_purge_backbone_gw(struct batadv_priv *bat_priv, int now)
 purge_now:
                        /* don't wait for the pending request anymore */
                        if (atomic_read(&backbone_gw->request_sent))
-                               atomic_dec(&bat_priv->bla_num_requests);
+                               atomic_dec(&bat_priv->bla.num_requests);
 
                        batadv_bla_del_backbone_claims(backbone_gw);
 
@@ -999,7 +1000,7 @@ static void batadv_bla_purge_claims(struct batadv_priv *bat_priv,
        struct batadv_hashtable *hash;
        int i;
 
-       hash = bat_priv->claim_hash;
+       hash = bat_priv->bla.claim_hash;
        if (!hash)
                return;
 
@@ -1046,11 +1047,12 @@ void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
        struct hlist_node *node;
        struct hlist_head *head;
        struct batadv_hashtable *hash;
+       __be16 group;
        int i;
 
        /* reset bridge loop avoidance group id */
-       bat_priv->claim_dest.group =
-               htons(crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN));
+       group = htons(crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN));
+       bat_priv->bla.claim_dest.group = group;
 
        if (!oldif) {
                batadv_bla_purge_claims(bat_priv, NULL, 1);
@@ -1058,7 +1060,7 @@ void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
                return;
        }
 
-       hash = bat_priv->backbone_hash;
+       hash = bat_priv->bla.backbone_hash;
        if (!hash)
                return;
 
@@ -1088,8 +1090,8 @@ void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
 /* (re)start the timer */
 static void batadv_bla_start_timer(struct batadv_priv *bat_priv)
 {
-       INIT_DELAYED_WORK(&bat_priv->bla_work, batadv_bla_periodic_work);
-       queue_delayed_work(batadv_event_workqueue, &bat_priv->bla_work,
+       INIT_DELAYED_WORK(&bat_priv->bla.work, batadv_bla_periodic_work);
+       queue_delayed_work(batadv_event_workqueue, &bat_priv->bla.work,
                           msecs_to_jiffies(BATADV_BLA_PERIOD_LENGTH));
 }
 
@@ -1099,9 +1101,9 @@ static void batadv_bla_start_timer(struct batadv_priv *bat_priv)
  */
 static void batadv_bla_periodic_work(struct work_struct *work)
 {
-       struct delayed_work *delayed_work =
-               container_of(work, struct delayed_work, work);
+       struct delayed_work *delayed_work;
        struct batadv_priv *bat_priv;
+       struct batadv_priv_bla *priv_bla;
        struct hlist_node *node;
        struct hlist_head *head;
        struct batadv_backbone_gw *backbone_gw;
@@ -1109,7 +1111,9 @@ static void batadv_bla_periodic_work(struct work_struct *work)
        struct batadv_hard_iface *primary_if;
        int i;
 
-       bat_priv = container_of(delayed_work, struct batadv_priv, bla_work);
+       delayed_work = container_of(work, struct delayed_work, work);
+       priv_bla = container_of(delayed_work, struct batadv_priv_bla, work);
+       bat_priv = container_of(priv_bla, struct batadv_priv, bla);
        primary_if = batadv_primary_if_get_selected(bat_priv);
        if (!primary_if)
                goto out;
@@ -1120,7 +1124,7 @@ static void batadv_bla_periodic_work(struct work_struct *work)
        if (!atomic_read(&bat_priv->bridge_loop_avoidance))
                goto out;
 
-       hash = bat_priv->backbone_hash;
+       hash = bat_priv->bla.backbone_hash;
        if (!hash)
                goto out;
 
@@ -1160,40 +1164,41 @@ int batadv_bla_init(struct batadv_priv *bat_priv)
        int i;
        uint8_t claim_dest[ETH_ALEN] = {0xff, 0x43, 0x05, 0x00, 0x00, 0x00};
        struct batadv_hard_iface *primary_if;
+       uint16_t crc;
+       unsigned long entrytime;
 
        batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hash registering\n");
 
        /* setting claim destination address */
-       memcpy(&bat_priv->claim_dest.magic, claim_dest, 3);
-       bat_priv->claim_dest.type = 0;
+       memcpy(&bat_priv->bla.claim_dest.magic, claim_dest, 3);
+       bat_priv->bla.claim_dest.type = 0;
        primary_if = batadv_primary_if_get_selected(bat_priv);
        if (primary_if) {
-               bat_priv->claim_dest.group =
-                       htons(crc16(0, primary_if->net_dev->dev_addr,
-                                   ETH_ALEN));
+               crc = crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN);
+               bat_priv->bla.claim_dest.group = htons(crc);
                batadv_hardif_free_ref(primary_if);
        } else {
-               bat_priv->claim_dest.group = 0; /* will be set later */
+               bat_priv->bla.claim_dest.group = 0; /* will be set later */
        }
 
        /* initialize the duplicate list */
+       entrytime = jiffies - msecs_to_jiffies(BATADV_DUPLIST_TIMEOUT);
        for (i = 0; i < BATADV_DUPLIST_SIZE; i++)
-               bat_priv->bcast_duplist[i].entrytime =
-                       jiffies - msecs_to_jiffies(BATADV_DUPLIST_TIMEOUT);
-       bat_priv->bcast_duplist_curr = 0;
+               bat_priv->bla.bcast_duplist[i].entrytime = entrytime;
+       bat_priv->bla.bcast_duplist_curr = 0;
 
-       if (bat_priv->claim_hash)
+       if (bat_priv->bla.claim_hash)
                return 0;
 
-       bat_priv->claim_hash = batadv_hash_new(128);
-       bat_priv->backbone_hash = batadv_hash_new(32);
+       bat_priv->bla.claim_hash = batadv_hash_new(128);
+       bat_priv->bla.backbone_hash = batadv_hash_new(32);
 
-       if (!bat_priv->claim_hash || !bat_priv->backbone_hash)
+       if (!bat_priv->bla.claim_hash || !bat_priv->bla.backbone_hash)
                return -ENOMEM;
 
-       batadv_hash_set_lock_class(bat_priv->claim_hash,
+       batadv_hash_set_lock_class(bat_priv->bla.claim_hash,
                                   &batadv_claim_hash_lock_class_key);
-       batadv_hash_set_lock_class(bat_priv->backbone_hash,
+       batadv_hash_set_lock_class(bat_priv->bla.backbone_hash,
                                   &batadv_backbone_hash_lock_class_key);
 
        batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hashes initialized\n");
@@ -1234,8 +1239,9 @@ int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
        crc = crc16(0, content, length);
 
        for (i = 0; i < BATADV_DUPLIST_SIZE; i++) {
-               curr = (bat_priv->bcast_duplist_curr + i) % BATADV_DUPLIST_SIZE;
-               entry = &bat_priv->bcast_duplist[curr];
+               curr = (bat_priv->bla.bcast_duplist_curr + i);
+               curr %= BATADV_DUPLIST_SIZE;
+               entry = &bat_priv->bla.bcast_duplist[curr];
 
                /* we can stop searching if the entry is too old ;
                 * later entries will be even older
@@ -1256,13 +1262,13 @@ int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
                return 1;
        }
        /* not found, add a new entry (overwrite the oldest entry) */
-       curr = (bat_priv->bcast_duplist_curr + BATADV_DUPLIST_SIZE - 1);
+       curr = (bat_priv->bla.bcast_duplist_curr + BATADV_DUPLIST_SIZE - 1);
        curr %= BATADV_DUPLIST_SIZE;
-       entry = &bat_priv->bcast_duplist[curr];
+       entry = &bat_priv->bla.bcast_duplist[curr];
        entry->crc = crc;
        entry->entrytime = jiffies;
        memcpy(entry->orig, bcast_packet->orig, ETH_ALEN);
-       bat_priv->bcast_duplist_curr = curr;
+       bat_priv->bla.bcast_duplist_curr = curr;
 
        /* allow it, its the first occurence. */
        return 0;
@@ -1279,7 +1285,7 @@ int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
  */
 int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig)
 {
-       struct batadv_hashtable *hash = bat_priv->backbone_hash;
+       struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
        struct hlist_head *head;
        struct hlist_node *node;
        struct batadv_backbone_gw *backbone_gw;
@@ -1339,8 +1345,7 @@ int batadv_bla_is_backbone_gw(struct sk_buff *skb,
                if (!pskb_may_pull(skb, hdr_size + sizeof(struct vlan_ethhdr)))
                        return 0;
 
-               vhdr = (struct vlan_ethhdr *)(((uint8_t *)skb->data) +
-                                             hdr_size);
+               vhdr = (struct vlan_ethhdr *)(skb->data + hdr_size);
                vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
        }
 
@@ -1359,18 +1364,18 @@ void batadv_bla_free(struct batadv_priv *bat_priv)
 {
        struct batadv_hard_iface *primary_if;
 
-       cancel_delayed_work_sync(&bat_priv->bla_work);
+       cancel_delayed_work_sync(&bat_priv->bla.work);
        primary_if = batadv_primary_if_get_selected(bat_priv);
 
-       if (bat_priv->claim_hash) {
+       if (bat_priv->bla.claim_hash) {
                batadv_bla_purge_claims(bat_priv, primary_if, 1);
-               batadv_hash_destroy(bat_priv->claim_hash);
-               bat_priv->claim_hash = NULL;
+               batadv_hash_destroy(bat_priv->bla.claim_hash);
+               bat_priv->bla.claim_hash = NULL;
        }
-       if (bat_priv->backbone_hash) {
+       if (bat_priv->bla.backbone_hash) {
                batadv_bla_purge_backbone_gw(bat_priv, 1);
-               batadv_hash_destroy(bat_priv->backbone_hash);
-               bat_priv->backbone_hash = NULL;
+               batadv_hash_destroy(bat_priv->bla.backbone_hash);
+               bat_priv->bla.backbone_hash = NULL;
        }
        if (primary_if)
                batadv_hardif_free_ref(primary_if);
@@ -1409,7 +1414,7 @@ int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid,
                goto allow;
 
 
-       if (unlikely(atomic_read(&bat_priv->bla_num_requests)))
+       if (unlikely(atomic_read(&bat_priv->bla.num_requests)))
                /* don't allow broadcasts while requests are in flight */
                if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast)
                        goto handled;
@@ -1508,7 +1513,7 @@ int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid)
 
        ethhdr = (struct ethhdr *)skb_mac_header(skb);
 
-       if (unlikely(atomic_read(&bat_priv->bla_num_requests)))
+       if (unlikely(atomic_read(&bat_priv->bla.num_requests)))
                /* don't allow broadcasts while requests are in flight */
                if (is_multicast_ether_addr(ethhdr->h_dest))
                        goto handled;
@@ -1564,7 +1569,7 @@ int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
 {
        struct net_device *net_dev = (struct net_device *)seq->private;
        struct batadv_priv *bat_priv = netdev_priv(net_dev);
-       struct batadv_hashtable *hash = bat_priv->claim_hash;
+       struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
        struct batadv_claim *claim;
        struct batadv_hard_iface *primary_if;
        struct hlist_node *node;
@@ -1593,7 +1598,7 @@ int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
        seq_printf(seq,
                   "Claims announced for the mesh %s (orig %pM, group id %04x)\n",
                   net_dev->name, primary_addr,
-                  ntohs(bat_priv->claim_dest.group));
+                  ntohs(bat_priv->bla.claim_dest.group));
        seq_printf(seq, "   %-17s    %-5s    %-17s [o] (%-4s)\n",
                   "Client", "VID", "Originator", "CRC");
        for (i = 0; i < hash->size; i++) {
@@ -1616,3 +1621,68 @@ out:
                batadv_hardif_free_ref(primary_if);
        return ret;
 }
+
+int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq, void *offset)
+{
+       struct net_device *net_dev = (struct net_device *)seq->private;
+       struct batadv_priv *bat_priv = netdev_priv(net_dev);
+       struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
+       struct batadv_backbone_gw *backbone_gw;
+       struct batadv_hard_iface *primary_if;
+       struct hlist_node *node;
+       struct hlist_head *head;
+       int secs, msecs;
+       uint32_t i;
+       bool is_own;
+       int ret = 0;
+       uint8_t *primary_addr;
+
+       primary_if = batadv_primary_if_get_selected(bat_priv);
+       if (!primary_if) {
+               ret = seq_printf(seq,
+                                "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
+                                net_dev->name);
+               goto out;
+       }
+
+       if (primary_if->if_status != BATADV_IF_ACTIVE) {
+               ret = seq_printf(seq,
+                                "BATMAN mesh %s disabled - primary interface not active\n",
+                                net_dev->name);
+               goto out;
+       }
+
+       primary_addr = primary_if->net_dev->dev_addr;
+       seq_printf(seq,
+                  "Backbones announced for the mesh %s (orig %pM, group id %04x)\n",
+                  net_dev->name, primary_addr,
+                  ntohs(bat_priv->bla.claim_dest.group));
+       seq_printf(seq, "   %-17s    %-5s %-9s (%-4s)\n",
+                  "Originator", "VID", "last seen", "CRC");
+       for (i = 0; i < hash->size; i++) {
+               head = &hash->table[i];
+
+               rcu_read_lock();
+               hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
+                       msecs = jiffies_to_msecs(jiffies -
+                                                backbone_gw->lasttime);
+                       secs = msecs / 1000;
+                       msecs = msecs % 1000;
+
+                       is_own = batadv_compare_eth(backbone_gw->orig,
+                                                   primary_addr);
+                       if (is_own)
+                               continue;
+
+                       seq_printf(seq,
+                                  " * %pM on % 5d % 4i.%03is (%04x)\n",
+                                  backbone_gw->orig, backbone_gw->vid,
+                                  secs, msecs, backbone_gw->crc);
+               }
+               rcu_read_unlock();
+       }
+out:
+       if (primary_if)
+               batadv_hardif_free_ref(primary_if);
+       return ret;
+}
index 563cfbf94a7f5110c9dcd5eeffb6ff10da32b86b..789cb73bde67acf8f19375d67e8c0ec2322f509f 100644 (file)
@@ -27,6 +27,8 @@ int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid);
 int batadv_bla_is_backbone_gw(struct sk_buff *skb,
                              struct batadv_orig_node *orig_node, int hdr_size);
 int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset);
+int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq,
+                                            void *offset);
 int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig);
 int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
                                   struct batadv_bcast_packet *bcast_packet,
@@ -41,8 +43,7 @@ void batadv_bla_free(struct batadv_priv *bat_priv);
 #else /* ifdef CONFIG_BATMAN_ADV_BLA */
 
 static inline int batadv_bla_rx(struct batadv_priv *bat_priv,
-                               struct sk_buff *skb, short vid,
-                               bool is_bcast)
+                               struct sk_buff *skb, short vid, bool is_bcast)
 {
        return 0;
 }
@@ -66,6 +67,12 @@ static inline int batadv_bla_claim_table_seq_print_text(struct seq_file *seq,
        return 0;
 }
 
+static inline int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq,
+                                                          void *offset)
+{
+       return 0;
+}
+
 static inline int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv,
                                                 uint8_t *orig)
 {
index 34fbb1667bcd5194ae391abf8ca3a03944246c8d..391d4fb2026f9acf7ce1b1f85a0c2f5366152c35 100644 (file)
@@ -267,6 +267,15 @@ static int batadv_bla_claim_table_open(struct inode *inode, struct file *file)
        return single_open(file, batadv_bla_claim_table_seq_print_text,
                           net_dev);
 }
+
+static int batadv_bla_backbone_table_open(struct inode *inode,
+                                         struct file *file)
+{
+       struct net_device *net_dev = (struct net_device *)inode->i_private;
+       return single_open(file, batadv_bla_backbone_table_seq_print_text,
+                          net_dev);
+}
+
 #endif
 
 static int batadv_transtable_local_open(struct inode *inode, struct file *file)
@@ -305,6 +314,8 @@ static BATADV_DEBUGINFO(transtable_global, S_IRUGO,
                        batadv_transtable_global_open);
 #ifdef CONFIG_BATMAN_ADV_BLA
 static BATADV_DEBUGINFO(bla_claim_table, S_IRUGO, batadv_bla_claim_table_open);
+static BATADV_DEBUGINFO(bla_backbone_table, S_IRUGO,
+                       batadv_bla_backbone_table_open);
 #endif
 static BATADV_DEBUGINFO(transtable_local, S_IRUGO,
                        batadv_transtable_local_open);
@@ -316,6 +327,7 @@ static struct batadv_debuginfo *batadv_mesh_debuginfos[] = {
        &batadv_debuginfo_transtable_global,
 #ifdef CONFIG_BATMAN_ADV_BLA
        &batadv_debuginfo_bla_claim_table,
+       &batadv_debuginfo_bla_backbone_table,
 #endif
        &batadv_debuginfo_transtable_local,
        &batadv_debuginfo_vis_data,
index fc866f2e4528c71c82ba125c23fb48fbd430b390..15d67abc10a43b329ef3cf561bd3fdf456c3c286 100644 (file)
@@ -48,7 +48,7 @@ batadv_gw_get_selected_gw_node(struct batadv_priv *bat_priv)
        struct batadv_gw_node *gw_node;
 
        rcu_read_lock();
-       gw_node = rcu_dereference(bat_priv->curr_gw);
+       gw_node = rcu_dereference(bat_priv->gw.curr_gw);
        if (!gw_node)
                goto out;
 
@@ -91,23 +91,23 @@ static void batadv_gw_select(struct batadv_priv *bat_priv,
 {
        struct batadv_gw_node *curr_gw_node;
 
-       spin_lock_bh(&bat_priv->gw_list_lock);
+       spin_lock_bh(&bat_priv->gw.list_lock);
 
        if (new_gw_node && !atomic_inc_not_zero(&new_gw_node->refcount))
                new_gw_node = NULL;
 
-       curr_gw_node = rcu_dereference_protected(bat_priv->curr_gw, 1);
-       rcu_assign_pointer(bat_priv->curr_gw, new_gw_node);
+       curr_gw_node = rcu_dereference_protected(bat_priv->gw.curr_gw, 1);
+       rcu_assign_pointer(bat_priv->gw.curr_gw, new_gw_node);
 
        if (curr_gw_node)
                batadv_gw_node_free_ref(curr_gw_node);
 
-       spin_unlock_bh(&bat_priv->gw_list_lock);
+       spin_unlock_bh(&bat_priv->gw.list_lock);
 }
 
 void batadv_gw_deselect(struct batadv_priv *bat_priv)
 {
-       atomic_set(&bat_priv->gw_reselect, 1);
+       atomic_set(&bat_priv->gw.reselect, 1);
 }
 
 static struct batadv_gw_node *
@@ -117,12 +117,17 @@ batadv_gw_get_best_gw_node(struct batadv_priv *bat_priv)
        struct hlist_node *node;
        struct batadv_gw_node *gw_node, *curr_gw = NULL;
        uint32_t max_gw_factor = 0, tmp_gw_factor = 0;
+       uint32_t gw_divisor;
        uint8_t max_tq = 0;
        int down, up;
+       uint8_t tq_avg;
        struct batadv_orig_node *orig_node;
 
+       gw_divisor = BATADV_TQ_LOCAL_WINDOW_SIZE * BATADV_TQ_LOCAL_WINDOW_SIZE;
+       gw_divisor *= 64;
+
        rcu_read_lock();
-       hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) {
+       hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw.list, list) {
                if (gw_node->deleted)
                        continue;
 
@@ -134,19 +139,19 @@ batadv_gw_get_best_gw_node(struct batadv_priv *bat_priv)
                if (!atomic_inc_not_zero(&gw_node->refcount))
                        goto next;
 
+               tq_avg = router->tq_avg;
+
                switch (atomic_read(&bat_priv->gw_sel_class)) {
                case 1: /* fast connection */
                        batadv_gw_bandwidth_to_kbit(orig_node->gw_flags,
                                                    &down, &up);
 
-                       tmp_gw_factor = (router->tq_avg * router->tq_avg *
-                                        down * 100 * 100) /
-                                        (BATADV_TQ_LOCAL_WINDOW_SIZE *
-                                         BATADV_TQ_LOCAL_WINDOW_SIZE * 64);
+                       tmp_gw_factor = tq_avg * tq_avg * down * 100 * 100;
+                       tmp_gw_factor /= gw_divisor;
 
                        if ((tmp_gw_factor > max_gw_factor) ||
                            ((tmp_gw_factor == max_gw_factor) &&
-                            (router->tq_avg > max_tq))) {
+                            (tq_avg > max_tq))) {
                                if (curr_gw)
                                        batadv_gw_node_free_ref(curr_gw);
                                curr_gw = gw_node;
@@ -161,7 +166,7 @@ batadv_gw_get_best_gw_node(struct batadv_priv *bat_priv)
                          *     soon as a better gateway appears which has
                          *     $routing_class more tq points)
                          */
-                       if (router->tq_avg > max_tq) {
+                       if (tq_avg > max_tq) {
                                if (curr_gw)
                                        batadv_gw_node_free_ref(curr_gw);
                                curr_gw = gw_node;
@@ -170,8 +175,8 @@ batadv_gw_get_best_gw_node(struct batadv_priv *bat_priv)
                        break;
                }
 
-               if (router->tq_avg > max_tq)
-                       max_tq = router->tq_avg;
+               if (tq_avg > max_tq)
+                       max_tq = tq_avg;
 
                if (tmp_gw_factor > max_gw_factor)
                        max_gw_factor = tmp_gw_factor;
@@ -202,7 +207,7 @@ void batadv_gw_election(struct batadv_priv *bat_priv)
 
        curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
 
-       if (!batadv_atomic_dec_not_zero(&bat_priv->gw_reselect) && curr_gw)
+       if (!batadv_atomic_dec_not_zero(&bat_priv->gw.reselect) && curr_gw)
                goto out;
 
        next_gw = batadv_gw_get_best_gw_node(bat_priv);
@@ -321,9 +326,9 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv,
        gw_node->orig_node = orig_node;
        atomic_set(&gw_node->refcount, 1);
 
-       spin_lock_bh(&bat_priv->gw_list_lock);
-       hlist_add_head_rcu(&gw_node->list, &bat_priv->gw_list);
-       spin_unlock_bh(&bat_priv->gw_list_lock);
+       spin_lock_bh(&bat_priv->gw.list_lock);
+       hlist_add_head_rcu(&gw_node->list, &bat_priv->gw.list);
+       spin_unlock_bh(&bat_priv->gw.list_lock);
 
        batadv_gw_bandwidth_to_kbit(new_gwflags, &down, &up);
        batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
@@ -350,7 +355,7 @@ void batadv_gw_node_update(struct batadv_priv *bat_priv,
        curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
 
        rcu_read_lock();
-       hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) {
+       hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw.list, list) {
                if (gw_node->orig_node != orig_node)
                        continue;
 
@@ -404,10 +409,10 @@ void batadv_gw_node_purge(struct batadv_priv *bat_priv)
 
        curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
 
-       spin_lock_bh(&bat_priv->gw_list_lock);
+       spin_lock_bh(&bat_priv->gw.list_lock);
 
        hlist_for_each_entry_safe(gw_node, node, node_tmp,
-                                 &bat_priv->gw_list, list) {
+                                 &bat_priv->gw.list, list) {
                if (((!gw_node->deleted) ||
                     (time_before(jiffies, gw_node->deleted + timeout))) &&
                    atomic_read(&bat_priv->mesh_state) == BATADV_MESH_ACTIVE)
@@ -420,7 +425,7 @@ void batadv_gw_node_purge(struct batadv_priv *bat_priv)
                batadv_gw_node_free_ref(gw_node);
        }
 
-       spin_unlock_bh(&bat_priv->gw_list_lock);
+       spin_unlock_bh(&bat_priv->gw.list_lock);
 
        /* gw_deselect() needs to acquire the gw_list_lock */
        if (do_deselect)
@@ -496,7 +501,7 @@ int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset)
                   primary_if->net_dev->dev_addr, net_dev->name);
 
        rcu_read_lock();
-       hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) {
+       hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw.list, list) {
                if (gw_node->deleted)
                        continue;
 
index 282bf6e9353e6ad5ab657e0c2722ec9635701f7c..d112fd6750b0564e1b08232482eb8c1261b749ad 100644 (file)
@@ -103,13 +103,14 @@ static void batadv_primary_if_update_addr(struct batadv_priv *bat_priv,
 {
        struct batadv_vis_packet *vis_packet;
        struct batadv_hard_iface *primary_if;
+       struct sk_buff *skb;
 
        primary_if = batadv_primary_if_get_selected(bat_priv);
        if (!primary_if)
                goto out;
 
-       vis_packet = (struct batadv_vis_packet *)
-                               bat_priv->my_vis_info->skb_packet->data;
+       skb = bat_priv->vis.my_info->skb_packet;
+       vis_packet = (struct batadv_vis_packet *)skb->data;
        memcpy(vis_packet->vis_orig, primary_if->net_dev->dev_addr, ETH_ALEN);
        memcpy(vis_packet->sender_orig,
               primary_if->net_dev->dev_addr, ETH_ALEN);
@@ -313,7 +314,13 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
        hard_iface->if_num = bat_priv->num_ifaces;
        bat_priv->num_ifaces++;
        hard_iface->if_status = BATADV_IF_INACTIVE;
-       batadv_orig_hash_add_if(hard_iface, bat_priv->num_ifaces);
+       ret = batadv_orig_hash_add_if(hard_iface, bat_priv->num_ifaces);
+       if (ret < 0) {
+               bat_priv->bat_algo_ops->bat_iface_disable(hard_iface);
+               bat_priv->num_ifaces--;
+               hard_iface->if_status = BATADV_IF_NOT_IN_USE;
+               goto err_dev;
+       }
 
        hard_iface->batman_adv_ptype.type = ethertype;
        hard_iface->batman_adv_ptype.func = batadv_batman_skb_recv;
index 13c88b25ab319a15260e77d12c65957180c46b9c..b4aa470bc4a6ec675d84fb373a973dd4d224cd5d 100644 (file)
@@ -58,9 +58,6 @@ static int __init batadv_init(void)
 
        batadv_iv_init();
 
-       /* the name should not be longer than 10 chars - see
-        * http://lwn.net/Articles/23634/
-        */
        batadv_event_workqueue = create_singlethread_workqueue("bat_events");
 
        if (!batadv_event_workqueue)
@@ -97,20 +94,20 @@ int batadv_mesh_init(struct net_device *soft_iface)
 
        spin_lock_init(&bat_priv->forw_bat_list_lock);
        spin_lock_init(&bat_priv->forw_bcast_list_lock);
-       spin_lock_init(&bat_priv->tt_changes_list_lock);
-       spin_lock_init(&bat_priv->tt_req_list_lock);
-       spin_lock_init(&bat_priv->tt_roam_list_lock);
-       spin_lock_init(&bat_priv->tt_buff_lock);
-       spin_lock_init(&bat_priv->gw_list_lock);
-       spin_lock_init(&bat_priv->vis_hash_lock);
-       spin_lock_init(&bat_priv->vis_list_lock);
+       spin_lock_init(&bat_priv->tt.changes_list_lock);
+       spin_lock_init(&bat_priv->tt.req_list_lock);
+       spin_lock_init(&bat_priv->tt.roam_list_lock);
+       spin_lock_init(&bat_priv->tt.last_changeset_lock);
+       spin_lock_init(&bat_priv->gw.list_lock);
+       spin_lock_init(&bat_priv->vis.hash_lock);
+       spin_lock_init(&bat_priv->vis.list_lock);
 
        INIT_HLIST_HEAD(&bat_priv->forw_bat_list);
        INIT_HLIST_HEAD(&bat_priv->forw_bcast_list);
-       INIT_HLIST_HEAD(&bat_priv->gw_list);
-       INIT_LIST_HEAD(&bat_priv->tt_changes_list);
-       INIT_LIST_HEAD(&bat_priv->tt_req_list);
-       INIT_LIST_HEAD(&bat_priv->tt_roam_list);
+       INIT_HLIST_HEAD(&bat_priv->gw.list);
+       INIT_LIST_HEAD(&bat_priv->tt.changes_list);
+       INIT_LIST_HEAD(&bat_priv->tt.req_list);
+       INIT_LIST_HEAD(&bat_priv->tt.roam_list);
 
        ret = batadv_originator_init(bat_priv);
        if (ret < 0)
@@ -131,7 +128,7 @@ int batadv_mesh_init(struct net_device *soft_iface)
        if (ret < 0)
                goto err;
 
-       atomic_set(&bat_priv->gw_reselect, 0);
+       atomic_set(&bat_priv->gw.reselect, 0);
        atomic_set(&bat_priv->mesh_state, BATADV_MESH_ACTIVE);
 
        return 0;
index 5d8fa07579477e4c82ec8aa160af256fa53ac94f..d57b746219de057c931d8459f1ccef84ce711cee 100644 (file)
@@ -26,7 +26,7 @@
 #define BATADV_DRIVER_DEVICE "batman-adv"
 
 #ifndef BATADV_SOURCE_VERSION
-#define BATADV_SOURCE_VERSION "2012.3.0"
+#define BATADV_SOURCE_VERSION "2012.4.0"
 #endif
 
 /* B.A.T.M.A.N. parameters */
  * -> TODO: check influence on BATADV_TQ_LOCAL_WINDOW_SIZE
  */
 #define BATADV_PURGE_TIMEOUT 200000 /* 200 seconds */
-#define BATADV_TT_LOCAL_TIMEOUT 3600000 /* in miliseconds */
-#define BATADV_TT_CLIENT_ROAM_TIMEOUT 600000 /* in miliseconds */
+#define BATADV_TT_LOCAL_TIMEOUT 3600000 /* in milliseconds */
+#define BATADV_TT_CLIENT_ROAM_TIMEOUT 600000 /* in milliseconds */
+#define BATADV_TT_CLIENT_TEMP_TIMEOUT 600000 /* in milliseconds */
 /* sliding packet range of received originator messages in sequence numbers
  * (should be a multiple of our word size)
  */
 #define BATADV_TQ_LOCAL_WINDOW_SIZE 64
-/* miliseconds we have to keep pending tt_req */
+/* milliseconds we have to keep pending tt_req */
 #define BATADV_TT_REQUEST_TIMEOUT 3000
 
 #define BATADV_TQ_GLOBAL_WINDOW_SIZE 5
@@ -59,7 +60,7 @@
 #define BATADV_TT_OGM_APPEND_MAX 3
 
 /* Time in which a client can roam at most ROAMING_MAX_COUNT times in
- * miliseconds
+ * milliseconds
  */
 #define BATADV_ROAMING_MAX_TIME 20000
 #define BATADV_ROAMING_MAX_COUNT 5
@@ -123,15 +124,6 @@ enum batadv_uev_type {
 /* Append 'batman-adv: ' before kernel messages */
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
-/* all messages related to routing / flooding / broadcasting / etc */
-enum batadv_dbg_level {
-       BATADV_DBG_BATMAN = 1 << 0,
-       BATADV_DBG_ROUTES = 1 << 1, /* route added / changed / deleted */
-       BATADV_DBG_TT     = 1 << 2, /* translation table operations */
-       BATADV_DBG_BLA    = 1 << 3, /* bridge loop avoidance */
-       BATADV_DBG_ALL    = 15,
-};
-
 /* Kernel headers */
 
 #include <linux/mutex.h>       /* mutex */
@@ -173,6 +165,15 @@ int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops);
 int batadv_algo_select(struct batadv_priv *bat_priv, char *name);
 int batadv_algo_seq_print_text(struct seq_file *seq, void *offset);
 
+/* all messages related to routing / flooding / broadcasting / etc */
+enum batadv_dbg_level {
+       BATADV_DBG_BATMAN = BIT(0),
+       BATADV_DBG_ROUTES = BIT(1), /* route added / changed / deleted */
+       BATADV_DBG_TT     = BIT(2), /* translation table operations */
+       BATADV_DBG_BLA    = BIT(3), /* bridge loop avoidance */
+       BATADV_DBG_ALL    = 15,
+};
+
 #ifdef CONFIG_BATMAN_ADV_DEBUG
 int batadv_debug_log(struct batadv_priv *bat_priv, const char *fmt, ...)
 __printf(2, 3);
index 8d3e55a96adc8cc1a71d9994e90af4ddd81c50fc..2d23a14c220eb281a839c58650fb9a70802bb415 100644 (file)
@@ -37,10 +37,10 @@ enum batadv_packettype {
 #define BATADV_COMPAT_VERSION 14
 
 enum batadv_iv_flags {
-       BATADV_NOT_BEST_NEXT_HOP   = 1 << 3,
-       BATADV_PRIMARIES_FIRST_HOP = 1 << 4,
-       BATADV_VIS_SERVER          = 1 << 5,
-       BATADV_DIRECTLINK          = 1 << 6,
+       BATADV_NOT_BEST_NEXT_HOP   = BIT(3),
+       BATADV_PRIMARIES_FIRST_HOP = BIT(4),
+       BATADV_VIS_SERVER          = BIT(5),
+       BATADV_DIRECTLINK          = BIT(6),
 };
 
 /* ICMP message types */
@@ -60,8 +60,8 @@ enum batadv_vis_packettype {
 
 /* fragmentation defines */
 enum batadv_unicast_frag_flags {
-       BATADV_UNI_FRAG_HEAD      = 1 << 0,
-       BATADV_UNI_FRAG_LARGETAIL = 1 << 1,
+       BATADV_UNI_FRAG_HEAD      = BIT(0),
+       BATADV_UNI_FRAG_LARGETAIL = BIT(1),
 };
 
 /* TT_QUERY subtypes */
@@ -74,26 +74,27 @@ enum batadv_tt_query_packettype {
 
 /* TT_QUERY flags */
 enum batadv_tt_query_flags {
-       BATADV_TT_FULL_TABLE = 1 << 2,
+       BATADV_TT_FULL_TABLE = BIT(2),
 };
 
 /* BATADV_TT_CLIENT flags.
- * Flags from 1 to 1 << 7 are sent on the wire, while flags from 1 << 8 to
- * 1 << 15 are used for local computation only
+ * Flags from BIT(0) to BIT(7) are sent on the wire, while flags from BIT(8) to
+ * BIT(15) are used for local computation only
  */
 enum batadv_tt_client_flags {
-       BATADV_TT_CLIENT_DEL     = 1 << 0,
-       BATADV_TT_CLIENT_ROAM    = 1 << 1,
-       BATADV_TT_CLIENT_WIFI    = 1 << 2,
-       BATADV_TT_CLIENT_NOPURGE = 1 << 8,
-       BATADV_TT_CLIENT_NEW     = 1 << 9,
-       BATADV_TT_CLIENT_PENDING = 1 << 10,
+       BATADV_TT_CLIENT_DEL     = BIT(0),
+       BATADV_TT_CLIENT_ROAM    = BIT(1),
+       BATADV_TT_CLIENT_WIFI    = BIT(2),
+       BATADV_TT_CLIENT_TEMP    = BIT(3),
+       BATADV_TT_CLIENT_NOPURGE = BIT(8),
+       BATADV_TT_CLIENT_NEW     = BIT(9),
+       BATADV_TT_CLIENT_PENDING = BIT(10),
 };
 
 /* claim frame types for the bridge loop avoidance */
 enum batadv_bla_claimframe {
-       BATADV_CLAIM_TYPE_ADD           = 0x00,
-       BATADV_CLAIM_TYPE_DEL           = 0x01,
+       BATADV_CLAIM_TYPE_CLAIM         = 0x00,
+       BATADV_CLAIM_TYPE_UNCLAIM       = 0x01,
        BATADV_CLAIM_TYPE_ANNOUNCE      = 0x02,
        BATADV_CLAIM_TYPE_REQUEST       = 0x03,
 };
index bc2b88bbea1fb5561a9ac2c199eb6926f3af2347..939fc01371dff0c209665b297b69ec6502887b1a 100644 (file)
@@ -579,32 +579,45 @@ batadv_find_ifalter_router(struct batadv_orig_node *primary_orig,
        return router;
 }
 
-int batadv_recv_tt_query(struct sk_buff *skb, struct batadv_hard_iface *recv_if)
+static int batadv_check_unicast_packet(struct sk_buff *skb, int hdr_size)
 {
-       struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
-       struct batadv_tt_query_packet *tt_query;
-       uint16_t tt_size;
        struct ethhdr *ethhdr;
-       char tt_flag;
-       size_t packet_size;
 
        /* drop packet if it has not necessary minimum size */
-       if (unlikely(!pskb_may_pull(skb,
-                                   sizeof(struct batadv_tt_query_packet))))
-               goto out;
-
-       /* I could need to modify it */
-       if (skb_cow(skb, sizeof(struct batadv_tt_query_packet)) < 0)
-               goto out;
+       if (unlikely(!pskb_may_pull(skb, hdr_size)))
+               return -1;
 
        ethhdr = (struct ethhdr *)skb_mac_header(skb);
 
        /* packet with unicast indication but broadcast recipient */
        if (is_broadcast_ether_addr(ethhdr->h_dest))
-               goto out;
+               return -1;
 
        /* packet with broadcast sender address */
        if (is_broadcast_ether_addr(ethhdr->h_source))
+               return -1;
+
+       /* not for me */
+       if (!batadv_is_my_mac(ethhdr->h_dest))
+               return -1;
+
+       return 0;
+}
+
+int batadv_recv_tt_query(struct sk_buff *skb, struct batadv_hard_iface *recv_if)
+{
+       struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
+       struct batadv_tt_query_packet *tt_query;
+       uint16_t tt_size;
+       int hdr_size = sizeof(*tt_query);
+       char tt_flag;
+       size_t packet_size;
+
+       if (batadv_check_unicast_packet(skb, hdr_size) < 0)
+               return NET_RX_DROP;
+
+       /* I could need to modify it */
+       if (skb_cow(skb, sizeof(struct batadv_tt_query_packet)) < 0)
                goto out;
 
        tt_query = (struct batadv_tt_query_packet *)skb->data;
@@ -721,7 +734,7 @@ int batadv_recv_roam_adv(struct sk_buff *skb, struct batadv_hard_iface *recv_if)
         * been incremented yet. This flag will make me check all the incoming
         * packets for the correct destination.
         */
-       bat_priv->tt_poss_change = true;
+       bat_priv->tt.poss_change = true;
 
        batadv_orig_node_free_ref(orig_node);
 out:
@@ -819,31 +832,6 @@ err:
        return NULL;
 }
 
-static int batadv_check_unicast_packet(struct sk_buff *skb, int hdr_size)
-{
-       struct ethhdr *ethhdr;
-
-       /* drop packet if it has not necessary minimum size */
-       if (unlikely(!pskb_may_pull(skb, hdr_size)))
-               return -1;
-
-       ethhdr = (struct ethhdr *)skb_mac_header(skb);
-
-       /* packet with unicast indication but broadcast recipient */
-       if (is_broadcast_ether_addr(ethhdr->h_dest))
-               return -1;
-
-       /* packet with broadcast sender address */
-       if (is_broadcast_ether_addr(ethhdr->h_source))
-               return -1;
-
-       /* not for me */
-       if (!batadv_is_my_mac(ethhdr->h_dest))
-               return -1;
-
-       return 0;
-}
-
 static int batadv_route_unicast_packet(struct sk_buff *skb,
                                       struct batadv_hard_iface *recv_if)
 {
@@ -947,8 +935,8 @@ static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
        unicast_packet = (struct batadv_unicast_packet *)skb->data;
 
        if (batadv_is_my_mac(unicast_packet->dest)) {
-               tt_poss_change = bat_priv->tt_poss_change;
-               curr_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
+               tt_poss_change = bat_priv->tt.poss_change;
+               curr_ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn);
        } else {
                orig_node = batadv_orig_hash_find(bat_priv,
                                                  unicast_packet->dest);
@@ -993,8 +981,7 @@ static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
                } else {
                        memcpy(unicast_packet->dest, orig_node->orig,
                               ETH_ALEN);
-                       curr_ttvn = (uint8_t)
-                               atomic_read(&orig_node->last_ttvn);
+                       curr_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
                        batadv_orig_node_free_ref(orig_node);
                }
 
@@ -1025,8 +1012,9 @@ int batadv_recv_unicast_packet(struct sk_buff *skb,
 
        /* packet for me */
        if (batadv_is_my_mac(unicast_packet->dest)) {
-               batadv_interface_rx(recv_if->soft_iface, skb, recv_if,
-                                   hdr_size);
+               batadv_interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size,
+                                   NULL);
+
                return NET_RX_SUCCESS;
        }
 
@@ -1063,7 +1051,7 @@ int batadv_recv_ucast_frag_packet(struct sk_buff *skb,
                        return NET_RX_SUCCESS;
 
                batadv_interface_rx(recv_if->soft_iface, new_skb, recv_if,
-                                   sizeof(struct batadv_unicast_packet));
+                                   sizeof(struct batadv_unicast_packet), NULL);
                return NET_RX_SUCCESS;
        }
 
@@ -1150,7 +1138,8 @@ int batadv_recv_bcast_packet(struct sk_buff *skb,
                goto out;
 
        /* broadcast for me */
-       batadv_interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);
+       batadv_interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size,
+                           orig_node);
        ret = NET_RX_SUCCESS;
        goto out;
 
index 3b4b2daa3b3e1b40315a2afa657b13d67517ebb0..570a8bce0364ea08ea45341b57b362a72beedc45 100644 (file)
@@ -190,13 +190,13 @@ out:
 static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
 {
        struct batadv_hard_iface *hard_iface;
-       struct delayed_work *delayed_work =
-               container_of(work, struct delayed_work, work);
+       struct delayed_work *delayed_work;
        struct batadv_forw_packet *forw_packet;
        struct sk_buff *skb1;
        struct net_device *soft_iface;
        struct batadv_priv *bat_priv;
 
+       delayed_work = container_of(work, struct delayed_work, work);
        forw_packet = container_of(delayed_work, struct batadv_forw_packet,
                                   delayed_work);
        soft_iface = forw_packet->if_incoming->soft_iface;
@@ -239,11 +239,11 @@ out:
 
 void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work)
 {
-       struct delayed_work *delayed_work =
-               container_of(work, struct delayed_work, work);
+       struct delayed_work *delayed_work;
        struct batadv_forw_packet *forw_packet;
        struct batadv_priv *bat_priv;
 
+       delayed_work = container_of(work, struct delayed_work, work);
        forw_packet = container_of(delayed_work, struct batadv_forw_packet,
                                   delayed_work);
        bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
index 21c53577c8d6a5e65599aa5c01d8d9d93c6fb838..b9a28d2dd3e8d907526ebd51dfedba9e993fd526 100644 (file)
@@ -93,7 +93,14 @@ static int batadv_interface_release(struct net_device *dev)
 static struct net_device_stats *batadv_interface_stats(struct net_device *dev)
 {
        struct batadv_priv *bat_priv = netdev_priv(dev);
-       return &bat_priv->stats;
+       struct net_device_stats *stats = &bat_priv->stats;
+
+       stats->tx_packets = batadv_sum_counter(bat_priv, BATADV_CNT_TX);
+       stats->tx_bytes = batadv_sum_counter(bat_priv, BATADV_CNT_TX_BYTES);
+       stats->tx_dropped = batadv_sum_counter(bat_priv, BATADV_CNT_TX_DROPPED);
+       stats->rx_packets = batadv_sum_counter(bat_priv, BATADV_CNT_RX);
+       stats->rx_bytes = batadv_sum_counter(bat_priv, BATADV_CNT_RX_BYTES);
+       return stats;
 }
 
 static int batadv_interface_set_mac_addr(struct net_device *dev, void *p)
@@ -145,6 +152,7 @@ static int batadv_interface_tx(struct sk_buff *skb,
        int data_len = skb->len, ret;
        short vid __maybe_unused = -1;
        bool do_bcast = false;
+       uint32_t seqno;
 
        if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
                goto dropped;
@@ -226,8 +234,8 @@ static int batadv_interface_tx(struct sk_buff *skb,
                       primary_if->net_dev->dev_addr, ETH_ALEN);
 
                /* set broadcast sequence number */
-               bcast_packet->seqno =
-                       htonl(atomic_inc_return(&bat_priv->bcast_seqno));
+               seqno = atomic_inc_return(&bat_priv->bcast_seqno);
+               bcast_packet->seqno = htonl(seqno);
 
                batadv_add_bcast_packet_to_list(bat_priv, skb, 1);
 
@@ -249,14 +257,14 @@ static int batadv_interface_tx(struct sk_buff *skb,
                        goto dropped_freed;
        }
 
-       bat_priv->stats.tx_packets++;
-       bat_priv->stats.tx_bytes += data_len;
+       batadv_inc_counter(bat_priv, BATADV_CNT_TX);
+       batadv_add_counter(bat_priv, BATADV_CNT_TX_BYTES, data_len);
        goto end;
 
 dropped:
        kfree_skb(skb);
 dropped_freed:
-       bat_priv->stats.tx_dropped++;
+       batadv_inc_counter(bat_priv, BATADV_CNT_TX_DROPPED);
 end:
        if (primary_if)
                batadv_hardif_free_ref(primary_if);
@@ -265,7 +273,7 @@ end:
 
 void batadv_interface_rx(struct net_device *soft_iface,
                         struct sk_buff *skb, struct batadv_hard_iface *recv_if,
-                        int hdr_size)
+                        int hdr_size, struct batadv_orig_node *orig_node)
 {
        struct batadv_priv *bat_priv = netdev_priv(soft_iface);
        struct ethhdr *ethhdr;
@@ -311,11 +319,16 @@ void batadv_interface_rx(struct net_device *soft_iface,
 
        /* skb->ip_summed = CHECKSUM_UNNECESSARY; */
 
-       bat_priv->stats.rx_packets++;
-       bat_priv->stats.rx_bytes += skb->len + ETH_HLEN;
+       batadv_inc_counter(bat_priv, BATADV_CNT_RX);
+       batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
+                          skb->len + ETH_HLEN);
 
        soft_iface->last_rx = jiffies;
 
+       if (orig_node)
+               batadv_tt_add_temporary_global_entry(bat_priv, orig_node,
+                                                    ethhdr->h_source);
+
        if (batadv_is_ap_isolated(bat_priv, ethhdr->h_source, ethhdr->h_dest))
                goto dropped;
 
@@ -382,15 +395,22 @@ struct net_device *batadv_softif_create(const char *name)
        if (!soft_iface)
                goto out;
 
+       bat_priv = netdev_priv(soft_iface);
+
+       /* batadv_interface_stats() needs to be available as soon as
+        * register_netdevice() has been called
+        */
+       bat_priv->bat_counters = __alloc_percpu(cnt_len, __alignof__(uint64_t));
+       if (!bat_priv->bat_counters)
+               goto free_soft_iface;
+
        ret = register_netdevice(soft_iface);
        if (ret < 0) {
                pr_err("Unable to register the batman interface '%s': %i\n",
                       name, ret);
-               goto free_soft_iface;
+               goto free_bat_counters;
        }
 
-       bat_priv = netdev_priv(soft_iface);
-
        atomic_set(&bat_priv->aggregated_ogms, 1);
        atomic_set(&bat_priv->bonding, 0);
        atomic_set(&bat_priv->bridge_loop_avoidance, 0);
@@ -408,29 +428,26 @@ struct net_device *batadv_softif_create(const char *name)
 
        atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
        atomic_set(&bat_priv->bcast_seqno, 1);
-       atomic_set(&bat_priv->ttvn, 0);
-       atomic_set(&bat_priv->tt_local_changes, 0);
-       atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
-       atomic_set(&bat_priv->bla_num_requests, 0);
-
-       bat_priv->tt_buff = NULL;
-       bat_priv->tt_buff_len = 0;
-       bat_priv->tt_poss_change = false;
+       atomic_set(&bat_priv->tt.vn, 0);
+       atomic_set(&bat_priv->tt.local_changes, 0);
+       atomic_set(&bat_priv->tt.ogm_append_cnt, 0);
+#ifdef CONFIG_BATMAN_ADV_BLA
+       atomic_set(&bat_priv->bla.num_requests, 0);
+#endif
+       bat_priv->tt.last_changeset = NULL;
+       bat_priv->tt.last_changeset_len = 0;
+       bat_priv->tt.poss_change = false;
 
        bat_priv->primary_if = NULL;
        bat_priv->num_ifaces = 0;
 
-       bat_priv->bat_counters = __alloc_percpu(cnt_len, __alignof__(uint64_t));
-       if (!bat_priv->bat_counters)
-               goto unreg_soft_iface;
-
        ret = batadv_algo_select(bat_priv, batadv_routing_algo);
        if (ret < 0)
-               goto free_bat_counters;
+               goto unreg_soft_iface;
 
        ret = batadv_sysfs_add_meshif(soft_iface);
        if (ret < 0)
-               goto free_bat_counters;
+               goto unreg_soft_iface;
 
        ret = batadv_debugfs_add_meshif(soft_iface);
        if (ret < 0)
@@ -446,12 +463,13 @@ unreg_debugfs:
        batadv_debugfs_del_meshif(soft_iface);
 unreg_sysfs:
        batadv_sysfs_del_meshif(soft_iface);
-free_bat_counters:
-       free_percpu(bat_priv->bat_counters);
 unreg_soft_iface:
+       free_percpu(bat_priv->bat_counters);
        unregister_netdevice(soft_iface);
        return NULL;
 
+free_bat_counters:
+       free_percpu(bat_priv->bat_counters);
 free_soft_iface:
        free_netdev(soft_iface);
 out:
@@ -521,6 +539,11 @@ static u32 batadv_get_link(struct net_device *dev)
 static const struct {
        const char name[ETH_GSTRING_LEN];
 } batadv_counters_strings[] = {
+       { "tx" },
+       { "tx_bytes" },
+       { "tx_dropped" },
+       { "rx" },
+       { "rx_bytes" },
        { "forward" },
        { "forward_bytes" },
        { "mgmt_tx" },
index 852c683b06a187a1eecaa7c68ad2430f747c34f3..07a08fed28b97ae2739e8a7cb36040f75345a951 100644 (file)
@@ -21,8 +21,9 @@
 #define _NET_BATMAN_ADV_SOFT_INTERFACE_H_
 
 int batadv_skb_head_push(struct sk_buff *skb, unsigned int len);
-void batadv_interface_rx(struct net_device *soft_iface, struct sk_buff *skb,
-                        struct batadv_hard_iface *recv_if, int hdr_size);
+void batadv_interface_rx(struct net_device *soft_iface,
+                        struct sk_buff *skb, struct batadv_hard_iface *recv_if,
+                        int hdr_size, struct batadv_orig_node *orig_node);
 struct net_device *batadv_softif_create(const char *name);
 void batadv_softif_destroy(struct net_device *soft_iface);
 int batadv_softif_is_valid(const struct net_device *net_dev);
index 99dd8f75b3ff20f0d2a277e1f65ffc284b002f2e..112edd371b2f81c79f431c6e99402165405c1b10 100644 (file)
@@ -34,6 +34,10 @@ static void batadv_send_roam_adv(struct batadv_priv *bat_priv, uint8_t *client,
 static void batadv_tt_purge(struct work_struct *work);
 static void
 batadv_tt_global_del_orig_list(struct batadv_tt_global_entry *tt_global_entry);
+static void batadv_tt_global_del(struct batadv_priv *bat_priv,
+                                struct batadv_orig_node *orig_node,
+                                const unsigned char *addr,
+                                const char *message, bool roaming);
 
 /* returns 1 if they are the same mac addr */
 static int batadv_compare_tt(const struct hlist_node *node, const void *data2)
@@ -46,8 +50,8 @@ static int batadv_compare_tt(const struct hlist_node *node, const void *data2)
 
 static void batadv_tt_start_timer(struct batadv_priv *bat_priv)
 {
-       INIT_DELAYED_WORK(&bat_priv->tt_work, batadv_tt_purge);
-       queue_delayed_work(batadv_event_workqueue, &bat_priv->tt_work,
+       INIT_DELAYED_WORK(&bat_priv->tt.work, batadv_tt_purge);
+       queue_delayed_work(batadv_event_workqueue, &bat_priv->tt.work,
                           msecs_to_jiffies(5000));
 }
 
@@ -88,7 +92,7 @@ batadv_tt_local_hash_find(struct batadv_priv *bat_priv, const void *data)
        struct batadv_tt_common_entry *tt_common_entry;
        struct batadv_tt_local_entry *tt_local_entry = NULL;
 
-       tt_common_entry = batadv_tt_hash_find(bat_priv->tt_local_hash, data);
+       tt_common_entry = batadv_tt_hash_find(bat_priv->tt.local_hash, data);
        if (tt_common_entry)
                tt_local_entry = container_of(tt_common_entry,
                                              struct batadv_tt_local_entry,
@@ -102,7 +106,7 @@ batadv_tt_global_hash_find(struct batadv_priv *bat_priv, const void *data)
        struct batadv_tt_common_entry *tt_common_entry;
        struct batadv_tt_global_entry *tt_global_entry = NULL;
 
-       tt_common_entry = batadv_tt_hash_find(bat_priv->tt_global_hash, data);
+       tt_common_entry = batadv_tt_hash_find(bat_priv->tt.global_hash, data);
        if (tt_common_entry)
                tt_global_entry = container_of(tt_common_entry,
                                               struct batadv_tt_global_entry,
@@ -152,6 +156,8 @@ static void batadv_tt_orig_list_entry_free_rcu(struct rcu_head *rcu)
 static void
 batadv_tt_orig_list_entry_free_ref(struct batadv_tt_orig_list_entry *orig_entry)
 {
+       if (!atomic_dec_and_test(&orig_entry->refcount))
+               return;
        /* to avoid race conditions, immediately decrease the tt counter */
        atomic_dec(&orig_entry->orig_node->tt_size);
        call_rcu(&orig_entry->rcu, batadv_tt_orig_list_entry_free_rcu);
@@ -175,8 +181,8 @@ static void batadv_tt_local_event(struct batadv_priv *bat_priv,
        del_op_requested = flags & BATADV_TT_CLIENT_DEL;
 
        /* check for ADD+DEL or DEL+ADD events */
-       spin_lock_bh(&bat_priv->tt_changes_list_lock);
-       list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list,
+       spin_lock_bh(&bat_priv->tt.changes_list_lock);
+       list_for_each_entry_safe(entry, safe, &bat_priv->tt.changes_list,
                                 list) {
                if (!batadv_compare_eth(entry->change.addr, addr))
                        continue;
@@ -203,15 +209,15 @@ del:
        }
 
        /* track the change in the OGMinterval list */
-       list_add_tail(&tt_change_node->list, &bat_priv->tt_changes_list);
+       list_add_tail(&tt_change_node->list, &bat_priv->tt.changes_list);
 
 unlock:
-       spin_unlock_bh(&bat_priv->tt_changes_list_lock);
+       spin_unlock_bh(&bat_priv->tt.changes_list_lock);
 
        if (event_removed)
-               atomic_dec(&bat_priv->tt_local_changes);
+               atomic_dec(&bat_priv->tt.local_changes);
        else
-               atomic_inc(&bat_priv->tt_local_changes);
+               atomic_inc(&bat_priv->tt.local_changes);
 }
 
 int batadv_tt_len(int changes_num)
@@ -221,12 +227,12 @@ int batadv_tt_len(int changes_num)
 
 static int batadv_tt_local_init(struct batadv_priv *bat_priv)
 {
-       if (bat_priv->tt_local_hash)
+       if (bat_priv->tt.local_hash)
                return 0;
 
-       bat_priv->tt_local_hash = batadv_hash_new(1024);
+       bat_priv->tt.local_hash = batadv_hash_new(1024);
 
-       if (!bat_priv->tt_local_hash)
+       if (!bat_priv->tt.local_hash)
                return -ENOMEM;
 
        return 0;
@@ -258,7 +264,7 @@ void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
 
        batadv_dbg(BATADV_DBG_TT, bat_priv,
                   "Creating new local tt entry: %pM (ttvn: %d)\n", addr,
-                  (uint8_t)atomic_read(&bat_priv->ttvn));
+                  (uint8_t)atomic_read(&bat_priv->tt.vn));
 
        memcpy(tt_local_entry->common.addr, addr, ETH_ALEN);
        tt_local_entry->common.flags = BATADV_NO_FLAGS;
@@ -266,6 +272,7 @@ void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
                tt_local_entry->common.flags |= BATADV_TT_CLIENT_WIFI;
        atomic_set(&tt_local_entry->common.refcount, 2);
        tt_local_entry->last_seen = jiffies;
+       tt_local_entry->common.added_at = tt_local_entry->last_seen;
 
        /* the batman interface mac address should never be purged */
        if (batadv_compare_eth(addr, soft_iface->dev_addr))
@@ -277,7 +284,7 @@ void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
         */
        tt_local_entry->common.flags |= BATADV_TT_CLIENT_NEW;
 
-       hash_added = batadv_hash_add(bat_priv->tt_local_hash, batadv_compare_tt,
+       hash_added = batadv_hash_add(bat_priv->tt.local_hash, batadv_compare_tt,
                                     batadv_choose_orig,
                                     &tt_local_entry->common,
                                     &tt_local_entry->common.hash_entry);
@@ -348,7 +355,7 @@ static void batadv_tt_prepare_packet_buff(struct batadv_priv *bat_priv,
        primary_if = batadv_primary_if_get_selected(bat_priv);
 
        req_len = min_packet_len;
-       req_len += batadv_tt_len(atomic_read(&bat_priv->tt_local_changes));
+       req_len += batadv_tt_len(atomic_read(&bat_priv->tt.local_changes));
 
        /* if we have too many changes for one packet don't send any
         * and wait for the tt table request which will be fragmented
@@ -381,10 +388,10 @@ static int batadv_tt_changes_fill_buff(struct batadv_priv *bat_priv,
        if (new_len > 0)
                tot_changes = new_len / batadv_tt_len(1);
 
-       spin_lock_bh(&bat_priv->tt_changes_list_lock);
-       atomic_set(&bat_priv->tt_local_changes, 0);
+       spin_lock_bh(&bat_priv->tt.changes_list_lock);
+       atomic_set(&bat_priv->tt.local_changes, 0);
 
-       list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list,
+       list_for_each_entry_safe(entry, safe, &bat_priv->tt.changes_list,
                                 list) {
                if (count < tot_changes) {
                        memcpy(tt_buff + batadv_tt_len(count),
@@ -394,25 +401,25 @@ static int batadv_tt_changes_fill_buff(struct batadv_priv *bat_priv,
                list_del(&entry->list);
                kfree(entry);
        }
-       spin_unlock_bh(&bat_priv->tt_changes_list_lock);
+       spin_unlock_bh(&bat_priv->tt.changes_list_lock);
 
        /* Keep the buffer for possible tt_request */
-       spin_lock_bh(&bat_priv->tt_buff_lock);
-       kfree(bat_priv->tt_buff);
-       bat_priv->tt_buff_len = 0;
-       bat_priv->tt_buff = NULL;
+       spin_lock_bh(&bat_priv->tt.last_changeset_lock);
+       kfree(bat_priv->tt.last_changeset);
+       bat_priv->tt.last_changeset_len = 0;
+       bat_priv->tt.last_changeset = NULL;
        /* check whether this new OGM has no changes due to size problems */
        if (new_len > 0) {
                /* if kmalloc() fails we will reply with the full table
                 * instead of providing the diff
                 */
-               bat_priv->tt_buff = kmalloc(new_len, GFP_ATOMIC);
-               if (bat_priv->tt_buff) {
-                       memcpy(bat_priv->tt_buff, tt_buff, new_len);
-                       bat_priv->tt_buff_len = new_len;
+               bat_priv->tt.last_changeset = kmalloc(new_len, GFP_ATOMIC);
+               if (bat_priv->tt.last_changeset) {
+                       memcpy(bat_priv->tt.last_changeset, tt_buff, new_len);
+                       bat_priv->tt.last_changeset_len = new_len;
                }
        }
-       spin_unlock_bh(&bat_priv->tt_buff_lock);
+       spin_unlock_bh(&bat_priv->tt.last_changeset_lock);
 
        return count;
 }
@@ -421,7 +428,7 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
 {
        struct net_device *net_dev = (struct net_device *)seq->private;
        struct batadv_priv *bat_priv = netdev_priv(net_dev);
-       struct batadv_hashtable *hash = bat_priv->tt_local_hash;
+       struct batadv_hashtable *hash = bat_priv->tt.local_hash;
        struct batadv_tt_common_entry *tt_common_entry;
        struct batadv_hard_iface *primary_if;
        struct hlist_node *node;
@@ -446,7 +453,7 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
 
        seq_printf(seq,
                   "Locally retrieved addresses (from %s) announced via TT (TTVN: %u):\n",
-                  net_dev->name, (uint8_t)atomic_read(&bat_priv->ttvn));
+                  net_dev->name, (uint8_t)atomic_read(&bat_priv->tt.vn));
 
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
@@ -544,7 +551,7 @@ static void batadv_tt_local_purge_list(struct batadv_priv *bat_priv,
 
 static void batadv_tt_local_purge(struct batadv_priv *bat_priv)
 {
-       struct batadv_hashtable *hash = bat_priv->tt_local_hash;
+       struct batadv_hashtable *hash = bat_priv->tt.local_hash;
        struct hlist_head *head;
        spinlock_t *list_lock; /* protects write access to the hash lists */
        uint32_t i;
@@ -570,10 +577,10 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
        struct hlist_head *head;
        uint32_t i;
 
-       if (!bat_priv->tt_local_hash)
+       if (!bat_priv->tt.local_hash)
                return;
 
-       hash = bat_priv->tt_local_hash;
+       hash = bat_priv->tt.local_hash;
 
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
@@ -593,17 +600,17 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
 
        batadv_hash_destroy(hash);
 
-       bat_priv->tt_local_hash = NULL;
+       bat_priv->tt.local_hash = NULL;
 }
 
 static int batadv_tt_global_init(struct batadv_priv *bat_priv)
 {
-       if (bat_priv->tt_global_hash)
+       if (bat_priv->tt.global_hash)
                return 0;
 
-       bat_priv->tt_global_hash = batadv_hash_new(1024);
+       bat_priv->tt.global_hash = batadv_hash_new(1024);
 
-       if (!bat_priv->tt_global_hash)
+       if (!bat_priv->tt.global_hash)
                return -ENOMEM;
 
        return 0;
@@ -613,62 +620,99 @@ static void batadv_tt_changes_list_free(struct batadv_priv *bat_priv)
 {
        struct batadv_tt_change_node *entry, *safe;
 
-       spin_lock_bh(&bat_priv->tt_changes_list_lock);
+       spin_lock_bh(&bat_priv->tt.changes_list_lock);
 
-       list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list,
+       list_for_each_entry_safe(entry, safe, &bat_priv->tt.changes_list,
                                 list) {
                list_del(&entry->list);
                kfree(entry);
        }
 
-       atomic_set(&bat_priv->tt_local_changes, 0);
-       spin_unlock_bh(&bat_priv->tt_changes_list_lock);
+       atomic_set(&bat_priv->tt.local_changes, 0);
+       spin_unlock_bh(&bat_priv->tt.changes_list_lock);
 }
 
-/* find out if an orig_node is already in the list of a tt_global_entry.
- * returns 1 if found, 0 otherwise
+/* retrieves the orig_tt_list_entry belonging to orig_node from the
+ * batadv_tt_global_entry list
+ *
+ * returns it with an increased refcounter, NULL if not found
  */
-static bool
-batadv_tt_global_entry_has_orig(const struct batadv_tt_global_entry *entry,
-                               const struct batadv_orig_node *orig_node)
+static struct batadv_tt_orig_list_entry *
+batadv_tt_global_orig_entry_find(const struct batadv_tt_global_entry *entry,
+                                const struct batadv_orig_node *orig_node)
 {
-       struct batadv_tt_orig_list_entry *tmp_orig_entry;
+       struct batadv_tt_orig_list_entry *tmp_orig_entry, *orig_entry = NULL;
        const struct hlist_head *head;
        struct hlist_node *node;
-       bool found = false;
 
        rcu_read_lock();
        head = &entry->orig_list;
        hlist_for_each_entry_rcu(tmp_orig_entry, node, head, list) {
-               if (tmp_orig_entry->orig_node == orig_node) {
-                       found = true;
-                       break;
-               }
+               if (tmp_orig_entry->orig_node != orig_node)
+                       continue;
+               if (!atomic_inc_not_zero(&tmp_orig_entry->refcount))
+                       continue;
+
+               orig_entry = tmp_orig_entry;
+               break;
        }
        rcu_read_unlock();
+
+       return orig_entry;
+}
+
+/* find out if an orig_node is already in the list of a tt_global_entry.
+ * returns true if found, false otherwise
+ */
+static bool
+batadv_tt_global_entry_has_orig(const struct batadv_tt_global_entry *entry,
+                               const struct batadv_orig_node *orig_node)
+{
+       struct batadv_tt_orig_list_entry *orig_entry;
+       bool found = false;
+
+       orig_entry = batadv_tt_global_orig_entry_find(entry, orig_node);
+       if (orig_entry) {
+               found = true;
+               batadv_tt_orig_list_entry_free_ref(orig_entry);
+       }
+
        return found;
 }
 
 static void
-batadv_tt_global_add_orig_entry(struct batadv_tt_global_entry *tt_global_entry,
+batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
                                struct batadv_orig_node *orig_node, int ttvn)
 {
        struct batadv_tt_orig_list_entry *orig_entry;
 
+       orig_entry = batadv_tt_global_orig_entry_find(tt_global, orig_node);
+       if (orig_entry) {
+               /* refresh the ttvn: the current value could be a bogus one that
+                * was added during a "temporary client detection"
+                */
+               orig_entry->ttvn = ttvn;
+               goto out;
+       }
+
        orig_entry = kzalloc(sizeof(*orig_entry), GFP_ATOMIC);
        if (!orig_entry)
-               return;
+               goto out;
 
        INIT_HLIST_NODE(&orig_entry->list);
        atomic_inc(&orig_node->refcount);
        atomic_inc(&orig_node->tt_size);
        orig_entry->orig_node = orig_node;
        orig_entry->ttvn = ttvn;
+       atomic_set(&orig_entry->refcount, 2);
 
-       spin_lock_bh(&tt_global_entry->list_lock);
+       spin_lock_bh(&tt_global->list_lock);
        hlist_add_head_rcu(&orig_entry->list,
-                          &tt_global_entry->orig_list);
-       spin_unlock_bh(&tt_global_entry->list_lock);
+                          &tt_global->orig_list);
+       spin_unlock_bh(&tt_global->list_lock);
+out:
+       if (orig_entry)
+               batadv_tt_orig_list_entry_free_ref(orig_entry);
 }
 
 /* caller must hold orig_node refcount */
@@ -695,11 +739,12 @@ int batadv_tt_global_add(struct batadv_priv *bat_priv,
                common->flags = flags;
                tt_global_entry->roam_at = 0;
                atomic_set(&common->refcount, 2);
+               common->added_at = jiffies;
 
                INIT_HLIST_HEAD(&tt_global_entry->orig_list);
                spin_lock_init(&tt_global_entry->list_lock);
 
-               hash_added = batadv_hash_add(bat_priv->tt_global_hash,
+               hash_added = batadv_hash_add(bat_priv->tt.global_hash,
                                             batadv_compare_tt,
                                             batadv_choose_orig, common,
                                             &common->hash_entry);
@@ -709,11 +754,20 @@ int batadv_tt_global_add(struct batadv_priv *bat_priv,
                        batadv_tt_global_entry_free_ref(tt_global_entry);
                        goto out_remove;
                }
-
-               batadv_tt_global_add_orig_entry(tt_global_entry, orig_node,
-                                               ttvn);
        } else {
-               /* there is already a global entry, use this one. */
+               /* If there is already a global entry, we can use this one for
+                * our processing.
+                * But if we are trying to add a temporary client we can exit
+                * directly because the temporary information should never
+                * override any already known client state (whatever it is)
+                */
+               if (flags & BATADV_TT_CLIENT_TEMP)
+                       goto out;
+
+               /* if the client was temporarily added before receiving the first
+                * OGM announcing it, we have to clear the TEMP flag
+                */
+               tt_global_entry->common.flags &= ~BATADV_TT_CLIENT_TEMP;
 
                /* If there is the BATADV_TT_CLIENT_ROAM flag set, there is only
                 * one originator left in the list and we previously received a
@@ -727,12 +781,9 @@ int batadv_tt_global_add(struct batadv_priv *bat_priv,
                        tt_global_entry->common.flags &= ~BATADV_TT_CLIENT_ROAM;
                        tt_global_entry->roam_at = 0;
                }
-
-               if (!batadv_tt_global_entry_has_orig(tt_global_entry,
-                                                    orig_node))
-                       batadv_tt_global_add_orig_entry(tt_global_entry,
-                                                       orig_node, ttvn);
        }
+       /* add the new orig_entry (if needed) or update it */
+       batadv_tt_global_orig_entry_add(tt_global_entry, orig_node, ttvn);
 
        batadv_dbg(BATADV_DBG_TT, bat_priv,
                   "Creating new global tt entry: %pM (via %pM)\n",
@@ -771,11 +822,12 @@ batadv_tt_global_print_entry(struct batadv_tt_global_entry *tt_global_entry,
        hlist_for_each_entry_rcu(orig_entry, node, head, list) {
                flags = tt_common_entry->flags;
                last_ttvn = atomic_read(&orig_entry->orig_node->last_ttvn);
-               seq_printf(seq, " * %pM  (%3u) via %pM     (%3u)   [%c%c]\n",
+               seq_printf(seq, " * %pM  (%3u) via %pM     (%3u)   [%c%c%c]\n",
                           tt_global_entry->common.addr, orig_entry->ttvn,
                           orig_entry->orig_node->orig, last_ttvn,
                           (flags & BATADV_TT_CLIENT_ROAM ? 'R' : '.'),
-                          (flags & BATADV_TT_CLIENT_WIFI ? 'W' : '.'));
+                          (flags & BATADV_TT_CLIENT_WIFI ? 'W' : '.'),
+                          (flags & BATADV_TT_CLIENT_TEMP ? 'T' : '.'));
        }
 }
 
@@ -783,7 +835,7 @@ int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset)
 {
        struct net_device *net_dev = (struct net_device *)seq->private;
        struct batadv_priv *bat_priv = netdev_priv(net_dev);
-       struct batadv_hashtable *hash = bat_priv->tt_global_hash;
+       struct batadv_hashtable *hash = bat_priv->tt.global_hash;
        struct batadv_tt_common_entry *tt_common_entry;
        struct batadv_tt_global_entry *tt_global;
        struct batadv_hard_iface *primary_if;
@@ -884,7 +936,7 @@ batadv_tt_global_del_struct(struct batadv_priv *bat_priv,
                   "Deleting global tt entry %pM: %s\n",
                   tt_global_entry->common.addr, message);
 
-       batadv_hash_remove(bat_priv->tt_global_hash, batadv_compare_tt,
+       batadv_hash_remove(bat_priv->tt.global_hash, batadv_compare_tt,
                           batadv_choose_orig, tt_global_entry->common.addr);
        batadv_tt_global_entry_free_ref(tt_global_entry);
 
@@ -995,7 +1047,7 @@ void batadv_tt_global_del_orig(struct batadv_priv *bat_priv,
        struct batadv_tt_global_entry *tt_global;
        struct batadv_tt_common_entry *tt_common_entry;
        uint32_t i;
-       struct batadv_hashtable *hash = bat_priv->tt_global_hash;
+       struct batadv_hashtable *hash = bat_priv->tt.global_hash;
        struct hlist_node *node, *safe;
        struct hlist_head *head;
        spinlock_t *list_lock; /* protects write access to the hash lists */
@@ -1030,49 +1082,63 @@ void batadv_tt_global_del_orig(struct batadv_priv *bat_priv,
        orig_node->tt_initialised = false;
 }
 
-static void batadv_tt_global_roam_purge_list(struct batadv_priv *bat_priv,
-                                            struct hlist_head *head)
+static bool batadv_tt_global_to_purge(struct batadv_tt_global_entry *tt_global,
+                                     char **msg)
 {
-       struct batadv_tt_common_entry *tt_common_entry;
-       struct batadv_tt_global_entry *tt_global_entry;
-       struct hlist_node *node, *node_tmp;
-
-       hlist_for_each_entry_safe(tt_common_entry, node, node_tmp, head,
-                                 hash_entry) {
-               tt_global_entry = container_of(tt_common_entry,
-                                              struct batadv_tt_global_entry,
-                                              common);
-               if (!(tt_global_entry->common.flags & BATADV_TT_CLIENT_ROAM))
-                       continue;
-               if (!batadv_has_timed_out(tt_global_entry->roam_at,
-                                         BATADV_TT_CLIENT_ROAM_TIMEOUT))
-                       continue;
+       bool purge = false;
+       unsigned long roam_timeout = BATADV_TT_CLIENT_ROAM_TIMEOUT;
+       unsigned long temp_timeout = BATADV_TT_CLIENT_TEMP_TIMEOUT;
 
-               batadv_dbg(BATADV_DBG_TT, bat_priv,
-                          "Deleting global tt entry (%pM): Roaming timeout\n",
-                          tt_global_entry->common.addr);
+       if ((tt_global->common.flags & BATADV_TT_CLIENT_ROAM) &&
+           batadv_has_timed_out(tt_global->roam_at, roam_timeout)) {
+               purge = true;
+               *msg = "Roaming timeout\n";
+       }
 
-               hlist_del_rcu(node);
-               batadv_tt_global_entry_free_ref(tt_global_entry);
+       if ((tt_global->common.flags & BATADV_TT_CLIENT_TEMP) &&
+           batadv_has_timed_out(tt_global->common.added_at, temp_timeout)) {
+               purge = true;
+               *msg = "Temporary client timeout\n";
        }
+
+       return purge;
 }
 
-static void batadv_tt_global_roam_purge(struct batadv_priv *bat_priv)
+static void batadv_tt_global_purge(struct batadv_priv *bat_priv)
 {
-       struct batadv_hashtable *hash = bat_priv->tt_global_hash;
+       struct batadv_hashtable *hash = bat_priv->tt.global_hash;
        struct hlist_head *head;
+       struct hlist_node *node, *node_tmp;
        spinlock_t *list_lock; /* protects write access to the hash lists */
        uint32_t i;
+       char *msg = NULL;
+       struct batadv_tt_common_entry *tt_common;
+       struct batadv_tt_global_entry *tt_global;
 
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
                list_lock = &hash->list_locks[i];
 
                spin_lock_bh(list_lock);
-               batadv_tt_global_roam_purge_list(bat_priv, head);
+               hlist_for_each_entry_safe(tt_common, node, node_tmp, head,
+                                         hash_entry) {
+                       tt_global = container_of(tt_common,
+                                                struct batadv_tt_global_entry,
+                                                common);
+
+                       if (!batadv_tt_global_to_purge(tt_global, &msg))
+                               continue;
+
+                       batadv_dbg(BATADV_DBG_TT, bat_priv,
+                                  "Deleting global tt entry (%pM): %s\n",
+                                  tt_global->common.addr, msg);
+
+                       hlist_del_rcu(node);
+
+                       batadv_tt_global_entry_free_ref(tt_global);
+               }
                spin_unlock_bh(list_lock);
        }
-
 }
 
 static void batadv_tt_global_table_free(struct batadv_priv *bat_priv)
@@ -1085,10 +1151,10 @@ static void batadv_tt_global_table_free(struct batadv_priv *bat_priv)
        struct hlist_head *head;
        uint32_t i;
 
-       if (!bat_priv->tt_global_hash)
+       if (!bat_priv->tt.global_hash)
                return;
 
-       hash = bat_priv->tt_global_hash;
+       hash = bat_priv->tt.global_hash;
 
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
@@ -1108,7 +1174,7 @@ static void batadv_tt_global_table_free(struct batadv_priv *bat_priv)
 
        batadv_hash_destroy(hash);
 
-       bat_priv->tt_global_hash = NULL;
+       bat_priv->tt.global_hash = NULL;
 }
 
 static bool
@@ -1187,7 +1253,7 @@ static uint16_t batadv_tt_global_crc(struct batadv_priv *bat_priv,
                                     struct batadv_orig_node *orig_node)
 {
        uint16_t total = 0, total_one;
-       struct batadv_hashtable *hash = bat_priv->tt_global_hash;
+       struct batadv_hashtable *hash = bat_priv->tt.global_hash;
        struct batadv_tt_common_entry *tt_common;
        struct batadv_tt_global_entry *tt_global;
        struct hlist_node *node;
@@ -1210,6 +1276,12 @@ static uint16_t batadv_tt_global_crc(struct batadv_priv *bat_priv,
                         */
                        if (tt_common->flags & BATADV_TT_CLIENT_ROAM)
                                continue;
+                       /* Temporary clients have not been announced yet, so
+                        * they have to be skipped while computing the global
+                        * crc
+                        */
+                       if (tt_common->flags & BATADV_TT_CLIENT_TEMP)
+                               continue;
 
                        /* find out if this global entry is announced by this
                         * originator
@@ -1234,7 +1306,7 @@ static uint16_t batadv_tt_global_crc(struct batadv_priv *bat_priv,
 static uint16_t batadv_tt_local_crc(struct batadv_priv *bat_priv)
 {
        uint16_t total = 0, total_one;
-       struct batadv_hashtable *hash = bat_priv->tt_local_hash;
+       struct batadv_hashtable *hash = bat_priv->tt.local_hash;
        struct batadv_tt_common_entry *tt_common;
        struct hlist_node *node;
        struct hlist_head *head;
@@ -1267,14 +1339,14 @@ static void batadv_tt_req_list_free(struct batadv_priv *bat_priv)
 {
        struct batadv_tt_req_node *node, *safe;
 
-       spin_lock_bh(&bat_priv->tt_req_list_lock);
+       spin_lock_bh(&bat_priv->tt.req_list_lock);
 
-       list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
+       list_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
                list_del(&node->list);
                kfree(node);
        }
 
-       spin_unlock_bh(&bat_priv->tt_req_list_lock);
+       spin_unlock_bh(&bat_priv->tt.req_list_lock);
 }
 
 static void batadv_tt_save_orig_buffer(struct batadv_priv *bat_priv,
@@ -1304,15 +1376,15 @@ static void batadv_tt_req_purge(struct batadv_priv *bat_priv)
 {
        struct batadv_tt_req_node *node, *safe;
 
-       spin_lock_bh(&bat_priv->tt_req_list_lock);
-       list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
+       spin_lock_bh(&bat_priv->tt.req_list_lock);
+       list_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
                if (batadv_has_timed_out(node->issued_at,
                                         BATADV_TT_REQUEST_TIMEOUT)) {
                        list_del(&node->list);
                        kfree(node);
                }
        }
-       spin_unlock_bh(&bat_priv->tt_req_list_lock);
+       spin_unlock_bh(&bat_priv->tt.req_list_lock);
 }
 
 /* returns the pointer to the new tt_req_node struct if no request
@@ -1324,8 +1396,8 @@ batadv_new_tt_req_node(struct batadv_priv *bat_priv,
 {
        struct batadv_tt_req_node *tt_req_node_tmp, *tt_req_node = NULL;
 
-       spin_lock_bh(&bat_priv->tt_req_list_lock);
-       list_for_each_entry(tt_req_node_tmp, &bat_priv->tt_req_list, list) {
+       spin_lock_bh(&bat_priv->tt.req_list_lock);
+       list_for_each_entry(tt_req_node_tmp, &bat_priv->tt.req_list, list) {
                if (batadv_compare_eth(tt_req_node_tmp, orig_node) &&
                    !batadv_has_timed_out(tt_req_node_tmp->issued_at,
                                          BATADV_TT_REQUEST_TIMEOUT))
@@ -1339,9 +1411,9 @@ batadv_new_tt_req_node(struct batadv_priv *bat_priv,
        memcpy(tt_req_node->addr, orig_node->orig, ETH_ALEN);
        tt_req_node->issued_at = jiffies;
 
-       list_add(&tt_req_node->list, &bat_priv->tt_req_list);
+       list_add(&tt_req_node->list, &bat_priv->tt.req_list);
 unlock:
-       spin_unlock_bh(&bat_priv->tt_req_list_lock);
+       spin_unlock_bh(&bat_priv->tt.req_list_lock);
        return tt_req_node;
 }
 
@@ -1363,7 +1435,8 @@ static int batadv_tt_global_valid(const void *entry_ptr,
        const struct batadv_tt_global_entry *tt_global_entry;
        const struct batadv_orig_node *orig_node = data_ptr;
 
-       if (tt_common_entry->flags & BATADV_TT_CLIENT_ROAM)
+       if (tt_common_entry->flags & BATADV_TT_CLIENT_ROAM ||
+           tt_common_entry->flags & BATADV_TT_CLIENT_TEMP)
                return 0;
 
        tt_global_entry = container_of(tt_common_entry,
@@ -1507,9 +1580,9 @@ out:
        if (ret)
                kfree_skb(skb);
        if (ret && tt_req_node) {
-               spin_lock_bh(&bat_priv->tt_req_list_lock);
+               spin_lock_bh(&bat_priv->tt.req_list_lock);
                list_del(&tt_req_node->list);
-               spin_unlock_bh(&bat_priv->tt_req_list_lock);
+               spin_unlock_bh(&bat_priv->tt.req_list_lock);
                kfree(tt_req_node);
        }
        return ret;
@@ -1530,6 +1603,7 @@ batadv_send_other_tt_response(struct batadv_priv *bat_priv,
        uint16_t tt_len, tt_tot;
        struct sk_buff *skb = NULL;
        struct batadv_tt_query_packet *tt_response;
+       uint8_t *packet_pos;
        size_t len;
 
        batadv_dbg(BATADV_DBG_TT, bat_priv,
@@ -1583,8 +1657,8 @@ batadv_send_other_tt_response(struct batadv_priv *bat_priv,
                        goto unlock;
 
                skb_reserve(skb, ETH_HLEN);
-               tt_response = (struct batadv_tt_query_packet *)skb_put(skb,
-                                                                      len);
+               packet_pos = skb_put(skb, len);
+               tt_response = (struct batadv_tt_query_packet *)packet_pos;
                tt_response->ttvn = req_ttvn;
                tt_response->tt_data = htons(tt_tot);
 
@@ -1600,7 +1674,7 @@ batadv_send_other_tt_response(struct batadv_priv *bat_priv,
                ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);
 
                skb = batadv_tt_response_fill_table(tt_len, ttvn,
-                                                   bat_priv->tt_global_hash,
+                                                   bat_priv->tt.global_hash,
                                                    primary_if,
                                                    batadv_tt_global_valid,
                                                    req_dst_orig_node);
@@ -1663,6 +1737,7 @@ batadv_send_my_tt_response(struct batadv_priv *bat_priv,
        uint16_t tt_len, tt_tot;
        struct sk_buff *skb = NULL;
        struct batadv_tt_query_packet *tt_response;
+       uint8_t *packet_pos;
        size_t len;
 
        batadv_dbg(BATADV_DBG_TT, bat_priv,
@@ -1671,7 +1746,7 @@ batadv_send_my_tt_response(struct batadv_priv *bat_priv,
                   (tt_request->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));
 
 
-       my_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
+       my_ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn);
        req_ttvn = tt_request->ttvn;
 
        orig_node = batadv_orig_hash_find(bat_priv, tt_request->src);
@@ -1690,7 +1765,7 @@ batadv_send_my_tt_response(struct batadv_priv *bat_priv,
         * is too big send the whole local translation table
         */
        if (tt_request->flags & BATADV_TT_FULL_TABLE || my_ttvn != req_ttvn ||
-           !bat_priv->tt_buff)
+           !bat_priv->tt.last_changeset)
                full_table = true;
        else
                full_table = false;
@@ -1699,8 +1774,8 @@ batadv_send_my_tt_response(struct batadv_priv *bat_priv,
         * I'll send only one packet with as much TT entries as I can
         */
        if (!full_table) {
-               spin_lock_bh(&bat_priv->tt_buff_lock);
-               tt_len = bat_priv->tt_buff_len;
+               spin_lock_bh(&bat_priv->tt.last_changeset_lock);
+               tt_len = bat_priv->tt.last_changeset_len;
                tt_tot = tt_len / sizeof(struct batadv_tt_change);
 
                len = sizeof(*tt_response) + tt_len;
@@ -1709,22 +1784,22 @@ batadv_send_my_tt_response(struct batadv_priv *bat_priv,
                        goto unlock;
 
                skb_reserve(skb, ETH_HLEN);
-               tt_response = (struct batadv_tt_query_packet *)skb_put(skb,
-                                                                      len);
+               packet_pos = skb_put(skb, len);
+               tt_response = (struct batadv_tt_query_packet *)packet_pos;
                tt_response->ttvn = req_ttvn;
                tt_response->tt_data = htons(tt_tot);
 
                tt_buff = skb->data + sizeof(*tt_response);
-               memcpy(tt_buff, bat_priv->tt_buff,
-                      bat_priv->tt_buff_len);
-               spin_unlock_bh(&bat_priv->tt_buff_lock);
+               memcpy(tt_buff, bat_priv->tt.last_changeset,
+                      bat_priv->tt.last_changeset_len);
+               spin_unlock_bh(&bat_priv->tt.last_changeset_lock);
        } else {
-               tt_len = (uint16_t)atomic_read(&bat_priv->num_local_tt);
+               tt_len = (uint16_t)atomic_read(&bat_priv->tt.local_entry_num);
                tt_len *= sizeof(struct batadv_tt_change);
-               ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
+               ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn);
 
                skb = batadv_tt_response_fill_table(tt_len, ttvn,
-                                                   bat_priv->tt_local_hash,
+                                                   bat_priv->tt.local_hash,
                                                    primary_if,
                                                    batadv_tt_local_valid_entry,
                                                    NULL);
@@ -1756,7 +1831,7 @@ batadv_send_my_tt_response(struct batadv_priv *bat_priv,
        goto out;
 
 unlock:
-       spin_unlock_bh(&bat_priv->tt_buff_lock);
+       spin_unlock_bh(&bat_priv->tt.last_changeset_lock);
 out:
        if (orig_node)
                batadv_orig_node_free_ref(orig_node);
@@ -1909,14 +1984,14 @@ void batadv_handle_tt_response(struct batadv_priv *bat_priv,
        }
 
        /* Delete the tt_req_node from pending tt_requests list */
-       spin_lock_bh(&bat_priv->tt_req_list_lock);
-       list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
+       spin_lock_bh(&bat_priv->tt.req_list_lock);
+       list_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
                if (!batadv_compare_eth(node->addr, tt_response->src))
                        continue;
                list_del(&node->list);
                kfree(node);
        }
-       spin_unlock_bh(&bat_priv->tt_req_list_lock);
+       spin_unlock_bh(&bat_priv->tt.req_list_lock);
 
        /* Recalculate the CRC for this orig_node and store it */
        orig_node->tt_crc = batadv_tt_global_crc(bat_priv, orig_node);
@@ -1950,22 +2025,22 @@ static void batadv_tt_roam_list_free(struct batadv_priv *bat_priv)
 {
        struct batadv_tt_roam_node *node, *safe;
 
-       spin_lock_bh(&bat_priv->tt_roam_list_lock);
+       spin_lock_bh(&bat_priv->tt.roam_list_lock);
 
-       list_for_each_entry_safe(node, safe, &bat_priv->tt_roam_list, list) {
+       list_for_each_entry_safe(node, safe, &bat_priv->tt.roam_list, list) {
                list_del(&node->list);
                kfree(node);
        }
 
-       spin_unlock_bh(&bat_priv->tt_roam_list_lock);
+       spin_unlock_bh(&bat_priv->tt.roam_list_lock);
 }
 
 static void batadv_tt_roam_purge(struct batadv_priv *bat_priv)
 {
        struct batadv_tt_roam_node *node, *safe;
 
-       spin_lock_bh(&bat_priv->tt_roam_list_lock);
-       list_for_each_entry_safe(node, safe, &bat_priv->tt_roam_list, list) {
+       spin_lock_bh(&bat_priv->tt.roam_list_lock);
+       list_for_each_entry_safe(node, safe, &bat_priv->tt.roam_list, list) {
                if (!batadv_has_timed_out(node->first_time,
                                          BATADV_ROAMING_MAX_TIME))
                        continue;
@@ -1973,7 +2048,7 @@ static void batadv_tt_roam_purge(struct batadv_priv *bat_priv)
                list_del(&node->list);
                kfree(node);
        }
-       spin_unlock_bh(&bat_priv->tt_roam_list_lock);
+       spin_unlock_bh(&bat_priv->tt.roam_list_lock);
 }
 
 /* This function checks whether the client already reached the
@@ -1988,11 +2063,11 @@ static bool batadv_tt_check_roam_count(struct batadv_priv *bat_priv,
        struct batadv_tt_roam_node *tt_roam_node;
        bool ret = false;
 
-       spin_lock_bh(&bat_priv->tt_roam_list_lock);
+       spin_lock_bh(&bat_priv->tt.roam_list_lock);
        /* The new tt_req will be issued only if I'm not waiting for a
         * reply from the same orig_node yet
         */
-       list_for_each_entry(tt_roam_node, &bat_priv->tt_roam_list, list) {
+       list_for_each_entry(tt_roam_node, &bat_priv->tt.roam_list, list) {
                if (!batadv_compare_eth(tt_roam_node->addr, client))
                        continue;
 
@@ -2017,12 +2092,12 @@ static bool batadv_tt_check_roam_count(struct batadv_priv *bat_priv,
                           BATADV_ROAMING_MAX_COUNT - 1);
                memcpy(tt_roam_node->addr, client, ETH_ALEN);
 
-               list_add(&tt_roam_node->list, &bat_priv->tt_roam_list);
+               list_add(&tt_roam_node->list, &bat_priv->tt.roam_list);
                ret = true;
        }
 
 unlock:
-       spin_unlock_bh(&bat_priv->tt_roam_list_lock);
+       spin_unlock_bh(&bat_priv->tt.roam_list_lock);
        return ret;
 }
 
@@ -2086,13 +2161,15 @@ out:
 static void batadv_tt_purge(struct work_struct *work)
 {
        struct delayed_work *delayed_work;
+       struct batadv_priv_tt *priv_tt;
        struct batadv_priv *bat_priv;
 
        delayed_work = container_of(work, struct delayed_work, work);
-       bat_priv = container_of(delayed_work, struct batadv_priv, tt_work);
+       priv_tt = container_of(delayed_work, struct batadv_priv_tt, work);
+       bat_priv = container_of(priv_tt, struct batadv_priv, tt);
 
        batadv_tt_local_purge(bat_priv);
-       batadv_tt_global_roam_purge(bat_priv);
+       batadv_tt_global_purge(bat_priv);
        batadv_tt_req_purge(bat_priv);
        batadv_tt_roam_purge(bat_priv);
 
@@ -2101,7 +2178,7 @@ static void batadv_tt_purge(struct work_struct *work)
 
 void batadv_tt_free(struct batadv_priv *bat_priv)
 {
-       cancel_delayed_work_sync(&bat_priv->tt_work);
+       cancel_delayed_work_sync(&bat_priv->tt.work);
 
        batadv_tt_local_table_free(bat_priv);
        batadv_tt_global_table_free(bat_priv);
@@ -2109,7 +2186,7 @@ void batadv_tt_free(struct batadv_priv *bat_priv)
        batadv_tt_changes_list_free(bat_priv);
        batadv_tt_roam_list_free(bat_priv);
 
-       kfree(bat_priv->tt_buff);
+       kfree(bat_priv->tt.last_changeset);
 }
 
 /* This function will enable or disable the specified flags for all the entries
@@ -2153,7 +2230,7 @@ out:
 /* Purge out all the tt local entries marked with BATADV_TT_CLIENT_PENDING */
 static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
 {
-       struct batadv_hashtable *hash = bat_priv->tt_local_hash;
+       struct batadv_hashtable *hash = bat_priv->tt.local_hash;
        struct batadv_tt_common_entry *tt_common;
        struct batadv_tt_local_entry *tt_local;
        struct hlist_node *node, *node_tmp;
@@ -2178,7 +2255,7 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
                                   "Deleting local tt entry (%pM): pending\n",
                                   tt_common->addr);
 
-                       atomic_dec(&bat_priv->num_local_tt);
+                       atomic_dec(&bat_priv->tt.local_entry_num);
                        hlist_del_rcu(node);
                        tt_local = container_of(tt_common,
                                                struct batadv_tt_local_entry,
@@ -2196,26 +2273,26 @@ static int batadv_tt_commit_changes(struct batadv_priv *bat_priv,
 {
        uint16_t changed_num = 0;
 
-       if (atomic_read(&bat_priv->tt_local_changes) < 1)
+       if (atomic_read(&bat_priv->tt.local_changes) < 1)
                return -ENOENT;
 
-       changed_num = batadv_tt_set_flags(bat_priv->tt_local_hash,
+       changed_num = batadv_tt_set_flags(bat_priv->tt.local_hash,
                                          BATADV_TT_CLIENT_NEW, false);
 
        /* all reset entries have to be counted as local entries */
-       atomic_add(changed_num, &bat_priv->num_local_tt);
+       atomic_add(changed_num, &bat_priv->tt.local_entry_num);
        batadv_tt_local_purge_pending_clients(bat_priv);
-       bat_priv->tt_crc = batadv_tt_local_crc(bat_priv);
+       bat_priv->tt.local_crc = batadv_tt_local_crc(bat_priv);
 
        /* Increment the TTVN only once per OGM interval */
-       atomic_inc(&bat_priv->ttvn);
+       atomic_inc(&bat_priv->tt.vn);
        batadv_dbg(BATADV_DBG_TT, bat_priv,
                   "Local changes committed, updating to ttvn %u\n",
-                  (uint8_t)atomic_read(&bat_priv->ttvn));
-       bat_priv->tt_poss_change = false;
+                  (uint8_t)atomic_read(&bat_priv->tt.vn));
+       bat_priv->tt.poss_change = false;
 
        /* reset the sending counter */
-       atomic_set(&bat_priv->tt_ogm_append_cnt, BATADV_TT_OGM_APPEND_MAX);
+       atomic_set(&bat_priv->tt.ogm_append_cnt, BATADV_TT_OGM_APPEND_MAX);
 
        return batadv_tt_changes_fill_buff(bat_priv, packet_buff,
                                           packet_buff_len, packet_min_len);
@@ -2235,7 +2312,7 @@ int batadv_tt_append_diff(struct batadv_priv *bat_priv,
 
        /* if the changes have been sent often enough */
        if ((tt_num_changes < 0) &&
-           (!batadv_atomic_dec_not_zero(&bat_priv->tt_ogm_append_cnt))) {
+           (!batadv_atomic_dec_not_zero(&bat_priv->tt.ogm_append_cnt))) {
                batadv_tt_realloc_packet_buff(packet_buff, packet_buff_len,
                                              packet_min_len, packet_min_len);
                tt_num_changes = 0;
@@ -2366,3 +2443,22 @@ bool batadv_tt_global_client_is_roaming(struct batadv_priv *bat_priv,
 out:
        return ret;
 }
+
+bool batadv_tt_add_temporary_global_entry(struct batadv_priv *bat_priv,
+                                         struct batadv_orig_node *orig_node,
+                                         const unsigned char *addr)
+{
+       bool ret = false;
+
+       if (!batadv_tt_global_add(bat_priv, orig_node, addr,
+                                 BATADV_TT_CLIENT_TEMP,
+                                 atomic_read(&orig_node->last_ttvn)))
+               goto out;
+
+       batadv_dbg(BATADV_DBG_TT, bat_priv,
+                  "Added temporary global client (addr: %pM orig: %pM)\n",
+                  addr, orig_node->orig);
+       ret = true;
+out:
+       return ret;
+}
index ffa87355096b3396bd0ffcf8e7cbba7fd9d86581..811fffd4760c3678a60994e027896277289e751a 100644 (file)
@@ -59,6 +59,8 @@ int batadv_tt_append_diff(struct batadv_priv *bat_priv,
                          int packet_min_len);
 bool batadv_tt_global_client_is_roaming(struct batadv_priv *bat_priv,
                                        uint8_t *addr);
-
+bool batadv_tt_add_temporary_global_entry(struct batadv_priv *bat_priv,
+                                         struct batadv_orig_node *orig_node,
+                                         const unsigned char *addr);
 
 #endif /* _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ */
index 12635fd2c3d3fe68985d53a00e10d684045bfc32..2ed82caacdca4bfa0cf429d74a7311523874b4c2 100644 (file)
@@ -145,6 +145,11 @@ struct batadv_bcast_duplist_entry {
 #endif
 
 enum batadv_counters {
+       BATADV_CNT_TX,
+       BATADV_CNT_TX_BYTES,
+       BATADV_CNT_TX_DROPPED,
+       BATADV_CNT_RX,
+       BATADV_CNT_RX_BYTES,
        BATADV_CNT_FORWARD,
        BATADV_CNT_FORWARD_BYTES,
        BATADV_CNT_MGMT_TX,
@@ -160,6 +165,67 @@ enum batadv_counters {
        BATADV_CNT_NUM,
 };
 
+/**
+ * struct batadv_priv_tt - per mesh interface translation table data
+ * @vn: translation table version number
+ * @local_changes: changes registered in an originator interval
+ * @poss_change: Detect an ongoing roaming phase. If true, then this node
+ *  received a roaming_adv and has to inspect every packet directed to it to
+ *  check whether it still is the true destination or not. This flag will be
+ *  reset to false as soon as the this node's ttvn is increased
+ * @changes_list: tracks tt local changes within an originator interval
+ * @req_list: list of pending tt_requests
+ * @local_crc: Checksum of the local table, recomputed before sending a new OGM
+ */
+struct batadv_priv_tt {
+       atomic_t vn;
+       atomic_t ogm_append_cnt;
+       atomic_t local_changes;
+       bool poss_change;
+       struct list_head changes_list;
+       struct batadv_hashtable *local_hash;
+       struct batadv_hashtable *global_hash;
+       struct list_head req_list;
+       struct list_head roam_list;
+       spinlock_t changes_list_lock; /* protects changes */
+       spinlock_t req_list_lock; /* protects req_list */
+       spinlock_t roam_list_lock; /* protects roam_list */
+       atomic_t local_entry_num;
+       uint16_t local_crc;
+       unsigned char *last_changeset;
+       int16_t last_changeset_len;
+       spinlock_t last_changeset_lock; /* protects last_changeset */
+       struct delayed_work work;
+};
+
+#ifdef CONFIG_BATMAN_ADV_BLA
+struct batadv_priv_bla {
+       atomic_t num_requests; /* number of bla requests in flight */
+       struct batadv_hashtable *claim_hash;
+       struct batadv_hashtable *backbone_hash;
+       struct batadv_bcast_duplist_entry bcast_duplist[BATADV_DUPLIST_SIZE];
+       int bcast_duplist_curr;
+       struct batadv_bla_claim_dst claim_dest;
+       struct delayed_work work;
+};
+#endif
+
+struct batadv_priv_gw {
+       struct hlist_head list;
+       spinlock_t list_lock; /* protects gw_list and curr_gw */
+       struct batadv_gw_node __rcu *curr_gw;  /* rcu protected pointer */
+       atomic_t reselect;
+};
+
+struct batadv_priv_vis {
+       struct list_head send_list;
+       struct batadv_hashtable *hash;
+       spinlock_t hash_lock; /* protects hash */
+       spinlock_t list_lock; /* protects info::recv_list */
+       struct delayed_work work;
+       struct batadv_vis_info *my_info;
+};
+
 struct batadv_priv {
        atomic_t mesh_state;
        struct net_device_stats stats;
@@ -179,64 +245,24 @@ struct batadv_priv {
        atomic_t bcast_seqno;
        atomic_t bcast_queue_left;
        atomic_t batman_queue_left;
-       atomic_t ttvn; /* translation table version number */
-       atomic_t tt_ogm_append_cnt;
-       atomic_t tt_local_changes; /* changes registered in a OGM interval */
-       atomic_t bla_num_requests; /* number of bla requests in flight */
-       /* The tt_poss_change flag is used to detect an ongoing roaming phase.
-        * If true, then I received a Roaming_adv and I have to inspect every
-        * packet directed to me to check whether I am still the true
-        * destination or not. This flag will be reset to false as soon as I
-        * increase my TTVN
-        */
-       bool tt_poss_change;
        char num_ifaces;
        struct batadv_debug_log *debug_log;
        struct kobject *mesh_obj;
        struct dentry *debug_dir;
        struct hlist_head forw_bat_list;
        struct hlist_head forw_bcast_list;
-       struct hlist_head gw_list;
-       struct list_head tt_changes_list; /* tracks changes in a OGM int */
-       struct list_head vis_send_list;
        struct batadv_hashtable *orig_hash;
-       struct batadv_hashtable *tt_local_hash;
-       struct batadv_hashtable *tt_global_hash;
-#ifdef CONFIG_BATMAN_ADV_BLA
-       struct batadv_hashtable *claim_hash;
-       struct batadv_hashtable *backbone_hash;
-#endif
-       struct list_head tt_req_list; /* list of pending tt_requests */
-       struct list_head tt_roam_list;
-       struct batadv_hashtable *vis_hash;
-#ifdef CONFIG_BATMAN_ADV_BLA
-       struct batadv_bcast_duplist_entry bcast_duplist[BATADV_DUPLIST_SIZE];
-       int bcast_duplist_curr;
-       struct batadv_bla_claim_dst claim_dest;
-#endif
        spinlock_t forw_bat_list_lock; /* protects forw_bat_list */
        spinlock_t forw_bcast_list_lock; /* protects  */
-       spinlock_t tt_changes_list_lock; /* protects tt_changes */
-       spinlock_t tt_req_list_lock; /* protects tt_req_list */
-       spinlock_t tt_roam_list_lock; /* protects tt_roam_list */
-       spinlock_t gw_list_lock; /* protects gw_list and curr_gw */
-       spinlock_t vis_hash_lock; /* protects vis_hash */
-       spinlock_t vis_list_lock; /* protects vis_info::recv_list */
-       atomic_t num_local_tt;
-       /* Checksum of the local table, recomputed before sending a new OGM */
-       uint16_t tt_crc;
-       unsigned char *tt_buff;
-       int16_t tt_buff_len;
-       spinlock_t tt_buff_lock; /* protects tt_buff */
-       struct delayed_work tt_work;
        struct delayed_work orig_work;
-       struct delayed_work vis_work;
-       struct delayed_work bla_work;
-       struct batadv_gw_node __rcu *curr_gw;  /* rcu protected pointer */
-       atomic_t gw_reselect;
        struct batadv_hard_iface __rcu *primary_if;  /* rcu protected pointer */
-       struct batadv_vis_info *my_vis_info;
        struct batadv_algo_ops *bat_algo_ops;
+#ifdef CONFIG_BATMAN_ADV_BLA
+       struct batadv_priv_bla bla;
+#endif
+       struct batadv_priv_gw gw;
+       struct batadv_priv_tt tt;
+       struct batadv_priv_vis vis;
 };
 
 struct batadv_socket_client {
@@ -258,6 +284,7 @@ struct batadv_tt_common_entry {
        uint8_t addr[ETH_ALEN];
        struct hlist_node hash_entry;
        uint16_t flags;
+       unsigned long added_at;
        atomic_t refcount;
        struct rcu_head rcu;
 };
@@ -277,6 +304,7 @@ struct batadv_tt_global_entry {
 struct batadv_tt_orig_list_entry {
        struct batadv_orig_node *orig_node;
        uint8_t ttvn;
+       atomic_t refcount;
        struct rcu_head rcu;
        struct hlist_node list;
 };
index 00164645b3f74763ce1ca33cde11ea598c279107..f39723281ca1f7cd96dd9f7535a2bea8fbc5aa07 100644 (file)
@@ -39,6 +39,7 @@ batadv_frag_merge_packet(struct list_head *head,
        struct batadv_unicast_packet *unicast_packet;
        int hdr_len = sizeof(*unicast_packet);
        int uni_diff = sizeof(*up) - hdr_len;
+       uint8_t *packet_pos;
 
        up = (struct batadv_unicast_frag_packet *)skb->data;
        /* set skb to the first part and tmp_skb to the second part */
@@ -65,8 +66,8 @@ batadv_frag_merge_packet(struct list_head *head,
        kfree_skb(tmp_skb);
 
        memmove(skb->data + uni_diff, skb->data, hdr_len);
-       unicast_packet = (struct batadv_unicast_packet *)skb_pull(skb,
-                                                                 uni_diff);
+       packet_pos = skb_pull(skb, uni_diff);
+       unicast_packet = (struct batadv_unicast_packet *)packet_pos;
        unicast_packet->header.packet_type = BATADV_UNICAST;
 
        return skb;
@@ -121,6 +122,7 @@ batadv_frag_search_packet(struct list_head *head,
 {
        struct batadv_frag_packet_list_entry *tfp;
        struct batadv_unicast_frag_packet *tmp_up = NULL;
+       int is_head_tmp, is_head;
        uint16_t search_seqno;
 
        if (up->flags & BATADV_UNI_FRAG_HEAD)
@@ -128,6 +130,8 @@ batadv_frag_search_packet(struct list_head *head,
        else
                search_seqno = ntohs(up->seqno)-1;
 
+       is_head = !!(up->flags & BATADV_UNI_FRAG_HEAD);
+
        list_for_each_entry(tfp, head, list) {
 
                if (!tfp->skb)
@@ -139,9 +143,8 @@ batadv_frag_search_packet(struct list_head *head,
                tmp_up = (struct batadv_unicast_frag_packet *)tfp->skb->data;
 
                if (tfp->seqno == search_seqno) {
-
-                       if ((tmp_up->flags & BATADV_UNI_FRAG_HEAD) !=
-                           (up->flags & BATADV_UNI_FRAG_HEAD))
+                       is_head_tmp = !!(tmp_up->flags & BATADV_UNI_FRAG_HEAD);
+                       if (is_head_tmp != is_head)
                                return tfp;
                        else
                                goto mov_tail;
@@ -334,8 +337,7 @@ find_router:
        /* copy the destination for faster routing */
        memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN);
        /* set the destination tt version number */
-       unicast_packet->ttvn =
-               (uint8_t)atomic_read(&orig_node->last_ttvn);
+       unicast_packet->ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
 
        /* inform the destination node that we are still missing a correct route
         * for this client. The destination will receive this packet and will
index 2a2ea06814695f4356f38de38c83cd3465184630..5abd1454fb07d3025184de6731afad654176ae02 100644 (file)
@@ -41,13 +41,13 @@ static void batadv_free_info(struct kref *ref)
        bat_priv = info->bat_priv;
 
        list_del_init(&info->send_list);
-       spin_lock_bh(&bat_priv->vis_list_lock);
+       spin_lock_bh(&bat_priv->vis.list_lock);
        list_for_each_entry_safe(entry, tmp, &info->recv_list, list) {
                list_del(&entry->list);
                kfree(entry);
        }
 
-       spin_unlock_bh(&bat_priv->vis_list_lock);
+       spin_unlock_bh(&bat_priv->vis.list_lock);
        kfree_skb(info->skb_packet);
        kfree(info);
 }
@@ -94,7 +94,7 @@ static uint32_t batadv_vis_info_choose(const void *data, uint32_t size)
 static struct batadv_vis_info *
 batadv_vis_hash_find(struct batadv_priv *bat_priv, const void *data)
 {
-       struct batadv_hashtable *hash = bat_priv->vis_hash;
+       struct batadv_hashtable *hash = bat_priv->vis.hash;
        struct hlist_head *head;
        struct hlist_node *node;
        struct batadv_vis_info *vis_info, *vis_info_tmp = NULL;
@@ -252,7 +252,7 @@ int batadv_vis_seq_print_text(struct seq_file *seq, void *offset)
        struct hlist_head *head;
        struct net_device *net_dev = (struct net_device *)seq->private;
        struct batadv_priv *bat_priv = netdev_priv(net_dev);
-       struct batadv_hashtable *hash = bat_priv->vis_hash;
+       struct batadv_hashtable *hash = bat_priv->vis.hash;
        uint32_t i;
        int ret = 0;
        int vis_server = atomic_read(&bat_priv->vis_mode);
@@ -264,12 +264,12 @@ int batadv_vis_seq_print_text(struct seq_file *seq, void *offset)
        if (vis_server == BATADV_VIS_TYPE_CLIENT_UPDATE)
                goto out;
 
-       spin_lock_bh(&bat_priv->vis_hash_lock);
+       spin_lock_bh(&bat_priv->vis.hash_lock);
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
                batadv_vis_seq_print_text_bucket(seq, head);
        }
-       spin_unlock_bh(&bat_priv->vis_hash_lock);
+       spin_unlock_bh(&bat_priv->vis.hash_lock);
 
 out:
        if (primary_if)
@@ -285,7 +285,7 @@ static void batadv_send_list_add(struct batadv_priv *bat_priv,
 {
        if (list_empty(&info->send_list)) {
                kref_get(&info->refcount);
-               list_add_tail(&info->send_list, &bat_priv->vis_send_list);
+               list_add_tail(&info->send_list, &bat_priv->vis.send_list);
        }
 }
 
@@ -311,9 +311,9 @@ static void batadv_recv_list_add(struct batadv_priv *bat_priv,
                return;
 
        memcpy(entry->mac, mac, ETH_ALEN);
-       spin_lock_bh(&bat_priv->vis_list_lock);
+       spin_lock_bh(&bat_priv->vis.list_lock);
        list_add_tail(&entry->list, recv_list);
-       spin_unlock_bh(&bat_priv->vis_list_lock);
+       spin_unlock_bh(&bat_priv->vis.list_lock);
 }
 
 /* returns 1 if this mac is in the recv_list */
@@ -323,14 +323,14 @@ static int batadv_recv_list_is_in(struct batadv_priv *bat_priv,
 {
        const struct batadv_recvlist_node *entry;
 
-       spin_lock_bh(&bat_priv->vis_list_lock);
+       spin_lock_bh(&bat_priv->vis.list_lock);
        list_for_each_entry(entry, recv_list, list) {
                if (batadv_compare_eth(entry->mac, mac)) {
-                       spin_unlock_bh(&bat_priv->vis_list_lock);
+                       spin_unlock_bh(&bat_priv->vis.list_lock);
                        return 1;
                }
        }
-       spin_unlock_bh(&bat_priv->vis_list_lock);
+       spin_unlock_bh(&bat_priv->vis.list_lock);
        return 0;
 }
 
@@ -354,7 +354,7 @@ batadv_add_packet(struct batadv_priv *bat_priv,
 
        *is_new = 0;
        /* sanity check */
-       if (!bat_priv->vis_hash)
+       if (!bat_priv->vis.hash)
                return NULL;
 
        /* see if the packet is already in vis_hash */
@@ -385,7 +385,7 @@ batadv_add_packet(struct batadv_priv *bat_priv,
                        }
                }
                /* remove old entry */
-               batadv_hash_remove(bat_priv->vis_hash, batadv_vis_info_cmp,
+               batadv_hash_remove(bat_priv->vis.hash, batadv_vis_info_cmp,
                                   batadv_vis_info_choose, old_info);
                batadv_send_list_del(old_info);
                kref_put(&old_info->refcount, batadv_free_info);
@@ -426,7 +426,7 @@ batadv_add_packet(struct batadv_priv *bat_priv,
        batadv_recv_list_add(bat_priv, &info->recv_list, packet->sender_orig);
 
        /* try to add it */
-       hash_added = batadv_hash_add(bat_priv->vis_hash, batadv_vis_info_cmp,
+       hash_added = batadv_hash_add(bat_priv->vis.hash, batadv_vis_info_cmp,
                                     batadv_vis_info_choose, info,
                                     &info->hash_entry);
        if (hash_added != 0) {
@@ -449,7 +449,7 @@ void batadv_receive_server_sync_packet(struct batadv_priv *bat_priv,
 
        make_broadcast = (vis_server == BATADV_VIS_TYPE_SERVER_SYNC);
 
-       spin_lock_bh(&bat_priv->vis_hash_lock);
+       spin_lock_bh(&bat_priv->vis.hash_lock);
        info = batadv_add_packet(bat_priv, vis_packet, vis_info_len,
                                 &is_new, make_broadcast);
        if (!info)
@@ -461,7 +461,7 @@ void batadv_receive_server_sync_packet(struct batadv_priv *bat_priv,
        if (vis_server == BATADV_VIS_TYPE_SERVER_SYNC && is_new)
                batadv_send_list_add(bat_priv, info);
 end:
-       spin_unlock_bh(&bat_priv->vis_hash_lock);
+       spin_unlock_bh(&bat_priv->vis.hash_lock);
 }
 
 /* handle an incoming client update packet and schedule forward if needed. */
@@ -484,7 +484,7 @@ void batadv_receive_client_update_packet(struct batadv_priv *bat_priv,
            batadv_is_my_mac(vis_packet->target_orig))
                are_target = 1;
 
-       spin_lock_bh(&bat_priv->vis_hash_lock);
+       spin_lock_bh(&bat_priv->vis.hash_lock);
        info = batadv_add_packet(bat_priv, vis_packet, vis_info_len,
                                 &is_new, are_target);
 
@@ -505,7 +505,7 @@ void batadv_receive_client_update_packet(struct batadv_priv *bat_priv,
        }
 
 end:
-       spin_unlock_bh(&bat_priv->vis_hash_lock);
+       spin_unlock_bh(&bat_priv->vis.hash_lock);
 }
 
 /* Walk the originators and find the VIS server with the best tq. Set the packet
@@ -574,10 +574,11 @@ static int batadv_generate_vis_packet(struct batadv_priv *bat_priv)
        struct hlist_head *head;
        struct batadv_orig_node *orig_node;
        struct batadv_neigh_node *router;
-       struct batadv_vis_info *info = bat_priv->my_vis_info;
+       struct batadv_vis_info *info = bat_priv->vis.my_info;
        struct batadv_vis_packet *packet;
        struct batadv_vis_info_entry *entry;
        struct batadv_tt_common_entry *tt_common_entry;
+       uint8_t *packet_pos;
        int best_tq = -1;
        uint32_t i;
 
@@ -618,8 +619,8 @@ static int batadv_generate_vis_packet(struct batadv_priv *bat_priv)
                                goto next;
 
                        /* fill one entry into buffer. */
-                       entry = (struct batadv_vis_info_entry *)
-                                     skb_put(info->skb_packet, sizeof(*entry));
+                       packet_pos = skb_put(info->skb_packet, sizeof(*entry));
+                       entry = (struct batadv_vis_info_entry *)packet_pos;
                        memcpy(entry->src,
                               router->if_incoming->net_dev->dev_addr,
                               ETH_ALEN);
@@ -636,7 +637,7 @@ next:
                rcu_read_unlock();
        }
 
-       hash = bat_priv->tt_local_hash;
+       hash = bat_priv->tt.local_hash;
 
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
@@ -644,9 +645,8 @@ next:
                rcu_read_lock();
                hlist_for_each_entry_rcu(tt_common_entry, node, head,
                                         hash_entry) {
-                       entry = (struct batadv_vis_info_entry *)
-                                       skb_put(info->skb_packet,
-                                               sizeof(*entry));
+                       packet_pos = skb_put(info->skb_packet, sizeof(*entry));
+                       entry = (struct batadv_vis_info_entry *)packet_pos;
                        memset(entry->src, 0, ETH_ALEN);
                        memcpy(entry->dest, tt_common_entry->addr, ETH_ALEN);
                        entry->quality = 0; /* 0 means TT */
@@ -671,7 +671,7 @@ unlock:
 static void batadv_purge_vis_packets(struct batadv_priv *bat_priv)
 {
        uint32_t i;
-       struct batadv_hashtable *hash = bat_priv->vis_hash;
+       struct batadv_hashtable *hash = bat_priv->vis.hash;
        struct hlist_node *node, *node_tmp;
        struct hlist_head *head;
        struct batadv_vis_info *info;
@@ -682,7 +682,7 @@ static void batadv_purge_vis_packets(struct batadv_priv *bat_priv)
                hlist_for_each_entry_safe(info, node, node_tmp,
                                          head, hash_entry) {
                        /* never purge own data. */
-                       if (info == bat_priv->my_vis_info)
+                       if (info == bat_priv->vis.my_info)
                                continue;
 
                        if (batadv_has_timed_out(info->first_seen,
@@ -814,34 +814,36 @@ out:
 /* called from timer; send (and maybe generate) vis packet. */
 static void batadv_send_vis_packets(struct work_struct *work)
 {
-       struct delayed_work *delayed_work =
-               container_of(work, struct delayed_work, work);
+       struct delayed_work *delayed_work;
        struct batadv_priv *bat_priv;
+       struct batadv_priv_vis *priv_vis;
        struct batadv_vis_info *info;
 
-       bat_priv = container_of(delayed_work, struct batadv_priv, vis_work);
-       spin_lock_bh(&bat_priv->vis_hash_lock);
+       delayed_work = container_of(work, struct delayed_work, work);
+       priv_vis = container_of(delayed_work, struct batadv_priv_vis, work);
+       bat_priv = container_of(priv_vis, struct batadv_priv, vis);
+       spin_lock_bh(&bat_priv->vis.hash_lock);
        batadv_purge_vis_packets(bat_priv);
 
        if (batadv_generate_vis_packet(bat_priv) == 0) {
                /* schedule if generation was successful */
-               batadv_send_list_add(bat_priv, bat_priv->my_vis_info);
+               batadv_send_list_add(bat_priv, bat_priv->vis.my_info);
        }
 
-       while (!list_empty(&bat_priv->vis_send_list)) {
-               info = list_first_entry(&bat_priv->vis_send_list,
+       while (!list_empty(&bat_priv->vis.send_list)) {
+               info = list_first_entry(&bat_priv->vis.send_list,
                                        typeof(*info), send_list);
 
                kref_get(&info->refcount);
-               spin_unlock_bh(&bat_priv->vis_hash_lock);
+               spin_unlock_bh(&bat_priv->vis.hash_lock);
 
                batadv_send_vis_packet(bat_priv, info);
 
-               spin_lock_bh(&bat_priv->vis_hash_lock);
+               spin_lock_bh(&bat_priv->vis.hash_lock);
                batadv_send_list_del(info);
                kref_put(&info->refcount, batadv_free_info);
        }
-       spin_unlock_bh(&bat_priv->vis_hash_lock);
+       spin_unlock_bh(&bat_priv->vis.hash_lock);
        batadv_start_vis_timer(bat_priv);
 }
 
@@ -856,37 +858,37 @@ int batadv_vis_init(struct batadv_priv *bat_priv)
        unsigned long first_seen;
        struct sk_buff *tmp_skb;
 
-       if (bat_priv->vis_hash)
+       if (bat_priv->vis.hash)
                return 0;
 
-       spin_lock_bh(&bat_priv->vis_hash_lock);
+       spin_lock_bh(&bat_priv->vis.hash_lock);
 
-       bat_priv->vis_hash = batadv_hash_new(256);
-       if (!bat_priv->vis_hash) {
+       bat_priv->vis.hash = batadv_hash_new(256);
+       if (!bat_priv->vis.hash) {
                pr_err("Can't initialize vis_hash\n");
                goto err;
        }
 
-       bat_priv->my_vis_info = kmalloc(BATADV_MAX_VIS_PACKET_SIZE, GFP_ATOMIC);
-       if (!bat_priv->my_vis_info)
+       bat_priv->vis.my_info = kmalloc(BATADV_MAX_VIS_PACKET_SIZE, GFP_ATOMIC);
+       if (!bat_priv->vis.my_info)
                goto err;
 
        len = sizeof(*packet) + BATADV_MAX_VIS_PACKET_SIZE + ETH_HLEN;
-       bat_priv->my_vis_info->skb_packet = dev_alloc_skb(len);
-       if (!bat_priv->my_vis_info->skb_packet)
+       bat_priv->vis.my_info->skb_packet = dev_alloc_skb(len);
+       if (!bat_priv->vis.my_info->skb_packet)
                goto free_info;
 
-       skb_reserve(bat_priv->my_vis_info->skb_packet, ETH_HLEN);
-       tmp_skb = bat_priv->my_vis_info->skb_packet;
+       skb_reserve(bat_priv->vis.my_info->skb_packet, ETH_HLEN);
+       tmp_skb = bat_priv->vis.my_info->skb_packet;
        packet = (struct batadv_vis_packet *)skb_put(tmp_skb, sizeof(*packet));
 
        /* prefill the vis info */
        first_seen = jiffies - msecs_to_jiffies(BATADV_VIS_INTERVAL);
-       bat_priv->my_vis_info->first_seen = first_seen;
-       INIT_LIST_HEAD(&bat_priv->my_vis_info->recv_list);
-       INIT_LIST_HEAD(&bat_priv->my_vis_info->send_list);
-       kref_init(&bat_priv->my_vis_info->refcount);
-       bat_priv->my_vis_info->bat_priv = bat_priv;
+       bat_priv->vis.my_info->first_seen = first_seen;
+       INIT_LIST_HEAD(&bat_priv->vis.my_info->recv_list);
+       INIT_LIST_HEAD(&bat_priv->vis.my_info->send_list);
+       kref_init(&bat_priv->vis.my_info->refcount);
+       bat_priv->vis.my_info->bat_priv = bat_priv;
        packet->header.version = BATADV_COMPAT_VERSION;
        packet->header.packet_type = BATADV_VIS;
        packet->header.ttl = BATADV_TTL;
@@ -894,28 +896,28 @@ int batadv_vis_init(struct batadv_priv *bat_priv)
        packet->reserved = 0;
        packet->entries = 0;
 
-       INIT_LIST_HEAD(&bat_priv->vis_send_list);
+       INIT_LIST_HEAD(&bat_priv->vis.send_list);
 
-       hash_added = batadv_hash_add(bat_priv->vis_hash, batadv_vis_info_cmp,
+       hash_added = batadv_hash_add(bat_priv->vis.hash, batadv_vis_info_cmp,
                                     batadv_vis_info_choose,
-                                    bat_priv->my_vis_info,
-                                    &bat_priv->my_vis_info->hash_entry);
+                                    bat_priv->vis.my_info,
+                                    &bat_priv->vis.my_info->hash_entry);
        if (hash_added != 0) {
                pr_err("Can't add own vis packet into hash\n");
                /* not in hash, need to remove it manually. */
-               kref_put(&bat_priv->my_vis_info->refcount, batadv_free_info);
+               kref_put(&bat_priv->vis.my_info->refcount, batadv_free_info);
                goto err;
        }
 
-       spin_unlock_bh(&bat_priv->vis_hash_lock);
+       spin_unlock_bh(&bat_priv->vis.hash_lock);
        batadv_start_vis_timer(bat_priv);
        return 0;
 
 free_info:
-       kfree(bat_priv->my_vis_info);
-       bat_priv->my_vis_info = NULL;
+       kfree(bat_priv->vis.my_info);
+       bat_priv->vis.my_info = NULL;
 err:
-       spin_unlock_bh(&bat_priv->vis_hash_lock);
+       spin_unlock_bh(&bat_priv->vis.hash_lock);
        batadv_vis_quit(bat_priv);
        return -ENOMEM;
 }
@@ -933,23 +935,23 @@ static void batadv_free_info_ref(struct hlist_node *node, void *arg)
 /* shutdown vis-server */
 void batadv_vis_quit(struct batadv_priv *bat_priv)
 {
-       if (!bat_priv->vis_hash)
+       if (!bat_priv->vis.hash)
                return;
 
-       cancel_delayed_work_sync(&bat_priv->vis_work);
+       cancel_delayed_work_sync(&bat_priv->vis.work);
 
-       spin_lock_bh(&bat_priv->vis_hash_lock);
+       spin_lock_bh(&bat_priv->vis.hash_lock);
        /* properly remove, kill timers ... */
-       batadv_hash_delete(bat_priv->vis_hash, batadv_free_info_ref, NULL);
-       bat_priv->vis_hash = NULL;
-       bat_priv->my_vis_info = NULL;
-       spin_unlock_bh(&bat_priv->vis_hash_lock);
+       batadv_hash_delete(bat_priv->vis.hash, batadv_free_info_ref, NULL);
+       bat_priv->vis.hash = NULL;
+       bat_priv->vis.my_info = NULL;
+       spin_unlock_bh(&bat_priv->vis.hash_lock);
 }
 
 /* schedule packets for (re)transmission */
 static void batadv_start_vis_timer(struct batadv_priv *bat_priv)
 {
-       INIT_DELAYED_WORK(&bat_priv->vis_work, batadv_send_vis_packets);
-       queue_delayed_work(batadv_event_workqueue, &bat_priv->vis_work,
+       INIT_DELAYED_WORK(&bat_priv->vis.work, batadv_send_vis_packets);
+       queue_delayed_work(batadv_event_workqueue, &bat_priv->vis.work,
                           msecs_to_jiffies(BATADV_VIS_INTERVAL));
 }
index 84e716ed8963af8df1053299b433bdb60f9d7fa3..873282fa86dadce0671c1c928a44903229462543 100644 (file)
@@ -20,7 +20,7 @@
 #ifndef _NET_BATMAN_ADV_VIS_H_
 #define _NET_BATMAN_ADV_VIS_H_
 
-/* timeout of vis packets in miliseconds */
+/* timeout of vis packets in milliseconds */
 #define BATADV_VIS_TIMEOUT             200000
 
 int batadv_vis_seq_print_text(struct seq_file *seq, void *offset);
index 4ff0bf3ba9a516bcb99e6a8165c38b81419a28c5..0760d1fed6f08bb13404a11622b83cf3dcf02484 100644 (file)
@@ -316,7 +316,7 @@ send_rsp:
 static inline int a2mp_cmd_rsp(struct amp_mgr *mgr, struct sk_buff *skb,
                               struct a2mp_cmd *hdr)
 {
-       BT_DBG("ident %d code %d", hdr->ident, hdr->code);
+       BT_DBG("ident %d code 0x%2.2x", hdr->ident, hdr->code);
 
        skb_pull(skb, le16_to_cpu(hdr->len));
        return 0;
@@ -325,17 +325,19 @@ static inline int a2mp_cmd_rsp(struct amp_mgr *mgr, struct sk_buff *skb,
 /* Handle A2MP signalling */
 static int a2mp_chan_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
 {
-       struct a2mp_cmd *hdr = (void *) skb->data;
+       struct a2mp_cmd *hdr;
        struct amp_mgr *mgr = chan->data;
        int err = 0;
 
        amp_mgr_get(mgr);
 
        while (skb->len >= sizeof(*hdr)) {
-               struct a2mp_cmd *hdr = (void *) skb->data;
-               u16 len = le16_to_cpu(hdr->len);
+               u16 len;
 
-               BT_DBG("code 0x%02x id %d len %d", hdr->code, hdr->ident, len);
+               hdr = (void *) skb->data;
+               len = le16_to_cpu(hdr->len);
+
+               BT_DBG("code 0x%2.2x id %d len %u", hdr->code, hdr->ident, len);
 
                skb_pull(skb, sizeof(*hdr));
 
@@ -393,7 +395,9 @@ static int a2mp_chan_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
 
        if (err) {
                struct a2mp_cmd_rej rej;
+
                rej.reason = __constant_cpu_to_le16(0);
+               hdr = (void *) skb->data;
 
                BT_DBG("Send A2MP Rej: cmd 0x%2.2x err %d", hdr->code, err);
 
@@ -412,7 +416,7 @@ static int a2mp_chan_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
 
 static void a2mp_chan_close_cb(struct l2cap_chan *chan)
 {
-       l2cap_chan_destroy(chan);
+       l2cap_chan_put(chan);
 }
 
 static void a2mp_chan_state_change_cb(struct l2cap_chan *chan, int state)
index f7db5792ec648d3078d047cb46d0b269a68028bf..9d49ee6d72190c8f9b727ed98ee1ba9f0c256f48 100644 (file)
@@ -28,6 +28,7 @@
 #include <asm/ioctls.h>
 
 #include <net/bluetooth/bluetooth.h>
+#include <linux/proc_fs.h>
 
 #define VERSION "2.16"
 
@@ -532,6 +533,144 @@ int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo)
 }
 EXPORT_SYMBOL(bt_sock_wait_state);
 
+#ifdef CONFIG_PROC_FS
+struct bt_seq_state {
+       struct bt_sock_list *l;
+};
+
+static void *bt_seq_start(struct seq_file *seq, loff_t *pos)
+       __acquires(seq->private->l->lock)
+{
+       struct bt_seq_state *s = seq->private;
+       struct bt_sock_list *l = s->l;
+
+       read_lock(&l->lock);
+       return seq_hlist_start_head(&l->head, *pos);
+}
+
+static void *bt_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+       struct bt_seq_state *s = seq->private;
+       struct bt_sock_list *l = s->l;
+
+       return seq_hlist_next(v, &l->head, pos);
+}
+
+static void bt_seq_stop(struct seq_file *seq, void *v)
+       __releases(seq->private->l->lock)
+{
+       struct bt_seq_state *s = seq->private;
+       struct bt_sock_list *l = s->l;
+
+       read_unlock(&l->lock);
+}
+
+static int bt_seq_show(struct seq_file *seq, void *v)
+{
+       struct bt_seq_state *s = seq->private;
+       struct bt_sock_list *l = s->l;
+       bdaddr_t src_baswapped, dst_baswapped;
+
+       if (v == SEQ_START_TOKEN) {
+               seq_puts(seq, "sk               RefCnt Rmem   Wmem   User   Inode  Src Dst Parent");
+
+               if (l->custom_seq_show) {
+                       seq_putc(seq, ' ');
+                       l->custom_seq_show(seq, v);
+               }
+
+               seq_putc(seq, '\n');
+       } else {
+               struct sock *sk = sk_entry(v);
+               struct bt_sock *bt = bt_sk(sk);
+               baswap(&src_baswapped, &bt->src);
+               baswap(&dst_baswapped, &bt->dst);
+
+               seq_printf(seq, "%pK %-6d %-6u %-6u %-6u %-6lu %pM %pM %-6lu",
+                          sk,
+                          atomic_read(&sk->sk_refcnt),
+                          sk_rmem_alloc_get(sk),
+                          sk_wmem_alloc_get(sk),
+                          sock_i_uid(sk),
+                          sock_i_ino(sk),
+                          &src_baswapped,
+                          &dst_baswapped,
+                          bt->parent ? sock_i_ino(bt->parent) : 0LU);
+
+               if (l->custom_seq_show) {
+                       seq_putc(seq, ' ');
+                       l->custom_seq_show(seq, v);
+               }
+
+               seq_putc(seq, '\n');
+       }
+       return 0;
+}
+
+static const struct seq_operations bt_seq_ops = {
+       .start = bt_seq_start,
+       .next  = bt_seq_next,
+       .stop  = bt_seq_stop,
+       .show  = bt_seq_show,
+};
+
+static int bt_seq_open(struct inode *inode, struct file *file)
+{
+       struct bt_sock_list *sk_list;
+       struct bt_seq_state *s;
+
+       sk_list = PDE(inode)->data;
+       s = __seq_open_private(file, &bt_seq_ops,
+                              sizeof(struct bt_seq_state));
+       if (!s)
+               return -ENOMEM;
+
+       s->l = sk_list;
+       return 0;
+}
+
+int bt_procfs_init(struct module *module, struct net *net, const char *name,
+                  struct bt_sock_list *sk_list,
+                  int (*seq_show)(struct seq_file *, void *))
+{
+       struct proc_dir_entry *pde;
+
+       sk_list->custom_seq_show = seq_show;
+
+       sk_list->fops.owner     = module;
+       sk_list->fops.open      = bt_seq_open;
+       sk_list->fops.read      = seq_read;
+       sk_list->fops.llseek    = seq_lseek;
+       sk_list->fops.release   = seq_release_private;
+
+       pde = proc_net_fops_create(net, name, 0, &sk_list->fops);
+       if (!pde)
+               return -ENOMEM;
+
+       pde->data = sk_list;
+
+       return 0;
+}
+
+void bt_procfs_cleanup(struct net *net, const char *name)
+{
+       proc_net_remove(net, name);
+}
+#else
+int bt_procfs_init(struct module *module, struct net *net, const char *name,
+                  struct bt_sock_list *sk_list,
+                  int (*seq_show)(struct seq_file *, void *))
+{
+       return 0;
+}
+
+void bt_procfs_cleanup(struct net *net, const char *name)
+{
+}
+#endif
+EXPORT_SYMBOL(bt_procfs_init);
+EXPORT_SYMBOL(bt_procfs_cleanup);
+
 static struct net_proto_family bt_sock_family_ops = {
        .owner  = THIS_MODULE,
        .family = PF_BLUETOOTH,
index 1eaacf10d19d9ea078f49f225423dff96ebee8a2..e7154a58465f6b9136f527ab868024464d961564 100644 (file)
 
 #include "bnep.h"
 
+static struct bt_sock_list bnep_sk_list = {
+       .lock = __RW_LOCK_UNLOCKED(bnep_sk_list.lock)
+};
+
 static int bnep_sock_release(struct socket *sock)
 {
        struct sock *sk = sock->sk;
@@ -38,6 +42,8 @@ static int bnep_sock_release(struct socket *sock)
        if (!sk)
                return 0;
 
+       bt_sock_unlink(&bnep_sk_list, sk);
+
        sock_orphan(sk);
        sock_put(sk);
        return 0;
@@ -204,6 +210,7 @@ static int bnep_sock_create(struct net *net, struct socket *sock, int protocol,
        sk->sk_protocol = protocol;
        sk->sk_state    = BT_OPEN;
 
+       bt_sock_link(&bnep_sk_list, sk);
        return 0;
 }
 
@@ -222,19 +229,30 @@ int __init bnep_sock_init(void)
                return err;
 
        err = bt_sock_register(BTPROTO_BNEP, &bnep_sock_family_ops);
-       if (err < 0)
+       if (err < 0) {
+               BT_ERR("Can't register BNEP socket");
                goto error;
+       }
+
+       err = bt_procfs_init(THIS_MODULE, &init_net, "bnep", &bnep_sk_list, NULL);
+       if (err < 0) {
+               BT_ERR("Failed to create BNEP proc file");
+               bt_sock_unregister(BTPROTO_BNEP);
+               goto error;
+       }
+
+       BT_INFO("BNEP socket layer initialized");
 
        return 0;
 
 error:
-       BT_ERR("Can't register BNEP socket");
        proto_unregister(&bnep_proto);
        return err;
 }
 
 void __exit bnep_sock_cleanup(void)
 {
+       bt_procfs_cleanup(&init_net, "bnep");
        if (bt_sock_unregister(BTPROTO_BNEP) < 0)
                BT_ERR("Can't unregister BNEP socket");
 
index 32dc83dcb6b2edd669d7a9ce2fc00480d249b4be..aacb802d1ee45d419aac1555442bb08edf227dc9 100644 (file)
 
 #include "cmtp.h"
 
+static struct bt_sock_list cmtp_sk_list = {
+       .lock = __RW_LOCK_UNLOCKED(cmtp_sk_list.lock)
+};
+
 static int cmtp_sock_release(struct socket *sock)
 {
        struct sock *sk = sock->sk;
@@ -51,6 +55,8 @@ static int cmtp_sock_release(struct socket *sock)
        if (!sk)
                return 0;
 
+       bt_sock_unlink(&cmtp_sk_list, sk);
+
        sock_orphan(sk);
        sock_put(sk);
 
@@ -214,6 +220,8 @@ static int cmtp_sock_create(struct net *net, struct socket *sock, int protocol,
        sk->sk_protocol = protocol;
        sk->sk_state    = BT_OPEN;
 
+       bt_sock_link(&cmtp_sk_list, sk);
+
        return 0;
 }
 
@@ -232,19 +240,30 @@ int cmtp_init_sockets(void)
                return err;
 
        err = bt_sock_register(BTPROTO_CMTP, &cmtp_sock_family_ops);
-       if (err < 0)
+       if (err < 0) {
+               BT_ERR("Can't register CMTP socket");
                goto error;
+       }
+
+       err = bt_procfs_init(THIS_MODULE, &init_net, "cmtp", &cmtp_sk_list, NULL);
+       if (err < 0) {
+               BT_ERR("Failed to create CMTP proc file");
+               bt_sock_unregister(BTPROTO_CMTP);
+               goto error;
+       }
+
+       BT_INFO("CMTP socket layer initialized");
 
        return 0;
 
 error:
-       BT_ERR("Can't register CMTP socket");
        proto_unregister(&cmtp_proto);
        return err;
 }
 
 void cmtp_cleanup_sockets(void)
 {
+       bt_procfs_cleanup(&init_net, "cmtp");
        if (bt_sock_unregister(BTPROTO_CMTP) < 0)
                BT_ERR("Can't unregister CMTP socket");
 
index 3c094e78dde98cafed3ac893abd3b2fa86b76a92..b9196a44f7598bf33b0c2bff6d0764eeeba8fc11 100644 (file)
@@ -31,7 +31,7 @@
 #include <net/bluetooth/a2mp.h>
 #include <net/bluetooth/smp.h>
 
-static void hci_le_connect(struct hci_conn *conn)
+static void hci_le_create_connection(struct hci_conn *conn)
 {
        struct hci_dev *hdev = conn->hdev;
        struct hci_cp_le_create_conn cp;
@@ -55,12 +55,12 @@ static void hci_le_connect(struct hci_conn *conn)
        hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
 }
 
-static void hci_le_connect_cancel(struct hci_conn *conn)
+static void hci_le_create_connection_cancel(struct hci_conn *conn)
 {
        hci_send_cmd(conn->hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
 }
 
-void hci_acl_connect(struct hci_conn *conn)
+static void hci_acl_create_connection(struct hci_conn *conn)
 {
        struct hci_dev *hdev = conn->hdev;
        struct inquiry_entry *ie;
@@ -104,7 +104,7 @@ void hci_acl_connect(struct hci_conn *conn)
        hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
 }
 
-static void hci_acl_connect_cancel(struct hci_conn *conn)
+static void hci_acl_create_connection_cancel(struct hci_conn *conn)
 {
        struct hci_cp_create_conn_cancel cp;
 
@@ -130,7 +130,7 @@ void hci_acl_disconn(struct hci_conn *conn, __u8 reason)
        hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp);
 }
 
-void hci_add_sco(struct hci_conn *conn, __u16 handle)
+static void hci_add_sco(struct hci_conn *conn, __u16 handle)
 {
        struct hci_dev *hdev = conn->hdev;
        struct hci_cp_add_sco cp;
@@ -246,9 +246,9 @@ static void hci_conn_timeout(struct work_struct *work)
        case BT_CONNECT2:
                if (conn->out) {
                        if (conn->type == ACL_LINK)
-                               hci_acl_connect_cancel(conn);
+                               hci_acl_create_connection_cancel(conn);
                        else if (conn->type == LE_LINK)
-                               hci_le_connect_cancel(conn);
+                               hci_le_create_connection_cancel(conn);
                }
                break;
        case BT_CONFIG:
@@ -471,40 +471,37 @@ struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
 }
 EXPORT_SYMBOL(hci_get_route);
 
-/* Create SCO, ACL or LE connection.
- * Device _must_ be locked */
-struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
-                            __u8 dst_type, __u8 sec_level, __u8 auth_type)
+static struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
+                                   u8 dst_type, u8 sec_level, u8 auth_type)
 {
-       struct hci_conn *acl;
-       struct hci_conn *sco;
        struct hci_conn *le;
 
-       BT_DBG("%s dst %s", hdev->name, batostr(dst));
+       le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
+       if (!le) {
+               le = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
+               if (le)
+                       return ERR_PTR(-EBUSY);
 
-       if (type == LE_LINK) {
-               le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
-               if (!le) {
-                       le = hci_conn_hash_lookup_state(hdev, LE_LINK,
-                                                       BT_CONNECT);
-                       if (le)
-                               return ERR_PTR(-EBUSY);
+               le = hci_conn_add(hdev, LE_LINK, dst);
+               if (!le)
+                       return ERR_PTR(-ENOMEM);
 
-                       le = hci_conn_add(hdev, LE_LINK, dst);
-                       if (!le)
-                               return ERR_PTR(-ENOMEM);
+               le->dst_type = bdaddr_to_le(dst_type);
+               hci_le_create_connection(le);
+       }
 
-                       le->dst_type = bdaddr_to_le(dst_type);
-                       hci_le_connect(le);
-               }
+       le->pending_sec_level = sec_level;
+       le->auth_type = auth_type;
 
-               le->pending_sec_level = sec_level;
-               le->auth_type = auth_type;
+       hci_conn_hold(le);
 
-               hci_conn_hold(le);
+       return le;
+}
 
-               return le;
-       }
+static struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
+                                               u8 sec_level, u8 auth_type)
+{
+       struct hci_conn *acl;
 
        acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
        if (!acl) {
@@ -519,10 +516,20 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
                acl->sec_level = BT_SECURITY_LOW;
                acl->pending_sec_level = sec_level;
                acl->auth_type = auth_type;
-               hci_acl_connect(acl);
+               hci_acl_create_connection(acl);
        }
 
-       if (type == ACL_LINK)
+       return acl;
+}
+
+static struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type,
+                               bdaddr_t *dst, u8 sec_level, u8 auth_type)
+{
+       struct hci_conn *acl;
+       struct hci_conn *sco;
+
+       acl = hci_connect_acl(hdev, dst, sec_level, auth_type);
+       if (IS_ERR(acl))
                return acl;
 
        sco = hci_conn_hash_lookup_ba(hdev, type, dst);
@@ -556,6 +563,25 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
        return sco;
 }
 
+/* Create SCO, ACL or LE connection. */
+struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
+                            __u8 dst_type, __u8 sec_level, __u8 auth_type)
+{
+       BT_DBG("%s dst %s type 0x%x", hdev->name, batostr(dst), type);
+
+       switch (type) {
+       case LE_LINK:
+               return hci_connect_le(hdev, dst, dst_type, sec_level, auth_type);
+       case ACL_LINK:
+               return hci_connect_acl(hdev, dst, sec_level, auth_type);
+       case SCO_LINK:
+       case ESCO_LINK:
+               return hci_connect_sco(hdev, type, dst, sec_level, auth_type);
+       }
+
+       return ERR_PTR(-EINVAL);
+}
+
 /* Check link security requirement */
 int hci_conn_check_link_mode(struct hci_conn *conn)
 {
@@ -775,7 +801,7 @@ void hci_conn_check_pending(struct hci_dev *hdev)
 
        conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
        if (conn)
-               hci_acl_connect(conn);
+               hci_acl_create_connection(conn);
 
        hci_dev_unlock(hdev);
 }
@@ -913,7 +939,7 @@ struct hci_chan *hci_chan_create(struct hci_conn *conn)
        return chan;
 }
 
-int hci_chan_del(struct hci_chan *chan)
+void hci_chan_del(struct hci_chan *chan)
 {
        struct hci_conn *conn = chan->conn;
        struct hci_dev *hdev = conn->hdev;
@@ -926,8 +952,6 @@ int hci_chan_del(struct hci_chan *chan)
 
        skb_queue_purge(&chan->data_q);
        kfree(chan);
-
-       return 0;
 }
 
 void hci_chan_list_flush(struct hci_conn *conn)
index 0b997c8f965531d22da050339d17011ab9dd5b3c..8a0ce706aebd624ae7fd1c50b9670780dc4f6761 100644 (file)
@@ -231,6 +231,9 @@ static void amp_init(struct hci_dev *hdev)
 
        /* Read Local AMP Info */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
+
+       /* Read Data Blk size */
+       hci_send_cmd(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
 }
 
 static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
@@ -268,7 +271,6 @@ static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
-
 }
 
 static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
@@ -696,7 +698,8 @@ int hci_dev_open(__u16 dev)
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
-               if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
+               if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
+                   mgmt_valid_hdev(hdev)) {
                        hci_dev_lock(hdev);
                        mgmt_powered(hdev, 1);
                        hci_dev_unlock(hdev);
@@ -799,7 +802,8 @@ static int hci_dev_do_close(struct hci_dev *hdev)
         * and no tasks are scheduled. */
        hdev->close(hdev);
 
-       if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
+       if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
+           mgmt_valid_hdev(hdev)) {
                hci_dev_lock(hdev);
                mgmt_powered(hdev, 0);
                hci_dev_unlock(hdev);
@@ -1652,6 +1656,7 @@ struct hci_dev *hci_alloc_dev(void)
        INIT_LIST_HEAD(&hdev->link_keys);
        INIT_LIST_HEAD(&hdev->long_term_keys);
        INIT_LIST_HEAD(&hdev->remote_oob_data);
+       INIT_LIST_HEAD(&hdev->conn_hash.list);
 
        INIT_WORK(&hdev->rx_work, hci_rx_work);
        INIT_WORK(&hdev->cmd_work, hci_cmd_work);
@@ -1674,7 +1679,6 @@ struct hci_dev *hci_alloc_dev(void)
 
        hci_init_sysfs(hdev);
        discovery_init(hdev);
-       hci_conn_hash_init(hdev);
 
        return hdev;
 }
index 715d7e33fba0639d1556088e5ea84a08f6b4ccd4..2022b43c7353ee98d7546d6c9e0ef67c43811d3f 100644 (file)
@@ -29,6 +29,7 @@
 
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
+#include <net/bluetooth/mgmt.h>
 
 /* Handle HCI Event packets */
 
@@ -303,7 +304,7 @@ static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
 
        hci_dev_lock(hdev);
 
-       if (status != 0) {
+       if (status) {
                mgmt_write_scan_failed(hdev, param, status);
                hdev->discov_timeout = 0;
                goto done;
@@ -513,7 +514,7 @@ static void hci_setup_event_mask(struct hci_dev *hdev)
        if (hdev->features[3] & LMP_RSSI_INQ)
                events[4] |= 0x02; /* Inquiry Result with RSSI */
 
-       if (hdev->features[5] & LMP_SNIFF_SUBR)
+       if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */
 
        if (hdev->features[5] & LMP_PAUSE_ENC)
@@ -522,13 +523,13 @@ static void hci_setup_event_mask(struct hci_dev *hdev)
        if (hdev->features[6] & LMP_EXT_INQ)
                events[5] |= 0x40; /* Extended Inquiry Result */
 
-       if (hdev->features[6] & LMP_NO_FLUSH)
+       if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */
 
        if (hdev->features[7] & LMP_LSTO)
                events[6] |= 0x80; /* Link Supervision Timeout Changed */
 
-       if (hdev->features[6] & LMP_SIMPLE_PAIR) {
+       if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
@@ -541,7 +542,7 @@ static void hci_setup_event_mask(struct hci_dev *hdev)
                                         * Features Notification */
        }
 
-       if (hdev->features[4] & LMP_LE)
+       if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */
 
        hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
@@ -623,11 +624,11 @@ static void hci_setup_link_policy(struct hci_dev *hdev)
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;
 
-       if (hdev->features[0] & LMP_RSWITCH)
+       if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (hdev->features[0] & LMP_HOLD)
                link_policy |= HCI_LP_HOLD;
-       if (hdev->features[0] & LMP_SNIFF)
+       if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (hdev->features[1] & LMP_PARK)
                link_policy |= HCI_LP_PARK;
@@ -686,7 +687,7 @@ static void hci_cc_read_local_features(struct hci_dev *hdev,
                hdev->esco_type |= (ESCO_HV3);
        }
 
-       if (hdev->features[3] & LMP_ESCO)
+       if (lmp_esco_capable(hdev))
                hdev->esco_type |= (ESCO_EV3);
 
        if (hdev->features[4] & LMP_EV4)
@@ -746,7 +747,7 @@ static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
                break;
        }
 
-       if (test_bit(HCI_INIT, &hdev->flags) && hdev->features[4] & LMP_LE)
+       if (test_bit(HCI_INIT, &hdev->flags) && lmp_le_capable(hdev))
                hci_set_le_support(hdev);
 
 done:
@@ -925,7 +926,7 @@ static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
        if (test_bit(HCI_MGMT, &hdev->dev_flags))
                mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
 
-       if (rp->status != 0)
+       if (rp->status)
                goto unlock;
 
        cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
@@ -1625,43 +1626,30 @@ static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
 
 static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
 {
-       struct hci_cp_le_create_conn *cp;
        struct hci_conn *conn;
 
        BT_DBG("%s status 0x%2.2x", hdev->name, status);
 
-       cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
-       if (!cp)
-               return;
+       if (status) {
+               hci_dev_lock(hdev);
 
-       hci_dev_lock(hdev);
+               conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
+               if (!conn) {
+                       hci_dev_unlock(hdev);
+                       return;
+               }
 
-       conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
+               BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&conn->dst),
+                      conn);
 
-       BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->peer_addr),
-              conn);
+               conn->state = BT_CLOSED;
+               mgmt_connect_failed(hdev, &conn->dst, conn->type,
+                                   conn->dst_type, status);
+               hci_proto_connect_cfm(conn, status);
+               hci_conn_del(conn);
 
-       if (status) {
-               if (conn && conn->state == BT_CONNECT) {
-                       conn->state = BT_CLOSED;
-                       mgmt_connect_failed(hdev, &cp->peer_addr, conn->type,
-                                           conn->dst_type, status);
-                       hci_proto_connect_cfm(conn, status);
-                       hci_conn_del(conn);
-               }
-       } else {
-               if (!conn) {
-                       conn = hci_conn_add(hdev, LE_LINK, &cp->peer_addr);
-                       if (conn) {
-                               conn->dst_type = cp->peer_addr_type;
-                               conn->out = true;
-                       } else {
-                               BT_ERR("No memory for new connection");
-                       }
-               }
+               hci_dev_unlock(hdev);
        }
-
-       hci_dev_unlock(hdev);
 }
 
 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
@@ -1904,6 +1892,22 @@ static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
        }
 }
 
+static u8 hci_to_mgmt_reason(u8 err)
+{
+       switch (err) {
+       case HCI_ERROR_CONNECTION_TIMEOUT:
+               return MGMT_DEV_DISCONN_TIMEOUT;
+       case HCI_ERROR_REMOTE_USER_TERM:
+       case HCI_ERROR_REMOTE_LOW_RESOURCES:
+       case HCI_ERROR_REMOTE_POWER_OFF:
+               return MGMT_DEV_DISCONN_REMOTE;
+       case HCI_ERROR_LOCAL_HOST_TERM:
+               return MGMT_DEV_DISCONN_LOCAL_HOST;
+       default:
+               return MGMT_DEV_DISCONN_UNKNOWN;
+       }
+}
+
 static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
        struct hci_ev_disconn_complete *ev = (void *) skb->data;
@@ -1922,12 +1926,15 @@ static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
 
        if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags) &&
            (conn->type == ACL_LINK || conn->type == LE_LINK)) {
-               if (ev->status != 0)
+               if (ev->status) {
                        mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
                                               conn->dst_type, ev->status);
-               else
+               } else {
+                       u8 reason = hci_to_mgmt_reason(ev->reason);
+
                        mgmt_device_disconnected(hdev, &conn->dst, conn->type,
-                                                conn->dst_type);
+                                                conn->dst_type, reason);
+               }
        }
 
        if (ev->status == 0) {
@@ -3268,12 +3275,67 @@ static void hci_user_passkey_request_evt(struct hci_dev *hdev,
 
        BT_DBG("%s", hdev->name);
 
-       hci_dev_lock(hdev);
-
        if (test_bit(HCI_MGMT, &hdev->dev_flags))
                mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
+}
 
-       hci_dev_unlock(hdev);
+static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
+                                       struct sk_buff *skb)
+{
+       struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
+       struct hci_conn *conn;
+
+       BT_DBG("%s", hdev->name);
+
+       conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
+       if (!conn)
+               return;
+
+       conn->passkey_notify = __le32_to_cpu(ev->passkey);
+       conn->passkey_entered = 0;
+
+       if (test_bit(HCI_MGMT, &hdev->dev_flags))
+               mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
+                                        conn->dst_type, conn->passkey_notify,
+                                        conn->passkey_entered);
+}
+
+static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
+{
+       struct hci_ev_keypress_notify *ev = (void *) skb->data;
+       struct hci_conn *conn;
+
+       BT_DBG("%s", hdev->name);
+
+       conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
+       if (!conn)
+               return;
+
+       switch (ev->type) {
+       case HCI_KEYPRESS_STARTED:
+               conn->passkey_entered = 0;
+               return;
+
+       case HCI_KEYPRESS_ENTERED:
+               conn->passkey_entered++;
+               break;
+
+       case HCI_KEYPRESS_ERASED:
+               conn->passkey_entered--;
+               break;
+
+       case HCI_KEYPRESS_CLEARED:
+               conn->passkey_entered = 0;
+               break;
+
+       case HCI_KEYPRESS_COMPLETED:
+               return;
+       }
+
+       if (test_bit(HCI_MGMT, &hdev->dev_flags))
+               mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
+                                        conn->dst_type, conn->passkey_notify,
+                                        conn->passkey_entered);
 }
 
 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
@@ -3295,7 +3357,7 @@ static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
         * initiated the authentication. A traditional auth_complete
         * event gets always produced as initiator and is also mapped to
         * the mgmt_auth_failed event */
-       if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status != 0)
+       if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
                mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
                                 ev->status);
 
@@ -3366,11 +3428,23 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
 
        hci_dev_lock(hdev);
 
-       if (ev->status) {
-               conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
-               if (!conn)
+       conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
+       if (!conn) {
+               conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
+               if (!conn) {
+                       BT_ERR("No memory for new connection");
                        goto unlock;
+               }
+
+               conn->dst_type = ev->bdaddr_type;
 
+               if (ev->role == LE_CONN_ROLE_MASTER) {
+                       conn->out = true;
+                       conn->link_mode |= HCI_LM_MASTER;
+               }
+       }
+
+       if (ev->status) {
                mgmt_connect_failed(hdev, &conn->dst, conn->type,
                                    conn->dst_type, ev->status);
                hci_proto_connect_cfm(conn, ev->status);
@@ -3379,18 +3453,6 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
                goto unlock;
        }
 
-       conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &ev->bdaddr);
-       if (!conn) {
-               conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
-               if (!conn) {
-                       BT_ERR("No memory for new connection");
-                       hci_dev_unlock(hdev);
-                       return;
-               }
-
-               conn->dst_type = ev->bdaddr_type;
-       }
-
        if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
                mgmt_device_connected(hdev, &ev->bdaddr, conn->type,
                                      conn->dst_type, 0, NULL, 0, NULL);
@@ -3640,6 +3702,14 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
                hci_user_passkey_request_evt(hdev, skb);
                break;
 
+       case HCI_EV_USER_PASSKEY_NOTIFY:
+               hci_user_passkey_notify_evt(hdev, skb);
+               break;
+
+       case HCI_EV_KEYPRESS_NOTIFY:
+               hci_keypress_notify_evt(hdev, skb);
+               break;
+
        case HCI_EV_SIMPLE_PAIR_COMPLETE:
                hci_simple_pair_complete_evt(hdev, skb);
                break;
index d5ace1eda3ed8c3fd06f422bb7cbf340d3187c6c..07f073935811b86292136a59ecb544d323d3ea86 100644 (file)
@@ -1102,21 +1102,30 @@ int __init hci_sock_init(void)
                return err;
 
        err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
-       if (err < 0)
+       if (err < 0) {
+               BT_ERR("HCI socket registration failed");
                goto error;
+       }
+
+       err = bt_procfs_init(THIS_MODULE, &init_net, "hci", &hci_sk_list, NULL);
+       if (err < 0) {
+               BT_ERR("Failed to create HCI proc file");
+               bt_sock_unregister(BTPROTO_HCI);
+               goto error;
+       }
 
        BT_INFO("HCI socket layer initialized");
 
        return 0;
 
 error:
-       BT_ERR("HCI socket registration failed");
        proto_unregister(&hci_sk_proto);
        return err;
 }
 
 void hci_sock_cleanup(void)
 {
+       bt_procfs_cleanup(&init_net, "hci");
        if (bt_sock_unregister(BTPROTO_HCI) < 0)
                BT_ERR("HCI socket unregistration failed");
 
index b24fb3bd862555c81746fe664e7f90270cc6a2df..82a829d90b0f4a8f60013653bf0eed34237cb5b0 100644 (file)
 
 #include "hidp.h"
 
+static struct bt_sock_list hidp_sk_list = {
+       .lock = __RW_LOCK_UNLOCKED(hidp_sk_list.lock)
+};
+
 static int hidp_sock_release(struct socket *sock)
 {
        struct sock *sk = sock->sk;
@@ -34,6 +38,8 @@ static int hidp_sock_release(struct socket *sock)
        if (!sk)
                return 0;
 
+       bt_sock_unlink(&hidp_sk_list, sk);
+
        sock_orphan(sk);
        sock_put(sk);
 
@@ -253,6 +259,8 @@ static int hidp_sock_create(struct net *net, struct socket *sock, int protocol,
        sk->sk_protocol = protocol;
        sk->sk_state    = BT_OPEN;
 
+       bt_sock_link(&hidp_sk_list, sk);
+
        return 0;
 }
 
@@ -271,8 +279,19 @@ int __init hidp_init_sockets(void)
                return err;
 
        err = bt_sock_register(BTPROTO_HIDP, &hidp_sock_family_ops);
-       if (err < 0)
+       if (err < 0) {
+               BT_ERR("Can't register HIDP socket");
                goto error;
+       }
+
+       err = bt_procfs_init(THIS_MODULE, &init_net, "hidp", &hidp_sk_list, NULL);
+       if (err < 0) {
+               BT_ERR("Failed to create HIDP proc file");
+               bt_sock_unregister(BTPROTO_HIDP);
+               goto error;
+       }
+
+       BT_INFO("HIDP socket layer initialized");
 
        return 0;
 
@@ -284,6 +303,7 @@ error:
 
 void __exit hidp_cleanup_sockets(void)
 {
+       bt_procfs_cleanup(&init_net, "hidp");
        if (bt_sock_unregister(BTPROTO_HIDP) < 0)
                BT_ERR("Can't unregister HIDP socket");
 
index 38c00f142203505d3a3c809e8e6162f159ae0196..a91239dcda417f5a862346c981bb941aba44d086 100644 (file)
@@ -406,7 +406,7 @@ struct l2cap_chan *l2cap_chan_create(void)
 
        chan->state = BT_OPEN;
 
-       atomic_set(&chan->refcnt, 1);
+       kref_init(&chan->kref);
 
        /* This flag is cleared in l2cap_chan_ready() */
        set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
@@ -416,13 +416,31 @@ struct l2cap_chan *l2cap_chan_create(void)
        return chan;
 }
 
-void l2cap_chan_destroy(struct l2cap_chan *chan)
+static void l2cap_chan_destroy(struct kref *kref)
 {
+       struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
+
+       BT_DBG("chan %p", chan);
+
        write_lock(&chan_list_lock);
        list_del(&chan->global_l);
        write_unlock(&chan_list_lock);
 
-       l2cap_chan_put(chan);
+       kfree(chan);
+}
+
+void l2cap_chan_hold(struct l2cap_chan *c)
+{
+       BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
+
+       kref_get(&c->kref);
+}
+
+void l2cap_chan_put(struct l2cap_chan *c)
+{
+       BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
+
+       kref_put(&c->kref, l2cap_chan_destroy);
 }
 
 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
@@ -1431,7 +1449,7 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
        int err;
 
        BT_DBG("%s -> %s (type %u) psm 0x%2.2x", batostr(src), batostr(dst),
-              dst_type, __le16_to_cpu(chan->psm));
+              dst_type, __le16_to_cpu(psm));
 
        hdev = hci_get_route(dst, src);
        if (!hdev)
@@ -5331,7 +5349,7 @@ int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
        return exact ? lm1 : lm2;
 }
 
-int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
+void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
 {
        struct l2cap_conn *conn;
 
@@ -5344,7 +5362,6 @@ int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
        } else
                l2cap_conn_del(hcon, bt_to_errno(status));
 
-       return 0;
 }
 
 int l2cap_disconn_ind(struct hci_conn *hcon)
@@ -5358,12 +5375,11 @@ int l2cap_disconn_ind(struct hci_conn *hcon)
        return conn->disc_reason;
 }
 
-int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
+void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
 {
        BT_DBG("hcon %p reason %d", hcon, reason);
 
        l2cap_conn_del(hcon, bt_to_errno(reason));
-       return 0;
 }
 
 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
@@ -5406,6 +5422,11 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
                BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
                       state_to_string(chan->state));
 
+               if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
+                       l2cap_chan_unlock(chan);
+                       continue;
+               }
+
                if (chan->scid == L2CAP_CID_LE_DATA) {
                        if (!status && encrypt) {
                                chan->sec_level = hcon->sec_level;
index 34bbe1c5e389500f080e15b30c194e95ea36f189..083f2bf065d4d788e59702d29b71b39aaa7bd688 100644 (file)
 #include <net/bluetooth/l2cap.h>
 #include <net/bluetooth/smp.h>
 
+static struct bt_sock_list l2cap_sk_list = {
+       .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
+};
+
 static const struct proto_ops l2cap_sock_ops;
 static void l2cap_sock_init(struct sock *sk, struct sock *parent);
 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio);
@@ -824,7 +828,7 @@ static void l2cap_sock_kill(struct sock *sk)
 
        /* Kill poor orphan */
 
-       l2cap_chan_destroy(l2cap_pi(sk)->chan);
+       l2cap_chan_put(l2cap_pi(sk)->chan);
        sock_set_flag(sk, SOCK_DEAD);
        sock_put(sk);
 }
@@ -887,6 +891,8 @@ static int l2cap_sock_release(struct socket *sock)
        if (!sk)
                return 0;
 
+       bt_sock_unlink(&l2cap_sk_list, sk);
+
        err = l2cap_sock_shutdown(sock, 2);
 
        sock_orphan(sk);
@@ -1211,6 +1217,7 @@ static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
                return -ENOMEM;
 
        l2cap_sock_init(sk, NULL);
+       bt_sock_link(&l2cap_sk_list, sk);
        return 0;
 }
 
@@ -1249,21 +1256,30 @@ int __init l2cap_init_sockets(void)
                return err;
 
        err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
-       if (err < 0)
+       if (err < 0) {
+               BT_ERR("L2CAP socket registration failed");
                goto error;
+       }
+
+       err = bt_procfs_init(THIS_MODULE, &init_net, "l2cap", &l2cap_sk_list, NULL);
+       if (err < 0) {
+               BT_ERR("Failed to create L2CAP proc file");
+               bt_sock_unregister(BTPROTO_L2CAP);
+               goto error;
+       }
 
        BT_INFO("L2CAP socket layer initialized");
 
        return 0;
 
 error:
-       BT_ERR("L2CAP socket registration failed");
        proto_unregister(&l2cap_proto);
        return err;
 }
 
 void l2cap_cleanup_sockets(void)
 {
+       bt_procfs_cleanup(&init_net, "l2cap");
        if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
                BT_ERR("L2CAP socket unregistration failed");
 
index eba022de3c205bb55f2ed9639e4faf21d78ae9a8..aa2ea0a8142cc0d6c7378ced06be1257b2f846d7 100644 (file)
@@ -35,7 +35,7 @@
 bool enable_hs;
 
 #define MGMT_VERSION   1
-#define MGMT_REVISION  1
+#define MGMT_REVISION  2
 
 static const u16 mgmt_commands[] = {
        MGMT_OP_READ_INDEX_LIST,
@@ -99,6 +99,7 @@ static const u16 mgmt_events[] = {
        MGMT_EV_DEVICE_BLOCKED,
        MGMT_EV_DEVICE_UNBLOCKED,
        MGMT_EV_DEVICE_UNPAIRED,
+       MGMT_EV_PASSKEY_NOTIFY,
 };
 
 /*
@@ -193,6 +194,11 @@ static u8 mgmt_status_table[] = {
        MGMT_STATUS_CONNECT_FAILED,     /* MAC Connection Failed */
 };
 
+bool mgmt_valid_hdev(struct hci_dev *hdev)
+{
+       return hdev->dev_type == HCI_BREDR;
+}
+
 static u8 mgmt_status(u8 hci_status)
 {
        if (hci_status < ARRAY_SIZE(mgmt_status_table))
@@ -317,7 +323,6 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
                           u16 data_len)
 {
        struct mgmt_rp_read_index_list *rp;
-       struct list_head *p;
        struct hci_dev *d;
        size_t rp_len;
        u16 count;
@@ -328,7 +333,10 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
        read_lock(&hci_dev_list_lock);
 
        count = 0;
-       list_for_each(p, &hci_dev_list) {
+       list_for_each_entry(d, &hci_dev_list, list) {
+               if (!mgmt_valid_hdev(d))
+                       continue;
+
                count++;
        }
 
@@ -346,6 +354,9 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
                if (test_bit(HCI_SETUP, &d->dev_flags))
                        continue;
 
+               if (!mgmt_valid_hdev(d))
+                       continue;
+
                rp->index[i++] = cpu_to_le16(d->id);
                BT_DBG("Added hci%u", d->id);
        }
@@ -370,10 +381,10 @@ static u32 get_supported_settings(struct hci_dev *hdev)
        settings |= MGMT_SETTING_DISCOVERABLE;
        settings |= MGMT_SETTING_PAIRABLE;
 
-       if (hdev->features[6] & LMP_SIMPLE_PAIR)
+       if (lmp_ssp_capable(hdev))
                settings |= MGMT_SETTING_SSP;
 
-       if (!(hdev->features[4] & LMP_NO_BREDR)) {
+       if (lmp_bredr_capable(hdev)) {
                settings |= MGMT_SETTING_BREDR;
                settings |= MGMT_SETTING_LINK_SECURITY;
        }
@@ -381,7 +392,7 @@ static u32 get_supported_settings(struct hci_dev *hdev)
        if (enable_hs)
                settings |= MGMT_SETTING_HS;
 
-       if (hdev->features[4] & LMP_LE)
+       if (lmp_le_capable(hdev))
                settings |= MGMT_SETTING_LE;
 
        return settings;
@@ -403,7 +414,7 @@ static u32 get_current_settings(struct hci_dev *hdev)
        if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
                settings |= MGMT_SETTING_PAIRABLE;
 
-       if (!(hdev->features[4] & LMP_NO_BREDR))
+       if (lmp_bredr_capable(hdev))
                settings |= MGMT_SETTING_BREDR;
 
        if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
@@ -1111,7 +1122,7 @@ static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
 
        hci_dev_lock(hdev);
 
-       if (!(hdev->features[6] & LMP_SIMPLE_PAIR)) {
+       if (!lmp_ssp_capable(hdev)) {
                err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
                                 MGMT_STATUS_NOT_SUPPORTED);
                goto failed;
@@ -1195,7 +1206,7 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
 
        hci_dev_lock(hdev);
 
-       if (!(hdev->features[4] & LMP_LE)) {
+       if (!lmp_le_capable(hdev)) {
                err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
                                 MGMT_STATUS_NOT_SUPPORTED);
                goto unlock;
@@ -2191,7 +2202,7 @@ static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
                goto unlock;
        }
 
-       if (!(hdev->features[6] & LMP_SIMPLE_PAIR)) {
+       if (!lmp_ssp_capable(hdev)) {
                err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
                                 MGMT_STATUS_NOT_SUPPORTED);
                goto unlock;
@@ -2820,6 +2831,9 @@ static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
 
 int mgmt_index_added(struct hci_dev *hdev)
 {
+       if (!mgmt_valid_hdev(hdev))
+               return -ENOTSUPP;
+
        return mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
 }
 
@@ -2827,6 +2841,9 @@ int mgmt_index_removed(struct hci_dev *hdev)
 {
        u8 status = MGMT_STATUS_INVALID_INDEX;
 
+       if (!mgmt_valid_hdev(hdev))
+               return -ENOTSUPP;
+
        mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
 
        return mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
@@ -3077,16 +3094,17 @@ static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
 }
 
 int mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
-                            u8 link_type, u8 addr_type)
+                            u8 link_type, u8 addr_type, u8 reason)
 {
-       struct mgmt_addr_info ev;
+       struct mgmt_ev_device_disconnected ev;
        struct sock *sk = NULL;
        int err;
 
        mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
 
-       bacpy(&ev.bdaddr, bdaddr);
-       ev.type = link_to_bdaddr(link_type, addr_type);
+       bacpy(&ev.addr.bdaddr, bdaddr);
+       ev.addr.type = link_to_bdaddr(link_type, addr_type);
+       ev.reason = reason;
 
        err = mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev),
                         sk);
@@ -3275,6 +3293,22 @@ int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
                                          MGMT_OP_USER_PASSKEY_NEG_REPLY);
 }
 
+int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
+                            u8 link_type, u8 addr_type, u32 passkey,
+                            u8 entered)
+{
+       struct mgmt_ev_passkey_notify ev;
+
+       BT_DBG("%s", hdev->name);
+
+       bacpy(&ev.addr.bdaddr, bdaddr);
+       ev.addr.type = link_to_bdaddr(link_type, addr_type);
+       ev.passkey = __cpu_to_le32(passkey);
+       ev.entered = entered;
+
+       return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
+}
+
 int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
                     u8 addr_type, u8 status)
 {
index 1a17850d093cd652621ac54833c53b8dabd395bc..b3226f3658cfda1142c7484b110324115dccc85b 100644 (file)
@@ -1035,8 +1035,17 @@ int __init rfcomm_init_sockets(void)
                return err;
 
        err = bt_sock_register(BTPROTO_RFCOMM, &rfcomm_sock_family_ops);
-       if (err < 0)
+       if (err < 0) {
+               BT_ERR("RFCOMM socket layer registration failed");
+               goto error;
+       }
+
+       err = bt_procfs_init(THIS_MODULE, &init_net, "rfcomm", &rfcomm_sk_list, NULL);
+       if (err < 0) {
+               BT_ERR("Failed to create RFCOMM proc file");
+               bt_sock_unregister(BTPROTO_RFCOMM);
                goto error;
+       }
 
        if (bt_debugfs) {
                rfcomm_sock_debugfs = debugfs_create_file("rfcomm", 0444,
@@ -1050,13 +1059,14 @@ int __init rfcomm_init_sockets(void)
        return 0;
 
 error:
-       BT_ERR("RFCOMM socket layer registration failed");
        proto_unregister(&rfcomm_proto);
        return err;
 }
 
 void __exit rfcomm_cleanup_sockets(void)
 {
+       bt_procfs_cleanup(&init_net, "rfcomm");
+
        debugfs_remove(rfcomm_sock_debugfs);
 
        if (bt_sock_unregister(BTPROTO_RFCOMM) < 0)
index 3589e21edb09817bace527336fd4880e24137ada..dc42b917aaafad3f050177694c438648af2b22bb 100644 (file)
@@ -912,7 +912,7 @@ int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
        return lm;
 }
 
-int sco_connect_cfm(struct hci_conn *hcon, __u8 status)
+void sco_connect_cfm(struct hci_conn *hcon, __u8 status)
 {
        BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
        if (!status) {
@@ -923,16 +923,13 @@ int sco_connect_cfm(struct hci_conn *hcon, __u8 status)
                        sco_conn_ready(conn);
        } else
                sco_conn_del(hcon, bt_to_errno(status));
-
-       return 0;
 }
 
-int sco_disconn_cfm(struct hci_conn *hcon, __u8 reason)
+void sco_disconn_cfm(struct hci_conn *hcon, __u8 reason)
 {
        BT_DBG("hcon %p reason %d", hcon, reason);
 
        sco_conn_del(hcon, bt_to_errno(reason));
-       return 0;
 }
 
 int sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb)
@@ -1025,6 +1022,13 @@ int __init sco_init(void)
                goto error;
        }
 
+       err = bt_procfs_init(THIS_MODULE, &init_net, "sco", &sco_sk_list, NULL);
+       if (err < 0) {
+               BT_ERR("Failed to create SCO proc file");
+               bt_sock_unregister(BTPROTO_SCO);
+               goto error;
+       }
+
        if (bt_debugfs) {
                sco_debugfs = debugfs_create_file("sco", 0444, bt_debugfs,
                                                  NULL, &sco_debugfs_fops);
@@ -1043,6 +1047,8 @@ error:
 
 void __exit sco_exit(void)
 {
+       bt_procfs_cleanup(&init_net, "sco");
+
        debugfs_remove(sco_debugfs);
 
        if (bt_sock_unregister(BTPROTO_SCO) < 0)
index d21f32383517f6b66a3b0c43ea534beb3840307c..d9576e6de2b85c232c1dcde0e54469bac3397212 100644 (file)
@@ -312,7 +312,7 @@ int br_fdb_fillbuf(struct net_bridge *br, void *buf,
 
                        fe->is_local = f->is_local;
                        if (!f->is_static)
-                               fe->ageing_timer_value = jiffies_to_clock_t(jiffies - f->updated);
+                               fe->ageing_timer_value = jiffies_delta_to_clock_t(jiffies - f->updated);
                        ++fe;
                        ++num;
                }
@@ -467,14 +467,14 @@ static int fdb_to_nud(const struct net_bridge_fdb_entry *fdb)
 
 static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br,
                         const struct net_bridge_fdb_entry *fdb,
-                        u32 pid, u32 seq, int type, unsigned int flags)
+                        u32 portid, u32 seq, int type, unsigned int flags)
 {
        unsigned long now = jiffies;
        struct nda_cacheinfo ci;
        struct nlmsghdr *nlh;
        struct ndmsg *ndm;
 
-       nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
+       nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
        if (nlh == NULL)
                return -EMSGSIZE;
 
@@ -555,7 +555,7 @@ int br_fdb_dump(struct sk_buff *skb,
                                goto skip;
 
                        if (fdb_fill_info(skb, br, f,
-                                         NETLINK_CB(cb->skb).pid,
+                                         NETLINK_CB(cb->skb).portid,
                                          cb->nlh->nlmsg_seq,
                                          RTM_NEWNEIGH,
                                          NLM_F_MULTI) < 0)
@@ -608,8 +608,9 @@ static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr,
 }
 
 /* Add new permanent fdb entry with RTM_NEWNEIGH */
-int br_fdb_add(struct ndmsg *ndm, struct net_device *dev,
-              unsigned char *addr, u16 nlh_flags)
+int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+              struct net_device *dev,
+              const unsigned char *addr, u16 nlh_flags)
 {
        struct net_bridge_port *p;
        int err = 0;
@@ -639,7 +640,7 @@ int br_fdb_add(struct ndmsg *ndm, struct net_device *dev,
        return err;
 }
 
-static int fdb_delete_by_addr(struct net_bridge_port *p, u8 *addr)
+static int fdb_delete_by_addr(struct net_bridge_port *p, const u8 *addr)
 {
        struct net_bridge *br = p->br;
        struct hlist_head *head = &br->hash[br_mac_hash(addr)];
@@ -655,7 +656,7 @@ static int fdb_delete_by_addr(struct net_bridge_port *p, u8 *addr)
 
 /* Remove neighbor entry with RTM_DELNEIGH */
 int br_fdb_delete(struct ndmsg *ndm, struct net_device *dev,
-                 unsigned char *addr)
+                 const unsigned char *addr)
 {
        struct net_bridge_port *p;
        int err;
index fe41260fbf38b28bb121dcc2235a5d27b83e27b7..093f527276a39c097de23cea8ab7e4016df438cc 100644 (file)
@@ -127,7 +127,7 @@ static int br_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
                        goto skip;
 
                if (br_fill_ifinfo(skb, port,
-                                  NETLINK_CB(cb->skb).pid,
+                                  NETLINK_CB(cb->skb).portid,
                                   cb->nlh->nlmsg_seq, RTM_NEWLINK,
                                   NLM_F_MULTI) < 0)
                        break;
index f507d2af9646bcb273cf1f50f98feb065027e14a..9b278c4ebee10efb45ee07f480f749e609ca891e 100644 (file)
@@ -363,10 +363,10 @@ extern void br_fdb_update(struct net_bridge *br,
 
 extern int br_fdb_delete(struct ndmsg *ndm,
                         struct net_device *dev,
-                        unsigned char *addr);
-extern int br_fdb_add(struct ndmsg *nlh,
+                        const unsigned char *addr);
+extern int br_fdb_add(struct ndmsg *nlh, struct nlattr *tb[],
                      struct net_device *dev,
-                     unsigned char *addr,
+                     const unsigned char *addr,
                      u16 nlh_flags);
 extern int br_fdb_dump(struct sk_buff *skb,
                       struct netlink_callback *cb,
index a6747e673426e3f29c734f11596c1be328e7b558..c3530a81a33bf40c9162cb28a6f931ea937baf54 100644 (file)
@@ -170,5 +170,5 @@ void br_stp_port_timer_init(struct net_bridge_port *p)
 unsigned long br_timer_value(const struct timer_list *timer)
 {
        return timer_pending(timer)
-               ? jiffies_to_clock_t(timer->expires - jiffies) : 0;
+               ? jiffies_delta_to_clock_t(timer->expires - jiffies) : 0;
 }
index 19063473c71f2efc897e2ee3a51ea023052f15e6..3476ec469740d6829deed8089888353d7934d4a5 100644 (file)
@@ -298,8 +298,7 @@ static int __init ebt_ulog_init(void)
                spin_lock_init(&ulog_buffers[i].lock);
        }
 
-       ebtulognl = netlink_kernel_create(&init_net, NETLINK_NFLOG,
-                                         THIS_MODULE, &cfg);
+       ebtulognl = netlink_kernel_create(&init_net, NETLINK_NFLOG, &cfg);
        if (!ebtulognl)
                ret = -ENOMEM;
        else if ((ret = xt_register_target(&ebt_ulog_tg_reg)) != 0)
index 42e6bd0945745f99ae2cb718b55efed2410f704b..3c2e9dced9e0afd8a5ed3522357eb0faf63e8289 100644 (file)
@@ -100,9 +100,7 @@ static struct nf_hook_ops ebt_ops_filter[] __read_mostly = {
 static int __net_init frame_filter_net_init(struct net *net)
 {
        net->xt.frame_filter = ebt_register_table(net, &frame_filter);
-       if (IS_ERR(net->xt.frame_filter))
-               return PTR_ERR(net->xt.frame_filter);
-       return 0;
+       return PTR_RET(net->xt.frame_filter);
 }
 
 static void __net_exit frame_filter_net_exit(struct net *net)
index 6dc2f878ae0533a58455f3b2b76f49681fb42f5f..10871bc77908ec8799a482636995771fd47a6b03 100644 (file)
@@ -100,9 +100,7 @@ static struct nf_hook_ops ebt_ops_nat[] __read_mostly = {
 static int __net_init frame_nat_net_init(struct net *net)
 {
        net->xt.frame_nat = ebt_register_table(net, &frame_nat);
-       if (IS_ERR(net->xt.frame_nat))
-               return PTR_ERR(net->xt.frame_nat);
-       return 0;
+       return PTR_RET(net->xt.frame_nat);
 }
 
 static void __net_exit frame_nat_net_exit(struct net *net)
index b54d5e695b034b8abbd3c0dc083fc74c5d1cf15e..127879c55fb66f9d3e6698def42013c1561ab58b 100644 (file)
@@ -549,7 +549,7 @@ static int cgw_dump_jobs(struct sk_buff *skb, struct netlink_callback *cb)
                if (idx < s_idx)
                        goto cont;
 
-               if (cgw_put_job(skb, gwj, RTM_NEWROUTE, NETLINK_CB(cb->skb).pid,
+               if (cgw_put_job(skb, gwj, RTM_NEWROUTE, NETLINK_CB(cb->skb).portid,
                    cb->nlh->nlmsg_seq, NLM_F_MULTI) < 0)
                        break;
 cont:
index 74ed1d7a84a2ead7fe68e4c4e9c8ae1129490f3c..79ae884850015a99a1e43c4b2fc228670c3779b9 100644 (file)
@@ -301,8 +301,7 @@ void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
                        break;
                }
                /* Bump the usage count and install the file. */
-               get_file(fp[i]);
-               fd_install(new_fd, fp[i]);
+               fd_install(new_fd, get_file(fp[i]));
        }
 
        if (i > 0) {
index 36c4a0cdb6c128bb0174bbf4302e0e65e9309cbc..1e0a1847c3bbee7fd5aeb8654bf0d9da08ce5ca9 100644 (file)
@@ -959,18 +959,30 @@ int dev_alloc_name(struct net_device *dev, const char *name)
 }
 EXPORT_SYMBOL(dev_alloc_name);
 
-static int dev_get_valid_name(struct net_device *dev, const char *name)
+static int dev_alloc_name_ns(struct net *net,
+                            struct net_device *dev,
+                            const char *name)
 {
-       struct net *net;
+       char buf[IFNAMSIZ];
+       int ret;
 
-       BUG_ON(!dev_net(dev));
-       net = dev_net(dev);
+       ret = __dev_alloc_name(net, name, buf);
+       if (ret >= 0)
+               strlcpy(dev->name, buf, IFNAMSIZ);
+       return ret;
+}
+
+static int dev_get_valid_name(struct net *net,
+                             struct net_device *dev,
+                             const char *name)
+{
+       BUG_ON(!net);
 
        if (!dev_valid_name(name))
                return -EINVAL;
 
        if (strchr(name, '%'))
-               return dev_alloc_name(dev, name);
+               return dev_alloc_name_ns(net, dev, name);
        else if (__dev_get_by_name(net, name))
                return -EEXIST;
        else if (dev->name != name)
@@ -1006,7 +1018,7 @@ int dev_change_name(struct net_device *dev, const char *newname)
 
        memcpy(oldname, dev->name, IFNAMSIZ);
 
-       err = dev_get_valid_name(dev, newname);
+       err = dev_get_valid_name(net, dev, newname);
        if (err < 0)
                return err;
 
@@ -1109,11 +1121,23 @@ void netdev_state_change(struct net_device *dev)
 }
 EXPORT_SYMBOL(netdev_state_change);
 
-int netdev_bonding_change(struct net_device *dev, unsigned long event)
+/**
+ *     netdev_notify_peers - notify network peers about existence of @dev
+ *     @dev: network device
+ *
+ * Generate traffic such that interested network peers are aware of
+ * @dev, such as by generating a gratuitous ARP. This may be used when
+ * a device wants to inform the rest of the network about some sort of
+ * reconfiguration such as a failover event or virtual machine
+ * migration.
+ */
+void netdev_notify_peers(struct net_device *dev)
 {
-       return call_netdevice_notifiers(event, dev);
+       rtnl_lock();
+       call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
+       rtnl_unlock();
 }
-EXPORT_SYMBOL(netdev_bonding_change);
+EXPORT_SYMBOL(netdev_notify_peers);
 
 /**
  *     dev_load        - load a network module
@@ -1394,7 +1418,6 @@ rollback:
                                nb->notifier_call(nb, NETDEV_DOWN, dev);
                        }
                        nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
-                       nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev);
                }
        }
 
@@ -1436,7 +1459,6 @@ int unregister_netdevice_notifier(struct notifier_block *nb)
                                nb->notifier_call(nb, NETDEV_DOWN, dev);
                        }
                        nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
-                       nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev);
                }
        }
 unlock:
@@ -2175,9 +2197,7 @@ EXPORT_SYMBOL(netif_skb_features);
 /*
  * Returns true if either:
  *     1. skb has frag_list and the device doesn't support FRAGLIST, or
- *     2. skb is fragmented and the device does not support SG, or if
- *        at least one of fragments is in highmem and device does not
- *        support DMA from it.
+ *     2. skb is fragmented and the device does not support SG.
  */
 static inline int skb_needs_linearize(struct sk_buff *skb,
                                      int features)
@@ -2206,9 +2226,6 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
                if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
                        skb_dst_drop(skb);
 
-               if (!list_empty(&ptype_all))
-                       dev_queue_xmit_nit(skb, dev);
-
                features = netif_skb_features(skb);
 
                if (vlan_tx_tag_present(skb) &&
@@ -2243,6 +2260,9 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
                        }
                }
 
+               if (!list_empty(&ptype_all))
+                       dev_queue_xmit_nit(skb, dev);
+
                skb_len = skb->len;
                rc = ops->ndo_start_xmit(skb, dev);
                trace_net_dev_xmit(skb, rc, dev, skb_len);
@@ -2265,6 +2285,9 @@ gso:
                if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
                        skb_dst_drop(nskb);
 
+               if (!list_empty(&ptype_all))
+                       dev_queue_xmit_nit(nskb, dev);
+
                skb_len = nskb->len;
                rc = ops->ndo_start_xmit(nskb, dev);
                trace_net_dev_xmit(nskb, rc, dev, skb_len);
@@ -2374,8 +2397,8 @@ static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
 #endif
 }
 
-static struct netdev_queue *dev_pick_tx(struct net_device *dev,
-                                       struct sk_buff *skb)
+struct netdev_queue *netdev_pick_tx(struct net_device *dev,
+                                   struct sk_buff *skb)
 {
        int queue_index;
        const struct net_device_ops *ops = dev->netdev_ops;
@@ -2549,7 +2572,7 @@ int dev_queue_xmit(struct sk_buff *skb)
 
        skb_update_prio(skb);
 
-       txq = dev_pick_tx(dev, skb);
+       txq = netdev_pick_tx(dev, skb);
        q = rcu_dereference_bh(txq->qdisc);
 
 #ifdef CONFIG_NET_CLS_ACT
@@ -2622,6 +2645,8 @@ EXPORT_SYMBOL(dev_queue_xmit);
   =======================================================================*/
 
 int netdev_max_backlog __read_mostly = 1000;
+EXPORT_SYMBOL(netdev_max_backlog);
+
 int netdev_tstamp_prequeue __read_mostly = 1;
 int netdev_budget __read_mostly = 300;
 int weight_p __read_mostly = 64;            /* old backlog weight */
@@ -4512,8 +4537,8 @@ static void dev_change_rx_flags(struct net_device *dev, int flags)
 static int __dev_set_promiscuity(struct net_device *dev, int inc)
 {
        unsigned int old_flags = dev->flags;
-       uid_t uid;
-       gid_t gid;
+       kuid_t uid;
+       kgid_t gid;
 
        ASSERT_RTNL();
 
@@ -4544,8 +4569,9 @@ static int __dev_set_promiscuity(struct net_device *dev, int inc)
                                "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
                                dev->name, (dev->flags & IFF_PROMISC),
                                (old_flags & IFF_PROMISC),
-                               audit_get_loginuid(current),
-                               uid, gid,
+                               from_kuid(&init_user_ns, audit_get_loginuid(current)),
+                               from_kuid(&init_user_ns, uid),
+                               from_kgid(&init_user_ns, gid),
                                audit_get_sessionid(current));
                }
 
@@ -5238,12 +5264,12 @@ int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
  */
 static int dev_new_index(struct net *net)
 {
-       static int ifindex;
+       int ifindex = net->ifindex;
        for (;;) {
                if (++ifindex <= 0)
                        ifindex = 1;
                if (!__dev_get_by_index(net, ifindex))
-                       return ifindex;
+                       return net->ifindex = ifindex;
        }
 }
 
@@ -5321,10 +5347,6 @@ static void rollback_registered_many(struct list_head *head)
                netdev_unregister_kobject(dev);
        }
 
-       /* Process any work delayed until the end of the batch */
-       dev = list_first_entry(head, struct net_device, unreg_list);
-       call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
-
        synchronize_net();
 
        list_for_each_entry(dev, head, unreg_list)
@@ -5582,7 +5604,7 @@ int register_netdevice(struct net_device *dev)
 
        dev->iflink = -1;
 
-       ret = dev_get_valid_name(dev, dev->name);
+       ret = dev_get_valid_name(net, dev, dev->name);
        if (ret < 0)
                goto out;
 
@@ -5596,7 +5618,12 @@ int register_netdevice(struct net_device *dev)
                }
        }
 
-       dev->ifindex = dev_new_index(net);
+       ret = -EBUSY;
+       if (!dev->ifindex)
+               dev->ifindex = dev_new_index(net);
+       else if (__dev_get_by_index(net, dev->ifindex))
+               goto err_uninit;
+
        if (dev->iflink == -1)
                dev->iflink = dev->ifindex;
 
@@ -5639,6 +5666,8 @@ int register_netdevice(struct net_device *dev)
 
        set_bit(__LINK_STATE_PRESENT, &dev->state);
 
+       linkwatch_init_dev(dev);
+
        dev_init_scheduler(dev);
        dev_hold(dev);
        list_netdevice(dev);
@@ -5772,9 +5801,12 @@ static void netdev_wait_allrefs(struct net_device *dev)
 
                        /* Rebroadcast unregister notification */
                        call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
-                       /* don't resend NETDEV_UNREGISTER_BATCH, _BATCH users
-                        * should have already handle it the first time */
 
+                       __rtnl_unlock();
+                       rcu_barrier();
+                       rtnl_lock();
+
+                       call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
                        if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
                                     &dev->state)) {
                                /* We must not have linkwatch events
@@ -5836,9 +5868,8 @@ void netdev_run_todo(void)
 
        __rtnl_unlock();
 
-       /* Wait for rcu callbacks to finish before attempting to drain
-        * the device list.  This usually avoids a 250ms wait.
-        */
+
+       /* Wait for rcu callbacks to finish before next phase */
        if (!list_empty(&list))
                rcu_barrier();
 
@@ -5847,6 +5878,10 @@ void netdev_run_todo(void)
                        = list_first_entry(&list, struct net_device, todo_list);
                list_del(&dev->todo_list);
 
+               rtnl_lock();
+               call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
+               __rtnl_unlock();
+
                if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
                        pr_err("network todo '%s' but state %d\n",
                               dev->name, dev->reg_state);
@@ -5942,6 +5977,8 @@ struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
        return queue;
 }
 
+static const struct ethtool_ops default_ethtool_ops;
+
 /**
  *     alloc_netdev_mqs - allocate network device
  *     @sizeof_priv:   size of private data to allocate space for
@@ -6029,6 +6066,8 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
 
        strcpy(dev->name, name);
        dev->group = INIT_NETDEV_GROUP;
+       if (!dev->ethtool_ops)
+               dev->ethtool_ops = &default_ethtool_ops;
        return dev;
 
 free_all:
@@ -6213,7 +6252,7 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
                /* We get here if we can't use the current device name */
                if (!pat)
                        goto out;
-               if (dev_get_valid_name(dev, pat) < 0)
+               if (dev_get_valid_name(net, dev, pat) < 0)
                        goto out;
        }
 
@@ -6241,7 +6280,8 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
           the device is just moving and can keep their slaves up.
        */
        call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
-       call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
+       rcu_barrier();
+       call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
        rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
 
        /*
index c4cc2bc49f06d4041fcbe1cbf3759933ed31c1f4..87cc17db2d566e5846601d6eb03ba38ce64b2ddb 100644 (file)
@@ -22,7 +22,7 @@
  */
 
 static int __hw_addr_create_ex(struct netdev_hw_addr_list *list,
-                              unsigned char *addr, int addr_len,
+                              const unsigned char *addr, int addr_len,
                               unsigned char addr_type, bool global)
 {
        struct netdev_hw_addr *ha;
@@ -46,7 +46,7 @@ static int __hw_addr_create_ex(struct netdev_hw_addr_list *list,
 }
 
 static int __hw_addr_add_ex(struct netdev_hw_addr_list *list,
-                           unsigned char *addr, int addr_len,
+                           const unsigned char *addr, int addr_len,
                            unsigned char addr_type, bool global)
 {
        struct netdev_hw_addr *ha;
@@ -72,14 +72,15 @@ static int __hw_addr_add_ex(struct netdev_hw_addr_list *list,
        return __hw_addr_create_ex(list, addr, addr_len, addr_type, global);
 }
 
-static int __hw_addr_add(struct netdev_hw_addr_list *list, unsigned char *addr,
-                        int addr_len, unsigned char addr_type)
+static int __hw_addr_add(struct netdev_hw_addr_list *list,
+                        const unsigned char *addr, int addr_len,
+                        unsigned char addr_type)
 {
        return __hw_addr_add_ex(list, addr, addr_len, addr_type, false);
 }
 
 static int __hw_addr_del_ex(struct netdev_hw_addr_list *list,
-                           unsigned char *addr, int addr_len,
+                           const unsigned char *addr, int addr_len,
                            unsigned char addr_type, bool global)
 {
        struct netdev_hw_addr *ha;
@@ -104,8 +105,9 @@ static int __hw_addr_del_ex(struct netdev_hw_addr_list *list,
        return -ENOENT;
 }
 
-static int __hw_addr_del(struct netdev_hw_addr_list *list, unsigned char *addr,
-                        int addr_len, unsigned char addr_type)
+static int __hw_addr_del(struct netdev_hw_addr_list *list,
+                        const unsigned char *addr, int addr_len,
+                        unsigned char addr_type)
 {
        return __hw_addr_del_ex(list, addr, addr_len, addr_type, false);
 }
@@ -278,7 +280,7 @@ EXPORT_SYMBOL(dev_addr_init);
  *
  *     The caller must hold the rtnl_mutex.
  */
-int dev_addr_add(struct net_device *dev, unsigned char *addr,
+int dev_addr_add(struct net_device *dev, const unsigned char *addr,
                 unsigned char addr_type)
 {
        int err;
@@ -303,7 +305,7 @@ EXPORT_SYMBOL(dev_addr_add);
  *
  *     The caller must hold the rtnl_mutex.
  */
-int dev_addr_del(struct net_device *dev, unsigned char *addr,
+int dev_addr_del(struct net_device *dev, const unsigned char *addr,
                 unsigned char addr_type)
 {
        int err;
@@ -390,7 +392,7 @@ EXPORT_SYMBOL(dev_addr_del_multiple);
  *     @dev: device
  *     @addr: address to add
  */
-int dev_uc_add_excl(struct net_device *dev, unsigned char *addr)
+int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr)
 {
        struct netdev_hw_addr *ha;
        int err;
@@ -421,7 +423,7 @@ EXPORT_SYMBOL(dev_uc_add_excl);
  *     Add a secondary unicast address to the device or increase
  *     the reference count if it already exists.
  */
-int dev_uc_add(struct net_device *dev, unsigned char *addr)
+int dev_uc_add(struct net_device *dev, const unsigned char *addr)
 {
        int err;
 
@@ -443,7 +445,7 @@ EXPORT_SYMBOL(dev_uc_add);
  *     Release reference to a secondary unicast address and remove it
  *     from the device if the reference count drops to zero.
  */
-int dev_uc_del(struct net_device *dev, unsigned char *addr)
+int dev_uc_del(struct net_device *dev, const unsigned char *addr)
 {
        int err;
 
@@ -543,7 +545,7 @@ EXPORT_SYMBOL(dev_uc_init);
  *     @dev: device
  *     @addr: address to add
  */
-int dev_mc_add_excl(struct net_device *dev, unsigned char *addr)
+int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr)
 {
        struct netdev_hw_addr *ha;
        int err;
@@ -566,7 +568,7 @@ out:
 }
 EXPORT_SYMBOL(dev_mc_add_excl);
 
-static int __dev_mc_add(struct net_device *dev, unsigned char *addr,
+static int __dev_mc_add(struct net_device *dev, const unsigned char *addr,
                        bool global)
 {
        int err;
@@ -587,7 +589,7 @@ static int __dev_mc_add(struct net_device *dev, unsigned char *addr,
  *     Add a multicast address to the device or increase
  *     the reference count if it already exists.
  */
-int dev_mc_add(struct net_device *dev, unsigned char *addr)
+int dev_mc_add(struct net_device *dev, const unsigned char *addr)
 {
        return __dev_mc_add(dev, addr, false);
 }
@@ -600,13 +602,13 @@ EXPORT_SYMBOL(dev_mc_add);
  *
  *     Add a global multicast address to the device.
  */
-int dev_mc_add_global(struct net_device *dev, unsigned char *addr)
+int dev_mc_add_global(struct net_device *dev, const unsigned char *addr)
 {
        return __dev_mc_add(dev, addr, true);
 }
 EXPORT_SYMBOL(dev_mc_add_global);
 
-static int __dev_mc_del(struct net_device *dev, unsigned char *addr,
+static int __dev_mc_del(struct net_device *dev, const unsigned char *addr,
                        bool global)
 {
        int err;
@@ -628,7 +630,7 @@ static int __dev_mc_del(struct net_device *dev, unsigned char *addr,
  *     Release reference to a multicast address and remove it
  *     from the device if the reference count drops to zero.
  */
-int dev_mc_del(struct net_device *dev, unsigned char *addr)
+int dev_mc_del(struct net_device *dev, const unsigned char *addr)
 {
        return __dev_mc_del(dev, addr, false);
 }
@@ -642,7 +644,7 @@ EXPORT_SYMBOL(dev_mc_del);
  *     Release reference to a multicast address and remove it
  *     from the device if the reference count drops to zero.
  */
-int dev_mc_del_global(struct net_device *dev, unsigned char *addr)
+int dev_mc_del_global(struct net_device *dev, const unsigned char *addr)
 {
        return __dev_mc_del(dev, addr, true);
 }
index 56d63612e1e4b9a1ebfc04ddfa1bcd50333e0118..ee6153e2cf43b83abb51e96620d482e820560ca9 100644 (file)
@@ -222,8 +222,8 @@ void __dst_free(struct dst_entry *dst)
        if (dst_garbage.timer_inc > DST_GC_INC) {
                dst_garbage.timer_inc = DST_GC_INC;
                dst_garbage.timer_expires = DST_GC_MIN;
-               cancel_delayed_work(&dst_gc_work);
-               schedule_delayed_work(&dst_gc_work, dst_garbage.timer_expires);
+               mod_delayed_work(system_wq, &dst_gc_work,
+                                dst_garbage.timer_expires);
        }
        spin_unlock_bh(&dst_garbage.lock);
 }
@@ -374,7 +374,7 @@ static int dst_dev_event(struct notifier_block *this, unsigned long event,
        struct dst_entry *dst, *last = NULL;
 
        switch (event) {
-       case NETDEV_UNREGISTER:
+       case NETDEV_UNREGISTER_FINAL:
        case NETDEV_DOWN:
                mutex_lock(&dst_gc_mutex);
                for (dst = dst_busy_list; dst; dst = dst->next) {
index cbf033dcaf1feb8b2cfc2610dad581e89bcdb061..4d64cc2e3fa9bf1246ea3504f582616f98060bf1 100644 (file)
@@ -1426,18 +1426,6 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
        if (copy_from_user(&ethcmd, useraddr, sizeof(ethcmd)))
                return -EFAULT;
 
-       if (!dev->ethtool_ops) {
-               /* A few commands do not require any driver support,
-                * are unprivileged, and do not change anything, so we
-                * can take a shortcut to them. */
-               if (ethcmd == ETHTOOL_GDRVINFO)
-                       return ethtool_get_drvinfo(dev, useraddr);
-               else if (ethcmd == ETHTOOL_GET_TS_INFO)
-                       return ethtool_get_ts_info(dev, useraddr);
-               else
-                       return -EOPNOTSUPP;
-       }
-
        /* Allow some commands to be done by anyone */
        switch (ethcmd) {
        case ETHTOOL_GSET:
index ab7db83236c96fa2b4746ffc25717f24db671e17..58a4ba27dfe3117d439ada122000e1339a23f48f 100644 (file)
@@ -402,7 +402,7 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
        if (unresolved)
                ops->unresolved_rules++;
 
-       notify_rule_change(RTM_NEWRULE, rule, ops, nlh, NETLINK_CB(skb).pid);
+       notify_rule_change(RTM_NEWRULE, rule, ops, nlh, NETLINK_CB(skb).portid);
        flush_route_cache(ops);
        rules_ops_put(ops);
        return 0;
@@ -500,7 +500,7 @@ static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
                }
 
                notify_rule_change(RTM_DELRULE, rule, ops, nlh,
-                                  NETLINK_CB(skb).pid);
+                                  NETLINK_CB(skb).portid);
                if (ops->delete)
                        ops->delete(rule);
                fib_rule_put(rule);
@@ -601,7 +601,7 @@ static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb,
                if (idx < cb->args[1])
                        goto skip;
 
-               if (fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).pid,
+               if (fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).portid,
                                     cb->nlh->nlmsg_seq, RTM_NEWRULE,
                                     NLM_F_MULTI, ops) < 0)
                        break;
index 907efd27ec77bcf5f3f214058dce27fc77e873bd..3d92ebb7fbcf71471d4b3d90e5309f459c47bea0 100644 (file)
@@ -167,6 +167,14 @@ unsigned int sk_run_filter(const struct sk_buff *skb,
                case BPF_S_ALU_DIV_K:
                        A = reciprocal_divide(A, K);
                        continue;
+               case BPF_S_ALU_MOD_X:
+                       if (X == 0)
+                               return 0;
+                       A %= X;
+                       continue;
+               case BPF_S_ALU_MOD_K:
+                       A %= K;
+                       continue;
                case BPF_S_ALU_AND_X:
                        A &= X;
                        continue;
@@ -179,6 +187,13 @@ unsigned int sk_run_filter(const struct sk_buff *skb,
                case BPF_S_ALU_OR_K:
                        A |= K;
                        continue;
+               case BPF_S_ANC_ALU_XOR_X:
+               case BPF_S_ALU_XOR_X:
+                       A ^= X;
+                       continue;
+               case BPF_S_ALU_XOR_K:
+                       A ^= K;
+                       continue;
                case BPF_S_ALU_LSH_X:
                        A <<= X;
                        continue;
@@ -326,9 +341,6 @@ load_b:
                case BPF_S_ANC_CPU:
                        A = raw_smp_processor_id();
                        continue;
-               case BPF_S_ANC_ALU_XOR_X:
-                       A ^= X;
-                       continue;
                case BPF_S_ANC_NLATTR: {
                        struct nlattr *nla;
 
@@ -469,10 +481,14 @@ int sk_chk_filter(struct sock_filter *filter, unsigned int flen)
                [BPF_ALU|BPF_MUL|BPF_K]  = BPF_S_ALU_MUL_K,
                [BPF_ALU|BPF_MUL|BPF_X]  = BPF_S_ALU_MUL_X,
                [BPF_ALU|BPF_DIV|BPF_X]  = BPF_S_ALU_DIV_X,
+               [BPF_ALU|BPF_MOD|BPF_K]  = BPF_S_ALU_MOD_K,
+               [BPF_ALU|BPF_MOD|BPF_X]  = BPF_S_ALU_MOD_X,
                [BPF_ALU|BPF_AND|BPF_K]  = BPF_S_ALU_AND_K,
                [BPF_ALU|BPF_AND|BPF_X]  = BPF_S_ALU_AND_X,
                [BPF_ALU|BPF_OR|BPF_K]   = BPF_S_ALU_OR_K,
                [BPF_ALU|BPF_OR|BPF_X]   = BPF_S_ALU_OR_X,
+               [BPF_ALU|BPF_XOR|BPF_K]  = BPF_S_ALU_XOR_K,
+               [BPF_ALU|BPF_XOR|BPF_X]  = BPF_S_ALU_XOR_X,
                [BPF_ALU|BPF_LSH|BPF_K]  = BPF_S_ALU_LSH_K,
                [BPF_ALU|BPF_LSH|BPF_X]  = BPF_S_ALU_LSH_X,
                [BPF_ALU|BPF_RSH|BPF_K]  = BPF_S_ALU_RSH_K,
@@ -531,6 +547,11 @@ int sk_chk_filter(struct sock_filter *filter, unsigned int flen)
                                return -EINVAL;
                        ftest->k = reciprocal_value(ftest->k);
                        break;
+               case BPF_S_ALU_MOD_K:
+                       /* check for division by zero */
+                       if (ftest->k == 0)
+                               return -EINVAL;
+                       break;
                case BPF_S_LD_MEM:
                case BPF_S_LDX_MEM:
                case BPF_S_ST:
index c3519c6d1b169a5c895efd781c7958218d7f8dc6..8f82a5cc3851d3a61953a6cad926c9040bc770a7 100644 (file)
@@ -76,6 +76,14 @@ static void rfc2863_policy(struct net_device *dev)
 }
 
 
+void linkwatch_init_dev(struct net_device *dev)
+{
+       /* Handle pre-registration link state changes */
+       if (!netif_carrier_ok(dev) || netif_dormant(dev))
+               rfc2863_policy(dev);
+}
+
+
 static bool linkwatch_urgent_event(struct net_device *dev)
 {
        if (!netif_running(dev))
@@ -120,22 +128,13 @@ static void linkwatch_schedule_work(int urgent)
                delay = 0;
 
        /*
-        * This is true if we've scheduled it immeditately or if we don't
-        * need an immediate execution and it's already pending.
+        * If urgent, schedule immediate execution; otherwise, don't
+        * override the existing timer.
         */
-       if (schedule_delayed_work(&linkwatch_work, delay) == !delay)
-               return;
-
-       /* Don't bother if there is nothing urgent. */
-       if (!test_bit(LW_URGENT, &linkwatch_flags))
-               return;
-
-       /* It's already running which is good enough. */
-       if (!__cancel_delayed_work(&linkwatch_work))
-               return;
-
-       /* Otherwise we reschedule it again for immediate execution. */
-       schedule_delayed_work(&linkwatch_work, 0);
+       if (test_bit(LW_URGENT, &linkwatch_flags))
+               mod_delayed_work(system_wq, &linkwatch_work, 0);
+       else
+               schedule_delayed_work(&linkwatch_work, delay);
 }
 
 
index 117afaf512689b4de570ddb0c32869a4c5532918..baca771caae2df503a1b0f1bcc9a4fcb577777e8 100644 (file)
@@ -1545,7 +1545,7 @@ static void neigh_table_init_no_netlink(struct neigh_table *tbl)
                panic("cannot allocate neighbour cache hashes");
 
        rwlock_init(&tbl->lock);
-       INIT_DELAYED_WORK_DEFERRABLE(&tbl->gc_work, neigh_periodic_work);
+       INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work);
        schedule_delayed_work(&tbl->gc_work, tbl->parms.reachable_time);
        setup_timer(&tbl->proxy_timer, neigh_proxy_process, (unsigned long)tbl);
        skb_queue_head_init_class(&tbl->proxy_queue,
@@ -2102,7 +2102,7 @@ static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
                if (tidx < tbl_skip || (family && tbl->family != family))
                        continue;
 
-               if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).pid,
+               if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).portid,
                                       cb->nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
                                       NLM_F_MULTI) <= 0)
                        break;
@@ -2115,7 +2115,7 @@ static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
                                goto next;
 
                        if (neightbl_fill_param_info(skb, tbl, p,
-                                                    NETLINK_CB(cb->skb).pid,
+                                                    NETLINK_CB(cb->skb).portid,
                                                     cb->nlh->nlmsg_seq,
                                                     RTM_NEWNEIGHTBL,
                                                     NLM_F_MULTI) <= 0)
@@ -2244,7 +2244,7 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
                                continue;
                        if (idx < s_idx)
                                goto next;
-                       if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
+                       if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
                                            cb->nlh->nlmsg_seq,
                                            RTM_NEWNEIGH,
                                            NLM_F_MULTI) <= 0) {
@@ -2281,7 +2281,7 @@ static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
                                continue;
                        if (idx < s_idx)
                                goto next;
-                       if (pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
+                       if (pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
                                            cb->nlh->nlmsg_seq,
                                            RTM_NEWNEIGH,
                                            NLM_F_MULTI, tbl) <= 0) {
index 72607174ea5a4af158855f588b767b5c4c86d973..bcf02f608cbfa76ad06da490f2dbf423e728fc17 100644 (file)
@@ -166,9 +166,21 @@ static ssize_t show_duplex(struct device *dev,
 
        if (netif_running(netdev)) {
                struct ethtool_cmd cmd;
-               if (!__ethtool_get_settings(netdev, &cmd))
-                       ret = sprintf(buf, "%s\n",
-                                     cmd.duplex ? "full" : "half");
+               if (!__ethtool_get_settings(netdev, &cmd)) {
+                       const char *duplex;
+                       switch (cmd.duplex) {
+                       case DUPLEX_HALF:
+                               duplex = "half";
+                               break;
+                       case DUPLEX_FULL:
+                               duplex = "full";
+                               break;
+                       default:
+                               duplex = "unknown";
+                               break;
+                       }
+                       ret = sprintf(buf, "%s\n", duplex);
+               }
        }
        rtnl_unlock();
        return ret;
index e4ba3e70c1747684ad480815f67b2410e87974a7..77a0388fc3beccbf39187e357c7a1b4c93f09d73 100644 (file)
@@ -328,7 +328,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
        if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
                struct netdev_queue *txq;
 
-               txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
+               txq = netdev_pick_tx(dev, skb);
 
                /* try until next clock tick */
                for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
@@ -380,6 +380,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
        struct udphdr *udph;
        struct iphdr *iph;
        struct ethhdr *eth;
+       static atomic_t ip_ident;
 
        udp_len = len + sizeof(*udph);
        ip_len = udp_len + sizeof(*iph);
@@ -415,7 +416,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
        put_unaligned(0x45, (unsigned char *)iph);
        iph->tos      = 0;
        put_unaligned(htons(ip_len), &(iph->tot_len));
-       iph->id       = 0;
+       iph->id       = htons(atomic_inc_return(&ip_ident));
        iph->frag_off = 0;
        iph->ttl      = 64;
        iph->protocol = IPPROTO_UDP;
index c75e3f9d060f8e3d086b747255ab65c8104f7dde..79285a36035ffa6624c49ee1a0a865558f66859e 100644 (file)
@@ -73,7 +73,6 @@ static int extend_netdev_table(struct net_device *dev, u32 new_len)
                           ((sizeof(u32) * new_len));
        struct netprio_map *new_priomap = kzalloc(new_size, GFP_KERNEL);
        struct netprio_map *old_priomap;
-       int i;
 
        old_priomap  = rtnl_dereference(dev->priomap);
 
@@ -82,10 +81,10 @@ static int extend_netdev_table(struct net_device *dev, u32 new_len)
                return -ENOMEM;
        }
 
-       for (i = 0;
-            old_priomap && (i < old_priomap->priomap_len);
-            i++)
-               new_priomap->priomap[i] = old_priomap->priomap[i];
+       if (old_priomap)
+               memcpy(new_priomap->priomap, old_priomap->priomap,
+                      old_priomap->priomap_len *
+                      sizeof(old_priomap->priomap[0]));
 
        new_priomap->priomap_len = new_len;
 
@@ -109,32 +108,6 @@ static int write_update_netdev_table(struct net_device *dev)
        return ret;
 }
 
-static int update_netdev_tables(void)
-{
-       int ret = 0;
-       struct net_device *dev;
-       u32 max_len;
-       struct netprio_map *map;
-
-       rtnl_lock();
-       max_len = atomic_read(&max_prioidx) + 1;
-       for_each_netdev(&init_net, dev) {
-               map = rtnl_dereference(dev->priomap);
-               /*
-                * don't allocate priomap if we didn't
-                * change net_prio.ifpriomap (map == NULL),
-                * this will speed up skb_update_prio.
-                */
-               if (map && map->priomap_len < max_len) {
-                       ret = extend_netdev_table(dev, max_len);
-                       if (ret < 0)
-                               break;
-               }
-       }
-       rtnl_unlock();
-       return ret;
-}
-
 static struct cgroup_subsys_state *cgrp_create(struct cgroup *cgrp)
 {
        struct cgroup_netprio_state *cs;
@@ -153,12 +126,6 @@ static struct cgroup_subsys_state *cgrp_create(struct cgroup *cgrp)
                goto out;
        }
 
-       ret = update_netdev_tables();
-       if (ret < 0) {
-               put_prioidx(cs->prioidx);
-               goto out;
-       }
-
        return &cs->css;
 out:
        kfree(cs);
@@ -272,38 +239,24 @@ out_free_devname:
        return ret;
 }
 
+static int update_netprio(const void *v, struct file *file, unsigned n)
+{
+       int err;
+       struct socket *sock = sock_from_file(file, &err);
+       if (sock)
+               sock->sk->sk_cgrp_prioidx = (u32)(unsigned long)v;
+       return 0;
+}
+
 void net_prio_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
 {
        struct task_struct *p;
+       void *v;
 
        cgroup_taskset_for_each(p, cgrp, tset) {
-               unsigned int fd;
-               struct fdtable *fdt;
-               struct files_struct *files;
-
                task_lock(p);
-               files = p->files;
-               if (!files) {
-                       task_unlock(p);
-                       continue;
-               }
-
-               spin_lock(&files->file_lock);
-               fdt = files_fdtable(files);
-               for (fd = 0; fd < fdt->max_fds; fd++) {
-                       struct file *file;
-                       struct socket *sock;
-                       int err;
-
-                       file = fcheck_files(files, fd);
-                       if (!file)
-                               continue;
-
-                       sock = sock_from_file(file, &err);
-                       if (sock)
-                               sock_update_netprioidx(sock->sk, p);
-               }
-               spin_unlock(&files->file_lock);
+               v = (void *)(unsigned long)task_netprioidx(p);
+               iterate_fd(p->files, 0, update_netprio, v);
                task_unlock(p);
        }
 }
@@ -326,11 +279,19 @@ struct cgroup_subsys net_prio_subsys = {
        .create         = cgrp_create,
        .destroy        = cgrp_destroy,
        .attach         = net_prio_attach,
-#ifdef CONFIG_NETPRIO_CGROUP
        .subsys_id      = net_prio_subsys_id,
-#endif
        .base_cftypes   = ss_files,
-       .module         = THIS_MODULE
+       .module         = THIS_MODULE,
+
+       /*
+        * net_prio has artificial limit on the number of cgroups and
+        * disallows nesting making it impossible to co-mount it with other
+        * hierarchical subsystems.  Remove the artificially low PRIOIDX_SZ
+        * limit and properly nest configuration such that children follow
+        * their parents' configurations by default and are allowed to
+        * override and remove the following.
+        */
+       .broken_hierarchy = true,
 };
 
 static int netprio_device_event(struct notifier_block *unused,
@@ -366,10 +327,6 @@ static int __init init_cgroup_netprio(void)
        ret = cgroup_load_subsys(&net_prio_subsys);
        if (ret)
                goto out;
-#ifndef CONFIG_NETPRIO_CGROUP
-       smp_wmb();
-       net_prio_subsys_id = net_prio_subsys.subsys_id;
-#endif
 
        register_netdevice_notifier(&netprio_device_notifier);
 
@@ -386,11 +343,6 @@ static void __exit exit_cgroup_netprio(void)
 
        cgroup_unload_subsys(&net_prio_subsys);
 
-#ifndef CONFIG_NETPRIO_CGROUP
-       net_prio_subsys_id = -1;
-       synchronize_rcu();
-#endif
-
        rtnl_lock();
        for_each_netdev(&init_net, dev) {
                old = rtnl_dereference(dev->priomap);
index 9b570a6a33c5d8c52d777e160742dc31ec350c16..c31d9e8668c30346894adbf3be55eed4beeb1258 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/random.h>
 #include <linux/slab.h>
 #include <linux/string.h>
+#include <linux/tcp.h>
 #include <linux/vmalloc.h>
 
 #include <net/request_sock.h>
@@ -130,3 +131,97 @@ void reqsk_queue_destroy(struct request_sock_queue *queue)
                kfree(lopt);
 }
 
+/*
+ * This function is called to set a Fast Open socket's "fastopen_rsk" field
+ * to NULL when a TFO socket no longer needs to access the request_sock.
+ * This happens only after 3WHS has been either completed or aborted (e.g.,
+ * RST is received).
+ *
+ * Before TFO, a child socket is created only after 3WHS is completed,
+ * hence it never needs to access the request_sock. Things get a lot more
+ * complex with TFO. A child socket, accepted or not, has to access its
+ * request_sock for 3WHS processing, e.g., to retransmit SYN-ACK pkts,
+ * until 3WHS is either completed or aborted. Afterwards the req will stay
+ * until either the child socket is accepted, or in the rare case when the
+ * listener is closed before the child is accepted.
+ *
+ * In short, a request socket is only freed after BOTH 3WHS has completed
+ * (or aborted) and the child socket has been accepted (or listener closed).
+ * When a child socket is accepted, its corresponding req->sk is set to
+ * NULL since it's no longer needed. More importantly, "req->sk == NULL"
+ * will be used by the code below to determine if a child socket has been
+ * accepted or not, and the check is protected by the fastopenq->lock
+ * described below.
+ *
+ * Note that fastopen_rsk is only accessed from the child socket's context
+ * with its socket lock held. But a request_sock (req) can be accessed by
+ * both its child socket through fastopen_rsk, and a listener socket through
+ * icsk_accept_queue.rskq_accept_head. To protect the access a simple spin
+ * lock per listener "icsk->icsk_accept_queue.fastopenq->lock" is created.
+ * Only in the rare case when both the listener and the child locks are held,
+ * e.g., in inet_csk_listen_stop() do we not need to acquire the lock.
+ * The lock also protects other fields such as fastopenq->qlen, which is
+ * decremented by this function when fastopen_rsk is no longer needed.
+ *
+ * Note that another solution was to simply use the existing socket lock
+ * from the listener. But first socket lock is difficult to use. It is not
+ * a simple spin lock - one must consider sock_owned_by_user() and arrange
+ * to use sk_add_backlog() stuff. But what really makes it infeasible is the
+ * locking hierarchy violation. E.g., inet_csk_listen_stop() may try to
+ * acquire a child's lock while holding listener's socket lock. A corner
+ * case might also exist in tcp_v4_hnd_req() that will trigger this locking
+ * order.
+ *
+ * When a TFO req is created, it needs to sock_hold its listener to prevent
+ * the latter data structure from going away.
+ *
+ * This function also sets "treq->listener" to NULL and unreferences the
+ * socket. treq->listener is used by the listener so it is protected by the
+ * fastopenq->lock in this function.
+ */
+void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
+                          bool reset)
+{
+       struct sock *lsk = tcp_rsk(req)->listener;
+       struct fastopen_queue *fastopenq =
+           inet_csk(lsk)->icsk_accept_queue.fastopenq;
+
+       BUG_ON(!spin_is_locked(&sk->sk_lock.slock) && !sock_owned_by_user(sk));
+
+       tcp_sk(sk)->fastopen_rsk = NULL;
+       spin_lock_bh(&fastopenq->lock);
+       fastopenq->qlen--;
+       tcp_rsk(req)->listener = NULL;
+       if (req->sk)    /* the child socket hasn't been accepted yet */
+               goto out;
+
+       if (!reset || lsk->sk_state != TCP_LISTEN) {
+               /* If the listener has been closed don't bother with the
+                * special RST handling below.
+                */
+               spin_unlock_bh(&fastopenq->lock);
+               sock_put(lsk);
+               reqsk_free(req);
+               return;
+       }
+       /* Wait for 60secs before removing a req that has triggered RST.
+        * This is a simple defense against TFO spoofing attack - by
+        * counting the req against fastopen.max_qlen, and disabling
+        * TFO when the qlen exceeds max_qlen.
+        *
+        * For more details see CoNext'11 "TCP Fast Open" paper.
+        */
+       req->expires = jiffies + 60*HZ;
+       if (fastopenq->rskq_rst_head == NULL)
+               fastopenq->rskq_rst_head = req;
+       else
+               fastopenq->rskq_rst_tail->dl_next = req;
+
+       req->dl_next = NULL;
+       fastopenq->rskq_rst_tail = req;
+       fastopenq->qlen++;
+out:
+       spin_unlock_bh(&fastopenq->lock);
+       sock_put(lsk);
+       return;
+}
index 2c5a0a06c4ce3053a4a6c6afa3437f3a47f2d08c..76d4c2c3c89b9d170e89c39fbf0ed5729621a0bc 100644 (file)
@@ -618,7 +618,7 @@ int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,
                       long expires, u32 error)
 {
        struct rta_cacheinfo ci = {
-               .rta_lastuse = jiffies_to_clock_t(jiffies - dst->lastuse),
+               .rta_lastuse = jiffies_delta_to_clock_t(jiffies - dst->lastuse),
                .rta_used = dst->__use,
                .rta_clntref = atomic_read(&(dst->__refcnt)),
                .rta_error = error,
@@ -1081,7 +1081,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
                        if (idx < s_idx)
                                goto cont;
                        if (rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
-                                            NETLINK_CB(cb->skb).pid,
+                                            NETLINK_CB(cb->skb).portid,
                                             cb->nlh->nlmsg_seq, 0,
                                             NLM_F_MULTI,
                                             ext_filter_mask) <= 0)
@@ -1812,8 +1812,6 @@ replay:
                        return -ENODEV;
                }
 
-               if (ifm->ifi_index)
-                       return -EOPNOTSUPP;
                if (tb[IFLA_MAP] || tb[IFLA_MASTER] || tb[IFLA_PROTINFO])
                        return -EOPNOTSUPP;
 
@@ -1839,10 +1837,14 @@ replay:
                        return PTR_ERR(dest_net);
 
                dev = rtnl_create_link(net, dest_net, ifname, ops, tb);
-
-               if (IS_ERR(dev))
+               if (IS_ERR(dev)) {
                        err = PTR_ERR(dev);
-               else if (ops->newlink)
+                       goto out;
+               }
+
+               dev->ifindex = ifm->ifi_index;
+
+               if (ops->newlink)
                        err = ops->newlink(net, dev, tb, data);
                else
                        err = register_netdevice(dev);
@@ -1897,14 +1899,14 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
        if (nskb == NULL)
                return -ENOBUFS;
 
-       err = rtnl_fill_ifinfo(nskb, dev, RTM_NEWLINK, NETLINK_CB(skb).pid,
+       err = rtnl_fill_ifinfo(nskb, dev, RTM_NEWLINK, NETLINK_CB(skb).portid,
                               nlh->nlmsg_seq, 0, 0, ext_filter_mask);
        if (err < 0) {
                /* -EMSGSIZE implies BUG in if_nlmsg_size */
                WARN_ON(err == -EMSGSIZE);
                kfree_skb(nskb);
        } else
-               err = rtnl_unicast(nskb, net, NETLINK_CB(skb).pid);
+               err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid);
 
        return err;
 }
@@ -2088,7 +2090,8 @@ static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
        if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
            (dev->priv_flags & IFF_BRIDGE_PORT)) {
                master = dev->master;
-               err = master->netdev_ops->ndo_fdb_add(ndm, dev, addr,
+               err = master->netdev_ops->ndo_fdb_add(ndm, tb,
+                                                     dev, addr,
                                                      nlh->nlmsg_flags);
                if (err)
                        goto out;
@@ -2098,7 +2101,8 @@ static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
 
        /* Embedded bridge, macvlan, and any other device support */
        if ((ndm->ndm_flags & NTF_SELF) && dev->netdev_ops->ndo_fdb_add) {
-               err = dev->netdev_ops->ndo_fdb_add(ndm, dev, addr,
+               err = dev->netdev_ops->ndo_fdb_add(ndm, tb,
+                                                  dev, addr,
                                                   nlh->nlmsg_flags);
 
                if (!err) {
@@ -2178,9 +2182,9 @@ static int nlmsg_populate_fdb(struct sk_buff *skb,
 {
        struct netdev_hw_addr *ha;
        int err;
-       u32 pid, seq;
+       u32 portid, seq;
 
-       pid = NETLINK_CB(cb->skb).pid;
+       portid = NETLINK_CB(cb->skb).portid;
        seq = cb->nlh->nlmsg_seq;
 
        list_for_each_entry(ha, &list->list, list) {
@@ -2188,7 +2192,7 @@ static int nlmsg_populate_fdb(struct sk_buff *skb,
                        goto skip;
 
                err = nlmsg_populate_fdb_fill(skb, dev, ha->addr,
-                                             pid, seq, 0, NTF_SELF);
+                                             portid, seq, 0, NTF_SELF);
                if (err < 0)
                        return err;
 skip:
@@ -2356,7 +2360,7 @@ static int rtnetlink_event(struct notifier_block *this, unsigned long event, voi
        case NETDEV_PRE_TYPE_CHANGE:
        case NETDEV_GOING_DOWN:
        case NETDEV_UNREGISTER:
-       case NETDEV_UNREGISTER_BATCH:
+       case NETDEV_UNREGISTER_FINAL:
        case NETDEV_RELEASE:
        case NETDEV_JOIN:
                break;
@@ -2379,9 +2383,10 @@ static int __net_init rtnetlink_net_init(struct net *net)
                .groups         = RTNLGRP_MAX,
                .input          = rtnetlink_rcv,
                .cb_mutex       = &rtnl_mutex,
+               .flags          = NL_CFG_F_NONROOT_RECV,
        };
 
-       sk = netlink_kernel_create(net, NETLINK_ROUTE, THIS_MODULE, &cfg);
+       sk = netlink_kernel_create(net, NETLINK_ROUTE, &cfg);
        if (!sk)
                return -ENOMEM;
        net->rtnl = sk;
@@ -2414,7 +2419,6 @@ void __init rtnetlink_init(void)
        if (register_pernet_subsys(&rtnetlink_net_ops))
                panic("rtnetlink_init: cannot initialize rtnetlink\n");
 
-       netlink_set_nonroot(NETLINK_ROUTE, NL_NONROOT_RECV);
        register_netdevice_notifier(&rtnetlink_dev_notifier);
 
        rtnl_register(PF_UNSPEC, RTM_GETLINK, rtnl_getlink,
index 040cebeed45b810cf9dd7d85c6ac2cbade98ee77..ab570841a532ea318b5133596f1957192ff01354 100644 (file)
 static __inline__ int scm_check_creds(struct ucred *creds)
 {
        const struct cred *cred = current_cred();
+       kuid_t uid = make_kuid(cred->user_ns, creds->uid);
+       kgid_t gid = make_kgid(cred->user_ns, creds->gid);
+
+       if (!uid_valid(uid) || !gid_valid(gid))
+               return -EINVAL;
 
        if ((creds->pid == task_tgid_vnr(current) || capable(CAP_SYS_ADMIN)) &&
-           ((creds->uid == cred->uid   || creds->uid == cred->euid ||
-             creds->uid == cred->suid) || capable(CAP_SETUID)) &&
-           ((creds->gid == cred->gid   || creds->gid == cred->egid ||
-             creds->gid == cred->sgid) || capable(CAP_SETGID))) {
+           ((uid_eq(uid, cred->uid)   || uid_eq(uid, cred->euid) ||
+             uid_eq(uid, cred->suid)) || capable(CAP_SETUID)) &&
+           ((gid_eq(gid, cred->gid)   || gid_eq(gid, cred->egid) ||
+             gid_eq(gid, cred->sgid)) || capable(CAP_SETGID))) {
               return 0;
        }
        return -EPERM;
@@ -149,39 +154,54 @@ int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *p)
                                goto error;
                        break;
                case SCM_CREDENTIALS:
+               {
+                       struct ucred creds;
+                       kuid_t uid;
+                       kgid_t gid;
                        if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct ucred)))
                                goto error;
-                       memcpy(&p->creds, CMSG_DATA(cmsg), sizeof(struct ucred));
-                       err = scm_check_creds(&p->creds);
+                       memcpy(&creds, CMSG_DATA(cmsg), sizeof(struct ucred));
+                       err = scm_check_creds(&creds);
                        if (err)
                                goto error;
 
-                       if (!p->pid || pid_vnr(p->pid) != p->creds.pid) {
+                       p->creds.pid = creds.pid;
+                       if (!p->pid || pid_vnr(p->pid) != creds.pid) {
                                struct pid *pid;
                                err = -ESRCH;
-                               pid = find_get_pid(p->creds.pid);
+                               pid = find_get_pid(creds.pid);
                                if (!pid)
                                        goto error;
                                put_pid(p->pid);
                                p->pid = pid;
                        }
 
+                       err = -EINVAL;
+                       uid = make_kuid(current_user_ns(), creds.uid);
+                       gid = make_kgid(current_user_ns(), creds.gid);
+                       if (!uid_valid(uid) || !gid_valid(gid))
+                               goto error;
+
+                       p->creds.uid = uid;
+                       p->creds.gid = gid;
+
                        if (!p->cred ||
-                           (p->cred->euid != p->creds.uid) ||
-                           (p->cred->egid != p->creds.gid)) {
+                           !uid_eq(p->cred->euid, uid) ||
+                           !gid_eq(p->cred->egid, gid)) {
                                struct cred *cred;
                                err = -ENOMEM;
                                cred = prepare_creds();
                                if (!cred)
                                        goto error;
 
-                               cred->uid = cred->euid = p->creds.uid;
-                               cred->gid = cred->egid = p->creds.gid;
+                               cred->uid = cred->euid = uid;
+                               cred->gid = cred->egid = gid;
                                if (p->cred)
                                        put_cred(p->cred);
                                p->cred = cred;
                        }
                        break;
+               }
                default:
                        goto error;
                }
@@ -281,11 +301,10 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
                        break;
                }
                /* Bump the usage count and install the file. */
-               get_file(fp[i]);
                sock = sock_from_file(fp[i], &err);
                if (sock)
                        sock_update_netprioidx(sock->sk, current);
-               fd_install(new_fd, fp[i]);
+               fd_install(new_fd, get_file(fp[i]));
        }
 
        if (i > 0)
index 99b2596531bbc2a9a714bf262a8c79c61f1726b5..e61a8bb7fce73393ddaab88747a9637c1dcbf311 100644 (file)
@@ -76,6 +76,7 @@ u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
 
        return hash[0];
 }
+EXPORT_SYMBOL(secure_ipv6_port_ephemeral);
 #endif
 
 #ifdef CONFIG_INET
index e33ebae519c8c3283dadde917cdf46d413229a54..cdc28598f4efadb83b79a006a71ea676b508a655 100644 (file)
@@ -340,43 +340,57 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size)
 EXPORT_SYMBOL(build_skb);
 
 struct netdev_alloc_cache {
-       struct page *page;
-       unsigned int offset;
-       unsigned int pagecnt_bias;
+       struct page_frag        frag;
+       /* we maintain a pagecount bias, so that we dont dirty cache line
+        * containing page->_count every time we allocate a fragment.
+        */
+       unsigned int            pagecnt_bias;
 };
 static DEFINE_PER_CPU(struct netdev_alloc_cache, netdev_alloc_cache);
 
-#define NETDEV_PAGECNT_BIAS (PAGE_SIZE / SMP_CACHE_BYTES)
+#define NETDEV_FRAG_PAGE_MAX_ORDER get_order(32768)
+#define NETDEV_FRAG_PAGE_MAX_SIZE  (PAGE_SIZE << NETDEV_FRAG_PAGE_MAX_ORDER)
+#define NETDEV_PAGECNT_MAX_BIAS           NETDEV_FRAG_PAGE_MAX_SIZE
 
 static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
 {
        struct netdev_alloc_cache *nc;
        void *data = NULL;
+       int order;
        unsigned long flags;
 
        local_irq_save(flags);
        nc = &__get_cpu_var(netdev_alloc_cache);
-       if (unlikely(!nc->page)) {
+       if (unlikely(!nc->frag.page)) {
 refill:
-               nc->page = alloc_page(gfp_mask);
-               if (unlikely(!nc->page))
-                       goto end;
+               for (order = NETDEV_FRAG_PAGE_MAX_ORDER; ;) {
+                       gfp_t gfp = gfp_mask;
+
+                       if (order)
+                               gfp |= __GFP_COMP | __GFP_NOWARN;
+                       nc->frag.page = alloc_pages(gfp, order);
+                       if (likely(nc->frag.page))
+                               break;
+                       if (--order < 0)
+                               goto end;
+               }
+               nc->frag.size = PAGE_SIZE << order;
 recycle:
-               atomic_set(&nc->page->_count, NETDEV_PAGECNT_BIAS);
-               nc->pagecnt_bias = NETDEV_PAGECNT_BIAS;
-               nc->offset = 0;
+               atomic_set(&nc->frag.page->_count, NETDEV_PAGECNT_MAX_BIAS);
+               nc->pagecnt_bias = NETDEV_PAGECNT_MAX_BIAS;
+               nc->frag.offset = 0;
        }
 
-       if (nc->offset + fragsz > PAGE_SIZE) {
+       if (nc->frag.offset + fragsz > nc->frag.size) {
                /* avoid unnecessary locked operations if possible */
-               if ((atomic_read(&nc->page->_count) == nc->pagecnt_bias) ||
-                   atomic_sub_and_test(nc->pagecnt_bias, &nc->page->_count))
+               if ((atomic_read(&nc->frag.page->_count) == nc->pagecnt_bias) ||
+                   atomic_sub_and_test(nc->pagecnt_bias, &nc->frag.page->_count))
                        goto recycle;
                goto refill;
        }
 
-       data = page_address(nc->page) + nc->offset;
-       nc->offset += fragsz;
+       data = page_address(nc->frag.page) + nc->frag.offset;
+       nc->frag.offset += fragsz;
        nc->pagecnt_bias--;
 end:
        local_irq_restore(flags);
@@ -1655,38 +1669,19 @@ static struct page *linear_to_page(struct page *page, unsigned int *len,
                                   unsigned int *offset,
                                   struct sk_buff *skb, struct sock *sk)
 {
-       struct page *p = sk->sk_sndmsg_page;
-       unsigned int off;
-
-       if (!p) {
-new_page:
-               p = sk->sk_sndmsg_page = alloc_pages(sk->sk_allocation, 0);
-               if (!p)
-                       return NULL;
+       struct page_frag *pfrag = sk_page_frag(sk);
 
-               off = sk->sk_sndmsg_off = 0;
-               /* hold one ref to this page until it's full */
-       } else {
-               unsigned int mlen;
-
-               /* If we are the only user of the page, we can reset offset */
-               if (page_count(p) == 1)
-                       sk->sk_sndmsg_off = 0;
-               off = sk->sk_sndmsg_off;
-               mlen = PAGE_SIZE - off;
-               if (mlen < 64 && mlen < *len) {
-                       put_page(p);
-                       goto new_page;
-               }
+       if (!sk_page_frag_refill(sk, pfrag))
+               return NULL;
 
-               *len = min_t(unsigned int, *len, mlen);
-       }
+       *len = min_t(unsigned int, *len, pfrag->size - pfrag->offset);
 
-       memcpy(page_address(p) + off, page_address(page) + *offset, *len);
-       sk->sk_sndmsg_off += *len;
-       *offset = off;
+       memcpy(page_address(pfrag->page) + pfrag->offset,
+              page_address(page) + *offset, *len);
+       *offset = pfrag->offset;
+       pfrag->offset += *len;
 
-       return p;
+       return pfrag->page;
 }
 
 static bool spd_can_coalesce(const struct splice_pipe_desc *spd,
@@ -3488,8 +3483,7 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
                    skb_shinfo(from)->nr_frags > MAX_SKB_FRAGS)
                        return false;
 
-               delta = from->truesize -
-                       SKB_TRUESIZE(skb_end_pointer(from) - from->head);
+               delta = from->truesize - SKB_TRUESIZE(skb_end_offset(from));
        }
 
        WARN_ON_ONCE(delta < len);
index a6000fbad2949f58a079322f4e328e0c051df896..8a146cfcc366fbead42d31d882893a74ec8dcf4e 100644 (file)
@@ -326,17 +326,6 @@ int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 }
 EXPORT_SYMBOL(__sk_backlog_rcv);
 
-#if defined(CONFIG_CGROUPS)
-#if !defined(CONFIG_NET_CLS_CGROUP)
-int net_cls_subsys_id = -1;
-EXPORT_SYMBOL_GPL(net_cls_subsys_id);
-#endif
-#if !defined(CONFIG_NETPRIO_CGROUP)
-int net_prio_subsys_id = -1;
-EXPORT_SYMBOL_GPL(net_prio_subsys_id);
-#endif
-#endif
-
 static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
 {
        struct timeval tv;
@@ -869,8 +858,8 @@ void cred_to_ucred(struct pid *pid, const struct cred *cred,
        if (cred) {
                struct user_namespace *current_ns = current_user_ns();
 
-               ucred->uid = from_kuid(current_ns, cred->euid);
-               ucred->gid = from_kgid(current_ns, cred->egid);
+               ucred->uid = from_kuid_munged(current_ns, cred->euid);
+               ucred->gid = from_kgid_munged(current_ns, cred->egid);
        }
 }
 EXPORT_SYMBOL_GPL(cred_to_ucred);
@@ -1224,6 +1213,7 @@ static void sk_prot_free(struct proto *prot, struct sock *sk)
 }
 
 #ifdef CONFIG_CGROUPS
+#if IS_ENABLED(CONFIG_NET_CLS_CGROUP)
 void sock_update_classid(struct sock *sk)
 {
        u32 classid;
@@ -1231,11 +1221,13 @@ void sock_update_classid(struct sock *sk)
        rcu_read_lock();  /* doing current task, which cannot vanish. */
        classid = task_cls_classid(current);
        rcu_read_unlock();
-       if (classid && classid != sk->sk_classid)
+       if (classid != sk->sk_classid)
                sk->sk_classid = classid;
 }
 EXPORT_SYMBOL(sock_update_classid);
+#endif
 
+#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
 void sock_update_netprioidx(struct sock *sk, struct task_struct *task)
 {
        if (in_interrupt())
@@ -1245,6 +1237,7 @@ void sock_update_netprioidx(struct sock *sk, struct task_struct *task)
 }
 EXPORT_SYMBOL_GPL(sock_update_netprioidx);
 #endif
+#endif
 
 /**
  *     sk_alloc - All socket objects are allocated here
@@ -1465,19 +1458,6 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
 }
 EXPORT_SYMBOL_GPL(sk_setup_caps);
 
-void __init sk_init(void)
-{
-       if (totalram_pages <= 4096) {
-               sysctl_wmem_max = 32767;
-               sysctl_rmem_max = 32767;
-               sysctl_wmem_default = 32767;
-               sysctl_rmem_default = 32767;
-       } else if (totalram_pages >= 131072) {
-               sysctl_wmem_max = 131071;
-               sysctl_rmem_max = 131071;
-       }
-}
-
 /*
  *     Simple resource managers for sockets.
  */
@@ -1535,12 +1515,12 @@ void sock_edemux(struct sk_buff *skb)
 }
 EXPORT_SYMBOL(sock_edemux);
 
-int sock_i_uid(struct sock *sk)
+kuid_t sock_i_uid(struct sock *sk)
 {
-       int uid;
+       kuid_t uid;
 
        read_lock_bh(&sk->sk_callback_lock);
-       uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0;
+       uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : GLOBAL_ROOT_UID;
        read_unlock_bh(&sk->sk_callback_lock);
        return uid;
 }
@@ -1745,6 +1725,45 @@ struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
 }
 EXPORT_SYMBOL(sock_alloc_send_skb);
 
+/* On 32bit arches, an skb frag is limited to 2^15 */
+#define SKB_FRAG_PAGE_ORDER    get_order(32768)
+
+bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
+{
+       int order;
+
+       if (pfrag->page) {
+               if (atomic_read(&pfrag->page->_count) == 1) {
+                       pfrag->offset = 0;
+                       return true;
+               }
+               if (pfrag->offset < pfrag->size)
+                       return true;
+               put_page(pfrag->page);
+       }
+
+       /* We restrict high order allocations to users that can afford to wait */
+       order = (sk->sk_allocation & __GFP_WAIT) ? SKB_FRAG_PAGE_ORDER : 0;
+
+       do {
+               gfp_t gfp = sk->sk_allocation;
+
+               if (order)
+                       gfp |= __GFP_COMP | __GFP_NOWARN;
+               pfrag->page = alloc_pages(gfp, order);
+               if (likely(pfrag->page)) {
+                       pfrag->offset = 0;
+                       pfrag->size = PAGE_SIZE << order;
+                       return true;
+               }
+       } while (--order >= 0);
+
+       sk_enter_memory_pressure(sk);
+       sk_stream_moderate_sndbuf(sk);
+       return false;
+}
+EXPORT_SYMBOL(sk_page_frag_refill);
+
 static void __lock_sock(struct sock *sk)
        __releases(&sk->sk_lock.slock)
        __acquires(&sk->sk_lock.slock)
@@ -2174,8 +2193,8 @@ void sock_init_data(struct socket *sock, struct sock *sk)
        sk->sk_error_report     =       sock_def_error_report;
        sk->sk_destruct         =       sock_def_destruct;
 
-       sk->sk_sndmsg_page      =       NULL;
-       sk->sk_sndmsg_off       =       0;
+       sk->sk_frag.page        =       NULL;
+       sk->sk_frag.offset      =       0;
        sk->sk_peek_off         =       -1;
 
        sk->sk_peer_pid         =       NULL;
@@ -2418,6 +2437,12 @@ void sk_common_release(struct sock *sk)
        xfrm_sk_free_policy(sk);
 
        sk_refcnt_debug_release(sk);
+
+       if (sk->sk_frag.page) {
+               put_page(sk->sk_frag.page);
+               sk->sk_frag.page = NULL;
+       }
+
        sock_put(sk);
 }
 EXPORT_SYMBOL(sk_common_release);
index 9d8755e4a7a51e8415818f6c3542ef56c0697c08..602cd637182ebb321af6773d2ccfe9a8945d44c5 100644 (file)
@@ -172,8 +172,7 @@ static int __net_init diag_net_init(struct net *net)
                .input  = sock_diag_rcv,
        };
 
-       net->diag_nlsk = netlink_kernel_create(net, NETLINK_SOCK_DIAG,
-                                              THIS_MODULE, &cfg);
+       net->diag_nlsk = netlink_kernel_create(net, NETLINK_SOCK_DIAG, &cfg);
        return net->diag_nlsk == NULL ? -ENOMEM : 0;
 }
 
index 39895a65e54ae59d35144c656c37483b43efcf87..f5613d569c23a17a806d8579d08a3b103735a84a 100644 (file)
@@ -294,6 +294,26 @@ void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb,
 }
 EXPORT_SYMBOL(inet_proto_csum_replace4);
 
+void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
+                              const __be32 *from, const __be32 *to,
+                              int pseudohdr)
+{
+       __be32 diff[] = {
+               ~from[0], ~from[1], ~from[2], ~from[3],
+               to[0], to[1], to[2], to[3],
+       };
+       if (skb->ip_summed != CHECKSUM_PARTIAL) {
+               *sum = csum_fold(csum_partial(diff, sizeof(diff),
+                                ~csum_unfold(*sum)));
+               if (skb->ip_summed == CHECKSUM_COMPLETE && pseudohdr)
+                       skb->csum = ~csum_partial(diff, sizeof(diff),
+                                                 ~skb->csum);
+       } else if (pseudohdr)
+               *sum = ~csum_fold(csum_partial(diff, sizeof(diff),
+                                 csum_unfold(*sum)));
+}
+EXPORT_SYMBOL(inet_proto_csum_replace16);
+
 int mac_pton(const char *s, u8 *mac)
 {
        int i;
index 81f2bb62dea3a7fdd569636fa8649350eb264a35..70989e672304938a39cdf7b7a0dfd87b7497d2a9 100644 (file)
@@ -1319,7 +1319,7 @@ nla_put_failure:
 }
 
 static int dcbnl_notify(struct net_device *dev, int event, int cmd,
-                       u32 seq, u32 pid, int dcbx_ver)
+                       u32 seq, u32 portid, int dcbx_ver)
 {
        struct net *net = dev_net(dev);
        struct sk_buff *skb;
@@ -1330,7 +1330,7 @@ static int dcbnl_notify(struct net_device *dev, int event, int cmd,
        if (!ops)
                return -EOPNOTSUPP;
 
-       skb = dcbnl_newmsg(event, cmd, pid, seq, 0, &nlh);
+       skb = dcbnl_newmsg(event, cmd, portid, seq, 0, &nlh);
        if (!skb)
                return -ENOBUFS;
 
@@ -1353,16 +1353,16 @@ static int dcbnl_notify(struct net_device *dev, int event, int cmd,
 }
 
 int dcbnl_ieee_notify(struct net_device *dev, int event, int cmd,
-                     u32 seq, u32 pid)
+                     u32 seq, u32 portid)
 {
-       return dcbnl_notify(dev, event, cmd, seq, pid, DCB_CAP_DCBX_VER_IEEE);
+       return dcbnl_notify(dev, event, cmd, seq, portid, DCB_CAP_DCBX_VER_IEEE);
 }
 EXPORT_SYMBOL(dcbnl_ieee_notify);
 
 int dcbnl_cee_notify(struct net_device *dev, int event, int cmd,
-                    u32 seq, u32 pid)
+                    u32 seq, u32 portid)
 {
-       return dcbnl_notify(dev, event, cmd, seq, pid, DCB_CAP_DCBX_VER_CEE);
+       return dcbnl_notify(dev, event, cmd, seq, portid, DCB_CAP_DCBX_VER_CEE);
 }
 EXPORT_SYMBOL(dcbnl_cee_notify);
 
@@ -1656,7 +1656,7 @@ static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
        struct net_device *netdev;
        struct dcbmsg *dcb = nlmsg_data(nlh);
        struct nlattr *tb[DCB_ATTR_MAX + 1];
-       u32 pid = skb ? NETLINK_CB(skb).pid : 0;
+       u32 portid = skb ? NETLINK_CB(skb).portid : 0;
        int ret = -EINVAL;
        struct sk_buff *reply_skb;
        struct nlmsghdr *reply_nlh = NULL;
@@ -1690,7 +1690,7 @@ static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
                goto out;
        }
 
-       reply_skb = dcbnl_newmsg(fn->type, dcb->cmd, pid, nlh->nlmsg_seq,
+       reply_skb = dcbnl_newmsg(fn->type, dcb->cmd, portid, nlh->nlmsg_seq,
                                 nlh->nlmsg_flags, &reply_nlh);
        if (!reply_skb) {
                ret = -ENOBUFS;
@@ -1705,7 +1705,7 @@ static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
 
        nlmsg_end(reply_skb, reply_nlh);
 
-       ret = rtnl_unicast(reply_skb, &init_net, pid);
+       ret = rtnl_unicast(reply_skb, &init_net, portid);
 out:
        dev_put(netdev);
        return ret;
index 2ba1a2814c24e2260592c985706c5cbc58e45fdc..307c322d53bb889540aa3d302a69b13646f0cda3 100644 (file)
@@ -1313,10 +1313,10 @@ static int dn_shutdown(struct socket *sock, int how)
        if (scp->state == DN_O)
                goto out;
 
-       if (how != SHUTDOWN_MASK)
+       if (how != SHUT_RDWR)
                goto out;
 
-       sk->sk_shutdown = how;
+       sk->sk_shutdown = SHUTDOWN_MASK;
        dn_destroy_sock(sk);
        err = 0;
 
index f3924ab1e019f5efc22f6278021078889f43ce52..7b7e561412d379380a54678b1505da796184c49c 100644 (file)
@@ -667,12 +667,12 @@ static inline size_t dn_ifaddr_nlmsg_size(void)
 }
 
 static int dn_nl_fill_ifaddr(struct sk_buff *skb, struct dn_ifaddr *ifa,
-                            u32 pid, u32 seq, int event, unsigned int flags)
+                            u32 portid, u32 seq, int event, unsigned int flags)
 {
        struct ifaddrmsg *ifm;
        struct nlmsghdr *nlh;
 
-       nlh = nlmsg_put(skb, pid, seq, event, sizeof(*ifm), flags);
+       nlh = nlmsg_put(skb, portid, seq, event, sizeof(*ifm), flags);
        if (nlh == NULL)
                return -EMSGSIZE;
 
@@ -753,7 +753,7 @@ static int dn_nl_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
                        if (dn_idx < skip_naddr)
                                continue;
 
-                       if (dn_nl_fill_ifaddr(skb, ifa, NETLINK_CB(cb->skb).pid,
+                       if (dn_nl_fill_ifaddr(skb, ifa, NETLINK_CB(cb->skb).portid,
                                              cb->nlh->nlmsg_seq, RTM_NEWADDR,
                                              NLM_F_MULTI) < 0)
                                goto done;
index 85a3604c87c8d2cac6cac4d7049d9ba75877bf66..b57419cc41a486b3ab2ddc82643169c97b19d132 100644 (file)
@@ -961,7 +961,7 @@ static int dn_route_output_slow(struct dst_entry **pprt, const struct flowidn *o
                .saddr = oldflp->saddr,
                .flowidn_scope = RT_SCOPE_UNIVERSE,
                .flowidn_mark = oldflp->flowidn_mark,
-               .flowidn_iif = init_net.loopback_dev->ifindex,
+               .flowidn_iif = LOOPBACK_IFINDEX,
                .flowidn_oif = oldflp->flowidn_oif,
        };
        struct dn_route *rt = NULL;
@@ -979,7 +979,7 @@ static int dn_route_output_slow(struct dst_entry **pprt, const struct flowidn *o
                       "dn_route_output_slow: dst=%04x src=%04x mark=%d"
                       " iif=%d oif=%d\n", le16_to_cpu(oldflp->daddr),
                       le16_to_cpu(oldflp->saddr),
-                      oldflp->flowidn_mark, init_net.loopback_dev->ifindex,
+                      oldflp->flowidn_mark, LOOPBACK_IFINDEX,
                       oldflp->flowidn_oif);
 
        /* If we have an output interface, verify its a DECnet device */
@@ -1042,7 +1042,7 @@ source_ok:
                        if (!fld.daddr)
                                goto out;
                }
-               fld.flowidn_oif = init_net.loopback_dev->ifindex;
+               fld.flowidn_oif = LOOPBACK_IFINDEX;
                res.type = RTN_LOCAL;
                goto make_route;
        }
@@ -1543,7 +1543,7 @@ static int dn_route_input(struct sk_buff *skb)
        return dn_route_input_slow(skb);
 }
 
-static int dn_rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
+static int dn_rt_fill_info(struct sk_buff *skb, u32 portid, u32 seq,
                           int event, int nowait, unsigned int flags)
 {
        struct dn_route *rt = (struct dn_route *)skb_dst(skb);
@@ -1551,7 +1551,7 @@ static int dn_rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
        struct nlmsghdr *nlh;
        long expires;
 
-       nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags);
+       nlh = nlmsg_put(skb, portid, seq, event, sizeof(*r), flags);
        if (!nlh)
                return -EMSGSIZE;
 
@@ -1685,7 +1685,7 @@ static int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void
        if (rtm->rtm_flags & RTM_F_NOTIFY)
                rt->rt_flags |= RTCF_NOTIFY;
 
-       err = dn_rt_fill_info(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq, RTM_NEWROUTE, 0, 0);
+       err = dn_rt_fill_info(skb, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq, RTM_NEWROUTE, 0, 0);
 
        if (err == 0)
                goto out_free;
@@ -1694,7 +1694,7 @@ static int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void
                goto out_free;
        }
 
-       return rtnl_unicast(skb, &init_net, NETLINK_CB(in_skb).pid);
+       return rtnl_unicast(skb, &init_net, NETLINK_CB(in_skb).portid);
 
 out_free:
        kfree_skb(skb);
@@ -1737,7 +1737,7 @@ int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb)
                        if (idx < s_idx)
                                continue;
                        skb_dst_set(skb, dst_clone(&rt->dst));
-                       if (dn_rt_fill_info(skb, NETLINK_CB(cb->skb).pid,
+                       if (dn_rt_fill_info(skb, NETLINK_CB(cb->skb).portid,
                                        cb->nlh->nlmsg_seq, RTM_NEWROUTE,
                                        1, NLM_F_MULTI) <= 0) {
                                skb_dst_drop(skb);
index 16c986ab1228ec7f5cf5821253938705b8b6a808..f968c1b58f47d1892ee392eea366d67f4c687397 100644 (file)
@@ -291,14 +291,14 @@ static inline size_t dn_fib_nlmsg_size(struct dn_fib_info *fi)
        return payload;
 }
 
-static int dn_fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
+static int dn_fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
                        u32 tb_id, u8 type, u8 scope, void *dst, int dst_len,
                        struct dn_fib_info *fi, unsigned int flags)
 {
        struct rtmsg *rtm;
        struct nlmsghdr *nlh;
 
-       nlh = nlmsg_put(skb, pid, seq, event, sizeof(*rtm), flags);
+       nlh = nlmsg_put(skb, portid, seq, event, sizeof(*rtm), flags);
        if (!nlh)
                return -EMSGSIZE;
 
@@ -374,14 +374,14 @@ static void dn_rtmsg_fib(int event, struct dn_fib_node *f, int z, u32 tb_id,
                        struct nlmsghdr *nlh, struct netlink_skb_parms *req)
 {
        struct sk_buff *skb;
-       u32 pid = req ? req->pid : 0;
+       u32 portid = req ? req->portid : 0;
        int err = -ENOBUFS;
 
        skb = nlmsg_new(dn_fib_nlmsg_size(DN_FIB_INFO(f)), GFP_KERNEL);
        if (skb == NULL)
                goto errout;
 
-       err = dn_fib_dump_info(skb, pid, nlh->nlmsg_seq, event, tb_id,
+       err = dn_fib_dump_info(skb, portid, nlh->nlmsg_seq, event, tb_id,
                               f->fn_type, f->fn_scope, &f->fn_key, z,
                               DN_FIB_INFO(f), 0);
        if (err < 0) {
@@ -390,7 +390,7 @@ static void dn_rtmsg_fib(int event, struct dn_fib_node *f, int z, u32 tb_id,
                kfree_skb(skb);
                goto errout;
        }
-       rtnl_notify(skb, &init_net, pid, RTNLGRP_DECnet_ROUTE, nlh, GFP_KERNEL);
+       rtnl_notify(skb, &init_net, portid, RTNLGRP_DECnet_ROUTE, nlh, GFP_KERNEL);
        return;
 errout:
        if (err < 0)
@@ -411,7 +411,7 @@ static __inline__ int dn_hash_dump_bucket(struct sk_buff *skb,
                        continue;
                if (f->fn_state & DN_S_ZOMBIE)
                        continue;
-               if (dn_fib_dump_info(skb, NETLINK_CB(cb->skb).pid,
+               if (dn_fib_dump_info(skb, NETLINK_CB(cb->skb).portid,
                                cb->nlh->nlmsg_seq,
                                RTM_NEWROUTE,
                                tb->n,
index 11db0ecf342ff2f32901acdb24836055ca0e383c..dfe42012a044142dbc9c0bf6197f539fd1996a8b 100644 (file)
@@ -130,8 +130,7 @@ static int __init dn_rtmsg_init(void)
                .input  = dnrmg_receive_user_skb,
        };
 
-       dnrmg = netlink_kernel_create(&init_net,
-                                     NETLINK_DNRTMSG, THIS_MODULE, &cfg);
+       dnrmg = netlink_kernel_create(&init_net, NETLINK_DNRTMSG, &cfg);
        if (dnrmg == NULL) {
                printk(KERN_ERR "dn_rtmsg: Cannot create netlink socket");
                return -ENOMEM;
index d9507dd058182295a80bacf0b966e71602307e84..9807945a56d90e0e0feb5c32f5ebaedca6de208f 100644 (file)
@@ -259,7 +259,8 @@ static int __init init_dns_resolver(void)
        if (!cred)
                return -ENOMEM;
 
-       keyring = key_alloc(&key_type_keyring, ".dns_resolver", 0, 0, cred,
+       keyring = key_alloc(&key_type_keyring, ".dns_resolver",
+                           GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, cred,
                            (KEY_POS_ALL & ~KEY_POS_SETATTR) |
                            KEY_USR_VIEW | KEY_USR_READ,
                            KEY_ALLOC_NOT_IN_QUOTA);
index 88e7c2f3fa0d470404f6560db952cb30a9639a33..45295ca095717314294e1be8172a6e852fe1a679 100644 (file)
@@ -370,7 +370,7 @@ static int dsa_remove(struct platform_device *pdev)
        if (dst->link_poll_needed)
                del_timer_sync(&dst->link_poll_timer);
 
-       flush_work_sync(&dst->link_poll_work);
+       flush_work(&dst->link_poll_work);
 
        for (i = 0; i < dst->pd->nr_chips; i++) {
                struct dsa_switch *ds = dst->ds[i];
index 6a095225148e929bc5e3b8d7de2731229cc5f53c..6d42c17af96b4a606a545d2d72ad5e32cb53c3da 100644 (file)
@@ -1063,12 +1063,6 @@ out:
        return (err < 0 ? NETDEV_TX_BUSY : NETDEV_TX_OK);
 }
 
-static void lowpan_dev_free(struct net_device *dev)
-{
-       dev_put(lowpan_dev_info(dev)->real_dev);
-       free_netdev(dev);
-}
-
 static struct wpan_phy *lowpan_get_phy(const struct net_device *dev)
 {
        struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
@@ -1118,7 +1112,7 @@ static void lowpan_setup(struct net_device *dev)
        dev->netdev_ops         = &lowpan_netdev_ops;
        dev->header_ops         = &lowpan_header_ops;
        dev->ml_priv            = &lowpan_mlme;
-       dev->destructor         = lowpan_dev_free;
+       dev->destructor         = free_netdev;
 }
 
 static int lowpan_validate(struct nlattr *tb[], struct nlattr *data[])
@@ -1133,6 +1127,8 @@ static int lowpan_validate(struct nlattr *tb[], struct nlattr *data[])
 static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev,
        struct packet_type *pt, struct net_device *orig_dev)
 {
+       struct sk_buff *local_skb;
+
        if (!netif_running(dev))
                goto drop;
 
@@ -1144,7 +1140,12 @@ static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev,
        case LOWPAN_DISPATCH_IPHC:      /* ipv6 datagram */
        case LOWPAN_DISPATCH_FRAG1:     /* first fragment header */
        case LOWPAN_DISPATCH_FRAGN:     /* next fragments headers */
-               lowpan_process_data(skb);
+               local_skb = skb_clone(skb, GFP_ATOMIC);
+               if (!local_skb)
+                       goto drop;
+               lowpan_process_data(local_skb);
+
+               kfree_skb(skb);
                break;
        default:
                break;
@@ -1237,6 +1238,34 @@ static inline void __init lowpan_netlink_fini(void)
        rtnl_link_unregister(&lowpan_link_ops);
 }
 
+static int lowpan_device_event(struct notifier_block *unused,
+                               unsigned long event,
+                               void *ptr)
+{
+       struct net_device *dev = ptr;
+       LIST_HEAD(del_list);
+       struct lowpan_dev_record *entry, *tmp;
+
+       if (dev->type != ARPHRD_IEEE802154)
+               goto out;
+
+       if (event == NETDEV_UNREGISTER) {
+               list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) {
+                       if (lowpan_dev_info(entry->ldev)->real_dev == dev)
+                               lowpan_dellink(entry->ldev, &del_list);
+               }
+
+               unregister_netdevice_many(&del_list);
+       }
+
+out:
+       return NOTIFY_DONE;
+}
+
+static struct notifier_block lowpan_dev_notifier = {
+       .notifier_call = lowpan_device_event,
+};
+
 static struct packet_type lowpan_packet_type = {
        .type = __constant_htons(ETH_P_IEEE802154),
        .func = lowpan_rcv,
@@ -1251,6 +1280,12 @@ static int __init lowpan_init_module(void)
                goto out;
 
        dev_add_pack(&lowpan_packet_type);
+
+       err = register_netdevice_notifier(&lowpan_dev_notifier);
+       if (err < 0) {
+               dev_remove_pack(&lowpan_packet_type);
+               lowpan_netlink_fini();
+       }
 out:
        return err;
 }
@@ -1263,6 +1298,8 @@ static void __exit lowpan_cleanup_module(void)
 
        dev_remove_pack(&lowpan_packet_type);
 
+       unregister_netdevice_notifier(&lowpan_dev_notifier);
+
        /* Now 6lowpan packet_type is removed, so no new fragments are
         * expected on RX, therefore that's the time to clean incomplete
         * fragments.
index 1e9917124e75ccada73d34e3a19578155db4dec9..96bb08abece29408f0d9d65f453ee7f2c0c55c4e 100644 (file)
@@ -246,7 +246,7 @@ nla_put_failure:
 }
 EXPORT_SYMBOL(ieee802154_nl_start_confirm);
 
-static int ieee802154_nl_fill_iface(struct sk_buff *msg, u32 pid,
+static int ieee802154_nl_fill_iface(struct sk_buff *msg, u32 portid,
        u32 seq, int flags, struct net_device *dev)
 {
        void *hdr;
@@ -534,7 +534,7 @@ static int ieee802154_list_iface(struct sk_buff *skb,
        if (!msg)
                goto out_dev;
 
-       rc = ieee802154_nl_fill_iface(msg, info->snd_pid, info->snd_seq,
+       rc = ieee802154_nl_fill_iface(msg, info->snd_portid, info->snd_seq,
                        0, dev);
        if (rc < 0)
                goto out_free;
@@ -565,7 +565,7 @@ static int ieee802154_dump_iface(struct sk_buff *skb,
                if (idx < s_idx || (dev->type != ARPHRD_IEEE802154))
                        goto cont;
 
-               if (ieee802154_nl_fill_iface(skb, NETLINK_CB(cb->skb).pid,
+               if (ieee802154_nl_fill_iface(skb, NETLINK_CB(cb->skb).portid,
                        cb->nlh->nlmsg_seq, NLM_F_MULTI, dev) < 0)
                        break;
 cont:
index d54be34cca9442b2cb1624ad167e205bea718832..22b1a7058fd3f841d94ee27655840c1ae325f011 100644 (file)
@@ -35,7 +35,7 @@
 
 #include "ieee802154.h"
 
-static int ieee802154_nl_fill_phy(struct sk_buff *msg, u32 pid,
+static int ieee802154_nl_fill_phy(struct sk_buff *msg, u32 portid,
        u32 seq, int flags, struct wpan_phy *phy)
 {
        void *hdr;
@@ -105,7 +105,7 @@ static int ieee802154_list_phy(struct sk_buff *skb,
        if (!msg)
                goto out_dev;
 
-       rc = ieee802154_nl_fill_phy(msg, info->snd_pid, info->snd_seq,
+       rc = ieee802154_nl_fill_phy(msg, info->snd_portid, info->snd_seq,
                        0, phy);
        if (rc < 0)
                goto out_free;
@@ -138,7 +138,7 @@ static int ieee802154_dump_phy_iter(struct wpan_phy *phy, void *_data)
                return 0;
 
        rc = ieee802154_nl_fill_phy(data->skb,
-                       NETLINK_CB(data->cb->skb).pid,
+                       NETLINK_CB(data->cb->skb).portid,
                        data->cb->nlh->nlmsg_seq,
                        NLM_F_MULTI,
                        phy);
index fe4582ca969a4ff85c862f8fd96d3b6ec3d03a4d..766c596585631e1001bdd64c80639554c6ab7831 100644 (file)
@@ -212,6 +212,26 @@ int inet_listen(struct socket *sock, int backlog)
         * we can only allow the backlog to be adjusted.
         */
        if (old_state != TCP_LISTEN) {
+               /* Check special setups for testing purpose to enable TFO w/o
+                * requiring TCP_FASTOPEN sockopt.
+                * Note that only TCP sockets (SOCK_STREAM) will reach here.
+                * Also fastopenq may already been allocated because this
+                * socket was in TCP_LISTEN state previously but was
+                * shutdown() (rather than close()).
+                */
+               if ((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) != 0 &&
+                   inet_csk(sk)->icsk_accept_queue.fastopenq == NULL) {
+                       if ((sysctl_tcp_fastopen & TFO_SERVER_WO_SOCKOPT1) != 0)
+                               err = fastopen_init_queue(sk, backlog);
+                       else if ((sysctl_tcp_fastopen &
+                                 TFO_SERVER_WO_SOCKOPT2) != 0)
+                               err = fastopen_init_queue(sk,
+                                   ((uint)sysctl_tcp_fastopen) >> 16);
+                       else
+                               err = 0;
+                       if (err)
+                               goto out;
+               }
                err = inet_csk_listen_start(sk, backlog);
                if (err)
                        goto out;
@@ -701,7 +721,8 @@ int inet_accept(struct socket *sock, struct socket *newsock, int flags)
 
        sock_rps_record_flow(sk2);
        WARN_ON(!((1 << sk2->sk_state) &
-                 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_CLOSE)));
+                 (TCPF_ESTABLISHED | TCPF_SYN_RECV |
+                 TCPF_CLOSE_WAIT | TCPF_CLOSE)));
 
        sock_graft(sk2, newsock);
 
@@ -1364,7 +1385,7 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
        if (*(u8 *)iph != 0x45)
                goto out_unlock;
 
-       if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
+       if (unlikely(ip_fast_csum((u8 *)iph, 5)))
                goto out_unlock;
 
        id = ntohl(*(__be32 *)&iph->id);
@@ -1380,7 +1401,6 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
                iph2 = ip_hdr(p);
 
                if ((iph->protocol ^ iph2->protocol) |
-                   (iph->tos ^ iph2->tos) |
                    ((__force u32)iph->saddr ^ (__force u32)iph2->saddr) |
                    ((__force u32)iph->daddr ^ (__force u32)iph2->daddr)) {
                        NAPI_GRO_CB(p)->same_flow = 0;
@@ -1390,6 +1410,7 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
                /* All fields must match except length and checksum. */
                NAPI_GRO_CB(p)->flush |=
                        (iph->ttl ^ iph2->ttl) |
+                       (iph->tos ^ iph2->tos) |
                        ((u16)(ntohs(iph2->id) + NAPI_GRO_CB(p)->count) ^ id);
 
                NAPI_GRO_CB(p)->flush |= flush;
index e12fad773852b5271afafa414ffc1f2706ff4428..2a6abc163ed2fa9cc15d4be2f4e9e795ec9f08b5 100644 (file)
@@ -94,25 +94,22 @@ static const struct nla_policy ifa_ipv4_policy[IFA_MAX+1] = {
        [IFA_LABEL]             = { .type = NLA_STRING, .len = IFNAMSIZ - 1 },
 };
 
-/* inet_addr_hash's shifting is dependent upon this IN4_ADDR_HSIZE
- * value.  So if you change this define, make appropriate changes to
- * inet_addr_hash as well.
- */
-#define IN4_ADDR_HSIZE 256
+#define IN4_ADDR_HSIZE_SHIFT   8
+#define IN4_ADDR_HSIZE         (1U << IN4_ADDR_HSIZE_SHIFT)
+
 static struct hlist_head inet_addr_lst[IN4_ADDR_HSIZE];
 static DEFINE_SPINLOCK(inet_addr_hash_lock);
 
-static inline unsigned int inet_addr_hash(struct net *net, __be32 addr)
+static u32 inet_addr_hash(struct net *net, __be32 addr)
 {
-       u32 val = (__force u32) addr ^ hash_ptr(net, 8);
+       u32 val = (__force u32) addr ^ net_hash_mix(net);
 
-       return ((val ^ (val >> 8) ^ (val >> 16) ^ (val >> 24)) &
-               (IN4_ADDR_HSIZE - 1));
+       return hash_32(val, IN4_ADDR_HSIZE_SHIFT);
 }
 
 static void inet_hash_insert(struct net *net, struct in_ifaddr *ifa)
 {
-       unsigned int hash = inet_addr_hash(net, ifa->ifa_local);
+       u32 hash = inet_addr_hash(net, ifa->ifa_local);
 
        spin_lock(&inet_addr_hash_lock);
        hlist_add_head_rcu(&ifa->hash, &inet_addr_lst[hash]);
@@ -136,18 +133,18 @@ static void inet_hash_remove(struct in_ifaddr *ifa)
  */
 struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref)
 {
-       unsigned int hash = inet_addr_hash(net, addr);
+       u32 hash = inet_addr_hash(net, addr);
        struct net_device *result = NULL;
        struct in_ifaddr *ifa;
        struct hlist_node *node;
 
        rcu_read_lock();
        hlist_for_each_entry_rcu(ifa, node, &inet_addr_lst[hash], hash) {
-               struct net_device *dev = ifa->ifa_dev->dev;
-
-               if (!net_eq(dev_net(dev), net))
-                       continue;
                if (ifa->ifa_local == addr) {
+                       struct net_device *dev = ifa->ifa_dev->dev;
+
+                       if (!net_eq(dev_net(dev), net))
+                               continue;
                        result = dev;
                        break;
                }
@@ -182,10 +179,10 @@ static void inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
 static void devinet_sysctl_register(struct in_device *idev);
 static void devinet_sysctl_unregister(struct in_device *idev);
 #else
-static inline void devinet_sysctl_register(struct in_device *idev)
+static void devinet_sysctl_register(struct in_device *idev)
 {
 }
-static inline void devinet_sysctl_unregister(struct in_device *idev)
+static void devinet_sysctl_unregister(struct in_device *idev)
 {
 }
 #endif
@@ -205,7 +202,7 @@ static void inet_rcu_free_ifa(struct rcu_head *head)
        kfree(ifa);
 }
 
-static inline void inet_free_ifa(struct in_ifaddr *ifa)
+static void inet_free_ifa(struct in_ifaddr *ifa)
 {
        call_rcu(&ifa->rcu_head, inet_rcu_free_ifa);
 }
@@ -314,7 +311,7 @@ int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b)
 }
 
 static void __inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
-                        int destroy, struct nlmsghdr *nlh, u32 pid)
+                        int destroy, struct nlmsghdr *nlh, u32 portid)
 {
        struct in_ifaddr *promote = NULL;
        struct in_ifaddr *ifa, *ifa1 = *ifap;
@@ -348,7 +345,7 @@ static void __inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
                                inet_hash_remove(ifa);
                                *ifap1 = ifa->ifa_next;
 
-                               rtmsg_ifa(RTM_DELADDR, ifa, nlh, pid);
+                               rtmsg_ifa(RTM_DELADDR, ifa, nlh, portid);
                                blocking_notifier_call_chain(&inetaddr_chain,
                                                NETDEV_DOWN, ifa);
                                inet_free_ifa(ifa);
@@ -385,7 +382,7 @@ static void __inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
           is valid, it will try to restore deleted routes... Grr.
           So that, this order is correct.
         */
-       rtmsg_ifa(RTM_DELADDR, ifa1, nlh, pid);
+       rtmsg_ifa(RTM_DELADDR, ifa1, nlh, portid);
        blocking_notifier_call_chain(&inetaddr_chain, NETDEV_DOWN, ifa1);
 
        if (promote) {
@@ -398,7 +395,7 @@ static void __inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
                }
 
                promote->ifa_flags &= ~IFA_F_SECONDARY;
-               rtmsg_ifa(RTM_NEWADDR, promote, nlh, pid);
+               rtmsg_ifa(RTM_NEWADDR, promote, nlh, portid);
                blocking_notifier_call_chain(&inetaddr_chain,
                                NETDEV_UP, promote);
                for (ifa = next_sec; ifa; ifa = ifa->ifa_next) {
@@ -420,7 +417,7 @@ static void inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
 }
 
 static int __inet_insert_ifa(struct in_ifaddr *ifa, struct nlmsghdr *nlh,
-                            u32 pid)
+                            u32 portid)
 {
        struct in_device *in_dev = ifa->ifa_dev;
        struct in_ifaddr *ifa1, **ifap, **last_primary;
@@ -467,7 +464,7 @@ static int __inet_insert_ifa(struct in_ifaddr *ifa, struct nlmsghdr *nlh,
        /* Send message first, then call notifier.
           Notifier will trigger FIB update, so that
           listeners of netlink will know about new ifaddr */
-       rtmsg_ifa(RTM_NEWADDR, ifa, nlh, pid);
+       rtmsg_ifa(RTM_NEWADDR, ifa, nlh, portid);
        blocking_notifier_call_chain(&inetaddr_chain, NETDEV_UP, ifa);
 
        return 0;
@@ -566,7 +563,7 @@ static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg
                    !inet_ifa_match(nla_get_be32(tb[IFA_ADDRESS]), ifa)))
                        continue;
 
-               __inet_del_ifa(in_dev, ifap, 1, nlh, NETLINK_CB(skb).pid);
+               __inet_del_ifa(in_dev, ifap, 1, nlh, NETLINK_CB(skb).portid);
                return 0;
        }
 
@@ -652,14 +649,14 @@ static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg
        if (IS_ERR(ifa))
                return PTR_ERR(ifa);
 
-       return __inet_insert_ifa(ifa, nlh, NETLINK_CB(skb).pid);
+       return __inet_insert_ifa(ifa, nlh, NETLINK_CB(skb).portid);
 }
 
 /*
  *     Determine a default network mask, based on the IP address.
  */
 
-static inline int inet_abc_len(__be32 addr)
+static int inet_abc_len(__be32 addr)
 {
        int rc = -1;    /* Something else, probably a multicast. */
 
@@ -1124,7 +1121,7 @@ skip:
        }
 }
 
-static inline bool inetdev_valid_mtu(unsigned int mtu)
+static bool inetdev_valid_mtu(unsigned int mtu)
 {
        return mtu >= 68;
 }
@@ -1239,7 +1236,7 @@ static struct notifier_block ip_netdev_notifier = {
        .notifier_call = inetdev_event,
 };
 
-static inline size_t inet_nlmsg_size(void)
+static size_t inet_nlmsg_size(void)
 {
        return NLMSG_ALIGN(sizeof(struct ifaddrmsg))
               + nla_total_size(4) /* IFA_ADDRESS */
@@ -1249,12 +1246,12 @@ static inline size_t inet_nlmsg_size(void)
 }
 
 static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa,
-                           u32 pid, u32 seq, int event, unsigned int flags)
+                           u32 portid, u32 seq, int event, unsigned int flags)
 {
        struct ifaddrmsg *ifm;
        struct nlmsghdr  *nlh;
 
-       nlh = nlmsg_put(skb, pid, seq, event, sizeof(*ifm), flags);
+       nlh = nlmsg_put(skb, portid, seq, event, sizeof(*ifm), flags);
        if (nlh == NULL)
                return -EMSGSIZE;
 
@@ -1316,7 +1313,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
                                if (ip_idx < s_ip_idx)
                                        continue;
                                if (inet_fill_ifaddr(skb, ifa,
-                                            NETLINK_CB(cb->skb).pid,
+                                            NETLINK_CB(cb->skb).portid,
                                             cb->nlh->nlmsg_seq,
                                             RTM_NEWADDR, NLM_F_MULTI) <= 0) {
                                        rcu_read_unlock();
@@ -1338,7 +1335,7 @@ done:
 }
 
 static void rtmsg_ifa(int event, struct in_ifaddr *ifa, struct nlmsghdr *nlh,
-                     u32 pid)
+                     u32 portid)
 {
        struct sk_buff *skb;
        u32 seq = nlh ? nlh->nlmsg_seq : 0;
@@ -1350,14 +1347,14 @@ static void rtmsg_ifa(int event, struct in_ifaddr *ifa, struct nlmsghdr *nlh,
        if (skb == NULL)
                goto errout;
 
-       err = inet_fill_ifaddr(skb, ifa, pid, seq, event, 0);
+       err = inet_fill_ifaddr(skb, ifa, portid, seq, event, 0);
        if (err < 0) {
                /* -EMSGSIZE implies BUG in inet_nlmsg_size() */
                WARN_ON(err == -EMSGSIZE);
                kfree_skb(skb);
                goto errout;
        }
-       rtnl_notify(skb, net, pid, RTNLGRP_IPV4_IFADDR, nlh, GFP_KERNEL);
+       rtnl_notify(skb, net, portid, RTNLGRP_IPV4_IFADDR, nlh, GFP_KERNEL);
        return;
 errout:
        if (err < 0)
index 8e2b475da9faca9f7e27482f7e416908e7cf66fa..68c93d1bb03adb9fef46ff264a783dad82a962ea 100644 (file)
@@ -218,7 +218,7 @@ __be32 fib_compute_spec_dst(struct sk_buff *skb)
        scope = RT_SCOPE_UNIVERSE;
        if (!ipv4_is_zeronet(ip_hdr(skb)->saddr)) {
                fl4.flowi4_oif = 0;
-               fl4.flowi4_iif = net->loopback_dev->ifindex;
+               fl4.flowi4_iif = LOOPBACK_IFINDEX;
                fl4.daddr = ip_hdr(skb)->saddr;
                fl4.saddr = 0;
                fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
@@ -557,7 +557,7 @@ static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
        cfg->fc_flags = rtm->rtm_flags;
        cfg->fc_nlflags = nlh->nlmsg_flags;
 
-       cfg->fc_nlinfo.pid = NETLINK_CB(skb).pid;
+       cfg->fc_nlinfo.portid = NETLINK_CB(skb).portid;
        cfg->fc_nlinfo.nlh = nlh;
        cfg->fc_nlinfo.nl_net = net;
 
@@ -955,7 +955,7 @@ static void nl_fib_input(struct sk_buff *skb)
        struct fib_result_nl *frn;
        struct nlmsghdr *nlh;
        struct fib_table *tb;
-       u32 pid;
+       u32 portid;
 
        net = sock_net(skb->sk);
        nlh = nlmsg_hdr(skb);
@@ -973,10 +973,10 @@ static void nl_fib_input(struct sk_buff *skb)
 
        nl_fib_lookup(frn, tb);
 
-       pid = NETLINK_CB(skb).pid;      /* pid of sending process */
-       NETLINK_CB(skb).pid = 0;        /* from kernel */
+       portid = NETLINK_CB(skb).portid;      /* pid of sending process */
+       NETLINK_CB(skb).portid = 0;        /* from kernel */
        NETLINK_CB(skb).dst_group = 0;  /* unicast */
-       netlink_unicast(net->ipv4.fibnl, skb, pid, MSG_DONTWAIT);
+       netlink_unicast(net->ipv4.fibnl, skb, portid, MSG_DONTWAIT);
 }
 
 static int __net_init nl_fib_lookup_init(struct net *net)
@@ -986,7 +986,7 @@ static int __net_init nl_fib_lookup_init(struct net *net)
                .input  = nl_fib_input,
        };
 
-       sk = netlink_kernel_create(net, NETLINK_FIB_LOOKUP, THIS_MODULE, &cfg);
+       sk = netlink_kernel_create(net, NETLINK_FIB_LOOKUP, &cfg);
        if (sk == NULL)
                return -EAFNOSUPPORT;
        net->ipv4.fibnl = sk;
@@ -1041,7 +1041,7 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
 static int fib_netdev_event(struct notifier_block *this, unsigned long event, void *ptr)
 {
        struct net_device *dev = ptr;
-       struct in_device *in_dev = __in_dev_get_rtnl(dev);
+       struct in_device *in_dev;
        struct net *net = dev_net(dev);
 
        if (event == NETDEV_UNREGISTER) {
@@ -1050,8 +1050,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
                return NOTIFY_DONE;
        }
 
-       if (!in_dev)
-               return NOTIFY_DONE;
+       in_dev = __in_dev_get_rtnl(dev);
 
        switch (event) {
        case NETDEV_UP:
@@ -1062,16 +1061,14 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
                fib_sync_up(dev);
 #endif
                atomic_inc(&net->ipv4.dev_addr_genid);
-               rt_cache_flush(dev_net(dev));
+               rt_cache_flush(net);
                break;
        case NETDEV_DOWN:
                fib_disable_ip(dev, 0);
                break;
        case NETDEV_CHANGEMTU:
        case NETDEV_CHANGE:
-               rt_cache_flush(dev_net(dev));
-               break;
-       case NETDEV_UNREGISTER_BATCH:
+               rt_cache_flush(net);
                break;
        }
        return NOTIFY_DONE;
index da80dc14cc76f51cb79e5c8042173396c036051e..3509065e409ab2782fe23cc8a174e369f0da501d 100644 (file)
@@ -391,7 +391,7 @@ void rtmsg_fib(int event, __be32 key, struct fib_alias *fa,
        if (skb == NULL)
                goto errout;
 
-       err = fib_dump_info(skb, info->pid, seq, event, tb_id,
+       err = fib_dump_info(skb, info->portid, seq, event, tb_id,
                            fa->fa_type, key, dst_len,
                            fa->fa_tos, fa->fa_info, nlm_flags);
        if (err < 0) {
@@ -400,7 +400,7 @@ void rtmsg_fib(int event, __be32 key, struct fib_alias *fa,
                kfree_skb(skb);
                goto errout;
        }
-       rtnl_notify(skb, info->nl_net, info->pid, RTNLGRP_IPV4_ROUTE,
+       rtnl_notify(skb, info->nl_net, info->portid, RTNLGRP_IPV4_ROUTE,
                    info->nlh, GFP_KERNEL);
        return;
 errout:
@@ -989,14 +989,14 @@ failure:
        return ERR_PTR(err);
 }
 
-int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
+int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
                  u32 tb_id, u8 type, __be32 dst, int dst_len, u8 tos,
                  struct fib_info *fi, unsigned int flags)
 {
        struct nlmsghdr *nlh;
        struct rtmsg *rtm;
 
-       nlh = nlmsg_put(skb, pid, seq, event, sizeof(*rtm), flags);
+       nlh = nlmsg_put(skb, portid, seq, event, sizeof(*rtm), flags);
        if (nlh == NULL)
                return -EMSGSIZE;
 
index d1b93595b4a7dce1e7701bb713bd5f0bc5adf1f6..31d771ca9a709f71328c1734433cecba1fde40fd 100644 (file)
@@ -1550,7 +1550,8 @@ int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
                 * state.directly.
                 */
                if (pref_mismatch) {
-                       int mp = KEYLENGTH - fls(pref_mismatch);
+                       /* fls(x) = __fls(x) + 1 */
+                       int mp = KEYLENGTH - __fls(pref_mismatch) - 1;
 
                        if (tkey_extract_bits(cn->key, mp, cn->pos - mp) != 0)
                                goto backtrace;
@@ -1655,7 +1656,12 @@ int fib_table_delete(struct fib_table *tb, struct fib_config *cfg)
        if (!l)
                return -ESRCH;
 
-       fa_head = get_fa_head(l, plen);
+       li = find_leaf_info(l, plen);
+
+       if (!li)
+               return -ESRCH;
+
+       fa_head = &li->falh;
        fa = fib_find_alias(fa_head, tos, 0);
 
        if (!fa)
@@ -1691,9 +1697,6 @@ int fib_table_delete(struct fib_table *tb, struct fib_config *cfg)
        rtmsg_fib(RTM_DELROUTE, htonl(key), fa, plen, tb->tb_id,
                  &cfg->fc_nlinfo, 0);
 
-       l = fib_find_node(t, key);
-       li = find_leaf_info(l, plen);
-
        list_del_rcu(&fa->fa_list);
 
        if (!plen)
@@ -1870,7 +1873,7 @@ static int fn_trie_dump_fa(t_key key, int plen, struct list_head *fah,
                        continue;
                }
 
-               if (fib_dump_info(skb, NETLINK_CB(cb->skb).pid,
+               if (fib_dump_info(skb, NETLINK_CB(cb->skb).portid,
                                  cb->nlh->nlmsg_seq,
                                  RTM_NEWROUTE,
                                  tb->tb_id,
index 6699f23e6f55b0012cc50b74030f415362efbed1..736ab70fd17981c95085ee0fad163aaeca39133c 100644 (file)
@@ -815,14 +815,15 @@ static int igmp_marksources(struct ip_mc_list *pmc, int nsrcs, __be32 *srcs)
        return 1;
 }
 
-static void igmp_heard_report(struct in_device *in_dev, __be32 group)
+/* return true if packet was dropped */
+static bool igmp_heard_report(struct in_device *in_dev, __be32 group)
 {
        struct ip_mc_list *im;
 
        /* Timers are only set for non-local groups */
 
        if (group == IGMP_ALL_HOSTS)
-               return;
+               return false;
 
        rcu_read_lock();
        for_each_pmc_rcu(in_dev, im) {
@@ -832,9 +833,11 @@ static void igmp_heard_report(struct in_device *in_dev, __be32 group)
                }
        }
        rcu_read_unlock();
+       return false;
 }
 
-static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
+/* return true if packet was dropped */
+static bool igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
        int len)
 {
        struct igmphdr          *ih = igmp_hdr(skb);
@@ -866,7 +869,7 @@ static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
                /* clear deleted report items */
                igmpv3_clear_delrec(in_dev);
        } else if (len < 12) {
-               return; /* ignore bogus packet; freed by caller */
+               return true;    /* ignore bogus packet; freed by caller */
        } else if (IGMP_V1_SEEN(in_dev)) {
                /* This is a v3 query with v1 queriers present */
                max_delay = IGMP_Query_Response_Interval;
@@ -883,13 +886,13 @@ static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
                        max_delay = 1;  /* can't mod w/ 0 */
        } else { /* v3 */
                if (!pskb_may_pull(skb, sizeof(struct igmpv3_query)))
-                       return;
+                       return true;
 
                ih3 = igmpv3_query_hdr(skb);
                if (ih3->nsrcs) {
                        if (!pskb_may_pull(skb, sizeof(struct igmpv3_query)
                                           + ntohs(ih3->nsrcs)*sizeof(__be32)))
-                               return;
+                               return true;
                        ih3 = igmpv3_query_hdr(skb);
                }
 
@@ -901,9 +904,9 @@ static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
                        in_dev->mr_qrv = ih3->qrv;
                if (!group) { /* general query */
                        if (ih3->nsrcs)
-                               return; /* no sources allowed */
+                               return false;   /* no sources allowed */
                        igmp_gq_start_timer(in_dev);
-                       return;
+                       return false;
                }
                /* mark sources to include, if group & source-specific */
                mark = ih3->nsrcs != 0;
@@ -939,6 +942,7 @@ static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
                        igmp_mod_timer(im, max_delay);
        }
        rcu_read_unlock();
+       return false;
 }
 
 /* called in rcu_read_lock() section */
@@ -948,6 +952,7 @@ int igmp_rcv(struct sk_buff *skb)
        struct igmphdr *ih;
        struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
        int len = skb->len;
+       bool dropped = true;
 
        if (in_dev == NULL)
                goto drop;
@@ -969,7 +974,7 @@ int igmp_rcv(struct sk_buff *skb)
        ih = igmp_hdr(skb);
        switch (ih->type) {
        case IGMP_HOST_MEMBERSHIP_QUERY:
-               igmp_heard_query(in_dev, skb, len);
+               dropped = igmp_heard_query(in_dev, skb, len);
                break;
        case IGMP_HOST_MEMBERSHIP_REPORT:
        case IGMPV2_HOST_MEMBERSHIP_REPORT:
@@ -979,7 +984,7 @@ int igmp_rcv(struct sk_buff *skb)
                /* don't rely on MC router hearing unicast reports */
                if (skb->pkt_type == PACKET_MULTICAST ||
                    skb->pkt_type == PACKET_BROADCAST)
-                       igmp_heard_report(in_dev, ih->group);
+                       dropped = igmp_heard_report(in_dev, ih->group);
                break;
        case IGMP_PIM:
 #ifdef CONFIG_IP_PIMSM_V1
@@ -997,7 +1002,10 @@ int igmp_rcv(struct sk_buff *skb)
        }
 
 drop:
-       kfree_skb(skb);
+       if (dropped)
+               kfree_skb(skb);
+       else
+               consume_skb(skb);
        return 0;
 }
 
@@ -1896,6 +1904,7 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
        rtnl_unlock();
        return ret;
 }
+EXPORT_SYMBOL(ip_mc_leave_group);
 
 int ip_mc_source(int add, int omode, struct sock *sk, struct
        ip_mreq_source *mreqs, int ifindex)
@@ -2435,6 +2444,8 @@ static int igmp_mc_seq_show(struct seq_file *seq, void *v)
                struct ip_mc_list *im = (struct ip_mc_list *)v;
                struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);
                char   *querier;
+               long delta;
+
 #ifdef CONFIG_IP_MULTICAST
                querier = IGMP_V1_SEEN(state->in_dev) ? "V1" :
                          IGMP_V2_SEEN(state->in_dev) ? "V2" :
@@ -2448,11 +2459,12 @@ static int igmp_mc_seq_show(struct seq_file *seq, void *v)
                                   state->dev->ifindex, state->dev->name, state->in_dev->mc_count, querier);
                }
 
+               delta = im->timer.expires - jiffies;
                seq_printf(seq,
                           "\t\t\t\t%08X %5d %d:%08lX\t\t%d\n",
                           im->multiaddr, im->users,
-                          im->tm_running, im->tm_running ?
-                          jiffies_to_clock_t(im->timer.expires-jiffies) : 0,
+                          im->tm_running,
+                          im->tm_running ? jiffies_delta_to_clock_t(delta) : 0,
                           im->reporter);
        }
        return 0;
index 7f75f21d7b8346e0279364c511117582f3c1f342..f0c5b9c1a95714e2e206cf6cd178a90626271fcc 100644 (file)
@@ -283,7 +283,9 @@ static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
 struct sock *inet_csk_accept(struct sock *sk, int flags, int *err)
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
+       struct request_sock_queue *queue = &icsk->icsk_accept_queue;
        struct sock *newsk;
+       struct request_sock *req;
        int error;
 
        lock_sock(sk);
@@ -296,7 +298,7 @@ struct sock *inet_csk_accept(struct sock *sk, int flags, int *err)
                goto out_err;
 
        /* Find already established connection */
-       if (reqsk_queue_empty(&icsk->icsk_accept_queue)) {
+       if (reqsk_queue_empty(queue)) {
                long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
 
                /* If this is a non blocking socket don't sleep */
@@ -308,14 +310,32 @@ struct sock *inet_csk_accept(struct sock *sk, int flags, int *err)
                if (error)
                        goto out_err;
        }
-
-       newsk = reqsk_queue_get_child(&icsk->icsk_accept_queue, sk);
-       WARN_ON(newsk->sk_state == TCP_SYN_RECV);
+       req = reqsk_queue_remove(queue);
+       newsk = req->sk;
+
+       sk_acceptq_removed(sk);
+       if (sk->sk_protocol == IPPROTO_TCP && queue->fastopenq != NULL) {
+               spin_lock_bh(&queue->fastopenq->lock);
+               if (tcp_rsk(req)->listener) {
+                       /* We are still waiting for the final ACK from 3WHS
+                        * so can't free req now. Instead, we set req->sk to
+                        * NULL to signify that the child socket is taken
+                        * so reqsk_fastopen_remove() will free the req
+                        * when 3WHS finishes (or is aborted).
+                        */
+                       req->sk = NULL;
+                       req = NULL;
+               }
+               spin_unlock_bh(&queue->fastopenq->lock);
+       }
 out:
        release_sock(sk);
+       if (req)
+               __reqsk_free(req);
        return newsk;
 out_err:
        newsk = NULL;
+       req = NULL;
        *err = error;
        goto out;
 }
@@ -720,13 +740,14 @@ EXPORT_SYMBOL_GPL(inet_csk_listen_start);
 void inet_csk_listen_stop(struct sock *sk)
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
+       struct request_sock_queue *queue = &icsk->icsk_accept_queue;
        struct request_sock *acc_req;
        struct request_sock *req;
 
        inet_csk_delete_keepalive_timer(sk);
 
        /* make all the listen_opt local to us */
-       acc_req = reqsk_queue_yank_acceptq(&icsk->icsk_accept_queue);
+       acc_req = reqsk_queue_yank_acceptq(queue);
 
        /* Following specs, it would be better either to send FIN
         * (and enter FIN-WAIT-1, it is normal close)
@@ -736,7 +757,7 @@ void inet_csk_listen_stop(struct sock *sk)
         * To be honest, we are not able to make either
         * of the variants now.                 --ANK
         */
-       reqsk_queue_destroy(&icsk->icsk_accept_queue);
+       reqsk_queue_destroy(queue);
 
        while ((req = acc_req) != NULL) {
                struct sock *child = req->sk;
@@ -754,6 +775,19 @@ void inet_csk_listen_stop(struct sock *sk)
 
                percpu_counter_inc(sk->sk_prot->orphan_count);
 
+               if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->listener) {
+                       BUG_ON(tcp_sk(child)->fastopen_rsk != req);
+                       BUG_ON(sk != tcp_rsk(req)->listener);
+
+                       /* Paranoid, to prevent race condition if
+                        * an inbound pkt destined for child is
+                        * blocked by sock lock in tcp_v4_rcv().
+                        * Also to satisfy an assertion in
+                        * tcp_v4_destroy_sock().
+                        */
+                       tcp_sk(child)->fastopen_rsk = NULL;
+                       sock_put(sk);
+               }
                inet_csk_destroy_sock(child);
 
                bh_unlock_sock(child);
@@ -763,6 +797,17 @@ void inet_csk_listen_stop(struct sock *sk)
                sk_acceptq_removed(sk);
                __reqsk_free(req);
        }
+       if (queue->fastopenq != NULL) {
+               /* Free all the reqs queued in rskq_rst_head. */
+               spin_lock_bh(&queue->fastopenq->lock);
+               acc_req = queue->fastopenq->rskq_rst_head;
+               queue->fastopenq->rskq_rst_head = NULL;
+               spin_unlock_bh(&queue->fastopenq->lock);
+               while ((req = acc_req) != NULL) {
+                       acc_req = req->dl_next;
+                       __reqsk_free(req);
+               }
+       }
        WARN_ON(sk->sk_ack_backlog);
 }
 EXPORT_SYMBOL_GPL(inet_csk_listen_stop);
index 570e61f9611fe9f62bf3513afc9a5abb365544b7..535584c00f9118fe33a17e79b858e66935f424f9 100644 (file)
@@ -69,7 +69,8 @@ static inline void inet_diag_unlock_handler(
 
 int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
                              struct sk_buff *skb, struct inet_diag_req_v2 *req,
-                             u32 pid, u32 seq, u16 nlmsg_flags,
+                             struct user_namespace *user_ns,                   
+                             u32 portid, u32 seq, u16 nlmsg_flags,
                              const struct nlmsghdr *unlh)
 {
        const struct inet_sock *inet = inet_sk(sk);
@@ -83,7 +84,7 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
        handler = inet_diag_table[req->sdiag_protocol];
        BUG_ON(handler == NULL);
 
-       nlh = nlmsg_put(skb, pid, seq, unlh->nlmsg_type, sizeof(*r),
+       nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
                        nlmsg_flags);
        if (!nlh)
                return -EMSGSIZE;
@@ -124,7 +125,7 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
        }
 #endif
 
-       r->idiag_uid = sock_i_uid(sk);
+       r->idiag_uid = from_kuid_munged(user_ns, sock_i_uid(sk));
        r->idiag_inode = sock_i_ino(sk);
 
        if (ext & (1 << (INET_DIAG_MEMINFO - 1))) {
@@ -199,23 +200,24 @@ EXPORT_SYMBOL_GPL(inet_sk_diag_fill);
 
 static int inet_csk_diag_fill(struct sock *sk,
                              struct sk_buff *skb, struct inet_diag_req_v2 *req,
-                             u32 pid, u32 seq, u16 nlmsg_flags,
+                             struct user_namespace *user_ns,
+                             u32 portid, u32 seq, u16 nlmsg_flags,
                              const struct nlmsghdr *unlh)
 {
        return inet_sk_diag_fill(sk, inet_csk(sk),
-                       skb, req, pid, seq, nlmsg_flags, unlh);
+                       skb, req, user_ns, portid, seq, nlmsg_flags, unlh);
 }
 
 static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
                               struct sk_buff *skb, struct inet_diag_req_v2 *req,
-                              u32 pid, u32 seq, u16 nlmsg_flags,
+                              u32 portid, u32 seq, u16 nlmsg_flags,
                               const struct nlmsghdr *unlh)
 {
        long tmo;
        struct inet_diag_msg *r;
        struct nlmsghdr *nlh;
 
-       nlh = nlmsg_put(skb, pid, seq, unlh->nlmsg_type, sizeof(*r),
+       nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
                        nlmsg_flags);
        if (!nlh)
                return -EMSGSIZE;
@@ -256,14 +258,16 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
 }
 
 static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
-                       struct inet_diag_req_v2 *r, u32 pid, u32 seq, u16 nlmsg_flags,
+                       struct inet_diag_req_v2 *r,
+                       struct user_namespace *user_ns,
+                       u32 portid, u32 seq, u16 nlmsg_flags,
                        const struct nlmsghdr *unlh)
 {
        if (sk->sk_state == TCP_TIME_WAIT)
                return inet_twsk_diag_fill((struct inet_timewait_sock *)sk,
-                                          skb, r, pid, seq, nlmsg_flags,
+                                          skb, r, portid, seq, nlmsg_flags,
                                           unlh);
-       return inet_csk_diag_fill(sk, skb, r, pid, seq, nlmsg_flags, unlh);
+       return inet_csk_diag_fill(sk, skb, r, user_ns, portid, seq, nlmsg_flags, unlh);
 }
 
 int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *in_skb,
@@ -311,14 +315,15 @@ int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *in_s
        }
 
        err = sk_diag_fill(sk, rep, req,
-                          NETLINK_CB(in_skb).pid,
+                          sk_user_ns(NETLINK_CB(in_skb).ssk),
+                          NETLINK_CB(in_skb).portid,
                           nlh->nlmsg_seq, 0, nlh);
        if (err < 0) {
                WARN_ON(err == -EMSGSIZE);
                nlmsg_free(rep);
                goto out;
        }
-       err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).pid,
+       err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid,
                              MSG_DONTWAIT);
        if (err > 0)
                err = 0;
@@ -551,7 +556,8 @@ static int inet_csk_diag_dump(struct sock *sk,
                return 0;
 
        return inet_csk_diag_fill(sk, skb, r,
-                                 NETLINK_CB(cb->skb).pid,
+                                 sk_user_ns(NETLINK_CB(cb->skb).ssk),
+                                 NETLINK_CB(cb->skb).portid,
                                  cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
 }
 
@@ -586,12 +592,14 @@ static int inet_twsk_diag_dump(struct inet_timewait_sock *tw,
        }
 
        return inet_twsk_diag_fill(tw, skb, r,
-                                  NETLINK_CB(cb->skb).pid,
+                                  NETLINK_CB(cb->skb).portid,
                                   cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
 }
 
 static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
-                             struct request_sock *req, u32 pid, u32 seq,
+                             struct request_sock *req,
+                             struct user_namespace *user_ns,
+                             u32 portid, u32 seq,
                              const struct nlmsghdr *unlh)
 {
        const struct inet_request_sock *ireq = inet_rsk(req);
@@ -600,7 +608,7 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
        struct nlmsghdr *nlh;
        long tmo;
 
-       nlh = nlmsg_put(skb, pid, seq, unlh->nlmsg_type, sizeof(*r),
+       nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
                        NLM_F_MULTI);
        if (!nlh)
                return -EMSGSIZE;
@@ -625,7 +633,7 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
        r->idiag_expires = jiffies_to_msecs(tmo);
        r->idiag_rqueue = 0;
        r->idiag_wqueue = 0;
-       r->idiag_uid = sock_i_uid(sk);
+       r->idiag_uid = from_kuid_munged(user_ns, sock_i_uid(sk));
        r->idiag_inode = 0;
 #if IS_ENABLED(CONFIG_IPV6)
        if (r->idiag_family == AF_INET6) {
@@ -702,7 +710,8 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
                        }
 
                        err = inet_diag_fill_req(skb, sk, req,
-                                              NETLINK_CB(cb->skb).pid,
+                                              sk_user_ns(NETLINK_CB(cb->skb).ssk),
+                                              NETLINK_CB(cb->skb).portid,
                                               cb->nlh->nlmsg_seq, cb->nlh);
                        if (err < 0) {
                                cb->args[3] = j + 1;
index 85190e69297bfd736df428226c3e01aeb5be3aa8..4750d2b74d79324cdc3176b7a9cbbe0d13c4e9c7 100644 (file)
@@ -89,7 +89,7 @@ void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f)
        nf->low_thresh = 0;
 
        local_bh_disable();
-       inet_frag_evictor(nf, f);
+       inet_frag_evictor(nf, f, true);
        local_bh_enable();
 }
 EXPORT_SYMBOL(inet_frags_exit_net);
@@ -158,11 +158,16 @@ void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f,
 }
 EXPORT_SYMBOL(inet_frag_destroy);
 
-int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f)
+int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f, bool force)
 {
        struct inet_frag_queue *q;
        int work, evicted = 0;
 
+       if (!force) {
+               if (atomic_read(&nf->mem) <= nf->high_thresh)
+                       return 0;
+       }
+
        work = atomic_read(&nf->mem) - nf->low_thresh;
        while (work > 0) {
                read_lock(&f->lock);
index c7527f6b9ad9b54f36185975aed21171757c4600..000e3d239d6481ed230e71c9c9033dba301694e9 100644 (file)
@@ -194,7 +194,7 @@ void __init inet_initpeers(void)
                        0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
                        NULL);
 
-       INIT_DELAYED_WORK_DEFERRABLE(&gc_work, inetpeer_gc_worker);
+       INIT_DEFERRABLE_WORK(&gc_work, inetpeer_gc_worker);
 }
 
 static int addr_compare(const struct inetpeer_addr *a,
index 8d07c973409ca3df9d09f8fb6b3a614d304603a8..448e68546827431098c980bafc4a63967764942f 100644 (file)
@@ -219,7 +219,7 @@ static void ip_evictor(struct net *net)
 {
        int evicted;
 
-       evicted = inet_frag_evictor(&net->ipv4.frags, &ip4_frags);
+       evicted = inet_frag_evictor(&net->ipv4.frags, &ip4_frags, false);
        if (evicted)
                IP_ADD_STATS_BH(net, IPSTATS_MIB_REASMFAILS, evicted);
 }
@@ -523,6 +523,10 @@ found:
        if (offset == 0)
                qp->q.last_in |= INET_FRAG_FIRST_IN;
 
+       if (ip_hdr(skb)->frag_off & htons(IP_DF) &&
+           skb->len + ihl > qp->q.max_size)
+               qp->q.max_size = skb->len + ihl;
+
        if (qp->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
            qp->q.meat == qp->q.len)
                return ip_frag_reasm(qp, prev, dev);
@@ -646,9 +650,11 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
        head->next = NULL;
        head->dev = dev;
        head->tstamp = qp->q.stamp;
+       IPCB(head)->frag_max_size = qp->q.max_size;
 
        iph = ip_hdr(head);
-       iph->frag_off = 0;
+       /* max_size != 0 implies at least one fragment had IP_DF set */
+       iph->frag_off = qp->q.max_size ? htons(IP_DF) : 0;
        iph->tot_len = htons(len);
        iph->tos |= ecn;
        IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS);
@@ -678,8 +684,7 @@ int ip_defrag(struct sk_buff *skb, u32 user)
        IP_INC_STATS_BH(net, IPSTATS_MIB_REASMREQDS);
 
        /* Start by cleaning up the memory. */
-       if (atomic_read(&net->ipv4.frags.mem) > net->ipv4.frags.high_thresh)
-               ip_evictor(net);
+       ip_evictor(net);
 
        /* Lookup (or create) queue header */
        if ((qp = ip_find(net, ip_hdr(skb), user)) != NULL) {
index b062a98574f2e40e63d5b5eee8f1d7f2fda28425..7240f8e2dd4511dde4de0bd08290bb718eab140f 100644 (file)
    Alexey Kuznetsov.
  */
 
+static bool log_ecn_error = true;
+module_param(log_ecn_error, bool, 0644);
+MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
+
 static struct rtnl_link_ops ipgre_link_ops __read_mostly;
 static int ipgre_tunnel_init(struct net_device *dev);
 static void ipgre_tunnel_setup(struct net_device *dev);
@@ -204,7 +208,9 @@ static struct rtnl_link_stats64 *ipgre_get_stats64(struct net_device *dev,
        tot->rx_crc_errors = dev->stats.rx_crc_errors;
        tot->rx_fifo_errors = dev->stats.rx_fifo_errors;
        tot->rx_length_errors = dev->stats.rx_length_errors;
+       tot->rx_frame_errors = dev->stats.rx_frame_errors;
        tot->rx_errors = dev->stats.rx_errors;
+
        tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
        tot->tx_carrier_errors = dev->stats.tx_carrier_errors;
        tot->tx_dropped = dev->stats.tx_dropped;
@@ -214,11 +220,25 @@ static struct rtnl_link_stats64 *ipgre_get_stats64(struct net_device *dev,
        return tot;
 }
 
+/* Does key in tunnel parameters match packet */
+static bool ipgre_key_match(const struct ip_tunnel_parm *p,
+                           __be16 flags, __be32 key)
+{
+       if (p->i_flags & GRE_KEY) {
+               if (flags & GRE_KEY)
+                       return key == p->i_key;
+               else
+                       return false;   /* key expected, none present */
+       } else
+               return !(flags & GRE_KEY);
+}
+
 /* Given src, dst and key, find appropriate for input tunnel. */
 
 static struct ip_tunnel *ipgre_tunnel_lookup(struct net_device *dev,
                                             __be32 remote, __be32 local,
-                                            __be32 key, __be16 gre_proto)
+                                            __be16 flags, __be32 key,
+                                            __be16 gre_proto)
 {
        struct net *net = dev_net(dev);
        int link = dev->ifindex;
@@ -233,10 +253,12 @@ static struct ip_tunnel *ipgre_tunnel_lookup(struct net_device *dev,
        for_each_ip_tunnel_rcu(ign->tunnels_r_l[h0 ^ h1]) {
                if (local != t->parms.iph.saddr ||
                    remote != t->parms.iph.daddr ||
-                   key != t->parms.i_key ||
                    !(t->dev->flags & IFF_UP))
                        continue;
 
+               if (!ipgre_key_match(&t->parms, flags, key))
+                       continue;
+
                if (t->dev->type != ARPHRD_IPGRE &&
                    t->dev->type != dev_type)
                        continue;
@@ -257,10 +279,12 @@ static struct ip_tunnel *ipgre_tunnel_lookup(struct net_device *dev,
 
        for_each_ip_tunnel_rcu(ign->tunnels_r[h0 ^ h1]) {
                if (remote != t->parms.iph.daddr ||
-                   key != t->parms.i_key ||
                    !(t->dev->flags & IFF_UP))
                        continue;
 
+               if (!ipgre_key_match(&t->parms, flags, key))
+                       continue;
+
                if (t->dev->type != ARPHRD_IPGRE &&
                    t->dev->type != dev_type)
                        continue;
@@ -283,10 +307,12 @@ static struct ip_tunnel *ipgre_tunnel_lookup(struct net_device *dev,
                if ((local != t->parms.iph.saddr &&
                     (local != t->parms.iph.daddr ||
                      !ipv4_is_multicast(local))) ||
-                   key != t->parms.i_key ||
                    !(t->dev->flags & IFF_UP))
                        continue;
 
+               if (!ipgre_key_match(&t->parms, flags, key))
+                       continue;
+
                if (t->dev->type != ARPHRD_IPGRE &&
                    t->dev->type != dev_type)
                        continue;
@@ -489,6 +515,7 @@ static void ipgre_err(struct sk_buff *skb, u32 info)
        const int code = icmp_hdr(skb)->code;
        struct ip_tunnel *t;
        __be16 flags;
+       __be32 key = 0;
 
        flags = p[0];
        if (flags&(GRE_CSUM|GRE_KEY|GRE_SEQ|GRE_ROUTING|GRE_VERSION)) {
@@ -505,6 +532,9 @@ static void ipgre_err(struct sk_buff *skb, u32 info)
        if (skb_headlen(skb) < grehlen)
                return;
 
+       if (flags & GRE_KEY)
+               key = *(((__be32 *)p) + (grehlen / 4) - 1);
+
        switch (type) {
        default:
        case ICMP_PARAMETERPROB:
@@ -533,49 +563,34 @@ static void ipgre_err(struct sk_buff *skb, u32 info)
                break;
        }
 
-       rcu_read_lock();
        t = ipgre_tunnel_lookup(skb->dev, iph->daddr, iph->saddr,
-                               flags & GRE_KEY ?
-                               *(((__be32 *)p) + (grehlen / 4) - 1) : 0,
-                               p[1]);
+                               flags, key, p[1]);
+
        if (t == NULL)
-               goto out;
+               return;
 
        if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
                ipv4_update_pmtu(skb, dev_net(skb->dev), info,
                                 t->parms.link, 0, IPPROTO_GRE, 0);
-               goto out;
+               return;
        }
        if (type == ICMP_REDIRECT) {
                ipv4_redirect(skb, dev_net(skb->dev), t->parms.link, 0,
                              IPPROTO_GRE, 0);
-               goto out;
+               return;
        }
        if (t->parms.iph.daddr == 0 ||
            ipv4_is_multicast(t->parms.iph.daddr))
-               goto out;
+               return;
 
        if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
-               goto out;
+               return;
 
        if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
                t->err_count++;
        else
                t->err_count = 1;
        t->err_time = jiffies;
-out:
-       rcu_read_unlock();
-}
-
-static inline void ipgre_ecn_decapsulate(const struct iphdr *iph, struct sk_buff *skb)
-{
-       if (INET_ECN_is_ce(iph->tos)) {
-               if (skb->protocol == htons(ETH_P_IP)) {
-                       IP_ECN_set_ce(ip_hdr(skb));
-               } else if (skb->protocol == htons(ETH_P_IPV6)) {
-                       IP6_ECN_set_ce(ipv6_hdr(skb));
-               }
-       }
 }
 
 static inline u8
@@ -600,9 +615,10 @@ static int ipgre_rcv(struct sk_buff *skb)
        struct ip_tunnel *tunnel;
        int    offset = 4;
        __be16 gre_proto;
+       int    err;
 
        if (!pskb_may_pull(skb, 16))
-               goto drop_nolock;
+               goto drop;
 
        iph = ip_hdr(skb);
        h = skb->data;
@@ -613,7 +629,7 @@ static int ipgre_rcv(struct sk_buff *skb)
                   - We do not support routing headers.
                 */
                if (flags&(GRE_VERSION|GRE_ROUTING))
-                       goto drop_nolock;
+                       goto drop;
 
                if (flags&GRE_CSUM) {
                        switch (skb->ip_summed) {
@@ -641,10 +657,10 @@ static int ipgre_rcv(struct sk_buff *skb)
 
        gre_proto = *(__be16 *)(h + 2);
 
-       rcu_read_lock();
-       if ((tunnel = ipgre_tunnel_lookup(skb->dev,
-                                         iph->saddr, iph->daddr, key,
-                                         gre_proto))) {
+       tunnel = ipgre_tunnel_lookup(skb->dev,
+                                    iph->saddr, iph->daddr, flags, key,
+                                    gre_proto);
+       if (tunnel) {
                struct pcpu_tstats *tstats;
 
                secpath_reset(skb);
@@ -703,27 +719,33 @@ static int ipgre_rcv(struct sk_buff *skb)
                        skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
                }
 
+               __skb_tunnel_rx(skb, tunnel->dev);
+
+               skb_reset_network_header(skb);
+               err = IP_ECN_decapsulate(iph, skb);
+               if (unlikely(err)) {
+                       if (log_ecn_error)
+                               net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
+                                                    &iph->saddr, iph->tos);
+                       if (err > 1) {
+                               ++tunnel->dev->stats.rx_frame_errors;
+                               ++tunnel->dev->stats.rx_errors;
+                               goto drop;
+                       }
+               }
+
                tstats = this_cpu_ptr(tunnel->dev->tstats);
                u64_stats_update_begin(&tstats->syncp);
                tstats->rx_packets++;
                tstats->rx_bytes += skb->len;
                u64_stats_update_end(&tstats->syncp);
 
-               __skb_tunnel_rx(skb, tunnel->dev);
-
-               skb_reset_network_header(skb);
-               ipgre_ecn_decapsulate(iph, skb);
-
-               netif_rx(skb);
-
-               rcu_read_unlock();
+               gro_cells_receive(&tunnel->gro_cells, skb);
                return 0;
        }
        icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
 
 drop:
-       rcu_read_unlock();
-drop_nolock:
        kfree_skb(skb);
        return 0;
 }
@@ -745,6 +767,10 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
        __be32 dst;
        int    mtu;
 
+       if (skb->ip_summed == CHECKSUM_PARTIAL &&
+           skb_checksum_help(skb))
+               goto tx_error;
+
        if (dev->type == ARPHRD_ETHER)
                IPCB(skb)->flags = 0;
 
@@ -1292,10 +1318,18 @@ static const struct net_device_ops ipgre_netdev_ops = {
 
 static void ipgre_dev_free(struct net_device *dev)
 {
+       struct ip_tunnel *tunnel = netdev_priv(dev);
+
+       gro_cells_destroy(&tunnel->gro_cells);
        free_percpu(dev->tstats);
        free_netdev(dev);
 }
 
+#define GRE_FEATURES (NETIF_F_SG |             \
+                     NETIF_F_FRAGLIST |        \
+                     NETIF_F_HIGHDMA |         \
+                     NETIF_F_HW_CSUM)
+
 static void ipgre_tunnel_setup(struct net_device *dev)
 {
        dev->netdev_ops         = &ipgre_netdev_ops;
@@ -1309,12 +1343,16 @@ static void ipgre_tunnel_setup(struct net_device *dev)
        dev->addr_len           = 4;
        dev->features           |= NETIF_F_NETNS_LOCAL;
        dev->priv_flags         &= ~IFF_XMIT_DST_RELEASE;
+
+       dev->features           |= GRE_FEATURES;
+       dev->hw_features        |= GRE_FEATURES;
 }
 
 static int ipgre_tunnel_init(struct net_device *dev)
 {
        struct ip_tunnel *tunnel;
        struct iphdr *iph;
+       int err;
 
        tunnel = netdev_priv(dev);
        iph = &tunnel->parms.iph;
@@ -1341,6 +1379,12 @@ static int ipgre_tunnel_init(struct net_device *dev)
        if (!dev->tstats)
                return -ENOMEM;
 
+       err = gro_cells_init(&tunnel->gro_cells, dev);
+       if (err) {
+               free_percpu(dev->tstats);
+               return err;
+       }
+
        return 0;
 }
 
index c196d749daf23b3823ffe012495ea5d9411be99a..24a29a39e9a885dfa96300067fd37154b0875f53 100644 (file)
@@ -467,7 +467,9 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
 
        iph = ip_hdr(skb);
 
-       if (unlikely((iph->frag_off & htons(IP_DF)) && !skb->local_df)) {
+       if (unlikely(((iph->frag_off & htons(IP_DF)) && !skb->local_df) ||
+                    (IPCB(skb)->frag_max_size &&
+                     IPCB(skb)->frag_max_size > dst_mtu(&rt->dst)))) {
                IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
                icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
                          htonl(ip_skb_dst_mtu(skb)));
@@ -791,6 +793,7 @@ static int __ip_append_data(struct sock *sk,
                            struct flowi4 *fl4,
                            struct sk_buff_head *queue,
                            struct inet_cork *cork,
+                           struct page_frag *pfrag,
                            int getfrag(void *from, char *to, int offset,
                                        int len, int odd, struct sk_buff *skb),
                            void *from, int length, int transhdrlen,
@@ -985,47 +988,30 @@ alloc_new_skb:
                        }
                } else {
                        int i = skb_shinfo(skb)->nr_frags;
-                       skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
-                       struct page *page = cork->page;
-                       int off = cork->off;
-                       unsigned int left;
-
-                       if (page && (left = PAGE_SIZE - off) > 0) {
-                               if (copy >= left)
-                                       copy = left;
-                               if (page != skb_frag_page(frag)) {
-                                       if (i == MAX_SKB_FRAGS) {
-                                               err = -EMSGSIZE;
-                                               goto error;
-                                       }
-                                       skb_fill_page_desc(skb, i, page, off, 0);
-                                       skb_frag_ref(skb, i);
-                                       frag = &skb_shinfo(skb)->frags[i];
-                               }
-                       } else if (i < MAX_SKB_FRAGS) {
-                               if (copy > PAGE_SIZE)
-                                       copy = PAGE_SIZE;
-                               page = alloc_pages(sk->sk_allocation, 0);
-                               if (page == NULL)  {
-                                       err = -ENOMEM;
-                                       goto error;
-                               }
-                               cork->page = page;
-                               cork->off = 0;
 
-                               skb_fill_page_desc(skb, i, page, 0, 0);
-                               frag = &skb_shinfo(skb)->frags[i];
-                       } else {
-                               err = -EMSGSIZE;
-                               goto error;
-                       }
-                       if (getfrag(from, skb_frag_address(frag)+skb_frag_size(frag),
-                                   offset, copy, skb->len, skb) < 0) {
-                               err = -EFAULT;
+                       err = -ENOMEM;
+                       if (!sk_page_frag_refill(sk, pfrag))
                                goto error;
+
+                       if (!skb_can_coalesce(skb, i, pfrag->page,
+                                             pfrag->offset)) {
+                               err = -EMSGSIZE;
+                               if (i == MAX_SKB_FRAGS)
+                                       goto error;
+
+                               __skb_fill_page_desc(skb, i, pfrag->page,
+                                                    pfrag->offset, 0);
+                               skb_shinfo(skb)->nr_frags = ++i;
+                               get_page(pfrag->page);
                        }
-                       cork->off += copy;
-                       skb_frag_size_add(frag, copy);
+                       copy = min_t(int, copy, pfrag->size - pfrag->offset);
+                       if (getfrag(from,
+                                   page_address(pfrag->page) + pfrag->offset,
+                                   offset, copy, skb->len, skb) < 0)
+                               goto error_efault;
+
+                       pfrag->offset += copy;
+                       skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
                        skb->len += copy;
                        skb->data_len += copy;
                        skb->truesize += copy;
@@ -1037,6 +1023,8 @@ alloc_new_skb:
 
        return 0;
 
+error_efault:
+       err = -EFAULT;
 error:
        cork->length -= length;
        IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
@@ -1077,8 +1065,6 @@ static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
        cork->dst = &rt->dst;
        cork->length = 0;
        cork->tx_flags = ipc->tx_flags;
-       cork->page = NULL;
-       cork->off = 0;
 
        return 0;
 }
@@ -1115,7 +1101,8 @@ int ip_append_data(struct sock *sk, struct flowi4 *fl4,
                transhdrlen = 0;
        }
 
-       return __ip_append_data(sk, fl4, &sk->sk_write_queue, &inet->cork.base, getfrag,
+       return __ip_append_data(sk, fl4, &sk->sk_write_queue, &inet->cork.base,
+                               sk_page_frag(sk), getfrag,
                                from, length, transhdrlen, flags);
 }
 
@@ -1437,7 +1424,8 @@ struct sk_buff *ip_make_skb(struct sock *sk,
        if (err)
                return ERR_PTR(err);
 
-       err = __ip_append_data(sk, fl4, &queue, &cork, getfrag,
+       err = __ip_append_data(sk, fl4, &queue, &cork,
+                              &current->task_frag, getfrag,
                               from, length, transhdrlen, flags);
        if (err) {
                __ip_flush_pending_frames(sk, &queue, &cork);
index 3511ffba7bd41088cd95d677f5c43dab947d9bf2..978bca4818aef0c52dda4011c2a98b7ef0326559 100644 (file)
@@ -304,7 +304,6 @@ static int vti_err(struct sk_buff *skb, u32 info)
 
        err = -ENOENT;
 
-       rcu_read_lock();
        t = vti_tunnel_lookup(dev_net(skb->dev), iph->daddr, iph->saddr);
        if (t == NULL)
                goto out;
@@ -326,7 +325,6 @@ static int vti_err(struct sk_buff *skb, u32 info)
                t->err_count = 1;
        t->err_time = jiffies;
 out:
-       rcu_read_unlock();
        return err;
 }
 
@@ -336,7 +334,6 @@ static int vti_rcv(struct sk_buff *skb)
        struct ip_tunnel *tunnel;
        const struct iphdr *iph = ip_hdr(skb);
 
-       rcu_read_lock();
        tunnel = vti_tunnel_lookup(dev_net(skb->dev), iph->saddr, iph->daddr);
        if (tunnel != NULL) {
                struct pcpu_tstats *tstats;
@@ -348,10 +345,8 @@ static int vti_rcv(struct sk_buff *skb)
                u64_stats_update_end(&tstats->syncp);
 
                skb->dev = tunnel->dev;
-               rcu_read_unlock();
                return 1;
        }
-       rcu_read_unlock();
 
        return -1;
 }
index 67e8a6b086ea7a0d2c4cc986ed6ed0e6b4414c6a..798358b107171664823de1b13cc3ffd020dc7f94 100644 (file)
@@ -582,6 +582,17 @@ static void __init ic_rarp_send_if(struct ic_device *d)
 }
 #endif
 
+/*
+ *  Predefine Nameservers
+ */
+static inline void __init ic_nameservers_predef(void)
+{
+       int i;
+
+       for (i = 0; i < CONF_NAMESERVERS_MAX; i++)
+               ic_nameservers[i] = NONE;
+}
+
 /*
  *     DHCP/BOOTP support.
  */
@@ -747,10 +758,7 @@ static void __init ic_bootp_init_ext(u8 *e)
  */
 static inline void __init ic_bootp_init(void)
 {
-       int i;
-
-       for (i = 0; i < CONF_NAMESERVERS_MAX; i++)
-               ic_nameservers[i] = NONE;
+       ic_nameservers_predef();
 
        dev_add_pack(&bootp_packet_type);
 }
@@ -1379,6 +1387,7 @@ static int __init ip_auto_config(void)
        int retries = CONF_OPEN_RETRIES;
 #endif
        int err;
+       unsigned int i;
 
 #ifdef CONFIG_PROC_FS
        proc_net_fops_create(&init_net, "pnp", S_IRUGO, &pnp_seq_fops);
@@ -1499,7 +1508,15 @@ static int __init ip_auto_config(void)
                &ic_servaddr, &root_server_addr, root_server_path);
        if (ic_dev_mtu)
                pr_cont(", mtu=%d", ic_dev_mtu);
-       pr_cont("\n");
+       for (i = 0; i < CONF_NAMESERVERS_MAX; i++)
+               if (ic_nameservers[i] != NONE) {
+                       pr_info("     nameserver%u=%pI4", i, &ic_nameservers[i]);
+                       break;
+               }
+       for (i++; i < CONF_NAMESERVERS_MAX; i++)
+               if (ic_nameservers[i] != NONE)
+                       pr_cont(", nameserver%u=%pI4", i, &ic_nameservers[i]);
+       pr_cont("\n");
 #endif /* !SILENT */
 
        return 0;
@@ -1570,6 +1587,8 @@ static int __init ip_auto_config_setup(char *addrs)
                return 1;
        }
 
+       ic_nameservers_predef();
+
        /* Parse string for static IP assignment.  */
        ip = addrs;
        while (ip && *ip) {
@@ -1613,6 +1632,20 @@ static int __init ip_auto_config_setup(char *addrs)
                                        ic_enable = 0;
                                }
                                break;
+                       case 7:
+                               if (CONF_NAMESERVERS_MAX >= 1) {
+                                       ic_nameservers[0] = in_aton(ip);
+                                       if (ic_nameservers[0] == ANY)
+                                               ic_nameservers[0] = NONE;
+                               }
+                               break;
+                       case 8:
+                               if (CONF_NAMESERVERS_MAX >= 2) {
+                                       ic_nameservers[1] = in_aton(ip);
+                                       if (ic_nameservers[1] == ANY)
+                                               ic_nameservers[1] = NONE;
+                               }
+                               break;
                        }
                }
                ip = cp;
index 99af1f0cc65827c3faa40407c17b38527ac8a211..e15b45297c09f0043bda21ad366ce36d5a6e5824 100644 (file)
 #define HASH_SIZE  16
 #define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF)
 
+static bool log_ecn_error = true;
+module_param(log_ecn_error, bool, 0644);
+MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
+
 static int ipip_net_id __read_mostly;
 struct ipip_net {
        struct ip_tunnel __rcu *tunnels_r_l[HASH_SIZE];
@@ -365,8 +369,6 @@ static int ipip_err(struct sk_buff *skb, u32 info)
        }
 
        err = -ENOENT;
-
-       rcu_read_lock();
        t = ipip_tunnel_lookup(dev_net(skb->dev), iph->daddr, iph->saddr);
        if (t == NULL)
                goto out;
@@ -398,34 +400,22 @@ static int ipip_err(struct sk_buff *skb, u32 info)
                t->err_count = 1;
        t->err_time = jiffies;
 out:
-       rcu_read_unlock();
-       return err;
-}
-
-static inline void ipip_ecn_decapsulate(const struct iphdr *outer_iph,
-                                       struct sk_buff *skb)
-{
-       struct iphdr *inner_iph = ip_hdr(skb);
 
-       if (INET_ECN_is_ce(outer_iph->tos))
-               IP_ECN_set_ce(inner_iph);
+       return err;
 }
 
 static int ipip_rcv(struct sk_buff *skb)
 {
        struct ip_tunnel *tunnel;
        const struct iphdr *iph = ip_hdr(skb);
+       int err;
 
-       rcu_read_lock();
        tunnel = ipip_tunnel_lookup(dev_net(skb->dev), iph->saddr, iph->daddr);
        if (tunnel != NULL) {
                struct pcpu_tstats *tstats;
 
-               if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
-                       rcu_read_unlock();
-                       kfree_skb(skb);
-                       return 0;
-               }
+               if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
+                       goto drop;
 
                secpath_reset(skb);
 
@@ -434,24 +424,35 @@ static int ipip_rcv(struct sk_buff *skb)
                skb->protocol = htons(ETH_P_IP);
                skb->pkt_type = PACKET_HOST;
 
+               __skb_tunnel_rx(skb, tunnel->dev);
+
+               err = IP_ECN_decapsulate(iph, skb);
+               if (unlikely(err)) {
+                       if (log_ecn_error)
+                               net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
+                                                    &iph->saddr, iph->tos);
+                       if (err > 1) {
+                               ++tunnel->dev->stats.rx_frame_errors;
+                               ++tunnel->dev->stats.rx_errors;
+                               goto drop;
+                       }
+               }
+
                tstats = this_cpu_ptr(tunnel->dev->tstats);
                u64_stats_update_begin(&tstats->syncp);
                tstats->rx_packets++;
                tstats->rx_bytes += skb->len;
                u64_stats_update_end(&tstats->syncp);
 
-               __skb_tunnel_rx(skb, tunnel->dev);
-
-               ipip_ecn_decapsulate(iph, skb);
-
                netif_rx(skb);
-
-               rcu_read_unlock();
                return 0;
        }
-       rcu_read_unlock();
 
        return -1;
+
+drop:
+       kfree_skb(skb);
+       return 0;
 }
 
 /*
index ebdf06f938bf040eebc91763c3e952c63d00f92b..1daa95c2a0bad8e532181dc4d67d4aead0f3671f 100644 (file)
@@ -626,7 +626,7 @@ static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c)
                        e->error = -ETIMEDOUT;
                        memset(&e->msg, 0, sizeof(e->msg));
 
-                       rtnl_unicast(skb, net, NETLINK_CB(skb).pid);
+                       rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
                } else {
                        kfree_skb(skb);
                }
@@ -870,7 +870,7 @@ static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt,
                                memset(&e->msg, 0, sizeof(e->msg));
                        }
 
-                       rtnl_unicast(skb, net, NETLINK_CB(skb).pid);
+                       rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
                } else {
                        ip_mr_forward(net, mrt, skb, c, 0);
                }
@@ -1808,7 +1808,7 @@ static struct mr_table *ipmr_rt_fib_lookup(struct net *net, struct sk_buff *skb)
                .flowi4_oif = (rt_is_output_route(rt) ?
                               skb->dev->ifindex : 0),
                .flowi4_iif = (rt_is_output_route(rt) ?
-                              net->loopback_dev->ifindex :
+                              LOOPBACK_IFINDEX :
                               skb->dev->ifindex),
                .flowi4_mark = skb->mark,
        };
@@ -2117,12 +2117,12 @@ int ipmr_get_route(struct net *net, struct sk_buff *skb,
 }
 
 static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
-                           u32 pid, u32 seq, struct mfc_cache *c)
+                           u32 portid, u32 seq, struct mfc_cache *c)
 {
        struct nlmsghdr *nlh;
        struct rtmsg *rtm;
 
-       nlh = nlmsg_put(skb, pid, seq, RTM_NEWROUTE, sizeof(*rtm), NLM_F_MULTI);
+       nlh = nlmsg_put(skb, portid, seq, RTM_NEWROUTE, sizeof(*rtm), NLM_F_MULTI);
        if (nlh == NULL)
                return -EMSGSIZE;
 
@@ -2176,7 +2176,7 @@ static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
                                if (e < s_e)
                                        goto next_entry;
                                if (ipmr_fill_mroute(mrt, skb,
-                                                    NETLINK_CB(cb->skb).pid,
+                                                    NETLINK_CB(cb->skb).portid,
                                                     cb->nlh->nlmsg_seq,
                                                     mfc) < 0)
                                        goto done;
index ed1b3678319223eaa7a4d0b2df3a9aae21deeb8a..4c0cf63dd92e9ab94bf155c8e0d497c2029cebde 100644 (file)
@@ -72,43 +72,6 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned int addr_type)
 }
 EXPORT_SYMBOL(ip_route_me_harder);
 
-#ifdef CONFIG_XFRM
-int ip_xfrm_me_harder(struct sk_buff *skb)
-{
-       struct flowi fl;
-       unsigned int hh_len;
-       struct dst_entry *dst;
-
-       if (IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED)
-               return 0;
-       if (xfrm_decode_session(skb, &fl, AF_INET) < 0)
-               return -1;
-
-       dst = skb_dst(skb);
-       if (dst->xfrm)
-               dst = ((struct xfrm_dst *)dst)->route;
-       dst_hold(dst);
-
-       dst = xfrm_lookup(dev_net(dst->dev), dst, &fl, skb->sk, 0);
-       if (IS_ERR(dst))
-               return -1;
-
-       skb_dst_drop(skb);
-       skb_dst_set(skb, dst);
-
-       /* Change in oif may mean change in hh_len. */
-       hh_len = skb_dst(skb)->dev->hard_header_len;
-       if (skb_headroom(skb) < hh_len &&
-           pskb_expand_head(skb, hh_len - skb_headroom(skb), 0, GFP_ATOMIC))
-               return -1;
-       return 0;
-}
-EXPORT_SYMBOL(ip_xfrm_me_harder);
-#endif
-
-void (*ip_nat_decode_session)(struct sk_buff *, struct flowi *);
-EXPORT_SYMBOL(ip_nat_decode_session);
-
 /*
  * Extra routing may needed on local out, as the QUEUE target never
  * returns control to the table.
@@ -225,12 +188,12 @@ static const struct nf_afinfo nf_ip_afinfo = {
        .route_key_size         = sizeof(struct ip_rt_info),
 };
 
-static int ipv4_netfilter_init(void)
+static int __init ipv4_netfilter_init(void)
 {
        return nf_register_afinfo(&nf_ip_afinfo);
 }
 
-static void ipv4_netfilter_fini(void)
+static void __exit ipv4_netfilter_fini(void)
 {
        nf_unregister_afinfo(&nf_ip_afinfo);
 }
index fcc543cd987a3f22b45ea143c9c754019fa8bacd..d8d6f2a5bf120fe857fce4d3beadfebea9a57e0a 100644 (file)
@@ -143,25 +143,22 @@ config IP_NF_TARGET_ULOG
          To compile it as a module, choose M here.  If unsure, say N.
 
 # NAT + specific targets: nf_conntrack
-config NF_NAT
-       tristate "Full NAT"
+config NF_NAT_IPV4
+       tristate "IPv4 NAT"
        depends on NF_CONNTRACK_IPV4
        default m if NETFILTER_ADVANCED=n
+       select NF_NAT
        help
-         The Full NAT option allows masquerading, port forwarding and other
+         The IPv4 NAT option allows masquerading, port forwarding and other
          forms of full Network Address Port Translation.  It is controlled by
          the `nat' table in iptables: see the man page for iptables(8).
 
          To compile it as a module, choose M here.  If unsure, say N.
 
-config NF_NAT_NEEDED
-       bool
-       depends on NF_NAT
-       default y
+if NF_NAT_IPV4
 
 config IP_NF_TARGET_MASQUERADE
        tristate "MASQUERADE target support"
-       depends on NF_NAT
        default m if NETFILTER_ADVANCED=n
        help
          Masquerading is a special case of NAT: all outgoing connections are
@@ -174,30 +171,27 @@ config IP_NF_TARGET_MASQUERADE
 
 config IP_NF_TARGET_NETMAP
        tristate "NETMAP target support"
-       depends on NF_NAT
        depends on NETFILTER_ADVANCED
-       help
-         NETMAP is an implementation of static 1:1 NAT mapping of network
-         addresses. It maps the network address part, while keeping the host
-         address part intact.
-
-         To compile it as a module, choose M here.  If unsure, say N.
+       select NETFILTER_XT_TARGET_NETMAP
+       ---help---
+       This is a backwards-compat option for the user's convenience
+       (e.g. when running oldconfig). It selects
+       CONFIG_NETFILTER_XT_TARGET_NETMAP.
 
 config IP_NF_TARGET_REDIRECT
        tristate "REDIRECT target support"
-       depends on NF_NAT
        depends on NETFILTER_ADVANCED
-       help
-         REDIRECT is a special case of NAT: all incoming connections are
-         mapped onto the incoming interface's address, causing the packets to
-         come to the local machine instead of passing through.  This is
-         useful for transparent proxies.
+       select NETFILTER_XT_TARGET_REDIRECT
+       ---help---
+       This is a backwards-compat option for the user's convenience
+       (e.g. when running oldconfig). It selects
+       CONFIG_NETFILTER_XT_TARGET_REDIRECT.
 
-         To compile it as a module, choose M here.  If unsure, say N.
+endif
 
 config NF_NAT_SNMP_BASIC
        tristate "Basic SNMP-ALG support"
-       depends on NF_CONNTRACK_SNMP && NF_NAT
+       depends on NF_CONNTRACK_SNMP && NF_NAT_IPV4
        depends on NETFILTER_ADVANCED
        default NF_NAT && NF_CONNTRACK_SNMP
        ---help---
@@ -219,61 +213,21 @@ config NF_NAT_SNMP_BASIC
 #           <expr> '&&' <expr>                   (6)
 #
 # (6) Returns the result of min(/expr/, /expr/).
-config NF_NAT_PROTO_DCCP
-       tristate
-       depends on NF_NAT && NF_CT_PROTO_DCCP
-       default NF_NAT && NF_CT_PROTO_DCCP
 
 config NF_NAT_PROTO_GRE
        tristate
-       depends on NF_NAT && NF_CT_PROTO_GRE
-
-config NF_NAT_PROTO_UDPLITE
-       tristate
-       depends on NF_NAT && NF_CT_PROTO_UDPLITE
-       default NF_NAT && NF_CT_PROTO_UDPLITE
-
-config NF_NAT_PROTO_SCTP
-       tristate
-       default NF_NAT && NF_CT_PROTO_SCTP
-       depends on NF_NAT && NF_CT_PROTO_SCTP
-       select LIBCRC32C
-
-config NF_NAT_FTP
-       tristate
-       depends on NF_CONNTRACK && NF_NAT
-       default NF_NAT && NF_CONNTRACK_FTP
-
-config NF_NAT_IRC
-       tristate
-       depends on NF_CONNTRACK && NF_NAT
-       default NF_NAT && NF_CONNTRACK_IRC
-
-config NF_NAT_TFTP
-       tristate
-       depends on NF_CONNTRACK && NF_NAT
-       default NF_NAT && NF_CONNTRACK_TFTP
-
-config NF_NAT_AMANDA
-       tristate
-       depends on NF_CONNTRACK && NF_NAT
-       default NF_NAT && NF_CONNTRACK_AMANDA
+       depends on NF_NAT_IPV4 && NF_CT_PROTO_GRE
 
 config NF_NAT_PPTP
        tristate
-       depends on NF_CONNTRACK && NF_NAT
-       default NF_NAT && NF_CONNTRACK_PPTP
+       depends on NF_CONNTRACK && NF_NAT_IPV4
+       default NF_NAT_IPV4 && NF_CONNTRACK_PPTP
        select NF_NAT_PROTO_GRE
 
 config NF_NAT_H323
        tristate
-       depends on NF_CONNTRACK && NF_NAT
-       default NF_NAT && NF_CONNTRACK_H323
-
-config NF_NAT_SIP
-       tristate
-       depends on NF_CONNTRACK && NF_NAT
-       default NF_NAT && NF_CONNTRACK_SIP
+       depends on NF_CONNTRACK && NF_NAT_IPV4
+       default NF_NAT_IPV4 && NF_CONNTRACK_H323
 
 # mangle + specific targets
 config IP_NF_MANGLE
index c20674dc9452cefe0796d4cf45e93f005f965635..007b128eecc90246af3353f1e210bb07f267fe20 100644 (file)
@@ -10,32 +10,22 @@ nf_conntrack_ipv4-objs      += nf_conntrack_l3proto_ipv4_compat.o
 endif
 endif
 
-nf_nat-y               := nf_nat_core.o nf_nat_helper.o nf_nat_proto_unknown.o nf_nat_proto_common.o nf_nat_proto_tcp.o nf_nat_proto_udp.o nf_nat_proto_icmp.o
-iptable_nat-y  := nf_nat_rule.o nf_nat_standalone.o
-
 # connection tracking
 obj-$(CONFIG_NF_CONNTRACK_IPV4) += nf_conntrack_ipv4.o
 
-obj-$(CONFIG_NF_NAT) += nf_nat.o
+nf_nat_ipv4-y          := nf_nat_l3proto_ipv4.o nf_nat_proto_icmp.o
+obj-$(CONFIG_NF_NAT_IPV4) += nf_nat_ipv4.o
 
 # defrag
 obj-$(CONFIG_NF_DEFRAG_IPV4) += nf_defrag_ipv4.o
 
 # NAT helpers (nf_conntrack)
-obj-$(CONFIG_NF_NAT_AMANDA) += nf_nat_amanda.o
-obj-$(CONFIG_NF_NAT_FTP) += nf_nat_ftp.o
 obj-$(CONFIG_NF_NAT_H323) += nf_nat_h323.o
-obj-$(CONFIG_NF_NAT_IRC) += nf_nat_irc.o
 obj-$(CONFIG_NF_NAT_PPTP) += nf_nat_pptp.o
-obj-$(CONFIG_NF_NAT_SIP) += nf_nat_sip.o
 obj-$(CONFIG_NF_NAT_SNMP_BASIC) += nf_nat_snmp_basic.o
-obj-$(CONFIG_NF_NAT_TFTP) += nf_nat_tftp.o
 
 # NAT protocols (nf_nat)
-obj-$(CONFIG_NF_NAT_PROTO_DCCP) += nf_nat_proto_dccp.o
 obj-$(CONFIG_NF_NAT_PROTO_GRE) += nf_nat_proto_gre.o
-obj-$(CONFIG_NF_NAT_PROTO_UDPLITE) += nf_nat_proto_udplite.o
-obj-$(CONFIG_NF_NAT_PROTO_SCTP) += nf_nat_proto_sctp.o
 
 # generic IP tables 
 obj-$(CONFIG_IP_NF_IPTABLES) += ip_tables.o
@@ -43,7 +33,7 @@ obj-$(CONFIG_IP_NF_IPTABLES) += ip_tables.o
 # the three instances of ip_tables
 obj-$(CONFIG_IP_NF_FILTER) += iptable_filter.o
 obj-$(CONFIG_IP_NF_MANGLE) += iptable_mangle.o
-obj-$(CONFIG_NF_NAT) += iptable_nat.o
+obj-$(CONFIG_NF_NAT_IPV4) += iptable_nat.o
 obj-$(CONFIG_IP_NF_RAW) += iptable_raw.o
 obj-$(CONFIG_IP_NF_SECURITY) += iptable_security.o
 
@@ -55,8 +45,6 @@ obj-$(CONFIG_IP_NF_MATCH_RPFILTER) += ipt_rpfilter.o
 obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o
 obj-$(CONFIG_IP_NF_TARGET_ECN) += ipt_ECN.o
 obj-$(CONFIG_IP_NF_TARGET_MASQUERADE) += ipt_MASQUERADE.o
-obj-$(CONFIG_IP_NF_TARGET_NETMAP) += ipt_NETMAP.o
-obj-$(CONFIG_IP_NF_TARGET_REDIRECT) += ipt_REDIRECT.o
 obj-$(CONFIG_IP_NF_TARGET_REJECT) += ipt_REJECT.o
 obj-$(CONFIG_IP_NF_TARGET_ULOG) += ipt_ULOG.o
 
index cbb6a1a6f6f794aa3f16561658812cd181a6f49c..5d5d4d1be9c2c7c2c951943e5a7d7ce7a4b940d4 100644 (file)
@@ -19,9 +19,9 @@
 #include <net/ip.h>
 #include <net/checksum.h>
 #include <net/route.h>
-#include <net/netfilter/nf_nat_rule.h>
 #include <linux/netfilter_ipv4.h>
 #include <linux/netfilter/x_tables.h>
+#include <net/netfilter/nf_nat.h>
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
@@ -49,7 +49,7 @@ masquerade_tg(struct sk_buff *skb, const struct xt_action_param *par)
        struct nf_conn *ct;
        struct nf_conn_nat *nat;
        enum ip_conntrack_info ctinfo;
-       struct nf_nat_ipv4_range newrange;
+       struct nf_nat_range newrange;
        const struct nf_nat_ipv4_multi_range_compat *mr;
        const struct rtable *rt;
        __be32 newsrc, nh;
@@ -80,10 +80,13 @@ masquerade_tg(struct sk_buff *skb, const struct xt_action_param *par)
        nat->masq_index = par->out->ifindex;
 
        /* Transfer from original range. */
-       newrange = ((struct nf_nat_ipv4_range)
-               { mr->range[0].flags | NF_NAT_RANGE_MAP_IPS,
-                 newsrc, newsrc,
-                 mr->range[0].min, mr->range[0].max });
+       memset(&newrange.min_addr, 0, sizeof(newrange.min_addr));
+       memset(&newrange.max_addr, 0, sizeof(newrange.max_addr));
+       newrange.flags       = mr->range[0].flags | NF_NAT_RANGE_MAP_IPS;
+       newrange.min_addr.ip = newsrc;
+       newrange.max_addr.ip = newsrc;
+       newrange.min_proto   = mr->range[0].min;
+       newrange.max_proto   = mr->range[0].max;
 
        /* Hand modified range to generic setup. */
        return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_SRC);
@@ -96,7 +99,8 @@ device_cmp(struct nf_conn *i, void *ifindex)
 
        if (!nat)
                return 0;
-
+       if (nf_ct_l3num(i) != NFPROTO_IPV4)
+               return 0;
        return nat->masq_index == (int)(long)ifindex;
 }
 
diff --git a/net/ipv4/netfilter/ipt_NETMAP.c b/net/ipv4/netfilter/ipt_NETMAP.c
deleted file mode 100644 (file)
index b5bfbba..0000000
+++ /dev/null
@@ -1,98 +0,0 @@
-/* NETMAP - static NAT mapping of IP network addresses (1:1).
- * The mapping can be applied to source (POSTROUTING),
- * destination (PREROUTING), or both (with separate rules).
- */
-
-/* (C) 2000-2001 Svenning Soerensen <svenning@post5.tele.dk>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/ip.h>
-#include <linux/module.h>
-#include <linux/netdevice.h>
-#include <linux/netfilter.h>
-#include <linux/netfilter_ipv4.h>
-#include <linux/netfilter/x_tables.h>
-#include <net/netfilter/nf_nat_rule.h>
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Svenning Soerensen <svenning@post5.tele.dk>");
-MODULE_DESCRIPTION("Xtables: 1:1 NAT mapping of IPv4 subnets");
-
-static int netmap_tg_check(const struct xt_tgchk_param *par)
-{
-       const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
-
-       if (!(mr->range[0].flags & NF_NAT_RANGE_MAP_IPS)) {
-               pr_debug("bad MAP_IPS.\n");
-               return -EINVAL;
-       }
-       if (mr->rangesize != 1) {
-               pr_debug("bad rangesize %u.\n", mr->rangesize);
-               return -EINVAL;
-       }
-       return 0;
-}
-
-static unsigned int
-netmap_tg(struct sk_buff *skb, const struct xt_action_param *par)
-{
-       struct nf_conn *ct;
-       enum ip_conntrack_info ctinfo;
-       __be32 new_ip, netmask;
-       const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
-       struct nf_nat_ipv4_range newrange;
-
-       NF_CT_ASSERT(par->hooknum == NF_INET_PRE_ROUTING ||
-                    par->hooknum == NF_INET_POST_ROUTING ||
-                    par->hooknum == NF_INET_LOCAL_OUT ||
-                    par->hooknum == NF_INET_LOCAL_IN);
-       ct = nf_ct_get(skb, &ctinfo);
-
-       netmask = ~(mr->range[0].min_ip ^ mr->range[0].max_ip);
-
-       if (par->hooknum == NF_INET_PRE_ROUTING ||
-           par->hooknum == NF_INET_LOCAL_OUT)
-               new_ip = ip_hdr(skb)->daddr & ~netmask;
-       else
-               new_ip = ip_hdr(skb)->saddr & ~netmask;
-       new_ip |= mr->range[0].min_ip & netmask;
-
-       newrange = ((struct nf_nat_ipv4_range)
-               { mr->range[0].flags | NF_NAT_RANGE_MAP_IPS,
-                 new_ip, new_ip,
-                 mr->range[0].min, mr->range[0].max });
-
-       /* Hand modified range to generic setup. */
-       return nf_nat_setup_info(ct, &newrange, HOOK2MANIP(par->hooknum));
-}
-
-static struct xt_target netmap_tg_reg __read_mostly = {
-       .name           = "NETMAP",
-       .family         = NFPROTO_IPV4,
-       .target         = netmap_tg,
-       .targetsize     = sizeof(struct nf_nat_ipv4_multi_range_compat),
-       .table          = "nat",
-       .hooks          = (1 << NF_INET_PRE_ROUTING) |
-                         (1 << NF_INET_POST_ROUTING) |
-                         (1 << NF_INET_LOCAL_OUT) |
-                         (1 << NF_INET_LOCAL_IN),
-       .checkentry     = netmap_tg_check,
-       .me             = THIS_MODULE
-};
-
-static int __init netmap_tg_init(void)
-{
-       return xt_register_target(&netmap_tg_reg);
-}
-
-static void __exit netmap_tg_exit(void)
-{
-       xt_unregister_target(&netmap_tg_reg);
-}
-
-module_init(netmap_tg_init);
-module_exit(netmap_tg_exit);
diff --git a/net/ipv4/netfilter/ipt_REDIRECT.c b/net/ipv4/netfilter/ipt_REDIRECT.c
deleted file mode 100644 (file)
index 7c0103a..0000000
+++ /dev/null
@@ -1,110 +0,0 @@
-/* Redirect.  Simple mapping which alters dst to a local IP address. */
-/* (C) 1999-2001 Paul `Rusty' Russell
- * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/types.h>
-#include <linux/ip.h>
-#include <linux/timer.h>
-#include <linux/module.h>
-#include <linux/netfilter.h>
-#include <linux/netdevice.h>
-#include <linux/if.h>
-#include <linux/inetdevice.h>
-#include <net/protocol.h>
-#include <net/checksum.h>
-#include <linux/netfilter_ipv4.h>
-#include <linux/netfilter/x_tables.h>
-#include <net/netfilter/nf_nat_rule.h>
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
-MODULE_DESCRIPTION("Xtables: Connection redirection to localhost");
-
-/* FIXME: Take multiple ranges --RR */
-static int redirect_tg_check(const struct xt_tgchk_param *par)
-{
-       const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
-
-       if (mr->range[0].flags & NF_NAT_RANGE_MAP_IPS) {
-               pr_debug("bad MAP_IPS.\n");
-               return -EINVAL;
-       }
-       if (mr->rangesize != 1) {
-               pr_debug("bad rangesize %u.\n", mr->rangesize);
-               return -EINVAL;
-       }
-       return 0;
-}
-
-static unsigned int
-redirect_tg(struct sk_buff *skb, const struct xt_action_param *par)
-{
-       struct nf_conn *ct;
-       enum ip_conntrack_info ctinfo;
-       __be32 newdst;
-       const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
-       struct nf_nat_ipv4_range newrange;
-
-       NF_CT_ASSERT(par->hooknum == NF_INET_PRE_ROUTING ||
-                    par->hooknum == NF_INET_LOCAL_OUT);
-
-       ct = nf_ct_get(skb, &ctinfo);
-       NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED));
-
-       /* Local packets: make them go to loopback */
-       if (par->hooknum == NF_INET_LOCAL_OUT)
-               newdst = htonl(0x7F000001);
-       else {
-               struct in_device *indev;
-               struct in_ifaddr *ifa;
-
-               newdst = 0;
-
-               rcu_read_lock();
-               indev = __in_dev_get_rcu(skb->dev);
-               if (indev && (ifa = indev->ifa_list))
-                       newdst = ifa->ifa_local;
-               rcu_read_unlock();
-
-               if (!newdst)
-                       return NF_DROP;
-       }
-
-       /* Transfer from original range. */
-       newrange = ((struct nf_nat_ipv4_range)
-               { mr->range[0].flags | NF_NAT_RANGE_MAP_IPS,
-                 newdst, newdst,
-                 mr->range[0].min, mr->range[0].max });
-
-       /* Hand modified range to generic setup. */
-       return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_DST);
-}
-
-static struct xt_target redirect_tg_reg __read_mostly = {
-       .name           = "REDIRECT",
-       .family         = NFPROTO_IPV4,
-       .target         = redirect_tg,
-       .targetsize     = sizeof(struct nf_nat_ipv4_multi_range_compat),
-       .table          = "nat",
-       .hooks          = (1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_OUT),
-       .checkentry     = redirect_tg_check,
-       .me             = THIS_MODULE,
-};
-
-static int __init redirect_tg_init(void)
-{
-       return xt_register_target(&redirect_tg_reg);
-}
-
-static void __exit redirect_tg_exit(void)
-{
-       xt_unregister_target(&redirect_tg_reg);
-}
-
-module_init(redirect_tg_init);
-module_exit(redirect_tg_exit);
index 1109f7f6c25433d64515180eb6c9ff599dcef0f0..b5ef3cba225046fdc142bf5029954e02fcb08df1 100644 (file)
@@ -396,8 +396,7 @@ static int __init ulog_tg_init(void)
        for (i = 0; i < ULOG_MAXNLGROUPS; i++)
                setup_timer(&ulog_buffers[i].timer, ulog_timer, i);
 
-       nflognl = netlink_kernel_create(&init_net, NETLINK_NFLOG,
-                                       THIS_MODULE, &cfg);
+       nflognl = netlink_kernel_create(&init_net, NETLINK_NFLOG, &cfg);
        if (!nflognl)
                return -ENOMEM;
 
index 31371be8174be1d8da1a50f11dd6c48409b29b74..c30130062cd6515f31d7497eaa6a403d2b1d629d 100644 (file)
@@ -85,7 +85,7 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
                        return ipv4_is_local_multicast(iph->daddr) ^ invert;
                flow.flowi4_iif = 0;
        } else {
-               flow.flowi4_iif = dev_net(par->in)->loopback_dev->ifindex;
+               flow.flowi4_iif = LOOPBACK_IFINDEX;
        }
 
        flow.daddr = iph->saddr;
index 851acec852d284bbe61f7fc9fd9b1cf2ab4bb1f1..6b3da5cf54e96d99054170c77a911e3af9e9d139 100644 (file)
@@ -69,9 +69,7 @@ static int __net_init iptable_filter_net_init(struct net *net)
        net->ipv4.iptable_filter =
                ipt_register_table(net, &packet_filter, repl);
        kfree(repl);
-       if (IS_ERR(net->ipv4.iptable_filter))
-               return PTR_ERR(net->ipv4.iptable_filter);
-       return 0;
+       return PTR_RET(net->ipv4.iptable_filter);
 }
 
 static void __net_exit iptable_filter_net_exit(struct net *net)
@@ -96,14 +94,10 @@ static int __init iptable_filter_init(void)
        filter_ops = xt_hook_link(&packet_filter, iptable_filter_hook);
        if (IS_ERR(filter_ops)) {
                ret = PTR_ERR(filter_ops);
-               goto cleanup_table;
+               unregister_pernet_subsys(&iptable_filter_net_ops);
        }
 
        return ret;
-
- cleanup_table:
-       unregister_pernet_subsys(&iptable_filter_net_ops);
-       return ret;
 }
 
 static void __exit iptable_filter_fini(void)
index aef5d1fbe77dc39b5e6b951a97897e02ead47a62..85d88f20644701f63ad9b8250d9fd391c108ee55 100644 (file)
@@ -104,9 +104,7 @@ static int __net_init iptable_mangle_net_init(struct net *net)
        net->ipv4.iptable_mangle =
                ipt_register_table(net, &packet_mangler, repl);
        kfree(repl);
-       if (IS_ERR(net->ipv4.iptable_mangle))
-               return PTR_ERR(net->ipv4.iptable_mangle);
-       return 0;
+       return PTR_RET(net->ipv4.iptable_mangle);
 }
 
 static void __net_exit iptable_mangle_net_exit(struct net *net)
@@ -131,14 +129,10 @@ static int __init iptable_mangle_init(void)
        mangle_ops = xt_hook_link(&packet_mangler, iptable_mangle_hook);
        if (IS_ERR(mangle_ops)) {
                ret = PTR_ERR(mangle_ops);
-               goto cleanup_table;
+               unregister_pernet_subsys(&iptable_mangle_net_ops);
        }
 
        return ret;
-
- cleanup_table:
-       unregister_pernet_subsys(&iptable_mangle_net_ops);
-       return ret;
 }
 
 static void __exit iptable_mangle_fini(void)
diff --git a/net/ipv4/netfilter/iptable_nat.c b/net/ipv4/netfilter/iptable_nat.c
new file mode 100644 (file)
index 0000000..9e0ffaf
--- /dev/null
@@ -0,0 +1,320 @@
+/* (C) 1999-2001 Paul `Rusty' Russell
+ * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
+ * (C) 2011 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/ip.h>
+#include <net/ip.h>
+
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nf_nat_core.h>
+#include <net/netfilter/nf_nat_l3proto.h>
+
+static const struct xt_table nf_nat_ipv4_table = {
+       .name           = "nat",
+       .valid_hooks    = (1 << NF_INET_PRE_ROUTING) |
+                         (1 << NF_INET_POST_ROUTING) |
+                         (1 << NF_INET_LOCAL_OUT) |
+                         (1 << NF_INET_LOCAL_IN),
+       .me             = THIS_MODULE,
+       .af             = NFPROTO_IPV4,
+};
+
+static unsigned int alloc_null_binding(struct nf_conn *ct, unsigned int hooknum)
+{
+       /* Force range to this IP; let proto decide mapping for
+        * per-proto parts (hence not IP_NAT_RANGE_PROTO_SPECIFIED).
+        */
+       struct nf_nat_range range;
+
+       range.flags = 0;
+       pr_debug("Allocating NULL binding for %p (%pI4)\n", ct,
+                HOOK2MANIP(hooknum) == NF_NAT_MANIP_SRC ?
+                &ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip :
+                &ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip);
+
+       return nf_nat_setup_info(ct, &range, HOOK2MANIP(hooknum));
+}
+
+static unsigned int nf_nat_rule_find(struct sk_buff *skb, unsigned int hooknum,
+                                    const struct net_device *in,
+                                    const struct net_device *out,
+                                    struct nf_conn *ct)
+{
+       struct net *net = nf_ct_net(ct);
+       unsigned int ret;
+
+       ret = ipt_do_table(skb, hooknum, in, out, net->ipv4.nat_table);
+       if (ret == NF_ACCEPT) {
+               if (!nf_nat_initialized(ct, HOOK2MANIP(hooknum)))
+                       ret = alloc_null_binding(ct, hooknum);
+       }
+       return ret;
+}
+
+static unsigned int
+nf_nat_ipv4_fn(unsigned int hooknum,
+              struct sk_buff *skb,
+              const struct net_device *in,
+              const struct net_device *out,
+              int (*okfn)(struct sk_buff *))
+{
+       struct nf_conn *ct;
+       enum ip_conntrack_info ctinfo;
+       struct nf_conn_nat *nat;
+       /* maniptype == SRC for postrouting. */
+       enum nf_nat_manip_type maniptype = HOOK2MANIP(hooknum);
+
+       /* We never see fragments: conntrack defrags on pre-routing
+        * and local-out, and nf_nat_out protects post-routing.
+        */
+       NF_CT_ASSERT(!ip_is_fragment(ip_hdr(skb)));
+
+       ct = nf_ct_get(skb, &ctinfo);
+       /* Can't track?  It's not due to stress, or conntrack would
+        * have dropped it.  Hence it's the user's responsibilty to
+        * packet filter it out, or implement conntrack/NAT for that
+        * protocol. 8) --RR
+        */
+       if (!ct)
+               return NF_ACCEPT;
+
+       /* Don't try to NAT if this packet is not conntracked */
+       if (nf_ct_is_untracked(ct))
+               return NF_ACCEPT;
+
+       nat = nfct_nat(ct);
+       if (!nat) {
+               /* NAT module was loaded late. */
+               if (nf_ct_is_confirmed(ct))
+                       return NF_ACCEPT;
+               nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);
+               if (nat == NULL) {
+                       pr_debug("failed to add NAT extension\n");
+                       return NF_ACCEPT;
+               }
+       }
+
+       switch (ctinfo) {
+       case IP_CT_RELATED:
+       case IP_CT_RELATED_REPLY:
+               if (ip_hdr(skb)->protocol == IPPROTO_ICMP) {
+                       if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo,
+                                                          hooknum))
+                               return NF_DROP;
+                       else
+                               return NF_ACCEPT;
+               }
+               /* Fall thru... (Only ICMPs can be IP_CT_IS_REPLY) */
+       case IP_CT_NEW:
+               /* Seen it before?  This can happen for loopback, retrans,
+                * or local packets.
+                */
+               if (!nf_nat_initialized(ct, maniptype)) {
+                       unsigned int ret;
+
+                       ret = nf_nat_rule_find(skb, hooknum, in, out, ct);
+                       if (ret != NF_ACCEPT)
+                               return ret;
+               } else
+                       pr_debug("Already setup manip %s for ct %p\n",
+                                maniptype == NF_NAT_MANIP_SRC ? "SRC" : "DST",
+                                ct);
+               break;
+
+       default:
+               /* ESTABLISHED */
+               NF_CT_ASSERT(ctinfo == IP_CT_ESTABLISHED ||
+                            ctinfo == IP_CT_ESTABLISHED_REPLY);
+       }
+
+       return nf_nat_packet(ct, ctinfo, hooknum, skb);
+}
+
+static unsigned int
+nf_nat_ipv4_in(unsigned int hooknum,
+              struct sk_buff *skb,
+              const struct net_device *in,
+              const struct net_device *out,
+              int (*okfn)(struct sk_buff *))
+{
+       unsigned int ret;
+       __be32 daddr = ip_hdr(skb)->daddr;
+
+       ret = nf_nat_ipv4_fn(hooknum, skb, in, out, okfn);
+       if (ret != NF_DROP && ret != NF_STOLEN &&
+           daddr != ip_hdr(skb)->daddr)
+               skb_dst_drop(skb);
+
+       return ret;
+}
+
+static unsigned int
+nf_nat_ipv4_out(unsigned int hooknum,
+               struct sk_buff *skb,
+               const struct net_device *in,
+               const struct net_device *out,
+               int (*okfn)(struct sk_buff *))
+{
+#ifdef CONFIG_XFRM
+       const struct nf_conn *ct;
+       enum ip_conntrack_info ctinfo;
+#endif
+       unsigned int ret;
+
+       /* root is playing with raw sockets. */
+       if (skb->len < sizeof(struct iphdr) ||
+           ip_hdrlen(skb) < sizeof(struct iphdr))
+               return NF_ACCEPT;
+
+       ret = nf_nat_ipv4_fn(hooknum, skb, in, out, okfn);
+#ifdef CONFIG_XFRM
+       if (ret != NF_DROP && ret != NF_STOLEN &&
+           !(IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) &&
+           (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
+               enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+
+               if ((ct->tuplehash[dir].tuple.src.u3.ip !=
+                    ct->tuplehash[!dir].tuple.dst.u3.ip) ||
+                   (ct->tuplehash[dir].tuple.src.u.all !=
+                    ct->tuplehash[!dir].tuple.dst.u.all))
+                       if (nf_xfrm_me_harder(skb, AF_INET) < 0)
+                               ret = NF_DROP;
+       }
+#endif
+       return ret;
+}
+
+static unsigned int
+nf_nat_ipv4_local_fn(unsigned int hooknum,
+                    struct sk_buff *skb,
+                    const struct net_device *in,
+                    const struct net_device *out,
+                    int (*okfn)(struct sk_buff *))
+{
+       const struct nf_conn *ct;
+       enum ip_conntrack_info ctinfo;
+       unsigned int ret;
+
+       /* root is playing with raw sockets. */
+       if (skb->len < sizeof(struct iphdr) ||
+           ip_hdrlen(skb) < sizeof(struct iphdr))
+               return NF_ACCEPT;
+
+       ret = nf_nat_ipv4_fn(hooknum, skb, in, out, okfn);
+       if (ret != NF_DROP && ret != NF_STOLEN &&
+           (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
+               enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+
+               if (ct->tuplehash[dir].tuple.dst.u3.ip !=
+                   ct->tuplehash[!dir].tuple.src.u3.ip) {
+                       if (ip_route_me_harder(skb, RTN_UNSPEC))
+                               ret = NF_DROP;
+               }
+#ifdef CONFIG_XFRM
+               else if (!(IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) &&
+                        ct->tuplehash[dir].tuple.dst.u.all !=
+                        ct->tuplehash[!dir].tuple.src.u.all)
+                       if (nf_xfrm_me_harder(skb, AF_INET) < 0)
+                               ret = NF_DROP;
+#endif
+       }
+       return ret;
+}
+
+static struct nf_hook_ops nf_nat_ipv4_ops[] __read_mostly = {
+       /* Before packet filtering, change destination */
+       {
+               .hook           = nf_nat_ipv4_in,
+               .owner          = THIS_MODULE,
+               .pf             = NFPROTO_IPV4,
+               .hooknum        = NF_INET_PRE_ROUTING,
+               .priority       = NF_IP_PRI_NAT_DST,
+       },
+       /* After packet filtering, change source */
+       {
+               .hook           = nf_nat_ipv4_out,
+               .owner          = THIS_MODULE,
+               .pf             = NFPROTO_IPV4,
+               .hooknum        = NF_INET_POST_ROUTING,
+               .priority       = NF_IP_PRI_NAT_SRC,
+       },
+       /* Before packet filtering, change destination */
+       {
+               .hook           = nf_nat_ipv4_local_fn,
+               .owner          = THIS_MODULE,
+               .pf             = NFPROTO_IPV4,
+               .hooknum        = NF_INET_LOCAL_OUT,
+               .priority       = NF_IP_PRI_NAT_DST,
+       },
+       /* After packet filtering, change source */
+       {
+               .hook           = nf_nat_ipv4_fn,
+               .owner          = THIS_MODULE,
+               .pf             = NFPROTO_IPV4,
+               .hooknum        = NF_INET_LOCAL_IN,
+               .priority       = NF_IP_PRI_NAT_SRC,
+       },
+};
+
+static int __net_init iptable_nat_net_init(struct net *net)
+{
+       struct ipt_replace *repl;
+
+       repl = ipt_alloc_initial_table(&nf_nat_ipv4_table);
+       if (repl == NULL)
+               return -ENOMEM;
+       net->ipv4.nat_table = ipt_register_table(net, &nf_nat_ipv4_table, repl);
+       kfree(repl);
+       if (IS_ERR(net->ipv4.nat_table))
+               return PTR_ERR(net->ipv4.nat_table);
+       return 0;
+}
+
+static void __net_exit iptable_nat_net_exit(struct net *net)
+{
+       ipt_unregister_table(net, net->ipv4.nat_table);
+}
+
+static struct pernet_operations iptable_nat_net_ops = {
+       .init   = iptable_nat_net_init,
+       .exit   = iptable_nat_net_exit,
+};
+
+static int __init iptable_nat_init(void)
+{
+       int err;
+
+       err = register_pernet_subsys(&iptable_nat_net_ops);
+       if (err < 0)
+               goto err1;
+
+       err = nf_register_hooks(nf_nat_ipv4_ops, ARRAY_SIZE(nf_nat_ipv4_ops));
+       if (err < 0)
+               goto err2;
+       return 0;
+
+err2:
+       unregister_pernet_subsys(&iptable_nat_net_ops);
+err1:
+       return err;
+}
+
+static void __exit iptable_nat_exit(void)
+{
+       nf_unregister_hooks(nf_nat_ipv4_ops, ARRAY_SIZE(nf_nat_ipv4_ops));
+       unregister_pernet_subsys(&iptable_nat_net_ops);
+}
+
+module_init(iptable_nat_init);
+module_exit(iptable_nat_exit);
+
+MODULE_LICENSE("GPL");
index 07fb710cd722f329ea297b764f50cfeb0ad8175e..03d9696d3c6eb27b24eed32ad3fb0c4e069d9bf3 100644 (file)
@@ -48,9 +48,7 @@ static int __net_init iptable_raw_net_init(struct net *net)
        net->ipv4.iptable_raw =
                ipt_register_table(net, &packet_raw, repl);
        kfree(repl);
-       if (IS_ERR(net->ipv4.iptable_raw))
-               return PTR_ERR(net->ipv4.iptable_raw);
-       return 0;
+       return PTR_RET(net->ipv4.iptable_raw);
 }
 
 static void __net_exit iptable_raw_net_exit(struct net *net)
@@ -75,14 +73,10 @@ static int __init iptable_raw_init(void)
        rawtable_ops = xt_hook_link(&packet_raw, iptable_raw_hook);
        if (IS_ERR(rawtable_ops)) {
                ret = PTR_ERR(rawtable_ops);
-               goto cleanup_table;
+               unregister_pernet_subsys(&iptable_raw_net_ops);
        }
 
        return ret;
-
- cleanup_table:
-       unregister_pernet_subsys(&iptable_raw_net_ops);
-       return ret;
 }
 
 static void __exit iptable_raw_fini(void)
index be45bdc4c60251a0936e8e5f7d0c6ea56d0e6eec..b283d8e2601abfadb80c1024fc7558862784f7b2 100644 (file)
@@ -66,10 +66,7 @@ static int __net_init iptable_security_net_init(struct net *net)
        net->ipv4.iptable_security =
                ipt_register_table(net, &security_table, repl);
        kfree(repl);
-       if (IS_ERR(net->ipv4.iptable_security))
-               return PTR_ERR(net->ipv4.iptable_security);
-
-       return 0;
+       return PTR_RET(net->ipv4.iptable_security);
 }
 
 static void __net_exit iptable_security_net_exit(struct net *net)
index e7ff2dcab6cec0fdd82cfb5ce804e172b67adc1f..fcdd0c2406e6d85d888633222e6697bd2352db86 100644 (file)
 #include <net/netfilter/ipv4/nf_defrag_ipv4.h>
 #include <net/netfilter/nf_log.h>
 
-int (*nf_nat_seq_adjust_hook)(struct sk_buff *skb,
-                             struct nf_conn *ct,
-                             enum ip_conntrack_info ctinfo);
-EXPORT_SYMBOL_GPL(nf_nat_seq_adjust_hook);
-
 static bool ipv4_pkt_to_tuple(const struct sk_buff *skb, unsigned int nhoff,
                              struct nf_conntrack_tuple *tuple)
 {
@@ -149,7 +144,8 @@ static unsigned int ipv4_confirm(unsigned int hooknum,
                typeof(nf_nat_seq_adjust_hook) seq_adjust;
 
                seq_adjust = rcu_dereference(nf_nat_seq_adjust_hook);
-               if (!seq_adjust || !seq_adjust(skb, ct, ctinfo)) {
+               if (!seq_adjust ||
+                   !seq_adjust(skb, ct, ctinfo, ip_hdrlen(skb))) {
                        NF_CT_STAT_INC_ATOMIC(nf_ct_net(ct), drop);
                        return NF_DROP;
                }
diff --git a/net/ipv4/netfilter/nf_nat_amanda.c b/net/ipv4/netfilter/nf_nat_amanda.c
deleted file mode 100644 (file)
index 3c04d24..0000000
+++ /dev/null
@@ -1,85 +0,0 @@
-/* Amanda extension for TCP NAT alteration.
- * (C) 2002 by Brian J. Murrell <netfilter@interlinx.bc.ca>
- * based on a copy of HW's ip_nat_irc.c as well as other modules
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/skbuff.h>
-#include <linux/udp.h>
-
-#include <net/netfilter/nf_conntrack_helper.h>
-#include <net/netfilter/nf_conntrack_expect.h>
-#include <net/netfilter/nf_nat_helper.h>
-#include <net/netfilter/nf_nat_rule.h>
-#include <linux/netfilter/nf_conntrack_amanda.h>
-
-MODULE_AUTHOR("Brian J. Murrell <netfilter@interlinx.bc.ca>");
-MODULE_DESCRIPTION("Amanda NAT helper");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("ip_nat_amanda");
-
-static unsigned int help(struct sk_buff *skb,
-                        enum ip_conntrack_info ctinfo,
-                        unsigned int matchoff,
-                        unsigned int matchlen,
-                        struct nf_conntrack_expect *exp)
-{
-       char buffer[sizeof("65535")];
-       u_int16_t port;
-       unsigned int ret;
-
-       /* Connection comes from client. */
-       exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port;
-       exp->dir = IP_CT_DIR_ORIGINAL;
-
-       /* When you see the packet, we need to NAT it the same as the
-        * this one (ie. same IP: it will be TCP and master is UDP). */
-       exp->expectfn = nf_nat_follow_master;
-
-       /* Try to get same port: if not, try to change it. */
-       for (port = ntohs(exp->saved_proto.tcp.port); port != 0; port++) {
-               int res;
-
-               exp->tuple.dst.u.tcp.port = htons(port);
-               res = nf_ct_expect_related(exp);
-               if (res == 0)
-                       break;
-               else if (res != -EBUSY) {
-                       port = 0;
-                       break;
-               }
-       }
-
-       if (port == 0)
-               return NF_DROP;
-
-       sprintf(buffer, "%u", port);
-       ret = nf_nat_mangle_udp_packet(skb, exp->master, ctinfo,
-                                      matchoff, matchlen,
-                                      buffer, strlen(buffer));
-       if (ret != NF_ACCEPT)
-               nf_ct_unexpect_related(exp);
-       return ret;
-}
-
-static void __exit nf_nat_amanda_fini(void)
-{
-       RCU_INIT_POINTER(nf_nat_amanda_hook, NULL);
-       synchronize_rcu();
-}
-
-static int __init nf_nat_amanda_init(void)
-{
-       BUG_ON(nf_nat_amanda_hook != NULL);
-       RCU_INIT_POINTER(nf_nat_amanda_hook, help);
-       return 0;
-}
-
-module_init(nf_nat_amanda_init);
-module_exit(nf_nat_amanda_fini);
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
deleted file mode 100644 (file)
index 44b082f..0000000
+++ /dev/null
@@ -1,763 +0,0 @@
-/* NAT for netfilter; shared with compatibility layer. */
-
-/* (C) 1999-2001 Paul `Rusty' Russell
- * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/timer.h>
-#include <linux/skbuff.h>
-#include <linux/gfp.h>
-#include <net/checksum.h>
-#include <net/icmp.h>
-#include <net/ip.h>
-#include <net/tcp.h>  /* For tcp_prot in getorigdst */
-#include <linux/icmp.h>
-#include <linux/udp.h>
-#include <linux/jhash.h>
-
-#include <linux/netfilter_ipv4.h>
-#include <net/netfilter/nf_conntrack.h>
-#include <net/netfilter/nf_conntrack_core.h>
-#include <net/netfilter/nf_nat.h>
-#include <net/netfilter/nf_nat_protocol.h>
-#include <net/netfilter/nf_nat_core.h>
-#include <net/netfilter/nf_nat_helper.h>
-#include <net/netfilter/nf_conntrack_helper.h>
-#include <net/netfilter/nf_conntrack_l3proto.h>
-#include <net/netfilter/nf_conntrack_zones.h>
-
-static DEFINE_SPINLOCK(nf_nat_lock);
-
-static struct nf_conntrack_l3proto *l3proto __read_mostly;
-
-#define MAX_IP_NAT_PROTO 256
-static const struct nf_nat_protocol __rcu *nf_nat_protos[MAX_IP_NAT_PROTO]
-                                               __read_mostly;
-
-static inline const struct nf_nat_protocol *
-__nf_nat_proto_find(u_int8_t protonum)
-{
-       return rcu_dereference(nf_nat_protos[protonum]);
-}
-
-/* We keep an extra hash for each conntrack, for fast searching. */
-static inline unsigned int
-hash_by_src(const struct net *net, u16 zone,
-           const struct nf_conntrack_tuple *tuple)
-{
-       unsigned int hash;
-
-       /* Original src, to ensure we map it consistently if poss. */
-       hash = jhash_3words((__force u32)tuple->src.u3.ip,
-                           (__force u32)tuple->src.u.all ^ zone,
-                           tuple->dst.protonum, nf_conntrack_hash_rnd);
-       return ((u64)hash * net->ipv4.nat_htable_size) >> 32;
-}
-
-/* Is this tuple already taken? (not by us) */
-int
-nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple,
-                 const struct nf_conn *ignored_conntrack)
-{
-       /* Conntrack tracking doesn't keep track of outgoing tuples; only
-          incoming ones.  NAT means they don't have a fixed mapping,
-          so we invert the tuple and look for the incoming reply.
-
-          We could keep a separate hash if this proves too slow. */
-       struct nf_conntrack_tuple reply;
-
-       nf_ct_invert_tuplepr(&reply, tuple);
-       return nf_conntrack_tuple_taken(&reply, ignored_conntrack);
-}
-EXPORT_SYMBOL(nf_nat_used_tuple);
-
-/* If we source map this tuple so reply looks like reply_tuple, will
- * that meet the constraints of range. */
-static int
-in_range(const struct nf_conntrack_tuple *tuple,
-        const struct nf_nat_ipv4_range *range)
-{
-       const struct nf_nat_protocol *proto;
-       int ret = 0;
-
-       /* If we are supposed to map IPs, then we must be in the
-          range specified, otherwise let this drag us onto a new src IP. */
-       if (range->flags & NF_NAT_RANGE_MAP_IPS) {
-               if (ntohl(tuple->src.u3.ip) < ntohl(range->min_ip) ||
-                   ntohl(tuple->src.u3.ip) > ntohl(range->max_ip))
-                       return 0;
-       }
-
-       rcu_read_lock();
-       proto = __nf_nat_proto_find(tuple->dst.protonum);
-       if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) ||
-           proto->in_range(tuple, NF_NAT_MANIP_SRC,
-                           &range->min, &range->max))
-               ret = 1;
-       rcu_read_unlock();
-
-       return ret;
-}
-
-static inline int
-same_src(const struct nf_conn *ct,
-        const struct nf_conntrack_tuple *tuple)
-{
-       const struct nf_conntrack_tuple *t;
-
-       t = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
-       return (t->dst.protonum == tuple->dst.protonum &&
-               t->src.u3.ip == tuple->src.u3.ip &&
-               t->src.u.all == tuple->src.u.all);
-}
-
-/* Only called for SRC manip */
-static int
-find_appropriate_src(struct net *net, u16 zone,
-                    const struct nf_conntrack_tuple *tuple,
-                    struct nf_conntrack_tuple *result,
-                    const struct nf_nat_ipv4_range *range)
-{
-       unsigned int h = hash_by_src(net, zone, tuple);
-       const struct nf_conn_nat *nat;
-       const struct nf_conn *ct;
-       const struct hlist_node *n;
-
-       rcu_read_lock();
-       hlist_for_each_entry_rcu(nat, n, &net->ipv4.nat_bysource[h], bysource) {
-               ct = nat->ct;
-               if (same_src(ct, tuple) && nf_ct_zone(ct) == zone) {
-                       /* Copy source part from reply tuple. */
-                       nf_ct_invert_tuplepr(result,
-                                      &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
-                       result->dst = tuple->dst;
-
-                       if (in_range(result, range)) {
-                               rcu_read_unlock();
-                               return 1;
-                       }
-               }
-       }
-       rcu_read_unlock();
-       return 0;
-}
-
-/* For [FUTURE] fragmentation handling, we want the least-used
-   src-ip/dst-ip/proto triple.  Fairness doesn't come into it.  Thus
-   if the range specifies 1.2.3.4 ports 10000-10005 and 1.2.3.5 ports
-   1-65535, we don't do pro-rata allocation based on ports; we choose
-   the ip with the lowest src-ip/dst-ip/proto usage.
-*/
-static void
-find_best_ips_proto(u16 zone, struct nf_conntrack_tuple *tuple,
-                   const struct nf_nat_ipv4_range *range,
-                   const struct nf_conn *ct,
-                   enum nf_nat_manip_type maniptype)
-{
-       __be32 *var_ipp;
-       /* Host order */
-       u_int32_t minip, maxip, j;
-
-       /* No IP mapping?  Do nothing. */
-       if (!(range->flags & NF_NAT_RANGE_MAP_IPS))
-               return;
-
-       if (maniptype == NF_NAT_MANIP_SRC)
-               var_ipp = &tuple->src.u3.ip;
-       else
-               var_ipp = &tuple->dst.u3.ip;
-
-       /* Fast path: only one choice. */
-       if (range->min_ip == range->max_ip) {
-               *var_ipp = range->min_ip;
-               return;
-       }
-
-       /* Hashing source and destination IPs gives a fairly even
-        * spread in practice (if there are a small number of IPs
-        * involved, there usually aren't that many connections
-        * anyway).  The consistency means that servers see the same
-        * client coming from the same IP (some Internet Banking sites
-        * like this), even across reboots. */
-       minip = ntohl(range->min_ip);
-       maxip = ntohl(range->max_ip);
-       j = jhash_2words((__force u32)tuple->src.u3.ip,
-                        range->flags & NF_NAT_RANGE_PERSISTENT ?
-                               0 : (__force u32)tuple->dst.u3.ip ^ zone, 0);
-       j = ((u64)j * (maxip - minip + 1)) >> 32;
-       *var_ipp = htonl(minip + j);
-}
-
-/* Manipulate the tuple into the range given.  For NF_INET_POST_ROUTING,
- * we change the source to map into the range.  For NF_INET_PRE_ROUTING
- * and NF_INET_LOCAL_OUT, we change the destination to map into the
- * range.  It might not be possible to get a unique tuple, but we try.
- * At worst (or if we race), we will end up with a final duplicate in
- * __ip_conntrack_confirm and drop the packet. */
-static void
-get_unique_tuple(struct nf_conntrack_tuple *tuple,
-                const struct nf_conntrack_tuple *orig_tuple,
-                const struct nf_nat_ipv4_range *range,
-                struct nf_conn *ct,
-                enum nf_nat_manip_type maniptype)
-{
-       struct net *net = nf_ct_net(ct);
-       const struct nf_nat_protocol *proto;
-       u16 zone = nf_ct_zone(ct);
-
-       /* 1) If this srcip/proto/src-proto-part is currently mapped,
-          and that same mapping gives a unique tuple within the given
-          range, use that.
-
-          This is only required for source (ie. NAT/masq) mappings.
-          So far, we don't do local source mappings, so multiple
-          manips not an issue.  */
-       if (maniptype == NF_NAT_MANIP_SRC &&
-           !(range->flags & NF_NAT_RANGE_PROTO_RANDOM)) {
-               /* try the original tuple first */
-               if (in_range(orig_tuple, range)) {
-                       if (!nf_nat_used_tuple(orig_tuple, ct)) {
-                               *tuple = *orig_tuple;
-                               return;
-                       }
-               } else if (find_appropriate_src(net, zone, orig_tuple, tuple,
-                          range)) {
-                       pr_debug("get_unique_tuple: Found current src map\n");
-                       if (!nf_nat_used_tuple(tuple, ct))
-                               return;
-               }
-       }
-
-       /* 2) Select the least-used IP/proto combination in the given
-          range. */
-       *tuple = *orig_tuple;
-       find_best_ips_proto(zone, tuple, range, ct, maniptype);
-
-       /* 3) The per-protocol part of the manip is made to map into
-          the range to make a unique tuple. */
-
-       rcu_read_lock();
-       proto = __nf_nat_proto_find(orig_tuple->dst.protonum);
-
-       /* Only bother mapping if it's not already in range and unique */
-       if (!(range->flags & NF_NAT_RANGE_PROTO_RANDOM)) {
-               if (range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) {
-                       if (proto->in_range(tuple, maniptype, &range->min,
-                                           &range->max) &&
-                           (range->min.all == range->max.all ||
-                            !nf_nat_used_tuple(tuple, ct)))
-                               goto out;
-               } else if (!nf_nat_used_tuple(tuple, ct)) {
-                       goto out;
-               }
-       }
-
-       /* Last change: get protocol to try to obtain unique tuple. */
-       proto->unique_tuple(tuple, range, maniptype, ct);
-out:
-       rcu_read_unlock();
-}
-
-unsigned int
-nf_nat_setup_info(struct nf_conn *ct,
-                 const struct nf_nat_ipv4_range *range,
-                 enum nf_nat_manip_type maniptype)
-{
-       struct net *net = nf_ct_net(ct);
-       struct nf_conntrack_tuple curr_tuple, new_tuple;
-       struct nf_conn_nat *nat;
-
-       /* nat helper or nfctnetlink also setup binding */
-       nat = nfct_nat(ct);
-       if (!nat) {
-               nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);
-               if (nat == NULL) {
-                       pr_debug("failed to add NAT extension\n");
-                       return NF_ACCEPT;
-               }
-       }
-
-       NF_CT_ASSERT(maniptype == NF_NAT_MANIP_SRC ||
-                    maniptype == NF_NAT_MANIP_DST);
-       BUG_ON(nf_nat_initialized(ct, maniptype));
-
-       /* What we've got will look like inverse of reply. Normally
-          this is what is in the conntrack, except for prior
-          manipulations (future optimization: if num_manips == 0,
-          orig_tp =
-          conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple) */
-       nf_ct_invert_tuplepr(&curr_tuple,
-                            &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
-
-       get_unique_tuple(&new_tuple, &curr_tuple, range, ct, maniptype);
-
-       if (!nf_ct_tuple_equal(&new_tuple, &curr_tuple)) {
-               struct nf_conntrack_tuple reply;
-
-               /* Alter conntrack table so will recognize replies. */
-               nf_ct_invert_tuplepr(&reply, &new_tuple);
-               nf_conntrack_alter_reply(ct, &reply);
-
-               /* Non-atomic: we own this at the moment. */
-               if (maniptype == NF_NAT_MANIP_SRC)
-                       ct->status |= IPS_SRC_NAT;
-               else
-                       ct->status |= IPS_DST_NAT;
-       }
-
-       if (maniptype == NF_NAT_MANIP_SRC) {
-               unsigned int srchash;
-
-               srchash = hash_by_src(net, nf_ct_zone(ct),
-                                     &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
-               spin_lock_bh(&nf_nat_lock);
-               /* nf_conntrack_alter_reply might re-allocate extension area */
-               nat = nfct_nat(ct);
-               nat->ct = ct;
-               hlist_add_head_rcu(&nat->bysource,
-                                  &net->ipv4.nat_bysource[srchash]);
-               spin_unlock_bh(&nf_nat_lock);
-       }
-
-       /* It's done. */
-       if (maniptype == NF_NAT_MANIP_DST)
-               ct->status |= IPS_DST_NAT_DONE;
-       else
-               ct->status |= IPS_SRC_NAT_DONE;
-
-       return NF_ACCEPT;
-}
-EXPORT_SYMBOL(nf_nat_setup_info);
-
-/* Returns true if succeeded. */
-static bool
-manip_pkt(u_int16_t proto,
-         struct sk_buff *skb,
-         unsigned int iphdroff,
-         const struct nf_conntrack_tuple *target,
-         enum nf_nat_manip_type maniptype)
-{
-       struct iphdr *iph;
-       const struct nf_nat_protocol *p;
-
-       if (!skb_make_writable(skb, iphdroff + sizeof(*iph)))
-               return false;
-
-       iph = (void *)skb->data + iphdroff;
-
-       /* Manipulate protcol part. */
-
-       /* rcu_read_lock()ed by nf_hook_slow */
-       p = __nf_nat_proto_find(proto);
-       if (!p->manip_pkt(skb, iphdroff, target, maniptype))
-               return false;
-
-       iph = (void *)skb->data + iphdroff;
-
-       if (maniptype == NF_NAT_MANIP_SRC) {
-               csum_replace4(&iph->check, iph->saddr, target->src.u3.ip);
-               iph->saddr = target->src.u3.ip;
-       } else {
-               csum_replace4(&iph->check, iph->daddr, target->dst.u3.ip);
-               iph->daddr = target->dst.u3.ip;
-       }
-       return true;
-}
-
-/* Do packet manipulations according to nf_nat_setup_info. */
-unsigned int nf_nat_packet(struct nf_conn *ct,
-                          enum ip_conntrack_info ctinfo,
-                          unsigned int hooknum,
-                          struct sk_buff *skb)
-{
-       enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
-       unsigned long statusbit;
-       enum nf_nat_manip_type mtype = HOOK2MANIP(hooknum);
-
-       if (mtype == NF_NAT_MANIP_SRC)
-               statusbit = IPS_SRC_NAT;
-       else
-               statusbit = IPS_DST_NAT;
-
-       /* Invert if this is reply dir. */
-       if (dir == IP_CT_DIR_REPLY)
-               statusbit ^= IPS_NAT_MASK;
-
-       /* Non-atomic: these bits don't change. */
-       if (ct->status & statusbit) {
-               struct nf_conntrack_tuple target;
-
-               /* We are aiming to look like inverse of other direction. */
-               nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);
-
-               if (!manip_pkt(target.dst.protonum, skb, 0, &target, mtype))
-                       return NF_DROP;
-       }
-       return NF_ACCEPT;
-}
-EXPORT_SYMBOL_GPL(nf_nat_packet);
-
-/* Dir is direction ICMP is coming from (opposite to packet it contains) */
-int nf_nat_icmp_reply_translation(struct nf_conn *ct,
-                                 enum ip_conntrack_info ctinfo,
-                                 unsigned int hooknum,
-                                 struct sk_buff *skb)
-{
-       struct {
-               struct icmphdr icmp;
-               struct iphdr ip;
-       } *inside;
-       struct nf_conntrack_tuple target;
-       int hdrlen = ip_hdrlen(skb);
-       enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
-       unsigned long statusbit;
-       enum nf_nat_manip_type manip = HOOK2MANIP(hooknum);
-
-       if (!skb_make_writable(skb, hdrlen + sizeof(*inside)))
-               return 0;
-
-       inside = (void *)skb->data + hdrlen;
-
-       /* We're actually going to mangle it beyond trivial checksum
-          adjustment, so make sure the current checksum is correct. */
-       if (nf_ip_checksum(skb, hooknum, hdrlen, 0))
-               return 0;
-
-       /* Must be RELATED */
-       NF_CT_ASSERT(skb->nfctinfo == IP_CT_RELATED ||
-                    skb->nfctinfo == IP_CT_RELATED_REPLY);
-
-       /* Redirects on non-null nats must be dropped, else they'll
-          start talking to each other without our translation, and be
-          confused... --RR */
-       if (inside->icmp.type == ICMP_REDIRECT) {
-               /* If NAT isn't finished, assume it and drop. */
-               if ((ct->status & IPS_NAT_DONE_MASK) != IPS_NAT_DONE_MASK)
-                       return 0;
-
-               if (ct->status & IPS_NAT_MASK)
-                       return 0;
-       }
-
-       if (manip == NF_NAT_MANIP_SRC)
-               statusbit = IPS_SRC_NAT;
-       else
-               statusbit = IPS_DST_NAT;
-
-       /* Invert if this is reply dir. */
-       if (dir == IP_CT_DIR_REPLY)
-               statusbit ^= IPS_NAT_MASK;
-
-       if (!(ct->status & statusbit))
-               return 1;
-
-       pr_debug("icmp_reply_translation: translating error %p manip %u "
-                "dir %s\n", skb, manip,
-                dir == IP_CT_DIR_ORIGINAL ? "ORIG" : "REPLY");
-
-       /* Change inner back to look like incoming packet.  We do the
-          opposite manip on this hook to normal, because it might not
-          pass all hooks (locally-generated ICMP).  Consider incoming
-          packet: PREROUTING (DST manip), routing produces ICMP, goes
-          through POSTROUTING (which must correct the DST manip). */
-       if (!manip_pkt(inside->ip.protocol, skb, hdrlen + sizeof(inside->icmp),
-                      &ct->tuplehash[!dir].tuple, !manip))
-               return 0;
-
-       if (skb->ip_summed != CHECKSUM_PARTIAL) {
-               /* Reloading "inside" here since manip_pkt inner. */
-               inside = (void *)skb->data + hdrlen;
-               inside->icmp.checksum = 0;
-               inside->icmp.checksum =
-                       csum_fold(skb_checksum(skb, hdrlen,
-                                              skb->len - hdrlen, 0));
-       }
-
-       /* Change outer to look the reply to an incoming packet
-        * (proto 0 means don't invert per-proto part). */
-       nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);
-       if (!manip_pkt(0, skb, 0, &target, manip))
-               return 0;
-
-       return 1;
-}
-EXPORT_SYMBOL_GPL(nf_nat_icmp_reply_translation);
-
-/* Protocol registration. */
-int nf_nat_protocol_register(const struct nf_nat_protocol *proto)
-{
-       int ret = 0;
-
-       spin_lock_bh(&nf_nat_lock);
-       if (rcu_dereference_protected(
-                       nf_nat_protos[proto->protonum],
-                       lockdep_is_held(&nf_nat_lock)
-                       ) != &nf_nat_unknown_protocol) {
-               ret = -EBUSY;
-               goto out;
-       }
-       RCU_INIT_POINTER(nf_nat_protos[proto->protonum], proto);
- out:
-       spin_unlock_bh(&nf_nat_lock);
-       return ret;
-}
-EXPORT_SYMBOL(nf_nat_protocol_register);
-
-/* No one stores the protocol anywhere; simply delete it. */
-void nf_nat_protocol_unregister(const struct nf_nat_protocol *proto)
-{
-       spin_lock_bh(&nf_nat_lock);
-       RCU_INIT_POINTER(nf_nat_protos[proto->protonum],
-                          &nf_nat_unknown_protocol);
-       spin_unlock_bh(&nf_nat_lock);
-       synchronize_rcu();
-}
-EXPORT_SYMBOL(nf_nat_protocol_unregister);
-
-/* No one using conntrack by the time this called. */
-static void nf_nat_cleanup_conntrack(struct nf_conn *ct)
-{
-       struct nf_conn_nat *nat = nf_ct_ext_find(ct, NF_CT_EXT_NAT);
-
-       if (nat == NULL || nat->ct == NULL)
-               return;
-
-       NF_CT_ASSERT(nat->ct->status & IPS_SRC_NAT_DONE);
-
-       spin_lock_bh(&nf_nat_lock);
-       hlist_del_rcu(&nat->bysource);
-       spin_unlock_bh(&nf_nat_lock);
-}
-
-static void nf_nat_move_storage(void *new, void *old)
-{
-       struct nf_conn_nat *new_nat = new;
-       struct nf_conn_nat *old_nat = old;
-       struct nf_conn *ct = old_nat->ct;
-
-       if (!ct || !(ct->status & IPS_SRC_NAT_DONE))
-               return;
-
-       spin_lock_bh(&nf_nat_lock);
-       hlist_replace_rcu(&old_nat->bysource, &new_nat->bysource);
-       spin_unlock_bh(&nf_nat_lock);
-}
-
-static struct nf_ct_ext_type nat_extend __read_mostly = {
-       .len            = sizeof(struct nf_conn_nat),
-       .align          = __alignof__(struct nf_conn_nat),
-       .destroy        = nf_nat_cleanup_conntrack,
-       .move           = nf_nat_move_storage,
-       .id             = NF_CT_EXT_NAT,
-       .flags          = NF_CT_EXT_F_PREALLOC,
-};
-
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
-
-#include <linux/netfilter/nfnetlink.h>
-#include <linux/netfilter/nfnetlink_conntrack.h>
-
-static const struct nla_policy protonat_nla_policy[CTA_PROTONAT_MAX+1] = {
-       [CTA_PROTONAT_PORT_MIN] = { .type = NLA_U16 },
-       [CTA_PROTONAT_PORT_MAX] = { .type = NLA_U16 },
-};
-
-static int nfnetlink_parse_nat_proto(struct nlattr *attr,
-                                    const struct nf_conn *ct,
-                                    struct nf_nat_ipv4_range *range)
-{
-       struct nlattr *tb[CTA_PROTONAT_MAX+1];
-       const struct nf_nat_protocol *npt;
-       int err;
-
-       err = nla_parse_nested(tb, CTA_PROTONAT_MAX, attr, protonat_nla_policy);
-       if (err < 0)
-               return err;
-
-       rcu_read_lock();
-       npt = __nf_nat_proto_find(nf_ct_protonum(ct));
-       if (npt->nlattr_to_range)
-               err = npt->nlattr_to_range(tb, range);
-       rcu_read_unlock();
-       return err;
-}
-
-static const struct nla_policy nat_nla_policy[CTA_NAT_MAX+1] = {
-       [CTA_NAT_MINIP]         = { .type = NLA_U32 },
-       [CTA_NAT_MAXIP]         = { .type = NLA_U32 },
-       [CTA_NAT_PROTO]         = { .type = NLA_NESTED },
-};
-
-static int
-nfnetlink_parse_nat(const struct nlattr *nat,
-                   const struct nf_conn *ct, struct nf_nat_ipv4_range *range)
-{
-       struct nlattr *tb[CTA_NAT_MAX+1];
-       int err;
-
-       memset(range, 0, sizeof(*range));
-
-       err = nla_parse_nested(tb, CTA_NAT_MAX, nat, nat_nla_policy);
-       if (err < 0)
-               return err;
-
-       if (tb[CTA_NAT_MINIP])
-               range->min_ip = nla_get_be32(tb[CTA_NAT_MINIP]);
-
-       if (!tb[CTA_NAT_MAXIP])
-               range->max_ip = range->min_ip;
-       else
-               range->max_ip = nla_get_be32(tb[CTA_NAT_MAXIP]);
-
-       if (range->min_ip)
-               range->flags |= NF_NAT_RANGE_MAP_IPS;
-
-       if (!tb[CTA_NAT_PROTO])
-               return 0;
-
-       err = nfnetlink_parse_nat_proto(tb[CTA_NAT_PROTO], ct, range);
-       if (err < 0)
-               return err;
-
-       return 0;
-}
-
-static int
-nfnetlink_parse_nat_setup(struct nf_conn *ct,
-                         enum nf_nat_manip_type manip,
-                         const struct nlattr *attr)
-{
-       struct nf_nat_ipv4_range range;
-
-       if (nfnetlink_parse_nat(attr, ct, &range) < 0)
-               return -EINVAL;
-       if (nf_nat_initialized(ct, manip))
-               return -EEXIST;
-
-       return nf_nat_setup_info(ct, &range, manip);
-}
-#else
-static int
-nfnetlink_parse_nat_setup(struct nf_conn *ct,
-                         enum nf_nat_manip_type manip,
-                         const struct nlattr *attr)
-{
-       return -EOPNOTSUPP;
-}
-#endif
-
-static int __net_init nf_nat_net_init(struct net *net)
-{
-       /* Leave them the same for the moment. */
-       net->ipv4.nat_htable_size = net->ct.htable_size;
-       net->ipv4.nat_bysource = nf_ct_alloc_hashtable(&net->ipv4.nat_htable_size, 0);
-       if (!net->ipv4.nat_bysource)
-               return -ENOMEM;
-       return 0;
-}
-
-/* Clear NAT section of all conntracks, in case we're loaded again. */
-static int clean_nat(struct nf_conn *i, void *data)
-{
-       struct nf_conn_nat *nat = nfct_nat(i);
-
-       if (!nat)
-               return 0;
-       memset(nat, 0, sizeof(*nat));
-       i->status &= ~(IPS_NAT_MASK | IPS_NAT_DONE_MASK | IPS_SEQ_ADJUST);
-       return 0;
-}
-
-static void __net_exit nf_nat_net_exit(struct net *net)
-{
-       nf_ct_iterate_cleanup(net, &clean_nat, NULL);
-       synchronize_rcu();
-       nf_ct_free_hashtable(net->ipv4.nat_bysource, net->ipv4.nat_htable_size);
-}
-
-static struct pernet_operations nf_nat_net_ops = {
-       .init = nf_nat_net_init,
-       .exit = nf_nat_net_exit,
-};
-
-static struct nf_ct_helper_expectfn follow_master_nat = {
-       .name           = "nat-follow-master",
-       .expectfn       = nf_nat_follow_master,
-};
-
-static struct nfq_ct_nat_hook nfq_ct_nat = {
-       .seq_adjust     = nf_nat_tcp_seq_adjust,
-};
-
-static int __init nf_nat_init(void)
-{
-       size_t i;
-       int ret;
-
-       need_ipv4_conntrack();
-
-       ret = nf_ct_extend_register(&nat_extend);
-       if (ret < 0) {
-               printk(KERN_ERR "nf_nat_core: Unable to register extension\n");
-               return ret;
-       }
-
-       ret = register_pernet_subsys(&nf_nat_net_ops);
-       if (ret < 0)
-               goto cleanup_extend;
-
-       /* Sew in builtin protocols. */
-       spin_lock_bh(&nf_nat_lock);
-       for (i = 0; i < MAX_IP_NAT_PROTO; i++)
-               RCU_INIT_POINTER(nf_nat_protos[i], &nf_nat_unknown_protocol);
-       RCU_INIT_POINTER(nf_nat_protos[IPPROTO_TCP], &nf_nat_protocol_tcp);
-       RCU_INIT_POINTER(nf_nat_protos[IPPROTO_UDP], &nf_nat_protocol_udp);
-       RCU_INIT_POINTER(nf_nat_protos[IPPROTO_ICMP], &nf_nat_protocol_icmp);
-       spin_unlock_bh(&nf_nat_lock);
-
-       /* Initialize fake conntrack so that NAT will skip it */
-       nf_ct_untracked_status_or(IPS_NAT_DONE_MASK);
-
-       l3proto = nf_ct_l3proto_find_get((u_int16_t)AF_INET);
-
-       nf_ct_helper_expectfn_register(&follow_master_nat);
-
-       BUG_ON(nf_nat_seq_adjust_hook != NULL);
-       RCU_INIT_POINTER(nf_nat_seq_adjust_hook, nf_nat_seq_adjust);
-       BUG_ON(nfnetlink_parse_nat_setup_hook != NULL);
-       RCU_INIT_POINTER(nfnetlink_parse_nat_setup_hook,
-                          nfnetlink_parse_nat_setup);
-       BUG_ON(nf_ct_nat_offset != NULL);
-       RCU_INIT_POINTER(nf_ct_nat_offset, nf_nat_get_offset);
-       RCU_INIT_POINTER(nfq_ct_nat_hook, &nfq_ct_nat);
-       return 0;
-
- cleanup_extend:
-       nf_ct_extend_unregister(&nat_extend);
-       return ret;
-}
-
-static void __exit nf_nat_cleanup(void)
-{
-       unregister_pernet_subsys(&nf_nat_net_ops);
-       nf_ct_l3proto_put(l3proto);
-       nf_ct_extend_unregister(&nat_extend);
-       nf_ct_helper_expectfn_unregister(&follow_master_nat);
-       RCU_INIT_POINTER(nf_nat_seq_adjust_hook, NULL);
-       RCU_INIT_POINTER(nfnetlink_parse_nat_setup_hook, NULL);
-       RCU_INIT_POINTER(nf_ct_nat_offset, NULL);
-       RCU_INIT_POINTER(nfq_ct_nat_hook, NULL);
-       synchronize_net();
-}
-
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("nf-nat-ipv4");
-
-module_init(nf_nat_init);
-module_exit(nf_nat_cleanup);
diff --git a/net/ipv4/netfilter/nf_nat_ftp.c b/net/ipv4/netfilter/nf_nat_ftp.c
deleted file mode 100644 (file)
index e462a95..0000000
+++ /dev/null
@@ -1,137 +0,0 @@
-/* FTP extension for TCP NAT alteration. */
-
-/* (C) 1999-2001 Paul `Rusty' Russell
- * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/ip.h>
-#include <linux/tcp.h>
-#include <linux/netfilter_ipv4.h>
-#include <net/netfilter/nf_nat.h>
-#include <net/netfilter/nf_nat_helper.h>
-#include <net/netfilter/nf_nat_rule.h>
-#include <net/netfilter/nf_conntrack_helper.h>
-#include <net/netfilter/nf_conntrack_expect.h>
-#include <linux/netfilter/nf_conntrack_ftp.h>
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Rusty Russell <rusty@rustcorp.com.au>");
-MODULE_DESCRIPTION("ftp NAT helper");
-MODULE_ALIAS("ip_nat_ftp");
-
-/* FIXME: Time out? --RR */
-
-static int nf_nat_ftp_fmt_cmd(enum nf_ct_ftp_type type,
-                             char *buffer, size_t buflen,
-                             __be32 addr, u16 port)
-{
-       switch (type) {
-       case NF_CT_FTP_PORT:
-       case NF_CT_FTP_PASV:
-               return snprintf(buffer, buflen, "%u,%u,%u,%u,%u,%u",
-                               ((unsigned char *)&addr)[0],
-                               ((unsigned char *)&addr)[1],
-                               ((unsigned char *)&addr)[2],
-                               ((unsigned char *)&addr)[3],
-                               port >> 8,
-                               port & 0xFF);
-       case NF_CT_FTP_EPRT:
-               return snprintf(buffer, buflen, "|1|%pI4|%u|", &addr, port);
-       case NF_CT_FTP_EPSV:
-               return snprintf(buffer, buflen, "|||%u|", port);
-       }
-
-       return 0;
-}
-
-/* So, this packet has hit the connection tracking matching code.
-   Mangle it, and change the expectation to match the new version. */
-static unsigned int nf_nat_ftp(struct sk_buff *skb,
-                              enum ip_conntrack_info ctinfo,
-                              enum nf_ct_ftp_type type,
-                              unsigned int matchoff,
-                              unsigned int matchlen,
-                              struct nf_conntrack_expect *exp)
-{
-       __be32 newip;
-       u_int16_t port;
-       int dir = CTINFO2DIR(ctinfo);
-       struct nf_conn *ct = exp->master;
-       char buffer[sizeof("|1|255.255.255.255|65535|")];
-       unsigned int buflen;
-
-       pr_debug("FTP_NAT: type %i, off %u len %u\n", type, matchoff, matchlen);
-
-       /* Connection will come from wherever this packet goes, hence !dir */
-       newip = ct->tuplehash[!dir].tuple.dst.u3.ip;
-       exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port;
-       exp->dir = !dir;
-
-       /* When you see the packet, we need to NAT it the same as the
-        * this one. */
-       exp->expectfn = nf_nat_follow_master;
-
-       /* Try to get same port: if not, try to change it. */
-       for (port = ntohs(exp->saved_proto.tcp.port); port != 0; port++) {
-               int ret;
-
-               exp->tuple.dst.u.tcp.port = htons(port);
-               ret = nf_ct_expect_related(exp);
-               if (ret == 0)
-                       break;
-               else if (ret != -EBUSY) {
-                       port = 0;
-                       break;
-               }
-       }
-
-       if (port == 0)
-               return NF_DROP;
-
-       buflen = nf_nat_ftp_fmt_cmd(type, buffer, sizeof(buffer), newip, port);
-       if (!buflen)
-               goto out;
-
-       pr_debug("calling nf_nat_mangle_tcp_packet\n");
-
-       if (!nf_nat_mangle_tcp_packet(skb, ct, ctinfo, matchoff,
-                                     matchlen, buffer, buflen))
-               goto out;
-
-       return NF_ACCEPT;
-
-out:
-       nf_ct_unexpect_related(exp);
-       return NF_DROP;
-}
-
-static void __exit nf_nat_ftp_fini(void)
-{
-       RCU_INIT_POINTER(nf_nat_ftp_hook, NULL);
-       synchronize_rcu();
-}
-
-static int __init nf_nat_ftp_init(void)
-{
-       BUG_ON(nf_nat_ftp_hook != NULL);
-       RCU_INIT_POINTER(nf_nat_ftp_hook, nf_nat_ftp);
-       return 0;
-}
-
-/* Prior to 2.6.11, we had a ports param.  No longer, but don't break users. */
-static int warn_set(const char *val, struct kernel_param *kp)
-{
-       printk(KERN_INFO KBUILD_MODNAME
-              ": kernel >= 2.6.10 only uses 'ports' for conntrack modules\n");
-       return 0;
-}
-module_param_call(ports, warn_set, NULL, NULL, 0);
-
-module_init(nf_nat_ftp_init);
-module_exit(nf_nat_ftp_fini);
index c6784a18c1c45f3f01ce67bef5f9214a9ab82385..9c3db10b22d33862fd1ceea6382a85b57cc44e6d 100644 (file)
 
 #include <net/netfilter/nf_nat.h>
 #include <net/netfilter/nf_nat_helper.h>
-#include <net/netfilter/nf_nat_rule.h>
 #include <net/netfilter/nf_conntrack_helper.h>
 #include <net/netfilter/nf_conntrack_expect.h>
 #include <linux/netfilter/nf_conntrack_h323.h>
 
 /****************************************************************************/
-static int set_addr(struct sk_buff *skb,
+static int set_addr(struct sk_buff *skb, unsigned int protoff,
                    unsigned char **data, int dataoff,
                    unsigned int addroff, __be32 ip, __be16 port)
 {
@@ -40,7 +39,7 @@ static int set_addr(struct sk_buff *skb,
 
        if (ip_hdr(skb)->protocol == IPPROTO_TCP) {
                if (!nf_nat_mangle_tcp_packet(skb, ct, ctinfo,
-                                             addroff, sizeof(buf),
+                                             protoff, addroff, sizeof(buf),
                                              (char *) &buf, sizeof(buf))) {
                        net_notice_ratelimited("nf_nat_h323: nf_nat_mangle_tcp_packet error\n");
                        return -1;
@@ -54,7 +53,7 @@ static int set_addr(struct sk_buff *skb,
                *data = skb->data + ip_hdrlen(skb) + th->doff * 4 + dataoff;
        } else {
                if (!nf_nat_mangle_udp_packet(skb, ct, ctinfo,
-                                             addroff, sizeof(buf),
+                                             protoff, addroff, sizeof(buf),
                                              (char *) &buf, sizeof(buf))) {
                        net_notice_ratelimited("nf_nat_h323: nf_nat_mangle_udp_packet error\n");
                        return -1;
@@ -69,22 +68,22 @@ static int set_addr(struct sk_buff *skb,
 }
 
 /****************************************************************************/
-static int set_h225_addr(struct sk_buff *skb,
+static int set_h225_addr(struct sk_buff *skb, unsigned int protoff,
                         unsigned char **data, int dataoff,
                         TransportAddress *taddr,
                         union nf_inet_addr *addr, __be16 port)
 {
-       return set_addr(skb, data, dataoff, taddr->ipAddress.ip,
+       return set_addr(skb, protoff, data, dataoff, taddr->ipAddress.ip,
                        addr->ip, port);
 }
 
 /****************************************************************************/
-static int set_h245_addr(struct sk_buff *skb,
+static int set_h245_addr(struct sk_buff *skb, unsigned protoff,
                         unsigned char **data, int dataoff,
                         H245_TransportAddress *taddr,
                         union nf_inet_addr *addr, __be16 port)
 {
-       return set_addr(skb, data, dataoff,
+       return set_addr(skb, protoff, data, dataoff,
                        taddr->unicastAddress.iPAddress.network,
                        addr->ip, port);
 }
@@ -92,7 +91,7 @@ static int set_h245_addr(struct sk_buff *skb,
 /****************************************************************************/
 static int set_sig_addr(struct sk_buff *skb, struct nf_conn *ct,
                        enum ip_conntrack_info ctinfo,
-                       unsigned char **data,
+                       unsigned int protoff, unsigned char **data,
                        TransportAddress *taddr, int count)
 {
        const struct nf_ct_h323_master *info = nfct_help_data(ct);
@@ -118,7 +117,8 @@ static int set_sig_addr(struct sk_buff *skb, struct nf_conn *ct,
                                         &addr.ip, port,
                                         &ct->tuplehash[!dir].tuple.dst.u3.ip,
                                         info->sig_port[!dir]);
-                               return set_h225_addr(skb, data, 0, &taddr[i],
+                               return set_h225_addr(skb, protoff, data, 0,
+                                                    &taddr[i],
                                                     &ct->tuplehash[!dir].
                                                     tuple.dst.u3,
                                                     info->sig_port[!dir]);
@@ -129,7 +129,8 @@ static int set_sig_addr(struct sk_buff *skb, struct nf_conn *ct,
                                         &addr.ip, port,
                                         &ct->tuplehash[!dir].tuple.src.u3.ip,
                                         info->sig_port[!dir]);
-                               return set_h225_addr(skb, data, 0, &taddr[i],
+                               return set_h225_addr(skb, protoff, data, 0,
+                                                    &taddr[i],
                                                     &ct->tuplehash[!dir].
                                                     tuple.src.u3,
                                                     info->sig_port[!dir]);
@@ -143,7 +144,7 @@ static int set_sig_addr(struct sk_buff *skb, struct nf_conn *ct,
 /****************************************************************************/
 static int set_ras_addr(struct sk_buff *skb, struct nf_conn *ct,
                        enum ip_conntrack_info ctinfo,
-                       unsigned char **data,
+                       unsigned int protoff, unsigned char **data,
                        TransportAddress *taddr, int count)
 {
        int dir = CTINFO2DIR(ctinfo);
@@ -159,7 +160,7 @@ static int set_ras_addr(struct sk_buff *skb, struct nf_conn *ct,
                                 &addr.ip, ntohs(port),
                                 &ct->tuplehash[!dir].tuple.dst.u3.ip,
                                 ntohs(ct->tuplehash[!dir].tuple.dst.u.udp.port));
-                       return set_h225_addr(skb, data, 0, &taddr[i],
+                       return set_h225_addr(skb, protoff, data, 0, &taddr[i],
                                             &ct->tuplehash[!dir].tuple.dst.u3,
                                             ct->tuplehash[!dir].tuple.
                                                                dst.u.udp.port);
@@ -172,7 +173,7 @@ static int set_ras_addr(struct sk_buff *skb, struct nf_conn *ct,
 /****************************************************************************/
 static int nat_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
                        enum ip_conntrack_info ctinfo,
-                       unsigned char **data, int dataoff,
+                       unsigned int protoff, unsigned char **data, int dataoff,
                        H245_TransportAddress *taddr,
                        __be16 port, __be16 rtp_port,
                        struct nf_conntrack_expect *rtp_exp,
@@ -244,7 +245,7 @@ static int nat_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
        }
 
        /* Modify signal */
-       if (set_h245_addr(skb, data, dataoff, taddr,
+       if (set_h245_addr(skb, protoff, data, dataoff, taddr,
                          &ct->tuplehash[!dir].tuple.dst.u3,
                          htons((port & htons(1)) ? nated_port + 1 :
                                                    nated_port)) == 0) {
@@ -275,7 +276,7 @@ static int nat_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
 /****************************************************************************/
 static int nat_t120(struct sk_buff *skb, struct nf_conn *ct,
                    enum ip_conntrack_info ctinfo,
-                   unsigned char **data, int dataoff,
+                   unsigned int protoff, unsigned char **data, int dataoff,
                    H245_TransportAddress *taddr, __be16 port,
                    struct nf_conntrack_expect *exp)
 {
@@ -307,7 +308,7 @@ static int nat_t120(struct sk_buff *skb, struct nf_conn *ct,
        }
 
        /* Modify signal */
-       if (set_h245_addr(skb, data, dataoff, taddr,
+       if (set_h245_addr(skb, protoff, data, dataoff, taddr,
                          &ct->tuplehash[!dir].tuple.dst.u3,
                          htons(nated_port)) < 0) {
                nf_ct_unexpect_related(exp);
@@ -326,7 +327,7 @@ static int nat_t120(struct sk_buff *skb, struct nf_conn *ct,
 /****************************************************************************/
 static int nat_h245(struct sk_buff *skb, struct nf_conn *ct,
                    enum ip_conntrack_info ctinfo,
-                   unsigned char **data, int dataoff,
+                   unsigned int protoff, unsigned char **data, int dataoff,
                    TransportAddress *taddr, __be16 port,
                    struct nf_conntrack_expect *exp)
 {
@@ -363,7 +364,7 @@ static int nat_h245(struct sk_buff *skb, struct nf_conn *ct,
        }
 
        /* Modify signal */
-       if (set_h225_addr(skb, data, dataoff, taddr,
+       if (set_h225_addr(skb, protoff, data, dataoff, taddr,
                          &ct->tuplehash[!dir].tuple.dst.u3,
                          htons(nated_port)) == 0) {
                /* Save ports */
@@ -390,7 +391,7 @@ static int nat_h245(struct sk_buff *skb, struct nf_conn *ct,
 static void ip_nat_q931_expect(struct nf_conn *new,
                               struct nf_conntrack_expect *this)
 {
-       struct nf_nat_ipv4_range range;
+       struct nf_nat_range range;
 
        if (this->tuple.src.u3.ip != 0) {       /* Only accept calls from GK */
                nf_nat_follow_master(new, this);
@@ -402,21 +403,23 @@ static void ip_nat_q931_expect(struct nf_conn *new,
 
        /* Change src to where master sends to */
        range.flags = NF_NAT_RANGE_MAP_IPS;
-       range.min_ip = range.max_ip = new->tuplehash[!this->dir].tuple.src.u3.ip;
+       range.min_addr = range.max_addr =
+           new->tuplehash[!this->dir].tuple.src.u3;
        nf_nat_setup_info(new, &range, NF_NAT_MANIP_SRC);
 
        /* For DST manip, map port here to where it's expected. */
        range.flags = (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED);
-       range.min = range.max = this->saved_proto;
-       range.min_ip = range.max_ip =
-           new->master->tuplehash[!this->dir].tuple.src.u3.ip;
+       range.min_proto = range.max_proto = this->saved_proto;
+       range.min_addr = range.max_addr =
+           new->master->tuplehash[!this->dir].tuple.src.u3;
        nf_nat_setup_info(new, &range, NF_NAT_MANIP_DST);
 }
 
 /****************************************************************************/
 static int nat_q931(struct sk_buff *skb, struct nf_conn *ct,
                    enum ip_conntrack_info ctinfo,
-                   unsigned char **data, TransportAddress *taddr, int idx,
+                   unsigned int protoff, unsigned char **data,
+                   TransportAddress *taddr, int idx,
                    __be16 port, struct nf_conntrack_expect *exp)
 {
        struct nf_ct_h323_master *info = nfct_help_data(ct);
@@ -453,7 +456,7 @@ static int nat_q931(struct sk_buff *skb, struct nf_conn *ct,
        }
 
        /* Modify signal */
-       if (set_h225_addr(skb, data, 0, &taddr[idx],
+       if (set_h225_addr(skb, protoff, data, 0, &taddr[idx],
                          &ct->tuplehash[!dir].tuple.dst.u3,
                          htons(nated_port)) == 0) {
                /* Save ports */
@@ -464,7 +467,7 @@ static int nat_q931(struct sk_buff *skb, struct nf_conn *ct,
                if (idx > 0 &&
                    get_h225_addr(ct, *data, &taddr[0], &addr, &port) &&
                    (ntohl(addr.ip) & 0xff000000) == 0x7f000000) {
-                       set_h225_addr(skb, data, 0, &taddr[0],
+                       set_h225_addr(skb, protoff, data, 0, &taddr[0],
                                      &ct->tuplehash[!dir].tuple.dst.u3,
                                      info->sig_port[!dir]);
                }
@@ -487,26 +490,28 @@ static int nat_q931(struct sk_buff *skb, struct nf_conn *ct,
 static void ip_nat_callforwarding_expect(struct nf_conn *new,
                                         struct nf_conntrack_expect *this)
 {
-       struct nf_nat_ipv4_range range;
+       struct nf_nat_range range;
 
        /* This must be a fresh one. */
        BUG_ON(new->status & IPS_NAT_DONE_MASK);
 
        /* Change src to where master sends to */
        range.flags = NF_NAT_RANGE_MAP_IPS;
-       range.min_ip = range.max_ip = new->tuplehash[!this->dir].tuple.src.u3.ip;
+       range.min_addr = range.max_addr =
+           new->tuplehash[!this->dir].tuple.src.u3;
        nf_nat_setup_info(new, &range, NF_NAT_MANIP_SRC);
 
        /* For DST manip, map port here to where it's expected. */
        range.flags = (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED);
-       range.min = range.max = this->saved_proto;
-       range.min_ip = range.max_ip = this->saved_ip;
+       range.min_proto = range.max_proto = this->saved_proto;
+       range.min_addr = range.max_addr = this->saved_addr;
        nf_nat_setup_info(new, &range, NF_NAT_MANIP_DST);
 }
 
 /****************************************************************************/
 static int nat_callforwarding(struct sk_buff *skb, struct nf_conn *ct,
                              enum ip_conntrack_info ctinfo,
+                             unsigned int protoff,
                              unsigned char **data, int dataoff,
                              TransportAddress *taddr, __be16 port,
                              struct nf_conntrack_expect *exp)
@@ -515,7 +520,7 @@ static int nat_callforwarding(struct sk_buff *skb, struct nf_conn *ct,
        u_int16_t nated_port;
 
        /* Set expectations for NAT */
-       exp->saved_ip = exp->tuple.dst.u3.ip;
+       exp->saved_addr = exp->tuple.dst.u3;
        exp->tuple.dst.u3.ip = ct->tuplehash[!dir].tuple.dst.u3.ip;
        exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port;
        exp->expectfn = ip_nat_callforwarding_expect;
@@ -541,7 +546,7 @@ static int nat_callforwarding(struct sk_buff *skb, struct nf_conn *ct,
        }
 
        /* Modify signal */
-       if (!set_h225_addr(skb, data, dataoff, taddr,
+       if (!set_h225_addr(skb, protoff, data, dataoff, taddr,
                           &ct->tuplehash[!dir].tuple.dst.u3,
                           htons(nated_port)) == 0) {
                nf_ct_unexpect_related(exp);
diff --git a/net/ipv4/netfilter/nf_nat_helper.c b/net/ipv4/netfilter/nf_nat_helper.c
deleted file mode 100644 (file)
index 2e59ad0..0000000
+++ /dev/null
@@ -1,458 +0,0 @@
-/* ip_nat_helper.c - generic support functions for NAT helpers
- *
- * (C) 2000-2002 Harald Welte <laforge@netfilter.org>
- * (C) 2003-2006 Netfilter Core Team <coreteam@netfilter.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/module.h>
-#include <linux/gfp.h>
-#include <linux/kmod.h>
-#include <linux/types.h>
-#include <linux/timer.h>
-#include <linux/skbuff.h>
-#include <linux/tcp.h>
-#include <linux/udp.h>
-#include <net/checksum.h>
-#include <net/tcp.h>
-#include <net/route.h>
-
-#include <linux/netfilter_ipv4.h>
-#include <net/netfilter/nf_conntrack.h>
-#include <net/netfilter/nf_conntrack_helper.h>
-#include <net/netfilter/nf_conntrack_ecache.h>
-#include <net/netfilter/nf_conntrack_expect.h>
-#include <net/netfilter/nf_nat.h>
-#include <net/netfilter/nf_nat_protocol.h>
-#include <net/netfilter/nf_nat_core.h>
-#include <net/netfilter/nf_nat_helper.h>
-
-#define DUMP_OFFSET(x) \
-       pr_debug("offset_before=%d, offset_after=%d, correction_pos=%u\n", \
-                x->offset_before, x->offset_after, x->correction_pos);
-
-static DEFINE_SPINLOCK(nf_nat_seqofs_lock);
-
-/* Setup TCP sequence correction given this change at this sequence */
-static inline void
-adjust_tcp_sequence(u32 seq,
-                   int sizediff,
-                   struct nf_conn *ct,
-                   enum ip_conntrack_info ctinfo)
-{
-       enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
-       struct nf_conn_nat *nat = nfct_nat(ct);
-       struct nf_nat_seq *this_way = &nat->seq[dir];
-
-       pr_debug("adjust_tcp_sequence: seq = %u, sizediff = %d\n",
-                seq, sizediff);
-
-       pr_debug("adjust_tcp_sequence: Seq_offset before: ");
-       DUMP_OFFSET(this_way);
-
-       spin_lock_bh(&nf_nat_seqofs_lock);
-
-       /* SYN adjust. If it's uninitialized, or this is after last
-        * correction, record it: we don't handle more than one
-        * adjustment in the window, but do deal with common case of a
-        * retransmit */
-       if (this_way->offset_before == this_way->offset_after ||
-           before(this_way->correction_pos, seq)) {
-               this_way->correction_pos = seq;
-               this_way->offset_before = this_way->offset_after;
-               this_way->offset_after += sizediff;
-       }
-       spin_unlock_bh(&nf_nat_seqofs_lock);
-
-       pr_debug("adjust_tcp_sequence: Seq_offset after: ");
-       DUMP_OFFSET(this_way);
-}
-
-/* Get the offset value, for conntrack */
-s16 nf_nat_get_offset(const struct nf_conn *ct,
-                     enum ip_conntrack_dir dir,
-                     u32 seq)
-{
-       struct nf_conn_nat *nat = nfct_nat(ct);
-       struct nf_nat_seq *this_way;
-       s16 offset;
-
-       if (!nat)
-               return 0;
-
-       this_way = &nat->seq[dir];
-       spin_lock_bh(&nf_nat_seqofs_lock);
-       offset = after(seq, this_way->correction_pos)
-                ? this_way->offset_after : this_way->offset_before;
-       spin_unlock_bh(&nf_nat_seqofs_lock);
-
-       return offset;
-}
-EXPORT_SYMBOL_GPL(nf_nat_get_offset);
-
-/* Frobs data inside this packet, which is linear. */
-static void mangle_contents(struct sk_buff *skb,
-                           unsigned int dataoff,
-                           unsigned int match_offset,
-                           unsigned int match_len,
-                           const char *rep_buffer,
-                           unsigned int rep_len)
-{
-       unsigned char *data;
-
-       BUG_ON(skb_is_nonlinear(skb));
-       data = skb_network_header(skb) + dataoff;
-
-       /* move post-replacement */
-       memmove(data + match_offset + rep_len,
-               data + match_offset + match_len,
-               skb->tail - (skb->network_header + dataoff +
-                            match_offset + match_len));
-
-       /* insert data from buffer */
-       memcpy(data + match_offset, rep_buffer, rep_len);
-
-       /* update skb info */
-       if (rep_len > match_len) {
-               pr_debug("nf_nat_mangle_packet: Extending packet by "
-                        "%u from %u bytes\n", rep_len - match_len, skb->len);
-               skb_put(skb, rep_len - match_len);
-       } else {
-               pr_debug("nf_nat_mangle_packet: Shrinking packet from "
-                        "%u from %u bytes\n", match_len - rep_len, skb->len);
-               __skb_trim(skb, skb->len + rep_len - match_len);
-       }
-
-       /* fix IP hdr checksum information */
-       ip_hdr(skb)->tot_len = htons(skb->len);
-       ip_send_check(ip_hdr(skb));
-}
-
-/* Unusual, but possible case. */
-static int enlarge_skb(struct sk_buff *skb, unsigned int extra)
-{
-       if (skb->len + extra > 65535)
-               return 0;
-
-       if (pskb_expand_head(skb, 0, extra - skb_tailroom(skb), GFP_ATOMIC))
-               return 0;
-
-       return 1;
-}
-
-void nf_nat_set_seq_adjust(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
-                          __be32 seq, s16 off)
-{
-       if (!off)
-               return;
-       set_bit(IPS_SEQ_ADJUST_BIT, &ct->status);
-       adjust_tcp_sequence(ntohl(seq), off, ct, ctinfo);
-       nf_conntrack_event_cache(IPCT_NATSEQADJ, ct);
-}
-EXPORT_SYMBOL_GPL(nf_nat_set_seq_adjust);
-
-void nf_nat_tcp_seq_adjust(struct sk_buff *skb, struct nf_conn *ct,
-                          u32 ctinfo, int off)
-{
-       const struct tcphdr *th;
-
-       if (nf_ct_protonum(ct) != IPPROTO_TCP)
-               return;
-
-       th = (struct tcphdr *)(skb_network_header(skb)+ ip_hdrlen(skb));
-       nf_nat_set_seq_adjust(ct, ctinfo, th->seq, off);
-}
-EXPORT_SYMBOL_GPL(nf_nat_tcp_seq_adjust);
-
-static void nf_nat_csum(struct sk_buff *skb, const struct iphdr *iph, void *data,
-                       int datalen, __sum16 *check, int oldlen)
-{
-       struct rtable *rt = skb_rtable(skb);
-
-       if (skb->ip_summed != CHECKSUM_PARTIAL) {
-               if (!(rt->rt_flags & RTCF_LOCAL) &&
-                   (!skb->dev || skb->dev->features & NETIF_F_V4_CSUM)) {
-                       skb->ip_summed = CHECKSUM_PARTIAL;
-                       skb->csum_start = skb_headroom(skb) +
-                                         skb_network_offset(skb) +
-                                         iph->ihl * 4;
-                       skb->csum_offset = (void *)check - data;
-                       *check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
-                                                   datalen, iph->protocol, 0);
-               } else {
-                       *check = 0;
-                       *check = csum_tcpudp_magic(iph->saddr, iph->daddr,
-                                                  datalen, iph->protocol,
-                                                  csum_partial(data, datalen,
-                                                               0));
-                       if (iph->protocol == IPPROTO_UDP && !*check)
-                               *check = CSUM_MANGLED_0;
-               }
-       } else
-               inet_proto_csum_replace2(check, skb,
-                                        htons(oldlen), htons(datalen), 1);
-}
-
-/* Generic function for mangling variable-length address changes inside
- * NATed TCP connections (like the PORT XXX,XXX,XXX,XXX,XXX,XXX
- * command in FTP).
- *
- * Takes care about all the nasty sequence number changes, checksumming,
- * skb enlargement, ...
- *
- * */
-int __nf_nat_mangle_tcp_packet(struct sk_buff *skb,
-                              struct nf_conn *ct,
-                              enum ip_conntrack_info ctinfo,
-                              unsigned int match_offset,
-                              unsigned int match_len,
-                              const char *rep_buffer,
-                              unsigned int rep_len, bool adjust)
-{
-       struct iphdr *iph;
-       struct tcphdr *tcph;
-       int oldlen, datalen;
-
-       if (!skb_make_writable(skb, skb->len))
-               return 0;
-
-       if (rep_len > match_len &&
-           rep_len - match_len > skb_tailroom(skb) &&
-           !enlarge_skb(skb, rep_len - match_len))
-               return 0;
-
-       SKB_LINEAR_ASSERT(skb);
-
-       iph = ip_hdr(skb);
-       tcph = (void *)iph + iph->ihl*4;
-
-       oldlen = skb->len - iph->ihl*4;
-       mangle_contents(skb, iph->ihl*4 + tcph->doff*4,
-                       match_offset, match_len, rep_buffer, rep_len);
-
-       datalen = skb->len - iph->ihl*4;
-       nf_nat_csum(skb, iph, tcph, datalen, &tcph->check, oldlen);
-
-       if (adjust && rep_len != match_len)
-               nf_nat_set_seq_adjust(ct, ctinfo, tcph->seq,
-                                     (int)rep_len - (int)match_len);
-
-       return 1;
-}
-EXPORT_SYMBOL(__nf_nat_mangle_tcp_packet);
-
-/* Generic function for mangling variable-length address changes inside
- * NATed UDP connections (like the CONNECT DATA XXXXX MESG XXXXX INDEX XXXXX
- * command in the Amanda protocol)
- *
- * Takes care about all the nasty sequence number changes, checksumming,
- * skb enlargement, ...
- *
- * XXX - This function could be merged with nf_nat_mangle_tcp_packet which
- *       should be fairly easy to do.
- */
-int
-nf_nat_mangle_udp_packet(struct sk_buff *skb,
-                        struct nf_conn *ct,
-                        enum ip_conntrack_info ctinfo,
-                        unsigned int match_offset,
-                        unsigned int match_len,
-                        const char *rep_buffer,
-                        unsigned int rep_len)
-{
-       struct iphdr *iph;
-       struct udphdr *udph;
-       int datalen, oldlen;
-
-       if (!skb_make_writable(skb, skb->len))
-               return 0;
-
-       if (rep_len > match_len &&
-           rep_len - match_len > skb_tailroom(skb) &&
-           !enlarge_skb(skb, rep_len - match_len))
-               return 0;
-
-       iph = ip_hdr(skb);
-       udph = (void *)iph + iph->ihl*4;
-
-       oldlen = skb->len - iph->ihl*4;
-       mangle_contents(skb, iph->ihl*4 + sizeof(*udph),
-                       match_offset, match_len, rep_buffer, rep_len);
-
-       /* update the length of the UDP packet */
-       datalen = skb->len - iph->ihl*4;
-       udph->len = htons(datalen);
-
-       /* fix udp checksum if udp checksum was previously calculated */
-       if (!udph->check && skb->ip_summed != CHECKSUM_PARTIAL)
-               return 1;
-
-       nf_nat_csum(skb, iph, udph, datalen, &udph->check, oldlen);
-
-       return 1;
-}
-EXPORT_SYMBOL(nf_nat_mangle_udp_packet);
-
-/* Adjust one found SACK option including checksum correction */
-static void
-sack_adjust(struct sk_buff *skb,
-           struct tcphdr *tcph,
-           unsigned int sackoff,
-           unsigned int sackend,
-           struct nf_nat_seq *natseq)
-{
-       while (sackoff < sackend) {
-               struct tcp_sack_block_wire *sack;
-               __be32 new_start_seq, new_end_seq;
-
-               sack = (void *)skb->data + sackoff;
-               if (after(ntohl(sack->start_seq) - natseq->offset_before,
-                         natseq->correction_pos))
-                       new_start_seq = htonl(ntohl(sack->start_seq)
-                                       - natseq->offset_after);
-               else
-                       new_start_seq = htonl(ntohl(sack->start_seq)
-                                       - natseq->offset_before);
-
-               if (after(ntohl(sack->end_seq) - natseq->offset_before,
-                         natseq->correction_pos))
-                       new_end_seq = htonl(ntohl(sack->end_seq)
-                                     - natseq->offset_after);
-               else
-                       new_end_seq = htonl(ntohl(sack->end_seq)
-                                     - natseq->offset_before);
-
-               pr_debug("sack_adjust: start_seq: %d->%d, end_seq: %d->%d\n",
-                        ntohl(sack->start_seq), new_start_seq,
-                        ntohl(sack->end_seq), new_end_seq);
-
-               inet_proto_csum_replace4(&tcph->check, skb,
-                                        sack->start_seq, new_start_seq, 0);
-               inet_proto_csum_replace4(&tcph->check, skb,
-                                        sack->end_seq, new_end_seq, 0);
-               sack->start_seq = new_start_seq;
-               sack->end_seq = new_end_seq;
-               sackoff += sizeof(*sack);
-       }
-}
-
-/* TCP SACK sequence number adjustment */
-static inline unsigned int
-nf_nat_sack_adjust(struct sk_buff *skb,
-                  struct tcphdr *tcph,
-                  struct nf_conn *ct,
-                  enum ip_conntrack_info ctinfo)
-{
-       unsigned int dir, optoff, optend;
-       struct nf_conn_nat *nat = nfct_nat(ct);
-
-       optoff = ip_hdrlen(skb) + sizeof(struct tcphdr);
-       optend = ip_hdrlen(skb) + tcph->doff * 4;
-
-       if (!skb_make_writable(skb, optend))
-               return 0;
-
-       dir = CTINFO2DIR(ctinfo);
-
-       while (optoff < optend) {
-               /* Usually: option, length. */
-               unsigned char *op = skb->data + optoff;
-
-               switch (op[0]) {
-               case TCPOPT_EOL:
-                       return 1;
-               case TCPOPT_NOP:
-                       optoff++;
-                       continue;
-               default:
-                       /* no partial options */
-                       if (optoff + 1 == optend ||
-                           optoff + op[1] > optend ||
-                           op[1] < 2)
-                               return 0;
-                       if (op[0] == TCPOPT_SACK &&
-                           op[1] >= 2+TCPOLEN_SACK_PERBLOCK &&
-                           ((op[1] - 2) % TCPOLEN_SACK_PERBLOCK) == 0)
-                               sack_adjust(skb, tcph, optoff+2,
-                                           optoff+op[1], &nat->seq[!dir]);
-                       optoff += op[1];
-               }
-       }
-       return 1;
-}
-
-/* TCP sequence number adjustment.  Returns 1 on success, 0 on failure */
-int
-nf_nat_seq_adjust(struct sk_buff *skb,
-                 struct nf_conn *ct,
-                 enum ip_conntrack_info ctinfo)
-{
-       struct tcphdr *tcph;
-       int dir;
-       __be32 newseq, newack;
-       s16 seqoff, ackoff;
-       struct nf_conn_nat *nat = nfct_nat(ct);
-       struct nf_nat_seq *this_way, *other_way;
-
-       dir = CTINFO2DIR(ctinfo);
-
-       this_way = &nat->seq[dir];
-       other_way = &nat->seq[!dir];
-
-       if (!skb_make_writable(skb, ip_hdrlen(skb) + sizeof(*tcph)))
-               return 0;
-
-       tcph = (void *)skb->data + ip_hdrlen(skb);
-       if (after(ntohl(tcph->seq), this_way->correction_pos))
-               seqoff = this_way->offset_after;
-       else
-               seqoff = this_way->offset_before;
-
-       if (after(ntohl(tcph->ack_seq) - other_way->offset_before,
-                 other_way->correction_pos))
-               ackoff = other_way->offset_after;
-       else
-               ackoff = other_way->offset_before;
-
-       newseq = htonl(ntohl(tcph->seq) + seqoff);
-       newack = htonl(ntohl(tcph->ack_seq) - ackoff);
-
-       inet_proto_csum_replace4(&tcph->check, skb, tcph->seq, newseq, 0);
-       inet_proto_csum_replace4(&tcph->check, skb, tcph->ack_seq, newack, 0);
-
-       pr_debug("Adjusting sequence number from %u->%u, ack from %u->%u\n",
-                ntohl(tcph->seq), ntohl(newseq), ntohl(tcph->ack_seq),
-                ntohl(newack));
-
-       tcph->seq = newseq;
-       tcph->ack_seq = newack;
-
-       return nf_nat_sack_adjust(skb, tcph, ct, ctinfo);
-}
-
-/* Setup NAT on this expected conntrack so it follows master. */
-/* If we fail to get a free NAT slot, we'll get dropped on confirm */
-void nf_nat_follow_master(struct nf_conn *ct,
-                         struct nf_conntrack_expect *exp)
-{
-       struct nf_nat_ipv4_range range;
-
-       /* This must be a fresh one. */
-       BUG_ON(ct->status & IPS_NAT_DONE_MASK);
-
-       /* Change src to where master sends to */
-       range.flags = NF_NAT_RANGE_MAP_IPS;
-       range.min_ip = range.max_ip
-               = ct->master->tuplehash[!exp->dir].tuple.dst.u3.ip;
-       nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC);
-
-       /* For DST manip, map port here to where it's expected. */
-       range.flags = (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED);
-       range.min = range.max = exp->saved_proto;
-       range.min_ip = range.max_ip
-               = ct->master->tuplehash[!exp->dir].tuple.src.u3.ip;
-       nf_nat_setup_info(ct, &range, NF_NAT_MANIP_DST);
-}
-EXPORT_SYMBOL(nf_nat_follow_master);
diff --git a/net/ipv4/netfilter/nf_nat_irc.c b/net/ipv4/netfilter/nf_nat_irc.c
deleted file mode 100644 (file)
index 979ae16..0000000
+++ /dev/null
@@ -1,99 +0,0 @@
-/* IRC extension for TCP NAT alteration.
- *
- * (C) 2000-2001 by Harald Welte <laforge@gnumonks.org>
- * (C) 2004 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
- * based on a copy of RR's ip_nat_ftp.c
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/tcp.h>
-#include <linux/kernel.h>
-
-#include <net/netfilter/nf_nat.h>
-#include <net/netfilter/nf_nat_helper.h>
-#include <net/netfilter/nf_nat_rule.h>
-#include <net/netfilter/nf_conntrack_helper.h>
-#include <net/netfilter/nf_conntrack_expect.h>
-#include <linux/netfilter/nf_conntrack_irc.h>
-
-MODULE_AUTHOR("Harald Welte <laforge@gnumonks.org>");
-MODULE_DESCRIPTION("IRC (DCC) NAT helper");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("ip_nat_irc");
-
-static unsigned int help(struct sk_buff *skb,
-                        enum ip_conntrack_info ctinfo,
-                        unsigned int matchoff,
-                        unsigned int matchlen,
-                        struct nf_conntrack_expect *exp)
-{
-       char buffer[sizeof("4294967296 65635")];
-       u_int32_t ip;
-       u_int16_t port;
-       unsigned int ret;
-
-       /* Reply comes from server. */
-       exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port;
-       exp->dir = IP_CT_DIR_REPLY;
-       exp->expectfn = nf_nat_follow_master;
-
-       /* Try to get same port: if not, try to change it. */
-       for (port = ntohs(exp->saved_proto.tcp.port); port != 0; port++) {
-               int ret;
-
-               exp->tuple.dst.u.tcp.port = htons(port);
-               ret = nf_ct_expect_related(exp);
-               if (ret == 0)
-                       break;
-               else if (ret != -EBUSY) {
-                       port = 0;
-                       break;
-               }
-       }
-
-       if (port == 0)
-               return NF_DROP;
-
-       ip = ntohl(exp->master->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip);
-       sprintf(buffer, "%u %u", ip, port);
-       pr_debug("nf_nat_irc: inserting '%s' == %pI4, port %u\n",
-                buffer, &ip, port);
-
-       ret = nf_nat_mangle_tcp_packet(skb, exp->master, ctinfo,
-                                      matchoff, matchlen, buffer,
-                                      strlen(buffer));
-       if (ret != NF_ACCEPT)
-               nf_ct_unexpect_related(exp);
-       return ret;
-}
-
-static void __exit nf_nat_irc_fini(void)
-{
-       RCU_INIT_POINTER(nf_nat_irc_hook, NULL);
-       synchronize_rcu();
-}
-
-static int __init nf_nat_irc_init(void)
-{
-       BUG_ON(nf_nat_irc_hook != NULL);
-       RCU_INIT_POINTER(nf_nat_irc_hook, help);
-       return 0;
-}
-
-/* Prior to 2.6.11, we had a ports param.  No longer, but don't break users. */
-static int warn_set(const char *val, struct kernel_param *kp)
-{
-       printk(KERN_INFO KBUILD_MODNAME
-              ": kernel >= 2.6.10 only uses 'ports' for conntrack modules\n");
-       return 0;
-}
-module_param_call(ports, warn_set, NULL, NULL, 0);
-
-module_init(nf_nat_irc_init);
-module_exit(nf_nat_irc_fini);
diff --git a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
new file mode 100644 (file)
index 0000000..d8b2e14
--- /dev/null
@@ -0,0 +1,281 @@
+/*
+ * (C) 1999-2001 Paul `Rusty' Russell
+ * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
+ * (C) 2011 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/ip.h>
+#include <linux/icmp.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv4.h>
+#include <net/secure_seq.h>
+#include <net/checksum.h>
+#include <net/route.h>
+#include <net/ip.h>
+
+#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_nat_core.h>
+#include <net/netfilter/nf_nat_l3proto.h>
+#include <net/netfilter/nf_nat_l4proto.h>
+
+static const struct nf_nat_l3proto nf_nat_l3proto_ipv4;
+
+#ifdef CONFIG_XFRM
+static void nf_nat_ipv4_decode_session(struct sk_buff *skb,
+                                      const struct nf_conn *ct,
+                                      enum ip_conntrack_dir dir,
+                                      unsigned long statusbit,
+                                      struct flowi *fl)
+{
+       const struct nf_conntrack_tuple *t = &ct->tuplehash[dir].tuple;
+       struct flowi4 *fl4 = &fl->u.ip4;
+
+       if (ct->status & statusbit) {
+               fl4->daddr = t->dst.u3.ip;
+               if (t->dst.protonum == IPPROTO_TCP ||
+                   t->dst.protonum == IPPROTO_UDP ||
+                   t->dst.protonum == IPPROTO_UDPLITE ||
+                   t->dst.protonum == IPPROTO_DCCP ||
+                   t->dst.protonum == IPPROTO_SCTP)
+                       fl4->fl4_dport = t->dst.u.all;
+       }
+
+       statusbit ^= IPS_NAT_MASK;
+
+       if (ct->status & statusbit) {
+               fl4->saddr = t->src.u3.ip;
+               if (t->dst.protonum == IPPROTO_TCP ||
+                   t->dst.protonum == IPPROTO_UDP ||
+                   t->dst.protonum == IPPROTO_UDPLITE ||
+                   t->dst.protonum == IPPROTO_DCCP ||
+                   t->dst.protonum == IPPROTO_SCTP)
+                       fl4->fl4_sport = t->src.u.all;
+       }
+}
+#endif /* CONFIG_XFRM */
+
+static bool nf_nat_ipv4_in_range(const struct nf_conntrack_tuple *t,
+                                const struct nf_nat_range *range)
+{
+       return ntohl(t->src.u3.ip) >= ntohl(range->min_addr.ip) &&
+              ntohl(t->src.u3.ip) <= ntohl(range->max_addr.ip);
+}
+
+static u32 nf_nat_ipv4_secure_port(const struct nf_conntrack_tuple *t,
+                                  __be16 dport)
+{
+       return secure_ipv4_port_ephemeral(t->src.u3.ip, t->dst.u3.ip, dport);
+}
+
+static bool nf_nat_ipv4_manip_pkt(struct sk_buff *skb,
+                                 unsigned int iphdroff,
+                                 const struct nf_nat_l4proto *l4proto,
+                                 const struct nf_conntrack_tuple *target,
+                                 enum nf_nat_manip_type maniptype)
+{
+       struct iphdr *iph;
+       unsigned int hdroff;
+
+       if (!skb_make_writable(skb, iphdroff + sizeof(*iph)))
+               return false;
+
+       iph = (void *)skb->data + iphdroff;
+       hdroff = iphdroff + iph->ihl * 4;
+
+       if (!l4proto->manip_pkt(skb, &nf_nat_l3proto_ipv4, iphdroff, hdroff,
+                               target, maniptype))
+               return false;
+       iph = (void *)skb->data + iphdroff;
+
+       if (maniptype == NF_NAT_MANIP_SRC) {
+               csum_replace4(&iph->check, iph->saddr, target->src.u3.ip);
+               iph->saddr = target->src.u3.ip;
+       } else {
+               csum_replace4(&iph->check, iph->daddr, target->dst.u3.ip);
+               iph->daddr = target->dst.u3.ip;
+       }
+       return true;
+}
+
+static void nf_nat_ipv4_csum_update(struct sk_buff *skb,
+                                   unsigned int iphdroff, __sum16 *check,
+                                   const struct nf_conntrack_tuple *t,
+                                   enum nf_nat_manip_type maniptype)
+{
+       struct iphdr *iph = (struct iphdr *)(skb->data + iphdroff);
+       __be32 oldip, newip;
+
+       if (maniptype == NF_NAT_MANIP_SRC) {
+               oldip = iph->saddr;
+               newip = t->src.u3.ip;
+       } else {
+               oldip = iph->daddr;
+               newip = t->dst.u3.ip;
+       }
+       inet_proto_csum_replace4(check, skb, oldip, newip, 1);
+}
+
+static void nf_nat_ipv4_csum_recalc(struct sk_buff *skb,
+                                   u8 proto, void *data, __sum16 *check,
+                                   int datalen, int oldlen)
+{
+       const struct iphdr *iph = ip_hdr(skb);
+       struct rtable *rt = skb_rtable(skb);
+
+       if (skb->ip_summed != CHECKSUM_PARTIAL) {
+               if (!(rt->rt_flags & RTCF_LOCAL) &&
+                   (!skb->dev || skb->dev->features & NETIF_F_V4_CSUM)) {
+                       skb->ip_summed = CHECKSUM_PARTIAL;
+                       skb->csum_start = skb_headroom(skb) +
+                                         skb_network_offset(skb) +
+                                         ip_hdrlen(skb);
+                       skb->csum_offset = (void *)check - data;
+                       *check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+                                                   datalen, proto, 0);
+               } else {
+                       *check = 0;
+                       *check = csum_tcpudp_magic(iph->saddr, iph->daddr,
+                                                  datalen, proto,
+                                                  csum_partial(data, datalen,
+                                                               0));
+                       if (proto == IPPROTO_UDP && !*check)
+                               *check = CSUM_MANGLED_0;
+               }
+       } else
+               inet_proto_csum_replace2(check, skb,
+                                        htons(oldlen), htons(datalen), 1);
+}
+
+static int nf_nat_ipv4_nlattr_to_range(struct nlattr *tb[],
+                                      struct nf_nat_range *range)
+{
+       if (tb[CTA_NAT_V4_MINIP]) {
+               range->min_addr.ip = nla_get_be32(tb[CTA_NAT_V4_MINIP]);
+               range->flags |= NF_NAT_RANGE_MAP_IPS;
+       }
+
+       if (tb[CTA_NAT_V4_MAXIP])
+               range->max_addr.ip = nla_get_be32(tb[CTA_NAT_V4_MAXIP]);
+       else
+               range->max_addr.ip = range->min_addr.ip;
+
+       return 0;
+}
+
+static const struct nf_nat_l3proto nf_nat_l3proto_ipv4 = {
+       .l3proto                = NFPROTO_IPV4,
+       .in_range               = nf_nat_ipv4_in_range,
+       .secure_port            = nf_nat_ipv4_secure_port,
+       .manip_pkt              = nf_nat_ipv4_manip_pkt,
+       .csum_update            = nf_nat_ipv4_csum_update,
+       .csum_recalc            = nf_nat_ipv4_csum_recalc,
+       .nlattr_to_range        = nf_nat_ipv4_nlattr_to_range,
+#ifdef CONFIG_XFRM
+       .decode_session         = nf_nat_ipv4_decode_session,
+#endif
+};
+
+int nf_nat_icmp_reply_translation(struct sk_buff *skb,
+                                 struct nf_conn *ct,
+                                 enum ip_conntrack_info ctinfo,
+                                 unsigned int hooknum)
+{
+       struct {
+               struct icmphdr  icmp;
+               struct iphdr    ip;
+       } *inside;
+       enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+       enum nf_nat_manip_type manip = HOOK2MANIP(hooknum);
+       unsigned int hdrlen = ip_hdrlen(skb);
+       const struct nf_nat_l4proto *l4proto;
+       struct nf_conntrack_tuple target;
+       unsigned long statusbit;
+
+       NF_CT_ASSERT(ctinfo == IP_CT_RELATED || ctinfo == IP_CT_RELATED_REPLY);
+
+       if (!skb_make_writable(skb, hdrlen + sizeof(*inside)))
+               return 0;
+       if (nf_ip_checksum(skb, hooknum, hdrlen, 0))
+               return 0;
+
+       inside = (void *)skb->data + hdrlen;
+       if (inside->icmp.type == ICMP_REDIRECT) {
+               if ((ct->status & IPS_NAT_DONE_MASK) != IPS_NAT_DONE_MASK)
+                       return 0;
+               if (ct->status & IPS_NAT_MASK)
+                       return 0;
+       }
+
+       if (manip == NF_NAT_MANIP_SRC)
+               statusbit = IPS_SRC_NAT;
+       else
+               statusbit = IPS_DST_NAT;
+
+       /* Invert if this is reply direction */
+       if (dir == IP_CT_DIR_REPLY)
+               statusbit ^= IPS_NAT_MASK;
+
+       if (!(ct->status & statusbit))
+               return 1;
+
+       l4proto = __nf_nat_l4proto_find(NFPROTO_IPV4, inside->ip.protocol);
+       if (!nf_nat_ipv4_manip_pkt(skb, hdrlen + sizeof(inside->icmp),
+                                  l4proto, &ct->tuplehash[!dir].tuple, !manip))
+               return 0;
+
+       if (skb->ip_summed != CHECKSUM_PARTIAL) {
+               /* Reloading "inside" here since manip_pkt may reallocate */
+               inside = (void *)skb->data + hdrlen;
+               inside->icmp.checksum = 0;
+               inside->icmp.checksum =
+                       csum_fold(skb_checksum(skb, hdrlen,
+                                              skb->len - hdrlen, 0));
+       }
+
+       /* Change outer to look like the reply to an incoming packet */
+       nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);
+       l4proto = __nf_nat_l4proto_find(NFPROTO_IPV4, 0);
+       if (!nf_nat_ipv4_manip_pkt(skb, 0, l4proto, &target, manip))
+               return 0;
+
+       return 1;
+}
+EXPORT_SYMBOL_GPL(nf_nat_icmp_reply_translation);
+
+static int __init nf_nat_l3proto_ipv4_init(void)
+{
+       int err;
+
+       err = nf_nat_l4proto_register(NFPROTO_IPV4, &nf_nat_l4proto_icmp);
+       if (err < 0)
+               goto err1;
+       err = nf_nat_l3proto_register(&nf_nat_l3proto_ipv4);
+       if (err < 0)
+               goto err2;
+       return err;
+
+err2:
+       nf_nat_l4proto_unregister(NFPROTO_IPV4, &nf_nat_l4proto_icmp);
+err1:
+       return err;
+}
+
+static void __exit nf_nat_l3proto_ipv4_exit(void)
+{
+       nf_nat_l3proto_unregister(&nf_nat_l3proto_ipv4);
+       nf_nat_l4proto_unregister(NFPROTO_IPV4, &nf_nat_l4proto_icmp);
+}
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("nf-nat-" __stringify(AF_INET));
+
+module_init(nf_nat_l3proto_ipv4_init);
+module_exit(nf_nat_l3proto_ipv4_exit);
index 388140881ebe2eac05ed78a8ee68de518cfddef8..a06d7d74817d3976d5cb15147036a3270941dc80 100644 (file)
@@ -22,7 +22,6 @@
 
 #include <net/netfilter/nf_nat.h>
 #include <net/netfilter/nf_nat_helper.h>
-#include <net/netfilter/nf_nat_rule.h>
 #include <net/netfilter/nf_conntrack_helper.h>
 #include <net/netfilter/nf_conntrack_expect.h>
 #include <net/netfilter/nf_conntrack_zones.h>
@@ -47,7 +46,7 @@ static void pptp_nat_expected(struct nf_conn *ct,
        struct nf_conntrack_tuple t;
        const struct nf_ct_pptp_master *ct_pptp_info;
        const struct nf_nat_pptp *nat_pptp_info;
-       struct nf_nat_ipv4_range range;
+       struct nf_nat_range range;
 
        ct_pptp_info = nfct_help_data(master);
        nat_pptp_info = &nfct_nat(master)->help.nat_pptp_info;
@@ -89,21 +88,21 @@ static void pptp_nat_expected(struct nf_conn *ct,
 
        /* Change src to where master sends to */
        range.flags = NF_NAT_RANGE_MAP_IPS;
-       range.min_ip = range.max_ip
-               = ct->master->tuplehash[!exp->dir].tuple.dst.u3.ip;
+       range.min_addr = range.max_addr
+               = ct->master->tuplehash[!exp->dir].tuple.dst.u3;
        if (exp->dir == IP_CT_DIR_ORIGINAL) {
                range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
-               range.min = range.max = exp->saved_proto;
+               range.min_proto = range.max_proto = exp->saved_proto;
        }
        nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC);
 
        /* For DST manip, map port here to where it's expected. */
        range.flags = NF_NAT_RANGE_MAP_IPS;
-       range.min_ip = range.max_ip
-               = ct->master->tuplehash[!exp->dir].tuple.src.u3.ip;
+       range.min_addr = range.max_addr
+               = ct->master->tuplehash[!exp->dir].tuple.src.u3;
        if (exp->dir == IP_CT_DIR_REPLY) {
                range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
-               range.min = range.max = exp->saved_proto;
+               range.min_proto = range.max_proto = exp->saved_proto;
        }
        nf_nat_setup_info(ct, &range, NF_NAT_MANIP_DST);
 }
@@ -113,6 +112,7 @@ static int
 pptp_outbound_pkt(struct sk_buff *skb,
                  struct nf_conn *ct,
                  enum ip_conntrack_info ctinfo,
+                 unsigned int protoff,
                  struct PptpControlHeader *ctlh,
                  union pptp_ctrl_union *pptpReq)
 
@@ -175,7 +175,7 @@ pptp_outbound_pkt(struct sk_buff *skb,
                 ntohs(REQ_CID(pptpReq, cid_off)), ntohs(new_callid));
 
        /* mangle packet */
-       if (nf_nat_mangle_tcp_packet(skb, ct, ctinfo,
+       if (nf_nat_mangle_tcp_packet(skb, ct, ctinfo, protoff,
                                     cid_off + sizeof(struct pptp_pkt_hdr) +
                                     sizeof(struct PptpControlHeader),
                                     sizeof(new_callid), (char *)&new_callid,
@@ -216,6 +216,7 @@ static int
 pptp_inbound_pkt(struct sk_buff *skb,
                 struct nf_conn *ct,
                 enum ip_conntrack_info ctinfo,
+                unsigned int protoff,
                 struct PptpControlHeader *ctlh,
                 union pptp_ctrl_union *pptpReq)
 {
@@ -268,7 +269,7 @@ pptp_inbound_pkt(struct sk_buff *skb,
        pr_debug("altering peer call id from 0x%04x to 0x%04x\n",
                 ntohs(REQ_CID(pptpReq, pcid_off)), ntohs(new_pcid));
 
-       if (nf_nat_mangle_tcp_packet(skb, ct, ctinfo,
+       if (nf_nat_mangle_tcp_packet(skb, ct, ctinfo, protoff,
                                     pcid_off + sizeof(struct pptp_pkt_hdr) +
                                     sizeof(struct PptpControlHeader),
                                     sizeof(new_pcid), (char *)&new_pcid,
diff --git a/net/ipv4/netfilter/nf_nat_proto_common.c b/net/ipv4/netfilter/nf_nat_proto_common.c
deleted file mode 100644 (file)
index 9993bc9..0000000
+++ /dev/null
@@ -1,114 +0,0 @@
-/* (C) 1999-2001 Paul `Rusty' Russell
- * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
- * (C) 2008 Patrick McHardy <kaber@trash.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/types.h>
-#include <linux/random.h>
-#include <linux/ip.h>
-
-#include <linux/netfilter.h>
-#include <linux/export.h>
-#include <net/secure_seq.h>
-#include <net/netfilter/nf_nat.h>
-#include <net/netfilter/nf_nat_core.h>
-#include <net/netfilter/nf_nat_rule.h>
-#include <net/netfilter/nf_nat_protocol.h>
-
-bool nf_nat_proto_in_range(const struct nf_conntrack_tuple *tuple,
-                          enum nf_nat_manip_type maniptype,
-                          const union nf_conntrack_man_proto *min,
-                          const union nf_conntrack_man_proto *max)
-{
-       __be16 port;
-
-       if (maniptype == NF_NAT_MANIP_SRC)
-               port = tuple->src.u.all;
-       else
-               port = tuple->dst.u.all;
-
-       return ntohs(port) >= ntohs(min->all) &&
-              ntohs(port) <= ntohs(max->all);
-}
-EXPORT_SYMBOL_GPL(nf_nat_proto_in_range);
-
-void nf_nat_proto_unique_tuple(struct nf_conntrack_tuple *tuple,
-                              const struct nf_nat_ipv4_range *range,
-                              enum nf_nat_manip_type maniptype,
-                              const struct nf_conn *ct,
-                              u_int16_t *rover)
-{
-       unsigned int range_size, min, i;
-       __be16 *portptr;
-       u_int16_t off;
-
-       if (maniptype == NF_NAT_MANIP_SRC)
-               portptr = &tuple->src.u.all;
-       else
-               portptr = &tuple->dst.u.all;
-
-       /* If no range specified... */
-       if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED)) {
-               /* If it's dst rewrite, can't change port */
-               if (maniptype == NF_NAT_MANIP_DST)
-                       return;
-
-               if (ntohs(*portptr) < 1024) {
-                       /* Loose convention: >> 512 is credential passing */
-                       if (ntohs(*portptr) < 512) {
-                               min = 1;
-                               range_size = 511 - min + 1;
-                       } else {
-                               min = 600;
-                               range_size = 1023 - min + 1;
-                       }
-               } else {
-                       min = 1024;
-                       range_size = 65535 - 1024 + 1;
-               }
-       } else {
-               min = ntohs(range->min.all);
-               range_size = ntohs(range->max.all) - min + 1;
-       }
-
-       if (range->flags & NF_NAT_RANGE_PROTO_RANDOM)
-               off = secure_ipv4_port_ephemeral(tuple->src.u3.ip, tuple->dst.u3.ip,
-                                                maniptype == NF_NAT_MANIP_SRC
-                                                ? tuple->dst.u.all
-                                                : tuple->src.u.all);
-       else
-               off = *rover;
-
-       for (i = 0; ; ++off) {
-               *portptr = htons(min + off % range_size);
-               if (++i != range_size && nf_nat_used_tuple(tuple, ct))
-                       continue;
-               if (!(range->flags & NF_NAT_RANGE_PROTO_RANDOM))
-                       *rover = off;
-               return;
-       }
-       return;
-}
-EXPORT_SYMBOL_GPL(nf_nat_proto_unique_tuple);
-
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
-int nf_nat_proto_nlattr_to_range(struct nlattr *tb[],
-                                struct nf_nat_ipv4_range *range)
-{
-       if (tb[CTA_PROTONAT_PORT_MIN]) {
-               range->min.all = nla_get_be16(tb[CTA_PROTONAT_PORT_MIN]);
-               range->max.all = range->min.tcp.port;
-               range->flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
-       }
-       if (tb[CTA_PROTONAT_PORT_MAX]) {
-               range->max.all = nla_get_be16(tb[CTA_PROTONAT_PORT_MAX]);
-               range->flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
-       }
-       return 0;
-}
-EXPORT_SYMBOL_GPL(nf_nat_proto_nlattr_to_range);
-#endif
diff --git a/net/ipv4/netfilter/nf_nat_proto_dccp.c b/net/ipv4/netfilter/nf_nat_proto_dccp.c
deleted file mode 100644 (file)
index 3f67138..0000000
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * DCCP NAT protocol helper
- *
- * Copyright (c) 2005, 2006. 2008 Patrick McHardy <kaber@trash.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/skbuff.h>
-#include <linux/ip.h>
-#include <linux/dccp.h>
-
-#include <net/netfilter/nf_conntrack.h>
-#include <net/netfilter/nf_nat.h>
-#include <net/netfilter/nf_nat_protocol.h>
-
-static u_int16_t dccp_port_rover;
-
-static void
-dccp_unique_tuple(struct nf_conntrack_tuple *tuple,
-                 const struct nf_nat_ipv4_range *range,
-                 enum nf_nat_manip_type maniptype,
-                 const struct nf_conn *ct)
-{
-       nf_nat_proto_unique_tuple(tuple, range, maniptype, ct,
-                                 &dccp_port_rover);
-}
-
-static bool
-dccp_manip_pkt(struct sk_buff *skb,
-              unsigned int iphdroff,
-              const struct nf_conntrack_tuple *tuple,
-              enum nf_nat_manip_type maniptype)
-{
-       const struct iphdr *iph = (const void *)(skb->data + iphdroff);
-       struct dccp_hdr *hdr;
-       unsigned int hdroff = iphdroff + iph->ihl * 4;
-       __be32 oldip, newip;
-       __be16 *portptr, oldport, newport;
-       int hdrsize = 8; /* DCCP connection tracking guarantees this much */
-
-       if (skb->len >= hdroff + sizeof(struct dccp_hdr))
-               hdrsize = sizeof(struct dccp_hdr);
-
-       if (!skb_make_writable(skb, hdroff + hdrsize))
-               return false;
-
-       iph = (struct iphdr *)(skb->data + iphdroff);
-       hdr = (struct dccp_hdr *)(skb->data + hdroff);
-
-       if (maniptype == NF_NAT_MANIP_SRC) {
-               oldip = iph->saddr;
-               newip = tuple->src.u3.ip;
-               newport = tuple->src.u.dccp.port;
-               portptr = &hdr->dccph_sport;
-       } else {
-               oldip = iph->daddr;
-               newip = tuple->dst.u3.ip;
-               newport = tuple->dst.u.dccp.port;
-               portptr = &hdr->dccph_dport;
-       }
-
-       oldport = *portptr;
-       *portptr = newport;
-
-       if (hdrsize < sizeof(*hdr))
-               return true;
-
-       inet_proto_csum_replace4(&hdr->dccph_checksum, skb, oldip, newip, 1);
-       inet_proto_csum_replace2(&hdr->dccph_checksum, skb, oldport, newport,
-                                0);
-       return true;
-}
-
-static const struct nf_nat_protocol nf_nat_protocol_dccp = {
-       .protonum               = IPPROTO_DCCP,
-       .manip_pkt              = dccp_manip_pkt,
-       .in_range               = nf_nat_proto_in_range,
-       .unique_tuple           = dccp_unique_tuple,
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
-       .nlattr_to_range        = nf_nat_proto_nlattr_to_range,
-#endif
-};
-
-static int __init nf_nat_proto_dccp_init(void)
-{
-       return nf_nat_protocol_register(&nf_nat_protocol_dccp);
-}
-
-static void __exit nf_nat_proto_dccp_fini(void)
-{
-       nf_nat_protocol_unregister(&nf_nat_protocol_dccp);
-}
-
-module_init(nf_nat_proto_dccp_init);
-module_exit(nf_nat_proto_dccp_fini);
-
-MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
-MODULE_DESCRIPTION("DCCP NAT protocol helper");
-MODULE_LICENSE("GPL");
index 46ba0b9ab985b70ac5c80883a642d3048ac3cf83..ea44f02563b5dc5275bb13484fae50b207079b96 100644 (file)
@@ -28,8 +28,7 @@
 #include <linux/ip.h>
 
 #include <net/netfilter/nf_nat.h>
-#include <net/netfilter/nf_nat_rule.h>
-#include <net/netfilter/nf_nat_protocol.h>
+#include <net/netfilter/nf_nat_l4proto.h>
 #include <linux/netfilter/nf_conntrack_proto_gre.h>
 
 MODULE_LICENSE("GPL");
@@ -38,8 +37,9 @@ MODULE_DESCRIPTION("Netfilter NAT protocol helper module for GRE");
 
 /* generate unique tuple ... */
 static void
-gre_unique_tuple(struct nf_conntrack_tuple *tuple,
-                const struct nf_nat_ipv4_range *range,
+gre_unique_tuple(const struct nf_nat_l3proto *l3proto,
+                struct nf_conntrack_tuple *tuple,
+                const struct nf_nat_range *range,
                 enum nf_nat_manip_type maniptype,
                 const struct nf_conn *ct)
 {
@@ -62,8 +62,8 @@ gre_unique_tuple(struct nf_conntrack_tuple *tuple,
                min = 1;
                range_size = 0xffff;
        } else {
-               min = ntohs(range->min.gre.key);
-               range_size = ntohs(range->max.gre.key) - min + 1;
+               min = ntohs(range->min_proto.gre.key);
+               range_size = ntohs(range->max_proto.gre.key) - min + 1;
        }
 
        pr_debug("min = %u, range_size = %u\n", min, range_size);
@@ -80,14 +80,14 @@ gre_unique_tuple(struct nf_conntrack_tuple *tuple,
 
 /* manipulate a GRE packet according to maniptype */
 static bool
-gre_manip_pkt(struct sk_buff *skb, unsigned int iphdroff,
+gre_manip_pkt(struct sk_buff *skb,
+             const struct nf_nat_l3proto *l3proto,
+             unsigned int iphdroff, unsigned int hdroff,
              const struct nf_conntrack_tuple *tuple,
              enum nf_nat_manip_type maniptype)
 {
        const struct gre_hdr *greh;
        struct gre_hdr_pptp *pgreh;
-       const struct iphdr *iph = (struct iphdr *)(skb->data + iphdroff);
-       unsigned int hdroff = iphdroff + iph->ihl * 4;
 
        /* pgreh includes two optional 32bit fields which are not required
         * to be there.  That's where the magic '8' comes from */
@@ -117,24 +117,24 @@ gre_manip_pkt(struct sk_buff *skb, unsigned int iphdroff,
        return true;
 }
 
-static const struct nf_nat_protocol gre = {
-       .protonum               = IPPROTO_GRE,
+static const struct nf_nat_l4proto gre = {
+       .l4proto                = IPPROTO_GRE,
        .manip_pkt              = gre_manip_pkt,
-       .in_range               = nf_nat_proto_in_range,
+       .in_range               = nf_nat_l4proto_in_range,
        .unique_tuple           = gre_unique_tuple,
 #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
-       .nlattr_to_range        = nf_nat_proto_nlattr_to_range,
+       .nlattr_to_range        = nf_nat_l4proto_nlattr_to_range,
 #endif
 };
 
 static int __init nf_nat_proto_gre_init(void)
 {
-       return nf_nat_protocol_register(&gre);
+       return nf_nat_l4proto_register(NFPROTO_IPV4, &gre);
 }
 
 static void __exit nf_nat_proto_gre_fini(void)
 {
-       nf_nat_protocol_unregister(&gre);
+       nf_nat_l4proto_unregister(NFPROTO_IPV4, &gre);
 }
 
 module_init(nf_nat_proto_gre_init);
index b35172851bae8b92094bff8dc0da833968e43f39..eb303471bcf6c252c2017061fd866e4ccf6a1fd1 100644 (file)
@@ -15,8 +15,7 @@
 #include <linux/netfilter.h>
 #include <net/netfilter/nf_nat.h>
 #include <net/netfilter/nf_nat_core.h>
-#include <net/netfilter/nf_nat_rule.h>
-#include <net/netfilter/nf_nat_protocol.h>
+#include <net/netfilter/nf_nat_l4proto.h>
 
 static bool
 icmp_in_range(const struct nf_conntrack_tuple *tuple,
@@ -29,8 +28,9 @@ icmp_in_range(const struct nf_conntrack_tuple *tuple,
 }
 
 static void
-icmp_unique_tuple(struct nf_conntrack_tuple *tuple,
-                 const struct nf_nat_ipv4_range *range,
+icmp_unique_tuple(const struct nf_nat_l3proto *l3proto,
+                 struct nf_conntrack_tuple *tuple,
+                 const struct nf_nat_range *range,
                  enum nf_nat_manip_type maniptype,
                  const struct nf_conn *ct)
 {
@@ -38,13 +38,14 @@ icmp_unique_tuple(struct nf_conntrack_tuple *tuple,
        unsigned int range_size;
        unsigned int i;
 
-       range_size = ntohs(range->max.icmp.id) - ntohs(range->min.icmp.id) + 1;
+       range_size = ntohs(range->max_proto.icmp.id) -
+                    ntohs(range->min_proto.icmp.id) + 1;
        /* If no range specified... */
        if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED))
                range_size = 0xFFFF;
 
        for (i = 0; ; ++id) {
-               tuple->src.u.icmp.id = htons(ntohs(range->min.icmp.id) +
+               tuple->src.u.icmp.id = htons(ntohs(range->min_proto.icmp.id) +
                                             (id % range_size));
                if (++i == range_size || !nf_nat_used_tuple(tuple, ct))
                        return;
@@ -54,13 +55,12 @@ icmp_unique_tuple(struct nf_conntrack_tuple *tuple,
 
 static bool
 icmp_manip_pkt(struct sk_buff *skb,
-              unsigned int iphdroff,
+              const struct nf_nat_l3proto *l3proto,
+              unsigned int iphdroff, unsigned int hdroff,
               const struct nf_conntrack_tuple *tuple,
               enum nf_nat_manip_type maniptype)
 {
-       const struct iphdr *iph = (struct iphdr *)(skb->data + iphdroff);
        struct icmphdr *hdr;
-       unsigned int hdroff = iphdroff + iph->ihl*4;
 
        if (!skb_make_writable(skb, hdroff + sizeof(*hdr)))
                return false;
@@ -72,12 +72,12 @@ icmp_manip_pkt(struct sk_buff *skb,
        return true;
 }
 
-const struct nf_nat_protocol nf_nat_protocol_icmp = {
-       .protonum               = IPPROTO_ICMP,
+const struct nf_nat_l4proto nf_nat_l4proto_icmp = {
+       .l4proto                = IPPROTO_ICMP,
        .manip_pkt              = icmp_manip_pkt,
        .in_range               = icmp_in_range,
        .unique_tuple           = icmp_unique_tuple,
 #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
-       .nlattr_to_range        = nf_nat_proto_nlattr_to_range,
+       .nlattr_to_range        = nf_nat_l4proto_nlattr_to_range,
 #endif
 };
diff --git a/net/ipv4/netfilter/nf_nat_proto_sctp.c b/net/ipv4/netfilter/nf_nat_proto_sctp.c
deleted file mode 100644 (file)
index 3cce9b6..0000000
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/ip.h>
-#include <linux/sctp.h>
-#include <linux/module.h>
-#include <net/sctp/checksum.h>
-
-#include <net/netfilter/nf_nat_protocol.h>
-
-static u_int16_t nf_sctp_port_rover;
-
-static void
-sctp_unique_tuple(struct nf_conntrack_tuple *tuple,
-                 const struct nf_nat_ipv4_range *range,
-                 enum nf_nat_manip_type maniptype,
-                 const struct nf_conn *ct)
-{
-       nf_nat_proto_unique_tuple(tuple, range, maniptype, ct,
-                                 &nf_sctp_port_rover);
-}
-
-static bool
-sctp_manip_pkt(struct sk_buff *skb,
-              unsigned int iphdroff,
-              const struct nf_conntrack_tuple *tuple,
-              enum nf_nat_manip_type maniptype)
-{
-       const struct iphdr *iph = (struct iphdr *)(skb->data + iphdroff);
-       struct sk_buff *frag;
-       sctp_sctphdr_t *hdr;
-       unsigned int hdroff = iphdroff + iph->ihl*4;
-       __be32 oldip, newip;
-       __be32 crc32;
-
-       if (!skb_make_writable(skb, hdroff + sizeof(*hdr)))
-               return false;
-
-       iph = (struct iphdr *)(skb->data + iphdroff);
-       hdr = (struct sctphdr *)(skb->data + hdroff);
-
-       if (maniptype == NF_NAT_MANIP_SRC) {
-               /* Get rid of src ip and src pt */
-               oldip = iph->saddr;
-               newip = tuple->src.u3.ip;
-               hdr->source = tuple->src.u.sctp.port;
-       } else {
-               /* Get rid of dst ip and dst pt */
-               oldip = iph->daddr;
-               newip = tuple->dst.u3.ip;
-               hdr->dest = tuple->dst.u.sctp.port;
-       }
-
-       crc32 = sctp_start_cksum((u8 *)hdr, skb_headlen(skb) - hdroff);
-       skb_walk_frags(skb, frag)
-               crc32 = sctp_update_cksum((u8 *)frag->data, skb_headlen(frag),
-                                         crc32);
-       crc32 = sctp_end_cksum(crc32);
-       hdr->checksum = crc32;
-
-       return true;
-}
-
-static const struct nf_nat_protocol nf_nat_protocol_sctp = {
-       .protonum               = IPPROTO_SCTP,
-       .manip_pkt              = sctp_manip_pkt,
-       .in_range               = nf_nat_proto_in_range,
-       .unique_tuple           = sctp_unique_tuple,
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
-       .nlattr_to_range        = nf_nat_proto_nlattr_to_range,
-#endif
-};
-
-static int __init nf_nat_proto_sctp_init(void)
-{
-       return nf_nat_protocol_register(&nf_nat_protocol_sctp);
-}
-
-static void __exit nf_nat_proto_sctp_exit(void)
-{
-       nf_nat_protocol_unregister(&nf_nat_protocol_sctp);
-}
-
-module_init(nf_nat_proto_sctp_init);
-module_exit(nf_nat_proto_sctp_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("SCTP NAT protocol helper");
-MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
diff --git a/net/ipv4/netfilter/nf_nat_proto_tcp.c b/net/ipv4/netfilter/nf_nat_proto_tcp.c
deleted file mode 100644 (file)
index 9fb4b4e..0000000
+++ /dev/null
@@ -1,91 +0,0 @@
-/* (C) 1999-2001 Paul `Rusty' Russell
- * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/export.h>
-#include <linux/ip.h>
-#include <linux/tcp.h>
-
-#include <linux/netfilter.h>
-#include <linux/netfilter/nfnetlink_conntrack.h>
-#include <net/netfilter/nf_nat.h>
-#include <net/netfilter/nf_nat_rule.h>
-#include <net/netfilter/nf_nat_protocol.h>
-#include <net/netfilter/nf_nat_core.h>
-
-static u_int16_t tcp_port_rover;
-
-static void
-tcp_unique_tuple(struct nf_conntrack_tuple *tuple,
-                const struct nf_nat_ipv4_range *range,
-                enum nf_nat_manip_type maniptype,
-                const struct nf_conn *ct)
-{
-       nf_nat_proto_unique_tuple(tuple, range, maniptype, ct, &tcp_port_rover);
-}
-
-static bool
-tcp_manip_pkt(struct sk_buff *skb,
-             unsigned int iphdroff,
-             const struct nf_conntrack_tuple *tuple,
-             enum nf_nat_manip_type maniptype)
-{
-       const struct iphdr *iph = (struct iphdr *)(skb->data + iphdroff);
-       struct tcphdr *hdr;
-       unsigned int hdroff = iphdroff + iph->ihl*4;
-       __be32 oldip, newip;
-       __be16 *portptr, newport, oldport;
-       int hdrsize = 8; /* TCP connection tracking guarantees this much */
-
-       /* this could be a inner header returned in icmp packet; in such
-          cases we cannot update the checksum field since it is outside of
-          the 8 bytes of transport layer headers we are guaranteed */
-       if (skb->len >= hdroff + sizeof(struct tcphdr))
-               hdrsize = sizeof(struct tcphdr);
-
-       if (!skb_make_writable(skb, hdroff + hdrsize))
-               return false;
-
-       iph = (struct iphdr *)(skb->data + iphdroff);
-       hdr = (struct tcphdr *)(skb->data + hdroff);
-
-       if (maniptype == NF_NAT_MANIP_SRC) {
-               /* Get rid of src ip and src pt */
-               oldip = iph->saddr;
-               newip = tuple->src.u3.ip;
-               newport = tuple->src.u.tcp.port;
-               portptr = &hdr->source;
-       } else {
-               /* Get rid of dst ip and dst pt */
-               oldip = iph->daddr;
-               newip = tuple->dst.u3.ip;
-               newport = tuple->dst.u.tcp.port;
-               portptr = &hdr->dest;
-       }
-
-       oldport = *portptr;
-       *portptr = newport;
-
-       if (hdrsize < sizeof(*hdr))
-               return true;
-
-       inet_proto_csum_replace4(&hdr->check, skb, oldip, newip, 1);
-       inet_proto_csum_replace2(&hdr->check, skb, oldport, newport, 0);
-       return true;
-}
-
-const struct nf_nat_protocol nf_nat_protocol_tcp = {
-       .protonum               = IPPROTO_TCP,
-       .manip_pkt              = tcp_manip_pkt,
-       .in_range               = nf_nat_proto_in_range,
-       .unique_tuple           = tcp_unique_tuple,
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
-       .nlattr_to_range        = nf_nat_proto_nlattr_to_range,
-#endif
-};
diff --git a/net/ipv4/netfilter/nf_nat_proto_udp.c b/net/ipv4/netfilter/nf_nat_proto_udp.c
deleted file mode 100644 (file)
index 9883336..0000000
+++ /dev/null
@@ -1,82 +0,0 @@
-/* (C) 1999-2001 Paul `Rusty' Russell
- * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/types.h>
-#include <linux/export.h>
-#include <linux/init.h>
-#include <linux/ip.h>
-#include <linux/udp.h>
-
-#include <linux/netfilter.h>
-#include <net/netfilter/nf_nat.h>
-#include <net/netfilter/nf_nat_core.h>
-#include <net/netfilter/nf_nat_rule.h>
-#include <net/netfilter/nf_nat_protocol.h>
-
-static u_int16_t udp_port_rover;
-
-static void
-udp_unique_tuple(struct nf_conntrack_tuple *tuple,
-                const struct nf_nat_ipv4_range *range,
-                enum nf_nat_manip_type maniptype,
-                const struct nf_conn *ct)
-{
-       nf_nat_proto_unique_tuple(tuple, range, maniptype, ct, &udp_port_rover);
-}
-
-static bool
-udp_manip_pkt(struct sk_buff *skb,
-             unsigned int iphdroff,
-             const struct nf_conntrack_tuple *tuple,
-             enum nf_nat_manip_type maniptype)
-{
-       const struct iphdr *iph = (struct iphdr *)(skb->data + iphdroff);
-       struct udphdr *hdr;
-       unsigned int hdroff = iphdroff + iph->ihl*4;
-       __be32 oldip, newip;
-       __be16 *portptr, newport;
-
-       if (!skb_make_writable(skb, hdroff + sizeof(*hdr)))
-               return false;
-
-       iph = (struct iphdr *)(skb->data + iphdroff);
-       hdr = (struct udphdr *)(skb->data + hdroff);
-
-       if (maniptype == NF_NAT_MANIP_SRC) {
-               /* Get rid of src ip and src pt */
-               oldip = iph->saddr;
-               newip = tuple->src.u3.ip;
-               newport = tuple->src.u.udp.port;
-               portptr = &hdr->source;
-       } else {
-               /* Get rid of dst ip and dst pt */
-               oldip = iph->daddr;
-               newip = tuple->dst.u3.ip;
-               newport = tuple->dst.u.udp.port;
-               portptr = &hdr->dest;
-       }
-       if (hdr->check || skb->ip_summed == CHECKSUM_PARTIAL) {
-               inet_proto_csum_replace4(&hdr->check, skb, oldip, newip, 1);
-               inet_proto_csum_replace2(&hdr->check, skb, *portptr, newport,
-                                        0);
-               if (!hdr->check)
-                       hdr->check = CSUM_MANGLED_0;
-       }
-       *portptr = newport;
-       return true;
-}
-
-const struct nf_nat_protocol nf_nat_protocol_udp = {
-       .protonum               = IPPROTO_UDP,
-       .manip_pkt              = udp_manip_pkt,
-       .in_range               = nf_nat_proto_in_range,
-       .unique_tuple           = udp_unique_tuple,
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
-       .nlattr_to_range        = nf_nat_proto_nlattr_to_range,
-#endif
-};
diff --git a/net/ipv4/netfilter/nf_nat_proto_udplite.c b/net/ipv4/netfilter/nf_nat_proto_udplite.c
deleted file mode 100644 (file)
index d24d10a..0000000
+++ /dev/null
@@ -1,98 +0,0 @@
-/* (C) 1999-2001 Paul `Rusty' Russell
- * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
- * (C) 2008 Patrick McHardy <kaber@trash.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/ip.h>
-#include <linux/udp.h>
-
-#include <linux/netfilter.h>
-#include <linux/module.h>
-#include <net/netfilter/nf_nat.h>
-#include <net/netfilter/nf_nat_protocol.h>
-
-static u_int16_t udplite_port_rover;
-
-static void
-udplite_unique_tuple(struct nf_conntrack_tuple *tuple,
-                    const struct nf_nat_ipv4_range *range,
-                    enum nf_nat_manip_type maniptype,
-                    const struct nf_conn *ct)
-{
-       nf_nat_proto_unique_tuple(tuple, range, maniptype, ct,
-                                 &udplite_port_rover);
-}
-
-static bool
-udplite_manip_pkt(struct sk_buff *skb,
-                 unsigned int iphdroff,
-                 const struct nf_conntrack_tuple *tuple,
-                 enum nf_nat_manip_type maniptype)
-{
-       const struct iphdr *iph = (struct iphdr *)(skb->data + iphdroff);
-       struct udphdr *hdr;
-       unsigned int hdroff = iphdroff + iph->ihl*4;
-       __be32 oldip, newip;
-       __be16 *portptr, newport;
-
-       if (!skb_make_writable(skb, hdroff + sizeof(*hdr)))
-               return false;
-
-       iph = (struct iphdr *)(skb->data + iphdroff);
-       hdr = (struct udphdr *)(skb->data + hdroff);
-
-       if (maniptype == NF_NAT_MANIP_SRC) {
-               /* Get rid of src ip and src pt */
-               oldip = iph->saddr;
-               newip = tuple->src.u3.ip;
-               newport = tuple->src.u.udp.port;
-               portptr = &hdr->source;
-       } else {
-               /* Get rid of dst ip and dst pt */
-               oldip = iph->daddr;
-               newip = tuple->dst.u3.ip;
-               newport = tuple->dst.u.udp.port;
-               portptr = &hdr->dest;
-       }
-
-       inet_proto_csum_replace4(&hdr->check, skb, oldip, newip, 1);
-       inet_proto_csum_replace2(&hdr->check, skb, *portptr, newport, 0);
-       if (!hdr->check)
-               hdr->check = CSUM_MANGLED_0;
-
-       *portptr = newport;
-       return true;
-}
-
-static const struct nf_nat_protocol nf_nat_protocol_udplite = {
-       .protonum               = IPPROTO_UDPLITE,
-       .manip_pkt              = udplite_manip_pkt,
-       .in_range               = nf_nat_proto_in_range,
-       .unique_tuple           = udplite_unique_tuple,
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
-       .nlattr_to_range        = nf_nat_proto_nlattr_to_range,
-#endif
-};
-
-static int __init nf_nat_proto_udplite_init(void)
-{
-       return nf_nat_protocol_register(&nf_nat_protocol_udplite);
-}
-
-static void __exit nf_nat_proto_udplite_fini(void)
-{
-       nf_nat_protocol_unregister(&nf_nat_protocol_udplite);
-}
-
-module_init(nf_nat_proto_udplite_init);
-module_exit(nf_nat_proto_udplite_fini);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("UDP-Lite NAT protocol helper");
-MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
diff --git a/net/ipv4/netfilter/nf_nat_proto_unknown.c b/net/ipv4/netfilter/nf_nat_proto_unknown.c
deleted file mode 100644 (file)
index e0afe81..0000000
+++ /dev/null
@@ -1,52 +0,0 @@
-/* The "unknown" protocol.  This is what is used for protocols we
- * don't understand.  It's returned by ip_ct_find_proto().
- */
-
-/* (C) 1999-2001 Paul `Rusty' Russell
- * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/types.h>
-#include <linux/init.h>
-
-#include <linux/netfilter.h>
-#include <net/netfilter/nf_nat.h>
-#include <net/netfilter/nf_nat_rule.h>
-#include <net/netfilter/nf_nat_protocol.h>
-
-static bool unknown_in_range(const struct nf_conntrack_tuple *tuple,
-                            enum nf_nat_manip_type manip_type,
-                            const union nf_conntrack_man_proto *min,
-                            const union nf_conntrack_man_proto *max)
-{
-       return true;
-}
-
-static void unknown_unique_tuple(struct nf_conntrack_tuple *tuple,
-                                const struct nf_nat_ipv4_range *range,
-                                enum nf_nat_manip_type maniptype,
-                                const struct nf_conn *ct)
-{
-       /* Sorry: we can't help you; if it's not unique, we can't frob
-          anything. */
-       return;
-}
-
-static bool
-unknown_manip_pkt(struct sk_buff *skb,
-                 unsigned int iphdroff,
-                 const struct nf_conntrack_tuple *tuple,
-                 enum nf_nat_manip_type maniptype)
-{
-       return true;
-}
-
-const struct nf_nat_protocol nf_nat_unknown_protocol = {
-       .manip_pkt              = unknown_manip_pkt,
-       .in_range               = unknown_in_range,
-       .unique_tuple           = unknown_unique_tuple,
-};
diff --git a/net/ipv4/netfilter/nf_nat_rule.c b/net/ipv4/netfilter/nf_nat_rule.c
deleted file mode 100644 (file)
index d2a9dc3..0000000
+++ /dev/null
@@ -1,214 +0,0 @@
-/* (C) 1999-2001 Paul `Rusty' Russell
- * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-/* Everything about the rules for NAT. */
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/types.h>
-#include <linux/ip.h>
-#include <linux/netfilter.h>
-#include <linux/netfilter_ipv4.h>
-#include <linux/module.h>
-#include <linux/kmod.h>
-#include <linux/skbuff.h>
-#include <linux/proc_fs.h>
-#include <linux/slab.h>
-#include <net/checksum.h>
-#include <net/route.h>
-#include <linux/bitops.h>
-
-#include <linux/netfilter_ipv4/ip_tables.h>
-#include <net/netfilter/nf_nat.h>
-#include <net/netfilter/nf_nat_core.h>
-#include <net/netfilter/nf_nat_rule.h>
-
-#define NAT_VALID_HOOKS ((1 << NF_INET_PRE_ROUTING) | \
-                        (1 << NF_INET_POST_ROUTING) | \
-                        (1 << NF_INET_LOCAL_OUT) | \
-                        (1 << NF_INET_LOCAL_IN))
-
-static const struct xt_table nat_table = {
-       .name           = "nat",
-       .valid_hooks    = NAT_VALID_HOOKS,
-       .me             = THIS_MODULE,
-       .af             = NFPROTO_IPV4,
-};
-
-/* Source NAT */
-static unsigned int
-ipt_snat_target(struct sk_buff *skb, const struct xt_action_param *par)
-{
-       struct nf_conn *ct;
-       enum ip_conntrack_info ctinfo;
-       const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
-
-       NF_CT_ASSERT(par->hooknum == NF_INET_POST_ROUTING ||
-                    par->hooknum == NF_INET_LOCAL_IN);
-
-       ct = nf_ct_get(skb, &ctinfo);
-
-       /* Connection must be valid and new. */
-       NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED ||
-                           ctinfo == IP_CT_RELATED_REPLY));
-       NF_CT_ASSERT(par->out != NULL);
-
-       return nf_nat_setup_info(ct, &mr->range[0], NF_NAT_MANIP_SRC);
-}
-
-static unsigned int
-ipt_dnat_target(struct sk_buff *skb, const struct xt_action_param *par)
-{
-       struct nf_conn *ct;
-       enum ip_conntrack_info ctinfo;
-       const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
-
-       NF_CT_ASSERT(par->hooknum == NF_INET_PRE_ROUTING ||
-                    par->hooknum == NF_INET_LOCAL_OUT);
-
-       ct = nf_ct_get(skb, &ctinfo);
-
-       /* Connection must be valid and new. */
-       NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED));
-
-       return nf_nat_setup_info(ct, &mr->range[0], NF_NAT_MANIP_DST);
-}
-
-static int ipt_snat_checkentry(const struct xt_tgchk_param *par)
-{
-       const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
-
-       /* Must be a valid range */
-       if (mr->rangesize != 1) {
-               pr_info("SNAT: multiple ranges no longer supported\n");
-               return -EINVAL;
-       }
-       return 0;
-}
-
-static int ipt_dnat_checkentry(const struct xt_tgchk_param *par)
-{
-       const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
-
-       /* Must be a valid range */
-       if (mr->rangesize != 1) {
-               pr_info("DNAT: multiple ranges no longer supported\n");
-               return -EINVAL;
-       }
-       return 0;
-}
-
-static unsigned int
-alloc_null_binding(struct nf_conn *ct, unsigned int hooknum)
-{
-       /* Force range to this IP; let proto decide mapping for
-          per-proto parts (hence not NF_NAT_RANGE_PROTO_SPECIFIED).
-       */
-       struct nf_nat_ipv4_range range;
-
-       range.flags = 0;
-       pr_debug("Allocating NULL binding for %p (%pI4)\n", ct,
-                HOOK2MANIP(hooknum) == NF_NAT_MANIP_SRC ?
-                &ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip :
-                &ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip);
-
-       return nf_nat_setup_info(ct, &range, HOOK2MANIP(hooknum));
-}
-
-int nf_nat_rule_find(struct sk_buff *skb,
-                    unsigned int hooknum,
-                    const struct net_device *in,
-                    const struct net_device *out,
-                    struct nf_conn *ct)
-{
-       struct net *net = nf_ct_net(ct);
-       int ret;
-
-       ret = ipt_do_table(skb, hooknum, in, out, net->ipv4.nat_table);
-
-       if (ret == NF_ACCEPT) {
-               if (!nf_nat_initialized(ct, HOOK2MANIP(hooknum)))
-                       /* NUL mapping */
-                       ret = alloc_null_binding(ct, hooknum);
-       }
-       return ret;
-}
-
-static struct xt_target ipt_snat_reg __read_mostly = {
-       .name           = "SNAT",
-       .target         = ipt_snat_target,
-       .targetsize     = sizeof(struct nf_nat_ipv4_multi_range_compat),
-       .table          = "nat",
-       .hooks          = (1 << NF_INET_POST_ROUTING) | (1 << NF_INET_LOCAL_IN),
-       .checkentry     = ipt_snat_checkentry,
-       .family         = AF_INET,
-};
-
-static struct xt_target ipt_dnat_reg __read_mostly = {
-       .name           = "DNAT",
-       .target         = ipt_dnat_target,
-       .targetsize     = sizeof(struct nf_nat_ipv4_multi_range_compat),
-       .table          = "nat",
-       .hooks          = (1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_OUT),
-       .checkentry     = ipt_dnat_checkentry,
-       .family         = AF_INET,
-};
-
-static int __net_init nf_nat_rule_net_init(struct net *net)
-{
-       struct ipt_replace *repl;
-
-       repl = ipt_alloc_initial_table(&nat_table);
-       if (repl == NULL)
-               return -ENOMEM;
-       net->ipv4.nat_table = ipt_register_table(net, &nat_table, repl);
-       kfree(repl);
-       if (IS_ERR(net->ipv4.nat_table))
-               return PTR_ERR(net->ipv4.nat_table);
-       return 0;
-}
-
-static void __net_exit nf_nat_rule_net_exit(struct net *net)
-{
-       ipt_unregister_table(net, net->ipv4.nat_table);
-}
-
-static struct pernet_operations nf_nat_rule_net_ops = {
-       .init = nf_nat_rule_net_init,
-       .exit = nf_nat_rule_net_exit,
-};
-
-int __init nf_nat_rule_init(void)
-{
-       int ret;
-
-       ret = register_pernet_subsys(&nf_nat_rule_net_ops);
-       if (ret != 0)
-               goto out;
-       ret = xt_register_target(&ipt_snat_reg);
-       if (ret != 0)
-               goto unregister_table;
-
-       ret = xt_register_target(&ipt_dnat_reg);
-       if (ret != 0)
-               goto unregister_snat;
-
-       return ret;
-
- unregister_snat:
-       xt_unregister_target(&ipt_snat_reg);
- unregister_table:
-       unregister_pernet_subsys(&nf_nat_rule_net_ops);
- out:
-       return ret;
-}
-
-void nf_nat_rule_cleanup(void)
-{
-       xt_unregister_target(&ipt_dnat_reg);
-       xt_unregister_target(&ipt_snat_reg);
-       unregister_pernet_subsys(&nf_nat_rule_net_ops);
-}
diff --git a/net/ipv4/netfilter/nf_nat_sip.c b/net/ipv4/netfilter/nf_nat_sip.c
deleted file mode 100644 (file)
index 9c87cde..0000000
+++ /dev/null
@@ -1,572 +0,0 @@
-/* SIP extension for NAT alteration.
- *
- * (C) 2005 by Christian Hentschel <chentschel@arnet.com.ar>
- * based on RR's ip_nat_ftp.c and other modules.
- * (C) 2007 United Security Providers
- * (C) 2007, 2008 Patrick McHardy <kaber@trash.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/skbuff.h>
-#include <linux/ip.h>
-#include <net/ip.h>
-#include <linux/udp.h>
-#include <linux/tcp.h>
-
-#include <net/netfilter/nf_nat.h>
-#include <net/netfilter/nf_nat_helper.h>
-#include <net/netfilter/nf_nat_rule.h>
-#include <net/netfilter/nf_conntrack_helper.h>
-#include <net/netfilter/nf_conntrack_expect.h>
-#include <linux/netfilter/nf_conntrack_sip.h>
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Christian Hentschel <chentschel@arnet.com.ar>");
-MODULE_DESCRIPTION("SIP NAT helper");
-MODULE_ALIAS("ip_nat_sip");
-
-
-static unsigned int mangle_packet(struct sk_buff *skb, unsigned int dataoff,
-                                 const char **dptr, unsigned int *datalen,
-                                 unsigned int matchoff, unsigned int matchlen,
-                                 const char *buffer, unsigned int buflen)
-{
-       enum ip_conntrack_info ctinfo;
-       struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
-       struct tcphdr *th;
-       unsigned int baseoff;
-
-       if (nf_ct_protonum(ct) == IPPROTO_TCP) {
-               th = (struct tcphdr *)(skb->data + ip_hdrlen(skb));
-               baseoff = ip_hdrlen(skb) + th->doff * 4;
-               matchoff += dataoff - baseoff;
-
-               if (!__nf_nat_mangle_tcp_packet(skb, ct, ctinfo,
-                                               matchoff, matchlen,
-                                               buffer, buflen, false))
-                       return 0;
-       } else {
-               baseoff = ip_hdrlen(skb) + sizeof(struct udphdr);
-               matchoff += dataoff - baseoff;
-
-               if (!nf_nat_mangle_udp_packet(skb, ct, ctinfo,
-                                             matchoff, matchlen,
-                                             buffer, buflen))
-                       return 0;
-       }
-
-       /* Reload data pointer and adjust datalen value */
-       *dptr = skb->data + dataoff;
-       *datalen += buflen - matchlen;
-       return 1;
-}
-
-static int map_addr(struct sk_buff *skb, unsigned int dataoff,
-                   const char **dptr, unsigned int *datalen,
-                   unsigned int matchoff, unsigned int matchlen,
-                   union nf_inet_addr *addr, __be16 port)
-{
-       enum ip_conntrack_info ctinfo;
-       struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
-       enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
-       char buffer[sizeof("nnn.nnn.nnn.nnn:nnnnn")];
-       unsigned int buflen;
-       __be32 newaddr;
-       __be16 newport;
-
-       if (ct->tuplehash[dir].tuple.src.u3.ip == addr->ip &&
-           ct->tuplehash[dir].tuple.src.u.udp.port == port) {
-               newaddr = ct->tuplehash[!dir].tuple.dst.u3.ip;
-               newport = ct->tuplehash[!dir].tuple.dst.u.udp.port;
-       } else if (ct->tuplehash[dir].tuple.dst.u3.ip == addr->ip &&
-                  ct->tuplehash[dir].tuple.dst.u.udp.port == port) {
-               newaddr = ct->tuplehash[!dir].tuple.src.u3.ip;
-               newport = ct->tuplehash[!dir].tuple.src.u.udp.port;
-       } else
-               return 1;
-
-       if (newaddr == addr->ip && newport == port)
-               return 1;
-
-       buflen = sprintf(buffer, "%pI4:%u", &newaddr, ntohs(newport));
-
-       return mangle_packet(skb, dataoff, dptr, datalen, matchoff, matchlen,
-                            buffer, buflen);
-}
-
-static int map_sip_addr(struct sk_buff *skb, unsigned int dataoff,
-                       const char **dptr, unsigned int *datalen,
-                       enum sip_header_types type)
-{
-       enum ip_conntrack_info ctinfo;
-       struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
-       unsigned int matchlen, matchoff;
-       union nf_inet_addr addr;
-       __be16 port;
-
-       if (ct_sip_parse_header_uri(ct, *dptr, NULL, *datalen, type, NULL,
-                                   &matchoff, &matchlen, &addr, &port) <= 0)
-               return 1;
-       return map_addr(skb, dataoff, dptr, datalen, matchoff, matchlen,
-                       &addr, port);
-}
-
-static unsigned int ip_nat_sip(struct sk_buff *skb, unsigned int dataoff,
-                              const char **dptr, unsigned int *datalen)
-{
-       enum ip_conntrack_info ctinfo;
-       struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
-       enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
-       unsigned int coff, matchoff, matchlen;
-       enum sip_header_types hdr;
-       union nf_inet_addr addr;
-       __be16 port;
-       int request, in_header;
-
-       /* Basic rules: requests and responses. */
-       if (strnicmp(*dptr, "SIP/2.0", strlen("SIP/2.0")) != 0) {
-               if (ct_sip_parse_request(ct, *dptr, *datalen,
-                                        &matchoff, &matchlen,
-                                        &addr, &port) > 0 &&
-                   !map_addr(skb, dataoff, dptr, datalen, matchoff, matchlen,
-                             &addr, port))
-                       return NF_DROP;
-               request = 1;
-       } else
-               request = 0;
-
-       if (nf_ct_protonum(ct) == IPPROTO_TCP)
-               hdr = SIP_HDR_VIA_TCP;
-       else
-               hdr = SIP_HDR_VIA_UDP;
-
-       /* Translate topmost Via header and parameters */
-       if (ct_sip_parse_header_uri(ct, *dptr, NULL, *datalen,
-                                   hdr, NULL, &matchoff, &matchlen,
-                                   &addr, &port) > 0) {
-               unsigned int olen, matchend, poff, plen, buflen, n;
-               char buffer[sizeof("nnn.nnn.nnn.nnn:nnnnn")];
-
-               /* We're only interested in headers related to this
-                * connection */
-               if (request) {
-                       if (addr.ip != ct->tuplehash[dir].tuple.src.u3.ip ||
-                           port != ct->tuplehash[dir].tuple.src.u.udp.port)
-                               goto next;
-               } else {
-                       if (addr.ip != ct->tuplehash[dir].tuple.dst.u3.ip ||
-                           port != ct->tuplehash[dir].tuple.dst.u.udp.port)
-                               goto next;
-               }
-
-               olen = *datalen;
-               if (!map_addr(skb, dataoff, dptr, datalen, matchoff, matchlen,
-                             &addr, port))
-                       return NF_DROP;
-
-               matchend = matchoff + matchlen + *datalen - olen;
-
-               /* The maddr= parameter (RFC 2361) specifies where to send
-                * the reply. */
-               if (ct_sip_parse_address_param(ct, *dptr, matchend, *datalen,
-                                              "maddr=", &poff, &plen,
-                                              &addr, true) > 0 &&
-                   addr.ip == ct->tuplehash[dir].tuple.src.u3.ip &&
-                   addr.ip != ct->tuplehash[!dir].tuple.dst.u3.ip) {
-                       buflen = sprintf(buffer, "%pI4",
-                                       &ct->tuplehash[!dir].tuple.dst.u3.ip);
-                       if (!mangle_packet(skb, dataoff, dptr, datalen,
-                                          poff, plen, buffer, buflen))
-                               return NF_DROP;
-               }
-
-               /* The received= parameter (RFC 2361) contains the address
-                * from which the server received the request. */
-               if (ct_sip_parse_address_param(ct, *dptr, matchend, *datalen,
-                                              "received=", &poff, &plen,
-                                              &addr, false) > 0 &&
-                   addr.ip == ct->tuplehash[dir].tuple.dst.u3.ip &&
-                   addr.ip != ct->tuplehash[!dir].tuple.src.u3.ip) {
-                       buflen = sprintf(buffer, "%pI4",
-                                       &ct->tuplehash[!dir].tuple.src.u3.ip);
-                       if (!mangle_packet(skb, dataoff, dptr, datalen,
-                                          poff, plen, buffer, buflen))
-                               return NF_DROP;
-               }
-
-               /* The rport= parameter (RFC 3581) contains the port number
-                * from which the server received the request. */
-               if (ct_sip_parse_numerical_param(ct, *dptr, matchend, *datalen,
-                                                "rport=", &poff, &plen,
-                                                &n) > 0 &&
-                   htons(n) == ct->tuplehash[dir].tuple.dst.u.udp.port &&
-                   htons(n) != ct->tuplehash[!dir].tuple.src.u.udp.port) {
-                       __be16 p = ct->tuplehash[!dir].tuple.src.u.udp.port;
-                       buflen = sprintf(buffer, "%u", ntohs(p));
-                       if (!mangle_packet(skb, dataoff, dptr, datalen,
-                                          poff, plen, buffer, buflen))
-                               return NF_DROP;
-               }
-       }
-
-next:
-       /* Translate Contact headers */
-       coff = 0;
-       in_header = 0;
-       while (ct_sip_parse_header_uri(ct, *dptr, &coff, *datalen,
-                                      SIP_HDR_CONTACT, &in_header,
-                                      &matchoff, &matchlen,
-                                      &addr, &port) > 0) {
-               if (!map_addr(skb, dataoff, dptr, datalen, matchoff, matchlen,
-                             &addr, port))
-                       return NF_DROP;
-       }
-
-       if (!map_sip_addr(skb, dataoff, dptr, datalen, SIP_HDR_FROM) ||
-           !map_sip_addr(skb, dataoff, dptr, datalen, SIP_HDR_TO))
-               return NF_DROP;
-
-       return NF_ACCEPT;
-}
-
-static void ip_nat_sip_seq_adjust(struct sk_buff *skb, s16 off)
-{
-       enum ip_conntrack_info ctinfo;
-       struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
-       const struct tcphdr *th;
-
-       if (nf_ct_protonum(ct) != IPPROTO_TCP || off == 0)
-               return;
-
-       th = (struct tcphdr *)(skb->data + ip_hdrlen(skb));
-       nf_nat_set_seq_adjust(ct, ctinfo, th->seq, off);
-}
-
-/* Handles expected signalling connections and media streams */
-static void ip_nat_sip_expected(struct nf_conn *ct,
-                               struct nf_conntrack_expect *exp)
-{
-       struct nf_nat_ipv4_range range;
-
-       /* This must be a fresh one. */
-       BUG_ON(ct->status & IPS_NAT_DONE_MASK);
-
-       /* For DST manip, map port here to where it's expected. */
-       range.flags = (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED);
-       range.min = range.max = exp->saved_proto;
-       range.min_ip = range.max_ip = exp->saved_ip;
-       nf_nat_setup_info(ct, &range, NF_NAT_MANIP_DST);
-
-       /* Change src to where master sends to, but only if the connection
-        * actually came from the same source. */
-       if (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip ==
-           ct->master->tuplehash[exp->dir].tuple.src.u3.ip) {
-               range.flags = NF_NAT_RANGE_MAP_IPS;
-               range.min_ip = range.max_ip
-                       = ct->master->tuplehash[!exp->dir].tuple.dst.u3.ip;
-               nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC);
-       }
-}
-
-static unsigned int ip_nat_sip_expect(struct sk_buff *skb, unsigned int dataoff,
-                                     const char **dptr, unsigned int *datalen,
-                                     struct nf_conntrack_expect *exp,
-                                     unsigned int matchoff,
-                                     unsigned int matchlen)
-{
-       enum ip_conntrack_info ctinfo;
-       struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
-       enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
-       __be32 newip;
-       u_int16_t port;
-       char buffer[sizeof("nnn.nnn.nnn.nnn:nnnnn")];
-       unsigned int buflen;
-
-       /* Connection will come from reply */
-       if (ct->tuplehash[dir].tuple.src.u3.ip == ct->tuplehash[!dir].tuple.dst.u3.ip)
-               newip = exp->tuple.dst.u3.ip;
-       else
-               newip = ct->tuplehash[!dir].tuple.dst.u3.ip;
-
-       /* If the signalling port matches the connection's source port in the
-        * original direction, try to use the destination port in the opposite
-        * direction. */
-       if (exp->tuple.dst.u.udp.port ==
-           ct->tuplehash[dir].tuple.src.u.udp.port)
-               port = ntohs(ct->tuplehash[!dir].tuple.dst.u.udp.port);
-       else
-               port = ntohs(exp->tuple.dst.u.udp.port);
-
-       exp->saved_ip = exp->tuple.dst.u3.ip;
-       exp->tuple.dst.u3.ip = newip;
-       exp->saved_proto.udp.port = exp->tuple.dst.u.udp.port;
-       exp->dir = !dir;
-       exp->expectfn = ip_nat_sip_expected;
-
-       for (; port != 0; port++) {
-               int ret;
-
-               exp->tuple.dst.u.udp.port = htons(port);
-               ret = nf_ct_expect_related(exp);
-               if (ret == 0)
-                       break;
-               else if (ret != -EBUSY) {
-                       port = 0;
-                       break;
-               }
-       }
-
-       if (port == 0)
-               return NF_DROP;
-
-       if (exp->tuple.dst.u3.ip != exp->saved_ip ||
-           exp->tuple.dst.u.udp.port != exp->saved_proto.udp.port) {
-               buflen = sprintf(buffer, "%pI4:%u", &newip, port);
-               if (!mangle_packet(skb, dataoff, dptr, datalen,
-                                  matchoff, matchlen, buffer, buflen))
-                       goto err;
-       }
-       return NF_ACCEPT;
-
-err:
-       nf_ct_unexpect_related(exp);
-       return NF_DROP;
-}
-
-static int mangle_content_len(struct sk_buff *skb, unsigned int dataoff,
-                             const char **dptr, unsigned int *datalen)
-{
-       enum ip_conntrack_info ctinfo;
-       struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
-       unsigned int matchoff, matchlen;
-       char buffer[sizeof("65536")];
-       int buflen, c_len;
-
-       /* Get actual SDP length */
-       if (ct_sip_get_sdp_header(ct, *dptr, 0, *datalen,
-                                 SDP_HDR_VERSION, SDP_HDR_UNSPEC,
-                                 &matchoff, &matchlen) <= 0)
-               return 0;
-       c_len = *datalen - matchoff + strlen("v=");
-
-       /* Now, update SDP length */
-       if (ct_sip_get_header(ct, *dptr, 0, *datalen, SIP_HDR_CONTENT_LENGTH,
-                             &matchoff, &matchlen) <= 0)
-               return 0;
-
-       buflen = sprintf(buffer, "%u", c_len);
-       return mangle_packet(skb, dataoff, dptr, datalen, matchoff, matchlen,
-                            buffer, buflen);
-}
-
-static int mangle_sdp_packet(struct sk_buff *skb, unsigned int dataoff,
-                            const char **dptr, unsigned int *datalen,
-                            unsigned int sdpoff,
-                            enum sdp_header_types type,
-                            enum sdp_header_types term,
-                            char *buffer, int buflen)
-{
-       enum ip_conntrack_info ctinfo;
-       struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
-       unsigned int matchlen, matchoff;
-
-       if (ct_sip_get_sdp_header(ct, *dptr, sdpoff, *datalen, type, term,
-                                 &matchoff, &matchlen) <= 0)
-               return -ENOENT;
-       return mangle_packet(skb, dataoff, dptr, datalen, matchoff, matchlen,
-                            buffer, buflen) ? 0 : -EINVAL;
-}
-
-static unsigned int ip_nat_sdp_addr(struct sk_buff *skb, unsigned int dataoff,
-                                   const char **dptr, unsigned int *datalen,
-                                   unsigned int sdpoff,
-                                   enum sdp_header_types type,
-                                   enum sdp_header_types term,
-                                   const union nf_inet_addr *addr)
-{
-       char buffer[sizeof("nnn.nnn.nnn.nnn")];
-       unsigned int buflen;
-
-       buflen = sprintf(buffer, "%pI4", &addr->ip);
-       if (mangle_sdp_packet(skb, dataoff, dptr, datalen, sdpoff, type, term,
-                             buffer, buflen))
-               return 0;
-
-       return mangle_content_len(skb, dataoff, dptr, datalen);
-}
-
-static unsigned int ip_nat_sdp_port(struct sk_buff *skb, unsigned int dataoff,
-                                   const char **dptr, unsigned int *datalen,
-                                   unsigned int matchoff,
-                                   unsigned int matchlen,
-                                   u_int16_t port)
-{
-       char buffer[sizeof("nnnnn")];
-       unsigned int buflen;
-
-       buflen = sprintf(buffer, "%u", port);
-       if (!mangle_packet(skb, dataoff, dptr, datalen, matchoff, matchlen,
-                          buffer, buflen))
-               return 0;
-
-       return mangle_content_len(skb, dataoff, dptr, datalen);
-}
-
-static unsigned int ip_nat_sdp_session(struct sk_buff *skb, unsigned int dataoff,
-                                      const char **dptr, unsigned int *datalen,
-                                      unsigned int sdpoff,
-                                      const union nf_inet_addr *addr)
-{
-       char buffer[sizeof("nnn.nnn.nnn.nnn")];
-       unsigned int buflen;
-
-       /* Mangle session description owner and contact addresses */
-       buflen = sprintf(buffer, "%pI4", &addr->ip);
-       if (mangle_sdp_packet(skb, dataoff, dptr, datalen, sdpoff,
-                              SDP_HDR_OWNER_IP4, SDP_HDR_MEDIA,
-                              buffer, buflen))
-               return 0;
-
-       switch (mangle_sdp_packet(skb, dataoff, dptr, datalen, sdpoff,
-                                 SDP_HDR_CONNECTION_IP4, SDP_HDR_MEDIA,
-                                 buffer, buflen)) {
-       case 0:
-       /*
-        * RFC 2327:
-        *
-        * Session description
-        *
-        * c=* (connection information - not required if included in all media)
-        */
-       case -ENOENT:
-               break;
-       default:
-               return 0;
-       }
-
-       return mangle_content_len(skb, dataoff, dptr, datalen);
-}
-
-/* So, this packet has hit the connection tracking matching code.
-   Mangle it, and change the expectation to match the new version. */
-static unsigned int ip_nat_sdp_media(struct sk_buff *skb, unsigned int dataoff,
-                                    const char **dptr, unsigned int *datalen,
-                                    struct nf_conntrack_expect *rtp_exp,
-                                    struct nf_conntrack_expect *rtcp_exp,
-                                    unsigned int mediaoff,
-                                    unsigned int medialen,
-                                    union nf_inet_addr *rtp_addr)
-{
-       enum ip_conntrack_info ctinfo;
-       struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
-       enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
-       u_int16_t port;
-
-       /* Connection will come from reply */
-       if (ct->tuplehash[dir].tuple.src.u3.ip ==
-           ct->tuplehash[!dir].tuple.dst.u3.ip)
-               rtp_addr->ip = rtp_exp->tuple.dst.u3.ip;
-       else
-               rtp_addr->ip = ct->tuplehash[!dir].tuple.dst.u3.ip;
-
-       rtp_exp->saved_ip = rtp_exp->tuple.dst.u3.ip;
-       rtp_exp->tuple.dst.u3.ip = rtp_addr->ip;
-       rtp_exp->saved_proto.udp.port = rtp_exp->tuple.dst.u.udp.port;
-       rtp_exp->dir = !dir;
-       rtp_exp->expectfn = ip_nat_sip_expected;
-
-       rtcp_exp->saved_ip = rtcp_exp->tuple.dst.u3.ip;
-       rtcp_exp->tuple.dst.u3.ip = rtp_addr->ip;
-       rtcp_exp->saved_proto.udp.port = rtcp_exp->tuple.dst.u.udp.port;
-       rtcp_exp->dir = !dir;
-       rtcp_exp->expectfn = ip_nat_sip_expected;
-
-       /* Try to get same pair of ports: if not, try to change them. */
-       for (port = ntohs(rtp_exp->tuple.dst.u.udp.port);
-            port != 0; port += 2) {
-               int ret;
-
-               rtp_exp->tuple.dst.u.udp.port = htons(port);
-               ret = nf_ct_expect_related(rtp_exp);
-               if (ret == -EBUSY)
-                       continue;
-               else if (ret < 0) {
-                       port = 0;
-                       break;
-               }
-               rtcp_exp->tuple.dst.u.udp.port = htons(port + 1);
-               ret = nf_ct_expect_related(rtcp_exp);
-               if (ret == 0)
-                       break;
-               else if (ret == -EBUSY) {
-                       nf_ct_unexpect_related(rtp_exp);
-                       continue;
-               } else if (ret < 0) {
-                       nf_ct_unexpect_related(rtp_exp);
-                       port = 0;
-                       break;
-               }
-       }
-
-       if (port == 0)
-               goto err1;
-
-       /* Update media port. */
-       if (rtp_exp->tuple.dst.u.udp.port != rtp_exp->saved_proto.udp.port &&
-           !ip_nat_sdp_port(skb, dataoff, dptr, datalen,
-                            mediaoff, medialen, port))
-               goto err2;
-
-       return NF_ACCEPT;
-
-err2:
-       nf_ct_unexpect_related(rtp_exp);
-       nf_ct_unexpect_related(rtcp_exp);
-err1:
-       return NF_DROP;
-}
-
-static struct nf_ct_helper_expectfn sip_nat = {
-        .name           = "sip",
-        .expectfn       = ip_nat_sip_expected,
-};
-
-static void __exit nf_nat_sip_fini(void)
-{
-       RCU_INIT_POINTER(nf_nat_sip_hook, NULL);
-       RCU_INIT_POINTER(nf_nat_sip_seq_adjust_hook, NULL);
-       RCU_INIT_POINTER(nf_nat_sip_expect_hook, NULL);
-       RCU_INIT_POINTER(nf_nat_sdp_addr_hook, NULL);
-       RCU_INIT_POINTER(nf_nat_sdp_port_hook, NULL);
-       RCU_INIT_POINTER(nf_nat_sdp_session_hook, NULL);
-       RCU_INIT_POINTER(nf_nat_sdp_media_hook, NULL);
-       nf_ct_helper_expectfn_unregister(&sip_nat);
-       synchronize_rcu();
-}
-
-static int __init nf_nat_sip_init(void)
-{
-       BUG_ON(nf_nat_sip_hook != NULL);
-       BUG_ON(nf_nat_sip_seq_adjust_hook != NULL);
-       BUG_ON(nf_nat_sip_expect_hook != NULL);
-       BUG_ON(nf_nat_sdp_addr_hook != NULL);
-       BUG_ON(nf_nat_sdp_port_hook != NULL);
-       BUG_ON(nf_nat_sdp_session_hook != NULL);
-       BUG_ON(nf_nat_sdp_media_hook != NULL);
-       RCU_INIT_POINTER(nf_nat_sip_hook, ip_nat_sip);
-       RCU_INIT_POINTER(nf_nat_sip_seq_adjust_hook, ip_nat_sip_seq_adjust);
-       RCU_INIT_POINTER(nf_nat_sip_expect_hook, ip_nat_sip_expect);
-       RCU_INIT_POINTER(nf_nat_sdp_addr_hook, ip_nat_sdp_addr);
-       RCU_INIT_POINTER(nf_nat_sdp_port_hook, ip_nat_sdp_port);
-       RCU_INIT_POINTER(nf_nat_sdp_session_hook, ip_nat_sdp_session);
-       RCU_INIT_POINTER(nf_nat_sdp_media_hook, ip_nat_sdp_media);
-       nf_ct_helper_expectfn_register(&sip_nat);
-       return 0;
-}
-
-module_init(nf_nat_sip_init);
-module_exit(nf_nat_sip_fini);
diff --git a/net/ipv4/netfilter/nf_nat_standalone.c b/net/ipv4/netfilter/nf_nat_standalone.c
deleted file mode 100644 (file)
index 3828a42..0000000
+++ /dev/null
@@ -1,326 +0,0 @@
-/* (C) 1999-2001 Paul `Rusty' Russell
- * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/types.h>
-#include <linux/icmp.h>
-#include <linux/gfp.h>
-#include <linux/ip.h>
-#include <linux/netfilter.h>
-#include <linux/netfilter_ipv4.h>
-#include <linux/module.h>
-#include <linux/skbuff.h>
-#include <linux/proc_fs.h>
-#include <net/ip.h>
-#include <net/checksum.h>
-#include <linux/spinlock.h>
-
-#include <net/netfilter/nf_conntrack.h>
-#include <net/netfilter/nf_conntrack_core.h>
-#include <net/netfilter/nf_conntrack_extend.h>
-#include <net/netfilter/nf_nat.h>
-#include <net/netfilter/nf_nat_rule.h>
-#include <net/netfilter/nf_nat_protocol.h>
-#include <net/netfilter/nf_nat_core.h>
-#include <net/netfilter/nf_nat_helper.h>
-#include <linux/netfilter_ipv4/ip_tables.h>
-
-#ifdef CONFIG_XFRM
-static void nat_decode_session(struct sk_buff *skb, struct flowi *fl)
-{
-       struct flowi4 *fl4 = &fl->u.ip4;
-       const struct nf_conn *ct;
-       const struct nf_conntrack_tuple *t;
-       enum ip_conntrack_info ctinfo;
-       enum ip_conntrack_dir dir;
-       unsigned long statusbit;
-
-       ct = nf_ct_get(skb, &ctinfo);
-       if (ct == NULL)
-               return;
-       dir = CTINFO2DIR(ctinfo);
-       t = &ct->tuplehash[dir].tuple;
-
-       if (dir == IP_CT_DIR_ORIGINAL)
-               statusbit = IPS_DST_NAT;
-       else
-               statusbit = IPS_SRC_NAT;
-
-       if (ct->status & statusbit) {
-               fl4->daddr = t->dst.u3.ip;
-               if (t->dst.protonum == IPPROTO_TCP ||
-                   t->dst.protonum == IPPROTO_UDP ||
-                   t->dst.protonum == IPPROTO_UDPLITE ||
-                   t->dst.protonum == IPPROTO_DCCP ||
-                   t->dst.protonum == IPPROTO_SCTP)
-                       fl4->fl4_dport = t->dst.u.tcp.port;
-       }
-
-       statusbit ^= IPS_NAT_MASK;
-
-       if (ct->status & statusbit) {
-               fl4->saddr = t->src.u3.ip;
-               if (t->dst.protonum == IPPROTO_TCP ||
-                   t->dst.protonum == IPPROTO_UDP ||
-                   t->dst.protonum == IPPROTO_UDPLITE ||
-                   t->dst.protonum == IPPROTO_DCCP ||
-                   t->dst.protonum == IPPROTO_SCTP)
-                       fl4->fl4_sport = t->src.u.tcp.port;
-       }
-}
-#endif
-
-static unsigned int
-nf_nat_fn(unsigned int hooknum,
-         struct sk_buff *skb,
-         const struct net_device *in,
-         const struct net_device *out,
-         int (*okfn)(struct sk_buff *))
-{
-       struct nf_conn *ct;
-       enum ip_conntrack_info ctinfo;
-       struct nf_conn_nat *nat;
-       /* maniptype == SRC for postrouting. */
-       enum nf_nat_manip_type maniptype = HOOK2MANIP(hooknum);
-
-       /* We never see fragments: conntrack defrags on pre-routing
-          and local-out, and nf_nat_out protects post-routing. */
-       NF_CT_ASSERT(!ip_is_fragment(ip_hdr(skb)));
-
-       ct = nf_ct_get(skb, &ctinfo);
-       /* Can't track?  It's not due to stress, or conntrack would
-          have dropped it.  Hence it's the user's responsibilty to
-          packet filter it out, or implement conntrack/NAT for that
-          protocol. 8) --RR */
-       if (!ct)
-               return NF_ACCEPT;
-
-       /* Don't try to NAT if this packet is not conntracked */
-       if (nf_ct_is_untracked(ct))
-               return NF_ACCEPT;
-
-       nat = nfct_nat(ct);
-       if (!nat) {
-               /* NAT module was loaded late. */
-               if (nf_ct_is_confirmed(ct))
-                       return NF_ACCEPT;
-               nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);
-               if (nat == NULL) {
-                       pr_debug("failed to add NAT extension\n");
-                       return NF_ACCEPT;
-               }
-       }
-
-       switch (ctinfo) {
-       case IP_CT_RELATED:
-       case IP_CT_RELATED_REPLY:
-               if (ip_hdr(skb)->protocol == IPPROTO_ICMP) {
-                       if (!nf_nat_icmp_reply_translation(ct, ctinfo,
-                                                          hooknum, skb))
-                               return NF_DROP;
-                       else
-                               return NF_ACCEPT;
-               }
-               /* Fall thru... (Only ICMPs can be IP_CT_IS_REPLY) */
-       case IP_CT_NEW:
-
-               /* Seen it before?  This can happen for loopback, retrans,
-                  or local packets.. */
-               if (!nf_nat_initialized(ct, maniptype)) {
-                       unsigned int ret;
-
-                       ret = nf_nat_rule_find(skb, hooknum, in, out, ct);
-                       if (ret != NF_ACCEPT)
-                               return ret;
-               } else
-                       pr_debug("Already setup manip %s for ct %p\n",
-                                maniptype == NF_NAT_MANIP_SRC ? "SRC" : "DST",
-                                ct);
-               break;
-
-       default:
-               /* ESTABLISHED */
-               NF_CT_ASSERT(ctinfo == IP_CT_ESTABLISHED ||
-                            ctinfo == IP_CT_ESTABLISHED_REPLY);
-       }
-
-       return nf_nat_packet(ct, ctinfo, hooknum, skb);
-}
-
-static unsigned int
-nf_nat_in(unsigned int hooknum,
-         struct sk_buff *skb,
-         const struct net_device *in,
-         const struct net_device *out,
-         int (*okfn)(struct sk_buff *))
-{
-       unsigned int ret;
-       __be32 daddr = ip_hdr(skb)->daddr;
-
-       ret = nf_nat_fn(hooknum, skb, in, out, okfn);
-       if (ret != NF_DROP && ret != NF_STOLEN &&
-           daddr != ip_hdr(skb)->daddr)
-               skb_dst_drop(skb);
-
-       return ret;
-}
-
-static unsigned int
-nf_nat_out(unsigned int hooknum,
-          struct sk_buff *skb,
-          const struct net_device *in,
-          const struct net_device *out,
-          int (*okfn)(struct sk_buff *))
-{
-#ifdef CONFIG_XFRM
-       const struct nf_conn *ct;
-       enum ip_conntrack_info ctinfo;
-#endif
-       unsigned int ret;
-
-       /* root is playing with raw sockets. */
-       if (skb->len < sizeof(struct iphdr) ||
-           ip_hdrlen(skb) < sizeof(struct iphdr))
-               return NF_ACCEPT;
-
-       ret = nf_nat_fn(hooknum, skb, in, out, okfn);
-#ifdef CONFIG_XFRM
-       if (ret != NF_DROP && ret != NF_STOLEN &&
-           (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
-               enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
-
-               if ((ct->tuplehash[dir].tuple.src.u3.ip !=
-                    ct->tuplehash[!dir].tuple.dst.u3.ip) ||
-                   (ct->tuplehash[dir].tuple.src.u.all !=
-                    ct->tuplehash[!dir].tuple.dst.u.all)
-                  )
-                       return ip_xfrm_me_harder(skb) == 0 ? ret : NF_DROP;
-       }
-#endif
-       return ret;
-}
-
-static unsigned int
-nf_nat_local_fn(unsigned int hooknum,
-               struct sk_buff *skb,
-               const struct net_device *in,
-               const struct net_device *out,
-               int (*okfn)(struct sk_buff *))
-{
-       const struct nf_conn *ct;
-       enum ip_conntrack_info ctinfo;
-       unsigned int ret;
-
-       /* root is playing with raw sockets. */
-       if (skb->len < sizeof(struct iphdr) ||
-           ip_hdrlen(skb) < sizeof(struct iphdr))
-               return NF_ACCEPT;
-
-       ret = nf_nat_fn(hooknum, skb, in, out, okfn);
-       if (ret != NF_DROP && ret != NF_STOLEN &&
-           (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
-               enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
-
-               if (ct->tuplehash[dir].tuple.dst.u3.ip !=
-                   ct->tuplehash[!dir].tuple.src.u3.ip) {
-                       if (ip_route_me_harder(skb, RTN_UNSPEC))
-                               ret = NF_DROP;
-               }
-#ifdef CONFIG_XFRM
-               else if (ct->tuplehash[dir].tuple.dst.u.all !=
-                        ct->tuplehash[!dir].tuple.src.u.all)
-                       if (ip_xfrm_me_harder(skb))
-                               ret = NF_DROP;
-#endif
-       }
-       return ret;
-}
-
-/* We must be after connection tracking and before packet filtering. */
-
-static struct nf_hook_ops nf_nat_ops[] __read_mostly = {
-       /* Before packet filtering, change destination */
-       {
-               .hook           = nf_nat_in,
-               .owner          = THIS_MODULE,
-               .pf             = NFPROTO_IPV4,
-               .hooknum        = NF_INET_PRE_ROUTING,
-               .priority       = NF_IP_PRI_NAT_DST,
-       },
-       /* After packet filtering, change source */
-       {
-               .hook           = nf_nat_out,
-               .owner          = THIS_MODULE,
-               .pf             = NFPROTO_IPV4,
-               .hooknum        = NF_INET_POST_ROUTING,
-               .priority       = NF_IP_PRI_NAT_SRC,
-       },
-       /* Before packet filtering, change destination */
-       {
-               .hook           = nf_nat_local_fn,
-               .owner          = THIS_MODULE,
-               .pf             = NFPROTO_IPV4,
-               .hooknum        = NF_INET_LOCAL_OUT,
-               .priority       = NF_IP_PRI_NAT_DST,
-       },
-       /* After packet filtering, change source */
-       {
-               .hook           = nf_nat_fn,
-               .owner          = THIS_MODULE,
-               .pf             = NFPROTO_IPV4,
-               .hooknum        = NF_INET_LOCAL_IN,
-               .priority       = NF_IP_PRI_NAT_SRC,
-       },
-};
-
-static int __init nf_nat_standalone_init(void)
-{
-       int ret = 0;
-
-       need_ipv4_conntrack();
-
-#ifdef CONFIG_XFRM
-       BUG_ON(ip_nat_decode_session != NULL);
-       RCU_INIT_POINTER(ip_nat_decode_session, nat_decode_session);
-#endif
-       ret = nf_nat_rule_init();
-       if (ret < 0) {
-               pr_err("nf_nat_init: can't setup rules.\n");
-               goto cleanup_decode_session;
-       }
-       ret = nf_register_hooks(nf_nat_ops, ARRAY_SIZE(nf_nat_ops));
-       if (ret < 0) {
-               pr_err("nf_nat_init: can't register hooks.\n");
-               goto cleanup_rule_init;
-       }
-       return ret;
-
- cleanup_rule_init:
-       nf_nat_rule_cleanup();
- cleanup_decode_session:
-#ifdef CONFIG_XFRM
-       RCU_INIT_POINTER(ip_nat_decode_session, NULL);
-       synchronize_net();
-#endif
-       return ret;
-}
-
-static void __exit nf_nat_standalone_fini(void)
-{
-       nf_unregister_hooks(nf_nat_ops, ARRAY_SIZE(nf_nat_ops));
-       nf_nat_rule_cleanup();
-#ifdef CONFIG_XFRM
-       RCU_INIT_POINTER(ip_nat_decode_session, NULL);
-       synchronize_net();
-#endif
-       /* Conntrack caches are unregistered in nf_conntrack_cleanup */
-}
-
-module_init(nf_nat_standalone_init);
-module_exit(nf_nat_standalone_fini);
-
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("ip_nat");
diff --git a/net/ipv4/netfilter/nf_nat_tftp.c b/net/ipv4/netfilter/nf_nat_tftp.c
deleted file mode 100644 (file)
index 9dbb8d2..0000000
+++ /dev/null
@@ -1,51 +0,0 @@
-/* (C) 2001-2002 Magnus Boden <mb@ozaba.mine.nu>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/udp.h>
-
-#include <net/netfilter/nf_conntrack_helper.h>
-#include <net/netfilter/nf_conntrack_expect.h>
-#include <net/netfilter/nf_nat_helper.h>
-#include <net/netfilter/nf_nat_rule.h>
-#include <linux/netfilter/nf_conntrack_tftp.h>
-
-MODULE_AUTHOR("Magnus Boden <mb@ozaba.mine.nu>");
-MODULE_DESCRIPTION("TFTP NAT helper");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("ip_nat_tftp");
-
-static unsigned int help(struct sk_buff *skb,
-                        enum ip_conntrack_info ctinfo,
-                        struct nf_conntrack_expect *exp)
-{
-       const struct nf_conn *ct = exp->master;
-
-       exp->saved_proto.udp.port
-               = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.udp.port;
-       exp->dir = IP_CT_DIR_REPLY;
-       exp->expectfn = nf_nat_follow_master;
-       if (nf_ct_expect_related(exp) != 0)
-               return NF_DROP;
-       return NF_ACCEPT;
-}
-
-static void __exit nf_nat_tftp_fini(void)
-{
-       RCU_INIT_POINTER(nf_nat_tftp_hook, NULL);
-       synchronize_rcu();
-}
-
-static int __init nf_nat_tftp_init(void)
-{
-       BUG_ON(nf_nat_tftp_hook != NULL);
-       RCU_INIT_POINTER(nf_nat_tftp_hook, help);
-       return 0;
-}
-
-module_init(nf_nat_tftp_init);
-module_exit(nf_nat_tftp_fini);
index 6232d476f37e952add50d1cbec69c316b879a27a..8f3d05424a3e8f2f318c36bf007fd17724533997 100644 (file)
@@ -185,10 +185,10 @@ exit:
        return sk;
 }
 
-static void inet_get_ping_group_range_net(struct net *net, gid_t *low,
-                                         gid_t *high)
+static void inet_get_ping_group_range_net(struct net *net, kgid_t *low,
+                                         kgid_t *high)
 {
-       gid_t *data = net->ipv4.sysctl_ping_group_range;
+       kgid_t *data = net->ipv4.sysctl_ping_group_range;
        unsigned int seq;
 
        do {
@@ -203,19 +203,13 @@ static void inet_get_ping_group_range_net(struct net *net, gid_t *low,
 static int ping_init_sock(struct sock *sk)
 {
        struct net *net = sock_net(sk);
-       gid_t group = current_egid();
-       gid_t range[2];
+       kgid_t group = current_egid();
        struct group_info *group_info = get_current_groups();
        int i, j, count = group_info->ngroups;
        kgid_t low, high;
 
-       inet_get_ping_group_range_net(net, range, range+1);
-       low = make_kgid(&init_user_ns, range[0]);
-       high = make_kgid(&init_user_ns, range[1]);
-       if (!gid_valid(low) || !gid_valid(high) || gid_lt(high, low))
-               return -EACCES;
-
-       if (range[0] <= group && group <= range[1])
+       inet_get_ping_group_range_net(net, &low, &high);
+       if (gid_lte(low, group) && gid_lte(group, high))
                return 0;
 
        for (i = 0; i < group_info->nblocks; i++) {
@@ -845,7 +839,9 @@ static void ping_format_sock(struct sock *sp, struct seq_file *f,
                bucket, src, srcp, dest, destp, sp->sk_state,
                sk_wmem_alloc_get(sp),
                sk_rmem_alloc_get(sp),
-               0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
+               0, 0L, 0,
+               from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
+               0, sock_i_ino(sp),
                atomic_read(&sp->sk_refcnt), sp,
                atomic_read(&sp->sk_drops), len);
 }
index 957acd12250bd1ee078ef0fbc6831225448559ee..8de53e1ddd544b594909b9d15c96b2cad85ffa3d 100644 (file)
@@ -263,6 +263,10 @@ static const struct snmp_mib snmp4_net_list[] = {
        SNMP_MIB_ITEM("TCPChallengeACK", LINUX_MIB_TCPCHALLENGEACK),
        SNMP_MIB_ITEM("TCPSYNChallenge", LINUX_MIB_TCPSYNCHALLENGE),
        SNMP_MIB_ITEM("TCPFastOpenActive", LINUX_MIB_TCPFASTOPENACTIVE),
+       SNMP_MIB_ITEM("TCPFastOpenPassive", LINUX_MIB_TCPFASTOPENPASSIVE),
+       SNMP_MIB_ITEM("TCPFastOpenPassiveFail", LINUX_MIB_TCPFASTOPENPASSIVEFAIL),
+       SNMP_MIB_ITEM("TCPFastOpenListenOverflow", LINUX_MIB_TCPFASTOPENLISTENOVERFLOW),
+       SNMP_MIB_ITEM("TCPFastOpenCookieReqd", LINUX_MIB_TCPFASTOPENCOOKIEREQD),
        SNMP_MIB_SENTINEL
 };
 
index d23c6571ba1c34525114af16f0818cfe6bbf1f14..73d1e4df4bf630f176f385b96639b4e803469458 100644 (file)
@@ -994,7 +994,9 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
                i, src, srcp, dest, destp, sp->sk_state,
                sk_wmem_alloc_get(sp),
                sk_rmem_alloc_get(sp),
-               0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
+               0, 0L, 0,
+               from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
+               0, sock_i_ino(sp),
                atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
 }
 
index fd9af60397b590dd817e02a52a33fdf99bb24c8e..ff622069fcefbe5ac2248440c2059b5a16d524e0 100644 (file)
@@ -1111,10 +1111,7 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst)
        const struct rtable *rt = (const struct rtable *) dst;
        unsigned int mtu = rt->rt_pmtu;
 
-       if (mtu && time_after_eq(jiffies, rt->dst.expires))
-               mtu = 0;
-
-       if (!mtu)
+       if (!mtu || time_after_eq(jiffies, rt->dst.expires))
                mtu = dst_metric_raw(dst, RTAX_MTU);
 
        if (mtu && rt_is_output_route(rt))
@@ -1566,11 +1563,14 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
        if (ipv4_is_zeronet(daddr))
                goto martian_destination;
 
-       if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev))) {
-               if (ipv4_is_loopback(daddr))
+       /* Following code try to avoid calling IN_DEV_NET_ROUTE_LOCALNET(),
+        * and call it once if daddr or/and saddr are loopback addresses
+        */
+       if (ipv4_is_loopback(daddr)) {
+               if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
                        goto martian_destination;
-
-               if (ipv4_is_loopback(saddr))
+       } else if (ipv4_is_loopback(saddr)) {
+               if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
                        goto martian_source;
        }
 
@@ -1595,7 +1595,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 
        if (res.type == RTN_LOCAL) {
                err = fib_validate_source(skb, saddr, daddr, tos,
-                                         net->loopback_dev->ifindex,
+                                         LOOPBACK_IFINDEX,
                                          dev, in_dev, &itag);
                if (err < 0)
                        goto martian_source_keep_err;
@@ -1871,7 +1871,7 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4)
 
        orig_oif = fl4->flowi4_oif;
 
-       fl4->flowi4_iif = net->loopback_dev->ifindex;
+       fl4->flowi4_iif = LOOPBACK_IFINDEX;
        fl4->flowi4_tos = tos & IPTOS_RT_MASK;
        fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
                         RT_SCOPE_LINK : RT_SCOPE_UNIVERSE);
@@ -1960,7 +1960,7 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4)
                if (!fl4->daddr)
                        fl4->daddr = fl4->saddr = htonl(INADDR_LOOPBACK);
                dev_out = net->loopback_dev;
-               fl4->flowi4_oif = net->loopback_dev->ifindex;
+               fl4->flowi4_oif = LOOPBACK_IFINDEX;
                res.type = RTN_LOCAL;
                flags |= RTCF_LOCAL;
                goto make_route;
@@ -2131,7 +2131,7 @@ struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
 EXPORT_SYMBOL_GPL(ip_route_output_flow);
 
 static int rt_fill_info(struct net *net,  __be32 dst, __be32 src,
-                       struct flowi4 *fl4, struct sk_buff *skb, u32 pid,
+                       struct flowi4 *fl4, struct sk_buff *skb, u32 portid,
                        u32 seq, int event, int nowait, unsigned int flags)
 {
        struct rtable *rt = skb_rtable(skb);
@@ -2141,7 +2141,7 @@ static int rt_fill_info(struct net *net,  __be32 dst, __be32 src,
        u32 error;
        u32 metrics[RTAX_MAX];
 
-       nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags);
+       nlh = nlmsg_put(skb, portid, seq, event, sizeof(*r), flags);
        if (nlh == NULL)
                return -EMSGSIZE;
 
@@ -2301,12 +2301,12 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void
                rt->rt_flags |= RTCF_NOTIFY;
 
        err = rt_fill_info(net, dst, src, &fl4, skb,
-                          NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
+                          NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
                           RTM_NEWROUTE, 0, 0);
        if (err <= 0)
                goto errout_free;
 
-       err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).pid);
+       err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
 errout:
        return err;
 
index 650e1528e1e669a7828ab94bb4a5fa4bf65c1186..ba48e799b031b3a45c902dff2f8690a8ce2627fa 100644 (file)
@@ -319,6 +319,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
        ireq->tstamp_ok         = tcp_opt.saw_tstamp;
        req->ts_recent          = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
        treq->snt_synack        = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsecr : 0;
+       treq->listener          = NULL;
 
        /* We throwed the options of the initial SYN away, so we hope
         * the ACK carries the same options again (see RFC1122 4.2.3.8)
index 1b5ce96707a38124c9ae3e11b261995ce19b6668..9205e492dc9d8a36b05f18ccedcdf4704867c986 100644 (file)
@@ -76,9 +76,9 @@ static int ipv4_local_port_range(ctl_table *table, int write,
 }
 
 
-static void inet_get_ping_group_range_table(struct ctl_table *table, gid_t *low, gid_t *high)
+static void inet_get_ping_group_range_table(struct ctl_table *table, kgid_t *low, kgid_t *high)
 {
-       gid_t *data = table->data;
+       kgid_t *data = table->data;
        unsigned int seq;
        do {
                seq = read_seqbegin(&sysctl_local_ports.lock);
@@ -89,12 +89,12 @@ static void inet_get_ping_group_range_table(struct ctl_table *table, gid_t *low,
 }
 
 /* Update system visible IP port range */
-static void set_ping_group_range(struct ctl_table *table, gid_t range[2])
+static void set_ping_group_range(struct ctl_table *table, kgid_t low, kgid_t high)
 {
-       gid_t *data = table->data;
+       kgid_t *data = table->data;
        write_seqlock(&sysctl_local_ports.lock);
-       data[0] = range[0];
-       data[1] = range[1];
+       data[0] = low;
+       data[1] = high;
        write_sequnlock(&sysctl_local_ports.lock);
 }
 
@@ -103,21 +103,33 @@ static int ipv4_ping_group_range(ctl_table *table, int write,
                                 void __user *buffer,
                                 size_t *lenp, loff_t *ppos)
 {
+       struct user_namespace *user_ns = current_user_ns();
        int ret;
-       gid_t range[2];
+       gid_t urange[2];
+       kgid_t low, high;
        ctl_table tmp = {
-               .data = &range,
-               .maxlen = sizeof(range),
+               .data = &urange,
+               .maxlen = sizeof(urange),
                .mode = table->mode,
                .extra1 = &ip_ping_group_range_min,
                .extra2 = &ip_ping_group_range_max,
        };
 
-       inet_get_ping_group_range_table(table, range, range + 1);
+       inet_get_ping_group_range_table(table, &low, &high);
+       urange[0] = from_kgid_munged(user_ns, low);
+       urange[1] = from_kgid_munged(user_ns, high);
        ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
 
-       if (write && ret == 0)
-               set_ping_group_range(table, range);
+       if (write && ret == 0) {
+               low = make_kgid(user_ns, urange[0]);
+               high = make_kgid(user_ns, urange[1]);
+               if (!gid_valid(low) || !gid_valid(high) ||
+                   (urange[1] < urange[0]) || gid_lt(high, low)) {
+                       low = make_kgid(&init_user_ns, 1);
+                       high = make_kgid(&init_user_ns, 0);
+               }
+               set_ping_group_range(table, low, high);
+       }
 
        return ret;
 }
@@ -220,6 +232,45 @@ static int ipv4_tcp_mem(ctl_table *ctl, int write,
        return 0;
 }
 
+int proc_tcp_fastopen_key(ctl_table *ctl, int write, void __user *buffer,
+                         size_t *lenp, loff_t *ppos)
+{
+       ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
+       struct tcp_fastopen_context *ctxt;
+       int ret;
+       u32  user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */
+
+       tbl.data = kmalloc(tbl.maxlen, GFP_KERNEL);
+       if (!tbl.data)
+               return -ENOMEM;
+
+       rcu_read_lock();
+       ctxt = rcu_dereference(tcp_fastopen_ctx);
+       if (ctxt)
+               memcpy(user_key, ctxt->key, TCP_FASTOPEN_KEY_LENGTH);
+       rcu_read_unlock();
+
+       snprintf(tbl.data, tbl.maxlen, "%08x-%08x-%08x-%08x",
+               user_key[0], user_key[1], user_key[2], user_key[3]);
+       ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
+
+       if (write && ret == 0) {
+               if (sscanf(tbl.data, "%x-%x-%x-%x", user_key, user_key + 1,
+                          user_key + 2, user_key + 3) != 4) {
+                       ret = -EINVAL;
+                       goto bad_key;
+               }
+               tcp_fastopen_reset_cipher(user_key, TCP_FASTOPEN_KEY_LENGTH);
+       }
+
+bad_key:
+       pr_debug("proc FO key set 0x%x-%x-%x-%x <- 0x%s: %u\n",
+              user_key[0], user_key[1], user_key[2], user_key[3],
+              (char *)tbl.data, ret);
+       kfree(tbl.data);
+       return ret;
+}
+
 static struct ctl_table ipv4_table[] = {
        {
                .procname       = "tcp_timestamps",
@@ -373,6 +424,12 @@ static struct ctl_table ipv4_table[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
+       {
+               .procname       = "tcp_fastopen_key",
+               .mode           = 0600,
+               .maxlen         = ((TCP_FASTOPEN_KEY_LENGTH * 2) + 10),
+               .proc_handler   = proc_tcp_fastopen_key,
+       },
        {
                .procname       = "tcp_tw_recycle",
                .data           = &tcp_death_row.sysctl_tw_recycle,
@@ -786,7 +843,7 @@ static struct ctl_table ipv4_net_table[] = {
        {
                .procname       = "ping_group_range",
                .data           = &init_net.ipv4.sysctl_ping_group_range,
-               .maxlen         = sizeof(init_net.ipv4.sysctl_ping_group_range),
+               .maxlen         = sizeof(gid_t)*2,
                .mode           = 0644,
                .proc_handler   = ipv4_ping_group_range,
        },
@@ -830,8 +887,8 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
         * Sane defaults - nobody may create ping sockets.
         * Boot scripts should set this to distro-specific group.
         */
-       net->ipv4.sysctl_ping_group_range[0] = 1;
-       net->ipv4.sysctl_ping_group_range[1] = 0;
+       net->ipv4.sysctl_ping_group_range[0] = make_kgid(&init_user_ns, 1);
+       net->ipv4.sysctl_ping_group_range[1] = make_kgid(&init_user_ns, 0);
 
        tcp_init_mem(net);
 
index 5f64193418216393448ec9d8e839f83e85636716..f32c02e2a54346cf4e120d39e17d4d1b5e966189 100644 (file)
@@ -486,8 +486,9 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
        if (sk->sk_shutdown & RCV_SHUTDOWN)
                mask |= POLLIN | POLLRDNORM | POLLRDHUP;
 
-       /* Connected? */
-       if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV)) {
+       /* Connected or passive Fast Open socket? */
+       if (sk->sk_state != TCP_SYN_SENT &&
+           (sk->sk_state != TCP_SYN_RECV || tp->fastopen_rsk != NULL)) {
                int target = sock_rcvlowat(sk, 0, INT_MAX);
 
                if (tp->urg_seq == tp->copied_seq &&
@@ -840,10 +841,15 @@ static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffse
        ssize_t copied;
        long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
 
-       /* Wait for a connection to finish. */
-       if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
+       /* Wait for a connection to finish. One exception is TCP Fast Open
+        * (passive side) where data is allowed to be sent before a connection
+        * is fully established.
+        */
+       if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) &&
+           !tcp_passive_fastopen(sk)) {
                if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
                        goto out_err;
+       }
 
        clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
 
@@ -1042,10 +1048,15 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 
        timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
 
-       /* Wait for a connection to finish. */
-       if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
+       /* Wait for a connection to finish. One exception is TCP Fast Open
+        * (passive side) where data is allowed to be sent before a connection
+        * is fully established.
+        */
+       if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) &&
+           !tcp_passive_fastopen(sk)) {
                if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
                        goto do_error;
+       }
 
        if (unlikely(tp->repair)) {
                if (tp->repair_queue == TCP_RECV_QUEUE) {
@@ -1139,78 +1150,43 @@ new_segment:
                                if (err)
                                        goto do_fault;
                        } else {
-                               bool merge = false;
+                               bool merge = true;
                                int i = skb_shinfo(skb)->nr_frags;
-                               struct page *page = sk->sk_sndmsg_page;
-                               int off;
-
-                               if (page && page_count(page) == 1)
-                                       sk->sk_sndmsg_off = 0;
-
-                               off = sk->sk_sndmsg_off;
-
-                               if (skb_can_coalesce(skb, i, page, off) &&
-                                   off != PAGE_SIZE) {
-                                       /* We can extend the last page
-                                        * fragment. */
-                                       merge = true;
-                               } else if (i == MAX_SKB_FRAGS || !sg) {
-                                       /* Need to add new fragment and cannot
-                                        * do this because interface is non-SG,
-                                        * or because all the page slots are
-                                        * busy. */
-                                       tcp_mark_push(tp, skb);
-                                       goto new_segment;
-                               } else if (page) {
-                                       if (off == PAGE_SIZE) {
-                                               put_page(page);
-                                               sk->sk_sndmsg_page = page = NULL;
-                                               off = 0;
+                               struct page_frag *pfrag = sk_page_frag(sk);
+
+                               if (!sk_page_frag_refill(sk, pfrag))
+                                       goto wait_for_memory;
+
+                               if (!skb_can_coalesce(skb, i, pfrag->page,
+                                                     pfrag->offset)) {
+                                       if (i == MAX_SKB_FRAGS || !sg) {
+                                               tcp_mark_push(tp, skb);
+                                               goto new_segment;
                                        }
-                               } else
-                                       off = 0;
+                                       merge = false;
+                               }
 
-                               if (copy > PAGE_SIZE - off)
-                                       copy = PAGE_SIZE - off;
+                               copy = min_t(int, copy, pfrag->size - pfrag->offset);
 
                                if (!sk_wmem_schedule(sk, copy))
                                        goto wait_for_memory;
 
-                               if (!page) {
-                                       /* Allocate new cache page. */
-                                       if (!(page = sk_stream_alloc_page(sk)))
-                                               goto wait_for_memory;
-                               }
-
-                               /* Time to copy data. We are close to
-                                * the end! */
                                err = skb_copy_to_page_nocache(sk, from, skb,
-                                                              page, off, copy);
-                               if (err) {
-                                       /* If this page was new, give it to the
-                                        * socket so it does not get leaked.
-                                        */
-                                       if (!sk->sk_sndmsg_page) {
-                                               sk->sk_sndmsg_page = page;
-                                               sk->sk_sndmsg_off = 0;
-                                       }
+                                                              pfrag->page,
+                                                              pfrag->offset,
+                                                              copy);
+                               if (err)
                                        goto do_error;
-                               }
 
                                /* Update the skb. */
                                if (merge) {
                                        skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
                                } else {
-                                       skb_fill_page_desc(skb, i, page, off, copy);
-                                       if (sk->sk_sndmsg_page) {
-                                               get_page(page);
-                                       } else if (off + copy < PAGE_SIZE) {
-                                               get_page(page);
-                                               sk->sk_sndmsg_page = page;
-                                       }
+                                       skb_fill_page_desc(skb, i, pfrag->page,
+                                                          pfrag->offset, copy);
+                                       get_page(pfrag->page);
                                }
-
-                               sk->sk_sndmsg_off = off + copy;
+                               pfrag->offset += copy;
                        }
 
                        if (!copied)
@@ -2150,6 +2126,10 @@ void tcp_close(struct sock *sk, long timeout)
                 * they look as CLOSING or LAST_ACK for Linux)
                 * Probably, I missed some more holelets.
                 *                                              --ANK
+                * XXX (TFO) - To start off we don't support SYN+ACK+FIN
+                * in a single packet! (May consider it later but will
+                * probably need API support or TCP_CORK SYN-ACK until
+                * data is written and socket is closed.)
                 */
                tcp_send_fin(sk);
        }
@@ -2221,8 +2201,16 @@ adjudge_to_death:
                }
        }
 
-       if (sk->sk_state == TCP_CLOSE)
+       if (sk->sk_state == TCP_CLOSE) {
+               struct request_sock *req = tcp_sk(sk)->fastopen_rsk;
+               /* We could get here with a non-NULL req if the socket is
+                * aborted (e.g., closed with unread data) before 3WHS
+                * finishes.
+                */
+               if (req != NULL)
+                       reqsk_fastopen_remove(sk, req, false);
                inet_csk_destroy_sock(sk);
+       }
        /* Otherwise, socket is reprieved until protocol close. */
 
 out:
@@ -2308,6 +2296,13 @@ int tcp_disconnect(struct sock *sk, int flags)
 }
 EXPORT_SYMBOL(tcp_disconnect);
 
+void tcp_sock_destruct(struct sock *sk)
+{
+       inet_sock_destruct(sk);
+
+       kfree(inet_csk(sk)->icsk_accept_queue.fastopenq);
+}
+
 static inline bool tcp_can_repair_sock(const struct sock *sk)
 {
        return capable(CAP_NET_ADMIN) &&
@@ -2701,6 +2696,14 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
                else
                        icsk->icsk_user_timeout = msecs_to_jiffies(val);
                break;
+
+       case TCP_FASTOPEN:
+               if (val >= 0 && ((1 << sk->sk_state) & (TCPF_CLOSE |
+                   TCPF_LISTEN)))
+                       err = fastopen_init_queue(sk, val);
+               else
+                       err = -EINVAL;
+               break;
        default:
                err = -ENOPROTOOPT;
                break;
@@ -3514,11 +3517,15 @@ EXPORT_SYMBOL(tcp_cookie_generator);
 
 void tcp_done(struct sock *sk)
 {
+       struct request_sock *req = tcp_sk(sk)->fastopen_rsk;
+
        if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
                TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
 
        tcp_set_state(sk, TCP_CLOSE);
        tcp_clear_xmit_timers(sk);
+       if (req != NULL)
+               reqsk_fastopen_remove(sk, req, false);
 
        sk->sk_shutdown = SHUTDOWN_MASK;
 
index a7f729c409d78bddb6ff4a1cc5a63fbdc69db581..8f7ef0ad80e5b6b062b7b40f3049634e63b93b40 100644 (file)
@@ -1,10 +1,91 @@
+#include <linux/err.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/tcp.h>
+#include <linux/rcupdate.h>
+#include <linux/rculist.h>
+#include <net/inetpeer.h>
+#include <net/tcp.h>
 
-int sysctl_tcp_fastopen;
+int sysctl_tcp_fastopen __read_mostly;
+
+struct tcp_fastopen_context __rcu *tcp_fastopen_ctx;
+
+static DEFINE_SPINLOCK(tcp_fastopen_ctx_lock);
+
+static void tcp_fastopen_ctx_free(struct rcu_head *head)
+{
+       struct tcp_fastopen_context *ctx =
+           container_of(head, struct tcp_fastopen_context, rcu);
+       crypto_free_cipher(ctx->tfm);
+       kfree(ctx);
+}
+
+int tcp_fastopen_reset_cipher(void *key, unsigned int len)
+{
+       int err;
+       struct tcp_fastopen_context *ctx, *octx;
+
+       ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
+       if (!ctx)
+               return -ENOMEM;
+       ctx->tfm = crypto_alloc_cipher("aes", 0, 0);
+
+       if (IS_ERR(ctx->tfm)) {
+               err = PTR_ERR(ctx->tfm);
+error:         kfree(ctx);
+               pr_err("TCP: TFO aes cipher alloc error: %d\n", err);
+               return err;
+       }
+       err = crypto_cipher_setkey(ctx->tfm, key, len);
+       if (err) {
+               pr_err("TCP: TFO cipher key error: %d\n", err);
+               crypto_free_cipher(ctx->tfm);
+               goto error;
+       }
+       memcpy(ctx->key, key, len);
+
+       spin_lock(&tcp_fastopen_ctx_lock);
+
+       octx = rcu_dereference_protected(tcp_fastopen_ctx,
+                               lockdep_is_held(&tcp_fastopen_ctx_lock));
+       rcu_assign_pointer(tcp_fastopen_ctx, ctx);
+       spin_unlock(&tcp_fastopen_ctx_lock);
+
+       if (octx)
+               call_rcu(&octx->rcu, tcp_fastopen_ctx_free);
+       return err;
+}
+
+/* Computes the fastopen cookie for the peer.
+ * The peer address is a 128 bits long (pad with zeros for IPv4).
+ *
+ * The caller must check foc->len to determine if a valid cookie
+ * has been generated successfully.
+*/
+void tcp_fastopen_cookie_gen(__be32 addr, struct tcp_fastopen_cookie *foc)
+{
+       __be32 peer_addr[4] = { addr, 0, 0, 0 };
+       struct tcp_fastopen_context *ctx;
+
+       rcu_read_lock();
+       ctx = rcu_dereference(tcp_fastopen_ctx);
+       if (ctx) {
+               crypto_cipher_encrypt_one(ctx->tfm,
+                                         foc->val,
+                                         (__u8 *)peer_addr);
+               foc->len = TCP_FASTOPEN_COOKIE_SIZE;
+       }
+       rcu_read_unlock();
+}
 
 static int __init tcp_fastopen_init(void)
 {
+       __u8 key[TCP_FASTOPEN_KEY_LENGTH];
+
+       get_random_bytes(key, sizeof(key));
+       tcp_fastopen_reset_cipher(key, sizeof(key));
        return 0;
 }
 
index d377f4854cb853c454541062fb6f8438608d470c..432c36649db3dd8d579ddb05ae886dd3251dcb66 100644 (file)
@@ -237,7 +237,11 @@ static inline void TCP_ECN_check_ce(struct tcp_sock *tp, const struct sk_buff *s
                        tcp_enter_quickack_mode((struct sock *)tp);
                break;
        case INET_ECN_CE:
-               tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
+               if (!(tp->ecn_flags & TCP_ECN_DEMAND_CWR)) {
+                       /* Better not delay acks, sender can have a very low cwnd */
+                       tcp_enter_quickack_mode((struct sock *)tp);
+                       tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
+               }
                /* fallinto */
        default:
                tp->ecn_flags |= TCP_ECN_SEEN;
@@ -374,7 +378,7 @@ static void tcp_fixup_rcvbuf(struct sock *sk)
 /* 4. Try to fixup all. It is made immediately after connection enters
  *    established state.
  */
-static void tcp_init_buffer_space(struct sock *sk)
+void tcp_init_buffer_space(struct sock *sk)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        int maxwin;
@@ -739,29 +743,6 @@ __u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst)
        return min_t(__u32, cwnd, tp->snd_cwnd_clamp);
 }
 
-/* Set slow start threshold and cwnd not falling to slow start */
-void tcp_enter_cwr(struct sock *sk, const int set_ssthresh)
-{
-       struct tcp_sock *tp = tcp_sk(sk);
-       const struct inet_connection_sock *icsk = inet_csk(sk);
-
-       tp->prior_ssthresh = 0;
-       tp->bytes_acked = 0;
-       if (icsk->icsk_ca_state < TCP_CA_CWR) {
-               tp->undo_marker = 0;
-               if (set_ssthresh)
-                       tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
-               tp->snd_cwnd = min(tp->snd_cwnd,
-                                  tcp_packets_in_flight(tp) + 1U);
-               tp->snd_cwnd_cnt = 0;
-               tp->high_seq = tp->snd_nxt;
-               tp->snd_cwnd_stamp = tcp_time_stamp;
-               TCP_ECN_queue_cwr(tp);
-
-               tcp_set_ca_state(sk, TCP_CA_CWR);
-       }
-}
-
 /*
  * Packet counting of FACK is based on in-order assumptions, therefore TCP
  * disables it when reordering is detected
@@ -2489,35 +2470,6 @@ static inline void tcp_moderate_cwnd(struct tcp_sock *tp)
        tp->snd_cwnd_stamp = tcp_time_stamp;
 }
 
-/* Lower bound on congestion window is slow start threshold
- * unless congestion avoidance choice decides to overide it.
- */
-static inline u32 tcp_cwnd_min(const struct sock *sk)
-{
-       const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
-
-       return ca_ops->min_cwnd ? ca_ops->min_cwnd(sk) : tcp_sk(sk)->snd_ssthresh;
-}
-
-/* Decrease cwnd each second ack. */
-static void tcp_cwnd_down(struct sock *sk, int flag)
-{
-       struct tcp_sock *tp = tcp_sk(sk);
-       int decr = tp->snd_cwnd_cnt + 1;
-
-       if ((flag & (FLAG_ANY_PROGRESS | FLAG_DSACKING_ACK)) ||
-           (tcp_is_reno(tp) && !(flag & FLAG_NOT_DUP))) {
-               tp->snd_cwnd_cnt = decr & 1;
-               decr >>= 1;
-
-               if (decr && tp->snd_cwnd > tcp_cwnd_min(sk))
-                       tp->snd_cwnd -= decr;
-
-               tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp) + 1);
-               tp->snd_cwnd_stamp = tcp_time_stamp;
-       }
-}
-
 /* Nothing was retransmitted or returned timestamp is less
  * than timestamp of the first retransmission.
  */
@@ -2719,24 +2671,80 @@ static bool tcp_try_undo_loss(struct sock *sk)
        return false;
 }
 
-static inline void tcp_complete_cwr(struct sock *sk)
+/* The cwnd reduction in CWR and Recovery use the PRR algorithm
+ * https://datatracker.ietf.org/doc/draft-ietf-tcpm-proportional-rate-reduction/
+ * It computes the number of packets to send (sndcnt) based on packets newly
+ * delivered:
+ *   1) If the packets in flight is larger than ssthresh, PRR spreads the
+ *     cwnd reductions across a full RTT.
+ *   2) If packets in flight is lower than ssthresh (such as due to excess
+ *     losses and/or application stalls), do not perform any further cwnd
+ *     reductions, but instead slow start up to ssthresh.
+ */
+static void tcp_init_cwnd_reduction(struct sock *sk, const bool set_ssthresh)
 {
        struct tcp_sock *tp = tcp_sk(sk);
 
-       /* Do not moderate cwnd if it's already undone in cwr or recovery. */
-       if (tp->undo_marker) {
-               if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR) {
-                       tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);
-                       tp->snd_cwnd_stamp = tcp_time_stamp;
-               } else if (tp->snd_ssthresh < TCP_INFINITE_SSTHRESH) {
-                       /* PRR algorithm. */
-                       tp->snd_cwnd = tp->snd_ssthresh;
-                       tp->snd_cwnd_stamp = tcp_time_stamp;
-               }
+       tp->high_seq = tp->snd_nxt;
+       tp->bytes_acked = 0;
+       tp->snd_cwnd_cnt = 0;
+       tp->prior_cwnd = tp->snd_cwnd;
+       tp->prr_delivered = 0;
+       tp->prr_out = 0;
+       if (set_ssthresh)
+               tp->snd_ssthresh = inet_csk(sk)->icsk_ca_ops->ssthresh(sk);
+       TCP_ECN_queue_cwr(tp);
+}
+
+static void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked,
+                              int fast_rexmit)
+{
+       struct tcp_sock *tp = tcp_sk(sk);
+       int sndcnt = 0;
+       int delta = tp->snd_ssthresh - tcp_packets_in_flight(tp);
+
+       tp->prr_delivered += newly_acked_sacked;
+       if (tcp_packets_in_flight(tp) > tp->snd_ssthresh) {
+               u64 dividend = (u64)tp->snd_ssthresh * tp->prr_delivered +
+                              tp->prior_cwnd - 1;
+               sndcnt = div_u64(dividend, tp->prior_cwnd) - tp->prr_out;
+       } else {
+               sndcnt = min_t(int, delta,
+                              max_t(int, tp->prr_delivered - tp->prr_out,
+                                    newly_acked_sacked) + 1);
+       }
+
+       sndcnt = max(sndcnt, (fast_rexmit ? 1 : 0));
+       tp->snd_cwnd = tcp_packets_in_flight(tp) + sndcnt;
+}
+
+static inline void tcp_end_cwnd_reduction(struct sock *sk)
+{
+       struct tcp_sock *tp = tcp_sk(sk);
+
+       /* Reset cwnd to ssthresh in CWR or Recovery (unless it's undone) */
+       if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR ||
+           (tp->undo_marker && tp->snd_ssthresh < TCP_INFINITE_SSTHRESH)) {
+               tp->snd_cwnd = tp->snd_ssthresh;
+               tp->snd_cwnd_stamp = tcp_time_stamp;
        }
        tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR);
 }
 
+/* Enter CWR state. Disable cwnd undo since congestion is proven with ECN */
+void tcp_enter_cwr(struct sock *sk, const int set_ssthresh)
+{
+       struct tcp_sock *tp = tcp_sk(sk);
+
+       tp->prior_ssthresh = 0;
+       tp->bytes_acked = 0;
+       if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
+               tp->undo_marker = 0;
+               tcp_init_cwnd_reduction(sk, set_ssthresh);
+               tcp_set_ca_state(sk, TCP_CA_CWR);
+       }
+}
+
 static void tcp_try_keep_open(struct sock *sk)
 {
        struct tcp_sock *tp = tcp_sk(sk);
@@ -2751,7 +2759,7 @@ static void tcp_try_keep_open(struct sock *sk)
        }
 }
 
-static void tcp_try_to_open(struct sock *sk, int flag)
+static void tcp_try_to_open(struct sock *sk, int flag, int newly_acked_sacked)
 {
        struct tcp_sock *tp = tcp_sk(sk);
 
@@ -2768,7 +2776,7 @@ static void tcp_try_to_open(struct sock *sk, int flag)
                if (inet_csk(sk)->icsk_ca_state != TCP_CA_Open)
                        tcp_moderate_cwnd(tp);
        } else {
-               tcp_cwnd_down(sk, flag);
+               tcp_cwnd_reduction(sk, newly_acked_sacked, 0);
        }
 }
 
@@ -2850,38 +2858,6 @@ void tcp_simple_retransmit(struct sock *sk)
 }
 EXPORT_SYMBOL(tcp_simple_retransmit);
 
-/* This function implements the PRR algorithm, specifcally the PRR-SSRB
- * (proportional rate reduction with slow start reduction bound) as described in
- * http://www.ietf.org/id/draft-mathis-tcpm-proportional-rate-reduction-01.txt.
- * It computes the number of packets to send (sndcnt) based on packets newly
- * delivered:
- *   1) If the packets in flight is larger than ssthresh, PRR spreads the
- *     cwnd reductions across a full RTT.
- *   2) If packets in flight is lower than ssthresh (such as due to excess
- *     losses and/or application stalls), do not perform any further cwnd
- *     reductions, but instead slow start up to ssthresh.
- */
-static void tcp_update_cwnd_in_recovery(struct sock *sk, int newly_acked_sacked,
-                                       int fast_rexmit, int flag)
-{
-       struct tcp_sock *tp = tcp_sk(sk);
-       int sndcnt = 0;
-       int delta = tp->snd_ssthresh - tcp_packets_in_flight(tp);
-
-       if (tcp_packets_in_flight(tp) > tp->snd_ssthresh) {
-               u64 dividend = (u64)tp->snd_ssthresh * tp->prr_delivered +
-                              tp->prior_cwnd - 1;
-               sndcnt = div_u64(dividend, tp->prior_cwnd) - tp->prr_out;
-       } else {
-               sndcnt = min_t(int, delta,
-                              max_t(int, tp->prr_delivered - tp->prr_out,
-                                    newly_acked_sacked) + 1);
-       }
-
-       sndcnt = max(sndcnt, (fast_rexmit ? 1 : 0));
-       tp->snd_cwnd = tcp_packets_in_flight(tp) + sndcnt;
-}
-
 static void tcp_enter_recovery(struct sock *sk, bool ece_ack)
 {
        struct tcp_sock *tp = tcp_sk(sk);
@@ -2894,7 +2870,6 @@ static void tcp_enter_recovery(struct sock *sk, bool ece_ack)
 
        NET_INC_STATS_BH(sock_net(sk), mib_idx);
 
-       tp->high_seq = tp->snd_nxt;
        tp->prior_ssthresh = 0;
        tp->undo_marker = tp->snd_una;
        tp->undo_retrans = tp->retrans_out;
@@ -2902,15 +2877,8 @@ static void tcp_enter_recovery(struct sock *sk, bool ece_ack)
        if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
                if (!ece_ack)
                        tp->prior_ssthresh = tcp_current_ssthresh(sk);
-               tp->snd_ssthresh = inet_csk(sk)->icsk_ca_ops->ssthresh(sk);
-               TCP_ECN_queue_cwr(tp);
+               tcp_init_cwnd_reduction(sk, true);
        }
-
-       tp->bytes_acked = 0;
-       tp->snd_cwnd_cnt = 0;
-       tp->prior_cwnd = tp->snd_cwnd;
-       tp->prr_delivered = 0;
-       tp->prr_out = 0;
        tcp_set_ca_state(sk, TCP_CA_Recovery);
 }
 
@@ -2970,7 +2938,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
                        /* CWR is to be held something *above* high_seq
                         * is ACKed for CWR bit to reach receiver. */
                        if (tp->snd_una != tp->high_seq) {
-                               tcp_complete_cwr(sk);
+                               tcp_end_cwnd_reduction(sk);
                                tcp_set_ca_state(sk, TCP_CA_Open);
                        }
                        break;
@@ -2980,7 +2948,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
                                tcp_reset_reno_sack(tp);
                        if (tcp_try_undo_recovery(sk))
                                return;
-                       tcp_complete_cwr(sk);
+                       tcp_end_cwnd_reduction(sk);
                        break;
                }
        }
@@ -3021,7 +2989,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
                        tcp_try_undo_dsack(sk);
 
                if (!tcp_time_to_recover(sk, flag)) {
-                       tcp_try_to_open(sk, flag);
+                       tcp_try_to_open(sk, flag, newly_acked_sacked);
                        return;
                }
 
@@ -3043,8 +3011,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
 
        if (do_lost || (tcp_is_fack(tp) && tcp_head_timedout(sk)))
                tcp_update_scoreboard(sk, fast_rexmit);
-       tp->prr_delivered += newly_acked_sacked;
-       tcp_update_cwnd_in_recovery(sk, newly_acked_sacked, fast_rexmit, flag);
+       tcp_cwnd_reduction(sk, newly_acked_sacked, fast_rexmit);
        tcp_xmit_retransmit_queue(sk);
 }
 
@@ -3123,6 +3090,12 @@ void tcp_rearm_rto(struct sock *sk)
 {
        struct tcp_sock *tp = tcp_sk(sk);
 
+       /* If the retrans timer is currently being used by Fast Open
+        * for SYN-ACK retrans purpose, stay put.
+        */
+       if (tp->fastopen_rsk)
+               return;
+
        if (!tp->packets_out) {
                inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
        } else {
@@ -3384,7 +3357,7 @@ static inline bool tcp_may_raise_cwnd(const struct sock *sk, const int flag)
 {
        const struct tcp_sock *tp = tcp_sk(sk);
        return (!(flag & FLAG_ECE) || tp->snd_cwnd < tp->snd_ssthresh) &&
-               !((1 << inet_csk(sk)->icsk_ca_state) & (TCPF_CA_Recovery | TCPF_CA_CWR));
+               !tcp_in_cwnd_reduction(sk);
 }
 
 /* Check that window update is acceptable.
@@ -3452,9 +3425,9 @@ static void tcp_conservative_spur_to_response(struct tcp_sock *tp)
 }
 
 /* A conservative spurious RTO response algorithm: reduce cwnd using
- * rate halving and continue in congestion avoidance.
+ * PRR and continue in congestion avoidance.
  */
-static void tcp_ratehalving_spur_to_response(struct sock *sk)
+static void tcp_cwr_spur_to_response(struct sock *sk)
 {
        tcp_enter_cwr(sk, 0);
 }
@@ -3462,7 +3435,7 @@ static void tcp_ratehalving_spur_to_response(struct sock *sk)
 static void tcp_undo_spur_to_response(struct sock *sk, int flag)
 {
        if (flag & FLAG_ECE)
-               tcp_ratehalving_spur_to_response(sk);
+               tcp_cwr_spur_to_response(sk);
        else
                tcp_undo_cwr(sk, true);
 }
@@ -3569,7 +3542,7 @@ static bool tcp_process_frto(struct sock *sk, int flag)
                        tcp_conservative_spur_to_response(tp);
                        break;
                default:
-                       tcp_ratehalving_spur_to_response(sk);
+                       tcp_cwr_spur_to_response(sk);
                        break;
                }
                tp->frto_counter = 0;
@@ -4034,7 +4007,7 @@ static inline bool tcp_sequence(const struct tcp_sock *tp, u32 seq, u32 end_seq)
 }
 
 /* When we get a reset we do this. */
-static void tcp_reset(struct sock *sk)
+void tcp_reset(struct sock *sk)
 {
        /* We want the right error as BSD sees it (and indeed as we do). */
        switch (sk->sk_state) {
@@ -5740,7 +5713,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
 
                TCP_ECN_rcv_synack(tp, th);
 
-               tp->snd_wl1 = TCP_SKB_CB(skb)->seq;
+               tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
                tcp_ack(sk, skb, FLAG_SLOWPATH);
 
                /* Ok.. it's good. Set up sequence numbers and
@@ -5753,7 +5726,6 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
                 * never scaled.
                 */
                tp->snd_wnd = ntohs(th->window);
-               tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
 
                if (!tp->rx_opt.wscale_ok) {
                        tp->rx_opt.snd_wscale = tp->rx_opt.rcv_wscale = 0;
@@ -5891,7 +5863,9 @@ discard:
                tcp_send_synack(sk);
 #if 0
                /* Note, we could accept data and URG from this segment.
-                * There are no obstacles to make this.
+                * There are no obstacles to make this (except that we must
+                * either change tcp_recvmsg() to prevent it from returning data
+                * before 3WHS completes per RFC793, or employ TCP Fast Open).
                 *
                 * However, if we ignore data in ACKless segments sometimes,
                 * we have no reasons to accept it sometimes.
@@ -5931,6 +5905,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 {
        struct tcp_sock *tp = tcp_sk(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);
+       struct request_sock *req;
        int queued = 0;
 
        tp->rx_opt.saw_tstamp = 0;
@@ -5986,6 +5961,14 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                return 0;
        }
 
+       req = tp->fastopen_rsk;
+       if (req != NULL) {
+               BUG_ON(sk->sk_state != TCP_SYN_RECV &&
+                   sk->sk_state != TCP_FIN_WAIT1);
+
+               if (tcp_check_req(sk, skb, req, NULL, true) == NULL)
+                       goto discard;
+       }
        if (!tcp_validate_incoming(sk, skb, th, 0))
                return 0;
 
@@ -5996,7 +5979,25 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                switch (sk->sk_state) {
                case TCP_SYN_RECV:
                        if (acceptable) {
-                               tp->copied_seq = tp->rcv_nxt;
+                               /* Once we leave TCP_SYN_RECV, we no longer
+                                * need req so release it.
+                                */
+                               if (req) {
+                                       tcp_synack_rtt_meas(sk, req);
+                                       tp->total_retrans = req->retrans;
+
+                                       reqsk_fastopen_remove(sk, req, false);
+                               } else {
+                                       /* Make sure socket is routed, for
+                                        * correct metrics.
+                                        */
+                                       icsk->icsk_af_ops->rebuild_header(sk);
+                                       tcp_init_congestion_control(sk);
+
+                                       tcp_mtup_init(sk);
+                                       tcp_init_buffer_space(sk);
+                                       tp->copied_seq = tp->rcv_nxt;
+                               }
                                smp_mb();
                                tcp_set_state(sk, TCP_ESTABLISHED);
                                sk->sk_state_change(sk);
@@ -6018,23 +6019,27 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                                if (tp->rx_opt.tstamp_ok)
                                        tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;
 
-                               /* Make sure socket is routed, for
-                                * correct metrics.
-                                */
-                               icsk->icsk_af_ops->rebuild_header(sk);
-
-                               tcp_init_metrics(sk);
-
-                               tcp_init_congestion_control(sk);
+                               if (req) {
+                                       /* Re-arm the timer because data may
+                                        * have been sent out. This is similar
+                                        * to the regular data transmission case
+                                        * when new data has just been ack'ed.
+                                        *
+                                        * (TFO) - we could try to be more
+                                        * aggressive and retransmitting any data
+                                        * sooner based on when they were sent
+                                        * out.
+                                        */
+                                       tcp_rearm_rto(sk);
+                               } else
+                                       tcp_init_metrics(sk);
 
                                /* Prevent spurious tcp_cwnd_restart() on
                                 * first data packet.
                                 */
                                tp->lsndtime = tcp_time_stamp;
 
-                               tcp_mtup_init(sk);
                                tcp_initialize_rcv_mss(sk);
-                               tcp_init_buffer_space(sk);
                                tcp_fast_path_on(tp);
                        } else {
                                return 1;
@@ -6042,6 +6047,16 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                        break;
 
                case TCP_FIN_WAIT1:
+                       /* If we enter the TCP_FIN_WAIT1 state and we are a
+                        * Fast Open socket and this is the first acceptable
+                        * ACK we have received, this would have acknowledged
+                        * our SYNACK so stop the SYNACK timer.
+                        */
+                       if (acceptable && req != NULL) {
+                               /* We no longer need the request sock. */
+                               reqsk_fastopen_remove(sk, req, false);
+                               tcp_rearm_rto(sk);
+                       }
                        if (tp->snd_una == tp->write_seq) {
                                struct dst_entry *dst;
 
index 00a748d14062d5f0410568be79aa6dc609a201be..75735c9a6a9df2bf4026266c8c30d92bd474092c 100644 (file)
@@ -352,6 +352,7 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
        const int code = icmp_hdr(icmp_skb)->code;
        struct sock *sk;
        struct sk_buff *skb;
+       struct request_sock *req;
        __u32 seq;
        __u32 remaining;
        int err;
@@ -394,9 +395,12 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
 
        icsk = inet_csk(sk);
        tp = tcp_sk(sk);
+       req = tp->fastopen_rsk;
        seq = ntohl(th->seq);
        if (sk->sk_state != TCP_LISTEN &&
-           !between(seq, tp->snd_una, tp->snd_nxt)) {
+           !between(seq, tp->snd_una, tp->snd_nxt) &&
+           (req == NULL || seq != tcp_rsk(req)->snt_isn)) {
+               /* For a Fast Open socket, allow seq to be snt_isn. */
                NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
                goto out;
        }
@@ -435,6 +439,8 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
                    !icsk->icsk_backoff)
                        break;
 
+               /* XXX (TFO) - revisit the following logic for TFO */
+
                if (sock_owned_by_user(sk))
                        break;
 
@@ -466,6 +472,14 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
                goto out;
        }
 
+       /* XXX (TFO) - if it's a TFO socket and has been accepted, rather
+        * than following the TCP_SYN_RECV case and closing the socket,
+        * we ignore the ICMP error and keep trying like a fully established
+        * socket. Is this the right thing to do?
+        */
+       if (req && req->sk == NULL)
+               goto out;
+
        switch (sk->sk_state) {
                struct request_sock *req, **prev;
        case TCP_LISTEN:
@@ -498,7 +512,8 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
 
        case TCP_SYN_SENT:
        case TCP_SYN_RECV:  /* Cannot happen.
-                              It can f.e. if SYNs crossed.
+                              It can f.e. if SYNs crossed,
+                              or Fast Open.
                             */
                if (!sock_owned_by_user(sk)) {
                        sk->sk_err = err;
@@ -809,8 +824,12 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
 static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
                                  struct request_sock *req)
 {
-       tcp_v4_send_ack(skb, tcp_rsk(req)->snt_isn + 1,
-                       tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
+       /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
+        * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
+        */
+       tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
+                       tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
+                       tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
                        req->ts_recent,
                        0,
                        tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
@@ -839,7 +858,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
        if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
                return -1;
 
-       skb = tcp_make_synack(sk, dst, req, rvp);
+       skb = tcp_make_synack(sk, dst, req, rvp, NULL);
 
        if (skb) {
                __tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr);
@@ -849,6 +868,8 @@ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
                                            ireq->rmt_addr,
                                            ireq->opt);
                err = net_xmit_eval(err);
+               if (!tcp_rsk(req)->snt_synack && !err)
+                       tcp_rsk(req)->snt_synack = tcp_time_stamp;
        }
 
        return err;
@@ -904,8 +925,7 @@ EXPORT_SYMBOL(tcp_syn_flood_action);
 /*
  * Save and compile IPv4 options into the request_sock if needed.
  */
-static struct ip_options_rcu *tcp_v4_save_options(struct sock *sk,
-                                                 struct sk_buff *skb)
+static struct ip_options_rcu *tcp_v4_save_options(struct sk_buff *skb)
 {
        const struct ip_options *opt = &(IPCB(skb)->opt);
        struct ip_options_rcu *dopt = NULL;
@@ -1272,6 +1292,182 @@ static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
 };
 #endif
 
+static bool tcp_fastopen_check(struct sock *sk, struct sk_buff *skb,
+                              struct request_sock *req,
+                              struct tcp_fastopen_cookie *foc,
+                              struct tcp_fastopen_cookie *valid_foc)
+{
+       bool skip_cookie = false;
+       struct fastopen_queue *fastopenq;
+
+       if (likely(!fastopen_cookie_present(foc))) {
+               /* See include/net/tcp.h for the meaning of these knobs */
+               if ((sysctl_tcp_fastopen & TFO_SERVER_ALWAYS) ||
+                   ((sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_REQD) &&
+                   (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1)))
+                       skip_cookie = true; /* no cookie to validate */
+               else
+                       return false;
+       }
+       fastopenq = inet_csk(sk)->icsk_accept_queue.fastopenq;
+       /* A FO option is present; bump the counter. */
+       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVE);
+
+       /* Make sure the listener has enabled fastopen, and we don't
+        * exceed the max # of pending TFO requests allowed before trying
+        * to validate the cookie in order to avoid burning CPU cycles
+        * unnecessarily.
+        *
+        * XXX (TFO) - The implication of checking the max_qlen before
+        * processing a cookie request is that clients can't differentiate
+        * between qlen overflow causing Fast Open to be disabled
+        * temporarily vs a server not supporting Fast Open at all.
+        */
+       if ((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) == 0 ||
+           fastopenq == NULL || fastopenq->max_qlen == 0)
+               return false;
+
+       if (fastopenq->qlen >= fastopenq->max_qlen) {
+               struct request_sock *req1;
+               spin_lock(&fastopenq->lock);
+               req1 = fastopenq->rskq_rst_head;
+               if ((req1 == NULL) || time_after(req1->expires, jiffies)) {
+                       spin_unlock(&fastopenq->lock);
+                       NET_INC_STATS_BH(sock_net(sk),
+                           LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
+                       /* Avoid bumping LINUX_MIB_TCPFASTOPENPASSIVEFAIL*/
+                       foc->len = -1;
+                       return false;
+               }
+               fastopenq->rskq_rst_head = req1->dl_next;
+               fastopenq->qlen--;
+               spin_unlock(&fastopenq->lock);
+               reqsk_free(req1);
+       }
+       if (skip_cookie) {
+               tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
+               return true;
+       }
+       if (foc->len == TCP_FASTOPEN_COOKIE_SIZE) {
+               if ((sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_CHKED) == 0) {
+                       tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
+                       if ((valid_foc->len != TCP_FASTOPEN_COOKIE_SIZE) ||
+                           memcmp(&foc->val[0], &valid_foc->val[0],
+                           TCP_FASTOPEN_COOKIE_SIZE) != 0)
+                               return false;
+                       valid_foc->len = -1;
+               }
+               /* Acknowledge the data received from the peer. */
+               tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
+               return true;
+       } else if (foc->len == 0) { /* Client requesting a cookie */
+               tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
+               NET_INC_STATS_BH(sock_net(sk),
+                   LINUX_MIB_TCPFASTOPENCOOKIEREQD);
+       } else {
+               /* Client sent a cookie with wrong size. Treat it
+                * the same as invalid and return a valid one.
+                */
+               tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
+       }
+       return false;
+}
+
+static int tcp_v4_conn_req_fastopen(struct sock *sk,
+                                   struct sk_buff *skb,
+                                   struct sk_buff *skb_synack,
+                                   struct request_sock *req,
+                                   struct request_values *rvp)
+{
+       struct tcp_sock *tp = tcp_sk(sk);
+       struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
+       const struct inet_request_sock *ireq = inet_rsk(req);
+       struct sock *child;
+       int err;
+
+       req->retrans = 0;
+       req->sk = NULL;
+
+       child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
+       if (child == NULL) {
+               NET_INC_STATS_BH(sock_net(sk),
+                                LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
+               kfree_skb(skb_synack);
+               return -1;
+       }
+       err = ip_build_and_send_pkt(skb_synack, sk, ireq->loc_addr,
+                                   ireq->rmt_addr, ireq->opt);
+       err = net_xmit_eval(err);
+       if (!err)
+               tcp_rsk(req)->snt_synack = tcp_time_stamp;
+       /* XXX (TFO) - is it ok to ignore error and continue? */
+
+       spin_lock(&queue->fastopenq->lock);
+       queue->fastopenq->qlen++;
+       spin_unlock(&queue->fastopenq->lock);
+
+       /* Initialize the child socket. Have to fix some values to take
+        * into account the child is a Fast Open socket and is created
+        * only out of the bits carried in the SYN packet.
+        */
+       tp = tcp_sk(child);
+
+       tp->fastopen_rsk = req;
+       /* Do a hold on the listener sk so that if the listener is being
+        * closed, the child that has been accepted can live on and still
+        * access listen_lock.
+        */
+       sock_hold(sk);
+       tcp_rsk(req)->listener = sk;
+
+       /* RFC1323: The window in SYN & SYN/ACK segments is never
+        * scaled. So correct it appropriately.
+        */
+       tp->snd_wnd = ntohs(tcp_hdr(skb)->window);
+
+       /* Activate the retrans timer so that SYNACK can be retransmitted.
+        * The request socket is not added to the SYN table of the parent
+        * because it's been added to the accept queue directly.
+        */
+       inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
+           TCP_TIMEOUT_INIT, TCP_RTO_MAX);
+
+       /* Add the child socket directly into the accept queue */
+       inet_csk_reqsk_queue_add(sk, req, child);
+
+       /* Now finish processing the fastopen child socket. */
+       inet_csk(child)->icsk_af_ops->rebuild_header(child);
+       tcp_init_congestion_control(child);
+       tcp_mtup_init(child);
+       tcp_init_buffer_space(child);
+       tcp_init_metrics(child);
+
+       /* Queue the data carried in the SYN packet. We need to first
+        * bump skb's refcnt because the caller will attempt to free it.
+        *
+        * XXX (TFO) - we honor a zero-payload TFO request for now.
+        * (Any reason not to?)
+        */
+       if (TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq + 1) {
+               /* Don't queue the skb if there is no payload in SYN.
+                * XXX (TFO) - How about SYN+FIN?
+                */
+               tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
+       } else {
+               skb = skb_get(skb);
+               skb_dst_drop(skb);
+               __skb_pull(skb, tcp_hdr(skb)->doff * 4);
+               skb_set_owner_r(skb, child);
+               __skb_queue_tail(&child->sk_receive_queue, skb);
+               tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
+       }
+       sk->sk_data_ready(sk, 0);
+       bh_unlock_sock(child);
+       sock_put(child);
+       WARN_ON(req->sk == NULL);
+       return 0;
+}
+
 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 {
        struct tcp_extend_values tmp_ext;
@@ -1285,6 +1481,11 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
        __be32 daddr = ip_hdr(skb)->daddr;
        __u32 isn = TCP_SKB_CB(skb)->when;
        bool want_cookie = false;
+       struct flowi4 fl4;
+       struct tcp_fastopen_cookie foc = { .len = -1 };
+       struct tcp_fastopen_cookie valid_foc = { .len = -1 };
+       struct sk_buff *skb_synack;
+       int do_fastopen;
 
        /* Never answer to SYNs send to broadcast or multicast */
        if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
@@ -1319,7 +1520,8 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
        tcp_clear_options(&tmp_opt);
        tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
        tmp_opt.user_mss  = tp->rx_opt.user_mss;
-       tcp_parse_options(skb, &tmp_opt, &hash_location, 0, NULL);
+       tcp_parse_options(skb, &tmp_opt, &hash_location, 0,
+           want_cookie ? NULL : &foc);
 
        if (tmp_opt.cookie_plus > 0 &&
            tmp_opt.saw_tstamp &&
@@ -1365,7 +1567,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
        ireq->loc_addr = daddr;
        ireq->rmt_addr = saddr;
        ireq->no_srccheck = inet_sk(sk)->transparent;
-       ireq->opt = tcp_v4_save_options(sk, skb);
+       ireq->opt = tcp_v4_save_options(skb);
 
        if (security_inet_conn_request(sk, skb, req))
                goto drop_and_free;
@@ -1377,8 +1579,6 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
                isn = cookie_v4_init_sequence(sk, skb, &req->mss);
                req->cookie_ts = tmp_opt.tstamp_ok;
        } else if (!isn) {
-               struct flowi4 fl4;
-
                /* VJ's idea. We save last timestamp seen
                 * from the destination in peer table, when entering
                 * state TIME-WAIT, and check against it before
@@ -1417,16 +1617,54 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
                isn = tcp_v4_init_sequence(skb);
        }
        tcp_rsk(req)->snt_isn = isn;
-       tcp_rsk(req)->snt_synack = tcp_time_stamp;
 
-       if (tcp_v4_send_synack(sk, dst, req,
-                              (struct request_values *)&tmp_ext,
-                              skb_get_queue_mapping(skb),
-                              want_cookie) ||
-           want_cookie)
+       if (dst == NULL) {
+               dst = inet_csk_route_req(sk, &fl4, req);
+               if (dst == NULL)
+                       goto drop_and_free;
+       }
+       do_fastopen = tcp_fastopen_check(sk, skb, req, &foc, &valid_foc);
+
+       /* We don't call tcp_v4_send_synack() directly because we need
+        * to make sure a child socket can be created successfully before
+        * sending back synack!
+        *
+        * XXX (TFO) - Ideally one would simply call tcp_v4_send_synack()
+        * (or better yet, call tcp_send_synack() in the child context
+        * directly, but will have to fix bunch of other code first)
+        * after syn_recv_sock() except one will need to first fix the
+        * latter to remove its dependency on the current implementation
+        * of tcp_v4_send_synack()->tcp_select_initial_window().
+        */
+       skb_synack = tcp_make_synack(sk, dst, req,
+           (struct request_values *)&tmp_ext,
+           fastopen_cookie_present(&valid_foc) ? &valid_foc : NULL);
+
+       if (skb_synack) {
+               __tcp_v4_send_check(skb_synack, ireq->loc_addr, ireq->rmt_addr);
+               skb_set_queue_mapping(skb_synack, skb_get_queue_mapping(skb));
+       } else
+               goto drop_and_free;
+
+       if (likely(!do_fastopen)) {
+               int err;
+               err = ip_build_and_send_pkt(skb_synack, sk, ireq->loc_addr,
+                    ireq->rmt_addr, ireq->opt);
+               err = net_xmit_eval(err);
+               if (err || want_cookie)
+                       goto drop_and_free;
+
+               tcp_rsk(req)->snt_synack = tcp_time_stamp;
+               tcp_rsk(req)->listener = NULL;
+               /* Add the request_sock to the SYN table */
+               inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
+               if (fastopen_cookie_present(&foc) && foc.len != 0)
+                       NET_INC_STATS_BH(sock_net(sk),
+                           LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
+       } else if (tcp_v4_conn_req_fastopen(sk, skb, skb_synack, req,
+           (struct request_values *)&tmp_ext))
                goto drop_and_free;
 
-       inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
        return 0;
 
 drop_and_release:
@@ -1500,9 +1738,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
                newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
 
        tcp_initialize_rcv_mss(newsk);
-       if (tcp_rsk(req)->snt_synack)
-               tcp_valid_rtt_meas(newsk,
-                   tcp_time_stamp - tcp_rsk(req)->snt_synack);
+       tcp_synack_rtt_meas(newsk, req);
        newtp->total_retrans = req->retrans;
 
 #ifdef CONFIG_TCP_MD5SIG
@@ -1554,7 +1790,7 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
        struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
                                                       iph->saddr, iph->daddr);
        if (req)
-               return tcp_check_req(sk, skb, req, prev);
+               return tcp_check_req(sk, skb, req, prev, false);
 
        nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
                        th->source, iph->daddr, th->dest, inet_iif(skb));
@@ -1963,20 +2199,13 @@ void tcp_v4_destroy_sock(struct sock *sk)
        if (inet_csk(sk)->icsk_bind_hash)
                inet_put_port(sk);
 
-       /*
-        * If sendmsg cached page exists, toss it.
-        */
-       if (sk->sk_sndmsg_page) {
-               __free_page(sk->sk_sndmsg_page);
-               sk->sk_sndmsg_page = NULL;
-       }
-
        /* TCP Cookie Transactions */
        if (tp->cookie_values != NULL) {
                kref_put(&tp->cookie_values->kref,
                         tcp_cookie_values_release);
                tp->cookie_values = NULL;
        }
+       BUG_ON(tp->fastopen_rsk != NULL);
 
        /* If socket is aborted during connect operation */
        tcp_free_fastopen_req(tp);
@@ -2393,10 +2622,10 @@ void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
 EXPORT_SYMBOL(tcp_proc_unregister);
 
 static void get_openreq4(const struct sock *sk, const struct request_sock *req,
-                        struct seq_file *f, int i, int uid, int *len)
+                        struct seq_file *f, int i, kuid_t uid, int *len)
 {
        const struct inet_request_sock *ireq = inet_rsk(req);
-       int ttd = req->expires - jiffies;
+       long delta = req->expires - jiffies;
 
        seq_printf(f, "%4d: %08X:%04X %08X:%04X"
                " %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %pK%n",
@@ -2408,9 +2637,9 @@ static void get_openreq4(const struct sock *sk, const struct request_sock *req,
                TCP_SYN_RECV,
                0, 0, /* could print option size, but that is af dependent. */
                1,    /* timers active (only the expire timer) */
-               jiffies_to_clock_t(ttd),
+               jiffies_delta_to_clock_t(delta),
                req->retrans,
-               uid,
+               from_kuid_munged(seq_user_ns(f), uid),
                0,  /* non standard timer */
                0, /* open_requests have no inode */
                atomic_read(&sk->sk_refcnt),
@@ -2425,6 +2654,7 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
        const struct tcp_sock *tp = tcp_sk(sk);
        const struct inet_connection_sock *icsk = inet_csk(sk);
        const struct inet_sock *inet = inet_sk(sk);
+       struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;
        __be32 dest = inet->inet_daddr;
        __be32 src = inet->inet_rcv_saddr;
        __u16 destp = ntohs(inet->inet_dport);
@@ -2459,9 +2689,9 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
                tp->write_seq - tp->snd_una,
                rx_queue,
                timer_active,
-               jiffies_to_clock_t(timer_expires - jiffies),
+               jiffies_delta_to_clock_t(timer_expires - jiffies),
                icsk->icsk_retransmits,
-               sock_i_uid(sk),
+               from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
                icsk->icsk_probes_out,
                sock_i_ino(sk),
                atomic_read(&sk->sk_refcnt), sk,
@@ -2469,7 +2699,9 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
                jiffies_to_clock_t(icsk->icsk_ack.ato),
                (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
                tp->snd_cwnd,
-               tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh,
+               sk->sk_state == TCP_LISTEN ?
+                   (fastopenq ? fastopenq->max_qlen : 0) :
+                   (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh),
                len);
 }
 
@@ -2478,10 +2710,7 @@ static void get_timewait4_sock(const struct inet_timewait_sock *tw,
 {
        __be32 dest, src;
        __u16 destp, srcp;
-       int ttd = tw->tw_ttd - jiffies;
-
-       if (ttd < 0)
-               ttd = 0;
+       long delta = tw->tw_ttd - jiffies;
 
        dest  = tw->tw_daddr;
        src   = tw->tw_rcv_saddr;
@@ -2491,7 +2720,7 @@ static void get_timewait4_sock(const struct inet_timewait_sock *tw,
        seq_printf(f, "%4d: %08X:%04X %08X:%04X"
                " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
                i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
-               3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
+               3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
                atomic_read(&tw->tw_refcnt), tw, len);
 }
 
@@ -2574,6 +2803,8 @@ void tcp4_proc_exit(void)
 struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
 {
        const struct iphdr *iph = skb_gro_network_header(skb);
+       __wsum wsum;
+       __sum16 sum;
 
        switch (skb->ip_summed) {
        case CHECKSUM_COMPLETE:
@@ -2582,11 +2813,22 @@ struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                        break;
                }
-
-               /* fall through */
-       case CHECKSUM_NONE:
+flush:
                NAPI_GRO_CB(skb)->flush = 1;
                return NULL;
+
+       case CHECKSUM_NONE:
+               wsum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
+                                         skb_gro_len(skb), IPPROTO_TCP, 0);
+               sum = csum_fold(skb_checksum(skb,
+                                            skb_gro_offset(skb),
+                                            skb_gro_len(skb),
+                                            wsum));
+               if (sum)
+                       goto flush;
+
+               skb->ip_summed = CHECKSUM_UNNECESSARY;
+               break;
        }
 
        return tcp_gro_receive(head, skb);
index 0abe67bb4d3a3adb0d9df820b1fe64b6ef9da591..4c752a6e0bcd91b0b932b483a2b9f988908c04a2 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/init.h>
 #include <linux/tcp.h>
 #include <linux/hash.h>
+#include <linux/tcp_metrics.h>
 
 #include <net/inet_connection_sock.h>
 #include <net/net_namespace.h>
 #include <net/ipv6.h>
 #include <net/dst.h>
 #include <net/tcp.h>
+#include <net/genetlink.h>
 
 int sysctl_tcp_nometrics_save __read_mostly;
 
-enum tcp_metric_index {
-       TCP_METRIC_RTT,
-       TCP_METRIC_RTTVAR,
-       TCP_METRIC_SSTHRESH,
-       TCP_METRIC_CWND,
-       TCP_METRIC_REORDERING,
-
-       /* Always last.  */
-       TCP_METRIC_MAX,
-};
-
 struct tcp_fastopen_metrics {
        u16     mss;
        u16     syn_loss:10;            /* Recurring Fast Open SYN losses */
@@ -45,8 +36,10 @@ struct tcp_metrics_block {
        u32                             tcpm_ts;
        u32                             tcpm_ts_stamp;
        u32                             tcpm_lock;
-       u32                             tcpm_vals[TCP_METRIC_MAX];
+       u32                             tcpm_vals[TCP_METRIC_MAX + 1];
        struct tcp_fastopen_metrics     tcpm_fastopen;
+
+       struct rcu_head                 rcu_head;
 };
 
 static bool tcp_metric_locked(struct tcp_metrics_block *tm,
@@ -690,6 +683,325 @@ void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
        rcu_read_unlock();
 }
 
+static struct genl_family tcp_metrics_nl_family = {
+       .id             = GENL_ID_GENERATE,
+       .hdrsize        = 0,
+       .name           = TCP_METRICS_GENL_NAME,
+       .version        = TCP_METRICS_GENL_VERSION,
+       .maxattr        = TCP_METRICS_ATTR_MAX,
+       .netnsok        = true,
+};
+
+static struct nla_policy tcp_metrics_nl_policy[TCP_METRICS_ATTR_MAX + 1] = {
+       [TCP_METRICS_ATTR_ADDR_IPV4]    = { .type = NLA_U32, },
+       [TCP_METRICS_ATTR_ADDR_IPV6]    = { .type = NLA_BINARY,
+                                           .len = sizeof(struct in6_addr), },
+       /* Following attributes are not received for GET/DEL,
+        * we keep them for reference
+        */
+#if 0
+       [TCP_METRICS_ATTR_AGE]          = { .type = NLA_MSECS, },
+       [TCP_METRICS_ATTR_TW_TSVAL]     = { .type = NLA_U32, },
+       [TCP_METRICS_ATTR_TW_TS_STAMP]  = { .type = NLA_S32, },
+       [TCP_METRICS_ATTR_VALS]         = { .type = NLA_NESTED, },
+       [TCP_METRICS_ATTR_FOPEN_MSS]    = { .type = NLA_U16, },
+       [TCP_METRICS_ATTR_FOPEN_SYN_DROPS]      = { .type = NLA_U16, },
+       [TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS]    = { .type = NLA_MSECS, },
+       [TCP_METRICS_ATTR_FOPEN_COOKIE] = { .type = NLA_BINARY,
+                                           .len = TCP_FASTOPEN_COOKIE_MAX, },
+#endif
+};
+
+/* Add attributes, caller cancels its header on failure */
+static int tcp_metrics_fill_info(struct sk_buff *msg,
+                                struct tcp_metrics_block *tm)
+{
+       struct nlattr *nest;
+       int i;
+
+       switch (tm->tcpm_addr.family) {
+       case AF_INET:
+               if (nla_put_be32(msg, TCP_METRICS_ATTR_ADDR_IPV4,
+                               tm->tcpm_addr.addr.a4) < 0)
+                       goto nla_put_failure;
+               break;
+       case AF_INET6:
+               if (nla_put(msg, TCP_METRICS_ATTR_ADDR_IPV6, 16,
+                           tm->tcpm_addr.addr.a6) < 0)
+                       goto nla_put_failure;
+               break;
+       default:
+               return -EAFNOSUPPORT;
+       }
+
+       if (nla_put_msecs(msg, TCP_METRICS_ATTR_AGE,
+                         jiffies - tm->tcpm_stamp) < 0)
+               goto nla_put_failure;
+       if (tm->tcpm_ts_stamp) {
+               if (nla_put_s32(msg, TCP_METRICS_ATTR_TW_TS_STAMP,
+                               (s32) (get_seconds() - tm->tcpm_ts_stamp)) < 0)
+                       goto nla_put_failure;
+               if (nla_put_u32(msg, TCP_METRICS_ATTR_TW_TSVAL,
+                               tm->tcpm_ts) < 0)
+                       goto nla_put_failure;
+       }
+
+       {
+               int n = 0;
+
+               nest = nla_nest_start(msg, TCP_METRICS_ATTR_VALS);
+               if (!nest)
+                       goto nla_put_failure;
+               for (i = 0; i < TCP_METRIC_MAX + 1; i++) {
+                       if (!tm->tcpm_vals[i])
+                               continue;
+                       if (nla_put_u32(msg, i + 1, tm->tcpm_vals[i]) < 0)
+                               goto nla_put_failure;
+                       n++;
+               }
+               if (n)
+                       nla_nest_end(msg, nest);
+               else
+                       nla_nest_cancel(msg, nest);
+       }
+
+       {
+               struct tcp_fastopen_metrics tfom_copy[1], *tfom;
+               unsigned int seq;
+
+               do {
+                       seq = read_seqbegin(&fastopen_seqlock);
+                       tfom_copy[0] = tm->tcpm_fastopen;
+               } while (read_seqretry(&fastopen_seqlock, seq));
+
+               tfom = tfom_copy;
+               if (tfom->mss &&
+                   nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_MSS,
+                               tfom->mss) < 0)
+                       goto nla_put_failure;
+               if (tfom->syn_loss &&
+                   (nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROPS,
+                               tfom->syn_loss) < 0 ||
+                    nla_put_msecs(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS,
+                               jiffies - tfom->last_syn_loss) < 0))
+                       goto nla_put_failure;
+               if (tfom->cookie.len > 0 &&
+                   nla_put(msg, TCP_METRICS_ATTR_FOPEN_COOKIE,
+                           tfom->cookie.len, tfom->cookie.val) < 0)
+                       goto nla_put_failure;
+       }
+
+       return 0;
+
+nla_put_failure:
+       return -EMSGSIZE;
+}
+
+static int tcp_metrics_dump_info(struct sk_buff *skb,
+                                struct netlink_callback *cb,
+                                struct tcp_metrics_block *tm)
+{
+       void *hdr;
+
+       hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
+                         &tcp_metrics_nl_family, NLM_F_MULTI,
+                         TCP_METRICS_CMD_GET);
+       if (!hdr)
+               return -EMSGSIZE;
+
+       if (tcp_metrics_fill_info(skb, tm) < 0)
+               goto nla_put_failure;
+
+       return genlmsg_end(skb, hdr);
+
+nla_put_failure:
+       genlmsg_cancel(skb, hdr);
+       return -EMSGSIZE;
+}
+
+static int tcp_metrics_nl_dump(struct sk_buff *skb,
+                              struct netlink_callback *cb)
+{
+       struct net *net = sock_net(skb->sk);
+       unsigned int max_rows = 1U << net->ipv4.tcp_metrics_hash_log;
+       unsigned int row, s_row = cb->args[0];
+       int s_col = cb->args[1], col = s_col;
+
+       for (row = s_row; row < max_rows; row++, s_col = 0) {
+               struct tcp_metrics_block *tm;
+               struct tcpm_hash_bucket *hb = net->ipv4.tcp_metrics_hash + row;
+
+               rcu_read_lock();
+               for (col = 0, tm = rcu_dereference(hb->chain); tm;
+                    tm = rcu_dereference(tm->tcpm_next), col++) {
+                       if (col < s_col)
+                               continue;
+                       if (tcp_metrics_dump_info(skb, cb, tm) < 0) {
+                               rcu_read_unlock();
+                               goto done;
+                       }
+               }
+               rcu_read_unlock();
+       }
+
+done:
+       cb->args[0] = row;
+       cb->args[1] = col;
+       return skb->len;
+}
+
+static int parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
+                        unsigned int *hash, int optional)
+{
+       struct nlattr *a;
+
+       a = info->attrs[TCP_METRICS_ATTR_ADDR_IPV4];
+       if (a) {
+               addr->family = AF_INET;
+               addr->addr.a4 = nla_get_be32(a);
+               *hash = (__force unsigned int) addr->addr.a4;
+               return 0;
+       }
+       a = info->attrs[TCP_METRICS_ATTR_ADDR_IPV6];
+       if (a) {
+               if (nla_len(a) != sizeof(struct in6_addr))
+                       return -EINVAL;
+               addr->family = AF_INET6;
+               memcpy(addr->addr.a6, nla_data(a), sizeof(addr->addr.a6));
+               *hash = ipv6_addr_hash((struct in6_addr *) addr->addr.a6);
+               return 0;
+       }
+       return optional ? 1 : -EAFNOSUPPORT;
+}
+
+static int tcp_metrics_nl_cmd_get(struct sk_buff *skb, struct genl_info *info)
+{
+       struct tcp_metrics_block *tm;
+       struct inetpeer_addr addr;
+       unsigned int hash;
+       struct sk_buff *msg;
+       struct net *net = genl_info_net(info);
+       void *reply;
+       int ret;
+
+       ret = parse_nl_addr(info, &addr, &hash, 0);
+       if (ret < 0)
+               return ret;
+
+       msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+       if (!msg)
+               return -ENOMEM;
+
+       reply = genlmsg_put_reply(msg, info, &tcp_metrics_nl_family, 0,
+                                 info->genlhdr->cmd);
+       if (!reply)
+               goto nla_put_failure;
+
+       hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);
+       ret = -ESRCH;
+       rcu_read_lock();
+       for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
+            tm = rcu_dereference(tm->tcpm_next)) {
+               if (addr_same(&tm->tcpm_addr, &addr)) {
+                       ret = tcp_metrics_fill_info(msg, tm);
+                       break;
+               }
+       }
+       rcu_read_unlock();
+       if (ret < 0)
+               goto out_free;
+
+       genlmsg_end(msg, reply);
+       return genlmsg_reply(msg, info);
+
+nla_put_failure:
+       ret = -EMSGSIZE;
+
+out_free:
+       nlmsg_free(msg);
+       return ret;
+}
+
+#define deref_locked_genl(p)   \
+       rcu_dereference_protected(p, lockdep_genl_is_held() && \
+                                    lockdep_is_held(&tcp_metrics_lock))
+
+#define deref_genl(p)  rcu_dereference_protected(p, lockdep_genl_is_held())
+
+static int tcp_metrics_flush_all(struct net *net)
+{
+       unsigned int max_rows = 1U << net->ipv4.tcp_metrics_hash_log;
+       struct tcpm_hash_bucket *hb = net->ipv4.tcp_metrics_hash;
+       struct tcp_metrics_block *tm;
+       unsigned int row;
+
+       for (row = 0; row < max_rows; row++, hb++) {
+               spin_lock_bh(&tcp_metrics_lock);
+               tm = deref_locked_genl(hb->chain);
+               if (tm)
+                       hb->chain = NULL;
+               spin_unlock_bh(&tcp_metrics_lock);
+               while (tm) {
+                       struct tcp_metrics_block *next;
+
+                       next = deref_genl(tm->tcpm_next);
+                       kfree_rcu(tm, rcu_head);
+                       tm = next;
+               }
+       }
+       return 0;
+}
+
+static int tcp_metrics_nl_cmd_del(struct sk_buff *skb, struct genl_info *info)
+{
+       struct tcpm_hash_bucket *hb;
+       struct tcp_metrics_block *tm;
+       struct tcp_metrics_block __rcu **pp;
+       struct inetpeer_addr addr;
+       unsigned int hash;
+       struct net *net = genl_info_net(info);
+       int ret;
+
+       ret = parse_nl_addr(info, &addr, &hash, 1);
+       if (ret < 0)
+               return ret;
+       if (ret > 0)
+               return tcp_metrics_flush_all(net);
+
+       hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);
+       hb = net->ipv4.tcp_metrics_hash + hash;
+       pp = &hb->chain;
+       spin_lock_bh(&tcp_metrics_lock);
+       for (tm = deref_locked_genl(*pp); tm;
+            pp = &tm->tcpm_next, tm = deref_locked_genl(*pp)) {
+               if (addr_same(&tm->tcpm_addr, &addr)) {
+                       *pp = tm->tcpm_next;
+                       break;
+               }
+       }
+       spin_unlock_bh(&tcp_metrics_lock);
+       if (!tm)
+               return -ESRCH;
+       kfree_rcu(tm, rcu_head);
+       return 0;
+}
+
+static struct genl_ops tcp_metrics_nl_ops[] = {
+       {
+               .cmd = TCP_METRICS_CMD_GET,
+               .doit = tcp_metrics_nl_cmd_get,
+               .dumpit = tcp_metrics_nl_dump,
+               .policy = tcp_metrics_nl_policy,
+               .flags = GENL_ADMIN_PERM,
+       },
+       {
+               .cmd = TCP_METRICS_CMD_DEL,
+               .doit = tcp_metrics_nl_cmd_del,
+               .policy = tcp_metrics_nl_policy,
+               .flags = GENL_ADMIN_PERM,
+       },
+};
+
 static unsigned int tcpmhash_entries;
 static int __init set_tcpmhash_entries(char *str)
 {
@@ -753,5 +1065,21 @@ static __net_initdata struct pernet_operations tcp_net_metrics_ops = {
 
 void __init tcp_metrics_init(void)
 {
-       register_pernet_subsys(&tcp_net_metrics_ops);
+       int ret;
+
+       ret = register_pernet_subsys(&tcp_net_metrics_ops);
+       if (ret < 0)
+               goto cleanup;
+       ret = genl_register_family_with_ops(&tcp_metrics_nl_family,
+                                           tcp_metrics_nl_ops,
+                                           ARRAY_SIZE(tcp_metrics_nl_ops));
+       if (ret < 0)
+               goto cleanup_subsys;
+       return;
+
+cleanup_subsys:
+       unregister_pernet_subsys(&tcp_net_metrics_ops);
+
+cleanup:
+       return;
 }
index 6ff7f10dce9d56c2f99f0cb13dab38f69eec4619..27536ba16c9da7b89d5385073c7ae4d4f9e27f3c 100644 (file)
@@ -85,6 +85,8 @@ static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
  * spinlock it. I do not want! Well, probability of misbehaviour
  * is ridiculously low and, seems, we could use some mb() tricks
  * to avoid misread sequence numbers, states etc.  --ANK
+ *
+ * We don't need to initialize tmp_opt.sack_ok as we don't use the results
  */
 enum tcp_tw_status
 tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
@@ -507,6 +509,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
                        newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
                newtp->rx_opt.mss_clamp = req->mss;
                TCP_ECN_openreq_child(newtp, req);
+               newtp->fastopen_rsk = NULL;
 
                TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_PASSIVEOPENS);
        }
@@ -515,13 +518,20 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
 EXPORT_SYMBOL(tcp_create_openreq_child);
 
 /*
- *     Process an incoming packet for SYN_RECV sockets represented
- *     as a request_sock.
+ * Process an incoming packet for SYN_RECV sockets represented as a
+ * request_sock. Normally sk is the listener socket but for TFO it
+ * points to the child socket.
+ *
+ * XXX (TFO) - The current impl contains a special check for ack
+ * validation and inside tcp_v4_reqsk_send_ack(). Can we do better?
+ *
+ * We don't need to initialize tmp_opt.sack_ok as we don't use the results
  */
 
 struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
                           struct request_sock *req,
-                          struct request_sock **prev)
+                          struct request_sock **prev,
+                          bool fastopen)
 {
        struct tcp_options_received tmp_opt;
        const u8 *hash_location;
@@ -530,6 +540,8 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
        __be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
        bool paws_reject = false;
 
+       BUG_ON(fastopen == (sk->sk_state == TCP_LISTEN));
+
        tmp_opt.saw_tstamp = 0;
        if (th->doff > (sizeof(struct tcphdr)>>2)) {
                tcp_parse_options(skb, &tmp_opt, &hash_location, 0, NULL);
@@ -565,6 +577,9 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
                 *
                 * Enforce "SYN-ACK" according to figure 8, figure 6
                 * of RFC793, fixed by RFC1122.
+                *
+                * Note that even if there is new data in the SYN packet
+                * they will be thrown away too.
                 */
                req->rsk_ops->rtx_syn_ack(sk, req, NULL);
                return NULL;
@@ -622,9 +637,12 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
         *                  sent (the segment carries an unacceptable ACK) ...
         *                  a reset is sent."
         *
-        * Invalid ACK: reset will be sent by listening socket
+        * Invalid ACK: reset will be sent by listening socket.
+        * Note that the ACK validity check for a Fast Open socket is done
+        * elsewhere and is checked directly against the child socket rather
+        * than req because user data may have been sent out.
         */
-       if ((flg & TCP_FLAG_ACK) &&
+       if ((flg & TCP_FLAG_ACK) && !fastopen &&
            (TCP_SKB_CB(skb)->ack_seq !=
             tcp_rsk(req)->snt_isn + 1 + tcp_s_data_size(tcp_sk(sk))))
                return sk;
@@ -637,7 +655,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
        /* RFC793: "first check sequence number". */
 
        if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
-                                         tcp_rsk(req)->rcv_isn + 1, tcp_rsk(req)->rcv_isn + 1 + req->rcv_wnd)) {
+                                         tcp_rsk(req)->rcv_nxt, tcp_rsk(req)->rcv_nxt + req->rcv_wnd)) {
                /* Out of window: send ACK and drop. */
                if (!(flg & TCP_FLAG_RST))
                        req->rsk_ops->send_ack(sk, skb, req);
@@ -648,7 +666,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
 
        /* In sequence, PAWS is OK. */
 
-       if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_isn + 1))
+       if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
                req->ts_recent = tmp_opt.rcv_tsval;
 
        if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
@@ -667,10 +685,25 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
 
        /* ACK sequence verified above, just make sure ACK is
         * set.  If ACK not set, just silently drop the packet.
+        *
+        * XXX (TFO) - if we ever allow "data after SYN", the
+        * following check needs to be removed.
         */
        if (!(flg & TCP_FLAG_ACK))
                return NULL;
 
+       /* Got ACK for our SYNACK, so update baseline for SYNACK RTT sample. */
+       if (tmp_opt.saw_tstamp && tmp_opt.rcv_tsecr)
+               tcp_rsk(req)->snt_synack = tmp_opt.rcv_tsecr;
+       else if (req->retrans) /* don't take RTT sample if retrans && ~TS */
+               tcp_rsk(req)->snt_synack = 0;
+
+       /* For Fast Open no more processing is needed (sk is the
+        * child socket).
+        */
+       if (fastopen)
+               return sk;
+
        /* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
        if (req->retrans < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
            TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
@@ -678,10 +711,6 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
                return NULL;
        }
-       if (tmp_opt.saw_tstamp && tmp_opt.rcv_tsecr)
-               tcp_rsk(req)->snt_synack = tmp_opt.rcv_tsecr;
-       else if (req->retrans) /* don't take RTT sample if retrans && ~TS */
-               tcp_rsk(req)->snt_synack = 0;
 
        /* OK, ACK is valid, create big socket and
         * feed this segment to it. It will repeat all
@@ -706,11 +735,21 @@ listen_overflow:
        }
 
 embryonic_reset:
-       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
-       if (!(flg & TCP_FLAG_RST))
+       if (!(flg & TCP_FLAG_RST)) {
+               /* Received a bad SYN pkt - for TFO We try not to reset
+                * the local connection unless it's really necessary to
+                * avoid becoming vulnerable to outside attack aiming at
+                * resetting legit local connections.
+                */
                req->rsk_ops->send_reset(sk, skb);
-
-       inet_csk_reqsk_queue_drop(sk, req, prev);
+       } else if (fastopen) { /* received a valid RST pkt */
+               reqsk_fastopen_remove(sk, req, true);
+               tcp_reset(sk);
+       }
+       if (!fastopen) {
+               inet_csk_reqsk_queue_drop(sk, req, prev);
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
+       }
        return NULL;
 }
 EXPORT_SYMBOL(tcp_check_req);
@@ -719,6 +758,12 @@ EXPORT_SYMBOL(tcp_check_req);
  * Queue segment on the new socket if the new socket is active,
  * otherwise we just shortcircuit this and continue with
  * the new socket.
+ *
+ * For the vast majority of cases child->sk_state will be TCP_SYN_RECV
+ * when entering. But other states are possible due to a race condition
+ * where after __inet_lookup_established() fails but before the listener
+ * locked is obtained, other packets cause the same connection to
+ * be created.
  */
 
 int tcp_child_process(struct sock *parent, struct sock *child,
index d04632673a9e5f27725731e420d0997d91259aef..cfe6ffe1c1778b6517297ad3ac9b87d17ce14582 100644 (file)
@@ -702,7 +702,8 @@ static unsigned int tcp_synack_options(struct sock *sk,
                                   unsigned int mss, struct sk_buff *skb,
                                   struct tcp_out_options *opts,
                                   struct tcp_md5sig_key **md5,
-                                  struct tcp_extend_values *xvp)
+                                  struct tcp_extend_values *xvp,
+                                  struct tcp_fastopen_cookie *foc)
 {
        struct inet_request_sock *ireq = inet_rsk(req);
        unsigned int remaining = MAX_TCP_OPTION_SPACE;
@@ -747,7 +748,15 @@ static unsigned int tcp_synack_options(struct sock *sk,
                if (unlikely(!ireq->tstamp_ok))
                        remaining -= TCPOLEN_SACKPERM_ALIGNED;
        }
-
+       if (foc != NULL) {
+               u32 need = TCPOLEN_EXP_FASTOPEN_BASE + foc->len;
+               need = (need + 3) & ~3U;  /* Align to 32 bits */
+               if (remaining >= need) {
+                       opts->options |= OPTION_FAST_OPEN_COOKIE;
+                       opts->fastopen_cookie = foc;
+                       remaining -= need;
+               }
+       }
        /* Similar rationale to tcp_syn_options() applies here, too.
         * If the <SYN> options fit, the same options should fit now!
         */
@@ -2028,10 +2037,10 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
                if (push_one)
                        break;
        }
-       if (inet_csk(sk)->icsk_ca_state == TCP_CA_Recovery)
-               tp->prr_out += sent_pkts;
 
        if (likely(sent_pkts)) {
+               if (tcp_in_cwnd_reduction(sk))
+                       tp->prr_out += sent_pkts;
                tcp_cwnd_validate(sk);
                return false;
        }
@@ -2533,7 +2542,7 @@ begin_fwd:
                }
                NET_INC_STATS_BH(sock_net(sk), mib_idx);
 
-               if (inet_csk(sk)->icsk_ca_state == TCP_CA_Recovery)
+               if (tcp_in_cwnd_reduction(sk))
                        tp->prr_out += tcp_skb_pcount(skb);
 
                if (skb == tcp_write_queue_head(sk))
@@ -2658,7 +2667,8 @@ int tcp_send_synack(struct sock *sk)
  */
 struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
                                struct request_sock *req,
-                               struct request_values *rvp)
+                               struct request_values *rvp,
+                               struct tcp_fastopen_cookie *foc)
 {
        struct tcp_out_options opts;
        struct tcp_extend_values *xvp = tcp_xv(rvp);
@@ -2718,7 +2728,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
 #endif
        TCP_SKB_CB(skb)->when = tcp_time_stamp;
        tcp_header_size = tcp_synack_options(sk, req, mss,
-                                            skb, &opts, &md5, xvp)
+                                            skb, &opts, &md5, xvp, foc)
                        + sizeof(*th);
 
        skb_push(skb, tcp_header_size);
@@ -2772,7 +2782,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
        }
 
        th->seq = htonl(TCP_SKB_CB(skb)->seq);
-       th->ack_seq = htonl(tcp_rsk(req)->rcv_isn + 1);
+       /* XXX data is queued and acked as is. No buffer/window check */
+       th->ack_seq = htonl(tcp_rsk(req)->rcv_nxt);
 
        /* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */
        th->window = htons(min(req->rcv_wnd, 65535U));
index b774a03bd1dcc1ccafa245a892ac0b312511a900..fc04711e80c89dd0dc92ff1027efaa7324b218c3 100644 (file)
@@ -304,6 +304,35 @@ static void tcp_probe_timer(struct sock *sk)
        }
 }
 
+/*
+ *     Timer for Fast Open socket to retransmit SYNACK. Note that the
+ *     sk here is the child socket, not the parent (listener) socket.
+ */
+static void tcp_fastopen_synack_timer(struct sock *sk)
+{
+       struct inet_connection_sock *icsk = inet_csk(sk);
+       int max_retries = icsk->icsk_syn_retries ? :
+           sysctl_tcp_synack_retries + 1; /* add one more retry for fastopen */
+       struct request_sock *req;
+
+       req = tcp_sk(sk)->fastopen_rsk;
+       req->rsk_ops->syn_ack_timeout(sk, req);
+
+       if (req->retrans >= max_retries) {
+               tcp_write_err(sk);
+               return;
+       }
+       /* XXX (TFO) - Unlike regular SYN-ACK retransmit, we ignore error
+        * returned from rtx_syn_ack() to make it more persistent like
+        * regular retransmit because if the child socket has been accepted
+        * it's not good to give up too easily.
+        */
+       req->rsk_ops->rtx_syn_ack(sk, req, NULL);
+       req->retrans++;
+       inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
+                         TCP_TIMEOUT_INIT << req->retrans, TCP_RTO_MAX);
+}
+
 /*
  *     The TCP retransmit timer.
  */
@@ -317,7 +346,15 @@ void tcp_retransmit_timer(struct sock *sk)
                tcp_resume_early_retransmit(sk);
                return;
        }
-
+       if (tp->fastopen_rsk) {
+               BUG_ON(sk->sk_state != TCP_SYN_RECV &&
+                   sk->sk_state != TCP_FIN_WAIT1);
+               tcp_fastopen_synack_timer(sk);
+               /* Before we receive ACK to our SYN-ACK don't retransmit
+                * anything else (e.g., data or FIN segments).
+                */
+               return;
+       }
        if (!tp->packets_out)
                goto out;
 
index 2814f66dac64cf5775806138c91903c7a02eeae3..79c8dbe59b5474bdc3e23ba8adc227e1bee016a4 100644 (file)
@@ -2115,7 +2115,9 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
                bucket, src, srcp, dest, destp, sp->sk_state,
                sk_wmem_alloc_get(sp),
                sk_rmem_alloc_get(sp),
-               0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
+               0, 0L, 0,
+               from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
+               0, sock_i_ino(sp),
                atomic_read(&sp->sk_refcnt), sp,
                atomic_read(&sp->sk_drops), len);
 }
index 16d0960062be9a5fd0bb8c031e224c04595313c2..505b30ad9182dc83e42e106b4bbaa507dd84f591 100644 (file)
@@ -24,7 +24,9 @@ static int sk_diag_dump(struct sock *sk, struct sk_buff *skb,
        if (!inet_diag_bc_sk(bc, sk))
                return 0;
 
-       return inet_sk_diag_fill(sk, NULL, skb, req, NETLINK_CB(cb->skb).pid,
+       return inet_sk_diag_fill(sk, NULL, skb, req,
+                       sk_user_ns(NETLINK_CB(cb->skb).ssk),
+                       NETLINK_CB(cb->skb).portid,
                        cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
 }
 
@@ -69,14 +71,15 @@ static int udp_dump_one(struct udp_table *tbl, struct sk_buff *in_skb,
                goto out;
 
        err = inet_sk_diag_fill(sk, NULL, rep, req,
-                          NETLINK_CB(in_skb).pid,
+                          sk_user_ns(NETLINK_CB(in_skb).ssk),
+                          NETLINK_CB(in_skb).portid,
                           nlh->nlmsg_seq, 0, nlh);
        if (err < 0) {
                WARN_ON(err == -EMSGSIZE);
                kfree_skb(rep);
                goto out;
        }
-       err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).pid,
+       err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid,
                              MSG_DONTWAIT);
        if (err > 0)
                err = 0;
index 5728695b54492dc2d275458b8db786f410e94002..4f7fe7270e3703226de121041d1e5e96a5b127df 100644 (file)
@@ -201,6 +201,22 @@ config IPV6_TUNNEL
 
          If unsure, say N.
 
+config IPV6_GRE
+       tristate "IPv6: GRE tunnel"
+       select IPV6_TUNNEL
+       ---help---
+         Tunneling means encapsulating data of one protocol type within
+         another protocol and sending it over a channel that understands the
+         encapsulating protocol. This particular tunneling driver implements
+         GRE (Generic Routing Encapsulation) and at this time allows
+         encapsulating of IPv4 or IPv6 over existing IPv6 infrastructure.
+         This driver is useful if the other endpoint is a Cisco router: Cisco
+         likes GRE much better than the other Linux tunneling driver ("IP
+         tunneling" above). In addition, GRE allows multicast redistribution
+         through the tunnel.
+
+         Saying M here will produce a module called ip6_gre. If unsure, say N.
+
 config IPV6_MULTIPLE_TABLES
        bool "IPv6: Multiple Routing Tables"
        depends on EXPERIMENTAL
index 686934acfac18eac215a17b8d4d01ea5ad03c860..b6d3f79151e28251f3b3b5a062869fa9acb64526 100644 (file)
@@ -36,6 +36,7 @@ obj-$(CONFIG_NETFILTER)       += netfilter/
 
 obj-$(CONFIG_IPV6_SIT) += sit.o
 obj-$(CONFIG_IPV6_TUNNEL) += ip6_tunnel.o
+obj-$(CONFIG_IPV6_GRE) += ip6_gre.o
 
 obj-y += addrconf_core.o exthdrs_core.o
 
index 6bc85f7c31e3c58a01a6d1aa351cd827584fad24..480e68422efb3c0f3ff7267ac89b8fe2b0d42fee 100644 (file)
@@ -127,8 +127,8 @@ static inline void addrconf_sysctl_unregister(struct inet6_dev *idev)
 #endif
 
 #ifdef CONFIG_IPV6_PRIVACY
-static int __ipv6_regen_rndid(struct inet6_dev *idev);
-static int __ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr);
+static void __ipv6_regen_rndid(struct inet6_dev *idev);
+static void __ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr);
 static void ipv6_regen_rndid(unsigned long data);
 #endif
 
@@ -788,10 +788,16 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
                struct in6_addr prefix;
                struct rt6_info *rt;
                struct net *net = dev_net(ifp->idev->dev);
+               struct flowi6 fl6 = {};
+
                ipv6_addr_prefix(&prefix, &ifp->addr, ifp->prefix_len);
-               rt = rt6_lookup(net, &prefix, NULL, ifp->idev->dev->ifindex, 1);
+               fl6.flowi6_oif = ifp->idev->dev->ifindex;
+               fl6.daddr = prefix;
+               rt = (struct rt6_info *)ip6_route_lookup(net, &fl6,
+                                                        RT6_LOOKUP_F_IFACE);
 
-               if (rt && addrconf_is_prefix_route(rt)) {
+               if (rt != net->ipv6.ip6_null_entry &&
+                   addrconf_is_prefix_route(rt)) {
                        if (onlink == 0) {
                                ip6_del_rt(rt);
                                rt = NULL;
@@ -852,16 +858,7 @@ retry:
        }
        in6_ifa_hold(ifp);
        memcpy(addr.s6_addr, ifp->addr.s6_addr, 8);
-       if (__ipv6_try_regen_rndid(idev, tmpaddr) < 0) {
-               spin_unlock_bh(&ifp->lock);
-               write_unlock(&idev->lock);
-               pr_warn("%s: regeneration of randomized interface id failed\n",
-                       __func__);
-               in6_ifa_put(ifp);
-               in6_dev_put(idev);
-               ret = -1;
-               goto out;
-       }
+       __ipv6_try_regen_rndid(idev, tmpaddr);
        memcpy(&addr.s6_addr[8], idev->rndid, 8);
        age = (now - ifp->tstamp) / HZ;
        tmp_valid_lft = min_t(__u32,
@@ -1079,8 +1076,10 @@ static int ipv6_get_saddr_eval(struct net *net,
                break;
        case IPV6_SADDR_RULE_PREFIX:
                /* Rule 8: Use longest matching prefix */
-               score->matchlen = ret = ipv6_addr_diff(&score->ifa->addr,
-                                                      dst->addr);
+               ret = ipv6_addr_diff(&score->ifa->addr, dst->addr);
+               if (ret > score->ifa->prefix_len)
+                       ret = score->ifa->prefix_len;
+               score->matchlen = ret;
                break;
        default:
                ret = 0;
@@ -1093,7 +1092,7 @@ out:
        return ret;
 }
 
-int ipv6_dev_get_saddr(struct net *net, struct net_device *dst_dev,
+int ipv6_dev_get_saddr(struct net *net, const struct net_device *dst_dev,
                       const struct in6_addr *daddr, unsigned int prefs,
                       struct in6_addr *saddr)
 {
@@ -1600,7 +1599,7 @@ static int ipv6_inherit_eui64(u8 *eui, struct inet6_dev *idev)
 
 #ifdef CONFIG_IPV6_PRIVACY
 /* (re)generation of randomized interface identifier (RFC 3041 3.2, 3.5) */
-static int __ipv6_regen_rndid(struct inet6_dev *idev)
+static void __ipv6_regen_rndid(struct inet6_dev *idev)
 {
 regen:
        get_random_bytes(idev->rndid, sizeof(idev->rndid));
@@ -1627,8 +1626,6 @@ regen:
                if ((idev->rndid[2]|idev->rndid[3]|idev->rndid[4]|idev->rndid[5]|idev->rndid[6]|idev->rndid[7]) == 0x00)
                        goto regen;
        }
-
-       return 0;
 }
 
 static void ipv6_regen_rndid(unsigned long data)
@@ -1642,8 +1639,7 @@ static void ipv6_regen_rndid(unsigned long data)
        if (idev->dead)
                goto out;
 
-       if (__ipv6_regen_rndid(idev) < 0)
-               goto out;
+       __ipv6_regen_rndid(idev);
 
        expires = jiffies +
                idev->cnf.temp_prefered_lft * HZ -
@@ -1664,13 +1660,10 @@ out:
        in6_dev_put(idev);
 }
 
-static int __ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr)
+static void  __ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr)
 {
-       int ret = 0;
-
        if (tmpaddr && memcmp(idev->rndid, &tmpaddr->s6_addr[8], 8) == 0)
-               ret = __ipv6_regen_rndid(idev);
-       return ret;
+               __ipv6_regen_rndid(idev);
 }
 #endif
 
@@ -1721,7 +1714,7 @@ static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
        if (table == NULL)
                return NULL;
 
-       write_lock_bh(&table->tb6_lock);
+       read_lock_bh(&table->tb6_lock);
        fn = fib6_locate(&table->tb6_root, pfx, plen, NULL, 0);
        if (!fn)
                goto out;
@@ -1736,7 +1729,7 @@ static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
                break;
        }
 out:
-       write_unlock_bh(&table->tb6_lock);
+       read_unlock_bh(&table->tb6_lock);
        return rt;
 }
 
@@ -3549,12 +3542,12 @@ static inline int inet6_ifaddr_msgsize(void)
 }
 
 static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa,
-                            u32 pid, u32 seq, int event, unsigned int flags)
+                            u32 portid, u32 seq, int event, unsigned int flags)
 {
        struct nlmsghdr  *nlh;
        u32 preferred, valid;
 
-       nlh = nlmsg_put(skb, pid, seq, event, sizeof(struct ifaddrmsg), flags);
+       nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct ifaddrmsg), flags);
        if (nlh == NULL)
                return -EMSGSIZE;
 
@@ -3592,7 +3585,7 @@ static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa,
 }
 
 static int inet6_fill_ifmcaddr(struct sk_buff *skb, struct ifmcaddr6 *ifmca,
-                               u32 pid, u32 seq, int event, u16 flags)
+                               u32 portid, u32 seq, int event, u16 flags)
 {
        struct nlmsghdr  *nlh;
        u8 scope = RT_SCOPE_UNIVERSE;
@@ -3601,7 +3594,7 @@ static int inet6_fill_ifmcaddr(struct sk_buff *skb, struct ifmcaddr6 *ifmca,
        if (ipv6_addr_scope(&ifmca->mca_addr) & IFA_SITE)
                scope = RT_SCOPE_SITE;
 
-       nlh = nlmsg_put(skb, pid, seq, event, sizeof(struct ifaddrmsg), flags);
+       nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct ifaddrmsg), flags);
        if (nlh == NULL)
                return -EMSGSIZE;
 
@@ -3617,7 +3610,7 @@ static int inet6_fill_ifmcaddr(struct sk_buff *skb, struct ifmcaddr6 *ifmca,
 }
 
 static int inet6_fill_ifacaddr(struct sk_buff *skb, struct ifacaddr6 *ifaca,
-                               u32 pid, u32 seq, int event, unsigned int flags)
+                               u32 portid, u32 seq, int event, unsigned int flags)
 {
        struct nlmsghdr  *nlh;
        u8 scope = RT_SCOPE_UNIVERSE;
@@ -3626,7 +3619,7 @@ static int inet6_fill_ifacaddr(struct sk_buff *skb, struct ifacaddr6 *ifaca,
        if (ipv6_addr_scope(&ifaca->aca_addr) & IFA_SITE)
                scope = RT_SCOPE_SITE;
 
-       nlh = nlmsg_put(skb, pid, seq, event, sizeof(struct ifaddrmsg), flags);
+       nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct ifaddrmsg), flags);
        if (nlh == NULL)
                return -EMSGSIZE;
 
@@ -3667,7 +3660,7 @@ static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb,
                        if (++ip_idx < s_ip_idx)
                                continue;
                        err = inet6_fill_ifaddr(skb, ifa,
-                                               NETLINK_CB(cb->skb).pid,
+                                               NETLINK_CB(cb->skb).portid,
                                                cb->nlh->nlmsg_seq,
                                                RTM_NEWADDR,
                                                NLM_F_MULTI);
@@ -3683,7 +3676,7 @@ static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb,
                        if (ip_idx < s_ip_idx)
                                continue;
                        err = inet6_fill_ifmcaddr(skb, ifmca,
-                                                 NETLINK_CB(cb->skb).pid,
+                                                 NETLINK_CB(cb->skb).portid,
                                                  cb->nlh->nlmsg_seq,
                                                  RTM_GETMULTICAST,
                                                  NLM_F_MULTI);
@@ -3698,7 +3691,7 @@ static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb,
                        if (ip_idx < s_ip_idx)
                                continue;
                        err = inet6_fill_ifacaddr(skb, ifaca,
-                                                 NETLINK_CB(cb->skb).pid,
+                                                 NETLINK_CB(cb->skb).portid,
                                                  cb->nlh->nlmsg_seq,
                                                  RTM_GETANYCAST,
                                                  NLM_F_MULTI);
@@ -3820,7 +3813,7 @@ static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr *nlh,
                goto errout_ifa;
        }
 
-       err = inet6_fill_ifaddr(skb, ifa, NETLINK_CB(in_skb).pid,
+       err = inet6_fill_ifaddr(skb, ifa, NETLINK_CB(in_skb).portid,
                                nlh->nlmsg_seq, RTM_NEWADDR, 0);
        if (err < 0) {
                /* -EMSGSIZE implies BUG in inet6_ifaddr_msgsize() */
@@ -3828,7 +3821,7 @@ static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr *nlh,
                kfree_skb(skb);
                goto errout_ifa;
        }
-       err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).pid);
+       err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
 errout_ifa:
        in6_ifa_put(ifa);
 errout:
@@ -4030,14 +4023,14 @@ static int inet6_fill_link_af(struct sk_buff *skb, const struct net_device *dev)
 }
 
 static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev,
-                            u32 pid, u32 seq, int event, unsigned int flags)
+                            u32 portid, u32 seq, int event, unsigned int flags)
 {
        struct net_device *dev = idev->dev;
        struct ifinfomsg *hdr;
        struct nlmsghdr *nlh;
        void *protoinfo;
 
-       nlh = nlmsg_put(skb, pid, seq, event, sizeof(*hdr), flags);
+       nlh = nlmsg_put(skb, portid, seq, event, sizeof(*hdr), flags);
        if (nlh == NULL)
                return -EMSGSIZE;
 
@@ -4095,7 +4088,7 @@ static int inet6_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
                        if (!idev)
                                goto cont;
                        if (inet6_fill_ifinfo(skb, idev,
-                                             NETLINK_CB(cb->skb).pid,
+                                             NETLINK_CB(cb->skb).portid,
                                              cb->nlh->nlmsg_seq,
                                              RTM_NEWLINK, NLM_F_MULTI) <= 0)
                                goto out;
@@ -4143,14 +4136,14 @@ static inline size_t inet6_prefix_nlmsg_size(void)
 }
 
 static int inet6_fill_prefix(struct sk_buff *skb, struct inet6_dev *idev,
-                            struct prefix_info *pinfo, u32 pid, u32 seq,
+                            struct prefix_info *pinfo, u32 portid, u32 seq,
                             int event, unsigned int flags)
 {
        struct prefixmsg *pmsg;
        struct nlmsghdr *nlh;
        struct prefix_cacheinfo ci;
 
-       nlh = nlmsg_put(skb, pid, seq, event, sizeof(*pmsg), flags);
+       nlh = nlmsg_put(skb, portid, seq, event, sizeof(*pmsg), flags);
        if (nlh == NULL)
                return -EMSGSIZE;
 
index eb6a63632d3c1b6b2fab973588760053c4d9e3bb..4be23da32b89c14ef19d5b0b349c244e33b881cd 100644 (file)
@@ -57,7 +57,7 @@ struct net *ip6addrlbl_net(const struct ip6addrlbl_entry *lbl)
 }
 
 /*
- * Default policy table (RFC3484 + extensions)
+ * Default policy table (RFC6724 + extensions)
  *
  * prefix              addr_type       label
  * -------------------------------------------------------------------------
@@ -69,8 +69,12 @@ struct net *ip6addrlbl_net(const struct ip6addrlbl_entry *lbl)
  * fc00::/7            N/A             5               ULA (RFC 4193)
  * 2001::/32           N/A             6               Teredo (RFC 4380)
  * 2001:10::/28                N/A             7               ORCHID (RFC 4843)
+ * fec0::/10           N/A             11              Site-local
+ *                                                     (deprecated by RFC3879)
+ * 3ffe::/16           N/A             12              6bone
  *
  * Note: 0xffffffff is used if we do not have any policies.
+ * Note: Labels for ULA and 6to4 are different from labels listed in RFC6724.
  */
 
 #define IPV6_ADDR_LABEL_DEFAULT        0xffffffffUL
@@ -88,10 +92,18 @@ static const __net_initdata struct ip6addrlbl_init_table
                .prefix = &(struct in6_addr){{{ 0xfc }}},
                .prefixlen = 7,
                .label = 5,
+       },{     /* fec0::/10 */
+               .prefix = &(struct in6_addr){{{ 0xfe, 0xc0 }}},
+               .prefixlen = 10,
+               .label = 11,
        },{     /* 2002::/16 */
                .prefix = &(struct in6_addr){{{ 0x20, 0x02 }}},
                .prefixlen = 16,
                .label = 2,
+       },{     /* 3ffe::/16 */
+               .prefix = &(struct in6_addr){{{ 0x3f, 0xfe }}},
+               .prefixlen = 16,
+               .label = 12,
        },{     /* 2001::/32 */
                .prefix = &(struct in6_addr){{{ 0x20, 0x01 }}},
                .prefixlen = 32,
@@ -470,10 +482,10 @@ static void ip6addrlbl_putmsg(struct nlmsghdr *nlh,
 static int ip6addrlbl_fill(struct sk_buff *skb,
                           struct ip6addrlbl_entry *p,
                           u32 lseq,
-                          u32 pid, u32 seq, int event,
+                          u32 portid, u32 seq, int event,
                           unsigned int flags)
 {
-       struct nlmsghdr *nlh = nlmsg_put(skb, pid, seq, event,
+       struct nlmsghdr *nlh = nlmsg_put(skb, portid, seq, event,
                                         sizeof(struct ifaddrlblmsg), flags);
        if (!nlh)
                return -EMSGSIZE;
@@ -503,7 +515,7 @@ static int ip6addrlbl_dump(struct sk_buff *skb, struct netlink_callback *cb)
                    net_eq(ip6addrlbl_net(p), net)) {
                        if ((err = ip6addrlbl_fill(skb, p,
                                                   ip6addrlbl_table.seq,
-                                                  NETLINK_CB(cb->skb).pid,
+                                                  NETLINK_CB(cb->skb).portid,
                                                   cb->nlh->nlmsg_seq,
                                                   RTM_NEWADDRLABEL,
                                                   NLM_F_MULTI)) <= 0)
@@ -574,7 +586,7 @@ static int ip6addrlbl_get(struct sk_buff *in_skb, struct nlmsghdr* nlh,
        }
 
        err = ip6addrlbl_fill(skb, p, lseq,
-                             NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
+                             NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
                              RTM_NEWADDRLABEL, 0);
 
        ip6addrlbl_put(p);
@@ -585,7 +597,7 @@ static int ip6addrlbl_get(struct sk_buff *in_skb, struct nlmsghdr* nlh,
                goto out;
        }
 
-       err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).pid);
+       err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
 out:
        return err;
 }
index 286acfc21250cd760565b182cbc2d3d4490463dd..24995a93ef8c94b22224dd344a9d474a72d28cc1 100644 (file)
@@ -514,7 +514,7 @@ static struct fib6_node * fib6_add_1(struct fib6_node *root, void *addr,
        ln = node_alloc();
 
        if (!ln)
-               return NULL;
+               return ERR_PTR(-ENOMEM);
        ln->fn_bit = plen;
 
        ln->parent = pn;
@@ -561,7 +561,7 @@ insert_above:
                                node_free(in);
                        if (ln)
                                node_free(ln);
-                       return NULL;
+                       return ERR_PTR(-ENOMEM);
                }
 
                /*
@@ -611,7 +611,7 @@ insert_above:
                ln = node_alloc();
 
                if (!ln)
-                       return NULL;
+                       return ERR_PTR(-ENOMEM);
 
                ln->fn_bit = plen;
 
@@ -777,11 +777,8 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info)
 
        if (IS_ERR(fn)) {
                err = PTR_ERR(fn);
-               fn = NULL;
-       }
-
-       if (!fn)
                goto out;
+       }
 
        pn = fn;
 
@@ -820,15 +817,12 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info)
                                        allow_create, replace_required);
 
                        if (IS_ERR(sn)) {
-                               err = PTR_ERR(sn);
-                               sn = NULL;
-                       }
-                       if (!sn) {
                                /* If it is failed, discard just allocated
                                   root, and then (in st_failure) stale node
                                   in main tree.
                                 */
                                node_free(sfn);
+                               err = PTR_ERR(sn);
                                goto st_failure;
                        }
 
@@ -843,10 +837,8 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info)
 
                        if (IS_ERR(sn)) {
                                err = PTR_ERR(sn);
-                               sn = NULL;
-                       }
-                       if (!sn)
                                goto st_failure;
+                       }
                }
 
                if (!fn->leaf) {
index 9772fbd8a3f5b5c3ce36f1715153f21673ea5acd..90bbefb579435d3f3c4117f738d38366d0421b33 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/seq_file.h>
 #include <linux/slab.h>
 #include <linux/export.h>
+#include <linux/pid_namespace.h>
 
 #include <net/net_namespace.h>
 #include <net/sock.h>
@@ -91,6 +92,8 @@ static struct ip6_flowlabel *fl_lookup(struct net *net, __be32 label)
 static void fl_free(struct ip6_flowlabel *fl)
 {
        if (fl) {
+               if (fl->share == IPV6_FL_S_PROCESS)
+                       put_pid(fl->owner.pid);
                release_net(fl->fl_net);
                kfree(fl->opt);
        }
@@ -394,10 +397,10 @@ fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,
        case IPV6_FL_S_ANY:
                break;
        case IPV6_FL_S_PROCESS:
-               fl->owner = current->pid;
+               fl->owner.pid = get_task_pid(current, PIDTYPE_PID);
                break;
        case IPV6_FL_S_USER:
-               fl->owner = current_euid();
+               fl->owner.uid = current_euid();
                break;
        default:
                err = -EINVAL;
@@ -561,7 +564,10 @@ recheck:
                                err = -EPERM;
                                if (fl1->share == IPV6_FL_S_EXCL ||
                                    fl1->share != fl->share ||
-                                   fl1->owner != fl->owner)
+                                   ((fl1->share == IPV6_FL_S_PROCESS) &&
+                                    (fl1->owner.pid == fl->owner.pid)) ||
+                                   ((fl1->share == IPV6_FL_S_USER) &&
+                                    uid_eq(fl1->owner.uid, fl->owner.uid)))
                                        goto release;
 
                                err = -EINVAL;
@@ -621,6 +627,7 @@ done:
 
 struct ip6fl_iter_state {
        struct seq_net_private p;
+       struct pid_namespace *pid_ns;
        int bucket;
 };
 
@@ -699,6 +706,7 @@ static void ip6fl_seq_stop(struct seq_file *seq, void *v)
 
 static int ip6fl_seq_show(struct seq_file *seq, void *v)
 {
+       struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
        if (v == SEQ_START_TOKEN)
                seq_printf(seq, "%-5s %-1s %-6s %-6s %-6s %-8s %-32s %s\n",
                           "Label", "S", "Owner", "Users", "Linger", "Expires", "Dst", "Opt");
@@ -708,7 +716,11 @@ static int ip6fl_seq_show(struct seq_file *seq, void *v)
                           "%05X %-1d %-6d %-6d %-6ld %-8ld %pi6 %-4d\n",
                           (unsigned int)ntohl(fl->label),
                           fl->share,
-                          (int)fl->owner,
+                          ((fl->share == IPV6_FL_S_PROCESS) ?
+                           pid_nr_ns(fl->owner.pid, state->pid_ns) :
+                           ((fl->share == IPV6_FL_S_USER) ?
+                            from_kuid_munged(seq_user_ns(seq), fl->owner.uid) :
+                            0)),
                           atomic_read(&fl->users),
                           fl->linger/HZ,
                           (long)(fl->expires - jiffies)/HZ,
@@ -727,8 +739,29 @@ static const struct seq_operations ip6fl_seq_ops = {
 
 static int ip6fl_seq_open(struct inode *inode, struct file *file)
 {
-       return seq_open_net(inode, file, &ip6fl_seq_ops,
-                           sizeof(struct ip6fl_iter_state));
+       struct seq_file *seq;
+       struct ip6fl_iter_state *state;
+       int err;
+
+       err = seq_open_net(inode, file, &ip6fl_seq_ops,
+                          sizeof(struct ip6fl_iter_state));
+
+       if (!err) {
+               seq = file->private_data;
+               state = ip6fl_seq_private(seq);
+               rcu_read_lock();
+               state->pid_ns = get_pid_ns(task_active_pid_ns(current));
+               rcu_read_unlock();
+       }
+       return err;
+}
+
+static int ip6fl_seq_release(struct inode *inode, struct file *file)
+{
+       struct seq_file *seq = file->private_data;
+       struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
+       put_pid_ns(state->pid_ns);
+       return seq_release_net(inode, file);
 }
 
 static const struct file_operations ip6fl_seq_fops = {
@@ -736,7 +769,7 @@ static const struct file_operations ip6fl_seq_fops = {
        .open           =       ip6fl_seq_open,
        .read           =       seq_read,
        .llseek         =       seq_lseek,
-       .release        =       seq_release_net,
+       .release        =       ip6fl_seq_release,
 };
 
 static int __net_init ip6_flowlabel_proc_init(struct net *net)
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
new file mode 100644 (file)
index 0000000..0185679
--- /dev/null
@@ -0,0 +1,1770 @@
+/*
+ *     GRE over IPv6 protocol decoder.
+ *
+ *     Authors: Dmitry Kozlov (xeb@mail.ru)
+ *
+ *     This program is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     as published by the Free Software Foundation; either version
+ *     2 of the License, or (at your option) any later version.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/capability.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/in.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/if_arp.h>
+#include <linux/mroute.h>
+#include <linux/init.h>
+#include <linux/in6.h>
+#include <linux/inetdevice.h>
+#include <linux/igmp.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/hash.h>
+#include <linux/if_tunnel.h>
+#include <linux/ip6_tunnel.h>
+
+#include <net/sock.h>
+#include <net/ip.h>
+#include <net/icmp.h>
+#include <net/protocol.h>
+#include <net/addrconf.h>
+#include <net/arp.h>
+#include <net/checksum.h>
+#include <net/dsfield.h>
+#include <net/inet_ecn.h>
+#include <net/xfrm.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
+#include <net/rtnetlink.h>
+
+#include <net/ipv6.h>
+#include <net/ip6_fib.h>
+#include <net/ip6_route.h>
+#include <net/ip6_tunnel.h>
+
+
+static bool log_ecn_error = true;
+module_param(log_ecn_error, bool, 0644);
+MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
+
+#define IPV6_TCLASS_MASK (IPV6_FLOWINFO_MASK & ~IPV6_FLOWLABEL_MASK)
+#define IPV6_TCLASS_SHIFT 20
+
+#define HASH_SIZE_SHIFT  5
+#define HASH_SIZE (1 << HASH_SIZE_SHIFT)
+
+static int ip6gre_net_id __read_mostly;
+struct ip6gre_net {
+       struct ip6_tnl __rcu *tunnels[4][HASH_SIZE];
+
+       struct net_device *fb_tunnel_dev;
+};
+
+static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
+static int ip6gre_tunnel_init(struct net_device *dev);
+static void ip6gre_tunnel_setup(struct net_device *dev);
+static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
+static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu);
+
+/* Tunnel hash table */
+
+/*
+   4 hash tables:
+
+   3: (remote,local)
+   2: (remote,*)
+   1: (*,local)
+   0: (*,*)
+
+   We require exact key match i.e. if a key is present in packet
+   it will match only tunnel with the same key; if it is not present,
+   it will match only keyless tunnel.
+
+   All keysless packets, if not matched configured keyless tunnels
+   will match fallback tunnel.
+ */
+
+#define HASH_KEY(key) (((__force u32)key^((__force u32)key>>4))&(HASH_SIZE - 1))
+static u32 HASH_ADDR(const struct in6_addr *addr)
+{
+       u32 hash = ipv6_addr_hash(addr);
+
+       return hash_32(hash, HASH_SIZE_SHIFT);
+}
+
+#define tunnels_r_l    tunnels[3]
+#define tunnels_r      tunnels[2]
+#define tunnels_l      tunnels[1]
+#define tunnels_wc     tunnels[0]
+/*
+ * Locking : hash tables are protected by RCU and RTNL
+ */
+
+#define for_each_ip_tunnel_rcu(start) \
+       for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
+
+/* often modified stats are per cpu, other are shared (netdev->stats) */
+struct pcpu_tstats {
+       u64     rx_packets;
+       u64     rx_bytes;
+       u64     tx_packets;
+       u64     tx_bytes;
+       struct u64_stats_sync   syncp;
+};
+
+static struct rtnl_link_stats64 *ip6gre_get_stats64(struct net_device *dev,
+               struct rtnl_link_stats64 *tot)
+{
+       int i;
+
+       for_each_possible_cpu(i) {
+               const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
+               u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
+               unsigned int start;
+
+               do {
+                       start = u64_stats_fetch_begin_bh(&tstats->syncp);
+                       rx_packets = tstats->rx_packets;
+                       tx_packets = tstats->tx_packets;
+                       rx_bytes = tstats->rx_bytes;
+                       tx_bytes = tstats->tx_bytes;
+               } while (u64_stats_fetch_retry_bh(&tstats->syncp, start));
+
+               tot->rx_packets += rx_packets;
+               tot->tx_packets += tx_packets;
+               tot->rx_bytes   += rx_bytes;
+               tot->tx_bytes   += tx_bytes;
+       }
+
+       tot->multicast = dev->stats.multicast;
+       tot->rx_crc_errors = dev->stats.rx_crc_errors;
+       tot->rx_fifo_errors = dev->stats.rx_fifo_errors;
+       tot->rx_length_errors = dev->stats.rx_length_errors;
+       tot->rx_frame_errors = dev->stats.rx_frame_errors;
+       tot->rx_errors = dev->stats.rx_errors;
+
+       tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
+       tot->tx_carrier_errors = dev->stats.tx_carrier_errors;
+       tot->tx_dropped = dev->stats.tx_dropped;
+       tot->tx_aborted_errors = dev->stats.tx_aborted_errors;
+       tot->tx_errors = dev->stats.tx_errors;
+
+       return tot;
+}
+
+/* Given src, dst and key, find appropriate for input tunnel. */
+
+static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev,
+               const struct in6_addr *remote, const struct in6_addr *local,
+               __be32 key, __be16 gre_proto)
+{
+       struct net *net = dev_net(dev);
+       int link = dev->ifindex;
+       unsigned int h0 = HASH_ADDR(remote);
+       unsigned int h1 = HASH_KEY(key);
+       struct ip6_tnl *t, *cand = NULL;
+       struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
+       int dev_type = (gre_proto == htons(ETH_P_TEB)) ?
+                      ARPHRD_ETHER : ARPHRD_IP6GRE;
+       int score, cand_score = 4;
+
+       for_each_ip_tunnel_rcu(ign->tunnels_r_l[h0 ^ h1]) {
+               if (!ipv6_addr_equal(local, &t->parms.laddr) ||
+                   !ipv6_addr_equal(remote, &t->parms.raddr) ||
+                   key != t->parms.i_key ||
+                   !(t->dev->flags & IFF_UP))
+                       continue;
+
+               if (t->dev->type != ARPHRD_IP6GRE &&
+                   t->dev->type != dev_type)
+                       continue;
+
+               score = 0;
+               if (t->parms.link != link)
+                       score |= 1;
+               if (t->dev->type != dev_type)
+                       score |= 2;
+               if (score == 0)
+                       return t;
+
+               if (score < cand_score) {
+                       cand = t;
+                       cand_score = score;
+               }
+       }
+
+       for_each_ip_tunnel_rcu(ign->tunnels_r[h0 ^ h1]) {
+               if (!ipv6_addr_equal(remote, &t->parms.raddr) ||
+                   key != t->parms.i_key ||
+                   !(t->dev->flags & IFF_UP))
+                       continue;
+
+               if (t->dev->type != ARPHRD_IP6GRE &&
+                   t->dev->type != dev_type)
+                       continue;
+
+               score = 0;
+               if (t->parms.link != link)
+                       score |= 1;
+               if (t->dev->type != dev_type)
+                       score |= 2;
+               if (score == 0)
+                       return t;
+
+               if (score < cand_score) {
+                       cand = t;
+                       cand_score = score;
+               }
+       }
+
+       for_each_ip_tunnel_rcu(ign->tunnels_l[h1]) {
+               if ((!ipv6_addr_equal(local, &t->parms.laddr) &&
+                         (!ipv6_addr_equal(local, &t->parms.raddr) ||
+                                !ipv6_addr_is_multicast(local))) ||
+                   key != t->parms.i_key ||
+                   !(t->dev->flags & IFF_UP))
+                       continue;
+
+               if (t->dev->type != ARPHRD_IP6GRE &&
+                   t->dev->type != dev_type)
+                       continue;
+
+               score = 0;
+               if (t->parms.link != link)
+                       score |= 1;
+               if (t->dev->type != dev_type)
+                       score |= 2;
+               if (score == 0)
+                       return t;
+
+               if (score < cand_score) {
+                       cand = t;
+                       cand_score = score;
+               }
+       }
+
+       for_each_ip_tunnel_rcu(ign->tunnels_wc[h1]) {
+               if (t->parms.i_key != key ||
+                   !(t->dev->flags & IFF_UP))
+                       continue;
+
+               if (t->dev->type != ARPHRD_IP6GRE &&
+                   t->dev->type != dev_type)
+                       continue;
+
+               score = 0;
+               if (t->parms.link != link)
+                       score |= 1;
+               if (t->dev->type != dev_type)
+                       score |= 2;
+               if (score == 0)
+                       return t;
+
+               if (score < cand_score) {
+                       cand = t;
+                       cand_score = score;
+               }
+       }
+
+       if (cand != NULL)
+               return cand;
+
+       dev = ign->fb_tunnel_dev;
+       if (dev->flags & IFF_UP)
+               return netdev_priv(dev);
+
+       return NULL;
+}
+
+static struct ip6_tnl __rcu **__ip6gre_bucket(struct ip6gre_net *ign,
+               const struct __ip6_tnl_parm *p)
+{
+       const struct in6_addr *remote = &p->raddr;
+       const struct in6_addr *local = &p->laddr;
+       unsigned int h = HASH_KEY(p->i_key);
+       int prio = 0;
+
+       if (!ipv6_addr_any(local))
+               prio |= 1;
+       if (!ipv6_addr_any(remote) && !ipv6_addr_is_multicast(remote)) {
+               prio |= 2;
+               h ^= HASH_ADDR(remote);
+       }
+
+       return &ign->tunnels[prio][h];
+}
+
+static inline struct ip6_tnl __rcu **ip6gre_bucket(struct ip6gre_net *ign,
+               const struct ip6_tnl *t)
+{
+       return __ip6gre_bucket(ign, &t->parms);
+}
+
+static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t)
+{
+       struct ip6_tnl __rcu **tp = ip6gre_bucket(ign, t);
+
+       rcu_assign_pointer(t->next, rtnl_dereference(*tp));
+       rcu_assign_pointer(*tp, t);
+}
+
+static void ip6gre_tunnel_unlink(struct ip6gre_net *ign, struct ip6_tnl *t)
+{
+       struct ip6_tnl __rcu **tp;
+       struct ip6_tnl *iter;
+
+       for (tp = ip6gre_bucket(ign, t);
+            (iter = rtnl_dereference(*tp)) != NULL;
+            tp = &iter->next) {
+               if (t == iter) {
+                       rcu_assign_pointer(*tp, t->next);
+                       break;
+               }
+       }
+}
+
+/* ip6gre_tunnel_find - exact-match lookup of a configured tunnel
+ *
+ * Unlike the receive-path lookup, this requires local address, remote
+ * address, key, link and device @type to all match exactly.  Caller
+ * must hold RTNL (rtnl_dereference).
+ *
+ * Returns the matching tunnel, or NULL if none exists.
+ */
+static struct ip6_tnl *ip6gre_tunnel_find(struct net *net,
+                                          const struct __ip6_tnl_parm *parms,
+                                          int type)
+{
+       const struct in6_addr *remote = &parms->raddr;
+       const struct in6_addr *local = &parms->laddr;
+       __be32 key = parms->i_key;
+       int link = parms->link;
+       struct ip6_tnl *t;
+       struct ip6_tnl __rcu **tp;
+       struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
+
+       for (tp = __ip6gre_bucket(ign, parms);
+            (t = rtnl_dereference(*tp)) != NULL;
+            tp = &t->next)
+               if (ipv6_addr_equal(local, &t->parms.laddr) &&
+                   ipv6_addr_equal(remote, &t->parms.raddr) &&
+                   key == t->parms.i_key &&
+                   link == t->parms.link &&
+                   type == t->dev->type)
+                       break;
+
+       return t;
+}
+
+/* ip6gre_tunnel_locate - find a tunnel matching @parms, optionally
+ * creating and registering a new device when none exists.
+ *
+ * @create: when non-zero, allocate a new net_device on lookup miss.
+ *
+ * Returns the (possibly new) tunnel, or NULL on lookup miss with
+ * !@create or on allocation/registration failure.
+ */
+static struct ip6_tnl *ip6gre_tunnel_locate(struct net *net,
+               const struct __ip6_tnl_parm *parms, int create)
+{
+       struct ip6_tnl *t, *nt;
+       struct net_device *dev;
+       char name[IFNAMSIZ];
+       struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
+
+       t = ip6gre_tunnel_find(net, parms, ARPHRD_IP6GRE);
+       if (t || !create)
+               return t;
+
+       /* No explicit name requested: let the core pick ip6gre0, ... */
+       if (parms->name[0])
+               strlcpy(name, parms->name, IFNAMSIZ);
+       else
+               strcpy(name, "ip6gre%d");
+
+       dev = alloc_netdev(sizeof(*t), name, ip6gre_tunnel_setup);
+       if (!dev)
+               return NULL;
+
+       dev_net_set(dev, net);
+
+       nt = netdev_priv(dev);
+       nt->parms = *parms;
+       dev->rtnl_link_ops = &ip6gre_link_ops;
+
+       nt->dev = dev;
+       ip6gre_tnl_link_config(nt, 1);
+
+       if (register_netdevice(dev) < 0)
+               goto failed_free;
+
+       /* Can use a lockless transmit, unless we generate output sequences */
+       if (!(nt->parms.o_flags & GRE_SEQ))
+               dev->features |= NETIF_F_LLTX;
+
+       /* Reference dropped again in ip6gre_tunnel_uninit() */
+       dev_hold(dev);
+       ip6gre_tunnel_link(ign, nt);
+       return nt;
+
+failed_free:
+       free_netdev(dev);
+       return NULL;
+}
+
+/* ndo_uninit: remove the tunnel from its per-netns hash table and drop
+ * the device reference taken when the tunnel was linked in.
+ */
+static void ip6gre_tunnel_uninit(struct net_device *dev)
+{
+       struct ip6_tnl *tunnel = netdev_priv(dev);
+       struct ip6gre_net *ign;
+
+       ign = net_generic(dev_net(dev), ip6gre_net_id);
+       ip6gre_tunnel_unlink(ign, tunnel);
+       dev_put(dev);
+}
+
+
+/* ICMPv6 error handler for GRE-over-IPv6: an ICMPv6 error arrived whose
+ * payload contains one of our GRE packets.  Locate the originating
+ * tunnel from the embedded outer header and react per error type.
+ */
+static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+               u8 type, u8 code, int offset, __be32 info)
+{
+       const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)skb->data;
+       __be16 *p = (__be16 *)(skb->data + offset);
+       int grehlen = offset + 4;
+       struct ip6_tnl *t;
+       __be16 flags;
+
+       /* Work out how much of the embedded GRE header we need: the key
+        * (if present) sits after the optional checksum field.
+        */
+       flags = p[0];
+       if (flags&(GRE_CSUM|GRE_KEY|GRE_SEQ|GRE_ROUTING|GRE_VERSION)) {
+               if (flags&(GRE_VERSION|GRE_ROUTING))
+                       return;
+               if (flags&GRE_KEY) {
+                       grehlen += 4;
+                       if (flags&GRE_CSUM)
+                               grehlen += 4;
+               }
+       }
+
+       /* If only 8 bytes returned, keyed message will be dropped here */
+       if (!pskb_may_pull(skb, grehlen))
+               return;
+       /* pskb_may_pull() may reallocate skb->data: reload pointers */
+       ipv6h = (const struct ipv6hdr *)skb->data;
+       p = (__be16 *)(skb->data + offset);
+
+       t = ip6gre_tunnel_lookup(skb->dev, &ipv6h->daddr, &ipv6h->saddr,
+                               flags & GRE_KEY ?
+                               *(((__be32 *)p) + (grehlen / 4) - 1) : 0,
+                               p[1]);
+       if (t == NULL)
+               return;
+
+       switch (type) {
+               __u32 teli;
+               struct ipv6_tlv_tnl_enc_lim *tel;
+               __u32 mtu;
+       case ICMPV6_DEST_UNREACH:
+               net_warn_ratelimited("%s: Path to destination invalid or inactive!\n",
+                                    t->parms.name);
+               break;
+       case ICMPV6_TIME_EXCEED:
+               if (code == ICMPV6_EXC_HOPLIMIT) {
+                       net_warn_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
+                                            t->parms.name);
+               }
+               break;
+       case ICMPV6_PARAMPROB:
+               /* Distinguish "bad encap-limit option" from other
+                * header-field problems via the pointed-at offset.
+                */
+               teli = 0;
+               if (code == ICMPV6_HDR_FIELD)
+                       teli = ip6_tnl_parse_tlv_enc_lim(skb, skb->data);
+
+               if (teli && teli == info - 2) {
+                       tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
+                       if (tel->encap_limit == 0) {
+                               net_warn_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n",
+                                                    t->parms.name);
+                       }
+               } else {
+                       net_warn_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
+                                            t->parms.name);
+               }
+               break;
+       case ICMPV6_PKT_TOOBIG:
+               /* Shrink the tunnel MTU, but never below IPV6_MIN_MTU */
+               mtu = info - offset;
+               if (mtu < IPV6_MIN_MTU)
+                       mtu = IPV6_MIN_MTU;
+               t->dev->mtu = mtu;
+               break;
+       }
+
+       /* Error-rate bookkeeping consumed by the transmit path */
+       if (time_before(jiffies, t->err_time + IP6TUNNEL_ERR_TIMEO))
+               t->err_count++;
+       else
+               t->err_count = 1;
+       t->err_time = jiffies;
+}
+
+/* Receive path: decapsulate a GRE-over-IPv6 packet and hand the inner
+ * payload to the stack via the matching tunnel device.  Sends an ICMPv6
+ * port-unreachable when no tunnel matches.  Always consumes @skb and
+ * returns 0.
+ */
+static int ip6gre_rcv(struct sk_buff *skb)
+{
+       const struct ipv6hdr *ipv6h;
+       u8     *h;
+       __be16    flags;
+       __sum16   csum = 0;
+       __be32 key = 0;
+       u32    seqno = 0;
+       struct ip6_tnl *tunnel;
+       int    offset = 4;
+       __be16 gre_proto;
+       int err;
+
+       if (!pskb_may_pull(skb, sizeof(struct in6_addr)))
+               goto drop;
+
+       ipv6h = ipv6_hdr(skb);
+       h = skb->data;
+       flags = *(__be16 *)h;
+
+       /* Parse the optional GRE fields (RFC 2784/2890); @offset ends up
+        * pointing past the full GRE header.
+        */
+       if (flags&(GRE_CSUM|GRE_KEY|GRE_ROUTING|GRE_SEQ|GRE_VERSION)) {
+               /* - Version must be 0.
+                  - We do not support routing headers.
+                */
+               if (flags&(GRE_VERSION|GRE_ROUTING))
+                       goto drop;
+
+               if (flags&GRE_CSUM) {
+                       switch (skb->ip_summed) {
+                       case CHECKSUM_COMPLETE:
+                               csum = csum_fold(skb->csum);
+                               if (!csum)
+                                       break;
+                               /* fall through */
+                       case CHECKSUM_NONE:
+                               skb->csum = 0;
+                               csum = __skb_checksum_complete(skb);
+                               skb->ip_summed = CHECKSUM_COMPLETE;
+                       }
+                       offset += 4;
+               }
+               if (flags&GRE_KEY) {
+                       key = *(__be32 *)(h + offset);
+                       offset += 4;
+               }
+               if (flags&GRE_SEQ) {
+                       seqno = ntohl(*(__be32 *)(h + offset));
+                       offset += 4;
+               }
+       }
+
+       gre_proto = *(__be16 *)(h + 2);
+
+       tunnel = ip6gre_tunnel_lookup(skb->dev,
+                                         &ipv6h->saddr, &ipv6h->daddr, key,
+                                         gre_proto);
+       if (tunnel) {
+               struct pcpu_tstats *tstats;
+
+               if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
+                       goto drop;
+
+               if (!ip6_tnl_rcv_ctl(tunnel, &ipv6h->daddr, &ipv6h->saddr)) {
+                       tunnel->dev->stats.rx_dropped++;
+                       goto drop;
+               }
+
+               secpath_reset(skb);
+
+               skb->protocol = gre_proto;
+               /* WCCP version 1 and 2 protocol decoding.
+                * - Change protocol to IP
+                * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header
+                */
+               if (flags == 0 && gre_proto == htons(ETH_P_WCCP)) {
+                       skb->protocol = htons(ETH_P_IP);
+                       if ((*(h + offset) & 0xF0) != 0x40)
+                               offset += 4;
+               }
+
+               skb->mac_header = skb->network_header;
+               __pskb_pull(skb, offset);
+               skb_postpull_rcsum(skb, skb_transport_header(skb), offset);
+               skb->pkt_type = PACKET_HOST;
+
+               /* Checksum must be present iff the tunnel expects one */
+               if (((flags&GRE_CSUM) && csum) ||
+                   (!(flags&GRE_CSUM) && tunnel->parms.i_flags&GRE_CSUM)) {
+                       tunnel->dev->stats.rx_crc_errors++;
+                       tunnel->dev->stats.rx_errors++;
+                       goto drop;
+               }
+               /* Drop out-of-order or missing sequence numbers */
+               if (tunnel->parms.i_flags&GRE_SEQ) {
+                       if (!(flags&GRE_SEQ) ||
+                           (tunnel->i_seqno &&
+                                       (s32)(seqno - tunnel->i_seqno) < 0)) {
+                               tunnel->dev->stats.rx_fifo_errors++;
+                               tunnel->dev->stats.rx_errors++;
+                               goto drop;
+                       }
+                       tunnel->i_seqno = seqno + 1;
+               }
+
+               /* Warning: All skb pointers will be invalidated! */
+               if (tunnel->dev->type == ARPHRD_ETHER) {
+                       if (!pskb_may_pull(skb, ETH_HLEN)) {
+                               tunnel->dev->stats.rx_length_errors++;
+                               tunnel->dev->stats.rx_errors++;
+                               goto drop;
+                       }
+
+                       ipv6h = ipv6_hdr(skb);
+                       skb->protocol = eth_type_trans(skb, tunnel->dev);
+                       skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
+               }
+
+               __skb_tunnel_rx(skb, tunnel->dev);
+
+               skb_reset_network_header(skb);
+
+               err = IP6_ECN_decapsulate(ipv6h, skb);
+               if (unlikely(err)) {
+                       if (log_ecn_error)
+                               net_info_ratelimited("non-ECT from %pI6 with dsfield=%#x\n",
+                                                    &ipv6h->saddr,
+                                                    ipv6_get_dsfield(ipv6h));
+                       if (err > 1) {
+                               ++tunnel->dev->stats.rx_frame_errors;
+                               ++tunnel->dev->stats.rx_errors;
+                               goto drop;
+                       }
+               }
+
+               tstats = this_cpu_ptr(tunnel->dev->tstats);
+               u64_stats_update_begin(&tstats->syncp);
+               tstats->rx_packets++;
+               tstats->rx_bytes += skb->len;
+               u64_stats_update_end(&tstats->syncp);
+
+               netif_rx(skb);
+
+               return 0;
+       }
+       icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
+
+drop:
+       kfree_skb(skb);
+       return 0;
+}
+
+/* Scratch holder for the IPv6 destination-options header carrying the
+ * Tunnel Encapsulation Limit option; dst_opt is the 8-byte raw option
+ * buffer that ops.dst0opt points into (see init_tel_txopt()).
+ */
+struct ipv6_tel_txoption {
+       struct ipv6_txoptions ops;
+       __u8 dst_opt[8];
+};
+
+/* Build the 8-byte destination-options header containing a Tunnel
+ * Encapsulation Limit option with value @encap_limit, padded with PadN.
+ * (Bytes 0-1, the option-header next-header/length fields, are filled
+ * in later by ipv6_push_nfrag_opts().)
+ */
+static void init_tel_txopt(struct ipv6_tel_txoption *opt, __u8 encap_limit)
+{
+       memset(opt, 0, sizeof(struct ipv6_tel_txoption));
+
+       opt->dst_opt[2] = IPV6_TLV_TNL_ENCAP_LIMIT;     /* option type */
+       opt->dst_opt[3] = 1;                            /* option length */
+       opt->dst_opt[4] = encap_limit;                  /* limit value */
+       opt->dst_opt[5] = IPV6_TLV_PADN;                /* PadN filler */
+       opt->dst_opt[6] = 1;
+
+       opt->ops.dst0opt = (struct ipv6_opt_hdr *) opt->dst_opt;
+       opt->ops.opt_nflen = 8;
+}
+
+/* ip6gre_xmit2 - perform the actual GRE-in-IPv6 encapsulation and send
+ * @skb:         packet to encapsulate
+ * @dev:         tunnel net_device
+ * @dsfield:     traffic class to encode in the outer IPv6 header
+ * @fl6:         flow template (source address etc. pre-filled by caller)
+ * @encap_limit: tunnel encapsulation limit, or < 0 to omit the option
+ * @pmtu:        out parameter; valid when -EMSGSIZE is returned
+ *
+ * Returns 0 on success, -EMSGSIZE when the packet exceeds the path MTU,
+ * or another negative errno on routing/link failure.  The skb is NOT
+ * freed on error; the caller owns it.
+ */
+static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
+                        struct net_device *dev,
+                        __u8 dsfield,
+                        struct flowi6 *fl6,
+                        int encap_limit,
+                        __u32 *pmtu)
+{
+       struct net *net = dev_net(dev);
+       struct ip6_tnl *tunnel = netdev_priv(dev);
+       struct net_device *tdev;    /* Device to other host */
+       struct ipv6hdr  *ipv6h;     /* Our new IP header */
+       unsigned int max_headroom = 0; /* The extra header space needed */
+       int    gre_hlen;
+       struct ipv6_tel_txoption opt;
+       int    mtu;
+       struct dst_entry *dst = NULL, *ndst = NULL;
+       struct net_device_stats *stats = &tunnel->dev->stats;
+       int err = -1;
+       u8 proto;
+       int pkt_len;
+       struct sk_buff *new_skb;
+
+       if (dev->type == ARPHRD_ETHER)
+               IPCB(skb)->flags = 0;
+
+       if (dev->header_ops && dev->type == ARPHRD_IP6GRE) {
+               /* Header was pre-built by ip6gre_header(); take the outer
+                * destination from it instead of the tunnel parameters.
+                */
+               gre_hlen = 0;
+               ipv6h = (struct ipv6hdr *)skb->data;
+               fl6->daddr = ipv6h->daddr;
+       } else {
+               gre_hlen = tunnel->hlen;
+               fl6->daddr = tunnel->parms.raddr;
+       }
+
+       if (!fl6->flowi6_mark)
+               dst = ip6_tnl_dst_check(tunnel);
+
+       if (!dst) {
+               ndst = ip6_route_output(net, NULL, fl6);
+
+               if (ndst->error)
+                       goto tx_err_link_failure;
+               ndst = xfrm_lookup(net, ndst, flowi6_to_flowi(fl6), NULL, 0);
+               if (IS_ERR(ndst)) {
+                       err = PTR_ERR(ndst);
+                       ndst = NULL;
+                       goto tx_err_link_failure;
+               }
+               dst = ndst;
+       }
+
+       tdev = dst->dev;
+
+       if (tdev == dev) {
+               stats->collisions++;
+               net_warn_ratelimited("%s: Local routing loop detected!\n",
+                                    tunnel->parms.name);
+               goto tx_err_dst_release;
+       }
+
+       mtu = dst_mtu(dst) - sizeof(*ipv6h);
+       if (encap_limit >= 0) {
+               /* Reserve room for the 8-byte destination-options header
+                * carrying the tunnel encapsulation limit.
+                */
+               max_headroom += 8;
+               mtu -= 8;
+       }
+       if (mtu < IPV6_MIN_MTU)
+               mtu = IPV6_MIN_MTU;
+       if (skb_dst(skb))
+               skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
+       if (skb->len > mtu) {
+               *pmtu = mtu;
+               err = -EMSGSIZE;
+               goto tx_err_dst_release;
+       }
+
+       if (tunnel->err_count > 0) {
+               if (time_before(jiffies,
+                               tunnel->err_time + IP6TUNNEL_ERR_TIMEO)) {
+                       tunnel->err_count--;
+
+                       dst_link_failure(skb);
+               } else
+                       tunnel->err_count = 0;
+       }
+
+       /* Accumulate rather than overwrite so the encap-limit option
+        * space added above is preserved.  (Previously this assignment
+        * discarded it, and the earlier "max_headroom += 8" read the
+        * variable before it was ever initialized.)
+        */
+       max_headroom += LL_RESERVED_SPACE(tdev) + gre_hlen + dst->header_len;
+
+       if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
+           (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
+               new_skb = skb_realloc_headroom(skb, max_headroom);
+               if (max_headroom > dev->needed_headroom)
+                       dev->needed_headroom = max_headroom;
+               if (!new_skb)
+                       goto tx_err_dst_release;
+
+               if (skb->sk)
+                       skb_set_owner_w(new_skb, skb->sk);
+               consume_skb(skb);
+               skb = new_skb;
+       }
+
+       skb_dst_drop(skb);
+
+       if (fl6->flowi6_mark) {
+               skb_dst_set(skb, dst);
+               ndst = NULL;
+       } else {
+               skb_dst_set_noref(skb, dst);
+       }
+
+       skb->transport_header = skb->network_header;
+
+       proto = NEXTHDR_GRE;
+       if (encap_limit >= 0) {
+               init_tel_txopt(&opt, encap_limit);
+               ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL);
+       }
+
+       skb_push(skb, gre_hlen);
+       skb_reset_network_header(skb);
+
+       /*
+        *      Push down and install the IP header.
+        */
+       ipv6h = ipv6_hdr(skb);
+       *(__be32 *)ipv6h = fl6->flowlabel | htonl(0x60000000);
+       dsfield = INET_ECN_encapsulate(0, dsfield);
+       ipv6_change_dsfield(ipv6h, ~INET_ECN_MASK, dsfield);
+       ipv6h->hop_limit = tunnel->parms.hop_limit;
+       ipv6h->nexthdr = proto;
+       ipv6h->saddr = fl6->saddr;
+       ipv6h->daddr = fl6->daddr;
+
+       /* GRE base header: flags, then the inner protocol */
+       ((__be16 *)(ipv6h + 1))[0] = tunnel->parms.o_flags;
+       ((__be16 *)(ipv6h + 1))[1] = (dev->type == ARPHRD_ETHER) ?
+                                  htons(ETH_P_TEB) : skb->protocol;
+
+       /* Optional fields are written back-to-front from the end of the
+        * GRE header: seq, then key, then checksum.
+        */
+       if (tunnel->parms.o_flags&(GRE_KEY|GRE_CSUM|GRE_SEQ)) {
+               __be32 *ptr = (__be32 *)(((u8 *)ipv6h) + tunnel->hlen - 4);
+
+               if (tunnel->parms.o_flags&GRE_SEQ) {
+                       ++tunnel->o_seqno;
+                       *ptr = htonl(tunnel->o_seqno);
+                       ptr--;
+               }
+               if (tunnel->parms.o_flags&GRE_KEY) {
+                       *ptr = tunnel->parms.o_key;
+                       ptr--;
+               }
+               if (tunnel->parms.o_flags&GRE_CSUM) {
+                       *ptr = 0;
+                       *(__sum16 *)ptr = ip_compute_csum((void *)(ipv6h+1),
+                               skb->len - sizeof(struct ipv6hdr));
+               }
+       }
+
+       nf_reset(skb);
+       pkt_len = skb->len;
+       err = ip6_local_out(skb);
+
+       if (net_xmit_eval(err) == 0) {
+               struct pcpu_tstats *tstats = this_cpu_ptr(tunnel->dev->tstats);
+
+               tstats->tx_bytes += pkt_len;
+               tstats->tx_packets++;
+       } else {
+               stats->tx_errors++;
+               stats->tx_aborted_errors++;
+       }
+
+       if (ndst)
+               ip6_tnl_dst_store(tunnel, ndst);
+
+       return 0;
+tx_err_link_failure:
+       stats->tx_carrier_errors++;
+       dst_link_failure(skb);
+tx_err_dst_release:
+       dst_release(ndst);
+       return err;
+}
+
+/* Transmit an IPv4 packet through the tunnel: derive the flow template
+ * and traffic class from the inner IPv4 header, then hand off to
+ * ip6gre_xmit2().  On -EMSGSIZE, report the path MTU back to the sender
+ * via ICMP fragmentation-needed.  Returns 0 on success, -1 on error.
+ */
+static inline int ip6gre_xmit_ipv4(struct sk_buff *skb, struct net_device *dev)
+{
+       struct ip6_tnl *t = netdev_priv(dev);
+       const struct iphdr  *iph = ip_hdr(skb);
+       int encap_limit = -1;
+       struct flowi6 fl6;
+       __u8 dsfield;
+       __u32 mtu;
+       int err;
+
+       if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
+               encap_limit = t->parms.encap_limit;
+
+       memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
+       fl6.flowi6_proto = IPPROTO_IPIP;
+
+       dsfield = ipv4_get_dsfield(iph);
+
+       /* Optionally propagate the inner TOS / fwmark to the outer flow */
+       if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
+               fl6.flowlabel |= htonl((__u32)iph->tos << IPV6_TCLASS_SHIFT)
+                                         & IPV6_TCLASS_MASK;
+       if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
+               fl6.flowi6_mark = skb->mark;
+
+       err = ip6gre_xmit2(skb, dev, dsfield, &fl6, encap_limit, &mtu);
+       if (err != 0) {
+               /* XXX: send ICMP error even if DF is not set. */
+               if (err == -EMSGSIZE)
+                       icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
+                                 htonl(mtu));
+               return -1;
+       }
+
+       return 0;
+}
+
+/* Transmit an IPv6 packet through the tunnel.  Rejects trivial loops
+ * (inner source == tunnel remote), honours an inner Tunnel Encapsulation
+ * Limit option, and reports -EMSGSIZE back via ICMPV6_PKT_TOOBIG.
+ * Returns 0 on success, -1 on error.
+ */
+static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev)
+{
+       struct ip6_tnl *t = netdev_priv(dev);
+       struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+       int encap_limit = -1;
+       __u16 offset;
+       struct flowi6 fl6;
+       __u8 dsfield;
+       __u32 mtu;
+       int err;
+
+       if (ipv6_addr_equal(&t->parms.raddr, &ipv6h->saddr))
+               return -1;
+
+       /* An inner encap-limit option overrides the tunnel's own limit;
+        * a limit of zero means the packet may not be tunneled at all.
+        */
+       offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
+       if (offset > 0) {
+               struct ipv6_tlv_tnl_enc_lim *tel;
+               tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset];
+               if (tel->encap_limit == 0) {
+                       icmpv6_send(skb, ICMPV6_PARAMPROB,
+                                   ICMPV6_HDR_FIELD, offset + 2);
+                       return -1;
+               }
+               encap_limit = tel->encap_limit - 1;
+       } else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
+               encap_limit = t->parms.encap_limit;
+
+       memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
+       fl6.flowi6_proto = IPPROTO_IPV6;
+
+       /* Optionally propagate inner tclass/flowlabel/fwmark outward */
+       dsfield = ipv6_get_dsfield(ipv6h);
+       if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
+               fl6.flowlabel |= (*(__be32 *) ipv6h & IPV6_TCLASS_MASK);
+       if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
+               fl6.flowlabel |= (*(__be32 *) ipv6h & IPV6_FLOWLABEL_MASK);
+       if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
+               fl6.flowi6_mark = skb->mark;
+
+       err = ip6gre_xmit2(skb, dev, dsfield, &fl6, encap_limit, &mtu);
+       if (err != 0) {
+               if (err == -EMSGSIZE)
+                       icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
+               return -1;
+       }
+
+       return 0;
+}
+
+/**
+ * ip6gre_tnl_addr_conflict - compare packet addresses to tunnel's own
+ *   @t: the outgoing tunnel device
+ *   @hdr: IPv6 header from the incoming packet
+ *
+ * Description:
+ *   Avoid trivial tunneling loop by checking that tunnel exit-point
+ *   doesn't match source of incoming packet.
+ *
+ * Return:
+ *   1 if conflict,
+ *   0 else
+ **/
+
+static inline bool ip6gre_tnl_addr_conflict(const struct ip6_tnl *t,
+       const struct ipv6hdr *hdr)
+{
+       return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
+}
+
+/* Transmit a non-IP payload (e.g. Ethernet frames on a TAP-style
+ * tunnel): no DSCP/flowlabel propagation, just the tunnel's flow
+ * template with the inner protocol recorded.  Returns ip6gre_xmit2()'s
+ * result (0 or negative errno).
+ */
+static int ip6gre_xmit_other(struct sk_buff *skb, struct net_device *dev)
+{
+       struct ip6_tnl *t = netdev_priv(dev);
+       int encap_limit = -1;
+       struct flowi6 fl6;
+       __u32 mtu;
+       int err;
+
+       if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
+               encap_limit = t->parms.encap_limit;
+
+       memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
+       fl6.flowi6_proto = skb->protocol;
+
+       err = ip6gre_xmit2(skb, dev, 0, &fl6, encap_limit, &mtu);
+
+       return err;
+}
+
+/* ndo_start_xmit handler: dispatch on the inner protocol and account
+ * errors.  An ndo_start_xmit must consume the skb and return a valid
+ * netdev_tx_t; all failure paths therefore go through tx_err, which
+ * frees the skb and returns NETDEV_TX_OK.
+ */
+static netdev_tx_t ip6gre_tunnel_xmit(struct sk_buff *skb,
+       struct net_device *dev)
+{
+       struct ip6_tnl *t = netdev_priv(dev);
+       struct net_device_stats *stats = &t->dev->stats;
+       int ret;
+
+       /* Was "return -1": that leaked the skb and returned an invalid
+        * netdev_tx_t value to the stack.
+        */
+       if (!ip6_tnl_xmit_ctl(t))
+               goto tx_err;
+
+       switch (skb->protocol) {
+       case htons(ETH_P_IP):
+               ret = ip6gre_xmit_ipv4(skb, dev);
+               break;
+       case htons(ETH_P_IPV6):
+               ret = ip6gre_xmit_ipv6(skb, dev);
+               break;
+       default:
+               ret = ip6gre_xmit_other(skb, dev);
+               break;
+       }
+
+       if (ret < 0)
+               goto tx_err;
+
+       return NETDEV_TX_OK;
+
+tx_err:
+       stats->tx_errors++;
+       stats->tx_dropped++;
+       kfree_skb(skb);
+       return NETDEV_TX_OK;
+}
+
+/* Recompute the device configuration that is derived from the tunnel
+ * parameters: device addresses, the cached flowi template, capability
+ * and point-to-point flags, the precalculated header length (t->hlen),
+ * and - when @set_mtu is non-zero - the device MTU from the route to
+ * the remote endpoint.
+ */
+static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
+{
+       struct net_device *dev = t->dev;
+       struct __ip6_tnl_parm *p = &t->parms;
+       struct flowi6 *fl6 = &t->fl.u.ip6;
+       /* outer IPv6 header + 4-byte base GRE header */
+       int addend = sizeof(struct ipv6hdr) + 4;
+
+       if (dev->type != ARPHRD_ETHER) {
+               memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
+               memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));
+       }
+
+       /* Set up flowi template */
+       fl6->saddr = p->laddr;
+       fl6->daddr = p->raddr;
+       fl6->flowi6_oif = p->link;
+       fl6->flowlabel = 0;
+
+       if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS))
+               fl6->flowlabel |= IPV6_TCLASS_MASK & p->flowinfo;
+       if (!(p->flags&IP6_TNL_F_USE_ORIG_FLOWLABEL))
+               fl6->flowlabel |= IPV6_FLOWLABEL_MASK & p->flowinfo;
+
+       /* Re-derive the capability flags from the current addresses */
+       p->flags &= ~(IP6_TNL_F_CAP_XMIT|IP6_TNL_F_CAP_RCV|IP6_TNL_F_CAP_PER_PACKET);
+       p->flags |= ip6_tnl_get_cap(t, &p->laddr, &p->raddr);
+
+       if (p->flags&IP6_TNL_F_CAP_XMIT &&
+                       p->flags&IP6_TNL_F_CAP_RCV && dev->type != ARPHRD_ETHER)
+               dev->flags |= IFF_POINTOPOINT;
+       else
+               dev->flags &= ~IFF_POINTOPOINT;
+
+       dev->iflink = p->link;
+
+       /* Precalculate GRE options length */
+       if (t->parms.o_flags&(GRE_CSUM|GRE_KEY|GRE_SEQ)) {
+               if (t->parms.o_flags&GRE_CSUM)
+                       addend += 4;
+               if (t->parms.o_flags&GRE_KEY)
+                       addend += 4;
+               if (t->parms.o_flags&GRE_SEQ)
+                       addend += 4;
+       }
+
+       if (p->flags & IP6_TNL_F_CAP_XMIT) {
+               int strict = (ipv6_addr_type(&p->raddr) &
+                             (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL));
+
+               struct rt6_info *rt = rt6_lookup(dev_net(dev),
+                                                &p->raddr, &p->laddr,
+                                                p->link, strict);
+
+               if (rt == NULL)
+                       return;
+
+               if (rt->dst.dev) {
+                       dev->hard_header_len = rt->dst.dev->hard_header_len + addend;
+
+                       if (set_mtu) {
+                               dev->mtu = rt->dst.dev->mtu - addend;
+                               /* leave room for the encap-limit option */
+                               if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
+                                       dev->mtu -= 8;
+
+                               if (dev->mtu < IPV6_MIN_MTU)
+                                       dev->mtu = IPV6_MIN_MTU;
+                       }
+               }
+               dst_release(&rt->dst);
+       }
+
+       t->hlen = addend;
+}
+
+/* Copy user-supplied parameters @p into tunnel @t, drop the cached dst
+ * and recompute the derived link configuration (optionally refreshing
+ * the MTU).  Always returns 0.
+ */
+static int ip6gre_tnl_change(struct ip6_tnl *t,
+       const struct __ip6_tnl_parm *p, int set_mtu)
+{
+       struct __ip6_tnl_parm *cur = &t->parms;
+
+       cur->laddr = p->laddr;
+       cur->raddr = p->raddr;
+       cur->flags = p->flags;
+       cur->hop_limit = p->hop_limit;
+       cur->encap_limit = p->encap_limit;
+       cur->flowinfo = p->flowinfo;
+       cur->link = p->link;
+       cur->proto = p->proto;
+       cur->i_key = p->i_key;
+       cur->o_key = p->o_key;
+       cur->i_flags = p->i_flags;
+       cur->o_flags = p->o_flags;
+       ip6_tnl_dst_reset(t);
+       ip6gre_tnl_link_config(t, set_mtu);
+       return 0;
+}
+
+/* Translate the userspace ioctl parameter block (struct ip6_tnl_parm2)
+ * into the kernel-internal struct __ip6_tnl_parm.
+ */
+static void ip6gre_tnl_parm_from_user(struct __ip6_tnl_parm *p,
+       const struct ip6_tnl_parm2 *u)
+{
+       memcpy(p->name, u->name, sizeof(u->name));
+       p->link = u->link;
+       p->laddr = u->laddr;
+       p->raddr = u->raddr;
+       p->flags = u->flags;
+       p->hop_limit = u->hop_limit;
+       p->encap_limit = u->encap_limit;
+       p->flowinfo = u->flowinfo;
+       p->i_flags = u->i_flags;
+       p->o_flags = u->o_flags;
+       p->i_key = u->i_key;
+       p->o_key = u->o_key;
+}
+
+/* Fill a userspace-visible struct ip6_tnl_parm2 from the kernel-internal
+ * parameters.  The protocol is always reported as GRE.
+ */
+static void ip6gre_tnl_parm_to_user(struct ip6_tnl_parm2 *u,
+       const struct __ip6_tnl_parm *p)
+{
+       memcpy(u->name, p->name, sizeof(u->name));
+       u->proto = IPPROTO_GRE;
+       u->link = p->link;
+       u->laddr = p->laddr;
+       u->raddr = p->raddr;
+       u->flags = p->flags;
+       u->hop_limit = p->hop_limit;
+       u->encap_limit = p->encap_limit;
+       u->flowinfo = p->flowinfo;
+       u->i_flags = p->i_flags;
+       u->o_flags = p->o_flags;
+       u->i_key = p->i_key;
+       u->o_key = p->o_key;
+}
+
+/* ndo_do_ioctl: legacy tunnel configuration interface.
+ *
+ * SIOCGETTUNNEL: report parameters of @dev (or, on the fallback device,
+ *                of the tunnel named in the user's parameter block).
+ * SIOCADDTUNNEL/SIOCCHGTUNNEL: create or update a tunnel (CAP_NET_ADMIN).
+ * SIOCDELTUNNEL: unregister a tunnel; the fallback device itself may
+ *                never be deleted this way.
+ */
+static int ip6gre_tunnel_ioctl(struct net_device *dev,
+       struct ifreq *ifr, int cmd)
+{
+       int err = 0;
+       struct ip6_tnl_parm2 p;
+       struct __ip6_tnl_parm p1;
+       struct ip6_tnl *t;
+       struct net *net = dev_net(dev);
+       struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
+
+       switch (cmd) {
+       case SIOCGETTUNNEL:
+               t = NULL;
+               if (dev == ign->fb_tunnel_dev) {
+                       if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
+                               err = -EFAULT;
+                               break;
+                       }
+                       ip6gre_tnl_parm_from_user(&p1, &p);
+                       t = ip6gre_tunnel_locate(net, &p1, 0);
+               }
+               if (t == NULL)
+                       t = netdev_priv(dev);
+               ip6gre_tnl_parm_to_user(&p, &t->parms);
+               if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
+                       err = -EFAULT;
+               break;
+
+       case SIOCADDTUNNEL:
+       case SIOCCHGTUNNEL:
+               err = -EPERM;
+               if (!capable(CAP_NET_ADMIN))
+                       goto done;
+
+               err = -EFAULT;
+               if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
+                       goto done;
+
+               err = -EINVAL;
+               if ((p.i_flags|p.o_flags)&(GRE_VERSION|GRE_ROUTING))
+                       goto done;
+
+               /* Keys are only meaningful with the key flag set */
+               if (!(p.i_flags&GRE_KEY))
+                       p.i_key = 0;
+               if (!(p.o_flags&GRE_KEY))
+                       p.o_key = 0;
+
+               ip6gre_tnl_parm_from_user(&p1, &p);
+               t = ip6gre_tunnel_locate(net, &p1, cmd == SIOCADDTUNNEL);
+
+               if (dev != ign->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
+                       if (t != NULL) {
+                               /* parameters already used by another device */
+                               if (t->dev != dev) {
+                                       err = -EEXIST;
+                                       break;
+                               }
+                       } else {
+                               /* rehash under the new parameters */
+                               t = netdev_priv(dev);
+
+                               ip6gre_tunnel_unlink(ign, t);
+                               synchronize_net();
+                               ip6gre_tnl_change(t, &p1, 1);
+                               ip6gre_tunnel_link(ign, t);
+                               netdev_state_change(dev);
+                       }
+               }
+
+               if (t) {
+                       err = 0;
+
+                       /* echo the effective parameters back to userspace */
+                       ip6gre_tnl_parm_to_user(&p, &t->parms);
+                       if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
+                               err = -EFAULT;
+               } else
+                       err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
+               break;
+
+       case SIOCDELTUNNEL:
+               err = -EPERM;
+               if (!capable(CAP_NET_ADMIN))
+                       goto done;
+
+               if (dev == ign->fb_tunnel_dev) {
+                       err = -EFAULT;
+                       if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
+                               goto done;
+                       err = -ENOENT;
+                       ip6gre_tnl_parm_from_user(&p1, &p);
+                       t = ip6gre_tunnel_locate(net, &p1, 0);
+                       if (t == NULL)
+                               goto done;
+                       err = -EPERM;
+                       if (t == netdev_priv(ign->fb_tunnel_dev))
+                               goto done;
+                       dev = t->dev;
+               }
+               unregister_netdevice(dev);
+               err = 0;
+               break;
+
+       default:
+               err = -EINVAL;
+       }
+
+done:
+       return err;
+}
+
+/* ndo_change_mtu: accept any MTU from 68 bytes up to the largest
+ * payload that still fits under the link header plus our tunnel
+ * header (t->hlen) within the 0xFFF8 limit.
+ */
+static int ip6gre_tunnel_change_mtu(struct net_device *dev, int new_mtu)
+{
+       struct ip6_tnl *tunnel = netdev_priv(dev);
+       int max_mtu = 0xFFF8 - dev->hard_header_len - tunnel->hlen;
+
+       if (new_mtu < 68 || new_mtu > max_mtu)
+               return -EINVAL;
+
+       dev->mtu = new_mtu;
+       return 0;
+}
+
+/* header_ops->create: pre-build the outer IPv6 + GRE header for a
+ * point-to-multipoint tunnel.  @daddr/@saddr, when given, override the
+ * tunnel's configured endpoints.  Returns the header length pushed, or
+ * its negative when the destination is still unset (header incomplete).
+ */
+static int ip6gre_header(struct sk_buff *skb, struct net_device *dev,
+                       unsigned short type,
+                       const void *daddr, const void *saddr, unsigned int len)
+{
+       struct ip6_tnl *t = netdev_priv(dev);
+       struct ipv6hdr *ipv6h = (struct ipv6hdr *)skb_push(skb, t->hlen);
+       __be16 *p = (__be16 *)(ipv6h+1);
+
+       /* version 6, flow label from the cached template */
+       *(__be32 *)ipv6h = t->fl.u.ip6.flowlabel | htonl(0x60000000);
+       ipv6h->hop_limit = t->parms.hop_limit;
+       ipv6h->nexthdr = NEXTHDR_GRE;
+       ipv6h->saddr = t->parms.laddr;
+       ipv6h->daddr = t->parms.raddr;
+
+       p[0]            = t->parms.o_flags;
+       p[1]            = htons(type);
+
+       /*
+        *      Set the source hardware address.
+        */
+
+       if (saddr)
+               memcpy(&ipv6h->saddr, saddr, sizeof(struct in6_addr));
+       if (daddr)
+               memcpy(&ipv6h->daddr, daddr, sizeof(struct in6_addr));
+       if (!ipv6_addr_any(&ipv6h->daddr))
+               return t->hlen;
+
+       return -t->hlen;
+}
+
+/* Installed on tunnels with no fixed remote (see ip6gre_tunnel_init()) */
+static const struct header_ops ip6gre_header_ops = {
+       .create = ip6gre_header,
+};
+
+/* net_device operations for plain (layer-3) ip6gre tunnel devices */
+static const struct net_device_ops ip6gre_netdev_ops = {
+       .ndo_init               = ip6gre_tunnel_init,
+       .ndo_uninit             = ip6gre_tunnel_uninit,
+       .ndo_start_xmit         = ip6gre_tunnel_xmit,
+       .ndo_do_ioctl           = ip6gre_tunnel_ioctl,
+       .ndo_change_mtu         = ip6gre_tunnel_change_mtu,
+       .ndo_get_stats64        = ip6gre_get_stats64,
+};
+
+static void ip6gre_dev_free(struct net_device *dev)
+{
+       free_percpu(dev->tstats);
+       free_netdev(dev);
+}
+
+/*
+ * Netdev setup callback for layer-3 ip6gre devices: point-to-point,
+ * no ARP, per-netns, with headroom for IPv6 + 4 bytes of basic GRE.
+ */
+static void ip6gre_tunnel_setup(struct net_device *dev)
+{
+       struct ip6_tnl *t;
+
+       dev->netdev_ops = &ip6gre_netdev_ops;
+       dev->destructor = ip6gre_dev_free;
+
+       dev->type = ARPHRD_IP6GRE;
+       dev->hard_header_len = LL_MAX_HEADER + sizeof(struct ipv6hdr) + 4;
+       dev->mtu = ETH_DATA_LEN - sizeof(struct ipv6hdr) - 4;
+       t = netdev_priv(dev);
+       /* Reserve 8 bytes when an encapsulation-limit option is added */
+       if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
+               dev->mtu -= 8;
+       dev->flags |= IFF_NOARP;
+       dev->iflink = 0;
+       dev->addr_len = sizeof(struct in6_addr);
+       dev->features |= NETIF_F_NETNS_LOCAL;
+       dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
+}
+
+/*
+ * ndo_init for layer-3 tunnels: publish the endpoints as the device
+ * (hardware/broadcast) addresses and allocate per-cpu statistics.
+ * Returns 0 or -ENOMEM.
+ */
+static int ip6gre_tunnel_init(struct net_device *dev)
+{
+       struct ip6_tnl *tunnel;
+
+       tunnel = netdev_priv(dev);
+
+       tunnel->dev = dev;
+       strcpy(tunnel->parms.name, dev->name);
+
+       memcpy(dev->dev_addr, &tunnel->parms.laddr, sizeof(struct in6_addr));
+       memcpy(dev->broadcast, &tunnel->parms.raddr, sizeof(struct in6_addr));
+
+       /* No fixed remote: let callers supply one per packet through
+        * header_ops (see ip6gre_header()). */
+       if (ipv6_addr_any(&tunnel->parms.raddr))
+               dev->header_ops = &ip6gre_header_ops;
+
+       dev->tstats = alloc_percpu(struct pcpu_tstats);
+       if (!dev->tstats)
+               return -ENOMEM;
+
+       return 0;
+}
+
+/*
+ * Initialise the per-netns fallback device "ip6gre0".  The extra
+ * dev_hold() keeps the fallback device pinned for the netns lifetime.
+ */
+static void ip6gre_fb_tunnel_init(struct net_device *dev)
+{
+       struct ip6_tnl *tunnel = netdev_priv(dev);
+
+       tunnel->dev = dev;
+       strcpy(tunnel->parms.name, dev->name);
+
+       /* Basic GRE: IPv6 delivery header plus 4 GRE bytes */
+       tunnel->hlen            = sizeof(struct ipv6hdr) + 4;
+
+       dev_hold(dev);
+}
+
+
+/* IPPROTO_GRE receive hook registered with the IPv6 stack. */
+static struct inet6_protocol ip6gre_protocol __read_mostly = {
+       .handler     = ip6gre_rcv,
+       .err_handler = ip6gre_err,
+       .flags       = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
+};
+
+/*
+ * Queue every tunnel in the netns for unregistration.  Walks all four
+ * priority hash tables (wildcard..fully-keyed) under RTNL; the caller
+ * passes @head to unregister_netdevice_many() to batch the teardown.
+ */
+static void ip6gre_destroy_tunnels(struct ip6gre_net *ign,
+       struct list_head *head)
+{
+       int prio;
+
+       for (prio = 0; prio < 4; prio++) {
+               int h;
+               for (h = 0; h < HASH_SIZE; h++) {
+                       struct ip6_tnl *t;
+
+                       t = rtnl_dereference(ign->tunnels[prio][h]);
+
+                       while (t != NULL) {
+                               unregister_netdevice_queue(t->dev, head);
+                               t = rtnl_dereference(t->next);
+                       }
+               }
+       }
+}
+
+/*
+ * Per-netns init: create and register the "ip6gre0" fallback device
+ * and hash it into the wildcard chain.  On register failure the device
+ * is not yet registered, so it is freed directly via ip6gre_dev_free().
+ */
+static int __net_init ip6gre_init_net(struct net *net)
+{
+       struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
+       int err;
+
+       ign->fb_tunnel_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6gre0",
+                                          ip6gre_tunnel_setup);
+       if (!ign->fb_tunnel_dev) {
+               err = -ENOMEM;
+               goto err_alloc_dev;
+       }
+       dev_net_set(ign->fb_tunnel_dev, net);
+
+       ip6gre_fb_tunnel_init(ign->fb_tunnel_dev);
+       ign->fb_tunnel_dev->rtnl_link_ops = &ip6gre_link_ops;
+
+       err = register_netdev(ign->fb_tunnel_dev);
+       if (err)
+               goto err_reg_dev;
+
+       /* Fallback device lives in the wildcard (no-key) chain */
+       rcu_assign_pointer(ign->tunnels_wc[0],
+                          netdev_priv(ign->fb_tunnel_dev));
+       return 0;
+
+err_reg_dev:
+       ip6gre_dev_free(ign->fb_tunnel_dev);
+err_alloc_dev:
+       return err;
+}
+
+/* Per-netns teardown: batch-unregister every tunnel under RTNL. */
+static void __net_exit ip6gre_exit_net(struct net *net)
+{
+       struct ip6gre_net *ign;
+       LIST_HEAD(list);
+
+       ign = net_generic(net, ip6gre_net_id);
+       rtnl_lock();
+       ip6gre_destroy_tunnels(ign, &list);
+       unregister_netdevice_many(&list);
+       rtnl_unlock();
+}
+
+static struct pernet_operations ip6gre_net_ops = {
+       .init = ip6gre_init_net,
+       .exit = ip6gre_exit_net,
+       .id   = &ip6gre_net_id,
+       .size = sizeof(struct ip6gre_net),
+};
+
+/*
+ * rtnl_link_ops->validate for ip6gre: reject GRE flag bits we do not
+ * implement (version != 0 and source routing) in either direction.
+ */
+static int ip6gre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[])
+{
+       __be16 flags;
+
+       if (!data)
+               return 0;
+
+       flags = 0;
+       if (data[IFLA_GRE_IFLAGS])
+               flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
+       if (data[IFLA_GRE_OFLAGS])
+               flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
+       if (flags & (GRE_VERSION|GRE_ROUTING))
+               return -EINVAL;
+
+       return 0;
+}
+
+/*
+ * rtnl_link_ops->validate for ip6gretap: additionally check the
+ * Ethernet MAC (if supplied) and require a non-unspecified remote,
+ * then fall through to the common GRE flag checks.
+ */
+static int ip6gre_tap_validate(struct nlattr *tb[], struct nlattr *data[])
+{
+       struct in6_addr daddr;
+
+       if (tb[IFLA_ADDRESS]) {
+               if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
+                       return -EINVAL;
+               if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
+                       return -EADDRNOTAVAIL;
+       }
+
+       if (!data)
+               goto out;
+
+       if (data[IFLA_GRE_REMOTE]) {
+               nla_memcpy(&daddr, data[IFLA_GRE_REMOTE], sizeof(struct in6_addr));
+               if (ipv6_addr_any(&daddr))
+                       return -EINVAL;
+       }
+
+out:
+       return ip6gre_tunnel_validate(tb, data);
+}
+
+
+/*
+ * Translate IFLA_GRE_* netlink attributes into tunnel parameters.
+ * @parms is zeroed first, so absent attributes default to 0.
+ */
+static void ip6gre_netlink_parms(struct nlattr *data[],
+                               struct __ip6_tnl_parm *parms)
+{
+       memset(parms, 0, sizeof(*parms));
+
+       if (!data)
+               return;
+
+       if (data[IFLA_GRE_LINK])
+               parms->link = nla_get_u32(data[IFLA_GRE_LINK]);
+
+       if (data[IFLA_GRE_IFLAGS])
+               parms->i_flags = nla_get_be16(data[IFLA_GRE_IFLAGS]);
+
+       if (data[IFLA_GRE_OFLAGS])
+               parms->o_flags = nla_get_be16(data[IFLA_GRE_OFLAGS]);
+
+       if (data[IFLA_GRE_IKEY])
+               parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);
+
+       if (data[IFLA_GRE_OKEY])
+               parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);
+
+       if (data[IFLA_GRE_LOCAL])
+               nla_memcpy(&parms->laddr, data[IFLA_GRE_LOCAL], sizeof(struct in6_addr));
+
+       if (data[IFLA_GRE_REMOTE])
+               nla_memcpy(&parms->raddr, data[IFLA_GRE_REMOTE], sizeof(struct in6_addr));
+
+       if (data[IFLA_GRE_TTL])
+               parms->hop_limit = nla_get_u8(data[IFLA_GRE_TTL]);
+
+       if (data[IFLA_GRE_ENCAP_LIMIT])
+               parms->encap_limit = nla_get_u8(data[IFLA_GRE_ENCAP_LIMIT]);
+
+       if (data[IFLA_GRE_FLOWINFO])
+               parms->flowinfo = nla_get_u32(data[IFLA_GRE_FLOWINFO]);
+
+       if (data[IFLA_GRE_FLAGS])
+               parms->flags = nla_get_u32(data[IFLA_GRE_FLAGS]);
+}
+
+/*
+ * ndo_init for ip6gretap (Ethernet-over-GRE) devices: configure link
+ * parameters from the tunnel and allocate per-cpu stats.
+ */
+static int ip6gre_tap_init(struct net_device *dev)
+{
+       struct ip6_tnl *tunnel;
+
+       tunnel = netdev_priv(dev);
+
+       tunnel->dev = dev;
+       strcpy(tunnel->parms.name, dev->name);
+
+       ip6gre_tnl_link_config(tunnel, 1);
+
+       dev->tstats = alloc_percpu(struct pcpu_tstats);
+       if (!dev->tstats)
+               return -ENOMEM;
+
+       return 0;
+}
+
+/* Device operations for ip6gretap: Ethernet semantics, no ioctl. */
+static const struct net_device_ops ip6gre_tap_netdev_ops = {
+       .ndo_init = ip6gre_tap_init,
+       .ndo_uninit = ip6gre_tunnel_uninit,
+       .ndo_start_xmit = ip6gre_tunnel_xmit,
+       .ndo_set_mac_address = eth_mac_addr,
+       .ndo_validate_addr = eth_validate_addr,
+       .ndo_change_mtu = ip6gre_tunnel_change_mtu,
+       .ndo_get_stats64 = ip6gre_get_stats64,
+};
+
+/* Netdev setup for ip6gretap: start from Ethernet defaults. */
+static void ip6gre_tap_setup(struct net_device *dev)
+{
+
+       ether_setup(dev);
+
+       dev->netdev_ops = &ip6gre_tap_netdev_ops;
+       dev->destructor = ip6gre_dev_free;
+
+       dev->iflink = 0;
+       dev->features |= NETIF_F_NETNS_LOCAL;
+}
+
+/*
+ * rtnl_link_ops->newlink for both ip6gre and ip6gretap.  Parses the
+ * netlink parameters, refuses a duplicate tunnel, registers the device
+ * and hashes it into the per-netns tables.  Returns 0 or -errno.
+ */
+static int ip6gre_newlink(struct net *src_net, struct net_device *dev,
+       struct nlattr *tb[], struct nlattr *data[])
+{
+       struct ip6_tnl *nt;
+       struct net *net = dev_net(dev);
+       struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
+       int err;
+
+       nt = netdev_priv(dev);
+       ip6gre_netlink_parms(data, &nt->parms);
+
+       if (ip6gre_tunnel_find(net, &nt->parms, dev->type))
+               return -EEXIST;
+
+       /* gretap without an explicit MAC gets a random one */
+       if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
+               eth_hw_addr_random(dev);
+
+       nt->dev = dev;
+       ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]);
+
+       /* Can use a lockless transmit, unless we generate output sequences */
+       if (!(nt->parms.o_flags & GRE_SEQ))
+               dev->features |= NETIF_F_LLTX;
+
+       err = register_netdevice(dev);
+       if (err)
+               goto out;
+
+       /* Reference dropped in ndo_uninit when the tunnel is unlinked */
+       dev_hold(dev);
+       ip6gre_tunnel_link(ign, nt);
+
+out:
+       return err;
+}
+
+/*
+ * rtnl_link_ops->changelink: update an existing tunnel's parameters.
+ * The fallback device cannot be reconfigured; a parameter set that
+ * collides with a different tunnel yields -EEXIST.
+ * NOTE(review): when ip6gre_tunnel_locate() returns this very device
+ * (t->dev == dev) the new parameters are silently NOT applied -- the
+ * rehash/change only happens in the else branch.  Confirm this is the
+ * intended behaviour for non-key changes such as TTL or flowinfo.
+ */
+static int ip6gre_changelink(struct net_device *dev, struct nlattr *tb[],
+                           struct nlattr *data[])
+{
+       struct ip6_tnl *t, *nt;
+       struct net *net = dev_net(dev);
+       struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
+       struct __ip6_tnl_parm p;
+
+       if (dev == ign->fb_tunnel_dev)
+               return -EINVAL;
+
+       nt = netdev_priv(dev);
+       ip6gre_netlink_parms(data, &p);
+
+       t = ip6gre_tunnel_locate(net, &p, 0);
+
+       if (t) {
+               if (t->dev != dev)
+                       return -EEXIST;
+       } else {
+               t = nt;
+
+               /* Re-hash under the new parameters */
+               ip6gre_tunnel_unlink(ign, t);
+               ip6gre_tnl_change(t, &p, !tb[IFLA_MTU]);
+               ip6gre_tunnel_link(ign, t);
+               netdev_state_change(dev);
+       }
+
+       return 0;
+}
+
+/*
+ * Upper bound on the netlink payload emitted by ip6gre_fill_info().
+ * IFLA_GRE_LOCAL and IFLA_GRE_REMOTE carry a full struct in6_addr
+ * (16 bytes -- see ip6gre_fill_info() and ip6gre_policy[]), not a
+ * 4-byte IPv4 address, so they must be sized accordingly.
+ */
+static size_t ip6gre_get_size(const struct net_device *dev)
+{
+       return
+               /* IFLA_GRE_LINK */
+               nla_total_size(4) +
+               /* IFLA_GRE_IFLAGS */
+               nla_total_size(2) +
+               /* IFLA_GRE_OFLAGS */
+               nla_total_size(2) +
+               /* IFLA_GRE_IKEY */
+               nla_total_size(4) +
+               /* IFLA_GRE_OKEY */
+               nla_total_size(4) +
+               /* IFLA_GRE_LOCAL */
+               nla_total_size(sizeof(struct in6_addr)) +
+               /* IFLA_GRE_REMOTE */
+               nla_total_size(sizeof(struct in6_addr)) +
+               /* IFLA_GRE_TTL */
+               nla_total_size(1) +
+               /* IFLA_GRE_TOS */
+               nla_total_size(1) +
+               /* IFLA_GRE_ENCAP_LIMIT */
+               nla_total_size(1) +
+               /* IFLA_GRE_FLOWINFO */
+               nla_total_size(4) +
+               /* IFLA_GRE_FLAGS */
+               nla_total_size(4) +
+               0;
+}
+
+/*
+ * Dump the tunnel configuration into a netlink message.
+ * Fix: IFLA_GRE_LOCAL must report the local endpoint (p->laddr) and
+ * IFLA_GRE_REMOTE the remote endpoint (p->raddr); the original code
+ * had the two addresses swapped, so "ip link show" style dumps would
+ * present local and remote endpoints reversed.
+ */
+static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev)
+{
+       struct ip6_tnl *t = netdev_priv(dev);
+       struct __ip6_tnl_parm *p = &t->parms;
+
+       if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
+           nla_put_be16(skb, IFLA_GRE_IFLAGS, p->i_flags) ||
+           nla_put_be16(skb, IFLA_GRE_OFLAGS, p->o_flags) ||
+           nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
+           nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
+           nla_put(skb, IFLA_GRE_LOCAL, sizeof(struct in6_addr), &p->laddr) ||
+           nla_put(skb, IFLA_GRE_REMOTE, sizeof(struct in6_addr), &p->raddr) ||
+           nla_put_u8(skb, IFLA_GRE_TTL, p->hop_limit) ||
+           /*nla_put_u8(skb, IFLA_GRE_TOS, t->priority) ||*/
+           nla_put_u8(skb, IFLA_GRE_ENCAP_LIMIT, p->encap_limit) ||
+           nla_put_be32(skb, IFLA_GRE_FLOWINFO, p->flowinfo) ||
+           nla_put_u32(skb, IFLA_GRE_FLAGS, p->flags))
+               goto nla_put_failure;
+       return 0;
+
+nla_put_failure:
+       return -EMSGSIZE;
+}
+
+/* Netlink attribute validation policy shared by both link types. */
+static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
+       [IFLA_GRE_LINK]        = { .type = NLA_U32 },
+       [IFLA_GRE_IFLAGS]      = { .type = NLA_U16 },
+       [IFLA_GRE_OFLAGS]      = { .type = NLA_U16 },
+       [IFLA_GRE_IKEY]        = { .type = NLA_U32 },
+       [IFLA_GRE_OKEY]        = { .type = NLA_U32 },
+       [IFLA_GRE_LOCAL]       = { .len = FIELD_SIZEOF(struct ipv6hdr, saddr) },
+       [IFLA_GRE_REMOTE]      = { .len = FIELD_SIZEOF(struct ipv6hdr, daddr) },
+       [IFLA_GRE_TTL]         = { .type = NLA_U8 },
+       [IFLA_GRE_ENCAP_LIMIT] = { .type = NLA_U8 },
+       [IFLA_GRE_FLOWINFO]    = { .type = NLA_U32 },
+       [IFLA_GRE_FLAGS]       = { .type = NLA_U32 },
+};
+
+/* rtnetlink glue for layer-3 "ip6gre" links. */
+static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
+       .kind           = "ip6gre",
+       .maxtype        = IFLA_GRE_MAX,
+       .policy         = ip6gre_policy,
+       .priv_size      = sizeof(struct ip6_tnl),
+       .setup          = ip6gre_tunnel_setup,
+       .validate       = ip6gre_tunnel_validate,
+       .newlink        = ip6gre_newlink,
+       .changelink     = ip6gre_changelink,
+       .get_size       = ip6gre_get_size,
+       .fill_info      = ip6gre_fill_info,
+};
+
+/* rtnetlink glue for Ethernet-over-GRE "ip6gretap" links. */
+static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
+       .kind           = "ip6gretap",
+       .maxtype        = IFLA_GRE_MAX,
+       .policy         = ip6gre_policy,
+       .priv_size      = sizeof(struct ip6_tnl),
+       .setup          = ip6gre_tap_setup,
+       .validate       = ip6gre_tap_validate,
+       .newlink        = ip6gre_newlink,
+       .changelink     = ip6gre_changelink,
+       .get_size       = ip6gre_get_size,
+       .fill_info      = ip6gre_fill_info,
+};
+
+/*
+ *     And now the modules code and kernel interface.
+ */
+
+/*
+ * Module init: register in order pernet ops, the IPPROTO_GRE receive
+ * handler, then both rtnetlink link types; unwind in reverse on error.
+ */
+static int __init ip6gre_init(void)
+{
+       int err;
+
+       pr_info("GRE over IPv6 tunneling driver\n");
+
+       err = register_pernet_device(&ip6gre_net_ops);
+       if (err < 0)
+               return err;
+
+       err = inet6_add_protocol(&ip6gre_protocol, IPPROTO_GRE);
+       if (err < 0) {
+               pr_info("%s: can't add protocol\n", __func__);
+               goto add_proto_failed;
+       }
+
+       err = rtnl_link_register(&ip6gre_link_ops);
+       if (err < 0)
+               goto rtnl_link_failed;
+
+       err = rtnl_link_register(&ip6gre_tap_ops);
+       if (err < 0)
+               goto tap_ops_failed;
+
+out:
+       return err;
+
+tap_ops_failed:
+       rtnl_link_unregister(&ip6gre_link_ops);
+rtnl_link_failed:
+       inet6_del_protocol(&ip6gre_protocol, IPPROTO_GRE);
+add_proto_failed:
+       unregister_pernet_device(&ip6gre_net_ops);
+       goto out;
+}
+
+/* Module exit: tear down in strict reverse order of ip6gre_init(). */
+static void __exit ip6gre_fini(void)
+{
+       rtnl_link_unregister(&ip6gre_tap_ops);
+       rtnl_link_unregister(&ip6gre_link_ops);
+       inet6_del_protocol(&ip6gre_protocol, IPPROTO_GRE);
+       unregister_pernet_device(&ip6gre_net_ops);
+}
+
+/* Module registration and metadata. */
+module_init(ip6gre_init);
+module_exit(ip6gre_fini);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("D. Kozlov (xeb@mail.ru)");
+MODULE_DESCRIPTION("GRE over IPv6 tunneling device");
+MODULE_ALIAS_RTNL_LINK("ip6gre");
+MODULE_ALIAS_NETDEV("ip6gre0");
index 5b2d63ed793e1ffe7a568e9c581ccf7bab9057a5..aece3e792f84ad4cfcc88ada01817c70242b396d 100644 (file)
@@ -123,16 +123,11 @@ static int ip6_finish_output2(struct sk_buff *skb)
                                skb->len);
        }
 
-       rcu_read_lock();
        rt = (struct rt6_info *) dst;
        neigh = rt->n;
-       if (neigh) {
-               int res = dst_neigh_output(dst, neigh, skb);
+       if (neigh)
+               return dst_neigh_output(dst, neigh, skb);
 
-               rcu_read_unlock();
-               return res;
-       }
-       rcu_read_unlock();
        IP6_INC_STATS_BH(dev_net(dst->dev),
                         ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
        kfree_skb(skb);
@@ -493,7 +488,8 @@ int ip6_forward(struct sk_buff *skb)
        if (mtu < IPV6_MIN_MTU)
                mtu = IPV6_MIN_MTU;
 
-       if (skb->len > mtu && !skb_is_gso(skb)) {
+       if ((!skb->local_df && skb->len > mtu && !skb_is_gso(skb)) ||
+           (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)) {
                /* Again, force OUTPUT device used as source address */
                skb->dev = dst->dev;
                icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
@@ -636,7 +632,9 @@ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
        /* We must not fragment if the socket is set to force MTU discovery
         * or if the skb it not generated by a local socket.
         */
-       if (unlikely(!skb->local_df && skb->len > mtu)) {
+       if (unlikely(!skb->local_df && skb->len > mtu) ||
+                    (IP6CB(skb)->frag_max_size &&
+                     IP6CB(skb)->frag_max_size > mtu)) {
                if (skb->sk && dst_allfrag(skb_dst(skb)))
                        sk_nocaps_add(skb->sk, NETIF_F_GSO_MASK);
 
@@ -980,7 +978,6 @@ static int ip6_dst_lookup_tail(struct sock *sk,
         * dst entry and replace it instead with the
         * dst entry of the nexthop router
         */
-       rcu_read_lock();
        rt = (struct rt6_info *) *dst;
        n = rt->n;
        if (n && !(n->nud_state & NUD_VALID)) {
@@ -988,7 +985,6 @@ static int ip6_dst_lookup_tail(struct sock *sk,
                struct flowi6 fl_gw6;
                int redirect;
 
-               rcu_read_unlock();
                ifp = ipv6_get_ifaddr(net, &fl6->saddr,
                                      (*dst)->dev, 1);
 
@@ -1008,8 +1004,6 @@ static int ip6_dst_lookup_tail(struct sock *sk,
                        if ((err = (*dst)->error))
                                goto out_err_release;
                }
-       } else {
-               rcu_read_unlock();
        }
 #endif
 
@@ -1285,8 +1279,6 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
                if (dst_allfrag(rt->dst.path))
                        cork->flags |= IPCORK_ALLFRAG;
                cork->length = 0;
-               sk->sk_sndmsg_page = NULL;
-               sk->sk_sndmsg_off = 0;
                exthdrlen = (opt ? opt->opt_flen : 0) - rt->rt6i_nfheader_len;
                length += exthdrlen;
                transhdrlen += exthdrlen;
@@ -1510,48 +1502,31 @@ alloc_new_skb:
                        }
                } else {
                        int i = skb_shinfo(skb)->nr_frags;
-                       skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
-                       struct page *page = sk->sk_sndmsg_page;
-                       int off = sk->sk_sndmsg_off;
-                       unsigned int left;
-
-                       if (page && (left = PAGE_SIZE - off) > 0) {
-                               if (copy >= left)
-                                       copy = left;
-                               if (page != skb_frag_page(frag)) {
-                                       if (i == MAX_SKB_FRAGS) {
-                                               err = -EMSGSIZE;
-                                               goto error;
-                                       }
-                                       skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
-                                       skb_frag_ref(skb, i);
-                                       frag = &skb_shinfo(skb)->frags[i];
-                               }
-                       } else if(i < MAX_SKB_FRAGS) {
-                               if (copy > PAGE_SIZE)
-                                       copy = PAGE_SIZE;
-                               page = alloc_pages(sk->sk_allocation, 0);
-                               if (page == NULL) {
-                                       err = -ENOMEM;
-                                       goto error;
-                               }
-                               sk->sk_sndmsg_page = page;
-                               sk->sk_sndmsg_off = 0;
+                       struct page_frag *pfrag = sk_page_frag(sk);
 
-                               skb_fill_page_desc(skb, i, page, 0, 0);
-                               frag = &skb_shinfo(skb)->frags[i];
-                       } else {
-                               err = -EMSGSIZE;
+                       err = -ENOMEM;
+                       if (!sk_page_frag_refill(sk, pfrag))
                                goto error;
+
+                       if (!skb_can_coalesce(skb, i, pfrag->page,
+                                             pfrag->offset)) {
+                               err = -EMSGSIZE;
+                               if (i == MAX_SKB_FRAGS)
+                                       goto error;
+
+                               __skb_fill_page_desc(skb, i, pfrag->page,
+                                                    pfrag->offset, 0);
+                               skb_shinfo(skb)->nr_frags = ++i;
+                               get_page(pfrag->page);
                        }
+                       copy = min_t(int, copy, pfrag->size - pfrag->offset);
                        if (getfrag(from,
-                                   skb_frag_address(frag) + skb_frag_size(frag),
-                                   offset, copy, skb->len, skb) < 0) {
-                               err = -EFAULT;
-                               goto error;
-                       }
-                       sk->sk_sndmsg_off += copy;
-                       skb_frag_size_add(frag, copy);
+                                   page_address(pfrag->page) + pfrag->offset,
+                                   offset, copy, skb->len, skb) < 0)
+                               goto error_efault;
+
+                       pfrag->offset += copy;
+                       skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
                        skb->len += copy;
                        skb->data_len += copy;
                        skb->truesize += copy;
@@ -1560,7 +1535,11 @@ alloc_new_skb:
                offset += copy;
                length -= copy;
        }
+
        return 0;
+
+error_efault:
+       err = -EFAULT;
 error:
        cork->length -= length;
        IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
index 9a1d5fe6aef8f229e22c8b2e8f3cef1663be09ce..cb7e2ded6f08cce17f8fb11a7e7e119e8564d661 100644 (file)
@@ -126,7 +126,7 @@ static struct net_device_stats *ip6_get_stats(struct net_device *dev)
  * Locking : hash tables are protected by RCU and RTNL
  */
 
-static inline struct dst_entry *ip6_tnl_dst_check(struct ip6_tnl *t)
+struct dst_entry *ip6_tnl_dst_check(struct ip6_tnl *t)
 {
        struct dst_entry *dst = t->dst_cache;
 
@@ -139,20 +139,23 @@ static inline struct dst_entry *ip6_tnl_dst_check(struct ip6_tnl *t)
 
        return dst;
 }
+EXPORT_SYMBOL_GPL(ip6_tnl_dst_check);
 
-static inline void ip6_tnl_dst_reset(struct ip6_tnl *t)
+void ip6_tnl_dst_reset(struct ip6_tnl *t)
 {
        dst_release(t->dst_cache);
        t->dst_cache = NULL;
 }
+EXPORT_SYMBOL_GPL(ip6_tnl_dst_reset);
 
-static inline void ip6_tnl_dst_store(struct ip6_tnl *t, struct dst_entry *dst)
+void ip6_tnl_dst_store(struct ip6_tnl *t, struct dst_entry *dst)
 {
        struct rt6_info *rt = (struct rt6_info *) dst;
        t->dst_cookie = rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
        dst_release(t->dst_cache);
        t->dst_cache = dst;
 }
+EXPORT_SYMBOL_GPL(ip6_tnl_dst_store);
 
 /**
  * ip6_tnl_lookup - fetch tunnel matching the end-point addresses
@@ -200,7 +203,7 @@ ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_
  **/
 
 static struct ip6_tnl __rcu **
-ip6_tnl_bucket(struct ip6_tnl_net *ip6n, const struct ip6_tnl_parm *p)
+ip6_tnl_bucket(struct ip6_tnl_net *ip6n, const struct __ip6_tnl_parm *p)
 {
        const struct in6_addr *remote = &p->raddr;
        const struct in6_addr *local = &p->laddr;
@@ -267,7 +270,7 @@ static void ip6_dev_free(struct net_device *dev)
  *   created tunnel or NULL
  **/
 
-static struct ip6_tnl *ip6_tnl_create(struct net *net, struct ip6_tnl_parm *p)
+static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
 {
        struct net_device *dev;
        struct ip6_tnl *t;
@@ -322,7 +325,7 @@ failed:
  **/
 
 static struct ip6_tnl *ip6_tnl_locate(struct net *net,
-               struct ip6_tnl_parm *p, int create)
+               struct __ip6_tnl_parm *p, int create)
 {
        const struct in6_addr *remote = &p->raddr;
        const struct in6_addr *local = &p->laddr;
@@ -374,8 +377,7 @@ ip6_tnl_dev_uninit(struct net_device *dev)
  *   else index to encapsulation limit
  **/
 
-static __u16
-parse_tlv_tnl_enc_lim(struct sk_buff *skb, __u8 * raw)
+__u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
 {
        const struct ipv6hdr *ipv6h = (const struct ipv6hdr *) raw;
        __u8 nexthdr = ipv6h->nexthdr;
@@ -425,6 +427,7 @@ parse_tlv_tnl_enc_lim(struct sk_buff *skb, __u8 * raw)
        }
        return 0;
 }
+EXPORT_SYMBOL(ip6_tnl_parse_tlv_enc_lim);
 
 /**
  * ip6_tnl_err - tunnel error handler
@@ -480,7 +483,7 @@ ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
        case ICMPV6_PARAMPROB:
                teli = 0;
                if ((*code) == ICMPV6_HDR_FIELD)
-                       teli = parse_tlv_tnl_enc_lim(skb, skb->data);
+                       teli = ip6_tnl_parse_tlv_enc_lim(skb, skb->data);
 
                if (teli && teli == *info - 2) {
                        tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
@@ -693,11 +696,11 @@ static void ip6ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
                IP6_ECN_set_ce(ipv6_hdr(skb));
 }
 
-static __u32 ip6_tnl_get_cap(struct ip6_tnl *t,
+__u32 ip6_tnl_get_cap(struct ip6_tnl *t,
                             const struct in6_addr *laddr,
                             const struct in6_addr *raddr)
 {
-       struct ip6_tnl_parm *p = &t->parms;
+       struct __ip6_tnl_parm *p = &t->parms;
        int ltype = ipv6_addr_type(laddr);
        int rtype = ipv6_addr_type(raddr);
        __u32 flags = 0;
@@ -715,13 +718,14 @@ static __u32 ip6_tnl_get_cap(struct ip6_tnl *t,
        }
        return flags;
 }
+EXPORT_SYMBOL(ip6_tnl_get_cap);
 
 /* called with rcu_read_lock() */
-static inline int ip6_tnl_rcv_ctl(struct ip6_tnl *t,
+int ip6_tnl_rcv_ctl(struct ip6_tnl *t,
                                  const struct in6_addr *laddr,
                                  const struct in6_addr *raddr)
 {
-       struct ip6_tnl_parm *p = &t->parms;
+       struct __ip6_tnl_parm *p = &t->parms;
        int ret = 0;
        struct net *net = dev_net(t->dev);
 
@@ -740,6 +744,7 @@ static inline int ip6_tnl_rcv_ctl(struct ip6_tnl *t,
        }
        return ret;
 }
+EXPORT_SYMBOL_GPL(ip6_tnl_rcv_ctl);
 
 /**
  * ip6_tnl_rcv - decapsulate IPv6 packet and retransmit it locally
@@ -859,9 +864,9 @@ ip6_tnl_addr_conflict(const struct ip6_tnl *t, const struct ipv6hdr *hdr)
        return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
 }
 
-static inline int ip6_tnl_xmit_ctl(struct ip6_tnl *t)
+int ip6_tnl_xmit_ctl(struct ip6_tnl *t)
 {
-       struct ip6_tnl_parm *p = &t->parms;
+       struct __ip6_tnl_parm *p = &t->parms;
        int ret = 0;
        struct net *net = dev_net(t->dev);
 
@@ -885,6 +890,8 @@ static inline int ip6_tnl_xmit_ctl(struct ip6_tnl *t)
        }
        return ret;
 }
+EXPORT_SYMBOL_GPL(ip6_tnl_xmit_ctl);
+
 /**
  * ip6_tnl_xmit2 - encapsulate packet and send
  *   @skb: the outgoing socket buffer
@@ -1085,7 +1092,7 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
            !ip6_tnl_xmit_ctl(t) || ip6_tnl_addr_conflict(t, ipv6h))
                return -1;
 
-       offset = parse_tlv_tnl_enc_lim(skb, skb_network_header(skb));
+       offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
        if (offset > 0) {
                struct ipv6_tlv_tnl_enc_lim *tel;
                tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset];
@@ -1152,7 +1159,7 @@ tx_err:
 static void ip6_tnl_link_config(struct ip6_tnl *t)
 {
        struct net_device *dev = t->dev;
-       struct ip6_tnl_parm *p = &t->parms;
+       struct __ip6_tnl_parm *p = &t->parms;
        struct flowi6 *fl6 = &t->fl.u.ip6;
 
        memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
@@ -1215,7 +1222,7 @@ static void ip6_tnl_link_config(struct ip6_tnl *t)
  **/
 
 static int
-ip6_tnl_change(struct ip6_tnl *t, struct ip6_tnl_parm *p)
+ip6_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p)
 {
        t->parms.laddr = p->laddr;
        t->parms.raddr = p->raddr;
@@ -1230,6 +1237,34 @@ ip6_tnl_change(struct ip6_tnl *t, struct ip6_tnl_parm *p)
        return 0;
 }
 
+static void
+ip6_tnl_parm_from_user(struct __ip6_tnl_parm *p, const struct ip6_tnl_parm *u)
+{
+       p->laddr = u->laddr;
+       p->raddr = u->raddr;
+       p->flags = u->flags;
+       p->hop_limit = u->hop_limit;
+       p->encap_limit = u->encap_limit;
+       p->flowinfo = u->flowinfo;
+       p->link = u->link;
+       p->proto = u->proto;
+       memcpy(p->name, u->name, sizeof(u->name));
+}
+
+static void
+ip6_tnl_parm_to_user(struct ip6_tnl_parm *u, const struct __ip6_tnl_parm *p)
+{
+       u->laddr = p->laddr;
+       u->raddr = p->raddr;
+       u->flags = p->flags;
+       u->hop_limit = p->hop_limit;
+       u->encap_limit = p->encap_limit;
+       u->flowinfo = p->flowinfo;
+       u->link = p->link;
+       u->proto = p->proto;
+       memcpy(u->name, p->name, sizeof(u->name));
+}
+
 /**
  * ip6_tnl_ioctl - configure ipv6 tunnels from userspace
  *   @dev: virtual device associated with tunnel
@@ -1263,6 +1298,7 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 {
        int err = 0;
        struct ip6_tnl_parm p;
+       struct __ip6_tnl_parm p1;
        struct ip6_tnl *t = NULL;
        struct net *net = dev_net(dev);
        struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
@@ -1274,11 +1310,14 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
                                err = -EFAULT;
                                break;
                        }
-                       t = ip6_tnl_locate(net, &p, 0);
+                       ip6_tnl_parm_from_user(&p1, &p);
+                       t = ip6_tnl_locate(net, &p1, 0);
+               } else {
+                       memset(&p, 0, sizeof(p));
                }
                if (t == NULL)
                        t = netdev_priv(dev);
-               memcpy(&p, &t->parms, sizeof (p));
+               ip6_tnl_parm_to_user(&p, &t->parms);
                if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof (p))) {
                        err = -EFAULT;
                }
@@ -1295,7 +1334,8 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
                if (p.proto != IPPROTO_IPV6 && p.proto != IPPROTO_IPIP &&
                    p.proto != 0)
                        break;
-               t = ip6_tnl_locate(net, &p, cmd == SIOCADDTUNNEL);
+               ip6_tnl_parm_from_user(&p1, &p);
+               t = ip6_tnl_locate(net, &p1, cmd == SIOCADDTUNNEL);
                if (dev != ip6n->fb_tnl_dev && cmd == SIOCCHGTUNNEL) {
                        if (t != NULL) {
                                if (t->dev != dev) {
@@ -1307,13 +1347,14 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 
                        ip6_tnl_unlink(ip6n, t);
                        synchronize_net();
-                       err = ip6_tnl_change(t, &p);
+                       err = ip6_tnl_change(t, &p1);
                        ip6_tnl_link(ip6n, t);
                        netdev_state_change(dev);
                }
                if (t) {
                        err = 0;
-                       if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms, sizeof (p)))
+                       ip6_tnl_parm_to_user(&p, &t->parms);
+                       if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
                                err = -EFAULT;
 
                } else
@@ -1329,7 +1370,9 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
                        if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof (p)))
                                break;
                        err = -ENOENT;
-                       if ((t = ip6_tnl_locate(net, &p, 0)) == NULL)
+                       ip6_tnl_parm_from_user(&p1, &p);
+                       t = ip6_tnl_locate(net, &p1, 0);
+                       if (t == NULL)
                                break;
                        err = -EPERM;
                        if (t->dev == ip6n->fb_tnl_dev)
index 4532973f0dd4fc8fd3467a81cc0a2ce0581c72f1..08ea3f0b6e55f9557ec1e919e77f1496ccb1fdf1 100644 (file)
@@ -838,7 +838,7 @@ static void ip6mr_destroy_unres(struct mr6_table *mrt, struct mfc6_cache *c)
                        nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
                        skb_trim(skb, nlh->nlmsg_len);
                        ((struct nlmsgerr *)NLMSG_DATA(nlh))->error = -ETIMEDOUT;
-                       rtnl_unicast(skb, net, NETLINK_CB(skb).pid);
+                       rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
                } else
                        kfree_skb(skb);
        }
@@ -1052,7 +1052,7 @@ static void ip6mr_cache_resolve(struct net *net, struct mr6_table *mrt,
                                skb_trim(skb, nlh->nlmsg_len);
                                ((struct nlmsgerr *)NLMSG_DATA(nlh))->error = -EMSGSIZE;
                        }
-                       rtnl_unicast(skb, net, NETLINK_CB(skb).pid);
+                       rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
                } else
                        ip6_mr_forward(net, mrt, skb, c);
        }
@@ -2202,12 +2202,12 @@ int ip6mr_get_route(struct net *net,
 }
 
 static int ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
-                            u32 pid, u32 seq, struct mfc6_cache *c)
+                            u32 portid, u32 seq, struct mfc6_cache *c)
 {
        struct nlmsghdr *nlh;
        struct rtmsg *rtm;
 
-       nlh = nlmsg_put(skb, pid, seq, RTM_NEWROUTE, sizeof(*rtm), NLM_F_MULTI);
+       nlh = nlmsg_put(skb, portid, seq, RTM_NEWROUTE, sizeof(*rtm), NLM_F_MULTI);
        if (nlh == NULL)
                return -EMSGSIZE;
 
@@ -2260,7 +2260,7 @@ static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
                                if (e < s_e)
                                        goto next_entry;
                                if (ip6mr_fill_mroute(mrt, skb,
-                                                     NETLINK_CB(cb->skb).pid,
+                                                     NETLINK_CB(cb->skb).portid,
                                                      cb->nlh->nlmsg_seq,
                                                      mfc) < 0)
                                        goto done;
index db31561cc8df31afbf7f18fd4dd849811be5a8dd..429089cb073dd1ae4f565a6b5382d6b12b9354ce 100644 (file)
@@ -15,6 +15,7 @@ int ip6_route_me_harder(struct sk_buff *skb)
 {
        struct net *net = dev_net(skb_dst(skb)->dev);
        const struct ipv6hdr *iph = ipv6_hdr(skb);
+       unsigned int hh_len;
        struct dst_entry *dst;
        struct flowi6 fl6 = {
                .flowi6_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0,
@@ -47,6 +48,13 @@ int ip6_route_me_harder(struct sk_buff *skb)
        }
 #endif
 
+       /* Change in oif may mean change in hh_len. */
+       hh_len = skb_dst(skb)->dev->hard_header_len;
+       if (skb_headroom(skb) < hh_len &&
+           pskb_expand_head(skb, HH_DATA_ALIGN(hh_len - skb_headroom(skb)),
+                            0, GFP_ATOMIC))
+               return -1;
+
        return 0;
 }
 EXPORT_SYMBOL(ip6_route_me_harder);
index 10135342799e293bfa62e87ac01f7e1ec81c8c65..c72532a60d887aeaaf31187f470622de24aabda8 100644 (file)
@@ -181,9 +181,44 @@ config IP6_NF_SECURITY
        help
          This option adds a `security' table to iptables, for use
          with Mandatory Access Control (MAC) policy.
-        
+
          If unsure, say N.
 
+config NF_NAT_IPV6
+       tristate "IPv6 NAT"
+       depends on NF_CONNTRACK_IPV6
+       depends on NETFILTER_ADVANCED
+       select NF_NAT
+       help
+         The IPv6 NAT option allows masquerading, port forwarding and other
+         forms of full Network Address Port Translation. It is controlled by
+         the `nat' table in ip6tables, see the man page for ip6tables(8).
+
+         To compile it as a module, choose M here.  If unsure, say N.
+
+if NF_NAT_IPV6
+
+config IP6_NF_TARGET_MASQUERADE
+       tristate "MASQUERADE target support"
+       help
+         Masquerading is a special case of NAT: all outgoing connections are
+         changed to seem to come from a particular interface's address, and
+         if the interface goes down, those connections are lost.  This is
+         only useful for dialup accounts with dynamic IP address (ie. your IP
+         address will be different on next dialup).
+
+         To compile it as a module, choose M here.  If unsure, say N.
+
+config IP6_NF_TARGET_NPT
+       tristate "NPT (Network Prefix translation) target support"
+       help
+         This option adds the `SNPT' and `DNPT' targets, which perform
+         stateless IPv6-to-IPv6 Network Prefix Translation per RFC 6296.
+
+         To compile it as a module, choose M here.  If unsure, say N.
+
+endif # NF_NAT_IPV6
+
 endif # IP6_NF_IPTABLES
 
 endmenu
index 534d3f216f7b4fa14475e438cf3ebf2fbd4d1552..2d11fcc2cf3c5c3906b851e9c5daad69de26e0ed 100644 (file)
@@ -8,6 +8,7 @@ obj-$(CONFIG_IP6_NF_FILTER) += ip6table_filter.o
 obj-$(CONFIG_IP6_NF_MANGLE) += ip6table_mangle.o
 obj-$(CONFIG_IP6_NF_RAW) += ip6table_raw.o
 obj-$(CONFIG_IP6_NF_SECURITY) += ip6table_security.o
+obj-$(CONFIG_NF_NAT_IPV6) += ip6table_nat.o
 
 # objects for l3 independent conntrack
 nf_conntrack_ipv6-y  :=  nf_conntrack_l3proto_ipv6.o nf_conntrack_proto_icmpv6.o
@@ -15,6 +16,9 @@ nf_conntrack_ipv6-y  :=  nf_conntrack_l3proto_ipv6.o nf_conntrack_proto_icmpv6.o
 # l3 independent conntrack
 obj-$(CONFIG_NF_CONNTRACK_IPV6) += nf_conntrack_ipv6.o nf_defrag_ipv6.o
 
+nf_nat_ipv6-y          := nf_nat_l3proto_ipv6.o nf_nat_proto_icmpv6.o
+obj-$(CONFIG_NF_NAT_IPV6) += nf_nat_ipv6.o
+
 # defrag
 nf_defrag_ipv6-y := nf_defrag_ipv6_hooks.o nf_conntrack_reasm.o
 obj-$(CONFIG_NF_DEFRAG_IPV6) += nf_defrag_ipv6.o
@@ -30,4 +34,6 @@ obj-$(CONFIG_IP6_NF_MATCH_RPFILTER) += ip6t_rpfilter.o
 obj-$(CONFIG_IP6_NF_MATCH_RT) += ip6t_rt.o
 
 # targets
+obj-$(CONFIG_IP6_NF_TARGET_MASQUERADE) += ip6t_MASQUERADE.o
+obj-$(CONFIG_IP6_NF_TARGET_NPT) += ip6t_NPT.o
 obj-$(CONFIG_IP6_NF_TARGET_REJECT) += ip6t_REJECT.o
diff --git a/net/ipv6/netfilter/ip6t_MASQUERADE.c b/net/ipv6/netfilter/ip6t_MASQUERADE.c
new file mode 100644 (file)
index 0000000..60e9053
--- /dev/null
@@ -0,0 +1,135 @@
+/*
+ * Copyright (c) 2011 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Based on Rusty Russell's IPv6 MASQUERADE target. Development of IPv6
+ * NAT funded by Astaro.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/ipv6.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv6.h>
+#include <linux/netfilter/x_tables.h>
+#include <net/netfilter/nf_nat.h>
+#include <net/addrconf.h>
+#include <net/ipv6.h>
+
+static unsigned int
+masquerade_tg6(struct sk_buff *skb, const struct xt_action_param *par)
+{
+       const struct nf_nat_range *range = par->targinfo;
+       enum ip_conntrack_info ctinfo;
+       struct in6_addr src;
+       struct nf_conn *ct;
+       struct nf_nat_range newrange;
+
+       ct = nf_ct_get(skb, &ctinfo);
+       NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED ||
+                           ctinfo == IP_CT_RELATED_REPLY));
+
+       if (ipv6_dev_get_saddr(dev_net(par->out), par->out,
+                              &ipv6_hdr(skb)->daddr, 0, &src) < 0)
+               return NF_DROP;
+
+       nfct_nat(ct)->masq_index = par->out->ifindex;
+
+       newrange.flags          = range->flags | NF_NAT_RANGE_MAP_IPS;
+       newrange.min_addr.in6   = src;
+       newrange.max_addr.in6   = src;
+       newrange.min_proto      = range->min_proto;
+       newrange.max_proto      = range->max_proto;
+
+       return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_SRC);
+}
+
+static int masquerade_tg6_checkentry(const struct xt_tgchk_param *par)
+{
+       const struct nf_nat_range *range = par->targinfo;
+
+       if (range->flags & NF_NAT_RANGE_MAP_IPS)
+               return -EINVAL;
+       return 0;
+}
+
+static int device_cmp(struct nf_conn *ct, void *ifindex)
+{
+       const struct nf_conn_nat *nat = nfct_nat(ct);
+
+       if (!nat)
+               return 0;
+       if (nf_ct_l3num(ct) != NFPROTO_IPV6)
+               return 0;
+       return nat->masq_index == (int)(long)ifindex;
+}
+
+static int masq_device_event(struct notifier_block *this,
+                            unsigned long event, void *ptr)
+{
+       const struct net_device *dev = ptr;
+       struct net *net = dev_net(dev);
+
+       if (event == NETDEV_DOWN)
+               nf_ct_iterate_cleanup(net, device_cmp,
+                                     (void *)(long)dev->ifindex);
+
+       return NOTIFY_DONE;
+}
+
+static struct notifier_block masq_dev_notifier = {
+       .notifier_call  = masq_device_event,
+};
+
+static int masq_inet_event(struct notifier_block *this,
+                          unsigned long event, void *ptr)
+{
+       struct inet6_ifaddr *ifa = ptr;
+
+       return masq_device_event(this, event, ifa->idev->dev);
+}
+
+static struct notifier_block masq_inet_notifier = {
+       .notifier_call  = masq_inet_event,
+};
+
+static struct xt_target masquerade_tg6_reg __read_mostly = {
+       .name           = "MASQUERADE",
+       .family         = NFPROTO_IPV6,
+       .checkentry     = masquerade_tg6_checkentry,
+       .target         = masquerade_tg6,
+       .targetsize     = sizeof(struct nf_nat_range),
+       .table          = "nat",
+       .hooks          = 1 << NF_INET_POST_ROUTING,
+       .me             = THIS_MODULE,
+};
+
+static int __init masquerade_tg6_init(void)
+{
+       int err;
+
+       err = xt_register_target(&masquerade_tg6_reg);
+       if (err == 0) {
+               register_netdevice_notifier(&masq_dev_notifier);
+               register_inet6addr_notifier(&masq_inet_notifier);
+       }
+
+       return err;
+}
+static void __exit masquerade_tg6_exit(void)
+{
+       unregister_inet6addr_notifier(&masq_inet_notifier);
+       unregister_netdevice_notifier(&masq_dev_notifier);
+       xt_unregister_target(&masquerade_tg6_reg);
+}
+
+module_init(masquerade_tg6_init);
+module_exit(masquerade_tg6_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_DESCRIPTION("Xtables: automatic address SNAT");
diff --git a/net/ipv6/netfilter/ip6t_NPT.c b/net/ipv6/netfilter/ip6t_NPT.c
new file mode 100644 (file)
index 0000000..e948691
--- /dev/null
@@ -0,0 +1,165 @@
+/*
+ * Copyright (c) 2011, 2012 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/ipv6.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv6.h>
+#include <linux/netfilter_ipv6/ip6t_NPT.h>
+#include <linux/netfilter/x_tables.h>
+
+static __sum16 csum16_complement(__sum16 a)
+{
+       return (__force __sum16)(0xffff - (__force u16)a);
+}
+
+static __sum16 csum16_add(__sum16 a, __sum16 b)
+{
+       u16 sum;
+
+       sum = (__force u16)a + (__force u16)b;
+       sum += (__force u16)a < (__force u16)b;
+       return (__force __sum16)sum;
+}
+
+static __sum16 csum16_sub(__sum16 a, __sum16 b)
+{
+       return csum16_add(a, csum16_complement(b));
+}
+
+static int ip6t_npt_checkentry(const struct xt_tgchk_param *par)
+{
+       struct ip6t_npt_tginfo *npt = par->targinfo;
+       __sum16 src_sum = 0, dst_sum = 0;
+       unsigned int i;
+
+       if (npt->src_pfx_len > 64 || npt->dst_pfx_len > 64)
+               return -EINVAL;
+
+       for (i = 0; i < ARRAY_SIZE(npt->src_pfx.in6.s6_addr16); i++) {
+               src_sum = csum16_add(src_sum,
+                               (__force __sum16)npt->src_pfx.in6.s6_addr16[i]);
+               dst_sum = csum16_add(dst_sum,
+                               (__force __sum16)npt->dst_pfx.in6.s6_addr16[i]);
+       }
+
+       npt->adjustment = csum16_sub(src_sum, dst_sum);
+       return 0;
+}
+
+static bool ip6t_npt_map_pfx(const struct ip6t_npt_tginfo *npt,
+                            struct in6_addr *addr)
+{
+       unsigned int pfx_len;
+       unsigned int i, idx;
+       __be32 mask;
+       __sum16 sum;
+
+       pfx_len = max(npt->src_pfx_len, npt->dst_pfx_len);
+       for (i = 0; i < pfx_len; i += 32) {
+               if (pfx_len - i >= 32)
+                       mask = 0;
+               else
+                       mask = htonl(~((1 << (pfx_len - i)) - 1));
+
+               idx = i / 32;
+               addr->s6_addr32[idx] &= mask;
+               addr->s6_addr32[idx] |= npt->dst_pfx.in6.s6_addr32[idx];
+       }
+
+       if (pfx_len <= 48)
+               idx = 3;
+       else {
+               for (idx = 4; idx < ARRAY_SIZE(addr->s6_addr16); idx++) {
+                       if ((__force __sum16)addr->s6_addr16[idx] !=
+                           CSUM_MANGLED_0)
+                               break;
+               }
+               if (idx == ARRAY_SIZE(addr->s6_addr16))
+                       return false;
+       }
+
+       sum = csum16_add((__force __sum16)addr->s6_addr16[idx],
+                        npt->adjustment);
+       if (sum == CSUM_MANGLED_0)
+               sum = 0;
+       *(__force __sum16 *)&addr->s6_addr16[idx] = sum;
+
+       return true;
+}
+
+static unsigned int
+ip6t_snpt_tg(struct sk_buff *skb, const struct xt_action_param *par)
+{
+       const struct ip6t_npt_tginfo *npt = par->targinfo;
+
+       if (!ip6t_npt_map_pfx(npt, &ipv6_hdr(skb)->saddr)) {
+               icmpv6_send(skb, ICMPV6_PARAMPROB, ICMPV6_HDR_FIELD,
+                           offsetof(struct ipv6hdr, saddr));
+               return NF_DROP;
+       }
+       return XT_CONTINUE;
+}
+
+static unsigned int
+ip6t_dnpt_tg(struct sk_buff *skb, const struct xt_action_param *par)
+{
+       const struct ip6t_npt_tginfo *npt = par->targinfo;
+
+       if (!ip6t_npt_map_pfx(npt, &ipv6_hdr(skb)->daddr)) {
+               icmpv6_send(skb, ICMPV6_PARAMPROB, ICMPV6_HDR_FIELD,
+                           offsetof(struct ipv6hdr, daddr));
+               return NF_DROP;
+       }
+       return XT_CONTINUE;
+}
+
+static struct xt_target ip6t_npt_target_reg[] __read_mostly = {
+       {
+               .name           = "SNPT",
+               .target         = ip6t_snpt_tg,
+               .targetsize     = sizeof(struct ip6t_npt_tginfo),
+               .checkentry     = ip6t_npt_checkentry,
+               .family         = NFPROTO_IPV6,
+               .hooks          = (1 << NF_INET_LOCAL_IN) |
+                                 (1 << NF_INET_POST_ROUTING),
+               .me             = THIS_MODULE,
+       },
+       {
+               .name           = "DNPT",
+               .target         = ip6t_dnpt_tg,
+               .targetsize     = sizeof(struct ip6t_npt_tginfo),
+               .checkentry     = ip6t_npt_checkentry,
+               .family         = NFPROTO_IPV6,
+               .hooks          = (1 << NF_INET_PRE_ROUTING) |
+                                 (1 << NF_INET_LOCAL_OUT),
+               .me             = THIS_MODULE,
+       },
+};
+
+static int __init ip6t_npt_init(void)
+{
+       return xt_register_targets(ip6t_npt_target_reg,
+                                  ARRAY_SIZE(ip6t_npt_target_reg));
+}
+
+static void __exit ip6t_npt_exit(void)
+{
+       xt_unregister_targets(ip6t_npt_target_reg,
+                             ARRAY_SIZE(ip6t_npt_target_reg));
+}
+
+module_init(ip6t_npt_init);
+module_exit(ip6t_npt_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("IPv6-to-IPv6 Network Prefix Translation (RFC 6296)");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS("ip6t_SNPT");
+MODULE_ALIAS("ip6t_DNPT");
index 325e59a0224ffa3f0b08e7474628d614e2d3ee49..beb5777d20437321cf19d37ad3e7c630d8f0663c 100644 (file)
@@ -61,9 +61,7 @@ static int __net_init ip6table_filter_net_init(struct net *net)
        net->ipv6.ip6table_filter =
                ip6t_register_table(net, &packet_filter, repl);
        kfree(repl);
-       if (IS_ERR(net->ipv6.ip6table_filter))
-               return PTR_ERR(net->ipv6.ip6table_filter);
-       return 0;
+       return PTR_RET(net->ipv6.ip6table_filter);
 }
 
 static void __net_exit ip6table_filter_net_exit(struct net *net)
index 4d782405f125da5e7819333ce7dfadd501044c12..7431121b87dee6fa628f53d37dfa7f93c546a60b 100644 (file)
@@ -97,9 +97,7 @@ static int __net_init ip6table_mangle_net_init(struct net *net)
        net->ipv6.ip6table_mangle =
                ip6t_register_table(net, &packet_mangler, repl);
        kfree(repl);
-       if (IS_ERR(net->ipv6.ip6table_mangle))
-               return PTR_ERR(net->ipv6.ip6table_mangle);
-       return 0;
+       return PTR_RET(net->ipv6.ip6table_mangle);
 }
 
 static void __net_exit ip6table_mangle_net_exit(struct net *net)
diff --git a/net/ipv6/netfilter/ip6table_nat.c b/net/ipv6/netfilter/ip6table_nat.c
new file mode 100644 (file)
index 0000000..e418bd6
--- /dev/null
@@ -0,0 +1,321 @@
+/*
+ * Copyright (c) 2011 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Based on Rusty Russell's IPv4 NAT code. Development of IPv6 NAT
+ * funded by Astaro.
+ */
+
+#include <linux/module.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv6.h>
+#include <linux/netfilter_ipv6/ip6_tables.h>
+#include <linux/ipv6.h>
+#include <net/ipv6.h>
+
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nf_nat_core.h>
+#include <net/netfilter/nf_nat_l3proto.h>
+
+static const struct xt_table nf_nat_ipv6_table = {
+       .name           = "nat",
+       .valid_hooks    = (1 << NF_INET_PRE_ROUTING) |
+                         (1 << NF_INET_POST_ROUTING) |
+                         (1 << NF_INET_LOCAL_OUT) |
+                         (1 << NF_INET_LOCAL_IN),
+       .me             = THIS_MODULE,
+       .af             = NFPROTO_IPV6,
+};
+
+static unsigned int alloc_null_binding(struct nf_conn *ct, unsigned int hooknum)
+{
+       /* Force range to this IP; let proto decide mapping for
+        * per-proto parts (hence not IP_NAT_RANGE_PROTO_SPECIFIED).
+        */
+       struct nf_nat_range range;
+
+       range.flags = 0;
+       pr_debug("Allocating NULL binding for %p (%pI6)\n", ct,
+                HOOK2MANIP(hooknum) == NF_NAT_MANIP_SRC ?
+                &ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip6 :
+                &ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip6);
+
+       return nf_nat_setup_info(ct, &range, HOOK2MANIP(hooknum));
+}
+
+static unsigned int nf_nat_rule_find(struct sk_buff *skb, unsigned int hooknum,
+                                    const struct net_device *in,
+                                    const struct net_device *out,
+                                    struct nf_conn *ct)
+{
+       struct net *net = nf_ct_net(ct);
+       unsigned int ret;
+
+       ret = ip6t_do_table(skb, hooknum, in, out, net->ipv6.ip6table_nat);
+       if (ret == NF_ACCEPT) {
+               if (!nf_nat_initialized(ct, HOOK2MANIP(hooknum)))
+                       ret = alloc_null_binding(ct, hooknum);
+       }
+       return ret;
+}
+
+static unsigned int
+nf_nat_ipv6_fn(unsigned int hooknum,
+              struct sk_buff *skb,
+              const struct net_device *in,
+              const struct net_device *out,
+              int (*okfn)(struct sk_buff *))
+{
+       struct nf_conn *ct;
+       enum ip_conntrack_info ctinfo;
+       struct nf_conn_nat *nat;
+       enum nf_nat_manip_type maniptype = HOOK2MANIP(hooknum);
+       __be16 frag_off;
+       int hdrlen;
+       u8 nexthdr;
+
+       ct = nf_ct_get(skb, &ctinfo);
+       /* Can't track?  It's not due to stress, or conntrack would
+        * have dropped it.  Hence it's the user's responsibility to
+        * packet filter it out, or implement conntrack/NAT for that
+        * protocol. 8) --RR
+        */
+       if (!ct)
+               return NF_ACCEPT;
+
+       /* Don't try to NAT if this packet is not conntracked */
+       if (nf_ct_is_untracked(ct))
+               return NF_ACCEPT;
+
+       nat = nfct_nat(ct);
+       if (!nat) {
+               /* NAT module was loaded late. */
+               if (nf_ct_is_confirmed(ct))
+                       return NF_ACCEPT;
+               nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);
+               if (nat == NULL) {
+                       pr_debug("failed to add NAT extension\n");
+                       return NF_ACCEPT;
+               }
+       }
+
+       switch (ctinfo) {
+       case IP_CT_RELATED:
+       case IP_CT_RELATED_REPLY:
+               nexthdr = ipv6_hdr(skb)->nexthdr;
+               hdrlen = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr),
+                                         &nexthdr, &frag_off);
+
+               if (hdrlen >= 0 && nexthdr == IPPROTO_ICMPV6) {
+                       if (!nf_nat_icmpv6_reply_translation(skb, ct, ctinfo,
+                                                            hooknum, hdrlen))
+                               return NF_DROP;
+                       else
+                               return NF_ACCEPT;
+               }
+               /* Fall thru... (Only ICMPs can be IP_CT_IS_REPLY) */
+       case IP_CT_NEW:
+               /* Seen it before?  This can happen for loopback, retrans,
+                * or local packets.
+                */
+               if (!nf_nat_initialized(ct, maniptype)) {
+                       unsigned int ret;
+
+                       ret = nf_nat_rule_find(skb, hooknum, in, out, ct);
+                       if (ret != NF_ACCEPT)
+                               return ret;
+               } else
+                       pr_debug("Already setup manip %s for ct %p\n",
+                                maniptype == NF_NAT_MANIP_SRC ? "SRC" : "DST",
+                                ct);
+               break;
+
+       default:
+               /* ESTABLISHED */
+               NF_CT_ASSERT(ctinfo == IP_CT_ESTABLISHED ||
+                            ctinfo == IP_CT_ESTABLISHED_REPLY);
+       }
+
+       return nf_nat_packet(ct, ctinfo, hooknum, skb);
+}
+
+static unsigned int
+nf_nat_ipv6_in(unsigned int hooknum,
+              struct sk_buff *skb,
+              const struct net_device *in,
+              const struct net_device *out,
+              int (*okfn)(struct sk_buff *))
+{
+       unsigned int ret;
+       struct in6_addr daddr = ipv6_hdr(skb)->daddr;
+
+       ret = nf_nat_ipv6_fn(hooknum, skb, in, out, okfn);
+       if (ret != NF_DROP && ret != NF_STOLEN &&
+           ipv6_addr_cmp(&daddr, &ipv6_hdr(skb)->daddr))
+               skb_dst_drop(skb);
+
+       return ret;
+}
+
+static unsigned int
+nf_nat_ipv6_out(unsigned int hooknum,
+               struct sk_buff *skb,
+               const struct net_device *in,
+               const struct net_device *out,
+               int (*okfn)(struct sk_buff *))
+{
+#ifdef CONFIG_XFRM
+       const struct nf_conn *ct;
+       enum ip_conntrack_info ctinfo;
+#endif
+       unsigned int ret;
+
+       /* root is playing with raw sockets. */
+       if (skb->len < sizeof(struct ipv6hdr))
+               return NF_ACCEPT;
+
+       ret = nf_nat_ipv6_fn(hooknum, skb, in, out, okfn);
+#ifdef CONFIG_XFRM
+       if (ret != NF_DROP && ret != NF_STOLEN &&
+           !(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) &&
+           (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
+               enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+
+               if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.src.u3,
+                                     &ct->tuplehash[!dir].tuple.dst.u3) ||
+                   (ct->tuplehash[dir].tuple.src.u.all !=
+                    ct->tuplehash[!dir].tuple.dst.u.all))
+                       if (nf_xfrm_me_harder(skb, AF_INET6) < 0)
+                               ret = NF_DROP;
+       }
+#endif
+       return ret;
+}
+
+static unsigned int
+nf_nat_ipv6_local_fn(unsigned int hooknum,
+                    struct sk_buff *skb,
+                    const struct net_device *in,
+                    const struct net_device *out,
+                    int (*okfn)(struct sk_buff *))
+{
+       const struct nf_conn *ct;
+       enum ip_conntrack_info ctinfo;
+       unsigned int ret;
+
+       /* root is playing with raw sockets. */
+       if (skb->len < sizeof(struct ipv6hdr))
+               return NF_ACCEPT;
+
+       ret = nf_nat_ipv6_fn(hooknum, skb, in, out, okfn);
+       if (ret != NF_DROP && ret != NF_STOLEN &&
+           (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
+               enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+
+               if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.dst.u3,
+                                     &ct->tuplehash[!dir].tuple.src.u3)) {
+                       if (ip6_route_me_harder(skb))
+                               ret = NF_DROP;
+               }
+#ifdef CONFIG_XFRM
+               else if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) &&
+                        ct->tuplehash[dir].tuple.dst.u.all !=
+                        ct->tuplehash[!dir].tuple.src.u.all)
+                       if (nf_xfrm_me_harder(skb, AF_INET6))
+                               ret = NF_DROP;
+#endif
+       }
+       return ret;
+}
+
+static struct nf_hook_ops nf_nat_ipv6_ops[] __read_mostly = {
+       /* Before packet filtering, change destination */
+       {
+               .hook           = nf_nat_ipv6_in,
+               .owner          = THIS_MODULE,
+               .pf             = NFPROTO_IPV6,
+               .hooknum        = NF_INET_PRE_ROUTING,
+               .priority       = NF_IP6_PRI_NAT_DST,
+       },
+       /* After packet filtering, change source */
+       {
+               .hook           = nf_nat_ipv6_out,
+               .owner          = THIS_MODULE,
+               .pf             = NFPROTO_IPV6,
+               .hooknum        = NF_INET_POST_ROUTING,
+               .priority       = NF_IP6_PRI_NAT_SRC,
+       },
+       /* Before packet filtering, change destination */
+       {
+               .hook           = nf_nat_ipv6_local_fn,
+               .owner          = THIS_MODULE,
+               .pf             = NFPROTO_IPV6,
+               .hooknum        = NF_INET_LOCAL_OUT,
+               .priority       = NF_IP6_PRI_NAT_DST,
+       },
+       /* After packet filtering, change source */
+       {
+               .hook           = nf_nat_ipv6_fn,
+               .owner          = THIS_MODULE,
+               .pf             = NFPROTO_IPV6,
+               .hooknum        = NF_INET_LOCAL_IN,
+               .priority       = NF_IP6_PRI_NAT_SRC,
+       },
+};
+
+static int __net_init ip6table_nat_net_init(struct net *net)
+{
+       struct ip6t_replace *repl;
+
+       repl = ip6t_alloc_initial_table(&nf_nat_ipv6_table);
+       if (repl == NULL)
+               return -ENOMEM;
+       net->ipv6.ip6table_nat = ip6t_register_table(net, &nf_nat_ipv6_table, repl);
+       kfree(repl);
+       if (IS_ERR(net->ipv6.ip6table_nat))
+               return PTR_ERR(net->ipv6.ip6table_nat);
+       return 0;
+}
+
+static void __net_exit ip6table_nat_net_exit(struct net *net)
+{
+       ip6t_unregister_table(net, net->ipv6.ip6table_nat);
+}
+
+static struct pernet_operations ip6table_nat_net_ops = {
+       .init   = ip6table_nat_net_init,
+       .exit   = ip6table_nat_net_exit,
+};
+
+static int __init ip6table_nat_init(void)
+{
+       int err;
+
+       err = register_pernet_subsys(&ip6table_nat_net_ops);
+       if (err < 0)
+               goto err1;
+
+       err = nf_register_hooks(nf_nat_ipv6_ops, ARRAY_SIZE(nf_nat_ipv6_ops));
+       if (err < 0)
+               goto err2;
+       return 0;
+
+err2:
+       unregister_pernet_subsys(&ip6table_nat_net_ops);
+err1:
+       return err;
+}
+
+static void __exit ip6table_nat_exit(void)
+{
+       nf_unregister_hooks(nf_nat_ipv6_ops, ARRAY_SIZE(nf_nat_ipv6_ops));
+       unregister_pernet_subsys(&ip6table_nat_net_ops);
+}
+
+module_init(ip6table_nat_init);
+module_exit(ip6table_nat_exit);
+
+MODULE_LICENSE("GPL");
index 5b9926a011bd99faff714042746f161ee58a152a..60d1bddff7a038c54c518e850c64f08745accccb 100644 (file)
@@ -40,9 +40,7 @@ static int __net_init ip6table_raw_net_init(struct net *net)
        net->ipv6.ip6table_raw =
                ip6t_register_table(net, &packet_raw, repl);
        kfree(repl);
-       if (IS_ERR(net->ipv6.ip6table_raw))
-               return PTR_ERR(net->ipv6.ip6table_raw);
-       return 0;
+       return PTR_RET(net->ipv6.ip6table_raw);
 }
 
 static void __net_exit ip6table_raw_net_exit(struct net *net)
index 91aa2b4d83c9c1571d4538ad380dfae5296a180b..db155351339c7c63ed48d23233ef47919192a05c 100644 (file)
@@ -58,10 +58,7 @@ static int __net_init ip6table_security_net_init(struct net *net)
        net->ipv6.ip6table_security =
                ip6t_register_table(net, &security_table, repl);
        kfree(repl);
-       if (IS_ERR(net->ipv6.ip6table_security))
-               return PTR_ERR(net->ipv6.ip6table_security);
-
-       return 0;
+       return PTR_RET(net->ipv6.ip6table_security);
 }
 
 static void __net_exit ip6table_security_net_exit(struct net *net)
index 4794f96cf2e01bfcd5cc858cc22f73b325dbc907..8860d23e61cfff646e3e034da7fe4c985fbb5575 100644 (file)
@@ -28,6 +28,7 @@
 #include <net/netfilter/nf_conntrack_core.h>
 #include <net/netfilter/nf_conntrack_zones.h>
 #include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
+#include <net/netfilter/nf_nat_helper.h>
 #include <net/netfilter/ipv6/nf_defrag_ipv6.h>
 #include <net/netfilter/nf_log.h>
 
@@ -64,82 +65,31 @@ static int ipv6_print_tuple(struct seq_file *s,
                          tuple->src.u3.ip6, tuple->dst.u3.ip6);
 }
 
-/*
- * Based on ipv6_skip_exthdr() in net/ipv6/exthdr.c
- *
- * This function parses (probably truncated) exthdr set "hdr"
- * of length "len". "nexthdrp" initially points to some place,
- * where type of the first header can be found.
- *
- * It skips all well-known exthdrs, and returns pointer to the start
- * of unparsable area i.e. the first header with unknown type.
- * if success, *nexthdr is updated by type/protocol of this header.
- *
- * NOTES: - it may return pointer pointing beyond end of packet,
- *          if the last recognized header is truncated in the middle.
- *        - if packet is truncated, so that all parsed headers are skipped,
- *          it returns -1.
- *        - if packet is fragmented, return pointer of the fragment header.
- *        - ESP is unparsable for now and considered like
- *          normal payload protocol.
- *        - Note also special handling of AUTH header. Thanks to IPsec wizards.
- */
-
-static int nf_ct_ipv6_skip_exthdr(const struct sk_buff *skb, int start,
-                                 u8 *nexthdrp, int len)
-{
-       u8 nexthdr = *nexthdrp;
-
-       while (ipv6_ext_hdr(nexthdr)) {
-               struct ipv6_opt_hdr hdr;
-               int hdrlen;
-
-               if (len < (int)sizeof(struct ipv6_opt_hdr))
-                       return -1;
-               if (nexthdr == NEXTHDR_NONE)
-                       break;
-               if (nexthdr == NEXTHDR_FRAGMENT)
-                       break;
-               if (skb_copy_bits(skb, start, &hdr, sizeof(hdr)))
-                       BUG();
-               if (nexthdr == NEXTHDR_AUTH)
-                       hdrlen = (hdr.hdrlen+2)<<2;
-               else
-                       hdrlen = ipv6_optlen(&hdr);
-
-               nexthdr = hdr.nexthdr;
-               len -= hdrlen;
-               start += hdrlen;
-       }
-
-       *nexthdrp = nexthdr;
-       return start;
-}
-
 static int ipv6_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
                            unsigned int *dataoff, u_int8_t *protonum)
 {
        unsigned int extoff = nhoff + sizeof(struct ipv6hdr);
-       unsigned char pnum;
+       __be16 frag_off;
        int protoff;
+       u8 nexthdr;
 
        if (skb_copy_bits(skb, nhoff + offsetof(struct ipv6hdr, nexthdr),
-                         &pnum, sizeof(pnum)) != 0) {
+                         &nexthdr, sizeof(nexthdr)) != 0) {
                pr_debug("ip6_conntrack_core: can't get nexthdr\n");
                return -NF_ACCEPT;
        }
-       protoff = nf_ct_ipv6_skip_exthdr(skb, extoff, &pnum, skb->len - extoff);
+       protoff = ipv6_skip_exthdr(skb, extoff, &nexthdr, &frag_off);
        /*
         * (protoff == skb->len) mean that the packet doesn't have no data
         * except of IPv6 & ext headers. but it's tracked anyway. - YK
         */
-       if ((protoff < 0) || (protoff > skb->len)) {
+       if (protoff < 0 || (frag_off & htons(~0x7)) != 0) {
                pr_debug("ip6_conntrack_core: can't find proto in pkt\n");
                return -NF_ACCEPT;
        }
 
        *dataoff = protoff;
-       *protonum = pnum;
+       *protonum = nexthdr;
        return NF_ACCEPT;
 }
 
@@ -153,10 +103,10 @@ static unsigned int ipv6_helper(unsigned int hooknum,
        const struct nf_conn_help *help;
        const struct nf_conntrack_helper *helper;
        enum ip_conntrack_info ctinfo;
-       unsigned int ret, protoff;
-       unsigned int extoff = (u8 *)(ipv6_hdr(skb) + 1) - skb->data;
-       unsigned char pnum = ipv6_hdr(skb)->nexthdr;
-
+       unsigned int ret;
+       __be16 frag_off;
+       int protoff;
+       u8 nexthdr;
 
        /* This is where we call the helper: as the packet goes out. */
        ct = nf_ct_get(skb, &ctinfo);
@@ -171,9 +121,10 @@ static unsigned int ipv6_helper(unsigned int hooknum,
        if (!helper)
                return NF_ACCEPT;
 
-       protoff = nf_ct_ipv6_skip_exthdr(skb, extoff, &pnum,
-                                        skb->len - extoff);
-       if (protoff > skb->len || pnum == NEXTHDR_FRAGMENT) {
+       nexthdr = ipv6_hdr(skb)->nexthdr;
+       protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr,
+                                  &frag_off);
+       if (protoff < 0 || (frag_off & htons(~0x7)) != 0) {
                pr_debug("proto header not found\n");
                return NF_ACCEPT;
        }
@@ -192,6 +143,36 @@ static unsigned int ipv6_confirm(unsigned int hooknum,
                                 const struct net_device *out,
                                 int (*okfn)(struct sk_buff *))
 {
+       struct nf_conn *ct;
+       enum ip_conntrack_info ctinfo;
+       unsigned char pnum = ipv6_hdr(skb)->nexthdr;
+       int protoff;
+       __be16 frag_off;
+
+       ct = nf_ct_get(skb, &ctinfo);
+       if (!ct || ctinfo == IP_CT_RELATED_REPLY)
+               goto out;
+
+       protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &pnum,
+                                  &frag_off);
+       if (protoff < 0 || (frag_off & htons(~0x7)) != 0) {
+               pr_debug("proto header not found\n");
+               goto out;
+       }
+
+       /* adjust seqs for loopback traffic only in outgoing direction */
+       if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status) &&
+           !nf_is_loopback_packet(skb)) {
+               typeof(nf_nat_seq_adjust_hook) seq_adjust;
+
+               seq_adjust = rcu_dereference(nf_nat_seq_adjust_hook);
+               if (!seq_adjust ||
+                   !seq_adjust(skb, ct, ctinfo, protoff)) {
+                       NF_CT_STAT_INC_ATOMIC(nf_ct_net(ct), drop);
+                       return NF_DROP;
+               }
+       }
+out:
        /* We've seen it coming out the other side: confirm it */
        return nf_conntrack_confirm(skb);
 }
@@ -199,9 +180,14 @@ static unsigned int ipv6_confirm(unsigned int hooknum,
 static unsigned int __ipv6_conntrack_in(struct net *net,
                                        unsigned int hooknum,
                                        struct sk_buff *skb,
+                                       const struct net_device *in,
+                                       const struct net_device *out,
                                        int (*okfn)(struct sk_buff *))
 {
        struct sk_buff *reasm = skb->nfct_reasm;
+       const struct nf_conn_help *help;
+       struct nf_conn *ct;
+       enum ip_conntrack_info ctinfo;
 
        /* This packet is fragmented and has reassembled packet. */
        if (reasm) {
@@ -213,6 +199,25 @@ static unsigned int __ipv6_conntrack_in(struct net *net,
                        if (ret != NF_ACCEPT)
                                return ret;
                }
+
+               /* Conntrack helpers need the entire reassembled packet in the
+                * POST_ROUTING hook. In case of unconfirmed connections NAT
+                * might reassign a helper, so the entire packet is also
+                * required.
+                */
+               ct = nf_ct_get(reasm, &ctinfo);
+               if (ct != NULL && !nf_ct_is_untracked(ct)) {
+                       help = nfct_help(ct);
+                       if ((help && help->helper) || !nf_ct_is_confirmed(ct)) {
+                               nf_conntrack_get_reasm(skb);
+                               NF_HOOK_THRESH(NFPROTO_IPV6, hooknum, reasm,
+                                              (struct net_device *)in,
+                                              (struct net_device *)out,
+                                              okfn, NF_IP6_PRI_CONNTRACK + 1);
+                               return NF_DROP_ERR(-ECANCELED);
+                       }
+               }
+
                nf_conntrack_get(reasm->nfct);
                skb->nfct = reasm->nfct;
                skb->nfctinfo = reasm->nfctinfo;
@@ -228,7 +233,7 @@ static unsigned int ipv6_conntrack_in(unsigned int hooknum,
                                      const struct net_device *out,
                                      int (*okfn)(struct sk_buff *))
 {
-       return __ipv6_conntrack_in(dev_net(in), hooknum, skb, okfn);
+       return __ipv6_conntrack_in(dev_net(in), hooknum, skb, in, out, okfn);
 }
 
 static unsigned int ipv6_conntrack_local(unsigned int hooknum,
@@ -242,7 +247,7 @@ static unsigned int ipv6_conntrack_local(unsigned int hooknum,
                net_notice_ratelimited("ipv6_conntrack_local: packet too short\n");
                return NF_ACCEPT;
        }
-       return __ipv6_conntrack_in(dev_net(out), hooknum, skb, okfn);
+       return __ipv6_conntrack_in(dev_net(out), hooknum, skb, in, out, okfn);
 }
 
 static struct nf_hook_ops ipv6_conntrack_ops[] __read_mostly = {
index c9c78c2e666b86b397d95756695afe49a6a47b5a..18bd9bbbd1c6c0f50d8bf74c947c05f91a4f20cd 100644 (file)
@@ -57,41 +57,27 @@ struct nf_ct_frag6_skb_cb
 
 #define NFCT_FRAG6_CB(skb)     ((struct nf_ct_frag6_skb_cb*)((skb)->cb))
 
-struct nf_ct_frag6_queue
-{
-       struct inet_frag_queue  q;
-
-       __be32                  id;             /* fragment id          */
-       u32                     user;
-       struct in6_addr         saddr;
-       struct in6_addr         daddr;
-
-       unsigned int            csum;
-       __u16                   nhoffset;
-};
-
 static struct inet_frags nf_frags;
-static struct netns_frags nf_init_frags;
 
 #ifdef CONFIG_SYSCTL
 static struct ctl_table nf_ct_frag6_sysctl_table[] = {
        {
                .procname       = "nf_conntrack_frag6_timeout",
-               .data           = &nf_init_frags.timeout,
+               .data           = &init_net.nf_frag.frags.timeout,
                .maxlen         = sizeof(unsigned int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
        },
        {
                .procname       = "nf_conntrack_frag6_low_thresh",
-               .data           = &nf_init_frags.low_thresh,
+               .data           = &init_net.nf_frag.frags.low_thresh,
                .maxlen         = sizeof(unsigned int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
        {
                .procname       = "nf_conntrack_frag6_high_thresh",
-               .data           = &nf_init_frags.high_thresh,
+               .data           = &init_net.nf_frag.frags.high_thresh,
                .maxlen         = sizeof(unsigned int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
@@ -99,68 +85,86 @@ static struct ctl_table nf_ct_frag6_sysctl_table[] = {
        { }
 };
 
-static struct ctl_table_header *nf_ct_frag6_sysctl_header;
-#endif
-
-static unsigned int nf_hashfn(struct inet_frag_queue *q)
+static int __net_init nf_ct_frag6_sysctl_register(struct net *net)
 {
-       const struct nf_ct_frag6_queue *nq;
+       struct ctl_table *table;
+       struct ctl_table_header *hdr;
+
+       table = nf_ct_frag6_sysctl_table;
+       if (!net_eq(net, &init_net)) {
+               table = kmemdup(table, sizeof(nf_ct_frag6_sysctl_table),
+                               GFP_KERNEL);
+               if (table == NULL)
+                       goto err_alloc;
+
+               table[0].data = &net->ipv6.frags.high_thresh;
+               table[1].data = &net->ipv6.frags.low_thresh;
+               table[2].data = &net->ipv6.frags.timeout;
+       }
 
-       nq = container_of(q, struct nf_ct_frag6_queue, q);
-       return inet6_hash_frag(nq->id, &nq->saddr, &nq->daddr, nf_frags.rnd);
+       hdr = register_net_sysctl(net, "net/netfilter", table);
+       if (hdr == NULL)
+               goto err_reg;
+
+       net->nf_frag.sysctl.frags_hdr = hdr;
+       return 0;
+
+err_reg:
+       if (!net_eq(net, &init_net))
+               kfree(table);
+err_alloc:
+       return -ENOMEM;
 }
 
-static void nf_skb_free(struct sk_buff *skb)
+static void __net_exit nf_ct_frags6_sysctl_unregister(struct net *net)
 {
-       if (NFCT_FRAG6_CB(skb)->orig)
-               kfree_skb(NFCT_FRAG6_CB(skb)->orig);
-}
+       struct ctl_table *table;
 
-/* Destruction primitives. */
+       table = net->nf_frag.sysctl.frags_hdr->ctl_table_arg;
+       unregister_net_sysctl_table(net->nf_frag.sysctl.frags_hdr);
+       if (!net_eq(net, &init_net))
+               kfree(table);
+}
 
-static __inline__ void fq_put(struct nf_ct_frag6_queue *fq)
+#else
+static int __net_init nf_ct_frag6_sysctl_register(struct net *net)
 {
-       inet_frag_put(&fq->q, &nf_frags);
+       return 0;
 }
+static void __net_exit nf_ct_frags6_sysctl_unregister(struct net *net)
+{
+}
+#endif
 
-/* Kill fq entry. It is not destroyed immediately,
- * because caller (and someone more) holds reference count.
- */
-static __inline__ void fq_kill(struct nf_ct_frag6_queue *fq)
+static unsigned int nf_hashfn(struct inet_frag_queue *q)
 {
-       inet_frag_kill(&fq->q, &nf_frags);
+       const struct frag_queue *nq;
+
+       nq = container_of(q, struct frag_queue, q);
+       return inet6_hash_frag(nq->id, &nq->saddr, &nq->daddr, nf_frags.rnd);
 }
 
-static void nf_ct_frag6_evictor(void)
+static void nf_skb_free(struct sk_buff *skb)
 {
-       local_bh_disable();
-       inet_frag_evictor(&nf_init_frags, &nf_frags);
-       local_bh_enable();
+       if (NFCT_FRAG6_CB(skb)->orig)
+               kfree_skb(NFCT_FRAG6_CB(skb)->orig);
 }
 
 static void nf_ct_frag6_expire(unsigned long data)
 {
-       struct nf_ct_frag6_queue *fq;
-
-       fq = container_of((struct inet_frag_queue *)data,
-                       struct nf_ct_frag6_queue, q);
-
-       spin_lock(&fq->q.lock);
+       struct frag_queue *fq;
+       struct net *net;
 
-       if (fq->q.last_in & INET_FRAG_COMPLETE)
-               goto out;
+       fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);
+       net = container_of(fq->q.net, struct net, nf_frag.frags);
 
-       fq_kill(fq);
-
-out:
-       spin_unlock(&fq->q.lock);
-       fq_put(fq);
+       ip6_expire_frag_queue(net, fq, &nf_frags);
 }
 
 /* Creation primitives. */
-
-static __inline__ struct nf_ct_frag6_queue *
-fq_find(__be32 id, u32 user, struct in6_addr *src, struct in6_addr *dst)
+static inline struct frag_queue *fq_find(struct net *net, __be32 id,
+                                        u32 user, struct in6_addr *src,
+                                        struct in6_addr *dst)
 {
        struct inet_frag_queue *q;
        struct ip6_create_arg arg;
@@ -174,22 +178,23 @@ fq_find(__be32 id, u32 user, struct in6_addr *src, struct in6_addr *dst)
        read_lock_bh(&nf_frags.lock);
        hash = inet6_hash_frag(id, src, dst, nf_frags.rnd);
 
-       q = inet_frag_find(&nf_init_frags, &nf_frags, &arg, hash);
+       q = inet_frag_find(&net->nf_frag.frags, &nf_frags, &arg, hash);
        local_bh_enable();
        if (q == NULL)
                goto oom;
 
-       return container_of(q, struct nf_ct_frag6_queue, q);
+       return container_of(q, struct frag_queue, q);
 
 oom:
        return NULL;
 }
 
 
-static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb,
+static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,
                             const struct frag_hdr *fhdr, int nhoff)
 {
        struct sk_buff *prev, *next;
+       unsigned int payload_len;
        int offset, end;
 
        if (fq->q.last_in & INET_FRAG_COMPLETE) {
@@ -197,8 +202,10 @@ static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb,
                goto err;
        }
 
+       payload_len = ntohs(ipv6_hdr(skb)->payload_len);
+
        offset = ntohs(fhdr->frag_off) & ~0x7;
-       end = offset + (ntohs(ipv6_hdr(skb)->payload_len) -
+       end = offset + (payload_len -
                        ((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));
 
        if ((unsigned int)end > IPV6_MAXPLEN) {
@@ -307,7 +314,9 @@ found:
        skb->dev = NULL;
        fq->q.stamp = skb->tstamp;
        fq->q.meat += skb->len;
-       atomic_add(skb->truesize, &nf_init_frags.mem);
+       if (payload_len > fq->q.max_size)
+               fq->q.max_size = payload_len;
+       atomic_add(skb->truesize, &fq->q.net->mem);
 
        /* The first fragment.
         * nhoffset is obtained from the first fragment, of course.
@@ -317,12 +326,12 @@ found:
                fq->q.last_in |= INET_FRAG_FIRST_IN;
        }
        write_lock(&nf_frags.lock);
-       list_move_tail(&fq->q.lru_list, &nf_init_frags.lru_list);
+       list_move_tail(&fq->q.lru_list, &fq->q.net->lru_list);
        write_unlock(&nf_frags.lock);
        return 0;
 
 discard_fq:
-       fq_kill(fq);
+       inet_frag_kill(&fq->q, &nf_frags);
 err:
        return -1;
 }
@@ -337,12 +346,12 @@ err:
  *     the last and the first frames arrived and all the bits are here.
  */
 static struct sk_buff *
-nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev)
+nf_ct_frag6_reasm(struct frag_queue *fq, struct net_device *dev)
 {
        struct sk_buff *fp, *op, *head = fq->q.fragments;
        int    payload_len;
 
-       fq_kill(fq);
+       inet_frag_kill(&fq->q, &nf_frags);
 
        WARN_ON(head == NULL);
        WARN_ON(NFCT_FRAG6_CB(head)->offset != 0);
@@ -386,7 +395,7 @@ nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev)
                clone->ip_summed = head->ip_summed;
 
                NFCT_FRAG6_CB(clone)->orig = NULL;
-               atomic_add(clone->truesize, &nf_init_frags.mem);
+               atomic_add(clone->truesize, &fq->q.net->mem);
        }
 
        /* We have to remove fragment header from datagram and to relocate
@@ -410,12 +419,14 @@ nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev)
                        head->csum = csum_add(head->csum, fp->csum);
                head->truesize += fp->truesize;
        }
-       atomic_sub(head->truesize, &nf_init_frags.mem);
+       atomic_sub(head->truesize, &fq->q.net->mem);
 
+       head->local_df = 1;
        head->next = NULL;
        head->dev = dev;
        head->tstamp = fq->q.stamp;
        ipv6_hdr(head)->payload_len = htons(payload_len);
+       IP6CB(head)->frag_max_size = sizeof(struct ipv6hdr) + fq->q.max_size;
 
        /* Yes, and fold redundant checksum back. 8) */
        if (head->ip_summed == CHECKSUM_COMPLETE)
@@ -520,8 +531,10 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user)
 {
        struct sk_buff *clone;
        struct net_device *dev = skb->dev;
+       struct net *net = skb_dst(skb) ? dev_net(skb_dst(skb)->dev)
+                                      : dev_net(skb->dev);
        struct frag_hdr *fhdr;
-       struct nf_ct_frag6_queue *fq;
+       struct frag_queue *fq;
        struct ipv6hdr *hdr;
        int fhoff, nhoff;
        u8 prevhdr;
@@ -553,10 +566,11 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user)
        hdr = ipv6_hdr(clone);
        fhdr = (struct frag_hdr *)skb_transport_header(clone);
 
-       if (atomic_read(&nf_init_frags.mem) > nf_init_frags.high_thresh)
-               nf_ct_frag6_evictor();
+       local_bh_disable();
+       inet_frag_evictor(&net->nf_frag.frags, &nf_frags, false);
+       local_bh_enable();
 
-       fq = fq_find(fhdr->identification, user, &hdr->saddr, &hdr->daddr);
+       fq = fq_find(net, fhdr->identification, user, &hdr->saddr, &hdr->daddr);
        if (fq == NULL) {
                pr_debug("Can't find and can't create new queue\n");
                goto ret_orig;
@@ -567,7 +581,7 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user)
        if (nf_ct_frag6_queue(fq, clone, fhdr, nhoff) < 0) {
                spin_unlock_bh(&fq->q.lock);
                pr_debug("Can't insert skb to queue\n");
-               fq_put(fq);
+               inet_frag_put(&fq->q, &nf_frags);
                goto ret_orig;
        }
 
@@ -579,7 +593,7 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user)
        }
        spin_unlock_bh(&fq->q.lock);
 
-       fq_put(fq);
+       inet_frag_put(&fq->q, &nf_frags);
        return ret_skb;
 
 ret_orig:
@@ -592,6 +606,7 @@ void nf_ct_frag6_output(unsigned int hooknum, struct sk_buff *skb,
                        int (*okfn)(struct sk_buff *))
 {
        struct sk_buff *s, *s2;
+       unsigned int ret = 0;
 
        for (s = NFCT_FRAG6_CB(skb)->orig; s;) {
                nf_conntrack_put_reasm(s->nfct_reasm);
@@ -601,49 +616,62 @@ void nf_ct_frag6_output(unsigned int hooknum, struct sk_buff *skb,
                s2 = s->next;
                s->next = NULL;
 
-               NF_HOOK_THRESH(NFPROTO_IPV6, hooknum, s, in, out, okfn,
-                              NF_IP6_PRI_CONNTRACK_DEFRAG + 1);
+               if (ret != -ECANCELED)
+                       ret = NF_HOOK_THRESH(NFPROTO_IPV6, hooknum, s,
+                                            in, out, okfn,
+                                            NF_IP6_PRI_CONNTRACK_DEFRAG + 1);
+               else
+                       kfree_skb(s);
+
                s = s2;
        }
        nf_conntrack_put_reasm(skb);
 }
 
+static int nf_ct_net_init(struct net *net)
+{
+       net->nf_frag.frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
+       net->nf_frag.frags.low_thresh = IPV6_FRAG_LOW_THRESH;
+       net->nf_frag.frags.timeout = IPV6_FRAG_TIMEOUT;
+       inet_frags_init_net(&net->nf_frag.frags);
+
+       return nf_ct_frag6_sysctl_register(net);
+}
+
+static void nf_ct_net_exit(struct net *net)
+{
+       nf_ct_frags6_sysctl_unregister(net);
+       inet_frags_exit_net(&net->nf_frag.frags, &nf_frags);
+}
+
+static struct pernet_operations nf_ct_net_ops = {
+       .init = nf_ct_net_init,
+       .exit = nf_ct_net_exit,
+};
+
 int nf_ct_frag6_init(void)
 {
+       int ret = 0;
+
        nf_frags.hashfn = nf_hashfn;
        nf_frags.constructor = ip6_frag_init;
        nf_frags.destructor = NULL;
        nf_frags.skb_free = nf_skb_free;
-       nf_frags.qsize = sizeof(struct nf_ct_frag6_queue);
+       nf_frags.qsize = sizeof(struct frag_queue);
        nf_frags.match = ip6_frag_match;
        nf_frags.frag_expire = nf_ct_frag6_expire;
        nf_frags.secret_interval = 10 * 60 * HZ;
-       nf_init_frags.timeout = IPV6_FRAG_TIMEOUT;
-       nf_init_frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
-       nf_init_frags.low_thresh = IPV6_FRAG_LOW_THRESH;
-       inet_frags_init_net(&nf_init_frags);
        inet_frags_init(&nf_frags);
 
-#ifdef CONFIG_SYSCTL
-       nf_ct_frag6_sysctl_header = register_net_sysctl(&init_net, "net/netfilter",
-                                                       nf_ct_frag6_sysctl_table);
-       if (!nf_ct_frag6_sysctl_header) {
+       ret = register_pernet_subsys(&nf_ct_net_ops);
+       if (ret)
                inet_frags_fini(&nf_frags);
-               return -ENOMEM;
-       }
-#endif
 
-       return 0;
+       return ret;
 }
 
 void nf_ct_frag6_cleanup(void)
 {
-#ifdef CONFIG_SYSCTL
-       unregister_net_sysctl_table(nf_ct_frag6_sysctl_header);
-       nf_ct_frag6_sysctl_header = NULL;
-#endif
+       unregister_pernet_subsys(&nf_ct_net_ops);
        inet_frags_fini(&nf_frags);
-
-       nf_init_frags.low_thresh = 0;
-       nf_ct_frag6_evictor();
 }
diff --git a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
new file mode 100644 (file)
index 0000000..abfe75a
--- /dev/null
@@ -0,0 +1,288 @@
+/*
+ * Copyright (c) 2011 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of IPv6 NAT funded by Astaro.
+ */
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/ipv6.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv6.h>
+#include <net/secure_seq.h>
+#include <net/checksum.h>
+#include <net/ip6_checksum.h>
+#include <net/ip6_route.h>
+#include <net/ipv6.h>
+
+#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_nat_core.h>
+#include <net/netfilter/nf_nat_l3proto.h>
+#include <net/netfilter/nf_nat_l4proto.h>
+
+static const struct nf_nat_l3proto nf_nat_l3proto_ipv6;
+
+#ifdef CONFIG_XFRM
+static void nf_nat_ipv6_decode_session(struct sk_buff *skb,
+                                      const struct nf_conn *ct,
+                                      enum ip_conntrack_dir dir,
+                                      unsigned long statusbit,
+                                      struct flowi *fl)
+{
+       const struct nf_conntrack_tuple *t = &ct->tuplehash[dir].tuple;
+       struct flowi6 *fl6 = &fl->u.ip6;
+
+       if (ct->status & statusbit) {
+               fl6->daddr = t->dst.u3.in6;
+               if (t->dst.protonum == IPPROTO_TCP ||
+                   t->dst.protonum == IPPROTO_UDP ||
+                   t->dst.protonum == IPPROTO_UDPLITE ||
+                   t->dst.protonum == IPPROTO_DCCP ||
+                   t->dst.protonum == IPPROTO_SCTP)
+                       fl6->fl6_dport = t->dst.u.all;
+       }
+
+       statusbit ^= IPS_NAT_MASK;
+
+       if (ct->status & statusbit) {
+               fl6->saddr = t->src.u3.in6;
+               if (t->dst.protonum == IPPROTO_TCP ||
+                   t->dst.protonum == IPPROTO_UDP ||
+                   t->dst.protonum == IPPROTO_UDPLITE ||
+                   t->dst.protonum == IPPROTO_DCCP ||
+                   t->dst.protonum == IPPROTO_SCTP)
+                       fl6->fl6_sport = t->src.u.all;
+       }
+}
+#endif
+
+static bool nf_nat_ipv6_in_range(const struct nf_conntrack_tuple *t,
+                                const struct nf_nat_range *range)
+{
+       return ipv6_addr_cmp(&t->src.u3.in6, &range->min_addr.in6) >= 0 &&
+              ipv6_addr_cmp(&t->src.u3.in6, &range->max_addr.in6) <= 0;
+}
+
+static u32 nf_nat_ipv6_secure_port(const struct nf_conntrack_tuple *t,
+                                  __be16 dport)
+{
+       return secure_ipv6_port_ephemeral(t->src.u3.ip6, t->dst.u3.ip6, dport);
+}
+
+static bool nf_nat_ipv6_manip_pkt(struct sk_buff *skb,
+                                 unsigned int iphdroff,
+                                 const struct nf_nat_l4proto *l4proto,
+                                 const struct nf_conntrack_tuple *target,
+                                 enum nf_nat_manip_type maniptype)
+{
+       struct ipv6hdr *ipv6h;
+       __be16 frag_off;
+       int hdroff;
+       u8 nexthdr;
+
+       if (!skb_make_writable(skb, iphdroff + sizeof(*ipv6h)))
+               return false;
+
+       ipv6h = (void *)skb->data + iphdroff;
+       nexthdr = ipv6h->nexthdr;
+       hdroff = ipv6_skip_exthdr(skb, iphdroff + sizeof(*ipv6h),
+                                 &nexthdr, &frag_off);
+       if (hdroff < 0)
+               goto manip_addr;
+
+       if ((frag_off & htons(~0x7)) == 0 &&
+           !l4proto->manip_pkt(skb, &nf_nat_l3proto_ipv6, iphdroff, hdroff,
+                               target, maniptype))
+               return false;
+manip_addr:
+       if (maniptype == NF_NAT_MANIP_SRC)
+               ipv6h->saddr = target->src.u3.in6;
+       else
+               ipv6h->daddr = target->dst.u3.in6;
+
+       return true;
+}
+
+static void nf_nat_ipv6_csum_update(struct sk_buff *skb,
+                                   unsigned int iphdroff, __sum16 *check,
+                                   const struct nf_conntrack_tuple *t,
+                                   enum nf_nat_manip_type maniptype)
+{
+       const struct ipv6hdr *ipv6h = (struct ipv6hdr *)(skb->data + iphdroff);
+       const struct in6_addr *oldip, *newip;
+
+       if (maniptype == NF_NAT_MANIP_SRC) {
+               oldip = &ipv6h->saddr;
+               newip = &t->src.u3.in6;
+       } else {
+               oldip = &ipv6h->daddr;
+               newip = &t->dst.u3.in6;
+       }
+       inet_proto_csum_replace16(check, skb, oldip->s6_addr32,
+                                 newip->s6_addr32, 1);
+}
+
+static void nf_nat_ipv6_csum_recalc(struct sk_buff *skb,
+                                   u8 proto, void *data, __sum16 *check,
+                                   int datalen, int oldlen)
+{
+       const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+       struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
+
+       if (skb->ip_summed != CHECKSUM_PARTIAL) {
+               if (!(rt->rt6i_flags & RTF_LOCAL) &&
+                   (!skb->dev || skb->dev->features & NETIF_F_V6_CSUM)) {
+                       skb->ip_summed = CHECKSUM_PARTIAL;
+                       skb->csum_start = skb_headroom(skb) +
+                                         skb_network_offset(skb) +
+                                         (data - (void *)skb->data);
+                       skb->csum_offset = (void *)check - data;
+                       *check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
+                                                 datalen, proto, 0);
+               } else {
+                       *check = 0;
+                       *check = csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
+                                                datalen, proto,
+                                                csum_partial(data, datalen,
+                                                             0));
+                       if (proto == IPPROTO_UDP && !*check)
+                               *check = CSUM_MANGLED_0;
+               }
+       } else
+               inet_proto_csum_replace2(check, skb,
+                                        htons(oldlen), htons(datalen), 1);
+}
+
+static int nf_nat_ipv6_nlattr_to_range(struct nlattr *tb[],
+                                      struct nf_nat_range *range)
+{
+       if (tb[CTA_NAT_V6_MINIP]) {
+               nla_memcpy(&range->min_addr.ip6, tb[CTA_NAT_V6_MINIP],
+                          sizeof(struct in6_addr));
+               range->flags |= NF_NAT_RANGE_MAP_IPS;
+       }
+
+       if (tb[CTA_NAT_V6_MAXIP])
+               nla_memcpy(&range->max_addr.ip6, tb[CTA_NAT_V6_MAXIP],
+                          sizeof(struct in6_addr));
+       else
+               range->max_addr = range->min_addr;
+
+       return 0;
+}
+
+static const struct nf_nat_l3proto nf_nat_l3proto_ipv6 = {
+       .l3proto                = NFPROTO_IPV6,
+       .secure_port            = nf_nat_ipv6_secure_port,
+       .in_range               = nf_nat_ipv6_in_range,
+       .manip_pkt              = nf_nat_ipv6_manip_pkt,
+       .csum_update            = nf_nat_ipv6_csum_update,
+       .csum_recalc            = nf_nat_ipv6_csum_recalc,
+       .nlattr_to_range        = nf_nat_ipv6_nlattr_to_range,
+#ifdef CONFIG_XFRM
+       .decode_session = nf_nat_ipv6_decode_session,
+#endif
+};
+
+int nf_nat_icmpv6_reply_translation(struct sk_buff *skb,
+                                   struct nf_conn *ct,
+                                   enum ip_conntrack_info ctinfo,
+                                   unsigned int hooknum,
+                                   unsigned int hdrlen)
+{
+       struct {
+               struct icmp6hdr icmp6;
+               struct ipv6hdr  ip6;
+       } *inside;
+       enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+       enum nf_nat_manip_type manip = HOOK2MANIP(hooknum);
+       const struct nf_nat_l4proto *l4proto;
+       struct nf_conntrack_tuple target;
+       unsigned long statusbit;
+
+       NF_CT_ASSERT(ctinfo == IP_CT_RELATED || ctinfo == IP_CT_RELATED_REPLY);
+
+       if (!skb_make_writable(skb, hdrlen + sizeof(*inside)))
+               return 0;
+       if (nf_ip6_checksum(skb, hooknum, hdrlen, IPPROTO_ICMPV6))
+               return 0;
+
+       inside = (void *)skb->data + hdrlen;
+       if (inside->icmp6.icmp6_type == NDISC_REDIRECT) {
+               if ((ct->status & IPS_NAT_DONE_MASK) != IPS_NAT_DONE_MASK)
+                       return 0;
+               if (ct->status & IPS_NAT_MASK)
+                       return 0;
+       }
+
+       if (manip == NF_NAT_MANIP_SRC)
+               statusbit = IPS_SRC_NAT;
+       else
+               statusbit = IPS_DST_NAT;
+
+       /* Invert if this is reply direction */
+       if (dir == IP_CT_DIR_REPLY)
+               statusbit ^= IPS_NAT_MASK;
+
+       if (!(ct->status & statusbit))
+               return 1;
+
+       l4proto = __nf_nat_l4proto_find(NFPROTO_IPV6, inside->ip6.nexthdr);
+       if (!nf_nat_ipv6_manip_pkt(skb, hdrlen + sizeof(inside->icmp6),
+                                  l4proto, &ct->tuplehash[!dir].tuple, !manip))
+               return 0;
+
+       if (skb->ip_summed != CHECKSUM_PARTIAL) {
+               struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+               inside = (void *)skb->data + hdrlen;
+               inside->icmp6.icmp6_cksum = 0;
+               inside->icmp6.icmp6_cksum =
+                       csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
+                                       skb->len - hdrlen, IPPROTO_ICMPV6,
+                                       csum_partial(&inside->icmp6,
+                                                    skb->len - hdrlen, 0));
+       }
+
+       nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);
+       l4proto = __nf_nat_l4proto_find(NFPROTO_IPV6, IPPROTO_ICMPV6);
+       if (!nf_nat_ipv6_manip_pkt(skb, 0, l4proto, &target, manip))
+               return 0;
+
+       return 1;
+}
+EXPORT_SYMBOL_GPL(nf_nat_icmpv6_reply_translation);
+
+static int __init nf_nat_l3proto_ipv6_init(void)
+{
+       int err;
+
+       err = nf_nat_l4proto_register(NFPROTO_IPV6, &nf_nat_l4proto_icmpv6);
+       if (err < 0)
+               goto err1;
+       err = nf_nat_l3proto_register(&nf_nat_l3proto_ipv6);
+       if (err < 0)
+               goto err2;
+       return err;
+
+err2:
+       nf_nat_l4proto_unregister(NFPROTO_IPV6, &nf_nat_l4proto_icmpv6);
+err1:
+       return err;
+}
+
+static void __exit nf_nat_l3proto_ipv6_exit(void)
+{
+       nf_nat_l3proto_unregister(&nf_nat_l3proto_ipv6);
+       nf_nat_l4proto_unregister(NFPROTO_IPV6, &nf_nat_l4proto_icmpv6);
+}
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("nf-nat-" __stringify(AF_INET6));
+
+module_init(nf_nat_l3proto_ipv6_init);
+module_exit(nf_nat_l3proto_ipv6_exit);
diff --git a/net/ipv6/netfilter/nf_nat_proto_icmpv6.c b/net/ipv6/netfilter/nf_nat_proto_icmpv6.c
new file mode 100644 (file)
index 0000000..5d6da78
--- /dev/null
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2011 Patrick Mchardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Based on Rusty Russell's IPv4 ICMP NAT code. Development of IPv6
+ * NAT funded by Astaro.
+ */
+
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/icmpv6.h>
+
+#include <linux/netfilter.h>
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nf_nat_core.h>
+#include <net/netfilter/nf_nat_l3proto.h>
+#include <net/netfilter/nf_nat_l4proto.h>
+
+static bool
+icmpv6_in_range(const struct nf_conntrack_tuple *tuple,
+               enum nf_nat_manip_type maniptype,
+               const union nf_conntrack_man_proto *min,
+               const union nf_conntrack_man_proto *max)
+{
+       return ntohs(tuple->src.u.icmp.id) >= ntohs(min->icmp.id) &&
+              ntohs(tuple->src.u.icmp.id) <= ntohs(max->icmp.id);
+}
+
+static void
+icmpv6_unique_tuple(const struct nf_nat_l3proto *l3proto,
+                   struct nf_conntrack_tuple *tuple,
+                   const struct nf_nat_range *range,
+                   enum nf_nat_manip_type maniptype,
+                   const struct nf_conn *ct)
+{
+       static u16 id;
+       unsigned int range_size;
+       unsigned int i;
+
+       range_size = ntohs(range->max_proto.icmp.id) -
+                    ntohs(range->min_proto.icmp.id) + 1;
+
+       if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED))
+               range_size = 0xffff;
+
+       for (i = 0; ; ++id) {
+               tuple->src.u.icmp.id = htons(ntohs(range->min_proto.icmp.id) +
+                                            (id % range_size));
+               if (++i == range_size || !nf_nat_used_tuple(tuple, ct))
+                       return;
+       }
+}
+
+static bool
+icmpv6_manip_pkt(struct sk_buff *skb,
+                const struct nf_nat_l3proto *l3proto,
+                unsigned int iphdroff, unsigned int hdroff,
+                const struct nf_conntrack_tuple *tuple,
+                enum nf_nat_manip_type maniptype)
+{
+       struct icmp6hdr *hdr;
+
+       if (!skb_make_writable(skb, hdroff + sizeof(*hdr)))
+               return false;
+
+       hdr = (struct icmp6hdr *)(skb->data + hdroff);
+       l3proto->csum_update(skb, iphdroff, &hdr->icmp6_cksum,
+                            tuple, maniptype);
+       if (hdr->icmp6_code == ICMPV6_ECHO_REQUEST ||
+           hdr->icmp6_code == ICMPV6_ECHO_REPLY) {
+               inet_proto_csum_replace2(&hdr->icmp6_cksum, skb,
+                                        hdr->icmp6_identifier,
+                                        tuple->src.u.icmp.id, 0);
+               hdr->icmp6_identifier = tuple->src.u.icmp.id;
+       }
+       return true;
+}
+
+const struct nf_nat_l4proto nf_nat_l4proto_icmpv6 = {
+       .l4proto                = IPPROTO_ICMPV6,
+       .manip_pkt              = icmpv6_manip_pkt,
+       .in_range               = icmpv6_in_range,
+       .unique_tuple           = icmpv6_unique_tuple,
+#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+       .nlattr_to_range        = nf_nat_l4proto_nlattr_to_range,
+#endif
+};
index 4a5f78b50495060470777aa41edd540bb699662f..d8e95c77db99e5bac5b7cab803c82a731e14a697 100644 (file)
@@ -1250,7 +1250,8 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
                   sk_wmem_alloc_get(sp),
                   sk_rmem_alloc_get(sp),
                   0, 0L, 0,
-                  sock_i_uid(sp), 0,
+                  from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
+                  0,
                   sock_i_ino(sp),
                   atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
 }
index 4ff9af628e72762843cf35c083c6ee1427b57b03..da8a4e301b1b04ec5d8d0d7aa042a328c986e1d9 100644 (file)
@@ -65,36 +65,8 @@ struct ip6frag_skb_cb
 #define FRAG6_CB(skb)  ((struct ip6frag_skb_cb*)((skb)->cb))
 
 
-/*
- *     Equivalent of ipv4 struct ipq
- */
-
-struct frag_queue
-{
-       struct inet_frag_queue  q;
-
-       __be32                  id;             /* fragment id          */
-       u32                     user;
-       struct in6_addr         saddr;
-       struct in6_addr         daddr;
-
-       int                     iif;
-       unsigned int            csum;
-       __u16                   nhoffset;
-};
-
 static struct inet_frags ip6_frags;
 
-int ip6_frag_nqueues(struct net *net)
-{
-       return net->ipv6.frags.nqueues;
-}
-
-int ip6_frag_mem(struct net *net)
-{
-       return atomic_read(&net->ipv6.frags.mem);
-}
-
 static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
                          struct net_device *dev);
 
@@ -159,46 +131,18 @@ void ip6_frag_init(struct inet_frag_queue *q, void *a)
 }
 EXPORT_SYMBOL(ip6_frag_init);
 
-/* Destruction primitives. */
-
-static __inline__ void fq_put(struct frag_queue *fq)
-{
-       inet_frag_put(&fq->q, &ip6_frags);
-}
-
-/* Kill fq entry. It is not destroyed immediately,
- * because caller (and someone more) holds reference count.
- */
-static __inline__ void fq_kill(struct frag_queue *fq)
-{
-       inet_frag_kill(&fq->q, &ip6_frags);
-}
-
-static void ip6_evictor(struct net *net, struct inet6_dev *idev)
+void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq,
+                          struct inet_frags *frags)
 {
-       int evicted;
-
-       evicted = inet_frag_evictor(&net->ipv6.frags, &ip6_frags);
-       if (evicted)
-               IP6_ADD_STATS_BH(net, idev, IPSTATS_MIB_REASMFAILS, evicted);
-}
-
-static void ip6_frag_expire(unsigned long data)
-{
-       struct frag_queue *fq;
        struct net_device *dev = NULL;
-       struct net *net;
-
-       fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);
 
        spin_lock(&fq->q.lock);
 
        if (fq->q.last_in & INET_FRAG_COMPLETE)
                goto out;
 
-       fq_kill(fq);
+       inet_frag_kill(&fq->q, frags);
 
-       net = container_of(fq->q.net, struct net, ipv6.frags);
        rcu_read_lock();
        dev = dev_get_by_index_rcu(net, fq->iif);
        if (!dev)
@@ -222,7 +166,19 @@ out_rcu_unlock:
        rcu_read_unlock();
 out:
        spin_unlock(&fq->q.lock);
-       fq_put(fq);
+       inet_frag_put(&fq->q, frags);
+}
+EXPORT_SYMBOL(ip6_expire_frag_queue);
+
+static void ip6_frag_expire(unsigned long data)
+{
+       struct frag_queue *fq;
+       struct net *net;
+
+       fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);
+       net = container_of(fq->q.net, struct net, ipv6.frags);
+
+       ip6_expire_frag_queue(net, fq, &ip6_frags);
 }
 
 static __inline__ struct frag_queue *
@@ -391,7 +347,7 @@ found:
        return -1;
 
 discard_fq:
-       fq_kill(fq);
+       inet_frag_kill(&fq->q, &ip6_frags);
 err:
        IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
                      IPSTATS_MIB_REASMFAILS);
@@ -417,7 +373,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
        unsigned int nhoff;
        int sum_truesize;
 
-       fq_kill(fq);
+       inet_frag_kill(&fq->q, &ip6_frags);
 
        /* Make the one we just received the head. */
        if (prev) {
@@ -550,6 +506,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
        struct frag_queue *fq;
        const struct ipv6hdr *hdr = ipv6_hdr(skb);
        struct net *net = dev_net(skb_dst(skb)->dev);
+       int evicted;
 
        IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS);
 
@@ -574,8 +531,10 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
                return 1;
        }
 
-       if (atomic_read(&net->ipv6.frags.mem) > net->ipv6.frags.high_thresh)
-               ip6_evictor(net, ip6_dst_idev(skb_dst(skb)));
+       evicted = inet_frag_evictor(&net->ipv6.frags, &ip6_frags, false);
+       if (evicted)
+               IP6_ADD_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
+                                IPSTATS_MIB_REASMFAILS, evicted);
 
        fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr);
        if (fq != NULL) {
@@ -586,7 +545,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
                ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff);
 
                spin_unlock(&fq->q.lock);
-               fq_put(fq);
+               inet_frag_put(&fq->q, &ip6_frags);
                return ret;
        }
 
index 854e4018d205c826032633ce2641cbc1af2c057a..d1ddbc6ddac50907ea22983be8fdf3389967b39a 100644 (file)
@@ -222,7 +222,7 @@ static const u32 ip6_template_metrics[RTAX_MAX] = {
        [RTAX_HOPLIMIT - 1] = 255,
 };
 
-static struct rt6_info ip6_null_entry_template = {
+static const struct rt6_info ip6_null_entry_template = {
        .dst = {
                .__refcnt       = ATOMIC_INIT(1),
                .__use          = 1,
@@ -242,7 +242,7 @@ static struct rt6_info ip6_null_entry_template = {
 static int ip6_pkt_prohibit(struct sk_buff *skb);
 static int ip6_pkt_prohibit_out(struct sk_buff *skb);
 
-static struct rt6_info ip6_prohibit_entry_template = {
+static const struct rt6_info ip6_prohibit_entry_template = {
        .dst = {
                .__refcnt       = ATOMIC_INIT(1),
                .__use          = 1,
@@ -257,7 +257,7 @@ static struct rt6_info ip6_prohibit_entry_template = {
        .rt6i_ref       = ATOMIC_INIT(1),
 };
 
-static struct rt6_info ip6_blk_hole_entry_template = {
+static const struct rt6_info ip6_blk_hole_entry_template = {
        .dst = {
                .__refcnt       = ATOMIC_INIT(1),
                .__use          = 1,
@@ -370,15 +370,11 @@ static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
 
 static bool rt6_check_expired(const struct rt6_info *rt)
 {
-       struct rt6_info *ort = NULL;
-
        if (rt->rt6i_flags & RTF_EXPIRES) {
                if (time_after(jiffies, rt->dst.expires))
                        return true;
        } else if (rt->dst.from) {
-               ort = (struct rt6_info *) rt->dst.from;
-               return (ort->rt6i_flags & RTF_EXPIRES) &&
-                       time_after(jiffies, ort->dst.expires);
+               return rt6_check_expired((struct rt6_info *) rt->dst.from);
        }
        return false;
 }
@@ -452,10 +448,9 @@ static void rt6_probe(struct rt6_info *rt)
         * Router Reachability Probe MUST be rate-limited
         * to no more than one per minute.
         */
-       rcu_read_lock();
        neigh = rt ? rt->n : NULL;
        if (!neigh || (neigh->nud_state & NUD_VALID))
-               goto out;
+               return;
        read_lock_bh(&neigh->lock);
        if (!(neigh->nud_state & NUD_VALID) &&
            time_after(jiffies, neigh->updated + rt->rt6i_idev->cnf.rtr_probe_interval)) {
@@ -471,8 +466,6 @@ static void rt6_probe(struct rt6_info *rt)
        } else {
                read_unlock_bh(&neigh->lock);
        }
-out:
-       rcu_read_unlock();
 }
 #else
 static inline void rt6_probe(struct rt6_info *rt)
@@ -499,7 +492,6 @@ static inline int rt6_check_neigh(struct rt6_info *rt)
        struct neighbour *neigh;
        int m;
 
-       rcu_read_lock();
        neigh = rt->n;
        if (rt->rt6i_flags & RTF_NONEXTHOP ||
            !(rt->rt6i_flags & RTF_GATEWAY))
@@ -517,7 +509,6 @@ static inline int rt6_check_neigh(struct rt6_info *rt)
                read_unlock_bh(&neigh->lock);
        } else
                m = 0;
-       rcu_read_unlock();
        return m;
 }
 
@@ -966,7 +957,7 @@ struct dst_entry * ip6_route_output(struct net *net, const struct sock *sk,
 {
        int flags = 0;
 
-       fl6->flowi6_iif = net->loopback_dev->ifindex;
+       fl6->flowi6_iif = LOOPBACK_IFINDEX;
 
        if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr))
                flags |= RT6_LOOKUP_F_IFACE;
@@ -1469,8 +1460,21 @@ int ip6_route_add(struct fib6_config *cfg)
                }
                rt->dst.output = ip6_pkt_discard_out;
                rt->dst.input = ip6_pkt_discard;
-               rt->dst.error = -ENETUNREACH;
                rt->rt6i_flags = RTF_REJECT|RTF_NONEXTHOP;
+               switch (cfg->fc_type) {
+               case RTN_BLACKHOLE:
+                       rt->dst.error = -EINVAL;
+                       break;
+               case RTN_PROHIBIT:
+                       rt->dst.error = -EACCES;
+                       break;
+               case RTN_THROW:
+                       rt->dst.error = -EAGAIN;
+                       break;
+               default:
+                       rt->dst.error = -ENETUNREACH;
+                       break;
+               }
                goto install_route;
        }
 
@@ -1835,7 +1839,7 @@ static struct rt6_info *rt6_get_route_info(struct net *net,
        if (!table)
                return NULL;
 
-       write_lock_bh(&table->tb6_lock);
+       read_lock_bh(&table->tb6_lock);
        fn = fib6_locate(&table->tb6_root, prefix ,prefixlen, NULL, 0);
        if (!fn)
                goto out;
@@ -1851,7 +1855,7 @@ static struct rt6_info *rt6_get_route_info(struct net *net,
                break;
        }
 out:
-       write_unlock_bh(&table->tb6_lock);
+       read_unlock_bh(&table->tb6_lock);
        return rt;
 }
 
@@ -1867,7 +1871,7 @@ static struct rt6_info *rt6_add_route_info(struct net *net,
                .fc_dst_len     = prefixlen,
                .fc_flags       = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
                                  RTF_UP | RTF_PREF(pref),
-               .fc_nlinfo.pid = 0,
+               .fc_nlinfo.portid = 0,
                .fc_nlinfo.nlh = NULL,
                .fc_nlinfo.nl_net = net,
        };
@@ -1894,7 +1898,7 @@ struct rt6_info *rt6_get_dflt_router(const struct in6_addr *addr, struct net_dev
        if (!table)
                return NULL;
 
-       write_lock_bh(&table->tb6_lock);
+       read_lock_bh(&table->tb6_lock);
        for (rt = table->tb6_root.leaf; rt; rt=rt->dst.rt6_next) {
                if (dev == rt->dst.dev &&
                    ((rt->rt6i_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
@@ -1903,7 +1907,7 @@ struct rt6_info *rt6_get_dflt_router(const struct in6_addr *addr, struct net_dev
        }
        if (rt)
                dst_hold(&rt->dst);
-       write_unlock_bh(&table->tb6_lock);
+       read_unlock_bh(&table->tb6_lock);
        return rt;
 }
 
@@ -1917,7 +1921,7 @@ struct rt6_info *rt6_add_dflt_router(const struct in6_addr *gwaddr,
                .fc_ifindex     = dev->ifindex,
                .fc_flags       = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
                                  RTF_UP | RTF_EXPIRES | RTF_PREF(pref),
-               .fc_nlinfo.pid = 0,
+               .fc_nlinfo.portid = 0,
                .fc_nlinfo.nlh = NULL,
                .fc_nlinfo.nl_net = dev_net(dev),
        };
@@ -2266,14 +2270,18 @@ static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
        cfg->fc_src_len = rtm->rtm_src_len;
        cfg->fc_flags = RTF_UP;
        cfg->fc_protocol = rtm->rtm_protocol;
+       cfg->fc_type = rtm->rtm_type;
 
-       if (rtm->rtm_type == RTN_UNREACHABLE)
+       if (rtm->rtm_type == RTN_UNREACHABLE ||
+           rtm->rtm_type == RTN_BLACKHOLE ||
+           rtm->rtm_type == RTN_PROHIBIT ||
+           rtm->rtm_type == RTN_THROW)
                cfg->fc_flags |= RTF_REJECT;
 
        if (rtm->rtm_type == RTN_LOCAL)
                cfg->fc_flags |= RTF_LOCAL;
 
-       cfg->fc_nlinfo.pid = NETLINK_CB(skb).pid;
+       cfg->fc_nlinfo.portid = NETLINK_CB(skb).portid;
        cfg->fc_nlinfo.nlh = nlh;
        cfg->fc_nlinfo.nl_net = sock_net(skb->sk);
 
@@ -2364,7 +2372,7 @@ static inline size_t rt6_nlmsg_size(void)
 static int rt6_fill_node(struct net *net,
                         struct sk_buff *skb, struct rt6_info *rt,
                         struct in6_addr *dst, struct in6_addr *src,
-                        int iif, int type, u32 pid, u32 seq,
+                        int iif, int type, u32 portid, u32 seq,
                         int prefix, int nowait, unsigned int flags)
 {
        struct rtmsg *rtm;
@@ -2380,7 +2388,7 @@ static int rt6_fill_node(struct net *net,
                }
        }
 
-       nlh = nlmsg_put(skb, pid, seq, type, sizeof(*rtm), flags);
+       nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags);
        if (!nlh)
                return -EMSGSIZE;
 
@@ -2396,8 +2404,22 @@ static int rt6_fill_node(struct net *net,
        rtm->rtm_table = table;
        if (nla_put_u32(skb, RTA_TABLE, table))
                goto nla_put_failure;
-       if (rt->rt6i_flags & RTF_REJECT)
-               rtm->rtm_type = RTN_UNREACHABLE;
+       if (rt->rt6i_flags & RTF_REJECT) {
+               switch (rt->dst.error) {
+               case -EINVAL:
+                       rtm->rtm_type = RTN_BLACKHOLE;
+                       break;
+               case -EACCES:
+                       rtm->rtm_type = RTN_PROHIBIT;
+                       break;
+               case -EAGAIN:
+                       rtm->rtm_type = RTN_THROW;
+                       break;
+               default:
+                       rtm->rtm_type = RTN_UNREACHABLE;
+                       break;
+               }
+       }
        else if (rt->rt6i_flags & RTF_LOCAL)
                rtm->rtm_type = RTN_LOCAL;
        else if (rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK))
@@ -2470,15 +2492,11 @@ static int rt6_fill_node(struct net *net,
        if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
                goto nla_put_failure;
 
-       rcu_read_lock();
        n = rt->n;
        if (n) {
-               if (nla_put(skb, RTA_GATEWAY, 16, &n->primary_key) < 0) {
-                       rcu_read_unlock();
+               if (nla_put(skb, RTA_GATEWAY, 16, &n->primary_key) < 0)
                        goto nla_put_failure;
-               }
        }
-       rcu_read_unlock();
 
        if (rt->dst.dev &&
            nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
@@ -2511,7 +2529,7 @@ int rt6_dump_route(struct rt6_info *rt, void *p_arg)
 
        return rt6_fill_node(arg->net,
                     arg->skb, rt, NULL, NULL, 0, RTM_NEWROUTE,
-                    NETLINK_CB(arg->cb->skb).pid, arg->cb->nlh->nlmsg_seq,
+                    NETLINK_CB(arg->cb->skb).portid, arg->cb->nlh->nlmsg_seq,
                     prefix, 0, NLM_F_MULTI);
 }
 
@@ -2591,14 +2609,14 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
        skb_dst_set(skb, &rt->dst);
 
        err = rt6_fill_node(net, skb, rt, &fl6.daddr, &fl6.saddr, iif,
-                           RTM_NEWROUTE, NETLINK_CB(in_skb).pid,
+                           RTM_NEWROUTE, NETLINK_CB(in_skb).portid,
                            nlh->nlmsg_seq, 0, 0, 0);
        if (err < 0) {
                kfree_skb(skb);
                goto errout;
        }
 
-       err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).pid);
+       err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
 errout:
        return err;
 }
@@ -2618,14 +2636,14 @@ void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info)
                goto errout;
 
        err = rt6_fill_node(net, skb, rt, NULL, NULL, 0,
-                               event, info->pid, seq, 0, 0, 0);
+                               event, info->portid, seq, 0, 0, 0);
        if (err < 0) {
                /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
                WARN_ON(err == -EMSGSIZE);
                kfree_skb(skb);
                goto errout;
        }
-       rtnl_notify(skb, net, info->pid, RTNLGRP_IPV6_ROUTE,
+       rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
                    info->nlh, gfp_any());
        return;
 errout:
@@ -2680,14 +2698,12 @@ static int rt6_info_route(struct rt6_info *rt, void *p_arg)
 #else
        seq_puts(m, "00000000000000000000000000000000 00 ");
 #endif
-       rcu_read_lock();
        n = rt->n;
        if (n) {
                seq_printf(m, "%pi6", n->primary_key);
        } else {
                seq_puts(m, "00000000000000000000000000000000");
        }
-       rcu_read_unlock();
        seq_printf(m, " %08x %08x %08x %08x %8s\n",
                   rt->rt6i_metric, atomic_read(&rt->dst.__refcnt),
                   rt->dst.__use, rt->rt6i_flags,
index 3bd1bfc01f8523c9ad0e9698f24974fbf1cc5171..3ed54ffd8d50dfa1ccf5b4b539c5461fae29c4fb 100644 (file)
@@ -545,7 +545,6 @@ static int ipip6_err(struct sk_buff *skb, u32 info)
 
        err = -ENOENT;
 
-       rcu_read_lock();
        t = ipip6_tunnel_lookup(dev_net(skb->dev),
                                skb->dev,
                                iph->daddr,
@@ -579,7 +578,6 @@ static int ipip6_err(struct sk_buff *skb, u32 info)
                t->err_count = 1;
        t->err_time = jiffies;
 out:
-       rcu_read_unlock();
        return err;
 }
 
@@ -599,7 +597,6 @@ static int ipip6_rcv(struct sk_buff *skb)
 
        iph = ip_hdr(skb);
 
-       rcu_read_lock();
        tunnel = ipip6_tunnel_lookup(dev_net(skb->dev), skb->dev,
                                     iph->saddr, iph->daddr);
        if (tunnel != NULL) {
@@ -615,7 +612,6 @@ static int ipip6_rcv(struct sk_buff *skb)
                if ((tunnel->dev->priv_flags & IFF_ISATAP) &&
                    !isatap_chksrc(skb, iph, tunnel)) {
                        tunnel->dev->stats.rx_errors++;
-                       rcu_read_unlock();
                        kfree_skb(skb);
                        return 0;
                }
@@ -630,12 +626,10 @@ static int ipip6_rcv(struct sk_buff *skb)
 
                netif_rx(skb);
 
-               rcu_read_unlock();
                return 0;
        }
 
        /* no tunnel matched,  let upstream know, ipsec may handle it */
-       rcu_read_unlock();
        return 1;
 out:
        kfree_skb(skb);
index bb46061c813a45c1ef859f5c60c2c5e45773ed29..182ab9a85d6cb5c0ad88e89bab6f5c0d22c380e8 100644 (file)
@@ -190,6 +190,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
        ireq = inet_rsk(req);
        ireq6 = inet6_rsk(req);
        treq = tcp_rsk(req);
+       treq->listener = NULL;
 
        if (security_inet_conn_request(sk, skb, req))
                goto out_free;
index acd32e3f1b68e7c11fd211383e05b50dfd07ee6a..49c890386ce9ba6b76e401191d7d64484da7a238 100644 (file)
@@ -476,7 +476,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
        if (!dst && (dst = inet6_csk_route_req(sk, fl6, req)) == NULL)
                goto done;
 
-       skb = tcp_make_synack(sk, dst, req, rvp);
+       skb = tcp_make_synack(sk, dst, req, rvp, NULL);
 
        if (skb) {
                __tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);
@@ -763,6 +763,8 @@ static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
                                         struct sk_buff *skb)
 {
        const struct ipv6hdr *iph = skb_gro_network_header(skb);
+       __wsum wsum;
+       __sum16 sum;
 
        switch (skb->ip_summed) {
        case CHECKSUM_COMPLETE:
@@ -771,11 +773,23 @@ static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                        break;
                }
-
-               /* fall through */
-       case CHECKSUM_NONE:
+flush:
                NAPI_GRO_CB(skb)->flush = 1;
                return NULL;
+
+       case CHECKSUM_NONE:
+               wsum = ~csum_unfold(csum_ipv6_magic(&iph->saddr, &iph->daddr,
+                                                   skb_gro_len(skb),
+                                                   IPPROTO_TCP, 0));
+               sum = csum_fold(skb_checksum(skb,
+                                            skb_gro_offset(skb),
+                                            skb_gro_len(skb),
+                                            wsum));
+               if (sum)
+                       goto flush;
+
+               skb->ip_summed = CHECKSUM_UNNECESSARY;
+               break;
        }
 
        return tcp_gro_receive(head, skb);
@@ -988,7 +1002,7 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
                                   &ipv6_hdr(skb)->saddr,
                                   &ipv6_hdr(skb)->daddr, inet6_iif(skb));
        if (req)
-               return tcp_check_req(sk, skb, req, prev);
+               return tcp_check_req(sk, skb, req, prev, false);
 
        nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
                        &ipv6_hdr(skb)->saddr, th->source,
@@ -1169,7 +1183,6 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
        }
 have_isn:
        tcp_rsk(req)->snt_isn = isn;
-       tcp_rsk(req)->snt_synack = tcp_time_stamp;
 
        if (security_inet_conn_request(sk, skb, req))
                goto drop_and_release;
@@ -1180,6 +1193,8 @@ have_isn:
            want_cookie)
                goto drop_and_free;
 
+       tcp_rsk(req)->snt_synack = tcp_time_stamp;
+       tcp_rsk(req)->listener = NULL;
        inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
        return 0;
 
@@ -1347,9 +1362,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
                newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
 
        tcp_initialize_rcv_mss(newsk);
-       if (tcp_rsk(req)->snt_synack)
-               tcp_valid_rtt_meas(newsk,
-                   tcp_time_stamp - tcp_rsk(req)->snt_synack);
+       tcp_synack_rtt_meas(newsk, req);
        newtp->total_retrans = req->retrans;
 
        newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
@@ -1829,7 +1842,7 @@ static void tcp_v6_destroy_sock(struct sock *sk)
 #ifdef CONFIG_PROC_FS
 /* Proc filesystem TCPv6 sock list dumping. */
 static void get_openreq6(struct seq_file *seq,
-                        const struct sock *sk, struct request_sock *req, int i, int uid)
+                        const struct sock *sk, struct request_sock *req, int i, kuid_t uid)
 {
        int ttd = req->expires - jiffies;
        const struct in6_addr *src = &inet6_rsk(req)->loc_addr;
@@ -1853,7 +1866,7 @@ static void get_openreq6(struct seq_file *seq,
                   1,   /* timers active (only the expire timer) */
                   jiffies_to_clock_t(ttd),
                   req->retrans,
-                  uid,
+                  from_kuid_munged(seq_user_ns(seq), uid),
                   0,  /* non standard timer */
                   0, /* open_requests have no inode */
                   0, req);
@@ -1901,9 +1914,9 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
                   tp->write_seq-tp->snd_una,
                   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
                   timer_active,
-                  jiffies_to_clock_t(timer_expires - jiffies),
+                  jiffies_delta_to_clock_t(timer_expires - jiffies),
                   icsk->icsk_retransmits,
-                  sock_i_uid(sp),
+                  from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
                   icsk->icsk_probes_out,
                   sock_i_ino(sp),
                   atomic_read(&sp->sk_refcnt), sp,
@@ -1921,10 +1934,7 @@ static void get_timewait6_sock(struct seq_file *seq,
        const struct in6_addr *dest, *src;
        __u16 destp, srcp;
        const struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
-       int ttd = tw->tw_ttd - jiffies;
-
-       if (ttd < 0)
-               ttd = 0;
+       long delta = tw->tw_ttd - jiffies;
 
        dest = &tw6->tw_v6_daddr;
        src  = &tw6->tw_v6_rcv_saddr;
@@ -1940,7 +1950,7 @@ static void get_timewait6_sock(struct seq_file *seq,
                   dest->s6_addr32[0], dest->s6_addr32[1],
                   dest->s6_addr32[2], dest->s6_addr32[3], destp,
                   tw->tw_substate, 0, 0,
-                  3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
+                  3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
                   atomic_read(&tw->tw_refcnt), tw);
 }
 
index 07e2bfef6845429ee7e359a6c21141db0a0219de..fc9997260a6bc5b841aafc4f3b57cf11ff7ec404 100644 (file)
@@ -1469,7 +1469,8 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
                   sk_wmem_alloc_get(sp),
                   sk_rmem_alloc_get(sp),
                   0, 0L, 0,
-                  sock_i_uid(sp), 0,
+                  from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
+                  0,
                   sock_i_ino(sp),
                   atomic_read(&sp->sk_refcnt), sp,
                   atomic_read(&sp->sk_drops));
index f8ba30dfecae430df85199c547a6884bc1461022..02ff7f2f60d4b2bd566be9b92b84ca42851221b4 100644 (file)
@@ -217,7 +217,8 @@ static int ipx_seq_socket_show(struct seq_file *seq, void *v)
        seq_printf(seq, "%08X  %08X  %02X     %03d\n",
                   sk_wmem_alloc_get(s),
                   sk_rmem_alloc_get(s),
-                  s->sk_state, SOCK_INODE(s->sk_socket)->i_uid);
+                  s->sk_state,
+                  from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)));
 out:
        return 0;
 }
index 6c7c4b92e4f8ec0e5a2aad62b33a6ada0d813c0e..c32971269280116543c0bf560e1bbc3248df034d 100644 (file)
@@ -100,7 +100,7 @@ static int irda_nl_get_mode(struct sk_buff *skb, struct genl_info *info)
                goto err_out;
        }
 
-       hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq,
+       hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
                          &irda_nl_family, 0,  IRDA_NL_CMD_GET_MODE);
        if (hdr == NULL) {
                ret = -EMSGSIZE;
index 34e418508a675d1f3427fa09f73ef43f2c9adae7..08897a3c7ec764550d518777811fa8e3179a9492 100644 (file)
@@ -54,7 +54,7 @@ struct pfkey_sock {
 
        struct {
                uint8_t         msg_version;
-               uint32_t        msg_pid;
+               uint32_t        msg_portid;
                int             (*dump)(struct pfkey_sock *sk);
                void            (*done)(struct pfkey_sock *sk);
                union {
@@ -1447,7 +1447,7 @@ static int key_notify_sa(struct xfrm_state *x, const struct km_event *c)
        hdr->sadb_msg_errno = 0;
        hdr->sadb_msg_reserved = 0;
        hdr->sadb_msg_seq = c->seq;
-       hdr->sadb_msg_pid = c->pid;
+       hdr->sadb_msg_pid = c->portid;
 
        pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, xs_net(x));
 
@@ -1486,7 +1486,7 @@ static int pfkey_add(struct sock *sk, struct sk_buff *skb, const struct sadb_msg
        else
                c.event = XFRM_MSG_UPDSA;
        c.seq = hdr->sadb_msg_seq;
-       c.pid = hdr->sadb_msg_pid;
+       c.portid = hdr->sadb_msg_pid;
        km_state_notify(x, &c);
 out:
        xfrm_state_put(x);
@@ -1523,7 +1523,7 @@ static int pfkey_delete(struct sock *sk, struct sk_buff *skb, const struct sadb_
                goto out;
 
        c.seq = hdr->sadb_msg_seq;
-       c.pid = hdr->sadb_msg_pid;
+       c.portid = hdr->sadb_msg_pid;
        c.event = XFRM_MSG_DELSA;
        km_state_notify(x, &c);
 out:
@@ -1701,7 +1701,7 @@ static int key_notify_sa_flush(const struct km_event *c)
        hdr->sadb_msg_satype = pfkey_proto2satype(c->data.proto);
        hdr->sadb_msg_type = SADB_FLUSH;
        hdr->sadb_msg_seq = c->seq;
-       hdr->sadb_msg_pid = c->pid;
+       hdr->sadb_msg_pid = c->portid;
        hdr->sadb_msg_version = PF_KEY_V2;
        hdr->sadb_msg_errno = (uint8_t) 0;
        hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
@@ -1736,7 +1736,7 @@ static int pfkey_flush(struct sock *sk, struct sk_buff *skb, const struct sadb_m
 
        c.data.proto = proto;
        c.seq = hdr->sadb_msg_seq;
-       c.pid = hdr->sadb_msg_pid;
+       c.portid = hdr->sadb_msg_pid;
        c.event = XFRM_MSG_FLUSHSA;
        c.net = net;
        km_state_notify(NULL, &c);
@@ -1764,7 +1764,7 @@ static int dump_sa(struct xfrm_state *x, int count, void *ptr)
        out_hdr->sadb_msg_errno = 0;
        out_hdr->sadb_msg_reserved = 0;
        out_hdr->sadb_msg_seq = count + 1;
-       out_hdr->sadb_msg_pid = pfk->dump.msg_pid;
+       out_hdr->sadb_msg_pid = pfk->dump.msg_portid;
 
        if (pfk->dump.skb)
                pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE,
@@ -1798,7 +1798,7 @@ static int pfkey_dump(struct sock *sk, struct sk_buff *skb, const struct sadb_ms
                return -EINVAL;
 
        pfk->dump.msg_version = hdr->sadb_msg_version;
-       pfk->dump.msg_pid = hdr->sadb_msg_pid;
+       pfk->dump.msg_portid = hdr->sadb_msg_pid;
        pfk->dump.dump = pfkey_dump_sa;
        pfk->dump.done = pfkey_dump_sa_done;
        xfrm_state_walk_init(&pfk->dump.u.state, proto);
@@ -1923,6 +1923,9 @@ parse_ipsecrequests(struct xfrm_policy *xp, struct sadb_x_policy *pol)
        int len = pol->sadb_x_policy_len*8 - sizeof(struct sadb_x_policy);
        struct sadb_x_ipsecrequest *rq = (void*)(pol+1);
 
+       if (pol->sadb_x_policy_len * 8 < sizeof(struct sadb_x_policy))
+               return -EINVAL;
+
        while (len >= sizeof(struct sadb_x_ipsecrequest)) {
                if ((err = parse_ipsecrequest(xp, rq)) < 0)
                        return err;
@@ -2157,7 +2160,7 @@ static int key_notify_policy(struct xfrm_policy *xp, int dir, const struct km_ev
                out_hdr->sadb_msg_type = event2poltype(c->event);
        out_hdr->sadb_msg_errno = 0;
        out_hdr->sadb_msg_seq = c->seq;
-       out_hdr->sadb_msg_pid = c->pid;
+       out_hdr->sadb_msg_pid = c->portid;
        pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ALL, NULL, xp_net(xp));
        return 0;
 
@@ -2272,7 +2275,7 @@ static int pfkey_spdadd(struct sock *sk, struct sk_buff *skb, const struct sadb_
                c.event = XFRM_MSG_NEWPOLICY;
 
        c.seq = hdr->sadb_msg_seq;
-       c.pid = hdr->sadb_msg_pid;
+       c.portid = hdr->sadb_msg_pid;
 
        km_policy_notify(xp, pol->sadb_x_policy_dir-1, &c);
        xfrm_pol_put(xp);
@@ -2351,7 +2354,7 @@ static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, const struct sa
                goto out;
 
        c.seq = hdr->sadb_msg_seq;
-       c.pid = hdr->sadb_msg_pid;
+       c.portid = hdr->sadb_msg_pid;
        c.data.byid = 0;
        c.event = XFRM_MSG_DELPOLICY;
        km_policy_notify(xp, pol->sadb_x_policy_dir-1, &c);
@@ -2597,7 +2600,7 @@ static int pfkey_spdget(struct sock *sk, struct sk_buff *skb, const struct sadb_
                if (err)
                        goto out;
                c.seq = hdr->sadb_msg_seq;
-               c.pid = hdr->sadb_msg_pid;
+               c.portid = hdr->sadb_msg_pid;
                c.data.byid = 1;
                c.event = XFRM_MSG_DELPOLICY;
                km_policy_notify(xp, dir, &c);
@@ -2634,7 +2637,7 @@ static int dump_sp(struct xfrm_policy *xp, int dir, int count, void *ptr)
        out_hdr->sadb_msg_satype = SADB_SATYPE_UNSPEC;
        out_hdr->sadb_msg_errno = 0;
        out_hdr->sadb_msg_seq = count + 1;
-       out_hdr->sadb_msg_pid = pfk->dump.msg_pid;
+       out_hdr->sadb_msg_pid = pfk->dump.msg_portid;
 
        if (pfk->dump.skb)
                pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE,
@@ -2663,7 +2666,7 @@ static int pfkey_spddump(struct sock *sk, struct sk_buff *skb, const struct sadb
                return -EBUSY;
 
        pfk->dump.msg_version = hdr->sadb_msg_version;
-       pfk->dump.msg_pid = hdr->sadb_msg_pid;
+       pfk->dump.msg_portid = hdr->sadb_msg_pid;
        pfk->dump.dump = pfkey_dump_sp;
        pfk->dump.done = pfkey_dump_sp_done;
        xfrm_policy_walk_init(&pfk->dump.u.policy, XFRM_POLICY_TYPE_MAIN);
@@ -2682,7 +2685,7 @@ static int key_notify_policy_flush(const struct km_event *c)
        hdr = (struct sadb_msg *) skb_put(skb_out, sizeof(struct sadb_msg));
        hdr->sadb_msg_type = SADB_X_SPDFLUSH;
        hdr->sadb_msg_seq = c->seq;
-       hdr->sadb_msg_pid = c->pid;
+       hdr->sadb_msg_pid = c->portid;
        hdr->sadb_msg_version = PF_KEY_V2;
        hdr->sadb_msg_errno = (uint8_t) 0;
        hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
@@ -2711,7 +2714,7 @@ static int pfkey_spdflush(struct sock *sk, struct sk_buff *skb, const struct sad
 
        c.data.type = XFRM_POLICY_TYPE_MAIN;
        c.event = XFRM_MSG_FLUSHPOLICY;
-       c.pid = hdr->sadb_msg_pid;
+       c.portid = hdr->sadb_msg_pid;
        c.seq = hdr->sadb_msg_seq;
        c.net = net;
        km_policy_notify(NULL, 0, &c);
@@ -3024,7 +3027,7 @@ static u32 get_acqseq(void)
        return res;
 }
 
-static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *xp, int dir)
+static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *xp)
 {
        struct sk_buff *skb;
        struct sadb_msg *hdr;
@@ -3105,7 +3108,7 @@ static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct
        pol->sadb_x_policy_len = sizeof(struct sadb_x_policy)/sizeof(uint64_t);
        pol->sadb_x_policy_exttype = SADB_X_EXT_POLICY;
        pol->sadb_x_policy_type = IPSEC_POLICY_IPSEC;
-       pol->sadb_x_policy_dir = dir+1;
+       pol->sadb_x_policy_dir = XFRM_POLICY_OUT + 1;
        pol->sadb_x_policy_id = xp->index;
 
        /* Set sadb_comb's. */
@@ -3661,7 +3664,7 @@ static int pfkey_seq_show(struct seq_file *f, void *v)
                               atomic_read(&s->sk_refcnt),
                               sk_rmem_alloc_get(s),
                               sk_wmem_alloc_get(s),
-                              sock_i_uid(s),
+                              from_kuid_munged(seq_user_ns(f), sock_i_uid(s)),
                               sock_i_ino(s)
                               );
        return 0;
index 4b1e71751e1019de005ed9f4f8a9708f2c11f158..147a8fd47a17610dd392a49acee6aaf292c1b5d8 100644 (file)
@@ -4,6 +4,7 @@
 
 menuconfig L2TP
        tristate "Layer Two Tunneling Protocol (L2TP)"
+       depends on (IPV6 || IPV6=n)
        depends on INET
        ---help---
          Layer Two Tunneling Protocol
index 3bfb34aaee293cb697f36ae88a060f3329571214..37b8b8ba31f7395001cd2f36e234d82878bc22c7 100644 (file)
@@ -67,6 +67,7 @@ static inline struct l2tp_eth_net *l2tp_eth_pernet(struct net *net)
        return net_generic(net, l2tp_eth_net_id);
 }
 
+static struct lock_class_key l2tp_eth_tx_busylock;
 static int l2tp_eth_dev_init(struct net_device *dev)
 {
        struct l2tp_eth *priv = netdev_priv(dev);
@@ -74,7 +75,7 @@ static int l2tp_eth_dev_init(struct net_device *dev)
        priv->dev = dev;
        eth_hw_addr_random(dev);
        memset(&dev->broadcast[0], 0xff, 6);
-
+       dev->qdisc_tx_busylock = &l2tp_eth_tx_busylock;
        return 0;
 }
 
index 6f936358d664cd3a8946317ca879180a937c8b22..6c4cc12c7414f90341513a29da3097f4c9f9ecb4 100644 (file)
@@ -78,7 +78,7 @@ static int l2tp_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
                goto out;
        }
 
-       hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq,
+       hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
                          &l2tp_nl_family, 0, L2TP_CMD_NOOP);
        if (!hdr) {
                ret = -EMSGSIZE;
@@ -87,7 +87,7 @@ static int l2tp_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
 
        genlmsg_end(msg, hdr);
 
-       return genlmsg_unicast(genl_info_net(info), msg, info->snd_pid);
+       return genlmsg_unicast(genl_info_net(info), msg, info->snd_portid);
 
 err_out:
        nlmsg_free(msg);
@@ -235,7 +235,7 @@ out:
        return ret;
 }
 
-static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 pid, u32 seq, int flags,
+static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 portid, u32 seq, int flags,
                               struct l2tp_tunnel *tunnel)
 {
        void *hdr;
@@ -248,7 +248,7 @@ static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 pid, u32 seq, int flags,
        struct l2tp_stats stats;
        unsigned int start;
 
-       hdr = genlmsg_put(skb, pid, seq, &l2tp_nl_family, flags,
+       hdr = genlmsg_put(skb, portid, seq, &l2tp_nl_family, flags,
                          L2TP_CMD_TUNNEL_GET);
        if (!hdr)
                return -EMSGSIZE;
@@ -359,12 +359,12 @@ static int l2tp_nl_cmd_tunnel_get(struct sk_buff *skb, struct genl_info *info)
                goto out;
        }
 
-       ret = l2tp_nl_tunnel_send(msg, info->snd_pid, info->snd_seq,
+       ret = l2tp_nl_tunnel_send(msg, info->snd_portid, info->snd_seq,
                                  NLM_F_ACK, tunnel);
        if (ret < 0)
                goto err_out;
 
-       return genlmsg_unicast(net, msg, info->snd_pid);
+       return genlmsg_unicast(net, msg, info->snd_portid);
 
 err_out:
        nlmsg_free(msg);
@@ -384,7 +384,7 @@ static int l2tp_nl_cmd_tunnel_dump(struct sk_buff *skb, struct netlink_callback
                if (tunnel == NULL)
                        goto out;
 
-               if (l2tp_nl_tunnel_send(skb, NETLINK_CB(cb->skb).pid,
+               if (l2tp_nl_tunnel_send(skb, NETLINK_CB(cb->skb).portid,
                                        cb->nlh->nlmsg_seq, NLM_F_MULTI,
                                        tunnel) <= 0)
                        goto out;
@@ -604,7 +604,7 @@ out:
        return ret;
 }
 
-static int l2tp_nl_session_send(struct sk_buff *skb, u32 pid, u32 seq, int flags,
+static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq, int flags,
                                struct l2tp_session *session)
 {
        void *hdr;
@@ -616,7 +616,7 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 pid, u32 seq, int flags
 
        sk = tunnel->sock;
 
-       hdr = genlmsg_put(skb, pid, seq, &l2tp_nl_family, flags, L2TP_CMD_SESSION_GET);
+       hdr = genlmsg_put(skb, portid, seq, &l2tp_nl_family, flags, L2TP_CMD_SESSION_GET);
        if (!hdr)
                return -EMSGSIZE;
 
@@ -705,12 +705,12 @@ static int l2tp_nl_cmd_session_get(struct sk_buff *skb, struct genl_info *info)
                goto out;
        }
 
-       ret = l2tp_nl_session_send(msg, info->snd_pid, info->snd_seq,
+       ret = l2tp_nl_session_send(msg, info->snd_portid, info->snd_seq,
                                   0, session);
        if (ret < 0)
                goto err_out;
 
-       return genlmsg_unicast(genl_info_net(info), msg, info->snd_pid);
+       return genlmsg_unicast(genl_info_net(info), msg, info->snd_portid);
 
 err_out:
        nlmsg_free(msg);
@@ -742,7 +742,7 @@ static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback
                        continue;
                }
 
-               if (l2tp_nl_session_send(skb, NETLINK_CB(cb->skb).pid,
+               if (l2tp_nl_session_send(skb, NETLINK_CB(cb->skb).portid,
                                         cb->nlh->nlmsg_seq, NLM_F_MULTI,
                                         session) <= 0)
                        break;
index a1839c004357a854ca7ee50592584fb66617b441..7b4799cfbf8dbf456fc8f356b4fe01d02e1f396f 100644 (file)
@@ -151,7 +151,7 @@ static int llc_seq_socket_show(struct seq_file *seq, void *v)
                   sk_wmem_alloc_get(sk),
                   sk_rmem_alloc_get(sk) - llc->copied_seq,
                   sk->sk_state,
-                  sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : -1,
+                  from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
                   llc->link);
 out:
        return 0;
index b2f2bac2c2a2397b5fd6f6b79659996270b96377..204a8351efffc86f566e51b27f173c8bdee6c4cb 100644 (file)
 #include <net/llc_s_st.h>
 #include <net/llc_pdu.h>
 
-/**
- * struct llc_station - LLC station component
- *
- * SAP and connection resource manager, one per adapter.
- *
- * @state: state of station
- * @xid_r_count: XID response PDU counter
- * @mac_sa: MAC source address
- * @sap_list: list of related SAPs
- * @ev_q: events entering state mach.
- * @mac_pdu_q: PDUs ready to send to MAC
- */
-struct llc_station {
-       u8                          state;
-       u8                          xid_r_count;
-       struct timer_list           ack_timer;
-       u8                          retry_count;
-       u8                          maximum_retry;
-       struct {
-               struct sk_buff_head list;
-               spinlock_t          lock;
-       } ev_q;
-       struct sk_buff_head         mac_pdu_q;
-};
-
-#define LLC_STATION_ACK_TIME (3 * HZ)
-
-int sysctl_llc_station_ack_timeout = LLC_STATION_ACK_TIME;
-
-/* Types of events (possible values in 'ev->type') */
-#define LLC_STATION_EV_TYPE_SIMPLE     1
-#define LLC_STATION_EV_TYPE_CONDITION  2
-#define LLC_STATION_EV_TYPE_PRIM       3
-#define LLC_STATION_EV_TYPE_PDU                4       /* command/response PDU */
-#define LLC_STATION_EV_TYPE_ACK_TMR    5
-#define LLC_STATION_EV_TYPE_RPT_STATUS 6
-
-/* Events */
-#define LLC_STATION_EV_ENABLE_WITH_DUP_ADDR_CHECK              1
-#define LLC_STATION_EV_ENABLE_WITHOUT_DUP_ADDR_CHECK           2
-#define LLC_STATION_EV_ACK_TMR_EXP_LT_RETRY_CNT_MAX_RETRY      3
-#define LLC_STATION_EV_ACK_TMR_EXP_EQ_RETRY_CNT_MAX_RETRY      4
-#define LLC_STATION_EV_RX_NULL_DSAP_XID_C                      5
-#define LLC_STATION_EV_RX_NULL_DSAP_0_XID_R_XID_R_CNT_EQ       6
-#define LLC_STATION_EV_RX_NULL_DSAP_1_XID_R_XID_R_CNT_EQ       7
-#define LLC_STATION_EV_RX_NULL_DSAP_TEST_C                     8
-#define LLC_STATION_EV_DISABLE_REQ                             9
-
-struct llc_station_state_ev {
-       u8               type;
-       u8               prim;
-       u8               prim_type;
-       u8               reason;
-       struct list_head node; /* node in station->ev_q.list */
-};
-
-static __inline__ struct llc_station_state_ev *
-                                       llc_station_ev(struct sk_buff *skb)
-{
-       return (struct llc_station_state_ev *)skb->cb;
-}
-
-typedef int (*llc_station_ev_t)(struct sk_buff *skb);
-
-#define LLC_STATION_STATE_DOWN         1       /* initial state */
-#define LLC_STATION_STATE_DUP_ADDR_CHK 2
-#define LLC_STATION_STATE_UP           3
-
-#define LLC_NBR_STATION_STATES         3       /* size of state table */
-
-typedef int (*llc_station_action_t)(struct sk_buff *skb);
-
-/* Station component state table structure */
-struct llc_station_state_trans {
-       llc_station_ev_t ev;
-       u8 next_state;
-       llc_station_action_t *ev_actions;
-};
-
-struct llc_station_state {
-       u8 curr_state;
-       struct llc_station_state_trans **transitions;
-};
-
-static struct llc_station llc_main_station;
-
-static int llc_stat_ev_enable_with_dup_addr_check(struct sk_buff *skb)
-{
-       struct llc_station_state_ev *ev = llc_station_ev(skb);
-
-       return ev->type == LLC_STATION_EV_TYPE_SIMPLE &&
-              ev->prim_type ==
-                             LLC_STATION_EV_ENABLE_WITH_DUP_ADDR_CHECK ? 0 : 1;
-}
-
-static int llc_stat_ev_enable_without_dup_addr_check(struct sk_buff *skb)
-{
-       struct llc_station_state_ev *ev = llc_station_ev(skb);
-
-       return ev->type == LLC_STATION_EV_TYPE_SIMPLE &&
-              ev->prim_type ==
-                       LLC_STATION_EV_ENABLE_WITHOUT_DUP_ADDR_CHECK ? 0 : 1;
-}
-
-static int llc_stat_ev_ack_tmr_exp_lt_retry_cnt_max_retry(struct sk_buff *skb)
-{
-       struct llc_station_state_ev *ev = llc_station_ev(skb);
-
-       return ev->type == LLC_STATION_EV_TYPE_ACK_TMR &&
-               llc_main_station.retry_count <
-               llc_main_station.maximum_retry ? 0 : 1;
-}
-
-static int llc_stat_ev_ack_tmr_exp_eq_retry_cnt_max_retry(struct sk_buff *skb)
-{
-       struct llc_station_state_ev *ev = llc_station_ev(skb);
-
-       return ev->type == LLC_STATION_EV_TYPE_ACK_TMR &&
-               llc_main_station.retry_count ==
-               llc_main_station.maximum_retry ? 0 : 1;
-}
-
 static int llc_stat_ev_rx_null_dsap_xid_c(struct sk_buff *skb)
 {
-       struct llc_station_state_ev *ev = llc_station_ev(skb);
        struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
 
-       return ev->type == LLC_STATION_EV_TYPE_PDU &&
-              LLC_PDU_IS_CMD(pdu) &&                   /* command PDU */
+       return LLC_PDU_IS_CMD(pdu) &&                   /* command PDU */
               LLC_PDU_TYPE_IS_U(pdu) &&                /* U type PDU */
               LLC_U_PDU_CMD(pdu) == LLC_1_PDU_CMD_XID &&
               !pdu->dsap ? 0 : 1;                      /* NULL DSAP value */
 }
 
-static int llc_stat_ev_rx_null_dsap_0_xid_r_xid_r_cnt_eq(struct sk_buff *skb)
-{
-       struct llc_station_state_ev *ev = llc_station_ev(skb);
-       struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
-
-       return ev->type == LLC_STATION_EV_TYPE_PDU &&
-              LLC_PDU_IS_RSP(pdu) &&                   /* response PDU */
-              LLC_PDU_TYPE_IS_U(pdu) &&                /* U type PDU */
-              LLC_U_PDU_RSP(pdu) == LLC_1_PDU_CMD_XID &&
-              !pdu->dsap &&                            /* NULL DSAP value */
-              !llc_main_station.xid_r_count ? 0 : 1;
-}
-
-static int llc_stat_ev_rx_null_dsap_1_xid_r_xid_r_cnt_eq(struct sk_buff *skb)
-{
-       struct llc_station_state_ev *ev = llc_station_ev(skb);
-       struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
-
-       return ev->type == LLC_STATION_EV_TYPE_PDU &&
-              LLC_PDU_IS_RSP(pdu) &&                   /* response PDU */
-              LLC_PDU_TYPE_IS_U(pdu) &&                /* U type PDU */
-              LLC_U_PDU_RSP(pdu) == LLC_1_PDU_CMD_XID &&
-              !pdu->dsap &&                            /* NULL DSAP value */
-              llc_main_station.xid_r_count == 1 ? 0 : 1;
-}
-
 static int llc_stat_ev_rx_null_dsap_test_c(struct sk_buff *skb)
 {
-       struct llc_station_state_ev *ev = llc_station_ev(skb);
        struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
 
-       return ev->type == LLC_STATION_EV_TYPE_PDU &&
-              LLC_PDU_IS_CMD(pdu) &&                   /* command PDU */
+       return LLC_PDU_IS_CMD(pdu) &&                   /* command PDU */
               LLC_PDU_TYPE_IS_U(pdu) &&                /* U type PDU */
               LLC_U_PDU_CMD(pdu) == LLC_1_PDU_CMD_TEST &&
               !pdu->dsap ? 0 : 1;                      /* NULL DSAP */
 }
 
-static int llc_stat_ev_disable_req(struct sk_buff *skb)
-{
-       struct llc_station_state_ev *ev = llc_station_ev(skb);
-
-       return ev->type == LLC_STATION_EV_TYPE_PRIM &&
-              ev->prim == LLC_DISABLE_PRIM &&
-              ev->prim_type == LLC_PRIM_TYPE_REQ ? 0 : 1;
-}
-
-/**
- *     llc_station_send_pdu - queues PDU to send
- *     @skb: Address of the PDU
- *
- *     Queues a PDU to send to the MAC layer.
- */
-static void llc_station_send_pdu(struct sk_buff *skb)
-{
-       skb_queue_tail(&llc_main_station.mac_pdu_q, skb);
-       while ((skb = skb_dequeue(&llc_main_station.mac_pdu_q)) != NULL)
-               if (dev_queue_xmit(skb))
-                       break;
-}
-
-static int llc_station_ac_start_ack_timer(struct sk_buff *skb)
-{
-       mod_timer(&llc_main_station.ack_timer,
-                 jiffies + sysctl_llc_station_ack_timeout);
-       return 0;
-}
-
-static int llc_station_ac_set_retry_cnt_0(struct sk_buff *skb)
-{
-       llc_main_station.retry_count = 0;
-       return 0;
-}
-
-static int llc_station_ac_inc_retry_cnt_by_1(struct sk_buff *skb)
-{
-       llc_main_station.retry_count++;
-       return 0;
-}
-
-static int llc_station_ac_set_xid_r_cnt_0(struct sk_buff *skb)
-{
-       llc_main_station.xid_r_count = 0;
-       return 0;
-}
-
-static int llc_station_ac_inc_xid_r_cnt_by_1(struct sk_buff *skb)
-{
-       llc_main_station.xid_r_count++;
-       return 0;
-}
-
-static int llc_station_ac_send_null_dsap_xid_c(struct sk_buff *skb)
-{
-       int rc = 1;
-       struct sk_buff *nskb = llc_alloc_frame(NULL, skb->dev, LLC_PDU_TYPE_U,
-                                              sizeof(struct llc_xid_info));
-
-       if (!nskb)
-               goto out;
-       llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, 0, 0, LLC_PDU_CMD);
-       llc_pdu_init_as_xid_cmd(nskb, LLC_XID_NULL_CLASS_2, 127);
-       rc = llc_mac_hdr_init(nskb, skb->dev->dev_addr, skb->dev->dev_addr);
-       if (unlikely(rc))
-               goto free;
-       llc_station_send_pdu(nskb);
-out:
-       return rc;
-free:
-       kfree_skb(nskb);
-       goto out;
-}
-
 static int llc_station_ac_send_xid_r(struct sk_buff *skb)
 {
        u8 mac_da[ETH_ALEN], dsap;
@@ -289,7 +62,7 @@ static int llc_station_ac_send_xid_r(struct sk_buff *skb)
        rc = llc_mac_hdr_init(nskb, skb->dev->dev_addr, mac_da);
        if (unlikely(rc))
                goto free;
-       llc_station_send_pdu(nskb);
+       dev_queue_xmit(nskb);
 out:
        return rc;
 free:
@@ -318,7 +91,7 @@ static int llc_station_ac_send_test_r(struct sk_buff *skb)
        rc = llc_mac_hdr_init(nskb, skb->dev->dev_addr, mac_da);
        if (unlikely(rc))
                goto free;
-       llc_station_send_pdu(nskb);
+       dev_queue_xmit(nskb);
 out:
        return rc;
 free:
@@ -326,352 +99,6 @@ free:
        goto out;
 }
 
-static int llc_station_ac_report_status(struct sk_buff *skb)
-{
-       return 0;
-}
-
-/* COMMON STATION STATE transitions */
-
-/* dummy last-transition indicator; common to all state transition groups
- * last entry for this state
- * all members are zeros, .bss zeroes it
- */
-static struct llc_station_state_trans llc_stat_state_trans_end;
-
-/* DOWN STATE transitions */
-
-/* state transition for LLC_STATION_EV_ENABLE_WITH_DUP_ADDR_CHECK event */
-static llc_station_action_t llc_stat_down_state_actions_1[] = {
-       [0] = llc_station_ac_start_ack_timer,
-       [1] = llc_station_ac_set_retry_cnt_0,
-       [2] = llc_station_ac_set_xid_r_cnt_0,
-       [3] = llc_station_ac_send_null_dsap_xid_c,
-       [4] = NULL,
-};
-
-static struct llc_station_state_trans llc_stat_down_state_trans_1 = {
-       .ev         = llc_stat_ev_enable_with_dup_addr_check,
-       .next_state = LLC_STATION_STATE_DUP_ADDR_CHK,
-       .ev_actions = llc_stat_down_state_actions_1,
-};
-
-/* state transition for LLC_STATION_EV_ENABLE_WITHOUT_DUP_ADDR_CHECK event */
-static llc_station_action_t llc_stat_down_state_actions_2[] = {
-       [0] = llc_station_ac_report_status,     /* STATION UP */
-       [1] = NULL,
-};
-
-static struct llc_station_state_trans llc_stat_down_state_trans_2 = {
-       .ev         = llc_stat_ev_enable_without_dup_addr_check,
-       .next_state = LLC_STATION_STATE_UP,
-       .ev_actions = llc_stat_down_state_actions_2,
-};
-
-/* array of pointers; one to each transition */
-static struct llc_station_state_trans *llc_stat_dwn_state_trans[] = {
-       [0] = &llc_stat_down_state_trans_1,
-       [1] = &llc_stat_down_state_trans_2,
-       [2] = &llc_stat_state_trans_end,
-};
-
-/* UP STATE transitions */
-/* state transition for LLC_STATION_EV_DISABLE_REQ event */
-static llc_station_action_t llc_stat_up_state_actions_1[] = {
-       [0] = llc_station_ac_report_status,     /* STATION DOWN */
-       [1] = NULL,
-};
-
-static struct llc_station_state_trans llc_stat_up_state_trans_1 = {
-       .ev         = llc_stat_ev_disable_req,
-       .next_state = LLC_STATION_STATE_DOWN,
-       .ev_actions = llc_stat_up_state_actions_1,
-};
-
-/* state transition for LLC_STATION_EV_RX_NULL_DSAP_XID_C event */
-static llc_station_action_t llc_stat_up_state_actions_2[] = {
-       [0] = llc_station_ac_send_xid_r,
-       [1] = NULL,
-};
-
-static struct llc_station_state_trans llc_stat_up_state_trans_2 = {
-       .ev         = llc_stat_ev_rx_null_dsap_xid_c,
-       .next_state = LLC_STATION_STATE_UP,
-       .ev_actions = llc_stat_up_state_actions_2,
-};
-
-/* state transition for LLC_STATION_EV_RX_NULL_DSAP_TEST_C event */
-static llc_station_action_t llc_stat_up_state_actions_3[] = {
-       [0] = llc_station_ac_send_test_r,
-       [1] = NULL,
-};
-
-static struct llc_station_state_trans llc_stat_up_state_trans_3 = {
-       .ev         = llc_stat_ev_rx_null_dsap_test_c,
-       .next_state = LLC_STATION_STATE_UP,
-       .ev_actions = llc_stat_up_state_actions_3,
-};
-
-/* array of pointers; one to each transition */
-static struct llc_station_state_trans *llc_stat_up_state_trans [] = {
-       [0] = &llc_stat_up_state_trans_1,
-       [1] = &llc_stat_up_state_trans_2,
-       [2] = &llc_stat_up_state_trans_3,
-       [3] = &llc_stat_state_trans_end,
-};
-
-/* DUP ADDR CHK STATE transitions */
-/* state transition for LLC_STATION_EV_RX_NULL_DSAP_0_XID_R_XID_R_CNT_EQ
- * event
- */
-static llc_station_action_t llc_stat_dupaddr_state_actions_1[] = {
-       [0] = llc_station_ac_inc_xid_r_cnt_by_1,
-       [1] = NULL,
-};
-
-static struct llc_station_state_trans llc_stat_dupaddr_state_trans_1 = {
-       .ev         = llc_stat_ev_rx_null_dsap_0_xid_r_xid_r_cnt_eq,
-       .next_state = LLC_STATION_STATE_DUP_ADDR_CHK,
-       .ev_actions = llc_stat_dupaddr_state_actions_1,
-};
-
-/* state transition for LLC_STATION_EV_RX_NULL_DSAP_1_XID_R_XID_R_CNT_EQ
- * event
- */
-static llc_station_action_t llc_stat_dupaddr_state_actions_2[] = {
-       [0] = llc_station_ac_report_status,     /* DUPLICATE ADDRESS FOUND */
-       [1] = NULL,
-};
-
-static struct llc_station_state_trans llc_stat_dupaddr_state_trans_2 = {
-       .ev         = llc_stat_ev_rx_null_dsap_1_xid_r_xid_r_cnt_eq,
-       .next_state = LLC_STATION_STATE_DOWN,
-       .ev_actions = llc_stat_dupaddr_state_actions_2,
-};
-
-/* state transition for LLC_STATION_EV_RX_NULL_DSAP_XID_C event */
-static llc_station_action_t llc_stat_dupaddr_state_actions_3[] = {
-       [0] = llc_station_ac_send_xid_r,
-       [1] = NULL,
-};
-
-static struct llc_station_state_trans llc_stat_dupaddr_state_trans_3 = {
-       .ev         = llc_stat_ev_rx_null_dsap_xid_c,
-       .next_state = LLC_STATION_STATE_DUP_ADDR_CHK,
-       .ev_actions = llc_stat_dupaddr_state_actions_3,
-};
-
-/* state transition for LLC_STATION_EV_ACK_TMR_EXP_LT_RETRY_CNT_MAX_RETRY
- * event
- */
-static llc_station_action_t llc_stat_dupaddr_state_actions_4[] = {
-       [0] = llc_station_ac_start_ack_timer,
-       [1] = llc_station_ac_inc_retry_cnt_by_1,
-       [2] = llc_station_ac_set_xid_r_cnt_0,
-       [3] = llc_station_ac_send_null_dsap_xid_c,
-       [4] = NULL,
-};
-
-static struct llc_station_state_trans llc_stat_dupaddr_state_trans_4 = {
-       .ev         = llc_stat_ev_ack_tmr_exp_lt_retry_cnt_max_retry,
-       .next_state = LLC_STATION_STATE_DUP_ADDR_CHK,
-       .ev_actions = llc_stat_dupaddr_state_actions_4,
-};
-
-/* state transition for LLC_STATION_EV_ACK_TMR_EXP_EQ_RETRY_CNT_MAX_RETRY
- * event
- */
-static llc_station_action_t llc_stat_dupaddr_state_actions_5[] = {
-       [0] = llc_station_ac_report_status,     /* STATION UP */
-       [1] = NULL,
-};
-
-static struct llc_station_state_trans llc_stat_dupaddr_state_trans_5 = {
-       .ev         = llc_stat_ev_ack_tmr_exp_eq_retry_cnt_max_retry,
-       .next_state = LLC_STATION_STATE_UP,
-       .ev_actions = llc_stat_dupaddr_state_actions_5,
-};
-
-/* state transition for LLC_STATION_EV_DISABLE_REQ event */
-static llc_station_action_t llc_stat_dupaddr_state_actions_6[] = {
-       [0] = llc_station_ac_report_status,     /* STATION DOWN */
-       [1] = NULL,
-};
-
-static struct llc_station_state_trans llc_stat_dupaddr_state_trans_6 = {
-       .ev         = llc_stat_ev_disable_req,
-       .next_state = LLC_STATION_STATE_DOWN,
-       .ev_actions = llc_stat_dupaddr_state_actions_6,
-};
-
-/* array of pointers; one to each transition */
-static struct llc_station_state_trans *llc_stat_dupaddr_state_trans[] = {
-       [0] = &llc_stat_dupaddr_state_trans_6,  /* Request */
-       [1] = &llc_stat_dupaddr_state_trans_4,  /* Timer */
-       [2] = &llc_stat_dupaddr_state_trans_5,
-       [3] = &llc_stat_dupaddr_state_trans_1,  /* Receive frame */
-       [4] = &llc_stat_dupaddr_state_trans_2,
-       [5] = &llc_stat_dupaddr_state_trans_3,
-       [6] = &llc_stat_state_trans_end,
-};
-
-static struct llc_station_state
-                       llc_station_state_table[LLC_NBR_STATION_STATES] = {
-       [LLC_STATION_STATE_DOWN - 1] = {
-               .curr_state  = LLC_STATION_STATE_DOWN,
-               .transitions = llc_stat_dwn_state_trans,
-       },
-       [LLC_STATION_STATE_DUP_ADDR_CHK - 1] = {
-               .curr_state  = LLC_STATION_STATE_DUP_ADDR_CHK,
-               .transitions = llc_stat_dupaddr_state_trans,
-       },
-       [LLC_STATION_STATE_UP - 1] = {
-               .curr_state  = LLC_STATION_STATE_UP,
-               .transitions = llc_stat_up_state_trans,
-       },
-};
-
-/**
- *     llc_exec_station_trans_actions - executes actions for transition
- *     @trans: Address of the transition
- *     @skb: Address of the event that caused the transition
- *
- *     Executes actions of a transition of the station state machine. Returns
- *     0 if all actions complete successfully, nonzero otherwise.
- */
-static u16 llc_exec_station_trans_actions(struct llc_station_state_trans *trans,
-                                         struct sk_buff *skb)
-{
-       u16 rc = 0;
-       llc_station_action_t *next_action = trans->ev_actions;
-
-       for (; next_action && *next_action; next_action++)
-               if ((*next_action)(skb))
-                       rc = 1;
-       return rc;
-}
-
-/**
- *     llc_find_station_trans - finds transition for this event
- *     @skb: Address of the event
- *
- *     Search thru events of the current state of the station until list
- *     exhausted or it's obvious that the event is not valid for the current
- *     state. Returns the address of the transition if cound, %NULL otherwise.
- */
-static struct llc_station_state_trans *
-                               llc_find_station_trans(struct sk_buff *skb)
-{
-       int i = 0;
-       struct llc_station_state_trans *rc = NULL;
-       struct llc_station_state_trans **next_trans;
-       struct llc_station_state *curr_state =
-                               &llc_station_state_table[llc_main_station.state - 1];
-
-       for (next_trans = curr_state->transitions; next_trans[i]->ev; i++)
-               if (!next_trans[i]->ev(skb)) {
-                       rc = next_trans[i];
-                       break;
-               }
-       return rc;
-}
-
-/**
- *     llc_station_free_ev - frees an event
- *     @skb: Address of the event
- *
- *     Frees an event.
- */
-static void llc_station_free_ev(struct sk_buff *skb)
-{
-       struct llc_station_state_ev *ev = llc_station_ev(skb);
-
-       if (ev->type == LLC_STATION_EV_TYPE_PDU)
-               kfree_skb(skb);
-}
-
-/**
- *     llc_station_next_state - processes event and goes to the next state
- *     @skb: Address of the event
- *
- *     Processes an event, executes any transitions related to that event and
- *     updates the state of the station.
- */
-static u16 llc_station_next_state(struct sk_buff *skb)
-{
-       u16 rc = 1;
-       struct llc_station_state_trans *trans;
-
-       if (llc_main_station.state > LLC_NBR_STATION_STATES)
-               goto out;
-       trans = llc_find_station_trans(skb);
-       if (trans) {
-               /* got the state to which we next transition; perform the
-                * actions associated with this transition before actually
-                * transitioning to the next state
-                */
-               rc = llc_exec_station_trans_actions(trans, skb);
-               if (!rc)
-                       /* transition station to next state if all actions
-                        * execute successfully; done; wait for next event
-                        */
-                       llc_main_station.state = trans->next_state;
-       } else
-               /* event not recognized in current state; re-queue it for
-                * processing again at a later time; return failure
-                */
-               rc = 0;
-out:
-       llc_station_free_ev(skb);
-       return rc;
-}
-
-/**
- *     llc_station_service_events - service events in the queue
- *
- *     Get an event from the station event queue (if any); attempt to service
- *     the event; if event serviced, get the next event (if any) on the event
- *     queue; if event not service, re-queue the event on the event queue and
- *     attempt to service the next event; when serviced all events in queue,
- *     finished; if don't transition to different state, just service all
- *     events once; if transition to new state, service all events again.
- *     Caller must hold llc_main_station.ev_q.lock.
- */
-static void llc_station_service_events(void)
-{
-       struct sk_buff *skb;
-
-       while ((skb = skb_dequeue(&llc_main_station.ev_q.list)) != NULL)
-               llc_station_next_state(skb);
-}
-
-/**
- *     llc_station_state_process - queue event and try to process queue.
- *     @skb: Address of the event
- *
- *     Queues an event (on the station event queue) for handling by the
- *     station state machine and attempts to process any queued-up events.
- */
-static void llc_station_state_process(struct sk_buff *skb)
-{
-       spin_lock_bh(&llc_main_station.ev_q.lock);
-       skb_queue_tail(&llc_main_station.ev_q.list, skb);
-       llc_station_service_events();
-       spin_unlock_bh(&llc_main_station.ev_q.lock);
-}
-
-static void llc_station_ack_tmr_cb(unsigned long timeout_data)
-{
-       struct sk_buff *skb = alloc_skb(0, GFP_ATOMIC);
-
-       if (skb) {
-               struct llc_station_state_ev *ev = llc_station_ev(skb);
-
-               ev->type = LLC_STATION_EV_TYPE_ACK_TMR;
-               llc_station_state_process(skb);
-       }
-}
-
 /**
  *     llc_station_rcv - send received pdu to the station state machine
  *     @skb: received frame.
@@ -680,24 +107,15 @@ static void llc_station_ack_tmr_cb(unsigned long timeout_data)
  */
 static void llc_station_rcv(struct sk_buff *skb)
 {
-       struct llc_station_state_ev *ev = llc_station_ev(skb);
-
-       ev->type   = LLC_STATION_EV_TYPE_PDU;
-       ev->reason = 0;
-       llc_station_state_process(skb);
+       if (llc_stat_ev_rx_null_dsap_xid_c(skb))
+               llc_station_ac_send_xid_r(skb);
+       else if (llc_stat_ev_rx_null_dsap_test_c(skb))
+               llc_station_ac_send_test_r(skb);
+       kfree_skb(skb);
 }
 
 void __init llc_station_init(void)
 {
-       skb_queue_head_init(&llc_main_station.mac_pdu_q);
-       skb_queue_head_init(&llc_main_station.ev_q.list);
-       spin_lock_init(&llc_main_station.ev_q.lock);
-       setup_timer(&llc_main_station.ack_timer, llc_station_ack_tmr_cb,
-                       (unsigned long)&llc_main_station);
-       llc_main_station.ack_timer.expires  = jiffies +
-                                               sysctl_llc_station_ack_timeout;
-       llc_main_station.maximum_retry  = 1;
-       llc_main_station.state          = LLC_STATION_STATE_UP;
        llc_set_station_handler(llc_station_rcv);
 }
 
index d75306b9c2f3e80d5fe0ed90d511d646b8a0728c..612a5ddaf93b1ab1b5a524c5efef8d6b1f769038 100644 (file)
@@ -47,13 +47,6 @@ static struct ctl_table llc2_timeout_table[] = {
 };
 
 static struct ctl_table llc_station_table[] = {
-       {
-               .procname       = "ack_timeout",
-               .data           = &sysctl_llc_station_ack_timeout,
-               .maxlen         = sizeof(long),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec_jiffies,
-       },
        { },
 };
 
index 8dfd70d8fcfbcce247670d102a21a4d083cb8276..a04752e910239821b1bc3110d078c8d81349d4e2 100644 (file)
@@ -38,14 +38,10 @@ static void gf_mulx(u8 *pad)
 static void aes_128_cmac_vector(struct crypto_cipher *tfm, size_t num_elem,
                                const u8 *addr[], const size_t *len, u8 *mac)
 {
-       u8 scratch[2 * AES_BLOCK_SIZE];
-       u8 *cbc, *pad;
+       u8 cbc[AES_BLOCK_SIZE], pad[AES_BLOCK_SIZE];
        const u8 *pos, *end;
        size_t i, e, left, total_len;
 
-       cbc = scratch;
-       pad = scratch + AES_BLOCK_SIZE;
-
        memset(cbc, 0, AES_BLOCK_SIZE);
 
        total_len = 0;
index d0deb3edae21fe4a1fc3cbbc7773e742945197f4..3195a6307f50eeb5e6715a6db0fbecc4fee2f4d5 100644 (file)
@@ -869,7 +869,7 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
 
        } else {
                ___ieee80211_stop_tx_ba_session(sta, tid, WLAN_BACK_INITIATOR,
-                                               true);
+                                               false);
        }
 
  out:
index a58c0b649ba137b09214c031bf3508b5fe2974eb..05f3a313db8852b36c677cad188fd7154d6564ef 100644 (file)
@@ -20,7 +20,8 @@
 #include "rate.h"
 #include "mesh.h"
 
-static struct wireless_dev *ieee80211_add_iface(struct wiphy *wiphy, char *name,
+static struct wireless_dev *ieee80211_add_iface(struct wiphy *wiphy,
+                                               const char *name,
                                                enum nl80211_iftype type,
                                                u32 *flags,
                                                struct vif_params *params)
@@ -102,6 +103,18 @@ static int ieee80211_change_iface(struct wiphy *wiphy,
        return 0;
 }
 
+static int ieee80211_start_p2p_device(struct wiphy *wiphy,
+                                     struct wireless_dev *wdev)
+{
+       return ieee80211_do_open(wdev, true);
+}
+
+static void ieee80211_stop_p2p_device(struct wiphy *wiphy,
+                                     struct wireless_dev *wdev)
+{
+       ieee80211_sdata_stop(IEEE80211_WDEV_TO_SUB_IF(wdev));
+}
+
 static int ieee80211_set_noack_map(struct wiphy *wiphy,
                                  struct net_device *dev,
                                  u16 noack_map)
@@ -158,6 +171,38 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
                }
        }
 
+       switch (sdata->vif.type) {
+       case NL80211_IFTYPE_STATION:
+               if (sdata->u.mgd.mfp != IEEE80211_MFP_DISABLED)
+                       key->conf.flags |= IEEE80211_KEY_FLAG_RX_MGMT;
+               break;
+       case NL80211_IFTYPE_AP:
+       case NL80211_IFTYPE_AP_VLAN:
+               /* Keys without a station are used for TX only */
+               if (key->sta && test_sta_flag(key->sta, WLAN_STA_MFP))
+                       key->conf.flags |= IEEE80211_KEY_FLAG_RX_MGMT;
+               break;
+       case NL80211_IFTYPE_ADHOC:
+               /* no MFP (yet) */
+               break;
+       case NL80211_IFTYPE_MESH_POINT:
+#ifdef CONFIG_MAC80211_MESH
+               if (sdata->u.mesh.security != IEEE80211_MESH_SEC_NONE)
+                       key->conf.flags |= IEEE80211_KEY_FLAG_RX_MGMT;
+               break;
+#endif
+       case NL80211_IFTYPE_WDS:
+       case NL80211_IFTYPE_MONITOR:
+       case NL80211_IFTYPE_P2P_DEVICE:
+       case NL80211_IFTYPE_UNSPECIFIED:
+       case NUM_NL80211_IFTYPES:
+       case NL80211_IFTYPE_P2P_CLIENT:
+       case NL80211_IFTYPE_P2P_GO:
+               /* shouldn't happen */
+               WARN_ON_ONCE(1);
+               break;
+       }
+
        err = ieee80211_key_link(key, sdata, sta);
        if (err)
                ieee80211_key_free(sdata->local, key);
@@ -330,7 +375,7 @@ static void rate_idx_to_bitrate(struct rate_info *rate, struct sta_info *sta, in
        if (!(rate->flags & RATE_INFO_FLAGS_MCS)) {
                struct ieee80211_supported_band *sband;
                sband = sta->local->hw.wiphy->bands[
-                               sta->local->hw.conf.channel->band];
+                               sta->local->oper_channel->band];
                rate->legacy = sband->bitrates[idx].bitrate;
        } else
                rate->mcs = idx;
@@ -725,25 +770,23 @@ static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
 static int ieee80211_set_probe_resp(struct ieee80211_sub_if_data *sdata,
                                    const u8 *resp, size_t resp_len)
 {
-       struct sk_buff *new, *old;
+       struct probe_resp *new, *old;
 
        if (!resp || !resp_len)
                return 1;
 
        old = rtnl_dereference(sdata->u.ap.probe_resp);
 
-       new = dev_alloc_skb(resp_len);
+       new = kzalloc(sizeof(struct probe_resp) + resp_len, GFP_KERNEL);
        if (!new)
                return -ENOMEM;
 
-       memcpy(skb_put(new, resp_len), resp, resp_len);
+       new->len = resp_len;
+       memcpy(new->data, resp, resp_len);
 
        rcu_assign_pointer(sdata->u.ap.probe_resp, new);
-       if (old) {
-               /* TODO: use call_rcu() */
-               synchronize_rcu();
-               dev_kfree_skb(old);
-       }
+       if (old)
+               kfree_rcu(old, rcu_head);
 
        return 0;
 }
@@ -950,7 +993,7 @@ static void ieee80211_send_layer2_update(struct sta_info *sta)
        /* 802.2 Type 1 Logical Link Control (LLC) Exchange Identifier (XID)
         * Update response frame; IEEE Std 802.2-1998, 5.4.1.2.1 */
 
-       memset(msg->da, 0xff, ETH_ALEN);
+       eth_broadcast_addr(msg->da);
        memcpy(msg->sa, sta->sta.addr, ETH_ALEN);
        msg->len = htons(6);
        msg->dsap = 0;
@@ -1285,9 +1328,10 @@ static int ieee80211_change_station(struct wiphy *wiphy,
        mutex_unlock(&local->sta_mtx);
 
        if (sdata->vif.type == NL80211_IFTYPE_STATION &&
-           params->sta_flags_mask & BIT(NL80211_STA_FLAG_AUTHORIZED))
+           params->sta_flags_mask & BIT(NL80211_STA_FLAG_AUTHORIZED)) {
                ieee80211_recalc_ps(local, -1);
-
+               ieee80211_recalc_ps_vif(sdata);
+       }
        return 0;
 }
 
@@ -1660,7 +1704,7 @@ static int ieee80211_change_bss(struct wiphy *wiphy,
        }
 
        if (!sdata->vif.bss_conf.use_short_slot &&
-           sdata->local->hw.conf.channel->band == IEEE80211_BAND_5GHZ) {
+           sdata->local->oper_channel->band == IEEE80211_BAND_5GHZ) {
                sdata->vif.bss_conf.use_short_slot = true;
                changed |= BSS_CHANGED_ERP_SLOT;
        }
@@ -1774,6 +1818,7 @@ static int ieee80211_scan(struct wiphy *wiphy,
        case NL80211_IFTYPE_ADHOC:
        case NL80211_IFTYPE_MESH_POINT:
        case NL80211_IFTYPE_P2P_CLIENT:
+       case NL80211_IFTYPE_P2P_DEVICE:
                break;
        case NL80211_IFTYPE_P2P_GO:
                if (sdata->local->ops->hw_scan)
@@ -1926,7 +1971,7 @@ static int ieee80211_set_tx_power(struct wiphy *wiphy,
                                  enum nl80211_tx_power_setting type, int mbm)
 {
        struct ieee80211_local *local = wiphy_priv(wiphy);
-       struct ieee80211_channel *chan = local->hw.conf.channel;
+       struct ieee80211_channel *chan = local->oper_channel;
        u32 changes = 0;
 
        switch (type) {
@@ -2026,9 +2071,7 @@ int __ieee80211_request_smps(struct ieee80211_sub_if_data *sdata,
         */
        if (!sdata->u.mgd.associated ||
            sdata->vif.bss_conf.channel_type == NL80211_CHAN_NO_HT) {
-               mutex_lock(&sdata->local->iflist_mtx);
                ieee80211_recalc_smps(sdata->local);
-               mutex_unlock(&sdata->local->iflist_mtx);
                return 0;
        }
 
@@ -2078,6 +2121,7 @@ static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
                ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
 
        ieee80211_recalc_ps(local, -1);
+       ieee80211_recalc_ps_vif(sdata);
 
        return 0;
 }
@@ -2460,6 +2504,9 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
                if (!sdata->u.mgd.associated)
                        need_offchan = true;
                break;
+       case NL80211_IFTYPE_P2P_DEVICE:
+               need_offchan = true;
+               break;
        default:
                return -EOPNOTSUPP;
        }
@@ -2652,6 +2699,7 @@ ieee80211_prep_tdls_encap_data(struct wiphy *wiphy, struct net_device *dev,
                               u16 status_code, struct sk_buff *skb)
 {
        struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+       struct ieee80211_local *local = sdata->local;
        struct ieee80211_tdls_data *tf;
 
        tf = (void *)skb_put(skb, offsetof(struct ieee80211_tdls_data, u));
@@ -2671,8 +2719,10 @@ ieee80211_prep_tdls_encap_data(struct wiphy *wiphy, struct net_device *dev,
                tf->u.setup_req.capability =
                        cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
 
-               ieee80211_add_srates_ie(sdata, skb, false);
-               ieee80211_add_ext_srates_ie(sdata, skb, false);
+               ieee80211_add_srates_ie(sdata, skb, false,
+                                       local->oper_channel->band);
+               ieee80211_add_ext_srates_ie(sdata, skb, false,
+                                           local->oper_channel->band);
                ieee80211_tdls_add_ext_capab(skb);
                break;
        case WLAN_TDLS_SETUP_RESPONSE:
@@ -2685,8 +2735,10 @@ ieee80211_prep_tdls_encap_data(struct wiphy *wiphy, struct net_device *dev,
                tf->u.setup_resp.capability =
                        cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
 
-               ieee80211_add_srates_ie(sdata, skb, false);
-               ieee80211_add_ext_srates_ie(sdata, skb, false);
+               ieee80211_add_srates_ie(sdata, skb, false,
+                                       local->oper_channel->band);
+               ieee80211_add_ext_srates_ie(sdata, skb, false,
+                                           local->oper_channel->band);
                ieee80211_tdls_add_ext_capab(skb);
                break;
        case WLAN_TDLS_SETUP_CONFIRM:
@@ -2724,6 +2776,7 @@ ieee80211_prep_tdls_direct(struct wiphy *wiphy, struct net_device *dev,
                           u16 status_code, struct sk_buff *skb)
 {
        struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+       struct ieee80211_local *local = sdata->local;
        struct ieee80211_mgmt *mgmt;
 
        mgmt = (void *)skb_put(skb, 24);
@@ -2746,8 +2799,10 @@ ieee80211_prep_tdls_direct(struct wiphy *wiphy, struct net_device *dev,
                mgmt->u.action.u.tdls_discover_resp.capability =
                        cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
 
-               ieee80211_add_srates_ie(sdata, skb, false);
-               ieee80211_add_ext_srates_ie(sdata, skb, false);
+               ieee80211_add_srates_ie(sdata, skb, false,
+                                       local->oper_channel->band);
+               ieee80211_add_ext_srates_ie(sdata, skb, false,
+                                           local->oper_channel->band);
                ieee80211_tdls_add_ext_capab(skb);
                break;
        default:
@@ -3004,6 +3059,8 @@ struct cfg80211_ops mac80211_config_ops = {
        .add_virtual_intf = ieee80211_add_iface,
        .del_virtual_intf = ieee80211_del_iface,
        .change_virtual_intf = ieee80211_change_iface,
+       .start_p2p_device = ieee80211_start_p2p_device,
+       .stop_p2p_device = ieee80211_stop_p2p_device,
        .add_key = ieee80211_add_key,
        .del_key = ieee80211_del_key,
        .get_key = ieee80211_get_key,
index f0f87e5a1d354eef6a705deba7a15c21b491093f..0bfc914ddd1504d16a2ebf8ad74655d6deb60e53 100644 (file)
@@ -68,16 +68,14 @@ ieee80211_get_channel_mode(struct ieee80211_local *local,
        return mode;
 }
 
-bool ieee80211_set_channel_type(struct ieee80211_local *local,
-                               struct ieee80211_sub_if_data *sdata,
-                               enum nl80211_channel_type chantype)
+static enum nl80211_channel_type
+ieee80211_get_superchan(struct ieee80211_local *local,
+                       struct ieee80211_sub_if_data *sdata)
 {
-       struct ieee80211_sub_if_data *tmp;
        enum nl80211_channel_type superchan = NL80211_CHAN_NO_HT;
-       bool result;
+       struct ieee80211_sub_if_data *tmp;
 
        mutex_lock(&local->iflist_mtx);
-
        list_for_each_entry(tmp, &local->interfaces, list) {
                if (tmp == sdata)
                        continue;
@@ -103,39 +101,70 @@ bool ieee80211_set_channel_type(struct ieee80211_local *local,
                        break;
                }
        }
+       mutex_unlock(&local->iflist_mtx);
 
-       switch (superchan) {
+       return superchan;
+}
+
+static bool
+ieee80211_channel_types_are_compatible(enum nl80211_channel_type chantype1,
+                                      enum nl80211_channel_type chantype2,
+                                      enum nl80211_channel_type *compat)
+{
+       /*
+        * start out with chantype1 being the result,
+        * overwriting later if needed
+        */
+       if (compat)
+               *compat = chantype1;
+
+       switch (chantype1) {
        case NL80211_CHAN_NO_HT:
+               if (compat)
+                       *compat = chantype2;
+               break;
        case NL80211_CHAN_HT20:
                /*
                 * allow any change that doesn't go to no-HT
                 * (if it already is no-HT no change is needed)
                 */
-               if (chantype == NL80211_CHAN_NO_HT)
+               if (chantype2 == NL80211_CHAN_NO_HT)
                        break;
-               superchan = chantype;
+               if (compat)
+                       *compat = chantype2;
                break;
        case NL80211_CHAN_HT40PLUS:
        case NL80211_CHAN_HT40MINUS:
                /* allow smaller bandwidth and same */
-               if (chantype == NL80211_CHAN_NO_HT)
+               if (chantype2 == NL80211_CHAN_NO_HT)
                        break;
-               if (chantype == NL80211_CHAN_HT20)
+               if (chantype2 == NL80211_CHAN_HT20)
                        break;
-               if (superchan == chantype)
+               if (chantype2 == chantype1)
                        break;
-               result = false;
-               goto out;
+               return false;
        }
 
-       local->_oper_channel_type = superchan;
+       return true;
+}
+
+bool ieee80211_set_channel_type(struct ieee80211_local *local,
+                               struct ieee80211_sub_if_data *sdata,
+                               enum nl80211_channel_type chantype)
+{
+       enum nl80211_channel_type superchan;
+       enum nl80211_channel_type compatchan;
+
+       superchan = ieee80211_get_superchan(local, sdata);
+       if (!ieee80211_channel_types_are_compatible(superchan, chantype,
+                                                   &compatchan))
+               return false;
+
+       local->_oper_channel_type = compatchan;
 
        if (sdata)
                sdata->vif.bss_conf.channel_type = chantype;
 
-       result = true;
- out:
-       mutex_unlock(&local->iflist_mtx);
+       return true;
 
-       return result;
 }
index b8dfb440c8ef1ff903e3359e35b041ea9093d358..466f4b45dd94fdf72517d60a5a71e5ba8bbc9daf 100644 (file)
@@ -63,8 +63,6 @@ DEBUGFS_READONLY_FILE(user_power, "%d",
                      local->user_power_level);
 DEBUGFS_READONLY_FILE(power, "%d",
                      local->hw.conf.power_level);
-DEBUGFS_READONLY_FILE(frequency, "%d",
-                     local->hw.conf.channel->center_freq);
 DEBUGFS_READONLY_FILE(total_ps_buffered, "%d",
                      local->total_ps_buffered);
 DEBUGFS_READONLY_FILE(wep_iv, "%#08x",
@@ -72,6 +70,7 @@ DEBUGFS_READONLY_FILE(wep_iv, "%#08x",
 DEBUGFS_READONLY_FILE(rate_ctrl_alg, "%s",
        local->rate_ctrl ? local->rate_ctrl->ops->name : "hw/driver");
 
+#ifdef CONFIG_PM
 static ssize_t reset_write(struct file *file, const char __user *user_buf,
                           size_t count, loff_t *ppos)
 {
@@ -90,33 +89,7 @@ static const struct file_operations reset_ops = {
        .open = simple_open,
        .llseek = noop_llseek,
 };
-
-static ssize_t channel_type_read(struct file *file, char __user *user_buf,
-                      size_t count, loff_t *ppos)
-{
-       struct ieee80211_local *local = file->private_data;
-       const char *buf;
-
-       switch (local->hw.conf.channel_type) {
-       case NL80211_CHAN_NO_HT:
-               buf = "no ht\n";
-               break;
-       case NL80211_CHAN_HT20:
-               buf = "ht20\n";
-               break;
-       case NL80211_CHAN_HT40MINUS:
-               buf = "ht40-\n";
-               break;
-       case NL80211_CHAN_HT40PLUS:
-               buf = "ht40+\n";
-               break;
-       default:
-               buf = "???";
-               break;
-       }
-
-       return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
-}
+#endif
 
 static ssize_t hwflags_read(struct file *file, char __user *user_buf,
                            size_t count, loff_t *ppos)
@@ -205,7 +178,6 @@ static ssize_t queues_read(struct file *file, char __user *user_buf,
 }
 
 DEBUGFS_READONLY_FILE_OPS(hwflags);
-DEBUGFS_READONLY_FILE_OPS(channel_type);
 DEBUGFS_READONLY_FILE_OPS(queues);
 
 /* statistics stuff */
@@ -272,12 +244,12 @@ void debugfs_hw_add(struct ieee80211_local *local)
 
        local->debugfs.keys = debugfs_create_dir("keys", phyd);
 
-       DEBUGFS_ADD(frequency);
        DEBUGFS_ADD(total_ps_buffered);
        DEBUGFS_ADD(wep_iv);
        DEBUGFS_ADD(queues);
+#ifdef CONFIG_PM
        DEBUGFS_ADD_MODE(reset, 0200);
-       DEBUGFS_ADD(channel_type);
+#endif
        DEBUGFS_ADD(hwflags);
        DEBUGFS_ADD(user_power);
        DEBUGFS_ADD(power);
index df9203199102911d23626bd3ecfeb8b72066dc93..da9003b20004227b76f5ad89f04ad4445bf7d0ec 100644 (file)
@@ -9,7 +9,7 @@ static inline void check_sdata_in_driver(struct ieee80211_sub_if_data *sdata)
 {
        WARN(!(sdata->flags & IEEE80211_SDATA_IN_DRIVER),
             "%s:  Failed check-sdata-in-driver check, flags: 0x%x\n",
-            sdata->dev->name, sdata->flags);
+            sdata->dev ? sdata->dev->name : sdata->name, sdata->flags);
 }
 
 static inline struct ieee80211_sub_if_data *
@@ -22,9 +22,11 @@ get_bss_sdata(struct ieee80211_sub_if_data *sdata)
        return sdata;
 }
 
-static inline void drv_tx(struct ieee80211_local *local, struct sk_buff *skb)
+static inline void drv_tx(struct ieee80211_local *local,
+                         struct ieee80211_tx_control *control,
+                         struct sk_buff *skb)
 {
-       local->ops->tx(&local->hw, skb);
+       local->ops->tx(&local->hw, control, skb);
 }
 
 static inline void drv_get_et_strings(struct ieee80211_sub_if_data *sdata,
@@ -526,6 +528,9 @@ static inline void drv_sta_rc_update(struct ieee80211_local *local,
        sdata = get_bss_sdata(sdata);
        check_sdata_in_driver(sdata);
 
+       WARN_ON(changed & IEEE80211_RC_SUPP_RATES_CHANGED &&
+               sdata->vif.type != NL80211_IFTYPE_ADHOC);
+
        trace_drv_sta_rc_update(local, sdata, sta, changed);
        if (local->ops->sta_rc_update)
                local->ops->sta_rc_update(&local->hw, &sdata->vif,
index 5746d62faba1956d5a8690726417275cc4a70ff5..5f3620f0bc0a651257aa53e28b91c4b2114be637 100644 (file)
@@ -109,7 +109,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
        memset(mgmt, 0, 24 + sizeof(mgmt->u.beacon));
        mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
                                          IEEE80211_STYPE_PROBE_RESP);
-       memset(mgmt->da, 0xff, ETH_ALEN);
+       eth_broadcast_addr(mgmt->da);
        memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
        memcpy(mgmt->bssid, ifibss->bssid, ETH_ALEN);
        mgmt->u.beacon.beacon_int = cpu_to_le16(beacon_int);
@@ -205,7 +205,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
        mod_timer(&ifibss->timer,
                  round_jiffies(jiffies + IEEE80211_IBSS_MERGE_INTERVAL));
 
-       bss = cfg80211_inform_bss_frame(local->hw.wiphy, local->hw.conf.channel,
+       bss = cfg80211_inform_bss_frame(local->hw.wiphy, chan,
                                        mgmt, skb->len, 0, GFP_KERNEL);
        cfg80211_put_bss(bss);
        netif_carrier_on(sdata->dev);
@@ -278,7 +278,7 @@ static struct sta_info *ieee80211_ibss_finish_sta(struct sta_info *sta,
        if (auth && !sdata->u.ibss.auth_frame_registrations) {
                ibss_dbg(sdata,
                         "TX Auth SA=%pM DA=%pM BSSID=%pM (auth_transaction=1)\n",
-                        sdata->vif.addr, sdata->u.ibss.bssid, addr);
+                        sdata->vif.addr, addr, sdata->u.ibss.bssid);
                ieee80211_send_auth(sdata, 1, WLAN_AUTH_OPEN, NULL, 0,
                                    addr, sdata->u.ibss.bssid, NULL, 0, 0);
        }
@@ -294,7 +294,7 @@ ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
        struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
        struct ieee80211_local *local = sdata->local;
        struct sta_info *sta;
-       int band = local->hw.conf.channel->band;
+       int band = local->oper_channel->band;
 
        /*
         * XXX: Consider removing the least recently used entry and
@@ -332,11 +332,27 @@ ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
        return ieee80211_ibss_finish_sta(sta, auth);
 }
 
+static void ieee80211_rx_mgmt_deauth_ibss(struct ieee80211_sub_if_data *sdata,
+                                         struct ieee80211_mgmt *mgmt,
+                                         size_t len)
+{
+       u16 reason = le16_to_cpu(mgmt->u.deauth.reason_code);
+
+       if (len < IEEE80211_DEAUTH_FRAME_LEN)
+               return;
+
+       ibss_dbg(sdata, "RX DeAuth SA=%pM DA=%pM BSSID=%pM (reason: %d)\n",
+                mgmt->sa, mgmt->da, mgmt->bssid, reason);
+       sta_info_destroy_addr(sdata, mgmt->sa);
+}
+
 static void ieee80211_rx_mgmt_auth_ibss(struct ieee80211_sub_if_data *sdata,
                                        struct ieee80211_mgmt *mgmt,
                                        size_t len)
 {
        u16 auth_alg, auth_transaction;
+       struct sta_info *sta;
+       u8 deauth_frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
 
        lockdep_assert_held(&sdata->u.ibss.mtx);
 
@@ -352,9 +368,21 @@ static void ieee80211_rx_mgmt_auth_ibss(struct ieee80211_sub_if_data *sdata,
                 "RX Auth SA=%pM DA=%pM BSSID=%pM (auth_transaction=%d)\n",
                 mgmt->sa, mgmt->da, mgmt->bssid, auth_transaction);
        sta_info_destroy_addr(sdata, mgmt->sa);
-       ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa, 0, false);
+       sta = ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa, 0, false);
        rcu_read_unlock();
 
+       /*
+        * if we have any problem in allocating the new station, we reply with a
+        * DEAUTH frame to tell the other end that we had a problem
+        */
+       if (!sta) {
+               ieee80211_send_deauth_disassoc(sdata, sdata->u.ibss.bssid,
+                                              IEEE80211_STYPE_DEAUTH,
+                                              WLAN_REASON_UNSPECIFIED, true,
+                                              deauth_frame_buf);
+               return;
+       }
+
        /*
         * IEEE 802.11 standard does not require authentication in IBSS
         * networks and most implementations do not seem to use it.
@@ -459,8 +487,11 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
                        }
                }
 
-               if (sta && rates_updated)
+               if (sta && rates_updated) {
+                       drv_sta_rc_update(local, sdata, &sta->sta,
+                                         IEEE80211_RC_SUPP_RATES_CHANGED);
                        rate_control_rate_init(sta);
+               }
 
                rcu_read_unlock();
        }
@@ -561,7 +592,7 @@ void ieee80211_ibss_rx_no_sta(struct ieee80211_sub_if_data *sdata,
        struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
        struct ieee80211_local *local = sdata->local;
        struct sta_info *sta;
-       int band = local->hw.conf.channel->band;
+       int band = local->oper_channel->band;
 
        /*
         * XXX: Consider removing the least recently used entry and
@@ -759,7 +790,7 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
                                return;
                        }
                        sdata_info(sdata, "IBSS not allowed on %d MHz\n",
-                                  local->hw.conf.channel->center_freq);
+                                  local->oper_channel->center_freq);
 
                        /* No IBSS found - decrease scan interval and continue
                         * scanning. */
@@ -899,6 +930,9 @@ void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
        case IEEE80211_STYPE_AUTH:
                ieee80211_rx_mgmt_auth_ibss(sdata, mgmt, skb->len);
                break;
+       case IEEE80211_STYPE_DEAUTH:
+               ieee80211_rx_mgmt_deauth_ibss(sdata, mgmt, skb->len);
+               break;
        }
 
  mgmt_out:
index bb61f7718c4c52521c555dbc3ae9468c5510151e..8c804550465b37857d6dc50b082881ec5bd4ac35 100644 (file)
@@ -68,6 +68,8 @@ struct ieee80211_local;
 #define IEEE80211_DEFAULT_MAX_SP_LEN           \
        IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL
 
+#define IEEE80211_DEAUTH_FRAME_LEN     (24 /* hdr */ + 2 /* reason */)
+
 struct ieee80211_fragment_entry {
        unsigned long first_frag_time;
        unsigned int seq;
@@ -193,8 +195,6 @@ struct ieee80211_tx_data {
        struct sta_info *sta;
        struct ieee80211_key *key;
 
-       struct ieee80211_channel *channel;
-
        unsigned int flags;
 };
 
@@ -274,9 +274,15 @@ struct beacon_data {
        struct rcu_head rcu_head;
 };
 
+struct probe_resp {
+       struct rcu_head rcu_head;
+       int len;
+       u8 data[0];
+};
+
 struct ieee80211_if_ap {
        struct beacon_data __rcu *beacon;
-       struct sk_buff __rcu *probe_resp;
+       struct probe_resp __rcu *probe_resp;
 
        struct list_head vlans;
 
@@ -359,6 +365,7 @@ enum ieee80211_sta_flags {
        IEEE80211_STA_NULLFUNC_ACKED    = BIT(8),
        IEEE80211_STA_RESET_SIGNAL_AVE  = BIT(9),
        IEEE80211_STA_DISABLE_40MHZ     = BIT(10),
+       IEEE80211_STA_DISABLE_VHT       = BIT(11),
 };
 
 struct ieee80211_mgd_auth_data {
@@ -406,6 +413,7 @@ struct ieee80211_if_managed {
        struct work_struct monitor_work;
        struct work_struct chswitch_work;
        struct work_struct beacon_connection_loss_work;
+       struct work_struct csa_connection_drop_work;
 
        unsigned long beacon_timeout;
        unsigned long probe_timeout;
@@ -965,7 +973,6 @@ struct ieee80211_local {
        int scan_channel_idx;
        int scan_ies_len;
 
-       struct ieee80211_sched_scan_ies sched_scan_ies;
        struct work_struct sched_scan_stopped_work;
        struct ieee80211_sub_if_data __rcu *sched_scan_sdata;
 
@@ -1052,7 +1059,7 @@ struct ieee80211_local {
        bool disable_dynamic_ps;
 
        int user_power_level; /* in dBm */
-       int power_constr_level; /* in dBm */
+       int ap_power_level; /* in dBm */
 
        enum ieee80211_smps_mode smps_mode;
 
@@ -1075,6 +1082,8 @@ struct ieee80211_local {
        struct idr ack_status_frames;
        spinlock_t ack_status_lock;
 
+       struct ieee80211_sub_if_data __rcu *p2p_sdata;
+
        /* dummy netdev for use w/ NAPI */
        struct net_device napi_dev;
 
@@ -1131,7 +1140,7 @@ struct ieee802_11_elems {
        u8 *prep;
        u8 *perr;
        struct ieee80211_rann_ie *rann;
-       u8 *ch_switch_elem;
+       struct ieee80211_channel_sw_ie *ch_switch_ie;
        u8 *country_elem;
        u8 *pwr_constr_elem;
        u8 *quiet_elem; /* first quite element */
@@ -1157,9 +1166,7 @@ struct ieee802_11_elems {
        u8 preq_len;
        u8 prep_len;
        u8 perr_len;
-       u8 ch_switch_elem_len;
        u8 country_elem_len;
-       u8 pwr_constr_elem_len;
        u8 quiet_elem_len;
        u8 num_of_quiet_elem;   /* can be more the one */
        u8 timeout_int_len;
@@ -1202,6 +1209,7 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
 void ieee80211_send_pspoll(struct ieee80211_local *local,
                           struct ieee80211_sub_if_data *sdata);
 void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency);
+void ieee80211_recalc_ps_vif(struct ieee80211_sub_if_data *sdata);
 int ieee80211_max_network_latency(struct notifier_block *nb,
                                  unsigned long data, void *dummy);
 int ieee80211_set_arp_filter(struct ieee80211_sub_if_data *sdata);
@@ -1291,6 +1299,8 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local);
 void ieee80211_recalc_idle(struct ieee80211_local *local);
 void ieee80211_adjust_monitor_flags(struct ieee80211_sub_if_data *sdata,
                                    const int offset);
+int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up);
+void ieee80211_sdata_stop(struct ieee80211_sub_if_data *sdata);
 
 static inline bool ieee80211_sdata_running(struct ieee80211_sub_if_data *sdata)
 {
@@ -1358,7 +1368,6 @@ void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
 int ieee80211_reconfig(struct ieee80211_local *local);
 void ieee80211_stop_device(struct ieee80211_local *local);
 
-#ifdef CONFIG_PM
 int __ieee80211_suspend(struct ieee80211_hw *hw,
                        struct cfg80211_wowlan *wowlan);
 
@@ -1372,18 +1381,6 @@ static inline int __ieee80211_resume(struct ieee80211_hw *hw)
 
        return ieee80211_reconfig(hw_to_local(hw));
 }
-#else
-static inline int __ieee80211_suspend(struct ieee80211_hw *hw,
-                                     struct cfg80211_wowlan *wowlan)
-{
-       return 0;
-}
-
-static inline int __ieee80211_resume(struct ieee80211_hw *hw)
-{
-       return 0;
-}
-#endif
 
 /* utility functions/constants */
 extern void *mac80211_wiphy_privid; /* for wiphy privid */
@@ -1425,7 +1422,6 @@ void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata,
                             struct ieee80211_hdr *hdr);
 void ieee80211_sta_tx_notify(struct ieee80211_sub_if_data *sdata,
                             struct ieee80211_hdr *hdr, bool ack);
-void ieee80211_beacon_connection_loss_work(struct work_struct *work);
 
 void ieee80211_wake_queues_by_reason(struct ieee80211_hw *hw,
                                     enum queue_stop_reason reason);
@@ -1451,19 +1447,24 @@ void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
                         u16 transaction, u16 auth_alg,
                         u8 *extra, size_t extra_len, const u8 *bssid,
                         const u8 *da, const u8 *key, u8 key_len, u8 key_idx);
+void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
+                                   const u8 *bssid, u16 stype, u16 reason,
+                                   bool send_frame, u8 *frame_buf);
 int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
                             const u8 *ie, size_t ie_len,
                             enum ieee80211_band band, u32 rate_mask,
                             u8 channel);
 struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
                                          u8 *dst, u32 ratemask,
+                                         struct ieee80211_channel *chan,
                                          const u8 *ssid, size_t ssid_len,
                                          const u8 *ie, size_t ie_len,
                                          bool directed);
 void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
                              const u8 *ssid, size_t ssid_len,
                              const u8 *ie, size_t ie_len,
-                             u32 ratemask, bool directed, bool no_cck);
+                             u32 ratemask, bool directed, bool no_cck,
+                             struct ieee80211_channel *channel);
 
 void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata,
                                  const size_t supp_rates_len,
@@ -1487,9 +1488,11 @@ u8 *ieee80211_ie_build_ht_oper(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap,
 u8 *ieee80211_ie_build_vht_cap(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap,
                               u32 cap);
 int ieee80211_add_srates_ie(struct ieee80211_sub_if_data *sdata,
-                           struct sk_buff *skb, bool need_basic);
+                           struct sk_buff *skb, bool need_basic,
+                           enum ieee80211_band band);
 int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata,
-                               struct sk_buff *skb, bool need_basic);
+                               struct sk_buff *skb, bool need_basic,
+                               enum ieee80211_band band);
 
 /* channel management */
 enum ieee80211_chan_mode {
index bfb57dcc15381a53cdcc3768e943a6fb144b8c3a..6f8a73c64fb31bde831609fa88a9b4e0425f02b9 100644 (file)
@@ -100,6 +100,10 @@ static u32 __ieee80211_recalc_idle(struct ieee80211_local *local)
                        sdata->vif.bss_conf.idle = true;
                        continue;
                }
+
+               if (sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE)
+                       continue;
+
                /* count everything else */
                sdata->vif.bss_conf.idle = false;
                count++;
@@ -121,7 +125,8 @@ static u32 __ieee80211_recalc_idle(struct ieee80211_local *local)
 
        list_for_each_entry(sdata, &local->interfaces, list) {
                if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
-                   sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+                   sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
+                   sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE)
                        continue;
                if (sdata->old_idle == sdata->vif.bss_conf.idle)
                        continue;
@@ -204,6 +209,8 @@ static inline int identical_mac_addr_allowed(int type1, int type2)
 {
        return type1 == NL80211_IFTYPE_MONITOR ||
                type2 == NL80211_IFTYPE_MONITOR ||
+               type1 == NL80211_IFTYPE_P2P_DEVICE ||
+               type2 == NL80211_IFTYPE_P2P_DEVICE ||
                (type1 == NL80211_IFTYPE_AP && type2 == NL80211_IFTYPE_WDS) ||
                (type1 == NL80211_IFTYPE_WDS &&
                        (type2 == NL80211_IFTYPE_WDS ||
@@ -271,13 +278,15 @@ static int ieee80211_check_queues(struct ieee80211_sub_if_data *sdata)
        int n_queues = sdata->local->hw.queues;
        int i;
 
-       for (i = 0; i < IEEE80211_NUM_ACS; i++) {
-               if (WARN_ON_ONCE(sdata->vif.hw_queue[i] ==
-                                IEEE80211_INVAL_HW_QUEUE))
-                       return -EINVAL;
-               if (WARN_ON_ONCE(sdata->vif.hw_queue[i] >=
-                                n_queues))
-                       return -EINVAL;
+       if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE) {
+               for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+                       if (WARN_ON_ONCE(sdata->vif.hw_queue[i] ==
+                                        IEEE80211_INVAL_HW_QUEUE))
+                               return -EINVAL;
+                       if (WARN_ON_ONCE(sdata->vif.hw_queue[i] >=
+                                        n_queues))
+                               return -EINVAL;
+               }
        }
 
        if ((sdata->vif.type != NL80211_IFTYPE_AP) ||
@@ -406,9 +415,10 @@ static void ieee80211_del_virtual_monitor(struct ieee80211_local *local)
  * an error on interface type changes that have been pre-checked, so most
  * checks should be in ieee80211_check_concurrent_iface.
  */
-static int ieee80211_do_open(struct net_device *dev, bool coming_up)
+int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
 {
-       struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+       struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
+       struct net_device *dev = wdev->netdev;
        struct ieee80211_local *local = sdata->local;
        struct sta_info *sta;
        u32 changed = 0;
@@ -443,6 +453,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
        case NL80211_IFTYPE_STATION:
        case NL80211_IFTYPE_MONITOR:
        case NL80211_IFTYPE_ADHOC:
+       case NL80211_IFTYPE_P2P_DEVICE:
                /* no special treatment */
                break;
        case NL80211_IFTYPE_UNSPECIFIED:
@@ -471,7 +482,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
         * Copy the hopefully now-present MAC address to
         * this interface, if it has the special null one.
         */
-       if (is_zero_ether_addr(dev->dev_addr)) {
+       if (dev && is_zero_ether_addr(dev->dev_addr)) {
                memcpy(dev->dev_addr,
                       local->hw.wiphy->perm_addr,
                       ETH_ALEN);
@@ -536,15 +547,23 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
                        local->fif_probe_req++;
                }
 
-               changed |= ieee80211_reset_erp_info(sdata);
+               if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE)
+                       changed |= ieee80211_reset_erp_info(sdata);
                ieee80211_bss_info_change_notify(sdata, changed);
 
-               if (sdata->vif.type == NL80211_IFTYPE_STATION ||
-                   sdata->vif.type == NL80211_IFTYPE_ADHOC ||
-                   sdata->vif.type == NL80211_IFTYPE_AP)
+               switch (sdata->vif.type) {
+               case NL80211_IFTYPE_STATION:
+               case NL80211_IFTYPE_ADHOC:
+               case NL80211_IFTYPE_AP:
+               case NL80211_IFTYPE_MESH_POINT:
                        netif_carrier_off(dev);
-               else
+                       break;
+               case NL80211_IFTYPE_WDS:
+               case NL80211_IFTYPE_P2P_DEVICE:
+                       break;
+               default:
                        netif_carrier_on(dev);
+               }
 
                /*
                 * set default queue parameters so drivers don't
@@ -576,6 +595,9 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
                }
 
                rate_control_rate_init(sta);
+               netif_carrier_on(dev);
+       } else if (sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE) {
+               rcu_assign_pointer(local->p2p_sdata, sdata);
        }
 
        /*
@@ -601,7 +623,8 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
 
        ieee80211_recalc_ps(local, -1);
 
-       netif_tx_start_all_queues(dev);
+       if (dev)
+               netif_tx_start_all_queues(dev);
 
        return 0;
  err_del_interface:
@@ -631,7 +654,7 @@ static int ieee80211_open(struct net_device *dev)
        if (err)
                return err;
 
-       return ieee80211_do_open(dev, true);
+       return ieee80211_do_open(&sdata->wdev, true);
 }
 
 static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
@@ -652,7 +675,8 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
        /*
         * Stop TX on this interface first.
         */
-       netif_tx_stop_all_queues(sdata->dev);
+       if (sdata->dev)
+               netif_tx_stop_all_queues(sdata->dev);
 
        ieee80211_roc_purge(sdata);
 
@@ -691,14 +715,16 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
                local->fif_probe_req--;
        }
 
-       netif_addr_lock_bh(sdata->dev);
-       spin_lock_bh(&local->filter_lock);
-       __hw_addr_unsync(&local->mc_list, &sdata->dev->mc,
-                        sdata->dev->addr_len);
-       spin_unlock_bh(&local->filter_lock);
-       netif_addr_unlock_bh(sdata->dev);
+       if (sdata->dev) {
+               netif_addr_lock_bh(sdata->dev);
+               spin_lock_bh(&local->filter_lock);
+               __hw_addr_unsync(&local->mc_list, &sdata->dev->mc,
+                                sdata->dev->addr_len);
+               spin_unlock_bh(&local->filter_lock);
+               netif_addr_unlock_bh(sdata->dev);
 
-       ieee80211_configure_filter(local);
+               ieee80211_configure_filter(local);
+       }
 
        del_timer_sync(&local->dynamic_ps_timer);
        cancel_work_sync(&local->dynamic_ps_enable_work);
@@ -708,7 +734,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
                struct ieee80211_sub_if_data *vlan, *tmpsdata;
                struct beacon_data *old_beacon =
                        rtnl_dereference(sdata->u.ap.beacon);
-               struct sk_buff *old_probe_resp =
+               struct probe_resp *old_probe_resp =
                        rtnl_dereference(sdata->u.ap.probe_resp);
 
                /* sdata_running will return false, so this will disable */
@@ -720,7 +746,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
                RCU_INIT_POINTER(sdata->u.ap.probe_resp, NULL);
                synchronize_rcu();
                kfree(old_beacon);
-               kfree_skb(old_probe_resp);
+               kfree(old_probe_resp);
 
                /* down all dependent devices, that is VLANs */
                list_for_each_entry_safe(vlan, tmpsdata, &sdata->u.ap.vlans,
@@ -759,24 +785,29 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
                ieee80211_adjust_monitor_flags(sdata, -1);
                ieee80211_configure_filter(local);
                break;
+       case NL80211_IFTYPE_P2P_DEVICE:
+               /* relies on synchronize_rcu() below */
+               rcu_assign_pointer(local->p2p_sdata, NULL);
+               /* fall through */
        default:
                flush_work(&sdata->work);
                /*
                 * When we get here, the interface is marked down.
-                * Call synchronize_rcu() to wait for the RX path
+                * Call rcu_barrier() to wait both for the RX path
                 * should it be using the interface and enqueuing
-                * frames at this very time on another CPU.
+                * frames at this very time on another CPU, and
+                * for the sta free call_rcu callbacks.
                 */
-               synchronize_rcu();
-               skb_queue_purge(&sdata->skb_queue);
+               rcu_barrier();
 
                /*
-                * Disable beaconing here for mesh only, AP and IBSS
-                * are already taken care of.
+                * free_sta_rcu() enqueues a work for the actual
+                * sta cleanup, so we need to flush it while
+                * sdata is still valid.
                 */
-               if (sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
-                       ieee80211_bss_info_change_notify(sdata,
-                               BSS_CHANGED_BEACON_ENABLED);
+               flush_workqueue(local->workqueue);
+
+               skb_queue_purge(&sdata->skb_queue);
 
                /*
                 * Free all remaining keys, there shouldn't be any,
@@ -877,9 +908,8 @@ static void ieee80211_set_multicast_list(struct net_device *dev)
  * Called when the netdev is removed or, by the code below, before
  * the interface type changes.
  */
-static void ieee80211_teardown_sdata(struct net_device *dev)
+static void ieee80211_teardown_sdata(struct ieee80211_sub_if_data *sdata)
 {
-       struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
        struct ieee80211_local *local = sdata->local;
        int flushed;
        int i;
@@ -900,6 +930,11 @@ static void ieee80211_teardown_sdata(struct net_device *dev)
        WARN_ON(flushed);
 }
 
+static void ieee80211_uninit(struct net_device *dev)
+{
+       ieee80211_teardown_sdata(IEEE80211_DEV_TO_SUB_IF(dev));
+}
+
 static u16 ieee80211_netdev_select_queue(struct net_device *dev,
                                         struct sk_buff *skb)
 {
@@ -909,7 +944,7 @@ static u16 ieee80211_netdev_select_queue(struct net_device *dev,
 static const struct net_device_ops ieee80211_dataif_ops = {
        .ndo_open               = ieee80211_open,
        .ndo_stop               = ieee80211_stop,
-       .ndo_uninit             = ieee80211_teardown_sdata,
+       .ndo_uninit             = ieee80211_uninit,
        .ndo_start_xmit         = ieee80211_subif_start_xmit,
        .ndo_set_rx_mode        = ieee80211_set_multicast_list,
        .ndo_change_mtu         = ieee80211_change_mtu,
@@ -940,7 +975,7 @@ static u16 ieee80211_monitor_select_queue(struct net_device *dev,
 static const struct net_device_ops ieee80211_monitorif_ops = {
        .ndo_open               = ieee80211_open,
        .ndo_stop               = ieee80211_stop,
-       .ndo_uninit             = ieee80211_teardown_sdata,
+       .ndo_uninit             = ieee80211_uninit,
        .ndo_start_xmit         = ieee80211_monitor_start_xmit,
        .ndo_set_rx_mode        = ieee80211_set_multicast_list,
        .ndo_change_mtu         = ieee80211_change_mtu,
@@ -1099,7 +1134,6 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
        /* and set some type-dependent values */
        sdata->vif.type = type;
        sdata->vif.p2p = false;
-       sdata->dev->netdev_ops = &ieee80211_dataif_ops;
        sdata->wdev.iftype = type;
 
        sdata->control_port_protocol = cpu_to_be16(ETH_P_PAE);
@@ -1107,8 +1141,11 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
 
        sdata->noack_map = 0;
 
-       /* only monitor differs */
-       sdata->dev->type = ARPHRD_ETHER;
+       /* only monitor/p2p-device differ */
+       if (sdata->dev) {
+               sdata->dev->netdev_ops = &ieee80211_dataif_ops;
+               sdata->dev->type = ARPHRD_ETHER;
+       }
 
        skb_queue_head_init(&sdata->skb_queue);
        INIT_WORK(&sdata->work, ieee80211_iface_work);
@@ -1146,6 +1183,7 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
                break;
        case NL80211_IFTYPE_WDS:
        case NL80211_IFTYPE_AP_VLAN:
+       case NL80211_IFTYPE_P2P_DEVICE:
                break;
        case NL80211_IFTYPE_UNSPECIFIED:
        case NUM_NL80211_IFTYPES:
@@ -1156,18 +1194,6 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
        ieee80211_debugfs_add_netdev(sdata);
 }
 
-static void ieee80211_clean_sdata(struct ieee80211_sub_if_data *sdata)
-{
-       switch (sdata->vif.type) {
-       case NL80211_IFTYPE_MESH_POINT:
-               mesh_path_flush_by_iface(sdata);
-               break;
-
-       default:
-               break;
-       }
-}
-
 static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata,
                                           enum nl80211_iftype type)
 {
@@ -1225,7 +1251,7 @@ static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata,
 
        ieee80211_do_stop(sdata, false);
 
-       ieee80211_teardown_sdata(sdata->dev);
+       ieee80211_teardown_sdata(sdata);
 
        ret = drv_change_interface(local, sdata, internal_type, p2p);
        if (ret)
@@ -1240,7 +1266,7 @@ static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata,
 
        ieee80211_setup_sdata(sdata, type);
 
-       err = ieee80211_do_open(sdata->dev, false);
+       err = ieee80211_do_open(&sdata->wdev, false);
        WARN(err, "type change: do_open returned %d", err);
 
        return ret;
@@ -1267,14 +1293,14 @@ int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata,
                        return ret;
        } else {
                /* Purge and reset type-dependent state. */
-               ieee80211_teardown_sdata(sdata->dev);
+               ieee80211_teardown_sdata(sdata);
                ieee80211_setup_sdata(sdata, type);
        }
 
        /* reset some values that shouldn't be kept across type changes */
        sdata->vif.bss_conf.basic_rates =
                ieee80211_mandatory_rates(sdata->local,
-                       sdata->local->hw.conf.channel->band);
+                       sdata->local->oper_channel->band);
        sdata->drop_unencrypted = 0;
        if (type == NL80211_IFTYPE_STATION)
                sdata->u.mgd.use_4addr = false;
@@ -1283,8 +1309,7 @@ int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata,
 }
 
 static void ieee80211_assign_perm_addr(struct ieee80211_local *local,
-                                      struct net_device *dev,
-                                      enum nl80211_iftype type)
+                                      u8 *perm_addr, enum nl80211_iftype type)
 {
        struct ieee80211_sub_if_data *sdata;
        u64 mask, start, addr, val, inc;
@@ -1293,13 +1318,12 @@ static void ieee80211_assign_perm_addr(struct ieee80211_local *local,
        int i;
 
        /* default ... something at least */
-       memcpy(dev->perm_addr, local->hw.wiphy->perm_addr, ETH_ALEN);
+       memcpy(perm_addr, local->hw.wiphy->perm_addr, ETH_ALEN);
 
        if (is_zero_ether_addr(local->hw.wiphy->addr_mask) &&
            local->hw.wiphy->n_addresses <= 1)
                return;
 
-
        mutex_lock(&local->iflist_mtx);
 
        switch (type) {
@@ -1312,11 +1336,24 @@ static void ieee80211_assign_perm_addr(struct ieee80211_local *local,
                list_for_each_entry(sdata, &local->interfaces, list) {
                        if (sdata->vif.type != NL80211_IFTYPE_AP)
                                continue;
-                       memcpy(dev->perm_addr, sdata->vif.addr, ETH_ALEN);
+                       memcpy(perm_addr, sdata->vif.addr, ETH_ALEN);
                        break;
                }
                /* keep default if no AP interface present */
                break;
+       case NL80211_IFTYPE_P2P_CLIENT:
+       case NL80211_IFTYPE_P2P_GO:
+               if (local->hw.flags & IEEE80211_HW_P2P_DEV_ADDR_FOR_INTF) {
+                       list_for_each_entry(sdata, &local->interfaces, list) {
+                               if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE)
+                                       continue;
+                               if (!ieee80211_sdata_running(sdata))
+                                       continue;
+                               memcpy(perm_addr, sdata->vif.addr, ETH_ALEN);
+                               goto out_unlock;
+                       }
+               }
+               /* otherwise fall through */
        default:
                /* assign a new address if possible -- try n_addresses first */
                for (i = 0; i < local->hw.wiphy->n_addresses; i++) {
@@ -1331,7 +1368,7 @@ static void ieee80211_assign_perm_addr(struct ieee80211_local *local,
                        }
 
                        if (!used) {
-                               memcpy(dev->perm_addr,
+                               memcpy(perm_addr,
                                       local->hw.wiphy->addresses[i].addr,
                                       ETH_ALEN);
                                break;
@@ -1382,7 +1419,7 @@ static void ieee80211_assign_perm_addr(struct ieee80211_local *local,
                        }
 
                        if (!used) {
-                               memcpy(dev->perm_addr, tmp_addr, ETH_ALEN);
+                               memcpy(perm_addr, tmp_addr, ETH_ALEN);
                                break;
                        }
                        addr = (start & ~mask) | (val & mask);
@@ -1391,6 +1428,7 @@ static void ieee80211_assign_perm_addr(struct ieee80211_local *local,
                break;
        }
 
+ out_unlock:
        mutex_unlock(&local->iflist_mtx);
 }
 
@@ -1398,49 +1436,68 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
                     struct wireless_dev **new_wdev, enum nl80211_iftype type,
                     struct vif_params *params)
 {
-       struct net_device *ndev;
+       struct net_device *ndev = NULL;
        struct ieee80211_sub_if_data *sdata = NULL;
        int ret, i;
        int txqs = 1;
 
        ASSERT_RTNL();
 
-       if (local->hw.queues >= IEEE80211_NUM_ACS)
-               txqs = IEEE80211_NUM_ACS;
-
-       ndev = alloc_netdev_mqs(sizeof(*sdata) + local->hw.vif_data_size,
-                               name, ieee80211_if_setup, txqs, 1);
-       if (!ndev)
-               return -ENOMEM;
-       dev_net_set(ndev, wiphy_net(local->hw.wiphy));
-
-       ndev->needed_headroom = local->tx_headroom +
-                               4*6 /* four MAC addresses */
-                               + 2 + 2 + 2 + 2 /* ctl, dur, seq, qos */
-                               + 6 /* mesh */
-                               + 8 /* rfc1042/bridge tunnel */
-                               - ETH_HLEN /* ethernet hard_header_len */
-                               + IEEE80211_ENCRYPT_HEADROOM;
-       ndev->needed_tailroom = IEEE80211_ENCRYPT_TAILROOM;
-
-       ret = dev_alloc_name(ndev, ndev->name);
-       if (ret < 0)
-               goto fail;
-
-       ieee80211_assign_perm_addr(local, ndev, type);
-       memcpy(ndev->dev_addr, ndev->perm_addr, ETH_ALEN);
-       SET_NETDEV_DEV(ndev, wiphy_dev(local->hw.wiphy));
-
-       /* don't use IEEE80211_DEV_TO_SUB_IF because it checks too much */
-       sdata = netdev_priv(ndev);
-       ndev->ieee80211_ptr = &sdata->wdev;
-       memcpy(sdata->vif.addr, ndev->dev_addr, ETH_ALEN);
-       memcpy(sdata->name, ndev->name, IFNAMSIZ);
+       if (type == NL80211_IFTYPE_P2P_DEVICE) {
+               struct wireless_dev *wdev;
+
+               sdata = kzalloc(sizeof(*sdata) + local->hw.vif_data_size,
+                               GFP_KERNEL);
+               if (!sdata)
+                       return -ENOMEM;
+               wdev = &sdata->wdev;
+
+               sdata->dev = NULL;
+               strlcpy(sdata->name, name, IFNAMSIZ);
+               ieee80211_assign_perm_addr(local, wdev->address, type);
+               memcpy(sdata->vif.addr, wdev->address, ETH_ALEN);
+       } else {
+               if (local->hw.queues >= IEEE80211_NUM_ACS)
+                       txqs = IEEE80211_NUM_ACS;
+
+               ndev = alloc_netdev_mqs(sizeof(*sdata) +
+                                       local->hw.vif_data_size,
+                                       name, ieee80211_if_setup, txqs, 1);
+               if (!ndev)
+                       return -ENOMEM;
+               dev_net_set(ndev, wiphy_net(local->hw.wiphy));
+
+               ndev->needed_headroom = local->tx_headroom +
+                                       4*6 /* four MAC addresses */
+                                       + 2 + 2 + 2 + 2 /* ctl, dur, seq, qos */
+                                       + 6 /* mesh */
+                                       + 8 /* rfc1042/bridge tunnel */
+                                       - ETH_HLEN /* ethernet hard_header_len */
+                                       + IEEE80211_ENCRYPT_HEADROOM;
+               ndev->needed_tailroom = IEEE80211_ENCRYPT_TAILROOM;
+
+               ret = dev_alloc_name(ndev, ndev->name);
+               if (ret < 0) {
+                       free_netdev(ndev);
+                       return ret;
+               }
+
+               ieee80211_assign_perm_addr(local, ndev->perm_addr, type);
+               memcpy(ndev->dev_addr, ndev->perm_addr, ETH_ALEN);
+               SET_NETDEV_DEV(ndev, wiphy_dev(local->hw.wiphy));
+
+               /* don't use IEEE80211_DEV_TO_SUB_IF -- it checks too much */
+               sdata = netdev_priv(ndev);
+               ndev->ieee80211_ptr = &sdata->wdev;
+               memcpy(sdata->vif.addr, ndev->dev_addr, ETH_ALEN);
+               memcpy(sdata->name, ndev->name, IFNAMSIZ);
+
+               sdata->dev = ndev;
+       }
 
        /* initialise type-independent data */
        sdata->wdev.wiphy = local->hw.wiphy;
        sdata->local = local;
-       sdata->dev = ndev;
 #ifdef CONFIG_INET
        sdata->arp_filter_state = true;
 #endif
@@ -1469,17 +1526,21 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
        /* setup type-dependent data */
        ieee80211_setup_sdata(sdata, type);
 
-       if (params) {
-               ndev->ieee80211_ptr->use_4addr = params->use_4addr;
-               if (type == NL80211_IFTYPE_STATION)
-                       sdata->u.mgd.use_4addr = params->use_4addr;
-       }
+       if (ndev) {
+               if (params) {
+                       ndev->ieee80211_ptr->use_4addr = params->use_4addr;
+                       if (type == NL80211_IFTYPE_STATION)
+                               sdata->u.mgd.use_4addr = params->use_4addr;
+               }
 
-       ndev->features |= local->hw.netdev_features;
+               ndev->features |= local->hw.netdev_features;
 
-       ret = register_netdevice(ndev);
-       if (ret)
-               goto fail;
+               ret = register_netdevice(ndev);
+               if (ret) {
+                       free_netdev(ndev);
+                       return ret;
+               }
+       }
 
        mutex_lock(&local->iflist_mtx);
        list_add_tail_rcu(&sdata->list, &local->interfaces);
@@ -1489,10 +1550,6 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
                *new_wdev = &sdata->wdev;
 
        return 0;
-
- fail:
-       free_netdev(ndev);
-       return ret;
 }
 
 void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata)
@@ -1503,11 +1560,22 @@ void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata)
        list_del_rcu(&sdata->list);
        mutex_unlock(&sdata->local->iflist_mtx);
 
-       /* clean up type-dependent data */
-       ieee80211_clean_sdata(sdata);
-
        synchronize_rcu();
-       unregister_netdevice(sdata->dev);
+
+       if (sdata->dev) {
+               unregister_netdevice(sdata->dev);
+       } else {
+               cfg80211_unregister_wdev(&sdata->wdev);
+               kfree(sdata);
+       }
+}
+
+void ieee80211_sdata_stop(struct ieee80211_sub_if_data *sdata)
+{
+       if (WARN_ON_ONCE(!test_bit(SDATA_STATE_RUNNING, &sdata->state)))
+               return;
+       ieee80211_do_stop(sdata, true);
+       ieee80211_teardown_sdata(sdata);
 }
 
 /*
@@ -1518,6 +1586,7 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local)
 {
        struct ieee80211_sub_if_data *sdata, *tmp;
        LIST_HEAD(unreg_list);
+       LIST_HEAD(wdev_list);
 
        ASSERT_RTNL();
 
@@ -1525,13 +1594,20 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local)
        list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) {
                list_del(&sdata->list);
 
-               ieee80211_clean_sdata(sdata);
-
-               unregister_netdevice_queue(sdata->dev, &unreg_list);
+               if (sdata->dev)
+                       unregister_netdevice_queue(sdata->dev, &unreg_list);
+               else
+                       list_add(&sdata->list, &wdev_list);
        }
        mutex_unlock(&local->iflist_mtx);
        unregister_netdevice_many(&unreg_list);
        list_del(&unreg_list);
+
+       list_for_each_entry_safe(sdata, tmp, &wdev_list, list) {
+               list_del(&sdata->list);
+               cfg80211_unregister_wdev(&sdata->wdev);
+               kfree(sdata);
+       }
 }
 
 static int netdev_notify(struct notifier_block *nb,
index 7ae678ba5d679dbd40fc7c199c499ed82228757f..d27e61aaa71bd7200c527606820f5d5d82c39ff0 100644 (file)
@@ -402,7 +402,7 @@ static void __ieee80211_key_destroy(struct ieee80211_key *key)
         * Synchronize so the TX path can no longer be using
         * this key before we free/remove it.
         */
-       synchronize_rcu();
+       synchronize_net();
 
        if (key->local)
                ieee80211_key_disable_hw_accel(key);
index c26e231c733af85d66fba73821ee877ca4338912..c80c4490351ce54fb75c41cdb9be1a313b0ffef5 100644 (file)
@@ -150,13 +150,11 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
 
        if (test_bit(SCAN_SW_SCANNING, &local->scanning) ||
            test_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning) ||
-           test_bit(SCAN_HW_SCANNING, &local->scanning))
+           test_bit(SCAN_HW_SCANNING, &local->scanning) ||
+           !local->ap_power_level)
                power = chan->max_power;
        else
-               power = local->power_constr_level ?
-                       min(chan->max_power,
-                               (chan->max_reg_power  - local->power_constr_level)) :
-                       chan->max_power;
+               power = min(chan->max_power, local->ap_power_level);
 
        if (local->user_power_level >= 0)
                power = min(power, local->user_power_level);
@@ -207,6 +205,10 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
                sdata->vif.bss_conf.bssid = NULL;
        else if (ieee80211_vif_is_mesh(&sdata->vif)) {
                sdata->vif.bss_conf.bssid = zero;
+       } else if (sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE) {
+               sdata->vif.bss_conf.bssid = sdata->vif.addr;
+               WARN_ONCE(changed & ~(BSS_CHANGED_IDLE),
+                         "P2P Device BSS changed %#x", changed);
        } else {
                WARN_ON(1);
                return;
@@ -362,9 +364,7 @@ static void ieee80211_recalc_smps_work(struct work_struct *work)
        struct ieee80211_local *local =
                container_of(work, struct ieee80211_local, recalc_smps);
 
-       mutex_lock(&local->iflist_mtx);
        ieee80211_recalc_smps(local);
-       mutex_unlock(&local->iflist_mtx);
 }
 
 #ifdef CONFIG_INET
@@ -514,6 +514,11 @@ ieee80211_default_mgmt_stypes[NUM_NL80211_IFTYPES] = {
                        BIT(IEEE80211_STYPE_AUTH >> 4) |
                        BIT(IEEE80211_STYPE_DEAUTH >> 4),
        },
+       [NL80211_IFTYPE_P2P_DEVICE] = {
+               .tx = 0xffff,
+               .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+                       BIT(IEEE80211_STYPE_PROBE_REQ >> 4),
+       },
 };
 
 static const struct ieee80211_ht_cap mac80211_ht_capa_mod_mask = {
@@ -536,6 +541,11 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
        int priv_size, i;
        struct wiphy *wiphy;
 
+       if (WARN_ON(!ops->tx || !ops->start || !ops->stop || !ops->config ||
+                   !ops->add_interface || !ops->remove_interface ||
+                   !ops->configure_filter))
+               return NULL;
+
        if (WARN_ON(ops->sta_state && (ops->sta_add || ops->sta_remove)))
                return NULL;
 
@@ -588,13 +598,6 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
 
        local->hw.priv = (char *)local + ALIGN(sizeof(*local), NETDEV_ALIGN);
 
-       BUG_ON(!ops->tx);
-       BUG_ON(!ops->start);
-       BUG_ON(!ops->stop);
-       BUG_ON(!ops->config);
-       BUG_ON(!ops->add_interface);
-       BUG_ON(!ops->remove_interface);
-       BUG_ON(!ops->configure_filter);
        local->ops = ops;
 
        /* set up some defaults */
index 85572353a7e37d59b64ced140d75f7a9b8d3fb81..ff0296c7bab8b131f07a9846b608e7b91944a93e 100644 (file)
@@ -109,11 +109,11 @@ bool mesh_matches_local(struct ieee80211_sub_if_data *sdata,
 
        /* Disallow HT40+/- mismatch */
        if (ie->ht_operation &&
-           (local->_oper_channel_type == NL80211_CHAN_HT40MINUS ||
-           local->_oper_channel_type == NL80211_CHAN_HT40PLUS) &&
+           (sdata->vif.bss_conf.channel_type == NL80211_CHAN_HT40MINUS ||
+            sdata->vif.bss_conf.channel_type == NL80211_CHAN_HT40PLUS) &&
            (sta_channel_type == NL80211_CHAN_HT40MINUS ||
             sta_channel_type == NL80211_CHAN_HT40PLUS) &&
-           local->_oper_channel_type != sta_channel_type)
+           sdata->vif.bss_conf.channel_type != sta_channel_type)
                goto mismatch;
 
        return true;
@@ -136,10 +136,13 @@ bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie)
  * mesh_accept_plinks_update - update accepting_plink in local mesh beacons
  *
  * @sdata: mesh interface in which mesh beacons are going to be updated
+ *
+ * Returns: beacon changed flag if the beacon content changed.
  */
-void mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata)
+u32 mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata)
 {
        bool free_plinks;
+       u32 changed = 0;
 
        /* In case mesh_plink_free_count > 0 and mesh_plinktbl_capacity == 0,
         * the mesh interface might be able to establish plinks with peers that
@@ -149,8 +152,12 @@ void mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata)
         */
        free_plinks = mesh_plink_availables(sdata);
 
-       if (free_plinks != sdata->u.mesh.accepting_plinks)
-               ieee80211_mesh_housekeeping_timer((unsigned long) sdata);
+       if (free_plinks != sdata->u.mesh.accepting_plinks) {
+               sdata->u.mesh.accepting_plinks = free_plinks;
+               changed = BSS_CHANGED_BEACON;
+       }
+
+       return changed;
 }
 
 int mesh_rmc_init(struct ieee80211_sub_if_data *sdata)
@@ -262,7 +269,6 @@ mesh_add_meshconf_ie(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata)
        neighbors = (neighbors > 15) ? 15 : neighbors;
        *pos++ = neighbors << 1;
        /* Mesh capability */
-       ifmsh->accepting_plinks = mesh_plink_availables(sdata);
        *pos = MESHCONF_CAPAB_FORWARDING;
        *pos |= ifmsh->accepting_plinks ?
            MESHCONF_CAPAB_ACCEPT_PLINKS : 0x00;
@@ -349,17 +355,18 @@ int mesh_add_ds_params_ie(struct sk_buff *skb,
 {
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_supported_band *sband;
+       struct ieee80211_channel *chan = local->oper_channel;
        u8 *pos;
 
        if (skb_tailroom(skb) < 3)
                return -ENOMEM;
 
-       sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
+       sband = local->hw.wiphy->bands[chan->band];
        if (sband->band == IEEE80211_BAND_2GHZ) {
                pos = skb_put(skb, 2 + 1);
                *pos++ = WLAN_EID_DS_PARAMS;
                *pos++ = 1;
-               *pos++ = ieee80211_frequency_to_channel(local->hw.conf.channel->center_freq);
+               *pos++ = ieee80211_frequency_to_channel(chan->center_freq);
        }
 
        return 0;
@@ -374,7 +381,7 @@ int mesh_add_ht_cap_ie(struct sk_buff *skb,
 
        sband = local->hw.wiphy->bands[local->oper_channel->band];
        if (!sband->ht_cap.ht_supported ||
-           local->_oper_channel_type == NL80211_CHAN_NO_HT)
+           sdata->vif.bss_conf.channel_type == NL80211_CHAN_NO_HT)
                return 0;
 
        if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_ht_cap))
@@ -391,7 +398,8 @@ int mesh_add_ht_oper_ie(struct sk_buff *skb,
 {
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_channel *channel = local->oper_channel;
-       enum nl80211_channel_type channel_type = local->_oper_channel_type;
+       enum nl80211_channel_type channel_type =
+                               sdata->vif.bss_conf.channel_type;
        struct ieee80211_supported_band *sband =
                                local->hw.wiphy->bands[channel->band];
        struct ieee80211_sta_ht_cap *ht_cap = &sband->ht_cap;
@@ -521,14 +529,13 @@ int ieee80211_new_mesh_header(struct ieee80211s_hdr *meshhdr,
 static void ieee80211_mesh_housekeeping(struct ieee80211_sub_if_data *sdata,
                           struct ieee80211_if_mesh *ifmsh)
 {
-       bool free_plinks;
+       u32 changed;
 
        ieee80211_sta_expire(sdata, IEEE80211_MESH_PEER_INACTIVITY_LIMIT);
        mesh_path_expire(sdata);
 
-       free_plinks = mesh_plink_availables(sdata);
-       if (free_plinks != sdata->u.mesh.accepting_plinks)
-               ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
+       changed = mesh_accept_plinks_update(sdata);
+       ieee80211_bss_info_change_notify(sdata, changed);
 
        mod_timer(&ifmsh->housekeeping_timer,
                  round_jiffies(jiffies + IEEE80211_MESH_HOUSEKEEPING_INTERVAL));
@@ -603,12 +610,14 @@ void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata)
        sdata->vif.bss_conf.beacon_int = MESH_DEFAULT_BEACON_INTERVAL;
        sdata->vif.bss_conf.basic_rates =
                ieee80211_mandatory_rates(sdata->local,
-                                         sdata->local->hw.conf.channel->band);
+                                         sdata->local->oper_channel->band);
        ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON |
                                                BSS_CHANGED_BEACON_ENABLED |
                                                BSS_CHANGED_HT |
                                                BSS_CHANGED_BASIC_RATES |
                                                BSS_CHANGED_BEACON_INT);
+
+       netif_carrier_on(sdata->dev);
 }
 
 void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
@@ -616,9 +625,15 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
 
+       netif_carrier_off(sdata->dev);
+
+       /* stop the beacon */
        ifmsh->mesh_id_len = 0;
        ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED);
-       sta_info_flush(local, NULL);
+
+       /* flush STAs and mpaths on this iface */
+       sta_info_flush(sdata->local, sdata);
+       mesh_path_flush_by_iface(sdata);
 
        del_timer_sync(&sdata->u.mesh.housekeeping_timer);
        del_timer_sync(&sdata->u.mesh.mesh_path_root_timer);
index faaa39bcfd109b783c2f297fdda6aa3585587026..25d0f17dec71e74c8c9e514f17155628ce6dfff6 100644 (file)
@@ -215,6 +215,9 @@ struct mesh_rmc {
 /* Maximum number of paths per interface */
 #define MESH_MAX_MPATHS                1024
 
+/* Number of frames buffered per destination for unresolved destinations */
+#define MESH_FRAME_QUEUE_LEN   10
+
 /* Public interfaces */
 /* Various */
 int ieee80211_fill_mesh_addresses(struct ieee80211_hdr *hdr, __le16 *fc,
@@ -282,7 +285,7 @@ void mesh_neighbour_update(struct ieee80211_sub_if_data *sdata,
                           u8 *hw_addr,
                           struct ieee802_11_elems *ie);
 bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie);
-void mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata);
+u32 mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata);
 void mesh_plink_broken(struct sta_info *sta);
 void mesh_plink_deactivate(struct sta_info *sta);
 int mesh_plink_open(struct sta_info *sta);
index 494bc39f61a4cb67fce07ad649e9be814fcb9d6b..47aeee2d8db160f6fa9eb62c32131bbd686acc29 100644 (file)
@@ -17,8 +17,6 @@
 #define MAX_METRIC     0xffffffff
 #define ARITH_SHIFT    8
 
-/* Number of frames buffered per destination for unresolved destinations */
-#define MESH_FRAME_QUEUE_LEN   10
 #define MAX_PREQ_QUEUE_LEN     64
 
 /* Destination only */
index 075bc535c60126c33c6ce5f015bab87b00fe08f2..aa749818860e72f1cb279f3cf20c437a5b0ae6d0 100644 (file)
@@ -203,23 +203,17 @@ void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
 {
        struct sk_buff *skb;
        struct ieee80211_hdr *hdr;
-       struct sk_buff_head tmpq;
        unsigned long flags;
 
        rcu_assign_pointer(mpath->next_hop, sta);
 
-       __skb_queue_head_init(&tmpq);
-
        spin_lock_irqsave(&mpath->frame_queue.lock, flags);
-
-       while ((skb = __skb_dequeue(&mpath->frame_queue)) != NULL) {
+       skb_queue_walk(&mpath->frame_queue, skb) {
                hdr = (struct ieee80211_hdr *) skb->data;
                memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
                memcpy(hdr->addr2, mpath->sdata->vif.addr, ETH_ALEN);
-               __skb_queue_tail(&tmpq, skb);
        }
 
-       skb_queue_splice(&tmpq, &mpath->frame_queue);
        spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
 }
 
@@ -285,40 +279,42 @@ static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
                                    struct mesh_path *from_mpath,
                                    bool copy)
 {
-       struct sk_buff *skb, *cp_skb = NULL;
-       struct sk_buff_head gateq, failq;
+       struct sk_buff *skb, *fskb, *tmp;
+       struct sk_buff_head failq;
        unsigned long flags;
-       int num_skbs;
 
        BUG_ON(gate_mpath == from_mpath);
        BUG_ON(!gate_mpath->next_hop);
 
-       __skb_queue_head_init(&gateq);
        __skb_queue_head_init(&failq);
 
        spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
        skb_queue_splice_init(&from_mpath->frame_queue, &failq);
        spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
 
-       num_skbs = skb_queue_len(&failq);
-
-       while (num_skbs--) {
-               skb = __skb_dequeue(&failq);
-               if (copy) {
-                       cp_skb = skb_copy(skb, GFP_ATOMIC);
-                       if (cp_skb)
-                               __skb_queue_tail(&failq, cp_skb);
+       skb_queue_walk_safe(&failq, fskb, tmp) {
+               if (skb_queue_len(&gate_mpath->frame_queue) >=
+                                 MESH_FRAME_QUEUE_LEN) {
+                       mpath_dbg(gate_mpath->sdata, "mpath queue full!\n");
+                       break;
                }
 
+               skb = skb_copy(fskb, GFP_ATOMIC);
+               if (WARN_ON(!skb))
+                       break;
+
                prepare_for_gate(skb, gate_mpath->dst, gate_mpath);
-               __skb_queue_tail(&gateq, skb);
+               skb_queue_tail(&gate_mpath->frame_queue, skb);
+
+               if (copy)
+                       continue;
+
+               __skb_unlink(fskb, &failq);
+               kfree_skb(fskb);
        }
 
-       spin_lock_irqsave(&gate_mpath->frame_queue.lock, flags);
-       skb_queue_splice(&gateq, &gate_mpath->frame_queue);
        mpath_dbg(gate_mpath->sdata, "Mpath queue for gate %pM has %d frames\n",
                  gate_mpath->dst, skb_queue_len(&gate_mpath->frame_queue));
-       spin_unlock_irqrestore(&gate_mpath->frame_queue.lock, flags);
 
        if (!copy)
                return;
@@ -531,7 +527,7 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
 
        read_lock_bh(&pathtbl_resize_lock);
        memcpy(new_mpath->dst, dst, ETH_ALEN);
-       memset(new_mpath->rann_snd_addr, 0xff, ETH_ALEN);
+       eth_broadcast_addr(new_mpath->rann_snd_addr);
        new_mpath->is_root = false;
        new_mpath->sdata = sdata;
        new_mpath->flags = 0;
index af671b984df37123cf6841fb9a8e4e5db70bde65..3ab34d81689753e0beaf7af4a9c7c806f9da6915 100644 (file)
@@ -48,17 +48,17 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
                u8 *da, __le16 llid, __le16 plid, __le16 reason);
 
 static inline
-void mesh_plink_inc_estab_count(struct ieee80211_sub_if_data *sdata)
+u32 mesh_plink_inc_estab_count(struct ieee80211_sub_if_data *sdata)
 {
        atomic_inc(&sdata->u.mesh.mshstats.estab_plinks);
-       mesh_accept_plinks_update(sdata);
+       return mesh_accept_plinks_update(sdata);
 }
 
 static inline
-void mesh_plink_dec_estab_count(struct ieee80211_sub_if_data *sdata)
+u32 mesh_plink_dec_estab_count(struct ieee80211_sub_if_data *sdata)
 {
        atomic_dec(&sdata->u.mesh.mshstats.estab_plinks);
-       mesh_accept_plinks_update(sdata);
+       return mesh_accept_plinks_update(sdata);
 }
 
 /**
@@ -117,7 +117,7 @@ static u32 mesh_set_ht_prot_mode(struct ieee80211_sub_if_data *sdata)
        u16 ht_opmode;
        bool non_ht_sta = false, ht20_sta = false;
 
-       if (local->_oper_channel_type == NL80211_CHAN_NO_HT)
+       if (sdata->vif.bss_conf.channel_type == NL80211_CHAN_NO_HT)
                return 0;
 
        rcu_read_lock();
@@ -147,7 +147,8 @@ out:
 
        if (non_ht_sta)
                ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED;
-       else if (ht20_sta && local->_oper_channel_type > NL80211_CHAN_HT20)
+       else if (ht20_sta &&
+                sdata->vif.bss_conf.channel_type > NL80211_CHAN_HT20)
                ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_20MHZ;
        else
                ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_NONE;
@@ -170,22 +171,21 @@ out:
  * @sta: mesh peer link to deactivate
  *
  * All mesh paths with this peer as next hop will be flushed
+ * Returns beacon changed flag if the beacon content changed.
  *
  * Locking: the caller must hold sta->lock
  */
-static bool __mesh_plink_deactivate(struct sta_info *sta)
+static u32 __mesh_plink_deactivate(struct sta_info *sta)
 {
        struct ieee80211_sub_if_data *sdata = sta->sdata;
-       bool deactivated = false;
+       u32 changed = 0;
 
-       if (sta->plink_state == NL80211_PLINK_ESTAB) {
-               mesh_plink_dec_estab_count(sdata);
-               deactivated = true;
-       }
+       if (sta->plink_state == NL80211_PLINK_ESTAB)
+               changed = mesh_plink_dec_estab_count(sdata);
        sta->plink_state = NL80211_PLINK_BLOCKED;
        mesh_path_flush_by_nexthop(sta);
 
-       return deactivated;
+       return changed;
 }
 
 /**
@@ -198,18 +198,17 @@ static bool __mesh_plink_deactivate(struct sta_info *sta)
 void mesh_plink_deactivate(struct sta_info *sta)
 {
        struct ieee80211_sub_if_data *sdata = sta->sdata;
-       bool deactivated;
+       u32 changed;
 
        spin_lock_bh(&sta->lock);
-       deactivated = __mesh_plink_deactivate(sta);
+       changed = __mesh_plink_deactivate(sta);
        sta->reason = cpu_to_le16(WLAN_REASON_MESH_PEER_CANCELED);
        mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_CLOSE,
                            sta->sta.addr, sta->llid, sta->plid,
                            sta->reason);
        spin_unlock_bh(&sta->lock);
 
-       if (deactivated)
-               ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
+       ieee80211_bss_info_change_notify(sdata, changed);
 }
 
 static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
@@ -217,12 +216,14 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
                u8 *da, __le16 llid, __le16 plid, __le16 reason) {
        struct ieee80211_local *local = sdata->local;
        struct sk_buff *skb;
+       struct ieee80211_tx_info *info;
        struct ieee80211_mgmt *mgmt;
        bool include_plid = false;
        u16 peering_proto = 0;
        u8 *pos, ie_len = 4;
        int hdr_len = offsetof(struct ieee80211_mgmt, u.action.u.self_prot) +
                      sizeof(mgmt->u.action.u.self_prot);
+       int err = -ENOMEM;
 
        skb = dev_alloc_skb(local->tx_headroom +
                            hdr_len +
@@ -238,6 +239,7 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
                            sdata->u.mesh.ie_len);
        if (!skb)
                return -1;
+       info = IEEE80211_SKB_CB(skb);
        skb_reserve(skb, local->tx_headroom);
        mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len);
        memset(mgmt, 0, hdr_len);
@@ -258,15 +260,18 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
                        pos = skb_put(skb, 2);
                        memcpy(pos + 2, &plid, 2);
                }
-               if (ieee80211_add_srates_ie(sdata, skb, true) ||
-                   ieee80211_add_ext_srates_ie(sdata, skb, true) ||
+               if (ieee80211_add_srates_ie(sdata, skb, true,
+                                           local->oper_channel->band) ||
+                   ieee80211_add_ext_srates_ie(sdata, skb, true,
+                                               local->oper_channel->band) ||
                    mesh_add_rsn_ie(skb, sdata) ||
                    mesh_add_meshid_ie(skb, sdata) ||
                    mesh_add_meshconf_ie(skb, sdata))
-                       return -1;
+                       goto free;
        } else {        /* WLAN_SP_MESH_PEERING_CLOSE */
+               info->flags |= IEEE80211_TX_CTL_NO_ACK;
                if (mesh_add_meshid_ie(skb, sdata))
-                       return -1;
+                       goto free;
        }
 
        /* Add Mesh Peering Management element */
@@ -285,11 +290,12 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
                ie_len += 2;    /* reason code */
                break;
        default:
-               return -EINVAL;
+               err = -EINVAL;
+               goto free;
        }
 
        if (WARN_ON(skb_tailroom(skb) < 2 + ie_len))
-               return -ENOMEM;
+               goto free;
 
        pos = skb_put(skb, 2 + ie_len);
        *pos++ = WLAN_EID_PEER_MGMT;
@@ -310,14 +316,17 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
        if (action != WLAN_SP_MESH_PEERING_CLOSE) {
                if (mesh_add_ht_cap_ie(skb, sdata) ||
                    mesh_add_ht_oper_ie(skb, sdata))
-                       return -1;
+                       goto free;
        }
 
        if (mesh_add_vendor_ies(skb, sdata))
-               return -1;
+               goto free;
 
        ieee80211_tx_skb(sdata, skb);
        return 0;
+free:
+       kfree_skb(skb);
+       return err;
 }
 
 /**
@@ -362,9 +371,14 @@ static struct sta_info *mesh_peer_init(struct ieee80211_sub_if_data *sdata,
 
        spin_lock_bh(&sta->lock);
        sta->last_rx = jiffies;
+       if (sta->plink_state == NL80211_PLINK_ESTAB) {
+               spin_unlock_bh(&sta->lock);
+               return sta;
+       }
+
        sta->sta.supp_rates[band] = rates;
        if (elems->ht_cap_elem &&
-           sdata->local->_oper_channel_type != NL80211_CHAN_NO_HT)
+           sdata->vif.bss_conf.channel_type != NL80211_CHAN_NO_HT)
                ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
                                                  elems->ht_cap_elem,
                                                  &sta->sta.ht_cap);
@@ -523,7 +537,8 @@ int mesh_plink_open(struct sta_info *sta)
        spin_lock_bh(&sta->lock);
        get_random_bytes(&llid, 2);
        sta->llid = llid;
-       if (sta->plink_state != NL80211_PLINK_LISTEN) {
+       if (sta->plink_state != NL80211_PLINK_LISTEN &&
+           sta->plink_state != NL80211_PLINK_BLOCKED) {
                spin_unlock_bh(&sta->lock);
                return -EBUSY;
        }
@@ -541,15 +556,14 @@ int mesh_plink_open(struct sta_info *sta)
 void mesh_plink_block(struct sta_info *sta)
 {
        struct ieee80211_sub_if_data *sdata = sta->sdata;
-       bool deactivated;
+       u32 changed;
 
        spin_lock_bh(&sta->lock);
-       deactivated = __mesh_plink_deactivate(sta);
+       changed = __mesh_plink_deactivate(sta);
        sta->plink_state = NL80211_PLINK_BLOCKED;
        spin_unlock_bh(&sta->lock);
 
-       if (deactivated)
-               ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
+       ieee80211_bss_info_change_notify(sdata, changed);
 }
 
 
@@ -852,9 +866,8 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
                        del_timer(&sta->plink_timer);
                        sta->plink_state = NL80211_PLINK_ESTAB;
                        spin_unlock_bh(&sta->lock);
-                       mesh_plink_inc_estab_count(sdata);
+                       changed |= mesh_plink_inc_estab_count(sdata);
                        changed |= mesh_set_ht_prot_mode(sdata);
-                       changed |= BSS_CHANGED_BEACON;
                        mpl_dbg(sdata, "Mesh plink with %pM ESTABLISHED\n",
                                sta->sta.addr);
                        break;
@@ -888,9 +901,8 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
                        del_timer(&sta->plink_timer);
                        sta->plink_state = NL80211_PLINK_ESTAB;
                        spin_unlock_bh(&sta->lock);
-                       mesh_plink_inc_estab_count(sdata);
+                       changed |= mesh_plink_inc_estab_count(sdata);
                        changed |= mesh_set_ht_prot_mode(sdata);
-                       changed |= BSS_CHANGED_BEACON;
                        mpl_dbg(sdata, "Mesh plink with %pM ESTABLISHED\n",
                                sta->sta.addr);
                        mesh_plink_frame_tx(sdata,
@@ -908,13 +920,12 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
                case CLS_ACPT:
                        reason = cpu_to_le16(WLAN_REASON_MESH_CLOSE);
                        sta->reason = reason;
-                       __mesh_plink_deactivate(sta);
+                       changed |= __mesh_plink_deactivate(sta);
                        sta->plink_state = NL80211_PLINK_HOLDING;
                        llid = sta->llid;
                        mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata));
                        spin_unlock_bh(&sta->lock);
                        changed |= mesh_set_ht_prot_mode(sdata);
-                       changed |= BSS_CHANGED_BEACON;
                        mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_CLOSE,
                                            sta->sta.addr, llid, plid, reason);
                        break;
index f76b83341cf9a39db0e14092a85f2e576245a306..e714ed8bb198727c6738c1e3ae7650243dd2a07e 100644 (file)
@@ -88,8 +88,6 @@ MODULE_PARM_DESC(probe_wait_ms,
 #define TMR_RUNNING_TIMER      0
 #define TMR_RUNNING_CHANSW     1
 
-#define DEAUTH_DISASSOC_LEN    (24 /* hdr */ + 2 /* reason */)
-
 /*
  * All cfg80211 functions have to be called outside a locked
  * section so that they can acquire a lock themselves... This
@@ -146,6 +144,9 @@ void ieee80211_sta_reset_beacon_monitor(struct ieee80211_sub_if_data *sdata)
        if (sdata->vif.driver_flags & IEEE80211_VIF_BEACON_FILTER)
                return;
 
+       if (sdata->local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR)
+               return;
+
        mod_timer(&sdata->u.mgd.bcn_mon_timer,
                  round_jiffies_up(jiffies + sdata->u.mgd.beacon_timeout));
 }
@@ -182,15 +183,15 @@ static u32 ieee80211_config_ht_tx(struct ieee80211_sub_if_data *sdata,
        u16 ht_opmode;
        bool disable_40 = false;
 
-       sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
+       sband = local->hw.wiphy->bands[local->oper_channel->band];
 
        switch (sdata->vif.bss_conf.channel_type) {
        case NL80211_CHAN_HT40PLUS:
-               if (local->hw.conf.channel->flags & IEEE80211_CHAN_NO_HT40PLUS)
+               if (local->oper_channel->flags & IEEE80211_CHAN_NO_HT40PLUS)
                        disable_40 = true;
                break;
        case NL80211_CHAN_HT40MINUS:
-               if (local->hw.conf.channel->flags & IEEE80211_CHAN_NO_HT40MINUS)
+               if (local->oper_channel->flags & IEEE80211_CHAN_NO_HT40MINUS)
                        disable_40 = true;
                break;
        default:
@@ -326,6 +327,26 @@ static void ieee80211_add_ht_ie(struct ieee80211_sub_if_data *sdata,
        ieee80211_ie_build_ht_cap(pos, &ht_cap, cap);
 }
 
+static void ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata,
+                                struct sk_buff *skb,
+                                struct ieee80211_supported_band *sband)
+{
+       u8 *pos;
+       u32 cap;
+       struct ieee80211_sta_vht_cap vht_cap;
+
+       BUILD_BUG_ON(sizeof(vht_cap) != sizeof(sband->vht_cap));
+
+       memcpy(&vht_cap, &sband->vht_cap, sizeof(vht_cap));
+
+       /* determine capability flags */
+       cap = vht_cap.cap;
+
+       /* reserve and fill IE */
+       pos = skb_put(skb, sizeof(struct ieee80211_vht_capabilities) + 2);
+       ieee80211_ie_build_vht_cap(pos, &vht_cap, cap);
+}
+
 static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
 {
        struct ieee80211_local *local = sdata->local;
@@ -371,6 +392,7 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
                        4 + /* power capability */
                        2 + 2 * sband->n_channels + /* supported channels */
                        2 + sizeof(struct ieee80211_ht_cap) + /* HT */
+                       2 + sizeof(struct ieee80211_vht_capabilities) + /* VHT */
                        assoc_data->ie_len + /* extra IEs */
                        9, /* WMM */
                        GFP_KERNEL);
@@ -503,6 +525,9 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
                ieee80211_add_ht_ie(sdata, skb, assoc_data->ap_ht_param,
                                    sband, local->oper_channel, ifmgd->ap_smps);
 
+       if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT))
+               ieee80211_add_vht_ie(sdata, skb, sband);
+
        /* if present, add any custom non-vendor IEs that go after HT */
        if (assoc_data->ie_len && assoc_data->ie) {
                noffset = ieee80211_ie_split_vendor(assoc_data->ie,
@@ -547,48 +572,6 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
        ieee80211_tx_skb(sdata, skb);
 }
 
-static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
-                                          const u8 *bssid, u16 stype,
-                                          u16 reason, bool send_frame,
-                                          u8 *frame_buf)
-{
-       struct ieee80211_local *local = sdata->local;
-       struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
-       struct sk_buff *skb;
-       struct ieee80211_mgmt *mgmt = (void *)frame_buf;
-
-       /* build frame */
-       mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | stype);
-       mgmt->duration = 0; /* initialize only */
-       mgmt->seq_ctrl = 0; /* initialize only */
-       memcpy(mgmt->da, bssid, ETH_ALEN);
-       memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
-       memcpy(mgmt->bssid, bssid, ETH_ALEN);
-       /* u.deauth.reason_code == u.disassoc.reason_code */
-       mgmt->u.deauth.reason_code = cpu_to_le16(reason);
-
-       if (send_frame) {
-               skb = dev_alloc_skb(local->hw.extra_tx_headroom +
-                                   DEAUTH_DISASSOC_LEN);
-               if (!skb)
-                       return;
-
-               skb_reserve(skb, local->hw.extra_tx_headroom);
-
-               /* copy in frame */
-               memcpy(skb_put(skb, DEAUTH_DISASSOC_LEN),
-                      mgmt, DEAUTH_DISASSOC_LEN);
-
-               if (!(ifmgd->flags & IEEE80211_STA_MFP_ENABLED))
-                       IEEE80211_SKB_CB(skb)->flags |=
-                               IEEE80211_TX_INTFL_DONT_ENCRYPT;
-
-               drv_mgd_prepare_tx(local, sdata);
-
-               ieee80211_tx_skb(sdata, skb);
-       }
-}
-
 void ieee80211_send_pspoll(struct ieee80211_local *local,
                           struct ieee80211_sub_if_data *sdata)
 {
@@ -687,6 +670,7 @@ static void ieee80211_chswitch_work(struct work_struct *work)
        /* XXX: shouldn't really modify cfg80211-owned data! */
        ifmgd->associated->channel = sdata->local->oper_channel;
 
+       /* XXX: wait for a beacon first? */
        ieee80211_wake_queues_by_reason(&sdata->local->hw,
                                        IEEE80211_QUEUE_STOP_REASON_CSA);
  out:
@@ -704,16 +688,13 @@ void ieee80211_chswitch_done(struct ieee80211_vif *vif, bool success)
 
        trace_api_chswitch_done(sdata, success);
        if (!success) {
-               /*
-                * If the channel switch was not successful, stay
-                * around on the old channel. We currently lack
-                * good handling of this situation, possibly we
-                * should just drop the association.
-                */
-               sdata->local->csa_channel = sdata->local->oper_channel;
+               sdata_info(sdata,
+                          "driver channel switch failed, disconnecting\n");
+               ieee80211_queue_work(&sdata->local->hw,
+                                    &ifmgd->csa_connection_drop_work);
+       } else {
+               ieee80211_queue_work(&sdata->local->hw, &ifmgd->chswitch_work);
        }
-
-       ieee80211_queue_work(&sdata->local->hw, &ifmgd->chswitch_work);
 }
 EXPORT_SYMBOL(ieee80211_chswitch_done);
 
@@ -758,61 +739,111 @@ void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
                return;
 
        new_ch = ieee80211_get_channel(sdata->local->hw.wiphy, new_freq);
-       if (!new_ch || new_ch->flags & IEEE80211_CHAN_DISABLED)
+       if (!new_ch || new_ch->flags & IEEE80211_CHAN_DISABLED) {
+               sdata_info(sdata,
+                          "AP %pM switches to unsupported channel (%d MHz), disconnecting\n",
+                          ifmgd->associated->bssid, new_freq);
+               ieee80211_queue_work(&sdata->local->hw,
+                                    &ifmgd->csa_connection_drop_work);
                return;
+       }
 
        sdata->local->csa_channel = new_ch;
 
+       ifmgd->flags |= IEEE80211_STA_CSA_RECEIVED;
+
+       if (sw_elem->mode)
+               ieee80211_stop_queues_by_reason(&sdata->local->hw,
+                               IEEE80211_QUEUE_STOP_REASON_CSA);
+
        if (sdata->local->ops->channel_switch) {
                /* use driver's channel switch callback */
-               struct ieee80211_channel_switch ch_switch;
-               memset(&ch_switch, 0, sizeof(ch_switch));
-               ch_switch.timestamp = timestamp;
-               if (sw_elem->mode) {
-                       ch_switch.block_tx = true;
-                       ieee80211_stop_queues_by_reason(&sdata->local->hw,
-                                       IEEE80211_QUEUE_STOP_REASON_CSA);
-               }
-               ch_switch.channel = new_ch;
-               ch_switch.count = sw_elem->count;
-               ifmgd->flags |= IEEE80211_STA_CSA_RECEIVED;
+               struct ieee80211_channel_switch ch_switch = {
+                       .timestamp = timestamp,
+                       .block_tx = sw_elem->mode,
+                       .channel = new_ch,
+                       .count = sw_elem->count,
+               };
+
                drv_channel_switch(sdata->local, &ch_switch);
                return;
        }
 
        /* channel switch handled in software */
-       if (sw_elem->count <= 1) {
+       if (sw_elem->count <= 1)
                ieee80211_queue_work(&sdata->local->hw, &ifmgd->chswitch_work);
-       } else {
-               if (sw_elem->mode)
-                       ieee80211_stop_queues_by_reason(&sdata->local->hw,
-                                       IEEE80211_QUEUE_STOP_REASON_CSA);
-               ifmgd->flags |= IEEE80211_STA_CSA_RECEIVED;
+       else
                mod_timer(&ifmgd->chswitch_timer,
-                         jiffies +
-                         msecs_to_jiffies(sw_elem->count *
-                                          cbss->beacon_interval));
-       }
+                         TU_TO_EXP_TIME(sw_elem->count *
+                                        cbss->beacon_interval));
 }
 
 static void ieee80211_handle_pwr_constr(struct ieee80211_sub_if_data *sdata,
-                                       u16 capab_info, u8 *pwr_constr_elem,
-                                       u8 pwr_constr_elem_len)
+                                       struct ieee80211_channel *channel,
+                                       const u8 *country_ie, u8 country_ie_len,
+                                       const u8 *pwr_constr_elem)
 {
-       struct ieee80211_conf *conf = &sdata->local->hw.conf;
+       struct ieee80211_country_ie_triplet *triplet;
+       int chan = ieee80211_frequency_to_channel(channel->center_freq);
+       int i, chan_pwr, chan_increment, new_ap_level;
+       bool have_chan_pwr = false;
 
-       if (!(capab_info & WLAN_CAPABILITY_SPECTRUM_MGMT))
+       /* Invalid IE */
+       if (country_ie_len % 2 || country_ie_len < IEEE80211_COUNTRY_IE_MIN_LEN)
                return;
 
-       /* Power constraint IE length should be 1 octet */
-       if (pwr_constr_elem_len != 1)
-               return;
+       triplet = (void *)(country_ie + 3);
+       country_ie_len -= 3;
+
+       switch (channel->band) {
+       default:
+               WARN_ON_ONCE(1);
+               /* fall through */
+       case IEEE80211_BAND_2GHZ:
+       case IEEE80211_BAND_60GHZ:
+               chan_increment = 1;
+               break;
+       case IEEE80211_BAND_5GHZ:
+               chan_increment = 4;
+               break;
+       }
+
+       /* find channel */
+       while (country_ie_len >= 3) {
+               u8 first_channel = triplet->chans.first_channel;
 
-       if ((*pwr_constr_elem <= conf->channel->max_reg_power) &&
-           (*pwr_constr_elem != sdata->local->power_constr_level)) {
-               sdata->local->power_constr_level = *pwr_constr_elem;
-               ieee80211_hw_config(sdata->local, 0);
+               if (first_channel >= IEEE80211_COUNTRY_EXTENSION_ID)
+                       goto next;
+
+               for (i = 0; i < triplet->chans.num_channels; i++) {
+                       if (first_channel + i * chan_increment == chan) {
+                               have_chan_pwr = true;
+                               chan_pwr = triplet->chans.max_power;
+                               break;
+                       }
+               }
+               if (have_chan_pwr)
+                       break;
+
+ next:
+               triplet++;
+               country_ie_len -= 3;
        }
+
+       if (!have_chan_pwr)
+               return;
+
+       new_ap_level = max_t(int, 0, chan_pwr - *pwr_constr_elem);
+
+       if (sdata->local->ap_power_level == new_ap_level)
+               return;
+
+       sdata_info(sdata,
+                  "Limiting TX power to %d (%d - %d) dBm as advertised by %pM\n",
+                  new_ap_level, chan_pwr, *pwr_constr_elem,
+                  sdata->u.mgd.bssid);
+       sdata->local->ap_power_level = new_ap_level;
+       ieee80211_hw_config(sdata->local, 0);
 }
 
 void ieee80211_enable_dyn_ps(struct ieee80211_vif *vif)
@@ -1007,6 +1038,16 @@ void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency)
        ieee80211_change_ps(local);
 }
 
+void ieee80211_recalc_ps_vif(struct ieee80211_sub_if_data *sdata)
+{
+       bool ps_allowed = ieee80211_powersave_allowed(sdata);
+
+       if (sdata->vif.bss_conf.ps != ps_allowed) {
+               sdata->vif.bss_conf.ps = ps_allowed;
+               ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_PS);
+       }
+}
+
 void ieee80211_dynamic_ps_disable_work(struct work_struct *work)
 {
        struct ieee80211_local *local =
@@ -1239,7 +1280,7 @@ static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata,
        }
 
        use_short_slot = !!(capab & WLAN_CAPABILITY_SHORT_SLOT_TIME);
-       if (sdata->local->hw.conf.channel->band == IEEE80211_BAND_5GHZ)
+       if (sdata->local->oper_channel->band == IEEE80211_BAND_5GHZ)
                use_short_slot = true;
 
        if (use_protection != bss_conf->use_cts_prot) {
@@ -1307,9 +1348,11 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
 
        mutex_lock(&local->iflist_mtx);
        ieee80211_recalc_ps(local, -1);
-       ieee80211_recalc_smps(local);
        mutex_unlock(&local->iflist_mtx);
 
+       ieee80211_recalc_smps(local);
+       ieee80211_recalc_ps_vif(sdata);
+
        netif_tx_start_all_queues(sdata->dev);
        netif_carrier_on(sdata->dev);
 }
@@ -1356,7 +1399,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
        sta = sta_info_get(sdata, ifmgd->bssid);
        if (sta) {
                set_sta_flag(sta, WLAN_STA_BLOCK_BA);
-               ieee80211_sta_tear_down_BA_sessions(sta, tx);
+               ieee80211_sta_tear_down_BA_sessions(sta, false);
        }
        mutex_unlock(&local->sta_mtx);
 
@@ -1371,6 +1414,9 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
        }
        local->ps_sdata = NULL;
 
+       /* disable per-vif ps */
+       ieee80211_recalc_ps_vif(sdata);
+
        /* flush out any pending frame (e.g. DELBA) before deauth/disassoc */
        if (tx)
                drv_flush(local, false);
@@ -1401,7 +1447,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
        memset(&ifmgd->ht_capa, 0, sizeof(ifmgd->ht_capa));
        memset(&ifmgd->ht_capa_mask, 0, sizeof(ifmgd->ht_capa_mask));
 
-       local->power_constr_level = 0;
+       local->ap_power_level = 0;
 
        del_timer_sync(&local->dynamic_ps_timer);
        cancel_work_sync(&local->dynamic_ps_enable_work);
@@ -1542,7 +1588,8 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
                        ssid_len = ssid[1];
 
                ieee80211_send_probe_req(sdata, dst, ssid + 2, ssid_len, NULL,
-                                        0, (u32) -1, true, false);
+                                        0, (u32) -1, true, false,
+                                        ifmgd->associated->channel);
        }
 
        ifmgd->probe_timeout = jiffies + msecs_to_jiffies(probe_wait_ms);
@@ -1645,19 +1692,21 @@ struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw,
                ssid_len = ssid[1];
 
        skb = ieee80211_build_probe_req(sdata, cbss->bssid,
-                                       (u32) -1, ssid + 2, ssid_len,
+                                       (u32) -1,
+                                       sdata->local->oper_channel,
+                                       ssid + 2, ssid_len,
                                        NULL, 0, true);
 
        return skb;
 }
 EXPORT_SYMBOL(ieee80211_ap_probereq_get);
 
-static void __ieee80211_connection_loss(struct ieee80211_sub_if_data *sdata)
+static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata,
+                                  bool transmit_frame)
 {
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
        struct ieee80211_local *local = sdata->local;
-       u8 bssid[ETH_ALEN];
-       u8 frame_buf[DEAUTH_DISASSOC_LEN];
+       u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
 
        mutex_lock(&ifmgd->mtx);
        if (!ifmgd->associated) {
@@ -1665,27 +1714,24 @@ static void __ieee80211_connection_loss(struct ieee80211_sub_if_data *sdata)
                return;
        }
 
-       memcpy(bssid, ifmgd->associated->bssid, ETH_ALEN);
-
-       sdata_info(sdata, "Connection to AP %pM lost\n", bssid);
-
        ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
                               WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY,
-                              false, frame_buf);
+                              transmit_frame, frame_buf);
+       ifmgd->flags &= ~IEEE80211_STA_CSA_RECEIVED;
        mutex_unlock(&ifmgd->mtx);
 
        /*
         * must be outside lock due to cfg80211,
         * but that's not a problem.
         */
-       cfg80211_send_deauth(sdata->dev, frame_buf, DEAUTH_DISASSOC_LEN);
+       cfg80211_send_deauth(sdata->dev, frame_buf, IEEE80211_DEAUTH_FRAME_LEN);
 
        mutex_lock(&local->mtx);
        ieee80211_recalc_idle(local);
        mutex_unlock(&local->mtx);
 }
 
-void ieee80211_beacon_connection_loss_work(struct work_struct *work)
+static void ieee80211_beacon_connection_loss_work(struct work_struct *work)
 {
        struct ieee80211_sub_if_data *sdata =
                container_of(work, struct ieee80211_sub_if_data,
@@ -1701,10 +1747,24 @@ void ieee80211_beacon_connection_loss_work(struct work_struct *work)
                rcu_read_unlock();
        }
 
-       if (sdata->local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR)
-               __ieee80211_connection_loss(sdata);
-       else
+       if (sdata->local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR) {
+               sdata_info(sdata, "Connection to AP %pM lost\n",
+                          ifmgd->bssid);
+               __ieee80211_disconnect(sdata, false);
+       } else {
                ieee80211_mgd_probe_ap(sdata, true);
+       }
+}
+
+static void ieee80211_csa_connection_drop_work(struct work_struct *work)
+{
+       struct ieee80211_sub_if_data *sdata =
+               container_of(work, struct ieee80211_sub_if_data,
+                            u.mgd.csa_connection_drop_work);
+
+       ieee80211_wake_queues_by_reason(&sdata->local->hw,
+                                       IEEE80211_QUEUE_STOP_REASON_CSA);
+       __ieee80211_disconnect(sdata, true);
 }
 
 void ieee80211_beacon_loss(struct ieee80211_vif *vif)
@@ -2232,14 +2292,10 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
                mutex_unlock(&local->iflist_mtx);
        }
 
-       if (elems->ch_switch_elem && (elems->ch_switch_elem_len == 3) &&
-           (memcmp(mgmt->bssid, sdata->u.mgd.associated->bssid,
-                                                       ETH_ALEN) == 0)) {
-               struct ieee80211_channel_sw_ie *sw_elem =
-                       (struct ieee80211_channel_sw_ie *)elems->ch_switch_elem;
-               ieee80211_sta_process_chanswitch(sdata, sw_elem,
+       if (elems->ch_switch_ie &&
+           memcmp(mgmt->bssid, sdata->u.mgd.associated->bssid, ETH_ALEN) == 0)
+               ieee80211_sta_process_chanswitch(sdata, elems->ch_switch_ie,
                                                 bss, rx_status->mactime);
-       }
 }
 
 
@@ -2326,7 +2382,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
        if (baselen > len)
                return;
 
-       if (rx_status->freq != local->hw.conf.channel->center_freq)
+       if (rx_status->freq != local->oper_channel->center_freq)
                return;
 
        if (ifmgd->assoc_data && !ifmgd->assoc_data->have_beacon &&
@@ -2490,21 +2546,19 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
            !(ifmgd->flags & IEEE80211_STA_DISABLE_11N)) {
                struct ieee80211_supported_band *sband;
 
-               sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
+               sband = local->hw.wiphy->bands[local->oper_channel->band];
 
                changed |= ieee80211_config_ht_tx(sdata, elems.ht_operation,
                                                  bssid, true);
        }
 
-       /* Note: country IE parsing is done for us by cfg80211 */
-       if (elems.country_elem) {
-               /* TODO: IBSS also needs this */
-               if (elems.pwr_constr_elem)
-                       ieee80211_handle_pwr_constr(sdata,
-                               le16_to_cpu(mgmt->u.probe_resp.capab_info),
-                               elems.pwr_constr_elem,
-                               elems.pwr_constr_elem_len);
-       }
+       if (elems.country_elem && elems.pwr_constr_elem &&
+           mgmt->u.probe_resp.capab_info &
+                               cpu_to_le16(WLAN_CAPABILITY_SPECTRUM_MGMT))
+               ieee80211_handle_pwr_constr(sdata, local->oper_channel,
+                                           elems.country_elem,
+                                           elems.country_elem_len,
+                                           elems.pwr_constr_elem);
 
        ieee80211_bss_info_change_notify(sdata, changed);
 }
@@ -2601,7 +2655,7 @@ static void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata,
 {
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
-       u8 frame_buf[DEAUTH_DISASSOC_LEN];
+       u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
 
        ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, reason,
                               false, frame_buf);
@@ -2611,7 +2665,7 @@ static void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata,
         * must be outside lock due to cfg80211,
         * but that's not a problem.
         */
-       cfg80211_send_deauth(sdata->dev, frame_buf, DEAUTH_DISASSOC_LEN);
+       cfg80211_send_deauth(sdata->dev, frame_buf, IEEE80211_DEAUTH_FRAME_LEN);
 
        mutex_lock(&local->mtx);
        ieee80211_recalc_idle(local);
@@ -2673,7 +2727,8 @@ static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata)
                 * will not answer to direct packet in unassociated state.
                 */
                ieee80211_send_probe_req(sdata, NULL, ssidie + 2, ssidie[1],
-                                        NULL, 0, (u32) -1, true, false);
+                                        NULL, 0, (u32) -1, true, false,
+                                        auth_data->bss->channel);
        }
 
        auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
@@ -2894,6 +2949,7 @@ void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata)
 
        cancel_work_sync(&ifmgd->monitor_work);
        cancel_work_sync(&ifmgd->beacon_connection_loss_work);
+       cancel_work_sync(&ifmgd->csa_connection_drop_work);
        if (del_timer_sync(&ifmgd->timer))
                set_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running);
 
@@ -2950,6 +3006,8 @@ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata)
        INIT_WORK(&ifmgd->chswitch_work, ieee80211_chswitch_work);
        INIT_WORK(&ifmgd->beacon_connection_loss_work,
                  ieee80211_beacon_connection_loss_work);
+       INIT_WORK(&ifmgd->csa_connection_drop_work,
+                 ieee80211_csa_connection_drop_work);
        INIT_WORK(&ifmgd->request_smps_work, ieee80211_request_smps_work);
        setup_timer(&ifmgd->timer, ieee80211_sta_timer,
                    (unsigned long) sdata);
@@ -3000,41 +3058,17 @@ int ieee80211_max_network_latency(struct notifier_block *nb,
        return 0;
 }
 
-static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
-                                    struct cfg80211_bss *cbss, bool assoc)
+static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
+                                 struct cfg80211_bss *cbss)
 {
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
-       struct ieee80211_bss *bss = (void *)cbss->priv;
-       struct sta_info *sta = NULL;
-       bool have_sta = false;
-       int err;
        int ht_cfreq;
        enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT;
        const u8 *ht_oper_ie;
        const struct ieee80211_ht_operation *ht_oper = NULL;
        struct ieee80211_supported_band *sband;
 
-       if (WARN_ON(!ifmgd->auth_data && !ifmgd->assoc_data))
-               return -EINVAL;
-
-       if (assoc) {
-               rcu_read_lock();
-               have_sta = sta_info_get(sdata, cbss->bssid);
-               rcu_read_unlock();
-       }
-
-       if (!have_sta) {
-               sta = sta_info_alloc(sdata, cbss->bssid, GFP_KERNEL);
-               if (!sta)
-                       return -ENOMEM;
-       }
-
-       mutex_lock(&local->mtx);
-       ieee80211_recalc_idle(sdata->local);
-       mutex_unlock(&local->mtx);
-
-       /* switch to the right channel */
        sband = local->hw.wiphy->bands[cbss->channel->band];
 
        ifmgd->flags &= ~IEEE80211_STA_DISABLE_40MHZ;
@@ -3097,10 +3131,51 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
        local->oper_channel = cbss->channel;
        ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
 
-       if (sta) {
+       return 0;
+}
+
+static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
+                                    struct cfg80211_bss *cbss, bool assoc)
+{
+       struct ieee80211_local *local = sdata->local;
+       struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+       struct ieee80211_bss *bss = (void *)cbss->priv;
+       struct sta_info *new_sta = NULL;
+       bool have_sta = false;
+       int err;
+
+       if (WARN_ON(!ifmgd->auth_data && !ifmgd->assoc_data))
+               return -EINVAL;
+
+       if (assoc) {
+               rcu_read_lock();
+               have_sta = sta_info_get(sdata, cbss->bssid);
+               rcu_read_unlock();
+       }
+
+       if (!have_sta) {
+               new_sta = sta_info_alloc(sdata, cbss->bssid, GFP_KERNEL);
+               if (!new_sta)
+                       return -ENOMEM;
+       }
+
+       mutex_lock(&local->mtx);
+       ieee80211_recalc_idle(sdata->local);
+       mutex_unlock(&local->mtx);
+
+       if (new_sta) {
                u32 rates = 0, basic_rates = 0;
                bool have_higher_than_11mbit;
                int min_rate = INT_MAX, min_rate_index = -1;
+               struct ieee80211_supported_band *sband;
+
+               sband = local->hw.wiphy->bands[cbss->channel->band];
+
+               err = ieee80211_prep_channel(sdata, cbss);
+               if (err) {
+                       sta_info_free(local, new_sta);
+                       return err;
+               }
 
                ieee80211_get_rates(sband, bss->supp_rates,
                                    bss->supp_rates_len,
@@ -3122,7 +3197,7 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
                        basic_rates = BIT(min_rate_index);
                }
 
-               sta->sta.supp_rates[cbss->channel->band] = rates;
+               new_sta->sta.supp_rates[cbss->channel->band] = rates;
                sdata->vif.bss_conf.basic_rates = basic_rates;
 
                /* cf. IEEE 802.11 9.2.12 */
@@ -3145,10 +3220,10 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
                        BSS_CHANGED_BEACON_INT);
 
                if (assoc)
-                       sta_info_pre_move_state(sta, IEEE80211_STA_AUTH);
+                       sta_info_pre_move_state(new_sta, IEEE80211_STA_AUTH);
 
-               err = sta_info_insert(sta);
-               sta = NULL;
+               err = sta_info_insert(new_sta);
+               new_sta = NULL;
                if (err) {
                        sdata_info(sdata,
                                   "failed to insert STA entry for the AP (error %d)\n",
@@ -3302,9 +3377,13 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
        }
 
        /* prepare assoc data */
-
-       ifmgd->flags &= ~IEEE80211_STA_DISABLE_11N;
-       ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED;
+
+       /*
+        * keep only the 40 MHz disable bit set as it might have
+        * been set during authentication already, all other bits
+        * should be reset for a new connection
+        */
+       ifmgd->flags &= IEEE80211_STA_DISABLE_40MHZ;
 
        ifmgd->beacon_crc_valid = false;
 
@@ -3320,21 +3399,34 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
                    req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_TKIP ||
                    req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP104) {
                        ifmgd->flags |= IEEE80211_STA_DISABLE_11N;
+                       ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
                        netdev_info(sdata->dev,
-                                   "disabling HT due to WEP/TKIP use\n");
+                                   "disabling HT/VHT due to WEP/TKIP use\n");
                }
        }
 
-       if (req->flags & ASSOC_REQ_DISABLE_HT)
+       if (req->flags & ASSOC_REQ_DISABLE_HT) {
                ifmgd->flags |= IEEE80211_STA_DISABLE_11N;
+               ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
+       }
 
        /* Also disable HT if we don't support it or the AP doesn't use WMM */
        sband = local->hw.wiphy->bands[req->bss->channel->band];
        if (!sband->ht_cap.ht_supported ||
            local->hw.queues < IEEE80211_NUM_ACS || !bss->wmm_used) {
                ifmgd->flags |= IEEE80211_STA_DISABLE_11N;
-               netdev_info(sdata->dev,
-                           "disabling HT as WMM/QoS is not supported\n");
+               if (!bss->wmm_used)
+                       netdev_info(sdata->dev,
+                                   "disabling HT as WMM/QoS is not supported by the AP\n");
+       }
+
+       /* disable VHT if we don't support it or the AP doesn't use WMM */
+       if (!sband->vht_cap.vht_supported ||
+           local->hw.queues < IEEE80211_NUM_ACS || !bss->wmm_used) {
+               ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
+               if (!bss->wmm_used)
+                       netdev_info(sdata->dev,
+                                   "disabling VHT as WMM/QoS is not supported by the AP\n");
        }
 
        memcpy(&ifmgd->ht_capa, &req->ht_capa, sizeof(ifmgd->ht_capa));
@@ -3456,7 +3548,7 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
                         struct cfg80211_deauth_request *req)
 {
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
-       u8 frame_buf[DEAUTH_DISASSOC_LEN];
+       u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
 
        mutex_lock(&ifmgd->mtx);
 
@@ -3471,17 +3563,21 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
                   req->bssid, req->reason_code);
 
        if (ifmgd->associated &&
-           ether_addr_equal(ifmgd->associated->bssid, req->bssid))
+           ether_addr_equal(ifmgd->associated->bssid, req->bssid)) {
                ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
                                       req->reason_code, true, frame_buf);
-       else
+       } else {
+               drv_mgd_prepare_tx(sdata->local, sdata);
                ieee80211_send_deauth_disassoc(sdata, req->bssid,
                                               IEEE80211_STYPE_DEAUTH,
                                               req->reason_code, true,
                                               frame_buf);
+       }
+
        mutex_unlock(&ifmgd->mtx);
 
-       __cfg80211_send_deauth(sdata->dev, frame_buf, DEAUTH_DISASSOC_LEN);
+       __cfg80211_send_deauth(sdata->dev, frame_buf,
+                              IEEE80211_DEAUTH_FRAME_LEN);
 
        mutex_lock(&sdata->local->mtx);
        ieee80211_recalc_idle(sdata->local);
@@ -3495,7 +3591,7 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
 {
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
        u8 bssid[ETH_ALEN];
-       u8 frame_buf[DEAUTH_DISASSOC_LEN];
+       u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
 
        mutex_lock(&ifmgd->mtx);
 
@@ -3520,7 +3616,8 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
                               frame_buf);
        mutex_unlock(&ifmgd->mtx);
 
-       __cfg80211_send_disassoc(sdata->dev, frame_buf, DEAUTH_DISASSOC_LEN);
+       __cfg80211_send_disassoc(sdata->dev, frame_buf,
+                                IEEE80211_DEAUTH_FRAME_LEN);
 
        mutex_lock(&sdata->local->mtx);
        ieee80211_recalc_idle(sdata->local);
index 635c3250c66894ed090d08d4356b5832a7120247..83608ac167801f1c06fc55dd3ab53370d947cbc7 100644 (file)
@@ -116,6 +116,9 @@ void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local,
                if (!ieee80211_sdata_running(sdata))
                        continue;
 
+               if (sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE)
+                       continue;
+
                if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
                        set_bit(SDATA_STATE_OFFCHANNEL, &sdata->state);
 
@@ -144,6 +147,9 @@ void ieee80211_offchannel_return(struct ieee80211_local *local,
 
        mutex_lock(&local->iflist_mtx);
        list_for_each_entry(sdata, &local->interfaces, list) {
+               if (sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE)
+                       continue;
+
                if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
                        clear_bit(SDATA_STATE_OFFCHANNEL, &sdata->state);
 
@@ -227,8 +233,7 @@ static void ieee80211_hw_roc_start(struct work_struct *work)
                        u32 dur = dep->duration;
                        dep->duration = dur - roc->duration;
                        roc->duration = dur;
-                       list_del(&dep->list);
-                       list_add(&dep->list, &roc->list);
+                       list_move(&dep->list, &roc->list);
                }
        }
  out_unlock:
index 6e4fd32c66171345c399f9eb765809d974068e34..10de668eb9f64b8e6f3f9509f7db9ad666f36af1 100644 (file)
@@ -56,7 +56,7 @@ static inline void rate_control_rate_init(struct sta_info *sta)
        if (!ref)
                return;
 
-       sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
+       sband = local->hw.wiphy->bands[local->oper_channel->band];
 
        ref->ops->rate_init(ref->priv, sband, ista, priv_sta);
        set_sta_flag(sta, WLAN_STA_RATE_CONTROL);
index 0cb4edee6af5a468657d15af4fc7a05b99028088..61c621e9273fe70c26978d42433d58fca60a80b8 100644 (file)
@@ -60,7 +60,9 @@ static inline int should_drop_frame(struct sk_buff *skb,
        struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 
-       if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
+       if (status->flag & (RX_FLAG_FAILED_FCS_CRC |
+                           RX_FLAG_FAILED_PLCP_CRC |
+                           RX_FLAG_AMPDU_IS_ZEROLEN))
                return 1;
        if (unlikely(skb->len < 16 + present_fcs_len))
                return 1;
@@ -91,10 +93,17 @@ ieee80211_rx_radiotap_len(struct ieee80211_local *local,
        if (status->flag & RX_FLAG_HT) /* HT info */
                len += 3;
 
+       if (status->flag & RX_FLAG_AMPDU_DETAILS) {
+               /* padding */
+               while (len & 3)
+                       len++;
+               len += 8;
+       }
+
        return len;
 }
 
-/**
+/*
  * ieee80211_add_rx_radiotap_header - add radiotap header
  *
  * add a radiotap header containing all the fields which the hardware provided.
@@ -215,6 +224,37 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
                pos++;
                *pos++ = status->rate_idx;
        }
+
+       if (status->flag & RX_FLAG_AMPDU_DETAILS) {
+               u16 flags = 0;
+
+               /* ensure 4 byte alignment */
+               while ((pos - (u8 *)rthdr) & 3)
+                       pos++;
+               rthdr->it_present |=
+                       cpu_to_le32(1 << IEEE80211_RADIOTAP_AMPDU_STATUS);
+               put_unaligned_le32(status->ampdu_reference, pos);
+               pos += 4;
+               if (status->flag & RX_FLAG_AMPDU_REPORT_ZEROLEN)
+                       flags |= IEEE80211_RADIOTAP_AMPDU_REPORT_ZEROLEN;
+               if (status->flag & RX_FLAG_AMPDU_IS_ZEROLEN)
+                       flags |= IEEE80211_RADIOTAP_AMPDU_IS_ZEROLEN;
+               if (status->flag & RX_FLAG_AMPDU_LAST_KNOWN)
+                       flags |= IEEE80211_RADIOTAP_AMPDU_LAST_KNOWN;
+               if (status->flag & RX_FLAG_AMPDU_IS_LAST)
+                       flags |= IEEE80211_RADIOTAP_AMPDU_IS_LAST;
+               if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_ERROR)
+                       flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_ERR;
+               if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_KNOWN)
+                       flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_KNOWN;
+               put_unaligned_le16(flags, pos);
+               pos += 2;
+               if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_KNOWN)
+                       *pos++ = status->ampdu_delimiter_crc;
+               else
+                       *pos++ = 0;
+               *pos++ = 0;
+       }
 }
 
 /*
@@ -2268,7 +2308,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
 
                goto queue;
        case WLAN_CATEGORY_SPECTRUM_MGMT:
-               if (local->hw.conf.channel->band != IEEE80211_BAND_5GHZ)
+               if (status->band != IEEE80211_BAND_5GHZ)
                        break;
 
                if (sdata->vif.type != NL80211_IFTYPE_STATION)
@@ -2772,8 +2812,7 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx,
                if (!bssid) {
                        if (!ether_addr_equal(sdata->vif.addr, hdr->addr1))
                                return 0;
-               } else if (!ieee80211_bssid_match(bssid,
-                                       sdata->vif.addr)) {
+               } else if (!ieee80211_bssid_match(bssid, sdata->vif.addr)) {
                        /*
                         * Accept public action frames even when the
                         * BSSID doesn't match, this is used for P2P
@@ -2793,9 +2832,18 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx,
                if (!ether_addr_equal(sdata->u.wds.remote_addr, hdr->addr2))
                        return 0;
                break;
+       case NL80211_IFTYPE_P2P_DEVICE:
+               if (!ieee80211_is_public_action(hdr, skb->len) &&
+                   !ieee80211_is_probe_req(hdr->frame_control) &&
+                   !ieee80211_is_probe_resp(hdr->frame_control) &&
+                   !ieee80211_is_beacon(hdr->frame_control))
+                       return 0;
+               if (!ether_addr_equal(sdata->vif.addr, hdr->addr1))
+                       status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
+               break;
        default:
                /* should never get here */
-               WARN_ON(1);
+               WARN_ON_ONCE(1);
                break;
        }
 
index 839dd9737989ec78bbb979c814953b1bdd0187a8..c4cdbde24fd3a70db1141c9460f617710daf98e2 100644 (file)
@@ -407,7 +407,7 @@ static void ieee80211_scan_state_send_probe(struct ieee80211_local *local,
        enum ieee80211_band band = local->hw.conf.channel->band;
 
        sdata = rcu_dereference_protected(local->scan_sdata,
-                                         lockdep_is_held(&local->mtx));;
+                                         lockdep_is_held(&local->mtx));
 
        for (i = 0; i < local->scan_req->n_ssids; i++)
                ieee80211_send_probe_req(
@@ -416,7 +416,8 @@ static void ieee80211_scan_state_send_probe(struct ieee80211_local *local,
                        local->scan_req->ssids[i].ssid_len,
                        local->scan_req->ie, local->scan_req->ie_len,
                        local->scan_req->rates[band], false,
-                       local->scan_req->no_cck);
+                       local->scan_req->no_cck,
+                       local->hw.conf.channel);
 
        /*
         * After sending probe requests, wait for probe responses
@@ -479,11 +480,10 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
        if (local->ops->hw_scan) {
                __set_bit(SCAN_HW_SCANNING, &local->scanning);
        } else if ((req->n_channels == 1) &&
-                  (req->channels[0]->center_freq ==
-                   local->hw.conf.channel->center_freq)) {
-
-               /* If we are scanning only on the current channel, then
-                * we do not need to stop normal activities
+                  (req->channels[0] == local->oper_channel)) {
+               /*
+                * If we are scanning only on the operating channel
+                * then we do not need to stop normal activities
                 */
                unsigned long next_delay;
 
@@ -917,6 +917,7 @@ int ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
                                       struct cfg80211_sched_scan_request *req)
 {
        struct ieee80211_local *local = sdata->local;
+       struct ieee80211_sched_scan_ies sched_scan_ies;
        int ret, i;
 
        mutex_lock(&local->mtx);
@@ -935,33 +936,28 @@ int ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
                if (!local->hw.wiphy->bands[i])
                        continue;
 
-               local->sched_scan_ies.ie[i] = kzalloc(2 +
-                                                     IEEE80211_MAX_SSID_LEN +
-                                                     local->scan_ies_len +
-                                                     req->ie_len,
-                                                     GFP_KERNEL);
-               if (!local->sched_scan_ies.ie[i]) {
+               sched_scan_ies.ie[i] = kzalloc(2 + IEEE80211_MAX_SSID_LEN +
+                                              local->scan_ies_len +
+                                              req->ie_len,
+                                              GFP_KERNEL);
+               if (!sched_scan_ies.ie[i]) {
                        ret = -ENOMEM;
                        goto out_free;
                }
 
-               local->sched_scan_ies.len[i] =
-                       ieee80211_build_preq_ies(local,
-                                                local->sched_scan_ies.ie[i],
+               sched_scan_ies.len[i] =
+                       ieee80211_build_preq_ies(local, sched_scan_ies.ie[i],
                                                 req->ie, req->ie_len, i,
                                                 (u32) -1, 0);
        }
 
-       ret = drv_sched_scan_start(local, sdata, req,
-                                  &local->sched_scan_ies);
-       if (ret == 0) {
+       ret = drv_sched_scan_start(local, sdata, req, &sched_scan_ies);
+       if (ret == 0)
                rcu_assign_pointer(local->sched_scan_sdata, sdata);
-               goto out;
-       }
 
 out_free:
        while (i > 0)
-               kfree(local->sched_scan_ies.ie[--i]);
+               kfree(sched_scan_ies.ie[--i]);
 out:
        mutex_unlock(&local->mtx);
        return ret;
@@ -970,7 +966,7 @@ out:
 int ieee80211_request_sched_scan_stop(struct ieee80211_sub_if_data *sdata)
 {
        struct ieee80211_local *local = sdata->local;
-       int ret = 0, i;
+       int ret = 0;
 
        mutex_lock(&local->mtx);
 
@@ -979,12 +975,9 @@ int ieee80211_request_sched_scan_stop(struct ieee80211_sub_if_data *sdata)
                goto out;
        }
 
-       if (rcu_access_pointer(local->sched_scan_sdata)) {
-               for (i = 0; i < IEEE80211_NUM_BANDS; i++)
-                       kfree(local->sched_scan_ies.ie[i]);
-
+       if (rcu_access_pointer(local->sched_scan_sdata))
                drv_sched_scan_stop(local, sdata);
-       }
+
 out:
        mutex_unlock(&local->mtx);
 
@@ -1006,7 +999,6 @@ void ieee80211_sched_scan_stopped_work(struct work_struct *work)
        struct ieee80211_local *local =
                container_of(work, struct ieee80211_local,
                             sched_scan_stopped_work);
-       int i;
 
        mutex_lock(&local->mtx);
 
@@ -1015,9 +1007,6 @@ void ieee80211_sched_scan_stopped_work(struct work_struct *work)
                return;
        }
 
-       for (i = 0; i < IEEE80211_NUM_BANDS; i++)
-               kfree(local->sched_scan_ies.ie[i]);
-
        rcu_assign_pointer(local->sched_scan_sdata, NULL);
 
        mutex_unlock(&local->mtx);
index 06fa75ceb0251e6064d90661bf7b5e61f979b72c..797dd36a220d92ac549067a9cd4d59a7ebfc09a3 100644 (file)
@@ -91,6 +91,70 @@ static int sta_info_hash_del(struct ieee80211_local *local,
        return -ENOENT;
 }
 
+static void free_sta_work(struct work_struct *wk)
+{
+       struct sta_info *sta = container_of(wk, struct sta_info, free_sta_wk);
+       int ac, i;
+       struct tid_ampdu_tx *tid_tx;
+       struct ieee80211_sub_if_data *sdata = sta->sdata;
+       struct ieee80211_local *local = sdata->local;
+
+       /*
+        * At this point, when called as a call_rcu callback,
+        * neither mac80211 nor the driver can reference this
+        * sta struct any more, except via still-existing timers
+        * associated with this station, which we clean up below.
+        */
+
+       if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
+               BUG_ON(!sdata->bss);
+
+               clear_sta_flag(sta, WLAN_STA_PS_STA);
+
+               atomic_dec(&sdata->bss->num_sta_ps);
+               sta_info_recalc_tim(sta);
+       }
+
+       for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+               local->total_ps_buffered -= skb_queue_len(&sta->ps_tx_buf[ac]);
+               __skb_queue_purge(&sta->ps_tx_buf[ac]);
+               __skb_queue_purge(&sta->tx_filtered[ac]);
+       }
+
+#ifdef CONFIG_MAC80211_MESH
+       if (ieee80211_vif_is_mesh(&sdata->vif)) {
+               mesh_accept_plinks_update(sdata);
+               mesh_plink_deactivate(sta);
+               del_timer_sync(&sta->plink_timer);
+       }
+#endif
+
+       cancel_work_sync(&sta->drv_unblock_wk);
+
+       /*
+        * Destroy aggregation state here. It would be nice to wait for the
+        * driver to finish aggregation stop and then clean up, but for now
+        * drivers have to handle aggregation stop being requested, followed
+        * directly by station destruction.
+        */
+       for (i = 0; i < STA_TID_NUM; i++) {
+               tid_tx = rcu_dereference_raw(sta->ampdu_mlme.tid_tx[i]);
+               if (!tid_tx)
+                       continue;
+               __skb_queue_purge(&tid_tx->pending);
+               kfree(tid_tx);
+       }
+
+       sta_info_free(local, sta);
+}
+
+static void free_sta_rcu(struct rcu_head *h)
+{
+       struct sta_info *sta = container_of(h, struct sta_info, rcu_head);
+
+       ieee80211_queue_work(&sta->local->hw, &sta->free_sta_wk);
+}
+
 /* protected by RCU */
 struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata,
                              const u8 *addr)
@@ -241,6 +305,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
 
        spin_lock_init(&sta->lock);
        INIT_WORK(&sta->drv_unblock_wk, sta_unblock);
+       INIT_WORK(&sta->free_sta_wk, free_sta_work);
        INIT_WORK(&sta->ampdu_mlme.work, ieee80211_ba_session_work);
        mutex_init(&sta->ampdu_mlme.mtx);
 
@@ -654,8 +719,7 @@ int __must_check __sta_info_destroy(struct sta_info *sta)
 {
        struct ieee80211_local *local;
        struct ieee80211_sub_if_data *sdata;
-       int ret, i, ac;
-       struct tid_ampdu_tx *tid_tx;
+       int ret, i;
 
        might_sleep();
 
@@ -674,7 +738,7 @@ int __must_check __sta_info_destroy(struct sta_info *sta)
         * will be sufficient.
         */
        set_sta_flag(sta, WLAN_STA_BLOCK_BA);
-       ieee80211_sta_tear_down_BA_sessions(sta, true);
+       ieee80211_sta_tear_down_BA_sessions(sta, false);
 
        ret = sta_info_hash_del(local, sta);
        if (ret)
@@ -711,65 +775,14 @@ int __must_check __sta_info_destroy(struct sta_info *sta)
                WARN_ON_ONCE(ret != 0);
        }
 
-       /*
-        * At this point, after we wait for an RCU grace period,
-        * neither mac80211 nor the driver can reference this
-        * sta struct any more except by still existing timers
-        * associated with this station that we clean up below.
-        */
-       synchronize_rcu();
-
-       if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
-               BUG_ON(!sdata->bss);
-
-               clear_sta_flag(sta, WLAN_STA_PS_STA);
-
-               atomic_dec(&sdata->bss->num_sta_ps);
-               sta_info_recalc_tim(sta);
-       }
-
-       for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
-               local->total_ps_buffered -= skb_queue_len(&sta->ps_tx_buf[ac]);
-               __skb_queue_purge(&sta->ps_tx_buf[ac]);
-               __skb_queue_purge(&sta->tx_filtered[ac]);
-       }
-
-#ifdef CONFIG_MAC80211_MESH
-       if (ieee80211_vif_is_mesh(&sdata->vif))
-               mesh_accept_plinks_update(sdata);
-#endif
-
        sta_dbg(sdata, "Removed STA %pM\n", sta->sta.addr);
 
-       cancel_work_sync(&sta->drv_unblock_wk);
-
        cfg80211_del_sta(sdata->dev, sta->sta.addr, GFP_KERNEL);
 
        rate_control_remove_sta_debugfs(sta);
        ieee80211_sta_debugfs_remove(sta);
 
-#ifdef CONFIG_MAC80211_MESH
-       if (ieee80211_vif_is_mesh(&sta->sdata->vif)) {
-               mesh_plink_deactivate(sta);
-               del_timer_sync(&sta->plink_timer);
-       }
-#endif
-
-       /*
-        * Destroy aggregation state here. It would be nice to wait for the
-        * driver to finish aggregation stop and then clean up, but for now
-        * drivers have to handle aggregation stop being requested, followed
-        * directly by station destruction.
-        */
-       for (i = 0; i < STA_TID_NUM; i++) {
-               tid_tx = rcu_dereference_raw(sta->ampdu_mlme.tid_tx[i]);
-               if (!tid_tx)
-                       continue;
-               __skb_queue_purge(&tid_tx->pending);
-               kfree(tid_tx);
-       }
-
-       sta_info_free(local, sta);
+       call_rcu(&sta->rcu_head, free_sta_rcu);
 
        return 0;
 }
index a470e1123a5576ed5e14b779ed4a9213cda407b7..c88f161f81185a678335fe3df6201fb8e64c8d80 100644 (file)
@@ -287,6 +287,7 @@ struct sta_ampdu_mlme {
 struct sta_info {
        /* General information, mostly static */
        struct list_head list;
+       struct rcu_head rcu_head;
        struct sta_info __rcu *hnext;
        struct ieee80211_local *local;
        struct ieee80211_sub_if_data *sdata;
@@ -297,6 +298,7 @@ struct sta_info {
        spinlock_t lock;
 
        struct work_struct drv_unblock_wk;
+       struct work_struct free_sta_wk;
 
        u16 listen_interval;
 
index 8cd72914cdaf2f3540268920b1ab0969f687d330..2ce89732d0f21755939699b55e3382ac98baeaca 100644 (file)
@@ -517,21 +517,41 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
 
        if (info->flags & IEEE80211_TX_INTFL_NL80211_FRAME_TX) {
                u64 cookie = (unsigned long)skb;
+               bool found = false;
+
                acked = info->flags & IEEE80211_TX_STAT_ACK;
 
-               /*
-                * TODO: When we have non-netdev frame TX,
-                * we cannot use skb->dev->ieee80211_ptr
-                */
+               rcu_read_lock();
+
+               list_for_each_entry_rcu(sdata, &local->interfaces, list) {
+                       if (!sdata->dev)
+                               continue;
+
+                       if (skb->dev != sdata->dev)
+                               continue;
 
-               if (ieee80211_is_nullfunc(hdr->frame_control) ||
-                   ieee80211_is_qos_nullfunc(hdr->frame_control))
-                       cfg80211_probe_status(skb->dev, hdr->addr1,
+                       found = true;
+                       break;
+               }
+
+               if (!skb->dev) {
+                       sdata = rcu_dereference(local->p2p_sdata);
+                       if (sdata)
+                               found = true;
+               }
+
+               if (!found)
+                       skb->dev = NULL;
+               else if (ieee80211_is_nullfunc(hdr->frame_control) ||
+                        ieee80211_is_qos_nullfunc(hdr->frame_control)) {
+                       cfg80211_probe_status(sdata->dev, hdr->addr1,
                                              cookie, acked, GFP_ATOMIC);
-               else
-                       cfg80211_mgmt_tx_status(
-                               skb->dev->ieee80211_ptr, cookie, skb->data,
-                               skb->len, acked, GFP_ATOMIC);
+               } else {
+                       cfg80211_mgmt_tx_status(&sdata->wdev, cookie, skb->data,
+                                               skb->len, acked, GFP_ATOMIC);
+               }
+
+               rcu_read_unlock();
        }
 
        if (unlikely(info->ack_frame_id)) {
index c6d33b55b2dfd51602d7fc40dcbbbb0ef6a0a451..18d9c8a52e9e72d98686778bb7bcab974119e638 100644 (file)
@@ -24,7 +24,7 @@
                        __string(vif_name, sdata->dev ? sdata->dev->name : "<nodev>")
 #define VIF_ASSIGN     __entry->vif_type = sdata->vif.type; __entry->sdata = sdata;    \
                        __entry->p2p = sdata->vif.p2p;                                  \
-                       __assign_str(vif_name, sdata->dev ? sdata->dev->name : "<nodev>")
+                       __assign_str(vif_name, sdata->dev ? sdata->dev->name : sdata->name)
 #define VIF_PR_FMT     " vif:%s(%d%s)"
 #define VIF_PR_ARG     __get_str(vif_name), __entry->vif_type, __entry->p2p ? "/p2p" : ""
 
@@ -274,9 +274,12 @@ TRACE_EVENT(drv_config,
                __entry->dynamic_ps_timeout = local->hw.conf.dynamic_ps_timeout;
                __entry->max_sleep_period = local->hw.conf.max_sleep_period;
                __entry->listen_interval = local->hw.conf.listen_interval;
-               __entry->long_frame_max_tx_count = local->hw.conf.long_frame_max_tx_count;
-               __entry->short_frame_max_tx_count = local->hw.conf.short_frame_max_tx_count;
-               __entry->center_freq = local->hw.conf.channel->center_freq;
+               __entry->long_frame_max_tx_count =
+                       local->hw.conf.long_frame_max_tx_count;
+               __entry->short_frame_max_tx_count =
+                       local->hw.conf.short_frame_max_tx_count;
+               __entry->center_freq = local->hw.conf.channel ?
+                                       local->hw.conf.channel->center_freq : 0;
                __entry->channel_type = local->hw.conf.channel_type;
                __entry->smps = local->hw.conf.smps_mode;
        ),
index c5e8c9c31f7687d9922d0011ea1b31e8244ea8d2..e0e0d1d0e8301d4a8803b7af811e24d752347a88 100644 (file)
@@ -55,7 +55,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx,
        if (WARN_ON_ONCE(info->control.rates[0].idx < 0))
                return 0;
 
-       sband = local->hw.wiphy->bands[tx->channel->band];
+       sband = local->hw.wiphy->bands[info->band];
        txrate = &sband->bitrates[info->control.rates[0].idx];
 
        erp = txrate->flags & IEEE80211_RATE_ERP_G;
@@ -580,7 +580,7 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
                                tx->key = NULL;
                        else
                                skip_hw = (tx->key->conf.flags &
-                                          IEEE80211_KEY_FLAG_SW_MGMT) &&
+                                          IEEE80211_KEY_FLAG_SW_MGMT_TX) &&
                                        ieee80211_is_mgmt(hdr->frame_control);
                        break;
                case WLAN_CIPHER_SUITE_AES_CMAC:
@@ -615,7 +615,7 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
 
        memset(&txrc, 0, sizeof(txrc));
 
-       sband = tx->local->hw.wiphy->bands[tx->channel->band];
+       sband = tx->local->hw.wiphy->bands[info->band];
 
        len = min_t(u32, tx->skb->len + FCS_LEN,
                         tx->local->hw.wiphy->frag_threshold);
@@ -626,13 +626,13 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
        txrc.bss_conf = &tx->sdata->vif.bss_conf;
        txrc.skb = tx->skb;
        txrc.reported_rate.idx = -1;
-       txrc.rate_idx_mask = tx->sdata->rc_rateidx_mask[tx->channel->band];
+       txrc.rate_idx_mask = tx->sdata->rc_rateidx_mask[info->band];
        if (txrc.rate_idx_mask == (1 << sband->n_bitrates) - 1)
                txrc.max_rate_idx = -1;
        else
                txrc.max_rate_idx = fls(txrc.rate_idx_mask) - 1;
        memcpy(txrc.rate_idx_mcs_mask,
-              tx->sdata->rc_rateidx_mcs_mask[tx->channel->band],
+              tx->sdata->rc_rateidx_mcs_mask[info->band],
               sizeof(txrc.rate_idx_mcs_mask));
        txrc.bss = (tx->sdata->vif.type == NL80211_IFTYPE_AP ||
                    tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT ||
@@ -667,7 +667,7 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
                 "scanning and associated. Target station: "
                 "%pM on %d GHz band\n",
                 tx->sdata->name, hdr->addr1,
-                tx->channel->band ? 5 : 2))
+                info->band ? 5 : 2))
                return TX_DROP;
 
        /*
@@ -1131,7 +1131,6 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
        tx->skb = skb;
        tx->local = local;
        tx->sdata = sdata;
-       tx->channel = local->hw.conf.channel;
        __skb_queue_head_init(&tx->skbs);
 
        /*
@@ -1204,6 +1203,7 @@ static bool ieee80211_tx_frags(struct ieee80211_local *local,
                               struct sk_buff_head *skbs,
                               bool txpending)
 {
+       struct ieee80211_tx_control control;
        struct sk_buff *skb, *tmp;
        unsigned long flags;
 
@@ -1240,10 +1240,10 @@ static bool ieee80211_tx_frags(struct ieee80211_local *local,
                spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
 
                info->control.vif = vif;
-               info->control.sta = sta;
+               control.sta = sta;
 
                __skb_unlink(skb, skbs);
-               drv_tx(local, skb);
+               drv_tx(local, &control, skb);
        }
 
        return true;
@@ -1399,8 +1399,7 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
                goto out;
        }
 
-       tx.channel = local->hw.conf.channel;
-       info->band = tx.channel->band;
+       info->band = local->hw.conf.channel->band;
 
        /* set up hw_queue value early */
        if (!(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) ||
@@ -1720,7 +1719,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
        struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_tx_info *info;
-       int ret = NETDEV_TX_BUSY, head_need;
+       int head_need;
        u16 ethertype, hdrlen,  meshhdrlen = 0;
        __le16 fc;
        struct ieee80211_hdr hdr;
@@ -1736,10 +1735,8 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
        u32 info_flags = 0;
        u16 info_id = 0;
 
-       if (unlikely(skb->len < ETH_HLEN)) {
-               ret = NETDEV_TX_OK;
+       if (unlikely(skb->len < ETH_HLEN))
                goto fail;
-       }
 
        /* convert Ethernet header to proper 802.11 header (based on
         * operation mode) */
@@ -1787,7 +1784,6 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
                if (!sdata->u.mesh.mshcfg.dot11MeshTTL) {
                        /* Do not send frames with mesh_ttl == 0 */
                        sdata->u.mesh.mshstats.dropped_frames_ttl++;
-                       ret = NETDEV_TX_OK;
                        goto fail;
                }
                rcu_read_lock();
@@ -1874,10 +1870,8 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
 
                if (tdls_direct) {
                        /* link during setup - throw out frames to peer */
-                       if (!tdls_auth) {
-                               ret = NETDEV_TX_OK;
+                       if (!tdls_auth)
                                goto fail;
-                       }
 
                        /* DA SA BSSID */
                        memcpy(hdr.addr1, skb->data, ETH_ALEN);
@@ -1911,7 +1905,6 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
                hdrlen = 24;
                break;
        default:
-               ret = NETDEV_TX_OK;
                goto fail;
        }
 
@@ -1956,7 +1949,6 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
 
                I802_DEBUG_INC(local->tx_handlers_drop_unauth_port);
 
-               ret = NETDEV_TX_OK;
                goto fail;
        }
 
@@ -2011,10 +2003,8 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
                skb = skb_clone(skb, GFP_ATOMIC);
                kfree_skb(tmp_skb);
 
-               if (!skb) {
-                       ret = NETDEV_TX_OK;
+               if (!skb)
                        goto fail;
-               }
        }
 
        hdr.frame_control = fc;
@@ -2117,10 +2107,8 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
        return NETDEV_TX_OK;
 
  fail:
-       if (ret == NETDEV_TX_OK)
-               dev_kfree_skb(skb);
-
-       return ret;
+       dev_kfree_skb(skb);
+       return NETDEV_TX_OK;
 }
 
 
@@ -2295,12 +2283,9 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
        struct ieee80211_sub_if_data *sdata = NULL;
        struct ieee80211_if_ap *ap = NULL;
        struct beacon_data *beacon;
-       struct ieee80211_supported_band *sband;
-       enum ieee80211_band band = local->hw.conf.channel->band;
+       enum ieee80211_band band = local->oper_channel->band;
        struct ieee80211_tx_rate_control txrc;
 
-       sband = local->hw.wiphy->bands[band];
-
        rcu_read_lock();
 
        sdata = vif_to_sdata(vif);
@@ -2410,7 +2395,7 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
                memset(mgmt, 0, hdr_len);
                mgmt->frame_control =
                    cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_BEACON);
-               memset(mgmt->da, 0xff, ETH_ALEN);
+               eth_broadcast_addr(mgmt->da);
                memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
                memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
                mgmt->u.beacon.beacon_int =
@@ -2422,9 +2407,9 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
                *pos++ = WLAN_EID_SSID;
                *pos++ = 0x0;
 
-               if (ieee80211_add_srates_ie(sdata, skb, true) ||
+               if (ieee80211_add_srates_ie(sdata, skb, true, band) ||
                    mesh_add_ds_params_ie(skb, sdata) ||
-                   ieee80211_add_ext_srates_ie(sdata, skb, true) ||
+                   ieee80211_add_ext_srates_ie(sdata, skb, true, band) ||
                    mesh_add_rsn_ie(skb, sdata) ||
                    mesh_add_ht_cap_ie(skb, sdata) ||
                    mesh_add_ht_oper_ie(skb, sdata) ||
@@ -2447,12 +2432,12 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
 
        memset(&txrc, 0, sizeof(txrc));
        txrc.hw = hw;
-       txrc.sband = sband;
+       txrc.sband = local->hw.wiphy->bands[band];
        txrc.bss_conf = &sdata->vif.bss_conf;
        txrc.skb = skb;
        txrc.reported_rate.idx = -1;
        txrc.rate_idx_mask = sdata->rc_rateidx_mask[band];
-       if (txrc.rate_idx_mask == (1 << sband->n_bitrates) - 1)
+       if (txrc.rate_idx_mask == (1 << txrc.sband->n_bitrates) - 1)
                txrc.max_rate_idx = -1;
        else
                txrc.max_rate_idx = fls(txrc.rate_idx_mask) - 1;
@@ -2476,7 +2461,8 @@ struct sk_buff *ieee80211_proberesp_get(struct ieee80211_hw *hw,
                                        struct ieee80211_vif *vif)
 {
        struct ieee80211_if_ap *ap = NULL;
-       struct sk_buff *presp = NULL, *skb = NULL;
+       struct sk_buff *skb = NULL;
+       struct probe_resp *presp = NULL;
        struct ieee80211_hdr *hdr;
        struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
 
@@ -2490,10 +2476,12 @@ struct sk_buff *ieee80211_proberesp_get(struct ieee80211_hw *hw,
        if (!presp)
                goto out;
 
-       skb = skb_copy(presp, GFP_ATOMIC);
+       skb = dev_alloc_skb(presp->len);
        if (!skb)
                goto out;
 
+       memcpy(skb_put(skb, presp->len), presp->data, presp->len);
+
        hdr = (struct ieee80211_hdr *) skb->data;
        memset(hdr->addr1, 0, sizeof(hdr->addr1));
 
@@ -2604,9 +2592,9 @@ struct sk_buff *ieee80211_probereq_get(struct ieee80211_hw *hw,
        memset(hdr, 0, sizeof(*hdr));
        hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
                                         IEEE80211_STYPE_PROBE_REQ);
-       memset(hdr->addr1, 0xff, ETH_ALEN);
+       eth_broadcast_addr(hdr->addr1);
        memcpy(hdr->addr2, vif->addr, ETH_ALEN);
-       memset(hdr->addr3, 0xff, ETH_ALEN);
+       eth_broadcast_addr(hdr->addr3);
 
        pos = skb_put(skb, ie_ssid_len);
        *pos++ = WLAN_EID_SSID;
@@ -2703,8 +2691,7 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
        info = IEEE80211_SKB_CB(skb);
 
        tx.flags |= IEEE80211_TX_PS_BUFFERED;
-       tx.channel = local->hw.conf.channel;
-       info->band = tx.channel->band;
+       info->band = local->oper_channel->band;
 
        if (invoke_tx_handlers(&tx))
                skb = NULL;
index 39b82fee4904784c87a635026e8ae0b7e2a4318d..22ca35054dd065753b9e7d6c4f3e5ab990903e83 100644 (file)
@@ -276,6 +276,9 @@ void ieee80211_propagate_queue_wake(struct ieee80211_local *local, int queue)
        list_for_each_entry_rcu(sdata, &local->interfaces, list) {
                int ac;
 
+               if (!sdata->dev)
+                       continue;
+
                if (test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state))
                        continue;
 
@@ -364,6 +367,9 @@ static void __ieee80211_stop_queue(struct ieee80211_hw *hw, int queue,
        list_for_each_entry_rcu(sdata, &local->interfaces, list) {
                int ac;
 
+               if (!sdata->dev)
+                       continue;
+
                for (ac = 0; ac < n_acs; ac++) {
                        if (sdata->vif.hw_queue[ac] == queue ||
                            sdata->vif.cab_queue == queue)
@@ -768,8 +774,11 @@ u32 ieee802_11_parse_elems_crc(u8 *start, size_t len,
                                elem_parse_failed = true;
                        break;
                case WLAN_EID_CHANNEL_SWITCH:
-                       elems->ch_switch_elem = pos;
-                       elems->ch_switch_elem_len = elen;
+                       if (elen != sizeof(struct ieee80211_channel_sw_ie)) {
+                               elem_parse_failed = true;
+                               break;
+                       }
+                       elems->ch_switch_ie = (void *)pos;
                        break;
                case WLAN_EID_QUIET:
                        if (!elems->quiet_elem) {
@@ -783,8 +792,11 @@ u32 ieee802_11_parse_elems_crc(u8 *start, size_t len,
                        elems->country_elem_len = elen;
                        break;
                case WLAN_EID_PWR_CONSTRAINT:
+                       if (elen != 1) {
+                               elem_parse_failed = true;
+                               break;
+                       }
                        elems->pwr_constr_elem = pos;
-                       elems->pwr_constr_elem_len = elen;
                        break;
                case WLAN_EID_TIMEOUT_INTERVAL:
                        elems->timeout_int = pos;
@@ -832,7 +844,7 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata,
 
        memset(&qparam, 0, sizeof(qparam));
 
-       use_11b = (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ) &&
+       use_11b = (local->oper_channel->band == IEEE80211_BAND_2GHZ) &&
                 !(sdata->flags & IEEE80211_SDATA_OPERATING_GMODE);
 
        /*
@@ -899,7 +911,8 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata,
                drv_conf_tx(local, sdata, ac, &qparam);
        }
 
-       if (sdata->vif.type != NL80211_IFTYPE_MONITOR) {
+       if (sdata->vif.type != NL80211_IFTYPE_MONITOR &&
+           sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE) {
                sdata->vif.bss_conf.qos = enable_qos;
                if (bss_notify)
                        ieee80211_bss_info_change_notify(sdata,
@@ -919,7 +932,7 @@ void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata,
                if ((supp_rates[i] & 0x7f) * 5 > 110)
                        have_higher_than_11mbit = 1;
 
-       if (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ &&
+       if (local->oper_channel->band == IEEE80211_BAND_2GHZ &&
            have_higher_than_11mbit)
                sdata->flags |= IEEE80211_SDATA_OPERATING_GMODE;
        else
@@ -994,6 +1007,45 @@ void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
        ieee80211_tx_skb(sdata, skb);
 }
 
+void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
+                                   const u8 *bssid, u16 stype, u16 reason,
+                                   bool send_frame, u8 *frame_buf)
+{
+       struct ieee80211_local *local = sdata->local;
+       struct sk_buff *skb;
+       struct ieee80211_mgmt *mgmt = (void *)frame_buf;
+
+       /* build frame */
+       mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | stype);
+       mgmt->duration = 0; /* initialize only */
+       mgmt->seq_ctrl = 0; /* initialize only */
+       memcpy(mgmt->da, bssid, ETH_ALEN);
+       memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
+       memcpy(mgmt->bssid, bssid, ETH_ALEN);
+       /* u.deauth.reason_code == u.disassoc.reason_code */
+       mgmt->u.deauth.reason_code = cpu_to_le16(reason);
+
+       if (send_frame) {
+               skb = dev_alloc_skb(local->hw.extra_tx_headroom +
+                                   IEEE80211_DEAUTH_FRAME_LEN);
+               if (!skb)
+                       return;
+
+               skb_reserve(skb, local->hw.extra_tx_headroom);
+
+               /* copy in frame */
+               memcpy(skb_put(skb, IEEE80211_DEAUTH_FRAME_LEN),
+                      mgmt, IEEE80211_DEAUTH_FRAME_LEN);
+
+               if (sdata->vif.type != NL80211_IFTYPE_STATION ||
+                   !(sdata->u.mgd.flags & IEEE80211_STA_MFP_ENABLED))
+                       IEEE80211_SKB_CB(skb)->flags |=
+                               IEEE80211_TX_INTFL_DONT_ENCRYPT;
+
+               ieee80211_tx_skb(sdata, skb);
+       }
+}
+
 int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
                             const u8 *ie, size_t ie_len,
                             enum ieee80211_band band, u32 rate_mask,
@@ -1100,6 +1152,7 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
 
 struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
                                          u8 *dst, u32 ratemask,
+                                         struct ieee80211_channel *chan,
                                          const u8 *ssid, size_t ssid_len,
                                          const u8 *ie, size_t ie_len,
                                          bool directed)
@@ -1109,7 +1162,7 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
        struct ieee80211_mgmt *mgmt;
        size_t buf_len;
        u8 *buf;
-       u8 chan;
+       u8 chan_no;
 
        /* FIXME: come up with a proper value */
        buf = kmalloc(200 + ie_len, GFP_KERNEL);
@@ -1122,14 +1175,12 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
         * badly-behaved APs don't respond when this parameter is included.
         */
        if (directed)
-               chan = 0;
+               chan_no = 0;
        else
-               chan = ieee80211_frequency_to_channel(
-                       local->hw.conf.channel->center_freq);
+               chan_no = ieee80211_frequency_to_channel(chan->center_freq);
 
-       buf_len = ieee80211_build_preq_ies(local, buf, ie, ie_len,
-                                          local->hw.conf.channel->band,
-                                          ratemask, chan);
+       buf_len = ieee80211_build_preq_ies(local, buf, ie, ie_len, chan->band,
+                                          ratemask, chan_no);
 
        skb = ieee80211_probereq_get(&local->hw, &sdata->vif,
                                     ssid, ssid_len,
@@ -1154,11 +1205,13 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
 void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
                              const u8 *ssid, size_t ssid_len,
                              const u8 *ie, size_t ie_len,
-                             u32 ratemask, bool directed, bool no_cck)
+                             u32 ratemask, bool directed, bool no_cck,
+                             struct ieee80211_channel *channel)
 {
        struct sk_buff *skb;
 
-       skb = ieee80211_build_probe_req(sdata, dst, ratemask, ssid, ssid_len,
+       skb = ieee80211_build_probe_req(sdata, dst, ratemask, channel,
+                                       ssid, ssid_len,
                                        ie, ie_len, directed);
        if (skb) {
                if (no_cck)
@@ -1359,7 +1412,8 @@ int ieee80211_reconfig(struct ieee80211_local *local)
                switch (sdata->vif.type) {
                case NL80211_IFTYPE_STATION:
                        changed |= BSS_CHANGED_ASSOC |
-                                  BSS_CHANGED_ARP_FILTER;
+                                  BSS_CHANGED_ARP_FILTER |
+                                  BSS_CHANGED_PS;
                        mutex_lock(&sdata->u.mgd.mtx);
                        ieee80211_bss_info_change_notify(sdata, changed);
                        mutex_unlock(&sdata->u.mgd.mtx);
@@ -1385,6 +1439,9 @@ int ieee80211_reconfig(struct ieee80211_local *local)
                case NL80211_IFTYPE_MONITOR:
                        /* ignore virtual */
                        break;
+               case NL80211_IFTYPE_P2P_DEVICE:
+                       changed = BSS_CHANGED_IDLE;
+                       break;
                case NL80211_IFTYPE_UNSPECIFIED:
                case NUM_NL80211_IFTYPES:
                case NL80211_IFTYPE_P2P_CLIENT:
@@ -1549,14 +1606,13 @@ static int check_mgd_smps(struct ieee80211_if_managed *ifmgd,
        return 0;
 }
 
-/* must hold iflist_mtx */
 void ieee80211_recalc_smps(struct ieee80211_local *local)
 {
        struct ieee80211_sub_if_data *sdata;
        enum ieee80211_smps_mode smps_mode = IEEE80211_SMPS_OFF;
        int count = 0;
 
-       lockdep_assert_held(&local->iflist_mtx);
+       mutex_lock(&local->iflist_mtx);
 
        /*
         * This function could be improved to handle multiple
@@ -1571,6 +1627,8 @@ void ieee80211_recalc_smps(struct ieee80211_local *local)
        list_for_each_entry(sdata, &local->interfaces, list) {
                if (!ieee80211_sdata_running(sdata))
                        continue;
+               if (sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE)
+                       continue;
                if (sdata->vif.type != NL80211_IFTYPE_STATION)
                        goto set;
 
@@ -1583,12 +1641,14 @@ void ieee80211_recalc_smps(struct ieee80211_local *local)
        }
 
        if (smps_mode == local->smps_mode)
-               return;
+               goto unlock;
 
  set:
        local->smps_mode = smps_mode;
        /* changed flag is auto-detected for this */
        ieee80211_hw_config(local, 0);
+ unlock:
+       mutex_unlock(&local->iflist_mtx);
 }
 
 static bool ieee80211_id_in_list(const u8 *ids, int n_ids, u8 id)
@@ -1809,7 +1869,8 @@ ieee80211_ht_oper_to_channel_type(struct ieee80211_ht_operation *ht_oper)
 }
 
 int ieee80211_add_srates_ie(struct ieee80211_sub_if_data *sdata,
-                           struct sk_buff *skb, bool need_basic)
+                           struct sk_buff *skb, bool need_basic,
+                           enum ieee80211_band band)
 {
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_supported_band *sband;
@@ -1817,7 +1878,7 @@ int ieee80211_add_srates_ie(struct ieee80211_sub_if_data *sdata,
        u8 i, rates, *pos;
        u32 basic_rates = sdata->vif.bss_conf.basic_rates;
 
-       sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
+       sband = local->hw.wiphy->bands[band];
        rates = sband->n_bitrates;
        if (rates > 8)
                rates = 8;
@@ -1840,7 +1901,8 @@ int ieee80211_add_srates_ie(struct ieee80211_sub_if_data *sdata,
 }
 
 int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata,
-                               struct sk_buff *skb, bool need_basic)
+                               struct sk_buff *skb, bool need_basic,
+                               enum ieee80211_band band)
 {
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_supported_band *sband;
@@ -1848,7 +1910,7 @@ int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata,
        u8 i, exrates, *pos;
        u32 basic_rates = sdata->vif.bss_conf.basic_rates;
 
-       sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
+       sband = local->hw.wiphy->bands[band];
        exrates = sband->n_bitrates;
        if (exrates > 8)
                exrates -= 8;
index c19b214ffd571776434c2400eb1d906fc35d2589..fefa514b99170aae55620e4007565b0ad3e814b4 100644 (file)
@@ -356,6 +356,55 @@ config NETFILTER_NETLINK_QUEUE_CT
          If this option is enabled, NFQUEUE can include Connection Tracking
          information together with the packet is the enqueued via NFNETLINK.
 
+config NF_NAT
+       tristate
+
+config NF_NAT_NEEDED
+       bool
+       depends on NF_NAT
+       default y
+
+config NF_NAT_PROTO_DCCP
+       tristate
+       depends on NF_NAT && NF_CT_PROTO_DCCP
+       default NF_NAT && NF_CT_PROTO_DCCP
+
+config NF_NAT_PROTO_UDPLITE
+       tristate
+       depends on NF_NAT && NF_CT_PROTO_UDPLITE
+       default NF_NAT && NF_CT_PROTO_UDPLITE
+
+config NF_NAT_PROTO_SCTP
+       tristate
+       default NF_NAT && NF_CT_PROTO_SCTP
+       depends on NF_NAT && NF_CT_PROTO_SCTP
+       select LIBCRC32C
+
+config NF_NAT_AMANDA
+       tristate
+       depends on NF_CONNTRACK && NF_NAT
+       default NF_NAT && NF_CONNTRACK_AMANDA
+
+config NF_NAT_FTP
+       tristate
+       depends on NF_CONNTRACK && NF_NAT
+       default NF_NAT && NF_CONNTRACK_FTP
+
+config NF_NAT_IRC
+       tristate
+       depends on NF_CONNTRACK && NF_NAT
+       default NF_NAT && NF_CONNTRACK_IRC
+
+config NF_NAT_SIP
+       tristate
+       depends on NF_CONNTRACK && NF_NAT
+       default NF_NAT && NF_CONNTRACK_SIP
+
+config NF_NAT_TFTP
+       tristate
+       depends on NF_CONNTRACK && NF_NAT
+       default NF_NAT && NF_CONNTRACK_TFTP
+
 endif # NF_CONNTRACK
 
 # transparent proxy support
@@ -599,6 +648,16 @@ config NETFILTER_XT_TARGET_MARK
        (e.g. when running oldconfig). It selects
        CONFIG_NETFILTER_XT_MARK (combined mark/MARK module).
 
+config NETFILTER_XT_TARGET_NETMAP
+       tristate '"NETMAP" target support'
+       depends on NF_NAT
+       ---help---
+       NETMAP is an implementation of static 1:1 NAT mapping of network
+       addresses. It maps the network address part, while keeping the host
+       address part intact.
+
+       To compile it as a module, choose M here. If unsure, say N.
+
 config NETFILTER_XT_TARGET_NFLOG
        tristate '"NFLOG" target support'
        default m if NETFILTER_ADVANCED=n
@@ -621,19 +680,6 @@ config NETFILTER_XT_TARGET_NFQUEUE
 
          To compile it as a module, choose M here.  If unsure, say N.
 
-config NETFILTER_XT_TARGET_NOTRACK
-       tristate  '"NOTRACK" target support'
-       depends on IP_NF_RAW || IP6_NF_RAW
-       depends on NF_CONNTRACK
-       help
-         The NOTRACK target allows a select rule to specify
-         which packets *not* to enter the conntrack/NAT
-         subsystem with all the consequences (no ICMP error tracking,
-         no protocol helpers for the selected packets).
-
-         If you want to compile it as a module, say M here and read
-         <file:Documentation/kbuild/modules.txt>.  If unsure, say `N'.
-
 config NETFILTER_XT_TARGET_RATEEST
        tristate '"RATEEST" target support'
        depends on NETFILTER_ADVANCED
@@ -644,6 +690,17 @@ config NETFILTER_XT_TARGET_RATEEST
 
          To compile it as a module, choose M here.  If unsure, say N.
 
+config NETFILTER_XT_TARGET_REDIRECT
+       tristate "REDIRECT target support"
+       depends on NF_NAT
+       ---help---
+       REDIRECT is a special case of NAT: all incoming connections are
+       mapped onto the incoming interface's address, causing the packets to
+       come to the local machine instead of passing through. This is
+       useful for transparent proxies.
+
+       To compile it as a module, choose M here. If unsure, say N.
+
 config NETFILTER_XT_TARGET_TEE
        tristate '"TEE" - packet cloning to alternate destination'
        depends on NETFILTER_ADVANCED
index 1c5160f2278e6306f4bcb0de34e349c7c8efc60c..32596978df1d9bb0b92f1b0b1b81c4adf7204c94 100644 (file)
@@ -43,6 +43,23 @@ obj-$(CONFIG_NF_CONNTRACK_SANE) += nf_conntrack_sane.o
 obj-$(CONFIG_NF_CONNTRACK_SIP) += nf_conntrack_sip.o
 obj-$(CONFIG_NF_CONNTRACK_TFTP) += nf_conntrack_tftp.o
 
+nf_nat-y       := nf_nat_core.o nf_nat_proto_unknown.o nf_nat_proto_common.o \
+                  nf_nat_proto_udp.o nf_nat_proto_tcp.o nf_nat_helper.o
+
+obj-$(CONFIG_NF_NAT) += nf_nat.o
+
+# NAT protocols (nf_nat)
+obj-$(CONFIG_NF_NAT_PROTO_DCCP) += nf_nat_proto_dccp.o
+obj-$(CONFIG_NF_NAT_PROTO_UDPLITE) += nf_nat_proto_udplite.o
+obj-$(CONFIG_NF_NAT_PROTO_SCTP) += nf_nat_proto_sctp.o
+
+# NAT helpers
+obj-$(CONFIG_NF_NAT_AMANDA) += nf_nat_amanda.o
+obj-$(CONFIG_NF_NAT_FTP) += nf_nat_ftp.o
+obj-$(CONFIG_NF_NAT_IRC) += nf_nat_irc.o
+obj-$(CONFIG_NF_NAT_SIP) += nf_nat_sip.o
+obj-$(CONFIG_NF_NAT_TFTP) += nf_nat_tftp.o
+
 # transparent proxy support
 obj-$(CONFIG_NETFILTER_TPROXY) += nf_tproxy_core.o
 
@@ -53,6 +70,7 @@ obj-$(CONFIG_NETFILTER_XTABLES) += x_tables.o xt_tcpudp.o
 obj-$(CONFIG_NETFILTER_XT_MARK) += xt_mark.o
 obj-$(CONFIG_NETFILTER_XT_CONNMARK) += xt_connmark.o
 obj-$(CONFIG_NETFILTER_XT_SET) += xt_set.o
+obj-$(CONFIG_NF_NAT) += xt_nat.o
 
 # targets
 obj-$(CONFIG_NETFILTER_XT_TARGET_AUDIT) += xt_AUDIT.o
@@ -65,10 +83,11 @@ obj-$(CONFIG_NETFILTER_XT_TARGET_HL) += xt_HL.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_HMARK) += xt_HMARK.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_LED) += xt_LED.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_LOG) += xt_LOG.o
+obj-$(CONFIG_NETFILTER_XT_TARGET_NETMAP) += xt_NETMAP.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_NFLOG) += xt_NFLOG.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_NFQUEUE) += xt_NFQUEUE.o
-obj-$(CONFIG_NETFILTER_XT_TARGET_NOTRACK) += xt_NOTRACK.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_RATEEST) += xt_RATEEST.o
+obj-$(CONFIG_NETFILTER_XT_TARGET_REDIRECT) += xt_REDIRECT.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_SECMARK) += xt_SECMARK.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_TPROXY) += xt_TPROXY.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_TCPMSS) += xt_TCPMSS.o
index 0bc6b60db4df2fda1a7cd2f1b57747f655d046a3..68912dadf13d15c0354bec108039176b9659c75a 100644 (file)
@@ -126,7 +126,7 @@ unsigned int nf_iterate(struct list_head *head,
                        unsigned int hook,
                        const struct net_device *indev,
                        const struct net_device *outdev,
-                       struct list_head **i,
+                       struct nf_hook_ops **elemp,
                        int (*okfn)(struct sk_buff *),
                        int hook_thresh)
 {
@@ -136,22 +136,20 @@ unsigned int nf_iterate(struct list_head *head,
         * The caller must not block between calls to this
         * function because of risk of continuing from deleted element.
         */
-       list_for_each_continue_rcu(*i, head) {
-               struct nf_hook_ops *elem = (struct nf_hook_ops *)*i;
-
-               if (hook_thresh > elem->priority)
+       list_for_each_entry_continue_rcu((*elemp), head, list) {
+               if (hook_thresh > (*elemp)->priority)
                        continue;
 
                /* Optimization: we don't need to hold module
                   reference here, since function can't sleep. --RR */
 repeat:
-               verdict = elem->hook(hook, skb, indev, outdev, okfn);
+               verdict = (*elemp)->hook(hook, skb, indev, outdev, okfn);
                if (verdict != NF_ACCEPT) {
 #ifdef CONFIG_NETFILTER_DEBUG
                        if (unlikely((verdict & NF_VERDICT_MASK)
                                                        > NF_MAX_VERDICT)) {
                                NFDEBUG("Evil return from %p(%u).\n",
-                                       elem->hook, hook);
+                                       (*elemp)->hook, hook);
                                continue;
                        }
 #endif
@@ -172,14 +170,14 @@ int nf_hook_slow(u_int8_t pf, unsigned int hook, struct sk_buff *skb,
                 int (*okfn)(struct sk_buff *),
                 int hook_thresh)
 {
-       struct list_head *elem;
+       struct nf_hook_ops *elem;
        unsigned int verdict;
        int ret = 0;
 
        /* We may already have this, but read-locks nest anyway */
        rcu_read_lock();
 
-       elem = &nf_hooks[pf][hook];
+       elem = list_entry_rcu(&nf_hooks[pf][hook], struct nf_hook_ops, list);
 next_hook:
        verdict = nf_iterate(&nf_hooks[pf][hook], skb, hook, indev,
                             outdev, &elem, okfn, hook_thresh);
@@ -273,6 +271,11 @@ EXPORT_SYMBOL_GPL(nfq_ct_nat_hook);
 
 #endif /* CONFIG_NF_CONNTRACK */
 
+#ifdef CONFIG_NF_NAT_NEEDED
+void (*nf_nat_decode_session_hook)(struct sk_buff *, struct flowi *);
+EXPORT_SYMBOL(nf_nat_decode_session_hook);
+#endif
+
 #ifdef CONFIG_PROC_FS
 struct proc_dir_entry *proc_net_netfilter;
 EXPORT_SYMBOL(proc_net_netfilter);
index 7e1b061aeeba4c14cb45785b2975b353ab18a314..4a92fd47bd4cec1a6726b297e78e604f7d9e61e5 100644 (file)
 #define IP_SET_BITMAP_TIMEOUT
 #include <linux/netfilter/ipset/ip_set_timeout.h>
 
+#define REVISION_MIN   0
+#define REVISION_MAX   0
+
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-MODULE_DESCRIPTION("bitmap:ip type of IP sets");
+IP_SET_MODULE_DESC("bitmap:ip", REVISION_MIN, REVISION_MAX);
 MODULE_ALIAS("ip_set_bitmap:ip");
 
 /* Type structure */
@@ -284,7 +287,7 @@ bitmap_ip_uadt(struct ip_set *set, struct nlattr *tb[],
        } else if (tb[IPSET_ATTR_CIDR]) {
                u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
 
-               if (cidr > 32)
+               if (!cidr || cidr > 32)
                        return -IPSET_ERR_INVALID_CIDR;
                ip_set_mask_from_to(ip, ip_to, cidr);
        } else
@@ -454,7 +457,8 @@ static int
 bitmap_ip_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
 {
        struct bitmap_ip *map;
-       u32 first_ip, last_ip, hosts, elements;
+       u32 first_ip, last_ip, hosts;
+       u64 elements;
        u8 netmask = 32;
        int ret;
 
@@ -497,7 +501,7 @@ bitmap_ip_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
 
        if (netmask == 32) {
                hosts = 1;
-               elements = last_ip - first_ip + 1;
+               elements = (u64)last_ip - first_ip + 1;
        } else {
                u8 mask_bits;
                u32 mask;
@@ -515,7 +519,8 @@ bitmap_ip_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
        if (elements > IPSET_BITMAP_MAX_RANGE + 1)
                return -IPSET_ERR_BITMAP_RANGE_SIZE;
 
-       pr_debug("hosts %u, elements %u\n", hosts, elements);
+       pr_debug("hosts %u, elements %llu\n",
+                hosts, (unsigned long long)elements);
 
        map = kzalloc(sizeof(*map), GFP_KERNEL);
        if (!map)
@@ -554,8 +559,8 @@ static struct ip_set_type bitmap_ip_type __read_mostly = {
        .features       = IPSET_TYPE_IP,
        .dimension      = IPSET_DIM_ONE,
        .family         = NFPROTO_IPV4,
-       .revision_min   = 0,
-       .revision_max   = 0,
+       .revision_min   = REVISION_MIN,
+       .revision_max   = REVISION_MAX,
        .create         = bitmap_ip_create,
        .create_policy  = {
                [IPSET_ATTR_IP]         = { .type = NLA_NESTED },
index d7eaf10edb6d311f26a0b1e2bceac856c1085ae8..0f92dc24cb894bfa0674c2b3f6dca970d7615c64 100644 (file)
 #include <linux/netfilter/ipset/ip_set_timeout.h>
 #include <linux/netfilter/ipset/ip_set_bitmap.h>
 
+#define REVISION_MIN   0
+#define REVISION_MAX   0
+
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-MODULE_DESCRIPTION("bitmap:ip,mac type of IP sets");
+IP_SET_MODULE_DESC("bitmap:ip,mac", REVISION_MIN, REVISION_MAX);
 MODULE_ALIAS("ip_set_bitmap:ip,mac");
 
 enum {
@@ -320,11 +323,11 @@ bitmap_ipmac_tlist(const struct ip_set *set,
                    (elem->match == MAC_FILLED &&
                     nla_put(skb, IPSET_ATTR_ETHER, ETH_ALEN,
                             elem->ether)))
-                   goto nla_put_failure;
+                       goto nla_put_failure;
                timeout = elem->match == MAC_UNSET ? elem->timeout
                                : ip_set_timeout_get(elem->timeout);
                if (nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(timeout)))
-                   goto nla_put_failure;
+                       goto nla_put_failure;
                ipset_nest_end(skb, nested);
        }
        ipset_nest_end(skb, atd);
@@ -557,7 +560,8 @@ static int
 bitmap_ipmac_create(struct ip_set *set, struct nlattr *tb[],
                    u32 flags)
 {
-       u32 first_ip, last_ip, elements;
+       u32 first_ip, last_ip;
+       u64 elements;
        struct bitmap_ipmac *map;
        int ret;
 
@@ -588,7 +592,7 @@ bitmap_ipmac_create(struct ip_set *set, struct nlattr *tb[],
        } else
                return -IPSET_ERR_PROTOCOL;
 
-       elements = last_ip - first_ip + 1;
+       elements = (u64)last_ip - first_ip + 1;
 
        if (elements > IPSET_BITMAP_MAX_RANGE + 1)
                return -IPSET_ERR_BITMAP_RANGE_SIZE;
@@ -629,8 +633,8 @@ static struct ip_set_type bitmap_ipmac_type = {
        .features       = IPSET_TYPE_IP | IPSET_TYPE_MAC,
        .dimension      = IPSET_DIM_TWO,
        .family         = NFPROTO_IPV4,
-       .revision_min   = 0,
-       .revision_max   = 0,
+       .revision_min   = REVISION_MIN,
+       .revision_max   = REVISION_MAX,
        .create         = bitmap_ipmac_create,
        .create_policy  = {
                [IPSET_ATTR_IP]         = { .type = NLA_NESTED },
index b9f1fce7053b29d9fb761d634a581d2d8b8ef1d2..e6b2db76f4c3faeb4f4c5570a19a9cea0a852f0e 100644 (file)
 #define IP_SET_BITMAP_TIMEOUT
 #include <linux/netfilter/ipset/ip_set_timeout.h>
 
+#define REVISION_MIN   0
+#define REVISION_MAX   0
+
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-MODULE_DESCRIPTION("bitmap:port type of IP sets");
+IP_SET_MODULE_DESC("bitmap:port", REVISION_MIN, REVISION_MAX);
 MODULE_ALIAS("ip_set_bitmap:port");
 
 /* Type structure */
@@ -487,8 +490,8 @@ static struct ip_set_type bitmap_port_type = {
        .features       = IPSET_TYPE_PORT,
        .dimension      = IPSET_DIM_ONE,
        .family         = NFPROTO_UNSPEC,
-       .revision_min   = 0,
-       .revision_max   = 0,
+       .revision_min   = REVISION_MIN,
+       .revision_max   = REVISION_MAX,
        .create         = bitmap_port_create,
        .create_policy  = {
                [IPSET_ATTR_PORT]       = { .type = NLA_U16 },
index 9730882697aaedbab0beee66f0f12a654b97b63a..778465f217fa975c39af6fb3d7429214dbfe2a51 100644 (file)
@@ -69,7 +69,8 @@ find_set_type(const char *name, u8 family, u8 revision)
 
        list_for_each_entry_rcu(type, &ip_set_type_list, list)
                if (STREQ(type->name, name) &&
-                   (type->family == family || type->family == NFPROTO_UNSPEC) &&
+                   (type->family == family ||
+                    type->family == NFPROTO_UNSPEC) &&
                    revision >= type->revision_min &&
                    revision <= type->revision_max)
                        return type;
@@ -149,7 +150,8 @@ __find_set_type_minmax(const char *name, u8 family, u8 *min, u8 *max,
        rcu_read_lock();
        list_for_each_entry_rcu(type, &ip_set_type_list, list)
                if (STREQ(type->name, name) &&
-                   (type->family == family || type->family == NFPROTO_UNSPEC)) {
+                   (type->family == family ||
+                    type->family == NFPROTO_UNSPEC)) {
                        found = true;
                        if (type->revision_min < *min)
                                *min = type->revision_min;
@@ -368,6 +370,12 @@ ip_set_test(ip_set_id_t index, const struct sk_buff *skb,
                set->variant->kadt(set, skb, par, IPSET_ADD, opt);
                write_unlock_bh(&set->lock);
                ret = 1;
+       } else {
+               /* --return-nomatch: invert matched element */
+               if ((opt->flags & IPSET_RETURN_NOMATCH) &&
+                   (set->type->features & IPSET_TYPE_NOMATCH) &&
+                   (ret > 0 || ret == -ENOTEMPTY))
+                       ret = -ret;
        }
 
        /* Convert error codes to nomatch */
@@ -563,13 +571,13 @@ flag_exist(const struct nlmsghdr *nlh)
 }
 
 static struct nlmsghdr *
-start_msg(struct sk_buff *skb, u32 pid, u32 seq, unsigned int flags,
+start_msg(struct sk_buff *skb, u32 portid, u32 seq, unsigned int flags,
          enum ipset_cmd cmd)
 {
        struct nlmsghdr *nlh;
        struct nfgenmsg *nfmsg;
 
-       nlh = nlmsg_put(skb, pid, seq, cmd | (NFNL_SUBSYS_IPSET << 8),
+       nlh = nlmsg_put(skb, portid, seq, cmd | (NFNL_SUBSYS_IPSET << 8),
                        sizeof(*nfmsg), flags);
        if (nlh == NULL)
                return NULL;
@@ -721,7 +729,8 @@ ip_set_create(struct sock *ctnl, struct sk_buff *skb,
         * by the nfnl mutex. Find the first free index in ip_set_list
         * and check clashing.
         */
-       if ((ret = find_free_id(set->name, &index, &clash)) != 0) {
+       ret = find_free_id(set->name, &index, &clash);
+       if (ret != 0) {
                /* If this is the same set and requested, ignore error */
                if (ret == -EEXIST &&
                    (flags & IPSET_FLAG_EXIST) &&
@@ -1045,7 +1054,7 @@ ip_set_dump_start(struct sk_buff *skb, struct netlink_callback *cb)
        ip_set_id_t index = IPSET_INVALID_ID, max;
        struct ip_set *set = NULL;
        struct nlmsghdr *nlh = NULL;
-       unsigned int flags = NETLINK_CB(cb->skb).pid ? NLM_F_MULTI : 0;
+       unsigned int flags = NETLINK_CB(cb->skb).portid ? NLM_F_MULTI : 0;
        u32 dump_type, dump_flags;
        int ret = 0;
 
@@ -1093,7 +1102,7 @@ dump_last:
                        pr_debug("reference set\n");
                        __ip_set_get(index);
                }
-               nlh = start_msg(skb, NETLINK_CB(cb->skb).pid,
+               nlh = start_msg(skb, NETLINK_CB(cb->skb).portid,
                                cb->nlh->nlmsg_seq, flags,
                                IPSET_CMD_LIST);
                if (!nlh) {
@@ -1226,7 +1235,7 @@ call_ad(struct sock *ctnl, struct sk_buff *skb, struct ip_set *set,
                skb2 = nlmsg_new(payload, GFP_KERNEL);
                if (skb2 == NULL)
                        return -ENOMEM;
-               rep = __nlmsg_put(skb2, NETLINK_CB(skb).pid,
+               rep = __nlmsg_put(skb2, NETLINK_CB(skb).portid,
                                  nlh->nlmsg_seq, NLMSG_ERROR, payload, 0);
                errmsg = nlmsg_data(rep);
                errmsg->error = ret;
@@ -1241,7 +1250,7 @@ call_ad(struct sock *ctnl, struct sk_buff *skb, struct ip_set *set,
 
                *errline = lineno;
 
-               netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
+               netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
                /* Signal netlink not to send its ACK/errmsg.  */
                return -EINTR;
        }
@@ -1416,7 +1425,7 @@ ip_set_header(struct sock *ctnl, struct sk_buff *skb,
        if (skb2 == NULL)
                return -ENOMEM;
 
-       nlh2 = start_msg(skb2, NETLINK_CB(skb).pid, nlh->nlmsg_seq, 0,
+       nlh2 = start_msg(skb2, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
                         IPSET_CMD_HEADER);
        if (!nlh2)
                goto nlmsg_failure;
@@ -1428,7 +1437,7 @@ ip_set_header(struct sock *ctnl, struct sk_buff *skb,
                goto nla_put_failure;
        nlmsg_end(skb2, nlh2);
 
-       ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
+       ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
        if (ret < 0)
                return ret;
 
@@ -1476,7 +1485,7 @@ ip_set_type(struct sock *ctnl, struct sk_buff *skb,
        if (skb2 == NULL)
                return -ENOMEM;
 
-       nlh2 = start_msg(skb2, NETLINK_CB(skb).pid, nlh->nlmsg_seq, 0,
+       nlh2 = start_msg(skb2, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
                         IPSET_CMD_TYPE);
        if (!nlh2)
                goto nlmsg_failure;
@@ -1489,7 +1498,7 @@ ip_set_type(struct sock *ctnl, struct sk_buff *skb,
        nlmsg_end(skb2, nlh2);
 
        pr_debug("Send TYPE, nlmsg_len: %u\n", nlh2->nlmsg_len);
-       ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
+       ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
        if (ret < 0)
                return ret;
 
@@ -1525,7 +1534,7 @@ ip_set_protocol(struct sock *ctnl, struct sk_buff *skb,
        if (skb2 == NULL)
                return -ENOMEM;
 
-       nlh2 = start_msg(skb2, NETLINK_CB(skb).pid, nlh->nlmsg_seq, 0,
+       nlh2 = start_msg(skb2, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
                         IPSET_CMD_PROTOCOL);
        if (!nlh2)
                goto nlmsg_failure;
@@ -1533,7 +1542,7 @@ ip_set_protocol(struct sock *ctnl, struct sk_buff *skb,
                goto nla_put_failure;
        nlmsg_end(skb2, nlh2);
 
-       ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
+       ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
        if (ret < 0)
                return ret;
 
index a68dbd4f1e4e4404d25d6b71e321122fd4fae66a..ec3dba5dcd62f081c1749fb60f2aa71cd31d11ad 100644 (file)
 #include <linux/netfilter/ipset/ip_set_timeout.h>
 #include <linux/netfilter/ipset/ip_set_hash.h>
 
+#define REVISION_MIN   0
+#define REVISION_MAX   0
+
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-MODULE_DESCRIPTION("hash:ip type of IP sets");
+IP_SET_MODULE_DESC("hash:ip", REVISION_MIN, REVISION_MAX);
 MODULE_ALIAS("ip_set_hash:ip");
 
 /* Type specific function prefix */
@@ -114,7 +117,7 @@ nla_put_failure:
 static inline void
 hash_ip4_data_next(struct ip_set_hash *h, const struct hash_ip4_elem *d)
 {
-       h->next.ip = ntohl(d->ip);
+       h->next.ip = d->ip;
 }
 
 static int
@@ -179,7 +182,7 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[],
        } else if (tb[IPSET_ATTR_CIDR]) {
                u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
 
-               if (cidr > 32)
+               if (!cidr || cidr > 32)
                        return -IPSET_ERR_INVALID_CIDR;
                ip_set_mask_from_to(ip, ip_to, cidr);
        } else
@@ -188,7 +191,7 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[],
        hosts = h->netmask == 32 ? 1 : 2 << (32 - h->netmask - 1);
 
        if (retried)
-               ip = h->next.ip;
+               ip = ntohl(h->next.ip);
        for (; !before(ip_to, ip); ip += hosts) {
                nip = htonl(ip);
                if (nip == 0)
@@ -452,8 +455,8 @@ static struct ip_set_type hash_ip_type __read_mostly = {
        .features       = IPSET_TYPE_IP,
        .dimension      = IPSET_DIM_ONE,
        .family         = NFPROTO_UNSPEC,
-       .revision_min   = 0,
-       .revision_max   = 0,
+       .revision_min   = REVISION_MIN,
+       .revision_max   = REVISION_MAX,
        .create         = hash_ip_create,
        .create_policy  = {
                [IPSET_ATTR_HASHSIZE]   = { .type = NLA_U32 },
index 92722bb82eea65cde13c77ce78be136fca2a8b94..0171f7502fa58d035fcda2361a6c4968acf09b7b 100644 (file)
 #include <linux/netfilter/ipset/ip_set_getport.h>
 #include <linux/netfilter/ipset/ip_set_hash.h>
 
+#define REVISION_MIN   0
+#define REVISION_MAX   1 /* SCTP and UDPLITE support added */
+
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-MODULE_DESCRIPTION("hash:ip,port type of IP sets");
+IP_SET_MODULE_DESC("hash:ip,port", REVISION_MIN, REVISION_MAX);
 MODULE_ALIAS("ip_set_hash:ip,port");
 
 /* Type specific function prefix */
@@ -130,8 +133,8 @@ static inline void
 hash_ipport4_data_next(struct ip_set_hash *h,
                       const struct hash_ipport4_elem *d)
 {
-       h->next.ip = ntohl(d->ip);
-       h->next.port = ntohs(d->port);
+       h->next.ip = d->ip;
+       h->next.port = d->port;
 }
 
 static int
@@ -217,7 +220,7 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
        } else if (tb[IPSET_ATTR_CIDR]) {
                u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
 
-               if (cidr > 32)
+               if (!cidr || cidr > 32)
                        return -IPSET_ERR_INVALID_CIDR;
                ip_set_mask_from_to(ip, ip_to, cidr);
        } else
@@ -231,9 +234,10 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
        }
 
        if (retried)
-               ip = h->next.ip;
+               ip = ntohl(h->next.ip);
        for (; !before(ip_to, ip); ip++) {
-               p = retried && ip == h->next.ip ? h->next.port : port;
+               p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port)
+                                                      : port;
                for (; p <= port_to; p++) {
                        data.ip = htonl(ip);
                        data.port = htons(p);
@@ -349,7 +353,7 @@ static inline void
 hash_ipport6_data_next(struct ip_set_hash *h,
                       const struct hash_ipport6_elem *d)
 {
-       h->next.port = ntohs(d->port);
+       h->next.port = d->port;
 }
 
 static int
@@ -431,7 +435,7 @@ hash_ipport6_uadt(struct ip_set *set, struct nlattr *tb[],
                swap(port, port_to);
 
        if (retried)
-               port = h->next.port;
+               port = ntohs(h->next.port);
        for (; port <= port_to; port++) {
                data.port = htons(port);
                ret = adtfn(set, &data, timeout, flags);
@@ -522,8 +526,8 @@ static struct ip_set_type hash_ipport_type __read_mostly = {
        .features       = IPSET_TYPE_IP | IPSET_TYPE_PORT,
        .dimension      = IPSET_DIM_TWO,
        .family         = NFPROTO_UNSPEC,
-       .revision_min   = 0,
-       .revision_max   = 1,    /* SCTP and UDPLITE support added */
+       .revision_min   = REVISION_MIN,
+       .revision_max   = REVISION_MAX,
        .create         = hash_ipport_create,
        .create_policy  = {
                [IPSET_ATTR_HASHSIZE]   = { .type = NLA_U32 },
index 0637ce096def10e4faad6ef18003e263c8e8b77c..6344ef551ec811208b79ddc54c89a1270c2419cd 100644 (file)
 #include <linux/netfilter/ipset/ip_set_getport.h>
 #include <linux/netfilter/ipset/ip_set_hash.h>
 
+#define REVISION_MIN   0
+#define REVISION_MAX   1 /* SCTP and UDPLITE support added */
+
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-MODULE_DESCRIPTION("hash:ip,port,ip type of IP sets");
+IP_SET_MODULE_DESC("hash:ip,port,ip", REVISION_MIN, REVISION_MAX);
 MODULE_ALIAS("ip_set_hash:ip,port,ip");
 
 /* Type specific function prefix */
@@ -133,8 +136,8 @@ static inline void
 hash_ipportip4_data_next(struct ip_set_hash *h,
                         const struct hash_ipportip4_elem *d)
 {
-       h->next.ip = ntohl(d->ip);
-       h->next.port = ntohs(d->port);
+       h->next.ip = d->ip;
+       h->next.port = d->port;
 }
 
 static int
@@ -225,7 +228,7 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
        } else if (tb[IPSET_ATTR_CIDR]) {
                u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
 
-               if (cidr > 32)
+               if (!cidr || cidr > 32)
                        return -IPSET_ERR_INVALID_CIDR;
                ip_set_mask_from_to(ip, ip_to, cidr);
        } else
@@ -239,9 +242,10 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
        }
 
        if (retried)
-               ip = h->next.ip;
+               ip = ntohl(h->next.ip);
        for (; !before(ip_to, ip); ip++) {
-               p = retried && ip == h->next.ip ? h->next.port : port;
+               p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port)
+                                                      : port;
                for (; p <= port_to; p++) {
                        data.ip = htonl(ip);
                        data.port = htons(p);
@@ -362,7 +366,7 @@ static inline void
 hash_ipportip6_data_next(struct ip_set_hash *h,
                         const struct hash_ipportip6_elem *d)
 {
-       h->next.port = ntohs(d->port);
+       h->next.port = d->port;
 }
 
 static int
@@ -449,7 +453,7 @@ hash_ipportip6_uadt(struct ip_set *set, struct nlattr *tb[],
                swap(port, port_to);
 
        if (retried)
-               port = h->next.port;
+               port = ntohs(h->next.port);
        for (; port <= port_to; port++) {
                data.port = htons(port);
                ret = adtfn(set, &data, timeout, flags);
@@ -540,8 +544,8 @@ static struct ip_set_type hash_ipportip_type __read_mostly = {
        .features       = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_IP2,
        .dimension      = IPSET_DIM_THREE,
        .family         = NFPROTO_UNSPEC,
-       .revision_min   = 0,
-       .revision_max   = 1,    /* SCTP and UDPLITE support added */
+       .revision_min   = REVISION_MIN,
+       .revision_max   = REVISION_MAX,
        .create         = hash_ipportip_create,
        .create_policy  = {
                [IPSET_ATTR_HASHSIZE]   = { .type = NLA_U32 },
index 1ce21ca976e17bb077f1e790677cc7af99f7c7aa..cb71f9a774e7d50d67998aaaadc199a563f43f40 100644 (file)
 #include <linux/netfilter/ipset/ip_set_getport.h>
 #include <linux/netfilter/ipset/ip_set_hash.h>
 
+#define REVISION_MIN   0
+/*                     1    SCTP and UDPLITE support added */
+/*                     2    Range as input support for IPv4 added */
+#define REVISION_MAX   3 /* nomatch flag support added */
+
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-MODULE_DESCRIPTION("hash:ip,port,net type of IP sets");
+IP_SET_MODULE_DESC("hash:ip,port,net", REVISION_MIN, REVISION_MAX);
 MODULE_ALIAS("ip_set_hash:ip,port,net");
 
 /* Type specific function prefix */
@@ -99,10 +104,10 @@ hash_ipportnet4_data_flags(struct hash_ipportnet4_elem *dst, u32 flags)
        dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH);
 }
 
-static inline bool
+static inline int
 hash_ipportnet4_data_match(const struct hash_ipportnet4_elem *elem)
 {
-       return !elem->nomatch;
+       return elem->nomatch ? -ENOTEMPTY : 1;
 }
 
 static inline void
@@ -173,9 +178,9 @@ static inline void
 hash_ipportnet4_data_next(struct ip_set_hash *h,
                          const struct hash_ipportnet4_elem *d)
 {
-       h->next.ip = ntohl(d->ip);
-       h->next.port = ntohs(d->port);
-       h->next.ip2 = ntohl(d->ip2);
+       h->next.ip = d->ip;
+       h->next.port = d->port;
+       h->next.ip2 = d->ip2;
 }
 
 static int
@@ -290,7 +295,7 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
        } else if (tb[IPSET_ATTR_CIDR]) {
                u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
 
-               if (cidr > 32)
+               if (!cidr || cidr > 32)
                        return -IPSET_ERR_INVALID_CIDR;
                ip_set_mask_from_to(ip, ip_to, cidr);
        }
@@ -314,14 +319,17 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
        }
 
        if (retried)
-               ip = h->next.ip;
+               ip = ntohl(h->next.ip);
        for (; !before(ip_to, ip); ip++) {
                data.ip = htonl(ip);
-               p = retried && ip == h->next.ip ? h->next.port : port;
+               p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port)
+                                                      : port;
                for (; p <= port_to; p++) {
                        data.port = htons(p);
-                       ip2 = retried && ip == h->next.ip && p == h->next.port
-                               ? h->next.ip2 : ip2_from;
+                       ip2 = retried
+                             && ip == ntohl(h->next.ip)
+                             && p == ntohs(h->next.port)
+                               ? ntohl(h->next.ip2) : ip2_from;
                        while (!after(ip2, ip2_to)) {
                                data.ip2 = htonl(ip2);
                                ip2_last = ip_set_range_to_cidr(ip2, ip2_to,
@@ -403,10 +411,10 @@ hash_ipportnet6_data_flags(struct hash_ipportnet6_elem *dst, u32 flags)
        dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH);
 }
 
-static inline bool
+static inline int
 hash_ipportnet6_data_match(const struct hash_ipportnet6_elem *elem)
 {
-       return !elem->nomatch;
+       return elem->nomatch ? -ENOTEMPTY : 1;
 }
 
 static inline void
@@ -486,7 +494,7 @@ static inline void
 hash_ipportnet6_data_next(struct ip_set_hash *h,
                          const struct hash_ipportnet6_elem *d)
 {
-       h->next.port = ntohs(d->port);
+       h->next.port = d->port;
 }
 
 static int
@@ -598,7 +606,7 @@ hash_ipportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
                swap(port, port_to);
 
        if (retried)
-               port = h->next.port;
+               port = ntohs(h->next.port);
        for (; port <= port_to; port++) {
                data.port = htons(port);
                ret = adtfn(set, &data, timeout, flags);
@@ -689,13 +697,12 @@ hash_ipportnet_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
 static struct ip_set_type hash_ipportnet_type __read_mostly = {
        .name           = "hash:ip,port,net",
        .protocol       = IPSET_PROTOCOL,
-       .features       = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_IP2,
+       .features       = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_IP2 |
+                         IPSET_TYPE_NOMATCH,
        .dimension      = IPSET_DIM_THREE,
        .family         = NFPROTO_UNSPEC,
-       .revision_min   = 0,
-       /*                1        SCTP and UDPLITE support added */
-       /*                2        Range as input support for IPv4 added */
-       .revision_max   = 3,    /* nomatch flag support added */
+       .revision_min   = REVISION_MIN,
+       .revision_max   = REVISION_MAX,
        .create         = hash_ipportnet_create,
        .create_policy  = {
                [IPSET_ATTR_HASHSIZE]   = { .type = NLA_U32 },
index c57a6a09906d7df3338f900d2b3b7321f487b103..29e94b981f3f3fe4853a9683a1cbecdfc9d7ec42 100644 (file)
 #include <linux/netfilter/ipset/ip_set_timeout.h>
 #include <linux/netfilter/ipset/ip_set_hash.h>
 
+#define REVISION_MIN   0
+/*                     1    Range as input support for IPv4 added */
+#define REVISION_MAX   2 /* nomatch flag support added */
+
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-MODULE_DESCRIPTION("hash:net type of IP sets");
+IP_SET_MODULE_DESC("hash:net", REVISION_MIN, REVISION_MAX);
 MODULE_ALIAS("ip_set_hash:net");
 
 /* Type specific function prefix */
@@ -86,10 +90,10 @@ hash_net4_data_flags(struct hash_net4_elem *dst, u32 flags)
        dst->nomatch = flags & IPSET_FLAG_NOMATCH;
 }
 
-static inline bool
+static inline int
 hash_net4_data_match(const struct hash_net4_elem *elem)
 {
-       return !elem->nomatch;
+       return elem->nomatch ? -ENOTEMPTY : 1;
 }
 
 static inline void
@@ -152,7 +156,7 @@ static inline void
 hash_net4_data_next(struct ip_set_hash *h,
                    const struct hash_net4_elem *d)
 {
-       h->next.ip = ntohl(d->ip);
+       h->next.ip = d->ip;
 }
 
 static int
@@ -235,7 +239,7 @@ hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
                        return -IPSET_ERR_HASH_RANGE;
        }
        if (retried)
-               ip = h->next.ip;
+               ip = ntohl(h->next.ip);
        while (!after(ip, ip_to)) {
                data.ip = htonl(ip);
                last = ip_set_range_to_cidr(ip, ip_to, &data.cidr);
@@ -307,10 +311,10 @@ hash_net6_data_flags(struct hash_net6_elem *dst, u32 flags)
        dst->nomatch = flags & IPSET_FLAG_NOMATCH;
 }
 
-static inline bool
+static inline int
 hash_net6_data_match(const struct hash_net6_elem *elem)
 {
-       return !elem->nomatch;
+       return elem->nomatch ? -ENOTEMPTY : 1;
 }
 
 static inline void
@@ -532,12 +536,11 @@ hash_net_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
 static struct ip_set_type hash_net_type __read_mostly = {
        .name           = "hash:net",
        .protocol       = IPSET_PROTOCOL,
-       .features       = IPSET_TYPE_IP,
+       .features       = IPSET_TYPE_IP | IPSET_TYPE_NOMATCH,
        .dimension      = IPSET_DIM_ONE,
        .family         = NFPROTO_UNSPEC,
-       .revision_min   = 0,
-       /*              = 1        Range as input support for IPv4 added */
-       .revision_max   = 2,    /* nomatch flag support added */
+       .revision_min   = REVISION_MIN,
+       .revision_max   = REVISION_MAX,
        .create         = hash_net_create,
        .create_policy  = {
                [IPSET_ATTR_HASHSIZE]   = { .type = NLA_U32 },
index d5d3607ae7bcf5e9704bd189217115cf048aee4b..b9a63381e34998e08ab5271f7d82cca9058a76d8 100644 (file)
 #include <linux/netfilter/ipset/ip_set_timeout.h>
 #include <linux/netfilter/ipset/ip_set_hash.h>
 
+#define REVISION_MIN   0
+/*                     1    nomatch flag support added */
+#define REVISION_MAX   2 /* /0 support added */
+
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-MODULE_DESCRIPTION("hash:net,iface type of IP sets");
+IP_SET_MODULE_DESC("hash:net,iface", REVISION_MIN, REVISION_MAX);
 MODULE_ALIAS("ip_set_hash:net,iface");
 
 /* Interface name rbtree */
@@ -140,7 +144,7 @@ struct hash_netiface4_elem_hashed {
        u8 physdev;
        u8 cidr;
        u8 nomatch;
-       u8 padding;
+       u8 elem;
 };
 
 #define HKEY_DATALEN   sizeof(struct hash_netiface4_elem_hashed)
@@ -151,7 +155,7 @@ struct hash_netiface4_elem {
        u8 physdev;
        u8 cidr;
        u8 nomatch;
-       u8 padding;
+       u8 elem;
        const char *iface;
 };
 
@@ -161,7 +165,7 @@ struct hash_netiface4_telem {
        u8 physdev;
        u8 cidr;
        u8 nomatch;
-       u8 padding;
+       u8 elem;
        const char *iface;
        unsigned long timeout;
 };
@@ -181,18 +185,14 @@ hash_netiface4_data_equal(const struct hash_netiface4_elem *ip1,
 static inline bool
 hash_netiface4_data_isnull(const struct hash_netiface4_elem *elem)
 {
-       return elem->cidr == 0;
+       return elem->elem == 0;
 }
 
 static inline void
 hash_netiface4_data_copy(struct hash_netiface4_elem *dst,
                         const struct hash_netiface4_elem *src)
 {
-       dst->ip = src->ip;
-       dst->cidr = src->cidr;
-       dst->physdev = src->physdev;
-       dst->iface = src->iface;
-       dst->nomatch = src->nomatch;
+       memcpy(dst, src, sizeof(*dst));
 }
 
 static inline void
@@ -201,10 +201,10 @@ hash_netiface4_data_flags(struct hash_netiface4_elem *dst, u32 flags)
        dst->nomatch = flags & IPSET_FLAG_NOMATCH;
 }
 
-static inline bool
+static inline int
 hash_netiface4_data_match(const struct hash_netiface4_elem *elem)
 {
-       return !elem->nomatch;
+       return elem->nomatch ? -ENOTEMPTY : 1;
 }
 
 static inline void
@@ -217,7 +217,7 @@ hash_netiface4_data_netmask(struct hash_netiface4_elem *elem, u8 cidr)
 static inline void
 hash_netiface4_data_zero_out(struct hash_netiface4_elem *elem)
 {
-       elem->cidr = 0;
+       elem->elem = 0;
 }
 
 static bool
@@ -277,7 +277,7 @@ static inline void
 hash_netiface4_data_next(struct ip_set_hash *h,
                         const struct hash_netiface4_elem *d)
 {
-       h->next.ip = ntohl(d->ip);
+       h->next.ip = d->ip;
 }
 
 static int
@@ -288,7 +288,8 @@ hash_netiface4_kadt(struct ip_set *set, const struct sk_buff *skb,
        struct ip_set_hash *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_netiface4_elem data = {
-               .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK
+               .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK,
+               .elem = 1,
        };
        int ret;
 
@@ -339,7 +340,7 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
 {
        struct ip_set_hash *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
-       struct hash_netiface4_elem data = { .cidr = HOST_MASK };
+       struct hash_netiface4_elem data = { .cidr = HOST_MASK, .elem = 1 };
        u32 ip = 0, ip_to, last;
        u32 timeout = h->timeout;
        char iface[IFNAMSIZ];
@@ -360,7 +361,7 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
 
        if (tb[IPSET_ATTR_CIDR]) {
                data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
-               if (!data.cidr || data.cidr > HOST_MASK)
+               if (data.cidr > HOST_MASK)
                        return -IPSET_ERR_INVALID_CIDR;
        }
 
@@ -389,7 +390,6 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
                if (adt == IPSET_ADD && (cadt_flags & IPSET_FLAG_NOMATCH))
                        flags |= (cadt_flags << 16);
        }
-
        if (adt == IPSET_TEST || !tb[IPSET_ATTR_IP_TO]) {
                data.ip = htonl(ip & ip_set_hostmask(data.cidr));
                ret = adtfn(set, &data, timeout, flags);
@@ -409,7 +409,7 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
        }
 
        if (retried)
-               ip = h->next.ip;
+               ip = ntohl(h->next.ip);
        while (!after(ip, ip_to)) {
                data.ip = htonl(ip);
                last = ip_set_range_to_cidr(ip, ip_to, &data.cidr);
@@ -442,7 +442,7 @@ struct hash_netiface6_elem_hashed {
        u8 physdev;
        u8 cidr;
        u8 nomatch;
-       u8 padding;
+       u8 elem;
 };
 
 #define HKEY_DATALEN   sizeof(struct hash_netiface6_elem_hashed)
@@ -452,7 +452,7 @@ struct hash_netiface6_elem {
        u8 physdev;
        u8 cidr;
        u8 nomatch;
-       u8 padding;
+       u8 elem;
        const char *iface;
 };
 
@@ -461,7 +461,7 @@ struct hash_netiface6_telem {
        u8 physdev;
        u8 cidr;
        u8 nomatch;
-       u8 padding;
+       u8 elem;
        const char *iface;
        unsigned long timeout;
 };
@@ -481,7 +481,7 @@ hash_netiface6_data_equal(const struct hash_netiface6_elem *ip1,
 static inline bool
 hash_netiface6_data_isnull(const struct hash_netiface6_elem *elem)
 {
-       return elem->cidr == 0;
+       return elem->elem == 0;
 }
 
 static inline void
@@ -497,16 +497,16 @@ hash_netiface6_data_flags(struct hash_netiface6_elem *dst, u32 flags)
        dst->nomatch = flags & IPSET_FLAG_NOMATCH;
 }
 
-static inline bool
+static inline int
 hash_netiface6_data_match(const struct hash_netiface6_elem *elem)
 {
-       return !elem->nomatch;
+       return elem->nomatch ? -ENOTEMPTY : 1;
 }
 
 static inline void
 hash_netiface6_data_zero_out(struct hash_netiface6_elem *elem)
 {
-       elem->cidr = 0;
+       elem->elem = 0;
 }
 
 static inline void
@@ -590,7 +590,8 @@ hash_netiface6_kadt(struct ip_set *set, const struct sk_buff *skb,
        struct ip_set_hash *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_netiface6_elem data = {
-               .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK
+               .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK,
+               .elem = 1,
        };
        int ret;
 
@@ -637,7 +638,7 @@ hash_netiface6_uadt(struct ip_set *set, struct nlattr *tb[],
 {
        struct ip_set_hash *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
-       struct hash_netiface6_elem data = { .cidr = HOST_MASK };
+       struct hash_netiface6_elem data = { .cidr = HOST_MASK, .elem = 1 };
        u32 timeout = h->timeout;
        char iface[IFNAMSIZ];
        int ret;
@@ -659,7 +660,7 @@ hash_netiface6_uadt(struct ip_set *set, struct nlattr *tb[],
 
        if (tb[IPSET_ATTR_CIDR])
                data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
-       if (!data.cidr || data.cidr > HOST_MASK)
+       if (data.cidr > HOST_MASK)
                return -IPSET_ERR_INVALID_CIDR;
        ip6_netmask(&data.ip, data.cidr);
 
@@ -773,11 +774,12 @@ hash_netiface_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
 static struct ip_set_type hash_netiface_type __read_mostly = {
        .name           = "hash:net,iface",
        .protocol       = IPSET_PROTOCOL,
-       .features       = IPSET_TYPE_IP | IPSET_TYPE_IFACE,
+       .features       = IPSET_TYPE_IP | IPSET_TYPE_IFACE |
+                         IPSET_TYPE_NOMATCH,
        .dimension      = IPSET_DIM_TWO,
        .family         = NFPROTO_UNSPEC,
-       .revision_min   = 0,
-       .revision_max   = 1,    /* nomatch flag support added */
+       .revision_min   = REVISION_MIN,
+       .revision_max   = REVISION_MAX,
        .create         = hash_netiface_create,
        .create_policy  = {
                [IPSET_ATTR_HASHSIZE]   = { .type = NLA_U32 },
index fc3143a2d41bbdd07747ede2339ecf8407911cc0..7ef700de596c54d09f8873a4498da207bfd5a828 100644 (file)
 #include <linux/netfilter/ipset/ip_set_getport.h>
 #include <linux/netfilter/ipset/ip_set_hash.h>
 
+#define REVISION_MIN   0
+/*                     1    SCTP and UDPLITE support added */
+/*                     2    Range as input support for IPv4 added */
+#define REVISION_MAX   3 /* nomatch flag support added */
+
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-MODULE_DESCRIPTION("hash:net,port type of IP sets");
+IP_SET_MODULE_DESC("hash:net,port", REVISION_MIN, REVISION_MAX);
 MODULE_ALIAS("ip_set_hash:net,port");
 
 /* Type specific function prefix */
@@ -99,10 +104,10 @@ hash_netport4_data_flags(struct hash_netport4_elem *dst, u32 flags)
        dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH);
 }
 
-static inline bool
+static inline int
 hash_netport4_data_match(const struct hash_netport4_elem *elem)
 {
-       return !elem->nomatch;
+       return elem->nomatch ? -ENOTEMPTY : 1;
 }
 
 static inline void
@@ -171,8 +176,8 @@ static inline void
 hash_netport4_data_next(struct ip_set_hash *h,
                        const struct hash_netport4_elem *d)
 {
-       h->next.ip = ntohl(d->ip);
-       h->next.port = ntohs(d->port);
+       h->next.ip = d->ip;
+       h->next.port = d->port;
 }
 
 static int
@@ -289,12 +294,13 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
        }
 
        if (retried)
-               ip = h->next.ip;
+               ip = ntohl(h->next.ip);
        while (!after(ip, ip_to)) {
                data.ip = htonl(ip);
                last = ip_set_range_to_cidr(ip, ip_to, &cidr);
                data.cidr = cidr - 1;
-               p = retried && ip == h->next.ip ? h->next.port : port;
+               p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port)
+                                                      : port;
                for (; p <= port_to; p++) {
                        data.port = htons(p);
                        ret = adtfn(set, &data, timeout, flags);
@@ -369,10 +375,10 @@ hash_netport6_data_flags(struct hash_netport6_elem *dst, u32 flags)
        dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH);
 }
 
-static inline bool
+static inline int
 hash_netport6_data_match(const struct hash_netport6_elem *elem)
 {
-       return !elem->nomatch;
+       return elem->nomatch ? -ENOTEMPTY : 1;
 }
 
 static inline void
@@ -450,7 +456,7 @@ static inline void
 hash_netport6_data_next(struct ip_set_hash *h,
                        const struct hash_netport6_elem *d)
 {
-       h->next.port = ntohs(d->port);
+       h->next.port = d->port;
 }
 
 static int
@@ -554,7 +560,7 @@ hash_netport6_uadt(struct ip_set *set, struct nlattr *tb[],
                swap(port, port_to);
 
        if (retried)
-               port = h->next.port;
+               port = ntohs(h->next.port);
        for (; port <= port_to; port++) {
                data.port = htons(port);
                ret = adtfn(set, &data, timeout, flags);
@@ -644,13 +650,11 @@ hash_netport_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
 static struct ip_set_type hash_netport_type __read_mostly = {
        .name           = "hash:net,port",
        .protocol       = IPSET_PROTOCOL,
-       .features       = IPSET_TYPE_IP | IPSET_TYPE_PORT,
+       .features       = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_NOMATCH,
        .dimension      = IPSET_DIM_TWO,
        .family         = NFPROTO_UNSPEC,
-       .revision_min   = 0,
-       /*                1        SCTP and UDPLITE support added */
-       /*                2,       Range as input support for IPv4 added */
-       .revision_max   = 3,    /* nomatch flag support added */
+       .revision_min   = REVISION_MIN,
+       .revision_max   = REVISION_MAX,
        .create         = hash_netport_create,
        .create_policy  = {
                [IPSET_ATTR_HASHSIZE]   = { .type = NLA_U32 },
index 6cb1225765f952667b0f4c763e635461fcb94ad0..8371c2bac2e4240eb5c4b3f6abd0faa48f6212ce 100644 (file)
 #include <linux/netfilter/ipset/ip_set_timeout.h>
 #include <linux/netfilter/ipset/ip_set_list.h>
 
+#define REVISION_MIN   0
+#define REVISION_MAX   0
+
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-MODULE_DESCRIPTION("list:set type of IP sets");
+IP_SET_MODULE_DESC("list:set", REVISION_MIN, REVISION_MAX);
 MODULE_ALIAS("ip_set_list:set");
 
 /* Member elements without and with timeout */
@@ -579,8 +582,8 @@ static struct ip_set_type list_set_type __read_mostly = {
        .features       = IPSET_TYPE_NAME | IPSET_DUMP_LAST,
        .dimension      = IPSET_DIM_ONE,
        .family         = NFPROTO_UNSPEC,
-       .revision_min   = 0,
-       .revision_max   = 0,
+       .revision_min   = REVISION_MIN,
+       .revision_max   = REVISION_MAX,
        .create         = list_set_create,
        .create_policy  = {
                [IPSET_ATTR_SIZE]       = { .type = NLA_U32 },
index f9871385a65eddca7f9b942f0c83794c5c147339..8b2cffdfdd9985e7397a0b800d5e2c28af20a4b7 100644 (file)
@@ -250,7 +250,8 @@ comment 'IPVS application helper'
 
 config IP_VS_FTP
        tristate "FTP protocol helper"
-        depends on IP_VS_PROTO_TCP && NF_CONNTRACK && NF_NAT
+       depends on IP_VS_PROTO_TCP && NF_CONNTRACK && NF_NAT && \
+               NF_CONNTRACK_FTP
        select IP_VS_NFCT
        ---help---
          FTP is a protocol that transfers IP address and/or port number in
index 64f9e8f13207e94463240b330abc668fd607df70..9713e6e86d472f2e2aefc5c4ebfd8f95a5cda62c 100644 (file)
@@ -180,22 +180,38 @@ register_ip_vs_app_inc(struct net *net, struct ip_vs_app *app, __u16 proto,
 }
 
 
-/*
- *     ip_vs_app registration routine
- */
-int register_ip_vs_app(struct net *net, struct ip_vs_app *app)
+/* Register application for netns */
+struct ip_vs_app *register_ip_vs_app(struct net *net, struct ip_vs_app *app)
 {
        struct netns_ipvs *ipvs = net_ipvs(net);
-       /* increase the module use count */
-       ip_vs_use_count_inc();
+       struct ip_vs_app *a;
+       int err = 0;
+
+       if (!ipvs)
+               return ERR_PTR(-ENOENT);
 
        mutex_lock(&__ip_vs_app_mutex);
 
-       list_add(&app->a_list, &ipvs->app_list);
+       list_for_each_entry(a, &ipvs->app_list, a_list) {
+               if (!strcmp(app->name, a->name)) {
+                       err = -EEXIST;
+                       goto out_unlock;
+               }
+       }
+       a = kmemdup(app, sizeof(*app), GFP_KERNEL);
+       if (!a) {
+               err = -ENOMEM;
+               goto out_unlock;
+       }
+       INIT_LIST_HEAD(&a->incs_list);
+       list_add(&a->a_list, &ipvs->app_list);
+       /* increase the module use count */
+       ip_vs_use_count_inc();
 
+out_unlock:
        mutex_unlock(&__ip_vs_app_mutex);
 
-       return 0;
+       return err ? ERR_PTR(err) : a;
 }
 
 
@@ -205,20 +221,29 @@ int register_ip_vs_app(struct net *net, struct ip_vs_app *app)
  */
 void unregister_ip_vs_app(struct net *net, struct ip_vs_app *app)
 {
-       struct ip_vs_app *inc, *nxt;
+       struct netns_ipvs *ipvs = net_ipvs(net);
+       struct ip_vs_app *a, *anxt, *inc, *nxt;
+
+       if (!ipvs)
+               return;
 
        mutex_lock(&__ip_vs_app_mutex);
 
-       list_for_each_entry_safe(inc, nxt, &app->incs_list, a_list) {
-               ip_vs_app_inc_release(net, inc);
-       }
+       list_for_each_entry_safe(a, anxt, &ipvs->app_list, a_list) {
+               if (app && strcmp(app->name, a->name))
+                       continue;
+               list_for_each_entry_safe(inc, nxt, &a->incs_list, a_list) {
+                       ip_vs_app_inc_release(net, inc);
+               }
 
-       list_del(&app->a_list);
+               list_del(&a->a_list);
+               kfree(a);
 
-       mutex_unlock(&__ip_vs_app_mutex);
+               /* decrease the module use count */
+               ip_vs_use_count_dec();
+       }
 
-       /* decrease the module use count */
-       ip_vs_use_count_dec();
+       mutex_unlock(&__ip_vs_app_mutex);
 }
 
 
@@ -586,5 +611,6 @@ int __net_init ip_vs_app_net_init(struct net *net)
 
 void __net_exit ip_vs_app_net_cleanup(struct net *net)
 {
+       unregister_ip_vs_app(net, NULL /* all */);
        proc_net_remove(net, "ip_vs_app");
 }
index b54eccef40b5cf7ecb74a1c7f1950f48d7823413..58918e20f9d5b038c2181b893b0e0458dbd3a8f2 100644 (file)
@@ -1303,7 +1303,8 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
        struct ip_vs_conn *cp;
        struct ip_vs_protocol *pp;
        struct ip_vs_proto_data *pd;
-       unsigned int offset, ihl, verdict;
+       unsigned int offset, offset2, ihl, verdict;
+       bool ipip;
 
        *related = 1;
 
@@ -1345,6 +1346,21 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
 
        net = skb_net(skb);
 
+       /* Special case for errors for IPIP packets */
+       ipip = false;
+       if (cih->protocol == IPPROTO_IPIP) {
+               if (unlikely(cih->frag_off & htons(IP_OFFSET)))
+                       return NF_ACCEPT;
+               /* Error for our IPIP must arrive at LOCAL_IN */
+               if (!(skb_rtable(skb)->rt_flags & RTCF_LOCAL))
+                       return NF_ACCEPT;
+               offset += cih->ihl * 4;
+               cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph);
+               if (cih == NULL)
+                       return NF_ACCEPT; /* The packet looks wrong, ignore */
+               ipip = true;
+       }
+
        pd = ip_vs_proto_data_get(net, cih->protocol);
        if (!pd)
                return NF_ACCEPT;
@@ -1358,11 +1374,14 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
        IP_VS_DBG_PKT(11, AF_INET, pp, skb, offset,
                      "Checking incoming ICMP for");
 
+       offset2 = offset;
        offset += cih->ihl * 4;
 
        ip_vs_fill_iphdr(AF_INET, cih, &ciph);
-       /* The embedded headers contain source and dest in reverse order */
-       cp = pp->conn_in_get(AF_INET, skb, &ciph, offset, 1);
+       /* The embedded headers contain source and dest in reverse order.
+        * For IPIP this is error for request, not for reply.
+        */
+       cp = pp->conn_in_get(AF_INET, skb, &ciph, offset, ipip ? 0 : 1);
        if (!cp)
                return NF_ACCEPT;
 
@@ -1376,6 +1395,57 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
                goto out;
        }
 
+       if (ipip) {
+               __be32 info = ic->un.gateway;
+
+               /* Update the MTU */
+               if (ic->type == ICMP_DEST_UNREACH &&
+                   ic->code == ICMP_FRAG_NEEDED) {
+                       struct ip_vs_dest *dest = cp->dest;
+                       u32 mtu = ntohs(ic->un.frag.mtu);
+
+                       /* Strip outer IP and ICMP, go to IPIP header */
+                       __skb_pull(skb, ihl + sizeof(_icmph));
+                       offset2 -= ihl + sizeof(_icmph);
+                       skb_reset_network_header(skb);
+                       IP_VS_DBG(12, "ICMP for IPIP %pI4->%pI4: mtu=%u\n",
+                               &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr, mtu);
+                       rcu_read_lock();
+                       ipv4_update_pmtu(skb, dev_net(skb->dev),
+                                        mtu, 0, 0, 0, 0);
+                       rcu_read_unlock();
+                       /* Client uses PMTUD? */
+                       if (!(cih->frag_off & htons(IP_DF)))
+                               goto ignore_ipip;
+                       /* Prefer the resulting PMTU */
+                       if (dest) {
+                               spin_lock(&dest->dst_lock);
+                               if (dest->dst_cache)
+                                       mtu = dst_mtu(dest->dst_cache);
+                               spin_unlock(&dest->dst_lock);
+                       }
+                       if (mtu > 68 + sizeof(struct iphdr))
+                               mtu -= sizeof(struct iphdr);
+                       info = htonl(mtu);
+               }
+               /* Strip outer IP, ICMP and IPIP, go to IP header of
+                * original request.
+                */
+               __skb_pull(skb, offset2);
+               skb_reset_network_header(skb);
+               IP_VS_DBG(12, "Sending ICMP for %pI4->%pI4: t=%u, c=%u, i=%u\n",
+                       &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
+                       ic->type, ic->code, ntohl(info));
+               icmp_send(skb, ic->type, ic->code, info);
+               /* ICMP can be shorter but anyways, account it */
+               ip_vs_out_stats(cp, skb);
+
+ignore_ipip:
+               consume_skb(skb);
+               verdict = NF_STOLEN;
+               goto out;
+       }
+
        /* do the statistics and put it back */
        ip_vs_in_stats(cp, skb);
        if (IPPROTO_TCP == cih->protocol || IPPROTO_UDP == cih->protocol)
index f51013c07b9f4e5a81885f0462d5423ccbc8a44f..7e7198b51c068a7ea10446af4d390a9e6e2044f4 100644 (file)
@@ -539,8 +539,7 @@ static int ip_vs_rs_unhash(struct ip_vs_dest *dest)
         * Remove it from the rs_table table.
         */
        if (!list_empty(&dest->d_list)) {
-               list_del(&dest->d_list);
-               INIT_LIST_HEAD(&dest->d_list);
+               list_del_init(&dest->d_list);
        }
 
        return 1;
@@ -1803,6 +1802,12 @@ static struct ctl_table vs_vars[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
+       {
+               .procname       = "pmtu_disc",
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
 #ifdef CONFIG_IP_VS_DEBUG
        {
                .procname       = "debug_level",
@@ -2933,7 +2938,7 @@ static int ip_vs_genl_dump_service(struct sk_buff *skb,
 {
        void *hdr;
 
-       hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq,
+       hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
                          &ip_vs_genl_family, NLM_F_MULTI,
                          IPVS_CMD_NEW_SERVICE);
        if (!hdr)
@@ -3122,7 +3127,7 @@ static int ip_vs_genl_dump_dest(struct sk_buff *skb, struct ip_vs_dest *dest,
 {
        void *hdr;
 
-       hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq,
+       hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
                          &ip_vs_genl_family, NLM_F_MULTI,
                          IPVS_CMD_NEW_DEST);
        if (!hdr)
@@ -3251,7 +3256,7 @@ static int ip_vs_genl_dump_daemon(struct sk_buff *skb, __be32 state,
                                  struct netlink_callback *cb)
 {
        void *hdr;
-       hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq,
+       hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
                          &ip_vs_genl_family, NLM_F_MULTI,
                          IPVS_CMD_NEW_DAEMON);
        if (!hdr)
@@ -3678,7 +3683,7 @@ static void ip_vs_genl_unregister(void)
  * per netns intit/exit func.
  */
 #ifdef CONFIG_SYSCTL
-int __net_init ip_vs_control_net_init_sysctl(struct net *net)
+static int __net_init ip_vs_control_net_init_sysctl(struct net *net)
 {
        int idx;
        struct netns_ipvs *ipvs = net_ipvs(net);
@@ -3729,6 +3734,8 @@ int __net_init ip_vs_control_net_init_sysctl(struct net *net)
        ipvs->sysctl_sync_retries = clamp_t(int, DEFAULT_SYNC_RETRIES, 0, 3);
        tbl[idx++].data = &ipvs->sysctl_sync_retries;
        tbl[idx++].data = &ipvs->sysctl_nat_icmp_send;
+       ipvs->sysctl_pmtu_disc = 1;
+       tbl[idx++].data = &ipvs->sysctl_pmtu_disc;
 
 
        ipvs->sysctl_hdr = register_net_sysctl(net, "net/ipv4/vs", tbl);
@@ -3746,7 +3753,7 @@ int __net_init ip_vs_control_net_init_sysctl(struct net *net)
        return 0;
 }
 
-void __net_exit ip_vs_control_net_cleanup_sysctl(struct net *net)
+static void __net_exit ip_vs_control_net_cleanup_sysctl(struct net *net)
 {
        struct netns_ipvs *ipvs = net_ipvs(net);
 
@@ -3757,8 +3764,8 @@ void __net_exit ip_vs_control_net_cleanup_sysctl(struct net *net)
 
 #else
 
-int __net_init ip_vs_control_net_init_sysctl(struct net *net) { return 0; }
-void __net_exit ip_vs_control_net_cleanup_sysctl(struct net *net) { }
+static int __net_init ip_vs_control_net_init_sysctl(struct net *net) { return 0; }
+static void __net_exit ip_vs_control_net_cleanup_sysctl(struct net *net) { }
 
 #endif
 
index b20b29c903efdc0a0593f90547661446c58b47e6..4f53a5f04437b4d75c9a12e672ff9beabf69c5c2 100644 (file)
@@ -268,6 +268,7 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
                         * packet.
                         */
                        ret = nf_nat_mangle_tcp_packet(skb, ct, ctinfo,
+                                                      iph->ihl * 4,
                                                       start-data, end-start,
                                                       buf, buf_len);
                        if (ret) {
@@ -441,16 +442,10 @@ static int __net_init __ip_vs_ftp_init(struct net *net)
 
        if (!ipvs)
                return -ENOENT;
-       app = kmemdup(&ip_vs_ftp, sizeof(struct ip_vs_app), GFP_KERNEL);
-       if (!app)
-               return -ENOMEM;
-       INIT_LIST_HEAD(&app->a_list);
-       INIT_LIST_HEAD(&app->incs_list);
-       ipvs->ftp_app = app;
 
-       ret = register_ip_vs_app(net, app);
-       if (ret)
-               goto err_exit;
+       app = register_ip_vs_app(net, &ip_vs_ftp);
+       if (IS_ERR(app))
+               return PTR_ERR(app);
 
        for (i = 0; i < ports_count; i++) {
                if (!ports[i])
@@ -464,9 +459,7 @@ static int __net_init __ip_vs_ftp_init(struct net *net)
        return 0;
 
 err_unreg:
-       unregister_ip_vs_app(net, app);
-err_exit:
-       kfree(ipvs->ftp_app);
+       unregister_ip_vs_app(net, &ip_vs_ftp);
        return ret;
 }
 /*
@@ -474,10 +467,7 @@ err_exit:
  */
 static void __ip_vs_ftp_exit(struct net *net)
 {
-       struct netns_ipvs *ipvs = net_ipvs(net);
-
-       unregister_ip_vs_app(net, ipvs->ftp_app);
-       kfree(ipvs->ftp_app);
+       unregister_ip_vs_app(net, &ip_vs_ftp);
 }
 
 static struct pernet_operations ip_vs_ftp_ops = {
index 65b616ae1716366f6437b9e760b22aabe7f9b4d1..56f6d5d81a7735d33ea7cc777945024850dfe177 100644 (file)
@@ -49,6 +49,7 @@ enum {
        IP_VS_RT_MODE_RDR       = 4, /* Allow redirect from remote daddr to
                                      * local
                                      */
+       IP_VS_RT_MODE_CONNECT   = 8, /* Always bind route to saddr */
 };
 
 /*
@@ -84,6 +85,58 @@ __ip_vs_dst_check(struct ip_vs_dest *dest, u32 rtos)
        return dst;
 }
 
+static inline bool
+__mtu_check_toobig_v6(const struct sk_buff *skb, u32 mtu)
+{
+       if (IP6CB(skb)->frag_max_size) {
+               /* frag_max_size tell us that, this packet have been
+                * defragmented by netfilter IPv6 conntrack module.
+                */
+               if (IP6CB(skb)->frag_max_size > mtu)
+                       return true; /* largest fragment violate MTU */
+       }
+       else if (skb->len > mtu && !skb_is_gso(skb)) {
+               return true; /* Packet size violate MTU size */
+       }
+       return false;
+}
+
+/* Get route to daddr, update *saddr, optionally bind route to saddr */
+static struct rtable *do_output_route4(struct net *net, __be32 daddr,
+                                      u32 rtos, int rt_mode, __be32 *saddr)
+{
+       struct flowi4 fl4;
+       struct rtable *rt;
+       int loop = 0;
+
+       memset(&fl4, 0, sizeof(fl4));
+       fl4.daddr = daddr;
+       fl4.saddr = (rt_mode & IP_VS_RT_MODE_CONNECT) ? *saddr : 0;
+       fl4.flowi4_tos = rtos;
+
+retry:
+       rt = ip_route_output_key(net, &fl4);
+       if (IS_ERR(rt)) {
+               /* Invalid saddr ? */
+               if (PTR_ERR(rt) == -EINVAL && *saddr &&
+                   rt_mode & IP_VS_RT_MODE_CONNECT && !loop) {
+                       *saddr = 0;
+                       flowi4_update_output(&fl4, 0, rtos, daddr, 0);
+                       goto retry;
+               }
+               IP_VS_DBG_RL("ip_route_output error, dest: %pI4\n", &daddr);
+               return NULL;
+       } else if (!*saddr && rt_mode & IP_VS_RT_MODE_CONNECT && fl4.saddr) {
+               ip_rt_put(rt);
+               *saddr = fl4.saddr;
+               flowi4_update_output(&fl4, 0, rtos, daddr, fl4.saddr);
+               loop++;
+               goto retry;
+       }
+       *saddr = fl4.saddr;
+       return rt;
+}
+
 /* Get route to destination or remote server */
 static struct rtable *
 __ip_vs_get_out_rt(struct sk_buff *skb, struct ip_vs_dest *dest,
@@ -98,20 +151,13 @@ __ip_vs_get_out_rt(struct sk_buff *skb, struct ip_vs_dest *dest,
                spin_lock(&dest->dst_lock);
                if (!(rt = (struct rtable *)
                      __ip_vs_dst_check(dest, rtos))) {
-                       struct flowi4 fl4;
-
-                       memset(&fl4, 0, sizeof(fl4));
-                       fl4.daddr = dest->addr.ip;
-                       fl4.flowi4_tos = rtos;
-                       rt = ip_route_output_key(net, &fl4);
-                       if (IS_ERR(rt)) {
+                       rt = do_output_route4(net, dest->addr.ip, rtos,
+                                             rt_mode, &dest->dst_saddr.ip);
+                       if (!rt) {
                                spin_unlock(&dest->dst_lock);
-                               IP_VS_DBG_RL("ip_route_output error, dest: %pI4\n",
-                                            &dest->addr.ip);
                                return NULL;
                        }
                        __ip_vs_dst_set(dest, rtos, dst_clone(&rt->dst), 0);
-                       dest->dst_saddr.ip = fl4.saddr;
                        IP_VS_DBG(10, "new dst %pI4, src %pI4, refcnt=%d, "
                                  "rtos=%X\n",
                                  &dest->addr.ip, &dest->dst_saddr.ip,
@@ -122,19 +168,17 @@ __ip_vs_get_out_rt(struct sk_buff *skb, struct ip_vs_dest *dest,
                        *ret_saddr = dest->dst_saddr.ip;
                spin_unlock(&dest->dst_lock);
        } else {
-               struct flowi4 fl4;
+               __be32 saddr = htonl(INADDR_ANY);
 
-               memset(&fl4, 0, sizeof(fl4));
-               fl4.daddr = daddr;
-               fl4.flowi4_tos = rtos;
-               rt = ip_route_output_key(net, &fl4);
-               if (IS_ERR(rt)) {
-                       IP_VS_DBG_RL("ip_route_output error, dest: %pI4\n",
-                                    &daddr);
+               /* For such unconfigured boxes avoid many route lookups
+                * for performance reasons because we do not remember saddr
+                */
+               rt_mode &= ~IP_VS_RT_MODE_CONNECT;
+               rt = do_output_route4(net, daddr, rtos, rt_mode, &saddr);
+               if (!rt)
                        return NULL;
-               }
                if (ret_saddr)
-                       *ret_saddr = fl4.saddr;
+                       *ret_saddr = saddr;
        }
 
        local = rt->rt_flags & RTCF_LOCAL;
@@ -331,6 +375,7 @@ ip_vs_dst_reset(struct ip_vs_dest *dest)
        old_dst = dest->dst_cache;
        dest->dst_cache = NULL;
        dst_release(old_dst);
+       dest->dst_saddr.ip = 0;
 }
 
 #define IP_VS_XMIT_TUNNEL(skb, cp)                             \
@@ -462,7 +507,7 @@ ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
 
        /* MTU checking */
        mtu = dst_mtu(&rt->dst);
-       if (skb->len > mtu && !skb_is_gso(skb)) {
+       if (__mtu_check_toobig_v6(skb, mtu)) {
                if (!skb->dev) {
                        struct net *net = dev_net(skb_dst(skb)->dev);
 
@@ -683,7 +728,7 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
 
        /* MTU checking */
        mtu = dst_mtu(&rt->dst);
-       if (skb->len > mtu && !skb_is_gso(skb)) {
+       if (__mtu_check_toobig_v6(skb, mtu)) {
                if (!skb->dev) {
                        struct net *net = dev_net(skb_dst(skb)->dev);
 
@@ -766,12 +811,13 @@ int
 ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
                  struct ip_vs_protocol *pp)
 {
+       struct netns_ipvs *ipvs = net_ipvs(skb_net(skb));
        struct rtable *rt;                      /* Route to the other host */
        __be32 saddr;                           /* Source for tunnel */
        struct net_device *tdev;                /* Device to other host */
        struct iphdr  *old_iph = ip_hdr(skb);
        u8     tos = old_iph->tos;
-       __be16 df = old_iph->frag_off;
+       __be16 df;
        struct iphdr  *iph;                     /* Our new IP header */
        unsigned int max_headroom;              /* The extra header space needed */
        int    mtu;
@@ -781,7 +827,8 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 
        if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
                                      RT_TOS(tos), IP_VS_RT_MODE_LOCAL |
-                                                  IP_VS_RT_MODE_NON_LOCAL,
+                                                  IP_VS_RT_MODE_NON_LOCAL |
+                                                  IP_VS_RT_MODE_CONNECT,
                                                   &saddr)))
                goto tx_error_icmp;
        if (rt->rt_flags & RTCF_LOCAL) {
@@ -796,13 +843,13 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
                IP_VS_DBG_RL("%s(): mtu less than 68\n", __func__);
                goto tx_error_put;
        }
-       if (skb_dst(skb))
+       if (rt_is_output_route(skb_rtable(skb)))
                skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
 
-       df |= (old_iph->frag_off & htons(IP_DF));
+       /* Copy DF, reset fragment offset and MF */
+       df = sysctl_pmtu_disc(ipvs) ? old_iph->frag_off & htons(IP_DF) : 0;
 
-       if ((old_iph->frag_off & htons(IP_DF) &&
-           mtu < ntohs(old_iph->tot_len) && !skb_is_gso(skb))) {
+       if (df && mtu < ntohs(old_iph->tot_len) && !skb_is_gso(skb)) {
                icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
                IP_VS_DBG_RL("%s(): frag needed\n", __func__);
                goto tx_error_put;
@@ -915,8 +962,8 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
        if (skb_dst(skb))
                skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
 
-       if (mtu < ntohs(old_iph->payload_len) + sizeof(struct ipv6hdr) &&
-           !skb_is_gso(skb)) {
+       /* MTU checking: Notice that 'mtu' have been adjusted before hand */
+       if (__mtu_check_toobig_v6(skb, mtu)) {
                if (!skb->dev) {
                        struct net *net = dev_net(skb_dst(skb)->dev);
 
@@ -1082,7 +1129,7 @@ ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
 
        /* MTU checking */
        mtu = dst_mtu(&rt->dst);
-       if (skb->len > mtu) {
+       if (__mtu_check_toobig_v6(skb, mtu)) {
                if (!skb->dev) {
                        struct net *net = dev_net(skb_dst(skb)->dev);
 
@@ -1318,7 +1365,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
 
        /* MTU checking */
        mtu = dst_mtu(&rt->dst);
-       if (skb->len > mtu && !skb_is_gso(skb)) {
+       if (__mtu_check_toobig_v6(skb, mtu)) {
                if (!skb->dev) {
                        struct net *net = dev_net(skb_dst(skb)->dev);
 
index f2de8c55ac506fcb764af21968ed2fc70a8a9da4..c514fe6033d254280f6760a3fe6d1c1f37185e1c 100644 (file)
@@ -40,6 +40,7 @@ MODULE_PARM_DESC(ts_algo, "textsearch algorithm to use (default kmp)");
 
 unsigned int (*nf_nat_amanda_hook)(struct sk_buff *skb,
                                   enum ip_conntrack_info ctinfo,
+                                  unsigned int protoff,
                                   unsigned int matchoff,
                                   unsigned int matchlen,
                                   struct nf_conntrack_expect *exp)
@@ -155,8 +156,8 @@ static int amanda_help(struct sk_buff *skb,
 
                nf_nat_amanda = rcu_dereference(nf_nat_amanda_hook);
                if (nf_nat_amanda && ct->status & IPS_NAT_MASK)
-                       ret = nf_nat_amanda(skb, ctinfo, off - dataoff,
-                                           len, exp);
+                       ret = nf_nat_amanda(skb, ctinfo, protoff,
+                                           off - dataoff, len, exp);
                else if (nf_ct_expect_related(exp) != 0)
                        ret = NF_DROP;
                nf_ct_expect_put(exp);
index 2ceec64b19f9866a222787531ba948b4c9e7e75b..0f241be28f9eec697ea9216c096657ace6eaefd6 100644 (file)
@@ -55,6 +55,12 @@ int (*nfnetlink_parse_nat_setup_hook)(struct nf_conn *ct,
                                      const struct nlattr *attr) __read_mostly;
 EXPORT_SYMBOL_GPL(nfnetlink_parse_nat_setup_hook);
 
+int (*nf_nat_seq_adjust_hook)(struct sk_buff *skb,
+                             struct nf_conn *ct,
+                             enum ip_conntrack_info ctinfo,
+                             unsigned int protoff);
+EXPORT_SYMBOL_GPL(nf_nat_seq_adjust_hook);
+
 DEFINE_SPINLOCK(nf_conntrack_lock);
 EXPORT_SYMBOL_GPL(nf_conntrack_lock);
 
@@ -930,7 +936,6 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
        enum ip_conntrack_info ctinfo;
        struct nf_conntrack_l3proto *l3proto;
        struct nf_conntrack_l4proto *l4proto;
-       struct nf_conn_timeout *timeout_ext;
        unsigned int *timeouts;
        unsigned int dataoff;
        u_int8_t protonum;
@@ -997,11 +1002,7 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
        NF_CT_ASSERT(skb->nfct);
 
        /* Decide what timeout policy we want to apply to this flow. */
-       timeout_ext = nf_ct_timeout_find(ct);
-       if (timeout_ext)
-               timeouts = NF_CT_TIMEOUT_EXT_DATA(timeout_ext);
-       else
-               timeouts = l4proto->get_timeouts(net);
+       timeouts = nf_ct_timeout_lookup(net, ct, l4proto);
 
        ret = l4proto->packet(ct, skb, dataoff, ctinfo, pf, hooknum, timeouts);
        if (ret <= 0) {
@@ -1223,6 +1224,8 @@ get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data),
        spin_lock_bh(&nf_conntrack_lock);
        for (; *bucket < net->ct.htable_size; (*bucket)++) {
                hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) {
+                       if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
+                               continue;
                        ct = nf_ct_tuplehash_to_ctrack(h);
                        if (iter(ct, data))
                                goto found;
index e7be79e640de0397ac9f5e5aaab00faf1f2c8730..de9781b6464f0940d391555489782bf63f1c956e 100644 (file)
@@ -61,7 +61,7 @@ void nf_ct_deliver_cached_events(struct nf_conn *ct)
                goto out_unlock;
 
        item.ct = ct;
-       item.pid = 0;
+       item.portid = 0;
        item.report = 0;
 
        ret = notify->fcn(events | missed, &item);
index 4bb771d1f57af53545b9eb36687ba000fd535fde..1ce3befb7c8ac895360a286d7ec9c6cd0da4a067 100644 (file)
@@ -48,6 +48,7 @@ module_param(loose, bool, 0600);
 unsigned int (*nf_nat_ftp_hook)(struct sk_buff *skb,
                                enum ip_conntrack_info ctinfo,
                                enum nf_ct_ftp_type type,
+                               unsigned int protoff,
                                unsigned int matchoff,
                                unsigned int matchlen,
                                struct nf_conntrack_expect *exp);
@@ -395,6 +396,12 @@ static int help(struct sk_buff *skb,
 
        /* Look up to see if we're just after a \n. */
        if (!find_nl_seq(ntohl(th->seq), ct_ftp_info, dir)) {
+               /* We're picking up this, clear flags and let it continue */
+               if (unlikely(ct_ftp_info->flags[dir] & NF_CT_FTP_SEQ_PICKUP)) {
+                       ct_ftp_info->flags[dir] ^= NF_CT_FTP_SEQ_PICKUP;
+                       goto skip_nl_seq;
+               }
+
                /* Now if this ends in \n, update ftp info. */
                pr_debug("nf_conntrack_ftp: wrong seq pos %s(%u) or %s(%u)\n",
                         ct_ftp_info->seq_aft_nl_num[dir] > 0 ? "" : "(UNSET)",
@@ -405,6 +412,7 @@ static int help(struct sk_buff *skb,
                goto out_update_nl;
        }
 
+skip_nl_seq:
        /* Initialize IP/IPv6 addr to expected address (it's not mentioned
           in EPSV responses) */
        cmd.l3num = nf_ct_l3num(ct);
@@ -489,7 +497,7 @@ static int help(struct sk_buff *skb,
        nf_nat_ftp = rcu_dereference(nf_nat_ftp_hook);
        if (nf_nat_ftp && ct->status & IPS_NAT_MASK)
                ret = nf_nat_ftp(skb, ctinfo, search[dir][i].ftptype,
-                                matchoff, matchlen, exp);
+                                protoff, matchoff, matchlen, exp);
        else {
                /* Can't expect this?  Best to drop packet now. */
                if (nf_ct_expect_related(exp) != 0)
@@ -511,6 +519,19 @@ out_update_nl:
        return ret;
 }
 
+static int nf_ct_ftp_from_nlattr(struct nlattr *attr, struct nf_conn *ct)
+{
+       struct nf_ct_ftp_master *ftp = nfct_help_data(ct);
+
+       /* This conntrack has been injected from user-space, always pick up
+        * sequence tracking. Otherwise, the first FTP command after the
+        * failover breaks.
+        */
+       ftp->flags[IP_CT_DIR_ORIGINAL] |= NF_CT_FTP_SEQ_PICKUP;
+       ftp->flags[IP_CT_DIR_REPLY] |= NF_CT_FTP_SEQ_PICKUP;
+       return 0;
+}
+
 static struct nf_conntrack_helper ftp[MAX_PORTS][2] __read_mostly;
 
 static const struct nf_conntrack_expect_policy ftp_exp_policy = {
@@ -560,6 +581,7 @@ static int __init nf_conntrack_ftp_init(void)
                        ftp[i][j].expect_policy = &ftp_exp_policy;
                        ftp[i][j].me = THIS_MODULE;
                        ftp[i][j].help = help;
+                       ftp[i][j].from_nlattr = nf_ct_ftp_from_nlattr;
                        if (ports[i] == FTP_PORT)
                                sprintf(ftp[i][j].name, "ftp");
                        else
index 4283b207e63be6c5da6fb9db24696d59097157fb..1b30b0dee70818c4842b1835964ffc5f59e6b6e3 100644 (file)
@@ -49,12 +49,12 @@ MODULE_PARM_DESC(callforward_filter, "only create call forwarding expectations "
                                     "(determined by routing information)");
 
 /* Hooks for NAT */
-int (*set_h245_addr_hook) (struct sk_buff *skb,
+int (*set_h245_addr_hook) (struct sk_buff *skb, unsigned int protoff,
                           unsigned char **data, int dataoff,
                           H245_TransportAddress *taddr,
                           union nf_inet_addr *addr, __be16 port)
                           __read_mostly;
-int (*set_h225_addr_hook) (struct sk_buff *skb,
+int (*set_h225_addr_hook) (struct sk_buff *skb, unsigned int protoff,
                           unsigned char **data, int dataoff,
                           TransportAddress *taddr,
                           union nf_inet_addr *addr, __be16 port)
@@ -62,16 +62,17 @@ int (*set_h225_addr_hook) (struct sk_buff *skb,
 int (*set_sig_addr_hook) (struct sk_buff *skb,
                          struct nf_conn *ct,
                          enum ip_conntrack_info ctinfo,
-                         unsigned char **data,
+                         unsigned int protoff, unsigned char **data,
                          TransportAddress *taddr, int count) __read_mostly;
 int (*set_ras_addr_hook) (struct sk_buff *skb,
                          struct nf_conn *ct,
                          enum ip_conntrack_info ctinfo,
-                         unsigned char **data,
+                         unsigned int protoff, unsigned char **data,
                          TransportAddress *taddr, int count) __read_mostly;
 int (*nat_rtp_rtcp_hook) (struct sk_buff *skb,
                          struct nf_conn *ct,
                          enum ip_conntrack_info ctinfo,
+                         unsigned int protoff,
                          unsigned char **data, int dataoff,
                          H245_TransportAddress *taddr,
                          __be16 port, __be16 rtp_port,
@@ -80,24 +81,28 @@ int (*nat_rtp_rtcp_hook) (struct sk_buff *skb,
 int (*nat_t120_hook) (struct sk_buff *skb,
                      struct nf_conn *ct,
                      enum ip_conntrack_info ctinfo,
+                     unsigned int protoff,
                      unsigned char **data, int dataoff,
                      H245_TransportAddress *taddr, __be16 port,
                      struct nf_conntrack_expect *exp) __read_mostly;
 int (*nat_h245_hook) (struct sk_buff *skb,
                      struct nf_conn *ct,
                      enum ip_conntrack_info ctinfo,
+                     unsigned int protoff,
                      unsigned char **data, int dataoff,
                      TransportAddress *taddr, __be16 port,
                      struct nf_conntrack_expect *exp) __read_mostly;
 int (*nat_callforwarding_hook) (struct sk_buff *skb,
                                struct nf_conn *ct,
                                enum ip_conntrack_info ctinfo,
+                               unsigned int protoff,
                                unsigned char **data, int dataoff,
                                TransportAddress *taddr, __be16 port,
                                struct nf_conntrack_expect *exp) __read_mostly;
 int (*nat_q931_hook) (struct sk_buff *skb,
                      struct nf_conn *ct,
                      enum ip_conntrack_info ctinfo,
+                     unsigned int protoff,
                      unsigned char **data, TransportAddress *taddr, int idx,
                      __be16 port, struct nf_conntrack_expect *exp)
                      __read_mostly;
@@ -251,6 +256,7 @@ static int get_h245_addr(struct nf_conn *ct, const unsigned char *data,
 /****************************************************************************/
 static int expect_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
                           enum ip_conntrack_info ctinfo,
+                          unsigned int protoff,
                           unsigned char **data, int dataoff,
                           H245_TransportAddress *taddr)
 {
@@ -295,9 +301,10 @@ static int expect_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
                   &ct->tuplehash[!dir].tuple.dst.u3,
                   sizeof(ct->tuplehash[dir].tuple.src.u3)) &&
                   (nat_rtp_rtcp = rcu_dereference(nat_rtp_rtcp_hook)) &&
+                  nf_ct_l3num(ct) == NFPROTO_IPV4 &&
                   ct->status & IPS_NAT_MASK) {
                /* NAT needed */
-               ret = nat_rtp_rtcp(skb, ct, ctinfo, data, dataoff,
+               ret = nat_rtp_rtcp(skb, ct, ctinfo, protoff, data, dataoff,
                                   taddr, port, rtp_port, rtp_exp, rtcp_exp);
        } else {                /* Conntrack only */
                if (nf_ct_expect_related(rtp_exp) == 0) {
@@ -324,6 +331,7 @@ static int expect_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
 static int expect_t120(struct sk_buff *skb,
                       struct nf_conn *ct,
                       enum ip_conntrack_info ctinfo,
+                      unsigned int protoff,
                       unsigned char **data, int dataoff,
                       H245_TransportAddress *taddr)
 {
@@ -353,9 +361,10 @@ static int expect_t120(struct sk_buff *skb,
                   &ct->tuplehash[!dir].tuple.dst.u3,
                   sizeof(ct->tuplehash[dir].tuple.src.u3)) &&
            (nat_t120 = rcu_dereference(nat_t120_hook)) &&
+           nf_ct_l3num(ct) == NFPROTO_IPV4 &&
            ct->status & IPS_NAT_MASK) {
                /* NAT needed */
-               ret = nat_t120(skb, ct, ctinfo, data, dataoff, taddr,
+               ret = nat_t120(skb, ct, ctinfo, protoff, data, dataoff, taddr,
                               port, exp);
        } else {                /* Conntrack only */
                if (nf_ct_expect_related(exp) == 0) {
@@ -374,6 +383,7 @@ static int expect_t120(struct sk_buff *skb,
 static int process_h245_channel(struct sk_buff *skb,
                                struct nf_conn *ct,
                                enum ip_conntrack_info ctinfo,
+                               unsigned int protoff,
                                unsigned char **data, int dataoff,
                                H2250LogicalChannelParameters *channel)
 {
@@ -381,7 +391,7 @@ static int process_h245_channel(struct sk_buff *skb,
 
        if (channel->options & eH2250LogicalChannelParameters_mediaChannel) {
                /* RTP */
-               ret = expect_rtp_rtcp(skb, ct, ctinfo, data, dataoff,
+               ret = expect_rtp_rtcp(skb, ct, ctinfo, protoff, data, dataoff,
                                      &channel->mediaChannel);
                if (ret < 0)
                        return -1;
@@ -390,7 +400,7 @@ static int process_h245_channel(struct sk_buff *skb,
        if (channel->
            options & eH2250LogicalChannelParameters_mediaControlChannel) {
                /* RTCP */
-               ret = expect_rtp_rtcp(skb, ct, ctinfo, data, dataoff,
+               ret = expect_rtp_rtcp(skb, ct, ctinfo, protoff, data, dataoff,
                                      &channel->mediaControlChannel);
                if (ret < 0)
                        return -1;
@@ -402,6 +412,7 @@ static int process_h245_channel(struct sk_buff *skb,
 /****************************************************************************/
 static int process_olc(struct sk_buff *skb, struct nf_conn *ct,
                       enum ip_conntrack_info ctinfo,
+                      unsigned int protoff,
                       unsigned char **data, int dataoff,
                       OpenLogicalChannel *olc)
 {
@@ -412,7 +423,8 @@ static int process_olc(struct sk_buff *skb, struct nf_conn *ct,
        if (olc->forwardLogicalChannelParameters.multiplexParameters.choice ==
            eOpenLogicalChannel_forwardLogicalChannelParameters_multiplexParameters_h2250LogicalChannelParameters)
        {
-               ret = process_h245_channel(skb, ct, ctinfo, data, dataoff,
+               ret = process_h245_channel(skb, ct, ctinfo,
+                                          protoff, data, dataoff,
                                           &olc->
                                           forwardLogicalChannelParameters.
                                           multiplexParameters.
@@ -430,7 +442,8 @@ static int process_olc(struct sk_buff *skb, struct nf_conn *ct,
                eOpenLogicalChannel_reverseLogicalChannelParameters_multiplexParameters_h2250LogicalChannelParameters))
        {
                ret =
-                   process_h245_channel(skb, ct, ctinfo, data, dataoff,
+                   process_h245_channel(skb, ct, ctinfo,
+                                        protoff, data, dataoff,
                                         &olc->
                                         reverseLogicalChannelParameters.
                                         multiplexParameters.
@@ -448,7 +461,7 @@ static int process_olc(struct sk_buff *skb, struct nf_conn *ct,
            t120.choice == eDataProtocolCapability_separateLANStack &&
            olc->separateStack.networkAddress.choice ==
            eNetworkAccessParameters_networkAddress_localAreaAddress) {
-               ret = expect_t120(skb, ct, ctinfo, data, dataoff,
+               ret = expect_t120(skb, ct, ctinfo, protoff, data, dataoff,
                                  &olc->separateStack.networkAddress.
                                  localAreaAddress);
                if (ret < 0)
@@ -461,7 +474,7 @@ static int process_olc(struct sk_buff *skb, struct nf_conn *ct,
 /****************************************************************************/
 static int process_olca(struct sk_buff *skb, struct nf_conn *ct,
                        enum ip_conntrack_info ctinfo,
-                       unsigned char **data, int dataoff,
+                       unsigned int protoff, unsigned char **data, int dataoff,
                        OpenLogicalChannelAck *olca)
 {
        H2250LogicalChannelAckParameters *ack;
@@ -477,7 +490,8 @@ static int process_olca(struct sk_buff *skb, struct nf_conn *ct,
                choice ==
                eOpenLogicalChannelAck_reverseLogicalChannelParameters_multiplexParameters_h2250LogicalChannelParameters))
        {
-               ret = process_h245_channel(skb, ct, ctinfo, data, dataoff,
+               ret = process_h245_channel(skb, ct, ctinfo,
+                                          protoff, data, dataoff,
                                           &olca->
                                           reverseLogicalChannelParameters.
                                           multiplexParameters.
@@ -496,7 +510,8 @@ static int process_olca(struct sk_buff *skb, struct nf_conn *ct,
                if (ack->options &
                    eH2250LogicalChannelAckParameters_mediaChannel) {
                        /* RTP */
-                       ret = expect_rtp_rtcp(skb, ct, ctinfo, data, dataoff,
+                       ret = expect_rtp_rtcp(skb, ct, ctinfo,
+                                             protoff, data, dataoff,
                                              &ack->mediaChannel);
                        if (ret < 0)
                                return -1;
@@ -505,7 +520,8 @@ static int process_olca(struct sk_buff *skb, struct nf_conn *ct,
                if (ack->options &
                    eH2250LogicalChannelAckParameters_mediaControlChannel) {
                        /* RTCP */
-                       ret = expect_rtp_rtcp(skb, ct, ctinfo, data, dataoff,
+                       ret = expect_rtp_rtcp(skb, ct, ctinfo,
+                                             protoff, data, dataoff,
                                              &ack->mediaControlChannel);
                        if (ret < 0)
                                return -1;
@@ -515,7 +531,7 @@ static int process_olca(struct sk_buff *skb, struct nf_conn *ct,
        if ((olca->options & eOpenLogicalChannelAck_separateStack) &&
                olca->separateStack.networkAddress.choice ==
                eNetworkAccessParameters_networkAddress_localAreaAddress) {
-               ret = expect_t120(skb, ct, ctinfo, data, dataoff,
+               ret = expect_t120(skb, ct, ctinfo, protoff, data, dataoff,
                                  &olca->separateStack.networkAddress.
                                  localAreaAddress);
                if (ret < 0)
@@ -528,14 +544,15 @@ static int process_olca(struct sk_buff *skb, struct nf_conn *ct,
 /****************************************************************************/
 static int process_h245(struct sk_buff *skb, struct nf_conn *ct,
                        enum ip_conntrack_info ctinfo,
-                       unsigned char **data, int dataoff,
+                       unsigned int protoff, unsigned char **data, int dataoff,
                        MultimediaSystemControlMessage *mscm)
 {
        switch (mscm->choice) {
        case eMultimediaSystemControlMessage_request:
                if (mscm->request.choice ==
                    eRequestMessage_openLogicalChannel) {
-                       return process_olc(skb, ct, ctinfo, data, dataoff,
+                       return process_olc(skb, ct, ctinfo,
+                                          protoff, data, dataoff,
                                           &mscm->request.openLogicalChannel);
                }
                pr_debug("nf_ct_h323: H.245 Request %d\n",
@@ -544,7 +561,8 @@ static int process_h245(struct sk_buff *skb, struct nf_conn *ct,
        case eMultimediaSystemControlMessage_response:
                if (mscm->response.choice ==
                    eResponseMessage_openLogicalChannelAck) {
-                       return process_olca(skb, ct, ctinfo, data, dataoff,
+                       return process_olca(skb, ct, ctinfo,
+                                           protoff, data, dataoff,
                                            &mscm->response.
                                            openLogicalChannelAck);
                }
@@ -595,7 +613,8 @@ static int h245_help(struct sk_buff *skb, unsigned int protoff,
                }
 
                /* Process H.245 signal */
-               if (process_h245(skb, ct, ctinfo, &data, dataoff, &mscm) < 0)
+               if (process_h245(skb, ct, ctinfo, protoff,
+                                &data, dataoff, &mscm) < 0)
                        goto drop;
        }
 
@@ -659,7 +678,7 @@ int get_h225_addr(struct nf_conn *ct, unsigned char *data,
 /****************************************************************************/
 static int expect_h245(struct sk_buff *skb, struct nf_conn *ct,
                       enum ip_conntrack_info ctinfo,
-                      unsigned char **data, int dataoff,
+                      unsigned int protoff, unsigned char **data, int dataoff,
                       TransportAddress *taddr)
 {
        int dir = CTINFO2DIR(ctinfo);
@@ -688,9 +707,10 @@ static int expect_h245(struct sk_buff *skb, struct nf_conn *ct,
                   &ct->tuplehash[!dir].tuple.dst.u3,
                   sizeof(ct->tuplehash[dir].tuple.src.u3)) &&
            (nat_h245 = rcu_dereference(nat_h245_hook)) &&
+           nf_ct_l3num(ct) == NFPROTO_IPV4 &&
            ct->status & IPS_NAT_MASK) {
                /* NAT needed */
-               ret = nat_h245(skb, ct, ctinfo, data, dataoff, taddr,
+               ret = nat_h245(skb, ct, ctinfo, protoff, data, dataoff, taddr,
                               port, exp);
        } else {                /* Conntrack only */
                if (nf_ct_expect_related(exp) == 0) {
@@ -776,6 +796,7 @@ static int callforward_do_filter(const union nf_inet_addr *src,
 static int expect_callforwarding(struct sk_buff *skb,
                                 struct nf_conn *ct,
                                 enum ip_conntrack_info ctinfo,
+                                unsigned int protoff,
                                 unsigned char **data, int dataoff,
                                 TransportAddress *taddr)
 {
@@ -811,9 +832,11 @@ static int expect_callforwarding(struct sk_buff *skb,
                   &ct->tuplehash[!dir].tuple.dst.u3,
                   sizeof(ct->tuplehash[dir].tuple.src.u3)) &&
            (nat_callforwarding = rcu_dereference(nat_callforwarding_hook)) &&
+           nf_ct_l3num(ct) == NFPROTO_IPV4 &&
            ct->status & IPS_NAT_MASK) {
                /* Need NAT */
-               ret = nat_callforwarding(skb, ct, ctinfo, data, dataoff,
+               ret = nat_callforwarding(skb, ct, ctinfo,
+                                        protoff, data, dataoff,
                                         taddr, port, exp);
        } else {                /* Conntrack only */
                if (nf_ct_expect_related(exp) == 0) {
@@ -831,6 +854,7 @@ static int expect_callforwarding(struct sk_buff *skb,
 /****************************************************************************/
 static int process_setup(struct sk_buff *skb, struct nf_conn *ct,
                         enum ip_conntrack_info ctinfo,
+                        unsigned int protoff,
                         unsigned char **data, int dataoff,
                         Setup_UUIE *setup)
 {
@@ -844,7 +868,7 @@ static int process_setup(struct sk_buff *skb, struct nf_conn *ct,
        pr_debug("nf_ct_q931: Setup\n");
 
        if (setup->options & eSetup_UUIE_h245Address) {
-               ret = expect_h245(skb, ct, ctinfo, data, dataoff,
+               ret = expect_h245(skb, ct, ctinfo, protoff, data, dataoff,
                                  &setup->h245Address);
                if (ret < 0)
                        return -1;
@@ -852,14 +876,15 @@ static int process_setup(struct sk_buff *skb, struct nf_conn *ct,
 
        set_h225_addr = rcu_dereference(set_h225_addr_hook);
        if ((setup->options & eSetup_UUIE_destCallSignalAddress) &&
-           (set_h225_addr) && ct->status & IPS_NAT_MASK &&
+           (set_h225_addr) && nf_ct_l3num(ct) == NFPROTO_IPV4 &&
+           ct->status & IPS_NAT_MASK &&
            get_h225_addr(ct, *data, &setup->destCallSignalAddress,
                          &addr, &port) &&
            memcmp(&addr, &ct->tuplehash[!dir].tuple.src.u3, sizeof(addr))) {
                pr_debug("nf_ct_q931: set destCallSignalAddress %pI6:%hu->%pI6:%hu\n",
                         &addr, ntohs(port), &ct->tuplehash[!dir].tuple.src.u3,
                         ntohs(ct->tuplehash[!dir].tuple.src.u.tcp.port));
-               ret = set_h225_addr(skb, data, dataoff,
+               ret = set_h225_addr(skb, protoff, data, dataoff,
                                    &setup->destCallSignalAddress,
                                    &ct->tuplehash[!dir].tuple.src.u3,
                                    ct->tuplehash[!dir].tuple.src.u.tcp.port);
@@ -868,14 +893,15 @@ static int process_setup(struct sk_buff *skb, struct nf_conn *ct,
        }
 
        if ((setup->options & eSetup_UUIE_sourceCallSignalAddress) &&
-           (set_h225_addr) && ct->status & IPS_NAT_MASK &&
+           (set_h225_addr) && nf_ct_l3num(ct) == NFPROTO_IPV4 &&
+           ct->status & IPS_NAT_MASK &&
            get_h225_addr(ct, *data, &setup->sourceCallSignalAddress,
                          &addr, &port) &&
            memcmp(&addr, &ct->tuplehash[!dir].tuple.dst.u3, sizeof(addr))) {
                pr_debug("nf_ct_q931: set sourceCallSignalAddress %pI6:%hu->%pI6:%hu\n",
                         &addr, ntohs(port), &ct->tuplehash[!dir].tuple.dst.u3,
                         ntohs(ct->tuplehash[!dir].tuple.dst.u.tcp.port));
-               ret = set_h225_addr(skb, data, dataoff,
+               ret = set_h225_addr(skb, protoff, data, dataoff,
                                    &setup->sourceCallSignalAddress,
                                    &ct->tuplehash[!dir].tuple.dst.u3,
                                    ct->tuplehash[!dir].tuple.dst.u.tcp.port);
@@ -885,7 +911,8 @@ static int process_setup(struct sk_buff *skb, struct nf_conn *ct,
 
        if (setup->options & eSetup_UUIE_fastStart) {
                for (i = 0; i < setup->fastStart.count; i++) {
-                       ret = process_olc(skb, ct, ctinfo, data, dataoff,
+                       ret = process_olc(skb, ct, ctinfo,
+                                         protoff, data, dataoff,
                                          &setup->fastStart.item[i]);
                        if (ret < 0)
                                return -1;
@@ -899,6 +926,7 @@ static int process_setup(struct sk_buff *skb, struct nf_conn *ct,
 static int process_callproceeding(struct sk_buff *skb,
                                  struct nf_conn *ct,
                                  enum ip_conntrack_info ctinfo,
+                                 unsigned int protoff,
                                  unsigned char **data, int dataoff,
                                  CallProceeding_UUIE *callproc)
 {
@@ -908,7 +936,7 @@ static int process_callproceeding(struct sk_buff *skb,
        pr_debug("nf_ct_q931: CallProceeding\n");
 
        if (callproc->options & eCallProceeding_UUIE_h245Address) {
-               ret = expect_h245(skb, ct, ctinfo, data, dataoff,
+               ret = expect_h245(skb, ct, ctinfo, protoff, data, dataoff,
                                  &callproc->h245Address);
                if (ret < 0)
                        return -1;
@@ -916,7 +944,8 @@ static int process_callproceeding(struct sk_buff *skb,
 
        if (callproc->options & eCallProceeding_UUIE_fastStart) {
                for (i = 0; i < callproc->fastStart.count; i++) {
-                       ret = process_olc(skb, ct, ctinfo, data, dataoff,
+                       ret = process_olc(skb, ct, ctinfo,
+                                         protoff, data, dataoff,
                                          &callproc->fastStart.item[i]);
                        if (ret < 0)
                                return -1;
@@ -929,6 +958,7 @@ static int process_callproceeding(struct sk_buff *skb,
 /****************************************************************************/
 static int process_connect(struct sk_buff *skb, struct nf_conn *ct,
                           enum ip_conntrack_info ctinfo,
+                          unsigned int protoff,
                           unsigned char **data, int dataoff,
                           Connect_UUIE *connect)
 {
@@ -938,7 +968,7 @@ static int process_connect(struct sk_buff *skb, struct nf_conn *ct,
        pr_debug("nf_ct_q931: Connect\n");
 
        if (connect->options & eConnect_UUIE_h245Address) {
-               ret = expect_h245(skb, ct, ctinfo, data, dataoff,
+               ret = expect_h245(skb, ct, ctinfo, protoff, data, dataoff,
                                  &connect->h245Address);
                if (ret < 0)
                        return -1;
@@ -946,7 +976,8 @@ static int process_connect(struct sk_buff *skb, struct nf_conn *ct,
 
        if (connect->options & eConnect_UUIE_fastStart) {
                for (i = 0; i < connect->fastStart.count; i++) {
-                       ret = process_olc(skb, ct, ctinfo, data, dataoff,
+                       ret = process_olc(skb, ct, ctinfo,
+                                         protoff, data, dataoff,
                                          &connect->fastStart.item[i]);
                        if (ret < 0)
                                return -1;
@@ -959,6 +990,7 @@ static int process_connect(struct sk_buff *skb, struct nf_conn *ct,
 /****************************************************************************/
 static int process_alerting(struct sk_buff *skb, struct nf_conn *ct,
                            enum ip_conntrack_info ctinfo,
+                           unsigned int protoff,
                            unsigned char **data, int dataoff,
                            Alerting_UUIE *alert)
 {
@@ -968,7 +1000,7 @@ static int process_alerting(struct sk_buff *skb, struct nf_conn *ct,
        pr_debug("nf_ct_q931: Alerting\n");
 
        if (alert->options & eAlerting_UUIE_h245Address) {
-               ret = expect_h245(skb, ct, ctinfo, data, dataoff,
+               ret = expect_h245(skb, ct, ctinfo, protoff, data, dataoff,
                                  &alert->h245Address);
                if (ret < 0)
                        return -1;
@@ -976,7 +1008,8 @@ static int process_alerting(struct sk_buff *skb, struct nf_conn *ct,
 
        if (alert->options & eAlerting_UUIE_fastStart) {
                for (i = 0; i < alert->fastStart.count; i++) {
-                       ret = process_olc(skb, ct, ctinfo, data, dataoff,
+                       ret = process_olc(skb, ct, ctinfo,
+                                         protoff, data, dataoff,
                                          &alert->fastStart.item[i]);
                        if (ret < 0)
                                return -1;
@@ -989,6 +1022,7 @@ static int process_alerting(struct sk_buff *skb, struct nf_conn *ct,
 /****************************************************************************/
 static int process_facility(struct sk_buff *skb, struct nf_conn *ct,
                            enum ip_conntrack_info ctinfo,
+                           unsigned int protoff,
                            unsigned char **data, int dataoff,
                            Facility_UUIE *facility)
 {
@@ -999,15 +1033,15 @@ static int process_facility(struct sk_buff *skb, struct nf_conn *ct,
 
        if (facility->reason.choice == eFacilityReason_callForwarded) {
                if (facility->options & eFacility_UUIE_alternativeAddress)
-                       return expect_callforwarding(skb, ct, ctinfo, data,
-                                                    dataoff,
+                       return expect_callforwarding(skb, ct, ctinfo,
+                                                    protoff, data, dataoff,
                                                     &facility->
                                                     alternativeAddress);
                return 0;
        }
 
        if (facility->options & eFacility_UUIE_h245Address) {
-               ret = expect_h245(skb, ct, ctinfo, data, dataoff,
+               ret = expect_h245(skb, ct, ctinfo, protoff, data, dataoff,
                                  &facility->h245Address);
                if (ret < 0)
                        return -1;
@@ -1015,7 +1049,8 @@ static int process_facility(struct sk_buff *skb, struct nf_conn *ct,
 
        if (facility->options & eFacility_UUIE_fastStart) {
                for (i = 0; i < facility->fastStart.count; i++) {
-                       ret = process_olc(skb, ct, ctinfo, data, dataoff,
+                       ret = process_olc(skb, ct, ctinfo,
+                                         protoff, data, dataoff,
                                          &facility->fastStart.item[i]);
                        if (ret < 0)
                                return -1;
@@ -1028,6 +1063,7 @@ static int process_facility(struct sk_buff *skb, struct nf_conn *ct,
 /****************************************************************************/
 static int process_progress(struct sk_buff *skb, struct nf_conn *ct,
                            enum ip_conntrack_info ctinfo,
+                           unsigned int protoff,
                            unsigned char **data, int dataoff,
                            Progress_UUIE *progress)
 {
@@ -1037,7 +1073,7 @@ static int process_progress(struct sk_buff *skb, struct nf_conn *ct,
        pr_debug("nf_ct_q931: Progress\n");
 
        if (progress->options & eProgress_UUIE_h245Address) {
-               ret = expect_h245(skb, ct, ctinfo, data, dataoff,
+               ret = expect_h245(skb, ct, ctinfo, protoff, data, dataoff,
                                  &progress->h245Address);
                if (ret < 0)
                        return -1;
@@ -1045,7 +1081,8 @@ static int process_progress(struct sk_buff *skb, struct nf_conn *ct,
 
        if (progress->options & eProgress_UUIE_fastStart) {
                for (i = 0; i < progress->fastStart.count; i++) {
-                       ret = process_olc(skb, ct, ctinfo, data, dataoff,
+                       ret = process_olc(skb, ct, ctinfo,
+                                         protoff, data, dataoff,
                                          &progress->fastStart.item[i]);
                        if (ret < 0)
                                return -1;
@@ -1058,7 +1095,8 @@ static int process_progress(struct sk_buff *skb, struct nf_conn *ct,
 /****************************************************************************/
 static int process_q931(struct sk_buff *skb, struct nf_conn *ct,
                        enum ip_conntrack_info ctinfo,
-                       unsigned char **data, int dataoff, Q931 *q931)
+                       unsigned int protoff, unsigned char **data, int dataoff,
+                       Q931 *q931)
 {
        H323_UU_PDU *pdu = &q931->UUIE.h323_uu_pdu;
        int i;
@@ -1066,28 +1104,29 @@ static int process_q931(struct sk_buff *skb, struct nf_conn *ct,
 
        switch (pdu->h323_message_body.choice) {
        case eH323_UU_PDU_h323_message_body_setup:
-               ret = process_setup(skb, ct, ctinfo, data, dataoff,
+               ret = process_setup(skb, ct, ctinfo, protoff, data, dataoff,
                                    &pdu->h323_message_body.setup);
                break;
        case eH323_UU_PDU_h323_message_body_callProceeding:
-               ret = process_callproceeding(skb, ct, ctinfo, data, dataoff,
+               ret = process_callproceeding(skb, ct, ctinfo,
+                                            protoff, data, dataoff,
                                             &pdu->h323_message_body.
                                             callProceeding);
                break;
        case eH323_UU_PDU_h323_message_body_connect:
-               ret = process_connect(skb, ct, ctinfo, data, dataoff,
+               ret = process_connect(skb, ct, ctinfo, protoff, data, dataoff,
                                      &pdu->h323_message_body.connect);
                break;
        case eH323_UU_PDU_h323_message_body_alerting:
-               ret = process_alerting(skb, ct, ctinfo, data, dataoff,
+               ret = process_alerting(skb, ct, ctinfo, protoff, data, dataoff,
                                       &pdu->h323_message_body.alerting);
                break;
        case eH323_UU_PDU_h323_message_body_facility:
-               ret = process_facility(skb, ct, ctinfo, data, dataoff,
+               ret = process_facility(skb, ct, ctinfo, protoff, data, dataoff,
                                       &pdu->h323_message_body.facility);
                break;
        case eH323_UU_PDU_h323_message_body_progress:
-               ret = process_progress(skb, ct, ctinfo, data, dataoff,
+               ret = process_progress(skb, ct, ctinfo, protoff, data, dataoff,
                                       &pdu->h323_message_body.progress);
                break;
        default:
@@ -1101,7 +1140,8 @@ static int process_q931(struct sk_buff *skb, struct nf_conn *ct,
 
        if (pdu->options & eH323_UU_PDU_h245Control) {
                for (i = 0; i < pdu->h245Control.count; i++) {
-                       ret = process_h245(skb, ct, ctinfo, data, dataoff,
+                       ret = process_h245(skb, ct, ctinfo,
+                                          protoff, data, dataoff,
                                           &pdu->h245Control.item[i]);
                        if (ret < 0)
                                return -1;
@@ -1146,7 +1186,8 @@ static int q931_help(struct sk_buff *skb, unsigned int protoff,
                }
 
                /* Process Q.931 signal */
-               if (process_q931(skb, ct, ctinfo, &data, dataoff, &q931) < 0)
+               if (process_q931(skb, ct, ctinfo, protoff,
+                                &data, dataoff, &q931) < 0)
                        goto drop;
        }
 
@@ -1243,7 +1284,7 @@ static int set_expect_timeout(struct nf_conntrack_expect *exp,
 /****************************************************************************/
 static int expect_q931(struct sk_buff *skb, struct nf_conn *ct,
                       enum ip_conntrack_info ctinfo,
-                      unsigned char **data,
+                      unsigned int protoff, unsigned char **data,
                       TransportAddress *taddr, int count)
 {
        struct nf_ct_h323_master *info = nfct_help_data(ct);
@@ -1278,8 +1319,10 @@ static int expect_q931(struct sk_buff *skb, struct nf_conn *ct,
        exp->flags = NF_CT_EXPECT_PERMANENT;    /* Accept multiple calls */
 
        nat_q931 = rcu_dereference(nat_q931_hook);
-       if (nat_q931 && ct->status & IPS_NAT_MASK) {    /* Need NAT */
-               ret = nat_q931(skb, ct, ctinfo, data, taddr, i, port, exp);
+       if (nat_q931 && nf_ct_l3num(ct) == NFPROTO_IPV4 &&
+           ct->status & IPS_NAT_MASK) {        /* Need NAT */
+               ret = nat_q931(skb, ct, ctinfo, protoff, data,
+                              taddr, i, port, exp);
        } else {                /* Conntrack only */
                if (nf_ct_expect_related(exp) == 0) {
                        pr_debug("nf_ct_ras: expect Q.931 ");
@@ -1299,6 +1342,7 @@ static int expect_q931(struct sk_buff *skb, struct nf_conn *ct,
 /****************************************************************************/
 static int process_grq(struct sk_buff *skb, struct nf_conn *ct,
                       enum ip_conntrack_info ctinfo,
+                      unsigned int protoff,
                       unsigned char **data, GatekeeperRequest *grq)
 {
        typeof(set_ras_addr_hook) set_ras_addr;
@@ -1306,8 +1350,9 @@ static int process_grq(struct sk_buff *skb, struct nf_conn *ct,
        pr_debug("nf_ct_ras: GRQ\n");
 
        set_ras_addr = rcu_dereference(set_ras_addr_hook);
-       if (set_ras_addr && ct->status & IPS_NAT_MASK)  /* NATed */
-               return set_ras_addr(skb, ct, ctinfo, data,
+       if (set_ras_addr && nf_ct_l3num(ct) == NFPROTO_IPV4 &&
+           ct->status & IPS_NAT_MASK)  /* NATed */
+               return set_ras_addr(skb, ct, ctinfo, protoff, data,
                                    &grq->rasAddress, 1);
        return 0;
 }
@@ -1315,6 +1360,7 @@ static int process_grq(struct sk_buff *skb, struct nf_conn *ct,
 /****************************************************************************/
 static int process_gcf(struct sk_buff *skb, struct nf_conn *ct,
                       enum ip_conntrack_info ctinfo,
+                      unsigned int protoff,
                       unsigned char **data, GatekeeperConfirm *gcf)
 {
        int dir = CTINFO2DIR(ctinfo);
@@ -1359,6 +1405,7 @@ static int process_gcf(struct sk_buff *skb, struct nf_conn *ct,
 /****************************************************************************/
 static int process_rrq(struct sk_buff *skb, struct nf_conn *ct,
                       enum ip_conntrack_info ctinfo,
+                      unsigned int protoff,
                       unsigned char **data, RegistrationRequest *rrq)
 {
        struct nf_ct_h323_master *info = nfct_help_data(ct);
@@ -1367,15 +1414,16 @@ static int process_rrq(struct sk_buff *skb, struct nf_conn *ct,
 
        pr_debug("nf_ct_ras: RRQ\n");
 
-       ret = expect_q931(skb, ct, ctinfo, data,
+       ret = expect_q931(skb, ct, ctinfo, protoff, data,
                          rrq->callSignalAddress.item,
                          rrq->callSignalAddress.count);
        if (ret < 0)
                return -1;
 
        set_ras_addr = rcu_dereference(set_ras_addr_hook);
-       if (set_ras_addr && ct->status & IPS_NAT_MASK) {
-               ret = set_ras_addr(skb, ct, ctinfo, data,
+       if (set_ras_addr && nf_ct_l3num(ct) == NFPROTO_IPV4 &&
+           ct->status & IPS_NAT_MASK) {
+               ret = set_ras_addr(skb, ct, ctinfo, protoff, data,
                                   rrq->rasAddress.item,
                                   rrq->rasAddress.count);
                if (ret < 0)
@@ -1394,6 +1442,7 @@ static int process_rrq(struct sk_buff *skb, struct nf_conn *ct,
 /****************************************************************************/
 static int process_rcf(struct sk_buff *skb, struct nf_conn *ct,
                       enum ip_conntrack_info ctinfo,
+                      unsigned int protoff,
                       unsigned char **data, RegistrationConfirm *rcf)
 {
        struct nf_ct_h323_master *info = nfct_help_data(ct);
@@ -1405,8 +1454,9 @@ static int process_rcf(struct sk_buff *skb, struct nf_conn *ct,
        pr_debug("nf_ct_ras: RCF\n");
 
        set_sig_addr = rcu_dereference(set_sig_addr_hook);
-       if (set_sig_addr && ct->status & IPS_NAT_MASK) {
-               ret = set_sig_addr(skb, ct, ctinfo, data,
+       if (set_sig_addr && nf_ct_l3num(ct) == NFPROTO_IPV4 &&
+           ct->status & IPS_NAT_MASK) {
+               ret = set_sig_addr(skb, ct, ctinfo, protoff, data,
                                        rcf->callSignalAddress.item,
                                        rcf->callSignalAddress.count);
                if (ret < 0)
@@ -1443,6 +1493,7 @@ static int process_rcf(struct sk_buff *skb, struct nf_conn *ct,
 /****************************************************************************/
 static int process_urq(struct sk_buff *skb, struct nf_conn *ct,
                       enum ip_conntrack_info ctinfo,
+                      unsigned int protoff,
                       unsigned char **data, UnregistrationRequest *urq)
 {
        struct nf_ct_h323_master *info = nfct_help_data(ct);
@@ -1453,8 +1504,9 @@ static int process_urq(struct sk_buff *skb, struct nf_conn *ct,
        pr_debug("nf_ct_ras: URQ\n");
 
        set_sig_addr = rcu_dereference(set_sig_addr_hook);
-       if (set_sig_addr && ct->status & IPS_NAT_MASK) {
-               ret = set_sig_addr(skb, ct, ctinfo, data,
+       if (set_sig_addr && nf_ct_l3num(ct) == NFPROTO_IPV4 &&
+           ct->status & IPS_NAT_MASK) {
+               ret = set_sig_addr(skb, ct, ctinfo, protoff, data,
                                   urq->callSignalAddress.item,
                                   urq->callSignalAddress.count);
                if (ret < 0)
@@ -1475,6 +1527,7 @@ static int process_urq(struct sk_buff *skb, struct nf_conn *ct,
 /****************************************************************************/
 static int process_arq(struct sk_buff *skb, struct nf_conn *ct,
                       enum ip_conntrack_info ctinfo,
+                      unsigned int protoff,
                       unsigned char **data, AdmissionRequest *arq)
 {
        const struct nf_ct_h323_master *info = nfct_help_data(ct);
@@ -1491,9 +1544,10 @@ static int process_arq(struct sk_buff *skb, struct nf_conn *ct,
                          &addr, &port) &&
            !memcmp(&addr, &ct->tuplehash[dir].tuple.src.u3, sizeof(addr)) &&
            port == info->sig_port[dir] &&
+           nf_ct_l3num(ct) == NFPROTO_IPV4 &&
            set_h225_addr && ct->status & IPS_NAT_MASK) {
                /* Answering ARQ */
-               return set_h225_addr(skb, data, 0,
+               return set_h225_addr(skb, protoff, data, 0,
                                     &arq->destCallSignalAddress,
                                     &ct->tuplehash[!dir].tuple.dst.u3,
                                     info->sig_port[!dir]);
@@ -1503,9 +1557,10 @@ static int process_arq(struct sk_buff *skb, struct nf_conn *ct,
            get_h225_addr(ct, *data, &arq->srcCallSignalAddress,
                          &addr, &port) &&
            !memcmp(&addr, &ct->tuplehash[dir].tuple.src.u3, sizeof(addr)) &&
-           set_h225_addr && ct->status & IPS_NAT_MASK) {
+           set_h225_addr && nf_ct_l3num(ct) == NFPROTO_IPV4 &&
+           ct->status & IPS_NAT_MASK) {
                /* Calling ARQ */
-               return set_h225_addr(skb, data, 0,
+               return set_h225_addr(skb, protoff, data, 0,
                                     &arq->srcCallSignalAddress,
                                     &ct->tuplehash[!dir].tuple.dst.u3,
                                     port);
@@ -1517,6 +1572,7 @@ static int process_arq(struct sk_buff *skb, struct nf_conn *ct,
 /****************************************************************************/
 static int process_acf(struct sk_buff *skb, struct nf_conn *ct,
                       enum ip_conntrack_info ctinfo,
+                      unsigned int protoff,
                       unsigned char **data, AdmissionConfirm *acf)
 {
        int dir = CTINFO2DIR(ctinfo);
@@ -1535,8 +1591,9 @@ static int process_acf(struct sk_buff *skb, struct nf_conn *ct,
        if (!memcmp(&addr, &ct->tuplehash[dir].tuple.dst.u3, sizeof(addr))) {
                /* Answering ACF */
                set_sig_addr = rcu_dereference(set_sig_addr_hook);
-               if (set_sig_addr && ct->status & IPS_NAT_MASK)
-                       return set_sig_addr(skb, ct, ctinfo, data,
+               if (set_sig_addr && nf_ct_l3num(ct) == NFPROTO_IPV4 &&
+                   ct->status & IPS_NAT_MASK)
+                       return set_sig_addr(skb, ct, ctinfo, protoff, data,
                                            &acf->destCallSignalAddress, 1);
                return 0;
        }
@@ -1564,6 +1621,7 @@ static int process_acf(struct sk_buff *skb, struct nf_conn *ct,
 /****************************************************************************/
 static int process_lrq(struct sk_buff *skb, struct nf_conn *ct,
                       enum ip_conntrack_info ctinfo,
+                      unsigned int protoff,
                       unsigned char **data, LocationRequest *lrq)
 {
        typeof(set_ras_addr_hook) set_ras_addr;
@@ -1571,8 +1629,9 @@ static int process_lrq(struct sk_buff *skb, struct nf_conn *ct,
        pr_debug("nf_ct_ras: LRQ\n");
 
        set_ras_addr = rcu_dereference(set_ras_addr_hook);
-       if (set_ras_addr && ct->status & IPS_NAT_MASK)
-               return set_ras_addr(skb, ct, ctinfo, data,
+       if (set_ras_addr && nf_ct_l3num(ct) == NFPROTO_IPV4 &&
+           ct->status & IPS_NAT_MASK)
+               return set_ras_addr(skb, ct, ctinfo, protoff, data,
                                    &lrq->replyAddress, 1);
        return 0;
 }
@@ -1580,6 +1639,7 @@ static int process_lrq(struct sk_buff *skb, struct nf_conn *ct,
 /****************************************************************************/
 static int process_lcf(struct sk_buff *skb, struct nf_conn *ct,
                       enum ip_conntrack_info ctinfo,
+                      unsigned int protoff,
                       unsigned char **data, LocationConfirm *lcf)
 {
        int dir = CTINFO2DIR(ctinfo);
@@ -1619,6 +1679,7 @@ static int process_lcf(struct sk_buff *skb, struct nf_conn *ct,
 /****************************************************************************/
 static int process_irr(struct sk_buff *skb, struct nf_conn *ct,
                       enum ip_conntrack_info ctinfo,
+                      unsigned int protoff,
                       unsigned char **data, InfoRequestResponse *irr)
 {
        int ret;
@@ -1628,16 +1689,18 @@ static int process_irr(struct sk_buff *skb, struct nf_conn *ct,
        pr_debug("nf_ct_ras: IRR\n");
 
        set_ras_addr = rcu_dereference(set_ras_addr_hook);
-       if (set_ras_addr && ct->status & IPS_NAT_MASK) {
-               ret = set_ras_addr(skb, ct, ctinfo, data,
+       if (set_ras_addr && nf_ct_l3num(ct) == NFPROTO_IPV4 &&
+           ct->status & IPS_NAT_MASK) {
+               ret = set_ras_addr(skb, ct, ctinfo, protoff, data,
                                   &irr->rasAddress, 1);
                if (ret < 0)
                        return -1;
        }
 
        set_sig_addr = rcu_dereference(set_sig_addr_hook);
-       if (set_sig_addr && ct->status & IPS_NAT_MASK) {
-               ret = set_sig_addr(skb, ct, ctinfo, data,
+       if (set_sig_addr && nf_ct_l3num(ct) == NFPROTO_IPV4 &&
+           ct->status & IPS_NAT_MASK) {
+               ret = set_sig_addr(skb, ct, ctinfo, protoff, data,
                                        irr->callSignalAddress.item,
                                        irr->callSignalAddress.count);
                if (ret < 0)
@@ -1650,38 +1713,39 @@ static int process_irr(struct sk_buff *skb, struct nf_conn *ct,
 /****************************************************************************/
 static int process_ras(struct sk_buff *skb, struct nf_conn *ct,
                       enum ip_conntrack_info ctinfo,
+                      unsigned int protoff,
                       unsigned char **data, RasMessage *ras)
 {
        switch (ras->choice) {
        case eRasMessage_gatekeeperRequest:
-               return process_grq(skb, ct, ctinfo, data,
+               return process_grq(skb, ct, ctinfo, protoff, data,
                                   &ras->gatekeeperRequest);
        case eRasMessage_gatekeeperConfirm:
-               return process_gcf(skb, ct, ctinfo, data,
+               return process_gcf(skb, ct, ctinfo, protoff, data,
                                   &ras->gatekeeperConfirm);
        case eRasMessage_registrationRequest:
-               return process_rrq(skb, ct, ctinfo, data,
+               return process_rrq(skb, ct, ctinfo, protoff, data,
                                   &ras->registrationRequest);
        case eRasMessage_registrationConfirm:
-               return process_rcf(skb, ct, ctinfo, data,
+               return process_rcf(skb, ct, ctinfo, protoff, data,
                                   &ras->registrationConfirm);
        case eRasMessage_unregistrationRequest:
-               return process_urq(skb, ct, ctinfo, data,
+               return process_urq(skb, ct, ctinfo, protoff, data,
                                   &ras->unregistrationRequest);
        case eRasMessage_admissionRequest:
-               return process_arq(skb, ct, ctinfo, data,
+               return process_arq(skb, ct, ctinfo, protoff, data,
                                   &ras->admissionRequest);
        case eRasMessage_admissionConfirm:
-               return process_acf(skb, ct, ctinfo, data,
+               return process_acf(skb, ct, ctinfo, protoff, data,
                                   &ras->admissionConfirm);
        case eRasMessage_locationRequest:
-               return process_lrq(skb, ct, ctinfo, data,
+               return process_lrq(skb, ct, ctinfo, protoff, data,
                                   &ras->locationRequest);
        case eRasMessage_locationConfirm:
-               return process_lcf(skb, ct, ctinfo, data,
+               return process_lcf(skb, ct, ctinfo, protoff, data,
                                   &ras->locationConfirm);
        case eRasMessage_infoRequestResponse:
-               return process_irr(skb, ct, ctinfo, data,
+               return process_irr(skb, ct, ctinfo, protoff, data,
                                   &ras->infoRequestResponse);
        default:
                pr_debug("nf_ct_ras: RAS message %d\n", ras->choice);
@@ -1721,7 +1785,7 @@ static int ras_help(struct sk_buff *skb, unsigned int protoff,
        }
 
        /* Process RAS message */
-       if (process_ras(skb, ct, ctinfo, &data, &ras) < 0)
+       if (process_ras(skb, ct, ctinfo, protoff, &data, &ras) < 0)
                goto drop;
 
       accept:
index 009c52cfd1ec4b9b86a8dd7bf047f3eae249fd8a..3b20aa77cfc8b76ef1ed78b0b85aacd926f3ce29 100644 (file)
@@ -33,6 +33,7 @@ static DEFINE_SPINLOCK(irc_buffer_lock);
 
 unsigned int (*nf_nat_irc_hook)(struct sk_buff *skb,
                                enum ip_conntrack_info ctinfo,
+                               unsigned int protoff,
                                unsigned int matchoff,
                                unsigned int matchlen,
                                struct nf_conntrack_expect *exp) __read_mostly;
@@ -205,7 +206,7 @@ static int help(struct sk_buff *skb, unsigned int protoff,
 
                        nf_nat_irc = rcu_dereference(nf_nat_irc_hook);
                        if (nf_nat_irc && ct->status & IPS_NAT_MASK)
-                               ret = nf_nat_irc(skb, ctinfo,
+                               ret = nf_nat_irc(skb, ctinfo, protoff,
                                                 addr_beg_p - ib_ptr,
                                                 addr_end_p - addr_beg_p,
                                                 exp);
index 9807f3278fcbcdfcc28c61b9b19e6a8c74d02b18..7bbfb3deea305e4d8434c3af9a68ee6e4531c80f 100644 (file)
@@ -45,7 +45,7 @@
 #include <net/netfilter/nf_conntrack_timestamp.h>
 #ifdef CONFIG_NF_NAT_NEEDED
 #include <net/netfilter/nf_nat_core.h>
-#include <net/netfilter/nf_nat_protocol.h>
+#include <net/netfilter/nf_nat_l4proto.h>
 #include <net/netfilter/nf_nat_helper.h>
 #endif
 
@@ -418,16 +418,16 @@ nla_put_failure:
 }
 
 static int
-ctnetlink_fill_info(struct sk_buff *skb, u32 pid, u32 seq, u32 type,
+ctnetlink_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
                    struct nf_conn *ct)
 {
        struct nlmsghdr *nlh;
        struct nfgenmsg *nfmsg;
        struct nlattr *nest_parms;
-       unsigned int flags = pid ? NLM_F_MULTI : 0, event;
+       unsigned int flags = portid ? NLM_F_MULTI : 0, event;
 
        event = (NFNL_SUBSYS_CTNETLINK << 8 | IPCTNL_MSG_CT_NEW);
-       nlh = nlmsg_put(skb, pid, seq, event, sizeof(*nfmsg), flags);
+       nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
        if (nlh == NULL)
                goto nlmsg_failure;
 
@@ -604,7 +604,7 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
                goto errout;
 
        type |= NFNL_SUBSYS_CTNETLINK << 8;
-       nlh = nlmsg_put(skb, item->pid, 0, type, sizeof(*nfmsg), flags);
+       nlh = nlmsg_put(skb, item->portid, 0, type, sizeof(*nfmsg), flags);
        if (nlh == NULL)
                goto nlmsg_failure;
 
@@ -680,7 +680,7 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
        rcu_read_unlock();
 
        nlmsg_end(skb, nlh);
-       err = nfnetlink_send(skb, net, item->pid, group, item->report,
+       err = nfnetlink_send(skb, net, item->portid, group, item->report,
                             GFP_ATOMIC);
        if (err == -ENOBUFS || err == -EAGAIN)
                return -ENOBUFS;
@@ -757,7 +757,7 @@ restart:
 #endif
                        rcu_read_lock();
                        res =
-                       ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid,
+                       ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).portid,
                                            cb->nlh->nlmsg_seq,
                                            NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
                                            ct);
@@ -961,7 +961,7 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
        else {
                /* Flush the whole table */
                nf_conntrack_flush_report(net,
-                                        NETLINK_CB(skb).pid,
+                                        NETLINK_CB(skb).portid,
                                         nlmsg_report(nlh));
                return 0;
        }
@@ -985,7 +985,7 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
 
        if (del_timer(&ct->timeout)) {
                if (nf_conntrack_event_report(IPCT_DESTROY, ct,
-                                             NETLINK_CB(skb).pid,
+                                             NETLINK_CB(skb).portid,
                                              nlmsg_report(nlh)) < 0) {
                        nf_ct_delete_from_lists(ct);
                        /* we failed to report the event, try later */
@@ -1069,14 +1069,14 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
        }
 
        rcu_read_lock();
-       err = ctnetlink_fill_info(skb2, NETLINK_CB(skb).pid, nlh->nlmsg_seq,
+       err = ctnetlink_fill_info(skb2, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
                                  NFNL_MSG_TYPE(nlh->nlmsg_type), ct);
        rcu_read_unlock();
        nf_ct_put(ct);
        if (err <= 0)
                goto free;
 
-       err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
+       err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
        if (err < 0)
                goto out;
 
@@ -1096,13 +1096,14 @@ ctnetlink_parse_nat_setup(struct nf_conn *ct,
                          const struct nlattr *attr)
 {
        typeof(nfnetlink_parse_nat_setup_hook) parse_nat_setup;
+       int err;
 
        parse_nat_setup = rcu_dereference(nfnetlink_parse_nat_setup_hook);
        if (!parse_nat_setup) {
 #ifdef CONFIG_MODULES
                rcu_read_unlock();
                nfnl_unlock();
-               if (request_module("nf-nat-ipv4") < 0) {
+               if (request_module("nf-nat") < 0) {
                        nfnl_lock();
                        rcu_read_lock();
                        return -EOPNOTSUPP;
@@ -1115,7 +1116,23 @@ ctnetlink_parse_nat_setup(struct nf_conn *ct,
                return -EOPNOTSUPP;
        }
 
-       return parse_nat_setup(ct, manip, attr);
+       err = parse_nat_setup(ct, manip, attr);
+       if (err == -EAGAIN) {
+#ifdef CONFIG_MODULES
+               rcu_read_unlock();
+               nfnl_unlock();
+               if (request_module("nf-nat-%u", nf_ct_l3num(ct)) < 0) {
+                       nfnl_lock();
+                       rcu_read_lock();
+                       return -EOPNOTSUPP;
+               }
+               nfnl_lock();
+               rcu_read_lock();
+#else
+               err = -EOPNOTSUPP;
+#endif
+       }
+       return err;
 }
 #endif
 
@@ -1221,7 +1238,7 @@ ctnetlink_change_helper(struct nf_conn *ct, const struct nlattr * const cda[])
        if (help) {
                if (help->helper == helper) {
                        /* update private helper data if allowed. */
-                       if (helper->from_nlattr && helpinfo)
+                       if (helper->from_nlattr)
                                helper->from_nlattr(helpinfo, ct);
                        return 0;
                } else
@@ -1450,7 +1467,7 @@ ctnetlink_create_conntrack(struct net *net, u16 zone,
                                goto err2;
                        }
                        /* set private helper data if allowed. */
-                       if (helper->from_nlattr && helpinfo)
+                       if (helper->from_nlattr)
                                helper->from_nlattr(helpinfo, ct);
 
                        /* not in hash table yet so not strictly necessary */
@@ -1596,7 +1613,7 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
                                                      (1 << IPCT_PROTOINFO) |
                                                      (1 << IPCT_NATSEQADJ) |
                                                      (1 << IPCT_MARK) | events,
-                                                     ct, NETLINK_CB(skb).pid,
+                                                     ct, NETLINK_CB(skb).portid,
                                                      nlmsg_report(nlh));
                        nf_ct_put(ct);
                }
@@ -1618,7 +1635,7 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
                                                      (1 << IPCT_PROTOINFO) |
                                                      (1 << IPCT_NATSEQADJ) |
                                                      (1 << IPCT_MARK),
-                                                     ct, NETLINK_CB(skb).pid,
+                                                     ct, NETLINK_CB(skb).portid,
                                                      nlmsg_report(nlh));
                }
        }
@@ -1628,15 +1645,15 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
 }
 
 static int
-ctnetlink_ct_stat_cpu_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
+ctnetlink_ct_stat_cpu_fill_info(struct sk_buff *skb, u32 portid, u32 seq,
                                __u16 cpu, const struct ip_conntrack_stat *st)
 {
        struct nlmsghdr *nlh;
        struct nfgenmsg *nfmsg;
-       unsigned int flags = pid ? NLM_F_MULTI : 0, event;
+       unsigned int flags = portid ? NLM_F_MULTI : 0, event;
 
        event = (NFNL_SUBSYS_CTNETLINK << 8 | IPCTNL_MSG_CT_GET_STATS_CPU);
-       nlh = nlmsg_put(skb, pid, seq, event, sizeof(*nfmsg), flags);
+       nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
        if (nlh == NULL)
                goto nlmsg_failure;
 
@@ -1688,7 +1705,7 @@ ctnetlink_ct_stat_cpu_dump(struct sk_buff *skb, struct netlink_callback *cb)
 
                st = per_cpu_ptr(net->ct.stat, cpu);
                if (ctnetlink_ct_stat_cpu_fill_info(skb,
-                                                   NETLINK_CB(cb->skb).pid,
+                                                   NETLINK_CB(cb->skb).portid,
                                                    cb->nlh->nlmsg_seq,
                                                    cpu, st) < 0)
                                break;
@@ -1714,16 +1731,16 @@ ctnetlink_stat_ct_cpu(struct sock *ctnl, struct sk_buff *skb,
 }
 
 static int
-ctnetlink_stat_ct_fill_info(struct sk_buff *skb, u32 pid, u32 seq, u32 type,
+ctnetlink_stat_ct_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
                            struct net *net)
 {
        struct nlmsghdr *nlh;
        struct nfgenmsg *nfmsg;
-       unsigned int flags = pid ? NLM_F_MULTI : 0, event;
+       unsigned int flags = portid ? NLM_F_MULTI : 0, event;
        unsigned int nr_conntracks = atomic_read(&net->ct.count);
 
        event = (NFNL_SUBSYS_CTNETLINK << 8 | IPCTNL_MSG_CT_GET_STATS);
-       nlh = nlmsg_put(skb, pid, seq, event, sizeof(*nfmsg), flags);
+       nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
        if (nlh == NULL)
                goto nlmsg_failure;
 
@@ -1756,14 +1773,14 @@ ctnetlink_stat_ct(struct sock *ctnl, struct sk_buff *skb,
        if (skb2 == NULL)
                return -ENOMEM;
 
-       err = ctnetlink_stat_ct_fill_info(skb2, NETLINK_CB(skb).pid,
+       err = ctnetlink_stat_ct_fill_info(skb2, NETLINK_CB(skb).portid,
                                          nlh->nlmsg_seq,
                                          NFNL_MSG_TYPE(nlh->nlmsg_type),
                                          sock_net(skb->sk));
        if (err <= 0)
                goto free;
 
-       err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
+       err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
        if (err < 0)
                goto out;
 
@@ -1979,6 +1996,8 @@ nla_put_failure:
        return -1;
 }
 
+static const union nf_inet_addr any_addr;
+
 static int
 ctnetlink_exp_dump_expect(struct sk_buff *skb,
                          const struct nf_conntrack_expect *exp)
@@ -2005,7 +2024,8 @@ ctnetlink_exp_dump_expect(struct sk_buff *skb,
                goto nla_put_failure;
 
 #ifdef CONFIG_NF_NAT_NEEDED
-       if (exp->saved_ip || exp->saved_proto.all) {
+       if (!nf_inet_addr_cmp(&exp->saved_addr, &any_addr) ||
+           exp->saved_proto.all) {
                nest_parms = nla_nest_start(skb, CTA_EXPECT_NAT | NLA_F_NESTED);
                if (!nest_parms)
                        goto nla_put_failure;
@@ -2014,7 +2034,7 @@ ctnetlink_exp_dump_expect(struct sk_buff *skb,
                        goto nla_put_failure;
 
                nat_tuple.src.l3num = nf_ct_l3num(master);
-               nat_tuple.src.u3.ip = exp->saved_ip;
+               nat_tuple.src.u3 = exp->saved_addr;
                nat_tuple.dst.protonum = nf_ct_protonum(master);
                nat_tuple.src.u = exp->saved_proto;
 
@@ -2050,15 +2070,15 @@ nla_put_failure:
 }
 
 static int
-ctnetlink_exp_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
+ctnetlink_exp_fill_info(struct sk_buff *skb, u32 portid, u32 seq,
                        int event, const struct nf_conntrack_expect *exp)
 {
        struct nlmsghdr *nlh;
        struct nfgenmsg *nfmsg;
-       unsigned int flags = pid ? NLM_F_MULTI : 0;
+       unsigned int flags = portid ? NLM_F_MULTI : 0;
 
        event |= NFNL_SUBSYS_CTNETLINK_EXP << 8;
-       nlh = nlmsg_put(skb, pid, seq, event, sizeof(*nfmsg), flags);
+       nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
        if (nlh == NULL)
                goto nlmsg_failure;
 
@@ -2109,7 +2129,7 @@ ctnetlink_expect_event(unsigned int events, struct nf_exp_event *item)
                goto errout;
 
        type |= NFNL_SUBSYS_CTNETLINK_EXP << 8;
-       nlh = nlmsg_put(skb, item->pid, 0, type, sizeof(*nfmsg), flags);
+       nlh = nlmsg_put(skb, item->portid, 0, type, sizeof(*nfmsg), flags);
        if (nlh == NULL)
                goto nlmsg_failure;
 
@@ -2124,7 +2144,7 @@ ctnetlink_expect_event(unsigned int events, struct nf_exp_event *item)
        rcu_read_unlock();
 
        nlmsg_end(skb, nlh);
-       nfnetlink_send(skb, net, item->pid, group, item->report, GFP_ATOMIC);
+       nfnetlink_send(skb, net, item->portid, group, item->report, GFP_ATOMIC);
        return 0;
 
 nla_put_failure:
@@ -2167,7 +2187,7 @@ restart:
                                cb->args[1] = 0;
                        }
                        if (ctnetlink_exp_fill_info(skb,
-                                                   NETLINK_CB(cb->skb).pid,
+                                                   NETLINK_CB(cb->skb).portid,
                                                    cb->nlh->nlmsg_seq,
                                                    IPCTNL_MSG_EXP_NEW,
                                                    exp) < 0) {
@@ -2260,14 +2280,14 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
        }
 
        rcu_read_lock();
-       err = ctnetlink_exp_fill_info(skb2, NETLINK_CB(skb).pid,
+       err = ctnetlink_exp_fill_info(skb2, NETLINK_CB(skb).portid,
                                      nlh->nlmsg_seq, IPCTNL_MSG_EXP_NEW, exp);
        rcu_read_unlock();
        nf_ct_expect_put(exp);
        if (err <= 0)
                goto free;
 
-       err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
+       err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
        if (err < 0)
                goto out;
 
@@ -2321,7 +2341,7 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
                /* after list removal, usage count == 1 */
                spin_lock_bh(&nf_conntrack_lock);
                if (del_timer(&exp->timeout)) {
-                       nf_ct_unlink_expect_report(exp, NETLINK_CB(skb).pid,
+                       nf_ct_unlink_expect_report(exp, NETLINK_CB(skb).portid,
                                                   nlmsg_report(nlh));
                        nf_ct_expect_put(exp);
                }
@@ -2343,7 +2363,7 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
                                if (!strcmp(m_help->helper->name, name) &&
                                    del_timer(&exp->timeout)) {
                                        nf_ct_unlink_expect_report(exp,
-                                                       NETLINK_CB(skb).pid,
+                                                       NETLINK_CB(skb).portid,
                                                        nlmsg_report(nlh));
                                        nf_ct_expect_put(exp);
                                }
@@ -2359,7 +2379,7 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
                                                  hnode) {
                                if (del_timer(&exp->timeout)) {
                                        nf_ct_unlink_expect_report(exp,
-                                                       NETLINK_CB(skb).pid,
+                                                       NETLINK_CB(skb).portid,
                                                        nlmsg_report(nlh));
                                        nf_ct_expect_put(exp);
                                }
@@ -2410,7 +2430,7 @@ ctnetlink_parse_expect_nat(const struct nlattr *attr,
        if (err < 0)
                return err;
 
-       exp->saved_ip = nat_tuple.src.u3.ip;
+       exp->saved_addr = nat_tuple.src.u3;
        exp->saved_proto = nat_tuple.src.u;
        exp->dir = ntohl(nla_get_be32(tb[CTA_EXPECT_NAT_DIR]));
 
@@ -2424,7 +2444,7 @@ static int
 ctnetlink_create_expect(struct net *net, u16 zone,
                        const struct nlattr * const cda[],
                        u_int8_t u3,
-                       u32 pid, int report)
+                       u32 portid, int report)
 {
        struct nf_conntrack_tuple tuple, mask, master_tuple;
        struct nf_conntrack_tuple_hash *h = NULL;
@@ -2537,7 +2557,7 @@ ctnetlink_create_expect(struct net *net, u16 zone,
                if (err < 0)
                        goto err_out;
        }
-       err = nf_ct_expect_related_report(exp, pid, report);
+       err = nf_ct_expect_related_report(exp, portid, report);
 err_out:
        nf_ct_expect_put(exp);
 out:
@@ -2580,7 +2600,7 @@ ctnetlink_new_expect(struct sock *ctnl, struct sk_buff *skb,
                if (nlh->nlmsg_flags & NLM_F_CREATE) {
                        err = ctnetlink_create_expect(net, zone, cda,
                                                      u3,
-                                                     NETLINK_CB(skb).pid,
+                                                     NETLINK_CB(skb).portid,
                                                      nlmsg_report(nlh));
                }
                return err;
@@ -2595,15 +2615,15 @@ ctnetlink_new_expect(struct sock *ctnl, struct sk_buff *skb,
 }
 
 static int
-ctnetlink_exp_stat_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int cpu,
+ctnetlink_exp_stat_fill_info(struct sk_buff *skb, u32 portid, u32 seq, int cpu,
                             const struct ip_conntrack_stat *st)
 {
        struct nlmsghdr *nlh;
        struct nfgenmsg *nfmsg;
-       unsigned int flags = pid ? NLM_F_MULTI : 0, event;
+       unsigned int flags = portid ? NLM_F_MULTI : 0, event;
 
        event = (NFNL_SUBSYS_CTNETLINK << 8 | IPCTNL_MSG_EXP_GET_STATS_CPU);
-       nlh = nlmsg_put(skb, pid, seq, event, sizeof(*nfmsg), flags);
+       nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
        if (nlh == NULL)
                goto nlmsg_failure;
 
@@ -2642,7 +2662,7 @@ ctnetlink_exp_stat_cpu_dump(struct sk_buff *skb, struct netlink_callback *cb)
                        continue;
 
                st = per_cpu_ptr(net->ct.stat, cpu);
-               if (ctnetlink_exp_stat_fill_info(skb, NETLINK_CB(cb->skb).pid,
+               if (ctnetlink_exp_stat_fill_info(skb, NETLINK_CB(cb->skb).portid,
                                                 cb->nlh->nlmsg_seq,
                                                 cpu, st) < 0)
                        break;
index 6fed9ec35248ba2c000264164c7860851655a904..cc7669ef0b95d1d7a0dd6f2a03d5e59d9687689d 100644 (file)
@@ -45,14 +45,14 @@ static DEFINE_SPINLOCK(nf_pptp_lock);
 int
 (*nf_nat_pptp_hook_outbound)(struct sk_buff *skb,
                             struct nf_conn *ct, enum ip_conntrack_info ctinfo,
-                            struct PptpControlHeader *ctlh,
+                            unsigned int protoff, struct PptpControlHeader *ctlh,
                             union pptp_ctrl_union *pptpReq) __read_mostly;
 EXPORT_SYMBOL_GPL(nf_nat_pptp_hook_outbound);
 
 int
 (*nf_nat_pptp_hook_inbound)(struct sk_buff *skb,
                            struct nf_conn *ct, enum ip_conntrack_info ctinfo,
-                           struct PptpControlHeader *ctlh,
+                           unsigned int protoff, struct PptpControlHeader *ctlh,
                            union pptp_ctrl_union *pptpReq) __read_mostly;
 EXPORT_SYMBOL_GPL(nf_nat_pptp_hook_inbound);
 
@@ -262,7 +262,7 @@ out_unexpect_orig:
 }
 
 static inline int
-pptp_inbound_pkt(struct sk_buff *skb,
+pptp_inbound_pkt(struct sk_buff *skb, unsigned int protoff,
                 struct PptpControlHeader *ctlh,
                 union pptp_ctrl_union *pptpReq,
                 unsigned int reqlen,
@@ -376,7 +376,8 @@ pptp_inbound_pkt(struct sk_buff *skb,
 
        nf_nat_pptp_inbound = rcu_dereference(nf_nat_pptp_hook_inbound);
        if (nf_nat_pptp_inbound && ct->status & IPS_NAT_MASK)
-               return nf_nat_pptp_inbound(skb, ct, ctinfo, ctlh, pptpReq);
+               return nf_nat_pptp_inbound(skb, ct, ctinfo,
+                                          protoff, ctlh, pptpReq);
        return NF_ACCEPT;
 
 invalid:
@@ -389,7 +390,7 @@ invalid:
 }
 
 static inline int
-pptp_outbound_pkt(struct sk_buff *skb,
+pptp_outbound_pkt(struct sk_buff *skb, unsigned int protoff,
                  struct PptpControlHeader *ctlh,
                  union pptp_ctrl_union *pptpReq,
                  unsigned int reqlen,
@@ -471,7 +472,8 @@ pptp_outbound_pkt(struct sk_buff *skb,
 
        nf_nat_pptp_outbound = rcu_dereference(nf_nat_pptp_hook_outbound);
        if (nf_nat_pptp_outbound && ct->status & IPS_NAT_MASK)
-               return nf_nat_pptp_outbound(skb, ct, ctinfo, ctlh, pptpReq);
+               return nf_nat_pptp_outbound(skb, ct, ctinfo,
+                                           protoff, ctlh, pptpReq);
        return NF_ACCEPT;
 
 invalid:
@@ -570,11 +572,11 @@ conntrack_pptp_help(struct sk_buff *skb, unsigned int protoff,
         * established from PNS->PAC.  However, RFC makes no guarantee */
        if (dir == IP_CT_DIR_ORIGINAL)
                /* client -> server (PNS -> PAC) */
-               ret = pptp_outbound_pkt(skb, ctlh, pptpReq, reqlen, ct,
+               ret = pptp_outbound_pkt(skb, protoff, ctlh, pptpReq, reqlen, ct,
                                        ctinfo);
        else
                /* server -> client (PAC -> PNS) */
-               ret = pptp_inbound_pkt(skb, ctlh, pptpReq, reqlen, ct,
+               ret = pptp_inbound_pkt(skb, protoff, ctlh, pptpReq, reqlen, ct,
                                       ctinfo);
        pr_debug("sstate: %d->%d, cstate: %d->%d\n",
                 oldsstate, info->sstate, oldcstate, info->cstate);
index 0dc63854390f70f738b85df4c41bbbd2a9f314da..51e928db48c846f469da93ed70ed072807f3359f 100644 (file)
@@ -21,7 +21,6 @@
 #include <linux/notifier.h>
 #include <linux/kernel.h>
 #include <linux/netdevice.h>
-#include <linux/rtnetlink.h>
 
 #include <net/netfilter/nf_conntrack.h>
 #include <net/netfilter/nf_conntrack_l3proto.h>
@@ -294,9 +293,7 @@ void nf_conntrack_l3proto_unregister(struct net *net,
        nf_ct_l3proto_unregister_sysctl(net, proto);
 
        /* Remove all contrack entries for this protocol */
-       rtnl_lock();
        nf_ct_iterate_cleanup(net, kill_l3proto, proto);
-       rtnl_unlock();
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_l3proto_unregister);
 
@@ -502,9 +499,7 @@ void nf_conntrack_l4proto_unregister(struct net *net,
        nf_ct_l4proto_unregister_sysctl(net, pn, l4proto);
 
        /* Remove all contrack entries for this protocol */
-       rtnl_lock();
        nf_ct_iterate_cleanup(net, kill_l4proto, l4proto);
-       rtnl_unlock();
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_unregister);
 
index e046b3756aab755080d3edced132c459b7c8d4c4..61f9285111d19ae5b34c59b5d8f98366d0645086 100644 (file)
@@ -502,10 +502,10 @@ static inline s16 nat_offset(const struct nf_conn *ct,
 
        return get_offset != NULL ? get_offset(ct, dir, seq) : 0;
 }
-#define NAT_OFFSET(pf, ct, dir, seq) \
-       (pf == NFPROTO_IPV4 ? nat_offset(ct, dir, seq) : 0)
+#define NAT_OFFSET(ct, dir, seq) \
+       (nat_offset(ct, dir, seq))
 #else
-#define NAT_OFFSET(pf, ct, dir, seq)   0
+#define NAT_OFFSET(ct, dir, seq)       0
 #endif
 
 static bool tcp_in_window(const struct nf_conn *ct,
@@ -538,7 +538,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
                tcp_sack(skb, dataoff, tcph, &sack);
 
        /* Take into account NAT sequence number mangling */
-       receiver_offset = NAT_OFFSET(pf, ct, !dir, ack - 1);
+       receiver_offset = NAT_OFFSET(ct, !dir, ack - 1);
        ack -= receiver_offset;
        sack -= receiver_offset;
 
index 5c0a112aeee6adc580f0303b50e6821d12e8839e..df8f4f284481042800b3da96ab41bf3589ef512e 100644 (file)
@@ -52,15 +52,17 @@ module_param(sip_direct_media, int, 0600);
 MODULE_PARM_DESC(sip_direct_media, "Expect Media streams between signalling "
                                   "endpoints only (default 1)");
 
-unsigned int (*nf_nat_sip_hook)(struct sk_buff *skb, unsigned int dataoff,
-                               const char **dptr,
+unsigned int (*nf_nat_sip_hook)(struct sk_buff *skb, unsigned int protoff,
+                               unsigned int dataoff, const char **dptr,
                                unsigned int *datalen) __read_mostly;
 EXPORT_SYMBOL_GPL(nf_nat_sip_hook);
 
-void (*nf_nat_sip_seq_adjust_hook)(struct sk_buff *skb, s16 off) __read_mostly;
+void (*nf_nat_sip_seq_adjust_hook)(struct sk_buff *skb, unsigned int protoff,
+                                  s16 off) __read_mostly;
 EXPORT_SYMBOL_GPL(nf_nat_sip_seq_adjust_hook);
 
 unsigned int (*nf_nat_sip_expect_hook)(struct sk_buff *skb,
+                                      unsigned int protoff,
                                       unsigned int dataoff,
                                       const char **dptr,
                                       unsigned int *datalen,
@@ -69,7 +71,8 @@ unsigned int (*nf_nat_sip_expect_hook)(struct sk_buff *skb,
                                       unsigned int matchlen) __read_mostly;
 EXPORT_SYMBOL_GPL(nf_nat_sip_expect_hook);
 
-unsigned int (*nf_nat_sdp_addr_hook)(struct sk_buff *skb, unsigned int dataoff,
+unsigned int (*nf_nat_sdp_addr_hook)(struct sk_buff *skb, unsigned int protoff,
+                                    unsigned int dataoff,
                                     const char **dptr,
                                     unsigned int *datalen,
                                     unsigned int sdpoff,
@@ -79,7 +82,8 @@ unsigned int (*nf_nat_sdp_addr_hook)(struct sk_buff *skb, unsigned int dataoff,
                                     __read_mostly;
 EXPORT_SYMBOL_GPL(nf_nat_sdp_addr_hook);
 
-unsigned int (*nf_nat_sdp_port_hook)(struct sk_buff *skb, unsigned int dataoff,
+unsigned int (*nf_nat_sdp_port_hook)(struct sk_buff *skb, unsigned int protoff,
+                                    unsigned int dataoff,
                                     const char **dptr,
                                     unsigned int *datalen,
                                     unsigned int matchoff,
@@ -88,6 +92,7 @@ unsigned int (*nf_nat_sdp_port_hook)(struct sk_buff *skb, unsigned int dataoff,
 EXPORT_SYMBOL_GPL(nf_nat_sdp_port_hook);
 
 unsigned int (*nf_nat_sdp_session_hook)(struct sk_buff *skb,
+                                       unsigned int protoff,
                                        unsigned int dataoff,
                                        const char **dptr,
                                        unsigned int *datalen,
@@ -96,7 +101,8 @@ unsigned int (*nf_nat_sdp_session_hook)(struct sk_buff *skb,
                                        __read_mostly;
 EXPORT_SYMBOL_GPL(nf_nat_sdp_session_hook);
 
-unsigned int (*nf_nat_sdp_media_hook)(struct sk_buff *skb, unsigned int dataoff,
+unsigned int (*nf_nat_sdp_media_hook)(struct sk_buff *skb, unsigned int protoff,
+                                     unsigned int dataoff,
                                      const char **dptr,
                                      unsigned int *datalen,
                                      struct nf_conntrack_expect *rtp_exp,
@@ -737,13 +743,18 @@ static int sdp_addr_len(const struct nf_conn *ct, const char *dptr,
  * be tolerant and also accept records terminated with a single newline
  * character". We handle both cases.
  */
-static const struct sip_header ct_sdp_hdrs[] = {
-       [SDP_HDR_VERSION]               = SDP_HDR("v=", NULL, digits_len),
-       [SDP_HDR_OWNER_IP4]             = SDP_HDR("o=", "IN IP4 ", sdp_addr_len),
-       [SDP_HDR_CONNECTION_IP4]        = SDP_HDR("c=", "IN IP4 ", sdp_addr_len),
-       [SDP_HDR_OWNER_IP6]             = SDP_HDR("o=", "IN IP6 ", sdp_addr_len),
-       [SDP_HDR_CONNECTION_IP6]        = SDP_HDR("c=", "IN IP6 ", sdp_addr_len),
-       [SDP_HDR_MEDIA]                 = SDP_HDR("m=", NULL, media_len),
+static const struct sip_header ct_sdp_hdrs_v4[] = {
+       [SDP_HDR_VERSION]       = SDP_HDR("v=", NULL, digits_len),
+       [SDP_HDR_OWNER]         = SDP_HDR("o=", "IN IP4 ", sdp_addr_len),
+       [SDP_HDR_CONNECTION]    = SDP_HDR("c=", "IN IP4 ", sdp_addr_len),
+       [SDP_HDR_MEDIA]         = SDP_HDR("m=", NULL, media_len),
+};
+
+static const struct sip_header ct_sdp_hdrs_v6[] = {
+       [SDP_HDR_VERSION]       = SDP_HDR("v=", NULL, digits_len),
+       [SDP_HDR_OWNER]         = SDP_HDR("o=", "IN IP6 ", sdp_addr_len),
+       [SDP_HDR_CONNECTION]    = SDP_HDR("c=", "IN IP6 ", sdp_addr_len),
+       [SDP_HDR_MEDIA]         = SDP_HDR("m=", NULL, media_len),
 };
 
 /* Linear string search within SDP header values */
@@ -769,11 +780,14 @@ int ct_sip_get_sdp_header(const struct nf_conn *ct, const char *dptr,
                          enum sdp_header_types term,
                          unsigned int *matchoff, unsigned int *matchlen)
 {
-       const struct sip_header *hdr = &ct_sdp_hdrs[type];
-       const struct sip_header *thdr = &ct_sdp_hdrs[term];
+       const struct sip_header *hdrs, *hdr, *thdr;
        const char *start = dptr, *limit = dptr + datalen;
        int shift = 0;
 
+       hdrs = nf_ct_l3num(ct) == NFPROTO_IPV4 ? ct_sdp_hdrs_v4 : ct_sdp_hdrs_v6;
+       hdr = &hdrs[type];
+       thdr = &hdrs[term];
+
        for (dptr += dataoff; dptr < limit; dptr++) {
                /* Find beginning of line */
                if (*dptr != '\r' && *dptr != '\n')
@@ -883,7 +897,8 @@ static void flush_expectations(struct nf_conn *ct, bool media)
        spin_unlock_bh(&nf_conntrack_lock);
 }
 
-static int set_expected_rtp_rtcp(struct sk_buff *skb, unsigned int dataoff,
+static int set_expected_rtp_rtcp(struct sk_buff *skb, unsigned int protoff,
+                                unsigned int dataoff,
                                 const char **dptr, unsigned int *datalen,
                                 union nf_inet_addr *daddr, __be16 port,
                                 enum sip_expectation_classes class,
@@ -939,12 +954,12 @@ static int set_expected_rtp_rtcp(struct sk_buff *skb, unsigned int dataoff,
                    exp->class != class)
                        break;
 #ifdef CONFIG_NF_NAT_NEEDED
-               if (exp->tuple.src.l3num == AF_INET && !direct_rtp &&
-                   (exp->saved_ip != exp->tuple.dst.u3.ip ||
+               if (!direct_rtp &&
+                   (!nf_inet_addr_cmp(&exp->saved_addr, &exp->tuple.dst.u3) ||
                     exp->saved_proto.udp.port != exp->tuple.dst.u.udp.port) &&
                    ct->status & IPS_NAT_MASK) {
-                       daddr->ip               = exp->saved_ip;
-                       tuple.dst.u3.ip         = exp->saved_ip;
+                       *daddr                  = exp->saved_addr;
+                       tuple.dst.u3            = exp->saved_addr;
                        tuple.dst.u.udp.port    = exp->saved_proto.udp.port;
                        direct_rtp = 1;
                } else
@@ -960,7 +975,7 @@ static int set_expected_rtp_rtcp(struct sk_buff *skb, unsigned int dataoff,
        if (direct_rtp) {
                nf_nat_sdp_port = rcu_dereference(nf_nat_sdp_port_hook);
                if (nf_nat_sdp_port &&
-                   !nf_nat_sdp_port(skb, dataoff, dptr, datalen,
+                   !nf_nat_sdp_port(skb, protoff, dataoff, dptr, datalen,
                                     mediaoff, medialen, ntohs(rtp_port)))
                        goto err1;
        }
@@ -982,7 +997,7 @@ static int set_expected_rtp_rtcp(struct sk_buff *skb, unsigned int dataoff,
 
        nf_nat_sdp_media = rcu_dereference(nf_nat_sdp_media_hook);
        if (nf_nat_sdp_media && ct->status & IPS_NAT_MASK && !direct_rtp)
-               ret = nf_nat_sdp_media(skb, dataoff, dptr, datalen,
+               ret = nf_nat_sdp_media(skb, protoff, dataoff, dptr, datalen,
                                       rtp_exp, rtcp_exp,
                                       mediaoff, medialen, daddr);
        else {
@@ -1023,7 +1038,8 @@ static const struct sdp_media_type *sdp_media_type(const char *dptr,
        return NULL;
 }
 
-static int process_sdp(struct sk_buff *skb, unsigned int dataoff,
+static int process_sdp(struct sk_buff *skb, unsigned int protoff,
+                      unsigned int dataoff,
                       const char **dptr, unsigned int *datalen,
                       unsigned int cseq)
 {
@@ -1036,15 +1052,12 @@ static int process_sdp(struct sk_buff *skb, unsigned int dataoff,
        unsigned int i;
        union nf_inet_addr caddr, maddr, rtp_addr;
        unsigned int port;
-       enum sdp_header_types c_hdr;
        const struct sdp_media_type *t;
        int ret = NF_ACCEPT;
        typeof(nf_nat_sdp_addr_hook) nf_nat_sdp_addr;
        typeof(nf_nat_sdp_session_hook) nf_nat_sdp_session;
 
        nf_nat_sdp_addr = rcu_dereference(nf_nat_sdp_addr_hook);
-       c_hdr = nf_ct_l3num(ct) == AF_INET ? SDP_HDR_CONNECTION_IP4 :
-                                            SDP_HDR_CONNECTION_IP6;
 
        /* Find beginning of session description */
        if (ct_sip_get_sdp_header(ct, *dptr, 0, *datalen,
@@ -1058,7 +1071,7 @@ static int process_sdp(struct sk_buff *skb, unsigned int dataoff,
         * the end of the session description. */
        caddr_len = 0;
        if (ct_sip_parse_sdp_addr(ct, *dptr, sdpoff, *datalen,
-                                 c_hdr, SDP_HDR_MEDIA,
+                                 SDP_HDR_CONNECTION, SDP_HDR_MEDIA,
                                  &matchoff, &matchlen, &caddr) > 0)
                caddr_len = matchlen;
 
@@ -1088,7 +1101,7 @@ static int process_sdp(struct sk_buff *skb, unsigned int dataoff,
                /* The media description overrides the session description. */
                maddr_len = 0;
                if (ct_sip_parse_sdp_addr(ct, *dptr, mediaoff, *datalen,
-                                         c_hdr, SDP_HDR_MEDIA,
+                                         SDP_HDR_CONNECTION, SDP_HDR_MEDIA,
                                          &matchoff, &matchlen, &maddr) > 0) {
                        maddr_len = matchlen;
                        memcpy(&rtp_addr, &maddr, sizeof(rtp_addr));
@@ -1097,7 +1110,8 @@ static int process_sdp(struct sk_buff *skb, unsigned int dataoff,
                else
                        return NF_DROP;
 
-               ret = set_expected_rtp_rtcp(skb, dataoff, dptr, datalen,
+               ret = set_expected_rtp_rtcp(skb, protoff, dataoff,
+                                           dptr, datalen,
                                            &rtp_addr, htons(port), t->class,
                                            mediaoff, medialen);
                if (ret != NF_ACCEPT)
@@ -1105,8 +1119,9 @@ static int process_sdp(struct sk_buff *skb, unsigned int dataoff,
 
                /* Update media connection address if present */
                if (maddr_len && nf_nat_sdp_addr && ct->status & IPS_NAT_MASK) {
-                       ret = nf_nat_sdp_addr(skb, dataoff, dptr, datalen,
-                                             mediaoff, c_hdr, SDP_HDR_MEDIA,
+                       ret = nf_nat_sdp_addr(skb, protoff, dataoff,
+                                             dptr, datalen, mediaoff,
+                                             SDP_HDR_CONNECTION, SDP_HDR_MEDIA,
                                              &rtp_addr);
                        if (ret != NF_ACCEPT)
                                return ret;
@@ -1117,12 +1132,13 @@ static int process_sdp(struct sk_buff *skb, unsigned int dataoff,
        /* Update session connection and owner addresses */
        nf_nat_sdp_session = rcu_dereference(nf_nat_sdp_session_hook);
        if (nf_nat_sdp_session && ct->status & IPS_NAT_MASK)
-               ret = nf_nat_sdp_session(skb, dataoff, dptr, datalen, sdpoff,
-                                        &rtp_addr);
+               ret = nf_nat_sdp_session(skb, protoff, dataoff,
+                                        dptr, datalen, sdpoff, &rtp_addr);
 
        return ret;
 }
-static int process_invite_response(struct sk_buff *skb, unsigned int dataoff,
+static int process_invite_response(struct sk_buff *skb, unsigned int protoff,
+                                  unsigned int dataoff,
                                   const char **dptr, unsigned int *datalen,
                                   unsigned int cseq, unsigned int code)
 {
@@ -1132,13 +1148,14 @@ static int process_invite_response(struct sk_buff *skb, unsigned int dataoff,
 
        if ((code >= 100 && code <= 199) ||
            (code >= 200 && code <= 299))
-               return process_sdp(skb, dataoff, dptr, datalen, cseq);
+               return process_sdp(skb, protoff, dataoff, dptr, datalen, cseq);
        else if (ct_sip_info->invite_cseq == cseq)
                flush_expectations(ct, true);
        return NF_ACCEPT;
 }
 
-static int process_update_response(struct sk_buff *skb, unsigned int dataoff,
+static int process_update_response(struct sk_buff *skb, unsigned int protoff,
+                                  unsigned int dataoff,
                                   const char **dptr, unsigned int *datalen,
                                   unsigned int cseq, unsigned int code)
 {
@@ -1148,13 +1165,14 @@ static int process_update_response(struct sk_buff *skb, unsigned int dataoff,
 
        if ((code >= 100 && code <= 199) ||
            (code >= 200 && code <= 299))
-               return process_sdp(skb, dataoff, dptr, datalen, cseq);
+               return process_sdp(skb, protoff, dataoff, dptr, datalen, cseq);
        else if (ct_sip_info->invite_cseq == cseq)
                flush_expectations(ct, true);
        return NF_ACCEPT;
 }
 
-static int process_prack_response(struct sk_buff *skb, unsigned int dataoff,
+static int process_prack_response(struct sk_buff *skb, unsigned int protoff,
+                                 unsigned int dataoff,
                                  const char **dptr, unsigned int *datalen,
                                  unsigned int cseq, unsigned int code)
 {
@@ -1164,13 +1182,14 @@ static int process_prack_response(struct sk_buff *skb, unsigned int dataoff,
 
        if ((code >= 100 && code <= 199) ||
            (code >= 200 && code <= 299))
-               return process_sdp(skb, dataoff, dptr, datalen, cseq);
+               return process_sdp(skb, protoff, dataoff, dptr, datalen, cseq);
        else if (ct_sip_info->invite_cseq == cseq)
                flush_expectations(ct, true);
        return NF_ACCEPT;
 }
 
-static int process_invite_request(struct sk_buff *skb, unsigned int dataoff,
+static int process_invite_request(struct sk_buff *skb, unsigned int protoff,
+                                 unsigned int dataoff,
                                  const char **dptr, unsigned int *datalen,
                                  unsigned int cseq)
 {
@@ -1180,13 +1199,14 @@ static int process_invite_request(struct sk_buff *skb, unsigned int dataoff,
        unsigned int ret;
 
        flush_expectations(ct, true);
-       ret = process_sdp(skb, dataoff, dptr, datalen, cseq);
+       ret = process_sdp(skb, protoff, dataoff, dptr, datalen, cseq);
        if (ret == NF_ACCEPT)
                ct_sip_info->invite_cseq = cseq;
        return ret;
 }
 
-static int process_bye_request(struct sk_buff *skb, unsigned int dataoff,
+static int process_bye_request(struct sk_buff *skb, unsigned int protoff,
+                              unsigned int dataoff,
                               const char **dptr, unsigned int *datalen,
                               unsigned int cseq)
 {
@@ -1201,7 +1221,8 @@ static int process_bye_request(struct sk_buff *skb, unsigned int dataoff,
  * signalling connections. The expectation is marked inactive and is activated
  * when receiving a response indicating success from the registrar.
  */
-static int process_register_request(struct sk_buff *skb, unsigned int dataoff,
+static int process_register_request(struct sk_buff *skb, unsigned int protoff,
+                                   unsigned int dataoff,
                                    const char **dptr, unsigned int *datalen,
                                    unsigned int cseq)
 {
@@ -1276,8 +1297,8 @@ static int process_register_request(struct sk_buff *skb, unsigned int dataoff,
 
        nf_nat_sip_expect = rcu_dereference(nf_nat_sip_expect_hook);
        if (nf_nat_sip_expect && ct->status & IPS_NAT_MASK)
-               ret = nf_nat_sip_expect(skb, dataoff, dptr, datalen, exp,
-                                       matchoff, matchlen);
+               ret = nf_nat_sip_expect(skb, protoff, dataoff, dptr, datalen,
+                                       exp, matchoff, matchlen);
        else {
                if (nf_ct_expect_related(exp) != 0)
                        ret = NF_DROP;
@@ -1292,7 +1313,8 @@ store_cseq:
        return ret;
 }
 
-static int process_register_response(struct sk_buff *skb, unsigned int dataoff,
+static int process_register_response(struct sk_buff *skb, unsigned int protoff,
+                                    unsigned int dataoff,
                                     const char **dptr, unsigned int *datalen,
                                     unsigned int cseq, unsigned int code)
 {
@@ -1374,7 +1396,8 @@ static const struct sip_handler sip_handlers[] = {
        SIP_HANDLER("REGISTER", process_register_request, process_register_response),
 };
 
-static int process_sip_response(struct sk_buff *skb, unsigned int dataoff,
+static int process_sip_response(struct sk_buff *skb, unsigned int protoff,
+                               unsigned int dataoff,
                                const char **dptr, unsigned int *datalen)
 {
        enum ip_conntrack_info ctinfo;
@@ -1405,13 +1428,14 @@ static int process_sip_response(struct sk_buff *skb, unsigned int dataoff,
                if (*datalen < matchend + handler->len ||
                    strnicmp(*dptr + matchend, handler->method, handler->len))
                        continue;
-               return handler->response(skb, dataoff, dptr, datalen,
+               return handler->response(skb, protoff, dataoff, dptr, datalen,
                                         cseq, code);
        }
        return NF_ACCEPT;
 }
 
-static int process_sip_request(struct sk_buff *skb, unsigned int dataoff,
+static int process_sip_request(struct sk_buff *skb, unsigned int protoff,
+                              unsigned int dataoff,
                               const char **dptr, unsigned int *datalen)
 {
        enum ip_conntrack_info ctinfo;
@@ -1436,26 +1460,28 @@ static int process_sip_request(struct sk_buff *skb, unsigned int dataoff,
                if (!cseq)
                        return NF_DROP;
 
-               return handler->request(skb, dataoff, dptr, datalen, cseq);
+               return handler->request(skb, protoff, dataoff, dptr, datalen,
+                                       cseq);
        }
        return NF_ACCEPT;
 }
 
 static int process_sip_msg(struct sk_buff *skb, struct nf_conn *ct,
-                          unsigned int dataoff, const char **dptr,
-                          unsigned int *datalen)
+                          unsigned int protoff, unsigned int dataoff,
+                          const char **dptr, unsigned int *datalen)
 {
        typeof(nf_nat_sip_hook) nf_nat_sip;
        int ret;
 
        if (strnicmp(*dptr, "SIP/2.0 ", strlen("SIP/2.0 ")) != 0)
-               ret = process_sip_request(skb, dataoff, dptr, datalen);
+               ret = process_sip_request(skb, protoff, dataoff, dptr, datalen);
        else
-               ret = process_sip_response(skb, dataoff, dptr, datalen);
+               ret = process_sip_response(skb, protoff, dataoff, dptr, datalen);
 
        if (ret == NF_ACCEPT && ct->status & IPS_NAT_MASK) {
                nf_nat_sip = rcu_dereference(nf_nat_sip_hook);
-               if (nf_nat_sip && !nf_nat_sip(skb, dataoff, dptr, datalen))
+               if (nf_nat_sip && !nf_nat_sip(skb, protoff, dataoff,
+                                             dptr, datalen))
                        ret = NF_DROP;
        }
 
@@ -1523,7 +1549,8 @@ static int sip_help_tcp(struct sk_buff *skb, unsigned int protoff,
                if (msglen > datalen)
                        return NF_DROP;
 
-               ret = process_sip_msg(skb, ct, dataoff, &dptr, &msglen);
+               ret = process_sip_msg(skb, ct, protoff, dataoff,
+                                     &dptr, &msglen);
                if (ret != NF_ACCEPT)
                        break;
                diff     = msglen - origlen;
@@ -1537,7 +1564,7 @@ static int sip_help_tcp(struct sk_buff *skb, unsigned int protoff,
        if (ret == NF_ACCEPT && ct->status & IPS_NAT_MASK) {
                nf_nat_sip_seq_adjust = rcu_dereference(nf_nat_sip_seq_adjust_hook);
                if (nf_nat_sip_seq_adjust)
-                       nf_nat_sip_seq_adjust(skb, tdiff);
+                       nf_nat_sip_seq_adjust(skb, protoff, tdiff);
        }
 
        return ret;
@@ -1564,7 +1591,7 @@ static int sip_help_udp(struct sk_buff *skb, unsigned int protoff,
        if (datalen < strlen("SIP/2.0 200"))
                return NF_ACCEPT;
 
-       return process_sip_msg(skb, ct, dataoff, &dptr, &datalen);
+       return process_sip_msg(skb, ct, protoff, dataoff, &dptr, &datalen);
 }
 
 static struct nf_conntrack_helper sip[MAX_PORTS][4] __read_mostly;
index 770f76432ad02b89904646d80ed7c03571bccc16..3deec997be89e32770750abc504922761d096935 100644 (file)
@@ -18,13 +18,13 @@ extern unsigned int nf_iterate(struct list_head *head,
                                unsigned int hook,
                                const struct net_device *indev,
                                const struct net_device *outdev,
-                               struct list_head **i,
+                               struct nf_hook_ops **elemp,
                                int (*okfn)(struct sk_buff *),
                                int hook_thresh);
 
 /* nf_queue.c */
 extern int nf_queue(struct sk_buff *skb,
-                   struct list_head *elem,
+                   struct nf_hook_ops *elem,
                    u_int8_t pf, unsigned int hook,
                    struct net_device *indev,
                    struct net_device *outdev,
diff --git a/net/netfilter/nf_nat_amanda.c b/net/netfilter/nf_nat_amanda.c
new file mode 100644 (file)
index 0000000..42d3378
--- /dev/null
@@ -0,0 +1,85 @@
+/* Amanda extension for TCP NAT alteration.
+ * (C) 2002 by Brian J. Murrell <netfilter@interlinx.bc.ca>
+ * based on a copy of HW's ip_nat_irc.c as well as other modules
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/udp.h>
+
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_expect.h>
+#include <net/netfilter/nf_nat_helper.h>
+#include <linux/netfilter/nf_conntrack_amanda.h>
+
+MODULE_AUTHOR("Brian J. Murrell <netfilter@interlinx.bc.ca>");
+MODULE_DESCRIPTION("Amanda NAT helper");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ip_nat_amanda");
+
+static unsigned int help(struct sk_buff *skb,
+                        enum ip_conntrack_info ctinfo,
+                        unsigned int protoff,
+                        unsigned int matchoff,
+                        unsigned int matchlen,
+                        struct nf_conntrack_expect *exp)
+{
+       char buffer[sizeof("65535")];
+       u_int16_t port;
+       unsigned int ret;
+
+       /* Connection comes from client. */
+       exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port;
+       exp->dir = IP_CT_DIR_ORIGINAL;
+
+       /* When you see the packet, we need to NAT it the same as the
+        * this one (ie. same IP: it will be TCP and master is UDP). */
+       exp->expectfn = nf_nat_follow_master;
+
+       /* Try to get same port: if not, try to change it. */
+       for (port = ntohs(exp->saved_proto.tcp.port); port != 0; port++) {
+               int res;
+
+               exp->tuple.dst.u.tcp.port = htons(port);
+               res = nf_ct_expect_related(exp);
+               if (res == 0)
+                       break;
+               else if (res != -EBUSY) {
+                       port = 0;
+                       break;
+               }
+       }
+
+       if (port == 0)
+               return NF_DROP;
+
+       sprintf(buffer, "%u", port);
+       ret = nf_nat_mangle_udp_packet(skb, exp->master, ctinfo,
+                                      protoff, matchoff, matchlen,
+                                      buffer, strlen(buffer));
+       if (ret != NF_ACCEPT)
+               nf_ct_unexpect_related(exp);
+       return ret;
+}
+
+static void __exit nf_nat_amanda_fini(void)
+{
+       RCU_INIT_POINTER(nf_nat_amanda_hook, NULL);
+       synchronize_rcu();
+}
+
+static int __init nf_nat_amanda_init(void)
+{
+       BUG_ON(nf_nat_amanda_hook != NULL);
+       RCU_INIT_POINTER(nf_nat_amanda_hook, help);
+       return 0;
+}
+
+module_init(nf_nat_amanda_init);
+module_exit(nf_nat_amanda_fini);
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
new file mode 100644 (file)
index 0000000..5f2f910
--- /dev/null
@@ -0,0 +1,856 @@
+/*
+ * (C) 1999-2001 Paul `Rusty' Russell
+ * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
+ * (C) 2011 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/timer.h>
+#include <linux/skbuff.h>
+#include <linux/gfp.h>
+#include <net/xfrm.h>
+#include <linux/jhash.h>
+#include <linux/rtnetlink.h>
+
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nf_nat_l3proto.h>
+#include <net/netfilter/nf_nat_l4proto.h>
+#include <net/netfilter/nf_nat_core.h>
+#include <net/netfilter/nf_nat_helper.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_l3proto.h>
+#include <net/netfilter/nf_conntrack_zones.h>
+#include <linux/netfilter/nf_nat.h>
+
+/* Protects modifications of the per-net bysource hash chains. */
+static DEFINE_SPINLOCK(nf_nat_lock);
+
+/* Serializes (un)registration of the l3/l4 protocol tables below. */
+static DEFINE_MUTEX(nf_nat_proto_mutex);
+static const struct nf_nat_l3proto __rcu *nf_nat_l3protos[NFPROTO_NUMPROTO]
+                                               __read_mostly;
+static const struct nf_nat_l4proto __rcu **nf_nat_l4protos[NFPROTO_NUMPROTO]
+                                               __read_mostly;
+
+
+/* Look up the l3 protocol ops for @family; caller must hold
+ * rcu_read_lock().  May return NULL if nothing is registered.
+ */
+inline const struct nf_nat_l3proto *
+__nf_nat_l3proto_find(u8 family)
+{
+       return rcu_dereference(nf_nat_l3protos[family]);
+}
+
+/* Look up the l4 protocol ops for @family/@protonum; caller must hold
+ * rcu_read_lock() and ensure the family table exists.
+ */
+inline const struct nf_nat_l4proto *
+__nf_nat_l4proto_find(u8 family, u8 protonum)
+{
+       return rcu_dereference(nf_nat_l4protos[family][protonum]);
+}
+EXPORT_SYMBOL_GPL(__nf_nat_l4proto_find);
+
+#ifdef CONFIG_XFRM
+/* Decode-session callback: adjust the flow key @fl for a NATed
+ * connection so subsequent xfrm policy lookups match the mapped
+ * tuple.  Installed as nf_nat_decode_session_hook in nf_nat_init().
+ */
+static void __nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl)
+{
+       const struct nf_nat_l3proto *l3proto;
+       const struct nf_conn *ct;
+       enum ip_conntrack_info ctinfo;
+       enum ip_conntrack_dir dir;
+       unsigned  long statusbit;
+       u8 family;
+
+       ct = nf_ct_get(skb, &ctinfo);
+       if (ct == NULL)
+               return;
+
+       family = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num;
+       rcu_read_lock();
+       l3proto = __nf_nat_l3proto_find(family);
+       if (l3proto == NULL)
+               goto out;
+
+       /* Select the NAT status bit relevant for this direction; the
+        * l3proto callback is handed it together with the skb.
+        */
+       dir = CTINFO2DIR(ctinfo);
+       if (dir == IP_CT_DIR_ORIGINAL)
+               statusbit = IPS_DST_NAT;
+       else
+               statusbit = IPS_SRC_NAT;
+
+       l3proto->decode_session(skb, ct, dir, statusbit, fl);
+out:
+       rcu_read_unlock();
+}
+
+/* Re-route @skb through the xfrm stack after NAT has rewritten its
+ * addresses, replacing the skb's dst.  Returns 0 on success, -1 if
+ * session decode, the xfrm lookup or headroom expansion fails.
+ */
+int nf_xfrm_me_harder(struct sk_buff *skb, unsigned int family)
+{
+       struct flowi fl;
+       unsigned int hh_len;
+       struct dst_entry *dst;
+
+       if (xfrm_decode_session(skb, &fl, family) < 0)
+               return -1;
+
+       dst = skb_dst(skb);
+       if (dst->xfrm)
+               dst = ((struct xfrm_dst *)dst)->route;
+       dst_hold(dst);
+
+       dst = xfrm_lookup(dev_net(dst->dev), dst, &fl, skb->sk, 0);
+       if (IS_ERR(dst))
+               return -1;
+
+       skb_dst_drop(skb);
+       skb_dst_set(skb, dst);
+
+       /* Change in oif may mean change in hh_len. */
+       hh_len = skb_dst(skb)->dev->hard_header_len;
+       if (skb_headroom(skb) < hh_len &&
+           pskb_expand_head(skb, hh_len - skb_headroom(skb), 0, GFP_ATOMIC))
+               return -1;
+       return 0;
+}
+EXPORT_SYMBOL(nf_xfrm_me_harder);
+#endif /* CONFIG_XFRM */
+
+/* We keep an extra hash for each conntrack, for fast searching. */
+static inline unsigned int
+hash_by_src(const struct net *net, u16 zone,
+           const struct nf_conntrack_tuple *tuple)
+{
+       unsigned int hash;
+
+       /* Original src, to ensure we map it consistently if poss. */
+       hash = jhash2((u32 *)&tuple->src, sizeof(tuple->src) / sizeof(u32),
+                     tuple->dst.protonum ^ zone ^ nf_conntrack_hash_rnd);
+       /* Scale the 32-bit hash into [0, nat_htable_size) without a
+        * modulo, via a 64-bit multiply-shift.
+        */
+       return ((u64)hash * net->ct.nat_htable_size) >> 32;
+}
+
+/* Is this tuple already taken? (not by us)
+ * Returns non-zero when some other conntrack than @ignored_conntrack
+ * would already answer to the inverse of @tuple.
+ */
+int
+nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple,
+                 const struct nf_conn *ignored_conntrack)
+{
+       /* Conntrack tracking doesn't keep track of outgoing tuples; only
+        * incoming ones.  NAT means they don't have a fixed mapping,
+        * so we invert the tuple and look for the incoming reply.
+        *
+        * We could keep a separate hash if this proves too slow.
+        */
+       struct nf_conntrack_tuple reply;
+
+       nf_ct_invert_tuplepr(&reply, tuple);
+       return nf_conntrack_tuple_taken(&reply, ignored_conntrack);
+}
+EXPORT_SYMBOL(nf_nat_used_tuple);
+
+/* If we source map this tuple so reply looks like reply_tuple, will
+ * that meet the constraints of range.
+ * Returns 1 when @tuple satisfies both the IP and (if specified) the
+ * per-protocol constraints of @range, 0 otherwise.
+ */
+static int in_range(const struct nf_nat_l3proto *l3proto,
+                   const struct nf_nat_l4proto *l4proto,
+                   const struct nf_conntrack_tuple *tuple,
+                   const struct nf_nat_range *range)
+{
+       /* If we are supposed to map IPs, then we must be in the
+        * range specified, otherwise let this drag us onto a new src IP.
+        */
+       if (range->flags & NF_NAT_RANGE_MAP_IPS &&
+           !l3proto->in_range(tuple, range))
+               return 0;
+
+       /* Without NF_NAT_RANGE_PROTO_SPECIFIED any port/id is fine. */
+       if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) ||
+           l4proto->in_range(tuple, NF_NAT_MANIP_SRC,
+                             &range->min_proto, &range->max_proto))
+               return 1;
+
+       return 0;
+}
+
+/* Does @ct's original-direction tuple have the same protocol, source
+ * address and source port/id as @tuple?
+ */
+static inline int
+same_src(const struct nf_conn *ct,
+        const struct nf_conntrack_tuple *tuple)
+{
+       const struct nf_conntrack_tuple *t;
+
+       t = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
+       return (t->dst.protonum == tuple->dst.protonum &&
+               nf_inet_addr_cmp(&t->src.u3, &tuple->src.u3) &&
+               t->src.u.all == tuple->src.u.all);
+}
+
+/* Only called for SRC manip.
+ * Walk the bysource hash chain for @tuple's source; if another
+ * conntrack in the same zone already maps this source, reuse its
+ * mapping: build @result from the existing reply tuple (keeping
+ * @tuple's destination) and return 1 when it fits @range.
+ * Caller must be in an RCU read-side section.
+ */
+static int
+find_appropriate_src(struct net *net, u16 zone,
+                    const struct nf_nat_l3proto *l3proto,
+                    const struct nf_nat_l4proto *l4proto,
+                    const struct nf_conntrack_tuple *tuple,
+                    struct nf_conntrack_tuple *result,
+                    const struct nf_nat_range *range)
+{
+       unsigned int h = hash_by_src(net, zone, tuple);
+       const struct nf_conn_nat *nat;
+       const struct nf_conn *ct;
+       const struct hlist_node *n;
+
+       hlist_for_each_entry_rcu(nat, n, &net->ct.nat_bysource[h], bysource) {
+               ct = nat->ct;
+               if (same_src(ct, tuple) && nf_ct_zone(ct) == zone) {
+                       /* Copy source part from reply tuple. */
+                       nf_ct_invert_tuplepr(result,
+                                      &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
+                       result->dst = tuple->dst;
+
+                       if (in_range(l3proto, l4proto, result, range))
+                               return 1;
+               }
+       }
+       return 0;
+}
+
+/* For [FUTURE] fragmentation handling, we want the least-used
+ * src-ip/dst-ip/proto triple.  Fairness doesn't come into it.  Thus
+ * if the range specifies 1.2.3.4 ports 10000-10005 and 1.2.3.5 ports
+ * 1-65535, we don't do pro-rata allocation based on ports; we choose
+ * the ip with the lowest src-ip/dst-ip/proto usage.
+ *
+ * Picks an IP inside [range->min_addr, range->max_addr] by hashing the
+ * tuple and writes it into the src (SRC manip) or dst (DST manip) part
+ * of @tuple.  Works per-32-bit word so the same code handles IPv4 and
+ * IPv6 addresses.
+ */
+static void
+find_best_ips_proto(u16 zone, struct nf_conntrack_tuple *tuple,
+                   const struct nf_nat_range *range,
+                   const struct nf_conn *ct,
+                   enum nf_nat_manip_type maniptype)
+{
+       union nf_inet_addr *var_ipp;
+       unsigned int i, max;
+       /* Host order */
+       u32 minip, maxip, j, dist;
+       bool full_range;
+
+       /* No IP mapping?  Do nothing. */
+       if (!(range->flags & NF_NAT_RANGE_MAP_IPS))
+               return;
+
+       if (maniptype == NF_NAT_MANIP_SRC)
+               var_ipp = &tuple->src.u3;
+       else
+               var_ipp = &tuple->dst.u3;
+
+       /* Fast path: only one choice. */
+       if (nf_inet_addr_cmp(&range->min_addr, &range->max_addr)) {
+               *var_ipp = range->min_addr;
+               return;
+       }
+
+       /* Index of the last 32-bit word of the address: 0 for IPv4,
+        * 3 for IPv6.
+        */
+       if (nf_ct_l3num(ct) == NFPROTO_IPV4)
+               max = sizeof(var_ipp->ip) / sizeof(u32) - 1;
+       else
+               max = sizeof(var_ipp->ip6) / sizeof(u32) - 1;
+
+       /* Hashing source and destination IPs gives a fairly even
+        * spread in practice (if there are a small number of IPs
+        * involved, there usually aren't that many connections
+        * anyway).  The consistency means that servers see the same
+        * client coming from the same IP (some Internet Banking sites
+        * like this), even across reboots.
+        */
+       j = jhash2((u32 *)&tuple->src.u3, sizeof(tuple->src.u3) / sizeof(u32),
+                  range->flags & NF_NAT_RANGE_PERSISTENT ?
+                       0 : (__force u32)tuple->dst.u3.all[max] ^ zone);
+
+       full_range = false;
+       for (i = 0; i <= max; i++) {
+               /* If first bytes of the address are at the maximum, use the
+                * distance. Otherwise use the full range.
+                */
+               if (!full_range) {
+                       minip = ntohl((__force __be32)range->min_addr.all[i]);
+                       maxip = ntohl((__force __be32)range->max_addr.all[i]);
+                       dist  = maxip - minip + 1;
+               } else {
+                       minip = 0;
+                       dist  = ~0;
+               }
+
+               /* Map hash j onto [minip, minip+dist) via multiply-shift. */
+               var_ipp->all[i] = (__force __u32)
+                       htonl(minip + (((u64)j * dist) >> 32));
+               if (var_ipp->all[i] != range->max_addr.all[i])
+                       full_range = true;
+
+               if (!(range->flags & NF_NAT_RANGE_PERSISTENT))
+                       j ^= (__force u32)tuple->dst.u3.all[i];
+       }
+}
+
+/* Manipulate the tuple into the range given. For NF_INET_POST_ROUTING,
+ * we change the source to map into the range. For NF_INET_PRE_ROUTING
+ * and NF_INET_LOCAL_OUT, we change the destination to map into the
+ * range. It might not be possible to get a unique tuple, but we try.
+ * At worst (or if we race), we will end up with a final duplicate in
+ * __ip_conntrack_confirm and drop the packet. */
+static void
+get_unique_tuple(struct nf_conntrack_tuple *tuple,
+                const struct nf_conntrack_tuple *orig_tuple,
+                const struct nf_nat_range *range,
+                struct nf_conn *ct,
+                enum nf_nat_manip_type maniptype)
+{
+       const struct nf_nat_l3proto *l3proto;
+       const struct nf_nat_l4proto *l4proto;
+       struct net *net = nf_ct_net(ct);
+       u16 zone = nf_ct_zone(ct);
+
+       rcu_read_lock();
+       l3proto = __nf_nat_l3proto_find(orig_tuple->src.l3num);
+       l4proto = __nf_nat_l4proto_find(orig_tuple->src.l3num,
+                                       orig_tuple->dst.protonum);
+
+       /* 1) If this srcip/proto/src-proto-part is currently mapped,
+        * and that same mapping gives a unique tuple within the given
+        * range, use that.
+        *
+        * This is only required for source (ie. NAT/masq) mappings.
+        * So far, we don't do local source mappings, so multiple
+        * manips not an issue.
+        */
+       if (maniptype == NF_NAT_MANIP_SRC &&
+           !(range->flags & NF_NAT_RANGE_PROTO_RANDOM)) {
+               /* try the original tuple first */
+               if (in_range(l3proto, l4proto, orig_tuple, range)) {
+                       if (!nf_nat_used_tuple(orig_tuple, ct)) {
+                               *tuple = *orig_tuple;
+                               goto out;
+                       }
+               } else if (find_appropriate_src(net, zone, l3proto, l4proto,
+                                               orig_tuple, tuple, range)) {
+                       pr_debug("get_unique_tuple: Found current src map\n");
+                       if (!nf_nat_used_tuple(tuple, ct))
+                               goto out;
+               }
+       }
+
+       /* 2) Select the least-used IP/proto combination in the given range */
+       *tuple = *orig_tuple;
+       find_best_ips_proto(zone, tuple, range, ct, maniptype);
+
+       /* 3) The per-protocol part of the manip is made to map into
+        * the range to make a unique tuple.
+        */
+
+       /* Only bother mapping if it's not already in range and unique */
+       if (!(range->flags & NF_NAT_RANGE_PROTO_RANDOM)) {
+               if (range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) {
+                       if (l4proto->in_range(tuple, maniptype,
+                                             &range->min_proto,
+                                             &range->max_proto) &&
+                           (range->min_proto.all == range->max_proto.all ||
+                            !nf_nat_used_tuple(tuple, ct)))
+                               goto out;
+               } else if (!nf_nat_used_tuple(tuple, ct)) {
+                       goto out;
+               }
+       }
+
+       /* Last chance: get protocol to try to obtain unique tuple. */
+       l4proto->unique_tuple(l3proto, tuple, range, maniptype, ct);
+out:
+       rcu_read_unlock();
+}
+
+/* Set up a NAT binding of type @maniptype for @ct within @range.
+ * Alters the conntrack reply tuple when a mapping was chosen, marks
+ * the corresponding IPS_*_NAT[_DONE] status bits and, for SRC manips,
+ * links the NAT extension into the per-net bysource hash.
+ * Returns an NF verdict (NF_ACCEPT on success).
+ */
+unsigned int
+nf_nat_setup_info(struct nf_conn *ct,
+                 const struct nf_nat_range *range,
+                 enum nf_nat_manip_type maniptype)
+{
+       struct net *net = nf_ct_net(ct);
+       struct nf_conntrack_tuple curr_tuple, new_tuple;
+       struct nf_conn_nat *nat;
+
+       /* nat helper or nfctnetlink also setup binding */
+       nat = nfct_nat(ct);
+       if (!nat) {
+               nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);
+               if (nat == NULL) {
+                       pr_debug("failed to add NAT extension\n");
+                       return NF_ACCEPT;
+               }
+       }
+
+       NF_CT_ASSERT(maniptype == NF_NAT_MANIP_SRC ||
+                    maniptype == NF_NAT_MANIP_DST);
+       BUG_ON(nf_nat_initialized(ct, maniptype));
+
+       /* What we've got will look like inverse of reply. Normally
+        * this is what is in the conntrack, except for prior
+        * manipulations (future optimization: if num_manips == 0,
+        * orig_tp = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple)
+        */
+       nf_ct_invert_tuplepr(&curr_tuple,
+                            &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
+
+       get_unique_tuple(&new_tuple, &curr_tuple, range, ct, maniptype);
+
+       if (!nf_ct_tuple_equal(&new_tuple, &curr_tuple)) {
+               struct nf_conntrack_tuple reply;
+
+               /* Alter conntrack table so will recognize replies. */
+               nf_ct_invert_tuplepr(&reply, &new_tuple);
+               nf_conntrack_alter_reply(ct, &reply);
+
+               /* Non-atomic: we own this at the moment. */
+               if (maniptype == NF_NAT_MANIP_SRC)
+                       ct->status |= IPS_SRC_NAT;
+               else
+                       ct->status |= IPS_DST_NAT;
+       }
+
+       if (maniptype == NF_NAT_MANIP_SRC) {
+               unsigned int srchash;
+
+               srchash = hash_by_src(net, nf_ct_zone(ct),
+                                     &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
+               spin_lock_bh(&nf_nat_lock);
+               /* nf_conntrack_alter_reply might re-allocate extension area */
+               nat = nfct_nat(ct);
+               nat->ct = ct;
+               hlist_add_head_rcu(&nat->bysource,
+                                  &net->ct.nat_bysource[srchash]);
+               spin_unlock_bh(&nf_nat_lock);
+       }
+
+       /* It's done. */
+       if (maniptype == NF_NAT_MANIP_DST)
+               ct->status |= IPS_DST_NAT_DONE;
+       else
+               ct->status |= IPS_SRC_NAT_DONE;
+
+       return NF_ACCEPT;
+}
+EXPORT_SYMBOL(nf_nat_setup_info);
+
+/* Do packet manipulations according to nf_nat_setup_info.
+ * Rewrites @skb so it looks like the inverse of the opposite
+ * direction's tuple, when this hook/direction was NATed.
+ * Returns NF_ACCEPT, or NF_DROP if the l3proto manip fails.
+ */
+unsigned int nf_nat_packet(struct nf_conn *ct,
+                          enum ip_conntrack_info ctinfo,
+                          unsigned int hooknum,
+                          struct sk_buff *skb)
+{
+       const struct nf_nat_l3proto *l3proto;
+       const struct nf_nat_l4proto *l4proto;
+       enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+       unsigned long statusbit;
+       enum nf_nat_manip_type mtype = HOOK2MANIP(hooknum);
+
+       if (mtype == NF_NAT_MANIP_SRC)
+               statusbit = IPS_SRC_NAT;
+       else
+               statusbit = IPS_DST_NAT;
+
+       /* Invert if this is reply dir. */
+       if (dir == IP_CT_DIR_REPLY)
+               statusbit ^= IPS_NAT_MASK;
+
+       /* Non-atomic: these bits don't change. */
+       if (ct->status & statusbit) {
+               struct nf_conntrack_tuple target;
+
+               /* We are aiming to look like inverse of other direction. */
+               nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);
+
+               l3proto = __nf_nat_l3proto_find(target.src.l3num);
+               l4proto = __nf_nat_l4proto_find(target.src.l3num,
+                                               target.dst.protonum);
+               if (!l3proto->manip_pkt(skb, 0, l4proto, &target, mtype))
+                       return NF_DROP;
+       }
+       return NF_ACCEPT;
+}
+EXPORT_SYMBOL_GPL(nf_nat_packet);
+
+/* Filter/mode argument for the nf_nat_proto_clean() iterator:
+ * zero l3proto/l4proto means "match any"; @hash selects whether to
+ * unhash from bysource (step 1) or wipe the NAT extension (step 2).
+ */
+struct nf_nat_proto_clean {
+       u8      l3proto;
+       u8      l4proto;
+       bool    hash;
+};
+
+/* Clear NAT section of all conntracks, in case we're loaded again. */
+static int nf_nat_proto_clean(struct nf_conn *i, void *data)
+{
+       const struct nf_nat_proto_clean *clean = data;
+       struct nf_conn_nat *nat = nfct_nat(i);
+
+       if (!nat)
+               return 0;
+       if (!(i->status & IPS_SRC_NAT_DONE))
+               return 0;
+       if ((clean->l3proto && nf_ct_l3num(i) != clean->l3proto) ||
+           (clean->l4proto && nf_ct_protonum(i) != clean->l4proto))
+               return 0;
+
+       if (clean->hash) {
+               spin_lock_bh(&nf_nat_lock);
+               hlist_del_rcu(&nat->bysource);
+               spin_unlock_bh(&nf_nat_lock);
+       } else {
+               memset(nat, 0, sizeof(*nat));
+               i->status &= ~(IPS_NAT_MASK | IPS_NAT_DONE_MASK |
+                              IPS_SEQ_ADJUST);
+       }
+       /* Always return 0: iterate all entries, never kill any. */
+       return 0;
+}
+
+/* Strip NAT state for one l3/l4 protocol pair from every conntrack in
+ * all namespaces, in two passes (unhash, then wipe) with an RCU grace
+ * period in between so lockless bysource readers never see freed state.
+ */
+static void nf_nat_l4proto_clean(u8 l3proto, u8 l4proto)
+{
+       struct nf_nat_proto_clean clean = {
+               .l3proto = l3proto,
+               .l4proto = l4proto,
+       };
+       struct net *net;
+
+       rtnl_lock();
+       /* Step 1 - remove from bysource hash */
+       clean.hash = true;
+       for_each_net(net)
+               nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean);
+       synchronize_rcu();
+
+       /* Step 2 - clean NAT section */
+       clean.hash = false;
+       for_each_net(net)
+               nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean);
+       rtnl_unlock();
+}
+
+/* Same two-pass cleanup as nf_nat_l4proto_clean(), but matching every
+ * l4 protocol of the given l3 family (clean.l4proto left at 0 = any).
+ */
+static void nf_nat_l3proto_clean(u8 l3proto)
+{
+       struct nf_nat_proto_clean clean = {
+               .l3proto = l3proto,
+       };
+       struct net *net;
+
+       rtnl_lock();
+       /* Step 1 - remove from bysource hash */
+       clean.hash = true;
+       for_each_net(net)
+               nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean);
+       synchronize_rcu();
+
+       /* Step 2 - clean NAT section */
+       clean.hash = false;
+       for_each_net(net)
+               nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean);
+       rtnl_unlock();
+}
+
+/* Protocol registration.
+ * Registers @l4proto for family @l3proto, lazily allocating that
+ * family's per-protocol table (all slots defaulting to
+ * nf_nat_l4proto_unknown).  Returns 0, -ENOMEM, or -EBUSY when the
+ * slot is already claimed.
+ */
+int nf_nat_l4proto_register(u8 l3proto, const struct nf_nat_l4proto *l4proto)
+{
+       const struct nf_nat_l4proto **l4protos;
+       unsigned int i;
+       int ret = 0;
+
+       mutex_lock(&nf_nat_proto_mutex);
+       if (nf_nat_l4protos[l3proto] == NULL) {
+               l4protos = kmalloc(IPPROTO_MAX * sizeof(struct nf_nat_l4proto *),
+                                  GFP_KERNEL);
+               if (l4protos == NULL) {
+                       ret = -ENOMEM;
+                       goto out;
+               }
+
+               for (i = 0; i < IPPROTO_MAX; i++)
+                       RCU_INIT_POINTER(l4protos[i], &nf_nat_l4proto_unknown);
+
+               /* Before making proto_array visible to lockless readers,
+                * we must make sure its content is committed to memory.
+                */
+               smp_wmb();
+
+               nf_nat_l4protos[l3proto] = l4protos;
+       }
+
+       if (rcu_dereference_protected(
+                       nf_nat_l4protos[l3proto][l4proto->l4proto],
+                       lockdep_is_held(&nf_nat_proto_mutex)
+                       ) != &nf_nat_l4proto_unknown) {
+               ret = -EBUSY;
+               goto out;
+       }
+       RCU_INIT_POINTER(nf_nat_l4protos[l3proto][l4proto->l4proto], l4proto);
+ out:
+       mutex_unlock(&nf_nat_proto_mutex);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(nf_nat_l4proto_register);
+
+/* No one stores the protocol anywhere; simply delete it.
+ * Resets the slot to nf_nat_l4proto_unknown, waits out current RCU
+ * readers, then purges any conntrack NAT state using this protocol.
+ */
+void nf_nat_l4proto_unregister(u8 l3proto, const struct nf_nat_l4proto *l4proto)
+{
+       mutex_lock(&nf_nat_proto_mutex);
+       RCU_INIT_POINTER(nf_nat_l4protos[l3proto][l4proto->l4proto],
+                        &nf_nat_l4proto_unknown);
+       mutex_unlock(&nf_nat_proto_mutex);
+       synchronize_rcu();
+
+       nf_nat_l4proto_clean(l3proto, l4proto->l4proto);
+}
+EXPORT_SYMBOL_GPL(nf_nat_l4proto_unregister);
+
+/* Register an l3 (address family) NAT implementation.  Pins the
+ * matching conntrack l3proto module and pre-installs the generic TCP
+ * and UDP l4 handlers for the family.  Returns 0 or a negative errno.
+ */
+int nf_nat_l3proto_register(const struct nf_nat_l3proto *l3proto)
+{
+       int err;
+
+       err = nf_ct_l3proto_try_module_get(l3proto->l3proto);
+       if (err < 0)
+               return err;
+
+       mutex_lock(&nf_nat_proto_mutex);
+       RCU_INIT_POINTER(nf_nat_l4protos[l3proto->l3proto][IPPROTO_TCP],
+                        &nf_nat_l4proto_tcp);
+       RCU_INIT_POINTER(nf_nat_l4protos[l3proto->l3proto][IPPROTO_UDP],
+                        &nf_nat_l4proto_udp);
+       mutex_unlock(&nf_nat_proto_mutex);
+
+       RCU_INIT_POINTER(nf_nat_l3protos[l3proto->l3proto], l3proto);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(nf_nat_l3proto_register);
+
+/* Reverse of nf_nat_l3proto_register(): unpublish, wait for RCU
+ * readers, purge per-family NAT state, drop the module reference.
+ */
+void nf_nat_l3proto_unregister(const struct nf_nat_l3proto *l3proto)
+{
+       mutex_lock(&nf_nat_proto_mutex);
+       RCU_INIT_POINTER(nf_nat_l3protos[l3proto->l3proto], NULL);
+       mutex_unlock(&nf_nat_proto_mutex);
+       synchronize_rcu();
+
+       nf_nat_l3proto_clean(l3proto->l3proto);
+       nf_ct_l3proto_module_put(l3proto->l3proto);
+}
+EXPORT_SYMBOL_GPL(nf_nat_l3proto_unregister);
+
+/* No one using conntrack by the time this called.
+ * Extension destructor: drop the entry from the bysource hash if it
+ * was ever linked there (only SRC-NATed entries are).
+ */
+static void nf_nat_cleanup_conntrack(struct nf_conn *ct)
+{
+       struct nf_conn_nat *nat = nf_ct_ext_find(ct, NF_CT_EXT_NAT);
+
+       if (nat == NULL || nat->ct == NULL)
+               return;
+
+       NF_CT_ASSERT(nat->ct->status & IPS_SRC_NAT_DONE);
+
+       spin_lock_bh(&nf_nat_lock);
+       hlist_del_rcu(&nat->bysource);
+       spin_unlock_bh(&nf_nat_lock);
+}
+
+/* Extension move callback: when the ct extension area is reallocated,
+ * splice the new nf_conn_nat into the hash position of the old one.
+ */
+static void nf_nat_move_storage(void *new, void *old)
+{
+       struct nf_conn_nat *new_nat = new;
+       struct nf_conn_nat *old_nat = old;
+       struct nf_conn *ct = old_nat->ct;
+
+       if (!ct || !(ct->status & IPS_SRC_NAT_DONE))
+               return;
+
+       spin_lock_bh(&nf_nat_lock);
+       hlist_replace_rcu(&old_nat->bysource, &new_nat->bysource);
+       spin_unlock_bh(&nf_nat_lock);
+}
+
+/* Conntrack extension descriptor for the NAT area. */
+static struct nf_ct_ext_type nat_extend __read_mostly = {
+       .len            = sizeof(struct nf_conn_nat),
+       .align          = __alignof__(struct nf_conn_nat),
+       .destroy        = nf_nat_cleanup_conntrack,
+       .move           = nf_nat_move_storage,
+       .id             = NF_CT_EXT_NAT,
+       .flags          = NF_CT_EXT_F_PREALLOC,
+};
+
+#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+
+#include <linux/netfilter/nfnetlink.h>
+#include <linux/netfilter/nfnetlink_conntrack.h>
+
+/* Netlink attribute policy for the nested CTA_NAT_PROTO attribute. */
+static const struct nla_policy protonat_nla_policy[CTA_PROTONAT_MAX+1] = {
+       [CTA_PROTONAT_PORT_MIN] = { .type = NLA_U16 },
+       [CTA_PROTONAT_PORT_MAX] = { .type = NLA_U16 },
+};
+
+/* Parse the per-protocol (port) part of a netlink NAT spec into
+ * @range via the l4proto's nlattr_to_range callback, if it has one.
+ * Returns 0 or a negative errno from attribute parsing.
+ */
+static int nfnetlink_parse_nat_proto(struct nlattr *attr,
+                                    const struct nf_conn *ct,
+                                    struct nf_nat_range *range)
+{
+       struct nlattr *tb[CTA_PROTONAT_MAX+1];
+       const struct nf_nat_l4proto *l4proto;
+       int err;
+
+       err = nla_parse_nested(tb, CTA_PROTONAT_MAX, attr, protonat_nla_policy);
+       if (err < 0)
+               return err;
+
+       l4proto = __nf_nat_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
+       if (l4proto->nlattr_to_range)
+               err = l4proto->nlattr_to_range(tb, range);
+
+       return err;
+}
+
+/* Netlink attribute policy for the top-level CTA_NAT attribute. */
+static const struct nla_policy nat_nla_policy[CTA_NAT_MAX+1] = {
+       [CTA_NAT_V4_MINIP]      = { .type = NLA_U32 },
+       [CTA_NAT_V4_MAXIP]      = { .type = NLA_U32 },
+       [CTA_NAT_V6_MINIP]      = { .len = sizeof(struct in6_addr) },
+       [CTA_NAT_V6_MAXIP]      = { .len = sizeof(struct in6_addr) },
+       [CTA_NAT_PROTO]         = { .type = NLA_NESTED },
+};
+
+/* Translate a netlink NAT specification into a zeroed @range: address
+ * part via the l3proto callback, optional port part via
+ * nfnetlink_parse_nat_proto().  Returns 0, a parse error, or -EAGAIN
+ * when the family's NAT module is not (yet) loaded.
+ */
+static int
+nfnetlink_parse_nat(const struct nlattr *nat,
+                   const struct nf_conn *ct, struct nf_nat_range *range)
+{
+       const struct nf_nat_l3proto *l3proto;
+       struct nlattr *tb[CTA_NAT_MAX+1];
+       int err;
+
+       memset(range, 0, sizeof(*range));
+
+       err = nla_parse_nested(tb, CTA_NAT_MAX, nat, nat_nla_policy);
+       if (err < 0)
+               return err;
+
+       rcu_read_lock();
+       l3proto = __nf_nat_l3proto_find(nf_ct_l3num(ct));
+       if (l3proto == NULL) {
+               err = -EAGAIN;
+               goto out;
+       }
+       err = l3proto->nlattr_to_range(tb, range);
+       if (err < 0)
+               goto out;
+
+       if (!tb[CTA_NAT_PROTO])
+               goto out;
+
+       err = nfnetlink_parse_nat_proto(tb[CTA_NAT_PROTO], ct, range);
+out:
+       rcu_read_unlock();
+       return err;
+}
+
+/* ctnetlink entry point: parse a NAT spec from netlink and install the
+ * binding on @ct.  Rejects with -EEXIST if this manip type was already
+ * set up.  Returns the nf_nat_setup_info() verdict or a negative errno.
+ */
+static int
+nfnetlink_parse_nat_setup(struct nf_conn *ct,
+                         enum nf_nat_manip_type manip,
+                         const struct nlattr *attr)
+{
+       struct nf_nat_range range;
+       int err;
+
+       err = nfnetlink_parse_nat(attr, ct, &range);
+       if (err < 0)
+               return err;
+       if (nf_nat_initialized(ct, manip))
+               return -EEXIST;
+
+       return nf_nat_setup_info(ct, &range, manip);
+}
+#else
+/* Stub used when ctnetlink support is compiled out. */
+static int
+nfnetlink_parse_nat_setup(struct nf_conn *ct,
+                         enum nf_nat_manip_type manip,
+                         const struct nlattr *attr)
+{
+       return -EOPNOTSUPP;
+}
+#endif
+
+/* Per-namespace init: allocate the bysource hash, sized like the main
+ * conntrack table.  Returns 0 or -ENOMEM.
+ */
+static int __net_init nf_nat_net_init(struct net *net)
+{
+       /* Leave them the same for the moment. */
+       net->ct.nat_htable_size = net->ct.htable_size;
+       net->ct.nat_bysource = nf_ct_alloc_hashtable(&net->ct.nat_htable_size, 0);
+       if (!net->ct.nat_bysource)
+               return -ENOMEM;
+       return 0;
+}
+
+/* Per-namespace teardown: strip NAT state from every remaining
+ * conntrack (zeroed filter = match all; clean.hash == false wipes the
+ * extension), wait out RCU readers, then free the bysource hash.
+ */
+static void __net_exit nf_nat_net_exit(struct net *net)
+{
+       struct nf_nat_proto_clean clean = {};
+
+       /* Pass the iterator as a plain function designator, matching
+        * every other nf_ct_iterate_cleanup() call site in this file
+        * (the previous "&nf_nat_proto_clean" was equivalent but
+        * inconsistent and easy to misread as an object pointer).
+        */
+       nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean);
+       synchronize_rcu();
+       nf_ct_free_hashtable(net->ct.nat_bysource, net->ct.nat_htable_size);
+}
+
+/* Per-network-namespace lifecycle hooks for the NAT core. */
+static struct pernet_operations nf_nat_net_ops = {
+       .init = nf_nat_net_init,
+       .exit = nf_nat_net_exit,
+};
+
+/* Named expectfn so ctnetlink-created expectations can reference
+ * nf_nat_follow_master by string.
+ */
+static struct nf_ct_helper_expectfn follow_master_nat = {
+       .name           = "nat-follow-master",
+       .expectfn       = nf_nat_follow_master,
+};
+
+/* TCP sequence adjustment callback exposed to nfnetlink_queue. */
+static struct nfq_ct_nat_hook nfq_ct_nat = {
+       .seq_adjust     = nf_nat_tcp_seq_adjust,
+};
+
+/* Module init: register the conntrack NAT extension and pernet ops,
+ * then publish the various NAT hooks consumed by conntrack, ctnetlink
+ * and (optionally) xfrm.  Each hook must be unowned at this point.
+ */
+static int __init nf_nat_init(void)
+{
+       int ret;
+
+       ret = nf_ct_extend_register(&nat_extend);
+       if (ret < 0) {
+               printk(KERN_ERR "nf_nat_core: Unable to register extension\n");
+               return ret;
+       }
+
+       ret = register_pernet_subsys(&nf_nat_net_ops);
+       if (ret < 0)
+               goto cleanup_extend;
+
+       nf_ct_helper_expectfn_register(&follow_master_nat);
+
+       /* Initialize fake conntrack so that NAT will skip it */
+       nf_ct_untracked_status_or(IPS_NAT_DONE_MASK);
+
+       BUG_ON(nf_nat_seq_adjust_hook != NULL);
+       RCU_INIT_POINTER(nf_nat_seq_adjust_hook, nf_nat_seq_adjust);
+       BUG_ON(nfnetlink_parse_nat_setup_hook != NULL);
+       RCU_INIT_POINTER(nfnetlink_parse_nat_setup_hook,
+                          nfnetlink_parse_nat_setup);
+       BUG_ON(nf_ct_nat_offset != NULL);
+       RCU_INIT_POINTER(nf_ct_nat_offset, nf_nat_get_offset);
+       RCU_INIT_POINTER(nfq_ct_nat_hook, &nfq_ct_nat);
+#ifdef CONFIG_XFRM
+       BUG_ON(nf_nat_decode_session_hook != NULL);
+       RCU_INIT_POINTER(nf_nat_decode_session_hook, __nf_nat_decode_session);
+#endif
+       return 0;
+
+ cleanup_extend:
+       nf_ct_extend_unregister(&nat_extend);
+       return ret;
+}
+
+/* Module exit: undo nf_nat_init() in reverse order, clear every
+ * published hook, free the per-family l4proto tables and wait for
+ * outstanding RCU readers before the module text goes away.
+ */
+static void __exit nf_nat_cleanup(void)
+{
+       unsigned int i;
+
+       unregister_pernet_subsys(&nf_nat_net_ops);
+       nf_ct_extend_unregister(&nat_extend);
+       nf_ct_helper_expectfn_unregister(&follow_master_nat);
+       RCU_INIT_POINTER(nf_nat_seq_adjust_hook, NULL);
+       RCU_INIT_POINTER(nfnetlink_parse_nat_setup_hook, NULL);
+       RCU_INIT_POINTER(nf_ct_nat_offset, NULL);
+       RCU_INIT_POINTER(nfq_ct_nat_hook, NULL);
+#ifdef CONFIG_XFRM
+       RCU_INIT_POINTER(nf_nat_decode_session_hook, NULL);
+#endif
+       for (i = 0; i < NFPROTO_NUMPROTO; i++)
+               kfree(nf_nat_l4protos[i]);
+       synchronize_net();
+}
+
+MODULE_LICENSE("GPL");
+
+module_init(nf_nat_init);
+module_exit(nf_nat_cleanup);
diff --git a/net/netfilter/nf_nat_ftp.c b/net/netfilter/nf_nat_ftp.c
new file mode 100644 (file)
index 0000000..e839b97
--- /dev/null
@@ -0,0 +1,143 @@
+/* FTP extension for TCP NAT alteration. */
+
+/* (C) 1999-2001 Paul `Rusty' Russell
+ * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/inet.h>
+#include <linux/tcp.h>
+#include <linux/netfilter_ipv4.h>
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nf_nat_helper.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_expect.h>
+#include <linux/netfilter/nf_conntrack_ftp.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Rusty Russell <rusty@rustcorp.com.au>");
+MODULE_DESCRIPTION("ftp NAT helper");
+MODULE_ALIAS("ip_nat_ftp");
+
+/* FIXME: Time out? --RR */
+
+/* Format the address/port argument of an FTP command into @buffer,
+ * according to the command @type parsed by the conntrack FTP helper:
+ *   PORT/PASV use the classic "a,b,c,d,p1,p2" dotted-decimal form,
+ *   EPRT uses "|1|addr|port|" (IPv4) or "|2|addr|port|" (IPv6),
+ *   EPSV uses "|||port|".
+ * Returns the number of characters written (snprintf semantics), or 0
+ * for an unrecognized type.
+ */
+static int nf_nat_ftp_fmt_cmd(struct nf_conn *ct, enum nf_ct_ftp_type type,
+                             char *buffer, size_t buflen,
+                             union nf_inet_addr *addr, u16 port)
+{
+       switch (type) {
+       case NF_CT_FTP_PORT:
+       case NF_CT_FTP_PASV:
+               return snprintf(buffer, buflen, "%u,%u,%u,%u,%u,%u",
+                               ((unsigned char *)&addr->ip)[0],
+                               ((unsigned char *)&addr->ip)[1],
+                               ((unsigned char *)&addr->ip)[2],
+                               ((unsigned char *)&addr->ip)[3],
+                               port >> 8,
+                               port & 0xFF),
+       case NF_CT_FTP_EPRT:
+               if (nf_ct_l3num(ct) == NFPROTO_IPV4)
+                       return snprintf(buffer, buflen, "|1|%pI4|%u|",
+                                       &addr->ip, port);
+               else
+                       return snprintf(buffer, buflen, "|2|%pI6|%u|",
+                                       &addr->ip6, port);
+       case NF_CT_FTP_EPSV:
+               return snprintf(buffer, buflen, "|||%u|", port);
+       }
+
+       return 0;
+}
+
+/* So, this packet has hit the connection tracking matching code.
+   Mangle it, and change the expectation to match the new version.
+
+   Hooked from the conntrack FTP helper via nf_nat_ftp_hook.  Rewrites
+   the address/port argument of the matched FTP command (at
+   matchoff/matchlen) to the NATed values and registers the expectation
+   for the data connection.  Returns NF_ACCEPT on success, NF_DROP if no
+   free port could be reserved or the packet could not be mangled. */
+static unsigned int nf_nat_ftp(struct sk_buff *skb,
+                              enum ip_conntrack_info ctinfo,
+                              enum nf_ct_ftp_type type,
+                              unsigned int protoff,
+                              unsigned int matchoff,
+                              unsigned int matchlen,
+                              struct nf_conntrack_expect *exp)
+{
+       union nf_inet_addr newaddr;
+       u_int16_t port;
+       int dir = CTINFO2DIR(ctinfo);
+       struct nf_conn *ct = exp->master;
+       /* Sized for the longest EPRT reply: "|1|" + address + "|65535|". */
+       char buffer[sizeof("|1||65535|") + INET6_ADDRSTRLEN];
+       unsigned int buflen;
+
+       pr_debug("FTP_NAT: type %i, off %u len %u\n", type, matchoff, matchlen);
+
+       /* Connection will come from wherever this packet goes, hence !dir */
+       newaddr = ct->tuplehash[!dir].tuple.dst.u3;
+       exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port;
+       exp->dir = !dir;
+
+       /* When you see the packet, we need to NAT it the same as the
+        * this one. */
+       exp->expectfn = nf_nat_follow_master;
+
+       /* Try to get same port: if not, try to change it.  -EBUSY means
+        * the tuple is taken, so keep scanning; any other error aborts. */
+       for (port = ntohs(exp->saved_proto.tcp.port); port != 0; port++) {
+               int ret;
+
+               exp->tuple.dst.u.tcp.port = htons(port);
+               ret = nf_ct_expect_related(exp);
+               if (ret == 0)
+                       break;
+               else if (ret != -EBUSY) {
+                       port = 0;
+                       break;
+               }
+       }
+
+       if (port == 0)
+               return NF_DROP;
+
+       buflen = nf_nat_ftp_fmt_cmd(ct, type, buffer, sizeof(buffer),
+                                   &newaddr, port);
+       if (!buflen)
+               goto out;
+
+       pr_debug("calling nf_nat_mangle_tcp_packet\n");
+
+       if (!nf_nat_mangle_tcp_packet(skb, ct, ctinfo, protoff, matchoff,
+                                     matchlen, buffer, buflen))
+               goto out;
+
+       return NF_ACCEPT;
+
+out:
+       /* Drop the expectation we just registered before dropping. */
+       nf_ct_unexpect_related(exp);
+       return NF_DROP;
+}
+
+/* Module exit: unhook from conntrack and wait for RCU readers to drain. */
+static void __exit nf_nat_ftp_fini(void)
+{
+       RCU_INIT_POINTER(nf_nat_ftp_hook, NULL);
+       synchronize_rcu();
+}
+
+/* Module init: publish nf_nat_ftp() to the conntrack FTP helper. */
+static int __init nf_nat_ftp_init(void)
+{
+       BUG_ON(nf_nat_ftp_hook != NULL);
+       RCU_INIT_POINTER(nf_nat_ftp_hook, nf_nat_ftp);
+       return 0;
+}
+
+/* Prior to 2.6.11, we had a ports param.  No longer, but don't break users.
+ * Setting 'ports' on this module just logs a pointer to the conntrack
+ * module's parameter; the value itself is ignored. */
+static int warn_set(const char *val, struct kernel_param *kp)
+{
+       printk(KERN_INFO KBUILD_MODNAME
+              ": kernel >= 2.6.10 only uses 'ports' for conntrack modules\n");
+       return 0;
+}
+module_param_call(ports, warn_set, NULL, NULL, 0);
+
+module_init(nf_nat_ftp_init);
+module_exit(nf_nat_ftp_fini);
diff --git a/net/netfilter/nf_nat_helper.c b/net/netfilter/nf_nat_helper.c
new file mode 100644 (file)
index 0000000..23c2b38
--- /dev/null
@@ -0,0 +1,435 @@
+/* nf_nat_helper.c - generic support functions for NAT helpers
+ *
+ * (C) 2000-2002 Harald Welte <laforge@netfilter.org>
+ * (C) 2003-2006 Netfilter Core Team <coreteam@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/gfp.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <net/tcp.h>
+
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_ecache.h>
+#include <net/netfilter/nf_conntrack_expect.h>
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nf_nat_l3proto.h>
+#include <net/netfilter/nf_nat_l4proto.h>
+#include <net/netfilter/nf_nat_core.h>
+#include <net/netfilter/nf_nat_helper.h>
+
+/* Debug helper: dump one direction's sequence-offset record. */
+#define DUMP_OFFSET(x) \
+       pr_debug("offset_before=%d, offset_after=%d, correction_pos=%u\n", \
+                x->offset_before, x->offset_after, x->correction_pos);
+
+/* Serializes updates/reads of nat->seq[] offset records. */
+static DEFINE_SPINLOCK(nf_nat_seqofs_lock);
+
+/* Setup TCP sequence correction given this change at this sequence.
+ *
+ * Records in nat->seq[dir] that @sizediff bytes were added/removed at
+ * sequence number @seq, so later packets can have seq/ack rewritten by
+ * nf_nat_seq_adjust(). */
+static inline void
+adjust_tcp_sequence(u32 seq,
+                   int sizediff,
+                   struct nf_conn *ct,
+                   enum ip_conntrack_info ctinfo)
+{
+       enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+       struct nf_conn_nat *nat = nfct_nat(ct);
+       struct nf_nat_seq *this_way = &nat->seq[dir];
+
+       pr_debug("adjust_tcp_sequence: seq = %u, sizediff = %d\n",
+                seq, sizediff);
+
+       pr_debug("adjust_tcp_sequence: Seq_offset before: ");
+       DUMP_OFFSET(this_way);
+
+       spin_lock_bh(&nf_nat_seqofs_lock);
+
+       /* SYN adjust. If it's uninitialized, or this is after last
+        * correction, record it: we don't handle more than one
+        * adjustment in the window, but do deal with common case of a
+        * retransmit */
+       if (this_way->offset_before == this_way->offset_after ||
+           before(this_way->correction_pos, seq)) {
+               this_way->correction_pos = seq;
+               this_way->offset_before = this_way->offset_after;
+               this_way->offset_after += sizediff;
+       }
+       spin_unlock_bh(&nf_nat_seqofs_lock);
+
+       pr_debug("adjust_tcp_sequence: Seq_offset after: ");
+       DUMP_OFFSET(this_way);
+}
+
+/* Get the offset value, for conntrack.
+ *
+ * Returns the signed sequence offset to apply at sequence @seq in
+ * direction @dir, or 0 when the conntrack has no NAT extension.
+ * Published to conntrack through the nf_ct_nat_offset hook. */
+s16 nf_nat_get_offset(const struct nf_conn *ct,
+                     enum ip_conntrack_dir dir,
+                     u32 seq)
+{
+       struct nf_conn_nat *nat = nfct_nat(ct);
+       struct nf_nat_seq *this_way;
+       s16 offset;
+
+       if (!nat)
+               return 0;
+
+       this_way = &nat->seq[dir];
+       spin_lock_bh(&nf_nat_seqofs_lock);
+       /* Packets past the correction point get the newer offset. */
+       offset = after(seq, this_way->correction_pos)
+                ? this_way->offset_after : this_way->offset_before;
+       spin_unlock_bh(&nf_nat_seqofs_lock);
+
+       return offset;
+}
+
+/* Frobs data inside this packet, which is linear.
+ *
+ * Replaces match_len bytes at match_offset (relative to @dataoff past the
+ * network header) with rep_len bytes from rep_buffer, shifting the
+ * payload tail and fixing skb length plus the IPv4/IPv6 header length
+ * fields.  Caller must have made the skb writable and (if growing)
+ * ensured sufficient tailroom. */
+static void mangle_contents(struct sk_buff *skb,
+                           unsigned int dataoff,
+                           unsigned int match_offset,
+                           unsigned int match_len,
+                           const char *rep_buffer,
+                           unsigned int rep_len)
+{
+       unsigned char *data;
+
+       BUG_ON(skb_is_nonlinear(skb));
+       data = skb_network_header(skb) + dataoff;
+
+       /* move post-replacement */
+       memmove(data + match_offset + rep_len,
+               data + match_offset + match_len,
+               skb->tail - (skb->network_header + dataoff +
+                            match_offset + match_len));
+
+       /* insert data from buffer */
+       memcpy(data + match_offset, rep_buffer, rep_len);
+
+       /* update skb info */
+       if (rep_len > match_len) {
+               pr_debug("nf_nat_mangle_packet: Extending packet by "
+                        "%u from %u bytes\n", rep_len - match_len, skb->len);
+               skb_put(skb, rep_len - match_len);
+       } else {
+               pr_debug("nf_nat_mangle_packet: Shrinking packet from "
+                        "%u from %u bytes\n", match_len - rep_len, skb->len);
+               __skb_trim(skb, skb->len + rep_len - match_len);
+       }
+
+       if (nf_ct_l3num((struct nf_conn *)skb->nfct) == NFPROTO_IPV4) {
+               /* fix IP hdr checksum information */
+               ip_hdr(skb)->tot_len = htons(skb->len);
+               ip_send_check(ip_hdr(skb));
+       } else
+               /* IPv6 has no header checksum, only a payload length. */
+               ipv6_hdr(skb)->payload_len =
+                       htons(skb->len - sizeof(struct ipv6hdr));
+}
+
+/* Unusual, but possible case: grow the skb's tailroom by @extra bytes.
+ * Returns 1 on success, 0 if the packet would exceed 64K or the
+ * reallocation fails. */
+static int enlarge_skb(struct sk_buff *skb, unsigned int extra)
+{
+       if (skb->len + extra > 65535)
+               return 0;
+
+       if (pskb_expand_head(skb, 0, extra - skb_tailroom(skb), GFP_ATOMIC))
+               return 0;
+
+       return 1;
+}
+
+/* Record a sequence-number adjustment of @off bytes at sequence @seq
+ * (network order) on @ct, mark the conntrack as needing seq adjustment,
+ * and emit a NATSEQADJ event.  A zero offset is a no-op. */
+void nf_nat_set_seq_adjust(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+                          __be32 seq, s16 off)
+{
+       if (!off)
+               return;
+       set_bit(IPS_SEQ_ADJUST_BIT, &ct->status);
+       adjust_tcp_sequence(ntohl(seq), off, ct, ctinfo);
+       nf_conntrack_event_cache(IPCT_NATSEQADJ, ct);
+}
+EXPORT_SYMBOL_GPL(nf_nat_set_seq_adjust);
+
+/* Convenience wrapper: record a seq adjustment of @off at the current
+ * packet's TCP sequence number.  No-op for non-TCP conntracks.
+ * NOTE(review): locates the TCP header via ip_hdrlen(), i.e. assumes an
+ * IPv4 network header — confirm callers are IPv4-only. */
+void nf_nat_tcp_seq_adjust(struct sk_buff *skb, struct nf_conn *ct,
+                          u32 ctinfo, int off)
+{
+       const struct tcphdr *th;
+
+       if (nf_ct_protonum(ct) != IPPROTO_TCP)
+               return;
+
+       th = (struct tcphdr *)(skb_network_header(skb)+ ip_hdrlen(skb));
+       nf_nat_set_seq_adjust(ct, ctinfo, th->seq, off);
+}
+EXPORT_SYMBOL_GPL(nf_nat_tcp_seq_adjust);
+
+/* Generic function for mangling variable-length address changes inside
+ * NATed TCP connections (like the PORT XXX,XXX,XXX,XXX,XXX,XXX
+ * command in FTP).
+ *
+ * Takes care about all the nasty sequence number changes, checksumming,
+ * skb enlargement, ...
+ *
+ * @protoff is the offset of the TCP header from the network header;
+ * @match_offset/@match_len locate the bytes to replace within the TCP
+ * payload.  When @adjust is true and the length changed, a sequence
+ * adjustment is recorded on the conntrack.  Returns 1 on success, 0 on
+ * failure (not an NF_* verdict).
+ * */
+int __nf_nat_mangle_tcp_packet(struct sk_buff *skb,
+                              struct nf_conn *ct,
+                              enum ip_conntrack_info ctinfo,
+                              unsigned int protoff,
+                              unsigned int match_offset,
+                              unsigned int match_len,
+                              const char *rep_buffer,
+                              unsigned int rep_len, bool adjust)
+{
+       const struct nf_nat_l3proto *l3proto;
+       struct tcphdr *tcph;
+       int oldlen, datalen;
+
+       if (!skb_make_writable(skb, skb->len))
+               return 0;
+
+       /* Growing the payload? Make room first. */
+       if (rep_len > match_len &&
+           rep_len - match_len > skb_tailroom(skb) &&
+           !enlarge_skb(skb, rep_len - match_len))
+               return 0;
+
+       SKB_LINEAR_ASSERT(skb);
+
+       tcph = (void *)skb->data + protoff;
+
+       oldlen = skb->len - protoff;
+       mangle_contents(skb, protoff + tcph->doff*4,
+                       match_offset, match_len, rep_buffer, rep_len);
+
+       datalen = skb->len - protoff;
+
+       /* Recompute the transport checksum via the address family's
+        * l3proto helper. */
+       l3proto = __nf_nat_l3proto_find(nf_ct_l3num(ct));
+       l3proto->csum_recalc(skb, IPPROTO_TCP, tcph, &tcph->check,
+                            datalen, oldlen);
+
+       if (adjust && rep_len != match_len)
+               nf_nat_set_seq_adjust(ct, ctinfo, tcph->seq,
+                                     (int)rep_len - (int)match_len);
+
+       return 1;
+}
+EXPORT_SYMBOL(__nf_nat_mangle_tcp_packet);
+
+/* Generic function for mangling variable-length address changes inside
+ * NATed UDP connections (like the CONNECT DATA XXXXX MESG XXXXX INDEX XXXXX
+ * command in the Amanda protocol)
+ *
+ * Takes care about all the nasty sequence number changes, checksumming,
+ * skb enlargement, ...
+ *
+ * Returns 1 on success, 0 on failure (not an NF_* verdict).
+ *
+ * XXX - This function could be merged with nf_nat_mangle_tcp_packet which
+ *       should be fairly easy to do.
+ */
+int
+nf_nat_mangle_udp_packet(struct sk_buff *skb,
+                        struct nf_conn *ct,
+                        enum ip_conntrack_info ctinfo,
+                        unsigned int protoff,
+                        unsigned int match_offset,
+                        unsigned int match_len,
+                        const char *rep_buffer,
+                        unsigned int rep_len)
+{
+       const struct nf_nat_l3proto *l3proto;
+       struct udphdr *udph;
+       int datalen, oldlen;
+
+       if (!skb_make_writable(skb, skb->len))
+               return 0;
+
+       /* Growing the payload? Make room first. */
+       if (rep_len > match_len &&
+           rep_len - match_len > skb_tailroom(skb) &&
+           !enlarge_skb(skb, rep_len - match_len))
+               return 0;
+
+       udph = (void *)skb->data + protoff;
+
+       oldlen = skb->len - protoff;
+       mangle_contents(skb, protoff + sizeof(*udph),
+                       match_offset, match_len, rep_buffer, rep_len);
+
+       /* update the length of the UDP packet */
+       datalen = skb->len - protoff;
+       udph->len = htons(datalen);
+
+       /* fix udp checksum if udp checksum was previously calculated
+        * (a zero UDP checksum means "not computed" over IPv4) */
+       if (!udph->check && skb->ip_summed != CHECKSUM_PARTIAL)
+               return 1;
+
+       l3proto = __nf_nat_l3proto_find(nf_ct_l3num(ct));
+       l3proto->csum_recalc(skb, IPPROTO_UDP, udph, &udph->check,
+                            datalen, oldlen);
+
+       return 1;
+}
+EXPORT_SYMBOL(nf_nat_mangle_udp_packet);
+
+/* Adjust one found SACK option including checksum correction.
+ *
+ * Walks the SACK blocks between @sackoff and @sackend, rewriting each
+ * start/end sequence with the recorded NAT offset (choosing before/after
+ * relative to the correction point) and patching the TCP checksum
+ * incrementally. */
+static void
+sack_adjust(struct sk_buff *skb,
+           struct tcphdr *tcph,
+           unsigned int sackoff,
+           unsigned int sackend,
+           struct nf_nat_seq *natseq)
+{
+       while (sackoff < sackend) {
+               struct tcp_sack_block_wire *sack;
+               __be32 new_start_seq, new_end_seq;
+
+               sack = (void *)skb->data + sackoff;
+               if (after(ntohl(sack->start_seq) - natseq->offset_before,
+                         natseq->correction_pos))
+                       new_start_seq = htonl(ntohl(sack->start_seq)
+                                       - natseq->offset_after);
+               else
+                       new_start_seq = htonl(ntohl(sack->start_seq)
+                                       - natseq->offset_before);
+
+               if (after(ntohl(sack->end_seq) - natseq->offset_before,
+                         natseq->correction_pos))
+                       new_end_seq = htonl(ntohl(sack->end_seq)
+                                     - natseq->offset_after);
+               else
+                       new_end_seq = htonl(ntohl(sack->end_seq)
+                                     - natseq->offset_before);
+
+               pr_debug("sack_adjust: start_seq: %d->%d, end_seq: %d->%d\n",
+                        ntohl(sack->start_seq), new_start_seq,
+                        ntohl(sack->end_seq), new_end_seq);
+
+               inet_proto_csum_replace4(&tcph->check, skb,
+                                        sack->start_seq, new_start_seq, 0);
+               inet_proto_csum_replace4(&tcph->check, skb,
+                                        sack->end_seq, new_end_seq, 0);
+               sack->start_seq = new_start_seq;
+               sack->end_seq = new_end_seq;
+               sackoff += sizeof(*sack);
+       }
+}
+
+/* TCP SACK sequence number adjustment.
+ *
+ * Scans the TCP option space for SACK options and fixes their sequence
+ * numbers via sack_adjust(), using the *reverse* direction's offsets
+ * (SACKs acknowledge the peer's sequence space).  Returns 1 on success,
+ * 0 on malformed options or unwritable skb. */
+static inline unsigned int
+nf_nat_sack_adjust(struct sk_buff *skb,
+                  unsigned int protoff,
+                  struct tcphdr *tcph,
+                  struct nf_conn *ct,
+                  enum ip_conntrack_info ctinfo)
+{
+       unsigned int dir, optoff, optend;
+       struct nf_conn_nat *nat = nfct_nat(ct);
+
+       optoff = protoff + sizeof(struct tcphdr);
+       optend = protoff + tcph->doff * 4;
+
+       if (!skb_make_writable(skb, optend))
+               return 0;
+
+       dir = CTINFO2DIR(ctinfo);
+
+       while (optoff < optend) {
+               /* Usually: option, length. */
+               unsigned char *op = skb->data + optoff;
+
+               switch (op[0]) {
+               case TCPOPT_EOL:
+                       return 1;
+               case TCPOPT_NOP:
+                       optoff++;
+                       continue;
+               default:
+                       /* no partial options */
+                       if (optoff + 1 == optend ||
+                           optoff + op[1] > optend ||
+                           op[1] < 2)
+                               return 0;
+                       /* Only well-formed SACK options are rewritten. */
+                       if (op[0] == TCPOPT_SACK &&
+                           op[1] >= 2+TCPOLEN_SACK_PERBLOCK &&
+                           ((op[1] - 2) % TCPOLEN_SACK_PERBLOCK) == 0)
+                               sack_adjust(skb, tcph, optoff+2,
+                                           optoff+op[1], &nat->seq[!dir]);
+                       optoff += op[1];
+               }
+       }
+       return 1;
+}
+
+/* TCP sequence number adjustment.  Returns 1 on success, 0 on failure.
+ *
+ * Applies the recorded offsets: seq is shifted by this direction's
+ * offset, ack is shifted back by the reverse direction's offset, the
+ * checksum is patched incrementally, and any SACK options are fixed up.
+ * NOTE(review): nat (nfct_nat(ct)) is dereferenced unconditionally —
+ * presumably callers only invoke this when the NAT extension exists. */
+int
+nf_nat_seq_adjust(struct sk_buff *skb,
+                 struct nf_conn *ct,
+                 enum ip_conntrack_info ctinfo,
+                 unsigned int protoff)
+{
+       struct tcphdr *tcph;
+       int dir;
+       __be32 newseq, newack;
+       s16 seqoff, ackoff;
+       struct nf_conn_nat *nat = nfct_nat(ct);
+       struct nf_nat_seq *this_way, *other_way;
+
+       dir = CTINFO2DIR(ctinfo);
+
+       this_way = &nat->seq[dir];
+       other_way = &nat->seq[!dir];
+
+       if (!skb_make_writable(skb, protoff + sizeof(*tcph)))
+               return 0;
+
+       tcph = (void *)skb->data + protoff;
+       if (after(ntohl(tcph->seq), this_way->correction_pos))
+               seqoff = this_way->offset_after;
+       else
+               seqoff = this_way->offset_before;
+
+       if (after(ntohl(tcph->ack_seq) - other_way->offset_before,
+                 other_way->correction_pos))
+               ackoff = other_way->offset_after;
+       else
+               ackoff = other_way->offset_before;
+
+       newseq = htonl(ntohl(tcph->seq) + seqoff);
+       newack = htonl(ntohl(tcph->ack_seq) - ackoff);
+
+       inet_proto_csum_replace4(&tcph->check, skb, tcph->seq, newseq, 0);
+       inet_proto_csum_replace4(&tcph->check, skb, tcph->ack_seq, newack, 0);
+
+       pr_debug("Adjusting sequence number from %u->%u, ack from %u->%u\n",
+                ntohl(tcph->seq), ntohl(newseq), ntohl(tcph->ack_seq),
+                ntohl(newack));
+
+       tcph->seq = newseq;
+       tcph->ack_seq = newack;
+
+       return nf_nat_sack_adjust(skb, protoff, tcph, ct, ctinfo);
+}
+
+/* Setup NAT on this expected conntrack so it follows master. */
+/* If we fail to get a free NAT slot, we'll get dropped on confirm */
+/* Used as the expectfn of helper-created expectations: maps the child
+ * connection's source to where the master sends, and its destination
+ * (address + saved port) to where the expectation points. */
+void nf_nat_follow_master(struct nf_conn *ct,
+                         struct nf_conntrack_expect *exp)
+{
+       struct nf_nat_range range;
+
+       /* This must be a fresh one. */
+       BUG_ON(ct->status & IPS_NAT_DONE_MASK);
+
+       /* Change src to where master sends to */
+       range.flags = NF_NAT_RANGE_MAP_IPS;
+       range.min_addr = range.max_addr
+               = ct->master->tuplehash[!exp->dir].tuple.dst.u3;
+       nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC);
+
+       /* For DST manip, map port here to where it's expected. */
+       range.flags = (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED);
+       range.min_proto = range.max_proto = exp->saved_proto;
+       range.min_addr = range.max_addr
+               = ct->master->tuplehash[!exp->dir].tuple.src.u3;
+       nf_nat_setup_info(ct, &range, NF_NAT_MANIP_DST);
+}
+EXPORT_SYMBOL(nf_nat_follow_master);
diff --git a/net/netfilter/nf_nat_irc.c b/net/netfilter/nf_nat_irc.c
new file mode 100644 (file)
index 0000000..1fedee6
--- /dev/null
@@ -0,0 +1,93 @@
+/* IRC extension for TCP NAT alteration.
+ *
+ * (C) 2000-2001 by Harald Welte <laforge@gnumonks.org>
+ * (C) 2004 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
+ * based on a copy of RR's ip_nat_ftp.c
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/tcp.h>
+#include <linux/kernel.h>
+
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nf_nat_helper.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_expect.h>
+#include <linux/netfilter/nf_conntrack_irc.h>
+
+MODULE_AUTHOR("Harald Welte <laforge@gnumonks.org>");
+MODULE_DESCRIPTION("IRC (DCC) NAT helper");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ip_nat_irc");
+
+/* Mangle the matched DCC request so the client sees the NATed address
+ * and port, and register an expectation for the inbound DCC connection.
+ * Returns an NF_* verdict (NF_DROP if no port could be reserved or the
+ * packet could not be mangled).
+ *
+ * Fix: 'buffer' was previously passed to nf_nat_mangle_tcp_packet() /
+ * strlen() without ever being written, splicing uninitialized stack
+ * bytes into the packet.  Fill it with the "<address> <port>" text the
+ * DCC protocol expects, as done upstream in commit 2690d97ade05
+ * ("netfilter: nf_nat: fix access to uninitialized buffer in IRC NAT
+ * helper").
+ */
+static unsigned int help(struct sk_buff *skb,
+                        enum ip_conntrack_info ctinfo,
+                        unsigned int protoff,
+                        unsigned int matchoff,
+                        unsigned int matchlen,
+                        struct nf_conntrack_expect *exp)
+{
+       struct nf_conn *ct = exp->master;
+       union nf_inet_addr newaddr;
+       /* Sized for "4294967296 65635": max decimal IPv4 + max port. */
+       char buffer[sizeof("4294967296 65635")];
+       u_int16_t port;
+       unsigned int ret;
+
+       /* Reply comes from server. */
+       newaddr = ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3;
+
+       exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port;
+       exp->dir = IP_CT_DIR_REPLY;
+       exp->expectfn = nf_nat_follow_master;
+
+       /* Try to get same port: if not, try to change it. */
+       for (port = ntohs(exp->saved_proto.tcp.port); port != 0; port++) {
+               int ret;
+
+               exp->tuple.dst.u.tcp.port = htons(port);
+               ret = nf_ct_expect_related(exp);
+               if (ret == 0)
+                       break;
+               else if (ret != -EBUSY) {
+                       port = 0;
+                       break;
+               }
+       }
+
+       if (port == 0)
+               return NF_DROP;
+
+       /* DCC carries the address as a host-order decimal integer. */
+       snprintf(buffer, sizeof(buffer), "%u %u", ntohl(newaddr.ip), port);
+       pr_debug("nf_nat_irc: inserting '%s' == %pI4, port %u\n",
+                buffer, &newaddr.ip, port);
+
+       ret = nf_nat_mangle_tcp_packet(skb, ct, ctinfo,
+                                      protoff, matchoff, matchlen, buffer,
+                                      strlen(buffer));
+       if (ret != NF_ACCEPT)
+               nf_ct_unexpect_related(exp);
+       return ret;
+}
+
+/* Module exit: unhook from conntrack and wait for RCU readers to drain. */
+static void __exit nf_nat_irc_fini(void)
+{
+       RCU_INIT_POINTER(nf_nat_irc_hook, NULL);
+       synchronize_rcu();
+}
+
+/* Module init: publish help() to the conntrack IRC helper. */
+static int __init nf_nat_irc_init(void)
+{
+       BUG_ON(nf_nat_irc_hook != NULL);
+       RCU_INIT_POINTER(nf_nat_irc_hook, help);
+       return 0;
+}
+
+/* Prior to 2.6.11, we had a ports param.  No longer, but don't break users.
+ * Setting 'ports' on this module just logs a pointer to the conntrack
+ * module's parameter; the value itself is ignored. */
+static int warn_set(const char *val, struct kernel_param *kp)
+{
+       printk(KERN_INFO KBUILD_MODNAME
+              ": kernel >= 2.6.10 only uses 'ports' for conntrack modules\n");
+       return 0;
+}
+module_param_call(ports, warn_set, NULL, NULL, 0);
+
+module_init(nf_nat_irc_init);
+module_exit(nf_nat_irc_fini);
diff --git a/net/netfilter/nf_nat_proto_common.c b/net/netfilter/nf_nat_proto_common.c
new file mode 100644 (file)
index 0000000..9baaf73
--- /dev/null
@@ -0,0 +1,112 @@
+/* (C) 1999-2001 Paul `Rusty' Russell
+ * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
+ * (C) 2008 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+#include <linux/random.h>
+#include <linux/netfilter.h>
+#include <linux/export.h>
+
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nf_nat_core.h>
+#include <net/netfilter/nf_nat_l3proto.h>
+#include <net/netfilter/nf_nat_l4proto.h>
+
+/* Shared in_range implementation for port-based l4 protocols: true when
+ * the tuple's source (SRC manip) or destination (DST manip) port lies
+ * within [min, max], compared in host byte order. */
+bool nf_nat_l4proto_in_range(const struct nf_conntrack_tuple *tuple,
+                            enum nf_nat_manip_type maniptype,
+                            const union nf_conntrack_man_proto *min,
+                            const union nf_conntrack_man_proto *max)
+{
+       __be16 port;
+
+       if (maniptype == NF_NAT_MANIP_SRC)
+               port = tuple->src.u.all;
+       else
+               port = tuple->dst.u.all;
+
+       return ntohs(port) >= ntohs(min->all) &&
+              ntohs(port) <= ntohs(max->all);
+}
+EXPORT_SYMBOL_GPL(nf_nat_l4proto_in_range);
+
+/* Shared unique_tuple implementation for port-based l4 protocols.
+ *
+ * Picks a free source/destination port for @tuple within the configured
+ * range (or the conventional privileged/unprivileged bands when no range
+ * was given), starting from either a per-protocol rover or a secure hash
+ * when NF_NAT_RANGE_PROTO_RANDOM is set.  On return the tuple's port is
+ * set to the last candidate tried; the caller re-checks uniqueness. */
+void nf_nat_l4proto_unique_tuple(const struct nf_nat_l3proto *l3proto,
+                                struct nf_conntrack_tuple *tuple,
+                                const struct nf_nat_range *range,
+                                enum nf_nat_manip_type maniptype,
+                                const struct nf_conn *ct,
+                                u16 *rover)
+{
+       unsigned int range_size, min, i;
+       __be16 *portptr;
+       u_int16_t off;
+
+       if (maniptype == NF_NAT_MANIP_SRC)
+               portptr = &tuple->src.u.all;
+       else
+               portptr = &tuple->dst.u.all;
+
+       /* If no range specified... */
+       if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED)) {
+               /* If it's dst rewrite, can't change port */
+               if (maniptype == NF_NAT_MANIP_DST)
+                       return;
+
+               if (ntohs(*portptr) < 1024) {
+                       /* Loose convention: >> 512 is credential passing */
+                       if (ntohs(*portptr) < 512) {
+                               min = 1;
+                               range_size = 511 - min + 1;
+                       } else {
+                               min = 600;
+                               range_size = 1023 - min + 1;
+                       }
+               } else {
+                       min = 1024;
+                       range_size = 65535 - 1024 + 1;
+               }
+       } else {
+               min = ntohs(range->min_proto.all);
+               range_size = ntohs(range->max_proto.all) - min + 1;
+       }
+
+       if (range->flags & NF_NAT_RANGE_PROTO_RANDOM)
+               off = l3proto->secure_port(tuple, maniptype == NF_NAT_MANIP_SRC
+                                                 ? tuple->dst.u.all
+                                                 : tuple->src.u.all);
+       else
+               off = *rover;
+
+       /* Scan at most range_size candidates; terminates even when every
+        * port is taken (the last candidate is left in the tuple). */
+       for (i = 0; ; ++off) {
+               *portptr = htons(min + off % range_size);
+               if (++i != range_size && nf_nat_used_tuple(tuple, ct))
+                       continue;
+               /* Remember where we stopped, unless randomizing. */
+               if (!(range->flags & NF_NAT_RANGE_PROTO_RANDOM))
+                       *rover = off;
+               return;
+       }
+       return;
+}
+EXPORT_SYMBOL_GPL(nf_nat_l4proto_unique_tuple);
+
+#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+/* Shared nlattr_to_range implementation: read CTA_PROTONAT_PORT_MIN/MAX
+ * into @range (max defaults to min when absent) and flag the range as
+ * protocol-specified.  Always returns 0. */
+int nf_nat_l4proto_nlattr_to_range(struct nlattr *tb[],
+                                  struct nf_nat_range *range)
+{
+       if (tb[CTA_PROTONAT_PORT_MIN]) {
+               range->min_proto.all = nla_get_be16(tb[CTA_PROTONAT_PORT_MIN]);
+               range->max_proto.all = range->min_proto.all;
+               range->flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
+       }
+       if (tb[CTA_PROTONAT_PORT_MAX]) {
+               range->max_proto.all = nla_get_be16(tb[CTA_PROTONAT_PORT_MAX]);
+               range->flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
+       }
+       return 0;
+}
+EXPORT_SYMBOL_GPL(nf_nat_l4proto_nlattr_to_range);
+#endif
diff --git a/net/netfilter/nf_nat_proto_dccp.c b/net/netfilter/nf_nat_proto_dccp.c
new file mode 100644 (file)
index 0000000..c8be2cd
--- /dev/null
@@ -0,0 +1,116 @@
+/*
+ * DCCP NAT protocol helper
+ *
+ * Copyright (c) 2005, 2006, 2008 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/skbuff.h>
+#include <linux/dccp.h>
+
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nf_nat_l3proto.h>
+#include <net/netfilter/nf_nat_l4proto.h>
+
+/* Rover shared by all DCCP NAT port allocations (see generic helper). */
+static u_int16_t dccp_port_rover;
+
+/* unique_tuple callback: delegate port selection to the generic
+ * port-based helper, using the DCCP rover. */
+static void
+dccp_unique_tuple(const struct nf_nat_l3proto *l3proto,
+                 struct nf_conntrack_tuple *tuple,
+                 const struct nf_nat_range *range,
+                 enum nf_nat_manip_type maniptype,
+                 const struct nf_conn *ct)
+{
+       nf_nat_l4proto_unique_tuple(l3proto, tuple, range, maniptype, ct,
+                                   &dccp_port_rover);
+}
+
+/* manip_pkt callback: rewrite the DCCP source or destination port and
+ * update the DCCP checksum (pseudo-header via the l3proto, then the
+ * port delta).  Returns false if the header cannot be made writable. */
+static bool
+dccp_manip_pkt(struct sk_buff *skb,
+              const struct nf_nat_l3proto *l3proto,
+              unsigned int iphdroff, unsigned int hdroff,
+              const struct nf_conntrack_tuple *tuple,
+              enum nf_nat_manip_type maniptype)
+{
+       struct dccp_hdr *hdr;
+       __be16 *portptr, oldport, newport;
+       int hdrsize = 8; /* DCCP connection tracking guarantees this much */
+
+       if (skb->len >= hdroff + sizeof(struct dccp_hdr))
+               hdrsize = sizeof(struct dccp_hdr);
+
+       if (!skb_make_writable(skb, hdroff + hdrsize))
+               return false;
+
+       hdr = (struct dccp_hdr *)(skb->data + hdroff);
+
+       if (maniptype == NF_NAT_MANIP_SRC) {
+               newport = tuple->src.u.dccp.port;
+               portptr = &hdr->dccph_sport;
+       } else {
+               newport = tuple->dst.u.dccp.port;
+               portptr = &hdr->dccph_dport;
+       }
+
+       oldport = *portptr;
+       *portptr = newport;
+
+       /* Truncated header: the checksum field isn't present, so only the
+        * ports can be rewritten. */
+       if (hdrsize < sizeof(*hdr))
+               return true;
+
+       l3proto->csum_update(skb, iphdroff, &hdr->dccph_checksum,
+                            tuple, maniptype);
+       inet_proto_csum_replace2(&hdr->dccph_checksum, skb, oldport, newport,
+                                0);
+       return true;
+}
+
+/* l4proto descriptor registered for DCCP over IPv4 and IPv6; port
+ * range handling reuses the generic port-based callbacks. */
+static const struct nf_nat_l4proto nf_nat_l4proto_dccp = {
+       .l4proto                = IPPROTO_DCCP,
+       .manip_pkt              = dccp_manip_pkt,
+       .in_range               = nf_nat_l4proto_in_range,
+       .unique_tuple           = dccp_unique_tuple,
+#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+       .nlattr_to_range        = nf_nat_l4proto_nlattr_to_range,
+#endif
+};
+
+/* Module init: register the DCCP l4proto for both address families,
+ * rolling back the IPv4 registration if IPv6 fails. */
+static int __init nf_nat_proto_dccp_init(void)
+{
+       int err;
+
+       err = nf_nat_l4proto_register(NFPROTO_IPV4, &nf_nat_l4proto_dccp);
+       if (err < 0)
+               goto err1;
+       err = nf_nat_l4proto_register(NFPROTO_IPV6, &nf_nat_l4proto_dccp);
+       if (err < 0)
+               goto err2;
+       return 0;
+
+err2:
+       nf_nat_l4proto_unregister(NFPROTO_IPV4, &nf_nat_l4proto_dccp);
+err1:
+       return err;
+}
+
+/* Module exit: unregister from both address families. */
+static void __exit nf_nat_proto_dccp_fini(void)
+{
+       nf_nat_l4proto_unregister(NFPROTO_IPV6, &nf_nat_l4proto_dccp);
+       nf_nat_l4proto_unregister(NFPROTO_IPV4, &nf_nat_l4proto_dccp);
+
+}
+
+module_init(nf_nat_proto_dccp_init);
+module_exit(nf_nat_proto_dccp_fini);
+
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_DESCRIPTION("DCCP NAT protocol helper");
+MODULE_LICENSE("GPL");
diff --git a/net/netfilter/nf_nat_proto_sctp.c b/net/netfilter/nf_nat_proto_sctp.c
new file mode 100644 (file)
index 0000000..e64faa5
--- /dev/null
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/sctp.h>
+#include <linux/module.h>
+#include <net/sctp/checksum.h>
+
+#include <net/netfilter/nf_nat_l4proto.h>
+
+static u_int16_t nf_sctp_port_rover;
+
+static void
+sctp_unique_tuple(const struct nf_nat_l3proto *l3proto,
+                 struct nf_conntrack_tuple *tuple,
+                 const struct nf_nat_range *range,
+                 enum nf_nat_manip_type maniptype,
+                 const struct nf_conn *ct)
+{
+       nf_nat_l4proto_unique_tuple(l3proto, tuple, range, maniptype, ct,
+                                   &nf_sctp_port_rover);
+}
+
+static bool
+sctp_manip_pkt(struct sk_buff *skb,
+              const struct nf_nat_l3proto *l3proto,
+              unsigned int iphdroff, unsigned int hdroff,
+              const struct nf_conntrack_tuple *tuple,
+              enum nf_nat_manip_type maniptype)
+{
+       struct sk_buff *frag;
+       sctp_sctphdr_t *hdr;
+       __be32 crc32;
+
+       if (!skb_make_writable(skb, hdroff + sizeof(*hdr)))
+               return false;
+
+       hdr = (struct sctphdr *)(skb->data + hdroff);
+
+       if (maniptype == NF_NAT_MANIP_SRC) {
+               /* Get rid of src port */
+               hdr->source = tuple->src.u.sctp.port;
+       } else {
+               /* Get rid of dst port */
+               hdr->dest = tuple->dst.u.sctp.port;
+       }
+
+       crc32 = sctp_start_cksum((u8 *)hdr, skb_headlen(skb) - hdroff);
+       skb_walk_frags(skb, frag)
+               crc32 = sctp_update_cksum((u8 *)frag->data, skb_headlen(frag),
+                                         crc32);
+       crc32 = sctp_end_cksum(crc32);
+       hdr->checksum = crc32;
+
+       return true;
+}
+
+static const struct nf_nat_l4proto nf_nat_l4proto_sctp = {
+       .l4proto                = IPPROTO_SCTP,
+       .manip_pkt              = sctp_manip_pkt,
+       .in_range               = nf_nat_l4proto_in_range,
+       .unique_tuple           = sctp_unique_tuple,
+#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+       .nlattr_to_range        = nf_nat_l4proto_nlattr_to_range,
+#endif
+};
+
+static int __init nf_nat_proto_sctp_init(void)
+{
+       int err;
+
+       err = nf_nat_l4proto_register(NFPROTO_IPV4, &nf_nat_l4proto_sctp);
+       if (err < 0)
+               goto err1;
+       err = nf_nat_l4proto_register(NFPROTO_IPV6, &nf_nat_l4proto_sctp);
+       if (err < 0)
+               goto err2;
+       return 0;
+
+err2:
+       nf_nat_l4proto_unregister(NFPROTO_IPV4, &nf_nat_l4proto_sctp);
+err1:
+       return err;
+}
+
+static void __exit nf_nat_proto_sctp_exit(void)
+{
+       nf_nat_l4proto_unregister(NFPROTO_IPV6, &nf_nat_l4proto_sctp);
+       nf_nat_l4proto_unregister(NFPROTO_IPV4, &nf_nat_l4proto_sctp);
+}
+
+module_init(nf_nat_proto_sctp_init);
+module_exit(nf_nat_proto_sctp_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SCTP NAT protocol helper");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
diff --git a/net/netfilter/nf_nat_proto_tcp.c b/net/netfilter/nf_nat_proto_tcp.c
new file mode 100644 (file)
index 0000000..83ec8a6
--- /dev/null
@@ -0,0 +1,85 @@
+/* (C) 1999-2001 Paul `Rusty' Russell
+ * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/export.h>
+#include <linux/tcp.h>
+
+#include <linux/netfilter.h>
+#include <linux/netfilter/nfnetlink_conntrack.h>
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nf_nat_l3proto.h>
+#include <net/netfilter/nf_nat_l4proto.h>
+#include <net/netfilter/nf_nat_core.h>
+
+static u16 tcp_port_rover;
+
+static void
+tcp_unique_tuple(const struct nf_nat_l3proto *l3proto,
+                struct nf_conntrack_tuple *tuple,
+                const struct nf_nat_range *range,
+                enum nf_nat_manip_type maniptype,
+                const struct nf_conn *ct)
+{
+       nf_nat_l4proto_unique_tuple(l3proto, tuple, range, maniptype, ct,
+                                   &tcp_port_rover);
+}
+
+static bool
+tcp_manip_pkt(struct sk_buff *skb,
+             const struct nf_nat_l3proto *l3proto,
+             unsigned int iphdroff, unsigned int hdroff,
+             const struct nf_conntrack_tuple *tuple,
+             enum nf_nat_manip_type maniptype)
+{
+       struct tcphdr *hdr;
+       __be16 *portptr, newport, oldport;
+       int hdrsize = 8; /* TCP connection tracking guarantees this much */
+
+       /* this could be a inner header returned in icmp packet; in such
+          cases we cannot update the checksum field since it is outside of
+          the 8 bytes of transport layer headers we are guaranteed */
+       if (skb->len >= hdroff + sizeof(struct tcphdr))
+               hdrsize = sizeof(struct tcphdr);
+
+       if (!skb_make_writable(skb, hdroff + hdrsize))
+               return false;
+
+       hdr = (struct tcphdr *)(skb->data + hdroff);
+
+       if (maniptype == NF_NAT_MANIP_SRC) {
+               /* Get rid of src port */
+               newport = tuple->src.u.tcp.port;
+               portptr = &hdr->source;
+       } else {
+               /* Get rid of dst port */
+               newport = tuple->dst.u.tcp.port;
+               portptr = &hdr->dest;
+       }
+
+       oldport = *portptr;
+       *portptr = newport;
+
+       if (hdrsize < sizeof(*hdr))
+               return true;
+
+       l3proto->csum_update(skb, iphdroff, &hdr->check, tuple, maniptype);
+       inet_proto_csum_replace2(&hdr->check, skb, oldport, newport, 0);
+       return true;
+}
+
+const struct nf_nat_l4proto nf_nat_l4proto_tcp = {
+       .l4proto                = IPPROTO_TCP,
+       .manip_pkt              = tcp_manip_pkt,
+       .in_range               = nf_nat_l4proto_in_range,
+       .unique_tuple           = tcp_unique_tuple,
+#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+       .nlattr_to_range        = nf_nat_l4proto_nlattr_to_range,
+#endif
+};
diff --git a/net/netfilter/nf_nat_proto_udp.c b/net/netfilter/nf_nat_proto_udp.c
new file mode 100644 (file)
index 0000000..7df613f
--- /dev/null
@@ -0,0 +1,76 @@
+/* (C) 1999-2001 Paul `Rusty' Russell
+ * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/udp.h>
+
+#include <linux/netfilter.h>
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nf_nat_core.h>
+#include <net/netfilter/nf_nat_l3proto.h>
+#include <net/netfilter/nf_nat_l4proto.h>
+
+static u16 udp_port_rover;
+
+static void
+udp_unique_tuple(const struct nf_nat_l3proto *l3proto,
+                struct nf_conntrack_tuple *tuple,
+                const struct nf_nat_range *range,
+                enum nf_nat_manip_type maniptype,
+                const struct nf_conn *ct)
+{
+       nf_nat_l4proto_unique_tuple(l3proto, tuple, range, maniptype, ct,
+                                   &udp_port_rover);
+}
+
+static bool
+udp_manip_pkt(struct sk_buff *skb,
+             const struct nf_nat_l3proto *l3proto,
+             unsigned int iphdroff, unsigned int hdroff,
+             const struct nf_conntrack_tuple *tuple,
+             enum nf_nat_manip_type maniptype)
+{
+       struct udphdr *hdr;
+       __be16 *portptr, newport;
+
+       if (!skb_make_writable(skb, hdroff + sizeof(*hdr)))
+               return false;
+       hdr = (struct udphdr *)(skb->data + hdroff);
+
+       if (maniptype == NF_NAT_MANIP_SRC) {
+               /* Get rid of src port */
+               newport = tuple->src.u.udp.port;
+               portptr = &hdr->source;
+       } else {
+               /* Get rid of dst port */
+               newport = tuple->dst.u.udp.port;
+               portptr = &hdr->dest;
+       }
+       if (hdr->check || skb->ip_summed == CHECKSUM_PARTIAL) {
+               l3proto->csum_update(skb, iphdroff, &hdr->check,
+                                    tuple, maniptype);
+               inet_proto_csum_replace2(&hdr->check, skb, *portptr, newport,
+                                        0);
+               if (!hdr->check)
+                       hdr->check = CSUM_MANGLED_0;
+       }
+       *portptr = newport;
+       return true;
+}
+
+const struct nf_nat_l4proto nf_nat_l4proto_udp = {
+       .l4proto                = IPPROTO_UDP,
+       .manip_pkt              = udp_manip_pkt,
+       .in_range               = nf_nat_l4proto_in_range,
+       .unique_tuple           = udp_unique_tuple,
+#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+       .nlattr_to_range        = nf_nat_l4proto_nlattr_to_range,
+#endif
+};
diff --git a/net/netfilter/nf_nat_proto_udplite.c b/net/netfilter/nf_nat_proto_udplite.c
new file mode 100644 (file)
index 0000000..776a0d1
--- /dev/null
@@ -0,0 +1,106 @@
+/* (C) 1999-2001 Paul `Rusty' Russell
+ * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
+ * (C) 2008 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/udp.h>
+
+#include <linux/netfilter.h>
+#include <linux/module.h>
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nf_nat_l3proto.h>
+#include <net/netfilter/nf_nat_l4proto.h>
+
+static u16 udplite_port_rover;
+
+static void
+udplite_unique_tuple(const struct nf_nat_l3proto *l3proto,
+                    struct nf_conntrack_tuple *tuple,
+                    const struct nf_nat_range *range,
+                    enum nf_nat_manip_type maniptype,
+                    const struct nf_conn *ct)
+{
+       nf_nat_l4proto_unique_tuple(l3proto, tuple, range, maniptype, ct,
+                                   &udplite_port_rover);
+}
+
+static bool
+udplite_manip_pkt(struct sk_buff *skb,
+                 const struct nf_nat_l3proto *l3proto,
+                 unsigned int iphdroff, unsigned int hdroff,
+                 const struct nf_conntrack_tuple *tuple,
+                 enum nf_nat_manip_type maniptype)
+{
+       struct udphdr *hdr;
+       __be16 *portptr, newport;
+
+       if (!skb_make_writable(skb, hdroff + sizeof(*hdr)))
+               return false;
+
+       hdr = (struct udphdr *)(skb->data + hdroff);
+
+       if (maniptype == NF_NAT_MANIP_SRC) {
+               /* Get rid of source port */
+               newport = tuple->src.u.udp.port;
+               portptr = &hdr->source;
+       } else {
+               /* Get rid of dst port */
+               newport = tuple->dst.u.udp.port;
+               portptr = &hdr->dest;
+       }
+
+       l3proto->csum_update(skb, iphdroff, &hdr->check, tuple, maniptype);
+       inet_proto_csum_replace2(&hdr->check, skb, *portptr, newport, 0);
+       if (!hdr->check)
+               hdr->check = CSUM_MANGLED_0;
+
+       *portptr = newport;
+       return true;
+}
+
+static const struct nf_nat_l4proto nf_nat_l4proto_udplite = {
+       .l4proto                = IPPROTO_UDPLITE,
+       .manip_pkt              = udplite_manip_pkt,
+       .in_range               = nf_nat_l4proto_in_range,
+       .unique_tuple           = udplite_unique_tuple,
+#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+       .nlattr_to_range        = nf_nat_l4proto_nlattr_to_range,
+#endif
+};
+
+static int __init nf_nat_proto_udplite_init(void)
+{
+       int err;
+
+       err = nf_nat_l4proto_register(NFPROTO_IPV4, &nf_nat_l4proto_udplite);
+       if (err < 0)
+               goto err1;
+       err = nf_nat_l4proto_register(NFPROTO_IPV6, &nf_nat_l4proto_udplite);
+       if (err < 0)
+               goto err2;
+       return 0;
+
+err2:
+       nf_nat_l4proto_unregister(NFPROTO_IPV4, &nf_nat_l4proto_udplite);
+err1:
+       return err;
+}
+
+static void __exit nf_nat_proto_udplite_fini(void)
+{
+       nf_nat_l4proto_unregister(NFPROTO_IPV6, &nf_nat_l4proto_udplite);
+       nf_nat_l4proto_unregister(NFPROTO_IPV4, &nf_nat_l4proto_udplite);
+}
+
+module_init(nf_nat_proto_udplite_init);
+module_exit(nf_nat_proto_udplite_fini);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("UDP-Lite NAT protocol helper");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
diff --git a/net/netfilter/nf_nat_proto_unknown.c b/net/netfilter/nf_nat_proto_unknown.c
new file mode 100644 (file)
index 0000000..6e494d5
--- /dev/null
@@ -0,0 +1,54 @@
+/* The "unknown" protocol.  This is what is used for protocols we
+ * don't understand.  It's returned by ip_ct_find_proto().
+ */
+
+/* (C) 1999-2001 Paul `Rusty' Russell
+ * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+#include <linux/init.h>
+
+#include <linux/netfilter.h>
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nf_nat_l4proto.h>
+
+static bool unknown_in_range(const struct nf_conntrack_tuple *tuple,
+                            enum nf_nat_manip_type manip_type,
+                            const union nf_conntrack_man_proto *min,
+                            const union nf_conntrack_man_proto *max)
+{
+       return true;
+}
+
+static void unknown_unique_tuple(const struct nf_nat_l3proto *l3proto,
+                                struct nf_conntrack_tuple *tuple,
+                                const struct nf_nat_range *range,
+                                enum nf_nat_manip_type maniptype,
+                                const struct nf_conn *ct)
+{
+       /* Sorry: we can't help you; if it's not unique, we can't frob
+        * anything.
+        */
+       return;
+}
+
+static bool
+unknown_manip_pkt(struct sk_buff *skb,
+                 const struct nf_nat_l3proto *l3proto,
+                 unsigned int iphdroff, unsigned int hdroff,
+                 const struct nf_conntrack_tuple *tuple,
+                 enum nf_nat_manip_type maniptype)
+{
+       return true;
+}
+
+const struct nf_nat_l4proto nf_nat_l4proto_unknown = {
+       .manip_pkt              = unknown_manip_pkt,
+       .in_range               = unknown_in_range,
+       .unique_tuple           = unknown_unique_tuple,
+};
diff --git a/net/netfilter/nf_nat_sip.c b/net/netfilter/nf_nat_sip.c
new file mode 100644 (file)
index 0000000..16303c7
--- /dev/null
@@ -0,0 +1,612 @@
+/* SIP extension for NAT alteration.
+ *
+ * (C) 2005 by Christian Hentschel <chentschel@arnet.com.ar>
+ * based on RR's ip_nat_ftp.c and other modules.
+ * (C) 2007 United Security Providers
+ * (C) 2007, 2008, 2011, 2012 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/inet.h>
+#include <linux/udp.h>
+#include <linux/tcp.h>
+
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nf_nat_helper.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_expect.h>
+#include <linux/netfilter/nf_conntrack_sip.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Christian Hentschel <chentschel@arnet.com.ar>");
+MODULE_DESCRIPTION("SIP NAT helper");
+MODULE_ALIAS("ip_nat_sip");
+
+
+static unsigned int mangle_packet(struct sk_buff *skb, unsigned int protoff,
+                                 unsigned int dataoff,
+                                 const char **dptr, unsigned int *datalen,
+                                 unsigned int matchoff, unsigned int matchlen,
+                                 const char *buffer, unsigned int buflen)
+{
+       enum ip_conntrack_info ctinfo;
+       struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
+       struct tcphdr *th;
+       unsigned int baseoff;
+
+       if (nf_ct_protonum(ct) == IPPROTO_TCP) {
+               th = (struct tcphdr *)(skb->data + protoff);
+               baseoff = protoff + th->doff * 4;
+               matchoff += dataoff - baseoff;
+
+               if (!__nf_nat_mangle_tcp_packet(skb, ct, ctinfo,
+                                               protoff, matchoff, matchlen,
+                                               buffer, buflen, false))
+                       return 0;
+       } else {
+               baseoff = protoff + sizeof(struct udphdr);
+               matchoff += dataoff - baseoff;
+
+               if (!nf_nat_mangle_udp_packet(skb, ct, ctinfo,
+                                             protoff, matchoff, matchlen,
+                                             buffer, buflen))
+                       return 0;
+       }
+
+       /* Reload data pointer and adjust datalen value */
+       *dptr = skb->data + dataoff;
+       *datalen += buflen - matchlen;
+       return 1;
+}
+
+static int sip_sprintf_addr(const struct nf_conn *ct, char *buffer,
+                           const union nf_inet_addr *addr, bool delim)
+{
+       if (nf_ct_l3num(ct) == NFPROTO_IPV4)
+               return sprintf(buffer, "%pI4", &addr->ip);
+       else {
+               if (delim)
+                       return sprintf(buffer, "[%pI6c]", &addr->ip6);
+               else
+                       return sprintf(buffer, "%pI6c", &addr->ip6);
+       }
+}
+
+static int sip_sprintf_addr_port(const struct nf_conn *ct, char *buffer,
+                                const union nf_inet_addr *addr, u16 port)
+{
+       if (nf_ct_l3num(ct) == NFPROTO_IPV4)
+               return sprintf(buffer, "%pI4:%u", &addr->ip, port);
+       else
+               return sprintf(buffer, "[%pI6c]:%u", &addr->ip6, port);
+}
+
+static int map_addr(struct sk_buff *skb, unsigned int protoff,
+                   unsigned int dataoff,
+                   const char **dptr, unsigned int *datalen,
+                   unsigned int matchoff, unsigned int matchlen,
+                   union nf_inet_addr *addr, __be16 port)
+{
+       enum ip_conntrack_info ctinfo;
+       struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
+       enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+       char buffer[INET6_ADDRSTRLEN + sizeof("[]:nnnnn")];
+       unsigned int buflen;
+       union nf_inet_addr newaddr;
+       __be16 newport;
+
+       if (nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.src.u3, addr) &&
+           ct->tuplehash[dir].tuple.src.u.udp.port == port) {
+               newaddr = ct->tuplehash[!dir].tuple.dst.u3;
+               newport = ct->tuplehash[!dir].tuple.dst.u.udp.port;
+       } else if (nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.dst.u3, addr) &&
+                  ct->tuplehash[dir].tuple.dst.u.udp.port == port) {
+               newaddr = ct->tuplehash[!dir].tuple.src.u3;
+               newport = ct->tuplehash[!dir].tuple.src.u.udp.port;
+       } else
+               return 1;
+
+       if (nf_inet_addr_cmp(&newaddr, addr) && newport == port)
+               return 1;
+
+       buflen = sip_sprintf_addr_port(ct, buffer, &newaddr, ntohs(newport));
+       return mangle_packet(skb, protoff, dataoff, dptr, datalen,
+                            matchoff, matchlen, buffer, buflen);
+}
+
+static int map_sip_addr(struct sk_buff *skb, unsigned int protoff,
+                       unsigned int dataoff,
+                       const char **dptr, unsigned int *datalen,
+                       enum sip_header_types type)
+{
+       enum ip_conntrack_info ctinfo;
+       struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
+       unsigned int matchlen, matchoff;
+       union nf_inet_addr addr;
+       __be16 port;
+
+       if (ct_sip_parse_header_uri(ct, *dptr, NULL, *datalen, type, NULL,
+                                   &matchoff, &matchlen, &addr, &port) <= 0)
+               return 1;
+       return map_addr(skb, protoff, dataoff, dptr, datalen,
+                       matchoff, matchlen, &addr, port);
+}
+
+static unsigned int nf_nat_sip(struct sk_buff *skb, unsigned int protoff,
+                              unsigned int dataoff,
+                              const char **dptr, unsigned int *datalen)
+{
+       enum ip_conntrack_info ctinfo;
+       struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
+       enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+       unsigned int coff, matchoff, matchlen;
+       enum sip_header_types hdr;
+       union nf_inet_addr addr;
+       __be16 port;
+       int request, in_header;
+
+       /* Basic rules: requests and responses. */
+       if (strnicmp(*dptr, "SIP/2.0", strlen("SIP/2.0")) != 0) {
+               if (ct_sip_parse_request(ct, *dptr, *datalen,
+                                        &matchoff, &matchlen,
+                                        &addr, &port) > 0 &&
+                   !map_addr(skb, protoff, dataoff, dptr, datalen,
+                             matchoff, matchlen, &addr, port))
+                       return NF_DROP;
+               request = 1;
+       } else
+               request = 0;
+
+       if (nf_ct_protonum(ct) == IPPROTO_TCP)
+               hdr = SIP_HDR_VIA_TCP;
+       else
+               hdr = SIP_HDR_VIA_UDP;
+
+       /* Translate topmost Via header and parameters */
+       if (ct_sip_parse_header_uri(ct, *dptr, NULL, *datalen,
+                                   hdr, NULL, &matchoff, &matchlen,
+                                   &addr, &port) > 0) {
+               unsigned int olen, matchend, poff, plen, buflen, n;
+               char buffer[INET6_ADDRSTRLEN + sizeof("[]:nnnnn")];
+
+               /* We're only interested in headers related to this
+                * connection */
+               if (request) {
+                       if (!nf_inet_addr_cmp(&addr,
+                                       &ct->tuplehash[dir].tuple.src.u3) ||
+                           port != ct->tuplehash[dir].tuple.src.u.udp.port)
+                               goto next;
+               } else {
+                       if (!nf_inet_addr_cmp(&addr,
+                                       &ct->tuplehash[dir].tuple.dst.u3) ||
+                           port != ct->tuplehash[dir].tuple.dst.u.udp.port)
+                               goto next;
+               }
+
+               olen = *datalen;
+               if (!map_addr(skb, protoff, dataoff, dptr, datalen,
+                             matchoff, matchlen, &addr, port))
+                       return NF_DROP;
+
+               matchend = matchoff + matchlen + *datalen - olen;
+
+               /* The maddr= parameter (RFC 2361) specifies where to send
+                * the reply. */
+               if (ct_sip_parse_address_param(ct, *dptr, matchend, *datalen,
+                                              "maddr=", &poff, &plen,
+                                              &addr, true) > 0 &&
+                   nf_inet_addr_cmp(&addr, &ct->tuplehash[dir].tuple.src.u3) &&
+                   !nf_inet_addr_cmp(&addr, &ct->tuplehash[!dir].tuple.dst.u3)) {
+                       buflen = sip_sprintf_addr(ct, buffer,
+                                       &ct->tuplehash[!dir].tuple.dst.u3,
+                                       true);
+                       if (!mangle_packet(skb, protoff, dataoff, dptr, datalen,
+                                          poff, plen, buffer, buflen))
+                               return NF_DROP;
+               }
+
+               /* The received= parameter (RFC 2361) contains the address
+                * from which the server received the request. */
+               if (ct_sip_parse_address_param(ct, *dptr, matchend, *datalen,
+                                              "received=", &poff, &plen,
+                                              &addr, false) > 0 &&
+                   nf_inet_addr_cmp(&addr, &ct->tuplehash[dir].tuple.dst.u3) &&
+                   !nf_inet_addr_cmp(&addr, &ct->tuplehash[!dir].tuple.src.u3)) {
+                       buflen = sip_sprintf_addr(ct, buffer,
+                                       &ct->tuplehash[!dir].tuple.src.u3,
+                                       false);
+                       if (!mangle_packet(skb, protoff, dataoff, dptr, datalen,
+                                          poff, plen, buffer, buflen))
+                               return NF_DROP;
+               }
+
+               /* The rport= parameter (RFC 3581) contains the port number
+                * from which the server received the request. */
+               if (ct_sip_parse_numerical_param(ct, *dptr, matchend, *datalen,
+                                                "rport=", &poff, &plen,
+                                                &n) > 0 &&
+                   htons(n) == ct->tuplehash[dir].tuple.dst.u.udp.port &&
+                   htons(n) != ct->tuplehash[!dir].tuple.src.u.udp.port) {
+                       __be16 p = ct->tuplehash[!dir].tuple.src.u.udp.port;
+                       buflen = sprintf(buffer, "%u", ntohs(p));
+                       if (!mangle_packet(skb, protoff, dataoff, dptr, datalen,
+                                          poff, plen, buffer, buflen))
+                               return NF_DROP;
+               }
+       }
+
+next:
+       /* Translate Contact headers */
+       coff = 0;
+       in_header = 0;
+       while (ct_sip_parse_header_uri(ct, *dptr, &coff, *datalen,
+                                      SIP_HDR_CONTACT, &in_header,
+                                      &matchoff, &matchlen,
+                                      &addr, &port) > 0) {
+               if (!map_addr(skb, protoff, dataoff, dptr, datalen,
+                             matchoff, matchlen,
+                             &addr, port))
+                       return NF_DROP;
+       }
+
+       if (!map_sip_addr(skb, protoff, dataoff, dptr, datalen, SIP_HDR_FROM) ||
+           !map_sip_addr(skb, protoff, dataoff, dptr, datalen, SIP_HDR_TO))
+               return NF_DROP;
+
+       return NF_ACCEPT;
+}
+
+static void nf_nat_sip_seq_adjust(struct sk_buff *skb, unsigned int protoff,
+                                 s16 off)
+{
+       enum ip_conntrack_info ctinfo;
+       struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
+       const struct tcphdr *th;
+
+       if (nf_ct_protonum(ct) != IPPROTO_TCP || off == 0)
+               return;
+
+       th = (struct tcphdr *)(skb->data + protoff);
+       nf_nat_set_seq_adjust(ct, ctinfo, th->seq, off);
+}
+
+/* Handles expected signalling connections and media streams */
+static void nf_nat_sip_expected(struct nf_conn *ct,
+                               struct nf_conntrack_expect *exp)
+{
+       struct nf_nat_range range;
+
+       /* This must be a fresh one. */
+       BUG_ON(ct->status & IPS_NAT_DONE_MASK);
+
+       /* For DST manip, map port here to where it's expected. */
+       range.flags = (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED);
+       range.min_proto = range.max_proto = exp->saved_proto;
+       range.min_addr = range.max_addr = exp->saved_addr;
+       nf_nat_setup_info(ct, &range, NF_NAT_MANIP_DST);
+
+       /* Change src to where master sends to, but only if the connection
+        * actually came from the same source. */
+       if (nf_inet_addr_cmp(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3,
+                            &ct->master->tuplehash[exp->dir].tuple.src.u3)) {
+               range.flags = NF_NAT_RANGE_MAP_IPS;
+               range.min_addr = range.max_addr
+                       = ct->master->tuplehash[!exp->dir].tuple.dst.u3;
+               nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC);
+       }
+}
+
+static unsigned int nf_nat_sip_expect(struct sk_buff *skb, unsigned int protoff,
+                                     unsigned int dataoff,
+                                     const char **dptr, unsigned int *datalen,
+                                     struct nf_conntrack_expect *exp,
+                                     unsigned int matchoff,
+                                     unsigned int matchlen)
+{
+       enum ip_conntrack_info ctinfo;
+       struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
+       enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+       union nf_inet_addr newaddr;
+       u_int16_t port;
+       char buffer[INET6_ADDRSTRLEN + sizeof("[]:nnnnn")];
+       unsigned int buflen;
+
+       /* Connection will come from reply */
+       if (nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.src.u3,
+                            &ct->tuplehash[!dir].tuple.dst.u3))
+               newaddr = exp->tuple.dst.u3;
+       else
+               newaddr = ct->tuplehash[!dir].tuple.dst.u3;
+
+       /* If the signalling port matches the connection's source port in the
+        * original direction, try to use the destination port in the opposite
+        * direction. */
+       if (exp->tuple.dst.u.udp.port ==
+           ct->tuplehash[dir].tuple.src.u.udp.port)
+               port = ntohs(ct->tuplehash[!dir].tuple.dst.u.udp.port);
+       else
+               port = ntohs(exp->tuple.dst.u.udp.port);
+
+       exp->saved_addr = exp->tuple.dst.u3;
+       exp->tuple.dst.u3 = newaddr;
+       exp->saved_proto.udp.port = exp->tuple.dst.u.udp.port;
+       exp->dir = !dir;
+       exp->expectfn = nf_nat_sip_expected;
+
+       for (; port != 0; port++) {
+               int ret;
+
+               exp->tuple.dst.u.udp.port = htons(port);
+               ret = nf_ct_expect_related(exp);
+               if (ret == 0)
+                       break;
+               else if (ret != -EBUSY) {
+                       port = 0;
+                       break;
+               }
+       }
+
+       if (port == 0)
+               return NF_DROP;
+
+       if (!nf_inet_addr_cmp(&exp->tuple.dst.u3, &exp->saved_addr) ||
+           exp->tuple.dst.u.udp.port != exp->saved_proto.udp.port) {
+               buflen = sip_sprintf_addr_port(ct, buffer, &newaddr, port);
+               if (!mangle_packet(skb, protoff, dataoff, dptr, datalen,
+                                  matchoff, matchlen, buffer, buflen))
+                       goto err;
+       }
+       return NF_ACCEPT;
+
+err:
+       nf_ct_unexpect_related(exp);
+       return NF_DROP;
+}
+
+static int mangle_content_len(struct sk_buff *skb, unsigned int protoff,
+                             unsigned int dataoff,
+                             const char **dptr, unsigned int *datalen)
+{
+       enum ip_conntrack_info ctinfo;
+       struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
+       unsigned int matchoff, matchlen;
+       char buffer[sizeof("65536")];
+       int buflen, c_len;
+
+       /* Get actual SDP length */
+       if (ct_sip_get_sdp_header(ct, *dptr, 0, *datalen,
+                                 SDP_HDR_VERSION, SDP_HDR_UNSPEC,
+                                 &matchoff, &matchlen) <= 0)
+               return 0;
+       c_len = *datalen - matchoff + strlen("v=");
+
+       /* Now, update SDP length */
+       if (ct_sip_get_header(ct, *dptr, 0, *datalen, SIP_HDR_CONTENT_LENGTH,
+                             &matchoff, &matchlen) <= 0)
+               return 0;
+
+       buflen = sprintf(buffer, "%u", c_len);
+       return mangle_packet(skb, protoff, dataoff, dptr, datalen,
+                            matchoff, matchlen, buffer, buflen);
+}
+
+static int mangle_sdp_packet(struct sk_buff *skb, unsigned int protoff,
+                            unsigned int dataoff,
+                            const char **dptr, unsigned int *datalen,
+                            unsigned int sdpoff,
+                            enum sdp_header_types type,
+                            enum sdp_header_types term,
+                            char *buffer, int buflen)
+{
+       enum ip_conntrack_info ctinfo;
+       struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
+       unsigned int matchlen, matchoff;
+
+       if (ct_sip_get_sdp_header(ct, *dptr, sdpoff, *datalen, type, term,
+                                 &matchoff, &matchlen) <= 0)
+               return -ENOENT;
+       return mangle_packet(skb, protoff, dataoff, dptr, datalen,
+                            matchoff, matchlen, buffer, buflen) ? 0 : -EINVAL;
+}
+
+static unsigned int nf_nat_sdp_addr(struct sk_buff *skb, unsigned int protoff,
+                                   unsigned int dataoff,
+                                   const char **dptr, unsigned int *datalen,
+                                   unsigned int sdpoff,
+                                   enum sdp_header_types type,
+                                   enum sdp_header_types term,
+                                   const union nf_inet_addr *addr)
+{
+       enum ip_conntrack_info ctinfo;
+       struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
+       char buffer[INET6_ADDRSTRLEN];
+       unsigned int buflen;
+
+       buflen = sip_sprintf_addr(ct, buffer, addr, false);
+       if (mangle_sdp_packet(skb, protoff, dataoff, dptr, datalen,
+                             sdpoff, type, term, buffer, buflen))
+               return 0;
+
+       return mangle_content_len(skb, protoff, dataoff, dptr, datalen);
+}
+
+static unsigned int nf_nat_sdp_port(struct sk_buff *skb, unsigned int protoff,
+                                   unsigned int dataoff,
+                                   const char **dptr, unsigned int *datalen,
+                                   unsigned int matchoff,
+                                   unsigned int matchlen,
+                                   u_int16_t port)
+{
+       char buffer[sizeof("nnnnn")];
+       unsigned int buflen;
+
+       buflen = sprintf(buffer, "%u", port);
+       if (!mangle_packet(skb, protoff, dataoff, dptr, datalen,
+                          matchoff, matchlen, buffer, buflen))
+               return 0;
+
+       return mangle_content_len(skb, protoff, dataoff, dptr, datalen);
+}
+
+static unsigned int nf_nat_sdp_session(struct sk_buff *skb, unsigned int protoff,
+                                      unsigned int dataoff,
+                                      const char **dptr, unsigned int *datalen,
+                                      unsigned int sdpoff,
+                                      const union nf_inet_addr *addr)
+{
+       enum ip_conntrack_info ctinfo;
+       struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
+       char buffer[INET6_ADDRSTRLEN];
+       unsigned int buflen;
+
+       /* Mangle session description owner and contact addresses */
+       buflen = sip_sprintf_addr(ct, buffer, addr, false);
+       if (mangle_sdp_packet(skb, protoff, dataoff, dptr, datalen, sdpoff,
+                             SDP_HDR_OWNER, SDP_HDR_MEDIA, buffer, buflen))
+               return 0;
+
+       switch (mangle_sdp_packet(skb, protoff, dataoff, dptr, datalen, sdpoff,
+                                 SDP_HDR_CONNECTION, SDP_HDR_MEDIA,
+                                 buffer, buflen)) {
+       case 0:
+       /*
+        * RFC 2327:
+        *
+        * Session description
+        *
+        * c=* (connection information - not required if included in all media)
+        */
+       case -ENOENT:
+               break;
+       default:
+               return 0;
+       }
+
+       return mangle_content_len(skb, protoff, dataoff, dptr, datalen);
+}
+
+/* So, this packet has hit the connection tracking matching code.
+   Mangle it, and change the expectation to match the new version. */
+static unsigned int nf_nat_sdp_media(struct sk_buff *skb, unsigned int protoff,
+                                    unsigned int dataoff,
+                                    const char **dptr, unsigned int *datalen,
+                                    struct nf_conntrack_expect *rtp_exp,
+                                    struct nf_conntrack_expect *rtcp_exp,
+                                    unsigned int mediaoff,
+                                    unsigned int medialen,
+                                    union nf_inet_addr *rtp_addr)
+{
+       enum ip_conntrack_info ctinfo;
+       struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
+       enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+       u_int16_t port;
+
+       /* Connection will come from reply */
+       if (nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.src.u3,
+                            &ct->tuplehash[!dir].tuple.dst.u3))
+               *rtp_addr = rtp_exp->tuple.dst.u3;
+       else
+               *rtp_addr = ct->tuplehash[!dir].tuple.dst.u3;
+
+       rtp_exp->saved_addr = rtp_exp->tuple.dst.u3;
+       rtp_exp->tuple.dst.u3 = *rtp_addr;
+       rtp_exp->saved_proto.udp.port = rtp_exp->tuple.dst.u.udp.port;
+       rtp_exp->dir = !dir;
+       rtp_exp->expectfn = nf_nat_sip_expected;
+
+       rtcp_exp->saved_addr = rtcp_exp->tuple.dst.u3;
+       rtcp_exp->tuple.dst.u3 = *rtp_addr;
+       rtcp_exp->saved_proto.udp.port = rtcp_exp->tuple.dst.u.udp.port;
+       rtcp_exp->dir = !dir;
+       rtcp_exp->expectfn = nf_nat_sip_expected;
+
+       /* Try to get same pair of ports: if not, try to change them. */
+       for (port = ntohs(rtp_exp->tuple.dst.u.udp.port);
+            port != 0; port += 2) {
+               int ret;
+
+               rtp_exp->tuple.dst.u.udp.port = htons(port);
+               ret = nf_ct_expect_related(rtp_exp);
+               if (ret == -EBUSY)
+                       continue;
+               else if (ret < 0) {
+                       port = 0;
+                       break;
+               }
+               rtcp_exp->tuple.dst.u.udp.port = htons(port + 1);
+               ret = nf_ct_expect_related(rtcp_exp);
+               if (ret == 0)
+                       break;
+               else if (ret == -EBUSY) {
+                       nf_ct_unexpect_related(rtp_exp);
+                       continue;
+               } else if (ret < 0) {
+                       nf_ct_unexpect_related(rtp_exp);
+                       port = 0;
+                       break;
+               }
+       }
+
+       if (port == 0)
+               goto err1;
+
+       /* Update media port. */
+       if (rtp_exp->tuple.dst.u.udp.port != rtp_exp->saved_proto.udp.port &&
+           !nf_nat_sdp_port(skb, protoff, dataoff, dptr, datalen,
+                            mediaoff, medialen, port))
+               goto err2;
+
+       return NF_ACCEPT;
+
+err2:
+       nf_ct_unexpect_related(rtp_exp);
+       nf_ct_unexpect_related(rtcp_exp);
+err1:
+       return NF_DROP;
+}
+
+static struct nf_ct_helper_expectfn sip_nat = {
+       .name           = "sip",
+       .expectfn       = nf_nat_sip_expected,
+};
+
+static void __exit nf_nat_sip_fini(void)
+{
+       RCU_INIT_POINTER(nf_nat_sip_hook, NULL);
+       RCU_INIT_POINTER(nf_nat_sip_seq_adjust_hook, NULL);
+       RCU_INIT_POINTER(nf_nat_sip_expect_hook, NULL);
+       RCU_INIT_POINTER(nf_nat_sdp_addr_hook, NULL);
+       RCU_INIT_POINTER(nf_nat_sdp_port_hook, NULL);
+       RCU_INIT_POINTER(nf_nat_sdp_session_hook, NULL);
+       RCU_INIT_POINTER(nf_nat_sdp_media_hook, NULL);
+       nf_ct_helper_expectfn_unregister(&sip_nat);
+       synchronize_rcu();
+}
+
+static int __init nf_nat_sip_init(void)
+{
+       BUG_ON(nf_nat_sip_hook != NULL);
+       BUG_ON(nf_nat_sip_seq_adjust_hook != NULL);
+       BUG_ON(nf_nat_sip_expect_hook != NULL);
+       BUG_ON(nf_nat_sdp_addr_hook != NULL);
+       BUG_ON(nf_nat_sdp_port_hook != NULL);
+       BUG_ON(nf_nat_sdp_session_hook != NULL);
+       BUG_ON(nf_nat_sdp_media_hook != NULL);
+       RCU_INIT_POINTER(nf_nat_sip_hook, nf_nat_sip);
+       RCU_INIT_POINTER(nf_nat_sip_seq_adjust_hook, nf_nat_sip_seq_adjust);
+       RCU_INIT_POINTER(nf_nat_sip_expect_hook, nf_nat_sip_expect);
+       RCU_INIT_POINTER(nf_nat_sdp_addr_hook, nf_nat_sdp_addr);
+       RCU_INIT_POINTER(nf_nat_sdp_port_hook, nf_nat_sdp_port);
+       RCU_INIT_POINTER(nf_nat_sdp_session_hook, nf_nat_sdp_session);
+       RCU_INIT_POINTER(nf_nat_sdp_media_hook, nf_nat_sdp_media);
+       nf_ct_helper_expectfn_register(&sip_nat);
+       return 0;
+}
+
+module_init(nf_nat_sip_init);
+module_exit(nf_nat_sip_fini);
diff --git a/net/netfilter/nf_nat_tftp.c b/net/netfilter/nf_nat_tftp.c
new file mode 100644 (file)
index 0000000..ccabbda
--- /dev/null
@@ -0,0 +1,50 @@
+/* (C) 2001-2002 Magnus Boden <mb@ozaba.mine.nu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/udp.h>
+
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_expect.h>
+#include <net/netfilter/nf_nat_helper.h>
+#include <linux/netfilter/nf_conntrack_tftp.h>
+
+MODULE_AUTHOR("Magnus Boden <mb@ozaba.mine.nu>");
+MODULE_DESCRIPTION("TFTP NAT helper");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ip_nat_tftp");
+
+static unsigned int help(struct sk_buff *skb,
+                        enum ip_conntrack_info ctinfo,
+                        struct nf_conntrack_expect *exp)
+{
+       const struct nf_conn *ct = exp->master;
+
+       exp->saved_proto.udp.port
+               = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.udp.port;
+       exp->dir = IP_CT_DIR_REPLY;
+       exp->expectfn = nf_nat_follow_master;
+       if (nf_ct_expect_related(exp) != 0)
+               return NF_DROP;
+       return NF_ACCEPT;
+}
+
+static void __exit nf_nat_tftp_fini(void)
+{
+       RCU_INIT_POINTER(nf_nat_tftp_hook, NULL);
+       synchronize_rcu();
+}
+
+static int __init nf_nat_tftp_init(void)
+{
+       BUG_ON(nf_nat_tftp_hook != NULL);
+       RCU_INIT_POINTER(nf_nat_tftp_hook, help);
+       return 0;
+}
+
+module_init(nf_nat_tftp_init);
+module_exit(nf_nat_tftp_fini);
index ce60cf0f6c11a49d9d8bc8bf5c8918d10dbfb316..8d2cf9ec37a850951648640728ab1ad6d8f53e6d 100644 (file)
@@ -118,7 +118,7 @@ static void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
  * through nf_reinject().
  */
 static int __nf_queue(struct sk_buff *skb,
-                     struct list_head *elem,
+                     struct nf_hook_ops *elem,
                      u_int8_t pf, unsigned int hook,
                      struct net_device *indev,
                      struct net_device *outdev,
@@ -155,7 +155,7 @@ static int __nf_queue(struct sk_buff *skb,
 
        *entry = (struct nf_queue_entry) {
                .skb    = skb,
-               .elem   = list_entry(elem, struct nf_hook_ops, list),
+               .elem   = elem,
                .pf     = pf,
                .hook   = hook,
                .indev  = indev,
@@ -225,7 +225,7 @@ static void nf_bridge_adjust_segmented_data(struct sk_buff *skb)
 #endif
 
 int nf_queue(struct sk_buff *skb,
-            struct list_head *elem,
+            struct nf_hook_ops *elem,
             u_int8_t pf, unsigned int hook,
             struct net_device *indev,
             struct net_device *outdev,
@@ -287,7 +287,7 @@ int nf_queue(struct sk_buff *skb,
 void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
 {
        struct sk_buff *skb = entry->skb;
-       struct list_head *elem = &entry->elem->list;
+       struct nf_hook_ops *elem = entry->elem;
        const struct nf_afinfo *afinfo;
        int err;
 
@@ -297,7 +297,7 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
 
        /* Continue traversal iff userspace said ok... */
        if (verdict == NF_REPEAT) {
-               elem = elem->prev;
+               elem = list_entry(elem->list.prev, struct nf_hook_ops, list);
                verdict = NF_ACCEPT;
        }
 
index a26503342e7184737c419ddd36144dacebc59d20..ffb92c03a358a8ce64c9e824a09db72a823b76d8 100644 (file)
@@ -241,7 +241,7 @@ static int __net_init nfnetlink_net_init(struct net *net)
 #endif
        };
 
-       nfnl = netlink_kernel_create(net, NETLINK_NETFILTER, THIS_MODULE, &cfg);
+       nfnl = netlink_kernel_create(net, NETLINK_NETFILTER, &cfg);
        if (!nfnl)
                return -ENOMEM;
        net->nfnl_stash = nfnl;
index b2e7310ca0b8e05d9835c4898f9670993d5ebed3..589d686f0b4cbe0f25b785790dfeba9bf1a13d98 100644 (file)
@@ -79,11 +79,11 @@ nfnl_acct_new(struct sock *nfnl, struct sk_buff *skb,
 
        if (tb[NFACCT_BYTES]) {
                atomic64_set(&nfacct->bytes,
-                            be64_to_cpu(nla_get_u64(tb[NFACCT_BYTES])));
+                            be64_to_cpu(nla_get_be64(tb[NFACCT_BYTES])));
        }
        if (tb[NFACCT_PKTS]) {
                atomic64_set(&nfacct->pkts,
-                            be64_to_cpu(nla_get_u64(tb[NFACCT_PKTS])));
+                            be64_to_cpu(nla_get_be64(tb[NFACCT_PKTS])));
        }
        atomic_set(&nfacct->refcnt, 1);
        list_add_tail_rcu(&nfacct->head, &nfnl_acct_list);
@@ -91,16 +91,16 @@ nfnl_acct_new(struct sock *nfnl, struct sk_buff *skb,
 }
 
 static int
-nfnl_acct_fill_info(struct sk_buff *skb, u32 pid, u32 seq, u32 type,
+nfnl_acct_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
                   int event, struct nf_acct *acct)
 {
        struct nlmsghdr *nlh;
        struct nfgenmsg *nfmsg;
-       unsigned int flags = pid ? NLM_F_MULTI : 0;
+       unsigned int flags = portid ? NLM_F_MULTI : 0;
        u64 pkts, bytes;
 
        event |= NFNL_SUBSYS_ACCT << 8;
-       nlh = nlmsg_put(skb, pid, seq, event, sizeof(*nfmsg), flags);
+       nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
        if (nlh == NULL)
                goto nlmsg_failure;
 
@@ -150,7 +150,7 @@ nfnl_acct_dump(struct sk_buff *skb, struct netlink_callback *cb)
                if (last && cur != last)
                        continue;
 
-               if (nfnl_acct_fill_info(skb, NETLINK_CB(cb->skb).pid,
+               if (nfnl_acct_fill_info(skb, NETLINK_CB(cb->skb).portid,
                                       cb->nlh->nlmsg_seq,
                                       NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
                                       NFNL_MSG_ACCT_NEW, cur) < 0) {
@@ -195,7 +195,7 @@ nfnl_acct_get(struct sock *nfnl, struct sk_buff *skb,
                        break;
                }
 
-               ret = nfnl_acct_fill_info(skb2, NETLINK_CB(skb).pid,
+               ret = nfnl_acct_fill_info(skb2, NETLINK_CB(skb).portid,
                                         nlh->nlmsg_seq,
                                         NFNL_MSG_TYPE(nlh->nlmsg_type),
                                         NFNL_MSG_ACCT_NEW, cur);
@@ -203,7 +203,7 @@ nfnl_acct_get(struct sock *nfnl, struct sk_buff *skb,
                        kfree_skb(skb2);
                        break;
                }
-               ret = netlink_unicast(nfnl, skb2, NETLINK_CB(skb).pid,
+               ret = netlink_unicast(nfnl, skb2, NETLINK_CB(skb).portid,
                                        MSG_DONTWAIT);
                if (ret > 0)
                        ret = 0;
index d6836193d479a00b155884a259a6d1f2f0245a4c..945950a8b1f11b50bccd440006b3f4afc1be2bee 100644 (file)
@@ -74,7 +74,7 @@ nfnl_cthelper_parse_tuple(struct nf_conntrack_tuple *tuple,
        if (!tb[NFCTH_TUPLE_L3PROTONUM] || !tb[NFCTH_TUPLE_L4PROTONUM])
                return -EINVAL;
 
-       tuple->src.l3num = ntohs(nla_get_u16(tb[NFCTH_TUPLE_L3PROTONUM]));
+       tuple->src.l3num = ntohs(nla_get_be16(tb[NFCTH_TUPLE_L3PROTONUM]));
        tuple->dst.protonum = nla_get_u8(tb[NFCTH_TUPLE_L4PROTONUM]);
 
        return 0;
@@ -85,6 +85,9 @@ nfnl_cthelper_from_nlattr(struct nlattr *attr, struct nf_conn *ct)
 {
        const struct nf_conn_help *help = nfct_help(ct);
 
+       if (attr == NULL)
+               return -EINVAL;
+
        if (help->helper->data_len == 0)
                return -EINVAL;
 
@@ -395,16 +398,16 @@ nla_put_failure:
 }
 
 static int
-nfnl_cthelper_fill_info(struct sk_buff *skb, u32 pid, u32 seq, u32 type,
+nfnl_cthelper_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
                        int event, struct nf_conntrack_helper *helper)
 {
        struct nlmsghdr *nlh;
        struct nfgenmsg *nfmsg;
-       unsigned int flags = pid ? NLM_F_MULTI : 0;
+       unsigned int flags = portid ? NLM_F_MULTI : 0;
        int status;
 
        event |= NFNL_SUBSYS_CTHELPER << 8;
-       nlh = nlmsg_put(skb, pid, seq, event, sizeof(*nfmsg), flags);
+       nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
        if (nlh == NULL)
                goto nlmsg_failure;
 
@@ -468,7 +471,7 @@ restart:
                                cb->args[1] = 0;
                        }
                        if (nfnl_cthelper_fill_info(skb,
-                                           NETLINK_CB(cb->skb).pid,
+                                           NETLINK_CB(cb->skb).portid,
                                            cb->nlh->nlmsg_seq,
                                            NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
                                            NFNL_MSG_CTHELPER_NEW, cur) < 0) {
@@ -538,7 +541,7 @@ nfnl_cthelper_get(struct sock *nfnl, struct sk_buff *skb,
                                break;
                        }
 
-                       ret = nfnl_cthelper_fill_info(skb2, NETLINK_CB(skb).pid,
+                       ret = nfnl_cthelper_fill_info(skb2, NETLINK_CB(skb).portid,
                                                nlh->nlmsg_seq,
                                                NFNL_MSG_TYPE(nlh->nlmsg_type),
                                                NFNL_MSG_CTHELPER_NEW, cur);
@@ -547,7 +550,7 @@ nfnl_cthelper_get(struct sock *nfnl, struct sk_buff *skb,
                                break;
                        }
 
-                       ret = netlink_unicast(nfnl, skb2, NETLINK_CB(skb).pid,
+                       ret = netlink_unicast(nfnl, skb2, NETLINK_CB(skb).portid,
                                                MSG_DONTWAIT);
                        if (ret > 0)
                                ret = 0;
index cdecbc8fe965e9ed66216e1702fd1b43fe569091..8847b4d8be06b9ad536c2bb32d9bbd3d34a7c289 100644 (file)
@@ -155,16 +155,16 @@ err_proto_put:
 }
 
 static int
-ctnl_timeout_fill_info(struct sk_buff *skb, u32 pid, u32 seq, u32 type,
+ctnl_timeout_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
                       int event, struct ctnl_timeout *timeout)
 {
        struct nlmsghdr *nlh;
        struct nfgenmsg *nfmsg;
-       unsigned int flags = pid ? NLM_F_MULTI : 0;
+       unsigned int flags = portid ? NLM_F_MULTI : 0;
        struct nf_conntrack_l4proto *l4proto = timeout->l4proto;
 
        event |= NFNL_SUBSYS_CTNETLINK_TIMEOUT << 8;
-       nlh = nlmsg_put(skb, pid, seq, event, sizeof(*nfmsg), flags);
+       nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
        if (nlh == NULL)
                goto nlmsg_failure;
 
@@ -222,7 +222,7 @@ ctnl_timeout_dump(struct sk_buff *skb, struct netlink_callback *cb)
                if (last && cur != last)
                        continue;
 
-               if (ctnl_timeout_fill_info(skb, NETLINK_CB(cb->skb).pid,
+               if (ctnl_timeout_fill_info(skb, NETLINK_CB(cb->skb).portid,
                                           cb->nlh->nlmsg_seq,
                                           NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
                                           IPCTNL_MSG_TIMEOUT_NEW, cur) < 0) {
@@ -268,7 +268,7 @@ cttimeout_get_timeout(struct sock *ctnl, struct sk_buff *skb,
                        break;
                }
 
-               ret = ctnl_timeout_fill_info(skb2, NETLINK_CB(skb).pid,
+               ret = ctnl_timeout_fill_info(skb2, NETLINK_CB(skb).portid,
                                             nlh->nlmsg_seq,
                                             NFNL_MSG_TYPE(nlh->nlmsg_type),
                                             IPCTNL_MSG_TIMEOUT_NEW, cur);
@@ -276,7 +276,7 @@ cttimeout_get_timeout(struct sock *ctnl, struct sk_buff *skb,
                        kfree_skb(skb2);
                        break;
                }
-               ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid,
+               ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid,
                                        MSG_DONTWAIT);
                if (ret > 0)
                        ret = 0;
index 5cfb5bedb2b8e8f2fa44ed936a7cab265b5878e6..9f199f2e31fae16ff667e7a1f230e6826d015794 100644 (file)
@@ -55,7 +55,8 @@ struct nfulnl_instance {
        unsigned int qlen;              /* number of nlmsgs in skb */
        struct sk_buff *skb;            /* pre-allocatd skb */
        struct timer_list timer;
-       int peer_pid;                   /* PID of the peer process */
+       struct user_namespace *peer_user_ns;    /* User namespace of the peer process */
+       int peer_portid;                        /* PORTID of the peer process */
 
        /* configurable parameters */
        unsigned int flushtimeout;      /* timeout until queue flush */
@@ -132,7 +133,7 @@ instance_put(struct nfulnl_instance *inst)
 static void nfulnl_timer(unsigned long data);
 
 static struct nfulnl_instance *
-instance_create(u_int16_t group_num, int pid)
+instance_create(u_int16_t group_num, int portid, struct user_namespace *user_ns)
 {
        struct nfulnl_instance *inst;
        int err;
@@ -162,7 +163,8 @@ instance_create(u_int16_t group_num, int pid)
 
        setup_timer(&inst->timer, nfulnl_timer, (unsigned long)inst);
 
-       inst->peer_pid = pid;
+       inst->peer_user_ns = user_ns;
+       inst->peer_portid = portid;
        inst->group_num = group_num;
 
        inst->qthreshold        = NFULNL_QTHRESH_DEFAULT;
@@ -334,7 +336,7 @@ __nfulnl_send(struct nfulnl_instance *inst)
                if (!nlh)
                        goto out;
        }
-       status = nfnetlink_unicast(inst->skb, &init_net, inst->peer_pid,
+       status = nfnetlink_unicast(inst->skb, &init_net, inst->peer_portid,
                                   MSG_DONTWAIT);
 
        inst->qlen = 0;
@@ -505,8 +507,10 @@ __build_packet_message(struct nfulnl_instance *inst,
                read_lock_bh(&sk->sk_callback_lock);
                if (sk->sk_socket && sk->sk_socket->file) {
                        struct file *file = sk->sk_socket->file;
-                       __be32 uid = htonl(file->f_cred->fsuid);
-                       __be32 gid = htonl(file->f_cred->fsgid);
+                       const struct cred *cred = file->f_cred;
+                       struct user_namespace *user_ns = inst->peer_user_ns;
+                       __be32 uid = htonl(from_kuid_munged(user_ns, cred->fsuid));
+                       __be32 gid = htonl(from_kgid_munged(user_ns, cred->fsgid));
                        read_unlock_bh(&sk->sk_callback_lock);
                        if (nla_put_be32(inst->skb, NFULA_UID, uid) ||
                            nla_put_be32(inst->skb, NFULA_GID, gid))
@@ -700,7 +704,7 @@ nfulnl_rcv_nl_event(struct notifier_block *this,
        if (event == NETLINK_URELEASE && n->protocol == NETLINK_NETFILTER) {
                int i;
 
-               /* destroy all instances for this pid */
+               /* destroy all instances for this portid */
                spin_lock_bh(&instances_lock);
                for  (i = 0; i < INSTANCE_BUCKETS; i++) {
                        struct hlist_node *tmp, *t2;
@@ -709,7 +713,7 @@ nfulnl_rcv_nl_event(struct notifier_block *this,
 
                        hlist_for_each_entry_safe(inst, tmp, t2, head, hlist) {
                                if ((net_eq(n->net, &init_net)) &&
-                                   (n->pid == inst->peer_pid))
+                                   (n->portid == inst->peer_portid))
                                        __instance_destroy(inst);
                        }
                }
@@ -771,7 +775,7 @@ nfulnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
        }
 
        inst = instance_lookup_get(group_num);
-       if (inst && inst->peer_pid != NETLINK_CB(skb).pid) {
+       if (inst && inst->peer_portid != NETLINK_CB(skb).portid) {
                ret = -EPERM;
                goto out_put;
        }
@@ -785,7 +789,8 @@ nfulnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
                        }
 
                        inst = instance_create(group_num,
-                                              NETLINK_CB(skb).pid);
+                                              NETLINK_CB(skb).portid,
+                                              sk_user_ns(NETLINK_CB(skb).ssk));
                        if (IS_ERR(inst)) {
                                ret = PTR_ERR(inst);
                                goto out;
@@ -943,7 +948,7 @@ static int seq_show(struct seq_file *s, void *v)
 
        return seq_printf(s, "%5d %6d %5d %1d %5d %6d %2d\n",
                          inst->group_num,
-                         inst->peer_pid, inst->qlen,
+                         inst->peer_portid, inst->qlen,
                          inst->copy_mode, inst->copy_range,
                          inst->flushtimeout, atomic_read(&inst->use));
 }
index c0496a55ad0ceffb5470872cadc83c218a4b70c9..e12d44e75b21f79f266b3dc580fd283aa81b58f9 100644 (file)
@@ -44,7 +44,7 @@ struct nfqnl_instance {
        struct hlist_node hlist;                /* global list of queues */
        struct rcu_head rcu;
 
-       int peer_pid;
+       int peer_portid;
        unsigned int queue_maxlen;
        unsigned int copy_range;
        unsigned int queue_dropped;
@@ -92,7 +92,7 @@ instance_lookup(u_int16_t queue_num)
 }
 
 static struct nfqnl_instance *
-instance_create(u_int16_t queue_num, int pid)
+instance_create(u_int16_t queue_num, int portid)
 {
        struct nfqnl_instance *inst;
        unsigned int h;
@@ -111,7 +111,7 @@ instance_create(u_int16_t queue_num, int pid)
        }
 
        inst->queue_num = queue_num;
-       inst->peer_pid = pid;
+       inst->peer_portid = portid;
        inst->queue_maxlen = NFQNL_QMAX_DEFAULT;
        inst->copy_range = 0xfffff;
        inst->copy_mode = NFQNL_COPY_NONE;
@@ -225,7 +225,7 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
 {
        sk_buff_data_t old_tail;
        size_t size;
-       size_t data_len = 0;
+       size_t data_len = 0, cap_len = 0;
        struct sk_buff *skb;
        struct nlattr *nla;
        struct nfqnl_msg_packet_hdr *pmsg;
@@ -247,7 +247,8 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
 #endif
                + nla_total_size(sizeof(u_int32_t))     /* mark */
                + nla_total_size(sizeof(struct nfqnl_msg_packet_hw))
-               + nla_total_size(sizeof(struct nfqnl_msg_packet_timestamp));
+               + nla_total_size(sizeof(struct nfqnl_msg_packet_timestamp)
+               + nla_total_size(sizeof(u_int32_t)));   /* cap_len */
 
        outdev = entry->outdev;
 
@@ -266,6 +267,7 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
                        data_len = entskb->len;
 
                size += nla_total_size(data_len);
+               cap_len = entskb->len;
                break;
        }
 
@@ -402,12 +404,14 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
        if (ct && nfqnl_ct_put(skb, ct, ctinfo) < 0)
                goto nla_put_failure;
 
+       if (cap_len > 0 && nla_put_be32(skb, NFQA_CAP_LEN, htonl(cap_len)))
+               goto nla_put_failure;
+
        nlh->nlmsg_len = skb->tail - old_tail;
        return skb;
 
 nla_put_failure:
-       if (skb)
-               kfree_skb(skb);
+       kfree_skb(skb);
        net_err_ratelimited("nf_queue: error creating packet message\n");
        return NULL;
 }
@@ -440,7 +444,7 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
        }
        spin_lock_bh(&queue->lock);
 
-       if (!queue->peer_pid) {
+       if (!queue->peer_portid) {
                err = -EINVAL;
                goto err_out_free_nskb;
        }
@@ -459,7 +463,7 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
        *packet_id_ptr = htonl(entry->id);
 
        /* nfnetlink_unicast will either free the nskb or add it to a socket */
-       err = nfnetlink_unicast(nskb, &init_net, queue->peer_pid, MSG_DONTWAIT);
+       err = nfnetlink_unicast(nskb, &init_net, queue->peer_portid, MSG_DONTWAIT);
        if (err < 0) {
                queue->queue_user_dropped++;
                goto err_out_unlock;
@@ -527,9 +531,13 @@ nfqnl_set_mode(struct nfqnl_instance *queue,
 
        case NFQNL_COPY_PACKET:
                queue->copy_mode = mode;
-               /* we're using struct nlattr which has 16bit nla_len */
-               if (range > 0xffff)
-                       queue->copy_range = 0xffff;
+               /* We're using struct nlattr which has 16bit nla_len. Note that
+                * nla_len includes the header length. Thus, the maximum packet
+                * length that we support is 65531 bytes. We send truncated
+                * packets if the specified length is larger than that.
+                */
+               if (range > 0xffff - NLA_HDRLEN)
+                       queue->copy_range = 0xffff - NLA_HDRLEN;
                else
                        queue->copy_range = range;
                break;
@@ -616,7 +624,7 @@ nfqnl_rcv_nl_event(struct notifier_block *this,
        if (event == NETLINK_URELEASE && n->protocol == NETLINK_NETFILTER) {
                int i;
 
-               /* destroy all instances for this pid */
+               /* destroy all instances for this portid */
                spin_lock(&instances_lock);
                for (i = 0; i < INSTANCE_BUCKETS; i++) {
                        struct hlist_node *tmp, *t2;
@@ -625,7 +633,7 @@ nfqnl_rcv_nl_event(struct notifier_block *this,
 
                        hlist_for_each_entry_safe(inst, tmp, t2, head, hlist) {
                                if ((n->net == &init_net) &&
-                                   (n->pid == inst->peer_pid))
+                                   (n->portid == inst->peer_portid))
                                        __instance_destroy(inst);
                        }
                }
@@ -650,7 +658,7 @@ static const struct nla_policy nfqa_verdict_batch_policy[NFQA_MAX+1] = {
        [NFQA_MARK]             = { .type = NLA_U32 },
 };
 
-static struct nfqnl_instance *verdict_instance_lookup(u16 queue_num, int nlpid)
+static struct nfqnl_instance *verdict_instance_lookup(u16 queue_num, int nlportid)
 {
        struct nfqnl_instance *queue;
 
@@ -658,7 +666,7 @@ static struct nfqnl_instance *verdict_instance_lookup(u16 queue_num, int nlpid)
        if (!queue)
                return ERR_PTR(-ENODEV);
 
-       if (queue->peer_pid != nlpid)
+       if (queue->peer_portid != nlportid)
                return ERR_PTR(-EPERM);
 
        return queue;
@@ -698,7 +706,7 @@ nfqnl_recv_verdict_batch(struct sock *ctnl, struct sk_buff *skb,
        LIST_HEAD(batch_list);
        u16 queue_num = ntohs(nfmsg->res_id);
 
-       queue = verdict_instance_lookup(queue_num, NETLINK_CB(skb).pid);
+       queue = verdict_instance_lookup(queue_num, NETLINK_CB(skb).portid);
        if (IS_ERR(queue))
                return PTR_ERR(queue);
 
@@ -749,7 +757,7 @@ nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
        queue = instance_lookup(queue_num);
        if (!queue)
 
-       queue = verdict_instance_lookup(queue_num, NETLINK_CB(skb).pid);
+       queue = verdict_instance_lookup(queue_num, NETLINK_CB(skb).portid);
        if (IS_ERR(queue))
                return PTR_ERR(queue);
 
@@ -832,7 +840,7 @@ nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
 
        rcu_read_lock();
        queue = instance_lookup(queue_num);
-       if (queue && queue->peer_pid != NETLINK_CB(skb).pid) {
+       if (queue && queue->peer_portid != NETLINK_CB(skb).portid) {
                ret = -EPERM;
                goto err_out_unlock;
        }
@@ -844,7 +852,7 @@ nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
                                ret = -EBUSY;
                                goto err_out_unlock;
                        }
-                       queue = instance_create(queue_num, NETLINK_CB(skb).pid);
+                       queue = instance_create(queue_num, NETLINK_CB(skb).portid);
                        if (IS_ERR(queue)) {
                                ret = PTR_ERR(queue);
                                goto err_out_unlock;
@@ -1016,7 +1024,7 @@ static int seq_show(struct seq_file *s, void *v)
 
        return seq_printf(s, "%5d %6d %5d %1d %5d %5d %5d %8d %2d\n",
                          inst->queue_num,
-                         inst->peer_pid, inst->queue_total,
+                         inst->peer_portid, inst->queue_total,
                          inst->copy_mode, inst->copy_range,
                          inst->queue_dropped, inst->queue_user_dropped,
                          inst->id_sequence, 1);
index 116018560c6028789835b92abf481747765b469b..16c712563860bad8b8ba03041b06cf2c386d43ab 100644 (file)
@@ -72,14 +72,44 @@ static u8 xt_ct_find_proto(const struct xt_tgchk_param *par)
                return 0;
 }
 
+static int
+xt_ct_set_helper(struct nf_conn *ct, const char *helper_name,
+                const struct xt_tgchk_param *par)
+{
+       struct nf_conntrack_helper *helper;
+       struct nf_conn_help *help;
+       u8 proto;
+
+       proto = xt_ct_find_proto(par);
+       if (!proto) {
+               pr_info("You must specify a L4 protocol, and not use "
+                       "inversions on it.\n");
+               return -ENOENT;
+       }
+
+       helper = nf_conntrack_helper_try_module_get(helper_name, par->family,
+                                                   proto);
+       if (helper == NULL) {
+               pr_info("No such helper \"%s\"\n", helper_name);
+               return -ENOENT;
+       }
+
+       help = nf_ct_helper_ext_add(ct, helper, GFP_KERNEL);
+       if (help == NULL) {
+               module_put(helper->me);
+               return -ENOMEM;
+       }
+
+       help->helper = helper;
+       return 0;
+}
+
 static int xt_ct_tg_check_v0(const struct xt_tgchk_param *par)
 {
        struct xt_ct_target_info *info = par->targinfo;
        struct nf_conntrack_tuple t;
-       struct nf_conn_help *help;
        struct nf_conn *ct;
-       int ret = 0;
-       u8 proto;
+       int ret;
 
        if (info->flags & ~XT_CT_NOTRACK)
                return -EINVAL;
@@ -112,31 +142,9 @@ static int xt_ct_tg_check_v0(const struct xt_tgchk_param *par)
                goto err3;
 
        if (info->helper[0]) {
-               struct nf_conntrack_helper *helper;
-
-               ret = -ENOENT;
-               proto = xt_ct_find_proto(par);
-               if (!proto) {
-                       pr_info("You must specify a L4 protocol, "
-                               "and not use inversions on it.\n");
-                       goto err3;
-               }
-
-               ret = -ENOENT;
-               helper = nf_conntrack_helper_try_module_get(info->helper,
-                                                           par->family,
-                                                           proto);
-               if (helper == NULL) {
-                       pr_info("No such helper \"%s\"\n", info->helper);
-                       goto err3;
-               }
-
-               ret = -ENOMEM;
-               help = nf_ct_helper_ext_add(ct, helper, GFP_KERNEL);
-               if (help == NULL)
+               ret = xt_ct_set_helper(ct, info->helper, par);
+               if (ret < 0)
                        goto err3;
-
-               help->helper = helper;
        }
 
        __set_bit(IPS_TEMPLATE_BIT, &ct->status);
@@ -164,17 +172,77 @@ static void __xt_ct_tg_timeout_put(struct ctnl_timeout *timeout)
 }
 #endif
 
+static int
+xt_ct_set_timeout(struct nf_conn *ct, const struct xt_tgchk_param *par,
+                 const char *timeout_name)
+{
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
+       typeof(nf_ct_timeout_find_get_hook) timeout_find_get;
+       struct ctnl_timeout *timeout;
+       struct nf_conn_timeout *timeout_ext;
+       const struct ipt_entry *e = par->entryinfo;
+       struct nf_conntrack_l4proto *l4proto;
+       int ret = 0;
+
+       rcu_read_lock();
+       timeout_find_get = rcu_dereference(nf_ct_timeout_find_get_hook);
+       if (timeout_find_get == NULL) {
+               ret = -ENOENT;
+               pr_info("Timeout policy base is empty\n");
+               goto out;
+       }
+
+       if (e->ip.invflags & IPT_INV_PROTO) {
+               ret = -EINVAL;
+               pr_info("You cannot use inversion on L4 protocol\n");
+               goto out;
+       }
+
+       timeout = timeout_find_get(timeout_name);
+       if (timeout == NULL) {
+               ret = -ENOENT;
+               pr_info("No such timeout policy \"%s\"\n", timeout_name);
+               goto out;
+       }
+
+       if (timeout->l3num != par->family) {
+               ret = -EINVAL;
+               pr_info("Timeout policy `%s' can only be used by L3 protocol "
+                       "number %d\n", timeout_name, timeout->l3num);
+               goto err_put_timeout;
+       }
+       /* Make sure the timeout policy matches any existing protocol tracker,
+        * otherwise default to generic.
+        */
+       l4proto = __nf_ct_l4proto_find(par->family, e->ip.proto);
+       if (timeout->l4proto->l4proto != l4proto->l4proto) {
+               ret = -EINVAL;
+               pr_info("Timeout policy `%s' can only be used by L4 protocol "
+                       "number %d\n",
+                       timeout_name, timeout->l4proto->l4proto);
+               goto err_put_timeout;
+       }
+       timeout_ext = nf_ct_timeout_ext_add(ct, timeout, GFP_ATOMIC);
+       if (timeout_ext == NULL)
+               ret = -ENOMEM;
+
+err_put_timeout:
+       __xt_ct_tg_timeout_put(timeout);
+out:
+       rcu_read_unlock();
+       return ret;
+#else
+       return -EOPNOTSUPP;
+#endif
+}
+
 static int xt_ct_tg_check_v1(const struct xt_tgchk_param *par)
 {
        struct xt_ct_target_info_v1 *info = par->targinfo;
        struct nf_conntrack_tuple t;
-       struct nf_conn_help *help;
        struct nf_conn *ct;
-       int ret = 0;
-       u8 proto;
-#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
-       struct ctnl_timeout *timeout;
-#endif
+       int ret;
+
        if (info->flags & ~XT_CT_NOTRACK)
                return -EINVAL;
 
@@ -206,93 +274,16 @@ static int xt_ct_tg_check_v1(const struct xt_tgchk_param *par)
                goto err3;
 
        if (info->helper[0]) {
-               struct nf_conntrack_helper *helper;
-
-               ret = -ENOENT;
-               proto = xt_ct_find_proto(par);
-               if (!proto) {
-                       pr_info("You must specify a L4 protocol, "
-                               "and not use inversions on it.\n");
-                       goto err3;
-               }
-
-               ret = -ENOENT;
-               helper = nf_conntrack_helper_try_module_get(info->helper,
-                                                           par->family,
-                                                           proto);
-               if (helper == NULL) {
-                       pr_info("No such helper \"%s\"\n", info->helper);
-                       goto err3;
-               }
-
-               ret = -ENOMEM;
-               help = nf_ct_helper_ext_add(ct, helper, GFP_KERNEL);
-               if (help == NULL)
+               ret = xt_ct_set_helper(ct, info->helper, par);
+               if (ret < 0)
                        goto err3;
-
-               help->helper = helper;
        }
 
-#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
        if (info->timeout[0]) {
-               typeof(nf_ct_timeout_find_get_hook) timeout_find_get;
-               struct nf_conn_timeout *timeout_ext;
-
-               rcu_read_lock();
-               timeout_find_get =
-                       rcu_dereference(nf_ct_timeout_find_get_hook);
-
-               if (timeout_find_get) {
-                       const struct ipt_entry *e = par->entryinfo;
-                       struct nf_conntrack_l4proto *l4proto;
-
-                       if (e->ip.invflags & IPT_INV_PROTO) {
-                               ret = -EINVAL;
-                               pr_info("You cannot use inversion on "
-                                        "L4 protocol\n");
-                               goto err4;
-                       }
-                       timeout = timeout_find_get(info->timeout);
-                       if (timeout == NULL) {
-                               ret = -ENOENT;
-                               pr_info("No such timeout policy \"%s\"\n",
-                                       info->timeout);
-                               goto err4;
-                       }
-                       if (timeout->l3num != par->family) {
-                               ret = -EINVAL;
-                               pr_info("Timeout policy `%s' can only be "
-                                       "used by L3 protocol number %d\n",
-                                       info->timeout, timeout->l3num);
-                               goto err5;
-                       }
-                       /* Make sure the timeout policy matches any existing
-                        * protocol tracker, otherwise default to generic.
-                        */
-                       l4proto = __nf_ct_l4proto_find(par->family,
-                                                      e->ip.proto);
-                       if (timeout->l4proto->l4proto != l4proto->l4proto) {
-                               ret = -EINVAL;
-                               pr_info("Timeout policy `%s' can only be "
-                                       "used by L4 protocol number %d\n",
-                                       info->timeout,
-                                       timeout->l4proto->l4proto);
-                               goto err5;
-                       }
-                       timeout_ext = nf_ct_timeout_ext_add(ct, timeout,
-                                                           GFP_ATOMIC);
-                       if (timeout_ext == NULL) {
-                               ret = -ENOMEM;
-                               goto err5;
-                       }
-               } else {
-                       ret = -ENOENT;
-                       pr_info("Timeout policy base is empty\n");
-                       goto err4;
-               }
-               rcu_read_unlock();
+               ret = xt_ct_set_timeout(ct, par, info->timeout);
+               if (ret < 0)
+                       goto err3;
        }
-#endif
 
        __set_bit(IPS_TEMPLATE_BIT, &ct->status);
        __set_bit(IPS_CONFIRMED_BIT, &ct->status);
@@ -300,12 +291,6 @@ out:
        info->ct = ct;
        return 0;
 
-#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
-err5:
-       __xt_ct_tg_timeout_put(timeout);
-err4:
-       rcu_read_unlock();
-#endif
 err3:
        nf_conntrack_free(ct);
 err2:
@@ -330,15 +315,30 @@ static void xt_ct_tg_destroy_v0(const struct xt_tgdtor_param *par)
        nf_ct_put(info->ct);
 }
 
-static void xt_ct_tg_destroy_v1(const struct xt_tgdtor_param *par)
+static void xt_ct_destroy_timeout(struct nf_conn *ct)
 {
-       struct xt_ct_target_info_v1 *info = par->targinfo;
-       struct nf_conn *ct = info->ct;
-       struct nf_conn_help *help;
 #ifdef CONFIG_NF_CONNTRACK_TIMEOUT
        struct nf_conn_timeout *timeout_ext;
        typeof(nf_ct_timeout_put_hook) timeout_put;
+
+       rcu_read_lock();
+       timeout_put = rcu_dereference(nf_ct_timeout_put_hook);
+
+       if (timeout_put) {
+               timeout_ext = nf_ct_timeout_find(ct);
+               if (timeout_ext)
+                       timeout_put(timeout_ext->timeout);
+       }
+       rcu_read_unlock();
 #endif
+}
+
+static void xt_ct_tg_destroy_v1(const struct xt_tgdtor_param *par)
+{
+       struct xt_ct_target_info_v1 *info = par->targinfo;
+       struct nf_conn *ct = info->ct;
+       struct nf_conn_help *help;
+
        if (!nf_ct_is_untracked(ct)) {
                help = nfct_help(ct);
                if (help)
@@ -346,17 +346,7 @@ static void xt_ct_tg_destroy_v1(const struct xt_tgdtor_param *par)
 
                nf_ct_l3proto_module_put(par->family);
 
-#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
-               rcu_read_lock();
-               timeout_put = rcu_dereference(nf_ct_timeout_put_hook);
-
-               if (timeout_put) {
-                       timeout_ext = nf_ct_timeout_find(ct);
-                       if (timeout_ext)
-                               timeout_put(timeout_ext->timeout);
-               }
-               rcu_read_unlock();
-#endif
+               xt_ct_destroy_timeout(ct);
        }
        nf_ct_put(info->ct);
 }
index 91e9af4d1f42c3baef9af1261c9464c70cd1bac0..fa40096940a1712be7ac0c517ea5f1d7af4dfd56 100644 (file)
@@ -151,10 +151,12 @@ static void dump_sk_uid_gid(struct sbuff *m, struct sock *sk)
                return;
 
        read_lock_bh(&sk->sk_callback_lock);
-       if (sk->sk_socket && sk->sk_socket->file)
+       if (sk->sk_socket && sk->sk_socket->file) {
+               const struct cred *cred = sk->sk_socket->file->f_cred;
                sb_add(m, "UID=%u GID=%u ",
-                       sk->sk_socket->file->f_cred->fsuid,
-                       sk->sk_socket->file->f_cred->fsgid);
+                       from_kuid_munged(&init_user_ns, cred->fsuid),
+                       from_kgid_munged(&init_user_ns, cred->fsgid));
+       }
        read_unlock_bh(&sk->sk_callback_lock);
 }
 
diff --git a/net/netfilter/xt_NETMAP.c b/net/netfilter/xt_NETMAP.c
new file mode 100644 (file)
index 0000000..b253e07
--- /dev/null
@@ -0,0 +1,165 @@
+/*
+ * (C) 2000-2001 Svenning Soerensen <svenning@post5.tele.dk>
+ * Copyright (c) 2011 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/ip.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/ipv6.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/netfilter_ipv6.h>
+#include <linux/netfilter/x_tables.h>
+#include <net/netfilter/nf_nat.h>
+
+static unsigned int
+netmap_tg6(struct sk_buff *skb, const struct xt_action_param *par)
+{
+       const struct nf_nat_range *range = par->targinfo;
+       struct nf_nat_range newrange;
+       struct nf_conn *ct;
+       enum ip_conntrack_info ctinfo;
+       union nf_inet_addr new_addr, netmask;
+       unsigned int i;
+
+       ct = nf_ct_get(skb, &ctinfo);
+       for (i = 0; i < ARRAY_SIZE(range->min_addr.ip6); i++)
+               netmask.ip6[i] = ~(range->min_addr.ip6[i] ^
+                                  range->max_addr.ip6[i]);
+
+       if (par->hooknum == NF_INET_PRE_ROUTING ||
+           par->hooknum == NF_INET_LOCAL_OUT)
+               new_addr.in6 = ipv6_hdr(skb)->daddr;
+       else
+               new_addr.in6 = ipv6_hdr(skb)->saddr;
+
+       for (i = 0; i < ARRAY_SIZE(new_addr.ip6); i++) {
+               new_addr.ip6[i] &= ~netmask.ip6[i];
+               new_addr.ip6[i] |= range->min_addr.ip6[i] &
+                                  netmask.ip6[i];
+       }
+
+       newrange.flags  = range->flags | NF_NAT_RANGE_MAP_IPS;
+       newrange.min_addr       = new_addr;
+       newrange.max_addr       = new_addr;
+       newrange.min_proto      = range->min_proto;
+       newrange.max_proto      = range->max_proto;
+
+       return nf_nat_setup_info(ct, &newrange, HOOK2MANIP(par->hooknum));
+}
+
+static int netmap_tg6_checkentry(const struct xt_tgchk_param *par)
+{
+       const struct nf_nat_range *range = par->targinfo;
+
+       if (!(range->flags & NF_NAT_RANGE_MAP_IPS))
+               return -EINVAL;
+       return 0;
+}
+
+static unsigned int
+netmap_tg4(struct sk_buff *skb, const struct xt_action_param *par)
+{
+       struct nf_conn *ct;
+       enum ip_conntrack_info ctinfo;
+       __be32 new_ip, netmask;
+       const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
+       struct nf_nat_range newrange;
+
+       NF_CT_ASSERT(par->hooknum == NF_INET_PRE_ROUTING ||
+                    par->hooknum == NF_INET_POST_ROUTING ||
+                    par->hooknum == NF_INET_LOCAL_OUT ||
+                    par->hooknum == NF_INET_LOCAL_IN);
+       ct = nf_ct_get(skb, &ctinfo);
+
+       netmask = ~(mr->range[0].min_ip ^ mr->range[0].max_ip);
+
+       if (par->hooknum == NF_INET_PRE_ROUTING ||
+           par->hooknum == NF_INET_LOCAL_OUT)
+               new_ip = ip_hdr(skb)->daddr & ~netmask;
+       else
+               new_ip = ip_hdr(skb)->saddr & ~netmask;
+       new_ip |= mr->range[0].min_ip & netmask;
+
+       memset(&newrange.min_addr, 0, sizeof(newrange.min_addr));
+       memset(&newrange.max_addr, 0, sizeof(newrange.max_addr));
+       newrange.flags       = mr->range[0].flags | NF_NAT_RANGE_MAP_IPS;
+       newrange.min_addr.ip = new_ip;
+       newrange.max_addr.ip = new_ip;
+       newrange.min_proto   = mr->range[0].min;
+       newrange.max_proto   = mr->range[0].max;
+
+       /* Hand modified range to generic setup. */
+       return nf_nat_setup_info(ct, &newrange, HOOK2MANIP(par->hooknum));
+}
+
+static int netmap_tg4_check(const struct xt_tgchk_param *par)
+{
+       const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
+
+       if (!(mr->range[0].flags & NF_NAT_RANGE_MAP_IPS)) {
+               pr_debug("bad MAP_IPS.\n");
+               return -EINVAL;
+       }
+       if (mr->rangesize != 1) {
+               pr_debug("bad rangesize %u.\n", mr->rangesize);
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static struct xt_target netmap_tg_reg[] __read_mostly = {
+       {
+               .name       = "NETMAP",
+               .family     = NFPROTO_IPV6,
+               .revision   = 0,
+               .target     = netmap_tg6,
+               .targetsize = sizeof(struct nf_nat_range),
+               .table      = "nat",
+               .hooks      = (1 << NF_INET_PRE_ROUTING) |
+                             (1 << NF_INET_POST_ROUTING) |
+                             (1 << NF_INET_LOCAL_OUT) |
+                             (1 << NF_INET_LOCAL_IN),
+               .checkentry = netmap_tg6_checkentry,
+               .me         = THIS_MODULE,
+       },
+       {
+               .name       = "NETMAP",
+               .family     = NFPROTO_IPV4,
+               .revision   = 0,
+               .target     = netmap_tg4,
+               .targetsize = sizeof(struct nf_nat_ipv4_multi_range_compat),
+               .table      = "nat",
+               .hooks      = (1 << NF_INET_PRE_ROUTING) |
+                             (1 << NF_INET_POST_ROUTING) |
+                             (1 << NF_INET_LOCAL_OUT) |
+                             (1 << NF_INET_LOCAL_IN),
+               .checkentry = netmap_tg4_check,
+               .me         = THIS_MODULE,
+       },
+};
+
+static int __init netmap_tg_init(void)
+{
+       return xt_register_targets(netmap_tg_reg, ARRAY_SIZE(netmap_tg_reg));
+}
+
+static void netmap_tg_exit(void)
+{
+       xt_unregister_targets(netmap_tg_reg, ARRAY_SIZE(netmap_tg_reg));
+}
+
+module_init(netmap_tg_init);
+module_exit(netmap_tg_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Xtables: 1:1 NAT mapping of subnets");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS("ip6t_NETMAP");
+MODULE_ALIAS("ipt_NETMAP");
index 7babe7d687169d6231fa17149bfd10cb52ee7ebe..817f9e9f2b16c70930df5e51d62f5fd606e76f69 100644 (file)
@@ -43,7 +43,7 @@ static u32 hash_v4(const struct sk_buff *skb)
        const struct iphdr *iph = ip_hdr(skb);
 
        /* packets in either direction go into same queue */
-       if (iph->saddr < iph->daddr)
+       if ((__force u32)iph->saddr < (__force u32)iph->daddr)
                return jhash_3words((__force u32)iph->saddr,
                        (__force u32)iph->daddr, iph->protocol, jhash_initval);
 
@@ -57,7 +57,8 @@ static u32 hash_v6(const struct sk_buff *skb)
        const struct ipv6hdr *ip6h = ipv6_hdr(skb);
        u32 a, b, c;
 
-       if (ip6h->saddr.s6_addr32[3] < ip6h->daddr.s6_addr32[3]) {
+       if ((__force u32)ip6h->saddr.s6_addr32[3] <
+           (__force u32)ip6h->daddr.s6_addr32[3]) {
                a = (__force u32) ip6h->saddr.s6_addr32[3];
                b = (__force u32) ip6h->daddr.s6_addr32[3];
        } else {
@@ -65,7 +66,8 @@ static u32 hash_v6(const struct sk_buff *skb)
                a = (__force u32) ip6h->daddr.s6_addr32[3];
        }
 
-       if (ip6h->saddr.s6_addr32[1] < ip6h->daddr.s6_addr32[1])
+       if ((__force u32)ip6h->saddr.s6_addr32[1] <
+           (__force u32)ip6h->daddr.s6_addr32[1])
                c = (__force u32) ip6h->saddr.s6_addr32[1];
        else
                c = (__force u32) ip6h->daddr.s6_addr32[1];
diff --git a/net/netfilter/xt_NOTRACK.c b/net/netfilter/xt_NOTRACK.c
deleted file mode 100644 (file)
index 9d78218..0000000
+++ /dev/null
@@ -1,53 +0,0 @@
-/* This is a module which is used for setting up fake conntracks
- * on packets so that they are not seen by the conntrack/NAT code.
- */
-#include <linux/module.h>
-#include <linux/skbuff.h>
-
-#include <linux/netfilter/x_tables.h>
-#include <net/netfilter/nf_conntrack.h>
-
-MODULE_DESCRIPTION("Xtables: Disabling connection tracking for packets");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("ipt_NOTRACK");
-MODULE_ALIAS("ip6t_NOTRACK");
-
-static unsigned int
-notrack_tg(struct sk_buff *skb, const struct xt_action_param *par)
-{
-       /* Previously seen (loopback)? Ignore. */
-       if (skb->nfct != NULL)
-               return XT_CONTINUE;
-
-       /* Attach fake conntrack entry.
-          If there is a real ct entry correspondig to this packet,
-          it'll hang aroun till timing out. We don't deal with it
-          for performance reasons. JK */
-       skb->nfct = &nf_ct_untracked_get()->ct_general;
-       skb->nfctinfo = IP_CT_NEW;
-       nf_conntrack_get(skb->nfct);
-
-       return XT_CONTINUE;
-}
-
-static struct xt_target notrack_tg_reg __read_mostly = {
-       .name     = "NOTRACK",
-       .revision = 0,
-       .family   = NFPROTO_UNSPEC,
-       .target   = notrack_tg,
-       .table    = "raw",
-       .me       = THIS_MODULE,
-};
-
-static int __init notrack_tg_init(void)
-{
-       return xt_register_target(&notrack_tg_reg);
-}
-
-static void __exit notrack_tg_exit(void)
-{
-       xt_unregister_target(&notrack_tg_reg);
-}
-
-module_init(notrack_tg_init);
-module_exit(notrack_tg_exit);
diff --git a/net/netfilter/xt_REDIRECT.c b/net/netfilter/xt_REDIRECT.c
new file mode 100644 (file)
index 0000000..22a1030
--- /dev/null
@@ -0,0 +1,190 @@
+/*
+ * (C) 1999-2001 Paul `Rusty' Russell
+ * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
+ * Copyright (c) 2011 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Based on Rusty Russell's IPv4 REDIRECT target. Development of IPv6
+ * NAT funded by Astaro.
+ */
+
+#include <linux/if.h>
+#include <linux/inetdevice.h>
+#include <linux/ip.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/netfilter.h>
+#include <linux/types.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/netfilter_ipv6.h>
+#include <linux/netfilter/x_tables.h>
+#include <net/addrconf.h>
+#include <net/checksum.h>
+#include <net/protocol.h>
+#include <net/netfilter/nf_nat.h>
+
+static const struct in6_addr loopback_addr = IN6ADDR_LOOPBACK_INIT;
+
+static unsigned int
+redirect_tg6(struct sk_buff *skb, const struct xt_action_param *par)
+{
+       const struct nf_nat_range *range = par->targinfo;
+       struct nf_nat_range newrange;
+       struct in6_addr newdst;
+       enum ip_conntrack_info ctinfo;
+       struct nf_conn *ct;
+
+       ct = nf_ct_get(skb, &ctinfo);
+       if (par->hooknum == NF_INET_LOCAL_OUT)
+               newdst = loopback_addr;
+       else {
+               struct inet6_dev *idev;
+               struct inet6_ifaddr *ifa;
+               bool addr = false;
+
+               rcu_read_lock();
+               idev = __in6_dev_get(skb->dev);
+               if (idev != NULL) {
+                       list_for_each_entry(ifa, &idev->addr_list, if_list) {
+                               newdst = ifa->addr;
+                               addr = true;
+                               break;
+                       }
+               }
+               rcu_read_unlock();
+
+               if (!addr)
+                       return NF_DROP;
+       }
+
+       newrange.flags          = range->flags | NF_NAT_RANGE_MAP_IPS;
+       newrange.min_addr.in6   = newdst;
+       newrange.max_addr.in6   = newdst;
+       newrange.min_proto      = range->min_proto;
+       newrange.max_proto      = range->max_proto;
+
+       return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_DST);
+}
+
+static int redirect_tg6_checkentry(const struct xt_tgchk_param *par)
+{
+       const struct nf_nat_range *range = par->targinfo;
+
+       if (range->flags & NF_NAT_RANGE_MAP_IPS)
+               return -EINVAL;
+       return 0;
+}
+
+/* FIXME: Take multiple ranges --RR */
+static int redirect_tg4_check(const struct xt_tgchk_param *par)
+{
+       const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
+
+       if (mr->range[0].flags & NF_NAT_RANGE_MAP_IPS) {
+               pr_debug("bad MAP_IPS.\n");
+               return -EINVAL;
+       }
+       if (mr->rangesize != 1) {
+               pr_debug("bad rangesize %u.\n", mr->rangesize);
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static unsigned int
+redirect_tg4(struct sk_buff *skb, const struct xt_action_param *par)
+{
+       struct nf_conn *ct;
+       enum ip_conntrack_info ctinfo;
+       __be32 newdst;
+       const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
+       struct nf_nat_range newrange;
+
+       NF_CT_ASSERT(par->hooknum == NF_INET_PRE_ROUTING ||
+                    par->hooknum == NF_INET_LOCAL_OUT);
+
+       ct = nf_ct_get(skb, &ctinfo);
+       NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED));
+
+       /* Local packets: make them go to loopback */
+       if (par->hooknum == NF_INET_LOCAL_OUT)
+               newdst = htonl(0x7F000001);
+       else {
+               struct in_device *indev;
+               struct in_ifaddr *ifa;
+
+               newdst = 0;
+
+               rcu_read_lock();
+               indev = __in_dev_get_rcu(skb->dev);
+               if (indev && (ifa = indev->ifa_list))
+                       newdst = ifa->ifa_local;
+               rcu_read_unlock();
+
+               if (!newdst)
+                       return NF_DROP;
+       }
+
+       /* Transfer from original range. */
+       memset(&newrange.min_addr, 0, sizeof(newrange.min_addr));
+       memset(&newrange.max_addr, 0, sizeof(newrange.max_addr));
+       newrange.flags       = mr->range[0].flags | NF_NAT_RANGE_MAP_IPS;
+       newrange.min_addr.ip = newdst;
+       newrange.max_addr.ip = newdst;
+       newrange.min_proto   = mr->range[0].min;
+       newrange.max_proto   = mr->range[0].max;
+
+       /* Hand modified range to generic setup. */
+       return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_DST);
+}
+
+static struct xt_target redirect_tg_reg[] __read_mostly = {
+       {
+               .name       = "REDIRECT",
+               .family     = NFPROTO_IPV6,
+               .revision   = 0,
+               .table      = "nat",
+               .checkentry = redirect_tg6_checkentry,
+               .target     = redirect_tg6,
+               .targetsize = sizeof(struct nf_nat_range),
+               .hooks      = (1 << NF_INET_PRE_ROUTING) |
+                             (1 << NF_INET_LOCAL_OUT),
+               .me         = THIS_MODULE,
+       },
+       {
+               .name       = "REDIRECT",
+               .family     = NFPROTO_IPV4,
+               .revision   = 0,
+               .table      = "nat",
+               .target     = redirect_tg4,
+               .checkentry = redirect_tg4_check,
+               .targetsize = sizeof(struct nf_nat_ipv4_multi_range_compat),
+               .hooks      = (1 << NF_INET_PRE_ROUTING) |
+                             (1 << NF_INET_LOCAL_OUT),
+               .me         = THIS_MODULE,
+       },
+};
+
+static int __init redirect_tg_init(void)
+{
+       return xt_register_targets(redirect_tg_reg,
+                                  ARRAY_SIZE(redirect_tg_reg));
+}
+
+static void __exit redirect_tg_exit(void)
+{
+       xt_unregister_targets(redirect_tg_reg, ARRAY_SIZE(redirect_tg_reg));
+}
+
+module_init(redirect_tg_init);
+module_exit(redirect_tg_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_DESCRIPTION("Xtables: Connection redirection to localhost");
+MODULE_ALIAS("ip6t_REDIRECT");
+MODULE_ALIAS("ipt_REDIRECT");
diff --git a/net/netfilter/xt_nat.c b/net/netfilter/xt_nat.c
new file mode 100644 (file)
index 0000000..81aafa8
--- /dev/null
@@ -0,0 +1,170 @@
+/*
+ * (C) 1999-2001 Paul `Rusty' Russell
+ * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
+ * (C) 2011 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/x_tables.h>
+#include <net/netfilter/nf_nat_core.h>
+
+static int xt_nat_checkentry_v0(const struct xt_tgchk_param *par)
+{
+       const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
+
+       if (mr->rangesize != 1) {
+               pr_info("%s: multiple ranges no longer supported\n",
+                       par->target->name);
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static void xt_nat_convert_range(struct nf_nat_range *dst,
+                                const struct nf_nat_ipv4_range *src)
+{
+       memset(&dst->min_addr, 0, sizeof(dst->min_addr));
+       memset(&dst->max_addr, 0, sizeof(dst->max_addr));
+
+       dst->flags       = src->flags;
+       dst->min_addr.ip = src->min_ip;
+       dst->max_addr.ip = src->max_ip;
+       dst->min_proto   = src->min;
+       dst->max_proto   = src->max;
+}
+
+static unsigned int
+xt_snat_target_v0(struct sk_buff *skb, const struct xt_action_param *par)
+{
+       const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
+       struct nf_nat_range range;
+       enum ip_conntrack_info ctinfo;
+       struct nf_conn *ct;
+
+       ct = nf_ct_get(skb, &ctinfo);
+       NF_CT_ASSERT(ct != NULL &&
+                    (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED ||
+                     ctinfo == IP_CT_RELATED_REPLY));
+
+       xt_nat_convert_range(&range, &mr->range[0]);
+       return nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC);
+}
+
+static unsigned int
+xt_dnat_target_v0(struct sk_buff *skb, const struct xt_action_param *par)
+{
+       const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
+       struct nf_nat_range range;
+       enum ip_conntrack_info ctinfo;
+       struct nf_conn *ct;
+
+       ct = nf_ct_get(skb, &ctinfo);
+       NF_CT_ASSERT(ct != NULL &&
+                    (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED));
+
+       xt_nat_convert_range(&range, &mr->range[0]);
+       return nf_nat_setup_info(ct, &range, NF_NAT_MANIP_DST);
+}
+
+static unsigned int
+xt_snat_target_v1(struct sk_buff *skb, const struct xt_action_param *par)
+{
+       const struct nf_nat_range *range = par->targinfo;
+       enum ip_conntrack_info ctinfo;
+       struct nf_conn *ct;
+
+       ct = nf_ct_get(skb, &ctinfo);
+       NF_CT_ASSERT(ct != NULL &&
+                    (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED ||
+                     ctinfo == IP_CT_RELATED_REPLY));
+
+       return nf_nat_setup_info(ct, range, NF_NAT_MANIP_SRC);
+}
+
+static unsigned int
+xt_dnat_target_v1(struct sk_buff *skb, const struct xt_action_param *par)
+{
+       const struct nf_nat_range *range = par->targinfo;
+       enum ip_conntrack_info ctinfo;
+       struct nf_conn *ct;
+
+       ct = nf_ct_get(skb, &ctinfo);
+       NF_CT_ASSERT(ct != NULL &&
+                    (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED));
+
+       return nf_nat_setup_info(ct, range, NF_NAT_MANIP_DST);
+}
+
+static struct xt_target xt_nat_target_reg[] __read_mostly = {
+       {
+               .name           = "SNAT",
+               .revision       = 0,
+               .checkentry     = xt_nat_checkentry_v0,
+               .target         = xt_snat_target_v0,
+               .targetsize     = sizeof(struct nf_nat_ipv4_multi_range_compat),
+               .family         = NFPROTO_IPV4,
+               .table          = "nat",
+               .hooks          = (1 << NF_INET_POST_ROUTING) |
+                                 (1 << NF_INET_LOCAL_OUT),
+               .me             = THIS_MODULE,
+       },
+       {
+               .name           = "DNAT",
+               .revision       = 0,
+               .checkentry     = xt_nat_checkentry_v0,
+               .target         = xt_dnat_target_v0,
+               .targetsize     = sizeof(struct nf_nat_ipv4_multi_range_compat),
+               .family         = NFPROTO_IPV4,
+               .table          = "nat",
+               .hooks          = (1 << NF_INET_PRE_ROUTING) |
+                                 (1 << NF_INET_LOCAL_IN),
+               .me             = THIS_MODULE,
+       },
+       {
+               .name           = "SNAT",
+               .revision       = 1,
+               .target         = xt_snat_target_v1,
+               .targetsize     = sizeof(struct nf_nat_range),
+               .table          = "nat",
+               .hooks          = (1 << NF_INET_POST_ROUTING) |
+                                 (1 << NF_INET_LOCAL_OUT),
+               .me             = THIS_MODULE,
+       },
+       {
+               .name           = "DNAT",
+               .revision       = 1,
+               .target         = xt_dnat_target_v1,
+               .targetsize     = sizeof(struct nf_nat_range),
+               .table          = "nat",
+               .hooks          = (1 << NF_INET_PRE_ROUTING) |
+                                 (1 << NF_INET_LOCAL_IN),
+               .me             = THIS_MODULE,
+       },
+};
+
+static int __init xt_nat_init(void)
+{
+       return xt_register_targets(xt_nat_target_reg,
+                                  ARRAY_SIZE(xt_nat_target_reg));
+}
+
+static void __exit xt_nat_exit(void)
+{
+       xt_unregister_targets(xt_nat_target_reg, ARRAY_SIZE(xt_nat_target_reg));
+}
+
+module_init(xt_nat_init);
+module_exit(xt_nat_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS("ipt_SNAT");
+MODULE_ALIAS("ipt_DNAT");
+MODULE_ALIAS("ip6t_SNAT");
+MODULE_ALIAS("ip6t_DNAT");
index 846f895cb656ddf48989d28900c55b2bae9f475e..a5e673d32bdaec2dec9fb7fce26905fff1459b77 100644 (file)
@@ -269,7 +269,7 @@ xt_osf_match_packet(const struct sk_buff *skb, struct xt_action_param *p)
                                                mss <<= 8;
                                                mss |= optp[2];
 
-                                               mss = ntohs(mss);
+                                               mss = ntohs((__force __be16)mss);
                                                break;
                                        case OSFOPT_TS:
                                                loop_cont = 1;
index 772d7389b3376d623c9ab3a6ce71883619e356e1..ca2e577ed8ac196caca1190d9ae65557bd5ab8ed 100644 (file)
 #include <linux/netfilter/x_tables.h>
 #include <linux/netfilter/xt_owner.h>
 
+static int owner_check(const struct xt_mtchk_param *par)
+{
+       struct xt_owner_match_info *info = par->matchinfo;
+
+       /* For now only allow adding matches from the initial user namespace */
+       if ((info->match & (XT_OWNER_UID|XT_OWNER_GID)) &&
+           (current_user_ns() != &init_user_ns))
+               return -EINVAL;
+       return 0;
+}
+
 static bool
 owner_mt(const struct sk_buff *skb, struct xt_action_param *par)
 {
@@ -37,17 +48,23 @@ owner_mt(const struct sk_buff *skb, struct xt_action_param *par)
                return ((info->match ^ info->invert) &
                       (XT_OWNER_UID | XT_OWNER_GID)) == 0;
 
-       if (info->match & XT_OWNER_UID)
-               if ((filp->f_cred->fsuid >= info->uid_min &&
-                   filp->f_cred->fsuid <= info->uid_max) ^
+       if (info->match & XT_OWNER_UID) {
+               kuid_t uid_min = make_kuid(&init_user_ns, info->uid_min);
+               kuid_t uid_max = make_kuid(&init_user_ns, info->uid_max);
+               if ((uid_gte(filp->f_cred->fsuid, uid_min) &&
+                    uid_lte(filp->f_cred->fsuid, uid_max)) ^
                    !(info->invert & XT_OWNER_UID))
                        return false;
+       }
 
-       if (info->match & XT_OWNER_GID)
-               if ((filp->f_cred->fsgid >= info->gid_min &&
-                   filp->f_cred->fsgid <= info->gid_max) ^
+       if (info->match & XT_OWNER_GID) {
+               kgid_t gid_min = make_kgid(&init_user_ns, info->gid_min);
+               kgid_t gid_max = make_kgid(&init_user_ns, info->gid_max);
+               if ((gid_gte(filp->f_cred->fsgid, gid_min) &&
+                    gid_lte(filp->f_cred->fsgid, gid_max)) ^
                    !(info->invert & XT_OWNER_GID))
                        return false;
+       }
 
        return true;
 }
@@ -56,6 +73,7 @@ static struct xt_match owner_mt_reg __read_mostly = {
        .name       = "owner",
        .revision   = 1,
        .family     = NFPROTO_UNSPEC,
+       .checkentry = owner_check,
        .match      = owner_mt,
        .matchsize  = sizeof(struct xt_owner_match_info),
        .hooks      = (1 << NF_INET_LOCAL_OUT) |
index ae2ad1eec8d0ccc03fa862cb9a752701c5cd9493..4635c9b0045981d862cb3b31a34ac9ab735eb85e 100644 (file)
@@ -317,6 +317,8 @@ static int recent_mt_check(const struct xt_mtchk_param *par,
        struct recent_table *t;
 #ifdef CONFIG_PROC_FS
        struct proc_dir_entry *pde;
+       kuid_t uid;
+       kgid_t gid;
 #endif
        unsigned int i;
        int ret = -EINVAL;
@@ -372,6 +374,13 @@ static int recent_mt_check(const struct xt_mtchk_param *par,
        for (i = 0; i < ip_list_hash_size; i++)
                INIT_LIST_HEAD(&t->iphash[i]);
 #ifdef CONFIG_PROC_FS
+       uid = make_kuid(&init_user_ns, ip_list_uid);
+       gid = make_kgid(&init_user_ns, ip_list_gid);
+       if (!uid_valid(uid) || !gid_valid(gid)) {
+               kfree(t);
+               ret = -EINVAL;
+               goto out;
+       }
        pde = proc_create_data(t->name, ip_list_perms, recent_net->xt_recent,
                  &recent_mt_fops, t);
        if (pde == NULL) {
@@ -379,8 +388,8 @@ static int recent_mt_check(const struct xt_mtchk_param *par,
                ret = -ENOMEM;
                goto out;
        }
-       pde->uid = ip_list_uid;
-       pde->gid = ip_list_gid;
+       pde->uid = uid;
+       pde->gid = gid;
 #endif
        spin_lock_bh(&recent_lock);
        list_add_tail(&t->list, &recent_net->tables);
index c6f7db720d84f4650e975a952054c04d15868fc3..865a9e54f3ad85477e8b3d5dc481f1ea444fe978 100644 (file)
@@ -356,6 +356,27 @@ static struct xt_match set_matches[] __read_mostly = {
                .destroy        = set_match_v1_destroy,
                .me             = THIS_MODULE
        },
+       /* --return-nomatch flag support */
+       {
+               .name           = "set",
+               .family         = NFPROTO_IPV4,
+               .revision       = 2,
+               .match          = set_match_v1,
+               .matchsize      = sizeof(struct xt_set_info_match_v1),
+               .checkentry     = set_match_v1_checkentry,
+               .destroy        = set_match_v1_destroy,
+               .me             = THIS_MODULE
+       },
+       {
+               .name           = "set",
+               .family         = NFPROTO_IPV6,
+               .revision       = 2,
+               .match          = set_match_v1,
+               .matchsize      = sizeof(struct xt_set_info_match_v1),
+               .checkentry     = set_match_v1_checkentry,
+               .destroy        = set_match_v1_destroy,
+               .me             = THIS_MODULE
+       },
 };
 
 static struct xt_target set_targets[] __read_mostly = {
@@ -389,6 +410,7 @@ static struct xt_target set_targets[] __read_mostly = {
                .destroy        = set_target_v1_destroy,
                .me             = THIS_MODULE
        },
+       /* --timeout and --exist flags support */
        {
                .name           = "SET",
                .revision       = 2,
index 9ea482d08cf7e53d8e8b398fd449ca75f05f8538..63b2bdb59e955fd012de3787f9e52279169fe4bc 100644 (file)
@@ -108,9 +108,9 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
        const struct iphdr *iph = ip_hdr(skb);
        struct udphdr _hdr, *hp = NULL;
        struct sock *sk;
-       __be32 daddr, saddr;
-       __be16 dport, sport;
-       u8 protocol;
+       __be32 uninitialized_var(daddr), uninitialized_var(saddr);
+       __be16 uninitialized_var(dport), uninitialized_var(sport);
+       u8 uninitialized_var(protocol);
 #ifdef XT_SOCKET_HAVE_CONNTRACK
        struct nf_conn const *ct;
        enum ip_conntrack_info ctinfo;
@@ -261,9 +261,9 @@ socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par)
        struct ipv6hdr *iph = ipv6_hdr(skb);
        struct udphdr _hdr, *hp = NULL;
        struct sock *sk;
-       struct in6_addr *daddr, *saddr;
-       __be16 dport, sport;
-       int thoff = 0, tproto;
+       struct in6_addr *daddr = NULL, *saddr = NULL;
+       __be16 uninitialized_var(dport), uninitialized_var(sport);
+       int thoff = 0, uninitialized_var(tproto);
        const struct xt_socket_mtinfo1 *info = (struct xt_socket_mtinfo1 *) par->matchinfo;
 
        tproto = ipv6_find_hdr(skb, &thoff, -1, NULL, NULL);
index c48975ff8ea27c4d1e7d99ae60f67f1da94d8003..0ae55a36f492902ef19753c336160b453ef0cd34 100644 (file)
@@ -42,6 +42,7 @@ static const u_int16_t days_since_leapyear[] = {
  */
 enum {
        DSE_FIRST = 2039,
+       SECONDS_PER_DAY = 86400,
 };
 static const u_int16_t days_since_epoch[] = {
        /* 2039 - 2030 */
@@ -78,7 +79,7 @@ static inline unsigned int localtime_1(struct xtm *r, time_t time)
        unsigned int v, w;
 
        /* Each day has 86400s, so finding the hour/minute is actually easy. */
-       v         = time % 86400;
+       v         = time % SECONDS_PER_DAY;
        r->second = v % 60;
        w         = v / 60;
        r->minute = w % 60;
@@ -199,6 +200,18 @@ time_mt(const struct sk_buff *skb, struct xt_action_param *par)
                if (packet_time < info->daytime_start &&
                    packet_time > info->daytime_stop)
                        return false;
+
+               /** if user asked to ignore 'next day', then e.g.
+                *  '1 PM Wed, August 1st' should be treated
+                *  like 'Tue 1 PM July 31st'.
+                *
+                * This also causes
+                * 'Monday, "23:00 to 01:00", to match for 2 hours, starting
+                * Monday 23:00 to Tuesday 01:00.
+                */
+               if ((info->flags & XT_TIME_CONTIGUOUS) &&
+                    packet_time <= info->daytime_stop)
+                       stamp -= SECONDS_PER_DAY;
        }
 
        localtime_2(&current_time, stamp);
@@ -227,6 +240,15 @@ static int time_mt_check(const struct xt_mtchk_param *par)
                return -EDOM;
        }
 
+       if (info->flags & ~XT_TIME_ALL_FLAGS) {
+               pr_info("unknown flags 0x%x\n", info->flags & ~XT_TIME_ALL_FLAGS);
+               return -EINVAL;
+       }
+
+       if ((info->flags & XT_TIME_CONTIGUOUS) &&
+            info->daytime_start < info->daytime_stop)
+               return -EINVAL;
+
        return 0;
 }
 
index 6bf878335d9436d40b1619c37c8918f3bd12beb0..c15042f987bd8f5697338b88cc77ed6553a0d609 100644 (file)
@@ -627,7 +627,7 @@ static int netlbl_cipsov4_listall_cb(struct cipso_v4_doi *doi_def, void *arg)
        struct netlbl_cipsov4_doiwalk_arg *cb_arg = arg;
        void *data;
 
-       data = genlmsg_put(cb_arg->skb, NETLINK_CB(cb_arg->nl_cb->skb).pid,
+       data = genlmsg_put(cb_arg->skb, NETLINK_CB(cb_arg->nl_cb->skb).portid,
                           cb_arg->seq, &netlbl_cipsov4_gnl_family,
                           NLM_F_MULTI, NLBL_CIPSOV4_C_LISTALL);
        if (data == NULL)
index 4809e2e48b02542931d436188680f9663a6dd699..c5384ffc61469a422f1e5f4a9519131d95b56236 100644 (file)
@@ -448,7 +448,7 @@ static int netlbl_mgmt_listall_cb(struct netlbl_dom_map *entry, void *arg)
        struct netlbl_domhsh_walk_arg *cb_arg = arg;
        void *data;
 
-       data = genlmsg_put(cb_arg->skb, NETLINK_CB(cb_arg->nl_cb->skb).pid,
+       data = genlmsg_put(cb_arg->skb, NETLINK_CB(cb_arg->nl_cb->skb).portid,
                           cb_arg->seq, &netlbl_mgmt_gnl_family,
                           NLM_F_MULTI, NLBL_MGMT_C_LISTALL);
        if (data == NULL)
@@ -613,7 +613,7 @@ static int netlbl_mgmt_protocols_cb(struct sk_buff *skb,
        int ret_val = -ENOMEM;
        void *data;
 
-       data = genlmsg_put(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq,
+       data = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
                           &netlbl_mgmt_gnl_family, NLM_F_MULTI,
                           NLBL_MGMT_C_PROTOCOLS);
        if (data == NULL)
index e7ff694f1049be48b3193d4a0b3ffded63aecbcd..847d495cd4de0bee797f994fcaf491ed6efb30ce 100644 (file)
@@ -1096,7 +1096,7 @@ static int netlbl_unlabel_staticlist_gen(u32 cmd,
        char *secctx;
        u32 secctx_len;
 
-       data = genlmsg_put(cb_arg->skb, NETLINK_CB(cb_arg->nl_cb->skb).pid,
+       data = genlmsg_put(cb_arg->skb, NETLINK_CB(cb_arg->nl_cb->skb).portid,
                           cb_arg->seq, &netlbl_unlabel_gnl_family,
                           NLM_F_MULTI, cmd);
        if (data == NULL)
@@ -1541,7 +1541,7 @@ int __init netlbl_unlabel_defconf(void)
         * it is called is at bootup before the audit subsystem is reporting
         * messages so don't worry to much about these values. */
        security_task_getsecid(current, &audit_info.secid);
-       audit_info.loginuid = 0;
+       audit_info.loginuid = GLOBAL_ROOT_UID;
        audit_info.sessionid = 0;
 
        entry = kzalloc(sizeof(*entry), GFP_KERNEL);
index 9fae63f102980e2e789c2d8b06baf9594f1cb4b6..9650c4ad5f886ea29a7a02c82fffa100284ea5d7 100644 (file)
@@ -109,7 +109,7 @@ struct audit_buffer *netlbl_audit_start_common(int type,
                return NULL;
 
        audit_log_format(audit_buf, "netlabel: auid=%u ses=%u",
-                        audit_info->loginuid,
+                        from_kuid(&init_user_ns, audit_info->loginuid),
                         audit_info->sessionid);
 
        if (audit_info->secid != 0 &&
index 527023823b5c5ea1a48c373b49e9f1688891a494..0f2e3ad69c473afb5f7fcdbc1cbb05b5c0be46f7 100644 (file)
@@ -67,8 +67,8 @@
 struct netlink_sock {
        /* struct sock has to be the first member of netlink_sock */
        struct sock             sk;
-       u32                     pid;
-       u32                     dst_pid;
+       u32                     portid;
+       u32                     dst_portid;
        u32                     dst_group;
        u32                     flags;
        u32                     subscriptions;
@@ -104,7 +104,7 @@ static inline int netlink_is_kernel(struct sock *sk)
        return nlk_sk(sk)->flags & NETLINK_KERNEL_SOCKET;
 }
 
-struct nl_pid_hash {
+struct nl_portid_hash {
        struct hlist_head       *table;
        unsigned long           rehash_time;
 
@@ -118,10 +118,10 @@ struct nl_pid_hash {
 };
 
 struct netlink_table {
-       struct nl_pid_hash      hash;
+       struct nl_portid_hash   hash;
        struct hlist_head       mc_list;
        struct listeners __rcu  *listeners;
-       unsigned int            nl_nonroot;
+       unsigned int            flags;
        unsigned int            groups;
        struct mutex            *cb_mutex;
        struct module           *module;
@@ -145,9 +145,9 @@ static inline u32 netlink_group_mask(u32 group)
        return group ? 1 << (group - 1) : 0;
 }
 
-static inline struct hlist_head *nl_pid_hashfn(struct nl_pid_hash *hash, u32 pid)
+static inline struct hlist_head *nl_portid_hashfn(struct nl_portid_hash *hash, u32 portid)
 {
-       return &hash->table[jhash_1word(pid, hash->rnd) & hash->mask];
+       return &hash->table[jhash_1word(portid, hash->rnd) & hash->mask];
 }
 
 static void netlink_destroy_callback(struct netlink_callback *cb)
@@ -239,17 +239,17 @@ netlink_unlock_table(void)
                wake_up(&nl_table_wait);
 }
 
-static struct sock *netlink_lookup(struct net *net, int protocol, u32 pid)
+static struct sock *netlink_lookup(struct net *net, int protocol, u32 portid)
 {
-       struct nl_pid_hash *hash = &nl_table[protocol].hash;
+       struct nl_portid_hash *hash = &nl_table[protocol].hash;
        struct hlist_head *head;
        struct sock *sk;
        struct hlist_node *node;
 
        read_lock(&nl_table_lock);
-       head = nl_pid_hashfn(hash, pid);
+       head = nl_portid_hashfn(hash, portid);
        sk_for_each(sk, node, head) {
-               if (net_eq(sock_net(sk), net) && (nlk_sk(sk)->pid == pid)) {
+               if (net_eq(sock_net(sk), net) && (nlk_sk(sk)->portid == portid)) {
                        sock_hold(sk);
                        goto found;
                }
@@ -260,7 +260,7 @@ found:
        return sk;
 }
 
-static struct hlist_head *nl_pid_hash_zalloc(size_t size)
+static struct hlist_head *nl_portid_hash_zalloc(size_t size)
 {
        if (size <= PAGE_SIZE)
                return kzalloc(size, GFP_ATOMIC);
@@ -270,7 +270,7 @@ static struct hlist_head *nl_pid_hash_zalloc(size_t size)
                                         get_order(size));
 }
 
-static void nl_pid_hash_free(struct hlist_head *table, size_t size)
+static void nl_portid_hash_free(struct hlist_head *table, size_t size)
 {
        if (size <= PAGE_SIZE)
                kfree(table);
@@ -278,7 +278,7 @@ static void nl_pid_hash_free(struct hlist_head *table, size_t size)
                free_pages((unsigned long)table, get_order(size));
 }
 
-static int nl_pid_hash_rehash(struct nl_pid_hash *hash, int grow)
+static int nl_portid_hash_rehash(struct nl_portid_hash *hash, int grow)
 {
        unsigned int omask, mask, shift;
        size_t osize, size;
@@ -296,7 +296,7 @@ static int nl_pid_hash_rehash(struct nl_pid_hash *hash, int grow)
                size *= 2;
        }
 
-       table = nl_pid_hash_zalloc(size);
+       table = nl_portid_hash_zalloc(size);
        if (!table)
                return 0;
 
@@ -311,23 +311,23 @@ static int nl_pid_hash_rehash(struct nl_pid_hash *hash, int grow)
                struct hlist_node *node, *tmp;
 
                sk_for_each_safe(sk, node, tmp, &otable[i])
-                       __sk_add_node(sk, nl_pid_hashfn(hash, nlk_sk(sk)->pid));
+                       __sk_add_node(sk, nl_portid_hashfn(hash, nlk_sk(sk)->portid));
        }
 
-       nl_pid_hash_free(otable, osize);
+       nl_portid_hash_free(otable, osize);
        hash->rehash_time = jiffies + 10 * 60 * HZ;
        return 1;
 }
 
-static inline int nl_pid_hash_dilute(struct nl_pid_hash *hash, int len)
+static inline int nl_portid_hash_dilute(struct nl_portid_hash *hash, int len)
 {
        int avg = hash->entries >> hash->shift;
 
-       if (unlikely(avg > 1) && nl_pid_hash_rehash(hash, 1))
+       if (unlikely(avg > 1) && nl_portid_hash_rehash(hash, 1))
                return 1;
 
        if (unlikely(len > avg) && time_after(jiffies, hash->rehash_time)) {
-               nl_pid_hash_rehash(hash, 0);
+               nl_portid_hash_rehash(hash, 0);
                return 1;
        }
 
@@ -356,9 +356,9 @@ netlink_update_listeners(struct sock *sk)
         * makes sure updates are visible before bind or setsockopt return. */
 }
 
-static int netlink_insert(struct sock *sk, struct net *net, u32 pid)
+static int netlink_insert(struct sock *sk, struct net *net, u32 portid)
 {
-       struct nl_pid_hash *hash = &nl_table[sk->sk_protocol].hash;
+       struct nl_portid_hash *hash = &nl_table[sk->sk_protocol].hash;
        struct hlist_head *head;
        int err = -EADDRINUSE;
        struct sock *osk;
@@ -366,10 +366,10 @@ static int netlink_insert(struct sock *sk, struct net *net, u32 pid)
        int len;
 
        netlink_table_grab();
-       head = nl_pid_hashfn(hash, pid);
+       head = nl_portid_hashfn(hash, portid);
        len = 0;
        sk_for_each(osk, node, head) {
-               if (net_eq(sock_net(osk), net) && (nlk_sk(osk)->pid == pid))
+               if (net_eq(sock_net(osk), net) && (nlk_sk(osk)->portid == portid))
                        break;
                len++;
        }
@@ -377,17 +377,17 @@ static int netlink_insert(struct sock *sk, struct net *net, u32 pid)
                goto err;
 
        err = -EBUSY;
-       if (nlk_sk(sk)->pid)
+       if (nlk_sk(sk)->portid)
                goto err;
 
        err = -ENOMEM;
        if (BITS_PER_LONG > 32 && unlikely(hash->entries >= UINT_MAX))
                goto err;
 
-       if (len && nl_pid_hash_dilute(hash, len))
-               head = nl_pid_hashfn(hash, pid);
+       if (len && nl_portid_hash_dilute(hash, len))
+               head = nl_portid_hashfn(hash, portid);
        hash->entries++;
-       nlk_sk(sk)->pid = pid;
+       nlk_sk(sk)->portid = portid;
        sk_add_node(sk, head);
        err = 0;
 
@@ -518,11 +518,11 @@ static int netlink_release(struct socket *sock)
 
        skb_queue_purge(&sk->sk_write_queue);
 
-       if (nlk->pid) {
+       if (nlk->portid) {
                struct netlink_notify n = {
                                                .net = sock_net(sk),
                                                .protocol = sk->sk_protocol,
-                                               .pid = nlk->pid,
+                                               .portid = nlk->portid,
                                          };
                atomic_notifier_call_chain(&netlink_chain,
                                NETLINK_URELEASE, &n);
@@ -536,6 +536,8 @@ static int netlink_release(struct socket *sock)
                if (--nl_table[sk->sk_protocol].registered == 0) {
                        kfree(nl_table[sk->sk_protocol].listeners);
                        nl_table[sk->sk_protocol].module = NULL;
+                       nl_table[sk->sk_protocol].bind = NULL;
+                       nl_table[sk->sk_protocol].flags = 0;
                        nl_table[sk->sk_protocol].registered = 0;
                }
        } else if (nlk->subscriptions) {
@@ -557,24 +559,24 @@ static int netlink_autobind(struct socket *sock)
 {
        struct sock *sk = sock->sk;
        struct net *net = sock_net(sk);
-       struct nl_pid_hash *hash = &nl_table[sk->sk_protocol].hash;
+       struct nl_portid_hash *hash = &nl_table[sk->sk_protocol].hash;
        struct hlist_head *head;
        struct sock *osk;
        struct hlist_node *node;
-       s32 pid = task_tgid_vnr(current);
+       s32 portid = task_tgid_vnr(current);
        int err;
        static s32 rover = -4097;
 
 retry:
        cond_resched();
        netlink_table_grab();
-       head = nl_pid_hashfn(hash, pid);
+       head = nl_portid_hashfn(hash, portid);
        sk_for_each(osk, node, head) {
                if (!net_eq(sock_net(osk), net))
                        continue;
-               if (nlk_sk(osk)->pid == pid) {
-                       /* Bind collision, search negative pid values. */
-                       pid = rover--;
+               if (nlk_sk(osk)->portid == portid) {
+                       /* Bind collision, search negative portid values. */
+                       portid = rover--;
                        if (rover > -4097)
                                rover = -4097;
                        netlink_table_ungrab();
@@ -583,7 +585,7 @@ retry:
        }
        netlink_table_ungrab();
 
-       err = netlink_insert(sk, net, pid);
+       err = netlink_insert(sk, net, portid);
        if (err == -EADDRINUSE)
                goto retry;
 
@@ -596,7 +598,7 @@ retry:
 
 static inline int netlink_capable(const struct socket *sock, unsigned int flag)
 {
-       return (nl_table[sock->sk->sk_protocol].nl_nonroot & flag) ||
+       return (nl_table[sock->sk->sk_protocol].flags & flag) ||
               capable(CAP_NET_ADMIN);
 }
 
@@ -659,15 +661,15 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
 
        /* Only superuser is allowed to listen multicasts */
        if (nladdr->nl_groups) {
-               if (!netlink_capable(sock, NL_NONROOT_RECV))
+               if (!netlink_capable(sock, NL_CFG_F_NONROOT_RECV))
                        return -EPERM;
                err = netlink_realloc_groups(sk);
                if (err)
                        return err;
        }
 
-       if (nlk->pid) {
-               if (nladdr->nl_pid != nlk->pid)
+       if (nlk->portid) {
+               if (nladdr->nl_pid != nlk->portid)
                        return -EINVAL;
        } else {
                err = nladdr->nl_pid ?
@@ -713,7 +715,7 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr,
 
        if (addr->sa_family == AF_UNSPEC) {
                sk->sk_state    = NETLINK_UNCONNECTED;
-               nlk->dst_pid    = 0;
+               nlk->dst_portid = 0;
                nlk->dst_group  = 0;
                return 0;
        }
@@ -721,15 +723,15 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr,
                return -EINVAL;
 
        /* Only superuser is allowed to send multicasts */
-       if (nladdr->nl_groups && !netlink_capable(sock, NL_NONROOT_SEND))
+       if (nladdr->nl_groups && !netlink_capable(sock, NL_CFG_F_NONROOT_SEND))
                return -EPERM;
 
-       if (!nlk->pid)
+       if (!nlk->portid)
                err = netlink_autobind(sock);
 
        if (err == 0) {
                sk->sk_state    = NETLINK_CONNECTED;
-               nlk->dst_pid    = nladdr->nl_pid;
+               nlk->dst_portid = nladdr->nl_pid;
                nlk->dst_group  = ffs(nladdr->nl_groups);
        }
 
@@ -748,10 +750,10 @@ static int netlink_getname(struct socket *sock, struct sockaddr *addr,
        *addr_len = sizeof(*nladdr);
 
        if (peer) {
-               nladdr->nl_pid = nlk->dst_pid;
+               nladdr->nl_pid = nlk->dst_portid;
                nladdr->nl_groups = netlink_group_mask(nlk->dst_group);
        } else {
-               nladdr->nl_pid = nlk->pid;
+               nladdr->nl_pid = nlk->portid;
                nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0;
        }
        return 0;
@@ -770,19 +772,19 @@ static void netlink_overrun(struct sock *sk)
        atomic_inc(&sk->sk_drops);
 }
 
-static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
+static struct sock *netlink_getsockbyportid(struct sock *ssk, u32 portid)
 {
        struct sock *sock;
        struct netlink_sock *nlk;
 
-       sock = netlink_lookup(sock_net(ssk), ssk->sk_protocol, pid);
+       sock = netlink_lookup(sock_net(ssk), ssk->sk_protocol, portid);
        if (!sock)
                return ERR_PTR(-ECONNREFUSED);
 
        /* Don't bother queuing skb if kernel socket has no input function */
        nlk = nlk_sk(sock);
        if (sock->sk_state == NETLINK_CONNECTED &&
-           nlk->dst_pid != nlk_sk(ssk)->pid) {
+           nlk->dst_portid != nlk_sk(ssk)->portid) {
                sock_put(sock);
                return ERR_PTR(-ECONNREFUSED);
        }
@@ -912,7 +914,8 @@ static void netlink_rcv_wake(struct sock *sk)
                wake_up_interruptible(&nlk->wait);
 }
 
-static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb)
+static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb,
+                                 struct sock *ssk)
 {
        int ret;
        struct netlink_sock *nlk = nlk_sk(sk);
@@ -921,6 +924,7 @@ static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb)
        if (nlk->netlink_rcv != NULL) {
                ret = skb->len;
                skb_set_owner_r(skb, sk);
+               NETLINK_CB(skb).ssk = ssk;
                nlk->netlink_rcv(skb);
                consume_skb(skb);
        } else {
@@ -931,7 +935,7 @@ static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb)
 }
 
 int netlink_unicast(struct sock *ssk, struct sk_buff *skb,
-                   u32 pid, int nonblock)
+                   u32 portid, int nonblock)
 {
        struct sock *sk;
        int err;
@@ -941,13 +945,13 @@ int netlink_unicast(struct sock *ssk, struct sk_buff *skb,
 
        timeo = sock_sndtimeo(ssk, nonblock);
 retry:
-       sk = netlink_getsockbypid(ssk, pid);
+       sk = netlink_getsockbyportid(ssk, portid);
        if (IS_ERR(sk)) {
                kfree_skb(skb);
                return PTR_ERR(sk);
        }
        if (netlink_is_kernel(sk))
-               return netlink_unicast_kernel(sk, skb);
+               return netlink_unicast_kernel(sk, skb, ssk);
 
        if (sk_filter(sk, skb)) {
                err = skb->len;
@@ -1001,7 +1005,7 @@ static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
 struct netlink_broadcast_data {
        struct sock *exclude_sk;
        struct net *net;
-       u32 pid;
+       u32 portid;
        u32 group;
        int failure;
        int delivery_failure;
@@ -1022,7 +1026,7 @@ static int do_one_broadcast(struct sock *sk,
        if (p->exclude_sk == sk)
                goto out;
 
-       if (nlk->pid == p->pid || p->group - 1 >= nlk->ngroups ||
+       if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
            !test_bit(p->group - 1, nlk->groups))
                goto out;
 
@@ -1074,7 +1078,7 @@ out:
        return 0;
 }
 
-int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 pid,
+int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 portid,
        u32 group, gfp_t allocation,
        int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data),
        void *filter_data)
@@ -1088,7 +1092,7 @@ int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 pid,
 
        info.exclude_sk = ssk;
        info.net = net;
-       info.pid = pid;
+       info.portid = portid;
        info.group = group;
        info.failure = 0;
        info.delivery_failure = 0;
@@ -1126,17 +1130,17 @@ int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 pid,
 }
 EXPORT_SYMBOL(netlink_broadcast_filtered);
 
-int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid,
+int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 portid,
                      u32 group, gfp_t allocation)
 {
-       return netlink_broadcast_filtered(ssk, skb, pid, group, allocation,
+       return netlink_broadcast_filtered(ssk, skb, portid, group, allocation,
                NULL, NULL);
 }
 EXPORT_SYMBOL(netlink_broadcast);
 
 struct netlink_set_err_data {
        struct sock *exclude_sk;
-       u32 pid;
+       u32 portid;
        u32 group;
        int code;
 };
@@ -1152,7 +1156,7 @@ static int do_one_set_err(struct sock *sk, struct netlink_set_err_data *p)
        if (!net_eq(sock_net(sk), sock_net(p->exclude_sk)))
                goto out;
 
-       if (nlk->pid == p->pid || p->group - 1 >= nlk->ngroups ||
+       if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
            !test_bit(p->group - 1, nlk->groups))
                goto out;
 
@@ -1170,14 +1174,14 @@ out:
 /**
  * netlink_set_err - report error to broadcast listeners
  * @ssk: the kernel netlink socket, as returned by netlink_kernel_create()
- * @pid: the PID of a process that we want to skip (if any)
+ * @portid: the PORTID of a process that we want to skip (if any)
  * @groups: the broadcast group that will notice the error
  * @code: error code, must be negative (as usual in kernelspace)
  *
  * This function returns the number of broadcast listeners that have set the
  * NETLINK_RECV_NO_ENOBUFS socket option.
  */
-int netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code)
+int netlink_set_err(struct sock *ssk, u32 portid, u32 group, int code)
 {
        struct netlink_set_err_data info;
        struct hlist_node *node;
@@ -1185,7 +1189,7 @@ int netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code)
        int ret = 0;
 
        info.exclude_sk = ssk;
-       info.pid = pid;
+       info.portid = portid;
        info.group = group;
        /* sk->sk_err wants a positive error value */
        info.code = -code;
@@ -1242,7 +1246,7 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
                break;
        case NETLINK_ADD_MEMBERSHIP:
        case NETLINK_DROP_MEMBERSHIP: {
-               if (!netlink_capable(sock, NL_NONROOT_RECV))
+               if (!netlink_capable(sock, NL_CFG_F_NONROOT_RECV))
                        return -EPERM;
                err = netlink_realloc_groups(sk);
                if (err)
@@ -1350,7 +1354,7 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
        struct sock *sk = sock->sk;
        struct netlink_sock *nlk = nlk_sk(sk);
        struct sockaddr_nl *addr = msg->msg_name;
-       u32 dst_pid;
+       u32 dst_portid;
        u32 dst_group;
        struct sk_buff *skb;
        int err;
@@ -1370,18 +1374,18 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
                err = -EINVAL;
                if (addr->nl_family != AF_NETLINK)
                        goto out;
-               dst_pid = addr->nl_pid;
+               dst_portid = addr->nl_pid;
                dst_group = ffs(addr->nl_groups);
                err =  -EPERM;
-               if ((dst_group || dst_pid) &&
-                   !netlink_capable(sock, NL_NONROOT_SEND))
+               if ((dst_group || dst_portid) &&
+                   !netlink_capable(sock, NL_CFG_F_NONROOT_SEND))
                        goto out;
        } else {
-               dst_pid = nlk->dst_pid;
+               dst_portid = nlk->dst_portid;
                dst_group = nlk->dst_group;
        }
 
-       if (!nlk->pid) {
+       if (!nlk->portid) {
                err = netlink_autobind(sock);
                if (err)
                        goto out;
@@ -1395,9 +1399,9 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
        if (skb == NULL)
                goto out;
 
-       NETLINK_CB(skb).pid     = nlk->pid;
+       NETLINK_CB(skb).portid  = nlk->portid;
        NETLINK_CB(skb).dst_group = dst_group;
-       memcpy(NETLINK_CREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
+       NETLINK_CB(skb).creds   = siocb->scm->creds;
 
        err = -EFAULT;
        if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
@@ -1413,9 +1417,9 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
 
        if (dst_group) {
                atomic_inc(&skb->users);
-               netlink_broadcast(sk, skb, dst_pid, dst_group, GFP_KERNEL);
+               netlink_broadcast(sk, skb, dst_portid, dst_group, GFP_KERNEL);
        }
-       err = netlink_unicast(sk, skb, dst_pid, msg->msg_flags&MSG_DONTWAIT);
+       err = netlink_unicast(sk, skb, dst_portid, msg->msg_flags&MSG_DONTWAIT);
 
 out:
        scm_destroy(siocb->scm);
@@ -1478,7 +1482,7 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
                struct sockaddr_nl *addr = (struct sockaddr_nl *)msg->msg_name;
                addr->nl_family = AF_NETLINK;
                addr->nl_pad    = 0;
-               addr->nl_pid    = NETLINK_CB(skb).pid;
+               addr->nl_pid    = NETLINK_CB(skb).portid;
                addr->nl_groups = netlink_group_mask(NETLINK_CB(skb).dst_group);
                msg->msg_namelen = sizeof(*addr);
        }
@@ -1522,9 +1526,8 @@ static void netlink_data_ready(struct sock *sk, int len)
  */
 
 struct sock *
-netlink_kernel_create(struct net *net, int unit,
-                     struct module *module,
-                     struct netlink_kernel_cfg *cfg)
+__netlink_kernel_create(struct net *net, int unit, struct module *module,
+                       struct netlink_kernel_cfg *cfg)
 {
        struct socket *sock;
        struct sock *sk;
@@ -1578,7 +1581,10 @@ netlink_kernel_create(struct net *net, int unit,
                rcu_assign_pointer(nl_table[unit].listeners, listeners);
                nl_table[unit].cb_mutex = cb_mutex;
                nl_table[unit].module = module;
-               nl_table[unit].bind = cfg ? cfg->bind : NULL;
+               if (cfg) {
+                       nl_table[unit].bind = cfg->bind;
+                       nl_table[unit].flags = cfg->flags;
+               }
                nl_table[unit].registered = 1;
        } else {
                kfree(listeners);
@@ -1596,8 +1602,7 @@ out_sock_release_nosk:
        sock_release(sock);
        return NULL;
 }
-EXPORT_SYMBOL(netlink_kernel_create);
-
+EXPORT_SYMBOL(__netlink_kernel_create);
 
 void
 netlink_kernel_release(struct sock *sk)
@@ -1677,15 +1682,8 @@ void netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
        netlink_table_ungrab();
 }
 
-void netlink_set_nonroot(int protocol, unsigned int flags)
-{
-       if ((unsigned int)protocol < MAX_LINKS)
-               nl_table[protocol].nl_nonroot = flags;
-}
-EXPORT_SYMBOL(netlink_set_nonroot);
-
 struct nlmsghdr *
-__nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, int type, int len, int flags)
+__nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int flags)
 {
        struct nlmsghdr *nlh;
        int size = NLMSG_LENGTH(len);
@@ -1694,7 +1692,7 @@ __nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, int type, int len, int flags)
        nlh->nlmsg_type = type;
        nlh->nlmsg_len = size;
        nlh->nlmsg_flags = flags;
-       nlh->nlmsg_pid = pid;
+       nlh->nlmsg_pid = portid;
        nlh->nlmsg_seq = seq;
        if (!__builtin_constant_p(size) || NLMSG_ALIGN(size) - size != 0)
                memset(NLMSG_DATA(nlh) + len, 0, NLMSG_ALIGN(size) - size);
@@ -1790,7 +1788,7 @@ int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
        atomic_inc(&skb->users);
        cb->skb = skb;
 
-       sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).pid);
+       sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).portid);
        if (sk == NULL) {
                netlink_destroy_callback(cb);
                return -ECONNREFUSED;
@@ -1838,7 +1836,7 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
 
                sk = netlink_lookup(sock_net(in_skb->sk),
                                    in_skb->sk->sk_protocol,
-                                   NETLINK_CB(in_skb).pid);
+                                   NETLINK_CB(in_skb).portid);
                if (sk) {
                        sk->sk_err = ENOBUFS;
                        sk->sk_error_report(sk);
@@ -1847,12 +1845,12 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
                return;
        }
 
-       rep = __nlmsg_put(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
+       rep = __nlmsg_put(skb, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
                          NLMSG_ERROR, payload, 0);
        errmsg = nlmsg_data(rep);
        errmsg->error = err;
        memcpy(&errmsg->msg, nlh, err ? nlh->nlmsg_len : sizeof(*nlh));
-       netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).pid, MSG_DONTWAIT);
+       netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).portid, MSG_DONTWAIT);
 }
 EXPORT_SYMBOL(netlink_ack);
 
@@ -1902,33 +1900,33 @@ EXPORT_SYMBOL(netlink_rcv_skb);
  * nlmsg_notify - send a notification netlink message
  * @sk: netlink socket to use
  * @skb: notification message
- * @pid: destination netlink pid for reports or 0
+ * @portid: destination netlink portid for reports or 0
  * @group: destination multicast group or 0
  * @report: 1 to report back, 0 to disable
  * @flags: allocation flags
  */
-int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 pid,
+int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid,
                 unsigned int group, int report, gfp_t flags)
 {
        int err = 0;
 
        if (group) {
-               int exclude_pid = 0;
+               int exclude_portid = 0;
 
                if (report) {
                        atomic_inc(&skb->users);
-                       exclude_pid = pid;
+                       exclude_portid = portid;
                }
 
                /* errors reported via destination sk->sk_err, but propagate
                 * delivery errors if NETLINK_BROADCAST_ERROR flag is set */
-               err = nlmsg_multicast(sk, skb, exclude_pid, group, flags);
+               err = nlmsg_multicast(sk, skb, exclude_portid, group, flags);
        }
 
        if (report) {
                int err2;
 
-               err2 = nlmsg_unicast(sk, skb, pid);
+               err2 = nlmsg_unicast(sk, skb, portid);
                if (!err || err == -ESRCH)
                        err = err2;
        }
@@ -1953,7 +1951,7 @@ static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos)
        loff_t off = 0;
 
        for (i = 0; i < MAX_LINKS; i++) {
-               struct nl_pid_hash *hash = &nl_table[i].hash;
+               struct nl_portid_hash *hash = &nl_table[i].hash;
 
                for (j = 0; j <= hash->mask; j++) {
                        sk_for_each(s, node, &hash->table[j]) {
@@ -2001,7 +1999,7 @@ static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
        j = iter->hash_idx + 1;
 
        do {
-               struct nl_pid_hash *hash = &nl_table[i].hash;
+               struct nl_portid_hash *hash = &nl_table[i].hash;
 
                for (; j <= hash->mask; j++) {
                        s = sk_head(&hash->table[j]);
@@ -2040,7 +2038,7 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
                seq_printf(seq, "%pK %-3d %-6d %08x %-8d %-8d %pK %-8d %-8d %-8lu\n",
                           s,
                           s->sk_protocol,
-                          nlk->pid,
+                          nlk->portid,
                           nlk->groups ? (u32)nlk->groups[0] : 0,
                           sk_rmem_alloc_get(s),
                           sk_wmem_alloc_get(s),
@@ -2148,7 +2146,7 @@ static void __init netlink_add_usersock_entry(void)
        rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners);
        nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
        nl_table[NETLINK_USERSOCK].registered = 1;
-       nl_table[NETLINK_USERSOCK].nl_nonroot = NL_NONROOT_SEND;
+       nl_table[NETLINK_USERSOCK].flags = NL_CFG_F_NONROOT_SEND;
 
        netlink_table_ungrab();
 }
@@ -2185,12 +2183,12 @@ static int __init netlink_proto_init(void)
        order = get_bitmask_order(min(limit, (unsigned long)UINT_MAX)) - 1;
 
        for (i = 0; i < MAX_LINKS; i++) {
-               struct nl_pid_hash *hash = &nl_table[i].hash;
+               struct nl_portid_hash *hash = &nl_table[i].hash;
 
-               hash->table = nl_pid_hash_zalloc(1 * sizeof(*hash->table));
+               hash->table = nl_portid_hash_zalloc(1 * sizeof(*hash->table));
                if (!hash->table) {
                        while (i-- > 0)
-                               nl_pid_hash_free(nl_table[i].hash.table,
+                               nl_portid_hash_free(nl_table[i].hash.table,
                                                 1 * sizeof(*hash->table));
                        kfree(nl_table);
                        goto panic;
index fda497412fc34a5b10aa25a1c7599118d5e832a8..f2aabb6f410582439604d7a3c0f379a5cd798621 100644 (file)
@@ -501,7 +501,7 @@ EXPORT_SYMBOL(genl_unregister_family);
 /**
  * genlmsg_put - Add generic netlink header to netlink message
  * @skb: socket buffer holding the message
- * @pid: netlink pid the message is addressed to
+ * @portid: netlink portid the message is addressed to
  * @seq: sequence number (usually the one of the sender)
  * @family: generic netlink family
  * @flags: netlink message flags
@@ -509,13 +509,13 @@ EXPORT_SYMBOL(genl_unregister_family);
  *
  * Returns pointer to user specific header
  */
-void *genlmsg_put(struct sk_buff *skb, u32 pid, u32 seq,
+void *genlmsg_put(struct sk_buff *skb, u32 portid, u32 seq,
                                struct genl_family *family, int flags, u8 cmd)
 {
        struct nlmsghdr *nlh;
        struct genlmsghdr *hdr;
 
-       nlh = nlmsg_put(skb, pid, seq, family->id, GENL_HDRLEN +
+       nlh = nlmsg_put(skb, portid, seq, family->id, GENL_HDRLEN +
                        family->hdrsize, flags);
        if (nlh == NULL)
                return NULL;
@@ -585,7 +585,7 @@ static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
        }
 
        info.snd_seq = nlh->nlmsg_seq;
-       info.snd_pid = NETLINK_CB(skb).pid;
+       info.snd_portid = NETLINK_CB(skb).portid;
        info.nlhdr = nlh;
        info.genlhdr = nlmsg_data(nlh);
        info.userhdr = nlmsg_data(nlh) + GENL_HDRLEN;
@@ -626,12 +626,12 @@ static struct genl_family genl_ctrl = {
        .netnsok = true,
 };
 
-static int ctrl_fill_info(struct genl_family *family, u32 pid, u32 seq,
+static int ctrl_fill_info(struct genl_family *family, u32 portid, u32 seq,
                          u32 flags, struct sk_buff *skb, u8 cmd)
 {
        void *hdr;
 
-       hdr = genlmsg_put(skb, pid, seq, &genl_ctrl, flags, cmd);
+       hdr = genlmsg_put(skb, portid, seq, &genl_ctrl, flags, cmd);
        if (hdr == NULL)
                return -1;
 
@@ -701,7 +701,7 @@ nla_put_failure:
        return -EMSGSIZE;
 }
 
-static int ctrl_fill_mcgrp_info(struct genl_multicast_group *grp, u32 pid,
+static int ctrl_fill_mcgrp_info(struct genl_multicast_group *grp, u32 portid,
                                u32 seq, u32 flags, struct sk_buff *skb,
                                u8 cmd)
 {
@@ -709,7 +709,7 @@ static int ctrl_fill_mcgrp_info(struct genl_multicast_group *grp, u32 pid,
        struct nlattr *nla_grps;
        struct nlattr *nest;
 
-       hdr = genlmsg_put(skb, pid, seq, &genl_ctrl, flags, cmd);
+       hdr = genlmsg_put(skb, portid, seq, &genl_ctrl, flags, cmd);
        if (hdr == NULL)
                return -1;
 
@@ -756,7 +756,7 @@ static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb)
                                continue;
                        if (++n < fams_to_skip)
                                continue;
-                       if (ctrl_fill_info(rt, NETLINK_CB(cb->skb).pid,
+                       if (ctrl_fill_info(rt, NETLINK_CB(cb->skb).portid,
                                           cb->nlh->nlmsg_seq, NLM_F_MULTI,
                                           skb, CTRL_CMD_NEWFAMILY) < 0)
                                goto errout;
@@ -773,7 +773,7 @@ errout:
 }
 
 static struct sk_buff *ctrl_build_family_msg(struct genl_family *family,
-                                            u32 pid, int seq, u8 cmd)
+                                            u32 portid, int seq, u8 cmd)
 {
        struct sk_buff *skb;
        int err;
@@ -782,7 +782,7 @@ static struct sk_buff *ctrl_build_family_msg(struct genl_family *family,
        if (skb == NULL)
                return ERR_PTR(-ENOBUFS);
 
-       err = ctrl_fill_info(family, pid, seq, 0, skb, cmd);
+       err = ctrl_fill_info(family, portid, seq, 0, skb, cmd);
        if (err < 0) {
                nlmsg_free(skb);
                return ERR_PTR(err);
@@ -792,7 +792,7 @@ static struct sk_buff *ctrl_build_family_msg(struct genl_family *family,
 }
 
 static struct sk_buff *ctrl_build_mcgrp_msg(struct genl_multicast_group *grp,
-                                           u32 pid, int seq, u8 cmd)
+                                           u32 portid, int seq, u8 cmd)
 {
        struct sk_buff *skb;
        int err;
@@ -801,7 +801,7 @@ static struct sk_buff *ctrl_build_mcgrp_msg(struct genl_multicast_group *grp,
        if (skb == NULL)
                return ERR_PTR(-ENOBUFS);
 
-       err = ctrl_fill_mcgrp_info(grp, pid, seq, 0, skb, cmd);
+       err = ctrl_fill_mcgrp_info(grp, portid, seq, 0, skb, cmd);
        if (err < 0) {
                nlmsg_free(skb);
                return ERR_PTR(err);
@@ -853,7 +853,7 @@ static int ctrl_getfamily(struct sk_buff *skb, struct genl_info *info)
                return -ENOENT;
        }
 
-       msg = ctrl_build_family_msg(res, info->snd_pid, info->snd_seq,
+       msg = ctrl_build_family_msg(res, info->snd_portid, info->snd_seq,
                                    CTRL_CMD_NEWFAMILY);
        if (IS_ERR(msg))
                return PTR_ERR(msg);
@@ -918,11 +918,11 @@ static int __net_init genl_pernet_init(struct net *net)
        struct netlink_kernel_cfg cfg = {
                .input          = genl_rcv,
                .cb_mutex       = &genl_mutex,
+               .flags          = NL_CFG_F_NONROOT_RECV,
        };
 
        /* we'll bump the group number right afterwards */
-       net->genl_sock = netlink_kernel_create(net, NETLINK_GENERIC,
-                                              THIS_MODULE, &cfg);
+       net->genl_sock = netlink_kernel_create(net, NETLINK_GENERIC, &cfg);
 
        if (!net->genl_sock && net_eq(net, &init_net))
                panic("GENL: Cannot initialize generic netlink\n");
@@ -955,8 +955,6 @@ static int __init genl_init(void)
        if (err < 0)
                goto problem;
 
-       netlink_set_nonroot(NETLINK_GENERIC, NL_NONROOT_RECV);
-
        err = register_pernet_subsys(&genl_pernet_ops);
        if (err)
                goto problem;
@@ -973,7 +971,7 @@ problem:
 
 subsys_initcall(genl_init);
 
-static int genlmsg_mcast(struct sk_buff *skb, u32 pid, unsigned long group,
+static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group,
                         gfp_t flags)
 {
        struct sk_buff *tmp;
@@ -988,7 +986,7 @@ static int genlmsg_mcast(struct sk_buff *skb, u32 pid, unsigned long group,
                                goto error;
                        }
                        err = nlmsg_multicast(prev->genl_sock, tmp,
-                                             pid, group, flags);
+                                             portid, group, flags);
                        if (err)
                                goto error;
                }
@@ -996,20 +994,20 @@ static int genlmsg_mcast(struct sk_buff *skb, u32 pid, unsigned long group,
                prev = net;
        }
 
-       return nlmsg_multicast(prev->genl_sock, skb, pid, group, flags);
+       return nlmsg_multicast(prev->genl_sock, skb, portid, group, flags);
  error:
        kfree_skb(skb);
        return err;
 }
 
-int genlmsg_multicast_allns(struct sk_buff *skb, u32 pid, unsigned int group,
+int genlmsg_multicast_allns(struct sk_buff *skb, u32 portid, unsigned int group,
                            gfp_t flags)
 {
-       return genlmsg_mcast(skb, pid, group, flags);
+       return genlmsg_mcast(skb, portid, group, flags);
 }
 EXPORT_SYMBOL(genlmsg_multicast_allns);
 
-void genl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group,
+void genl_notify(struct sk_buff *skb, struct net *net, u32 portid, u32 group,
                 struct nlmsghdr *nlh, gfp_t flags)
 {
        struct sock *sk = net->genl_sock;
@@ -1018,6 +1016,6 @@ void genl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group,
        if (nlh)
                report = nlmsg_report(nlh);
 
-       nlmsg_notify(sk, skb, pid, group, report, flags);
+       nlmsg_notify(sk, skb, portid, group, report, flags);
 }
 EXPORT_SYMBOL(genl_notify);
index ff749794bc5b87d0b72e36e48cf92caa9487d130..479bee36dc3e4bab3338213d7d8e3a078bf96d8d 100644 (file)
@@ -679,7 +679,7 @@ static void nfc_release(struct device *d)
 
        if (dev->ops->check_presence) {
                del_timer_sync(&dev->check_pres_timer);
-               destroy_workqueue(dev->check_pres_wq);
+               cancel_work_sync(&dev->check_pres_work);
        }
 
        nfc_genl_data_exit(&dev->genl_data);
@@ -715,7 +715,7 @@ static void nfc_check_pres_timeout(unsigned long data)
 {
        struct nfc_dev *dev = (struct nfc_dev *)data;
 
-       queue_work(dev->check_pres_wq, &dev->check_pres_work);
+       schedule_work(&dev->check_pres_work);
 }
 
 struct class nfc_class = {
@@ -784,20 +784,11 @@ struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops,
        dev->targets_generation = 1;
 
        if (ops->check_presence) {
-               char name[32];
                init_timer(&dev->check_pres_timer);
                dev->check_pres_timer.data = (unsigned long)dev;
                dev->check_pres_timer.function = nfc_check_pres_timeout;
 
                INIT_WORK(&dev->check_pres_work, nfc_check_pres_work);
-               snprintf(name, sizeof(name), "nfc%d_check_pres_wq", dev->idx);
-               dev->check_pres_wq = alloc_workqueue(name, WQ_NON_REENTRANT |
-                                                    WQ_UNBOUND |
-                                                    WQ_MEM_RECLAIM, 1);
-               if (dev->check_pres_wq == NULL) {
-                       kfree(dev);
-                       return NULL;
-               }
        }
 
        return dev;
index f9c44b2fb065de5b7f753390caeb4b3ab5466b68..c5dbb6891b24bcce74821a648b04345d02dfcc39 100644 (file)
@@ -4,5 +4,5 @@
 
 obj-$(CONFIG_NFC_HCI) += hci.o
 
-hci-y                  := core.o hcp.o command.o
-hci-$(CONFIG_NFC_SHDLC)        += shdlc.o
+hci-y                  := core.o hcp.o command.o llc.o llc_nop.o
+hci-$(CONFIG_NFC_SHDLC) += llc_shdlc.o
index 46362ef979db1ca88b4ad69d4b6adb7bd440f844..71c6a7086b8f04b4e6264969d21f1ba5a362fcb8 100644 (file)
 
 #include "hci.h"
 
-static void nfc_hci_execute_cb(struct nfc_hci_dev *hdev, int err,
-                              struct sk_buff *skb, void *cb_data)
+static int nfc_hci_execute_cmd_async(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd,
+                              const u8 *param, size_t param_len,
+                              data_exchange_cb_t cb, void *cb_context)
 {
-       struct hcp_exec_waiter *hcp_ew = (struct hcp_exec_waiter *)cb_data;
+       pr_debug("exec cmd async through pipe=%d, cmd=%d, plen=%zd\n", pipe,
+                cmd, param_len);
+
+       /* TODO: Define hci cmd execution delay. Should it be the same
+        * for all commands?
+        */
+       return nfc_hci_hcp_message_tx(hdev, pipe, NFC_HCI_HCP_COMMAND, cmd,
+                                     param, param_len, cb, cb_context, 3000);
+}
+
+/*
+ * HCI command execution completion callback.
+ * err will be a standard linux error (may be converted from HCI response)
+ * skb contains the response data and must be disposed, or may be NULL if
+ * an error occured
+ */
+static void nfc_hci_execute_cb(void *context, struct sk_buff *skb, int err)
+{
+       struct hcp_exec_waiter *hcp_ew = (struct hcp_exec_waiter *)context;
 
        pr_debug("HCI Cmd completed with result=%d\n", err);
 
@@ -55,7 +74,8 @@ static int nfc_hci_execute_cmd(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd,
        hcp_ew.exec_complete = false;
        hcp_ew.result_skb = NULL;
 
-       pr_debug("through pipe=%d, cmd=%d, plen=%zd\n", pipe, cmd, param_len);
+       pr_debug("exec cmd sync through pipe=%d, cmd=%d, plen=%zd\n", pipe,
+                cmd, param_len);
 
        /* TODO: Define hci cmd execution delay. Should it be the same
         * for all commands?
@@ -133,6 +153,23 @@ int nfc_hci_send_cmd(struct nfc_hci_dev *hdev, u8 gate, u8 cmd,
 }
 EXPORT_SYMBOL(nfc_hci_send_cmd);
 
+int nfc_hci_send_cmd_async(struct nfc_hci_dev *hdev, u8 gate, u8 cmd,
+                          const u8 *param, size_t param_len,
+                          data_exchange_cb_t cb, void *cb_context)
+{
+       u8 pipe;
+
+       pr_debug("\n");
+
+       pipe = hdev->gate2pipe[gate];
+       if (pipe == NFC_HCI_INVALID_PIPE)
+               return -EADDRNOTAVAIL;
+
+       return nfc_hci_execute_cmd_async(hdev, pipe, cmd, param, param_len,
+                                        cb, cb_context);
+}
+EXPORT_SYMBOL(nfc_hci_send_cmd_async);
+
 int nfc_hci_set_param(struct nfc_hci_dev *hdev, u8 gate, u8 idx,
                      const u8 *param, size_t param_len)
 {
index 1ac7b3fac6c9bb9e12030efb67d540821f690988..5fbb6e40793eb61f6e9ff8459812f7df2d81bf54 100644 (file)
@@ -26,6 +26,7 @@
 
 #include <net/nfc/nfc.h>
 #include <net/nfc/hci.h>
+#include <net/nfc/llc.h>
 
 #include "hci.h"
 
@@ -57,12 +58,11 @@ static void nfc_hci_msg_tx_work(struct work_struct *work)
        if (hdev->cmd_pending_msg) {
                if (timer_pending(&hdev->cmd_timer) == 0) {
                        if (hdev->cmd_pending_msg->cb)
-                               hdev->cmd_pending_msg->cb(hdev,
-                                                         -ETIME,
-                                                         NULL,
-                                                         hdev->
+                               hdev->cmd_pending_msg->cb(hdev->
                                                          cmd_pending_msg->
-                                                         cb_context);
+                                                         cb_context,
+                                                         NULL,
+                                                         -ETIME);
                        kfree(hdev->cmd_pending_msg);
                        hdev->cmd_pending_msg = NULL;
                } else
@@ -78,12 +78,12 @@ next_msg:
 
        pr_debug("msg_tx_queue has a cmd to send\n");
        while ((skb = skb_dequeue(&msg->msg_frags)) != NULL) {
-               r = hdev->ops->xmit(hdev, skb);
+               r = nfc_llc_xmit_from_hci(hdev->llc, skb);
                if (r < 0) {
                        kfree_skb(skb);
                        skb_queue_purge(&msg->msg_frags);
                        if (msg->cb)
-                               msg->cb(hdev, r, NULL, msg->cb_context);
+                               msg->cb(msg->cb_context, NULL, r);
                        kfree(msg);
                        break;
                }
@@ -133,15 +133,15 @@ static void __nfc_hci_cmd_completion(struct nfc_hci_dev *hdev, int err,
        del_timer_sync(&hdev->cmd_timer);
 
        if (hdev->cmd_pending_msg->cb)
-               hdev->cmd_pending_msg->cb(hdev, err, skb,
-                                         hdev->cmd_pending_msg->cb_context);
+               hdev->cmd_pending_msg->cb(hdev->cmd_pending_msg->cb_context,
+                                         skb, err);
        else
                kfree_skb(skb);
 
        kfree(hdev->cmd_pending_msg);
        hdev->cmd_pending_msg = NULL;
 
-       queue_work(hdev->msg_tx_wq, &hdev->msg_tx_work);
+       schedule_work(&hdev->msg_tx_work);
 }
 
 void nfc_hci_resp_received(struct nfc_hci_dev *hdev, u8 result,
@@ -326,7 +326,7 @@ static void nfc_hci_cmd_timeout(unsigned long data)
 {
        struct nfc_hci_dev *hdev = (struct nfc_hci_dev *)data;
 
-       queue_work(hdev->msg_tx_wq, &hdev->msg_tx_work);
+       schedule_work(&hdev->msg_tx_work);
 }
 
 static int hci_dev_connect_gates(struct nfc_hci_dev *hdev, u8 gate_count,
@@ -398,8 +398,7 @@ disconnect_all:
        nfc_hci_disconnect_all_gates(hdev);
 
 exit:
-       if (skb)
-               kfree_skb(skb);
+       kfree_skb(skb);
 
        return r;
 }
@@ -470,29 +469,38 @@ static int hci_dev_up(struct nfc_dev *nfc_dev)
                        return r;
        }
 
+       r = nfc_llc_start(hdev->llc);
+       if (r < 0)
+               goto exit_close;
+
        r = hci_dev_session_init(hdev);
        if (r < 0)
-               goto exit;
+               goto exit_llc;
 
        r = nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE,
                               NFC_HCI_EVT_END_OPERATION, NULL, 0);
        if (r < 0)
-               goto exit;
+               goto exit_llc;
 
        if (hdev->ops->hci_ready) {
                r = hdev->ops->hci_ready(hdev);
                if (r < 0)
-                       goto exit;
+                       goto exit_llc;
        }
 
        r = hci_dev_version(hdev);
        if (r < 0)
-               goto exit;
+               goto exit_llc;
+
+       return 0;
+
+exit_llc:
+       nfc_llc_stop(hdev->llc);
+
+exit_close:
+       if (hdev->ops->close)
+               hdev->ops->close(hdev);
 
-exit:
-       if (r < 0)
-               if (hdev->ops->close)
-                       hdev->ops->close(hdev);
        return r;
 }
 
@@ -500,6 +508,8 @@ static int hci_dev_down(struct nfc_dev *nfc_dev)
 {
        struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
 
+       nfc_llc_stop(hdev->llc);
+
        if (hdev->ops->close)
                hdev->ops->close(hdev);
 
@@ -539,13 +549,37 @@ static void hci_deactivate_target(struct nfc_dev *nfc_dev,
 {
 }
 
+#define HCI_CB_TYPE_TRANSCEIVE 1
+
+static void hci_transceive_cb(void *context, struct sk_buff *skb, int err)
+{
+       struct nfc_hci_dev *hdev = context;
+
+       switch (hdev->async_cb_type) {
+       case HCI_CB_TYPE_TRANSCEIVE:
+               /*
+                * TODO: Check RF Error indicator to make sure data is valid.
+                * It seems that HCI cmd can complete without error, but data
+                * can be invalid if an RF error occurred? Ignore for now.
+                */
+               if (err == 0)
+                       skb_trim(skb, skb->len - 1); /* RF Err ind */
+
+               hdev->async_cb(hdev->async_cb_context, skb, err);
+               break;
+       default:
+               if (err == 0)
+                       kfree_skb(skb);
+               break;
+       }
+}
+
 static int hci_transceive(struct nfc_dev *nfc_dev, struct nfc_target *target,
                          struct sk_buff *skb, data_exchange_cb_t cb,
                          void *cb_context)
 {
        struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
        int r;
-       struct sk_buff *res_skb = NULL;
 
        pr_debug("target_idx=%d\n", target->idx);
 
@@ -553,40 +587,37 @@ static int hci_transceive(struct nfc_dev *nfc_dev, struct nfc_target *target,
        case NFC_HCI_RF_READER_A_GATE:
        case NFC_HCI_RF_READER_B_GATE:
                if (hdev->ops->data_exchange) {
-                       r = hdev->ops->data_exchange(hdev, target, skb,
-                                                    &res_skb);
+                       r = hdev->ops->data_exchange(hdev, target, skb, cb,
+                                                    cb_context);
                        if (r <= 0)     /* handled */
                                break;
                }
 
                *skb_push(skb, 1) = 0;  /* CTR, see spec:10.2.2.1 */
-               r = nfc_hci_send_cmd(hdev, target->hci_reader_gate,
-                                    NFC_HCI_WR_XCHG_DATA,
-                                    skb->data, skb->len, &res_skb);
-               /*
-                * TODO: Check RF Error indicator to make sure data is valid.
-                * It seems that HCI cmd can complete without error, but data
-                * can be invalid if an RF error occured? Ignore for now.
-                */
-               if (r == 0)
-                       skb_trim(res_skb, res_skb->len - 1); /* RF Err ind */
+
+               hdev->async_cb_type = HCI_CB_TYPE_TRANSCEIVE;
+               hdev->async_cb = cb;
+               hdev->async_cb_context = cb_context;
+
+               r = nfc_hci_send_cmd_async(hdev, target->hci_reader_gate,
+                                          NFC_HCI_WR_XCHG_DATA, skb->data,
+                                          skb->len, hci_transceive_cb, hdev);
                break;
        default:
                if (hdev->ops->data_exchange) {
-                       r = hdev->ops->data_exchange(hdev, target, skb,
-                                                    &res_skb);
+                       r = hdev->ops->data_exchange(hdev, target, skb, cb,
+                                                    cb_context);
                        if (r == 1)
                                r = -ENOTSUPP;
                }
                else
                        r = -ENOTSUPP;
+               break;
        }
 
        kfree_skb(skb);
 
-       cb(cb_context, res_skb, r);
-
-       return 0;
+       return r;
 }
 
 static int hci_check_presence(struct nfc_dev *nfc_dev,
@@ -600,6 +631,93 @@ static int hci_check_presence(struct nfc_dev *nfc_dev,
        return 0;
 }
 
+static void nfc_hci_failure(struct nfc_hci_dev *hdev, int err)
+{
+       mutex_lock(&hdev->msg_tx_mutex);
+
+       if (hdev->cmd_pending_msg == NULL) {
+               nfc_driver_failure(hdev->ndev, err);
+               goto exit;
+       }
+
+       __nfc_hci_cmd_completion(hdev, err, NULL);
+
+exit:
+       mutex_unlock(&hdev->msg_tx_mutex);
+}
+
+static void nfc_hci_llc_failure(struct nfc_hci_dev *hdev, int err)
+{
+       nfc_hci_failure(hdev, err);
+}
+
+static void nfc_hci_recv_from_llc(struct nfc_hci_dev *hdev, struct sk_buff *skb)
+{
+       struct hcp_packet *packet;
+       u8 type;
+       u8 instruction;
+       struct sk_buff *hcp_skb;
+       u8 pipe;
+       struct sk_buff *frag_skb;
+       int msg_len;
+
+       packet = (struct hcp_packet *)skb->data;
+       if ((packet->header & ~NFC_HCI_FRAGMENT) == 0) {
+               skb_queue_tail(&hdev->rx_hcp_frags, skb);
+               return;
+       }
+
+       /* it's the last fragment. Does it need re-aggregation? */
+       if (skb_queue_len(&hdev->rx_hcp_frags)) {
+               pipe = packet->header & NFC_HCI_FRAGMENT;
+               skb_queue_tail(&hdev->rx_hcp_frags, skb);
+
+               msg_len = 0;
+               skb_queue_walk(&hdev->rx_hcp_frags, frag_skb) {
+                       msg_len += (frag_skb->len -
+                                   NFC_HCI_HCP_PACKET_HEADER_LEN);
+               }
+
+               hcp_skb = nfc_alloc_recv_skb(NFC_HCI_HCP_PACKET_HEADER_LEN +
+                                            msg_len, GFP_KERNEL);
+               if (hcp_skb == NULL) {
+                       nfc_hci_failure(hdev, -ENOMEM);
+                       return;
+               }
+
+               *skb_put(hcp_skb, NFC_HCI_HCP_PACKET_HEADER_LEN) = pipe;
+
+               skb_queue_walk(&hdev->rx_hcp_frags, frag_skb) {
+                       msg_len = frag_skb->len - NFC_HCI_HCP_PACKET_HEADER_LEN;
+                       memcpy(skb_put(hcp_skb, msg_len),
+                              frag_skb->data + NFC_HCI_HCP_PACKET_HEADER_LEN,
+                              msg_len);
+               }
+
+               skb_queue_purge(&hdev->rx_hcp_frags);
+       } else {
+               packet->header &= NFC_HCI_FRAGMENT;
+               hcp_skb = skb;
+       }
+
+       /* if this is a response, dispatch immediately to
+        * unblock waiting cmd context. Otherwise, enqueue to dispatch
+        * in separate context where handler can also execute command.
+        */
+       packet = (struct hcp_packet *)hcp_skb->data;
+       type = HCP_MSG_GET_TYPE(packet->message.header);
+       if (type == NFC_HCI_HCP_RESPONSE) {
+               pipe = packet->header;
+               instruction = HCP_MSG_GET_CMD(packet->message.header);
+               skb_pull(hcp_skb, NFC_HCI_HCP_PACKET_HEADER_LEN +
+                        NFC_HCI_HCP_MESSAGE_HEADER_LEN);
+               nfc_hci_hcp_message_rx(hdev, pipe, type, instruction, hcp_skb);
+       } else {
+               skb_queue_tail(&hdev->msg_rx_queue, hcp_skb);
+               schedule_work(&hdev->msg_rx_work);
+       }
+}
+
 static struct nfc_ops hci_nfc_ops = {
        .dev_up = hci_dev_up,
        .dev_down = hci_dev_down,
@@ -614,6 +732,7 @@ static struct nfc_ops hci_nfc_ops = {
 struct nfc_hci_dev *nfc_hci_allocate_device(struct nfc_hci_ops *ops,
                                            struct nfc_hci_init_data *init_data,
                                            u32 protocols,
+                                           const char *llc_name,
                                            int tx_headroom,
                                            int tx_tailroom,
                                            int max_link_payload)
@@ -630,10 +749,19 @@ struct nfc_hci_dev *nfc_hci_allocate_device(struct nfc_hci_ops *ops,
        if (hdev == NULL)
                return NULL;
 
+       hdev->llc = nfc_llc_allocate(llc_name, hdev, ops->xmit,
+                                    nfc_hci_recv_from_llc, tx_headroom,
+                                    tx_tailroom, nfc_hci_llc_failure);
+       if (hdev->llc == NULL) {
+               kfree(hdev);
+               return NULL;
+       }
+
        hdev->ndev = nfc_allocate_device(&hci_nfc_ops, protocols,
                                         tx_headroom + HCI_CMDS_HEADROOM,
                                         tx_tailroom);
        if (!hdev->ndev) {
+               nfc_llc_free(hdev->llc);
                kfree(hdev);
                return NULL;
        }
@@ -653,29 +781,18 @@ EXPORT_SYMBOL(nfc_hci_allocate_device);
 void nfc_hci_free_device(struct nfc_hci_dev *hdev)
 {
        nfc_free_device(hdev->ndev);
+       nfc_llc_free(hdev->llc);
        kfree(hdev);
 }
 EXPORT_SYMBOL(nfc_hci_free_device);
 
 int nfc_hci_register_device(struct nfc_hci_dev *hdev)
 {
-       struct device *dev = &hdev->ndev->dev;
-       const char *devname = dev_name(dev);
-       char name[32];
-       int r = 0;
-
        mutex_init(&hdev->msg_tx_mutex);
 
        INIT_LIST_HEAD(&hdev->msg_tx_queue);
 
        INIT_WORK(&hdev->msg_tx_work, nfc_hci_msg_tx_work);
-       snprintf(name, sizeof(name), "%s_hci_msg_tx_wq", devname);
-       hdev->msg_tx_wq = alloc_workqueue(name, WQ_NON_REENTRANT | WQ_UNBOUND |
-                                         WQ_MEM_RECLAIM, 1);
-       if (hdev->msg_tx_wq == NULL) {
-               r = -ENOMEM;
-               goto exit;
-       }
 
        init_timer(&hdev->cmd_timer);
        hdev->cmd_timer.data = (unsigned long)hdev;
@@ -684,27 +801,10 @@ int nfc_hci_register_device(struct nfc_hci_dev *hdev)
        skb_queue_head_init(&hdev->rx_hcp_frags);
 
        INIT_WORK(&hdev->msg_rx_work, nfc_hci_msg_rx_work);
-       snprintf(name, sizeof(name), "%s_hci_msg_rx_wq", devname);
-       hdev->msg_rx_wq = alloc_workqueue(name, WQ_NON_REENTRANT | WQ_UNBOUND |
-                                         WQ_MEM_RECLAIM, 1);
-       if (hdev->msg_rx_wq == NULL) {
-               r = -ENOMEM;
-               goto exit;
-       }
 
        skb_queue_head_init(&hdev->msg_rx_queue);
 
-       r = nfc_register_device(hdev->ndev);
-
-exit:
-       if (r < 0) {
-               if (hdev->msg_tx_wq)
-                       destroy_workqueue(hdev->msg_tx_wq);
-               if (hdev->msg_rx_wq)
-                       destroy_workqueue(hdev->msg_rx_wq);
-       }
-
-       return r;
+       return nfc_register_device(hdev->ndev);
 }
 EXPORT_SYMBOL(nfc_hci_register_device);
 
@@ -725,9 +825,8 @@ void nfc_hci_unregister_device(struct nfc_hci_dev *hdev)
 
        nfc_unregister_device(hdev->ndev);
 
-       destroy_workqueue(hdev->msg_tx_wq);
-
-       destroy_workqueue(hdev->msg_rx_wq);
+       cancel_work_sync(&hdev->msg_tx_work);
+       cancel_work_sync(&hdev->msg_rx_work);
 }
 EXPORT_SYMBOL(nfc_hci_unregister_device);
 
@@ -743,93 +842,30 @@ void *nfc_hci_get_clientdata(struct nfc_hci_dev *hdev)
 }
 EXPORT_SYMBOL(nfc_hci_get_clientdata);
 
-static void nfc_hci_failure(struct nfc_hci_dev *hdev, int err)
-{
-       mutex_lock(&hdev->msg_tx_mutex);
-
-       if (hdev->cmd_pending_msg == NULL) {
-               nfc_driver_failure(hdev->ndev, err);
-               goto exit;
-       }
-
-       __nfc_hci_cmd_completion(hdev, err, NULL);
-
-exit:
-       mutex_unlock(&hdev->msg_tx_mutex);
-}
-
 void nfc_hci_driver_failure(struct nfc_hci_dev *hdev, int err)
 {
        nfc_hci_failure(hdev, err);
 }
 EXPORT_SYMBOL(nfc_hci_driver_failure);
 
-void nfc_hci_recv_frame(struct nfc_hci_dev *hdev, struct sk_buff *skb)
+void inline nfc_hci_recv_frame(struct nfc_hci_dev *hdev, struct sk_buff *skb)
 {
-       struct hcp_packet *packet;
-       u8 type;
-       u8 instruction;
-       struct sk_buff *hcp_skb;
-       u8 pipe;
-       struct sk_buff *frag_skb;
-       int msg_len;
-
-       packet = (struct hcp_packet *)skb->data;
-       if ((packet->header & ~NFC_HCI_FRAGMENT) == 0) {
-               skb_queue_tail(&hdev->rx_hcp_frags, skb);
-               return;
-       }
-
-       /* it's the last fragment. Does it need re-aggregation? */
-       if (skb_queue_len(&hdev->rx_hcp_frags)) {
-               pipe = packet->header & NFC_HCI_FRAGMENT;
-               skb_queue_tail(&hdev->rx_hcp_frags, skb);
-
-               msg_len = 0;
-               skb_queue_walk(&hdev->rx_hcp_frags, frag_skb) {
-                       msg_len += (frag_skb->len -
-                                   NFC_HCI_HCP_PACKET_HEADER_LEN);
-               }
-
-               hcp_skb = nfc_alloc_recv_skb(NFC_HCI_HCP_PACKET_HEADER_LEN +
-                                            msg_len, GFP_KERNEL);
-               if (hcp_skb == NULL) {
-                       nfc_hci_failure(hdev, -ENOMEM);
-                       return;
-               }
-
-               *skb_put(hcp_skb, NFC_HCI_HCP_PACKET_HEADER_LEN) = pipe;
-
-               skb_queue_walk(&hdev->rx_hcp_frags, frag_skb) {
-                       msg_len = frag_skb->len - NFC_HCI_HCP_PACKET_HEADER_LEN;
-                       memcpy(skb_put(hcp_skb, msg_len),
-                              frag_skb->data + NFC_HCI_HCP_PACKET_HEADER_LEN,
-                              msg_len);
-               }
+       nfc_llc_rcv_from_drv(hdev->llc, skb);
+}
+EXPORT_SYMBOL(nfc_hci_recv_frame);
 
-               skb_queue_purge(&hdev->rx_hcp_frags);
-       } else {
-               packet->header &= NFC_HCI_FRAGMENT;
-               hcp_skb = skb;
-       }
+static int __init nfc_hci_init(void)
+{
+       return nfc_llc_init();
+}
 
-       /* if this is a response, dispatch immediately to
-        * unblock waiting cmd context. Otherwise, enqueue to dispatch
-        * in separate context where handler can also execute command.
-        */
-       packet = (struct hcp_packet *)hcp_skb->data;
-       type = HCP_MSG_GET_TYPE(packet->message.header);
-       if (type == NFC_HCI_HCP_RESPONSE) {
-               pipe = packet->header;
-               instruction = HCP_MSG_GET_CMD(packet->message.header);
-               skb_pull(hcp_skb, NFC_HCI_HCP_PACKET_HEADER_LEN +
-                        NFC_HCI_HCP_MESSAGE_HEADER_LEN);
-               nfc_hci_hcp_message_rx(hdev, pipe, type, instruction, hcp_skb);
-       } else {
-               skb_queue_tail(&hdev->msg_rx_queue, hcp_skb);
-               queue_work(hdev->msg_rx_wq, &hdev->msg_rx_work);
-       }
+static void __exit nfc_hci_exit(void)
+{
+       nfc_llc_exit();
 }
-EXPORT_SYMBOL(nfc_hci_recv_frame);
+
+subsys_initcall(nfc_hci_init);
+module_exit(nfc_hci_exit);
 
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("NFC HCI Core");
index fa9a21e922396bd396dc11a3b9dd916282b39ef9..b274d12c18ac5d7b5ac3a6aa4e4fc943738d6eea 100644 (file)
@@ -20,6 +20,8 @@
 #ifndef __LOCAL_HCI_H
 #define __LOCAL_HCI_H
 
+#include <net/nfc/hci.h>
+
 struct gate_pipe_map {
        u8 gate;
        u8 pipe;
@@ -35,15 +37,6 @@ struct hcp_packet {
        struct hcp_message message;
 } __packed;
 
-/*
- * HCI command execution completion callback.
- * result will be a standard linux error (may be converted from HCI response)
- * skb contains the response data and must be disposed, or may be NULL if
- * an error occured
- */
-typedef void (*hci_cmd_cb_t) (struct nfc_hci_dev *hdev, int result,
-                             struct sk_buff *skb, void *cb_data);
-
 struct hcp_exec_waiter {
        wait_queue_head_t *wq;
        bool exec_complete;
@@ -55,7 +48,7 @@ struct hci_msg {
        struct list_head msg_l;
        struct sk_buff_head msg_frags;
        bool wait_response;
-       hci_cmd_cb_t cb;
+       data_exchange_cb_t cb;
        void *cb_context;
        unsigned long completion_delay;
 };
@@ -83,7 +76,7 @@ struct hci_create_pipe_resp {
 int nfc_hci_hcp_message_tx(struct nfc_hci_dev *hdev, u8 pipe,
                           u8 type, u8 instruction,
                           const u8 *payload, size_t payload_len,
-                          hci_cmd_cb_t cb, void *cb_data,
+                          data_exchange_cb_t cb, void *cb_context,
                           unsigned long completion_delay);
 
 u8 nfc_hci_pipe2gate(struct nfc_hci_dev *hdev, u8 pipe);
index f4dad1a8974078864fd43f650e52598a6b46c566..bc308a7ca6093f2857905beb3eecfe86d612671f 100644 (file)
@@ -35,7 +35,7 @@
 int nfc_hci_hcp_message_tx(struct nfc_hci_dev *hdev, u8 pipe,
                           u8 type, u8 instruction,
                           const u8 *payload, size_t payload_len,
-                          hci_cmd_cb_t cb, void *cb_data,
+                          data_exchange_cb_t cb, void *cb_context,
                           unsigned long completion_delay)
 {
        struct nfc_dev *ndev = hdev->ndev;
@@ -52,7 +52,7 @@ int nfc_hci_hcp_message_tx(struct nfc_hci_dev *hdev, u8 pipe,
        skb_queue_head_init(&cmd->msg_frags);
        cmd->wait_response = (type == NFC_HCI_HCP_COMMAND) ? true : false;
        cmd->cb = cb;
-       cmd->cb_context = cb_data;
+       cmd->cb_context = cb_context;
        cmd->completion_delay = completion_delay;
 
        hci_len = payload_len + 1;
@@ -108,7 +108,7 @@ int nfc_hci_hcp_message_tx(struct nfc_hci_dev *hdev, u8 pipe,
        list_add_tail(&cmd->msg_l, &hdev->msg_tx_queue);
        mutex_unlock(&hdev->msg_tx_mutex);
 
-       queue_work(hdev->msg_tx_wq, &hdev->msg_tx_work);
+       schedule_work(&hdev->msg_tx_work);
 
        return 0;
 
diff --git a/net/nfc/hci/llc.c b/net/nfc/hci/llc.c
new file mode 100644 (file)
index 0000000..ae1205d
--- /dev/null
@@ -0,0 +1,170 @@
+/*
+ * Link Layer Control manager
+ *
+ * Copyright (C) 2012  Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <net/nfc/llc.h>
+
+#include "llc.h"
+
+static struct list_head llc_engines;
+
+int nfc_llc_init(void)
+{
+       int r;
+
+       INIT_LIST_HEAD(&llc_engines);
+
+       r = nfc_llc_nop_register();
+       if (r)
+               goto exit;
+
+       r = nfc_llc_shdlc_register();
+       if (r)
+               goto exit;
+
+       return 0;
+
+exit:
+       nfc_llc_exit();
+       return r;
+}
+
+void nfc_llc_exit(void)
+{
+       struct nfc_llc_engine *llc_engine, *n;
+
+       list_for_each_entry_safe(llc_engine, n, &llc_engines, entry) {
+               list_del(&llc_engine->entry);
+               kfree(llc_engine->name);
+               kfree(llc_engine);
+       }
+}
+
+int nfc_llc_register(const char *name, struct nfc_llc_ops *ops)
+{
+       struct nfc_llc_engine *llc_engine;
+
+       llc_engine = kzalloc(sizeof(struct nfc_llc_engine), GFP_KERNEL);
+       if (llc_engine == NULL)
+               return -ENOMEM;
+
+       llc_engine->name = kstrdup(name, GFP_KERNEL);
+       if (llc_engine->name == NULL) {
+               kfree(llc_engine);
+               return -ENOMEM;
+       }
+       llc_engine->ops = ops;
+
+       INIT_LIST_HEAD(&llc_engine->entry);
+       list_add_tail (&llc_engine->entry, &llc_engines);
+
+       return 0;
+}
+
+static struct nfc_llc_engine *nfc_llc_name_to_engine(const char *name)
+{
+       struct nfc_llc_engine *llc_engine;
+
+       list_for_each_entry(llc_engine, &llc_engines, entry) {
+               if (strcmp(llc_engine->name, name) == 0)
+                       return llc_engine;
+       }
+
+       return NULL;
+}
+
+void nfc_llc_unregister(const char *name)
+{
+       struct nfc_llc_engine *llc_engine;
+
+       llc_engine = nfc_llc_name_to_engine(name);
+       if (llc_engine == NULL)
+               return;
+
+       list_del(&llc_engine->entry);
+       kfree(llc_engine->name);
+       kfree(llc_engine);
+}
+
+struct nfc_llc *nfc_llc_allocate(const char *name, struct nfc_hci_dev *hdev,
+                                xmit_to_drv_t xmit_to_drv,
+                                rcv_to_hci_t rcv_to_hci, int tx_headroom,
+                                int tx_tailroom, llc_failure_t llc_failure)
+{
+       struct nfc_llc_engine *llc_engine;
+       struct nfc_llc *llc;
+
+       llc_engine = nfc_llc_name_to_engine(name);
+       if (llc_engine == NULL)
+               return NULL;
+
+       llc = kzalloc(sizeof(struct nfc_llc), GFP_KERNEL);
+       if (llc == NULL)
+               return NULL;
+
+       llc->data = llc_engine->ops->init(hdev, xmit_to_drv, rcv_to_hci,
+                                         tx_headroom, tx_tailroom,
+                                         &llc->rx_headroom, &llc->rx_tailroom,
+                                         llc_failure);
+       if (llc->data == NULL) {
+               kfree(llc);
+               return NULL;
+       }
+       llc->ops = llc_engine->ops;
+
+       return llc;
+}
+
+void nfc_llc_free(struct nfc_llc *llc)
+{
+       llc->ops->deinit(llc);
+       kfree(llc);
+}
+
+inline void nfc_llc_get_rx_head_tail_room(struct nfc_llc *llc, int *rx_headroom,
+                                         int *rx_tailroom)
+{
+       *rx_headroom = llc->rx_headroom;
+       *rx_tailroom = llc->rx_tailroom;
+}
+
+inline int nfc_llc_start(struct nfc_llc *llc)
+{
+       return llc->ops->start(llc);
+}
+
+inline int nfc_llc_stop(struct nfc_llc *llc)
+{
+       return llc->ops->stop(llc);
+}
+
+inline void nfc_llc_rcv_from_drv(struct nfc_llc *llc, struct sk_buff *skb)
+{
+       llc->ops->rcv_from_drv(llc, skb);
+}
+
+inline int nfc_llc_xmit_from_hci(struct nfc_llc *llc, struct sk_buff *skb)
+{
+       return llc->ops->xmit_from_hci(llc, skb);
+}
+
+inline void *nfc_llc_get_data(struct nfc_llc *llc)
+{
+       return llc->data;
+}
diff --git a/net/nfc/hci/llc.h b/net/nfc/hci/llc.h
new file mode 100644 (file)
index 0000000..7be0b7f
--- /dev/null
@@ -0,0 +1,69 @@
+/*
+ * Link Layer Control manager
+ *
+ * Copyright (C) 2012  Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __LOCAL_LLC_H_
+#define __LOCAL_LLC_H_
+
+#include <net/nfc/hci.h>
+#include <net/nfc/llc.h>
+#include <linux/skbuff.h>
+
+struct nfc_llc_ops {
+       void *(*init) (struct nfc_hci_dev *hdev, xmit_to_drv_t xmit_to_drv,
+                      rcv_to_hci_t rcv_to_hci, int tx_headroom,
+                      int tx_tailroom, int *rx_headroom, int *rx_tailroom,
+                      llc_failure_t llc_failure);
+       void (*deinit) (struct nfc_llc *llc);
+       int (*start) (struct nfc_llc *llc);
+       int (*stop) (struct nfc_llc *llc);
+       void (*rcv_from_drv) (struct nfc_llc *llc, struct sk_buff *skb);
+       int (*xmit_from_hci) (struct nfc_llc *llc, struct sk_buff *skb);
+};
+
+struct nfc_llc_engine {
+       const char *name;
+       struct nfc_llc_ops *ops;
+       struct list_head entry;
+};
+
+struct nfc_llc {
+       void *data;
+       struct nfc_llc_ops *ops;
+       int rx_headroom;
+       int rx_tailroom;
+};
+
+void *nfc_llc_get_data(struct nfc_llc *llc);
+
+int nfc_llc_register(const char *name, struct nfc_llc_ops *ops);
+void nfc_llc_unregister(const char *name);
+
+int nfc_llc_nop_register(void);
+
+#if defined(CONFIG_NFC_SHDLC)
+int nfc_llc_shdlc_register(void);
+#else
+static inline int nfc_llc_shdlc_register(void)
+{
+       return 0;
+}
+#endif
+
+#endif /* __LOCAL_LLC_H_ */
diff --git a/net/nfc/hci/llc_nop.c b/net/nfc/hci/llc_nop.c
new file mode 100644 (file)
index 0000000..87b1029
--- /dev/null
@@ -0,0 +1,99 @@
+/*
+ * nop (passthrough) Link Layer Control
+ *
+ * Copyright (C) 2012  Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/types.h>
+
+#include "llc.h"
+
+struct llc_nop {
+       struct nfc_hci_dev *hdev;
+       xmit_to_drv_t xmit_to_drv;
+       rcv_to_hci_t rcv_to_hci;
+       int tx_headroom;
+       int tx_tailroom;
+       llc_failure_t llc_failure;
+};
+
+static void *llc_nop_init(struct nfc_hci_dev *hdev, xmit_to_drv_t xmit_to_drv,
+                         rcv_to_hci_t rcv_to_hci, int tx_headroom,
+                         int tx_tailroom, int *rx_headroom, int *rx_tailroom,
+                         llc_failure_t llc_failure)
+{
+       struct llc_nop *llc_nop;
+
+       *rx_headroom = 0;
+       *rx_tailroom = 0;
+
+       llc_nop = kzalloc(sizeof(struct llc_nop), GFP_KERNEL);
+       if (llc_nop == NULL)
+               return NULL;
+
+       llc_nop->hdev = hdev;
+       llc_nop->xmit_to_drv = xmit_to_drv;
+       llc_nop->rcv_to_hci = rcv_to_hci;
+       llc_nop->tx_headroom = tx_headroom;
+       llc_nop->tx_tailroom = tx_tailroom;
+       llc_nop->llc_failure = llc_failure;
+
+       return llc_nop;
+}
+
+static void llc_nop_deinit(struct nfc_llc *llc)
+{
+       kfree(nfc_llc_get_data(llc));
+}
+
+static int llc_nop_start(struct nfc_llc *llc)
+{
+       return 0;
+}
+
+static int llc_nop_stop(struct nfc_llc *llc)
+{
+       return 0;
+}
+
+static void llc_nop_rcv_from_drv(struct nfc_llc *llc, struct sk_buff *skb)
+{
+       struct llc_nop *llc_nop = nfc_llc_get_data(llc);
+
+       llc_nop->rcv_to_hci(llc_nop->hdev, skb);
+}
+
+static int llc_nop_xmit_from_hci(struct nfc_llc *llc, struct sk_buff *skb)
+{
+       struct llc_nop *llc_nop = nfc_llc_get_data(llc);
+
+       return llc_nop->xmit_to_drv(llc_nop->hdev, skb);
+}
+
+static struct nfc_llc_ops llc_nop_ops = {
+       .init = llc_nop_init,
+       .deinit = llc_nop_deinit,
+       .start = llc_nop_start,
+       .stop = llc_nop_stop,
+       .rcv_from_drv = llc_nop_rcv_from_drv,
+       .xmit_from_hci = llc_nop_xmit_from_hci,
+};
+
+int nfc_llc_nop_register(void)
+{
+       return nfc_llc_register(LLC_NOP_NAME, &llc_nop_ops);
+}
diff --git a/net/nfc/hci/llc_shdlc.c b/net/nfc/hci/llc_shdlc.c
new file mode 100644 (file)
index 0000000..01cbc72
--- /dev/null
@@ -0,0 +1,857 @@
+/*
+ * shdlc Link Layer Control
+ *
+ * Copyright (C) 2012  Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#define pr_fmt(fmt) "shdlc: %s: " fmt, __func__
+
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/slab.h>
+#include <linux/skbuff.h>
+
+#include "llc.h"
+
+enum shdlc_state {
+       SHDLC_DISCONNECTED = 0,
+       SHDLC_CONNECTING = 1,
+       SHDLC_NEGOTIATING = 2,
+       SHDLC_HALF_CONNECTED = 3,
+       SHDLC_CONNECTED = 4
+};
+
+struct llc_shdlc {
+       struct nfc_hci_dev *hdev;
+       xmit_to_drv_t xmit_to_drv;
+       rcv_to_hci_t rcv_to_hci;
+
+       struct mutex state_mutex;
+       enum shdlc_state state;
+       int hard_fault;
+
+       wait_queue_head_t *connect_wq;
+       int connect_tries;
+       int connect_result;
+       struct timer_list connect_timer;/* aka T3 in spec 10.6.1 */
+
+       u8 w;                           /* window size */
+       bool srej_support;
+
+       struct timer_list t1_timer;     /* send ack timeout */
+       bool t1_active;
+
+       struct timer_list t2_timer;     /* guard/retransmit timeout */
+       bool t2_active;
+
+       int ns;                         /* next seq num for send */
+       int nr;                         /* next expected seq num for receive */
+       int dnr;                        /* oldest sent unacked seq num */
+
+       struct sk_buff_head rcv_q;
+
+       struct sk_buff_head send_q;
+       bool rnr;                       /* other side is not ready to receive */
+
+       struct sk_buff_head ack_pending_q;
+
+       struct work_struct sm_work;
+
+       int tx_headroom;
+       int tx_tailroom;
+
+       llc_failure_t llc_failure;
+};
+
+#define SHDLC_LLC_HEAD_ROOM    2
+
+#define SHDLC_MAX_WINDOW       4
+#define SHDLC_SREJ_SUPPORT     false
+
+#define SHDLC_CONTROL_HEAD_MASK        0xe0
+#define SHDLC_CONTROL_HEAD_I   0x80
+#define SHDLC_CONTROL_HEAD_I2  0xa0
+#define SHDLC_CONTROL_HEAD_S   0xc0
+#define SHDLC_CONTROL_HEAD_U   0xe0
+
+#define SHDLC_CONTROL_NS_MASK  0x38
+#define SHDLC_CONTROL_NR_MASK  0x07
+#define SHDLC_CONTROL_TYPE_MASK        0x18
+
+#define SHDLC_CONTROL_M_MASK   0x1f
+
+enum sframe_type {
+       S_FRAME_RR = 0x00,
+       S_FRAME_REJ = 0x01,
+       S_FRAME_RNR = 0x02,
+       S_FRAME_SREJ = 0x03
+};
+
+enum uframe_modifier {
+       U_FRAME_UA = 0x06,
+       U_FRAME_RSET = 0x19
+};
+
+#define SHDLC_CONNECT_VALUE_MS 5
+#define SHDLC_T1_VALUE_MS(w)   ((5 * w) / 4)
+#define SHDLC_T2_VALUE_MS      300
+
+#define SHDLC_DUMP_SKB(info, skb)                                \
+do {                                                             \
+       pr_debug("%s:\n", info);                                  \
+       print_hex_dump(KERN_DEBUG, "shdlc: ", DUMP_PREFIX_OFFSET, \
+                      16, 1, skb->data, skb->len, 0);            \
+} while (0)
+
+/* checks x < y <= z modulo 8 */
+static bool llc_shdlc_x_lt_y_lteq_z(int x, int y, int z)
+{
+       if (x < z)
+               return ((x < y) && (y <= z)) ? true : false;
+       else
+               return ((y > x) || (y <= z)) ? true : false;
+}
+
+/* checks x <= y < z modulo 8 */
+static bool llc_shdlc_x_lteq_y_lt_z(int x, int y, int z)
+{
+       if (x <= z)
+               return ((x <= y) && (y < z)) ? true : false;
+       else                    /* x > z -> z+8 > x */
+               return ((y >= x) || (y < z)) ? true : false;
+}
+
+static struct sk_buff *llc_shdlc_alloc_skb(struct llc_shdlc *shdlc,
+                                          int payload_len)
+{
+       struct sk_buff *skb;
+
+       skb = alloc_skb(shdlc->tx_headroom + SHDLC_LLC_HEAD_ROOM +
+                       shdlc->tx_tailroom + payload_len, GFP_KERNEL);
+       if (skb)
+               skb_reserve(skb, shdlc->tx_headroom + SHDLC_LLC_HEAD_ROOM);
+
+       return skb;
+}
+
+/* immediately sends an S frame. */
+static int llc_shdlc_send_s_frame(struct llc_shdlc *shdlc,
+                                 enum sframe_type sframe_type, int nr)
+{
+       int r;
+       struct sk_buff *skb;
+
+       pr_debug("sframe_type=%d nr=%d\n", sframe_type, nr);
+
+       skb = llc_shdlc_alloc_skb(shdlc, 0);
+       if (skb == NULL)
+               return -ENOMEM;
+
+       *skb_push(skb, 1) = SHDLC_CONTROL_HEAD_S | (sframe_type << 3) | nr;
+
+       r = shdlc->xmit_to_drv(shdlc->hdev, skb);
+
+       kfree_skb(skb);
+
+       return r;
+}
+
+/* immediately sends an U frame. skb may contain optional payload */
+static int llc_shdlc_send_u_frame(struct llc_shdlc *shdlc,
+                                 struct sk_buff *skb,
+                                 enum uframe_modifier uframe_modifier)
+{
+       int r;
+
+       pr_debug("uframe_modifier=%d\n", uframe_modifier);
+
+       *skb_push(skb, 1) = SHDLC_CONTROL_HEAD_U | uframe_modifier;
+
+       r = shdlc->xmit_to_drv(shdlc->hdev, skb);
+
+       kfree_skb(skb);
+
+       return r;
+}
+
+/*
+ * Free ack_pending frames until y_nr - 1, and reset t2 according to
+ * the remaining oldest ack_pending frame sent time
+ */
+static void llc_shdlc_reset_t2(struct llc_shdlc *shdlc, int y_nr)
+{
+       struct sk_buff *skb;
+       int dnr = shdlc->dnr;   /* MUST initially be < y_nr */
+
+       pr_debug("release ack pending up to frame %d excluded\n", y_nr);
+
+       while (dnr != y_nr) {
+               pr_debug("release ack pending frame %d\n", dnr);
+
+               skb = skb_dequeue(&shdlc->ack_pending_q);
+               kfree_skb(skb);
+
+               dnr = (dnr + 1) % 8;
+       }
+
+       if (skb_queue_empty(&shdlc->ack_pending_q)) {
+               if (shdlc->t2_active) {
+                       del_timer_sync(&shdlc->t2_timer);
+                       shdlc->t2_active = false;
+
+                       pr_debug
+                           ("All sent frames acked. Stopped T2(retransmit)\n");
+               }
+       } else {
+               skb = skb_peek(&shdlc->ack_pending_q);
+
+               mod_timer(&shdlc->t2_timer, *(unsigned long *)skb->cb +
+                         msecs_to_jiffies(SHDLC_T2_VALUE_MS));
+               shdlc->t2_active = true;
+
+               pr_debug
+                   ("Start T2(retransmit) for remaining unacked sent frames\n");
+       }
+}
+
+/*
+ * Receive validated frames from lower layer. skb contains HCI payload only.
+ * Handle according to algorithm at spec:10.8.2
+ */
+static void llc_shdlc_rcv_i_frame(struct llc_shdlc *shdlc,
+                                 struct sk_buff *skb, int ns, int nr)
+{
+       int x_ns = ns;
+       int y_nr = nr;
+
+       pr_debug("recvd I-frame %d, remote waiting frame %d\n", ns, nr);
+
+       if (shdlc->state != SHDLC_CONNECTED)
+               goto exit;
+
+       if (x_ns != shdlc->nr) {
+               llc_shdlc_send_s_frame(shdlc, S_FRAME_REJ, shdlc->nr);
+               goto exit;
+       }
+
+       if (shdlc->t1_active == false) {
+               shdlc->t1_active = true;
+               mod_timer(&shdlc->t1_timer, jiffies +
+                         msecs_to_jiffies(SHDLC_T1_VALUE_MS(shdlc->w)));
+               pr_debug("(re)Start T1(send ack)\n");
+       }
+
+       if (skb->len) {
+               shdlc->rcv_to_hci(shdlc->hdev, skb);
+               skb = NULL;
+       }
+
+       shdlc->nr = (shdlc->nr + 1) % 8;
+
+       if (llc_shdlc_x_lt_y_lteq_z(shdlc->dnr, y_nr, shdlc->ns)) {
+               llc_shdlc_reset_t2(shdlc, y_nr);
+
+               shdlc->dnr = y_nr;
+       }
+
+exit:
+       kfree_skb(skb);
+}
+
+static void llc_shdlc_rcv_ack(struct llc_shdlc *shdlc, int y_nr)
+{
+       pr_debug("remote acked up to frame %d excluded\n", y_nr);
+
+       if (llc_shdlc_x_lt_y_lteq_z(shdlc->dnr, y_nr, shdlc->ns)) {
+               llc_shdlc_reset_t2(shdlc, y_nr);
+               shdlc->dnr = y_nr;
+       }
+}
+
+static void llc_shdlc_requeue_ack_pending(struct llc_shdlc *shdlc)
+{
+       struct sk_buff *skb;
+
+       pr_debug("ns reset to %d\n", shdlc->dnr);
+
+       while ((skb = skb_dequeue_tail(&shdlc->ack_pending_q))) {
+               skb_pull(skb, 1);       /* remove control field */
+               skb_queue_head(&shdlc->send_q, skb);
+       }
+       shdlc->ns = shdlc->dnr;
+}
+
+static void llc_shdlc_rcv_rej(struct llc_shdlc *shdlc, int y_nr)
+{
+       struct sk_buff *skb;
+
+       pr_debug("remote asks retransmition from frame %d\n", y_nr);
+
+       if (llc_shdlc_x_lteq_y_lt_z(shdlc->dnr, y_nr, shdlc->ns)) {
+               if (shdlc->t2_active) {
+                       del_timer_sync(&shdlc->t2_timer);
+                       shdlc->t2_active = false;
+                       pr_debug("Stopped T2(retransmit)\n");
+               }
+
+               if (shdlc->dnr != y_nr) {
+                       while ((shdlc->dnr = ((shdlc->dnr + 1) % 8)) != y_nr) {
+                               skb = skb_dequeue(&shdlc->ack_pending_q);
+                               kfree_skb(skb);
+                       }
+               }
+
+               llc_shdlc_requeue_ack_pending(shdlc);
+       }
+}
+
+/* See spec RR:10.8.3 REJ:10.8.4 */
+static void llc_shdlc_rcv_s_frame(struct llc_shdlc *shdlc,
+                                 enum sframe_type s_frame_type, int nr)
+{
+       struct sk_buff *skb;
+
+       if (shdlc->state != SHDLC_CONNECTED)
+               return;
+
+       switch (s_frame_type) {
+       case S_FRAME_RR:
+               llc_shdlc_rcv_ack(shdlc, nr);
+               if (shdlc->rnr == true) {       /* see SHDLC 10.7.7 */
+                       shdlc->rnr = false;
+                       if (shdlc->send_q.qlen == 0) {
+                               skb = llc_shdlc_alloc_skb(shdlc, 0);
+                               if (skb)
+                                       skb_queue_tail(&shdlc->send_q, skb);
+                       }
+               }
+               break;
+       case S_FRAME_REJ:
+               llc_shdlc_rcv_rej(shdlc, nr);
+               break;
+       case S_FRAME_RNR:
+               llc_shdlc_rcv_ack(shdlc, nr);
+               shdlc->rnr = true;
+               break;
+       default:
+               break;
+       }
+}
+
+static void llc_shdlc_connect_complete(struct llc_shdlc *shdlc, int r)
+{
+       pr_debug("result=%d\n", r);
+
+       del_timer_sync(&shdlc->connect_timer);
+
+       if (r == 0) {
+               shdlc->ns = 0;
+               shdlc->nr = 0;
+               shdlc->dnr = 0;
+
+               shdlc->state = SHDLC_HALF_CONNECTED;
+       } else {
+               shdlc->state = SHDLC_DISCONNECTED;
+       }
+
+       shdlc->connect_result = r;
+
+       wake_up(shdlc->connect_wq);
+}
+
+static int llc_shdlc_connect_initiate(struct llc_shdlc *shdlc)
+{
+       struct sk_buff *skb;
+
+       pr_debug("\n");
+
+       skb = llc_shdlc_alloc_skb(shdlc, 2);
+       if (skb == NULL)
+               return -ENOMEM;
+
+       *skb_put(skb, 1) = SHDLC_MAX_WINDOW;
+       *skb_put(skb, 1) = SHDLC_SREJ_SUPPORT ? 1 : 0;
+
+       return llc_shdlc_send_u_frame(shdlc, skb, U_FRAME_RSET);
+}
+
+static int llc_shdlc_connect_send_ua(struct llc_shdlc *shdlc)
+{
+       struct sk_buff *skb;
+
+       pr_debug("\n");
+
+       skb = llc_shdlc_alloc_skb(shdlc, 0);
+       if (skb == NULL)
+               return -ENOMEM;
+
+       return llc_shdlc_send_u_frame(shdlc, skb, U_FRAME_UA);
+}
+
+static void llc_shdlc_rcv_u_frame(struct llc_shdlc *shdlc,
+                                 struct sk_buff *skb,
+                                 enum uframe_modifier u_frame_modifier)
+{
+       u8 w = SHDLC_MAX_WINDOW;
+       bool srej_support = SHDLC_SREJ_SUPPORT;
+       int r;
+
+       pr_debug("u_frame_modifier=%d\n", u_frame_modifier);
+
+       switch (u_frame_modifier) {
+       case U_FRAME_RSET:
+               switch (shdlc->state) {
+               case SHDLC_NEGOTIATING:
+               case SHDLC_CONNECTING:
+                       /*
+                        * We sent RSET, but chip wants to negotiate or we
+                        * got RSET before we managed to send out ours.
+                        */
+                       if (skb->len > 0)
+                               w = skb->data[0];
+
+                       if (skb->len > 1)
+                               srej_support = skb->data[1] & 0x01 ? true :
+                                              false;
+
+                       if ((w <= SHDLC_MAX_WINDOW) &&
+                           (SHDLC_SREJ_SUPPORT || (srej_support == false))) {
+                               shdlc->w = w;
+                               shdlc->srej_support = srej_support;
+                               r = llc_shdlc_connect_send_ua(shdlc);
+                               llc_shdlc_connect_complete(shdlc, r);
+                       }
+                       break;
+               case SHDLC_HALF_CONNECTED:
+                       /*
+                        * Chip resent RSET due to its timeout - Ignore it
+                        * as we already sent UA.
+                        */
+                       break;
+               case SHDLC_CONNECTED:
+                       /*
+                        * Chip wants to reset link. This is unexpected and
+                        * unsupported.
+                        */
+                       shdlc->hard_fault = -ECONNRESET;
+                       break;
+               default:
+                       break;
+               }
+               break;
+       case U_FRAME_UA:
+               if ((shdlc->state == SHDLC_CONNECTING &&
+                    shdlc->connect_tries > 0) ||
+                   (shdlc->state == SHDLC_NEGOTIATING)) {
+                       llc_shdlc_connect_complete(shdlc, 0);
+                       shdlc->state = SHDLC_CONNECTED;
+               }
+               break;
+       default:
+               break;
+       }
+
+       kfree_skb(skb);
+}
+
+static void llc_shdlc_handle_rcv_queue(struct llc_shdlc *shdlc)
+{
+       struct sk_buff *skb;
+       u8 control;
+       int nr;
+       int ns;
+       enum sframe_type s_frame_type;
+       enum uframe_modifier u_frame_modifier;
+
+       if (shdlc->rcv_q.qlen)
+               pr_debug("rcvQlen=%d\n", shdlc->rcv_q.qlen);
+
+       while ((skb = skb_dequeue(&shdlc->rcv_q)) != NULL) {
+               control = skb->data[0];
+               skb_pull(skb, 1);
+               switch (control & SHDLC_CONTROL_HEAD_MASK) {
+               case SHDLC_CONTROL_HEAD_I:
+               case SHDLC_CONTROL_HEAD_I2:
+                       if (shdlc->state == SHDLC_HALF_CONNECTED)
+                               shdlc->state = SHDLC_CONNECTED;
+
+                       ns = (control & SHDLC_CONTROL_NS_MASK) >> 3;
+                       nr = control & SHDLC_CONTROL_NR_MASK;
+                       llc_shdlc_rcv_i_frame(shdlc, skb, ns, nr);
+                       break;
+               case SHDLC_CONTROL_HEAD_S:
+                       if (shdlc->state == SHDLC_HALF_CONNECTED)
+                               shdlc->state = SHDLC_CONNECTED;
+
+                       s_frame_type = (control & SHDLC_CONTROL_TYPE_MASK) >> 3;
+                       nr = control & SHDLC_CONTROL_NR_MASK;
+                       llc_shdlc_rcv_s_frame(shdlc, s_frame_type, nr);
+                       kfree_skb(skb);
+                       break;
+               case SHDLC_CONTROL_HEAD_U:
+                       u_frame_modifier = control & SHDLC_CONTROL_M_MASK;
+                       llc_shdlc_rcv_u_frame(shdlc, skb, u_frame_modifier);
+                       break;
+               default:
+                       pr_err("UNKNOWN Control=%d\n", control);
+                       kfree_skb(skb);
+                       break;
+               }
+       }
+}
+
+static int llc_shdlc_w_used(int ns, int dnr)
+{
+       int unack_count;
+
+       if (dnr <= ns)
+               unack_count = ns - dnr;
+       else
+               unack_count = 8 - dnr + ns;
+
+       return unack_count;
+}
+
+/* Send frames according to algorithm at spec:10.8.1 */
+static void llc_shdlc_handle_send_queue(struct llc_shdlc *shdlc)
+{
+       struct sk_buff *skb;
+       int r;
+       unsigned long time_sent;
+
+       if (shdlc->send_q.qlen)
+               pr_debug
+                   ("sendQlen=%d ns=%d dnr=%d rnr=%s w_room=%d unackQlen=%d\n",
+                    shdlc->send_q.qlen, shdlc->ns, shdlc->dnr,
+                    shdlc->rnr == false ? "false" : "true",
+                    shdlc->w - llc_shdlc_w_used(shdlc->ns, shdlc->dnr),
+                    shdlc->ack_pending_q.qlen);
+
+       while (shdlc->send_q.qlen && shdlc->ack_pending_q.qlen < shdlc->w &&
+              (shdlc->rnr == false)) {
+
+               if (shdlc->t1_active) {
+                       del_timer_sync(&shdlc->t1_timer);
+                       shdlc->t1_active = false;
+                       pr_debug("Stopped T1(send ack)\n");
+               }
+
+               skb = skb_dequeue(&shdlc->send_q);
+
+               *skb_push(skb, 1) = SHDLC_CONTROL_HEAD_I | (shdlc->ns << 3) |
+                                   shdlc->nr;
+
+               pr_debug("Sending I-Frame %d, waiting to rcv %d\n", shdlc->ns,
+                        shdlc->nr);
+               SHDLC_DUMP_SKB("shdlc frame written", skb);
+
+               r = shdlc->xmit_to_drv(shdlc->hdev, skb);
+               if (r < 0) {
+                       shdlc->hard_fault = r;
+                       break;
+               }
+
+               shdlc->ns = (shdlc->ns + 1) % 8;
+
+               time_sent = jiffies;
+               *(unsigned long *)skb->cb = time_sent;
+
+               skb_queue_tail(&shdlc->ack_pending_q, skb);
+
+               if (shdlc->t2_active == false) {
+                       shdlc->t2_active = true;
+                       mod_timer(&shdlc->t2_timer, time_sent +
+                                 msecs_to_jiffies(SHDLC_T2_VALUE_MS));
+                       pr_debug("Started T2 (retransmit)\n");
+               }
+       }
+}
+
+static void llc_shdlc_connect_timeout(unsigned long data)
+{
+       struct llc_shdlc *shdlc = (struct llc_shdlc *)data;
+
+       pr_debug("\n");
+
+       schedule_work(&shdlc->sm_work);
+}
+
+static void llc_shdlc_t1_timeout(unsigned long data)
+{
+       struct llc_shdlc *shdlc = (struct llc_shdlc *)data;
+
+       pr_debug("SoftIRQ: need to send ack\n");
+
+       schedule_work(&shdlc->sm_work);
+}
+
+static void llc_shdlc_t2_timeout(unsigned long data)
+{
+       struct llc_shdlc *shdlc = (struct llc_shdlc *)data;
+
+       pr_debug("SoftIRQ: need to retransmit\n");
+
+       schedule_work(&shdlc->sm_work);
+}
+
+static void llc_shdlc_sm_work(struct work_struct *work)
+{
+       struct llc_shdlc *shdlc = container_of(work, struct llc_shdlc, sm_work);
+       int r;
+
+       pr_debug("\n");
+
+       mutex_lock(&shdlc->state_mutex);
+
+       switch (shdlc->state) {
+       case SHDLC_DISCONNECTED:
+               skb_queue_purge(&shdlc->rcv_q);
+               skb_queue_purge(&shdlc->send_q);
+               skb_queue_purge(&shdlc->ack_pending_q);
+               break;
+       case SHDLC_CONNECTING:
+               if (shdlc->hard_fault) {
+                       llc_shdlc_connect_complete(shdlc, shdlc->hard_fault);
+                       break;
+               }
+
+               if (shdlc->connect_tries++ < 5)
+                       r = llc_shdlc_connect_initiate(shdlc);
+               else
+                       r = -ETIME;
+               if (r < 0)
+                       llc_shdlc_connect_complete(shdlc, r);
+               else {
+                       mod_timer(&shdlc->connect_timer, jiffies +
+                                 msecs_to_jiffies(SHDLC_CONNECT_VALUE_MS));
+
+                       shdlc->state = SHDLC_NEGOTIATING;
+               }
+               break;
+       case SHDLC_NEGOTIATING:
+               if (timer_pending(&shdlc->connect_timer) == 0) {
+                       shdlc->state = SHDLC_CONNECTING;
+                       schedule_work(&shdlc->sm_work);
+               }
+
+               llc_shdlc_handle_rcv_queue(shdlc);
+
+               if (shdlc->hard_fault) {
+                       llc_shdlc_connect_complete(shdlc, shdlc->hard_fault);
+                       break;
+               }
+               break;
+       case SHDLC_HALF_CONNECTED:
+       case SHDLC_CONNECTED:
+               llc_shdlc_handle_rcv_queue(shdlc);
+               llc_shdlc_handle_send_queue(shdlc);
+
+               if (shdlc->t1_active && timer_pending(&shdlc->t1_timer) == 0) {
+                       pr_debug
+                           ("Handle T1(send ack) elapsed (T1 now inactive)\n");
+
+                       shdlc->t1_active = false;
+                       r = llc_shdlc_send_s_frame(shdlc, S_FRAME_RR,
+                                                  shdlc->nr);
+                       if (r < 0)
+                               shdlc->hard_fault = r;
+               }
+
+               if (shdlc->t2_active && timer_pending(&shdlc->t2_timer) == 0) {
+                       pr_debug
+                           ("Handle T2(retransmit) elapsed (T2 inactive)\n");
+
+                       shdlc->t2_active = false;
+
+                       llc_shdlc_requeue_ack_pending(shdlc);
+                       llc_shdlc_handle_send_queue(shdlc);
+               }
+
+               if (shdlc->hard_fault) {
+                       shdlc->llc_failure(shdlc->hdev, shdlc->hard_fault);
+               }
+               break;
+       default:
+               break;
+       }
+       mutex_unlock(&shdlc->state_mutex);
+}
+
+/*
+ * Called from syscall context to establish shdlc link. Sleeps until
+ * link is ready or failure.
+ */
+static int llc_shdlc_connect(struct llc_shdlc *shdlc)
+{
+       DECLARE_WAIT_QUEUE_HEAD_ONSTACK(connect_wq);
+
+       pr_debug("\n");
+
+       mutex_lock(&shdlc->state_mutex);
+
+       shdlc->state = SHDLC_CONNECTING;
+       shdlc->connect_wq = &connect_wq;
+       shdlc->connect_tries = 0;
+       shdlc->connect_result = 1;
+
+       mutex_unlock(&shdlc->state_mutex);
+
+       schedule_work(&shdlc->sm_work);
+
+       wait_event(connect_wq, shdlc->connect_result != 1);
+
+       return shdlc->connect_result;
+}
+
+static void llc_shdlc_disconnect(struct llc_shdlc *shdlc)
+{
+       pr_debug("\n");
+
+       mutex_lock(&shdlc->state_mutex);
+
+       shdlc->state = SHDLC_DISCONNECTED;
+
+       mutex_unlock(&shdlc->state_mutex);
+
+       schedule_work(&shdlc->sm_work);
+}
+
+/*
+ * Receive an incoming shdlc frame. Frame has already been crc-validated.
+ * skb contains only LLC header and payload.
+ * If skb == NULL, it is a notification that the link below is dead.
+ */
+static void llc_shdlc_recv_frame(struct llc_shdlc *shdlc, struct sk_buff *skb)
+{
+       if (skb == NULL) {
+               pr_err("NULL Frame -> link is dead\n");
+               shdlc->hard_fault = -EREMOTEIO;
+       } else {
+               SHDLC_DUMP_SKB("incoming frame", skb);
+               skb_queue_tail(&shdlc->rcv_q, skb);
+       }
+
+       schedule_work(&shdlc->sm_work);
+}
+
+static void *llc_shdlc_init(struct nfc_hci_dev *hdev, xmit_to_drv_t xmit_to_drv,
+                           rcv_to_hci_t rcv_to_hci, int tx_headroom,
+                           int tx_tailroom, int *rx_headroom, int *rx_tailroom,
+                           llc_failure_t llc_failure)
+{
+       struct llc_shdlc *shdlc;
+
+       *rx_headroom = SHDLC_LLC_HEAD_ROOM;
+       *rx_tailroom = 0;
+
+       shdlc = kzalloc(sizeof(struct llc_shdlc), GFP_KERNEL);
+       if (shdlc == NULL)
+               return NULL;
+
+       mutex_init(&shdlc->state_mutex);
+       shdlc->state = SHDLC_DISCONNECTED;
+
+       init_timer(&shdlc->connect_timer);
+       shdlc->connect_timer.data = (unsigned long)shdlc;
+       shdlc->connect_timer.function = llc_shdlc_connect_timeout;
+
+       init_timer(&shdlc->t1_timer);
+       shdlc->t1_timer.data = (unsigned long)shdlc;
+       shdlc->t1_timer.function = llc_shdlc_t1_timeout;
+
+       init_timer(&shdlc->t2_timer);
+       shdlc->t2_timer.data = (unsigned long)shdlc;
+       shdlc->t2_timer.function = llc_shdlc_t2_timeout;
+
+       shdlc->w = SHDLC_MAX_WINDOW;
+       shdlc->srej_support = SHDLC_SREJ_SUPPORT;
+
+       skb_queue_head_init(&shdlc->rcv_q);
+       skb_queue_head_init(&shdlc->send_q);
+       skb_queue_head_init(&shdlc->ack_pending_q);
+
+       INIT_WORK(&shdlc->sm_work, llc_shdlc_sm_work);
+
+       shdlc->hdev = hdev;
+       shdlc->xmit_to_drv = xmit_to_drv;
+       shdlc->rcv_to_hci = rcv_to_hci;
+       shdlc->tx_headroom = tx_headroom;
+       shdlc->tx_tailroom = tx_tailroom;
+       shdlc->llc_failure = llc_failure;
+
+       return shdlc;
+}
+
+static void llc_shdlc_deinit(struct nfc_llc *llc)
+{
+       struct llc_shdlc *shdlc = nfc_llc_get_data(llc);
+
+       skb_queue_purge(&shdlc->rcv_q);
+       skb_queue_purge(&shdlc->send_q);
+       skb_queue_purge(&shdlc->ack_pending_q);
+
+       kfree(shdlc);
+}
+
+static int llc_shdlc_start(struct nfc_llc *llc)
+{
+       struct llc_shdlc *shdlc = nfc_llc_get_data(llc);
+
+       return llc_shdlc_connect(shdlc);
+}
+
+static int llc_shdlc_stop(struct nfc_llc *llc)
+{
+       struct llc_shdlc *shdlc = nfc_llc_get_data(llc);
+
+       llc_shdlc_disconnect(shdlc);
+
+       return 0;
+}
+
+static void llc_shdlc_rcv_from_drv(struct nfc_llc *llc, struct sk_buff *skb)
+{
+       struct llc_shdlc *shdlc = nfc_llc_get_data(llc);
+
+       llc_shdlc_recv_frame(shdlc, skb);
+}
+
+static int llc_shdlc_xmit_from_hci(struct nfc_llc *llc, struct sk_buff *skb)
+{
+       struct llc_shdlc *shdlc = nfc_llc_get_data(llc);
+
+       skb_queue_tail(&shdlc->send_q, skb);
+
+       schedule_work(&shdlc->sm_work);
+
+       return 0;
+}
+
+static struct nfc_llc_ops llc_shdlc_ops = {
+       .init = llc_shdlc_init,
+       .deinit = llc_shdlc_deinit,
+       .start = llc_shdlc_start,
+       .stop = llc_shdlc_stop,
+       .rcv_from_drv = llc_shdlc_rcv_from_drv,
+       .xmit_from_hci = llc_shdlc_xmit_from_hci,
+};
+
+int nfc_llc_shdlc_register(void)
+{
+       return nfc_llc_register(LLC_SHDLC_NAME, &llc_shdlc_ops);
+}
diff --git a/net/nfc/hci/shdlc.c b/net/nfc/hci/shdlc.c
deleted file mode 100644 (file)
index 6f840c1..0000000
+++ /dev/null
@@ -1,951 +0,0 @@
-/*
- * Copyright (C) 2012  Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the
- * Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-
-#define pr_fmt(fmt) "shdlc: %s: " fmt, __func__
-
-#include <linux/sched.h>
-#include <linux/export.h>
-#include <linux/wait.h>
-#include <linux/crc-ccitt.h>
-#include <linux/slab.h>
-#include <linux/skbuff.h>
-
-#include <net/nfc/hci.h>
-#include <net/nfc/shdlc.h>
-
-#define SHDLC_LLC_HEAD_ROOM    2
-#define SHDLC_LLC_TAIL_ROOM    2
-
-#define SHDLC_MAX_WINDOW       4
-#define SHDLC_SREJ_SUPPORT     false
-
-#define SHDLC_CONTROL_HEAD_MASK        0xe0
-#define SHDLC_CONTROL_HEAD_I   0x80
-#define SHDLC_CONTROL_HEAD_I2  0xa0
-#define SHDLC_CONTROL_HEAD_S   0xc0
-#define SHDLC_CONTROL_HEAD_U   0xe0
-
-#define SHDLC_CONTROL_NS_MASK  0x38
-#define SHDLC_CONTROL_NR_MASK  0x07
-#define SHDLC_CONTROL_TYPE_MASK        0x18
-
-#define SHDLC_CONTROL_M_MASK   0x1f
-
-enum sframe_type {
-       S_FRAME_RR = 0x00,
-       S_FRAME_REJ = 0x01,
-       S_FRAME_RNR = 0x02,
-       S_FRAME_SREJ = 0x03
-};
-
-enum uframe_modifier {
-       U_FRAME_UA = 0x06,
-       U_FRAME_RSET = 0x19
-};
-
-#define SHDLC_CONNECT_VALUE_MS 5
-#define SHDLC_T1_VALUE_MS(w)   ((5 * w) / 4)
-#define SHDLC_T2_VALUE_MS      300
-
-#define SHDLC_DUMP_SKB(info, skb)                                \
-do {                                                             \
-       pr_debug("%s:\n", info);                                  \
-       print_hex_dump(KERN_DEBUG, "shdlc: ", DUMP_PREFIX_OFFSET, \
-                      16, 1, skb->data, skb->len, 0);            \
-} while (0)
-
-/* checks x < y <= z modulo 8 */
-static bool nfc_shdlc_x_lt_y_lteq_z(int x, int y, int z)
-{
-       if (x < z)
-               return ((x < y) && (y <= z)) ? true : false;
-       else
-               return ((y > x) || (y <= z)) ? true : false;
-}
-
-/* checks x <= y < z modulo 8 */
-static bool nfc_shdlc_x_lteq_y_lt_z(int x, int y, int z)
-{
-       if (x <= z)
-               return ((x <= y) && (y < z)) ? true : false;
-       else                    /* x > z -> z+8 > x */
-               return ((y >= x) || (y < z)) ? true : false;
-}
-
-static struct sk_buff *nfc_shdlc_alloc_skb(struct nfc_shdlc *shdlc,
-                                          int payload_len)
-{
-       struct sk_buff *skb;
-
-       skb = alloc_skb(shdlc->client_headroom + SHDLC_LLC_HEAD_ROOM +
-                       shdlc->client_tailroom + SHDLC_LLC_TAIL_ROOM +
-                       payload_len, GFP_KERNEL);
-       if (skb)
-               skb_reserve(skb, shdlc->client_headroom + SHDLC_LLC_HEAD_ROOM);
-
-       return skb;
-}
-
-static void nfc_shdlc_add_len_crc(struct sk_buff *skb)
-{
-       u16 crc;
-       int len;
-
-       len = skb->len + 2;
-       *skb_push(skb, 1) = len;
-
-       crc = crc_ccitt(0xffff, skb->data, skb->len);
-       crc = ~crc;
-       *skb_put(skb, 1) = crc & 0xff;
-       *skb_put(skb, 1) = crc >> 8;
-}
-
-/* immediately sends an S frame. */
-static int nfc_shdlc_send_s_frame(struct nfc_shdlc *shdlc,
-                                 enum sframe_type sframe_type, int nr)
-{
-       int r;
-       struct sk_buff *skb;
-
-       pr_debug("sframe_type=%d nr=%d\n", sframe_type, nr);
-
-       skb = nfc_shdlc_alloc_skb(shdlc, 0);
-       if (skb == NULL)
-               return -ENOMEM;
-
-       *skb_push(skb, 1) = SHDLC_CONTROL_HEAD_S | (sframe_type << 3) | nr;
-
-       nfc_shdlc_add_len_crc(skb);
-
-       r = shdlc->ops->xmit(shdlc, skb);
-
-       kfree_skb(skb);
-
-       return r;
-}
-
-/* immediately sends an U frame. skb may contain optional payload */
-static int nfc_shdlc_send_u_frame(struct nfc_shdlc *shdlc,
-                                 struct sk_buff *skb,
-                                 enum uframe_modifier uframe_modifier)
-{
-       int r;
-
-       pr_debug("uframe_modifier=%d\n", uframe_modifier);
-
-       *skb_push(skb, 1) = SHDLC_CONTROL_HEAD_U | uframe_modifier;
-
-       nfc_shdlc_add_len_crc(skb);
-
-       r = shdlc->ops->xmit(shdlc, skb);
-
-       kfree_skb(skb);
-
-       return r;
-}
-
-/*
- * Free ack_pending frames until y_nr - 1, and reset t2 according to
- * the remaining oldest ack_pending frame sent time
- */
-static void nfc_shdlc_reset_t2(struct nfc_shdlc *shdlc, int y_nr)
-{
-       struct sk_buff *skb;
-       int dnr = shdlc->dnr;   /* MUST initially be < y_nr */
-
-       pr_debug("release ack pending up to frame %d excluded\n", y_nr);
-
-       while (dnr != y_nr) {
-               pr_debug("release ack pending frame %d\n", dnr);
-
-               skb = skb_dequeue(&shdlc->ack_pending_q);
-               kfree_skb(skb);
-
-               dnr = (dnr + 1) % 8;
-       }
-
-       if (skb_queue_empty(&shdlc->ack_pending_q)) {
-               if (shdlc->t2_active) {
-                       del_timer_sync(&shdlc->t2_timer);
-                       shdlc->t2_active = false;
-
-                       pr_debug
-                           ("All sent frames acked. Stopped T2(retransmit)\n");
-               }
-       } else {
-               skb = skb_peek(&shdlc->ack_pending_q);
-
-               mod_timer(&shdlc->t2_timer, *(unsigned long *)skb->cb +
-                         msecs_to_jiffies(SHDLC_T2_VALUE_MS));
-               shdlc->t2_active = true;
-
-               pr_debug
-                   ("Start T2(retransmit) for remaining unacked sent frames\n");
-       }
-}
-
-/*
- * Receive validated frames from lower layer. skb contains HCI payload only.
- * Handle according to algorithm at spec:10.8.2
- */
-static void nfc_shdlc_rcv_i_frame(struct nfc_shdlc *shdlc,
-                                 struct sk_buff *skb, int ns, int nr)
-{
-       int x_ns = ns;
-       int y_nr = nr;
-
-       pr_debug("recvd I-frame %d, remote waiting frame %d\n", ns, nr);
-
-       if (shdlc->state != SHDLC_CONNECTED)
-               goto exit;
-
-       if (x_ns != shdlc->nr) {
-               nfc_shdlc_send_s_frame(shdlc, S_FRAME_REJ, shdlc->nr);
-               goto exit;
-       }
-
-       if (shdlc->t1_active == false) {
-               shdlc->t1_active = true;
-               mod_timer(&shdlc->t1_timer,
-                         msecs_to_jiffies(SHDLC_T1_VALUE_MS(shdlc->w)));
-               pr_debug("(re)Start T1(send ack)\n");
-       }
-
-       if (skb->len) {
-               nfc_hci_recv_frame(shdlc->hdev, skb);
-               skb = NULL;
-       }
-
-       shdlc->nr = (shdlc->nr + 1) % 8;
-
-       if (nfc_shdlc_x_lt_y_lteq_z(shdlc->dnr, y_nr, shdlc->ns)) {
-               nfc_shdlc_reset_t2(shdlc, y_nr);
-
-               shdlc->dnr = y_nr;
-       }
-
-exit:
-       if (skb)
-               kfree_skb(skb);
-}
-
-static void nfc_shdlc_rcv_ack(struct nfc_shdlc *shdlc, int y_nr)
-{
-       pr_debug("remote acked up to frame %d excluded\n", y_nr);
-
-       if (nfc_shdlc_x_lt_y_lteq_z(shdlc->dnr, y_nr, shdlc->ns)) {
-               nfc_shdlc_reset_t2(shdlc, y_nr);
-               shdlc->dnr = y_nr;
-       }
-}
-
-static void nfc_shdlc_requeue_ack_pending(struct nfc_shdlc *shdlc)
-{
-       struct sk_buff *skb;
-
-       pr_debug("ns reset to %d\n", shdlc->dnr);
-
-       while ((skb = skb_dequeue_tail(&shdlc->ack_pending_q))) {
-               skb_pull(skb, 2);       /* remove len+control */
-               skb_trim(skb, skb->len - 2);    /* remove crc */
-               skb_queue_head(&shdlc->send_q, skb);
-       }
-       shdlc->ns = shdlc->dnr;
-}
-
-static void nfc_shdlc_rcv_rej(struct nfc_shdlc *shdlc, int y_nr)
-{
-       struct sk_buff *skb;
-
-       pr_debug("remote asks retransmition from frame %d\n", y_nr);
-
-       if (nfc_shdlc_x_lteq_y_lt_z(shdlc->dnr, y_nr, shdlc->ns)) {
-               if (shdlc->t2_active) {
-                       del_timer_sync(&shdlc->t2_timer);
-                       shdlc->t2_active = false;
-                       pr_debug("Stopped T2(retransmit)\n");
-               }
-
-               if (shdlc->dnr != y_nr) {
-                       while ((shdlc->dnr = ((shdlc->dnr + 1) % 8)) != y_nr) {
-                               skb = skb_dequeue(&shdlc->ack_pending_q);
-                               kfree_skb(skb);
-                       }
-               }
-
-               nfc_shdlc_requeue_ack_pending(shdlc);
-       }
-}
-
-/* See spec RR:10.8.3 REJ:10.8.4 */
-static void nfc_shdlc_rcv_s_frame(struct nfc_shdlc *shdlc,
-                                 enum sframe_type s_frame_type, int nr)
-{
-       struct sk_buff *skb;
-
-       if (shdlc->state != SHDLC_CONNECTED)
-               return;
-
-       switch (s_frame_type) {
-       case S_FRAME_RR:
-               nfc_shdlc_rcv_ack(shdlc, nr);
-               if (shdlc->rnr == true) {       /* see SHDLC 10.7.7 */
-                       shdlc->rnr = false;
-                       if (shdlc->send_q.qlen == 0) {
-                               skb = nfc_shdlc_alloc_skb(shdlc, 0);
-                               if (skb)
-                                       skb_queue_tail(&shdlc->send_q, skb);
-                       }
-               }
-               break;
-       case S_FRAME_REJ:
-               nfc_shdlc_rcv_rej(shdlc, nr);
-               break;
-       case S_FRAME_RNR:
-               nfc_shdlc_rcv_ack(shdlc, nr);
-               shdlc->rnr = true;
-               break;
-       default:
-               break;
-       }
-}
-
-static void nfc_shdlc_connect_complete(struct nfc_shdlc *shdlc, int r)
-{
-       pr_debug("result=%d\n", r);
-
-       del_timer_sync(&shdlc->connect_timer);
-
-       if (r == 0) {
-               shdlc->ns = 0;
-               shdlc->nr = 0;
-               shdlc->dnr = 0;
-
-               shdlc->state = SHDLC_CONNECTED;
-       } else {
-               shdlc->state = SHDLC_DISCONNECTED;
-       }
-
-       shdlc->connect_result = r;
-
-       wake_up(shdlc->connect_wq);
-}
-
-static int nfc_shdlc_connect_initiate(struct nfc_shdlc *shdlc)
-{
-       struct sk_buff *skb;
-
-       pr_debug("\n");
-
-       skb = nfc_shdlc_alloc_skb(shdlc, 2);
-       if (skb == NULL)
-               return -ENOMEM;
-
-       *skb_put(skb, 1) = SHDLC_MAX_WINDOW;
-       *skb_put(skb, 1) = SHDLC_SREJ_SUPPORT ? 1 : 0;
-
-       return nfc_shdlc_send_u_frame(shdlc, skb, U_FRAME_RSET);
-}
-
-static int nfc_shdlc_connect_send_ua(struct nfc_shdlc *shdlc)
-{
-       struct sk_buff *skb;
-
-       pr_debug("\n");
-
-       skb = nfc_shdlc_alloc_skb(shdlc, 0);
-       if (skb == NULL)
-               return -ENOMEM;
-
-       return nfc_shdlc_send_u_frame(shdlc, skb, U_FRAME_UA);
-}
-
-static void nfc_shdlc_rcv_u_frame(struct nfc_shdlc *shdlc,
-                                 struct sk_buff *skb,
-                                 enum uframe_modifier u_frame_modifier)
-{
-       u8 w = SHDLC_MAX_WINDOW;
-       bool srej_support = SHDLC_SREJ_SUPPORT;
-       int r;
-
-       pr_debug("u_frame_modifier=%d\n", u_frame_modifier);
-
-       switch (u_frame_modifier) {
-       case U_FRAME_RSET:
-               if (shdlc->state == SHDLC_NEGOCIATING) {
-                       /* we sent RSET, but chip wants to negociate */
-                       if (skb->len > 0)
-                               w = skb->data[0];
-
-                       if (skb->len > 1)
-                               srej_support = skb->data[1] & 0x01 ? true :
-                                              false;
-
-                       if ((w <= SHDLC_MAX_WINDOW) &&
-                           (SHDLC_SREJ_SUPPORT || (srej_support == false))) {
-                               shdlc->w = w;
-                               shdlc->srej_support = srej_support;
-                               r = nfc_shdlc_connect_send_ua(shdlc);
-                               nfc_shdlc_connect_complete(shdlc, r);
-                       }
-               } else if (shdlc->state == SHDLC_CONNECTED) {
-                       /*
-                        * Chip wants to reset link. This is unexpected and
-                        * unsupported.
-                        */
-                       shdlc->hard_fault = -ECONNRESET;
-               }
-               break;
-       case U_FRAME_UA:
-               if ((shdlc->state == SHDLC_CONNECTING &&
-                    shdlc->connect_tries > 0) ||
-                   (shdlc->state == SHDLC_NEGOCIATING))
-                       nfc_shdlc_connect_complete(shdlc, 0);
-               break;
-       default:
-               break;
-       }
-
-       kfree_skb(skb);
-}
-
-static void nfc_shdlc_handle_rcv_queue(struct nfc_shdlc *shdlc)
-{
-       struct sk_buff *skb;
-       u8 control;
-       int nr;
-       int ns;
-       enum sframe_type s_frame_type;
-       enum uframe_modifier u_frame_modifier;
-
-       if (shdlc->rcv_q.qlen)
-               pr_debug("rcvQlen=%d\n", shdlc->rcv_q.qlen);
-
-       while ((skb = skb_dequeue(&shdlc->rcv_q)) != NULL) {
-               control = skb->data[0];
-               skb_pull(skb, 1);
-               switch (control & SHDLC_CONTROL_HEAD_MASK) {
-               case SHDLC_CONTROL_HEAD_I:
-               case SHDLC_CONTROL_HEAD_I2:
-                       ns = (control & SHDLC_CONTROL_NS_MASK) >> 3;
-                       nr = control & SHDLC_CONTROL_NR_MASK;
-                       nfc_shdlc_rcv_i_frame(shdlc, skb, ns, nr);
-                       break;
-               case SHDLC_CONTROL_HEAD_S:
-                       s_frame_type = (control & SHDLC_CONTROL_TYPE_MASK) >> 3;
-                       nr = control & SHDLC_CONTROL_NR_MASK;
-                       nfc_shdlc_rcv_s_frame(shdlc, s_frame_type, nr);
-                       kfree_skb(skb);
-                       break;
-               case SHDLC_CONTROL_HEAD_U:
-                       u_frame_modifier = control & SHDLC_CONTROL_M_MASK;
-                       nfc_shdlc_rcv_u_frame(shdlc, skb, u_frame_modifier);
-                       break;
-               default:
-                       pr_err("UNKNOWN Control=%d\n", control);
-                       kfree_skb(skb);
-                       break;
-               }
-       }
-}
-
-static int nfc_shdlc_w_used(int ns, int dnr)
-{
-       int unack_count;
-
-       if (dnr <= ns)
-               unack_count = ns - dnr;
-       else
-               unack_count = 8 - dnr + ns;
-
-       return unack_count;
-}
-
-/* Send frames according to algorithm at spec:10.8.1 */
-static void nfc_shdlc_handle_send_queue(struct nfc_shdlc *shdlc)
-{
-       struct sk_buff *skb;
-       int r;
-       unsigned long time_sent;
-
-       if (shdlc->send_q.qlen)
-               pr_debug
-                   ("sendQlen=%d ns=%d dnr=%d rnr=%s w_room=%d unackQlen=%d\n",
-                    shdlc->send_q.qlen, shdlc->ns, shdlc->dnr,
-                    shdlc->rnr == false ? "false" : "true",
-                    shdlc->w - nfc_shdlc_w_used(shdlc->ns, shdlc->dnr),
-                    shdlc->ack_pending_q.qlen);
-
-       while (shdlc->send_q.qlen && shdlc->ack_pending_q.qlen < shdlc->w &&
-              (shdlc->rnr == false)) {
-
-               if (shdlc->t1_active) {
-                       del_timer_sync(&shdlc->t1_timer);
-                       shdlc->t1_active = false;
-                       pr_debug("Stopped T1(send ack)\n");
-               }
-
-               skb = skb_dequeue(&shdlc->send_q);
-
-               *skb_push(skb, 1) = SHDLC_CONTROL_HEAD_I | (shdlc->ns << 3) |
-                                   shdlc->nr;
-
-               pr_debug("Sending I-Frame %d, waiting to rcv %d\n", shdlc->ns,
-                        shdlc->nr);
-       /*      SHDLC_DUMP_SKB("shdlc frame written", skb); */
-
-               nfc_shdlc_add_len_crc(skb);
-
-               r = shdlc->ops->xmit(shdlc, skb);
-               if (r < 0) {
-                       shdlc->hard_fault = r;
-                       break;
-               }
-
-               shdlc->ns = (shdlc->ns + 1) % 8;
-
-               time_sent = jiffies;
-               *(unsigned long *)skb->cb = time_sent;
-
-               skb_queue_tail(&shdlc->ack_pending_q, skb);
-
-               if (shdlc->t2_active == false) {
-                       shdlc->t2_active = true;
-                       mod_timer(&shdlc->t2_timer, time_sent +
-                                 msecs_to_jiffies(SHDLC_T2_VALUE_MS));
-                       pr_debug("Started T2 (retransmit)\n");
-               }
-       }
-}
-
-static void nfc_shdlc_connect_timeout(unsigned long data)
-{
-       struct nfc_shdlc *shdlc = (struct nfc_shdlc *)data;
-
-       pr_debug("\n");
-
-       queue_work(shdlc->sm_wq, &shdlc->sm_work);
-}
-
-static void nfc_shdlc_t1_timeout(unsigned long data)
-{
-       struct nfc_shdlc *shdlc = (struct nfc_shdlc *)data;
-
-       pr_debug("SoftIRQ: need to send ack\n");
-
-       queue_work(shdlc->sm_wq, &shdlc->sm_work);
-}
-
-static void nfc_shdlc_t2_timeout(unsigned long data)
-{
-       struct nfc_shdlc *shdlc = (struct nfc_shdlc *)data;
-
-       pr_debug("SoftIRQ: need to retransmit\n");
-
-       queue_work(shdlc->sm_wq, &shdlc->sm_work);
-}
-
-static void nfc_shdlc_sm_work(struct work_struct *work)
-{
-       struct nfc_shdlc *shdlc = container_of(work, struct nfc_shdlc, sm_work);
-       int r;
-
-       pr_debug("\n");
-
-       mutex_lock(&shdlc->state_mutex);
-
-       switch (shdlc->state) {
-       case SHDLC_DISCONNECTED:
-               skb_queue_purge(&shdlc->rcv_q);
-               skb_queue_purge(&shdlc->send_q);
-               skb_queue_purge(&shdlc->ack_pending_q);
-               break;
-       case SHDLC_CONNECTING:
-               if (shdlc->hard_fault) {
-                       nfc_shdlc_connect_complete(shdlc, shdlc->hard_fault);
-                       break;
-               }
-
-               if (shdlc->connect_tries++ < 5)
-                       r = nfc_shdlc_connect_initiate(shdlc);
-               else
-                       r = -ETIME;
-               if (r < 0)
-                       nfc_shdlc_connect_complete(shdlc, r);
-               else {
-                       mod_timer(&shdlc->connect_timer, jiffies +
-                                 msecs_to_jiffies(SHDLC_CONNECT_VALUE_MS));
-
-                       shdlc->state = SHDLC_NEGOCIATING;
-               }
-               break;
-       case SHDLC_NEGOCIATING:
-               if (timer_pending(&shdlc->connect_timer) == 0) {
-                       shdlc->state = SHDLC_CONNECTING;
-                       queue_work(shdlc->sm_wq, &shdlc->sm_work);
-               }
-
-               nfc_shdlc_handle_rcv_queue(shdlc);
-
-               if (shdlc->hard_fault) {
-                       nfc_shdlc_connect_complete(shdlc, shdlc->hard_fault);
-                       break;
-               }
-               break;
-       case SHDLC_CONNECTED:
-               nfc_shdlc_handle_rcv_queue(shdlc);
-               nfc_shdlc_handle_send_queue(shdlc);
-
-               if (shdlc->t1_active && timer_pending(&shdlc->t1_timer) == 0) {
-                       pr_debug
-                           ("Handle T1(send ack) elapsed (T1 now inactive)\n");
-
-                       shdlc->t1_active = false;
-                       r = nfc_shdlc_send_s_frame(shdlc, S_FRAME_RR,
-                                                  shdlc->nr);
-                       if (r < 0)
-                               shdlc->hard_fault = r;
-               }
-
-               if (shdlc->t2_active && timer_pending(&shdlc->t2_timer) == 0) {
-                       pr_debug
-                           ("Handle T2(retransmit) elapsed (T2 inactive)\n");
-
-                       shdlc->t2_active = false;
-
-                       nfc_shdlc_requeue_ack_pending(shdlc);
-                       nfc_shdlc_handle_send_queue(shdlc);
-               }
-
-               if (shdlc->hard_fault) {
-                       nfc_hci_driver_failure(shdlc->hdev, shdlc->hard_fault);
-               }
-               break;
-       default:
-               break;
-       }
-       mutex_unlock(&shdlc->state_mutex);
-}
-
-/*
- * Called from syscall context to establish shdlc link. Sleeps until
- * link is ready or failure.
- */
-static int nfc_shdlc_connect(struct nfc_shdlc *shdlc)
-{
-       DECLARE_WAIT_QUEUE_HEAD_ONSTACK(connect_wq);
-
-       pr_debug("\n");
-
-       mutex_lock(&shdlc->state_mutex);
-
-       shdlc->state = SHDLC_CONNECTING;
-       shdlc->connect_wq = &connect_wq;
-       shdlc->connect_tries = 0;
-       shdlc->connect_result = 1;
-
-       mutex_unlock(&shdlc->state_mutex);
-
-       queue_work(shdlc->sm_wq, &shdlc->sm_work);
-
-       wait_event(connect_wq, shdlc->connect_result != 1);
-
-       return shdlc->connect_result;
-}
-
-static void nfc_shdlc_disconnect(struct nfc_shdlc *shdlc)
-{
-       pr_debug("\n");
-
-       mutex_lock(&shdlc->state_mutex);
-
-       shdlc->state = SHDLC_DISCONNECTED;
-
-       mutex_unlock(&shdlc->state_mutex);
-
-       queue_work(shdlc->sm_wq, &shdlc->sm_work);
-}
-
-/*
- * Receive an incoming shdlc frame. Frame has already been crc-validated.
- * skb contains only LLC header and payload.
- * If skb == NULL, it is a notification that the link below is dead.
- */
-void nfc_shdlc_recv_frame(struct nfc_shdlc *shdlc, struct sk_buff *skb)
-{
-       if (skb == NULL) {
-               pr_err("NULL Frame -> link is dead\n");
-               shdlc->hard_fault = -EREMOTEIO;
-       } else {
-               SHDLC_DUMP_SKB("incoming frame", skb);
-               skb_queue_tail(&shdlc->rcv_q, skb);
-       }
-
-       queue_work(shdlc->sm_wq, &shdlc->sm_work);
-}
-EXPORT_SYMBOL(nfc_shdlc_recv_frame);
-
-static int nfc_shdlc_open(struct nfc_hci_dev *hdev)
-{
-       struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev);
-       int r;
-
-       pr_debug("\n");
-
-       if (shdlc->ops->open) {
-               r = shdlc->ops->open(shdlc);
-               if (r < 0)
-                       return r;
-       }
-
-       r = nfc_shdlc_connect(shdlc);
-       if (r < 0 && shdlc->ops->close)
-               shdlc->ops->close(shdlc);
-
-       return r;
-}
-
-static void nfc_shdlc_close(struct nfc_hci_dev *hdev)
-{
-       struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev);
-
-       pr_debug("\n");
-
-       nfc_shdlc_disconnect(shdlc);
-
-       if (shdlc->ops->close)
-               shdlc->ops->close(shdlc);
-}
-
-static int nfc_shdlc_hci_ready(struct nfc_hci_dev *hdev)
-{
-       struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev);
-       int r = 0;
-
-       pr_debug("\n");
-
-       if (shdlc->ops->hci_ready)
-               r = shdlc->ops->hci_ready(shdlc);
-
-       return r;
-}
-
-static int nfc_shdlc_xmit(struct nfc_hci_dev *hdev, struct sk_buff *skb)
-{
-       struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev);
-
-       SHDLC_DUMP_SKB("queuing HCP packet to shdlc", skb);
-
-       skb_queue_tail(&shdlc->send_q, skb);
-
-       queue_work(shdlc->sm_wq, &shdlc->sm_work);
-
-       return 0;
-}
-
-static int nfc_shdlc_start_poll(struct nfc_hci_dev *hdev,
-                               u32 im_protocols, u32 tm_protocols)
-{
-       struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev);
-
-       pr_debug("\n");
-
-       if (shdlc->ops->start_poll)
-               return shdlc->ops->start_poll(shdlc,
-                                             im_protocols, tm_protocols);
-
-       return 0;
-}
-
-static int nfc_shdlc_target_from_gate(struct nfc_hci_dev *hdev, u8 gate,
-                                     struct nfc_target *target)
-{
-       struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev);
-
-       if (shdlc->ops->target_from_gate)
-               return shdlc->ops->target_from_gate(shdlc, gate, target);
-
-       return -EPERM;
-}
-
-static int nfc_shdlc_complete_target_discovered(struct nfc_hci_dev *hdev,
-                                               u8 gate,
-                                               struct nfc_target *target)
-{
-       struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev);
-
-       pr_debug("\n");
-
-       if (shdlc->ops->complete_target_discovered)
-               return shdlc->ops->complete_target_discovered(shdlc, gate,
-                                                             target);
-
-       return 0;
-}
-
-static int nfc_shdlc_data_exchange(struct nfc_hci_dev *hdev,
-                                  struct nfc_target *target,
-                                  struct sk_buff *skb,
-                                  struct sk_buff **res_skb)
-{
-       struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev);
-
-       if (shdlc->ops->data_exchange)
-               return shdlc->ops->data_exchange(shdlc, target, skb, res_skb);
-
-       return -EPERM;
-}
-
-static int nfc_shdlc_check_presence(struct nfc_hci_dev *hdev,
-                                   struct nfc_target *target)
-{
-       struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev);
-
-       if (shdlc->ops->check_presence)
-               return shdlc->ops->check_presence(shdlc, target);
-
-       return 0;
-}
-
-static struct nfc_hci_ops shdlc_ops = {
-       .open = nfc_shdlc_open,
-       .close = nfc_shdlc_close,
-       .hci_ready = nfc_shdlc_hci_ready,
-       .xmit = nfc_shdlc_xmit,
-       .start_poll = nfc_shdlc_start_poll,
-       .target_from_gate = nfc_shdlc_target_from_gate,
-       .complete_target_discovered = nfc_shdlc_complete_target_discovered,
-       .data_exchange = nfc_shdlc_data_exchange,
-       .check_presence = nfc_shdlc_check_presence,
-};
-
-struct nfc_shdlc *nfc_shdlc_allocate(struct nfc_shdlc_ops *ops,
-                                    struct nfc_hci_init_data *init_data,
-                                    u32 protocols,
-                                    int tx_headroom, int tx_tailroom,
-                                    int max_link_payload, const char *devname)
-{
-       struct nfc_shdlc *shdlc;
-       int r;
-       char name[32];
-
-       if (ops->xmit == NULL)
-               return NULL;
-
-       shdlc = kzalloc(sizeof(struct nfc_shdlc), GFP_KERNEL);
-       if (shdlc == NULL)
-               return NULL;
-
-       mutex_init(&shdlc->state_mutex);
-       shdlc->ops = ops;
-       shdlc->state = SHDLC_DISCONNECTED;
-
-       init_timer(&shdlc->connect_timer);
-       shdlc->connect_timer.data = (unsigned long)shdlc;
-       shdlc->connect_timer.function = nfc_shdlc_connect_timeout;
-
-       init_timer(&shdlc->t1_timer);
-       shdlc->t1_timer.data = (unsigned long)shdlc;
-       shdlc->t1_timer.function = nfc_shdlc_t1_timeout;
-
-       init_timer(&shdlc->t2_timer);
-       shdlc->t2_timer.data = (unsigned long)shdlc;
-       shdlc->t2_timer.function = nfc_shdlc_t2_timeout;
-
-       shdlc->w = SHDLC_MAX_WINDOW;
-       shdlc->srej_support = SHDLC_SREJ_SUPPORT;
-
-       skb_queue_head_init(&shdlc->rcv_q);
-       skb_queue_head_init(&shdlc->send_q);
-       skb_queue_head_init(&shdlc->ack_pending_q);
-
-       INIT_WORK(&shdlc->sm_work, nfc_shdlc_sm_work);
-       snprintf(name, sizeof(name), "%s_shdlc_sm_wq", devname);
-       shdlc->sm_wq = alloc_workqueue(name, WQ_NON_REENTRANT | WQ_UNBOUND |
-                                      WQ_MEM_RECLAIM, 1);
-       if (shdlc->sm_wq == NULL)
-               goto err_allocwq;
-
-       shdlc->client_headroom = tx_headroom;
-       shdlc->client_tailroom = tx_tailroom;
-
-       shdlc->hdev = nfc_hci_allocate_device(&shdlc_ops, init_data, protocols,
-                                             tx_headroom + SHDLC_LLC_HEAD_ROOM,
-                                             tx_tailroom + SHDLC_LLC_TAIL_ROOM,
-                                             max_link_payload);
-       if (shdlc->hdev == NULL)
-               goto err_allocdev;
-
-       nfc_hci_set_clientdata(shdlc->hdev, shdlc);
-
-       r = nfc_hci_register_device(shdlc->hdev);
-       if (r < 0)
-               goto err_regdev;
-
-       return shdlc;
-
-err_regdev:
-       nfc_hci_free_device(shdlc->hdev);
-
-err_allocdev:
-       destroy_workqueue(shdlc->sm_wq);
-
-err_allocwq:
-       kfree(shdlc);
-
-       return NULL;
-}
-EXPORT_SYMBOL(nfc_shdlc_allocate);
-
-void nfc_shdlc_free(struct nfc_shdlc *shdlc)
-{
-       pr_debug("\n");
-
-       nfc_hci_unregister_device(shdlc->hdev);
-       nfc_hci_free_device(shdlc->hdev);
-
-       destroy_workqueue(shdlc->sm_wq);
-
-       skb_queue_purge(&shdlc->rcv_q);
-       skb_queue_purge(&shdlc->send_q);
-       skb_queue_purge(&shdlc->ack_pending_q);
-
-       kfree(shdlc);
-}
-EXPORT_SYMBOL(nfc_shdlc_free);
-
-void nfc_shdlc_set_clientdata(struct nfc_shdlc *shdlc, void *clientdata)
-{
-       pr_debug("\n");
-
-       shdlc->clientdata = clientdata;
-}
-EXPORT_SYMBOL(nfc_shdlc_set_clientdata);
-
-void *nfc_shdlc_get_clientdata(struct nfc_shdlc *shdlc)
-{
-       return shdlc->clientdata;
-}
-EXPORT_SYMBOL(nfc_shdlc_get_clientdata);
-
-struct nfc_hci_dev *nfc_shdlc_get_hci_dev(struct nfc_shdlc *shdlc)
-{
-       return shdlc->hdev;
-}
-EXPORT_SYMBOL(nfc_shdlc_get_hci_dev);
index b982b5b890d73da30a315567851d5d91e7ec9285..c45ccd6c094c5b16f258be4ffd7a1cff4433f6dc 100644 (file)
@@ -312,6 +312,8 @@ int nfc_llcp_send_symm(struct nfc_dev *dev)
 
        skb = llcp_add_header(skb, 0, 0, LLCP_PDU_SYMM);
 
+       nfc_llcp_send_to_raw_sock(local, skb, NFC_LLCP_DIRECTION_TX);
+
        return nfc_data_exchange(dev, local->target_idx, skb,
                                 nfc_llcp_recv, local);
 }
index 82f0f7588b463d8ba0a8e1931124c03f94de29ed..cc10d073c3381179671ef709b58baf4600e51a2b 100644 (file)
@@ -56,7 +56,7 @@ static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen)
        sk_for_each_safe(sk, node, tmp, &local->sockets.head) {
                llcp_sock = nfc_llcp_sock(sk);
 
-               lock_sock(sk);
+               bh_lock_sock(sk);
 
                if (sk->sk_state == LLCP_CONNECTED)
                        nfc_put_device(llcp_sock->dev);
@@ -68,26 +68,26 @@ static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen)
                        list_for_each_entry_safe(lsk, n, &llcp_sock->accept_queue,
                                                 accept_queue) {
                                accept_sk = &lsk->sk;
-                               lock_sock(accept_sk);
+                               bh_lock_sock(accept_sk);
 
                                nfc_llcp_accept_unlink(accept_sk);
 
                                accept_sk->sk_state = LLCP_CLOSED;
 
-                               release_sock(accept_sk);
+                               bh_unlock_sock(accept_sk);
 
                                sock_orphan(accept_sk);
                        }
 
                        if (listen == true) {
-                               release_sock(sk);
+                               bh_unlock_sock(sk);
                                continue;
                        }
                }
 
                sk->sk_state = LLCP_CLOSED;
 
-               release_sock(sk);
+               bh_unlock_sock(sk);
 
                sock_orphan(sk);
 
@@ -114,9 +114,9 @@ static void local_release(struct kref *ref)
        nfc_llcp_socket_release(local, false);
        del_timer_sync(&local->link_timer);
        skb_queue_purge(&local->tx_queue);
-       destroy_workqueue(local->tx_wq);
-       destroy_workqueue(local->rx_wq);
-       destroy_workqueue(local->timeout_wq);
+       cancel_work_sync(&local->tx_work);
+       cancel_work_sync(&local->rx_work);
+       cancel_work_sync(&local->timeout_work);
        kfree_skb(local->rx_pending);
        kfree(local);
 }
@@ -181,7 +181,7 @@ static void nfc_llcp_symm_timer(unsigned long data)
 
        pr_err("SYMM timeout\n");
 
-       queue_work(local->timeout_wq, &local->timeout_work);
+       schedule_work(&local->timeout_work);
 }
 
 struct nfc_llcp_local *nfc_llcp_find_local(struct nfc_dev *dev)
@@ -426,6 +426,7 @@ static int nfc_llcp_build_gb(struct nfc_llcp_local *local)
        u8 *miux_tlv, miux_length;
        __be16 miux;
        u8 gb_len = 0;
+       int ret = 0;
 
        version = LLCP_VERSION_11;
        version_tlv = nfc_llcp_build_tlv(LLCP_TLV_VERSION, &version,
@@ -450,8 +451,8 @@ static int nfc_llcp_build_gb(struct nfc_llcp_local *local)
        gb_len += ARRAY_SIZE(llcp_magic);
 
        if (gb_len > NFC_MAX_GT_LEN) {
-               kfree(version_tlv);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto out;
        }
 
        gb_cur = local->gb;
@@ -471,12 +472,15 @@ static int nfc_llcp_build_gb(struct nfc_llcp_local *local)
        memcpy(gb_cur, miux_tlv, miux_length);
        gb_cur += miux_length;
 
+       local->gb_len = gb_len;
+
+out:
        kfree(version_tlv);
        kfree(lto_tlv);
+       kfree(wks_tlv);
+       kfree(miux_tlv);
 
-       local->gb_len = gb_len;
-
-       return 0;
+       return ret;
 }
 
 u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, size_t *general_bytes_len)
@@ -554,6 +558,46 @@ static void nfc_llcp_set_nrns(struct nfc_llcp_sock *sock, struct sk_buff *pdu)
        sock->recv_ack_n = (sock->recv_n - 1) % 16;
 }
 
+void nfc_llcp_send_to_raw_sock(struct nfc_llcp_local *local,
+                              struct sk_buff *skb, u8 direction)
+{
+       struct hlist_node *node;
+       struct sk_buff *skb_copy = NULL, *nskb;
+       struct sock *sk;
+       u8 *data;
+
+       read_lock(&local->raw_sockets.lock);
+
+       sk_for_each(sk, node, &local->raw_sockets.head) {
+               if (sk->sk_state != LLCP_BOUND)
+                       continue;
+
+               if (skb_copy == NULL) {
+                       skb_copy = __pskb_copy(skb, NFC_LLCP_RAW_HEADER_SIZE,
+                                              GFP_ATOMIC);
+
+                       if (skb_copy == NULL)
+                               continue;
+
+                       data = skb_push(skb_copy, NFC_LLCP_RAW_HEADER_SIZE);
+
+                       data[0] = local->dev ? local->dev->idx : 0xFF;
+                       data[1] = direction;
+               }
+
+               nskb = skb_clone(skb_copy, GFP_ATOMIC);
+               if (!nskb)
+                       continue;
+
+               if (sock_queue_rcv_skb(sk, nskb))
+                       kfree_skb(nskb);
+       }
+
+       read_unlock(&local->raw_sockets.lock);
+
+       kfree_skb(skb_copy);
+}
+
 static void nfc_llcp_tx_work(struct work_struct *work)
 {
        struct nfc_llcp_local *local = container_of(work, struct nfc_llcp_local,
@@ -574,6 +618,9 @@ static void nfc_llcp_tx_work(struct work_struct *work)
                                       DUMP_PREFIX_OFFSET, 16, 1,
                                       skb->data, skb->len, true);
 
+                       nfc_llcp_send_to_raw_sock(local, skb,
+                                                 NFC_LLCP_DIRECTION_TX);
+
                        ret = nfc_data_exchange(local->dev, local->target_idx,
                                                skb, nfc_llcp_recv, local);
 
@@ -1018,6 +1065,8 @@ static void nfc_llcp_rx_work(struct work_struct *work)
                print_hex_dump(KERN_DEBUG, "LLCP Rx: ", DUMP_PREFIX_OFFSET,
                               16, 1, skb->data, skb->len, true);
 
+       nfc_llcp_send_to_raw_sock(local, skb, NFC_LLCP_DIRECTION_RX);
+
        switch (ptype) {
        case LLCP_PDU_SYMM:
                pr_debug("SYMM\n");
@@ -1052,7 +1101,7 @@ static void nfc_llcp_rx_work(struct work_struct *work)
 
        }
 
-       queue_work(local->tx_wq, &local->tx_work);
+       schedule_work(&local->tx_work);
        kfree_skb(local->rx_pending);
        local->rx_pending = NULL;
 
@@ -1071,7 +1120,7 @@ void nfc_llcp_recv(void *data, struct sk_buff *skb, int err)
 
        local->rx_pending = skb_get(skb);
        del_timer(&local->link_timer);
-       queue_work(local->rx_wq, &local->rx_work);
+       schedule_work(&local->rx_work);
 
        return;
 }
@@ -1086,7 +1135,7 @@ int nfc_llcp_data_received(struct nfc_dev *dev, struct sk_buff *skb)
 
        local->rx_pending = skb_get(skb);
        del_timer(&local->link_timer);
-       queue_work(local->rx_wq, &local->rx_work);
+       schedule_work(&local->rx_work);
 
        return 0;
 }
@@ -1121,7 +1170,7 @@ void nfc_llcp_mac_is_up(struct nfc_dev *dev, u32 target_idx,
        if (rf_mode == NFC_RF_INITIATOR) {
                pr_debug("Queueing Tx work\n");
 
-               queue_work(local->tx_wq, &local->tx_work);
+               schedule_work(&local->tx_work);
        } else {
                mod_timer(&local->link_timer,
                          jiffies + msecs_to_jiffies(local->remote_lto));
@@ -1130,10 +1179,7 @@ void nfc_llcp_mac_is_up(struct nfc_dev *dev, u32 target_idx,
 
 int nfc_llcp_register_device(struct nfc_dev *ndev)
 {
-       struct device *dev = &ndev->dev;
        struct nfc_llcp_local *local;
-       char name[32];
-       int err;
 
        local = kzalloc(sizeof(struct nfc_llcp_local), GFP_KERNEL);
        if (local == NULL)
@@ -1149,41 +1195,15 @@ int nfc_llcp_register_device(struct nfc_dev *ndev)
 
        skb_queue_head_init(&local->tx_queue);
        INIT_WORK(&local->tx_work, nfc_llcp_tx_work);
-       snprintf(name, sizeof(name), "%s_llcp_tx_wq", dev_name(dev));
-       local->tx_wq =
-               alloc_workqueue(name,
-                               WQ_NON_REENTRANT | WQ_UNBOUND | WQ_MEM_RECLAIM,
-                               1);
-       if (local->tx_wq == NULL) {
-               err = -ENOMEM;
-               goto err_local;
-       }
 
        local->rx_pending = NULL;
        INIT_WORK(&local->rx_work, nfc_llcp_rx_work);
-       snprintf(name, sizeof(name), "%s_llcp_rx_wq", dev_name(dev));
-       local->rx_wq =
-               alloc_workqueue(name,
-                               WQ_NON_REENTRANT | WQ_UNBOUND | WQ_MEM_RECLAIM,
-                               1);
-       if (local->rx_wq == NULL) {
-               err = -ENOMEM;
-               goto err_tx_wq;
-       }
 
        INIT_WORK(&local->timeout_work, nfc_llcp_timeout_work);
-       snprintf(name, sizeof(name), "%s_llcp_timeout_wq", dev_name(dev));
-       local->timeout_wq =
-               alloc_workqueue(name,
-                               WQ_NON_REENTRANT | WQ_UNBOUND | WQ_MEM_RECLAIM,
-                               1);
-       if (local->timeout_wq == NULL) {
-               err = -ENOMEM;
-               goto err_rx_wq;
-       }
 
-       local->sockets.lock = __RW_LOCK_UNLOCKED(local->sockets.lock);
-       local->connecting_sockets.lock = __RW_LOCK_UNLOCKED(local->connecting_sockets.lock);
+       rwlock_init(&local->sockets.lock);
+       rwlock_init(&local->connecting_sockets.lock);
+       rwlock_init(&local->raw_sockets.lock);
 
        nfc_llcp_build_gb(local);
 
@@ -1192,17 +1212,6 @@ int nfc_llcp_register_device(struct nfc_dev *ndev)
 
        list_add(&llcp_devices, &local->list);
 
-       return 0;
-
-err_rx_wq:
-       destroy_workqueue(local->rx_wq);
-
-err_tx_wq:
-       destroy_workqueue(local->tx_wq);
-
-err_local:
-       kfree(local);
-
        return 0;
 }
 
index 83b8bba5a2803d2cc4bafb05d7e553221434ae0e..fdb2d24e60bda5fe85d8225c38b25a1caed7c0de 100644 (file)
@@ -56,12 +56,9 @@ struct nfc_llcp_local {
 
        struct timer_list link_timer;
        struct sk_buff_head tx_queue;
-       struct workqueue_struct *tx_wq;
        struct work_struct       tx_work;
-       struct workqueue_struct *rx_wq;
        struct work_struct       rx_work;
        struct sk_buff *rx_pending;
-       struct workqueue_struct *timeout_wq;
        struct work_struct       timeout_work;
 
        u32 target_idx;
@@ -89,6 +86,7 @@ struct nfc_llcp_local {
        /* sockets array */
        struct llcp_sock_list sockets;
        struct llcp_sock_list connecting_sockets;
+       struct llcp_sock_list raw_sockets;
 };
 
 struct nfc_llcp_sock {
@@ -187,6 +185,8 @@ u8 nfc_llcp_get_sdp_ssap(struct nfc_llcp_local *local,
 u8 nfc_llcp_get_local_ssap(struct nfc_llcp_local *local);
 void nfc_llcp_put_ssap(struct nfc_llcp_local *local, u8 ssap);
 int nfc_llcp_queue_i_frames(struct nfc_llcp_sock *sock);
+void nfc_llcp_send_to_raw_sock(struct nfc_llcp_local *local,
+                              struct sk_buff *skb, u8 direction);
 
 /* Sock API */
 struct sock *nfc_llcp_sock_alloc(struct socket *sock, int type, gfp_t gfp);
index ddeb9aa398f0ced280fed93b853319923cf43c69..40f056debf9aaecd1fc7993a186a21230a111856 100644 (file)
@@ -142,6 +142,60 @@ error:
        return ret;
 }
 
+static int llcp_raw_sock_bind(struct socket *sock, struct sockaddr *addr,
+                             int alen)
+{
+       struct sock *sk = sock->sk;
+       struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk);
+       struct nfc_llcp_local *local;
+       struct nfc_dev *dev;
+       struct sockaddr_nfc_llcp llcp_addr;
+       int len, ret = 0;
+
+       if (!addr || addr->sa_family != AF_NFC)
+               return -EINVAL;
+
+       pr_debug("sk %p addr %p family %d\n", sk, addr, addr->sa_family);
+
+       memset(&llcp_addr, 0, sizeof(llcp_addr));
+       len = min_t(unsigned int, sizeof(llcp_addr), alen);
+       memcpy(&llcp_addr, addr, len);
+
+       lock_sock(sk);
+
+       if (sk->sk_state != LLCP_CLOSED) {
+               ret = -EBADFD;
+               goto error;
+       }
+
+       dev = nfc_get_device(llcp_addr.dev_idx);
+       if (dev == NULL) {
+               ret = -ENODEV;
+               goto error;
+       }
+
+       local = nfc_llcp_find_local(dev);
+       if (local == NULL) {
+               ret = -ENODEV;
+               goto put_dev;
+       }
+
+       llcp_sock->dev = dev;
+       llcp_sock->local = nfc_llcp_local_get(local);
+       llcp_sock->nfc_protocol = llcp_addr.nfc_protocol;
+
+       nfc_llcp_sock_link(&local->raw_sockets, sk);
+
+       sk->sk_state = LLCP_BOUND;
+
+put_dev:
+       nfc_put_device(dev);
+
+error:
+       release_sock(sk);
+       return ret;
+}
+
 static int llcp_sock_listen(struct socket *sock, int backlog)
 {
        struct sock *sk = sock->sk;
@@ -300,9 +354,6 @@ static int llcp_sock_getname(struct socket *sock, struct sockaddr *uaddr,
        pr_debug("%p %d %d %d\n", sk, llcp_sock->target_idx,
                 llcp_sock->dsap, llcp_sock->ssap);
 
-       if (llcp_sock == NULL || llcp_sock->dev == NULL)
-               return -EBADFD;
-
        uaddr->sa_family = AF_NFC;
 
        *len = sizeof(struct sockaddr_nfc_llcp);
@@ -421,7 +472,10 @@ static int llcp_sock_release(struct socket *sock)
 
        release_sock(sk);
 
-       nfc_llcp_sock_unlink(&local->sockets, sk);
+       if (sock->type == SOCK_RAW)
+               nfc_llcp_sock_unlink(&local->raw_sockets, sk);
+       else
+               nfc_llcp_sock_unlink(&local->sockets, sk);
 
 out:
        sock_orphan(sk);
@@ -617,7 +671,7 @@ static int llcp_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
        if (!(flags & MSG_PEEK)) {
 
                /* SOCK_STREAM: re-queue skb if it contains unreceived data */
-               if (sk->sk_type == SOCK_STREAM) {
+               if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_RAW) {
                        skb_pull(skb, copied);
                        if (skb->len) {
                                skb_queue_head(&sk->sk_receive_queue, skb);
@@ -658,6 +712,26 @@ static const struct proto_ops llcp_sock_ops = {
        .mmap           = sock_no_mmap,
 };
 
+static const struct proto_ops llcp_rawsock_ops = {
+       .family         = PF_NFC,
+       .owner          = THIS_MODULE,
+       .bind           = llcp_raw_sock_bind,
+       .connect        = sock_no_connect,
+       .release        = llcp_sock_release,
+       .socketpair     = sock_no_socketpair,
+       .accept         = sock_no_accept,
+       .getname        = llcp_sock_getname,
+       .poll           = llcp_sock_poll,
+       .ioctl          = sock_no_ioctl,
+       .listen         = sock_no_listen,
+       .shutdown       = sock_no_shutdown,
+       .setsockopt     = sock_no_setsockopt,
+       .getsockopt     = sock_no_getsockopt,
+       .sendmsg        = sock_no_sendmsg,
+       .recvmsg        = llcp_sock_recvmsg,
+       .mmap           = sock_no_mmap,
+};
+
 static void llcp_sock_destruct(struct sock *sk)
 {
        struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk);
@@ -735,10 +809,15 @@ static int llcp_sock_create(struct net *net, struct socket *sock,
 
        pr_debug("%p\n", sock);
 
-       if (sock->type != SOCK_STREAM && sock->type != SOCK_DGRAM)
+       if (sock->type != SOCK_STREAM &&
+           sock->type != SOCK_DGRAM &&
+           sock->type != SOCK_RAW)
                return -ESOCKTNOSUPPORT;
 
-       sock->ops = &llcp_sock_ops;
+       if (sock->type == SOCK_RAW)
+               sock->ops = &llcp_rawsock_ops;
+       else
+               sock->ops = &llcp_sock_ops;
 
        sk = nfc_llcp_sock_alloc(sock, sock->type, GFP_ATOMIC);
        if (sk == NULL)
index f81efe13985a71e2bd9c3bc16e0799a750ccee3d..acf9abb7d99badc592592f83b42f4acbb14ef4de 100644 (file)
@@ -176,6 +176,27 @@ static void nci_init_complete_req(struct nci_dev *ndev, unsigned long opt)
                     (1 + ((*num) * sizeof(struct disc_map_config))), &cmd);
 }
 
+struct nci_set_config_param {
+       __u8    id;
+       size_t  len;
+       __u8    *val;
+};
+
+static void nci_set_config_req(struct nci_dev *ndev, unsigned long opt)
+{
+       struct nci_set_config_param *param = (struct nci_set_config_param *)opt;
+       struct nci_core_set_config_cmd cmd;
+
+       BUG_ON(param->len > NCI_MAX_PARAM_LEN);
+
+       cmd.num_params = 1;
+       cmd.param.id = param->id;
+       cmd.param.len = param->len;
+       memcpy(cmd.param.val, param->val, param->len);
+
+       nci_send_cmd(ndev, NCI_OP_CORE_SET_CONFIG_CMD, (3 + param->len), &cmd);
+}
+
 static void nci_rf_discover_req(struct nci_dev *ndev, unsigned long opt)
 {
        struct nci_rf_disc_cmd cmd;
@@ -388,6 +409,32 @@ static int nci_dev_down(struct nfc_dev *nfc_dev)
        return nci_close_device(ndev);
 }
 
+static int nci_set_local_general_bytes(struct nfc_dev *nfc_dev)
+{
+       struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
+       struct nci_set_config_param param;
+       __u8 local_gb[NFC_MAX_GT_LEN];
+       int i, rc = 0;
+
+       param.val = nfc_get_local_general_bytes(nfc_dev, &param.len);
+       if ((param.val == NULL) || (param.len == 0))
+               return rc;
+
+       if (param.len > NCI_MAX_PARAM_LEN)
+               return -EINVAL;
+
+       for (i = 0; i < param.len; i++)
+               local_gb[param.len-1-i] = param.val[i];
+
+       param.id = NCI_PN_ATR_REQ_GEN_BYTES;
+       param.val = local_gb;
+
+       rc = nci_request(ndev, nci_set_config_req, (unsigned long)&param,
+                        msecs_to_jiffies(NCI_SET_CONFIG_TIMEOUT));
+
+       return rc;
+}
+
 static int nci_start_poll(struct nfc_dev *nfc_dev,
                          __u32 im_protocols, __u32 tm_protocols)
 {
@@ -415,6 +462,14 @@ static int nci_start_poll(struct nfc_dev *nfc_dev,
                        return -EBUSY;
        }
 
+       if (im_protocols & NFC_PROTO_NFC_DEP_MASK) {
+               rc = nci_set_local_general_bytes(nfc_dev);
+               if (rc) {
+                       pr_err("failed to set local general bytes\n");
+                       return rc;
+               }
+       }
+
        rc = nci_request(ndev, nci_rf_discover_req, im_protocols,
                         msecs_to_jiffies(NCI_RF_DISC_TIMEOUT));
 
@@ -509,7 +564,7 @@ static void nci_deactivate_target(struct nfc_dev *nfc_dev,
 {
        struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
 
-       pr_debug("target_idx %d\n", target->idx);
+       pr_debug("entry\n");
 
        if (!ndev->target_active_prot) {
                pr_err("unable to deactivate target, no active target\n");
@@ -524,6 +579,38 @@ static void nci_deactivate_target(struct nfc_dev *nfc_dev,
        }
 }
 
+
+static int nci_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target,
+                          __u8 comm_mode, __u8 *gb, size_t gb_len)
+{
+       struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
+       int rc;
+
+       pr_debug("target_idx %d, comm_mode %d\n", target->idx, comm_mode);
+
+       rc = nci_activate_target(nfc_dev, target, NFC_PROTO_NFC_DEP);
+       if (rc)
+               return rc;
+
+       rc = nfc_set_remote_general_bytes(nfc_dev, ndev->remote_gb,
+                                         ndev->remote_gb_len);
+       if (!rc)
+               rc = nfc_dep_link_is_up(nfc_dev, target->idx, NFC_COMM_PASSIVE,
+                                       NFC_RF_INITIATOR);
+
+       return rc;
+}
+
+static int nci_dep_link_down(struct nfc_dev *nfc_dev)
+{
+       pr_debug("entry\n");
+
+       nci_deactivate_target(nfc_dev, NULL);
+
+       return 0;
+}
+
+
 static int nci_transceive(struct nfc_dev *nfc_dev, struct nfc_target *target,
                          struct sk_buff *skb,
                          data_exchange_cb_t cb, void *cb_context)
@@ -557,6 +644,8 @@ static struct nfc_ops nci_nfc_ops = {
        .dev_down = nci_dev_down,
        .start_poll = nci_start_poll,
        .stop_poll = nci_stop_poll,
+       .dep_link_up = nci_dep_link_up,
+       .dep_link_down = nci_dep_link_down,
        .activate_target = nci_activate_target,
        .deactivate_target = nci_deactivate_target,
        .im_transceive = nci_transceive,
index af7a93b04393a1ff3cbc088390f4972592dd3b1c..b2aa98ef0927cb5760f72fb40d016a0b1dcf5575 100644 (file)
@@ -176,6 +176,8 @@ static int nci_add_new_protocol(struct nci_dev *ndev,
                        protocol = NFC_PROTO_ISO14443_B_MASK;
        else if (rf_protocol == NCI_RF_PROTOCOL_T3T)
                protocol = NFC_PROTO_FELICA_MASK;
+       else if (rf_protocol == NCI_RF_PROTOCOL_NFC_DEP)
+               protocol = NFC_PROTO_NFC_DEP_MASK;
        else
                protocol = 0;
 
@@ -361,6 +363,33 @@ static int nci_extract_activation_params_iso_dep(struct nci_dev *ndev,
        return NCI_STATUS_OK;
 }
 
+static int nci_extract_activation_params_nfc_dep(struct nci_dev *ndev,
+                       struct nci_rf_intf_activated_ntf *ntf, __u8 *data)
+{
+       struct activation_params_poll_nfc_dep *poll;
+       int i;
+
+       switch (ntf->activation_rf_tech_and_mode) {
+       case NCI_NFC_A_PASSIVE_POLL_MODE:
+       case NCI_NFC_F_PASSIVE_POLL_MODE:
+               poll = &ntf->activation_params.poll_nfc_dep;
+               poll->atr_res_len = min_t(__u8, *data++, 63);
+               pr_debug("atr_res_len %d\n", poll->atr_res_len);
+               if (poll->atr_res_len > 0) {
+                       for (i = 0; i < poll->atr_res_len; i++)
+                               poll->atr_res[poll->atr_res_len-1-i] = data[i];
+               }
+               break;
+
+       default:
+               pr_err("unsupported activation_rf_tech_and_mode 0x%x\n",
+                      ntf->activation_rf_tech_and_mode);
+               return NCI_STATUS_RF_PROTOCOL_ERROR;
+       }
+
+       return NCI_STATUS_OK;
+}
+
 static void nci_target_auto_activated(struct nci_dev *ndev,
                                      struct nci_rf_intf_activated_ntf *ntf)
 {
@@ -454,6 +483,11 @@ static void nci_rf_intf_activated_ntf_packet(struct nci_dev *ndev,
                                                                    &ntf, data);
                        break;
 
+               case NCI_RF_INTERFACE_NFC_DEP:
+                       err = nci_extract_activation_params_nfc_dep(ndev,
+                                                                   &ntf, data);
+                       break;
+
                case NCI_RF_INTERFACE_FRAME:
                        /* no activation params */
                        break;
@@ -473,6 +507,24 @@ exit:
 
                /* set the available credits to initial value */
                atomic_set(&ndev->credits_cnt, ndev->initial_num_credits);
+
+               /* store general bytes to be reported later in dep_link_up */
+               if (ntf.rf_interface == NCI_RF_INTERFACE_NFC_DEP) {
+                       ndev->remote_gb_len = 0;
+
+                       if (ntf.activation_params_len > 0) {
+                               /* ATR_RES general bytes at offset 15 */
+                               ndev->remote_gb_len = min_t(__u8,
+                                       (ntf.activation_params
+                                       .poll_nfc_dep.atr_res_len
+                                       - NFC_ATR_RES_GT_OFFSET),
+                                       NFC_MAX_GT_LEN);
+                               memcpy(ndev->remote_gb,
+                                      (ntf.activation_params.poll_nfc_dep
+                                      .atr_res + NFC_ATR_RES_GT_OFFSET),
+                                      ndev->remote_gb_len);
+                       }
+               }
        }
 
        if (atomic_read(&ndev->state) == NCI_DISCOVERY) {
index 3003c3390e492c18907cc7f5076949439dd7273d..dd072f38ad00f5241d2cdc1299965d8bcc3966b9 100644 (file)
@@ -119,6 +119,16 @@ exit:
        nci_req_complete(ndev, rsp_1->status);
 }
 
+static void nci_core_set_config_rsp_packet(struct nci_dev *ndev,
+                                          struct sk_buff *skb)
+{
+       struct nci_core_set_config_rsp *rsp = (void *) skb->data;
+
+       pr_debug("status 0x%x\n", rsp->status);
+
+       nci_req_complete(ndev, rsp->status);
+}
+
 static void nci_rf_disc_map_rsp_packet(struct nci_dev *ndev,
                                       struct sk_buff *skb)
 {
@@ -194,6 +204,10 @@ void nci_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
                nci_core_init_rsp_packet(ndev, skb);
                break;
 
+       case NCI_OP_CORE_SET_CONFIG_RSP:
+               nci_core_set_config_rsp_packet(ndev, skb);
+               break;
+
        case NCI_OP_RF_DISCOVER_MAP_RSP:
                nci_rf_disc_map_rsp_packet(ndev, skb);
                break;
index 4c51714ee74177509d6d7831c064e89280424ded..c1b5285cbde79fa6c861106649d57728373c4ba4 100644 (file)
@@ -58,7 +58,7 @@ static int nfc_genl_send_target(struct sk_buff *msg, struct nfc_target *target,
 {
        void *hdr;
 
-       hdr = genlmsg_put(msg, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq,
+       hdr = genlmsg_put(msg, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
                          &nfc_genl_family, flags, NFC_CMD_GET_TARGET);
        if (!hdr)
                return -EMSGSIZE;
@@ -165,7 +165,7 @@ int nfc_genl_targets_found(struct nfc_dev *dev)
        struct sk_buff *msg;
        void *hdr;
 
-       dev->genl_data.poll_req_pid = 0;
+       dev->genl_data.poll_req_portid = 0;
 
        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
        if (!msg)
@@ -347,13 +347,13 @@ free_msg:
 }
 
 static int nfc_genl_send_device(struct sk_buff *msg, struct nfc_dev *dev,
-                               u32 pid, u32 seq,
+                               u32 portid, u32 seq,
                                struct netlink_callback *cb,
                                int flags)
 {
        void *hdr;
 
-       hdr = genlmsg_put(msg, pid, seq, &nfc_genl_family, flags,
+       hdr = genlmsg_put(msg, portid, seq, &nfc_genl_family, flags,
                          NFC_CMD_GET_DEVICE);
        if (!hdr)
                return -EMSGSIZE;
@@ -401,7 +401,7 @@ static int nfc_genl_dump_devices(struct sk_buff *skb,
        while (dev) {
                int rc;
 
-               rc = nfc_genl_send_device(skb, dev, NETLINK_CB(cb->skb).pid,
+               rc = nfc_genl_send_device(skb, dev, NETLINK_CB(cb->skb).portid,
                                          cb->nlh->nlmsg_seq, cb, NLM_F_MULTI);
                if (rc < 0)
                        break;
@@ -520,7 +520,7 @@ static int nfc_genl_get_device(struct sk_buff *skb, struct genl_info *info)
                goto out_putdev;
        }
 
-       rc = nfc_genl_send_device(msg, dev, info->snd_pid, info->snd_seq,
+       rc = nfc_genl_send_device(msg, dev, info->snd_portid, info->snd_seq,
                                  NULL, 0);
        if (rc < 0)
                goto out_free;
@@ -611,7 +611,7 @@ static int nfc_genl_start_poll(struct sk_buff *skb, struct genl_info *info)
 
        rc = nfc_start_poll(dev, im_protocols, tm_protocols);
        if (!rc)
-               dev->genl_data.poll_req_pid = info->snd_pid;
+               dev->genl_data.poll_req_portid = info->snd_portid;
 
        mutex_unlock(&dev->genl_data.genl_data_mutex);
 
@@ -645,13 +645,13 @@ static int nfc_genl_stop_poll(struct sk_buff *skb, struct genl_info *info)
 
        mutex_lock(&dev->genl_data.genl_data_mutex);
 
-       if (dev->genl_data.poll_req_pid != info->snd_pid) {
+       if (dev->genl_data.poll_req_portid != info->snd_portid) {
                rc = -EBUSY;
                goto out;
        }
 
        rc = nfc_stop_poll(dev);
-       dev->genl_data.poll_req_pid = 0;
+       dev->genl_data.poll_req_portid = 0;
 
 out:
        mutex_unlock(&dev->genl_data.genl_data_mutex);
@@ -761,38 +761,70 @@ static struct genl_ops nfc_genl_ops[] = {
        },
 };
 
-static int nfc_genl_rcv_nl_event(struct notifier_block *this,
-                                unsigned long event, void *ptr)
+
+struct urelease_work {
+       struct  work_struct w;
+       int     portid;
+};
+
+static void nfc_urelease_event_work(struct work_struct *work)
 {
-       struct netlink_notify *n = ptr;
+       struct urelease_work *w = container_of(work, struct urelease_work, w);
        struct class_dev_iter iter;
        struct nfc_dev *dev;
 
-       if (event != NETLINK_URELEASE || n->protocol != NETLINK_GENERIC)
-               goto out;
+       pr_debug("portid %d\n", w->portid);
 
-       pr_debug("NETLINK_URELEASE event from id %d\n", n->pid);
+       mutex_lock(&nfc_devlist_mutex);
 
        nfc_device_iter_init(&iter);
        dev = nfc_device_iter_next(&iter);
 
        while (dev) {
-               if (dev->genl_data.poll_req_pid == n->pid) {
+               mutex_lock(&dev->genl_data.genl_data_mutex);
+
+               if (dev->genl_data.poll_req_portid == w->portid) {
                        nfc_stop_poll(dev);
-                       dev->genl_data.poll_req_pid = 0;
+                       dev->genl_data.poll_req_portid = 0;
                }
+
+               mutex_unlock(&dev->genl_data.genl_data_mutex);
+
                dev = nfc_device_iter_next(&iter);
        }
 
        nfc_device_iter_exit(&iter);
 
+       mutex_unlock(&nfc_devlist_mutex);
+
+       kfree(w);
+}
+
+static int nfc_genl_rcv_nl_event(struct notifier_block *this,
+                                unsigned long event, void *ptr)
+{
+       struct netlink_notify *n = ptr;
+       struct urelease_work *w;
+
+       if (event != NETLINK_URELEASE || n->protocol != NETLINK_GENERIC)
+               goto out;
+
+       pr_debug("NETLINK_URELEASE event from id %d\n", n->portid);
+
+       w = kmalloc(sizeof(*w), GFP_ATOMIC);
+       if (w) {
+               INIT_WORK((struct work_struct *) w, nfc_urelease_event_work);
+               w->portid = n->portid;
+               schedule_work((struct work_struct *) w);
+       }
+
 out:
        return NOTIFY_DONE;
 }
 
 void nfc_genl_data_init(struct nfc_genl_data *genl_data)
 {
-       genl_data->poll_req_pid = 0;
+       genl_data->poll_req_portid = 0;
        mutex_init(&genl_data->genl_data_mutex);
 }
 
index 954405ceae9ed5141293d3f47ce7c784aeee3c31..08114478cb853256c3e6f0aeb63d791887513cd8 100644 (file)
@@ -266,7 +266,7 @@ static int do_output(struct datapath *dp, struct sk_buff *skb, int out_port)
        if (unlikely(!skb))
                return -ENOMEM;
 
-       vport = rcu_dereference(dp->ports[out_port]);
+       vport = ovs_vport_rcu(dp, out_port);
        if (unlikely(!vport)) {
                kfree_skb(skb);
                return -ENODEV;
@@ -286,7 +286,7 @@ static int output_userspace(struct datapath *dp, struct sk_buff *skb,
        upcall.cmd = OVS_PACKET_CMD_ACTION;
        upcall.key = &OVS_CB(skb)->flow->key;
        upcall.userdata = NULL;
-       upcall.pid = 0;
+       upcall.portid = 0;
 
        for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
                 a = nla_next(a, &rem)) {
@@ -296,7 +296,7 @@ static int output_userspace(struct datapath *dp, struct sk_buff *skb,
                        break;
 
                case OVS_USERSPACE_ATTR_PID:
-                       upcall.pid = nla_get_u32(a);
+                       upcall.portid = nla_get_u32(a);
                        break;
                }
        }
index cf58cedad0833f9e9e704401fdecb5480c121caf..4c4b62ccc7d745bf9ba3298f17c2417b28c563b1 100644 (file)
 #include <linux/dmi.h>
 #include <linux/workqueue.h>
 #include <net/genetlink.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
 
 #include "datapath.h"
 #include "flow.h"
 #include "vport-internal_dev.h"
 
+/**
+ * struct ovs_net - Per net-namespace data for ovs.
+ * @dps: List of datapaths to enable dumping them all out.
+ * Protected by genl_mutex.
+ */
+struct ovs_net {
+       struct list_head dps;
+};
+
+static int ovs_net_id __read_mostly;
+
+#define REHASH_FLOW_INTERVAL (10 * 60 * HZ)
+static void rehash_flow_table(struct work_struct *work);
+static DECLARE_DELAYED_WORK(rehash_flow_wq, rehash_flow_table);
+
 /**
  * DOC: Locking:
  *
  * each other.
  */
 
-/* Global list of datapaths to enable dumping them all out.
- * Protected by genl_mutex.
- */
-static LIST_HEAD(dps);
-
-#define REHASH_FLOW_INTERVAL (10 * 60 * HZ)
-static void rehash_flow_table(struct work_struct *work);
-static DECLARE_DELAYED_WORK(rehash_flow_wq, rehash_flow_table);
-
 static struct vport *new_vport(const struct vport_parms *);
-static int queue_gso_packets(int dp_ifindex, struct sk_buff *,
+static int queue_gso_packets(struct net *, int dp_ifindex, struct sk_buff *,
                             const struct dp_upcall_info *);
-static int queue_userspace_packet(int dp_ifindex, struct sk_buff *,
+static int queue_userspace_packet(struct net *, int dp_ifindex,
+                                 struct sk_buff *,
                                  const struct dp_upcall_info *);
 
 /* Must be called with rcu_read_lock, genl_mutex, or RTNL lock. */
-static struct datapath *get_dp(int dp_ifindex)
+static struct datapath *get_dp(struct net *net, int dp_ifindex)
 {
        struct datapath *dp = NULL;
        struct net_device *dev;
 
        rcu_read_lock();
-       dev = dev_get_by_index_rcu(&init_net, dp_ifindex);
+       dev = dev_get_by_index_rcu(net, dp_ifindex);
        if (dev) {
                struct vport *vport = ovs_internal_dev_get_vport(dev);
                if (vport)
@@ -107,7 +116,7 @@ static struct datapath *get_dp(int dp_ifindex)
 /* Must be called with rcu_read_lock or RTNL lock. */
 const char *ovs_dp_name(const struct datapath *dp)
 {
-       struct vport *vport = rcu_dereference_rtnl(dp->ports[OVSP_LOCAL]);
+       struct vport *vport = ovs_vport_rtnl_rcu(dp, OVSP_LOCAL);
        return vport->ops->get_name(vport);
 }
 
@@ -118,7 +127,7 @@ static int get_dpifindex(struct datapath *dp)
 
        rcu_read_lock();
 
-       local = rcu_dereference(dp->ports[OVSP_LOCAL]);
+       local = ovs_vport_rcu(dp, OVSP_LOCAL);
        if (local)
                ifindex = local->ops->get_ifindex(local);
        else
@@ -135,9 +144,31 @@ static void destroy_dp_rcu(struct rcu_head *rcu)
 
        ovs_flow_tbl_destroy((__force struct flow_table *)dp->table);
        free_percpu(dp->stats_percpu);
+       release_net(ovs_dp_get_net(dp));
+       kfree(dp->ports);
        kfree(dp);
 }
 
+static struct hlist_head *vport_hash_bucket(const struct datapath *dp,
+                                           u16 port_no)
+{
+       return &dp->ports[port_no & (DP_VPORT_HASH_BUCKETS - 1)];
+}
+
+struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no)
+{
+       struct vport *vport;
+       struct hlist_node *n;
+       struct hlist_head *head;
+
+       head = vport_hash_bucket(dp, port_no);
+       hlist_for_each_entry_rcu(vport, n, head, dp_hash_node) {
+               if (vport->port_no == port_no)
+                       return vport;
+       }
+       return NULL;
+}
+
 /* Called with RTNL lock and genl_lock. */
 static struct vport *new_vport(const struct vport_parms *parms)
 {
@@ -146,9 +177,9 @@ static struct vport *new_vport(const struct vport_parms *parms)
        vport = ovs_vport_add(parms);
        if (!IS_ERR(vport)) {
                struct datapath *dp = parms->dp;
+               struct hlist_head *head = vport_hash_bucket(dp, vport->port_no);
 
-               rcu_assign_pointer(dp->ports[parms->port_no], vport);
-               list_add(&vport->node, &dp->port_list);
+               hlist_add_head_rcu(&vport->dp_hash_node, head);
        }
 
        return vport;
@@ -160,8 +191,7 @@ void ovs_dp_detach_port(struct vport *p)
        ASSERT_RTNL();
 
        /* First drop references to device. */
-       list_del(&p->node);
-       rcu_assign_pointer(p->dp->ports[p->port_no], NULL);
+       hlist_del_rcu(&p->dp_hash_node);
 
        /* Then destroy it. */
        ovs_vport_del(p);
@@ -195,7 +225,7 @@ void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
                upcall.cmd = OVS_PACKET_CMD_MISS;
                upcall.key = &key;
                upcall.userdata = NULL;
-               upcall.pid = p->upcall_pid;
+               upcall.portid = p->upcall_portid;
                ovs_dp_upcall(dp, skb, &upcall);
                consume_skb(skb);
                stats_counter = &stats->n_missed;
@@ -220,17 +250,18 @@ static struct genl_family dp_packet_genl_family = {
        .hdrsize = sizeof(struct ovs_header),
        .name = OVS_PACKET_FAMILY,
        .version = OVS_PACKET_VERSION,
-       .maxattr = OVS_PACKET_ATTR_MAX
+       .maxattr = OVS_PACKET_ATTR_MAX,
+       .netnsok = true
 };
 
 int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
-             const struct dp_upcall_info *upcall_info)
+                 const struct dp_upcall_info *upcall_info)
 {
        struct dp_stats_percpu *stats;
        int dp_ifindex;
        int err;
 
-       if (upcall_info->pid == 0) {
+       if (upcall_info->portid == 0) {
                err = -ENOTCONN;
                goto err;
        }
@@ -242,9 +273,9 @@ int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
        }
 
        if (!skb_is_gso(skb))
-               err = queue_userspace_packet(dp_ifindex, skb, upcall_info);
+               err = queue_userspace_packet(ovs_dp_get_net(dp), dp_ifindex, skb, upcall_info);
        else
-               err = queue_gso_packets(dp_ifindex, skb, upcall_info);
+               err = queue_gso_packets(ovs_dp_get_net(dp), dp_ifindex, skb, upcall_info);
        if (err)
                goto err;
 
@@ -260,7 +291,8 @@ err:
        return err;
 }
 
-static int queue_gso_packets(int dp_ifindex, struct sk_buff *skb,
+static int queue_gso_packets(struct net *net, int dp_ifindex,
+                            struct sk_buff *skb,
                             const struct dp_upcall_info *upcall_info)
 {
        unsigned short gso_type = skb_shinfo(skb)->gso_type;
@@ -276,7 +308,7 @@ static int queue_gso_packets(int dp_ifindex, struct sk_buff *skb,
        /* Queue all of the segments. */
        skb = segs;
        do {
-               err = queue_userspace_packet(dp_ifindex, skb, upcall_info);
+               err = queue_userspace_packet(net, dp_ifindex, skb, upcall_info);
                if (err)
                        break;
 
@@ -306,7 +338,8 @@ static int queue_gso_packets(int dp_ifindex, struct sk_buff *skb,
        return err;
 }
 
-static int queue_userspace_packet(int dp_ifindex, struct sk_buff *skb,
+static int queue_userspace_packet(struct net *net, int dp_ifindex,
+                                 struct sk_buff *skb,
                                  const struct dp_upcall_info *upcall_info)
 {
        struct ovs_header *upcall;
@@ -362,7 +395,7 @@ static int queue_userspace_packet(int dp_ifindex, struct sk_buff *skb,
 
        skb_copy_and_csum_dev(skb, nla_data(nla));
 
-       err = genlmsg_unicast(&init_net, user_skb, upcall_info->pid);
+       err = genlmsg_unicast(net, user_skb, upcall_info->portid);
 
 out:
        kfree_skb(nskb);
@@ -370,15 +403,10 @@ out:
 }
 
 /* Called with genl_mutex. */
-static int flush_flows(int dp_ifindex)
+static int flush_flows(struct datapath *dp)
 {
        struct flow_table *old_table;
        struct flow_table *new_table;
-       struct datapath *dp;
-
-       dp = get_dp(dp_ifindex);
-       if (!dp)
-               return -ENODEV;
 
        old_table = genl_dereference(dp->table);
        new_table = ovs_flow_tbl_alloc(TBL_MIN_BUCKETS);
@@ -668,7 +696,7 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
        packet->priority = flow->key.phy.priority;
 
        rcu_read_lock();
-       dp = get_dp(ovs_header->dp_ifindex);
+       dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
        err = -ENODEV;
        if (!dp)
                goto err_unlock;
@@ -742,7 +770,8 @@ static struct genl_family dp_flow_genl_family = {
        .hdrsize = sizeof(struct ovs_header),
        .name = OVS_FLOW_FAMILY,
        .version = OVS_FLOW_VERSION,
-       .maxattr = OVS_FLOW_ATTR_MAX
+       .maxattr = OVS_FLOW_ATTR_MAX,
+       .netnsok = true
 };
 
 static struct genl_multicast_group ovs_dp_flow_multicast_group = {
@@ -751,7 +780,7 @@ static struct genl_multicast_group ovs_dp_flow_multicast_group = {
 
 /* Called with genl_lock. */
 static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
-                                 struct sk_buff *skb, u32 pid,
+                                 struct sk_buff *skb, u32 portid,
                                  u32 seq, u32 flags, u8 cmd)
 {
        const int skb_orig_len = skb->len;
@@ -766,7 +795,7 @@ static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
        sf_acts = rcu_dereference_protected(flow->sf_acts,
                                            lockdep_genl_is_held());
 
-       ovs_header = genlmsg_put(skb, pid, seq, &dp_flow_genl_family, flags, cmd);
+       ovs_header = genlmsg_put(skb, portid, seq, &dp_flow_genl_family, flags, cmd);
        if (!ovs_header)
                return -EMSGSIZE;
 
@@ -850,7 +879,7 @@ static struct sk_buff *ovs_flow_cmd_alloc_info(struct sw_flow *flow)
 
 static struct sk_buff *ovs_flow_cmd_build_info(struct sw_flow *flow,
                                               struct datapath *dp,
-                                              u32 pid, u32 seq, u8 cmd)
+                                              u32 portid, u32 seq, u8 cmd)
 {
        struct sk_buff *skb;
        int retval;
@@ -859,7 +888,7 @@ static struct sk_buff *ovs_flow_cmd_build_info(struct sw_flow *flow,
        if (!skb)
                return ERR_PTR(-ENOMEM);
 
-       retval = ovs_flow_cmd_fill_info(flow, dp, skb, pid, seq, 0, cmd);
+       retval = ovs_flow_cmd_fill_info(flow, dp, skb, portid, seq, 0, cmd);
        BUG_ON(retval < 0);
        return skb;
 }
@@ -894,7 +923,7 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
                goto error;
        }
 
-       dp = get_dp(ovs_header->dp_ifindex);
+       dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
        error = -ENODEV;
        if (!dp)
                goto error;
@@ -941,7 +970,7 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
                flow->hash = ovs_flow_hash(&key, key_len);
                ovs_flow_tbl_insert(table, flow);
 
-               reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid,
+               reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid,
                                                info->snd_seq,
                                                OVS_FLOW_CMD_NEW);
        } else {
@@ -979,7 +1008,7 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
                        ovs_flow_deferred_free_acts(old_acts);
                }
 
-               reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid,
+               reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid,
                                               info->snd_seq, OVS_FLOW_CMD_NEW);
 
                /* Clear stats. */
@@ -991,11 +1020,11 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
        }
 
        if (!IS_ERR(reply))
-               genl_notify(reply, genl_info_net(info), info->snd_pid,
+               genl_notify(reply, genl_info_net(info), info->snd_portid,
                           ovs_dp_flow_multicast_group.id, info->nlhdr,
                           GFP_KERNEL);
        else
-               netlink_set_err(init_net.genl_sock, 0,
+               netlink_set_err(sock_net(skb->sk)->genl_sock, 0,
                                ovs_dp_flow_multicast_group.id, PTR_ERR(reply));
        return 0;
 
@@ -1023,7 +1052,7 @@ static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
        if (err)
                return err;
 
-       dp = get_dp(ovs_header->dp_ifindex);
+       dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
        if (!dp)
                return -ENODEV;
 
@@ -1032,7 +1061,7 @@ static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
        if (!flow)
                return -ENOENT;
 
-       reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid,
+       reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid,
                                        info->snd_seq, OVS_FLOW_CMD_NEW);
        if (IS_ERR(reply))
                return PTR_ERR(reply);
@@ -1052,16 +1081,17 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
        int err;
        int key_len;
 
+       dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
+       if (!dp)
+               return -ENODEV;
+
        if (!a[OVS_FLOW_ATTR_KEY])
-               return flush_flows(ovs_header->dp_ifindex);
+               return flush_flows(dp);
+
        err = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
        if (err)
                return err;
 
-       dp = get_dp(ovs_header->dp_ifindex);
-       if (!dp)
-               return -ENODEV;
-
        table = genl_dereference(dp->table);
        flow = ovs_flow_tbl_lookup(table, &key, key_len);
        if (!flow)
@@ -1073,13 +1103,13 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
 
        ovs_flow_tbl_remove(table, flow);
 
-       err = ovs_flow_cmd_fill_info(flow, dp, reply, info->snd_pid,
+       err = ovs_flow_cmd_fill_info(flow, dp, reply, info->snd_portid,
                                     info->snd_seq, 0, OVS_FLOW_CMD_DEL);
        BUG_ON(err < 0);
 
        ovs_flow_deferred_free(flow);
 
-       genl_notify(reply, genl_info_net(info), info->snd_pid,
+       genl_notify(reply, genl_info_net(info), info->snd_portid,
                    ovs_dp_flow_multicast_group.id, info->nlhdr, GFP_KERNEL);
        return 0;
 }
@@ -1090,7 +1120,7 @@ static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
        struct datapath *dp;
        struct flow_table *table;
 
-       dp = get_dp(ovs_header->dp_ifindex);
+       dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
        if (!dp)
                return -ENODEV;
 
@@ -1107,7 +1137,7 @@ static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
                        break;
 
                if (ovs_flow_cmd_fill_info(flow, dp, skb,
-                                          NETLINK_CB(cb->skb).pid,
+                                          NETLINK_CB(cb->skb).portid,
                                           cb->nlh->nlmsg_seq, NLM_F_MULTI,
                                           OVS_FLOW_CMD_NEW) < 0)
                        break;
@@ -1152,7 +1182,8 @@ static struct genl_family dp_datapath_genl_family = {
        .hdrsize = sizeof(struct ovs_header),
        .name = OVS_DATAPATH_FAMILY,
        .version = OVS_DATAPATH_VERSION,
-       .maxattr = OVS_DP_ATTR_MAX
+       .maxattr = OVS_DP_ATTR_MAX,
+       .netnsok = true
 };
 
 static struct genl_multicast_group ovs_dp_datapath_multicast_group = {
@@ -1160,13 +1191,13 @@ static struct genl_multicast_group ovs_dp_datapath_multicast_group = {
 };
 
 static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
-                               u32 pid, u32 seq, u32 flags, u8 cmd)
+                               u32 portid, u32 seq, u32 flags, u8 cmd)
 {
        struct ovs_header *ovs_header;
        struct ovs_dp_stats dp_stats;
        int err;
 
-       ovs_header = genlmsg_put(skb, pid, seq, &dp_datapath_genl_family,
+       ovs_header = genlmsg_put(skb, portid, seq, &dp_datapath_genl_family,
                                   flags, cmd);
        if (!ovs_header)
                goto error;
@@ -1191,7 +1222,7 @@ error:
        return -EMSGSIZE;
 }
 
-static struct sk_buff *ovs_dp_cmd_build_info(struct datapath *dp, u32 pid,
+static struct sk_buff *ovs_dp_cmd_build_info(struct datapath *dp, u32 portid,
                                             u32 seq, u8 cmd)
 {
        struct sk_buff *skb;
@@ -1201,7 +1232,7 @@ static struct sk_buff *ovs_dp_cmd_build_info(struct datapath *dp, u32 pid,
        if (!skb)
                return ERR_PTR(-ENOMEM);
 
-       retval = ovs_dp_cmd_fill_info(dp, skb, pid, seq, 0, cmd);
+       retval = ovs_dp_cmd_fill_info(dp, skb, portid, seq, 0, cmd);
        if (retval < 0) {
                kfree_skb(skb);
                return ERR_PTR(retval);
@@ -1210,18 +1241,19 @@ static struct sk_buff *ovs_dp_cmd_build_info(struct datapath *dp, u32 pid,
 }
 
 /* Called with genl_mutex and optionally with RTNL lock also. */
-static struct datapath *lookup_datapath(struct ovs_header *ovs_header,
+static struct datapath *lookup_datapath(struct net *net,
+                                       struct ovs_header *ovs_header,
                                        struct nlattr *a[OVS_DP_ATTR_MAX + 1])
 {
        struct datapath *dp;
 
        if (!a[OVS_DP_ATTR_NAME])
-               dp = get_dp(ovs_header->dp_ifindex);
+               dp = get_dp(net, ovs_header->dp_ifindex);
        else {
                struct vport *vport;
 
                rcu_read_lock();
-               vport = ovs_vport_locate(nla_data(a[OVS_DP_ATTR_NAME]));
+               vport = ovs_vport_locate(net, nla_data(a[OVS_DP_ATTR_NAME]));
                dp = vport && vport->port_no == OVSP_LOCAL ? vport->dp : NULL;
                rcu_read_unlock();
        }
@@ -1235,22 +1267,21 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
        struct sk_buff *reply;
        struct datapath *dp;
        struct vport *vport;
-       int err;
+       struct ovs_net *ovs_net;
+       int err, i;
 
        err = -EINVAL;
        if (!a[OVS_DP_ATTR_NAME] || !a[OVS_DP_ATTR_UPCALL_PID])
                goto err;
 
        rtnl_lock();
-       err = -ENODEV;
-       if (!try_module_get(THIS_MODULE))
-               goto err_unlock_rtnl;
 
        err = -ENOMEM;
        dp = kzalloc(sizeof(*dp), GFP_KERNEL);
        if (dp == NULL)
-               goto err_put_module;
-       INIT_LIST_HEAD(&dp->port_list);
+               goto err_unlock_rtnl;
+
+       ovs_dp_set_net(dp, hold_net(sock_net(skb->sk)));
 
        /* Allocate table. */
        err = -ENOMEM;
@@ -1264,13 +1295,23 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
                goto err_destroy_table;
        }
 
+       dp->ports = kmalloc(DP_VPORT_HASH_BUCKETS * sizeof(struct hlist_head),
+                       GFP_KERNEL);
+       if (!dp->ports) {
+               err = -ENOMEM;
+               goto err_destroy_percpu;
+       }
+
+       for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++)
+               INIT_HLIST_HEAD(&dp->ports[i]);
+
        /* Set up our datapath device. */
        parms.name = nla_data(a[OVS_DP_ATTR_NAME]);
        parms.type = OVS_VPORT_TYPE_INTERNAL;
        parms.options = NULL;
        parms.dp = dp;
        parms.port_no = OVSP_LOCAL;
-       parms.upcall_pid = nla_get_u32(a[OVS_DP_ATTR_UPCALL_PID]);
+       parms.upcall_portid = nla_get_u32(a[OVS_DP_ATTR_UPCALL_PID]);
 
        vport = new_vport(&parms);
        if (IS_ERR(vport)) {
@@ -1278,64 +1319,59 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
                if (err == -EBUSY)
                        err = -EEXIST;
 
-               goto err_destroy_percpu;
+               goto err_destroy_ports_array;
        }
 
-       reply = ovs_dp_cmd_build_info(dp, info->snd_pid,
+       reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
                                      info->snd_seq, OVS_DP_CMD_NEW);
        err = PTR_ERR(reply);
        if (IS_ERR(reply))
                goto err_destroy_local_port;
 
-       list_add_tail(&dp->list_node, &dps);
+       ovs_net = net_generic(ovs_dp_get_net(dp), ovs_net_id);
+       list_add_tail(&dp->list_node, &ovs_net->dps);
        rtnl_unlock();
 
-       genl_notify(reply, genl_info_net(info), info->snd_pid,
+       genl_notify(reply, genl_info_net(info), info->snd_portid,
                    ovs_dp_datapath_multicast_group.id, info->nlhdr,
                    GFP_KERNEL);
        return 0;
 
 err_destroy_local_port:
-       ovs_dp_detach_port(rtnl_dereference(dp->ports[OVSP_LOCAL]));
+       ovs_dp_detach_port(ovs_vport_rtnl(dp, OVSP_LOCAL));
+err_destroy_ports_array:
+       kfree(dp->ports);
 err_destroy_percpu:
        free_percpu(dp->stats_percpu);
 err_destroy_table:
        ovs_flow_tbl_destroy(genl_dereference(dp->table));
 err_free_dp:
+       release_net(ovs_dp_get_net(dp));
        kfree(dp);
-err_put_module:
-       module_put(THIS_MODULE);
 err_unlock_rtnl:
        rtnl_unlock();
 err:
        return err;
 }
 
-static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
+/* Called with genl_mutex. */
+static void __dp_destroy(struct datapath *dp)
 {
-       struct vport *vport, *next_vport;
-       struct sk_buff *reply;
-       struct datapath *dp;
-       int err;
+       int i;
 
        rtnl_lock();
-       dp = lookup_datapath(info->userhdr, info->attrs);
-       err = PTR_ERR(dp);
-       if (IS_ERR(dp))
-               goto exit_unlock;
 
-       reply = ovs_dp_cmd_build_info(dp, info->snd_pid,
-                                     info->snd_seq, OVS_DP_CMD_DEL);
-       err = PTR_ERR(reply);
-       if (IS_ERR(reply))
-               goto exit_unlock;
+       for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
+               struct vport *vport;
+               struct hlist_node *node, *n;
 
-       list_for_each_entry_safe(vport, next_vport, &dp->port_list, node)
-               if (vport->port_no != OVSP_LOCAL)
-                       ovs_dp_detach_port(vport);
+               hlist_for_each_entry_safe(vport, node, n, &dp->ports[i], dp_hash_node)
+                       if (vport->port_no != OVSP_LOCAL)
+                               ovs_dp_detach_port(vport);
+       }
 
        list_del(&dp->list_node);
-       ovs_dp_detach_port(rtnl_dereference(dp->ports[OVSP_LOCAL]));
+       ovs_dp_detach_port(ovs_vport_rtnl(dp, OVSP_LOCAL));
 
        /* rtnl_unlock() will wait until all the references to devices that
         * are pending unregistration have been dropped.  We do it here to
@@ -1345,17 +1381,32 @@ static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
        rtnl_unlock();
 
        call_rcu(&dp->rcu, destroy_dp_rcu);
-       module_put(THIS_MODULE);
+}
+
+static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
+{
+       struct sk_buff *reply;
+       struct datapath *dp;
+       int err;
+
+       dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
+       err = PTR_ERR(dp);
+       if (IS_ERR(dp))
+               return err;
 
-       genl_notify(reply, genl_info_net(info), info->snd_pid,
+       reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
+                                     info->snd_seq, OVS_DP_CMD_DEL);
+       err = PTR_ERR(reply);
+       if (IS_ERR(reply))
+               return err;
+
+       __dp_destroy(dp);
+
+       genl_notify(reply, genl_info_net(info), info->snd_portid,
                    ovs_dp_datapath_multicast_group.id, info->nlhdr,
                    GFP_KERNEL);
 
        return 0;
-
-exit_unlock:
-       rtnl_unlock();
-       return err;
 }
 
 static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
@@ -1364,20 +1415,20 @@ static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
        struct datapath *dp;
        int err;
 
-       dp = lookup_datapath(info->userhdr, info->attrs);
+       dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
        if (IS_ERR(dp))
                return PTR_ERR(dp);
 
-       reply = ovs_dp_cmd_build_info(dp, info->snd_pid,
+       reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
                                      info->snd_seq, OVS_DP_CMD_NEW);
        if (IS_ERR(reply)) {
                err = PTR_ERR(reply);
-               netlink_set_err(init_net.genl_sock, 0,
+               netlink_set_err(sock_net(skb->sk)->genl_sock, 0,
                                ovs_dp_datapath_multicast_group.id, err);
                return 0;
        }
 
-       genl_notify(reply, genl_info_net(info), info->snd_pid,
+       genl_notify(reply, genl_info_net(info), info->snd_portid,
                    ovs_dp_datapath_multicast_group.id, info->nlhdr,
                    GFP_KERNEL);
 
@@ -1389,11 +1440,11 @@ static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
        struct sk_buff *reply;
        struct datapath *dp;
 
-       dp = lookup_datapath(info->userhdr, info->attrs);
+       dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
        if (IS_ERR(dp))
                return PTR_ERR(dp);
 
-       reply = ovs_dp_cmd_build_info(dp, info->snd_pid,
+       reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
                                      info->snd_seq, OVS_DP_CMD_NEW);
        if (IS_ERR(reply))
                return PTR_ERR(reply);
@@ -1403,13 +1454,14 @@ static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
 
 static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
 {
+       struct ovs_net *ovs_net = net_generic(sock_net(skb->sk), ovs_net_id);
        struct datapath *dp;
        int skip = cb->args[0];
        int i = 0;
 
-       list_for_each_entry(dp, &dps, list_node) {
+       list_for_each_entry(dp, &ovs_net->dps, list_node) {
                if (i >= skip &&
-                   ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).pid,
+                   ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).portid,
                                         cb->nlh->nlmsg_seq, NLM_F_MULTI,
                                         OVS_DP_CMD_NEW) < 0)
                        break;
@@ -1459,7 +1511,8 @@ static struct genl_family dp_vport_genl_family = {
        .hdrsize = sizeof(struct ovs_header),
        .name = OVS_VPORT_FAMILY,
        .version = OVS_VPORT_VERSION,
-       .maxattr = OVS_VPORT_ATTR_MAX
+       .maxattr = OVS_VPORT_ATTR_MAX,
+       .netnsok = true
 };
 
 struct genl_multicast_group ovs_dp_vport_multicast_group = {
@@ -1468,13 +1521,13 @@ struct genl_multicast_group ovs_dp_vport_multicast_group = {
 
 /* Called with RTNL lock or RCU read lock. */
 static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
-                                  u32 pid, u32 seq, u32 flags, u8 cmd)
+                                  u32 portid, u32 seq, u32 flags, u8 cmd)
 {
        struct ovs_header *ovs_header;
        struct ovs_vport_stats vport_stats;
        int err;
 
-       ovs_header = genlmsg_put(skb, pid, seq, &dp_vport_genl_family,
+       ovs_header = genlmsg_put(skb, portid, seq, &dp_vport_genl_family,
                                 flags, cmd);
        if (!ovs_header)
                return -EMSGSIZE;
@@ -1484,7 +1537,7 @@ static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
        if (nla_put_u32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no) ||
            nla_put_u32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type) ||
            nla_put_string(skb, OVS_VPORT_ATTR_NAME, vport->ops->get_name(vport)) ||
-           nla_put_u32(skb, OVS_VPORT_ATTR_UPCALL_PID, vport->upcall_pid))
+           nla_put_u32(skb, OVS_VPORT_ATTR_UPCALL_PID, vport->upcall_portid))
                goto nla_put_failure;
 
        ovs_vport_get_stats(vport, &vport_stats);
@@ -1506,7 +1559,7 @@ error:
 }
 
 /* Called with RTNL lock or RCU read lock. */
-struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 pid,
+struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 portid,
                                         u32 seq, u8 cmd)
 {
        struct sk_buff *skb;
@@ -1516,7 +1569,7 @@ struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 pid,
        if (!skb)
                return ERR_PTR(-ENOMEM);
 
-       retval = ovs_vport_cmd_fill_info(vport, skb, pid, seq, 0, cmd);
+       retval = ovs_vport_cmd_fill_info(vport, skb, portid, seq, 0, cmd);
        if (retval < 0) {
                kfree_skb(skb);
                return ERR_PTR(retval);
@@ -1525,14 +1578,15 @@ struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 pid,
 }
 
 /* Called with RTNL lock or RCU read lock. */
-static struct vport *lookup_vport(struct ovs_header *ovs_header,
+static struct vport *lookup_vport(struct net *net,
+                                 struct ovs_header *ovs_header,
                                  struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
 {
        struct datapath *dp;
        struct vport *vport;
 
        if (a[OVS_VPORT_ATTR_NAME]) {
-               vport = ovs_vport_locate(nla_data(a[OVS_VPORT_ATTR_NAME]));
+               vport = ovs_vport_locate(net, nla_data(a[OVS_VPORT_ATTR_NAME]));
                if (!vport)
                        return ERR_PTR(-ENODEV);
                if (ovs_header->dp_ifindex &&
@@ -1545,11 +1599,11 @@ static struct vport *lookup_vport(struct ovs_header *ovs_header,
                if (port_no >= DP_MAX_PORTS)
                        return ERR_PTR(-EFBIG);
 
-               dp = get_dp(ovs_header->dp_ifindex);
+               dp = get_dp(net, ovs_header->dp_ifindex);
                if (!dp)
                        return ERR_PTR(-ENODEV);
 
-               vport = rcu_dereference_rtnl(dp->ports[port_no]);
+               vport = ovs_vport_rtnl_rcu(dp, port_no);
                if (!vport)
                        return ERR_PTR(-ENOENT);
                return vport;
@@ -1574,7 +1628,7 @@ static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
                goto exit;
 
        rtnl_lock();
-       dp = get_dp(ovs_header->dp_ifindex);
+       dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
        err = -ENODEV;
        if (!dp)
                goto exit_unlock;
@@ -1586,7 +1640,7 @@ static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
                if (port_no >= DP_MAX_PORTS)
                        goto exit_unlock;
 
-               vport = rtnl_dereference(dp->ports[port_no]);
+               vport = ovs_vport_rtnl_rcu(dp, port_no);
                err = -EBUSY;
                if (vport)
                        goto exit_unlock;
@@ -1596,7 +1650,7 @@ static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
                                err = -EFBIG;
                                goto exit_unlock;
                        }
-                       vport = rtnl_dereference(dp->ports[port_no]);
+                       vport = ovs_vport_rtnl(dp, port_no);
                        if (!vport)
                                break;
                }
@@ -1607,21 +1661,21 @@ static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
        parms.options = a[OVS_VPORT_ATTR_OPTIONS];
        parms.dp = dp;
        parms.port_no = port_no;
-       parms.upcall_pid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);
+       parms.upcall_portid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);
 
        vport = new_vport(&parms);
        err = PTR_ERR(vport);
        if (IS_ERR(vport))
                goto exit_unlock;
 
-       reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
+       reply = ovs_vport_cmd_build_info(vport, info->snd_portid, info->snd_seq,
                                         OVS_VPORT_CMD_NEW);
        if (IS_ERR(reply)) {
                err = PTR_ERR(reply);
                ovs_dp_detach_port(vport);
                goto exit_unlock;
        }
-       genl_notify(reply, genl_info_net(info), info->snd_pid,
+       genl_notify(reply, genl_info_net(info), info->snd_portid,
                    ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
 
 exit_unlock:
@@ -1638,7 +1692,7 @@ static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
        int err;
 
        rtnl_lock();
-       vport = lookup_vport(info->userhdr, a);
+       vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
        err = PTR_ERR(vport);
        if (IS_ERR(vport))
                goto exit_unlock;
@@ -1653,17 +1707,17 @@ static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
        if (err)
                goto exit_unlock;
        if (a[OVS_VPORT_ATTR_UPCALL_PID])
-               vport->upcall_pid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);
+               vport->upcall_portid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);
 
-       reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
+       reply = ovs_vport_cmd_build_info(vport, info->snd_portid, info->snd_seq,
                                         OVS_VPORT_CMD_NEW);
        if (IS_ERR(reply)) {
-               netlink_set_err(init_net.genl_sock, 0,
+               netlink_set_err(sock_net(skb->sk)->genl_sock, 0,
                                ovs_dp_vport_multicast_group.id, PTR_ERR(reply));
                goto exit_unlock;
        }
 
-       genl_notify(reply, genl_info_net(info), info->snd_pid,
+       genl_notify(reply, genl_info_net(info), info->snd_portid,
                    ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
 
 exit_unlock:
@@ -1679,7 +1733,7 @@ static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
        int err;
 
        rtnl_lock();
-       vport = lookup_vport(info->userhdr, a);
+       vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
        err = PTR_ERR(vport);
        if (IS_ERR(vport))
                goto exit_unlock;
@@ -1689,7 +1743,7 @@ static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
                goto exit_unlock;
        }
 
-       reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
+       reply = ovs_vport_cmd_build_info(vport, info->snd_portid, info->snd_seq,
                                         OVS_VPORT_CMD_DEL);
        err = PTR_ERR(reply);
        if (IS_ERR(reply))
@@ -1697,7 +1751,7 @@ static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
 
        ovs_dp_detach_port(vport);
 
-       genl_notify(reply, genl_info_net(info), info->snd_pid,
+       genl_notify(reply, genl_info_net(info), info->snd_portid,
                    ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
 
 exit_unlock:
@@ -1714,12 +1768,12 @@ static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
        int err;
 
        rcu_read_lock();
-       vport = lookup_vport(ovs_header, a);
+       vport = lookup_vport(sock_net(skb->sk), ovs_header, a);
        err = PTR_ERR(vport);
        if (IS_ERR(vport))
                goto exit_unlock;
 
-       reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
+       reply = ovs_vport_cmd_build_info(vport, info->snd_portid, info->snd_seq,
                                         OVS_VPORT_CMD_NEW);
        err = PTR_ERR(reply);
        if (IS_ERR(reply))
@@ -1738,54 +1792,39 @@ static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
 {
        struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
        struct datapath *dp;
-       u32 port_no;
-       int retval;
+       int bucket = cb->args[0], skip = cb->args[1];
+       int i, j = 0;
 
-       dp = get_dp(ovs_header->dp_ifindex);
+       dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
        if (!dp)
                return -ENODEV;
 
        rcu_read_lock();
-       for (port_no = cb->args[0]; port_no < DP_MAX_PORTS; port_no++) {
+       for (i = bucket; i < DP_VPORT_HASH_BUCKETS; i++) {
                struct vport *vport;
-
-               vport = rcu_dereference(dp->ports[port_no]);
-               if (!vport)
-                       continue;
-
-               if (ovs_vport_cmd_fill_info(vport, skb, NETLINK_CB(cb->skb).pid,
-                                           cb->nlh->nlmsg_seq, NLM_F_MULTI,
-                                           OVS_VPORT_CMD_NEW) < 0)
-                       break;
-       }
-       rcu_read_unlock();
-
-       cb->args[0] = port_no;
-       retval = skb->len;
-
-       return retval;
-}
-
-static void rehash_flow_table(struct work_struct *work)
-{
-       struct datapath *dp;
-
-       genl_lock();
-
-       list_for_each_entry(dp, &dps, list_node) {
-               struct flow_table *old_table = genl_dereference(dp->table);
-               struct flow_table *new_table;
-
-               new_table = ovs_flow_tbl_rehash(old_table);
-               if (!IS_ERR(new_table)) {
-                       rcu_assign_pointer(dp->table, new_table);
-                       ovs_flow_tbl_deferred_destroy(old_table);
+               struct hlist_node *n;
+
+               j = 0;
+               hlist_for_each_entry_rcu(vport, n, &dp->ports[i], dp_hash_node) {
+                       if (j >= skip &&
+                           ovs_vport_cmd_fill_info(vport, skb,
+                                                   NETLINK_CB(cb->skb).portid,
+                                                   cb->nlh->nlmsg_seq,
+                                                   NLM_F_MULTI,
+                                                   OVS_VPORT_CMD_NEW) < 0)
+                               goto out;
+
+                       j++;
                }
+               skip = 0;
        }
+out:
+       rcu_read_unlock();
 
-       genl_unlock();
+       cb->args[0] = i;
+       cb->args[1] = j;
 
-       schedule_delayed_work(&rehash_flow_wq, REHASH_FLOW_INTERVAL);
+       return skb->len;
 }
 
 static struct genl_ops dp_vport_genl_ops[] = {
@@ -1872,6 +1911,59 @@ error:
        return err;
 }
 
+static void rehash_flow_table(struct work_struct *work)
+{
+       struct datapath *dp;
+       struct net *net;
+
+       genl_lock();
+       rtnl_lock();
+       for_each_net(net) {
+               struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
+
+               list_for_each_entry(dp, &ovs_net->dps, list_node) {
+                       struct flow_table *old_table = genl_dereference(dp->table);
+                       struct flow_table *new_table;
+
+                       new_table = ovs_flow_tbl_rehash(old_table);
+                       if (!IS_ERR(new_table)) {
+                               rcu_assign_pointer(dp->table, new_table);
+                               ovs_flow_tbl_deferred_destroy(old_table);
+                       }
+               }
+       }
+       rtnl_unlock();
+       genl_unlock();
+
+       schedule_delayed_work(&rehash_flow_wq, REHASH_FLOW_INTERVAL);
+}
+
+static int __net_init ovs_init_net(struct net *net)
+{
+       struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
+
+       INIT_LIST_HEAD(&ovs_net->dps);
+       return 0;
+}
+
+static void __net_exit ovs_exit_net(struct net *net)
+{
+       struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
+       struct datapath *dp, *dp_next;
+
+       genl_lock();
+       list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node)
+               __dp_destroy(dp);
+       genl_unlock();
+}
+
+static struct pernet_operations ovs_net_ops = {
+       .init = ovs_init_net,
+       .exit = ovs_exit_net,
+       .id   = &ovs_net_id,
+       .size = sizeof(struct ovs_net),
+};
+
 static int __init dp_init(void)
 {
        struct sk_buff *dummy_skb;
@@ -1889,10 +1981,14 @@ static int __init dp_init(void)
        if (err)
                goto error_flow_exit;
 
-       err = register_netdevice_notifier(&ovs_dp_device_notifier);
+       err = register_pernet_device(&ovs_net_ops);
        if (err)
                goto error_vport_exit;
 
+       err = register_netdevice_notifier(&ovs_dp_device_notifier);
+       if (err)
+               goto error_netns_exit;
+
        err = dp_register_genl();
        if (err < 0)
                goto error_unreg_notifier;
@@ -1903,6 +1999,8 @@ static int __init dp_init(void)
 
 error_unreg_notifier:
        unregister_netdevice_notifier(&ovs_dp_device_notifier);
+error_netns_exit:
+       unregister_pernet_device(&ovs_net_ops);
 error_vport_exit:
        ovs_vport_exit();
 error_flow_exit:
@@ -1914,9 +2012,10 @@ error:
 static void dp_cleanup(void)
 {
        cancel_delayed_work_sync(&rehash_flow_wq);
-       rcu_barrier();
        dp_unregister_genl(ARRAY_SIZE(dp_genl_families));
        unregister_netdevice_notifier(&ovs_dp_device_notifier);
+       unregister_pernet_device(&ovs_net_ops);
+       rcu_barrier();
        ovs_vport_exit();
        ovs_flow_exit();
 }
index c1105c147531001b65e8cdf75b0ccab2558bae0e..031dfbf37c937dff57657d33d45143d454cc4e46 100644 (file)
 #include <linux/u64_stats_sync.h>
 
 #include "flow.h"
+#include "vport.h"
 
-struct vport;
+#define DP_MAX_PORTS           USHRT_MAX
+#define DP_VPORT_HASH_BUCKETS  1024
 
-#define DP_MAX_PORTS 1024
 #define SAMPLE_ACTION_DEPTH 3
 
 /**
@@ -58,11 +59,10 @@ struct dp_stats_percpu {
  * @list_node: Element in global 'dps' list.
  * @n_flows: Number of flows currently in flow table.
  * @table: Current flow table.  Protected by genl_lock and RCU.
- * @ports: Map from port number to &struct vport.  %OVSP_LOCAL port
- * always exists, other ports may be %NULL.  Protected by RTNL and RCU.
- * @port_list: List of all ports in @ports in arbitrary order.  RTNL required
- * to iterate or modify.
+ * @ports: Hash table for ports.  %OVSP_LOCAL port always exists.  Protected by
+ * RTNL and RCU.
  * @stats_percpu: Per-CPU datapath statistics.
+ * @net: Reference to net namespace.
  *
  * Context: See the comment on locking at the top of datapath.c for additional
  * locking information.
@@ -75,13 +75,37 @@ struct datapath {
        struct flow_table __rcu *table;
 
        /* Switch ports. */
-       struct vport __rcu *ports[DP_MAX_PORTS];
-       struct list_head port_list;
+       struct hlist_head *ports;
 
        /* Stats. */
        struct dp_stats_percpu __percpu *stats_percpu;
+
+#ifdef CONFIG_NET_NS
+       /* Network namespace ref. */
+       struct net *net;
+#endif
 };
 
+struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no);
+
+static inline struct vport *ovs_vport_rcu(const struct datapath *dp, int port_no)
+{
+       WARN_ON_ONCE(!rcu_read_lock_held());
+       return ovs_lookup_vport(dp, port_no);
+}
+
+static inline struct vport *ovs_vport_rtnl_rcu(const struct datapath *dp, int port_no)
+{
+       WARN_ON_ONCE(!rcu_read_lock_held() && !rtnl_is_locked());
+       return ovs_lookup_vport(dp, port_no);
+}
+
+static inline struct vport *ovs_vport_rtnl(const struct datapath *dp, int port_no)
+{
+       ASSERT_RTNL();
+       return ovs_lookup_vport(dp, port_no);
+}
+
 /**
  * struct ovs_skb_cb - OVS data in skb CB
  * @flow: The flow associated with this packet.  May be %NULL if no flow.
@@ -105,9 +129,19 @@ struct dp_upcall_info {
        u8 cmd;
        const struct sw_flow_key *key;
        const struct nlattr *userdata;
-       u32 pid;
+       u32 portid;
 };
 
+static inline struct net *ovs_dp_get_net(struct datapath *dp)
+{
+       return read_pnet(&dp->net);
+}
+
+static inline void ovs_dp_set_net(struct datapath *dp, struct net *net)
+{
+       write_pnet(&dp->net, net);
+}
+
 extern struct notifier_block ovs_dp_device_notifier;
 extern struct genl_multicast_group ovs_dp_vport_multicast_group;
 
index 36dcee8fc84a27dc29785e9a1adfecb8f021ee57..5558350e0d33ee4f9b26f8be13801a6139fb7aa8 100644 (file)
@@ -41,19 +41,21 @@ static int dp_device_event(struct notifier_block *unused, unsigned long event,
        case NETDEV_UNREGISTER:
                if (!ovs_is_internal_dev(dev)) {
                        struct sk_buff *notify;
+                       struct datapath *dp = vport->dp;
 
                        notify = ovs_vport_cmd_build_info(vport, 0, 0,
                                                          OVS_VPORT_CMD_DEL);
                        ovs_dp_detach_port(vport);
                        if (IS_ERR(notify)) {
-                               netlink_set_err(init_net.genl_sock, 0,
+                               netlink_set_err(ovs_dp_get_net(dp)->genl_sock, 0,
                                                ovs_dp_vport_multicast_group.id,
                                                PTR_ERR(notify));
                                break;
                        }
 
-                       genlmsg_multicast(notify, 0, ovs_dp_vport_multicast_group.id,
-                                         GFP_KERNEL);
+                       genlmsg_multicast_netns(ovs_dp_get_net(dp), notify, 0,
+                                               ovs_dp_vport_multicast_group.id,
+                                               GFP_KERNEL);
                }
                break;
        }
index b7f38b161909f0ea9ad8cf373ae77ab957cf5311..98c70630ad06178778dc4f1e04275fa6032facc8 100644 (file)
@@ -203,10 +203,7 @@ struct sw_flow_actions *ovs_flow_actions_alloc(const struct nlattr *actions)
        int actions_len = nla_len(actions);
        struct sw_flow_actions *sfa;
 
-       /* At least DP_MAX_PORTS actions are required to be able to flood a
-        * packet to every port.  Factor of 2 allows for setting VLAN tags,
-        * etc. */
-       if (actions_len > 2 * DP_MAX_PORTS * nla_total_size(4))
+       if (actions_len > MAX_ACTIONS_BUFSIZE)
                return ERR_PTR(-EINVAL);
 
        sfa = kmalloc(sizeof(*sfa) + actions_len, GFP_KERNEL);
@@ -427,19 +424,11 @@ void ovs_flow_deferred_free(struct sw_flow *flow)
        call_rcu(&flow->rcu, rcu_free_flow_callback);
 }
 
-/* RCU callback used by ovs_flow_deferred_free_acts. */
-static void rcu_free_acts_callback(struct rcu_head *rcu)
-{
-       struct sw_flow_actions *sf_acts = container_of(rcu,
-                       struct sw_flow_actions, rcu);
-       kfree(sf_acts);
-}
-
 /* Schedules 'sf_acts' to be freed after the next RCU grace period.
  * The caller must hold rcu_read_lock for this to be sensible. */
 void ovs_flow_deferred_free_acts(struct sw_flow_actions *sf_acts)
 {
-       call_rcu(&sf_acts->rcu, rcu_free_acts_callback);
+       kfree_rcu(sf_acts, rcu);
 }
 
 static int parse_vlan(struct sk_buff *skb, struct sw_flow_key *key)
@@ -1000,7 +989,7 @@ int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp,
                swkey->phy.in_port = in_port;
                attrs &= ~(1 << OVS_KEY_ATTR_IN_PORT);
        } else {
-               swkey->phy.in_port = USHRT_MAX;
+               swkey->phy.in_port = DP_MAX_PORTS;
        }
 
        /* Data attributes. */
@@ -1143,7 +1132,7 @@ int ovs_flow_metadata_from_nlattrs(u32 *priority, u16 *in_port,
        const struct nlattr *nla;
        int rem;
 
-       *in_port = USHRT_MAX;
+       *in_port = DP_MAX_PORTS;
        *priority = 0;
 
        nla_for_each_nested(nla, attr, rem) {
@@ -1180,7 +1169,7 @@ int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb)
            nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, swkey->phy.priority))
                goto nla_put_failure;
 
-       if (swkey->phy.in_port != USHRT_MAX &&
+       if (swkey->phy.in_port != DP_MAX_PORTS &&
            nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT, swkey->phy.in_port))
                goto nla_put_failure;
 
index c30df1a10c670ad01b7b8b88c49434c95c0c659e..14a324eb017b44cca9263701d094f3221a37c9a3 100644 (file)
@@ -43,7 +43,7 @@ struct sw_flow_actions {
 struct sw_flow_key {
        struct {
                u32     priority;       /* Packet QoS priority. */
-               u16     in_port;        /* Input switch port (or USHRT_MAX). */
+               u16     in_port;        /* Input switch port (or DP_MAX_PORTS). */
        } phy;
        struct {
                u8     src[ETH_ALEN];   /* Ethernet source address. */
@@ -163,6 +163,7 @@ int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp,
 int ovs_flow_metadata_from_nlattrs(u32 *priority, u16 *in_port,
                               const struct nlattr *);
 
+#define MAX_ACTIONS_BUFSIZE    (16 * 1024)
 #define TBL_MIN_BUCKETS                1024
 
 struct flow_table {
index 4061b9ee07f7ca30af27ffa6f4264716dcda4e84..5d460c37df07ce70b615de1cff8e2191e358bab1 100644 (file)
@@ -144,7 +144,7 @@ static void do_setup(struct net_device *netdev)
        netdev->tx_queue_len = 0;
 
        netdev->features = NETIF_F_LLTX | NETIF_F_SG | NETIF_F_FRAGLIST |
-                               NETIF_F_HIGHDMA | NETIF_F_HW_CSUM | NETIF_F_TSO;
+                          NETIF_F_HIGHDMA | NETIF_F_HW_CSUM | NETIF_F_TSO;
 
        netdev->vlan_features = netdev->features;
        netdev->features |= NETIF_F_HW_VLAN_TX;
@@ -175,9 +175,14 @@ static struct vport *internal_dev_create(const struct vport_parms *parms)
                goto error_free_vport;
        }
 
+       dev_net_set(netdev_vport->dev, ovs_dp_get_net(vport->dp));
        internal_dev = internal_dev_priv(netdev_vport->dev);
        internal_dev->vport = vport;
 
+       /* Restrict bridge port to current netns. */
+       if (vport->port_no == OVSP_LOCAL)
+               netdev_vport->dev->features |= NETIF_F_NETNS_LOCAL;
+
        err = register_netdevice(netdev_vport->dev);
        if (err)
                goto error_free_netdev;
index 6ea3551cc78c8f21a966aeaeffc04a575eeeb824..3c1e58ba714bf9534b597c9f28147cc7c3038b39 100644 (file)
@@ -83,7 +83,7 @@ static struct vport *netdev_create(const struct vport_parms *parms)
 
        netdev_vport = netdev_vport_priv(vport);
 
-       netdev_vport->dev = dev_get_by_name(&init_net, parms->name);
+       netdev_vport->dev = dev_get_by_name(ovs_dp_get_net(vport->dp), parms->name);
        if (!netdev_vport->dev) {
                err = -ENODEV;
                goto error_free_vport;
index 6140336e79d7dd679603b7a1cb7dc750d3ed7f10..03779e8a262289f9a8178e62e0adf5afc16a9ae5 100644 (file)
  * 02110-1301, USA
  */
 
-#include <linux/dcache.h>
 #include <linux/etherdevice.h>
 #include <linux/if.h>
 #include <linux/if_vlan.h>
+#include <linux/jhash.h>
 #include <linux/kernel.h>
 #include <linux/list.h>
 #include <linux/mutex.h>
@@ -27,7 +27,9 @@
 #include <linux/rcupdate.h>
 #include <linux/rtnetlink.h>
 #include <linux/compat.h>
+#include <net/net_namespace.h>
 
+#include "datapath.h"
 #include "vport.h"
 #include "vport-internal_dev.h"
 
@@ -67,9 +69,9 @@ void ovs_vport_exit(void)
        kfree(dev_table);
 }
 
-static struct hlist_head *hash_bucket(const char *name)
+static struct hlist_head *hash_bucket(struct net *net, const char *name)
 {
-       unsigned int hash = full_name_hash(name, strlen(name));
+       unsigned int hash = jhash(name, strlen(name), (unsigned long) net);
        return &dev_table[hash & (VPORT_HASH_BUCKETS - 1)];
 }
 
@@ -80,14 +82,15 @@ static struct hlist_head *hash_bucket(const char *name)
  *
  * Must be called with RTNL or RCU read lock.
  */
-struct vport *ovs_vport_locate(const char *name)
+struct vport *ovs_vport_locate(struct net *net, const char *name)
 {
-       struct hlist_head *bucket = hash_bucket(name);
+       struct hlist_head *bucket = hash_bucket(net, name);
        struct vport *vport;
        struct hlist_node *node;
 
        hlist_for_each_entry_rcu(vport, node, bucket, hash_node)
-               if (!strcmp(name, vport->ops->get_name(vport)))
+               if (!strcmp(name, vport->ops->get_name(vport)) &&
+                   net_eq(ovs_dp_get_net(vport->dp), net))
                        return vport;
 
        return NULL;
@@ -122,8 +125,9 @@ struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *ops,
 
        vport->dp = parms->dp;
        vport->port_no = parms->port_no;
-       vport->upcall_pid = parms->upcall_pid;
+       vport->upcall_portid = parms->upcall_portid;
        vport->ops = ops;
+       INIT_HLIST_NODE(&vport->dp_hash_node);
 
        vport->percpu_stats = alloc_percpu(struct vport_percpu_stats);
        if (!vport->percpu_stats) {
@@ -170,14 +174,17 @@ struct vport *ovs_vport_add(const struct vport_parms *parms)
 
        for (i = 0; i < ARRAY_SIZE(vport_ops_list); i++) {
                if (vport_ops_list[i]->type == parms->type) {
+                       struct hlist_head *bucket;
+
                        vport = vport_ops_list[i]->create(parms);
                        if (IS_ERR(vport)) {
                                err = PTR_ERR(vport);
                                goto out;
                        }
 
-                       hlist_add_head_rcu(&vport->hash_node,
-                                          hash_bucket(vport->ops->get_name(vport)));
+                       bucket = hash_bucket(ovs_dp_get_net(vport->dp),
+                                            vport->ops->get_name(vport));
+                       hlist_add_head_rcu(&vport->hash_node, bucket);
                        return vport;
                }
        }
@@ -391,7 +398,7 @@ void ovs_vport_record_error(struct vport *vport, enum vport_err_type err_type)
        case VPORT_E_TX_ERROR:
                vport->err_stats.tx_errors++;
                break;
-       };
+       }
 
        spin_unlock(&vport->stats_lock);
 }
index aac680ca2b06410ec3300766583c3aaef78370ca..3f7961ea3c568d54011d6a570975fceb3c20d15f 100644 (file)
@@ -20,6 +20,7 @@
 #define VPORT_H 1
 
 #include <linux/list.h>
+#include <linux/netlink.h>
 #include <linux/openvswitch.h>
 #include <linux/skbuff.h>
 #include <linux/spinlock.h>
@@ -38,7 +39,7 @@ void ovs_vport_exit(void);
 struct vport *ovs_vport_add(const struct vport_parms *);
 void ovs_vport_del(struct vport *);
 
-struct vport *ovs_vport_locate(const char *name);
+struct vport *ovs_vport_locate(struct net *net, const char *name);
 
 void ovs_vport_get_stats(struct vport *, struct ovs_vport_stats *);
 
@@ -69,10 +70,10 @@ struct vport_err_stats {
  * @rcu: RCU callback head for deferred destruction.
  * @port_no: Index into @dp's @ports array.
  * @dp: Datapath to which this port belongs.
- * @node: Element in @dp's @port_list.
- * @upcall_pid: The Netlink port to use for packets received on this port that
+ * @upcall_portid: The Netlink port to use for packets received on this port that
  * miss the flow table.
  * @hash_node: Element in @dev_table hash table in vport.c.
+ * @dp_hash_node: Element in @datapath->ports hash table in datapath.c.
  * @ops: Class structure.
  * @percpu_stats: Points to per-CPU statistics used and maintained by vport
  * @stats_lock: Protects @err_stats;
@@ -82,10 +83,10 @@ struct vport {
        struct rcu_head rcu;
        u16 port_no;
        struct datapath *dp;
-       struct list_head node;
-       u32 upcall_pid;
+       u32 upcall_portid;
 
        struct hlist_node hash_node;
+       struct hlist_node dp_hash_node;
        const struct vport_ops *ops;
 
        struct vport_percpu_stats __percpu *percpu_stats;
@@ -112,7 +113,7 @@ struct vport_parms {
        /* For ovs_vport_alloc(). */
        struct datapath *dp;
        u16 port_no;
-       u32 upcall_pid;
+       u32 upcall_portid;
 };
 
 /**
index 0060e3b396b7b41fb0a5791966a39222cce8c04d..cc55b35f80e5acd045c37fc20bb37d98ba3d258c 100644 (file)
@@ -14,3 +14,11 @@ config PACKET
          be called af_packet.
 
          If unsure, say Y.
+
+config PACKET_DIAG
+       tristate "Packet: sockets monitoring interface"
+       depends on PACKET
+       default n
+       ---help---
+         Support for PF_PACKET sockets monitoring interface used by the ss tool.
+         If unsure, say Y.
index 81183eabfdec5cee58148d85df0877fc0e3a3d45..9df61347a3c3e98938c7b11a83195a22eb5d755c 100644 (file)
@@ -3,3 +3,5 @@
 #
 
 obj-$(CONFIG_PACKET) += af_packet.o
+obj-$(CONFIG_PACKET_DIAG) += af_packet_diag.o
+af_packet_diag-y += diag.o
index c5c9e2a54218207f0dba9b16920b2e17da84c353..94060edbbd706ed11c7609913f1e434e5cec76c1 100644 (file)
@@ -93,6 +93,8 @@
 #include <net/inet_common.h>
 #endif
 
+#include "internal.h"
+
 /*
    Assumptions:
    - if device has no dev->hard_header routine, it adds and removes ll header
@@ -146,14 +148,6 @@ dev->hard_header == NULL (ll header is added by device, we cannot control it)
 
 /* Private packet socket structures. */
 
-struct packet_mclist {
-       struct packet_mclist    *next;
-       int                     ifindex;
-       int                     count;
-       unsigned short          type;
-       unsigned short          alen;
-       unsigned char           addr[MAX_ADDR_LEN];
-};
 /* identical to struct packet_mreq except it has
  * a longer address field.
  */
@@ -175,63 +169,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
 #define BLK_PLUS_PRIV(sz_of_priv) \
        (BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))
 
-/* kbdq - kernel block descriptor queue */
-struct tpacket_kbdq_core {
-       struct pgv      *pkbdq;
-       unsigned int    feature_req_word;
-       unsigned int    hdrlen;
-       unsigned char   reset_pending_on_curr_blk;
-       unsigned char   delete_blk_timer;
-       unsigned short  kactive_blk_num;
-       unsigned short  blk_sizeof_priv;
-
-       /* last_kactive_blk_num:
-        * trick to see if user-space has caught up
-        * in order to avoid refreshing timer when every single pkt arrives.
-        */
-       unsigned short  last_kactive_blk_num;
-
-       char            *pkblk_start;
-       char            *pkblk_end;
-       int             kblk_size;
-       unsigned int    knum_blocks;
-       uint64_t        knxt_seq_num;
-       char            *prev;
-       char            *nxt_offset;
-       struct sk_buff  *skb;
-
-       atomic_t        blk_fill_in_prog;
-
-       /* Default is set to 8ms */
-#define DEFAULT_PRB_RETIRE_TOV (8)
-
-       unsigned short  retire_blk_tov;
-       unsigned short  version;
-       unsigned long   tov_in_jiffies;
-
-       /* timer to retire an outstanding block */
-       struct timer_list retire_blk_timer;
-};
-
 #define PGV_FROM_VMALLOC 1
-struct pgv {
-       char *buffer;
-};
-
-struct packet_ring_buffer {
-       struct pgv              *pg_vec;
-       unsigned int            head;
-       unsigned int            frames_per_block;
-       unsigned int            frame_size;
-       unsigned int            frame_max;
-
-       unsigned int            pg_vec_order;
-       unsigned int            pg_vec_pages;
-       unsigned int            pg_vec_len;
-
-       struct tpacket_kbdq_core        prb_bdqc;
-       atomic_t                pending;
-};
 
 #define BLOCK_STATUS(x)        ((x)->hdr.bh1.block_status)
 #define BLOCK_NUM_PKTS(x)      ((x)->hdr.bh1.num_pkts)
@@ -269,52 +207,6 @@ static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
                struct tpacket3_hdr *);
 static void packet_flush_mclist(struct sock *sk);
 
-struct packet_fanout;
-struct packet_sock {
-       /* struct sock has to be the first member of packet_sock */
-       struct sock             sk;
-       struct packet_fanout    *fanout;
-       struct tpacket_stats    stats;
-       union  tpacket_stats_u  stats_u;
-       struct packet_ring_buffer       rx_ring;
-       struct packet_ring_buffer       tx_ring;
-       int                     copy_thresh;
-       spinlock_t              bind_lock;
-       struct mutex            pg_vec_lock;
-       unsigned int            running:1,      /* prot_hook is attached*/
-                               auxdata:1,
-                               origdev:1,
-                               has_vnet_hdr:1;
-       int                     ifindex;        /* bound device         */
-       __be16                  num;
-       struct packet_mclist    *mclist;
-       atomic_t                mapped;
-       enum tpacket_versions   tp_version;
-       unsigned int            tp_hdrlen;
-       unsigned int            tp_reserve;
-       unsigned int            tp_loss:1;
-       unsigned int            tp_tstamp;
-       struct packet_type      prot_hook ____cacheline_aligned_in_smp;
-};
-
-#define PACKET_FANOUT_MAX      256
-
-struct packet_fanout {
-#ifdef CONFIG_NET_NS
-       struct net              *net;
-#endif
-       unsigned int            num_members;
-       u16                     id;
-       u8                      type;
-       u8                      defrag;
-       atomic_t                rr_cur;
-       struct list_head        list;
-       struct sock             *arr[PACKET_FANOUT_MAX];
-       spinlock_t              lock;
-       atomic_t                sk_ref;
-       struct packet_type      prot_hook ____cacheline_aligned_in_smp;
-};
-
 struct packet_skb_cb {
        unsigned int origlen;
        union {
@@ -334,11 +226,6 @@ struct packet_skb_cb {
        (((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
        ((x)->kactive_blk_num+1) : 0)
 
-static struct packet_sock *pkt_sk(struct sock *sk)
-{
-       return (struct packet_sock *)sk;
-}
-
 static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
 static void __fanout_link(struct sock *sk, struct packet_sock *po);
 
@@ -968,7 +855,8 @@ static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
                ppd->hv1.tp_vlan_tci = vlan_tx_tag_get(pkc->skb);
                ppd->tp_status = TP_STATUS_VLAN_VALID;
        } else {
-               ppd->hv1.tp_vlan_tci = ppd->tp_status = 0;
+               ppd->hv1.tp_vlan_tci = 0;
+               ppd->tp_status = TP_STATUS_AVAILABLE;
        }
 }
 
@@ -1243,7 +1131,8 @@ static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
        return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
 }
 
-static DEFINE_MUTEX(fanout_mutex);
+DEFINE_MUTEX(fanout_mutex);
+EXPORT_SYMBOL_GPL(fanout_mutex);
 static LIST_HEAD(fanout_list);
 
 static void __fanout_link(struct sock *sk, struct packet_sock *po)
@@ -1364,9 +1253,9 @@ static void fanout_release(struct sock *sk)
        if (!f)
                return;
 
+       mutex_lock(&fanout_mutex);
        po->fanout = NULL;
 
-       mutex_lock(&fanout_mutex);
        if (atomic_dec_and_test(&f->sk_ref)) {
                list_del(&f->list);
                dev_remove_pack(&f->prot_hook);
@@ -2063,7 +1952,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
        int tp_len, size_max;
        unsigned char *addr;
        int len_sum = 0;
-       int status = 0;
+       int status = TP_STATUS_AVAILABLE;
        int hlen, tlen;
 
        mutex_lock(&po->pg_vec_lock);
@@ -2428,10 +2317,13 @@ static int packet_release(struct socket *sock)
        net = sock_net(sk);
        po = pkt_sk(sk);
 
-       spin_lock_bh(&net->packet.sklist_lock);
+       mutex_lock(&net->packet.sklist_lock);
        sk_del_node_init_rcu(sk);
+       mutex_unlock(&net->packet.sklist_lock);
+
+       preempt_disable();
        sock_prot_inuse_add(net, sk->sk_prot, -1);
-       spin_unlock_bh(&net->packet.sklist_lock);
+       preempt_enable();
 
        spin_lock(&po->bind_lock);
        unregister_prot_hook(sk, false);
@@ -2630,10 +2522,13 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
                register_prot_hook(sk);
        }
 
-       spin_lock_bh(&net->packet.sklist_lock);
+       mutex_lock(&net->packet.sklist_lock);
        sk_add_node_rcu(sk, &net->packet.sklist);
+       mutex_unlock(&net->packet.sklist_lock);
+
+       preempt_disable();
        sock_prot_inuse_add(net, &packet_proto, 1);
-       spin_unlock_bh(&net->packet.sklist_lock);
+       preempt_enable();
 
        return 0;
 out:
@@ -3854,7 +3749,7 @@ static int packet_seq_show(struct seq_file *seq, void *v)
                           po->ifindex,
                           po->running,
                           atomic_read(&s->sk_rmem_alloc),
-                          sock_i_uid(s),
+                          from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
                           sock_i_ino(s));
        }
 
@@ -3886,7 +3781,7 @@ static const struct file_operations packet_seq_fops = {
 
 static int __net_init packet_net_init(struct net *net)
 {
-       spin_lock_init(&net->packet.sklist_lock);
+       mutex_init(&net->packet.sklist_lock);
        INIT_HLIST_HEAD(&net->packet.sklist);
 
        if (!proc_net_fops_create(net, "packet", 0, &packet_seq_fops))
diff --git a/net/packet/diag.c b/net/packet/diag.c
new file mode 100644 (file)
index 0000000..8db6e21
--- /dev/null
@@ -0,0 +1,242 @@
+#include <linux/module.h>
+#include <linux/sock_diag.h>
+#include <linux/net.h>
+#include <linux/netdevice.h>
+#include <linux/packet_diag.h>
+#include <net/net_namespace.h>
+#include <net/sock.h>
+
+#include "internal.h"
+
+static int pdiag_put_info(const struct packet_sock *po, struct sk_buff *nlskb)
+{
+       struct packet_diag_info pinfo;
+
+       pinfo.pdi_index = po->ifindex;
+       pinfo.pdi_version = po->tp_version;
+       pinfo.pdi_reserve = po->tp_reserve;
+       pinfo.pdi_copy_thresh = po->copy_thresh;
+       pinfo.pdi_tstamp = po->tp_tstamp;
+
+       pinfo.pdi_flags = 0;
+       if (po->running)
+               pinfo.pdi_flags |= PDI_RUNNING;
+       if (po->auxdata)
+               pinfo.pdi_flags |= PDI_AUXDATA;
+       if (po->origdev)
+               pinfo.pdi_flags |= PDI_ORIGDEV;
+       if (po->has_vnet_hdr)
+               pinfo.pdi_flags |= PDI_VNETHDR;
+       if (po->tp_loss)
+               pinfo.pdi_flags |= PDI_LOSS;
+
+       return nla_put(nlskb, PACKET_DIAG_INFO, sizeof(pinfo), &pinfo);
+}
+
+static int pdiag_put_mclist(const struct packet_sock *po, struct sk_buff *nlskb)
+{
+       struct nlattr *mca;
+       struct packet_mclist *ml;
+
+       mca = nla_nest_start(nlskb, PACKET_DIAG_MCLIST);
+       if (!mca)
+               return -EMSGSIZE;
+
+       rtnl_lock();
+       for (ml = po->mclist; ml; ml = ml->next) {
+               struct packet_diag_mclist *dml;
+
+               dml = nla_reserve_nohdr(nlskb, sizeof(*dml));
+               if (!dml) {
+                       rtnl_unlock();
+                       nla_nest_cancel(nlskb, mca);
+                       return -EMSGSIZE;
+               }
+
+               dml->pdmc_index = ml->ifindex;
+               dml->pdmc_type = ml->type;
+               dml->pdmc_alen = ml->alen;
+               dml->pdmc_count = ml->count;
+               BUILD_BUG_ON(sizeof(dml->pdmc_addr) != sizeof(ml->addr));
+               memcpy(dml->pdmc_addr, ml->addr, sizeof(ml->addr));
+       }
+
+       rtnl_unlock();
+       nla_nest_end(nlskb, mca);
+
+       return 0;
+}
+
+static int pdiag_put_ring(struct packet_ring_buffer *ring, int ver, int nl_type,
+               struct sk_buff *nlskb)
+{
+       struct packet_diag_ring pdr;
+
+       if (!ring->pg_vec || ((ver > TPACKET_V2) &&
+                               (nl_type == PACKET_DIAG_TX_RING)))
+               return 0;
+
+       pdr.pdr_block_size = ring->pg_vec_pages << PAGE_SHIFT;
+       pdr.pdr_block_nr = ring->pg_vec_len;
+       pdr.pdr_frame_size = ring->frame_size;
+       pdr.pdr_frame_nr = ring->frame_max + 1;
+
+       if (ver > TPACKET_V2) {
+               pdr.pdr_retire_tmo = ring->prb_bdqc.retire_blk_tov;
+               pdr.pdr_sizeof_priv = ring->prb_bdqc.blk_sizeof_priv;
+               pdr.pdr_features = ring->prb_bdqc.feature_req_word;
+       } else {
+               pdr.pdr_retire_tmo = 0;
+               pdr.pdr_sizeof_priv = 0;
+               pdr.pdr_features = 0;
+       }
+
+       return nla_put(nlskb, nl_type, sizeof(pdr), &pdr);
+}
+
+static int pdiag_put_rings_cfg(struct packet_sock *po, struct sk_buff *skb)
+{
+       int ret;
+
+       mutex_lock(&po->pg_vec_lock);
+       ret = pdiag_put_ring(&po->rx_ring, po->tp_version,
+                       PACKET_DIAG_RX_RING, skb);
+       if (!ret)
+               ret = pdiag_put_ring(&po->tx_ring, po->tp_version,
+                               PACKET_DIAG_TX_RING, skb);
+       mutex_unlock(&po->pg_vec_lock);
+
+       return ret;
+}
+
+static int pdiag_put_fanout(struct packet_sock *po, struct sk_buff *nlskb)
+{
+       int ret = 0;
+
+       mutex_lock(&fanout_mutex);
+       if (po->fanout) {
+               u32 val;
+
+               val = (u32)po->fanout->id | ((u32)po->fanout->type << 16);
+               ret = nla_put_u32(nlskb, PACKET_DIAG_FANOUT, val);
+       }
+       mutex_unlock(&fanout_mutex);
+
+       return ret;
+}
+
+static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct packet_diag_req *req,
+               u32 portid, u32 seq, u32 flags, int sk_ino)
+{
+       struct nlmsghdr *nlh;
+       struct packet_diag_msg *rp;
+       struct packet_sock *po = pkt_sk(sk);
+
+       nlh = nlmsg_put(skb, portid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rp), flags);
+       if (!nlh)
+               return -EMSGSIZE;
+
+       rp = nlmsg_data(nlh);
+       rp->pdiag_family = AF_PACKET;
+       rp->pdiag_type = sk->sk_type;
+       rp->pdiag_num = ntohs(po->num);
+       rp->pdiag_ino = sk_ino;
+       sock_diag_save_cookie(sk, rp->pdiag_cookie);
+
+       if ((req->pdiag_show & PACKET_SHOW_INFO) &&
+                       pdiag_put_info(po, skb))
+               goto out_nlmsg_trim;
+
+       if ((req->pdiag_show & PACKET_SHOW_MCLIST) &&
+                       pdiag_put_mclist(po, skb))
+               goto out_nlmsg_trim;
+
+       if ((req->pdiag_show & PACKET_SHOW_RING_CFG) &&
+                       pdiag_put_rings_cfg(po, skb))
+               goto out_nlmsg_trim;
+
+       if ((req->pdiag_show & PACKET_SHOW_FANOUT) &&
+                       pdiag_put_fanout(po, skb))
+               goto out_nlmsg_trim;
+
+       return nlmsg_end(skb, nlh);
+
+out_nlmsg_trim:
+       nlmsg_cancel(skb, nlh);
+       return -EMSGSIZE;
+}
+
+static int packet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+       int num = 0, s_num = cb->args[0];
+       struct packet_diag_req *req;
+       struct net *net;
+       struct sock *sk;
+       struct hlist_node *node;
+
+       net = sock_net(skb->sk);
+       req = nlmsg_data(cb->nlh);
+
+       mutex_lock(&net->packet.sklist_lock);
+       sk_for_each(sk, node, &net->packet.sklist) {
+               if (!net_eq(sock_net(sk), net))
+                       continue;
+               if (num < s_num)
+                       goto next;
+
+               if (sk_diag_fill(sk, skb, req, NETLINK_CB(cb->skb).portid,
+                                       cb->nlh->nlmsg_seq, NLM_F_MULTI,
+                                       sock_i_ino(sk)) < 0)
+                       goto done;
+next:
+               num++;
+       }
+done:
+       mutex_unlock(&net->packet.sklist_lock);
+       cb->args[0] = num;
+
+       return skb->len;
+}
+
+static int packet_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
+{
+       int hdrlen = sizeof(struct packet_diag_req);
+       struct net *net = sock_net(skb->sk);
+       struct packet_diag_req *req;
+
+       if (nlmsg_len(h) < hdrlen)
+               return -EINVAL;
+
+       req = nlmsg_data(h);
+       /* Make it possible to support protocol filtering later */
+       if (req->sdiag_protocol)
+               return -EINVAL;
+
+       if (h->nlmsg_flags & NLM_F_DUMP) {
+               struct netlink_dump_control c = {
+                       .dump = packet_diag_dump,
+               };
+               return netlink_dump_start(net->diag_nlsk, skb, h, &c);
+       } else
+               return -EOPNOTSUPP;
+}
+
+static const struct sock_diag_handler packet_diag_handler = {
+       .family = AF_PACKET,
+       .dump = packet_diag_handler_dump,
+};
+
+static int __init packet_diag_init(void)
+{
+       return sock_diag_register(&packet_diag_handler);
+}
+
+static void __exit packet_diag_exit(void)
+{
+       sock_diag_unregister(&packet_diag_handler);
+}
+
+module_init(packet_diag_init);
+module_exit(packet_diag_exit);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 17 /* AF_PACKET */);
diff --git a/net/packet/internal.h b/net/packet/internal.h
new file mode 100644 (file)
index 0000000..44945f6
--- /dev/null
@@ -0,0 +1,121 @@
+#ifndef __PACKET_INTERNAL_H__
+#define __PACKET_INTERNAL_H__
+
+struct packet_mclist {
+       struct packet_mclist    *next;
+       int                     ifindex;
+       int                     count;
+       unsigned short          type;
+       unsigned short          alen;
+       unsigned char           addr[MAX_ADDR_LEN];
+};
+
+/* kbdq - kernel block descriptor queue */
+struct tpacket_kbdq_core {
+       struct pgv      *pkbdq;
+       unsigned int    feature_req_word;
+       unsigned int    hdrlen;
+       unsigned char   reset_pending_on_curr_blk;
+       unsigned char   delete_blk_timer;
+       unsigned short  kactive_blk_num;
+       unsigned short  blk_sizeof_priv;
+
+       /* last_kactive_blk_num:
+        * trick to see if user-space has caught up
+        * in order to avoid refreshing timer when every single pkt arrives.
+        */
+       unsigned short  last_kactive_blk_num;
+
+       char            *pkblk_start;
+       char            *pkblk_end;
+       int             kblk_size;
+       unsigned int    knum_blocks;
+       uint64_t        knxt_seq_num;
+       char            *prev;
+       char            *nxt_offset;
+       struct sk_buff  *skb;
+
+       atomic_t        blk_fill_in_prog;
+
+       /* Default is set to 8ms */
+#define DEFAULT_PRB_RETIRE_TOV (8)
+
+       unsigned short  retire_blk_tov;
+       unsigned short  version;
+       unsigned long   tov_in_jiffies;
+
+       /* timer to retire an outstanding block */
+       struct timer_list retire_blk_timer;
+};
+
+struct pgv {
+       char *buffer;
+};
+
+struct packet_ring_buffer {
+       struct pgv              *pg_vec;
+       unsigned int            head;
+       unsigned int            frames_per_block;
+       unsigned int            frame_size;
+       unsigned int            frame_max;
+
+       unsigned int            pg_vec_order;
+       unsigned int            pg_vec_pages;
+       unsigned int            pg_vec_len;
+
+       struct tpacket_kbdq_core        prb_bdqc;
+       atomic_t                pending;
+};
+
+extern struct mutex fanout_mutex;
+#define PACKET_FANOUT_MAX      256
+
+struct packet_fanout {
+#ifdef CONFIG_NET_NS
+       struct net              *net;
+#endif
+       unsigned int            num_members;
+       u16                     id;
+       u8                      type;
+       u8                      defrag;
+       atomic_t                rr_cur;
+       struct list_head        list;
+       struct sock             *arr[PACKET_FANOUT_MAX];
+       spinlock_t              lock;
+       atomic_t                sk_ref;
+       struct packet_type      prot_hook ____cacheline_aligned_in_smp;
+};
+
+struct packet_sock {
+       /* struct sock has to be the first member of packet_sock */
+       struct sock             sk;
+       struct packet_fanout    *fanout;
+       struct tpacket_stats    stats;
+       union  tpacket_stats_u  stats_u;
+       struct packet_ring_buffer       rx_ring;
+       struct packet_ring_buffer       tx_ring;
+       int                     copy_thresh;
+       spinlock_t              bind_lock;
+       struct mutex            pg_vec_lock;
+       unsigned int            running:1,      /* prot_hook is attached */
+                               auxdata:1,
+                               origdev:1,
+                               has_vnet_hdr:1;
+       int                     ifindex;        /* bound device         */
+       __be16                  num;
+       struct packet_mclist    *mclist;
+       atomic_t                mapped;
+       enum tpacket_versions   tp_version;
+       unsigned int            tp_hdrlen;
+       unsigned int            tp_reserve;
+       unsigned int            tp_loss:1;
+       unsigned int            tp_tstamp;
+       struct packet_type      prot_hook ____cacheline_aligned_in_smp;
+};
+
+static struct packet_sock *pkt_sk(struct sock *sk)
+{
+       return (struct packet_sock *)sk;
+}
+
+#endif
index 7dd762a464e55f9ef7f41c83b433aa81b6f4d7cf..83a8389619aa7ccc0a490a8f8b664200bee53df9 100644 (file)
@@ -33,7 +33,7 @@
 /* Device address handling */
 
 static int fill_addr(struct sk_buff *skb, struct net_device *dev, u8 addr,
-                    u32 pid, u32 seq, int event);
+                    u32 portid, u32 seq, int event);
 
 void phonet_address_notify(int event, struct net_device *dev, u8 addr)
 {
@@ -101,12 +101,12 @@ static int addr_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *attr)
 }
 
 static int fill_addr(struct sk_buff *skb, struct net_device *dev, u8 addr,
-                       u32 pid, u32 seq, int event)
+                       u32 portid, u32 seq, int event)
 {
        struct ifaddrmsg *ifm;
        struct nlmsghdr *nlh;
 
-       nlh = nlmsg_put(skb, pid, seq, event, sizeof(*ifm), 0);
+       nlh = nlmsg_put(skb, portid, seq, event, sizeof(*ifm), 0);
        if (nlh == NULL)
                return -EMSGSIZE;
 
@@ -148,7 +148,7 @@ static int getaddr_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
                                continue;
 
                        if (fill_addr(skb, pnd->netdev, addr << 2,
-                                        NETLINK_CB(cb->skb).pid,
+                                        NETLINK_CB(cb->skb).portid,
                                        cb->nlh->nlmsg_seq, RTM_NEWADDR) < 0)
                                goto out;
                }
@@ -165,12 +165,12 @@ out:
 /* Routes handling */
 
 static int fill_route(struct sk_buff *skb, struct net_device *dev, u8 dst,
-                       u32 pid, u32 seq, int event)
+                       u32 portid, u32 seq, int event)
 {
        struct rtmsg *rtm;
        struct nlmsghdr *nlh;
 
-       nlh = nlmsg_put(skb, pid, seq, event, sizeof(*rtm), 0);
+       nlh = nlmsg_put(skb, portid, seq, event, sizeof(*rtm), 0);
        if (nlh == NULL)
                return -EMSGSIZE;
 
@@ -276,7 +276,7 @@ static int route_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
 
                if (addr_idx++ < addr_start_idx)
                        continue;
-               if (fill_route(skb, dev, addr << 2, NETLINK_CB(cb->skb).pid,
+               if (fill_route(skb, dev, addr << 2, NETLINK_CB(cb->skb).portid,
                                cb->nlh->nlmsg_seq, RTM_NEWROUTE))
                        goto out;
        }
index 0acc943f713a94c5d695cd150fa445e906eba751..b7e98278225574db446bbc06e657e38efd0522e1 100644 (file)
@@ -612,7 +612,8 @@ static int pn_sock_seq_show(struct seq_file *seq, void *v)
                        sk->sk_protocol, pn->sobject, pn->dobject,
                        pn->resource, sk->sk_state,
                        sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
-                       sock_i_uid(sk), sock_i_ino(sk),
+                       from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
+                       sock_i_ino(sk),
                        atomic_read(&sk->sk_refcnt), sk,
                        atomic_read(&sk->sk_drops), &len);
        }
@@ -796,7 +797,8 @@ static int pn_res_seq_show(struct seq_file *seq, void *v)
                struct sock *sk = *psk;
 
                seq_printf(seq, "%02X %5d %lu%n",
-                          (int) (psk - pnres.sk), sock_i_uid(sk),
+                          (int) (psk - pnres.sk),
+                          from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
                           sock_i_ino(sk), &len);
        }
        seq_printf(seq, "%*s\n", 63 - len, "");
index af95c8e058fc0d45096234aa8aeb7c8da6fda175..a65ee78db0c54e1062186b9b132fe46fac380c91 100644 (file)
@@ -43,7 +43,7 @@ void rds_tcp_state_change(struct sock *sk)
        struct rds_connection *conn;
        struct rds_tcp_connection *tc;
 
-       read_lock_bh(&sk->sk_callback_lock);
+       read_lock(&sk->sk_callback_lock);
        conn = sk->sk_user_data;
        if (!conn) {
                state_change = sk->sk_state_change;
@@ -68,7 +68,7 @@ void rds_tcp_state_change(struct sock *sk)
                        break;
        }
 out:
-       read_unlock_bh(&sk->sk_callback_lock);
+       read_unlock(&sk->sk_callback_lock);
        state_change(sk);
 }
 
index 72981375f47cc90cba0aaac80952bec3b0636bb4..7787537e9c2e95fffbcc8f89d594fc6de6ad52fe 100644 (file)
@@ -114,7 +114,7 @@ void rds_tcp_listen_data_ready(struct sock *sk, int bytes)
 
        rdsdebug("listen data ready sk %p\n", sk);
 
-       read_lock_bh(&sk->sk_callback_lock);
+       read_lock(&sk->sk_callback_lock);
        ready = sk->sk_user_data;
        if (!ready) { /* check for teardown race */
                ready = sk->sk_data_ready;
@@ -131,7 +131,7 @@ void rds_tcp_listen_data_ready(struct sock *sk, int bytes)
                queue_work(rds_wq, &rds_tcp_listen_work);
 
 out:
-       read_unlock_bh(&sk->sk_callback_lock);
+       read_unlock(&sk->sk_callback_lock);
        ready(sk, bytes);
 }
 
index 6243258f840f0e79dace450b97e3914e1badbacc..4fac4f2bb9dccd11f304aa6dd94f8852e91a5b53 100644 (file)
@@ -322,7 +322,7 @@ void rds_tcp_data_ready(struct sock *sk, int bytes)
 
        rdsdebug("data ready sk %p bytes %d\n", sk, bytes);
 
-       read_lock_bh(&sk->sk_callback_lock);
+       read_lock(&sk->sk_callback_lock);
        conn = sk->sk_user_data;
        if (!conn) { /* check for teardown race */
                ready = sk->sk_data_ready;
@@ -336,7 +336,7 @@ void rds_tcp_data_ready(struct sock *sk, int bytes)
        if (rds_tcp_read_sock(conn, GFP_ATOMIC) == -ENOMEM)
                queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
 out:
-       read_unlock_bh(&sk->sk_callback_lock);
+       read_unlock(&sk->sk_callback_lock);
        ready(sk, bytes);
 }
 
index 1b4fd68f0c7c4db1f97859f407cded3a1e4f8144..81cf5a4c5e40c3c50b98c6694edd265a37e13b51 100644 (file)
@@ -174,7 +174,7 @@ void rds_tcp_write_space(struct sock *sk)
        struct rds_connection *conn;
        struct rds_tcp_connection *tc;
 
-       read_lock_bh(&sk->sk_callback_lock);
+       read_lock(&sk->sk_callback_lock);
        conn = sk->sk_user_data;
        if (!conn) {
                write_space = sk->sk_write_space;
@@ -194,7 +194,7 @@ void rds_tcp_write_space(struct sock *sk)
                queue_delayed_work(rds_wq, &conn->c_send_w, 0);
 
 out:
-       read_unlock_bh(&sk->sk_callback_lock);
+       read_unlock(&sk->sk_callback_lock);
 
        /*
         * write_space is only called when data leaves tcp's send queue if
index 752b72360ebcb5736c849201aa6ed00361714ed9..a5c95274127990b34de5af72f82e9135cf73aa85 100644 (file)
@@ -150,6 +150,20 @@ static void rfkill_led_trigger_activate(struct led_classdev *led)
        rfkill_led_trigger_event(rfkill);
 }
 
+const char *rfkill_get_led_trigger_name(struct rfkill *rfkill)
+{
+       return rfkill->led_trigger.name;
+}
+EXPORT_SYMBOL(rfkill_get_led_trigger_name);
+
+void rfkill_set_led_trigger_name(struct rfkill *rfkill, const char *name)
+{
+       BUG_ON(!rfkill);
+
+       rfkill->ledtrigname = name;
+}
+EXPORT_SYMBOL(rfkill_set_led_trigger_name);
+
 static int rfkill_led_trigger_register(struct rfkill *rfkill)
 {
        rfkill->led_trigger.name = rfkill->ledtrigname
@@ -256,6 +270,7 @@ static bool __rfkill_set_hw_state(struct rfkill *rfkill,
 static void rfkill_set_block(struct rfkill *rfkill, bool blocked)
 {
        unsigned long flags;
+       bool prev, curr;
        int err;
 
        if (unlikely(rfkill->dev.power.power_state.event & PM_EVENT_SLEEP))
@@ -270,6 +285,8 @@ static void rfkill_set_block(struct rfkill *rfkill, bool blocked)
                rfkill->ops->query(rfkill, rfkill->data);
 
        spin_lock_irqsave(&rfkill->lock, flags);
+       prev = rfkill->state & RFKILL_BLOCK_SW;
+
        if (rfkill->state & RFKILL_BLOCK_SW)
                rfkill->state |= RFKILL_BLOCK_SW_PREV;
        else
@@ -299,10 +316,13 @@ static void rfkill_set_block(struct rfkill *rfkill, bool blocked)
        }
        rfkill->state &= ~RFKILL_BLOCK_SW_SETCALL;
        rfkill->state &= ~RFKILL_BLOCK_SW_PREV;
+       curr = rfkill->state & RFKILL_BLOCK_SW;
        spin_unlock_irqrestore(&rfkill->lock, flags);
 
        rfkill_led_trigger_event(rfkill);
-       rfkill_event(rfkill);
+
+       if (prev != curr)
+               rfkill_event(rfkill);
 }
 
 #ifdef CONFIG_RFKILL_INPUT
index 24c55c53e6a2fe150c2a34b181c60e2527278a55..c9d931e7ffecf1134e3743f4fc3920a10bb1f9cc 100644 (file)
@@ -164,8 +164,7 @@ static void rfkill_schedule_global_op(enum rfkill_sched_op op)
        rfkill_op_pending = true;
        if (op == RFKILL_GLOBAL_OP_EPO && !rfkill_is_epo_lock_active()) {
                /* bypass the limiter for EPO */
-               cancel_delayed_work(&rfkill_op_work);
-               schedule_delayed_work(&rfkill_op_work, 0);
+               mod_delayed_work(system_wq, &rfkill_op_work, 0);
                rfkill_last_scheduled = jiffies;
        } else
                rfkill_schedule_ratelimited();
index 8b1f9f49960f6b6d40285b80896944923737323e..011d2384b115ebdbc3bc1f8fdf79a1ef54e8bd3e 100644 (file)
@@ -948,7 +948,8 @@ int rxrpc_get_server_data_key(struct rxrpc_connection *conn,
 
        _enter("");
 
-       key = key_alloc(&key_type_rxrpc, "x", 0, 0, cred, 0,
+       key = key_alloc(&key_type_rxrpc, "x",
+                       GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, cred, 0,
                        KEY_ALLOC_NOT_IN_QUOTA);
        if (IS_ERR(key)) {
                _leave(" = -ENOMEM [alloc %ld]", PTR_ERR(key));
@@ -994,7 +995,8 @@ struct key *rxrpc_get_null_key(const char *keyname)
        struct key *key;
        int ret;
 
-       key = key_alloc(&key_type_rxrpc, keyname, 0, 0, cred,
+       key = key_alloc(&key_type_rxrpc, keyname,
+                       GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, cred,
                        KEY_POS_SEARCH, KEY_ALLOC_NOT_IN_QUOTA);
        if (IS_ERR(key))
                return key;
index e3d2c78cb52c9aebf735fa01c1712203216a920e..102761d294cbe7b470de479be5f07b7c5d4d69ed 100644 (file)
@@ -644,7 +644,7 @@ errout:
 }
 
 static int
-tca_get_fill(struct sk_buff *skb, struct tc_action *a, u32 pid, u32 seq,
+tca_get_fill(struct sk_buff *skb, struct tc_action *a, u32 portid, u32 seq,
             u16 flags, int event, int bind, int ref)
 {
        struct tcamsg *t;
@@ -652,7 +652,7 @@ tca_get_fill(struct sk_buff *skb, struct tc_action *a, u32 pid, u32 seq,
        unsigned char *b = skb_tail_pointer(skb);
        struct nlattr *nest;
 
-       nlh = nlmsg_put(skb, pid, seq, event, sizeof(*t), flags);
+       nlh = nlmsg_put(skb, portid, seq, event, sizeof(*t), flags);
        if (!nlh)
                goto out_nlmsg_trim;
        t = nlmsg_data(nlh);
@@ -678,7 +678,7 @@ out_nlmsg_trim:
 }
 
 static int
-act_get_notify(struct net *net, u32 pid, struct nlmsghdr *n,
+act_get_notify(struct net *net, u32 portid, struct nlmsghdr *n,
               struct tc_action *a, int event)
 {
        struct sk_buff *skb;
@@ -686,16 +686,16 @@ act_get_notify(struct net *net, u32 pid, struct nlmsghdr *n,
        skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
        if (!skb)
                return -ENOBUFS;
-       if (tca_get_fill(skb, a, pid, n->nlmsg_seq, 0, event, 0, 0) <= 0) {
+       if (tca_get_fill(skb, a, portid, n->nlmsg_seq, 0, event, 0, 0) <= 0) {
                kfree_skb(skb);
                return -EINVAL;
        }
 
-       return rtnl_unicast(skb, net, pid);
+       return rtnl_unicast(skb, net, portid);
 }
 
 static struct tc_action *
-tcf_action_get_1(struct nlattr *nla, struct nlmsghdr *n, u32 pid)
+tcf_action_get_1(struct nlattr *nla, struct nlmsghdr *n, u32 portid)
 {
        struct nlattr *tb[TCA_ACT_MAX + 1];
        struct tc_action *a;
@@ -762,7 +762,7 @@ static struct tc_action *create_a(int i)
 }
 
 static int tca_action_flush(struct net *net, struct nlattr *nla,
-                           struct nlmsghdr *n, u32 pid)
+                           struct nlmsghdr *n, u32 portid)
 {
        struct sk_buff *skb;
        unsigned char *b;
@@ -799,7 +799,7 @@ static int tca_action_flush(struct net *net, struct nlattr *nla,
        if (a->ops == NULL)
                goto err_out;
 
-       nlh = nlmsg_put(skb, pid, n->nlmsg_seq, RTM_DELACTION, sizeof(*t), 0);
+       nlh = nlmsg_put(skb, portid, n->nlmsg_seq, RTM_DELACTION, sizeof(*t), 0);
        if (!nlh)
                goto out_module_put;
        t = nlmsg_data(nlh);
@@ -823,7 +823,7 @@ static int tca_action_flush(struct net *net, struct nlattr *nla,
        nlh->nlmsg_flags |= NLM_F_ROOT;
        module_put(a->ops->owner);
        kfree(a);
-       err = rtnetlink_send(skb, net, pid, RTNLGRP_TC,
+       err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
                             n->nlmsg_flags & NLM_F_ECHO);
        if (err > 0)
                return 0;
@@ -841,7 +841,7 @@ noflush_out:
 
 static int
 tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
-             u32 pid, int event)
+             u32 portid, int event)
 {
        int i, ret;
        struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
@@ -853,13 +853,13 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
 
        if (event == RTM_DELACTION && n->nlmsg_flags & NLM_F_ROOT) {
                if (tb[1] != NULL)
-                       return tca_action_flush(net, tb[1], n, pid);
+                       return tca_action_flush(net, tb[1], n, portid);
                else
                        return -EINVAL;
        }
 
        for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
-               act = tcf_action_get_1(tb[i], n, pid);
+               act = tcf_action_get_1(tb[i], n, portid);
                if (IS_ERR(act)) {
                        ret = PTR_ERR(act);
                        goto err;
@@ -874,7 +874,7 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
        }
 
        if (event == RTM_GETACTION)
-               ret = act_get_notify(net, pid, n, head, event);
+               ret = act_get_notify(net, portid, n, head, event);
        else { /* delete */
                struct sk_buff *skb;
 
@@ -884,7 +884,7 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
                        goto err;
                }
 
-               if (tca_get_fill(skb, head, pid, n->nlmsg_seq, 0, event,
+               if (tca_get_fill(skb, head, portid, n->nlmsg_seq, 0, event,
                                 0, 1) <= 0) {
                        kfree_skb(skb);
                        ret = -EINVAL;
@@ -893,7 +893,7 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
 
                /* now do the delete */
                tcf_action_destroy(head, 0);
-               ret = rtnetlink_send(skb, net, pid, RTNLGRP_TC,
+               ret = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
                                     n->nlmsg_flags & NLM_F_ECHO);
                if (ret > 0)
                        return 0;
@@ -905,7 +905,7 @@ err:
 }
 
 static int tcf_add_notify(struct net *net, struct tc_action *a,
-                         u32 pid, u32 seq, int event, u16 flags)
+                         u32 portid, u32 seq, int event, u16 flags)
 {
        struct tcamsg *t;
        struct nlmsghdr *nlh;
@@ -920,7 +920,7 @@ static int tcf_add_notify(struct net *net, struct tc_action *a,
 
        b = skb_tail_pointer(skb);
 
-       nlh = nlmsg_put(skb, pid, seq, event, sizeof(*t), flags);
+       nlh = nlmsg_put(skb, portid, seq, event, sizeof(*t), flags);
        if (!nlh)
                goto out_kfree_skb;
        t = nlmsg_data(nlh);
@@ -940,7 +940,7 @@ static int tcf_add_notify(struct net *net, struct tc_action *a,
        nlh->nlmsg_len = skb_tail_pointer(skb) - b;
        NETLINK_CB(skb).dst_group = RTNLGRP_TC;
 
-       err = rtnetlink_send(skb, net, pid, RTNLGRP_TC, flags & NLM_F_ECHO);
+       err = rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO);
        if (err > 0)
                err = 0;
        return err;
@@ -953,7 +953,7 @@ out_kfree_skb:
 
 static int
 tcf_action_add(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
-              u32 pid, int ovr)
+              u32 portid, int ovr)
 {
        int ret = 0;
        struct tc_action *act;
@@ -971,7 +971,7 @@ tcf_action_add(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
        /* dump then free all the actions after update; inserted policy
         * stays intact
         */
-       ret = tcf_add_notify(net, act, pid, seq, RTM_NEWACTION, n->nlmsg_flags);
+       ret = tcf_add_notify(net, act, portid, seq, RTM_NEWACTION, n->nlmsg_flags);
        for (a = act; a; a = act) {
                act = a->next;
                kfree(a);
@@ -984,7 +984,7 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
 {
        struct net *net = sock_net(skb->sk);
        struct nlattr *tca[TCA_ACT_MAX + 1];
-       u32 pid = skb ? NETLINK_CB(skb).pid : 0;
+       u32 portid = skb ? NETLINK_CB(skb).portid : 0;
        int ret = 0, ovr = 0;
 
        ret = nlmsg_parse(n, sizeof(struct tcamsg), tca, TCA_ACT_MAX, NULL);
@@ -1008,17 +1008,17 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
                if (n->nlmsg_flags & NLM_F_REPLACE)
                        ovr = 1;
 replay:
-               ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, pid, ovr);
+               ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, portid, ovr);
                if (ret == -EAGAIN)
                        goto replay;
                break;
        case RTM_DELACTION:
                ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
-                                   pid, RTM_DELACTION);
+                                   portid, RTM_DELACTION);
                break;
        case RTM_GETACTION:
                ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
-                                   pid, RTM_GETACTION);
+                                   portid, RTM_GETACTION);
                break;
        default:
                BUG();
@@ -1085,7 +1085,7 @@ tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
                goto out_module_put;
        }
 
-       nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq,
+       nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
                        cb->nlh->nlmsg_type, sizeof(*t), 0);
        if (!nlh)
                goto out_module_put;
@@ -1109,7 +1109,7 @@ tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
                nla_nest_cancel(skb, nest);
 
        nlh->nlmsg_len = skb_tail_pointer(skb) - b;
-       if (NETLINK_CB(cb->skb).pid && ret)
+       if (NETLINK_CB(cb->skb).portid && ret)
                nlh->nlmsg_flags |= NLM_F_MULTI;
        module_put(a_o->owner);
        return skb->len;
index 6dd1131f2ec1f023f6113c94c832103860dbb6bb..7ae02892437c25bf1e925d6dc756ed23aabff1a3 100644 (file)
@@ -319,7 +319,7 @@ replay:
                }
        }
 
-       err = tp->ops->change(tp, cl, t->tcm_handle, tca, &fh);
+       err = tp->ops->change(skb, tp, cl, t->tcm_handle, tca, &fh);
        if (err == 0) {
                if (tp_created) {
                        spin_lock_bh(root_lock);
@@ -343,13 +343,13 @@ errout:
 }
 
 static int tcf_fill_node(struct sk_buff *skb, struct tcf_proto *tp,
-                        unsigned long fh, u32 pid, u32 seq, u16 flags, int event)
+                        unsigned long fh, u32 portid, u32 seq, u16 flags, int event)
 {
        struct tcmsg *tcm;
        struct nlmsghdr  *nlh;
        unsigned char *b = skb_tail_pointer(skb);
 
-       nlh = nlmsg_put(skb, pid, seq, event, sizeof(*tcm), flags);
+       nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
        if (!nlh)
                goto out_nlmsg_trim;
        tcm = nlmsg_data(nlh);
@@ -381,18 +381,18 @@ static int tfilter_notify(struct net *net, struct sk_buff *oskb,
                          unsigned long fh, int event)
 {
        struct sk_buff *skb;
-       u32 pid = oskb ? NETLINK_CB(oskb).pid : 0;
+       u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
 
        skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
        if (!skb)
                return -ENOBUFS;
 
-       if (tcf_fill_node(skb, tp, fh, pid, n->nlmsg_seq, 0, event) <= 0) {
+       if (tcf_fill_node(skb, tp, fh, portid, n->nlmsg_seq, 0, event) <= 0) {
                kfree_skb(skb);
                return -EINVAL;
        }
 
-       return rtnetlink_send(skb, net, pid, RTNLGRP_TC,
+       return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
                              n->nlmsg_flags & NLM_F_ECHO);
 }
 
@@ -407,7 +407,7 @@ static int tcf_node_dump(struct tcf_proto *tp, unsigned long n,
 {
        struct tcf_dump_args *a = (void *)arg;
 
-       return tcf_fill_node(a->skb, tp, n, NETLINK_CB(a->cb->skb).pid,
+       return tcf_fill_node(a->skb, tp, n, NETLINK_CB(a->cb->skb).portid,
                             a->cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWTFILTER);
 }
 
@@ -465,7 +465,7 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
                if (t > s_t)
                        memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
                if (cb->args[1] == 0) {
-                       if (tcf_fill_node(skb, tp, 0, NETLINK_CB(cb->skb).pid,
+                       if (tcf_fill_node(skb, tp, 0, NETLINK_CB(cb->skb).portid,
                                          cb->nlh->nlmsg_seq, NLM_F_MULTI,
                                          RTM_NEWTFILTER) <= 0)
                                break;
index 590960a22a77fb0747c2fc42c78caff686cd8ac8..344a11b342e5ad333430ba68605bec6ce5f27f5c 100644 (file)
@@ -162,7 +162,8 @@ errout:
        return err;
 }
 
-static int basic_change(struct tcf_proto *tp, unsigned long base, u32 handle,
+static int basic_change(struct sk_buff *in_skb,
+                       struct tcf_proto *tp, unsigned long base, u32 handle,
                        struct nlattr **tca, unsigned long *arg)
 {
        int err;
index 7743ea8d1d387d920dcee43abe4a3bfeb8502bfd..2ecde225ae609af970a39e5acc5c2b3ce797f6b2 100644 (file)
@@ -77,11 +77,18 @@ struct cgroup_subsys net_cls_subsys = {
        .name           = "net_cls",
        .create         = cgrp_create,
        .destroy        = cgrp_destroy,
-#ifdef CONFIG_NET_CLS_CGROUP
        .subsys_id      = net_cls_subsys_id,
-#endif
        .base_cftypes   = ss_files,
        .module         = THIS_MODULE,
+
+       /*
+        * While net_cls cgroup has the rudimentary hierarchy support of
+        * inheriting the parent's classid on cgroup creation, it doesn't
+        * properly propagate config changes in ancestors to their
+        * descendants.  A child should follow the parent's configuration
+        * but be allowed to override it.  Fix it and remove the following.
+        */
+       .broken_hierarchy = true,
 };
 
 struct cls_cgroup_head {
@@ -151,7 +158,8 @@ static const struct nla_policy cgroup_policy[TCA_CGROUP_MAX + 1] = {
        [TCA_CGROUP_EMATCHES]   = { .type = NLA_NESTED },
 };
 
-static int cls_cgroup_change(struct tcf_proto *tp, unsigned long base,
+static int cls_cgroup_change(struct sk_buff *in_skb,
+                            struct tcf_proto *tp, unsigned long base,
                             u32 handle, struct nlattr **tca,
                             unsigned long *arg)
 {
@@ -283,12 +291,6 @@ static int __init init_cgroup_cls(void)
        if (ret)
                goto out;
 
-#ifndef CONFIG_NET_CLS_CGROUP
-       /* We can't use rcu_assign_pointer because this is an int. */
-       smp_wmb();
-       net_cls_subsys_id = net_cls_subsys.subsys_id;
-#endif
-
        ret = register_tcf_proto_ops(&cls_cgroup_ops);
        if (ret)
                cgroup_unload_subsys(&net_cls_subsys);
@@ -301,11 +303,6 @@ static void __exit exit_cgroup_cls(void)
 {
        unregister_tcf_proto_ops(&cls_cgroup_ops);
 
-#ifndef CONFIG_NET_CLS_CGROUP
-       net_cls_subsys_id = -1;
-       synchronize_rcu();
-#endif
-
        cgroup_unload_subsys(&net_cls_subsys);
 }
 
index ccd08c8dc6a72b18f0c7c45f743fa4912f954413..ce82d0cb1b4762e8ffcc58d8e46beacd86e31db0 100644 (file)
@@ -193,15 +193,19 @@ static u32 flow_get_rtclassid(const struct sk_buff *skb)
 
 static u32 flow_get_skuid(const struct sk_buff *skb)
 {
-       if (skb->sk && skb->sk->sk_socket && skb->sk->sk_socket->file)
-               return skb->sk->sk_socket->file->f_cred->fsuid;
+       if (skb->sk && skb->sk->sk_socket && skb->sk->sk_socket->file) {
+               kuid_t skuid = skb->sk->sk_socket->file->f_cred->fsuid;
+               return from_kuid(&init_user_ns, skuid);
+       }
        return 0;
 }
 
 static u32 flow_get_skgid(const struct sk_buff *skb)
 {
-       if (skb->sk && skb->sk->sk_socket && skb->sk->sk_socket->file)
-               return skb->sk->sk_socket->file->f_cred->fsgid;
+       if (skb->sk && skb->sk->sk_socket && skb->sk->sk_socket->file) {
+               kgid_t skgid = skb->sk->sk_socket->file->f_cred->fsgid;
+               return from_kgid(&init_user_ns, skgid);
+       }
        return 0;
 }
 
@@ -347,7 +351,8 @@ static const struct nla_policy flow_policy[TCA_FLOW_MAX + 1] = {
        [TCA_FLOW_PERTURB]      = { .type = NLA_U32 },
 };
 
-static int flow_change(struct tcf_proto *tp, unsigned long base,
+static int flow_change(struct sk_buff *in_skb,
+                      struct tcf_proto *tp, unsigned long base,
                       u32 handle, struct nlattr **tca,
                       unsigned long *arg)
 {
@@ -386,6 +391,10 @@ static int flow_change(struct tcf_proto *tp, unsigned long base,
 
                if (fls(keymask) - 1 > FLOW_KEY_MAX)
                        return -EOPNOTSUPP;
+
+               if ((keymask & (FLOW_KEY_SKUID|FLOW_KEY_SKGID)) &&
+                   sk_user_ns(NETLINK_CB(in_skb).ssk) != &init_user_ns)
+                       return -EOPNOTSUPP;
        }
 
        err = tcf_exts_validate(tp, tb, tca[TCA_RATE], &e, &flow_ext_map);
index 8384a47972403360c22688d8daa1ebdf34338aad..4075a0aef2aa2e83fdf167c056e216c3abfc6204 100644 (file)
@@ -233,7 +233,8 @@ errout:
        return err;
 }
 
-static int fw_change(struct tcf_proto *tp, unsigned long base,
+static int fw_change(struct sk_buff *in_skb,
+                    struct tcf_proto *tp, unsigned long base,
                     u32 handle,
                     struct nlattr **tca,
                     unsigned long *arg)
index 44f405cb9aafa54d07ecbd99d006778f71e5634e..c10d57bf98f2aa036434633db10a03fa530688da 100644 (file)
@@ -427,7 +427,8 @@ errout:
        return err;
 }
 
-static int route4_change(struct tcf_proto *tp, unsigned long base,
+static int route4_change(struct sk_buff *in_skb,
+                      struct tcf_proto *tp, unsigned long base,
                       u32 handle,
                       struct nlattr **tca,
                       unsigned long *arg)
index 18ab93ec8d7e3abb9b25514d09485d4a3e3b21c4..494bbb90924a36445d73ef5c921112b685722128 100644 (file)
@@ -416,7 +416,8 @@ static const struct nla_policy rsvp_policy[TCA_RSVP_MAX + 1] = {
        [TCA_RSVP_PINFO]        = { .len = sizeof(struct tc_rsvp_pinfo) },
 };
 
-static int rsvp_change(struct tcf_proto *tp, unsigned long base,
+static int rsvp_change(struct sk_buff *in_skb,
+                      struct tcf_proto *tp, unsigned long base,
                       u32 handle,
                       struct nlattr **tca,
                       unsigned long *arg)
index fe29420d0b0e5a4241dbe7a5190c2f0686ce1fa0..a1293b4ab7a13a38ae2653bc91bf48527cfdee7a 100644 (file)
@@ -332,7 +332,8 @@ errout:
 }
 
 static int
-tcindex_change(struct tcf_proto *tp, unsigned long base, u32 handle,
+tcindex_change(struct sk_buff *in_skb,
+              struct tcf_proto *tp, unsigned long base, u32 handle,
               struct nlattr **tca, unsigned long *arg)
 {
        struct nlattr *opt = tca[TCA_OPTIONS];
index d45373fb00b968f058459c654f7088551410778f..c7c27bc91b5af300e74fe8434f7fcedb30635df6 100644 (file)
@@ -544,7 +544,8 @@ errout:
        return err;
 }
 
-static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle,
+static int u32_change(struct sk_buff *in_skb,
+                     struct tcf_proto *tp, unsigned long base, u32 handle,
                      struct nlattr **tca,
                      unsigned long *arg)
 {
index 4ab6e33255736b90374d3dc53da60e79c277ebb9..7c3de6ffa5164db0f7abd3f0e2cc0ca2e92ecda9 100644 (file)
@@ -461,7 +461,7 @@ META_COLLECTOR(int_sk_sndtimeo)
 META_COLLECTOR(int_sk_sendmsg_off)
 {
        SKIP_NONLOCAL(skb);
-       dst->value = skb->sk->sk_sndmsg_off;
+       dst->value = skb->sk->sk_frag.offset;
 }
 
 META_COLLECTOR(int_sk_write_pend)
index a08b4ab3e421da67538f019cd6d7ec19f49fb12d..a18d975db59cea34eb0558490deb800f24c10d22 100644 (file)
@@ -1185,7 +1185,7 @@ graft:
 }
 
 static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
-                        u32 pid, u32 seq, u16 flags, int event)
+                        u32 portid, u32 seq, u16 flags, int event)
 {
        struct tcmsg *tcm;
        struct nlmsghdr  *nlh;
@@ -1193,7 +1193,7 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
        struct gnet_dump d;
        struct qdisc_size_table *stab;
 
-       nlh = nlmsg_put(skb, pid, seq, event, sizeof(*tcm), flags);
+       nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
        if (!nlh)
                goto out_nlmsg_trim;
        tcm = nlmsg_data(nlh);
@@ -1248,25 +1248,25 @@ static int qdisc_notify(struct net *net, struct sk_buff *oskb,
                        struct Qdisc *old, struct Qdisc *new)
 {
        struct sk_buff *skb;
-       u32 pid = oskb ? NETLINK_CB(oskb).pid : 0;
+       u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
 
        skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
        if (!skb)
                return -ENOBUFS;
 
        if (old && !tc_qdisc_dump_ignore(old)) {
-               if (tc_fill_qdisc(skb, old, clid, pid, n->nlmsg_seq,
+               if (tc_fill_qdisc(skb, old, clid, portid, n->nlmsg_seq,
                                  0, RTM_DELQDISC) < 0)
                        goto err_out;
        }
        if (new && !tc_qdisc_dump_ignore(new)) {
-               if (tc_fill_qdisc(skb, new, clid, pid, n->nlmsg_seq,
+               if (tc_fill_qdisc(skb, new, clid, portid, n->nlmsg_seq,
                                  old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)
                        goto err_out;
        }
 
        if (skb->len)
-               return rtnetlink_send(skb, net, pid, RTNLGRP_TC,
+               return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
                                      n->nlmsg_flags & NLM_F_ECHO);
 
 err_out:
@@ -1289,7 +1289,7 @@ static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
                q_idx++;
        } else {
                if (!tc_qdisc_dump_ignore(q) &&
-                   tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).pid,
+                   tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
                                  cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
                        goto done;
                q_idx++;
@@ -1300,7 +1300,7 @@ static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
                        continue;
                }
                if (!tc_qdisc_dump_ignore(q) &&
-                   tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).pid,
+                   tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
                                  cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
                        goto done;
                q_idx++;
@@ -1375,7 +1375,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
        const struct Qdisc_class_ops *cops;
        unsigned long cl = 0;
        unsigned long new_cl;
-       u32 pid = tcm->tcm_parent;
+       u32 portid = tcm->tcm_parent;
        u32 clid = tcm->tcm_handle;
        u32 qid = TC_H_MAJ(clid);
        int err;
@@ -1403,8 +1403,8 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
 
        /* Step 1. Determine qdisc handle X:0 */
 
-       if (pid != TC_H_ROOT) {
-               u32 qid1 = TC_H_MAJ(pid);
+       if (portid != TC_H_ROOT) {
+               u32 qid1 = TC_H_MAJ(portid);
 
                if (qid && qid1) {
                        /* If both majors are known, they must be identical. */
@@ -1418,10 +1418,10 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
                /* Now qid is genuine qdisc handle consistent
                 * both with parent and child.
                 *
-                * TC_H_MAJ(pid) still may be unspecified, complete it now.
+                * TC_H_MAJ(portid) still may be unspecified, complete it now.
                 */
-               if (pid)
-                       pid = TC_H_MAKE(qid, pid);
+               if (portid)
+                       portid = TC_H_MAKE(qid, portid);
        } else {
                if (qid == 0)
                        qid = dev->qdisc->handle;
@@ -1439,7 +1439,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
 
        /* Now try to get class */
        if (clid == 0) {
-               if (pid == TC_H_ROOT)
+               if (portid == TC_H_ROOT)
                        clid = qid;
        } else
                clid = TC_H_MAKE(qid, clid);
@@ -1478,7 +1478,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
        new_cl = cl;
        err = -EOPNOTSUPP;
        if (cops->change)
-               err = cops->change(q, clid, pid, tca, &new_cl);
+               err = cops->change(q, clid, portid, tca, &new_cl);
        if (err == 0)
                tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS);
 
@@ -1492,7 +1492,7 @@ out:
 
 static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
                          unsigned long cl,
-                         u32 pid, u32 seq, u16 flags, int event)
+                         u32 portid, u32 seq, u16 flags, int event)
 {
        struct tcmsg *tcm;
        struct nlmsghdr  *nlh;
@@ -1500,7 +1500,7 @@ static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
        struct gnet_dump d;
        const struct Qdisc_class_ops *cl_ops = q->ops->cl_ops;
 
-       nlh = nlmsg_put(skb, pid, seq, event, sizeof(*tcm), flags);
+       nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
        if (!nlh)
                goto out_nlmsg_trim;
        tcm = nlmsg_data(nlh);
@@ -1540,18 +1540,18 @@ static int tclass_notify(struct net *net, struct sk_buff *oskb,
                         unsigned long cl, int event)
 {
        struct sk_buff *skb;
-       u32 pid = oskb ? NETLINK_CB(oskb).pid : 0;
+       u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
 
        skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
        if (!skb)
                return -ENOBUFS;
 
-       if (tc_fill_tclass(skb, q, cl, pid, n->nlmsg_seq, 0, event) < 0) {
+       if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0, event) < 0) {
                kfree_skb(skb);
                return -EINVAL;
        }
 
-       return rtnetlink_send(skb, net, pid, RTNLGRP_TC,
+       return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
                              n->nlmsg_flags & NLM_F_ECHO);
 }
 
@@ -1565,7 +1565,7 @@ static int qdisc_class_dump(struct Qdisc *q, unsigned long cl, struct qdisc_walk
 {
        struct qdisc_dump_args *a = (struct qdisc_dump_args *)arg;
 
-       return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).pid,
+       return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).portid,
                              a->cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWTCLASS);
 }
 
index 9ce0b4fe23ffcb95db43bd11f0f45fe1f4edf65f..71e50c80315fe00437461f59aee17a2b5b2b09b1 100644 (file)
@@ -352,7 +352,7 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
        struct drr_sched *q = qdisc_priv(sch);
        struct drr_class *cl;
-       int err;
+       int err = 0;
 
        cl = drr_classify(skb, sch, &err);
        if (cl == NULL) {
index 511323e89cecb221f9650d9d35e13c20d5d54b29..aefc1504dc88f8b07963c390417f6dbf26544fa5 100644 (file)
@@ -324,24 +324,6 @@ void netif_carrier_off(struct net_device *dev)
 }
 EXPORT_SYMBOL(netif_carrier_off);
 
-/**
- *     netif_notify_peers - notify network peers about existence of @dev
- *     @dev: network device
- *
- * Generate traffic such that interested network peers are aware of
- * @dev, such as by generating a gratuitous ARP. This may be used when
- * a device wants to inform the rest of the network about some sort of
- * reconfiguration such as a failover event or virtual machine
- * migration.
- */
-void netif_notify_peers(struct net_device *dev)
-{
-       rtnl_lock();
-       call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
-       rtnl_unlock();
-}
-EXPORT_SYMBOL(netif_notify_peers);
-
 /* "NOOP" scheduler: the best scheduler, recommended for all interfaces
    under all circumstances. It is difficult to invent anything faster or
    cheaper.
@@ -545,6 +527,8 @@ struct Qdisc_ops pfifo_fast_ops __read_mostly = {
 };
 EXPORT_SYMBOL(pfifo_fast_ops);
 
+static struct lock_class_key qdisc_tx_busylock;
+
 struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
                          struct Qdisc_ops *ops)
 {
@@ -552,6 +536,7 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
        struct Qdisc *sch;
        unsigned int size = QDISC_ALIGN(sizeof(*sch)) + ops->priv_size;
        int err = -ENOBUFS;
+       struct net_device *dev = dev_queue->dev;
 
        p = kzalloc_node(size, GFP_KERNEL,
                         netdev_queue_numa_node_read(dev_queue));
@@ -571,12 +556,16 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
        }
        INIT_LIST_HEAD(&sch->list);
        skb_queue_head_init(&sch->q);
+
        spin_lock_init(&sch->busylock);
+       lockdep_set_class(&sch->busylock,
+                         dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);
+
        sch->ops = ops;
        sch->enqueue = ops->enqueue;
        sch->dequeue = ops->dequeue;
        sch->dev_queue = dev_queue;
-       dev_hold(qdisc_dev(sch));
+       dev_hold(dev);
        atomic_set(&sch->refcnt, 1);
 
        return sch;
index 211a212170451c41f352f087dc9e0b3af9e8ae2c..f0dd83cff90652dc870f5bc720f9ae1a404b8931 100644 (file)
@@ -881,7 +881,7 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
        struct qfq_sched *q = qdisc_priv(sch);
        struct qfq_class *cl;
-       int err;
+       int err = 0;
 
        cl = qfq_classify(skb, sch, &err);
        if (cl == NULL) {
index ebaef3ed6065bee6d49880cde701ee49b26585e4..b1ef3bc301a5ad424fb041c0f6f37c010098bcc3 100644 (file)
@@ -82,6 +82,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
                                          sctp_scope_t scope,
                                          gfp_t gfp)
 {
+       struct net *net = sock_net(sk);
        struct sctp_sock *sp;
        int i;
        sctp_paramhdr_t *p;
@@ -124,7 +125,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
         * socket values.
         */
        asoc->max_retrans = sp->assocparams.sasoc_asocmaxrxt;
-       asoc->pf_retrans  = sctp_pf_retrans;
+       asoc->pf_retrans  = net->sctp.pf_retrans;
 
        asoc->rto_initial = msecs_to_jiffies(sp->rtoinfo.srto_initial);
        asoc->rto_max = msecs_to_jiffies(sp->rtoinfo.srto_max);
@@ -175,7 +176,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
        asoc->timeouts[SCTP_EVENT_TIMEOUT_HEARTBEAT] = 0;
        asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay;
        asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] =
-               min_t(unsigned long, sp->autoclose, sctp_max_autoclose) * HZ;
+               min_t(unsigned long, sp->autoclose, net->sctp.max_autoclose) * HZ;
 
        /* Initializes the timers */
        for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i)
@@ -281,7 +282,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
         * and will revert old behavior.
         */
        asoc->peer.asconf_capable = 0;
-       if (sctp_addip_noauth)
+       if (net->sctp.addip_noauth)
                asoc->peer.asconf_capable = 1;
        asoc->asconf_addr_del_pending = NULL;
        asoc->src_out_of_asoc_ok = 0;
@@ -641,6 +642,7 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
                                           const gfp_t gfp,
                                           const int peer_state)
 {
+       struct net *net = sock_net(asoc->base.sk);
        struct sctp_transport *peer;
        struct sctp_sock *sp;
        unsigned short port;
@@ -674,7 +676,7 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
                return peer;
        }
 
-       peer = sctp_transport_new(addr, gfp);
+       peer = sctp_transport_new(net, addr, gfp);
        if (!peer)
                return NULL;
 
@@ -1089,13 +1091,15 @@ out:
 
 /* Is this the association we are looking for? */
 struct sctp_transport *sctp_assoc_is_match(struct sctp_association *asoc,
+                                          struct net *net,
                                           const union sctp_addr *laddr,
                                           const union sctp_addr *paddr)
 {
        struct sctp_transport *transport;
 
        if ((htons(asoc->base.bind_addr.port) == laddr->v4.sin_port) &&
-           (htons(asoc->peer.port) == paddr->v4.sin_port)) {
+           (htons(asoc->peer.port) == paddr->v4.sin_port) &&
+           net_eq(sock_net(asoc->base.sk), net)) {
                transport = sctp_assoc_lookup_paddr(asoc, paddr);
                if (!transport)
                        goto out;
@@ -1116,6 +1120,7 @@ static void sctp_assoc_bh_rcv(struct work_struct *work)
        struct sctp_association *asoc =
                container_of(work, struct sctp_association,
                             base.inqueue.immediate);
+       struct net *net = sock_net(asoc->base.sk);
        struct sctp_endpoint *ep;
        struct sctp_chunk *chunk;
        struct sctp_inq *inqueue;
@@ -1148,13 +1153,13 @@ static void sctp_assoc_bh_rcv(struct work_struct *work)
                if (sctp_chunk_is_data(chunk))
                        asoc->peer.last_data_from = chunk->transport;
                else
-                       SCTP_INC_STATS(SCTP_MIB_INCTRLCHUNKS);
+                       SCTP_INC_STATS(net, SCTP_MIB_INCTRLCHUNKS);
 
                if (chunk->transport)
                        chunk->transport->last_time_heard = jiffies;
 
                /* Run through the state machine. */
-               error = sctp_do_sm(SCTP_EVENT_T_CHUNK, subtype,
+               error = sctp_do_sm(net, SCTP_EVENT_T_CHUNK, subtype,
                                   state, ep, asoc, chunk, GFP_ATOMIC);
 
                /* Check to see if the association is freed in response to
@@ -1414,6 +1419,7 @@ void sctp_assoc_sync_pmtu(struct sock *sk, struct sctp_association *asoc)
 /* Should we send a SACK to update our peer? */
 static inline int sctp_peer_needs_update(struct sctp_association *asoc)
 {
+       struct net *net = sock_net(asoc->base.sk);
        switch (asoc->state) {
        case SCTP_STATE_ESTABLISHED:
        case SCTP_STATE_SHUTDOWN_PENDING:
@@ -1421,7 +1427,7 @@ static inline int sctp_peer_needs_update(struct sctp_association *asoc)
        case SCTP_STATE_SHUTDOWN_SENT:
                if ((asoc->rwnd > asoc->a_rwnd) &&
                    ((asoc->rwnd - asoc->a_rwnd) >= max_t(__u32,
-                          (asoc->base.sk->sk_rcvbuf >> sctp_rwnd_upd_shift),
+                          (asoc->base.sk->sk_rcvbuf >> net->sctp.rwnd_upd_shift),
                           asoc->pathmtu)))
                        return 1;
                break;
@@ -1542,7 +1548,8 @@ int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc,
        if (asoc->peer.ipv6_address)
                flags |= SCTP_ADDR6_PEERSUPP;
 
-       return sctp_bind_addr_copy(&asoc->base.bind_addr,
+       return sctp_bind_addr_copy(sock_net(asoc->base.sk),
+                                  &asoc->base.bind_addr,
                                   &asoc->ep->base.bind_addr,
                                   scope, gfp, flags);
 }
index bf812048cf6f7a244c547e0cd31a731351abfab3..159b9bc5d63300e53560cf6495f8f65b9fd06449 100644 (file)
@@ -392,13 +392,14 @@ nomem:
  */
 int sctp_auth_asoc_init_active_key(struct sctp_association *asoc, gfp_t gfp)
 {
+       struct net *net = sock_net(asoc->base.sk);
        struct sctp_auth_bytes  *secret;
        struct sctp_shared_key *ep_key;
 
        /* If we don't support AUTH, or peer is not capable
         * we don't need to do anything.
         */
-       if (!sctp_auth_enable || !asoc->peer.auth_capable)
+       if (!net->sctp.auth_enable || !asoc->peer.auth_capable)
                return 0;
 
        /* If the key_id is non-zero and we couldn't find an
@@ -445,11 +446,12 @@ struct sctp_shared_key *sctp_auth_get_shkey(
  */
 int sctp_auth_init_hmacs(struct sctp_endpoint *ep, gfp_t gfp)
 {
+       struct net *net = sock_net(ep->base.sk);
        struct crypto_hash *tfm = NULL;
        __u16   id;
 
        /* if the transforms are already allocted, we are done */
-       if (!sctp_auth_enable) {
+       if (!net->sctp.auth_enable) {
                ep->auth_hmacs = NULL;
                return 0;
        }
@@ -674,7 +676,12 @@ static int __sctp_auth_cid(sctp_cid_t chunk, struct sctp_chunks_param *param)
 /* Check if peer requested that this chunk is authenticated */
 int sctp_auth_send_cid(sctp_cid_t chunk, const struct sctp_association *asoc)
 {
-       if (!sctp_auth_enable || !asoc || !asoc->peer.auth_capable)
+       struct net  *net;
+       if (!asoc)
+               return 0;
+
+       net = sock_net(asoc->base.sk);
+       if (!net->sctp.auth_enable || !asoc->peer.auth_capable)
                return 0;
 
        return __sctp_auth_cid(chunk, asoc->peer.peer_chunks);
@@ -683,7 +690,12 @@ int sctp_auth_send_cid(sctp_cid_t chunk, const struct sctp_association *asoc)
 /* Check if we requested that peer authenticate this chunk. */
 int sctp_auth_recv_cid(sctp_cid_t chunk, const struct sctp_association *asoc)
 {
-       if (!sctp_auth_enable || !asoc)
+       struct net *net;
+       if (!asoc)
+               return 0;
+
+       net = sock_net(asoc->base.sk);
+       if (!net->sctp.auth_enable)
                return 0;
 
        return __sctp_auth_cid(chunk,
index 4ece451c8d27d59ba2a73fba1baf6cb8ca19fe4e..d886b3bf84f5a1823e208d3c784db05a6c88938a 100644 (file)
@@ -52,8 +52,8 @@
 #include <net/sctp/sm.h>
 
 /* Forward declarations for internal helpers. */
-static int sctp_copy_one_addr(struct sctp_bind_addr *, union sctp_addr *,
-                             sctp_scope_t scope, gfp_t gfp,
+static int sctp_copy_one_addr(struct net *, struct sctp_bind_addr *,
+                             union sctp_addr *, sctp_scope_t scope, gfp_t gfp,
                              int flags);
 static void sctp_bind_addr_clean(struct sctp_bind_addr *);
 
@@ -62,7 +62,7 @@ static void sctp_bind_addr_clean(struct sctp_bind_addr *);
 /* Copy 'src' to 'dest' taking 'scope' into account.  Omit addresses
  * in 'src' which have a broader scope than 'scope'.
  */
-int sctp_bind_addr_copy(struct sctp_bind_addr *dest,
+int sctp_bind_addr_copy(struct net *net, struct sctp_bind_addr *dest,
                        const struct sctp_bind_addr *src,
                        sctp_scope_t scope, gfp_t gfp,
                        int flags)
@@ -75,7 +75,7 @@ int sctp_bind_addr_copy(struct sctp_bind_addr *dest,
 
        /* Extract the addresses which are relevant for this scope.  */
        list_for_each_entry(addr, &src->address_list, list) {
-               error = sctp_copy_one_addr(dest, &addr->a, scope,
+               error = sctp_copy_one_addr(net, dest, &addr->a, scope,
                                           gfp, flags);
                if (error < 0)
                        goto out;
@@ -87,7 +87,7 @@ int sctp_bind_addr_copy(struct sctp_bind_addr *dest,
         */
        if (list_empty(&dest->address_list) && (SCTP_SCOPE_GLOBAL == scope)) {
                list_for_each_entry(addr, &src->address_list, list) {
-                       error = sctp_copy_one_addr(dest, &addr->a,
+                       error = sctp_copy_one_addr(net, dest, &addr->a,
                                                   SCTP_SCOPE_LINK, gfp,
                                                   flags);
                        if (error < 0)
@@ -448,7 +448,7 @@ union sctp_addr *sctp_find_unmatch_addr(struct sctp_bind_addr       *bp,
 }
 
 /* Copy out addresses from the global local address list. */
-static int sctp_copy_one_addr(struct sctp_bind_addr *dest,
+static int sctp_copy_one_addr(struct net *net, struct sctp_bind_addr *dest,
                              union sctp_addr *addr,
                              sctp_scope_t scope, gfp_t gfp,
                              int flags)
@@ -456,8 +456,8 @@ static int sctp_copy_one_addr(struct sctp_bind_addr *dest,
        int error = 0;
 
        if (sctp_is_any(NULL, addr)) {
-               error = sctp_copy_local_addr_list(dest, scope, gfp, flags);
-       } else if (sctp_in_scope(addr, scope)) {
+               error = sctp_copy_local_addr_list(net, dest, scope, gfp, flags);
+       } else if (sctp_in_scope(net, addr, scope)) {
                /* Now that the address is in scope, check to see if
                 * the address type is supported by local sock as
                 * well as the remote peer.
@@ -494,7 +494,7 @@ int sctp_is_any(struct sock *sk, const union sctp_addr *addr)
 }
 
 /* Is 'addr' valid for 'scope'?  */
-int sctp_in_scope(const union sctp_addr *addr, sctp_scope_t scope)
+int sctp_in_scope(struct net *net, const union sctp_addr *addr, sctp_scope_t scope)
 {
        sctp_scope_t addr_scope = sctp_scope(addr);
 
@@ -512,7 +512,7 @@ int sctp_in_scope(const union sctp_addr *addr, sctp_scope_t scope)
         * Address scoping can be selectively controlled via sysctl
         * option
         */
-       switch (sctp_scope_policy) {
+       switch (net->sctp.scope_policy) {
        case SCTP_SCOPE_POLICY_DISABLE:
                return 1;
        case SCTP_SCOPE_POLICY_ENABLE:
index 6c8556459a751b3e2faa6b0442b804396ff6de7e..7c2df9c33df37a588c426e7945cd71233edd4577 100644 (file)
@@ -257,7 +257,7 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
        offset = 0;
 
        if ((whole > 1) || (whole && over))
-               SCTP_INC_STATS_USER(SCTP_MIB_FRAGUSRMSGS);
+               SCTP_INC_STATS_USER(sock_net(asoc->base.sk), SCTP_MIB_FRAGUSRMSGS);
 
        /* Create chunks for all the full sized DATA chunks. */
        for (i=0, len=first_len; i < whole; i++) {
index 68a385d7c3bdaaab2ebed8c7f4bec94dc53d4c79..1859e2bc83d113d1a14d01f904475b01099e0626 100644 (file)
@@ -65,6 +65,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
                                                struct sock *sk,
                                                gfp_t gfp)
 {
+       struct net *net = sock_net(sk);
        struct sctp_hmac_algo_param *auth_hmacs = NULL;
        struct sctp_chunks_param *auth_chunks = NULL;
        struct sctp_shared_key *null_key;
@@ -74,7 +75,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
        if (!ep->digest)
                return NULL;
 
-       if (sctp_auth_enable) {
+       if (net->sctp.auth_enable) {
                /* Allocate space for HMACS and CHUNKS authentication
                 * variables.  There are arrays that we encode directly
                 * into parameters to make the rest of the operations easier.
@@ -106,7 +107,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
                /* If the Add-IP functionality is enabled, we must
                 * authenticate, ASCONF and ASCONF-ACK chunks
                 */
-               if (sctp_addip_enable) {
+               if (net->sctp.addip_enable) {
                        auth_chunks->chunks[0] = SCTP_CID_ASCONF;
                        auth_chunks->chunks[1] = SCTP_CID_ASCONF_ACK;
                        auth_chunks->param_hdr.length =
@@ -140,14 +141,14 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
        INIT_LIST_HEAD(&ep->asocs);
 
        /* Use SCTP specific send buffer space queues.  */
-       ep->sndbuf_policy = sctp_sndbuf_policy;
+       ep->sndbuf_policy = net->sctp.sndbuf_policy;
 
        sk->sk_data_ready = sctp_data_ready;
        sk->sk_write_space = sctp_write_space;
        sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
 
        /* Get the receive buffer policy for this endpoint */
-       ep->rcvbuf_policy = sctp_rcvbuf_policy;
+       ep->rcvbuf_policy = net->sctp.rcvbuf_policy;
 
        /* Initialize the secret key used with cookie. */
        get_random_bytes(&ep->secret_key[0], SCTP_SECRET_SIZE);
@@ -302,11 +303,13 @@ void sctp_endpoint_put(struct sctp_endpoint *ep)
 
 /* Is this the endpoint we are looking for?  */
 struct sctp_endpoint *sctp_endpoint_is_match(struct sctp_endpoint *ep,
+                                              struct net *net,
                                               const union sctp_addr *laddr)
 {
        struct sctp_endpoint *retval = NULL;
 
-       if (htons(ep->base.bind_addr.port) == laddr->v4.sin_port) {
+       if ((htons(ep->base.bind_addr.port) == laddr->v4.sin_port) &&
+           net_eq(sock_net(ep->base.sk), net)) {
                if (sctp_bind_addr_match(&ep->base.bind_addr, laddr,
                                         sctp_sk(ep->base.sk)))
                        retval = ep;
@@ -343,7 +346,8 @@ static struct sctp_association *__sctp_endpoint_lookup_assoc(
 
        rport = ntohs(paddr->v4.sin_port);
 
-       hash = sctp_assoc_hashfn(ep->base.bind_addr.port, rport);
+       hash = sctp_assoc_hashfn(sock_net(ep->base.sk), ep->base.bind_addr.port,
+                                rport);
        head = &sctp_assoc_hashtable[hash];
        read_lock(&head->lock);
        sctp_for_each_hentry(epb, node, &head->chain) {
@@ -386,13 +390,14 @@ int sctp_endpoint_is_peeled_off(struct sctp_endpoint *ep,
 {
        struct sctp_sockaddr_entry *addr;
        struct sctp_bind_addr *bp;
+       struct net *net = sock_net(ep->base.sk);
 
        bp = &ep->base.bind_addr;
        /* This function is called with the socket lock held,
         * so the address_list can not change.
         */
        list_for_each_entry(addr, &bp->address_list, list) {
-               if (sctp_has_association(&addr->a, paddr))
+               if (sctp_has_association(net, &addr->a, paddr))
                        return 1;
        }
 
@@ -409,6 +414,7 @@ static void sctp_endpoint_bh_rcv(struct work_struct *work)
                             base.inqueue.immediate);
        struct sctp_association *asoc;
        struct sock *sk;
+       struct net *net;
        struct sctp_transport *transport;
        struct sctp_chunk *chunk;
        struct sctp_inq *inqueue;
@@ -423,6 +429,7 @@ static void sctp_endpoint_bh_rcv(struct work_struct *work)
        asoc = NULL;
        inqueue = &ep->base.inqueue;
        sk = ep->base.sk;
+       net = sock_net(sk);
 
        while (NULL != (chunk = sctp_inq_pop(inqueue))) {
                subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type);
@@ -474,12 +481,12 @@ normal:
                if (asoc && sctp_chunk_is_data(chunk))
                        asoc->peer.last_data_from = chunk->transport;
                else
-                       SCTP_INC_STATS(SCTP_MIB_INCTRLCHUNKS);
+                       SCTP_INC_STATS(sock_net(ep->base.sk), SCTP_MIB_INCTRLCHUNKS);
 
                if (chunk->transport)
                        chunk->transport->last_time_heard = jiffies;
 
-               error = sctp_do_sm(SCTP_EVENT_T_CHUNK, subtype, state,
+               error = sctp_do_sm(net, SCTP_EVENT_T_CHUNK, subtype, state,
                                   ep, asoc, chunk, GFP_ATOMIC);
 
                if (error && chunk)
index e64d5210ed130610402b261218360adc5295102c..25dfe7380479e9598e5732a753349e324aea463a 100644 (file)
 
 /* Forward declarations for internal helpers. */
 static int sctp_rcv_ootb(struct sk_buff *);
-static struct sctp_association *__sctp_rcv_lookup(struct sk_buff *skb,
+static struct sctp_association *__sctp_rcv_lookup(struct net *net,
+                                     struct sk_buff *skb,
                                      const union sctp_addr *laddr,
                                      const union sctp_addr *paddr,
                                      struct sctp_transport **transportp);
-static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(const union sctp_addr *laddr);
+static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(struct net *net,
+                                               const union sctp_addr *laddr);
 static struct sctp_association *__sctp_lookup_association(
+                                       struct net *net,
                                        const union sctp_addr *local,
                                        const union sctp_addr *peer,
                                        struct sctp_transport **pt);
@@ -80,7 +83,7 @@ static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb);
 
 
 /* Calculate the SCTP checksum of an SCTP packet.  */
-static inline int sctp_rcv_checksum(struct sk_buff *skb)
+static inline int sctp_rcv_checksum(struct net *net, struct sk_buff *skb)
 {
        struct sctphdr *sh = sctp_hdr(skb);
        __le32 cmp = sh->checksum;
@@ -96,7 +99,7 @@ static inline int sctp_rcv_checksum(struct sk_buff *skb)
 
        if (val != cmp) {
                /* CRC failure, dump it. */
-               SCTP_INC_STATS_BH(SCTP_MIB_CHECKSUMERRORS);
+               SCTP_INC_STATS_BH(net, SCTP_MIB_CHECKSUMERRORS);
                return -1;
        }
        return 0;
@@ -129,11 +132,12 @@ int sctp_rcv(struct sk_buff *skb)
        union sctp_addr dest;
        int family;
        struct sctp_af *af;
+       struct net *net = dev_net(skb->dev);
 
        if (skb->pkt_type!=PACKET_HOST)
                goto discard_it;
 
-       SCTP_INC_STATS_BH(SCTP_MIB_INSCTPPACKS);
+       SCTP_INC_STATS_BH(net, SCTP_MIB_INSCTPPACKS);
 
        if (skb_linearize(skb))
                goto discard_it;
@@ -145,7 +149,7 @@ int sctp_rcv(struct sk_buff *skb)
        if (skb->len < sizeof(struct sctphdr))
                goto discard_it;
        if (!sctp_checksum_disable && !skb_csum_unnecessary(skb) &&
-                 sctp_rcv_checksum(skb) < 0)
+                 sctp_rcv_checksum(net, skb) < 0)
                goto discard_it;
 
        skb_pull(skb, sizeof(struct sctphdr));
@@ -178,10 +182,10 @@ int sctp_rcv(struct sk_buff *skb)
            !af->addr_valid(&dest, NULL, skb))
                goto discard_it;
 
-       asoc = __sctp_rcv_lookup(skb, &src, &dest, &transport);
+       asoc = __sctp_rcv_lookup(net, skb, &src, &dest, &transport);
 
        if (!asoc)
-               ep = __sctp_rcv_lookup_endpoint(&dest);
+               ep = __sctp_rcv_lookup_endpoint(net, &dest);
 
        /* Retrieve the common input handling substructure. */
        rcvr = asoc ? &asoc->base : &ep->base;
@@ -200,7 +204,7 @@ int sctp_rcv(struct sk_buff *skb)
                        sctp_endpoint_put(ep);
                        ep = NULL;
                }
-               sk = sctp_get_ctl_sock();
+               sk = net->sctp.ctl_sock;
                ep = sctp_sk(sk)->ep;
                sctp_endpoint_hold(ep);
                rcvr = &ep->base;
@@ -216,7 +220,7 @@ int sctp_rcv(struct sk_buff *skb)
         */
        if (!asoc) {
                if (sctp_rcv_ootb(skb)) {
-                       SCTP_INC_STATS_BH(SCTP_MIB_OUTOFBLUES);
+                       SCTP_INC_STATS_BH(net, SCTP_MIB_OUTOFBLUES);
                        goto discard_release;
                }
        }
@@ -272,9 +276,9 @@ int sctp_rcv(struct sk_buff *skb)
                        skb = NULL; /* sctp_chunk_free already freed the skb */
                        goto discard_release;
                }
-               SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_BACKLOG);
+               SCTP_INC_STATS_BH(net, SCTP_MIB_IN_PKT_BACKLOG);
        } else {
-               SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_SOFTIRQ);
+               SCTP_INC_STATS_BH(net, SCTP_MIB_IN_PKT_SOFTIRQ);
                sctp_inq_push(&chunk->rcvr->inqueue, chunk);
        }
 
@@ -289,7 +293,7 @@ int sctp_rcv(struct sk_buff *skb)
        return 0;
 
 discard_it:
-       SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_DISCARDS);
+       SCTP_INC_STATS_BH(net, SCTP_MIB_IN_PKT_DISCARDS);
        kfree_skb(skb);
        return 0;
 
@@ -462,11 +466,13 @@ void sctp_icmp_proto_unreachable(struct sock *sk,
                }
                        
        } else {
+               struct net *net = sock_net(sk);
+
                if (timer_pending(&t->proto_unreach_timer) &&
                    del_timer(&t->proto_unreach_timer))
                        sctp_association_put(asoc);
 
-               sctp_do_sm(SCTP_EVENT_T_OTHER,
+               sctp_do_sm(net, SCTP_EVENT_T_OTHER,
                           SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH),
                           asoc->state, asoc->ep, asoc, t,
                           GFP_ATOMIC);
@@ -474,7 +480,7 @@ void sctp_icmp_proto_unreachable(struct sock *sk,
 }
 
 /* Common lookup code for icmp/icmpv6 error handler. */
-struct sock *sctp_err_lookup(int family, struct sk_buff *skb,
+struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *skb,
                             struct sctphdr *sctphdr,
                             struct sctp_association **app,
                             struct sctp_transport **tpp)
@@ -503,7 +509,7 @@ struct sock *sctp_err_lookup(int family, struct sk_buff *skb,
        /* Look for an association that matches the incoming ICMP error
         * packet.
         */
-       asoc = __sctp_lookup_association(&saddr, &daddr, &transport);
+       asoc = __sctp_lookup_association(net, &saddr, &daddr, &transport);
        if (!asoc)
                return NULL;
 
@@ -539,7 +545,7 @@ struct sock *sctp_err_lookup(int family, struct sk_buff *skb,
         * servers this needs to be solved differently.
         */
        if (sock_owned_by_user(sk))
-               NET_INC_STATS_BH(&init_net, LINUX_MIB_LOCKDROPPEDICMPS);
+               NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
 
        *app = asoc;
        *tpp = transport;
@@ -586,9 +592,10 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info)
        struct inet_sock *inet;
        sk_buff_data_t saveip, savesctp;
        int err;
+       struct net *net = dev_net(skb->dev);
 
        if (skb->len < ihlen + 8) {
-               ICMP_INC_STATS_BH(&init_net, ICMP_MIB_INERRORS);
+               ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
                return;
        }
 
@@ -597,12 +604,12 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info)
        savesctp = skb->transport_header;
        skb_reset_network_header(skb);
        skb_set_transport_header(skb, ihlen);
-       sk = sctp_err_lookup(AF_INET, skb, sctp_hdr(skb), &asoc, &transport);
+       sk = sctp_err_lookup(net, AF_INET, skb, sctp_hdr(skb), &asoc, &transport);
        /* Put back, the original values. */
        skb->network_header = saveip;
        skb->transport_header = savesctp;
        if (!sk) {
-               ICMP_INC_STATS_BH(&init_net, ICMP_MIB_INERRORS);
+               ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
                return;
        }
        /* Warning:  The sock lock is held.  Remember to call
@@ -723,12 +730,13 @@ discard:
 /* Insert endpoint into the hash table.  */
 static void __sctp_hash_endpoint(struct sctp_endpoint *ep)
 {
+       struct net *net = sock_net(ep->base.sk);
        struct sctp_ep_common *epb;
        struct sctp_hashbucket *head;
 
        epb = &ep->base;
 
-       epb->hashent = sctp_ep_hashfn(epb->bind_addr.port);
+       epb->hashent = sctp_ep_hashfn(net, epb->bind_addr.port);
        head = &sctp_ep_hashtable[epb->hashent];
 
        sctp_write_lock(&head->lock);
@@ -747,12 +755,13 @@ void sctp_hash_endpoint(struct sctp_endpoint *ep)
 /* Remove endpoint from the hash table.  */
 static void __sctp_unhash_endpoint(struct sctp_endpoint *ep)
 {
+       struct net *net = sock_net(ep->base.sk);
        struct sctp_hashbucket *head;
        struct sctp_ep_common *epb;
 
        epb = &ep->base;
 
-       epb->hashent = sctp_ep_hashfn(epb->bind_addr.port);
+       epb->hashent = sctp_ep_hashfn(net, epb->bind_addr.port);
 
        head = &sctp_ep_hashtable[epb->hashent];
 
@@ -770,7 +779,8 @@ void sctp_unhash_endpoint(struct sctp_endpoint *ep)
 }
 
 /* Look up an endpoint. */
-static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(const union sctp_addr *laddr)
+static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(struct net *net,
+                                               const union sctp_addr *laddr)
 {
        struct sctp_hashbucket *head;
        struct sctp_ep_common *epb;
@@ -778,16 +788,16 @@ static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(const union sctp_addr *l
        struct hlist_node *node;
        int hash;
 
-       hash = sctp_ep_hashfn(ntohs(laddr->v4.sin_port));
+       hash = sctp_ep_hashfn(net, ntohs(laddr->v4.sin_port));
        head = &sctp_ep_hashtable[hash];
        read_lock(&head->lock);
        sctp_for_each_hentry(epb, node, &head->chain) {
                ep = sctp_ep(epb);
-               if (sctp_endpoint_is_match(ep, laddr))
+               if (sctp_endpoint_is_match(ep, net, laddr))
                        goto hit;
        }
 
-       ep = sctp_sk((sctp_get_ctl_sock()))->ep;
+       ep = sctp_sk(net->sctp.ctl_sock)->ep;
 
 hit:
        sctp_endpoint_hold(ep);
@@ -798,13 +808,15 @@ hit:
 /* Insert association into the hash table.  */
 static void __sctp_hash_established(struct sctp_association *asoc)
 {
+       struct net *net = sock_net(asoc->base.sk);
        struct sctp_ep_common *epb;
        struct sctp_hashbucket *head;
 
        epb = &asoc->base;
 
        /* Calculate which chain this entry will belong to. */
-       epb->hashent = sctp_assoc_hashfn(epb->bind_addr.port, asoc->peer.port);
+       epb->hashent = sctp_assoc_hashfn(net, epb->bind_addr.port,
+                                        asoc->peer.port);
 
        head = &sctp_assoc_hashtable[epb->hashent];
 
@@ -827,12 +839,13 @@ void sctp_hash_established(struct sctp_association *asoc)
 /* Remove association from the hash table.  */
 static void __sctp_unhash_established(struct sctp_association *asoc)
 {
+       struct net *net = sock_net(asoc->base.sk);
        struct sctp_hashbucket *head;
        struct sctp_ep_common *epb;
 
        epb = &asoc->base;
 
-       epb->hashent = sctp_assoc_hashfn(epb->bind_addr.port,
+       epb->hashent = sctp_assoc_hashfn(net, epb->bind_addr.port,
                                         asoc->peer.port);
 
        head = &sctp_assoc_hashtable[epb->hashent];
@@ -855,6 +868,7 @@ void sctp_unhash_established(struct sctp_association *asoc)
 
 /* Look up an association. */
 static struct sctp_association *__sctp_lookup_association(
+                                       struct net *net,
                                        const union sctp_addr *local,
                                        const union sctp_addr *peer,
                                        struct sctp_transport **pt)
@@ -869,12 +883,13 @@ static struct sctp_association *__sctp_lookup_association(
        /* Optimize here for direct hit, only listening connections can
         * have wildcards anyways.
         */
-       hash = sctp_assoc_hashfn(ntohs(local->v4.sin_port), ntohs(peer->v4.sin_port));
+       hash = sctp_assoc_hashfn(net, ntohs(local->v4.sin_port),
+                                ntohs(peer->v4.sin_port));
        head = &sctp_assoc_hashtable[hash];
        read_lock(&head->lock);
        sctp_for_each_hentry(epb, node, &head->chain) {
                asoc = sctp_assoc(epb);
-               transport = sctp_assoc_is_match(asoc, local, peer);
+               transport = sctp_assoc_is_match(asoc, net, local, peer);
                if (transport)
                        goto hit;
        }
@@ -892,27 +907,29 @@ hit:
 
 /* Look up an association. BH-safe. */
 SCTP_STATIC
-struct sctp_association *sctp_lookup_association(const union sctp_addr *laddr,
+struct sctp_association *sctp_lookup_association(struct net *net,
+                                                const union sctp_addr *laddr,
                                                 const union sctp_addr *paddr,
                                            struct sctp_transport **transportp)
 {
        struct sctp_association *asoc;
 
        sctp_local_bh_disable();
-       asoc = __sctp_lookup_association(laddr, paddr, transportp);
+       asoc = __sctp_lookup_association(net, laddr, paddr, transportp);
        sctp_local_bh_enable();
 
        return asoc;
 }
 
 /* Is there an association matching the given local and peer addresses? */
-int sctp_has_association(const union sctp_addr *laddr,
+int sctp_has_association(struct net *net,
+                        const union sctp_addr *laddr,
                         const union sctp_addr *paddr)
 {
        struct sctp_association *asoc;
        struct sctp_transport *transport;
 
-       if ((asoc = sctp_lookup_association(laddr, paddr, &transport))) {
+       if ((asoc = sctp_lookup_association(net, laddr, paddr, &transport))) {
                sctp_association_put(asoc);
                return 1;
        }
@@ -938,7 +955,8 @@ int sctp_has_association(const union sctp_addr *laddr,
  * in certain circumstances.
  *
  */
-static struct sctp_association *__sctp_rcv_init_lookup(struct sk_buff *skb,
+static struct sctp_association *__sctp_rcv_init_lookup(struct net *net,
+       struct sk_buff *skb,
        const union sctp_addr *laddr, struct sctp_transport **transportp)
 {
        struct sctp_association *asoc;
@@ -978,7 +996,7 @@ static struct sctp_association *__sctp_rcv_init_lookup(struct sk_buff *skb,
 
                af->from_addr_param(paddr, params.addr, sh->source, 0);
 
-               asoc = __sctp_lookup_association(laddr, paddr, &transport);
+               asoc = __sctp_lookup_association(net, laddr, paddr, &transport);
                if (asoc)
                        return asoc;
        }
@@ -1001,6 +1019,7 @@ static struct sctp_association *__sctp_rcv_init_lookup(struct sk_buff *skb,
  * subsequent ASCONF Chunks. If found, proceed to rule D4.
  */
 static struct sctp_association *__sctp_rcv_asconf_lookup(
+                                       struct net *net,
                                        sctp_chunkhdr_t *ch,
                                        const union sctp_addr *laddr,
                                        __be16 peer_port,
@@ -1020,7 +1039,7 @@ static struct sctp_association *__sctp_rcv_asconf_lookup(
 
        af->from_addr_param(&paddr, param, peer_port, 0);
 
-       return __sctp_lookup_association(laddr, &paddr, transportp);
+       return __sctp_lookup_association(net, laddr, &paddr, transportp);
 }
 
 
@@ -1033,7 +1052,8 @@ static struct sctp_association *__sctp_rcv_asconf_lookup(
 * This means that any chunks that can help us identify the association need
 * to be looked at to find this association.
 */
-static struct sctp_association *__sctp_rcv_walk_lookup(struct sk_buff *skb,
+static struct sctp_association *__sctp_rcv_walk_lookup(struct net *net,
+                                     struct sk_buff *skb,
                                      const union sctp_addr *laddr,
                                      struct sctp_transport **transportp)
 {
@@ -1074,8 +1094,9 @@ static struct sctp_association *__sctp_rcv_walk_lookup(struct sk_buff *skb,
                            break;
 
                    case SCTP_CID_ASCONF:
-                           if (have_auth || sctp_addip_noauth)
-                                   asoc = __sctp_rcv_asconf_lookup(ch, laddr,
+                           if (have_auth || net->sctp.addip_noauth)
+                                   asoc = __sctp_rcv_asconf_lookup(
+                                                       net, ch, laddr,
                                                        sctp_hdr(skb)->source,
                                                        transportp);
                    default:
@@ -1098,7 +1119,8 @@ static struct sctp_association *__sctp_rcv_walk_lookup(struct sk_buff *skb,
  * include looking inside of INIT/INIT-ACK chunks or after the AUTH
  * chunks.
  */
-static struct sctp_association *__sctp_rcv_lookup_harder(struct sk_buff *skb,
+static struct sctp_association *__sctp_rcv_lookup_harder(struct net *net,
+                                     struct sk_buff *skb,
                                      const union sctp_addr *laddr,
                                      struct sctp_transport **transportp)
 {
@@ -1118,11 +1140,11 @@ static struct sctp_association *__sctp_rcv_lookup_harder(struct sk_buff *skb,
        switch (ch->type) {
        case SCTP_CID_INIT:
        case SCTP_CID_INIT_ACK:
-               return __sctp_rcv_init_lookup(skb, laddr, transportp);
+               return __sctp_rcv_init_lookup(net, skb, laddr, transportp);
                break;
 
        default:
-               return __sctp_rcv_walk_lookup(skb, laddr, transportp);
+               return __sctp_rcv_walk_lookup(net, skb, laddr, transportp);
                break;
        }
 
@@ -1131,21 +1153,22 @@ static struct sctp_association *__sctp_rcv_lookup_harder(struct sk_buff *skb,
 }
 
 /* Lookup an association for an inbound skb. */
-static struct sctp_association *__sctp_rcv_lookup(struct sk_buff *skb,
+static struct sctp_association *__sctp_rcv_lookup(struct net *net,
+                                     struct sk_buff *skb,
                                      const union sctp_addr *paddr,
                                      const union sctp_addr *laddr,
                                      struct sctp_transport **transportp)
 {
        struct sctp_association *asoc;
 
-       asoc = __sctp_lookup_association(laddr, paddr, transportp);
+       asoc = __sctp_lookup_association(net, laddr, paddr, transportp);
 
        /* Further lookup for INIT/INIT-ACK packets.
         * SCTP Implementors Guide, 2.18 Handling of address
         * parameters within the INIT or INIT-ACK.
         */
        if (!asoc)
-               asoc = __sctp_rcv_lookup_harder(skb, laddr, transportp);
+               asoc = __sctp_rcv_lookup_harder(net, skb, laddr, transportp);
 
        return asoc;
 }
index ed7139ea7978dc664f6dfbff33977cf31bbc4325..ea14cb44529528124e2bdd24988a59f1cacc2569 100644 (file)
@@ -99,6 +99,7 @@ static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev,
        struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
        struct sctp_sockaddr_entry *addr = NULL;
        struct sctp_sockaddr_entry *temp;
+       struct net *net = dev_net(ifa->idev->dev);
        int found = 0;
 
        switch (ev) {
@@ -110,27 +111,27 @@ static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev,
                        addr->a.v6.sin6_addr = ifa->addr;
                        addr->a.v6.sin6_scope_id = ifa->idev->dev->ifindex;
                        addr->valid = 1;
-                       spin_lock_bh(&sctp_local_addr_lock);
-                       list_add_tail_rcu(&addr->list, &sctp_local_addr_list);
-                       sctp_addr_wq_mgmt(addr, SCTP_ADDR_NEW);
-                       spin_unlock_bh(&sctp_local_addr_lock);
+                       spin_lock_bh(&net->sctp.local_addr_lock);
+                       list_add_tail_rcu(&addr->list, &net->sctp.local_addr_list);
+                       sctp_addr_wq_mgmt(net, addr, SCTP_ADDR_NEW);
+                       spin_unlock_bh(&net->sctp.local_addr_lock);
                }
                break;
        case NETDEV_DOWN:
-               spin_lock_bh(&sctp_local_addr_lock);
+               spin_lock_bh(&net->sctp.local_addr_lock);
                list_for_each_entry_safe(addr, temp,
-                                       &sctp_local_addr_list, list) {
+                                       &net->sctp.local_addr_list, list) {
                        if (addr->a.sa.sa_family == AF_INET6 &&
                                        ipv6_addr_equal(&addr->a.v6.sin6_addr,
                                                &ifa->addr)) {
-                               sctp_addr_wq_mgmt(addr, SCTP_ADDR_DEL);
+                               sctp_addr_wq_mgmt(net, addr, SCTP_ADDR_DEL);
                                found = 1;
                                addr->valid = 0;
                                list_del_rcu(&addr->list);
                                break;
                        }
                }
-               spin_unlock_bh(&sctp_local_addr_lock);
+               spin_unlock_bh(&net->sctp.local_addr_lock);
                if (found)
                        kfree_rcu(addr, rcu);
                break;
@@ -154,6 +155,7 @@ SCTP_STATIC void sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
        struct ipv6_pinfo *np;
        sk_buff_data_t saveip, savesctp;
        int err;
+       struct net *net = dev_net(skb->dev);
 
        idev = in6_dev_get(skb->dev);
 
@@ -162,12 +164,12 @@ SCTP_STATIC void sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
        savesctp = skb->transport_header;
        skb_reset_network_header(skb);
        skb_set_transport_header(skb, offset);
-       sk = sctp_err_lookup(AF_INET6, skb, sctp_hdr(skb), &asoc, &transport);
+       sk = sctp_err_lookup(net, AF_INET6, skb, sctp_hdr(skb), &asoc, &transport);
        /* Put back, the original pointers. */
        skb->network_header   = saveip;
        skb->transport_header = savesctp;
        if (!sk) {
-               ICMP6_INC_STATS_BH(dev_net(skb->dev), idev, ICMP6_MIB_INERRORS);
+               ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_INERRORS);
                goto out;
        }
 
@@ -241,7 +243,7 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport)
                          __func__, skb, skb->len,
                          &fl6.saddr, &fl6.daddr);
 
-       SCTP_INC_STATS(SCTP_MIB_OUTSCTPPACKS);
+       SCTP_INC_STATS(sock_net(sk), SCTP_MIB_OUTSCTPPACKS);
 
        if (!(transport->param_flags & SPP_PMTUD_ENABLE))
                skb->local_df = 1;
@@ -580,7 +582,7 @@ static int sctp_v6_available(union sctp_addr *addr, struct sctp_sock *sp)
        if (!(type & IPV6_ADDR_UNICAST))
                return 0;
 
-       return ipv6_chk_addr(&init_net, in6, NULL, 0);
+       return ipv6_chk_addr(sock_net(&sp->inet.sk), in6, NULL, 0);
 }
 
 /* This function checks if the address is a valid address to be used for
@@ -857,14 +859,14 @@ static int sctp_inet6_bind_verify(struct sctp_sock *opt, union sctp_addr *addr)
                struct net_device *dev;
 
                if (type & IPV6_ADDR_LINKLOCAL) {
+                       struct net *net;
                        if (!addr->v6.sin6_scope_id)
                                return 0;
+                       net = sock_net(&opt->inet.sk);
                        rcu_read_lock();
-                       dev = dev_get_by_index_rcu(&init_net,
-                                                  addr->v6.sin6_scope_id);
+                       dev = dev_get_by_index_rcu(net, addr->v6.sin6_scope_id);
                        if (!dev ||
-                           !ipv6_chk_addr(&init_net, &addr->v6.sin6_addr,
-                                          dev, 0)) {
+                           !ipv6_chk_addr(net, &addr->v6.sin6_addr, dev, 0)) {
                                rcu_read_unlock();
                                return 0;
                        }
@@ -897,7 +899,7 @@ static int sctp_inet6_send_verify(struct sctp_sock *opt, union sctp_addr *addr)
                        if (!addr->v6.sin6_scope_id)
                                return 0;
                        rcu_read_lock();
-                       dev = dev_get_by_index_rcu(&init_net,
+                       dev = dev_get_by_index_rcu(sock_net(&opt->inet.sk),
                                                   addr->v6.sin6_scope_id);
                        rcu_read_unlock();
                        if (!dev)
index 8ef8e7d9eb61bbf74b6c023f4e33ee360085f84c..fe012c44f8dff15e4882165e2718b11f919cf260 100644 (file)
@@ -129,20 +129,20 @@ static const struct file_operations sctp_objcnt_ops = {
 };
 
 /* Initialize the objcount in the proc filesystem.  */
-void sctp_dbg_objcnt_init(void)
+void sctp_dbg_objcnt_init(struct net *net)
 {
        struct proc_dir_entry *ent;
 
        ent = proc_create("sctp_dbg_objcnt", 0,
-                         proc_net_sctp, &sctp_objcnt_ops);
+                         net->sctp.proc_net_sctp, &sctp_objcnt_ops);
        if (!ent)
                pr_warn("sctp_dbg_objcnt: Unable to create /proc entry.\n");
 }
 
 /* Cleanup the objcount entry in the proc filesystem.  */
-void sctp_dbg_objcnt_exit(void)
+void sctp_dbg_objcnt_exit(struct net *net)
 {
-       remove_proc_entry("sctp_dbg_objcnt", proc_net_sctp);
+       remove_proc_entry("sctp_dbg_objcnt", net->sctp.proc_net_sctp);
 }
 
 
index be50aa234dcdea30a5c7986eaeaae3570f64a6e3..4e90188bf4895b95f89cdcd9ca0f024c31bbe08a 100644 (file)
@@ -616,7 +616,7 @@ out:
        return err;
 no_route:
        kfree_skb(nskb);
-       IP_INC_STATS_BH(&init_net, IPSTATS_MIB_OUTNOROUTES);
+       IP_INC_STATS_BH(sock_net(asoc->base.sk), IPSTATS_MIB_OUTNOROUTES);
 
        /* FIXME: Returning the 'err' will effect all the associations
         * associated with a socket, although only one of the paths of the
index e7aa177c9522a232c1f1b58c6e5a40df2db03a29..d16632e1503a56a4c592157936ab568a2be3a3a4 100644 (file)
@@ -299,6 +299,7 @@ void sctp_outq_free(struct sctp_outq *q)
 /* Put a new chunk in an sctp_outq.  */
 int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk)
 {
+       struct net *net = sock_net(q->asoc->base.sk);
        int error = 0;
 
        SCTP_DEBUG_PRINTK("sctp_outq_tail(%p, %p[%s])\n",
@@ -337,15 +338,15 @@ int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk)
 
                        sctp_outq_tail_data(q, chunk);
                        if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
-                               SCTP_INC_STATS(SCTP_MIB_OUTUNORDERCHUNKS);
+                               SCTP_INC_STATS(net, SCTP_MIB_OUTUNORDERCHUNKS);
                        else
-                               SCTP_INC_STATS(SCTP_MIB_OUTORDERCHUNKS);
+                               SCTP_INC_STATS(net, SCTP_MIB_OUTORDERCHUNKS);
                        q->empty = 0;
                        break;
                }
        } else {
                list_add_tail(&chunk->list, &q->control_chunk_list);
-               SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
+               SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
        }
 
        if (error < 0)
@@ -478,11 +479,12 @@ void sctp_retransmit_mark(struct sctp_outq *q,
 void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
                     sctp_retransmit_reason_t reason)
 {
+       struct net *net = sock_net(q->asoc->base.sk);
        int error = 0;
 
        switch(reason) {
        case SCTP_RTXR_T3_RTX:
-               SCTP_INC_STATS(SCTP_MIB_T3_RETRANSMITS);
+               SCTP_INC_STATS(net, SCTP_MIB_T3_RETRANSMITS);
                sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_T3_RTX);
                /* Update the retran path if the T3-rtx timer has expired for
                 * the current retran path.
@@ -493,15 +495,15 @@ void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
                        transport->asoc->unack_data;
                break;
        case SCTP_RTXR_FAST_RTX:
-               SCTP_INC_STATS(SCTP_MIB_FAST_RETRANSMITS);
+               SCTP_INC_STATS(net, SCTP_MIB_FAST_RETRANSMITS);
                sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_FAST_RTX);
                q->fast_rtx = 1;
                break;
        case SCTP_RTXR_PMTUD:
-               SCTP_INC_STATS(SCTP_MIB_PMTUD_RETRANSMITS);
+               SCTP_INC_STATS(net, SCTP_MIB_PMTUD_RETRANSMITS);
                break;
        case SCTP_RTXR_T1_RTX:
-               SCTP_INC_STATS(SCTP_MIB_T1_RETRANSMITS);
+               SCTP_INC_STATS(net, SCTP_MIB_T1_RETRANSMITS);
                transport->asoc->init_retries++;
                break;
        default:
@@ -589,9 +591,8 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
                 * next chunk.
                 */
                if (chunk->tsn_gap_acked) {
-                       list_del(&chunk->transmitted_list);
-                       list_add_tail(&chunk->transmitted_list,
-                                       &transport->transmitted);
+                       list_move_tail(&chunk->transmitted_list,
+                                      &transport->transmitted);
                        continue;
                }
 
@@ -655,9 +656,8 @@ redo:
                        /* The append was successful, so add this chunk to
                         * the transmitted list.
                         */
-                       list_del(&chunk->transmitted_list);
-                       list_add_tail(&chunk->transmitted_list,
-                                       &transport->transmitted);
+                       list_move_tail(&chunk->transmitted_list,
+                                      &transport->transmitted);
 
                        /* Mark the chunk as ineligible for fast retransmit
                         * after it is retransmitted.
@@ -1914,6 +1914,6 @@ static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 ctsn)
 
        if (ftsn_chunk) {
                list_add_tail(&ftsn_chunk->list, &q->control_chunk_list);
-               SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
+               SCTP_INC_STATS(sock_net(asoc->base.sk), SCTP_MIB_OUTCTRLCHUNKS);
        }
 }
index 534c7eae9d15f9b0371b7cd2963f74e7cbf18e7c..794bb14decdea60ec58e68a59bc5a6b2feda4a01 100644 (file)
@@ -57,7 +57,7 @@
 
 #define DECLARE_PRIMITIVE(name) \
 /* This is called in the code as sctp_primitive_ ## name.  */ \
-int sctp_primitive_ ## name(struct sctp_association *asoc, \
+int sctp_primitive_ ## name(struct net *net, struct sctp_association *asoc, \
                            void *arg) { \
        int error = 0; \
        sctp_event_t event_type; sctp_subtype_t subtype; \
@@ -69,7 +69,7 @@ int sctp_primitive_ ## name(struct sctp_association *asoc, \
        state = asoc ? asoc->state : SCTP_STATE_CLOSED; \
        ep = asoc ? asoc->ep : NULL; \
        \
-       error = sctp_do_sm(event_type, subtype, state, ep, asoc, \
+       error = sctp_do_sm(net, event_type, subtype, state, ep, asoc,   \
                           arg, GFP_KERNEL); \
        return error; \
 }
index 1e2eee88c3ea4750c093e44b3c3080f8ad3551fd..c3bea269faf4e6228143f70c6422046842922177 100644 (file)
@@ -80,11 +80,12 @@ static const struct snmp_mib sctp_snmp_list[] = {
 /* Display sctp snmp mib statistics(/proc/net/sctp/snmp). */
 static int sctp_snmp_seq_show(struct seq_file *seq, void *v)
 {
+       struct net *net = seq->private;
        int i;
 
        for (i = 0; sctp_snmp_list[i].name != NULL; i++)
                seq_printf(seq, "%-32s\t%ld\n", sctp_snmp_list[i].name,
-                          snmp_fold_field((void __percpu **)sctp_statistics,
+                          snmp_fold_field((void __percpu **)net->sctp.sctp_statistics,
                                      sctp_snmp_list[i].entry));
 
        return 0;
@@ -93,7 +94,7 @@ static int sctp_snmp_seq_show(struct seq_file *seq, void *v)
 /* Initialize the seq file operations for 'snmp' object. */
 static int sctp_snmp_seq_open(struct inode *inode, struct file *file)
 {
-       return single_open(file, sctp_snmp_seq_show, NULL);
+       return single_open_net(inode, file, sctp_snmp_seq_show);
 }
 
 static const struct file_operations sctp_snmp_seq_fops = {
@@ -105,11 +106,12 @@ static const struct file_operations sctp_snmp_seq_fops = {
 };
 
 /* Set up the proc fs entry for 'snmp' object. */
-int __init sctp_snmp_proc_init(void)
+int __net_init sctp_snmp_proc_init(struct net *net)
 {
        struct proc_dir_entry *p;
 
-       p = proc_create("snmp", S_IRUGO, proc_net_sctp, &sctp_snmp_seq_fops);
+       p = proc_create("snmp", S_IRUGO, net->sctp.proc_net_sctp,
+                       &sctp_snmp_seq_fops);
        if (!p)
                return -ENOMEM;
 
@@ -117,9 +119,9 @@ int __init sctp_snmp_proc_init(void)
 }
 
 /* Cleanup the proc fs entry for 'snmp' object. */
-void sctp_snmp_proc_exit(void)
+void sctp_snmp_proc_exit(struct net *net)
 {
-       remove_proc_entry("snmp", proc_net_sctp);
+       remove_proc_entry("snmp", net->sctp.proc_net_sctp);
 }
 
 /* Dump local addresses of an association/endpoint. */
@@ -213,10 +215,13 @@ static int sctp_eps_seq_show(struct seq_file *seq, void *v)
        sctp_for_each_hentry(epb, node, &head->chain) {
                ep = sctp_ep(epb);
                sk = epb->sk;
+               if (!net_eq(sock_net(sk), seq_file_net(seq)))
+                       continue;
                seq_printf(seq, "%8pK %8pK %-3d %-3d %-4d %-5d %5d %5lu ", ep, sk,
                           sctp_sk(sk)->type, sk->sk_state, hash,
                           epb->bind_addr.port,
-                          sock_i_uid(sk), sock_i_ino(sk));
+                          from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
+                          sock_i_ino(sk));
 
                sctp_seq_dump_local_addrs(seq, epb);
                seq_printf(seq, "\n");
@@ -238,7 +243,8 @@ static const struct seq_operations sctp_eps_ops = {
 /* Initialize the seq file operations for 'eps' object. */
 static int sctp_eps_seq_open(struct inode *inode, struct file *file)
 {
-       return seq_open(file, &sctp_eps_ops);
+       return seq_open_net(inode, file, &sctp_eps_ops,
+                           sizeof(struct seq_net_private));
 }
 
 static const struct file_operations sctp_eps_seq_fops = {
@@ -249,11 +255,12 @@ static const struct file_operations sctp_eps_seq_fops = {
 };
 
 /* Set up the proc fs entry for 'eps' object. */
-int __init sctp_eps_proc_init(void)
+int __net_init sctp_eps_proc_init(struct net *net)
 {
        struct proc_dir_entry *p;
 
-       p = proc_create("eps", S_IRUGO, proc_net_sctp, &sctp_eps_seq_fops);
+       p = proc_create("eps", S_IRUGO, net->sctp.proc_net_sctp,
+                       &sctp_eps_seq_fops);
        if (!p)
                return -ENOMEM;
 
@@ -261,9 +268,9 @@ int __init sctp_eps_proc_init(void)
 }
 
 /* Cleanup the proc fs entry for 'eps' object. */
-void sctp_eps_proc_exit(void)
+void sctp_eps_proc_exit(struct net *net)
 {
-       remove_proc_entry("eps", proc_net_sctp);
+       remove_proc_entry("eps", net->sctp.proc_net_sctp);
 }
 
 
@@ -316,6 +323,8 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
        sctp_for_each_hentry(epb, node, &head->chain) {
                assoc = sctp_assoc(epb);
                sk = epb->sk;
+               if (!net_eq(sock_net(sk), seq_file_net(seq)))
+                       continue;
                seq_printf(seq,
                           "%8pK %8pK %-3d %-3d %-2d %-4d "
                           "%4d %8d %8d %7d %5lu %-5d %5d ",
@@ -324,7 +333,8 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
                           assoc->assoc_id,
                           assoc->sndbuf_used,
                           atomic_read(&assoc->rmem_alloc),
-                          sock_i_uid(sk), sock_i_ino(sk),
+                          from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
+                          sock_i_ino(sk),
                           epb->bind_addr.port,
                           assoc->peer.port);
                seq_printf(seq, " ");
@@ -354,7 +364,8 @@ static const struct seq_operations sctp_assoc_ops = {
 /* Initialize the seq file operations for 'assocs' object. */
 static int sctp_assocs_seq_open(struct inode *inode, struct file *file)
 {
-       return seq_open(file, &sctp_assoc_ops);
+       return seq_open_net(inode, file, &sctp_assoc_ops,
+                           sizeof(struct seq_net_private));
 }
 
 static const struct file_operations sctp_assocs_seq_fops = {
@@ -365,11 +376,11 @@ static const struct file_operations sctp_assocs_seq_fops = {
 };
 
 /* Set up the proc fs entry for 'assocs' object. */
-int __init sctp_assocs_proc_init(void)
+int __net_init sctp_assocs_proc_init(struct net *net)
 {
        struct proc_dir_entry *p;
 
-       p = proc_create("assocs", S_IRUGO, proc_net_sctp,
+       p = proc_create("assocs", S_IRUGO, net->sctp.proc_net_sctp,
                        &sctp_assocs_seq_fops);
        if (!p)
                return -ENOMEM;
@@ -378,9 +389,9 @@ int __init sctp_assocs_proc_init(void)
 }
 
 /* Cleanup the proc fs entry for 'assocs' object. */
-void sctp_assocs_proc_exit(void)
+void sctp_assocs_proc_exit(struct net *net)
 {
-       remove_proc_entry("assocs", proc_net_sctp);
+       remove_proc_entry("assocs", net->sctp.proc_net_sctp);
 }
 
 static void *sctp_remaddr_seq_start(struct seq_file *seq, loff_t *pos)
@@ -426,6 +437,8 @@ static int sctp_remaddr_seq_show(struct seq_file *seq, void *v)
        sctp_local_bh_disable();
        read_lock(&head->lock);
        sctp_for_each_hentry(epb, node, &head->chain) {
+               if (!net_eq(sock_net(epb->sk), seq_file_net(seq)))
+                       continue;
                assoc = sctp_assoc(epb);
                list_for_each_entry(tsp, &assoc->peer.transport_addr_list,
                                        transports) {
@@ -489,14 +502,15 @@ static const struct seq_operations sctp_remaddr_ops = {
 };
 
 /* Cleanup the proc fs entry for 'remaddr' object. */
-void sctp_remaddr_proc_exit(void)
+void sctp_remaddr_proc_exit(struct net *net)
 {
-       remove_proc_entry("remaddr", proc_net_sctp);
+       remove_proc_entry("remaddr", net->sctp.proc_net_sctp);
 }
 
 static int sctp_remaddr_seq_open(struct inode *inode, struct file *file)
 {
-       return seq_open(file, &sctp_remaddr_ops);
+       return seq_open_net(inode, file, &sctp_remaddr_ops,
+                           sizeof(struct seq_net_private));
 }
 
 static const struct file_operations sctp_remaddr_seq_fops = {
@@ -506,11 +520,12 @@ static const struct file_operations sctp_remaddr_seq_fops = {
        .release = seq_release,
 };
 
-int __init sctp_remaddr_proc_init(void)
+int __net_init sctp_remaddr_proc_init(struct net *net)
 {
        struct proc_dir_entry *p;
 
-       p = proc_create("remaddr", S_IRUGO, proc_net_sctp, &sctp_remaddr_seq_fops);
+       p = proc_create("remaddr", S_IRUGO, net->sctp.proc_net_sctp,
+                       &sctp_remaddr_seq_fops);
        if (!p)
                return -ENOMEM;
        return 0;
index 1f89c4e696457fc02948066052713cac4d2f46fc..2d518425d5984bf954c6ebba3d7db0abb7ef5dc7 100644 (file)
 
 /* Global data structures. */
 struct sctp_globals sctp_globals __read_mostly;
-DEFINE_SNMP_STAT(struct sctp_mib, sctp_statistics) __read_mostly;
-
-#ifdef CONFIG_PROC_FS
-struct proc_dir_entry  *proc_net_sctp;
-#endif
 
 struct idr sctp_assocs_id;
 DEFINE_SPINLOCK(sctp_assocs_id_lock);
 
-/* This is the global socket data structure used for responding to
- * the Out-of-the-blue (OOTB) packets.  A control sock will be created
- * for this socket at the initialization time.
- */
-static struct sock *sctp_ctl_sock;
-
 static struct sctp_pf *sctp_pf_inet6_specific;
 static struct sctp_pf *sctp_pf_inet_specific;
 static struct sctp_af *sctp_af_v4_specific;
@@ -96,74 +85,54 @@ long sysctl_sctp_mem[3];
 int sysctl_sctp_rmem[3];
 int sysctl_sctp_wmem[3];
 
-/* Return the address of the control sock. */
-struct sock *sctp_get_ctl_sock(void)
-{
-       return sctp_ctl_sock;
-}
-
 /* Set up the proc fs entry for the SCTP protocol. */
-static __init int sctp_proc_init(void)
+static __net_init int sctp_proc_init(struct net *net)
 {
-       if (percpu_counter_init(&sctp_sockets_allocated, 0))
-               goto out_nomem;
 #ifdef CONFIG_PROC_FS
-       if (!proc_net_sctp) {
-               proc_net_sctp = proc_mkdir("sctp", init_net.proc_net);
-               if (!proc_net_sctp)
-                       goto out_free_percpu;
-       }
-
-       if (sctp_snmp_proc_init())
+       net->sctp.proc_net_sctp = proc_net_mkdir(net, "sctp", net->proc_net);
+       if (!net->sctp.proc_net_sctp)
+               goto out_proc_net_sctp;
+       if (sctp_snmp_proc_init(net))
                goto out_snmp_proc_init;
-       if (sctp_eps_proc_init())
+       if (sctp_eps_proc_init(net))
                goto out_eps_proc_init;
-       if (sctp_assocs_proc_init())
+       if (sctp_assocs_proc_init(net))
                goto out_assocs_proc_init;
-       if (sctp_remaddr_proc_init())
+       if (sctp_remaddr_proc_init(net))
                goto out_remaddr_proc_init;
 
        return 0;
 
 out_remaddr_proc_init:
-       sctp_assocs_proc_exit();
+       sctp_assocs_proc_exit(net);
 out_assocs_proc_init:
-       sctp_eps_proc_exit();
+       sctp_eps_proc_exit(net);
 out_eps_proc_init:
-       sctp_snmp_proc_exit();
+       sctp_snmp_proc_exit(net);
 out_snmp_proc_init:
-       if (proc_net_sctp) {
-               proc_net_sctp = NULL;
-               remove_proc_entry("sctp", init_net.proc_net);
-       }
-out_free_percpu:
-       percpu_counter_destroy(&sctp_sockets_allocated);
-#else
-       return 0;
-#endif /* CONFIG_PROC_FS */
-
-out_nomem:
+       remove_proc_entry("sctp", net->proc_net);
+       net->sctp.proc_net_sctp = NULL;
+out_proc_net_sctp:
        return -ENOMEM;
+#endif /* CONFIG_PROC_FS */
+       return 0;
 }
 
 /* Clean up the proc fs entry for the SCTP protocol.
  * Note: Do not make this __exit as it is used in the init error
  * path.
  */
-static void sctp_proc_exit(void)
+static void sctp_proc_exit(struct net *net)
 {
 #ifdef CONFIG_PROC_FS
-       sctp_snmp_proc_exit();
-       sctp_eps_proc_exit();
-       sctp_assocs_proc_exit();
-       sctp_remaddr_proc_exit();
-
-       if (proc_net_sctp) {
-               proc_net_sctp = NULL;
-               remove_proc_entry("sctp", init_net.proc_net);
-       }
+       sctp_snmp_proc_exit(net);
+       sctp_eps_proc_exit(net);
+       sctp_assocs_proc_exit(net);
+       sctp_remaddr_proc_exit(net);
+
+       remove_proc_entry("sctp", net->proc_net);
+       net->sctp.proc_net_sctp = NULL;
 #endif
-       percpu_counter_destroy(&sctp_sockets_allocated);
 }
 
 /* Private helper to extract ipv4 address and stash them in
@@ -201,29 +170,29 @@ static void sctp_v4_copy_addrlist(struct list_head *addrlist,
 /* Extract our IP addresses from the system and stash them in the
  * protocol structure.
  */
-static void sctp_get_local_addr_list(void)
+static void sctp_get_local_addr_list(struct net *net)
 {
        struct net_device *dev;
        struct list_head *pos;
        struct sctp_af *af;
 
        rcu_read_lock();
-       for_each_netdev_rcu(&init_net, dev) {
+       for_each_netdev_rcu(net, dev) {
                __list_for_each(pos, &sctp_address_families) {
                        af = list_entry(pos, struct sctp_af, list);
-                       af->copy_addrlist(&sctp_local_addr_list, dev);
+                       af->copy_addrlist(&net->sctp.local_addr_list, dev);
                }
        }
        rcu_read_unlock();
 }
 
 /* Free the existing local addresses.  */
-static void sctp_free_local_addr_list(void)
+static void sctp_free_local_addr_list(struct net *net)
 {
        struct sctp_sockaddr_entry *addr;
        struct list_head *pos, *temp;
 
-       list_for_each_safe(pos, temp, &sctp_local_addr_list) {
+       list_for_each_safe(pos, temp, &net->sctp.local_addr_list) {
                addr = list_entry(pos, struct sctp_sockaddr_entry, list);
                list_del(pos);
                kfree(addr);
@@ -231,17 +200,17 @@ static void sctp_free_local_addr_list(void)
 }
 
 /* Copy the local addresses which are valid for 'scope' into 'bp'.  */
-int sctp_copy_local_addr_list(struct sctp_bind_addr *bp, sctp_scope_t scope,
-                             gfp_t gfp, int copy_flags)
+int sctp_copy_local_addr_list(struct net *net, struct sctp_bind_addr *bp,
+                             sctp_scope_t scope, gfp_t gfp, int copy_flags)
 {
        struct sctp_sockaddr_entry *addr;
        int error = 0;
 
        rcu_read_lock();
-       list_for_each_entry_rcu(addr, &sctp_local_addr_list, list) {
+       list_for_each_entry_rcu(addr, &net->sctp.local_addr_list, list) {
                if (!addr->valid)
                        continue;
-               if (sctp_in_scope(&addr->a, scope)) {
+               if (sctp_in_scope(net, &addr->a, scope)) {
                        /* Now that the address is in scope, check to see if
                         * the address type is really supported by the local
                         * sock as well as the remote peer.
@@ -397,7 +366,8 @@ static int sctp_v4_addr_valid(union sctp_addr *addr,
 /* Should this be available for binding?   */
 static int sctp_v4_available(union sctp_addr *addr, struct sctp_sock *sp)
 {
-       int ret = inet_addr_type(&init_net, addr->v4.sin_addr.s_addr);
+       struct net *net = sock_net(&sp->inet.sk);
+       int ret = inet_addr_type(net, addr->v4.sin_addr.s_addr);
 
 
        if (addr->v4.sin_addr.s_addr != htonl(INADDR_ANY) &&
@@ -484,7 +454,7 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
        SCTP_DEBUG_PRINTK("%s: DST:%pI4, SRC:%pI4 - ",
                          __func__, &fl4->daddr, &fl4->saddr);
 
-       rt = ip_route_output_key(&init_net, fl4);
+       rt = ip_route_output_key(sock_net(sk), fl4);
        if (!IS_ERR(rt))
                dst = &rt->dst;
 
@@ -530,7 +500,7 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
                    (AF_INET == laddr->a.sa.sa_family)) {
                        fl4->saddr = laddr->a.v4.sin_addr.s_addr;
                        fl4->fl4_sport = laddr->a.v4.sin_port;
-                       rt = ip_route_output_key(&init_net, fl4);
+                       rt = ip_route_output_key(sock_net(sk), fl4);
                        if (!IS_ERR(rt)) {
                                dst = &rt->dst;
                                goto out_unlock;
@@ -627,14 +597,15 @@ static void sctp_v4_ecn_capable(struct sock *sk)
 
 void sctp_addr_wq_timeout_handler(unsigned long arg)
 {
+       struct net *net = (struct net *)arg;
        struct sctp_sockaddr_entry *addrw, *temp;
        struct sctp_sock *sp;
 
-       spin_lock_bh(&sctp_addr_wq_lock);
+       spin_lock_bh(&net->sctp.addr_wq_lock);
 
-       list_for_each_entry_safe(addrw, temp, &sctp_addr_waitq, list) {
+       list_for_each_entry_safe(addrw, temp, &net->sctp.addr_waitq, list) {
                SCTP_DEBUG_PRINTK_IPADDR("sctp_addrwq_timo_handler: the first ent in wq %p is ",
-                   " for cmd %d at entry %p\n", &sctp_addr_waitq, &addrw->a, addrw->state,
+                   " for cmd %d at entry %p\n", &net->sctp.addr_waitq, &addrw->a, addrw->state,
                    addrw);
 
 #if IS_ENABLED(CONFIG_IPV6)
@@ -648,7 +619,7 @@ void sctp_addr_wq_timeout_handler(unsigned long arg)
                                goto free_next;
 
                        in6 = (struct in6_addr *)&addrw->a.v6.sin6_addr;
-                       if (ipv6_chk_addr(&init_net, in6, NULL, 0) == 0 &&
+                       if (ipv6_chk_addr(net, in6, NULL, 0) == 0 &&
                            addrw->state == SCTP_ADDR_NEW) {
                                unsigned long timeo_val;
 
@@ -656,12 +627,12 @@ void sctp_addr_wq_timeout_handler(unsigned long arg)
                                    SCTP_ADDRESS_TICK_DELAY);
                                timeo_val = jiffies;
                                timeo_val += msecs_to_jiffies(SCTP_ADDRESS_TICK_DELAY);
-                               mod_timer(&sctp_addr_wq_timer, timeo_val);
+                               mod_timer(&net->sctp.addr_wq_timer, timeo_val);
                                break;
                        }
                }
 #endif
-               list_for_each_entry(sp, &sctp_auto_asconf_splist, auto_asconf_list) {
+               list_for_each_entry(sp, &net->sctp.auto_asconf_splist, auto_asconf_list) {
                        struct sock *sk;
 
                        sk = sctp_opt2sk(sp);
@@ -679,31 +650,32 @@ free_next:
                list_del(&addrw->list);
                kfree(addrw);
        }
-       spin_unlock_bh(&sctp_addr_wq_lock);
+       spin_unlock_bh(&net->sctp.addr_wq_lock);
 }
 
-static void sctp_free_addr_wq(void)
+static void sctp_free_addr_wq(struct net *net)
 {
        struct sctp_sockaddr_entry *addrw;
        struct sctp_sockaddr_entry *temp;
 
-       spin_lock_bh(&sctp_addr_wq_lock);
-       del_timer(&sctp_addr_wq_timer);
-       list_for_each_entry_safe(addrw, temp, &sctp_addr_waitq, list) {
+       spin_lock_bh(&net->sctp.addr_wq_lock);
+       del_timer(&net->sctp.addr_wq_timer);
+       list_for_each_entry_safe(addrw, temp, &net->sctp.addr_waitq, list) {
                list_del(&addrw->list);
                kfree(addrw);
        }
-       spin_unlock_bh(&sctp_addr_wq_lock);
+       spin_unlock_bh(&net->sctp.addr_wq_lock);
 }
 
 /* lookup the entry for the same address in the addr_waitq
  * sctp_addr_wq MUST be locked
  */
-static struct sctp_sockaddr_entry *sctp_addr_wq_lookup(struct sctp_sockaddr_entry *addr)
+static struct sctp_sockaddr_entry *sctp_addr_wq_lookup(struct net *net,
+                                       struct sctp_sockaddr_entry *addr)
 {
        struct sctp_sockaddr_entry *addrw;
 
-       list_for_each_entry(addrw, &sctp_addr_waitq, list) {
+       list_for_each_entry(addrw, &net->sctp.addr_waitq, list) {
                if (addrw->a.sa.sa_family != addr->a.sa.sa_family)
                        continue;
                if (addrw->a.sa.sa_family == AF_INET) {
@@ -719,7 +691,7 @@ static struct sctp_sockaddr_entry *sctp_addr_wq_lookup(struct sctp_sockaddr_entr
        return NULL;
 }
 
-void sctp_addr_wq_mgmt(struct sctp_sockaddr_entry *addr, int cmd)
+void sctp_addr_wq_mgmt(struct net *net, struct sctp_sockaddr_entry *addr, int cmd)
 {
        struct sctp_sockaddr_entry *addrw;
        unsigned long timeo_val;
@@ -730,38 +702,38 @@ void sctp_addr_wq_mgmt(struct sctp_sockaddr_entry *addr, int cmd)
         * new address after a couple of addition and deletion of that address
         */
 
-       spin_lock_bh(&sctp_addr_wq_lock);
+       spin_lock_bh(&net->sctp.addr_wq_lock);
        /* Offsets existing events in addr_wq */
-       addrw = sctp_addr_wq_lookup(addr);
+       addrw = sctp_addr_wq_lookup(net, addr);
        if (addrw) {
                if (addrw->state != cmd) {
                        SCTP_DEBUG_PRINTK_IPADDR("sctp_addr_wq_mgmt offsets existing entry for %d ",
                            " in wq %p\n", addrw->state, &addrw->a,
-                           &sctp_addr_waitq);
+                           &net->sctp.addr_waitq);
                        list_del(&addrw->list);
                        kfree(addrw);
                }
-               spin_unlock_bh(&sctp_addr_wq_lock);
+               spin_unlock_bh(&net->sctp.addr_wq_lock);
                return;
        }
 
        /* OK, we have to add the new address to the wait queue */
        addrw = kmemdup(addr, sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC);
        if (addrw == NULL) {
-               spin_unlock_bh(&sctp_addr_wq_lock);
+               spin_unlock_bh(&net->sctp.addr_wq_lock);
                return;
        }
        addrw->state = cmd;
-       list_add_tail(&addrw->list, &sctp_addr_waitq);
+       list_add_tail(&addrw->list, &net->sctp.addr_waitq);
        SCTP_DEBUG_PRINTK_IPADDR("sctp_addr_wq_mgmt add new entry for cmd:%d ",
-           " in wq %p\n", addrw->state, &addrw->a, &sctp_addr_waitq);
+           " in wq %p\n", addrw->state, &addrw->a, &net->sctp.addr_waitq);
 
-       if (!timer_pending(&sctp_addr_wq_timer)) {
+       if (!timer_pending(&net->sctp.addr_wq_timer)) {
                timeo_val = jiffies;
                timeo_val += msecs_to_jiffies(SCTP_ADDRESS_TICK_DELAY);
-               mod_timer(&sctp_addr_wq_timer, timeo_val);
+               mod_timer(&net->sctp.addr_wq_timer, timeo_val);
        }
-       spin_unlock_bh(&sctp_addr_wq_lock);
+       spin_unlock_bh(&net->sctp.addr_wq_lock);
 }
 
 /* Event handler for inet address addition/deletion events.
@@ -776,11 +748,9 @@ static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev,
        struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
        struct sctp_sockaddr_entry *addr = NULL;
        struct sctp_sockaddr_entry *temp;
+       struct net *net = dev_net(ifa->ifa_dev->dev);
        int found = 0;
 
-       if (!net_eq(dev_net(ifa->ifa_dev->dev), &init_net))
-               return NOTIFY_DONE;
-
        switch (ev) {
        case NETDEV_UP:
                addr = kmalloc(sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC);
@@ -789,27 +759,27 @@ static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev,
                        addr->a.v4.sin_port = 0;
                        addr->a.v4.sin_addr.s_addr = ifa->ifa_local;
                        addr->valid = 1;
-                       spin_lock_bh(&sctp_local_addr_lock);
-                       list_add_tail_rcu(&addr->list, &sctp_local_addr_list);
-                       sctp_addr_wq_mgmt(addr, SCTP_ADDR_NEW);
-                       spin_unlock_bh(&sctp_local_addr_lock);
+                       spin_lock_bh(&net->sctp.local_addr_lock);
+                       list_add_tail_rcu(&addr->list, &net->sctp.local_addr_list);
+                       sctp_addr_wq_mgmt(net, addr, SCTP_ADDR_NEW);
+                       spin_unlock_bh(&net->sctp.local_addr_lock);
                }
                break;
        case NETDEV_DOWN:
-               spin_lock_bh(&sctp_local_addr_lock);
+               spin_lock_bh(&net->sctp.local_addr_lock);
                list_for_each_entry_safe(addr, temp,
-                                       &sctp_local_addr_list, list) {
+                                       &net->sctp.local_addr_list, list) {
                        if (addr->a.sa.sa_family == AF_INET &&
                                        addr->a.v4.sin_addr.s_addr ==
                                        ifa->ifa_local) {
-                               sctp_addr_wq_mgmt(addr, SCTP_ADDR_DEL);
+                               sctp_addr_wq_mgmt(net, addr, SCTP_ADDR_DEL);
                                found = 1;
                                addr->valid = 0;
                                list_del_rcu(&addr->list);
                                break;
                        }
                }
-               spin_unlock_bh(&sctp_local_addr_lock);
+               spin_unlock_bh(&net->sctp.local_addr_lock);
                if (found)
                        kfree_rcu(addr, rcu);
                break;
@@ -822,7 +792,7 @@ static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev,
  * Initialize the control inode/socket with a control endpoint data
  * structure.  This endpoint is reserved exclusively for the OOTB processing.
  */
-static int sctp_ctl_sock_init(void)
+static int sctp_ctl_sock_init(struct net *net)
 {
        int err;
        sa_family_t family = PF_INET;
@@ -830,14 +800,14 @@ static int sctp_ctl_sock_init(void)
        if (sctp_get_pf_specific(PF_INET6))
                family = PF_INET6;
 
-       err = inet_ctl_sock_create(&sctp_ctl_sock, family,
-                                  SOCK_SEQPACKET, IPPROTO_SCTP, &init_net);
+       err = inet_ctl_sock_create(&net->sctp.ctl_sock, family,
+                                  SOCK_SEQPACKET, IPPROTO_SCTP, net);
 
        /* If IPv6 socket could not be created, try the IPv4 socket */
        if (err < 0 && family == PF_INET6)
-               err = inet_ctl_sock_create(&sctp_ctl_sock, AF_INET,
+               err = inet_ctl_sock_create(&net->sctp.ctl_sock, AF_INET,
                                           SOCK_SEQPACKET, IPPROTO_SCTP,
-                                          &init_net);
+                                          net);
 
        if (err < 0) {
                pr_err("Failed to create the SCTP control socket\n");
@@ -990,7 +960,7 @@ static inline int sctp_v4_xmit(struct sk_buff *skb,
        inet->pmtudisc = transport->param_flags & SPP_PMTUD_ENABLE ?
                         IP_PMTUDISC_DO : IP_PMTUDISC_DONT;
 
-       SCTP_INC_STATS(SCTP_MIB_OUTSCTPPACKS);
+       SCTP_INC_STATS(sock_net(&inet->sk), SCTP_MIB_OUTSCTPPACKS);
        return ip_queue_xmit(skb, &transport->fl);
 }
 
@@ -1063,6 +1033,7 @@ static const struct net_protocol sctp_protocol = {
        .handler     = sctp_rcv,
        .err_handler = sctp_v4_err,
        .no_policy   = 1,
+       .netns_ok    = 1,
 };
 
 /* IPv4 address related functions.  */
@@ -1130,16 +1101,16 @@ int sctp_register_pf(struct sctp_pf *pf, sa_family_t family)
        return 1;
 }
 
-static inline int init_sctp_mibs(void)
+static inline int init_sctp_mibs(struct net *net)
 {
-       return snmp_mib_init((void __percpu **)sctp_statistics,
+       return snmp_mib_init((void __percpu **)net->sctp.sctp_statistics,
                             sizeof(struct sctp_mib),
                             __alignof__(struct sctp_mib));
 }
 
-static inline void cleanup_sctp_mibs(void)
+static inline void cleanup_sctp_mibs(struct net *net)
 {
-       snmp_mib_free((void __percpu **)sctp_statistics);
+       snmp_mib_free((void __percpu **)net->sctp.sctp_statistics);
 }
 
 static void sctp_v4_pf_init(void)
@@ -1194,6 +1165,143 @@ static void sctp_v4_del_protocol(void)
        unregister_inetaddr_notifier(&sctp_inetaddr_notifier);
 }
 
+static int sctp_net_init(struct net *net)
+{
+       int status;
+
+       /*
+        * 14. Suggested SCTP Protocol Parameter Values
+        */
+       /* The following protocol parameters are RECOMMENDED:  */
+       /* RTO.Initial              - 3  seconds */
+       net->sctp.rto_initial                   = SCTP_RTO_INITIAL;
+       /* RTO.Min                  - 1  second */
+       net->sctp.rto_min                       = SCTP_RTO_MIN;
+       /* RTO.Max                 -  60 seconds */
+       net->sctp.rto_max                       = SCTP_RTO_MAX;
+       /* RTO.Alpha                - 1/8 */
+       net->sctp.rto_alpha                     = SCTP_RTO_ALPHA;
+       /* RTO.Beta                 - 1/4 */
+       net->sctp.rto_beta                      = SCTP_RTO_BETA;
+
+       /* Valid.Cookie.Life        - 60  seconds */
+       net->sctp.valid_cookie_life             = SCTP_DEFAULT_COOKIE_LIFE;
+
+       /* Whether Cookie Preservative is enabled(1) or not(0) */
+       net->sctp.cookie_preserve_enable        = 1;
+
+       /* Max.Burst                - 4 */
+       net->sctp.max_burst                     = SCTP_DEFAULT_MAX_BURST;
+
+       /* Association.Max.Retrans  - 10 attempts
+        * Path.Max.Retrans         - 5  attempts (per destination address)
+        * Max.Init.Retransmits     - 8  attempts
+        */
+       net->sctp.max_retrans_association       = 10;
+       net->sctp.max_retrans_path              = 5;
+       net->sctp.max_retrans_init              = 8;
+
+       /* Sendbuffer growth        - do per-socket accounting */
+       net->sctp.sndbuf_policy                 = 0;
+
+       /* Rcvbuffer growth         - do per-socket accounting */
+       net->sctp.rcvbuf_policy                 = 0;
+
+       /* HB.interval              - 30 seconds */
+       net->sctp.hb_interval                   = SCTP_DEFAULT_TIMEOUT_HEARTBEAT;
+
+       /* delayed SACK timeout */
+       net->sctp.sack_timeout                  = SCTP_DEFAULT_TIMEOUT_SACK;
+
+       /* Disable ADDIP by default. */
+       net->sctp.addip_enable = 0;
+       net->sctp.addip_noauth = 0;
+       net->sctp.default_auto_asconf = 0;
+
+       /* Enable PR-SCTP by default. */
+       net->sctp.prsctp_enable = 1;
+
+       /* Disable AUTH by default. */
+       net->sctp.auth_enable = 0;
+
+       /* Set SCOPE policy to enabled */
+       net->sctp.scope_policy = SCTP_SCOPE_POLICY_ENABLE;
+
+       /* Set the default rwnd update threshold */
+       net->sctp.rwnd_upd_shift = SCTP_DEFAULT_RWND_SHIFT;
+
+       /* Initialize maximum autoclose timeout. */
+       net->sctp.max_autoclose         = INT_MAX / HZ;
+
+       status = sctp_sysctl_net_register(net);
+       if (status)
+               goto err_sysctl_register;
+
+       /* Allocate and initialise sctp mibs.  */
+       status = init_sctp_mibs(net);
+       if (status)
+               goto err_init_mibs;
+
+       /* Initialize proc fs directory.  */
+       status = sctp_proc_init(net);
+       if (status)
+               goto err_init_proc;
+
+       sctp_dbg_objcnt_init(net);
+
+       /* Initialize the control inode/socket for handling OOTB packets.  */
+       if ((status = sctp_ctl_sock_init(net))) {
+               pr_err("Failed to initialize the SCTP control sock\n");
+               goto err_ctl_sock_init;
+       }
+
+       /* Initialize the local address list. */
+       INIT_LIST_HEAD(&net->sctp.local_addr_list);
+       spin_lock_init(&net->sctp.local_addr_lock);
+       sctp_get_local_addr_list(net);
+
+       /* Initialize the address event list */
+       INIT_LIST_HEAD(&net->sctp.addr_waitq);
+       INIT_LIST_HEAD(&net->sctp.auto_asconf_splist);
+       spin_lock_init(&net->sctp.addr_wq_lock);
+       net->sctp.addr_wq_timer.expires = 0;
+       setup_timer(&net->sctp.addr_wq_timer, sctp_addr_wq_timeout_handler,
+                   (unsigned long)net);
+
+       return 0;
+
+err_ctl_sock_init:
+       sctp_dbg_objcnt_exit(net);
+       sctp_proc_exit(net);
+err_init_proc:
+       cleanup_sctp_mibs(net);
+err_init_mibs:
+       sctp_sysctl_net_unregister(net);
+err_sysctl_register:
+       return status;
+}
+
+static void sctp_net_exit(struct net *net)
+{
+       /* Free the local address list */
+       sctp_free_addr_wq(net);
+       sctp_free_local_addr_list(net);
+
+       /* Free the control endpoint.  */
+       inet_ctl_sock_destroy(net->sctp.ctl_sock);
+
+       sctp_dbg_objcnt_exit(net);
+
+       sctp_proc_exit(net);
+       cleanup_sctp_mibs(net);
+       sctp_sysctl_net_unregister(net);
+}
+
+static struct pernet_operations sctp_net_ops = {
+       .init = sctp_net_init,
+       .exit = sctp_net_exit,
+};
+
 /* Initialize the universe into something sensible.  */
 SCTP_STATIC __init int sctp_init(void)
 {
@@ -1224,62 +1332,9 @@ SCTP_STATIC __init int sctp_init(void)
        if (!sctp_chunk_cachep)
                goto err_chunk_cachep;
 
-       /* Allocate and initialise sctp mibs.  */
-       status = init_sctp_mibs();
+       status = percpu_counter_init(&sctp_sockets_allocated, 0);
        if (status)
-               goto err_init_mibs;
-
-       /* Initialize proc fs directory.  */
-       status = sctp_proc_init();
-       if (status)
-               goto err_init_proc;
-
-       /* Initialize object count debugging.  */
-       sctp_dbg_objcnt_init();
-
-       /*
-        * 14. Suggested SCTP Protocol Parameter Values
-        */
-       /* The following protocol parameters are RECOMMENDED:  */
-       /* RTO.Initial              - 3  seconds */
-       sctp_rto_initial                = SCTP_RTO_INITIAL;
-       /* RTO.Min                  - 1  second */
-       sctp_rto_min                    = SCTP_RTO_MIN;
-       /* RTO.Max                 -  60 seconds */
-       sctp_rto_max                    = SCTP_RTO_MAX;
-       /* RTO.Alpha                - 1/8 */
-       sctp_rto_alpha                  = SCTP_RTO_ALPHA;
-       /* RTO.Beta                 - 1/4 */
-       sctp_rto_beta                   = SCTP_RTO_BETA;
-
-       /* Valid.Cookie.Life        - 60  seconds */
-       sctp_valid_cookie_life          = SCTP_DEFAULT_COOKIE_LIFE;
-
-       /* Whether Cookie Preservative is enabled(1) or not(0) */
-       sctp_cookie_preserve_enable     = 1;
-
-       /* Max.Burst                - 4 */
-       sctp_max_burst                  = SCTP_DEFAULT_MAX_BURST;
-
-       /* Association.Max.Retrans  - 10 attempts
-        * Path.Max.Retrans         - 5  attempts (per destination address)
-        * Max.Init.Retransmits     - 8  attempts
-        */
-       sctp_max_retrans_association    = 10;
-       sctp_max_retrans_path           = 5;
-       sctp_max_retrans_init           = 8;
-
-       /* Sendbuffer growth        - do per-socket accounting */
-       sctp_sndbuf_policy              = 0;
-
-       /* Rcvbuffer growth         - do per-socket accounting */
-       sctp_rcvbuf_policy              = 0;
-
-       /* HB.interval              - 30 seconds */
-       sctp_hb_interval                = SCTP_DEFAULT_TIMEOUT_HEARTBEAT;
-
-       /* delayed SACK timeout */
-       sctp_sack_timeout               = SCTP_DEFAULT_TIMEOUT_SACK;
+               goto err_percpu_counter_init;
 
        /* Implementation specific variables. */
 
@@ -1287,9 +1342,6 @@ SCTP_STATIC __init int sctp_init(void)
        sctp_max_instreams              = SCTP_DEFAULT_INSTREAMS;
        sctp_max_outstreams             = SCTP_DEFAULT_OUTSTREAMS;
 
-       /* Initialize maximum autoclose timeout. */
-       sctp_max_autoclose              = INT_MAX / HZ;
-
        /* Initialize handle used for association ids. */
        idr_init(&sctp_assocs_id);
 
@@ -1376,41 +1428,12 @@ SCTP_STATIC __init int sctp_init(void)
        pr_info("Hash tables configured (established %d bind %d)\n",
                sctp_assoc_hashsize, sctp_port_hashsize);
 
-       /* Disable ADDIP by default. */
-       sctp_addip_enable = 0;
-       sctp_addip_noauth = 0;
-       sctp_default_auto_asconf = 0;
-
-       /* Enable PR-SCTP by default. */
-       sctp_prsctp_enable = 1;
-
-       /* Disable AUTH by default. */
-       sctp_auth_enable = 0;
-
-       /* Set SCOPE policy to enabled */
-       sctp_scope_policy = SCTP_SCOPE_POLICY_ENABLE;
-
-       /* Set the default rwnd update threshold */
-       sctp_rwnd_upd_shift             = SCTP_DEFAULT_RWND_SHIFT;
-
        sctp_sysctl_register();
 
        INIT_LIST_HEAD(&sctp_address_families);
        sctp_v4_pf_init();
        sctp_v6_pf_init();
 
-       /* Initialize the local address list. */
-       INIT_LIST_HEAD(&sctp_local_addr_list);
-       spin_lock_init(&sctp_local_addr_lock);
-       sctp_get_local_addr_list();
-
-       /* Initialize the address event list */
-       INIT_LIST_HEAD(&sctp_addr_waitq);
-       INIT_LIST_HEAD(&sctp_auto_asconf_splist);
-       spin_lock_init(&sctp_addr_wq_lock);
-       sctp_addr_wq_timer.expires = 0;
-       setup_timer(&sctp_addr_wq_timer, sctp_addr_wq_timeout_handler, 0);
-
        status = sctp_v4_protosw_init();
 
        if (status)
@@ -1420,11 +1443,9 @@ SCTP_STATIC __init int sctp_init(void)
        if (status)
                goto err_v6_protosw_init;
 
-       /* Initialize the control inode/socket for handling OOTB packets.  */
-       if ((status = sctp_ctl_sock_init())) {
-               pr_err("Failed to initialize the SCTP control sock\n");
-               goto err_ctl_sock_init;
-       }
+       status = register_pernet_subsys(&sctp_net_ops);
+       if (status)
+               goto err_register_pernet_subsys;
 
        status = sctp_v4_add_protocol();
        if (status)
@@ -1441,13 +1462,12 @@ out:
 err_v6_add_protocol:
        sctp_v4_del_protocol();
 err_add_protocol:
-       inet_ctl_sock_destroy(sctp_ctl_sock);
-err_ctl_sock_init:
+       unregister_pernet_subsys(&sctp_net_ops);
+err_register_pernet_subsys:
        sctp_v6_protosw_exit();
 err_v6_protosw_init:
        sctp_v4_protosw_exit();
 err_protosw_init:
-       sctp_free_local_addr_list();
        sctp_v4_pf_exit();
        sctp_v6_pf_exit();
        sctp_sysctl_unregister();
@@ -1461,11 +1481,8 @@ err_ehash_alloc:
                   get_order(sctp_assoc_hashsize *
                             sizeof(struct sctp_hashbucket)));
 err_ahash_alloc:
-       sctp_dbg_objcnt_exit();
-       sctp_proc_exit();
-err_init_proc:
-       cleanup_sctp_mibs();
-err_init_mibs:
+       percpu_counter_destroy(&sctp_sockets_allocated);
+err_percpu_counter_init:
        kmem_cache_destroy(sctp_chunk_cachep);
 err_chunk_cachep:
        kmem_cache_destroy(sctp_bucket_cachep);
@@ -1482,18 +1499,13 @@ SCTP_STATIC __exit void sctp_exit(void)
        /* Unregister with inet6/inet layers. */
        sctp_v6_del_protocol();
        sctp_v4_del_protocol();
-       sctp_free_addr_wq();
 
-       /* Free the control endpoint.  */
-       inet_ctl_sock_destroy(sctp_ctl_sock);
+       unregister_pernet_subsys(&sctp_net_ops);
 
        /* Free protosw registrations */
        sctp_v6_protosw_exit();
        sctp_v4_protosw_exit();
 
-       /* Free the local address list.  */
-       sctp_free_local_addr_list();
-
        /* Unregister with socket layer. */
        sctp_v6_pf_exit();
        sctp_v4_pf_exit();
@@ -1508,9 +1520,7 @@ SCTP_STATIC __exit void sctp_exit(void)
                   get_order(sctp_port_hashsize *
                             sizeof(struct sctp_bind_hashbucket)));
 
-       sctp_dbg_objcnt_exit();
-       sctp_proc_exit();
-       cleanup_sctp_mibs();
+       percpu_counter_destroy(&sctp_sockets_allocated);
 
        rcu_barrier(); /* Wait for completion of call_rcu()'s */
 
index 479a70ef6ff8abe2a59a16ddc672f6638c7b3369..fbe1636309a75ac054de225fe4d1cf245a3923d2 100644 (file)
@@ -198,6 +198,7 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
                             const struct sctp_bind_addr *bp,
                             gfp_t gfp, int vparam_len)
 {
+       struct net *net = sock_net(asoc->base.sk);
        sctp_inithdr_t init;
        union sctp_params addrs;
        size_t chunksize;
@@ -237,7 +238,7 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
        chunksize += WORD_ROUND(SCTP_SAT_LEN(num_types));
        chunksize += sizeof(ecap_param);
 
-       if (sctp_prsctp_enable)
+       if (net->sctp.prsctp_enable)
                chunksize += sizeof(prsctp_param);
 
        /* ADDIP: Section 4.2.7:
@@ -245,7 +246,7 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
         *  the ASCONF,the ASCONF-ACK, and the AUTH  chunks in its INIT and
         *  INIT-ACK parameters.
         */
-       if (sctp_addip_enable) {
+       if (net->sctp.addip_enable) {
                extensions[num_ext] = SCTP_CID_ASCONF;
                extensions[num_ext+1] = SCTP_CID_ASCONF_ACK;
                num_ext += 2;
@@ -257,7 +258,7 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
        chunksize += vparam_len;
 
        /* Account for AUTH related parameters */
-       if (sctp_auth_enable) {
+       if (net->sctp.auth_enable) {
                /* Add random parameter length*/
                chunksize += sizeof(asoc->c.auth_random);
 
@@ -331,7 +332,7 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
                sctp_addto_param(retval, num_ext, extensions);
        }
 
-       if (sctp_prsctp_enable)
+       if (net->sctp.prsctp_enable)
                sctp_addto_chunk(retval, sizeof(prsctp_param), &prsctp_param);
 
        if (sp->adaptation_ind) {
@@ -342,7 +343,7 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
        }
 
        /* Add SCTP-AUTH chunks to the parameter list */
-       if (sctp_auth_enable) {
+       if (net->sctp.auth_enable) {
                sctp_addto_chunk(retval, sizeof(asoc->c.auth_random),
                                 asoc->c.auth_random);
                if (auth_hmacs)
@@ -1940,7 +1941,7 @@ static int sctp_process_hn_param(const struct sctp_association *asoc,
        return 0;
 }
 
-static int sctp_verify_ext_param(union sctp_params param)
+static int sctp_verify_ext_param(struct net *net, union sctp_params param)
 {
        __u16 num_ext = ntohs(param.p->length) - sizeof(sctp_paramhdr_t);
        int have_auth = 0;
@@ -1964,10 +1965,10 @@ static int sctp_verify_ext_param(union sctp_params param)
         * only if ADD-IP is turned on and we are not backward-compatible
         * mode.
         */
-       if (sctp_addip_noauth)
+       if (net->sctp.addip_noauth)
                return 1;
 
-       if (sctp_addip_enable && !have_auth && have_asconf)
+       if (net->sctp.addip_enable && !have_auth && have_asconf)
                return 0;
 
        return 1;
@@ -1976,13 +1977,14 @@ static int sctp_verify_ext_param(union sctp_params param)
 static void sctp_process_ext_param(struct sctp_association *asoc,
                                    union sctp_params param)
 {
+       struct net *net = sock_net(asoc->base.sk);
        __u16 num_ext = ntohs(param.p->length) - sizeof(sctp_paramhdr_t);
        int i;
 
        for (i = 0; i < num_ext; i++) {
                switch (param.ext->chunks[i]) {
                    case SCTP_CID_FWD_TSN:
-                           if (sctp_prsctp_enable &&
+                           if (net->sctp.prsctp_enable &&
                                !asoc->peer.prsctp_capable)
                                    asoc->peer.prsctp_capable = 1;
                            break;
@@ -1990,12 +1992,12 @@ static void sctp_process_ext_param(struct sctp_association *asoc,
                            /* if the peer reports AUTH, assume that he
                             * supports AUTH.
                             */
-                           if (sctp_auth_enable)
+                           if (net->sctp.auth_enable)
                                    asoc->peer.auth_capable = 1;
                            break;
                    case SCTP_CID_ASCONF:
                    case SCTP_CID_ASCONF_ACK:
-                           if (sctp_addip_enable)
+                           if (net->sctp.addip_enable)
                                    asoc->peer.asconf_capable = 1;
                            break;
                    default:
@@ -2081,7 +2083,8 @@ static sctp_ierror_t sctp_process_unk_param(const struct sctp_association *asoc,
  *     SCTP_IERROR_ERROR - stop processing, trigger an ERROR
  *     SCTP_IERROR_NO_ERROR - continue with the chunk
  */
-static sctp_ierror_t sctp_verify_param(const struct sctp_association *asoc,
+static sctp_ierror_t sctp_verify_param(struct net *net,
+                                       const struct sctp_association *asoc,
                                        union sctp_params param,
                                        sctp_cid_t cid,
                                        struct sctp_chunk *chunk,
@@ -2110,12 +2113,12 @@ static sctp_ierror_t sctp_verify_param(const struct sctp_association *asoc,
                break;
 
        case SCTP_PARAM_SUPPORTED_EXT:
-               if (!sctp_verify_ext_param(param))
+               if (!sctp_verify_ext_param(net, param))
                        return SCTP_IERROR_ABORT;
                break;
 
        case SCTP_PARAM_SET_PRIMARY:
-               if (sctp_addip_enable)
+               if (net->sctp.addip_enable)
                        break;
                goto fallthrough;
 
@@ -2126,12 +2129,12 @@ static sctp_ierror_t sctp_verify_param(const struct sctp_association *asoc,
                break;
 
        case SCTP_PARAM_FWD_TSN_SUPPORT:
-               if (sctp_prsctp_enable)
+               if (net->sctp.prsctp_enable)
                        break;
                goto fallthrough;
 
        case SCTP_PARAM_RANDOM:
-               if (!sctp_auth_enable)
+               if (!net->sctp.auth_enable)
                        goto fallthrough;
 
                /* SCTP-AUTH: Secion 6.1
@@ -2148,7 +2151,7 @@ static sctp_ierror_t sctp_verify_param(const struct sctp_association *asoc,
                break;
 
        case SCTP_PARAM_CHUNKS:
-               if (!sctp_auth_enable)
+               if (!net->sctp.auth_enable)
                        goto fallthrough;
 
                /* SCTP-AUTH: Section 3.2
@@ -2164,7 +2167,7 @@ static sctp_ierror_t sctp_verify_param(const struct sctp_association *asoc,
                break;
 
        case SCTP_PARAM_HMAC_ALGO:
-               if (!sctp_auth_enable)
+               if (!net->sctp.auth_enable)
                        goto fallthrough;
 
                hmacs = (struct sctp_hmac_algo_param *)param.p;
@@ -2198,7 +2201,7 @@ fallthrough:
 }
 
 /* Verify the INIT packet before we process it.  */
-int sctp_verify_init(const struct sctp_association *asoc,
+int sctp_verify_init(struct net *net, const struct sctp_association *asoc,
                     sctp_cid_t cid,
                     sctp_init_chunk_t *peer_init,
                     struct sctp_chunk *chunk,
@@ -2245,7 +2248,7 @@ int sctp_verify_init(const struct sctp_association *asoc,
        /* Verify all the variable length parameters */
        sctp_walk_params(param, peer_init, init_hdr.params) {
 
-               result = sctp_verify_param(asoc, param, cid, chunk, errp);
+               result = sctp_verify_param(net, asoc, param, cid, chunk, errp);
                switch (result) {
                    case SCTP_IERROR_ABORT:
                    case SCTP_IERROR_NOMEM:
@@ -2270,6 +2273,7 @@ int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk,
                      const union sctp_addr *peer_addr,
                      sctp_init_chunk_t *peer_init, gfp_t gfp)
 {
+       struct net *net = sock_net(asoc->base.sk);
        union sctp_params param;
        struct sctp_transport *transport;
        struct list_head *pos, *temp;
@@ -2326,7 +2330,7 @@ int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk,
         * also give us an option to silently ignore the packet, which
         * is what we'll do here.
         */
-       if (!sctp_addip_noauth &&
+       if (!net->sctp.addip_noauth &&
             (asoc->peer.asconf_capable && !asoc->peer.auth_capable)) {
                asoc->peer.addip_disabled_mask |= (SCTP_PARAM_ADD_IP |
                                                  SCTP_PARAM_DEL_IP |
@@ -2466,6 +2470,7 @@ static int sctp_process_param(struct sctp_association *asoc,
                              const union sctp_addr *peer_addr,
                              gfp_t gfp)
 {
+       struct net *net = sock_net(asoc->base.sk);
        union sctp_addr addr;
        int i;
        __u16 sat;
@@ -2494,13 +2499,13 @@ do_addr_param:
                af = sctp_get_af_specific(param_type2af(param.p->type));
                af->from_addr_param(&addr, param.addr, htons(asoc->peer.port), 0);
                scope = sctp_scope(peer_addr);
-               if (sctp_in_scope(&addr, scope))
+               if (sctp_in_scope(net, &addr, scope))
                        if (!sctp_assoc_add_peer(asoc, &addr, gfp, SCTP_UNCONFIRMED))
                                return 0;
                break;
 
        case SCTP_PARAM_COOKIE_PRESERVATIVE:
-               if (!sctp_cookie_preserve_enable)
+               if (!net->sctp.cookie_preserve_enable)
                        break;
 
                stale = ntohl(param.life->lifespan_increment);
@@ -2580,7 +2585,7 @@ do_addr_param:
                break;
 
        case SCTP_PARAM_SET_PRIMARY:
-               if (!sctp_addip_enable)
+               if (!net->sctp.addip_enable)
                        goto fall_through;
 
                addr_param = param.v + sizeof(sctp_addip_param_t);
@@ -2607,7 +2612,7 @@ do_addr_param:
                break;
 
        case SCTP_PARAM_FWD_TSN_SUPPORT:
-               if (sctp_prsctp_enable) {
+               if (net->sctp.prsctp_enable) {
                        asoc->peer.prsctp_capable = 1;
                        break;
                }
@@ -2615,7 +2620,7 @@ do_addr_param:
                goto fall_through;
 
        case SCTP_PARAM_RANDOM:
-               if (!sctp_auth_enable)
+               if (!net->sctp.auth_enable)
                        goto fall_through;
 
                /* Save peer's random parameter */
@@ -2628,7 +2633,7 @@ do_addr_param:
                break;
 
        case SCTP_PARAM_HMAC_ALGO:
-               if (!sctp_auth_enable)
+               if (!net->sctp.auth_enable)
                        goto fall_through;
 
                /* Save peer's HMAC list */
@@ -2644,7 +2649,7 @@ do_addr_param:
                break;
 
        case SCTP_PARAM_CHUNKS:
-               if (!sctp_auth_enable)
+               if (!net->sctp.auth_enable)
                        goto fall_through;
 
                asoc->peer.peer_chunks = kmemdup(param.p,
index fe99628e1257bd1173dadfa1826a0f2d5f35c7f6..bcfebb91559d1d9e9a0b6472986f626021a3f358 100644 (file)
@@ -251,6 +251,7 @@ void sctp_generate_t3_rtx_event(unsigned long peer)
        int error;
        struct sctp_transport *transport = (struct sctp_transport *) peer;
        struct sctp_association *asoc = transport->asoc;
+       struct net *net = sock_net(asoc->base.sk);
 
        /* Check whether a task is in the sock.  */
 
@@ -271,7 +272,7 @@ void sctp_generate_t3_rtx_event(unsigned long peer)
                goto out_unlock;
 
        /* Run through the state machine.  */
-       error = sctp_do_sm(SCTP_EVENT_T_TIMEOUT,
+       error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
                           SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_T3_RTX),
                           asoc->state,
                           asoc->ep, asoc,
@@ -291,6 +292,7 @@ out_unlock:
 static void sctp_generate_timeout_event(struct sctp_association *asoc,
                                        sctp_event_timeout_t timeout_type)
 {
+       struct net *net = sock_net(asoc->base.sk);
        int error = 0;
 
        sctp_bh_lock_sock(asoc->base.sk);
@@ -312,7 +314,7 @@ static void sctp_generate_timeout_event(struct sctp_association *asoc,
                goto out_unlock;
 
        /* Run through the state machine.  */
-       error = sctp_do_sm(SCTP_EVENT_T_TIMEOUT,
+       error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
                           SCTP_ST_TIMEOUT(timeout_type),
                           asoc->state, asoc->ep, asoc,
                           (void *)timeout_type, GFP_ATOMIC);
@@ -371,6 +373,7 @@ void sctp_generate_heartbeat_event(unsigned long data)
        int error = 0;
        struct sctp_transport *transport = (struct sctp_transport *) data;
        struct sctp_association *asoc = transport->asoc;
+       struct net *net = sock_net(asoc->base.sk);
 
        sctp_bh_lock_sock(asoc->base.sk);
        if (sock_owned_by_user(asoc->base.sk)) {
@@ -388,7 +391,7 @@ void sctp_generate_heartbeat_event(unsigned long data)
        if (transport->dead)
                goto out_unlock;
 
-       error = sctp_do_sm(SCTP_EVENT_T_TIMEOUT,
+       error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
                           SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_HEARTBEAT),
                           asoc->state, asoc->ep, asoc,
                           transport, GFP_ATOMIC);
@@ -408,6 +411,7 @@ void sctp_generate_proto_unreach_event(unsigned long data)
 {
        struct sctp_transport *transport = (struct sctp_transport *) data;
        struct sctp_association *asoc = transport->asoc;
+       struct net *net = sock_net(asoc->base.sk);
        
        sctp_bh_lock_sock(asoc->base.sk);
        if (sock_owned_by_user(asoc->base.sk)) {
@@ -426,7 +430,7 @@ void sctp_generate_proto_unreach_event(unsigned long data)
        if (asoc->base.dead)
                goto out_unlock;
 
-       sctp_do_sm(SCTP_EVENT_T_OTHER,
+       sctp_do_sm(net, SCTP_EVENT_T_OTHER,
                   SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH),
                   asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC);
 
@@ -753,8 +757,10 @@ static int sctp_cmd_process_sack(sctp_cmd_seq_t *cmds,
        int err = 0;
 
        if (sctp_outq_sack(&asoc->outqueue, sackh)) {
+               struct net *net = sock_net(asoc->base.sk);
+
                /* There are no more TSNs awaiting SACK.  */
-               err = sctp_do_sm(SCTP_EVENT_T_OTHER,
+               err = sctp_do_sm(net, SCTP_EVENT_T_OTHER,
                                 SCTP_ST_OTHER(SCTP_EVENT_NO_PENDING_TSN),
                                 asoc->state, asoc->ep, asoc, NULL,
                                 GFP_ATOMIC);
@@ -1042,6 +1048,8 @@ static int sctp_cmd_send_msg(struct sctp_association *asoc,
  */
 static void sctp_cmd_send_asconf(struct sctp_association *asoc)
 {
+       struct net *net = sock_net(asoc->base.sk);
+
        /* Send the next asconf chunk from the addip chunk
         * queue.
         */
@@ -1053,7 +1061,7 @@ static void sctp_cmd_send_asconf(struct sctp_association *asoc)
 
                /* Hold the chunk until an ASCONF_ACK is received. */
                sctp_chunk_hold(asconf);
-               if (sctp_primitive_ASCONF(asoc, asconf))
+               if (sctp_primitive_ASCONF(net, asoc, asconf))
                        sctp_chunk_free(asconf);
                else
                        asoc->addip_last_asconf = asconf;
@@ -1089,7 +1097,7 @@ static void sctp_cmd_send_asconf(struct sctp_association *asoc)
  * If you want to understand all of lksctp, this is a
  * good place to start.
  */
-int sctp_do_sm(sctp_event_t event_type, sctp_subtype_t subtype,
+int sctp_do_sm(struct net *net, sctp_event_t event_type, sctp_subtype_t subtype,
               sctp_state_t state,
               struct sctp_endpoint *ep,
               struct sctp_association *asoc,
@@ -1110,12 +1118,12 @@ int sctp_do_sm(sctp_event_t event_type, sctp_subtype_t subtype,
        /* Look up the state function, run it, and then process the
         * side effects.  These three steps are the heart of lksctp.
         */
-       state_fn = sctp_sm_lookup_event(event_type, state, subtype);
+       state_fn = sctp_sm_lookup_event(net, event_type, state, subtype);
 
        sctp_init_cmd_seq(&commands);
 
        DEBUG_PRE;
-       status = (*state_fn->fn)(ep, asoc, subtype, event_arg, &commands);
+       status = (*state_fn->fn)(net, ep, asoc, subtype, event_arg, &commands);
        DEBUG_POST;
 
        error = sctp_side_effects(event_type, subtype, state,
index 9fca103573508aa6f0880455e0e2c2d66dbb6150..094813b6c3c3cb99dddf407eeb93a2397a676cae 100644 (file)
@@ -66,7 +66,8 @@
 #include <net/sctp/sm.h>
 #include <net/sctp/structs.h>
 
-static struct sctp_packet *sctp_abort_pkt_new(const struct sctp_endpoint *ep,
+static struct sctp_packet *sctp_abort_pkt_new(struct net *net,
+                                 const struct sctp_endpoint *ep,
                                  const struct sctp_association *asoc,
                                  struct sctp_chunk *chunk,
                                  const void *payload,
@@ -74,36 +75,43 @@ static struct sctp_packet *sctp_abort_pkt_new(const struct sctp_endpoint *ep,
 static int sctp_eat_data(const struct sctp_association *asoc,
                         struct sctp_chunk *chunk,
                         sctp_cmd_seq_t *commands);
-static struct sctp_packet *sctp_ootb_pkt_new(const struct sctp_association *asoc,
+static struct sctp_packet *sctp_ootb_pkt_new(struct net *net,
+                                            const struct sctp_association *asoc,
                                             const struct sctp_chunk *chunk);
-static void sctp_send_stale_cookie_err(const struct sctp_endpoint *ep,
+static void sctp_send_stale_cookie_err(struct net *net,
+                                      const struct sctp_endpoint *ep,
                                       const struct sctp_association *asoc,
                                       const struct sctp_chunk *chunk,
                                       sctp_cmd_seq_t *commands,
                                       struct sctp_chunk *err_chunk);
-static sctp_disposition_t sctp_sf_do_5_2_6_stale(const struct sctp_endpoint *ep,
+static sctp_disposition_t sctp_sf_do_5_2_6_stale(struct net *net,
+                                                const struct sctp_endpoint *ep,
                                                 const struct sctp_association *asoc,
                                                 const sctp_subtype_t type,
                                                 void *arg,
                                                 sctp_cmd_seq_t *commands);
-static sctp_disposition_t sctp_sf_shut_8_4_5(const struct sctp_endpoint *ep,
+static sctp_disposition_t sctp_sf_shut_8_4_5(struct net *net,
+                                            const struct sctp_endpoint *ep,
                                             const struct sctp_association *asoc,
                                             const sctp_subtype_t type,
                                             void *arg,
                                             sctp_cmd_seq_t *commands);
-static sctp_disposition_t sctp_sf_tabort_8_4_8(const struct sctp_endpoint *ep,
+static sctp_disposition_t sctp_sf_tabort_8_4_8(struct net *net,
+                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        const sctp_subtype_t type,
                                        void *arg,
                                        sctp_cmd_seq_t *commands);
 static struct sctp_sackhdr *sctp_sm_pull_sack(struct sctp_chunk *chunk);
 
-static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands,
+static sctp_disposition_t sctp_stop_t1_and_abort(struct net *net,
+                                          sctp_cmd_seq_t *commands,
                                           __be16 error, int sk_err,
                                           const struct sctp_association *asoc,
                                           struct sctp_transport *transport);
 
 static sctp_disposition_t sctp_sf_abort_violation(
+                                    struct net *net,
                                     const struct sctp_endpoint *ep,
                                     const struct sctp_association *asoc,
                                     void *arg,
@@ -112,6 +120,7 @@ static sctp_disposition_t sctp_sf_abort_violation(
                                     const size_t paylen);
 
 static sctp_disposition_t sctp_sf_violation_chunklen(
+                                    struct net *net,
                                     const struct sctp_endpoint *ep,
                                     const struct sctp_association *asoc,
                                     const sctp_subtype_t type,
@@ -119,6 +128,7 @@ static sctp_disposition_t sctp_sf_violation_chunklen(
                                     sctp_cmd_seq_t *commands);
 
 static sctp_disposition_t sctp_sf_violation_paramlen(
+                                    struct net *net,
                                     const struct sctp_endpoint *ep,
                                     const struct sctp_association *asoc,
                                     const sctp_subtype_t type,
@@ -126,6 +136,7 @@ static sctp_disposition_t sctp_sf_violation_paramlen(
                                     sctp_cmd_seq_t *commands);
 
 static sctp_disposition_t sctp_sf_violation_ctsn(
+                                    struct net *net,
                                     const struct sctp_endpoint *ep,
                                     const struct sctp_association *asoc,
                                     const sctp_subtype_t type,
@@ -133,18 +144,21 @@ static sctp_disposition_t sctp_sf_violation_ctsn(
                                     sctp_cmd_seq_t *commands);
 
 static sctp_disposition_t sctp_sf_violation_chunk(
+                                    struct net *net,
                                     const struct sctp_endpoint *ep,
                                     const struct sctp_association *asoc,
                                     const sctp_subtype_t type,
                                     void *arg,
                                     sctp_cmd_seq_t *commands);
 
-static sctp_ierror_t sctp_sf_authenticate(const struct sctp_endpoint *ep,
+static sctp_ierror_t sctp_sf_authenticate(struct net *net,
+                                   const struct sctp_endpoint *ep,
                                    const struct sctp_association *asoc,
                                    const sctp_subtype_t type,
                                    struct sctp_chunk *chunk);
 
-static sctp_disposition_t __sctp_sf_do_9_1_abort(const struct sctp_endpoint *ep,
+static sctp_disposition_t __sctp_sf_do_9_1_abort(struct net *net,
+                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        const sctp_subtype_t type,
                                        void *arg,
@@ -204,7 +218,8 @@ sctp_chunk_length_valid(struct sctp_chunk *chunk,
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_do_4_C(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_4_C(struct net *net,
+                                 const struct sctp_endpoint *ep,
                                  const struct sctp_association *asoc,
                                  const sctp_subtype_t type,
                                  void *arg,
@@ -214,7 +229,7 @@ sctp_disposition_t sctp_sf_do_4_C(const struct sctp_endpoint *ep,
        struct sctp_ulpevent *ev;
 
        if (!sctp_vtag_verify_either(chunk, asoc))
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        /* RFC 2960 6.10 Bundling
         *
@@ -222,11 +237,11 @@ sctp_disposition_t sctp_sf_do_4_C(const struct sctp_endpoint *ep,
         * SHUTDOWN COMPLETE with any other chunks.
         */
        if (!chunk->singleton)
-               return sctp_sf_violation_chunk(ep, asoc, type, arg, commands);
+               return sctp_sf_violation_chunk(net, ep, asoc, type, arg, commands);
 
        /* Make sure that the SHUTDOWN_COMPLETE chunk has a valid length. */
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
        /* RFC 2960 10.2 SCTP-to-ULP
@@ -259,8 +274,8 @@ sctp_disposition_t sctp_sf_do_4_C(const struct sctp_endpoint *ep,
        sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
                        SCTP_STATE(SCTP_STATE_CLOSED));
 
-       SCTP_INC_STATS(SCTP_MIB_SHUTDOWNS);
-       SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+       SCTP_INC_STATS(net, SCTP_MIB_SHUTDOWNS);
+       SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
 
        sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
 
@@ -289,7 +304,8 @@ sctp_disposition_t sctp_sf_do_4_C(const struct sctp_endpoint *ep,
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_5_1B_init(struct net *net,
+                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        const sctp_subtype_t type,
                                        void *arg,
@@ -313,21 +329,21 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep,
         * with an INIT chunk that is bundled with other chunks.
         */
        if (!chunk->singleton)
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        /* If the packet is an OOTB packet which is temporarily on the
         * control endpoint, respond with an ABORT.
         */
-       if (ep == sctp_sk((sctp_get_ctl_sock()))->ep) {
-               SCTP_INC_STATS(SCTP_MIB_OUTOFBLUES);
-               return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
+       if (ep == sctp_sk(net->sctp.ctl_sock)->ep) {
+               SCTP_INC_STATS(net, SCTP_MIB_OUTOFBLUES);
+               return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
        }
 
        /* 3.1 A packet containing an INIT chunk MUST have a zero Verification
         * Tag.
         */
        if (chunk->sctp_hdr->vtag != 0)
-               return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
+               return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
 
        /* Make sure that the INIT chunk has a valid length.
         * Normally, this would cause an ABORT with a Protocol Violation
@@ -335,7 +351,7 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep,
         * just discard the packet.
         */
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_init_chunk_t)))
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        /* If the INIT is coming toward a closing socket, we'll send back
         * and ABORT.  Essentially, this catches the race of INIT being
@@ -344,18 +360,18 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep,
         * can treat this OOTB
         */
        if (sctp_sstate(ep->base.sk, CLOSING))
-               return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
+               return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
 
        /* Verify the INIT chunk before processing it. */
        err_chunk = NULL;
-       if (!sctp_verify_init(asoc, chunk->chunk_hdr->type,
+       if (!sctp_verify_init(net, asoc, chunk->chunk_hdr->type,
                              (sctp_init_chunk_t *)chunk->chunk_hdr, chunk,
                              &err_chunk)) {
                /* This chunk contains fatal error. It is to be discarded.
                 * Send an ABORT, with causes if there is any.
                 */
                if (err_chunk) {
-                       packet = sctp_abort_pkt_new(ep, asoc, arg,
+                       packet = sctp_abort_pkt_new(net, ep, asoc, arg,
                                        (__u8 *)(err_chunk->chunk_hdr) +
                                        sizeof(sctp_chunkhdr_t),
                                        ntohs(err_chunk->chunk_hdr->length) -
@@ -366,13 +382,13 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep,
                        if (packet) {
                                sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
                                                SCTP_PACKET(packet));
-                               SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
+                               SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
                                return SCTP_DISPOSITION_CONSUME;
                        } else {
                                return SCTP_DISPOSITION_NOMEM;
                        }
                } else {
-                       return sctp_sf_tabort_8_4_8(ep, asoc, type, arg,
+                       return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg,
                                                    commands);
                }
        }
@@ -484,7 +500,8 @@ nomem:
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_do_5_1C_ack(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_5_1C_ack(struct net *net,
+                                      const struct sctp_endpoint *ep,
                                       const struct sctp_association *asoc,
                                       const sctp_subtype_t type,
                                       void *arg,
@@ -496,25 +513,25 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const struct sctp_endpoint *ep,
        struct sctp_packet *packet;
 
        if (!sctp_vtag_verify(chunk, asoc))
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        /* 6.10 Bundling
         * An endpoint MUST NOT bundle INIT, INIT ACK or
         * SHUTDOWN COMPLETE with any other chunks.
         */
        if (!chunk->singleton)
-               return sctp_sf_violation_chunk(ep, asoc, type, arg, commands);
+               return sctp_sf_violation_chunk(net, ep, asoc, type, arg, commands);
 
        /* Make sure that the INIT-ACK chunk has a valid length */
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_initack_chunk_t)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
        /* Grab the INIT header.  */
        chunk->subh.init_hdr = (sctp_inithdr_t *) chunk->skb->data;
 
        /* Verify the INIT chunk before processing it. */
        err_chunk = NULL;
-       if (!sctp_verify_init(asoc, chunk->chunk_hdr->type,
+       if (!sctp_verify_init(net, asoc, chunk->chunk_hdr->type,
                              (sctp_init_chunk_t *)chunk->chunk_hdr, chunk,
                              &err_chunk)) {
 
@@ -526,7 +543,7 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const struct sctp_endpoint *ep,
                 * the association.
                 */
                if (err_chunk) {
-                       packet = sctp_abort_pkt_new(ep, asoc, arg,
+                       packet = sctp_abort_pkt_new(net, ep, asoc, arg,
                                        (__u8 *)(err_chunk->chunk_hdr) +
                                        sizeof(sctp_chunkhdr_t),
                                        ntohs(err_chunk->chunk_hdr->length) -
@@ -537,7 +554,7 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const struct sctp_endpoint *ep,
                        if (packet) {
                                sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
                                                SCTP_PACKET(packet));
-                               SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
+                               SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
                                error = SCTP_ERROR_INV_PARAM;
                        }
                }
@@ -554,10 +571,10 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const struct sctp_endpoint *ep,
                 * was malformed.
                 */
                if (sctp_auth_recv_cid(SCTP_CID_ABORT, asoc))
-                       return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+                       return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
-               SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
-               return sctp_stop_t1_and_abort(commands, error, ECONNREFUSED,
+               SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
+               return sctp_stop_t1_and_abort(net, commands, error, ECONNREFUSED,
                                                asoc, chunk->transport);
        }
 
@@ -633,7 +650,8 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const struct sctp_endpoint *ep,
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_5_1D_ce(struct net *net,
+                                     const struct sctp_endpoint *ep,
                                      const struct sctp_association *asoc,
                                      const sctp_subtype_t type, void *arg,
                                      sctp_cmd_seq_t *commands)
@@ -650,9 +668,9 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
        /* If the packet is an OOTB packet which is temporarily on the
         * control endpoint, respond with an ABORT.
         */
-       if (ep == sctp_sk((sctp_get_ctl_sock()))->ep) {
-               SCTP_INC_STATS(SCTP_MIB_OUTOFBLUES);
-               return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
+       if (ep == sctp_sk(net->sctp.ctl_sock)->ep) {
+               SCTP_INC_STATS(net, SCTP_MIB_OUTOFBLUES);
+               return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
        }
 
        /* Make sure that the COOKIE_ECHO chunk has a valid length.
@@ -661,7 +679,7 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
         * in sctp_unpack_cookie().
         */
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        /* If the endpoint is not listening or if the number of associations
         * on the TCP-style socket exceed the max backlog, respond with an
@@ -670,7 +688,7 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
        sk = ep->base.sk;
        if (!sctp_sstate(sk, LISTENING) ||
            (sctp_style(sk, TCP) && sk_acceptq_is_full(sk)))
-               return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
+               return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
 
        /* "Decode" the chunk.  We have no optional parameters so we
         * are in good shape.
@@ -703,13 +721,13 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
                        goto nomem;
 
                case -SCTP_IERROR_STALE_COOKIE:
-                       sctp_send_stale_cookie_err(ep, asoc, chunk, commands,
+                       sctp_send_stale_cookie_err(net, ep, asoc, chunk, commands,
                                                   err_chk_p);
-                       return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+                       return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
                case -SCTP_IERROR_BAD_SIG:
                default:
-                       return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+                       return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
                }
        }
 
@@ -756,14 +774,14 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
                skb_pull(chunk->auth_chunk, sizeof(sctp_chunkhdr_t));
                auth.transport = chunk->transport;
 
-               ret = sctp_sf_authenticate(ep, new_asoc, type, &auth);
+               ret = sctp_sf_authenticate(net, ep, new_asoc, type, &auth);
 
                /* We can now safely free the auth_chunk clone */
                kfree_skb(chunk->auth_chunk);
 
                if (ret != SCTP_IERROR_NO_ERROR) {
                        sctp_association_free(new_asoc);
-                       return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+                       return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
                }
        }
 
@@ -804,8 +822,8 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
        sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc));
        sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
                        SCTP_STATE(SCTP_STATE_ESTABLISHED));
-       SCTP_INC_STATS(SCTP_MIB_CURRESTAB);
-       SCTP_INC_STATS(SCTP_MIB_PASSIVEESTABS);
+       SCTP_INC_STATS(net, SCTP_MIB_CURRESTAB);
+       SCTP_INC_STATS(net, SCTP_MIB_PASSIVEESTABS);
        sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL());
 
        if (new_asoc->autoclose)
@@ -856,7 +874,8 @@ nomem:
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_do_5_1E_ca(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_5_1E_ca(struct net *net,
+                                     const struct sctp_endpoint *ep,
                                      const struct sctp_association *asoc,
                                      const sctp_subtype_t type, void *arg,
                                      sctp_cmd_seq_t *commands)
@@ -865,13 +884,13 @@ sctp_disposition_t sctp_sf_do_5_1E_ca(const struct sctp_endpoint *ep,
        struct sctp_ulpevent *ev;
 
        if (!sctp_vtag_verify(chunk, asoc))
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        /* Verify that the chunk length for the COOKIE-ACK is OK.
         * If we don't do this, any bundled chunks may be junked.
         */
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
        /* Reset init error count upon receipt of COOKIE-ACK,
@@ -892,8 +911,8 @@ sctp_disposition_t sctp_sf_do_5_1E_ca(const struct sctp_endpoint *ep,
                        SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE));
        sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
                        SCTP_STATE(SCTP_STATE_ESTABLISHED));
-       SCTP_INC_STATS(SCTP_MIB_CURRESTAB);
-       SCTP_INC_STATS(SCTP_MIB_ACTIVEESTABS);
+       SCTP_INC_STATS(net, SCTP_MIB_CURRESTAB);
+       SCTP_INC_STATS(net, SCTP_MIB_ACTIVEESTABS);
        sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL());
        if (asoc->autoclose)
                sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
@@ -958,7 +977,8 @@ static sctp_disposition_t sctp_sf_heartbeat(const struct sctp_endpoint *ep,
 }
 
 /* Generate a HEARTBEAT packet on the given transport.  */
-sctp_disposition_t sctp_sf_sendbeat_8_3(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_sendbeat_8_3(struct net *net,
+                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        const sctp_subtype_t type,
                                        void *arg,
@@ -972,8 +992,8 @@ sctp_disposition_t sctp_sf_sendbeat_8_3(const struct sctp_endpoint *ep,
                /* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
                sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
                                SCTP_PERR(SCTP_ERROR_NO_ERROR));
-               SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
-               SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+               SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
+               SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
                return SCTP_DISPOSITION_DELETE_TCB;
        }
 
@@ -1028,7 +1048,8 @@ sctp_disposition_t sctp_sf_sendbeat_8_3(const struct sctp_endpoint *ep,
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_beat_8_3(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_beat_8_3(struct net *net,
+                                   const struct sctp_endpoint *ep,
                                    const struct sctp_association *asoc,
                                    const sctp_subtype_t type,
                                    void *arg,
@@ -1039,11 +1060,11 @@ sctp_disposition_t sctp_sf_beat_8_3(const struct sctp_endpoint *ep,
        size_t paylen = 0;
 
        if (!sctp_vtag_verify(chunk, asoc))
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        /* Make sure that the HEARTBEAT chunk has a valid length. */
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_heartbeat_chunk_t)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
        /* 8.3 The receiver of the HEARTBEAT should immediately
@@ -1095,7 +1116,8 @@ nomem:
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_backbeat_8_3(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_backbeat_8_3(struct net *net,
+                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        const sctp_subtype_t type,
                                        void *arg,
@@ -1108,12 +1130,12 @@ sctp_disposition_t sctp_sf_backbeat_8_3(const struct sctp_endpoint *ep,
        unsigned long max_interval;
 
        if (!sctp_vtag_verify(chunk, asoc))
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        /* Make sure that the HEARTBEAT-ACK chunk has a valid length.  */
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t) +
                                            sizeof(sctp_sender_hb_info_t)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
        hbinfo = (sctp_sender_hb_info_t *) chunk->skb->data;
@@ -1171,7 +1193,7 @@ sctp_disposition_t sctp_sf_backbeat_8_3(const struct sctp_endpoint *ep,
 /* Helper function to send out an abort for the restart
  * condition.
  */
-static int sctp_sf_send_restart_abort(union sctp_addr *ssa,
+static int sctp_sf_send_restart_abort(struct net *net, union sctp_addr *ssa,
                                      struct sctp_chunk *init,
                                      sctp_cmd_seq_t *commands)
 {
@@ -1197,18 +1219,18 @@ static int sctp_sf_send_restart_abort(union sctp_addr *ssa,
        errhdr->length = htons(len);
 
        /* Assign to the control socket. */
-       ep = sctp_sk((sctp_get_ctl_sock()))->ep;
+       ep = sctp_sk(net->sctp.ctl_sock)->ep;
 
        /* Association is NULL since this may be a restart attack and we
         * want to send back the attacker's vtag.
         */
-       pkt = sctp_abort_pkt_new(ep, NULL, init, errhdr, len);
+       pkt = sctp_abort_pkt_new(net, ep, NULL, init, errhdr, len);
 
        if (!pkt)
                goto out;
        sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT, SCTP_PACKET(pkt));
 
-       SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
+       SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
 
        /* Discard the rest of the inbound packet. */
        sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL());
@@ -1240,6 +1262,7 @@ static int sctp_sf_check_restart_addrs(const struct sctp_association *new_asoc,
                                       struct sctp_chunk *init,
                                       sctp_cmd_seq_t *commands)
 {
+       struct net *net = sock_net(new_asoc->base.sk);
        struct sctp_transport *new_addr;
        int ret = 1;
 
@@ -1258,7 +1281,7 @@ static int sctp_sf_check_restart_addrs(const struct sctp_association *new_asoc,
                            transports) {
                if (!list_has_sctp_addr(&asoc->peer.transport_addr_list,
                                        &new_addr->ipaddr)) {
-                       sctp_sf_send_restart_abort(&new_addr->ipaddr, init,
+                       sctp_sf_send_restart_abort(net, &new_addr->ipaddr, init,
                                                   commands);
                        ret = 0;
                        break;
@@ -1358,6 +1381,7 @@ static char sctp_tietags_compare(struct sctp_association *new_asoc,
  * chunk handling.
  */
 static sctp_disposition_t sctp_sf_do_unexpected_init(
+       struct net *net,
        const struct sctp_endpoint *ep,
        const struct sctp_association *asoc,
        const sctp_subtype_t type,
@@ -1382,20 +1406,20 @@ static sctp_disposition_t sctp_sf_do_unexpected_init(
         * with an INIT chunk that is bundled with other chunks.
         */
        if (!chunk->singleton)
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        /* 3.1 A packet containing an INIT chunk MUST have a zero Verification
         * Tag.
         */
        if (chunk->sctp_hdr->vtag != 0)
-               return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
+               return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
 
        /* Make sure that the INIT chunk has a valid length.
         * In this case, we generate a protocol violation since we have
         * an association established.
         */
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_init_chunk_t)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
        /* Grab the INIT header.  */
        chunk->subh.init_hdr = (sctp_inithdr_t *) chunk->skb->data;
@@ -1405,14 +1429,14 @@ static sctp_disposition_t sctp_sf_do_unexpected_init(
 
        /* Verify the INIT chunk before processing it. */
        err_chunk = NULL;
-       if (!sctp_verify_init(asoc, chunk->chunk_hdr->type,
+       if (!sctp_verify_init(net, asoc, chunk->chunk_hdr->type,
                              (sctp_init_chunk_t *)chunk->chunk_hdr, chunk,
                              &err_chunk)) {
                /* This chunk contains fatal error. It is to be discarded.
                 * Send an ABORT, with causes if there is any.
                 */
                if (err_chunk) {
-                       packet = sctp_abort_pkt_new(ep, asoc, arg,
+                       packet = sctp_abort_pkt_new(net, ep, asoc, arg,
                                        (__u8 *)(err_chunk->chunk_hdr) +
                                        sizeof(sctp_chunkhdr_t),
                                        ntohs(err_chunk->chunk_hdr->length) -
@@ -1421,14 +1445,14 @@ static sctp_disposition_t sctp_sf_do_unexpected_init(
                        if (packet) {
                                sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
                                                SCTP_PACKET(packet));
-                               SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
+                               SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
                                retval = SCTP_DISPOSITION_CONSUME;
                        } else {
                                retval = SCTP_DISPOSITION_NOMEM;
                        }
                        goto cleanup;
                } else {
-                       return sctp_sf_tabort_8_4_8(ep, asoc, type, arg,
+                       return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg,
                                                    commands);
                }
        }
@@ -1570,7 +1594,8 @@ cleanup:
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_do_5_2_1_siminit(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_5_2_1_siminit(struct net *net,
+                                   const struct sctp_endpoint *ep,
                                    const struct sctp_association *asoc,
                                    const sctp_subtype_t type,
                                    void *arg,
@@ -1579,7 +1604,7 @@ sctp_disposition_t sctp_sf_do_5_2_1_siminit(const struct sctp_endpoint *ep,
        /* Call helper to do the real work for both simulataneous and
         * duplicate INIT chunk handling.
         */
-       return sctp_sf_do_unexpected_init(ep, asoc, type, arg, commands);
+       return sctp_sf_do_unexpected_init(net, ep, asoc, type, arg, commands);
 }
 
 /*
@@ -1623,7 +1648,8 @@ sctp_disposition_t sctp_sf_do_5_2_1_siminit(const struct sctp_endpoint *ep,
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_do_5_2_2_dupinit(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_5_2_2_dupinit(struct net *net,
+                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        const sctp_subtype_t type,
                                        void *arg,
@@ -1632,7 +1658,7 @@ sctp_disposition_t sctp_sf_do_5_2_2_dupinit(const struct sctp_endpoint *ep,
        /* Call helper to do the real work for both simulataneous and
         * duplicate INIT chunk handling.
         */
-       return sctp_sf_do_unexpected_init(ep, asoc, type, arg, commands);
+       return sctp_sf_do_unexpected_init(net, ep, asoc, type, arg, commands);
 }
 
 
@@ -1645,7 +1671,8 @@ sctp_disposition_t sctp_sf_do_5_2_2_dupinit(const struct sctp_endpoint *ep,
  * An unexpected INIT ACK usually indicates the processing of an old or
  * duplicated INIT chunk.
 */
-sctp_disposition_t sctp_sf_do_5_2_3_initack(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_5_2_3_initack(struct net *net,
+                                           const struct sctp_endpoint *ep,
                                            const struct sctp_association *asoc,
                                            const sctp_subtype_t type,
                                            void *arg, sctp_cmd_seq_t *commands)
@@ -1653,10 +1680,10 @@ sctp_disposition_t sctp_sf_do_5_2_3_initack(const struct sctp_endpoint *ep,
        /* Per the above section, we'll discard the chunk if we have an
         * endpoint.  If this is an OOTB INIT-ACK, treat it as such.
         */
-       if (ep == sctp_sk((sctp_get_ctl_sock()))->ep)
-               return sctp_sf_ootb(ep, asoc, type, arg, commands);
+       if (ep == sctp_sk(net->sctp.ctl_sock)->ep)
+               return sctp_sf_ootb(net, ep, asoc, type, arg, commands);
        else
-               return sctp_sf_discard_chunk(ep, asoc, type, arg, commands);
+               return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
 }
 
 /* Unexpected COOKIE-ECHO handler for peer restart (Table 2, action 'A')
@@ -1664,7 +1691,8 @@ sctp_disposition_t sctp_sf_do_5_2_3_initack(const struct sctp_endpoint *ep,
  * Section 5.2.4
  *  A)  In this case, the peer may have restarted.
  */
-static sctp_disposition_t sctp_sf_do_dupcook_a(const struct sctp_endpoint *ep,
+static sctp_disposition_t sctp_sf_do_dupcook_a(struct net *net,
+                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        struct sctp_chunk *chunk,
                                        sctp_cmd_seq_t *commands,
@@ -1700,7 +1728,7 @@ static sctp_disposition_t sctp_sf_do_dupcook_a(const struct sctp_endpoint *ep,
         * its peer.
        */
        if (sctp_state(asoc, SHUTDOWN_ACK_SENT)) {
-               disposition = sctp_sf_do_9_2_reshutack(ep, asoc,
+               disposition = sctp_sf_do_9_2_reshutack(net, ep, asoc,
                                SCTP_ST_CHUNK(chunk->chunk_hdr->type),
                                chunk, commands);
                if (SCTP_DISPOSITION_NOMEM == disposition)
@@ -1763,7 +1791,8 @@ nomem:
  *      after responding to the local endpoint's INIT
  */
 /* This case represents an initialization collision.  */
-static sctp_disposition_t sctp_sf_do_dupcook_b(const struct sctp_endpoint *ep,
+static sctp_disposition_t sctp_sf_do_dupcook_b(struct net *net,
+                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        struct sctp_chunk *chunk,
                                        sctp_cmd_seq_t *commands,
@@ -1784,7 +1813,7 @@ static sctp_disposition_t sctp_sf_do_dupcook_b(const struct sctp_endpoint *ep,
        sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc));
        sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
                        SCTP_STATE(SCTP_STATE_ESTABLISHED));
-       SCTP_INC_STATS(SCTP_MIB_CURRESTAB);
+       SCTP_INC_STATS(net, SCTP_MIB_CURRESTAB);
        sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL());
 
        repl = sctp_make_cookie_ack(new_asoc, chunk);
@@ -1833,7 +1862,8 @@ nomem:
  *     but a new tag of its own.
  */
 /* This case represents an initialization collision.  */
-static sctp_disposition_t sctp_sf_do_dupcook_c(const struct sctp_endpoint *ep,
+static sctp_disposition_t sctp_sf_do_dupcook_c(struct net *net,
+                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        struct sctp_chunk *chunk,
                                        sctp_cmd_seq_t *commands,
@@ -1854,7 +1884,8 @@ static sctp_disposition_t sctp_sf_do_dupcook_c(const struct sctp_endpoint *ep,
  *    enter the ESTABLISHED state, if it has not already done so.
  */
 /* This case represents an initialization collision.  */
-static sctp_disposition_t sctp_sf_do_dupcook_d(const struct sctp_endpoint *ep,
+static sctp_disposition_t sctp_sf_do_dupcook_d(struct net *net,
+                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        struct sctp_chunk *chunk,
                                        sctp_cmd_seq_t *commands,
@@ -1876,7 +1907,7 @@ static sctp_disposition_t sctp_sf_do_dupcook_d(const struct sctp_endpoint *ep,
                                SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE));
                sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
                                SCTP_STATE(SCTP_STATE_ESTABLISHED));
-               SCTP_INC_STATS(SCTP_MIB_CURRESTAB);
+               SCTP_INC_STATS(net, SCTP_MIB_CURRESTAB);
                sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START,
                                SCTP_NULL());
 
@@ -1948,7 +1979,8 @@ nomem:
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_do_5_2_4_dupcook(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_5_2_4_dupcook(struct net *net,
+                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        const sctp_subtype_t type,
                                        void *arg,
@@ -1967,7 +1999,7 @@ sctp_disposition_t sctp_sf_do_5_2_4_dupcook(const struct sctp_endpoint *ep,
         * done later.
         */
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
        /* "Decode" the chunk.  We have no optional parameters so we
@@ -2001,12 +2033,12 @@ sctp_disposition_t sctp_sf_do_5_2_4_dupcook(const struct sctp_endpoint *ep,
                        goto nomem;
 
                case -SCTP_IERROR_STALE_COOKIE:
-                       sctp_send_stale_cookie_err(ep, asoc, chunk, commands,
+                       sctp_send_stale_cookie_err(net, ep, asoc, chunk, commands,
                                                   err_chk_p);
-                       return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+                       return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
                case -SCTP_IERROR_BAD_SIG:
                default:
-                       return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+                       return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
                }
        }
 
@@ -2017,27 +2049,27 @@ sctp_disposition_t sctp_sf_do_5_2_4_dupcook(const struct sctp_endpoint *ep,
 
        switch (action) {
        case 'A': /* Association restart. */
-               retval = sctp_sf_do_dupcook_a(ep, asoc, chunk, commands,
+               retval = sctp_sf_do_dupcook_a(net, ep, asoc, chunk, commands,
                                              new_asoc);
                break;
 
        case 'B': /* Collision case B. */
-               retval = sctp_sf_do_dupcook_b(ep, asoc, chunk, commands,
+               retval = sctp_sf_do_dupcook_b(net, ep, asoc, chunk, commands,
                                              new_asoc);
                break;
 
        case 'C': /* Collision case C. */
-               retval = sctp_sf_do_dupcook_c(ep, asoc, chunk, commands,
+               retval = sctp_sf_do_dupcook_c(net, ep, asoc, chunk, commands,
                                              new_asoc);
                break;
 
        case 'D': /* Collision case D. */
-               retval = sctp_sf_do_dupcook_d(ep, asoc, chunk, commands,
+               retval = sctp_sf_do_dupcook_d(net, ep, asoc, chunk, commands,
                                              new_asoc);
                break;
 
        default: /* Discard packet for all others. */
-               retval = sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               retval = sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
                break;
        }
 
@@ -2063,6 +2095,7 @@ nomem:
  * See sctp_sf_do_9_1_abort().
  */
 sctp_disposition_t sctp_sf_shutdown_pending_abort(
+       struct net *net,
        const struct sctp_endpoint *ep,
        const struct sctp_association *asoc,
        const sctp_subtype_t type,
@@ -2072,7 +2105,7 @@ sctp_disposition_t sctp_sf_shutdown_pending_abort(
        struct sctp_chunk *chunk = arg;
 
        if (!sctp_vtag_verify_either(chunk, asoc))
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        /* Make sure that the ABORT chunk has a valid length.
         * Since this is an ABORT chunk, we have to discard it
@@ -2085,7 +2118,7 @@ sctp_disposition_t sctp_sf_shutdown_pending_abort(
         * packet.
         */
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_abort_chunk_t)))
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        /* ADD-IP: Special case for ABORT chunks
         * F4)  One special consideration is that ABORT Chunks arriving
@@ -2094,9 +2127,9 @@ sctp_disposition_t sctp_sf_shutdown_pending_abort(
         */
        if (SCTP_ADDR_DEL ==
                    sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest))
-               return sctp_sf_discard_chunk(ep, asoc, type, arg, commands);
+               return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
 
-       return __sctp_sf_do_9_1_abort(ep, asoc, type, arg, commands);
+       return __sctp_sf_do_9_1_abort(net, ep, asoc, type, arg, commands);
 }
 
 /*
@@ -2104,7 +2137,8 @@ sctp_disposition_t sctp_sf_shutdown_pending_abort(
  *
  * See sctp_sf_do_9_1_abort().
  */
-sctp_disposition_t sctp_sf_shutdown_sent_abort(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_shutdown_sent_abort(struct net *net,
+                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        const sctp_subtype_t type,
                                        void *arg,
@@ -2113,7 +2147,7 @@ sctp_disposition_t sctp_sf_shutdown_sent_abort(const struct sctp_endpoint *ep,
        struct sctp_chunk *chunk = arg;
 
        if (!sctp_vtag_verify_either(chunk, asoc))
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        /* Make sure that the ABORT chunk has a valid length.
         * Since this is an ABORT chunk, we have to discard it
@@ -2126,7 +2160,7 @@ sctp_disposition_t sctp_sf_shutdown_sent_abort(const struct sctp_endpoint *ep,
         * packet.
         */
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_abort_chunk_t)))
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        /* ADD-IP: Special case for ABORT chunks
         * F4)  One special consideration is that ABORT Chunks arriving
@@ -2135,7 +2169,7 @@ sctp_disposition_t sctp_sf_shutdown_sent_abort(const struct sctp_endpoint *ep,
         */
        if (SCTP_ADDR_DEL ==
                    sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest))
-               return sctp_sf_discard_chunk(ep, asoc, type, arg, commands);
+               return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
 
        /* Stop the T2-shutdown timer. */
        sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
@@ -2145,7 +2179,7 @@ sctp_disposition_t sctp_sf_shutdown_sent_abort(const struct sctp_endpoint *ep,
        sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
                        SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
 
-       return __sctp_sf_do_9_1_abort(ep, asoc, type, arg, commands);
+       return __sctp_sf_do_9_1_abort(net, ep, asoc, type, arg, commands);
 }
 
 /*
@@ -2154,6 +2188,7 @@ sctp_disposition_t sctp_sf_shutdown_sent_abort(const struct sctp_endpoint *ep,
  * See sctp_sf_do_9_1_abort().
  */
 sctp_disposition_t sctp_sf_shutdown_ack_sent_abort(
+       struct net *net,
        const struct sctp_endpoint *ep,
        const struct sctp_association *asoc,
        const sctp_subtype_t type,
@@ -2163,7 +2198,7 @@ sctp_disposition_t sctp_sf_shutdown_ack_sent_abort(
        /* The same T2 timer, so we should be able to use
         * common function with the SHUTDOWN-SENT state.
         */
-       return sctp_sf_shutdown_sent_abort(ep, asoc, type, arg, commands);
+       return sctp_sf_shutdown_sent_abort(net, ep, asoc, type, arg, commands);
 }
 
 /*
@@ -2180,7 +2215,8 @@ sctp_disposition_t sctp_sf_shutdown_ack_sent_abort(
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_cookie_echoed_err(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_cookie_echoed_err(struct net *net,
+                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        const sctp_subtype_t type,
                                        void *arg,
@@ -2190,13 +2226,13 @@ sctp_disposition_t sctp_sf_cookie_echoed_err(const struct sctp_endpoint *ep,
        sctp_errhdr_t *err;
 
        if (!sctp_vtag_verify(chunk, asoc))
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        /* Make sure that the ERROR chunk has a valid length.
         * The parameter walking depends on this as well.
         */
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_operr_chunk_t)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
        /* Process the error here */
@@ -2206,7 +2242,7 @@ sctp_disposition_t sctp_sf_cookie_echoed_err(const struct sctp_endpoint *ep,
         */
        sctp_walk_errors(err, chunk->chunk_hdr) {
                if (SCTP_ERROR_STALE_COOKIE == err->cause)
-                       return sctp_sf_do_5_2_6_stale(ep, asoc, type,
+                       return sctp_sf_do_5_2_6_stale(net, ep, asoc, type,
                                                        arg, commands);
        }
 
@@ -2215,7 +2251,7 @@ sctp_disposition_t sctp_sf_cookie_echoed_err(const struct sctp_endpoint *ep,
         * we are discarding the packet, there should be no adverse
         * affects.
         */
-       return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+       return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 }
 
 /*
@@ -2243,7 +2279,8 @@ sctp_disposition_t sctp_sf_cookie_echoed_err(const struct sctp_endpoint *ep,
  *
  * The return value is the disposition of the chunk.
  */
-static sctp_disposition_t sctp_sf_do_5_2_6_stale(const struct sctp_endpoint *ep,
+static sctp_disposition_t sctp_sf_do_5_2_6_stale(struct net *net,
+                                                const struct sctp_endpoint *ep,
                                                 const struct sctp_association *asoc,
                                                 const sctp_subtype_t type,
                                                 void *arg,
@@ -2365,7 +2402,8 @@ nomem:
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_do_9_1_abort(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_9_1_abort(struct net *net,
+                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        const sctp_subtype_t type,
                                        void *arg,
@@ -2374,7 +2412,7 @@ sctp_disposition_t sctp_sf_do_9_1_abort(const struct sctp_endpoint *ep,
        struct sctp_chunk *chunk = arg;
 
        if (!sctp_vtag_verify_either(chunk, asoc))
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        /* Make sure that the ABORT chunk has a valid length.
         * Since this is an ABORT chunk, we have to discard it
@@ -2387,7 +2425,7 @@ sctp_disposition_t sctp_sf_do_9_1_abort(const struct sctp_endpoint *ep,
         * packet.
         */
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_abort_chunk_t)))
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        /* ADD-IP: Special case for ABORT chunks
         * F4)  One special consideration is that ABORT Chunks arriving
@@ -2396,12 +2434,13 @@ sctp_disposition_t sctp_sf_do_9_1_abort(const struct sctp_endpoint *ep,
         */
        if (SCTP_ADDR_DEL ==
                    sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest))
-               return sctp_sf_discard_chunk(ep, asoc, type, arg, commands);
+               return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
 
-       return __sctp_sf_do_9_1_abort(ep, asoc, type, arg, commands);
+       return __sctp_sf_do_9_1_abort(net, ep, asoc, type, arg, commands);
 }
 
-static sctp_disposition_t __sctp_sf_do_9_1_abort(const struct sctp_endpoint *ep,
+static sctp_disposition_t __sctp_sf_do_9_1_abort(struct net *net,
+                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        const sctp_subtype_t type,
                                        void *arg,
@@ -2418,7 +2457,7 @@ static sctp_disposition_t __sctp_sf_do_9_1_abort(const struct sctp_endpoint *ep,
                sctp_errhdr_t *err;
                sctp_walk_errors(err, chunk->chunk_hdr);
                if ((void *)err != (void *)chunk->chunk_end)
-                       return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+                       return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
                error = ((sctp_errhdr_t *)chunk->skb->data)->cause;
        }
@@ -2426,8 +2465,8 @@ static sctp_disposition_t __sctp_sf_do_9_1_abort(const struct sctp_endpoint *ep,
        sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ECONNRESET));
        /* ASSOC_FAILED will DELETE_TCB. */
        sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_PERR(error));
-       SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
-       SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+       SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
+       SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
 
        return SCTP_DISPOSITION_ABORT;
 }
@@ -2437,7 +2476,8 @@ static sctp_disposition_t __sctp_sf_do_9_1_abort(const struct sctp_endpoint *ep,
  *
  * See sctp_sf_do_9_1_abort() above.
  */
-sctp_disposition_t sctp_sf_cookie_wait_abort(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_cookie_wait_abort(struct net *net,
+                                    const struct sctp_endpoint *ep,
                                     const struct sctp_association *asoc,
                                     const sctp_subtype_t type,
                                     void *arg,
@@ -2448,7 +2488,7 @@ sctp_disposition_t sctp_sf_cookie_wait_abort(const struct sctp_endpoint *ep,
        __be16 error = SCTP_ERROR_NO_ERROR;
 
        if (!sctp_vtag_verify_either(chunk, asoc))
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        /* Make sure that the ABORT chunk has a valid length.
         * Since this is an ABORT chunk, we have to discard it
@@ -2461,27 +2501,28 @@ sctp_disposition_t sctp_sf_cookie_wait_abort(const struct sctp_endpoint *ep,
         * packet.
         */
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_abort_chunk_t)))
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        /* See if we have an error cause code in the chunk.  */
        len = ntohs(chunk->chunk_hdr->length);
        if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_errhdr))
                error = ((sctp_errhdr_t *)chunk->skb->data)->cause;
 
-       return sctp_stop_t1_and_abort(commands, error, ECONNREFUSED, asoc,
+       return sctp_stop_t1_and_abort(net, commands, error, ECONNREFUSED, asoc,
                                      chunk->transport);
 }
 
 /*
  * Process an incoming ICMP as an ABORT.  (COOKIE-WAIT state)
  */
-sctp_disposition_t sctp_sf_cookie_wait_icmp_abort(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_cookie_wait_icmp_abort(struct net *net,
+                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        const sctp_subtype_t type,
                                        void *arg,
                                        sctp_cmd_seq_t *commands)
 {
-       return sctp_stop_t1_and_abort(commands, SCTP_ERROR_NO_ERROR,
+       return sctp_stop_t1_and_abort(net, commands, SCTP_ERROR_NO_ERROR,
                                      ENOPROTOOPT, asoc,
                                      (struct sctp_transport *)arg);
 }
@@ -2489,7 +2530,8 @@ sctp_disposition_t sctp_sf_cookie_wait_icmp_abort(const struct sctp_endpoint *ep
 /*
  * Process an ABORT.  (COOKIE-ECHOED state)
  */
-sctp_disposition_t sctp_sf_cookie_echoed_abort(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_cookie_echoed_abort(struct net *net,
+                                              const struct sctp_endpoint *ep,
                                               const struct sctp_association *asoc,
                                               const sctp_subtype_t type,
                                               void *arg,
@@ -2498,7 +2540,7 @@ sctp_disposition_t sctp_sf_cookie_echoed_abort(const struct sctp_endpoint *ep,
        /* There is a single T1 timer, so we should be able to use
         * common function with the COOKIE-WAIT state.
         */
-       return sctp_sf_cookie_wait_abort(ep, asoc, type, arg, commands);
+       return sctp_sf_cookie_wait_abort(net, ep, asoc, type, arg, commands);
 }
 
 /*
@@ -2506,7 +2548,8 @@ sctp_disposition_t sctp_sf_cookie_echoed_abort(const struct sctp_endpoint *ep,
  *
  * This is common code called by several sctp_sf_*_abort() functions above.
  */
-static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands,
+static sctp_disposition_t sctp_stop_t1_and_abort(struct net *net,
+                                          sctp_cmd_seq_t *commands,
                                           __be16 error, int sk_err,
                                           const struct sctp_association *asoc,
                                           struct sctp_transport *transport)
@@ -2514,7 +2557,7 @@ static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands,
        SCTP_DEBUG_PRINTK("ABORT received (INIT).\n");
        sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
                        SCTP_STATE(SCTP_STATE_CLOSED));
-       SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
+       SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
        sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
                        SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
        sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(sk_err));
@@ -2557,7 +2600,8 @@ static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands,
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_do_9_2_shutdown(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_9_2_shutdown(struct net *net,
+                                          const struct sctp_endpoint *ep,
                                           const struct sctp_association *asoc,
                                           const sctp_subtype_t type,
                                           void *arg,
@@ -2570,12 +2614,12 @@ sctp_disposition_t sctp_sf_do_9_2_shutdown(const struct sctp_endpoint *ep,
        __u32 ctsn;
 
        if (!sctp_vtag_verify(chunk, asoc))
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        /* Make sure that the SHUTDOWN chunk has a valid length. */
        if (!sctp_chunk_length_valid(chunk,
                                      sizeof(struct sctp_shutdown_chunk_t)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
        /* Convert the elaborate header.  */
@@ -2595,7 +2639,7 @@ sctp_disposition_t sctp_sf_do_9_2_shutdown(const struct sctp_endpoint *ep,
         * sender with an ABORT.
         */
        if (!TSN_lt(ctsn, asoc->next_tsn))
-               return sctp_sf_violation_ctsn(ep, asoc, type, arg, commands);
+               return sctp_sf_violation_ctsn(net, ep, asoc, type, arg, commands);
 
        /* API 5.3.1.5 SCTP_SHUTDOWN_EVENT
         * When a peer sends a SHUTDOWN, SCTP delivers this notification to
@@ -2619,7 +2663,7 @@ sctp_disposition_t sctp_sf_do_9_2_shutdown(const struct sctp_endpoint *ep,
        disposition = SCTP_DISPOSITION_CONSUME;
 
        if (sctp_outq_is_empty(&asoc->outqueue)) {
-               disposition = sctp_sf_do_9_2_shutdown_ack(ep, asoc, type,
+               disposition = sctp_sf_do_9_2_shutdown_ack(net, ep, asoc, type,
                                                          arg, commands);
        }
 
@@ -2645,7 +2689,8 @@ out:
  * The Cumulative TSN Ack of the received SHUTDOWN chunk
  * MUST be processed.
  */
-sctp_disposition_t sctp_sf_do_9_2_shut_ctsn(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_9_2_shut_ctsn(struct net *net,
+                                          const struct sctp_endpoint *ep,
                                           const struct sctp_association *asoc,
                                           const sctp_subtype_t type,
                                           void *arg,
@@ -2656,12 +2701,12 @@ sctp_disposition_t sctp_sf_do_9_2_shut_ctsn(const struct sctp_endpoint *ep,
        __u32 ctsn;
 
        if (!sctp_vtag_verify(chunk, asoc))
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        /* Make sure that the SHUTDOWN chunk has a valid length. */
        if (!sctp_chunk_length_valid(chunk,
                                      sizeof(struct sctp_shutdown_chunk_t)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
        sdh = (sctp_shutdownhdr_t *)chunk->skb->data;
@@ -2678,7 +2723,7 @@ sctp_disposition_t sctp_sf_do_9_2_shut_ctsn(const struct sctp_endpoint *ep,
         * sender with an ABORT.
         */
        if (!TSN_lt(ctsn, asoc->next_tsn))
-               return sctp_sf_violation_ctsn(ep, asoc, type, arg, commands);
+               return sctp_sf_violation_ctsn(net, ep, asoc, type, arg, commands);
 
        /* verify, by checking the Cumulative TSN Ack field of the
         * chunk, that all its outstanding DATA chunks have been
@@ -2697,7 +2742,8 @@ sctp_disposition_t sctp_sf_do_9_2_shut_ctsn(const struct sctp_endpoint *ep,
  * that belong to this association, it should discard the INIT chunk and
  * retransmit the SHUTDOWN ACK chunk.
  */
-sctp_disposition_t sctp_sf_do_9_2_reshutack(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_9_2_reshutack(struct net *net,
+                                   const struct sctp_endpoint *ep,
                                    const struct sctp_association *asoc,
                                    const sctp_subtype_t type,
                                    void *arg,
@@ -2708,7 +2754,7 @@ sctp_disposition_t sctp_sf_do_9_2_reshutack(const struct sctp_endpoint *ep,
 
        /* Make sure that the chunk has a valid length */
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
        /* Since we are not going to really process this INIT, there
@@ -2760,7 +2806,8 @@ nomem:
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_do_ecn_cwr(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_ecn_cwr(struct net *net,
+                                     const struct sctp_endpoint *ep,
                                      const struct sctp_association *asoc,
                                      const sctp_subtype_t type,
                                      void *arg,
@@ -2771,10 +2818,10 @@ sctp_disposition_t sctp_sf_do_ecn_cwr(const struct sctp_endpoint *ep,
        u32 lowest_tsn;
 
        if (!sctp_vtag_verify(chunk, asoc))
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_ecne_chunk_t)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
        cwr = (sctp_cwrhdr_t *) chunk->skb->data;
@@ -2815,7 +2862,8 @@ sctp_disposition_t sctp_sf_do_ecn_cwr(const struct sctp_endpoint *ep,
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_do_ecne(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_ecne(struct net *net,
+                                  const struct sctp_endpoint *ep,
                                   const struct sctp_association *asoc,
                                   const sctp_subtype_t type,
                                   void *arg,
@@ -2825,10 +2873,10 @@ sctp_disposition_t sctp_sf_do_ecne(const struct sctp_endpoint *ep,
        struct sctp_chunk *chunk = arg;
 
        if (!sctp_vtag_verify(chunk, asoc))
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_ecne_chunk_t)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
        ecne = (sctp_ecnehdr_t *) chunk->skb->data;
@@ -2871,7 +2919,8 @@ sctp_disposition_t sctp_sf_do_ecne(const struct sctp_endpoint *ep,
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_eat_data_6_2(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_eat_data_6_2(struct net *net,
+                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        const sctp_subtype_t type,
                                        void *arg,
@@ -2884,11 +2933,11 @@ sctp_disposition_t sctp_sf_eat_data_6_2(const struct sctp_endpoint *ep,
        if (!sctp_vtag_verify(chunk, asoc)) {
                sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
                                SCTP_NULL());
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
        }
 
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_data_chunk_t)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
        error = sctp_eat_data(asoc, chunk, commands );
@@ -2897,16 +2946,16 @@ sctp_disposition_t sctp_sf_eat_data_6_2(const struct sctp_endpoint *ep,
                break;
        case SCTP_IERROR_HIGH_TSN:
        case SCTP_IERROR_BAD_STREAM:
-               SCTP_INC_STATS(SCTP_MIB_IN_DATA_CHUNK_DISCARDS);
+               SCTP_INC_STATS(net, SCTP_MIB_IN_DATA_CHUNK_DISCARDS);
                goto discard_noforce;
        case SCTP_IERROR_DUP_TSN:
        case SCTP_IERROR_IGNORE_TSN:
-               SCTP_INC_STATS(SCTP_MIB_IN_DATA_CHUNK_DISCARDS);
+               SCTP_INC_STATS(net, SCTP_MIB_IN_DATA_CHUNK_DISCARDS);
                goto discard_force;
        case SCTP_IERROR_NO_DATA:
                goto consume;
        case SCTP_IERROR_PROTO_VIOLATION:
-               return sctp_sf_abort_violation(ep, asoc, chunk, commands,
+               return sctp_sf_abort_violation(net, ep, asoc, chunk, commands,
                        (u8 *)chunk->subh.data_hdr, sizeof(sctp_datahdr_t));
        default:
                BUG();
@@ -2992,7 +3041,8 @@ consume:
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_eat_data_fast_4_4(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_eat_data_fast_4_4(struct net *net,
+                                    const struct sctp_endpoint *ep,
                                     const struct sctp_association *asoc,
                                     const sctp_subtype_t type,
                                     void *arg,
@@ -3004,11 +3054,11 @@ sctp_disposition_t sctp_sf_eat_data_fast_4_4(const struct sctp_endpoint *ep,
        if (!sctp_vtag_verify(chunk, asoc)) {
                sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
                                SCTP_NULL());
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
        }
 
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_data_chunk_t)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
        error = sctp_eat_data(asoc, chunk, commands );
@@ -3022,7 +3072,7 @@ sctp_disposition_t sctp_sf_eat_data_fast_4_4(const struct sctp_endpoint *ep,
        case SCTP_IERROR_NO_DATA:
                goto consume;
        case SCTP_IERROR_PROTO_VIOLATION:
-               return sctp_sf_abort_violation(ep, asoc, chunk, commands,
+               return sctp_sf_abort_violation(net, ep, asoc, chunk, commands,
                        (u8 *)chunk->subh.data_hdr, sizeof(sctp_datahdr_t));
        default:
                BUG();
@@ -3082,7 +3132,8 @@ consume:
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_eat_sack_6_2(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_eat_sack_6_2(struct net *net,
+                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        const sctp_subtype_t type,
                                        void *arg,
@@ -3093,18 +3144,18 @@ sctp_disposition_t sctp_sf_eat_sack_6_2(const struct sctp_endpoint *ep,
        __u32 ctsn;
 
        if (!sctp_vtag_verify(chunk, asoc))
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        /* Make sure that the SACK chunk has a valid length. */
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_sack_chunk_t)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
        /* Pull the SACK chunk from the data buffer */
        sackh = sctp_sm_pull_sack(chunk);
        /* Was this a bogus SACK? */
        if (!sackh)
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
        chunk->subh.sack_hdr = sackh;
        ctsn = ntohl(sackh->cum_tsn_ack);
 
@@ -3125,7 +3176,7 @@ sctp_disposition_t sctp_sf_eat_sack_6_2(const struct sctp_endpoint *ep,
         * sender with an ABORT.
         */
        if (!TSN_lt(ctsn, asoc->next_tsn))
-               return sctp_sf_violation_ctsn(ep, asoc, type, arg, commands);
+               return sctp_sf_violation_ctsn(net, ep, asoc, type, arg, commands);
 
        /* Return this SACK for further processing.  */
        sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_SACK, SCTP_SACKH(sackh));
@@ -3154,7 +3205,8 @@ sctp_disposition_t sctp_sf_eat_sack_6_2(const struct sctp_endpoint *ep,
  *
  * The return value is the disposition of the chunk.
 */
-static sctp_disposition_t sctp_sf_tabort_8_4_8(const struct sctp_endpoint *ep,
+static sctp_disposition_t sctp_sf_tabort_8_4_8(struct net *net,
+                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        const sctp_subtype_t type,
                                        void *arg,
@@ -3164,7 +3216,7 @@ static sctp_disposition_t sctp_sf_tabort_8_4_8(const struct sctp_endpoint *ep,
        struct sctp_chunk *chunk = arg;
        struct sctp_chunk *abort;
 
-       packet = sctp_ootb_pkt_new(asoc, chunk);
+       packet = sctp_ootb_pkt_new(net, asoc, chunk);
 
        if (packet) {
                /* Make an ABORT. The T bit will be set if the asoc
@@ -3188,9 +3240,9 @@ static sctp_disposition_t sctp_sf_tabort_8_4_8(const struct sctp_endpoint *ep,
                sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
                                SCTP_PACKET(packet));
 
-               SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
+               SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
 
-               sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
                return SCTP_DISPOSITION_CONSUME;
        }
 
@@ -3205,7 +3257,8 @@ static sctp_disposition_t sctp_sf_tabort_8_4_8(const struct sctp_endpoint *ep,
  *
  * The return value is the disposition of the chunk.
 */
-sctp_disposition_t sctp_sf_operr_notify(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_operr_notify(struct net *net,
+                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        const sctp_subtype_t type,
                                        void *arg,
@@ -3215,15 +3268,15 @@ sctp_disposition_t sctp_sf_operr_notify(const struct sctp_endpoint *ep,
        sctp_errhdr_t *err;
 
        if (!sctp_vtag_verify(chunk, asoc))
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        /* Make sure that the ERROR chunk has a valid length. */
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_operr_chunk_t)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
        sctp_walk_errors(err, chunk->chunk_hdr);
        if ((void *)err != (void *)chunk->chunk_end)
-               return sctp_sf_violation_paramlen(ep, asoc, type, arg,
+               return sctp_sf_violation_paramlen(net, ep, asoc, type, arg,
                                                  (void *)err, commands);
 
        sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_OPERR,
@@ -3242,7 +3295,8 @@ sctp_disposition_t sctp_sf_operr_notify(const struct sctp_endpoint *ep,
  *
  * The return value is the disposition.
  */
-sctp_disposition_t sctp_sf_do_9_2_final(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_9_2_final(struct net *net,
+                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        const sctp_subtype_t type,
                                        void *arg,
@@ -3253,11 +3307,11 @@ sctp_disposition_t sctp_sf_do_9_2_final(const struct sctp_endpoint *ep,
        struct sctp_ulpevent *ev;
 
        if (!sctp_vtag_verify(chunk, asoc))
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        /* Make sure that the SHUTDOWN_ACK chunk has a valid length. */
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
        /* 10.2 H) SHUTDOWN COMPLETE notification
         *
@@ -3290,8 +3344,8 @@ sctp_disposition_t sctp_sf_do_9_2_final(const struct sctp_endpoint *ep,
 
        sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
                        SCTP_STATE(SCTP_STATE_CLOSED));
-       SCTP_INC_STATS(SCTP_MIB_SHUTDOWNS);
-       SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+       SCTP_INC_STATS(net, SCTP_MIB_SHUTDOWNS);
+       SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
        sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
 
        /* ...and remove all record of the association. */
@@ -3324,7 +3378,8 @@ nomem:
  *    receiver of the OOTB packet shall discard the OOTB packet and take
  *    no further action.
  */
-sctp_disposition_t sctp_sf_ootb(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_ootb(struct net *net,
+                               const struct sctp_endpoint *ep,
                                const struct sctp_association *asoc,
                                const sctp_subtype_t type,
                                void *arg,
@@ -3338,13 +3393,13 @@ sctp_disposition_t sctp_sf_ootb(const struct sctp_endpoint *ep,
        int ootb_shut_ack = 0;
        int ootb_cookie_ack = 0;
 
-       SCTP_INC_STATS(SCTP_MIB_OUTOFBLUES);
+       SCTP_INC_STATS(net, SCTP_MIB_OUTOFBLUES);
 
        ch = (sctp_chunkhdr_t *) chunk->chunk_hdr;
        do {
                /* Report violation if the chunk is less then minimal */
                if (ntohs(ch->length) < sizeof(sctp_chunkhdr_t))
-                       return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+                       return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
                /* Now that we know we at least have a chunk header,
@@ -3359,7 +3414,7 @@ sctp_disposition_t sctp_sf_ootb(const struct sctp_endpoint *ep,
                 *   sending an ABORT of its own.
                 */
                if (SCTP_CID_ABORT == ch->type)
-                       return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+                       return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
                /* RFC 8.4, 7) If the packet contains a "Stale cookie" ERROR
                 * or a COOKIE ACK the SCTP Packet should be silently
@@ -3381,18 +3436,18 @@ sctp_disposition_t sctp_sf_ootb(const struct sctp_endpoint *ep,
                /* Report violation if chunk len overflows */
                ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
                if (ch_end > skb_tail_pointer(skb))
-                       return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+                       return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
                ch = (sctp_chunkhdr_t *) ch_end;
        } while (ch_end < skb_tail_pointer(skb));
 
        if (ootb_shut_ack)
-               return sctp_sf_shut_8_4_5(ep, asoc, type, arg, commands);
+               return sctp_sf_shut_8_4_5(net, ep, asoc, type, arg, commands);
        else if (ootb_cookie_ack)
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
        else
-               return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
+               return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
 }
 
 /*
@@ -3416,7 +3471,8 @@ sctp_disposition_t sctp_sf_ootb(const struct sctp_endpoint *ep,
  *
  * The return value is the disposition of the chunk.
  */
-static sctp_disposition_t sctp_sf_shut_8_4_5(const struct sctp_endpoint *ep,
+static sctp_disposition_t sctp_sf_shut_8_4_5(struct net *net,
+                                            const struct sctp_endpoint *ep,
                                             const struct sctp_association *asoc,
                                             const sctp_subtype_t type,
                                             void *arg,
@@ -3426,7 +3482,7 @@ static sctp_disposition_t sctp_sf_shut_8_4_5(const struct sctp_endpoint *ep,
        struct sctp_chunk *chunk = arg;
        struct sctp_chunk *shut;
 
-       packet = sctp_ootb_pkt_new(asoc, chunk);
+       packet = sctp_ootb_pkt_new(net, asoc, chunk);
 
        if (packet) {
                /* Make an SHUTDOWN_COMPLETE.
@@ -3450,19 +3506,19 @@ static sctp_disposition_t sctp_sf_shut_8_4_5(const struct sctp_endpoint *ep,
                sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
                                SCTP_PACKET(packet));
 
-               SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
+               SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
 
                /* If the chunk length is invalid, we don't want to process
                 * the reset of the packet.
                 */
                if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
-                       return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+                       return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
                /* We need to discard the rest of the packet to prevent
                 * potential bomming attacks from additional bundled chunks.
                 * This is documented in SCTP Threats ID.
                 */
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
        }
 
        return SCTP_DISPOSITION_NOMEM;
@@ -3479,7 +3535,8 @@ static sctp_disposition_t sctp_sf_shut_8_4_5(const struct sctp_endpoint *ep,
  *   chunks. --piggy ]
  *
  */
-sctp_disposition_t sctp_sf_do_8_5_1_E_sa(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_8_5_1_E_sa(struct net *net,
+                                     const struct sctp_endpoint *ep,
                                      const struct sctp_association *asoc,
                                      const sctp_subtype_t type,
                                      void *arg,
@@ -3489,7 +3546,7 @@ sctp_disposition_t sctp_sf_do_8_5_1_E_sa(const struct sctp_endpoint *ep,
 
        /* Make sure that the SHUTDOWN_ACK chunk has a valid length. */
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
        /* Although we do have an association in this case, it corresponds
@@ -3497,13 +3554,14 @@ sctp_disposition_t sctp_sf_do_8_5_1_E_sa(const struct sctp_endpoint *ep,
         * packet and the state function that handles OOTB SHUTDOWN_ACK is
         * called with a NULL association.
         */
-       SCTP_INC_STATS(SCTP_MIB_OUTOFBLUES);
+       SCTP_INC_STATS(net, SCTP_MIB_OUTOFBLUES);
 
-       return sctp_sf_shut_8_4_5(ep, NULL, type, arg, commands);
+       return sctp_sf_shut_8_4_5(net, ep, NULL, type, arg, commands);
 }
 
 /* ADDIP Section 4.2 Upon reception of an ASCONF Chunk.  */
-sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_asconf(struct net *net,
+                                    const struct sctp_endpoint *ep,
                                     const struct sctp_association *asoc,
                                     const sctp_subtype_t type, void *arg,
                                     sctp_cmd_seq_t *commands)
@@ -3519,7 +3577,7 @@ sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep,
        if (!sctp_vtag_verify(chunk, asoc)) {
                sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
                                SCTP_NULL());
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
        }
 
        /* ADD-IP: Section 4.1.1
@@ -3528,12 +3586,12 @@ sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep,
         * is received unauthenticated it MUST be silently discarded as
         * described in [I-D.ietf-tsvwg-sctp-auth].
         */
-       if (!sctp_addip_noauth && !chunk->auth)
-               return sctp_sf_discard_chunk(ep, asoc, type, arg, commands);
+       if (!net->sctp.addip_noauth && !chunk->auth)
+               return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
 
        /* Make sure that the ASCONF ADDIP chunk has a valid length.  */
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_addip_chunk_t)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
        hdr = (sctp_addiphdr_t *)chunk->skb->data;
@@ -3542,7 +3600,7 @@ sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep,
        addr_param = (union sctp_addr_param *)hdr->params;
        length = ntohs(addr_param->p.length);
        if (length < sizeof(sctp_paramhdr_t))
-               return sctp_sf_violation_paramlen(ep, asoc, type, arg,
+               return sctp_sf_violation_paramlen(net, ep, asoc, type, arg,
                           (void *)addr_param, commands);
 
        /* Verify the ASCONF chunk before processing it. */
@@ -3550,7 +3608,7 @@ sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep,
                            (sctp_paramhdr_t *)((void *)addr_param + length),
                            (void *)chunk->chunk_end,
                            &err_param))
-               return sctp_sf_violation_paramlen(ep, asoc, type, arg,
+               return sctp_sf_violation_paramlen(net, ep, asoc, type, arg,
                                                  (void *)err_param, commands);
 
        /* ADDIP 5.2 E1) Compare the value of the serial number to the value
@@ -3630,7 +3688,8 @@ sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep,
  * When building TLV parameters for the ASCONF Chunk that will add or
  * delete IP addresses the D0 to D13 rules should be applied:
  */
-sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_asconf_ack(struct net *net,
+                                        const struct sctp_endpoint *ep,
                                         const struct sctp_association *asoc,
                                         const sctp_subtype_t type, void *arg,
                                         sctp_cmd_seq_t *commands)
@@ -3645,7 +3704,7 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
        if (!sctp_vtag_verify(asconf_ack, asoc)) {
                sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
                                SCTP_NULL());
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
        }
 
        /* ADD-IP, Section 4.1.2:
@@ -3654,12 +3713,12 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
         * is received unauthenticated it MUST be silently discarded as
         * described in [I-D.ietf-tsvwg-sctp-auth].
         */
-       if (!sctp_addip_noauth && !asconf_ack->auth)
-               return sctp_sf_discard_chunk(ep, asoc, type, arg, commands);
+       if (!net->sctp.addip_noauth && !asconf_ack->auth)
+               return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
 
        /* Make sure that the ADDIP chunk has a valid length.  */
        if (!sctp_chunk_length_valid(asconf_ack, sizeof(sctp_addip_chunk_t)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
        addip_hdr = (sctp_addiphdr_t *)asconf_ack->skb->data;
@@ -3670,7 +3729,7 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
            (sctp_paramhdr_t *)addip_hdr->params,
            (void *)asconf_ack->chunk_end,
            &err_param))
-               return sctp_sf_violation_paramlen(ep, asoc, type, arg,
+               return sctp_sf_violation_paramlen(net, ep, asoc, type, arg,
                           (void *)err_param, commands);
 
        if (last_asconf) {
@@ -3705,8 +3764,8 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
                                SCTP_ERROR(ECONNABORTED));
                sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
                                SCTP_PERR(SCTP_ERROR_ASCONF_ACK));
-               SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
-               SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+               SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
+               SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
                return SCTP_DISPOSITION_ABORT;
        }
 
@@ -3739,8 +3798,8 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
                                SCTP_ERROR(ECONNABORTED));
                sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
                                SCTP_PERR(SCTP_ERROR_ASCONF_ACK));
-               SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
-               SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+               SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
+               SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
                return SCTP_DISPOSITION_ABORT;
        }
 
@@ -3761,7 +3820,8 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_eat_fwd_tsn(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_eat_fwd_tsn(struct net *net,
+                                      const struct sctp_endpoint *ep,
                                       const struct sctp_association *asoc,
                                       const sctp_subtype_t type,
                                       void *arg,
@@ -3776,12 +3836,12 @@ sctp_disposition_t sctp_sf_eat_fwd_tsn(const struct sctp_endpoint *ep,
        if (!sctp_vtag_verify(chunk, asoc)) {
                sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
                                SCTP_NULL());
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
        }
 
        /* Make sure that the FORWARD_TSN chunk has valid length.  */
        if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_fwdtsn_chunk)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
        fwdtsn_hdr = (struct sctp_fwdtsn_hdr *)chunk->skb->data;
@@ -3828,6 +3888,7 @@ discard_noforce:
 }
 
 sctp_disposition_t sctp_sf_eat_fwd_tsn_fast(
+       struct net *net,
        const struct sctp_endpoint *ep,
        const struct sctp_association *asoc,
        const sctp_subtype_t type,
@@ -3843,12 +3904,12 @@ sctp_disposition_t sctp_sf_eat_fwd_tsn_fast(
        if (!sctp_vtag_verify(chunk, asoc)) {
                sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
                                SCTP_NULL());
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
        }
 
        /* Make sure that the FORWARD_TSN chunk has a valid length.  */
        if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_fwdtsn_chunk)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
        fwdtsn_hdr = (struct sctp_fwdtsn_hdr *)chunk->skb->data;
@@ -3915,7 +3976,8 @@ gen_shutdown:
  *
  * The return value is the disposition of the chunk.
  */
-static sctp_ierror_t sctp_sf_authenticate(const struct sctp_endpoint *ep,
+static sctp_ierror_t sctp_sf_authenticate(struct net *net,
+                                   const struct sctp_endpoint *ep,
                                    const struct sctp_association *asoc,
                                    const sctp_subtype_t type,
                                    struct sctp_chunk *chunk)
@@ -3988,7 +4050,8 @@ nomem:
        return SCTP_IERROR_NOMEM;
 }
 
-sctp_disposition_t sctp_sf_eat_auth(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_eat_auth(struct net *net,
+                                   const struct sctp_endpoint *ep,
                                    const struct sctp_association *asoc,
                                    const sctp_subtype_t type,
                                    void *arg,
@@ -4001,21 +4064,21 @@ sctp_disposition_t sctp_sf_eat_auth(const struct sctp_endpoint *ep,
 
        /* Make sure that the peer has AUTH capable */
        if (!asoc->peer.auth_capable)
-               return sctp_sf_unk_chunk(ep, asoc, type, arg, commands);
+               return sctp_sf_unk_chunk(net, ep, asoc, type, arg, commands);
 
        if (!sctp_vtag_verify(chunk, asoc)) {
                sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
                                SCTP_NULL());
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
        }
 
        /* Make sure that the AUTH chunk has valid length.  */
        if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_auth_chunk)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
        auth_hdr = (struct sctp_authhdr *)chunk->skb->data;
-       error = sctp_sf_authenticate(ep, asoc, type, chunk);
+       error = sctp_sf_authenticate(net, ep, asoc, type, chunk);
        switch (error) {
        case SCTP_IERROR_AUTH_BAD_HMAC:
                /* Generate the ERROR chunk and discard the rest
@@ -4032,10 +4095,10 @@ sctp_disposition_t sctp_sf_eat_auth(const struct sctp_endpoint *ep,
                /* Fall Through */
        case SCTP_IERROR_AUTH_BAD_KEYID:
        case SCTP_IERROR_BAD_SIG:
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        case SCTP_IERROR_PROTO_VIOLATION:
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
        case SCTP_IERROR_NOMEM:
@@ -4084,7 +4147,8 @@ sctp_disposition_t sctp_sf_eat_auth(const struct sctp_endpoint *ep,
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_unk_chunk(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_unk_chunk(struct net *net,
+                                    const struct sctp_endpoint *ep,
                                     const struct sctp_association *asoc,
                                     const sctp_subtype_t type,
                                     void *arg,
@@ -4097,20 +4161,20 @@ sctp_disposition_t sctp_sf_unk_chunk(const struct sctp_endpoint *ep,
        SCTP_DEBUG_PRINTK("Processing the unknown chunk id %d.\n", type.chunk);
 
        if (!sctp_vtag_verify(unk_chunk, asoc))
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        /* Make sure that the chunk has a valid length.
         * Since we don't know the chunk type, we use a general
         * chunkhdr structure to make a comparison.
         */
        if (!sctp_chunk_length_valid(unk_chunk, sizeof(sctp_chunkhdr_t)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
        switch (type.chunk & SCTP_CID_ACTION_MASK) {
        case SCTP_CID_ACTION_DISCARD:
                /* Discard the packet.  */
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
                break;
        case SCTP_CID_ACTION_DISCARD_ERR:
                /* Generate an ERROR chunk as response. */
@@ -4125,7 +4189,7 @@ sctp_disposition_t sctp_sf_unk_chunk(const struct sctp_endpoint *ep,
                }
 
                /* Discard the packet.  */
-               sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
                return SCTP_DISPOSITION_CONSUME;
                break;
        case SCTP_CID_ACTION_SKIP:
@@ -4167,7 +4231,8 @@ sctp_disposition_t sctp_sf_unk_chunk(const struct sctp_endpoint *ep,
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_discard_chunk(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_discard_chunk(struct net *net,
+                                        const struct sctp_endpoint *ep,
                                         const struct sctp_association *asoc,
                                         const sctp_subtype_t type,
                                         void *arg,
@@ -4180,7 +4245,7 @@ sctp_disposition_t sctp_sf_discard_chunk(const struct sctp_endpoint *ep,
         * chunkhdr structure to make a comparison.
         */
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
        SCTP_DEBUG_PRINTK("Chunk %d is discarded\n", type.chunk);
@@ -4205,13 +4270,14 @@ sctp_disposition_t sctp_sf_discard_chunk(const struct sctp_endpoint *ep,
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_pdiscard(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_pdiscard(struct net *net,
+                                   const struct sctp_endpoint *ep,
                                    const struct sctp_association *asoc,
                                    const sctp_subtype_t type,
                                    void *arg,
                                    sctp_cmd_seq_t *commands)
 {
-       SCTP_INC_STATS(SCTP_MIB_IN_PKT_DISCARDS);
+       SCTP_INC_STATS(net, SCTP_MIB_IN_PKT_DISCARDS);
        sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL());
 
        return SCTP_DISPOSITION_CONSUME;
@@ -4232,7 +4298,8 @@ sctp_disposition_t sctp_sf_pdiscard(const struct sctp_endpoint *ep,
  * We simply tag the chunk as a violation.  The state machine will log
  * the violation and continue.
  */
-sctp_disposition_t sctp_sf_violation(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_violation(struct net *net,
+                                    const struct sctp_endpoint *ep,
                                     const struct sctp_association *asoc,
                                     const sctp_subtype_t type,
                                     void *arg,
@@ -4242,7 +4309,7 @@ sctp_disposition_t sctp_sf_violation(const struct sctp_endpoint *ep,
 
        /* Make sure that the chunk has a valid length. */
        if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+               return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
        return SCTP_DISPOSITION_VIOLATION;
@@ -4252,6 +4319,7 @@ sctp_disposition_t sctp_sf_violation(const struct sctp_endpoint *ep,
  * Common function to handle a protocol violation.
  */
 static sctp_disposition_t sctp_sf_abort_violation(
+                                    struct net *net,
                                     const struct sctp_endpoint *ep,
                                     const struct sctp_association *asoc,
                                     void *arg,
@@ -4302,7 +4370,7 @@ static sctp_disposition_t sctp_sf_abort_violation(
                }
 
                sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
-               SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
+               SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
 
                if (asoc->state <= SCTP_STATE_COOKIE_ECHOED) {
                        sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
@@ -4316,10 +4384,10 @@ static sctp_disposition_t sctp_sf_abort_violation(
                                        SCTP_ERROR(ECONNABORTED));
                        sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
                                        SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION));
-                       SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+                       SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
                }
        } else {
-               packet = sctp_ootb_pkt_new(asoc, chunk);
+               packet = sctp_ootb_pkt_new(net, asoc, chunk);
 
                if (!packet)
                        goto nomem_pkt;
@@ -4334,13 +4402,13 @@ static sctp_disposition_t sctp_sf_abort_violation(
                sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
                        SCTP_PACKET(packet));
 
-               SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
+               SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
        }
 
-       SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
+       SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
 
 discard:
-       sctp_sf_pdiscard(ep, asoc, SCTP_ST_CHUNK(0), arg, commands);
+       sctp_sf_pdiscard(net, ep, asoc, SCTP_ST_CHUNK(0), arg, commands);
        return SCTP_DISPOSITION_ABORT;
 
 nomem_pkt:
@@ -4369,6 +4437,7 @@ nomem:
  * Generate an  ABORT chunk and terminate the association.
  */
 static sctp_disposition_t sctp_sf_violation_chunklen(
+                                    struct net *net,
                                     const struct sctp_endpoint *ep,
                                     const struct sctp_association *asoc,
                                     const sctp_subtype_t type,
@@ -4377,7 +4446,7 @@ static sctp_disposition_t sctp_sf_violation_chunklen(
 {
        static const char err_str[]="The following chunk had invalid length:";
 
-       return sctp_sf_abort_violation(ep, asoc, arg, commands, err_str,
+       return sctp_sf_abort_violation(net, ep, asoc, arg, commands, err_str,
                                        sizeof(err_str));
 }
 
@@ -4388,6 +4457,7 @@ static sctp_disposition_t sctp_sf_violation_chunklen(
  * the length is considered as invalid.
  */
 static sctp_disposition_t sctp_sf_violation_paramlen(
+                                    struct net *net,
                                     const struct sctp_endpoint *ep,
                                     const struct sctp_association *asoc,
                                     const sctp_subtype_t type,
@@ -4407,17 +4477,17 @@ static sctp_disposition_t sctp_sf_violation_paramlen(
                goto nomem;
 
        sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
-       SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
+       SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
 
        sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
                        SCTP_ERROR(ECONNABORTED));
        sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
                        SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION));
-       SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
-       SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
+       SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
+       SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
 
 discard:
-       sctp_sf_pdiscard(ep, asoc, SCTP_ST_CHUNK(0), arg, commands);
+       sctp_sf_pdiscard(net, ep, asoc, SCTP_ST_CHUNK(0), arg, commands);
        return SCTP_DISPOSITION_ABORT;
 nomem:
        return SCTP_DISPOSITION_NOMEM;
@@ -4430,6 +4500,7 @@ nomem:
  * error code.
  */
 static sctp_disposition_t sctp_sf_violation_ctsn(
+                                    struct net *net,
                                     const struct sctp_endpoint *ep,
                                     const struct sctp_association *asoc,
                                     const sctp_subtype_t type,
@@ -4438,7 +4509,7 @@ static sctp_disposition_t sctp_sf_violation_ctsn(
 {
        static const char err_str[]="The cumulative tsn ack beyond the max tsn currently sent:";
 
-       return sctp_sf_abort_violation(ep, asoc, arg, commands, err_str,
+       return sctp_sf_abort_violation(net, ep, asoc, arg, commands, err_str,
                                        sizeof(err_str));
 }
 
@@ -4449,6 +4520,7 @@ static sctp_disposition_t sctp_sf_violation_ctsn(
  * on the path and we may not want to continue this communication.
  */
 static sctp_disposition_t sctp_sf_violation_chunk(
+                                    struct net *net,
                                     const struct sctp_endpoint *ep,
                                     const struct sctp_association *asoc,
                                     const sctp_subtype_t type,
@@ -4458,9 +4530,9 @@ static sctp_disposition_t sctp_sf_violation_chunk(
        static const char err_str[]="The following chunk violates protocol:";
 
        if (!asoc)
-               return sctp_sf_violation(ep, asoc, type, arg, commands);
+               return sctp_sf_violation(net, ep, asoc, type, arg, commands);
 
-       return sctp_sf_abort_violation(ep, asoc, arg, commands, err_str,
+       return sctp_sf_abort_violation(net, ep, asoc, arg, commands, err_str,
                                        sizeof(err_str));
 }
 /***************************************************************************
@@ -4523,7 +4595,8 @@ static sctp_disposition_t sctp_sf_violation_chunk(
  *
  * The return value is a disposition.
  */
-sctp_disposition_t sctp_sf_do_prm_asoc(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_prm_asoc(struct net *net,
+                                      const struct sctp_endpoint *ep,
                                       const struct sctp_association *asoc,
                                       const sctp_subtype_t type,
                                       void *arg,
@@ -4634,7 +4707,8 @@ nomem:
  *
  * The return value is the disposition.
  */
-sctp_disposition_t sctp_sf_do_prm_send(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_prm_send(struct net *net,
+                                      const struct sctp_endpoint *ep,
                                       const struct sctp_association *asoc,
                                       const sctp_subtype_t type,
                                       void *arg,
@@ -4673,6 +4747,7 @@ sctp_disposition_t sctp_sf_do_prm_send(const struct sctp_endpoint *ep,
  * The return value is the disposition.
  */
 sctp_disposition_t sctp_sf_do_9_2_prm_shutdown(
+       struct net *net,
        const struct sctp_endpoint *ep,
        const struct sctp_association *asoc,
        const sctp_subtype_t type,
@@ -4694,7 +4769,7 @@ sctp_disposition_t sctp_sf_do_9_2_prm_shutdown(
 
        disposition = SCTP_DISPOSITION_CONSUME;
        if (sctp_outq_is_empty(&asoc->outqueue)) {
-               disposition = sctp_sf_do_9_2_start_shutdown(ep, asoc, type,
+               disposition = sctp_sf_do_9_2_start_shutdown(net, ep, asoc, type,
                                                            arg, commands);
        }
        return disposition;
@@ -4728,6 +4803,7 @@ sctp_disposition_t sctp_sf_do_9_2_prm_shutdown(
  * The return value is the disposition.
  */
 sctp_disposition_t sctp_sf_do_9_1_prm_abort(
+       struct net *net,
        const struct sctp_endpoint *ep,
        const struct sctp_association *asoc,
        const sctp_subtype_t type,
@@ -4759,14 +4835,15 @@ sctp_disposition_t sctp_sf_do_9_1_prm_abort(
        sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
                        SCTP_PERR(SCTP_ERROR_USER_ABORT));
 
-       SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
-       SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+       SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
+       SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
 
        return retval;
 }
 
 /* We tried an illegal operation on an association which is closed.  */
-sctp_disposition_t sctp_sf_error_closed(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_error_closed(struct net *net,
+                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        const sctp_subtype_t type,
                                        void *arg,
@@ -4779,7 +4856,8 @@ sctp_disposition_t sctp_sf_error_closed(const struct sctp_endpoint *ep,
 /* We tried an illegal operation on an association which is shutting
  * down.
  */
-sctp_disposition_t sctp_sf_error_shutdown(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_error_shutdown(struct net *net,
+                                         const struct sctp_endpoint *ep,
                                          const struct sctp_association *asoc,
                                          const sctp_subtype_t type,
                                          void *arg,
@@ -4805,6 +4883,7 @@ sctp_disposition_t sctp_sf_error_shutdown(const struct sctp_endpoint *ep,
  * (timers)
  */
 sctp_disposition_t sctp_sf_cookie_wait_prm_shutdown(
+       struct net *net,
        const struct sctp_endpoint *ep,
        const struct sctp_association *asoc,
        const sctp_subtype_t type,
@@ -4817,7 +4896,7 @@ sctp_disposition_t sctp_sf_cookie_wait_prm_shutdown(
        sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
                        SCTP_STATE(SCTP_STATE_CLOSED));
 
-       SCTP_INC_STATS(SCTP_MIB_SHUTDOWNS);
+       SCTP_INC_STATS(net, SCTP_MIB_SHUTDOWNS);
 
        sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
 
@@ -4839,6 +4918,7 @@ sctp_disposition_t sctp_sf_cookie_wait_prm_shutdown(
  * (timers)
  */
 sctp_disposition_t sctp_sf_cookie_echoed_prm_shutdown(
+       struct net *net,
        const struct sctp_endpoint *ep,
        const struct sctp_association *asoc,
        const sctp_subtype_t type,
@@ -4847,7 +4927,7 @@ sctp_disposition_t sctp_sf_cookie_echoed_prm_shutdown(
        /* There is a single T1 timer, so we should be able to use
         * common function with the COOKIE-WAIT state.
         */
-       return sctp_sf_cookie_wait_prm_shutdown(ep, asoc, type, arg, commands);
+       return sctp_sf_cookie_wait_prm_shutdown(net, ep, asoc, type, arg, commands);
 }
 
 /*
@@ -4865,6 +4945,7 @@ sctp_disposition_t sctp_sf_cookie_echoed_prm_shutdown(
  * (timers)
  */
 sctp_disposition_t sctp_sf_cookie_wait_prm_abort(
+       struct net *net,
        const struct sctp_endpoint *ep,
        const struct sctp_association *asoc,
        const sctp_subtype_t type,
@@ -4884,7 +4965,7 @@ sctp_disposition_t sctp_sf_cookie_wait_prm_abort(
        sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
                        SCTP_STATE(SCTP_STATE_CLOSED));
 
-       SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
+       SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
 
        /* Even if we can't send the ABORT due to low memory delete the
         * TCB.  This is a departure from our typical NOMEM handling.
@@ -4914,6 +4995,7 @@ sctp_disposition_t sctp_sf_cookie_wait_prm_abort(
  * (timers)
  */
 sctp_disposition_t sctp_sf_cookie_echoed_prm_abort(
+       struct net *net,
        const struct sctp_endpoint *ep,
        const struct sctp_association *asoc,
        const sctp_subtype_t type,
@@ -4923,7 +5005,7 @@ sctp_disposition_t sctp_sf_cookie_echoed_prm_abort(
        /* There is a single T1 timer, so we should be able to use
         * common function with the COOKIE-WAIT state.
         */
-       return sctp_sf_cookie_wait_prm_abort(ep, asoc, type, arg, commands);
+       return sctp_sf_cookie_wait_prm_abort(net, ep, asoc, type, arg, commands);
 }
 
 /*
@@ -4939,6 +5021,7 @@ sctp_disposition_t sctp_sf_cookie_echoed_prm_abort(
  * (timers)
  */
 sctp_disposition_t sctp_sf_shutdown_pending_prm_abort(
+       struct net *net,
        const struct sctp_endpoint *ep,
        const struct sctp_association *asoc,
        const sctp_subtype_t type,
@@ -4949,7 +5032,7 @@ sctp_disposition_t sctp_sf_shutdown_pending_prm_abort(
        sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
                        SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
 
-       return sctp_sf_do_9_1_prm_abort(ep, asoc, type, arg, commands);
+       return sctp_sf_do_9_1_prm_abort(net, ep, asoc, type, arg, commands);
 }
 
 /*
@@ -4965,6 +5048,7 @@ sctp_disposition_t sctp_sf_shutdown_pending_prm_abort(
  * (timers)
  */
 sctp_disposition_t sctp_sf_shutdown_sent_prm_abort(
+       struct net *net,
        const struct sctp_endpoint *ep,
        const struct sctp_association *asoc,
        const sctp_subtype_t type,
@@ -4979,7 +5063,7 @@ sctp_disposition_t sctp_sf_shutdown_sent_prm_abort(
        sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
                        SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
 
-       return sctp_sf_do_9_1_prm_abort(ep, asoc, type, arg, commands);
+       return sctp_sf_do_9_1_prm_abort(net, ep, asoc, type, arg, commands);
 }
 
 /*
@@ -4995,6 +5079,7 @@ sctp_disposition_t sctp_sf_shutdown_sent_prm_abort(
  * (timers)
  */
 sctp_disposition_t sctp_sf_shutdown_ack_sent_prm_abort(
+       struct net *net,
        const struct sctp_endpoint *ep,
        const struct sctp_association *asoc,
        const sctp_subtype_t type,
@@ -5004,7 +5089,7 @@ sctp_disposition_t sctp_sf_shutdown_ack_sent_prm_abort(
        /* The same T2 timer, so we should be able to use
         * common function with the SHUTDOWN-SENT state.
         */
-       return sctp_sf_shutdown_sent_prm_abort(ep, asoc, type, arg, commands);
+       return sctp_sf_shutdown_sent_prm_abort(net, ep, asoc, type, arg, commands);
 }
 
 /*
@@ -5030,6 +5115,7 @@ sctp_disposition_t sctp_sf_shutdown_ack_sent_prm_abort(
  *   association on which a heartbeat should be issued.
  */
 sctp_disposition_t sctp_sf_do_prm_requestheartbeat(
+                                       struct net *net,
                                        const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        const sctp_subtype_t type,
@@ -5061,7 +5147,8 @@ sctp_disposition_t sctp_sf_do_prm_requestheartbeat(
  * When an endpoint has an ASCONF signaled change to be sent to the
  * remote endpoint it should do A1 to A9
  */
-sctp_disposition_t sctp_sf_do_prm_asconf(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_prm_asconf(struct net *net,
+                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        const sctp_subtype_t type,
                                        void *arg,
@@ -5082,6 +5169,7 @@ sctp_disposition_t sctp_sf_do_prm_asconf(const struct sctp_endpoint *ep,
  * The return value is the disposition of the primitive.
  */
 sctp_disposition_t sctp_sf_ignore_primitive(
+       struct net *net,
        const struct sctp_endpoint *ep,
        const struct sctp_association *asoc,
        const sctp_subtype_t type,
@@ -5103,6 +5191,7 @@ sctp_disposition_t sctp_sf_ignore_primitive(
  * retransmit, the stack will immediately send up this notification.
  */
 sctp_disposition_t sctp_sf_do_no_pending_tsn(
+       struct net *net,
        const struct sctp_endpoint *ep,
        const struct sctp_association *asoc,
        const sctp_subtype_t type,
@@ -5134,6 +5223,7 @@ sctp_disposition_t sctp_sf_do_no_pending_tsn(
  * The return value is the disposition.
  */
 sctp_disposition_t sctp_sf_do_9_2_start_shutdown(
+       struct net *net,
        const struct sctp_endpoint *ep,
        const struct sctp_association *asoc,
        const sctp_subtype_t type,
@@ -5203,6 +5293,7 @@ nomem:
  * The return value is the disposition.
  */
 sctp_disposition_t sctp_sf_do_9_2_shutdown_ack(
+       struct net *net,
        const struct sctp_endpoint *ep,
        const struct sctp_association *asoc,
        const sctp_subtype_t type,
@@ -5221,11 +5312,11 @@ sctp_disposition_t sctp_sf_do_9_2_shutdown_ack(
         */
        if (chunk) {
                if (!sctp_vtag_verify(chunk, asoc))
-                       return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+                       return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
                /* Make sure that the SHUTDOWN chunk has a valid length. */
                if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_shutdown_chunk_t)))
-                       return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+                       return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                          commands);
        }
 
@@ -5273,7 +5364,8 @@ nomem:
  *
  * The return value is the disposition of the event.
  */
-sctp_disposition_t sctp_sf_ignore_other(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_ignore_other(struct net *net,
+                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        const sctp_subtype_t type,
                                        void *arg,
@@ -5298,7 +5390,8 @@ sctp_disposition_t sctp_sf_ignore_other(const struct sctp_endpoint *ep,
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_do_6_3_3_rtx(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_6_3_3_rtx(struct net *net,
+                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        const sctp_subtype_t type,
                                        void *arg,
@@ -5306,7 +5399,7 @@ sctp_disposition_t sctp_sf_do_6_3_3_rtx(const struct sctp_endpoint *ep,
 {
        struct sctp_transport *transport = arg;
 
-       SCTP_INC_STATS(SCTP_MIB_T3_RTX_EXPIREDS);
+       SCTP_INC_STATS(net, SCTP_MIB_T3_RTX_EXPIREDS);
 
        if (asoc->overall_error_count >= asoc->max_retrans) {
                if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING) {
@@ -5327,8 +5420,8 @@ sctp_disposition_t sctp_sf_do_6_3_3_rtx(const struct sctp_endpoint *ep,
                        /* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
                        sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
                                        SCTP_PERR(SCTP_ERROR_NO_ERROR));
-                       SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
-                       SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+                       SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
+                       SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
                        return SCTP_DISPOSITION_DELETE_TCB;
                }
        }
@@ -5384,13 +5477,14 @@ sctp_disposition_t sctp_sf_do_6_3_3_rtx(const struct sctp_endpoint *ep,
  * allow. However, an SCTP transmitter MUST NOT be more aggressive than
  * the following algorithms allow.
  */
-sctp_disposition_t sctp_sf_do_6_2_sack(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_do_6_2_sack(struct net *net,
+                                      const struct sctp_endpoint *ep,
                                       const struct sctp_association *asoc,
                                       const sctp_subtype_t type,
                                       void *arg,
                                       sctp_cmd_seq_t *commands)
 {
-       SCTP_INC_STATS(SCTP_MIB_DELAY_SACK_EXPIREDS);
+       SCTP_INC_STATS(net, SCTP_MIB_DELAY_SACK_EXPIREDS);
        sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_FORCE());
        return SCTP_DISPOSITION_CONSUME;
 }
@@ -5414,7 +5508,8 @@ sctp_disposition_t sctp_sf_do_6_2_sack(const struct sctp_endpoint *ep,
  * (timers, events)
  *
  */
-sctp_disposition_t sctp_sf_t1_init_timer_expire(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_t1_init_timer_expire(struct net *net,
+                                          const struct sctp_endpoint *ep,
                                           const struct sctp_association *asoc,
                                           const sctp_subtype_t type,
                                           void *arg,
@@ -5425,7 +5520,7 @@ sctp_disposition_t sctp_sf_t1_init_timer_expire(const struct sctp_endpoint *ep,
        int attempts = asoc->init_err_counter + 1;
 
        SCTP_DEBUG_PRINTK("Timer T1 expired (INIT).\n");
-       SCTP_INC_STATS(SCTP_MIB_T1_INIT_EXPIREDS);
+       SCTP_INC_STATS(net, SCTP_MIB_T1_INIT_EXPIREDS);
 
        if (attempts <= asoc->max_init_attempts) {
                bp = (struct sctp_bind_addr *) &asoc->base.bind_addr;
@@ -5475,7 +5570,8 @@ sctp_disposition_t sctp_sf_t1_init_timer_expire(const struct sctp_endpoint *ep,
  * (timers, events)
  *
  */
-sctp_disposition_t sctp_sf_t1_cookie_timer_expire(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_t1_cookie_timer_expire(struct net *net,
+                                          const struct sctp_endpoint *ep,
                                           const struct sctp_association *asoc,
                                           const sctp_subtype_t type,
                                           void *arg,
@@ -5485,7 +5581,7 @@ sctp_disposition_t sctp_sf_t1_cookie_timer_expire(const struct sctp_endpoint *ep
        int attempts = asoc->init_err_counter + 1;
 
        SCTP_DEBUG_PRINTK("Timer T1 expired (COOKIE-ECHO).\n");
-       SCTP_INC_STATS(SCTP_MIB_T1_COOKIE_EXPIREDS);
+       SCTP_INC_STATS(net, SCTP_MIB_T1_COOKIE_EXPIREDS);
 
        if (attempts <= asoc->max_init_attempts) {
                repl = sctp_make_cookie_echo(asoc, NULL);
@@ -5523,7 +5619,8 @@ sctp_disposition_t sctp_sf_t1_cookie_timer_expire(const struct sctp_endpoint *ep
  * the T2-Shutdown timer,  giving its peer ample opportunity to transmit
  * all of its queued DATA chunks that have not yet been sent.
  */
-sctp_disposition_t sctp_sf_t2_timer_expire(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_t2_timer_expire(struct net *net,
+                                          const struct sctp_endpoint *ep,
                                           const struct sctp_association *asoc,
                                           const sctp_subtype_t type,
                                           void *arg,
@@ -5532,7 +5629,7 @@ sctp_disposition_t sctp_sf_t2_timer_expire(const struct sctp_endpoint *ep,
        struct sctp_chunk *reply = NULL;
 
        SCTP_DEBUG_PRINTK("Timer T2 expired.\n");
-       SCTP_INC_STATS(SCTP_MIB_T2_SHUTDOWN_EXPIREDS);
+       SCTP_INC_STATS(net, SCTP_MIB_T2_SHUTDOWN_EXPIREDS);
 
        ((struct sctp_association *)asoc)->shutdown_retries++;
 
@@ -5542,8 +5639,8 @@ sctp_disposition_t sctp_sf_t2_timer_expire(const struct sctp_endpoint *ep,
                /* Note:  CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
                sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
                                SCTP_PERR(SCTP_ERROR_NO_ERROR));
-               SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
-               SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+               SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
+               SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
                return SCTP_DISPOSITION_DELETE_TCB;
        }
 
@@ -5592,6 +5689,7 @@ nomem:
  * If the T4 RTO timer expires the endpoint should do B1 to B5
  */
 sctp_disposition_t sctp_sf_t4_timer_expire(
+       struct net *net,
        const struct sctp_endpoint *ep,
        const struct sctp_association *asoc,
        const sctp_subtype_t type,
@@ -5601,7 +5699,7 @@ sctp_disposition_t sctp_sf_t4_timer_expire(
        struct sctp_chunk *chunk = asoc->addip_last_asconf;
        struct sctp_transport *transport = chunk->transport;
 
-       SCTP_INC_STATS(SCTP_MIB_T4_RTO_EXPIREDS);
+       SCTP_INC_STATS(net, SCTP_MIB_T4_RTO_EXPIREDS);
 
        /* ADDIP 4.1 B1) Increment the error counters and perform path failure
         * detection on the appropriate destination address as defined in
@@ -5626,8 +5724,8 @@ sctp_disposition_t sctp_sf_t4_timer_expire(
                                SCTP_ERROR(ETIMEDOUT));
                sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
                                SCTP_PERR(SCTP_ERROR_NO_ERROR));
-               SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
-               SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+               SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
+               SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
                return SCTP_DISPOSITION_ABORT;
        }
 
@@ -5662,7 +5760,8 @@ sctp_disposition_t sctp_sf_t4_timer_expire(
  * At the expiration of this timer the sender SHOULD abort the association
  * by sending an ABORT chunk.
  */
-sctp_disposition_t sctp_sf_t5_timer_expire(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_t5_timer_expire(struct net *net,
+                                          const struct sctp_endpoint *ep,
                                           const struct sctp_association *asoc,
                                           const sctp_subtype_t type,
                                           void *arg,
@@ -5671,7 +5770,7 @@ sctp_disposition_t sctp_sf_t5_timer_expire(const struct sctp_endpoint *ep,
        struct sctp_chunk *reply = NULL;
 
        SCTP_DEBUG_PRINTK("Timer T5 expired.\n");
-       SCTP_INC_STATS(SCTP_MIB_T5_SHUTDOWN_GUARD_EXPIREDS);
+       SCTP_INC_STATS(net, SCTP_MIB_T5_SHUTDOWN_GUARD_EXPIREDS);
 
        reply = sctp_make_abort(asoc, NULL, 0);
        if (!reply)
@@ -5683,8 +5782,8 @@ sctp_disposition_t sctp_sf_t5_timer_expire(const struct sctp_endpoint *ep,
        sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
                        SCTP_PERR(SCTP_ERROR_NO_ERROR));
 
-       SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
-       SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+       SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
+       SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
 
        return SCTP_DISPOSITION_DELETE_TCB;
 nomem:
@@ -5697,6 +5796,7 @@ nomem:
  * the user.  So this routine looks same as sctp_sf_do_9_2_prm_shutdown().
  */
 sctp_disposition_t sctp_sf_autoclose_timer_expire(
+       struct net *net,
        const struct sctp_endpoint *ep,
        const struct sctp_association *asoc,
        const sctp_subtype_t type,
@@ -5705,7 +5805,7 @@ sctp_disposition_t sctp_sf_autoclose_timer_expire(
 {
        int disposition;
 
-       SCTP_INC_STATS(SCTP_MIB_AUTOCLOSE_EXPIREDS);
+       SCTP_INC_STATS(net, SCTP_MIB_AUTOCLOSE_EXPIREDS);
 
        /* From 9.2 Shutdown of an Association
         * Upon receipt of the SHUTDOWN primitive from its upper
@@ -5720,7 +5820,7 @@ sctp_disposition_t sctp_sf_autoclose_timer_expire(
 
        disposition = SCTP_DISPOSITION_CONSUME;
        if (sctp_outq_is_empty(&asoc->outqueue)) {
-               disposition = sctp_sf_do_9_2_start_shutdown(ep, asoc, type,
+               disposition = sctp_sf_do_9_2_start_shutdown(net, ep, asoc, type,
                                                            arg, commands);
        }
        return disposition;
@@ -5738,7 +5838,8 @@ sctp_disposition_t sctp_sf_autoclose_timer_expire(
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_not_impl(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_not_impl(struct net *net,
+                                   const struct sctp_endpoint *ep,
                                    const struct sctp_association *asoc,
                                    const sctp_subtype_t type,
                                    void *arg,
@@ -5755,7 +5856,8 @@ sctp_disposition_t sctp_sf_not_impl(const struct sctp_endpoint *ep,
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_bug(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_bug(struct net *net,
+                              const struct sctp_endpoint *ep,
                               const struct sctp_association *asoc,
                               const sctp_subtype_t type,
                               void *arg,
@@ -5775,7 +5877,8 @@ sctp_disposition_t sctp_sf_bug(const struct sctp_endpoint *ep,
  *
  * The return value is the disposition of the chunk.
  */
-sctp_disposition_t sctp_sf_timer_ignore(const struct sctp_endpoint *ep,
+sctp_disposition_t sctp_sf_timer_ignore(struct net *net,
+                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        const sctp_subtype_t type,
                                        void *arg,
@@ -5817,7 +5920,8 @@ static struct sctp_sackhdr *sctp_sm_pull_sack(struct sctp_chunk *chunk)
 /* Create an ABORT packet to be sent as a response, with the specified
  * error causes.
  */
-static struct sctp_packet *sctp_abort_pkt_new(const struct sctp_endpoint *ep,
+static struct sctp_packet *sctp_abort_pkt_new(struct net *net,
+                                 const struct sctp_endpoint *ep,
                                  const struct sctp_association *asoc,
                                  struct sctp_chunk *chunk,
                                  const void *payload,
@@ -5826,7 +5930,7 @@ static struct sctp_packet *sctp_abort_pkt_new(const struct sctp_endpoint *ep,
        struct sctp_packet *packet;
        struct sctp_chunk *abort;
 
-       packet = sctp_ootb_pkt_new(asoc, chunk);
+       packet = sctp_ootb_pkt_new(net, asoc, chunk);
 
        if (packet) {
                /* Make an ABORT.
@@ -5858,7 +5962,8 @@ static struct sctp_packet *sctp_abort_pkt_new(const struct sctp_endpoint *ep,
 }
 
 /* Allocate a packet for responding in the OOTB conditions.  */
-static struct sctp_packet *sctp_ootb_pkt_new(const struct sctp_association *asoc,
+static struct sctp_packet *sctp_ootb_pkt_new(struct net *net,
+                                            const struct sctp_association *asoc,
                                             const struct sctp_chunk *chunk)
 {
        struct sctp_packet *packet;
@@ -5911,7 +6016,7 @@ static struct sctp_packet *sctp_ootb_pkt_new(const struct sctp_association *asoc
        }
 
        /* Make a transport for the bucket, Eliza... */
-       transport = sctp_transport_new(sctp_source(chunk), GFP_ATOMIC);
+       transport = sctp_transport_new(net, sctp_source(chunk), GFP_ATOMIC);
        if (!transport)
                goto nomem;
 
@@ -5919,7 +6024,7 @@ static struct sctp_packet *sctp_ootb_pkt_new(const struct sctp_association *asoc
         * the source address.
         */
        sctp_transport_route(transport, (union sctp_addr *)&chunk->dest,
-                            sctp_sk(sctp_get_ctl_sock()));
+                            sctp_sk(net->sctp.ctl_sock));
 
        packet = sctp_packet_init(&transport->packet, transport, sport, dport);
        packet = sctp_packet_config(packet, vtag, 0);
@@ -5937,7 +6042,8 @@ void sctp_ootb_pkt_free(struct sctp_packet *packet)
 }
 
 /* Send a stale cookie error when a invalid COOKIE ECHO chunk is found  */
-static void sctp_send_stale_cookie_err(const struct sctp_endpoint *ep,
+static void sctp_send_stale_cookie_err(struct net *net,
+                                      const struct sctp_endpoint *ep,
                                       const struct sctp_association *asoc,
                                       const struct sctp_chunk *chunk,
                                       sctp_cmd_seq_t *commands,
@@ -5946,7 +6052,7 @@ static void sctp_send_stale_cookie_err(const struct sctp_endpoint *ep,
        struct sctp_packet *packet;
 
        if (err_chunk) {
-               packet = sctp_ootb_pkt_new(asoc, chunk);
+               packet = sctp_ootb_pkt_new(net, asoc, chunk);
                if (packet) {
                        struct sctp_signed_cookie *cookie;
 
@@ -5959,7 +6065,7 @@ static void sctp_send_stale_cookie_err(const struct sctp_endpoint *ep,
                        sctp_packet_append_chunk(packet, err_chunk);
                        sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
                                        SCTP_PACKET(packet));
-                       SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
+                       SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
                } else
                        sctp_chunk_free (err_chunk);
        }
@@ -5979,6 +6085,7 @@ static int sctp_eat_data(const struct sctp_association *asoc,
        __u32 tsn;
        struct sctp_tsnmap *map = (struct sctp_tsnmap *)&asoc->peer.tsn_map;
        struct sock *sk = asoc->base.sk;
+       struct net *net = sock_net(sk);
        u16 ssn;
        u16 sid;
        u8 ordered = 0;
@@ -6109,8 +6216,8 @@ static int sctp_eat_data(const struct sctp_association *asoc,
                                SCTP_ERROR(ECONNABORTED));
                sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
                                SCTP_PERR(SCTP_ERROR_NO_DATA));
-               SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
-               SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+               SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
+               SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
                return SCTP_IERROR_NO_DATA;
        }
 
@@ -6120,9 +6227,9 @@ static int sctp_eat_data(const struct sctp_association *asoc,
         * if we renege and the chunk arrives again.
         */
        if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
-               SCTP_INC_STATS(SCTP_MIB_INUNORDERCHUNKS);
+               SCTP_INC_STATS(net, SCTP_MIB_INUNORDERCHUNKS);
        else {
-               SCTP_INC_STATS(SCTP_MIB_INORDERCHUNKS);
+               SCTP_INC_STATS(net, SCTP_MIB_INORDERCHUNKS);
                ordered = 1;
        }
 
index 7c211a7f90f4d065eec82baa0cb751373e7eb0be..84d98d8a5a7417bd92ea919c56e0f8033073a6c4 100644 (file)
@@ -59,7 +59,8 @@ other_event_table[SCTP_NUM_OTHER_TYPES][SCTP_STATE_NUM_STATES];
 static const sctp_sm_table_entry_t
 timeout_event_table[SCTP_NUM_TIMEOUT_TYPES][SCTP_STATE_NUM_STATES];
 
-static const sctp_sm_table_entry_t *sctp_chunk_event_lookup(sctp_cid_t cid,
+static const sctp_sm_table_entry_t *sctp_chunk_event_lookup(struct net *net,
+                                                           sctp_cid_t cid,
                                                            sctp_state_t state);
 
 
@@ -82,13 +83,14 @@ static const sctp_sm_table_entry_t bug = {
        rtn;                                                            \
 })
 
-const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
+const sctp_sm_table_entry_t *sctp_sm_lookup_event(struct net *net,
+                                                 sctp_event_t event_type,
                                                  sctp_state_t state,
                                                  sctp_subtype_t event_subtype)
 {
        switch (event_type) {
        case SCTP_EVENT_T_CHUNK:
-               return sctp_chunk_event_lookup(event_subtype.chunk, state);
+               return sctp_chunk_event_lookup(net, event_subtype.chunk, state);
        case SCTP_EVENT_T_TIMEOUT:
                return DO_LOOKUP(SCTP_EVENT_TIMEOUT_MAX, timeout,
                                 timeout_event_table);
@@ -906,7 +908,8 @@ static const sctp_sm_table_entry_t timeout_event_table[SCTP_NUM_TIMEOUT_TYPES][S
        TYPE_SCTP_EVENT_TIMEOUT_AUTOCLOSE,
 };
 
-static const sctp_sm_table_entry_t *sctp_chunk_event_lookup(sctp_cid_t cid,
+static const sctp_sm_table_entry_t *sctp_chunk_event_lookup(struct net *net,
+                                                           sctp_cid_t cid,
                                                            sctp_state_t state)
 {
        if (state > SCTP_STATE_MAX)
@@ -915,12 +918,12 @@ static const sctp_sm_table_entry_t *sctp_chunk_event_lookup(sctp_cid_t cid,
        if (cid <= SCTP_CID_BASE_MAX)
                return &chunk_event_table[cid][state];
 
-       if (sctp_prsctp_enable) {
+       if (net->sctp.prsctp_enable) {
                if (cid == SCTP_CID_FWD_TSN)
                        return &prsctp_chunk_event_table[0][state];
        }
 
-       if (sctp_addip_enable) {
+       if (net->sctp.addip_enable) {
                if (cid == SCTP_CID_ASCONF)
                        return &addip_chunk_event_table[0][state];
 
@@ -928,7 +931,7 @@ static const sctp_sm_table_entry_t *sctp_chunk_event_lookup(sctp_cid_t cid,
                        return &addip_chunk_event_table[1][state];
        }
 
-       if (sctp_auth_enable) {
+       if (net->sctp.auth_enable) {
                if (cid == SCTP_CID_AUTH)
                        return &auth_chunk_event_table[0][state];
        }
index 5e259817a7f34cd4a183139fe9c4bf5ee2ab6689..59d16ea927f0f83d706d3a59c79d13be0a95c1e8 100644 (file)
@@ -70,6 +70,7 @@
 #include <linux/init.h>
 #include <linux/crypto.h>
 #include <linux/slab.h>
+#include <linux/file.h>
 
 #include <net/ip.h>
 #include <net/icmp.h>
@@ -427,6 +428,7 @@ SCTP_STATIC int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
 static int sctp_send_asconf(struct sctp_association *asoc,
                            struct sctp_chunk *chunk)
 {
+       struct net      *net = sock_net(asoc->base.sk);
        int             retval = 0;
 
        /* If there is an outstanding ASCONF chunk, queue it for later
@@ -439,7 +441,7 @@ static int sctp_send_asconf(struct sctp_association *asoc,
 
        /* Hold the chunk until an ASCONF_ACK is received. */
        sctp_chunk_hold(chunk);
-       retval = sctp_primitive_ASCONF(asoc, chunk);
+       retval = sctp_primitive_ASCONF(net, asoc, chunk);
        if (retval)
                sctp_chunk_free(chunk);
        else
@@ -515,6 +517,7 @@ static int sctp_send_asconf_add_ip(struct sock              *sk,
                                   struct sockaddr      *addrs,
                                   int                  addrcnt)
 {
+       struct net *net = sock_net(sk);
        struct sctp_sock                *sp;
        struct sctp_endpoint            *ep;
        struct sctp_association         *asoc;
@@ -529,7 +532,7 @@ static int sctp_send_asconf_add_ip(struct sock              *sk,
        int                             i;
        int                             retval = 0;
 
-       if (!sctp_addip_enable)
+       if (!net->sctp.addip_enable)
                return retval;
 
        sp = sctp_sk(sk);
@@ -717,6 +720,7 @@ static int sctp_send_asconf_del_ip(struct sock              *sk,
                                   struct sockaddr      *addrs,
                                   int                  addrcnt)
 {
+       struct net *net = sock_net(sk);
        struct sctp_sock        *sp;
        struct sctp_endpoint    *ep;
        struct sctp_association *asoc;
@@ -732,7 +736,7 @@ static int sctp_send_asconf_del_ip(struct sock              *sk,
        int                     stored = 0;
 
        chunk = NULL;
-       if (!sctp_addip_enable)
+       if (!net->sctp.addip_enable)
                return retval;
 
        sp = sctp_sk(sk);
@@ -1050,6 +1054,7 @@ static int __sctp_connect(struct sock* sk,
                          int addrs_size,
                          sctp_assoc_t *assoc_id)
 {
+       struct net *net = sock_net(sk);
        struct sctp_sock *sp;
        struct sctp_endpoint *ep;
        struct sctp_association *asoc = NULL;
@@ -1200,7 +1205,7 @@ static int __sctp_connect(struct sock* sk,
                        goto out_free;
        }
 
-       err = sctp_primitive_ASSOCIATE(asoc, NULL);
+       err = sctp_primitive_ASSOCIATE(net, asoc, NULL);
        if (err < 0) {
                goto out_free;
        }
@@ -1458,6 +1463,7 @@ SCTP_STATIC int sctp_getsockopt_connectx3(struct sock* sk, int len,
  */
 SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
 {
+       struct net *net = sock_net(sk);
        struct sctp_endpoint *ep;
        struct sctp_association *asoc;
        struct list_head *pos, *temp;
@@ -1499,9 +1505,9 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
 
                        chunk = sctp_make_abort_user(asoc, NULL, 0);
                        if (chunk)
-                               sctp_primitive_ABORT(asoc, chunk);
+                               sctp_primitive_ABORT(net, asoc, chunk);
                } else
-                       sctp_primitive_SHUTDOWN(asoc, NULL);
+                       sctp_primitive_SHUTDOWN(net, asoc, NULL);
        }
 
        /* On a TCP-style socket, block for at most linger_time if set. */
@@ -1569,6 +1575,7 @@ SCTP_STATIC int sctp_msghdr_parse(const struct msghdr *, sctp_cmsgs_t *);
 SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
                             struct msghdr *msg, size_t msg_len)
 {
+       struct net *net = sock_net(sk);
        struct sctp_sock *sp;
        struct sctp_endpoint *ep;
        struct sctp_association *new_asoc=NULL, *asoc=NULL;
@@ -1714,7 +1721,7 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
                if (sinfo_flags & SCTP_EOF) {
                        SCTP_DEBUG_PRINTK("Shutting down association: %p\n",
                                          asoc);
-                       sctp_primitive_SHUTDOWN(asoc, NULL);
+                       sctp_primitive_SHUTDOWN(net, asoc, NULL);
                        err = 0;
                        goto out_unlock;
                }
@@ -1727,7 +1734,7 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
                        }
 
                        SCTP_DEBUG_PRINTK("Aborting association: %p\n", asoc);
-                       sctp_primitive_ABORT(asoc, chunk);
+                       sctp_primitive_ABORT(net, asoc, chunk);
                        err = 0;
                        goto out_unlock;
                }
@@ -1900,7 +1907,7 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
 
        /* Auto-connect, if we aren't connected already. */
        if (sctp_state(asoc, CLOSED)) {
-               err = sctp_primitive_ASSOCIATE(asoc, NULL);
+               err = sctp_primitive_ASSOCIATE(net, asoc, NULL);
                if (err < 0)
                        goto out_free;
                SCTP_DEBUG_PRINTK("We associated primitively.\n");
@@ -1928,7 +1935,7 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
         * works that way today.  Keep it that way or this
         * breaks.
         */
-       err = sctp_primitive_SEND(asoc, datamsg);
+       err = sctp_primitive_SEND(net, asoc, datamsg);
        /* Did the lower layer accept the chunk? */
        if (err)
                sctp_datamsg_free(datamsg);
@@ -2320,7 +2327,9 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
        int error;
 
        if (params->spp_flags & SPP_HB_DEMAND && trans) {
-               error = sctp_primitive_REQUESTHEARTBEAT (trans->asoc, trans);
+               struct net *net = sock_net(trans->asoc->base.sk);
+
+               error = sctp_primitive_REQUESTHEARTBEAT(net, trans->asoc, trans);
                if (error)
                        return error;
        }
@@ -3033,6 +3042,7 @@ static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned
 static int sctp_setsockopt_peer_primary_addr(struct sock *sk, char __user *optval,
                                             unsigned int optlen)
 {
+       struct net *net = sock_net(sk);
        struct sctp_sock        *sp;
        struct sctp_association *asoc = NULL;
        struct sctp_setpeerprim prim;
@@ -3042,7 +3052,7 @@ static int sctp_setsockopt_peer_primary_addr(struct sock *sk, char __user *optva
 
        sp = sctp_sk(sk);
 
-       if (!sctp_addip_enable)
+       if (!net->sctp.addip_enable)
                return -EPERM;
 
        if (optlen != sizeof(struct sctp_setpeerprim))
@@ -3279,9 +3289,10 @@ static int sctp_setsockopt_auth_chunk(struct sock *sk,
                                      char __user *optval,
                                      unsigned int optlen)
 {
+       struct net *net = sock_net(sk);
        struct sctp_authchunk val;
 
-       if (!sctp_auth_enable)
+       if (!net->sctp.auth_enable)
                return -EACCES;
 
        if (optlen != sizeof(struct sctp_authchunk))
@@ -3311,11 +3322,12 @@ static int sctp_setsockopt_hmac_ident(struct sock *sk,
                                      char __user *optval,
                                      unsigned int optlen)
 {
+       struct net *net = sock_net(sk);
        struct sctp_hmacalgo *hmacs;
        u32 idents;
        int err;
 
-       if (!sctp_auth_enable)
+       if (!net->sctp.auth_enable)
                return -EACCES;
 
        if (optlen < sizeof(struct sctp_hmacalgo))
@@ -3348,11 +3360,12 @@ static int sctp_setsockopt_auth_key(struct sock *sk,
                                    char __user *optval,
                                    unsigned int optlen)
 {
+       struct net *net = sock_net(sk);
        struct sctp_authkey *authkey;
        struct sctp_association *asoc;
        int ret;
 
-       if (!sctp_auth_enable)
+       if (!net->sctp.auth_enable)
                return -EACCES;
 
        if (optlen <= sizeof(struct sctp_authkey))
@@ -3389,10 +3402,11 @@ static int sctp_setsockopt_active_key(struct sock *sk,
                                      char __user *optval,
                                      unsigned int optlen)
 {
+       struct net *net = sock_net(sk);
        struct sctp_authkeyid val;
        struct sctp_association *asoc;
 
-       if (!sctp_auth_enable)
+       if (!net->sctp.auth_enable)
                return -EACCES;
 
        if (optlen != sizeof(struct sctp_authkeyid))
@@ -3417,10 +3431,11 @@ static int sctp_setsockopt_del_key(struct sock *sk,
                                   char __user *optval,
                                   unsigned int optlen)
 {
+       struct net *net = sock_net(sk);
        struct sctp_authkeyid val;
        struct sctp_association *asoc;
 
-       if (!sctp_auth_enable)
+       if (!net->sctp.auth_enable)
                return -EACCES;
 
        if (optlen != sizeof(struct sctp_authkeyid))
@@ -3471,7 +3486,7 @@ static int sctp_setsockopt_auto_asconf(struct sock *sk, char __user *optval,
                sp->do_auto_asconf = 0;
        } else if (val && !sp->do_auto_asconf) {
                list_add_tail(&sp->auto_asconf_list,
-                   &sctp_auto_asconf_splist);
+                   &sock_net(sk)->sctp.auto_asconf_splist);
                sp->do_auto_asconf = 1;
        }
        return 0;
@@ -3843,6 +3858,7 @@ out:
  */
 SCTP_STATIC int sctp_init_sock(struct sock *sk)
 {
+       struct net *net = sock_net(sk);
        struct sctp_endpoint *ep;
        struct sctp_sock *sp;
 
@@ -3872,7 +3888,7 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
        sp->default_timetolive = 0;
 
        sp->default_rcv_context = 0;
-       sp->max_burst = sctp_max_burst;
+       sp->max_burst = net->sctp.max_burst;
 
        /* Initialize default setup parameters. These parameters
         * can be modified with the SCTP_INITMSG socket option or
@@ -3880,24 +3896,24 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
         */
        sp->initmsg.sinit_num_ostreams   = sctp_max_outstreams;
        sp->initmsg.sinit_max_instreams  = sctp_max_instreams;
-       sp->initmsg.sinit_max_attempts   = sctp_max_retrans_init;
-       sp->initmsg.sinit_max_init_timeo = sctp_rto_max;
+       sp->initmsg.sinit_max_attempts   = net->sctp.max_retrans_init;
+       sp->initmsg.sinit_max_init_timeo = net->sctp.rto_max;
 
        /* Initialize default RTO related parameters.  These parameters can
         * be modified for with the SCTP_RTOINFO socket option.
         */
-       sp->rtoinfo.srto_initial = sctp_rto_initial;
-       sp->rtoinfo.srto_max     = sctp_rto_max;
-       sp->rtoinfo.srto_min     = sctp_rto_min;
+       sp->rtoinfo.srto_initial = net->sctp.rto_initial;
+       sp->rtoinfo.srto_max     = net->sctp.rto_max;
+       sp->rtoinfo.srto_min     = net->sctp.rto_min;
 
        /* Initialize default association related parameters. These parameters
         * can be modified with the SCTP_ASSOCINFO socket option.
         */
-       sp->assocparams.sasoc_asocmaxrxt = sctp_max_retrans_association;
+       sp->assocparams.sasoc_asocmaxrxt = net->sctp.max_retrans_association;
        sp->assocparams.sasoc_number_peer_destinations = 0;
        sp->assocparams.sasoc_peer_rwnd = 0;
        sp->assocparams.sasoc_local_rwnd = 0;
-       sp->assocparams.sasoc_cookie_life = sctp_valid_cookie_life;
+       sp->assocparams.sasoc_cookie_life = net->sctp.valid_cookie_life;
 
        /* Initialize default event subscriptions. By default, all the
         * options are off.
@@ -3907,10 +3923,10 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
        /* Default Peer Address Parameters.  These defaults can
         * be modified via SCTP_PEER_ADDR_PARAMS
         */
-       sp->hbinterval  = sctp_hb_interval;
-       sp->pathmaxrxt  = sctp_max_retrans_path;
+       sp->hbinterval  = net->sctp.hb_interval;
+       sp->pathmaxrxt  = net->sctp.max_retrans_path;
        sp->pathmtu     = 0; // allow default discovery
-       sp->sackdelay   = sctp_sack_timeout;
+       sp->sackdelay   = net->sctp.sack_timeout;
        sp->sackfreq    = 2;
        sp->param_flags = SPP_HB_ENABLE |
                          SPP_PMTUD_ENABLE |
@@ -3961,10 +3977,10 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
 
        local_bh_disable();
        percpu_counter_inc(&sctp_sockets_allocated);
-       sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
-       if (sctp_default_auto_asconf) {
+       sock_prot_inuse_add(net, sk->sk_prot, 1);
+       if (net->sctp.default_auto_asconf) {
                list_add_tail(&sp->auto_asconf_list,
-                   &sctp_auto_asconf_splist);
+                   &net->sctp.auto_asconf_splist);
                sp->do_auto_asconf = 1;
        } else
                sp->do_auto_asconf = 0;
@@ -4011,6 +4027,7 @@ SCTP_STATIC void sctp_destroy_sock(struct sock *sk)
  */
 SCTP_STATIC void sctp_shutdown(struct sock *sk, int how)
 {
+       struct net *net = sock_net(sk);
        struct sctp_endpoint *ep;
        struct sctp_association *asoc;
 
@@ -4022,7 +4039,7 @@ SCTP_STATIC void sctp_shutdown(struct sock *sk, int how)
                if (!list_empty(&ep->asocs)) {
                        asoc = list_entry(ep->asocs.next,
                                          struct sctp_association, asocs);
-                       sctp_primitive_SHUTDOWN(asoc, NULL);
+                       sctp_primitive_SHUTDOWN(net, asoc, NULL);
                }
        }
 }
@@ -4276,6 +4293,7 @@ static int sctp_getsockopt_peeloff(struct sock *sk, int len, char __user *optval
 {
        sctp_peeloff_arg_t peeloff;
        struct socket *newsock;
+       struct file *newfile;
        int retval = 0;
 
        if (len < sizeof(sctp_peeloff_arg_t))
@@ -4289,22 +4307,35 @@ static int sctp_getsockopt_peeloff(struct sock *sk, int len, char __user *optval
                goto out;
 
        /* Map the socket to an unused fd that can be returned to the user.  */
-       retval = sock_map_fd(newsock, 0);
+       retval = get_unused_fd();
        if (retval < 0) {
                sock_release(newsock);
                goto out;
        }
 
+       newfile = sock_alloc_file(newsock, 0, NULL);
+       if (unlikely(IS_ERR(newfile))) {
+               put_unused_fd(retval);
+               sock_release(newsock);
+               return PTR_ERR(newfile);
+       }
+
        SCTP_DEBUG_PRINTK("%s: sk: %p newsk: %p sd: %d\n",
                          __func__, sk, newsock->sk, retval);
 
        /* Return the fd mapped to the new socket.  */
+       if (put_user(len, optlen)) {
+               fput(newfile);
+               put_unused_fd(retval);
+               return -EFAULT;
+       }
        peeloff.sd = retval;
-       if (put_user(len, optlen))
+       if (copy_to_user(optval, &peeloff, len)) {
+               fput(newfile);
+               put_unused_fd(retval);
                return -EFAULT;
-       if (copy_to_user(optval, &peeloff, len))
-               retval = -EFAULT;
-
+       }
+       fd_install(retval, newfile);
 out:
        return retval;
 }
@@ -4653,9 +4684,10 @@ static int sctp_copy_laddrs(struct sock *sk, __u16 port, void *to,
        union sctp_addr temp;
        int cnt = 0;
        int addrlen;
+       struct net *net = sock_net(sk);
 
        rcu_read_lock();
-       list_for_each_entry_rcu(addr, &sctp_local_addr_list, list) {
+       list_for_each_entry_rcu(addr, &net->sctp.local_addr_list, list) {
                if (!addr->valid)
                        continue;
 
@@ -5299,12 +5331,13 @@ static int sctp_getsockopt_maxburst(struct sock *sk, int len,
 static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
                                    char __user *optval, int __user *optlen)
 {
+       struct net *net = sock_net(sk);
        struct sctp_hmacalgo  __user *p = (void __user *)optval;
        struct sctp_hmac_algo_param *hmacs;
        __u16 data_len = 0;
        u32 num_idents;
 
-       if (!sctp_auth_enable)
+       if (!net->sctp.auth_enable)
                return -EACCES;
 
        hmacs = sctp_sk(sk)->ep->auth_hmacs_list;
@@ -5328,10 +5361,11 @@ static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
 static int sctp_getsockopt_active_key(struct sock *sk, int len,
                                    char __user *optval, int __user *optlen)
 {
+       struct net *net = sock_net(sk);
        struct sctp_authkeyid val;
        struct sctp_association *asoc;
 
-       if (!sctp_auth_enable)
+       if (!net->sctp.auth_enable)
                return -EACCES;
 
        if (len < sizeof(struct sctp_authkeyid))
@@ -5360,6 +5394,7 @@ static int sctp_getsockopt_active_key(struct sock *sk, int len,
 static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len,
                                    char __user *optval, int __user *optlen)
 {
+       struct net *net = sock_net(sk);
        struct sctp_authchunks __user *p = (void __user *)optval;
        struct sctp_authchunks val;
        struct sctp_association *asoc;
@@ -5367,7 +5402,7 @@ static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len,
        u32    num_chunks = 0;
        char __user *to;
 
-       if (!sctp_auth_enable)
+       if (!net->sctp.auth_enable)
                return -EACCES;
 
        if (len < sizeof(struct sctp_authchunks))
@@ -5403,6 +5438,7 @@ num:
 static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len,
                                    char __user *optval, int __user *optlen)
 {
+       struct net *net = sock_net(sk);
        struct sctp_authchunks __user *p = (void __user *)optval;
        struct sctp_authchunks val;
        struct sctp_association *asoc;
@@ -5410,7 +5446,7 @@ static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len,
        u32    num_chunks = 0;
        char __user *to;
 
-       if (!sctp_auth_enable)
+       if (!net->sctp.auth_enable)
                return -EACCES;
 
        if (len < sizeof(struct sctp_authchunks))
@@ -5769,7 +5805,7 @@ static void sctp_unhash(struct sock *sk)
  * a fastreuse flag (FIXME: NPI ipg).
  */
 static struct sctp_bind_bucket *sctp_bucket_create(
-       struct sctp_bind_hashbucket *head, unsigned short snum);
+       struct sctp_bind_hashbucket *head, struct net *, unsigned short snum);
 
 static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
 {
@@ -5799,11 +5835,12 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
                                rover = low;
                        if (inet_is_reserved_local_port(rover))
                                continue;
-                       index = sctp_phashfn(rover);
+                       index = sctp_phashfn(sock_net(sk), rover);
                        head = &sctp_port_hashtable[index];
                        sctp_spin_lock(&head->lock);
                        sctp_for_each_hentry(pp, node, &head->chain)
-                               if (pp->port == rover)
+                               if ((pp->port == rover) &&
+                                   net_eq(sock_net(sk), pp->net))
                                        goto next;
                        break;
                next:
@@ -5827,10 +5864,10 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
                 * to the port number (snum) - we detect that with the
                 * port iterator, pp being NULL.
                 */
-               head = &sctp_port_hashtable[sctp_phashfn(snum)];
+               head = &sctp_port_hashtable[sctp_phashfn(sock_net(sk), snum)];
                sctp_spin_lock(&head->lock);
                sctp_for_each_hentry(pp, node, &head->chain) {
-                       if (pp->port == snum)
+                       if ((pp->port == snum) && net_eq(pp->net, sock_net(sk)))
                                goto pp_found;
                }
        }
@@ -5881,7 +5918,7 @@ pp_found:
 pp_not_found:
        /* If there was a hash table miss, create a new port.  */
        ret = 1;
-       if (!pp && !(pp = sctp_bucket_create(head, snum)))
+       if (!pp && !(pp = sctp_bucket_create(head, sock_net(sk), snum)))
                goto fail_unlock;
 
        /* In either case (hit or miss), make sure fastreuse is 1 only
@@ -6113,7 +6150,7 @@ unsigned int sctp_poll(struct file *file, struct socket *sock, poll_table *wait)
  ********************************************************************/
 
 static struct sctp_bind_bucket *sctp_bucket_create(
-       struct sctp_bind_hashbucket *head, unsigned short snum)
+       struct sctp_bind_hashbucket *head, struct net *net, unsigned short snum)
 {
        struct sctp_bind_bucket *pp;
 
@@ -6123,6 +6160,7 @@ static struct sctp_bind_bucket *sctp_bucket_create(
                pp->port = snum;
                pp->fastreuse = 0;
                INIT_HLIST_HEAD(&pp->owner);
+               pp->net = net;
                hlist_add_head(&pp->node, &head->chain);
        }
        return pp;
@@ -6142,7 +6180,8 @@ static void sctp_bucket_destroy(struct sctp_bind_bucket *pp)
 static inline void __sctp_put_port(struct sock *sk)
 {
        struct sctp_bind_hashbucket *head =
-               &sctp_port_hashtable[sctp_phashfn(inet_sk(sk)->inet_num)];
+               &sctp_port_hashtable[sctp_phashfn(sock_net(sk),
+                                                 inet_sk(sk)->inet_num)];
        struct sctp_bind_bucket *pp;
 
        sctp_spin_lock(&head->lock);
@@ -6809,7 +6848,8 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
        newsp->hmac = NULL;
 
        /* Hook this new socket in to the bind_hash list. */
-       head = &sctp_port_hashtable[sctp_phashfn(inet_sk(oldsk)->inet_num)];
+       head = &sctp_port_hashtable[sctp_phashfn(sock_net(oldsk),
+                                                inet_sk(oldsk)->inet_num)];
        sctp_local_bh_disable();
        sctp_spin_lock(&head->lock);
        pp = sctp_sk(oldsk)->bind_hash;
index 2b2bfe933ff14413aa4970391eb25d038ff3d90a..70e3ba5cb50b319319e60c7bfa6fae69bc5c1fed 100644 (file)
@@ -63,9 +63,35 @@ extern int sysctl_sctp_rmem[3];
 extern int sysctl_sctp_wmem[3];
 
 static ctl_table sctp_table[] = {
+       {
+               .procname       = "sctp_mem",
+               .data           = &sysctl_sctp_mem,
+               .maxlen         = sizeof(sysctl_sctp_mem),
+               .mode           = 0644,
+               .proc_handler   = proc_doulongvec_minmax
+       },
+       {
+               .procname       = "sctp_rmem",
+               .data           = &sysctl_sctp_rmem,
+               .maxlen         = sizeof(sysctl_sctp_rmem),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
+       {
+               .procname       = "sctp_wmem",
+               .data           = &sysctl_sctp_wmem,
+               .maxlen         = sizeof(sysctl_sctp_wmem),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
+
+       { /* sentinel */ }
+};
+
+static ctl_table sctp_net_table[] = {
        {
                .procname       = "rto_initial",
-               .data           = &sctp_rto_initial,
+               .data           = &init_net.sctp.rto_initial,
                .maxlen         = sizeof(unsigned int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
@@ -74,7 +100,7 @@ static ctl_table sctp_table[] = {
        },
        {
                .procname       = "rto_min",
-               .data           = &sctp_rto_min,
+               .data           = &init_net.sctp.rto_min,
                .maxlen         = sizeof(unsigned int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
@@ -83,7 +109,7 @@ static ctl_table sctp_table[] = {
        },
        {
                .procname       = "rto_max",
-               .data           = &sctp_rto_max,
+               .data           = &init_net.sctp.rto_max,
                .maxlen         = sizeof(unsigned int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
@@ -91,17 +117,22 @@ static ctl_table sctp_table[] = {
                .extra2         = &timer_max
        },
        {
-               .procname       = "valid_cookie_life",
-               .data           = &sctp_valid_cookie_life,
-               .maxlen         = sizeof(unsigned int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec_minmax,
-               .extra1         = &one,
-               .extra2         = &timer_max
+               .procname       = "rto_alpha_exp_divisor",
+               .data           = &init_net.sctp.rto_alpha,
+               .maxlen         = sizeof(int),
+               .mode           = 0444,
+               .proc_handler   = proc_dointvec,
+       },
+       {
+               .procname       = "rto_beta_exp_divisor",
+               .data           = &init_net.sctp.rto_beta,
+               .maxlen         = sizeof(int),
+               .mode           = 0444,
+               .proc_handler   = proc_dointvec,
        },
        {
                .procname       = "max_burst",
-               .data           = &sctp_max_burst,
+               .data           = &init_net.sctp.max_burst,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
@@ -109,31 +140,42 @@ static ctl_table sctp_table[] = {
                .extra2         = &int_max
        },
        {
-               .procname       = "association_max_retrans",
-               .data           = &sctp_max_retrans_association,
+               .procname       = "cookie_preserve_enable",
+               .data           = &init_net.sctp.cookie_preserve_enable,
                .maxlen         = sizeof(int),
                .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
+       {
+               .procname       = "valid_cookie_life",
+               .data           = &init_net.sctp.valid_cookie_life,
+               .maxlen         = sizeof(unsigned int),
+               .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
-               .extra1         = &one,
-               .extra2         = &int_max
+               .extra1         = &one,
+               .extra2         = &timer_max
        },
        {
-               .procname       = "sndbuf_policy",
-               .data           = &sctp_sndbuf_policy,
+               .procname       = "sack_timeout",
+               .data           = &init_net.sctp.sack_timeout,
                .maxlen         = sizeof(int),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec,
+               .proc_handler   = proc_dointvec_minmax,
+               .extra1         = &sack_timer_min,
+               .extra2         = &sack_timer_max,
        },
        {
-               .procname       = "rcvbuf_policy",
-               .data           = &sctp_rcvbuf_policy,
-               .maxlen         = sizeof(int),
+               .procname       = "hb_interval",
+               .data           = &init_net.sctp.hb_interval,
+               .maxlen         = sizeof(unsigned int),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec,
+               .proc_handler   = proc_dointvec_minmax,
+               .extra1         = &one,
+               .extra2         = &timer_max
        },
        {
-               .procname       = "path_max_retrans",
-               .data           = &sctp_max_retrans_path,
+               .procname       = "association_max_retrans",
+               .data           = &init_net.sctp.max_retrans_association,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
@@ -141,17 +183,17 @@ static ctl_table sctp_table[] = {
                .extra2         = &int_max
        },
        {
-               .procname       = "pf_retrans",
-               .data           = &sctp_pf_retrans,
+               .procname       = "path_max_retrans",
+               .data           = &init_net.sctp.max_retrans_path,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
-               .extra1         = &zero,
+               .extra1         = &one,
                .extra2         = &int_max
        },
        {
                .procname       = "max_init_retransmits",
-               .data           = &sctp_max_retrans_init,
+               .data           = &init_net.sctp.max_retrans_init,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
@@ -159,103 +201,66 @@ static ctl_table sctp_table[] = {
                .extra2         = &int_max
        },
        {
-               .procname       = "hb_interval",
-               .data           = &sctp_hb_interval,
-               .maxlen         = sizeof(unsigned int),
+               .procname       = "pf_retrans",
+               .data           = &init_net.sctp.pf_retrans,
+               .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
-               .extra1         = &one,
-               .extra2         = &timer_max
+               .extra1         = &zero,
+               .extra2         = &int_max
        },
        {
-               .procname       = "cookie_preserve_enable",
-               .data           = &sctp_cookie_preserve_enable,
+               .procname       = "sndbuf_policy",
+               .data           = &init_net.sctp.sndbuf_policy,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
        {
-               .procname       = "rto_alpha_exp_divisor",
-               .data           = &sctp_rto_alpha,
-               .maxlen         = sizeof(int),
-               .mode           = 0444,
-               .proc_handler   = proc_dointvec,
-       },
-       {
-               .procname       = "rto_beta_exp_divisor",
-               .data           = &sctp_rto_beta,
-               .maxlen         = sizeof(int),
-               .mode           = 0444,
-               .proc_handler   = proc_dointvec,
-       },
-       {
-               .procname       = "addip_enable",
-               .data           = &sctp_addip_enable,
+               .procname       = "rcvbuf_policy",
+               .data           = &init_net.sctp.rcvbuf_policy,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
        {
                .procname       = "default_auto_asconf",
-               .data           = &sctp_default_auto_asconf,
+               .data           = &init_net.sctp.default_auto_asconf,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
        {
-               .procname       = "prsctp_enable",
-               .data           = &sctp_prsctp_enable,
+               .procname       = "addip_enable",
+               .data           = &init_net.sctp.addip_enable,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
        {
-               .procname       = "sack_timeout",
-               .data           = &sctp_sack_timeout,
+               .procname       = "addip_noauth_enable",
+               .data           = &init_net.sctp.addip_noauth,
                .maxlen         = sizeof(int),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec_minmax,
-               .extra1         = &sack_timer_min,
-               .extra2         = &sack_timer_max,
-       },
-       {
-               .procname       = "sctp_mem",
-               .data           = &sysctl_sctp_mem,
-               .maxlen         = sizeof(sysctl_sctp_mem),
-               .mode           = 0644,
-               .proc_handler   = proc_doulongvec_minmax
-       },
-       {
-               .procname       = "sctp_rmem",
-               .data           = &sysctl_sctp_rmem,
-               .maxlen         = sizeof(sysctl_sctp_rmem),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec,
-       },
-       {
-               .procname       = "sctp_wmem",
-               .data           = &sysctl_sctp_wmem,
-               .maxlen         = sizeof(sysctl_sctp_wmem),
-               .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
        {
-               .procname       = "auth_enable",
-               .data           = &sctp_auth_enable,
+               .procname       = "prsctp_enable",
+               .data           = &init_net.sctp.prsctp_enable,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
        {
-               .procname       = "addip_noauth_enable",
-               .data           = &sctp_addip_noauth,
+               .procname       = "auth_enable",
+               .data           = &init_net.sctp.auth_enable,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
        {
                .procname       = "addr_scope_policy",
-               .data           = &sctp_scope_policy,
+               .data           = &init_net.sctp.scope_policy,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
@@ -264,7 +269,7 @@ static ctl_table sctp_table[] = {
        },
        {
                .procname       = "rwnd_update_shift",
-               .data           = &sctp_rwnd_upd_shift,
+               .data           = &init_net.sctp.rwnd_upd_shift,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_dointvec_minmax,
@@ -273,7 +278,7 @@ static ctl_table sctp_table[] = {
        },
        {
                .procname       = "max_autoclose",
-               .data           = &sctp_max_autoclose,
+               .data           = &init_net.sctp.max_autoclose,
                .maxlen         = sizeof(unsigned long),
                .mode           = 0644,
                .proc_handler   = &proc_doulongvec_minmax,
@@ -284,6 +289,27 @@ static ctl_table sctp_table[] = {
        { /* sentinel */ }
 };
 
+int sctp_sysctl_net_register(struct net *net)
+{
+       struct ctl_table *table;
+       int i;
+
+       table = kmemdup(sctp_net_table, sizeof(sctp_net_table), GFP_KERNEL);
+       if (!table)
+               return -ENOMEM;
+
+       for (i = 0; table[i].data; i++)
+               table[i].data += (char *)(&net->sctp) - (char *)&init_net.sctp;
+
+       net->sctp.sysctl_header = register_net_sysctl(net, "net/sctp", table);
+       return 0;
+}
+
+void sctp_sysctl_net_unregister(struct net *net)
+{
+       unregister_net_sysctl_table(net->sctp.sysctl_header);
+}
+
 static struct ctl_table_header * sctp_sysctl_header;
 
 /* Sysctl registration.  */
index c97472b248a2b257972cd9e4a353e89874ad87aa..953c21e4af977a752362187976e84b578bdb085c 100644 (file)
@@ -59,7 +59,8 @@
 /* 1st Level Abstractions.  */
 
 /* Initialize a new transport from provided memory.  */
-static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
+static struct sctp_transport *sctp_transport_init(struct net *net,
+                                                 struct sctp_transport *peer,
                                                  const union sctp_addr *addr,
                                                  gfp_t gfp)
 {
@@ -76,7 +77,7 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
         * given destination transport address, set RTO to the protocol
         * parameter 'RTO.Initial'.
         */
-       peer->rto = msecs_to_jiffies(sctp_rto_initial);
+       peer->rto = msecs_to_jiffies(net->sctp.rto_initial);
 
        peer->last_time_heard = jiffies;
        peer->last_time_ecne_reduced = jiffies;
@@ -86,8 +87,8 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
                            SPP_SACKDELAY_ENABLE;
 
        /* Initialize the default path max_retrans.  */
-       peer->pathmaxrxt  = sctp_max_retrans_path;
-       peer->pf_retrans  = sctp_pf_retrans;
+       peer->pathmaxrxt  = net->sctp.max_retrans_path;
+       peer->pf_retrans  = net->sctp.pf_retrans;
 
        INIT_LIST_HEAD(&peer->transmitted);
        INIT_LIST_HEAD(&peer->send_ready);
@@ -109,7 +110,8 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
 }
 
 /* Allocate and initialize a new transport.  */
-struct sctp_transport *sctp_transport_new(const union sctp_addr *addr,
+struct sctp_transport *sctp_transport_new(struct net *net,
+                                         const union sctp_addr *addr,
                                          gfp_t gfp)
 {
        struct sctp_transport *transport;
@@ -118,7 +120,7 @@ struct sctp_transport *sctp_transport_new(const union sctp_addr *addr,
        if (!transport)
                goto fail;
 
-       if (!sctp_transport_init(transport, addr, gfp))
+       if (!sctp_transport_init(net, transport, addr, gfp))
                goto fail_init;
 
        transport->malloced = 1;
@@ -316,6 +318,7 @@ void sctp_transport_update_rto(struct sctp_transport *tp, __u32 rtt)
        SCTP_ASSERT(tp->rto_pending, "rto_pending not set", return);
 
        if (tp->rttvar || tp->srtt) {
+               struct net *net = sock_net(tp->asoc->base.sk);
                /* 6.3.1 C3) When a new RTT measurement R' is made, set
                 * RTTVAR <- (1 - RTO.Beta) * RTTVAR + RTO.Beta * |SRTT - R'|
                 * SRTT <- (1 - RTO.Alpha) * SRTT + RTO.Alpha * R'
@@ -327,10 +330,10 @@ void sctp_transport_update_rto(struct sctp_transport *tp, __u32 rtt)
                 * For example, assuming the default value of RTO.Alpha of
                 * 1/8, rto_alpha would be expressed as 3.
                 */
-               tp->rttvar = tp->rttvar - (tp->rttvar >> sctp_rto_beta)
-                       + ((abs(tp->srtt - rtt)) >> sctp_rto_beta);
-               tp->srtt = tp->srtt - (tp->srtt >> sctp_rto_alpha)
-                       + (rtt >> sctp_rto_alpha);
+               tp->rttvar = tp->rttvar - (tp->rttvar >> net->sctp.rto_beta)
+                       + ((abs(tp->srtt - rtt)) >> net->sctp.rto_beta);
+               tp->srtt = tp->srtt - (tp->srtt >> net->sctp.rto_alpha)
+                       + (rtt >> net->sctp.rto_alpha);
        } else {
                /* 6.3.1 C2) When the first RTT measurement R is made, set
                 * SRTT <- R, RTTVAR <- R/2.
index f5a6a4f4faf721af4874538093cb003f4efc202c..360d8697b95c33408d6a4913b9b1d497d27e5ee7 100644 (file)
@@ -326,7 +326,9 @@ static void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
  * payload was fragmented on the way and ip had to reassemble them.
  * We add the rest of skb's to the first skb's fraglist.
  */
-static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff_head *queue, struct sk_buff *f_frag, struct sk_buff *l_frag)
+static struct sctp_ulpevent *sctp_make_reassembled_event(struct net *net,
+       struct sk_buff_head *queue, struct sk_buff *f_frag,
+       struct sk_buff *l_frag)
 {
        struct sk_buff *pos;
        struct sk_buff *new = NULL;
@@ -394,7 +396,7 @@ static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff_head *qu
        }
 
        event = sctp_skb2event(f_frag);
-       SCTP_INC_STATS(SCTP_MIB_REASMUSRMSGS);
+       SCTP_INC_STATS(net, SCTP_MIB_REASMUSRMSGS);
 
        return event;
 }
@@ -493,7 +495,8 @@ static struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ul
                cevent = sctp_skb2event(pd_first);
                pd_point = sctp_sk(asoc->base.sk)->pd_point;
                if (pd_point && pd_point <= pd_len) {
-                       retval = sctp_make_reassembled_event(&ulpq->reasm,
+                       retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
+                                                            &ulpq->reasm,
                                                             pd_first,
                                                             pd_last);
                        if (retval)
@@ -503,7 +506,8 @@ static struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ul
 done:
        return retval;
 found:
-       retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, pos);
+       retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
+                                            &ulpq->reasm, first_frag, pos);
        if (retval)
                retval->msg_flags |= MSG_EOR;
        goto done;
@@ -563,7 +567,8 @@ static struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
         * further.
         */
 done:
-       retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, last_frag);
+       retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
+                                       &ulpq->reasm, first_frag, last_frag);
        if (retval && is_last)
                retval->msg_flags |= MSG_EOR;
 
@@ -655,7 +660,8 @@ static struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
         * further.
         */
 done:
-       retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, last_frag);
+       retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
+                                       &ulpq->reasm, first_frag, last_frag);
        return retval;
 }
 
index edc3c4af9085362c7227e31babfc19a489bf9cf6..d92c490e66fa84e432f11086642c091f5831da65 100644 (file)
@@ -88,6 +88,7 @@
 #include <linux/nsproxy.h>
 #include <linux/magic.h>
 #include <linux/slab.h>
+#include <linux/xattr.h>
 
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
@@ -346,22 +347,22 @@ static struct file_system_type sock_fs_type = {
  *     but we take care of internal coherence yet.
  */
 
-static int sock_alloc_file(struct socket *sock, struct file **f, int flags)
+struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname)
 {
        struct qstr name = { .name = "" };
        struct path path;
        struct file *file;
-       int fd;
 
-       fd = get_unused_fd_flags(flags);
-       if (unlikely(fd < 0))
-               return fd;
-
-       path.dentry = d_alloc_pseudo(sock_mnt->mnt_sb, &name);
-       if (unlikely(!path.dentry)) {
-               put_unused_fd(fd);
-               return -ENOMEM;
+       if (dname) {
+               name.name = dname;
+               name.len = strlen(name.name);
+       } else if (sock->sk) {
+               name.name = sock->sk->sk_prot_creator->name;
+               name.len = strlen(name.name);
        }
+       path.dentry = d_alloc_pseudo(sock_mnt->mnt_sb, &name);
+       if (unlikely(!path.dentry))
+               return ERR_PTR(-ENOMEM);
        path.mnt = mntget(sock_mnt);
 
        d_instantiate(path.dentry, SOCK_INODE(sock));
@@ -373,30 +374,33 @@ static int sock_alloc_file(struct socket *sock, struct file **f, int flags)
                /* drop dentry, keep inode */
                ihold(path.dentry->d_inode);
                path_put(&path);
-               put_unused_fd(fd);
-               return -ENFILE;
+               return ERR_PTR(-ENFILE);
        }
 
        sock->file = file;
        file->f_flags = O_RDWR | (flags & O_NONBLOCK);
        file->f_pos = 0;
        file->private_data = sock;
-
-       *f = file;
-       return fd;
+       return file;
 }
+EXPORT_SYMBOL(sock_alloc_file);
 
-int sock_map_fd(struct socket *sock, int flags)
+static int sock_map_fd(struct socket *sock, int flags)
 {
        struct file *newfile;
-       int fd = sock_alloc_file(sock, &newfile, flags);
+       int fd = get_unused_fd_flags(flags);
+       if (unlikely(fd < 0))
+               return fd;
 
-       if (likely(fd >= 0))
+       newfile = sock_alloc_file(sock, flags, NULL);
+       if (likely(!IS_ERR(newfile))) {
                fd_install(fd, newfile);
+               return fd;
+       }
 
-       return fd;
+       put_unused_fd(fd);
+       return PTR_ERR(newfile);
 }
-EXPORT_SYMBOL(sock_map_fd);
 
 struct socket *sock_from_file(struct file *file, int *err)
 {
@@ -455,6 +459,68 @@ static struct socket *sockfd_lookup_light(int fd, int *err, int *fput_needed)
        return NULL;
 }
 
+#define XATTR_SOCKPROTONAME_SUFFIX "sockprotoname"
+#define XATTR_NAME_SOCKPROTONAME (XATTR_SYSTEM_PREFIX XATTR_SOCKPROTONAME_SUFFIX)
+#define XATTR_NAME_SOCKPROTONAME_LEN (sizeof(XATTR_NAME_SOCKPROTONAME)-1)
+static ssize_t sockfs_getxattr(struct dentry *dentry,
+                              const char *name, void *value, size_t size)
+{
+       const char *proto_name;
+       size_t proto_size;
+       int error;
+
+       error = -ENODATA;
+       if (!strncmp(name, XATTR_NAME_SOCKPROTONAME, XATTR_NAME_SOCKPROTONAME_LEN)) {
+               proto_name = dentry->d_name.name;
+               proto_size = strlen(proto_name);
+
+               if (value) {
+                       error = -ERANGE;
+                       if (proto_size + 1 > size)
+                               goto out;
+
+                       strncpy(value, proto_name, proto_size + 1);
+               }
+               error = proto_size + 1;
+       }
+
+out:
+       return error;
+}
+
+static ssize_t sockfs_listxattr(struct dentry *dentry, char *buffer,
+                               size_t size)
+{
+       ssize_t len;
+       ssize_t used = 0;
+
+       len = security_inode_listsecurity(dentry->d_inode, buffer, size);
+       if (len < 0)
+               return len;
+       used += len;
+       if (buffer) {
+               if (size < used)
+                       return -ERANGE;
+               buffer += len;
+       }
+
+       len = (XATTR_NAME_SOCKPROTONAME_LEN + 1);
+       used += len;
+       if (buffer) {
+               if (size < used)
+                       return -ERANGE;
+               memcpy(buffer, XATTR_NAME_SOCKPROTONAME, len);
+               buffer += len;
+       }
+
+       return used;
+}
+
+static const struct inode_operations sockfs_inode_ops = {
+       .getxattr = sockfs_getxattr,
+       .listxattr = sockfs_listxattr,
+};
+
 /**
  *     sock_alloc      -       allocate a socket
  *
@@ -479,6 +545,7 @@ static struct socket *sock_alloc(void)
        inode->i_mode = S_IFSOCK | S_IRWXUGO;
        inode->i_uid = current_fsuid();
        inode->i_gid = current_fsgid();
+       inode->i_op = &sockfs_inode_ops;
 
        this_cpu_add(sockets_in_use, 1);
        return sock;
@@ -1394,17 +1461,32 @@ SYSCALL_DEFINE4(socketpair, int, family, int, type, int, protocol,
        if (err < 0)
                goto out_release_both;
 
-       fd1 = sock_alloc_file(sock1, &newfile1, flags);
+       fd1 = get_unused_fd_flags(flags);
        if (unlikely(fd1 < 0)) {
                err = fd1;
                goto out_release_both;
        }
-
-       fd2 = sock_alloc_file(sock2, &newfile2, flags);
+       fd2 = get_unused_fd_flags(flags);
        if (unlikely(fd2 < 0)) {
                err = fd2;
+               put_unused_fd(fd1);
+               goto out_release_both;
+       }
+
+       newfile1 = sock_alloc_file(sock1, flags, NULL);
+       if (unlikely(IS_ERR(newfile1))) {
+               err = PTR_ERR(newfile1);
+               put_unused_fd(fd1);
+               put_unused_fd(fd2);
+               goto out_release_both;
+       }
+
+       newfile2 = sock_alloc_file(sock2, flags, NULL);
+       if (IS_ERR(newfile2)) {
+               err = PTR_ERR(newfile2);
                fput(newfile1);
                put_unused_fd(fd1);
+               put_unused_fd(fd2);
                sock_release(sock2);
                goto out;
        }
@@ -1536,12 +1618,19 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
         */
        __module_get(newsock->ops->owner);
 
-       newfd = sock_alloc_file(newsock, &newfile, flags);
+       newfd = get_unused_fd_flags(flags);
        if (unlikely(newfd < 0)) {
                err = newfd;
                sock_release(newsock);
                goto out_put;
        }
+       newfile = sock_alloc_file(newsock, flags, sock->sk->sk_prot_creator->name);
+       if (unlikely(IS_ERR(newfile))) {
+               err = PTR_ERR(newfile);
+               put_unused_fd(newfd);
+               sock_release(newsock);
+               goto out_put;
+       }
 
        err = security_socket_accept(sock, newsock);
        if (err)
@@ -2527,12 +2616,6 @@ static int __init sock_init(void)
        if (err)
                goto out;
 
-       /*
-        *      Initialize sock SLAB cache.
-        */
-
-       sk_init();
-
        /*
         *      Initialize skbuff SLAB cache
         */
index 2afd2a84dc35aa5cab139b38f82f5c5206e83728..2a68bb3db772a4d6001716e28af4385bb9c066bb 100644 (file)
@@ -1635,7 +1635,7 @@ static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
 
 void __init cache_initialize(void)
 {
-       INIT_DELAYED_WORK_DEFERRABLE(&cache_cleaner, do_cache_clean);
+       INIT_DEFERRABLE_WORK(&cache_cleaner, do_cache_clean);
 }
 
 int cache_register_net(struct cache_detail *cd, struct net *net)
index 09e71241265ddf11ffec7ac0505c6377396f247b..4ec5c80e8a7ca0b20c7291db9641636f4dc0a3a6 100644 (file)
@@ -48,21 +48,6 @@ struct tipc_bearer tipc_bearers[MAX_BEARERS];
 
 static void bearer_disable(struct tipc_bearer *b_ptr);
 
-/**
- * media_name_valid - validate media name
- *
- * Returns 1 if media name is valid, otherwise 0.
- */
-static int media_name_valid(const char *name)
-{
-       u32 len;
-
-       len = strlen(name);
-       if ((len + 1) > TIPC_MAX_MEDIA_NAME)
-               return 0;
-       return strspn(name, tipc_alphabet) == len;
-}
-
 /**
  * tipc_media_find - locates specified media object by name
  */
@@ -102,7 +87,7 @@ int tipc_register_media(struct tipc_media *m_ptr)
 
        write_lock_bh(&tipc_net_lock);
 
-       if (!media_name_valid(m_ptr->name))
+       if ((strlen(m_ptr->name) + 1) > TIPC_MAX_MEDIA_NAME)
                goto exit;
        if ((m_ptr->bcast_addr.media_id != m_ptr->type_id) ||
            !m_ptr->bcast_addr.broadcast)
@@ -206,9 +191,7 @@ static int bearer_name_validate(const char *name,
 
        /* validate component parts of bearer name */
        if ((media_len <= 1) || (media_len > TIPC_MAX_MEDIA_NAME) ||
-           (if_len <= 1) || (if_len > TIPC_MAX_IF_NAME) ||
-           (strspn(media_name, tipc_alphabet) != (media_len - 1)) ||
-           (strspn(if_name, tipc_alphabet) != (if_len - 1)))
+           (if_len <= 1) || (if_len > TIPC_MAX_IF_NAME))
                return 0;
 
        /* return bearer name components, if necessary */
index a056a3852f71f0a63109c7fe5188f8117e7144c7..f67866c765dd574130bb17d5476c8c9723d4612a 100644 (file)
@@ -2,7 +2,7 @@
  * net/tipc/config.c: TIPC configuration management code
  *
  * Copyright (c) 2002-2006, Ericsson AB
- * Copyright (c) 2004-2007, 2010-2011, Wind River Systems
+ * Copyright (c) 2004-2007, 2010-2012, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -208,36 +208,6 @@ static struct sk_buff *cfg_set_remote_mng(void)
        return tipc_cfg_reply_none();
 }
 
-static struct sk_buff *cfg_set_max_publications(void)
-{
-       u32 value;
-
-       if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
-               return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
-
-       value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
-       if (value < 1 || value > 65535)
-               return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
-                                                  " (max publications must be 1-65535)");
-       tipc_max_publications = value;
-       return tipc_cfg_reply_none();
-}
-
-static struct sk_buff *cfg_set_max_subscriptions(void)
-{
-       u32 value;
-
-       if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
-               return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
-
-       value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
-       if (value < 1 || value > 65535)
-               return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
-                                                  " (max subscriptions must be 1-65535");
-       tipc_max_subscriptions = value;
-       return tipc_cfg_reply_none();
-}
-
 static struct sk_buff *cfg_set_max_ports(void)
 {
        u32 value;
@@ -357,12 +327,6 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area
        case TIPC_CMD_SET_MAX_PORTS:
                rep_tlv_buf = cfg_set_max_ports();
                break;
-       case TIPC_CMD_SET_MAX_PUBL:
-               rep_tlv_buf = cfg_set_max_publications();
-               break;
-       case TIPC_CMD_SET_MAX_SUBSCR:
-               rep_tlv_buf = cfg_set_max_subscriptions();
-               break;
        case TIPC_CMD_SET_NETID:
                rep_tlv_buf = cfg_set_netid();
                break;
@@ -372,12 +336,6 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area
        case TIPC_CMD_GET_MAX_PORTS:
                rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_max_ports);
                break;
-       case TIPC_CMD_GET_MAX_PUBL:
-               rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_max_publications);
-               break;
-       case TIPC_CMD_GET_MAX_SUBSCR:
-               rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_max_subscriptions);
-               break;
        case TIPC_CMD_GET_NETID:
                rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_net_id);
                break;
@@ -393,6 +351,10 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area
        case TIPC_CMD_GET_MAX_CLUSTERS:
        case TIPC_CMD_SET_MAX_NODES:
        case TIPC_CMD_GET_MAX_NODES:
+       case TIPC_CMD_SET_MAX_SUBSCR:
+       case TIPC_CMD_GET_MAX_SUBSCR:
+       case TIPC_CMD_SET_MAX_PUBL:
+       case TIPC_CMD_GET_MAX_PUBL:
        case TIPC_CMD_SET_LOG_SIZE:
        case TIPC_CMD_DUMP_LOG:
                rep_tlv_buf = tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
index 6586eac6a50eb5508447c8d505b67d0d261d1f00..bfe8af88469a95b5012d1cb34e3e9415120a2808 100644 (file)
 
 
 /* global variables used by multiple sub-systems within TIPC */
-int tipc_random;
-
-const char tipc_alphabet[] =
-       "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_.";
+int tipc_random __read_mostly;
 
 /* configurable TIPC parameters */
-u32 tipc_own_addr;
-int tipc_max_ports;
-int tipc_max_subscriptions;
-int tipc_max_publications;
-int tipc_net_id;
-int tipc_remote_management;
+u32 tipc_own_addr __read_mostly;
+int tipc_max_ports __read_mostly;
+int tipc_net_id __read_mostly;
+int tipc_remote_management __read_mostly;
 
 
 /**
@@ -101,9 +96,8 @@ int tipc_core_start_net(unsigned long addr)
 {
        int res;
 
-       res = tipc_net_start(addr);
-       if (!res)
-               res = tipc_eth_media_start();
+       tipc_net_start(addr);
+       res = tipc_eth_media_start();
        if (res)
                tipc_core_stop_net();
        return res;
@@ -160,8 +154,6 @@ static int __init tipc_init(void)
 
        tipc_own_addr = 0;
        tipc_remote_management = 1;
-       tipc_max_publications = 10000;
-       tipc_max_subscriptions = 2000;
        tipc_max_ports = CONFIG_TIPC_PORTS;
        tipc_net_id = 4711;
 
index fd42e106c18539152822d1bfdceefc25a036b164..0207db04179a00feecf6f2cc04ded91f40f1db45 100644 (file)
@@ -60,7 +60,9 @@
 
 #define TIPC_MOD_VER "2.0.0"
 
-#define ULTRA_STRING_MAX_LEN 32768
+#define ULTRA_STRING_MAX_LEN   32768
+#define TIPC_MAX_SUBSCRIPTIONS 65535
+#define TIPC_MAX_PUBLICATIONS  65535
 
 struct tipc_msg;       /* msg.h */
 
@@ -74,19 +76,15 @@ int tipc_snprintf(char *buf, int len, const char *fmt, ...);
 /*
  * Global configuration variables
  */
-extern u32 tipc_own_addr;
-extern int tipc_max_ports;
-extern int tipc_max_subscriptions;
-extern int tipc_max_publications;
-extern int tipc_net_id;
-extern int tipc_remote_management;
+extern u32 tipc_own_addr __read_mostly;
+extern int tipc_max_ports __read_mostly;
+extern int tipc_net_id __read_mostly;
+extern int tipc_remote_management __read_mostly;
 
 /*
  * Other global variables
  */
-extern int tipc_random;
-extern const char tipc_alphabet[];
-
+extern int tipc_random __read_mostly;
 
 /*
  * Routines available to privileged subsystems
index 90ac9bfa7abb2d593d30580a16bfd5b78812753f..2132c1ef2951aa3c907e0650805f41aa88448a9b 100644 (file)
  * @bearer: ptr to associated "generic" bearer structure
  * @dev: ptr to associated Ethernet network device
  * @tipc_packet_type: used in binding TIPC to Ethernet driver
+ * @setup: work item used when enabling bearer
  * @cleanup: work item used when disabling bearer
  */
 struct eth_bearer {
        struct tipc_bearer *bearer;
        struct net_device *dev;
        struct packet_type tipc_packet_type;
+       struct work_struct setup;
        struct work_struct cleanup;
 };
 
 static struct tipc_media eth_media_info;
 static struct eth_bearer eth_bearers[MAX_ETH_BEARERS];
 static int eth_started;
-static struct notifier_block notifier;
+
+static int recv_notification(struct notifier_block *nb, unsigned long evt,
+                             void *dv);
+/*
+ * Network device notifier info
+ */
+static struct notifier_block notifier = {
+       .notifier_call  = recv_notification,
+       .priority       = 0
+};
 
 /**
  * eth_media_addr_set - initialize Ethernet media address structure
@@ -133,6 +144,17 @@ static int recv_msg(struct sk_buff *buf, struct net_device *dev,
        return 0;
 }
 
+/**
+ * setup_bearer - setup association between Ethernet bearer and interface
+ */
+static void setup_bearer(struct work_struct *work)
+{
+       struct eth_bearer *eb_ptr =
+               container_of(work, struct eth_bearer, setup);
+
+       dev_add_pack(&eb_ptr->tipc_packet_type);
+}
+
 /**
  * enable_bearer - attach TIPC bearer to an Ethernet interface
  */
@@ -173,7 +195,8 @@ static int enable_bearer(struct tipc_bearer *tb_ptr)
        eb_ptr->tipc_packet_type.func = recv_msg;
        eb_ptr->tipc_packet_type.af_packet_priv = eb_ptr;
        INIT_LIST_HEAD(&(eb_ptr->tipc_packet_type.list));
-       dev_add_pack(&eb_ptr->tipc_packet_type);
+       INIT_WORK(&eb_ptr->setup, setup_bearer);
+       schedule_work(&eb_ptr->setup);
 
        /* Associate TIPC bearer with Ethernet bearer */
        eb_ptr->bearer = tb_ptr;
@@ -357,8 +380,6 @@ int tipc_eth_media_start(void)
        if (res)
                return res;
 
-       notifier.notifier_call = &recv_notification;
-       notifier.priority = 0;
        res = register_netdevice_notifier(&notifier);
        if (!res)
                eth_started = 1;
index 7a52d3922f3c2bde5b220cc96db7964fbe566e7a..111ff8300ae52ed43226f3ec8ab079bdb2e00b9c 100644 (file)
@@ -45,7 +45,7 @@ struct queue_item {
 static struct kmem_cache *tipc_queue_item_cache;
 static struct list_head signal_queue_head;
 static DEFINE_SPINLOCK(qitem_lock);
-static int handler_enabled;
+static int handler_enabled __read_mostly;
 
 static void process_signal_queue(unsigned long dummy);
 
index 1c1e6151875e6a16c93096063b2065a419dd5e0d..a79c755cb41714bf40c66de615ce6d0cc737cb3b 100644 (file)
@@ -210,9 +210,7 @@ static int link_name_validate(const char *name,
            (z_local > 255) || (c_local > 4095) || (n_local > 4095) ||
            (z_peer  > 255) || (c_peer  > 4095) || (n_peer  > 4095) ||
            (if_local_len <= 1) || (if_local_len > TIPC_MAX_IF_NAME) ||
-           (if_peer_len  <= 1) || (if_peer_len  > TIPC_MAX_IF_NAME) ||
-           (strspn(if_local, tipc_alphabet) != (if_local_len - 1)) ||
-           (strspn(if_peer, tipc_alphabet) != (if_peer_len - 1)))
+           (if_peer_len  <= 1) || (if_peer_len  > TIPC_MAX_IF_NAME))
                return 0;
 
        /* return link name components, if necessary */
index 360c478b0b533511b344cf143c4c42197c9ccd76..46754779fd3d78537faab41c07c1758632cf78e4 100644 (file)
@@ -41,7 +41,7 @@
 #include "subscr.h"
 #include "port.h"
 
-static int tipc_nametbl_size = 1024;           /* must be a power of 2 */
+#define TIPC_NAMETBL_SIZE 1024         /* must be a power of 2 */
 
 /**
  * struct name_info - name sequence publication info
@@ -114,7 +114,7 @@ DEFINE_RWLOCK(tipc_nametbl_lock);
 
 static int hash(int x)
 {
-       return x & (tipc_nametbl_size - 1);
+       return x & (TIPC_NAMETBL_SIZE - 1);
 }
 
 /**
@@ -667,9 +667,9 @@ struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper,
 {
        struct publication *publ;
 
-       if (table.local_publ_count >= tipc_max_publications) {
+       if (table.local_publ_count >= TIPC_MAX_PUBLICATIONS) {
                pr_warn("Publication failed, local publication limit reached (%u)\n",
-                       tipc_max_publications);
+                       TIPC_MAX_PUBLICATIONS);
                return NULL;
        }
 
@@ -783,7 +783,7 @@ static int subseq_list(struct sub_seq *sseq, char *buf, int len, u32 depth,
                if (!list_is_last(&publ->zone_list, &info->zone_list))
                        ret += tipc_snprintf(buf + ret, len - ret,
                                             "\n%33s", " ");
-       };
+       }
 
        ret += tipc_snprintf(buf + ret, len - ret, "\n");
        return ret;
@@ -871,7 +871,7 @@ static int nametbl_list(char *buf, int len, u32 depth_info,
                ret += nametbl_header(buf, len, depth);
                lowbound = 0;
                upbound = ~0;
-               for (i = 0; i < tipc_nametbl_size; i++) {
+               for (i = 0; i < TIPC_NAMETBL_SIZE; i++) {
                        seq_head = &table.types[i];
                        hlist_for_each_entry(seq, seq_node, seq_head, ns_list) {
                                ret += nameseq_list(seq, buf + ret, len - ret,
@@ -935,7 +935,7 @@ struct sk_buff *tipc_nametbl_get(const void *req_tlv_area, int req_tlv_space)
 
 int tipc_nametbl_init(void)
 {
-       table.types = kcalloc(tipc_nametbl_size, sizeof(struct hlist_head),
+       table.types = kcalloc(TIPC_NAMETBL_SIZE, sizeof(struct hlist_head),
                              GFP_ATOMIC);
        if (!table.types)
                return -ENOMEM;
@@ -953,7 +953,7 @@ void tipc_nametbl_stop(void)
 
        /* Verify name table is empty, then release it */
        write_lock_bh(&tipc_nametbl_lock);
-       for (i = 0; i < tipc_nametbl_size; i++) {
+       for (i = 0; i < TIPC_NAMETBL_SIZE; i++) {
                if (hlist_empty(&table.types[i]))
                        continue;
                pr_err("nametbl_stop(): orphaned hash chain detected\n");
index 5b5cea259caf5efde1151318f498ca3924099eb1..7d305ecc09c2bf053376bb147c5cf917113022ae 100644 (file)
@@ -171,7 +171,7 @@ void tipc_net_route_msg(struct sk_buff *buf)
        tipc_link_send(buf, dnode, msg_link_selector(msg));
 }
 
-int tipc_net_start(u32 addr)
+void tipc_net_start(u32 addr)
 {
        char addr_string[16];
 
@@ -187,7 +187,6 @@ int tipc_net_start(u32 addr)
        pr_info("Started in network mode\n");
        pr_info("Own node address %s, network identity %u\n",
                tipc_addr_string_fill(addr_string, tipc_own_addr), tipc_net_id);
-       return 0;
 }
 
 void tipc_net_stop(void)
index 9eb4b9e220ebbb5146a2d455126b6b46d6cf48f6..079daadb3f7286471cd5146798f6b06328bf99ad 100644 (file)
@@ -41,7 +41,7 @@ extern rwlock_t tipc_net_lock;
 
 void tipc_net_route_msg(struct sk_buff *buf);
 
-int tipc_net_start(u32 addr);
+void tipc_net_start(u32 addr);
 void tipc_net_stop(void);
 
 #endif
index 47a839df27dc2387b0067ef38228872135860993..6675914dc592cd54b13e6320051f80296a7223c8 100644 (file)
@@ -62,7 +62,7 @@ static int handle_cmd(struct sk_buff *skb, struct genl_info *info)
                rep_nlh = nlmsg_hdr(rep_buf);
                memcpy(rep_nlh, req_nlh, hdr_space);
                rep_nlh->nlmsg_len = rep_buf->len;
-               genlmsg_unicast(&init_net, rep_buf, NETLINK_CB(skb).pid);
+               genlmsg_unicast(&init_net, rep_buf, NETLINK_CB(skb).portid);
        }
 
        return 0;
index 5ed5965eb0bee40ec7e475d814370426b1e45a88..0f7d0d007e22b9cbe94665c19b1d1ac2d6328f02 100644 (file)
@@ -304,9 +304,9 @@ static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s,
        }
 
        /* Refuse subscription if global limit exceeded */
-       if (atomic_read(&topsrv.subscription_count) >= tipc_max_subscriptions) {
+       if (atomic_read(&topsrv.subscription_count) >= TIPC_MAX_SUBSCRIPTIONS) {
                pr_warn("Subscription rejected, limit reached (%u)\n",
-                       tipc_max_subscriptions);
+                       TIPC_MAX_SUBSCRIPTIONS);
                subscr_terminate(subscriber);
                return NULL;
        }
index c5ee4ff613641b3f8439f1c9cb6b22d55a1ff7f2..5b5c876c80e9b543bd7b773c1a1f45f83f696e7b 100644 (file)
@@ -441,7 +441,7 @@ static int unix_release_sock(struct sock *sk, int embrion)
        /* ---- Socket is dead now and most probably destroyed ---- */
 
        /*
-        * Fixme: BSD difference: In BSD all sockets connected to use get
+        * Fixme: BSD difference: In BSD all sockets connected to us get
         *        ECONNRESET and we die on the spot. In Linux we behave
         *        like files and pipes do and wait for the last
         *        dereference.
@@ -481,7 +481,6 @@ static int unix_listen(struct socket *sock, int backlog)
        struct sock *sk = sock->sk;
        struct unix_sock *u = unix_sk(sk);
        struct pid *old_pid = NULL;
-       const struct cred *old_cred = NULL;
 
        err = -EOPNOTSUPP;
        if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
@@ -503,8 +502,6 @@ static int unix_listen(struct socket *sock, int backlog)
 out_unlock:
        unix_state_unlock(sk);
        put_pid(old_pid);
-       if (old_cred)
-               put_cred(old_cred);
 out:
        return err;
 }
@@ -2060,10 +2057,14 @@ static int unix_shutdown(struct socket *sock, int mode)
        struct sock *sk = sock->sk;
        struct sock *other;
 
-       mode = (mode+1)&(RCV_SHUTDOWN|SEND_SHUTDOWN);
-
-       if (!mode)
-               return 0;
+       if (mode < SHUT_RD || mode > SHUT_RDWR)
+               return -EINVAL;
+       /* This maps:
+        * SHUT_RD   (0) -> RCV_SHUTDOWN  (1)
+        * SHUT_WR   (1) -> SEND_SHUTDOWN (2)
+        * SHUT_RDWR (2) -> SHUTDOWN_MASK (3)
+        */
+       ++mode;
 
        unix_state_lock(sk);
        sk->sk_shutdown |= mode;
index 750b13408449ac018b3d8ca1bad4def492ffaea6..06748f108a5732e9f847cdffd0dafe0cb996c191 100644 (file)
@@ -110,12 +110,12 @@ static int sk_diag_show_rqlen(struct sock *sk, struct sk_buff *nlskb)
 }
 
 static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
-               u32 pid, u32 seq, u32 flags, int sk_ino)
+               u32 portid, u32 seq, u32 flags, int sk_ino)
 {
        struct nlmsghdr *nlh;
        struct unix_diag_msg *rep;
 
-       nlh = nlmsg_put(skb, pid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rep),
+       nlh = nlmsg_put(skb, portid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rep),
                        flags);
        if (!nlh)
                return -EMSGSIZE;
@@ -159,7 +159,7 @@ out_nlmsg_trim:
 }
 
 static int sk_diag_dump(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
-               u32 pid, u32 seq, u32 flags)
+               u32 portid, u32 seq, u32 flags)
 {
        int sk_ino;
 
@@ -170,7 +170,7 @@ static int sk_diag_dump(struct sock *sk, struct sk_buff *skb, struct unix_diag_r
        if (!sk_ino)
                return 0;
 
-       return sk_diag_fill(sk, skb, req, pid, seq, flags, sk_ino);
+       return sk_diag_fill(sk, skb, req, portid, seq, flags, sk_ino);
 }
 
 static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
@@ -200,7 +200,7 @@ static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
                        if (!(req->udiag_states & (1 << sk->sk_state)))
                                goto next;
                        if (sk_diag_dump(sk, skb, req,
-                                        NETLINK_CB(cb->skb).pid,
+                                        NETLINK_CB(cb->skb).portid,
                                         cb->nlh->nlmsg_seq,
                                         NLM_F_MULTI) < 0)
                                goto done;
@@ -267,7 +267,7 @@ again:
        if (!rep)
                goto out;
 
-       err = sk_diag_fill(sk, rep, req, NETLINK_CB(in_skb).pid,
+       err = sk_diag_fill(sk, rep, req, NETLINK_CB(in_skb).portid,
                           nlh->nlmsg_seq, 0, req->udiag_ino);
        if (err < 0) {
                nlmsg_free(rep);
@@ -277,7 +277,7 @@ again:
 
                goto again;
        }
-       err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).pid,
+       err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid,
                              MSG_DONTWAIT);
        if (err > 0)
                err = 0;
index d355f67d0cdd1ff64ac68f917c2eb8c11e8b1af2..2f876b9ee3443b05efc54445b747e7ee7101e50d 100644 (file)
@@ -105,7 +105,7 @@ cfg80211_get_chan_state(struct wireless_dev *wdev,
 
        ASSERT_WDEV_LOCK(wdev);
 
-       if (!netif_running(wdev->netdev))
+       if (wdev->netdev && !netif_running(wdev->netdev))
                return;
 
        switch (wdev->iftype) {
@@ -143,6 +143,11 @@ cfg80211_get_chan_state(struct wireless_dev *wdev,
        case NL80211_IFTYPE_WDS:
                /* these interface types don't really have a channel */
                return;
+       case NL80211_IFTYPE_P2P_DEVICE:
+               if (wdev->wiphy->features &
+                               NL80211_FEATURE_P2P_DEVICE_NEEDS_CHANNEL)
+                       *chanmode = CHAN_MODE_EXCLUSIVE;
+               return;
        case NL80211_IFTYPE_UNSPECIFIED:
        case NUM_NL80211_IFTYPES:
                WARN_ON(1);
index dcd64d5b07aadfba26a799506452a9b04fe8e7d3..443d4d7deea299c7e997045d22d8b2b146d2c877 100644 (file)
@@ -230,9 +230,24 @@ static int cfg80211_rfkill_set_block(void *data, bool blocked)
        rtnl_lock();
        mutex_lock(&rdev->devlist_mtx);
 
-       list_for_each_entry(wdev, &rdev->wdev_list, list)
-               if (wdev->netdev)
+       list_for_each_entry(wdev, &rdev->wdev_list, list) {
+               if (wdev->netdev) {
                        dev_close(wdev->netdev);
+                       continue;
+               }
+               /* otherwise, check iftype */
+               switch (wdev->iftype) {
+               case NL80211_IFTYPE_P2P_DEVICE:
+                       if (!wdev->p2p_started)
+                               break;
+                       rdev->ops->stop_p2p_device(&rdev->wiphy, wdev);
+                       wdev->p2p_started = false;
+                       rdev->opencount--;
+                       break;
+               default:
+                       break;
+               }
+       }
 
        mutex_unlock(&rdev->devlist_mtx);
        rtnl_unlock();
@@ -407,6 +422,11 @@ static int wiphy_verify_combinations(struct wiphy *wiphy)
                        if (WARN_ON(wiphy->software_iftypes & types))
                                return -EINVAL;
 
+                       /* Only a single P2P_DEVICE can be allowed */
+                       if (WARN_ON(types & BIT(NL80211_IFTYPE_P2P_DEVICE) &&
+                                   c->limits[j].max > 1))
+                               return -EINVAL;
+
                        cnt += c->limits[j].max;
                        /*
                         * Don't advertise an unsupported type
@@ -734,6 +754,35 @@ static void wdev_cleanup_work(struct work_struct *work)
        dev_put(wdev->netdev);
 }
 
+void cfg80211_unregister_wdev(struct wireless_dev *wdev)
+{
+       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+
+       ASSERT_RTNL();
+
+       if (WARN_ON(wdev->netdev))
+               return;
+
+       mutex_lock(&rdev->devlist_mtx);
+       list_del_rcu(&wdev->list);
+       rdev->devlist_generation++;
+
+       switch (wdev->iftype) {
+       case NL80211_IFTYPE_P2P_DEVICE:
+               if (!wdev->p2p_started)
+                       break;
+               rdev->ops->stop_p2p_device(&rdev->wiphy, wdev);
+               wdev->p2p_started = false;
+               rdev->opencount--;
+               break;
+       default:
+               WARN_ON_ONCE(1);
+               break;
+       }
+       mutex_unlock(&rdev->devlist_mtx);
+}
+EXPORT_SYMBOL(cfg80211_unregister_wdev);
+
 static struct device_type wiphy_type = {
        .name   = "wlan",
 };
index bc7430b54771af18e903ee1d263ede3b4eb1b78f..a343be4a52bd0e16b0fdb41e565f39f3d705c823 100644 (file)
@@ -55,7 +55,7 @@ struct cfg80211_registered_device {
        int opencount; /* also protected by devlist_mtx */
        wait_queue_head_t dev_wait;
 
-       u32 ap_beacons_nlpid;
+       u32 ap_beacons_nlportid;
 
        /* protected by RTNL only */
        int num_running_ifaces;
index 1cdb1d5e6b0f4bef5cc9f49a7cd499aa8c48924e..8016fee0752b0325a20409b7b1b93b5433c73872 100644 (file)
@@ -612,10 +612,21 @@ void cfg80211_del_sta(struct net_device *dev, const u8 *mac_addr, gfp_t gfp)
 }
 EXPORT_SYMBOL(cfg80211_del_sta);
 
+void cfg80211_conn_failed(struct net_device *dev, const u8 *mac_addr,
+                         enum nl80211_connect_failed_reason reason,
+                         gfp_t gfp)
+{
+       struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
+       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+
+       nl80211_send_conn_failed_event(rdev, dev, mac_addr, reason, gfp);
+}
+EXPORT_SYMBOL(cfg80211_conn_failed);
+
 struct cfg80211_mgmt_registration {
        struct list_head list;
 
-       u32 nlpid;
+       u32 nlportid;
 
        int match_len;
 
@@ -624,7 +635,7 @@ struct cfg80211_mgmt_registration {
        u8 match[];
 };
 
-int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_pid,
+int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_portid,
                                u16 frame_type, const u8 *match_data,
                                int match_len)
 {
@@ -672,7 +683,7 @@ int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_pid,
 
        memcpy(nreg->match, match_data, match_len);
        nreg->match_len = match_len;
-       nreg->nlpid = snd_pid;
+       nreg->nlportid = snd_portid;
        nreg->frame_type = cpu_to_le16(frame_type);
        list_add(&nreg->list, &wdev->mgmt_registrations);
 
@@ -685,7 +696,7 @@ int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_pid,
        return err;
 }
 
-void cfg80211_mlme_unregister_socket(struct wireless_dev *wdev, u32 nlpid)
+void cfg80211_mlme_unregister_socket(struct wireless_dev *wdev, u32 nlportid)
 {
        struct wiphy *wiphy = wdev->wiphy;
        struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
@@ -694,7 +705,7 @@ void cfg80211_mlme_unregister_socket(struct wireless_dev *wdev, u32 nlpid)
        spin_lock_bh(&wdev->mgmt_registrations_lock);
 
        list_for_each_entry_safe(reg, tmp, &wdev->mgmt_registrations, list) {
-               if (reg->nlpid != nlpid)
+               if (reg->nlportid != nlportid)
                        continue;
 
                if (rdev->ops->mgmt_frame_register) {
@@ -710,8 +721,8 @@ void cfg80211_mlme_unregister_socket(struct wireless_dev *wdev, u32 nlpid)
 
        spin_unlock_bh(&wdev->mgmt_registrations_lock);
 
-       if (nlpid == wdev->ap_unexpected_nlpid)
-               wdev->ap_unexpected_nlpid = 0;
+       if (nlportid == wdev->ap_unexpected_nlportid)
+               wdev->ap_unexpected_nlportid = 0;
 }
 
 void cfg80211_mlme_purge_registrations(struct wireless_dev *wdev)
@@ -736,7 +747,6 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
                          const u8 *buf, size_t len, bool no_cck,
                          bool dont_wait_for_ack, u64 *cookie)
 {
-       struct net_device *dev = wdev->netdev;
        const struct ieee80211_mgmt *mgmt;
        u16 stype;
 
@@ -796,7 +806,7 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
                case NL80211_IFTYPE_AP:
                case NL80211_IFTYPE_P2P_GO:
                case NL80211_IFTYPE_AP_VLAN:
-                       if (!ether_addr_equal(mgmt->bssid, dev->dev_addr))
+                       if (!ether_addr_equal(mgmt->bssid, wdev_address(wdev)))
                                err = -EINVAL;
                        break;
                case NL80211_IFTYPE_MESH_POINT:
@@ -809,6 +819,11 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
                         * cfg80211 doesn't track the stations
                         */
                        break;
+               case NL80211_IFTYPE_P2P_DEVICE:
+                       /*
+                        * fall through, P2P device only supports
+                        * public action frames
+                        */
                default:
                        err = -EOPNOTSUPP;
                        break;
@@ -819,7 +834,7 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
                        return err;
        }
 
-       if (!ether_addr_equal(mgmt->sa, dev->dev_addr))
+       if (!ether_addr_equal(mgmt->sa, wdev_address(wdev)))
                return -EINVAL;
 
        /* Transmit the Action frame as requested by user space */
@@ -868,7 +883,7 @@ bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq, int sig_mbm,
                /* found match! */
 
                /* Indicate the received Action frame to user space */
-               if (nl80211_send_mgmt(rdev, wdev, reg->nlpid,
+               if (nl80211_send_mgmt(rdev, wdev, reg->nlportid,
                                      freq, sig_mbm,
                                      buf, len, gfp))
                        continue;
index 1e37dbf00cb3f3850d3785827f896ca09339873b..0418a6d5c1a683f95542c64628e66f487ddea196 100644 (file)
@@ -496,11 +496,11 @@ static bool is_valid_ie_attr(const struct nlattr *attr)
 }
 
 /* message building helper */
-static inline void *nl80211hdr_put(struct sk_buff *skb, u32 pid, u32 seq,
+static inline void *nl80211hdr_put(struct sk_buff *skb, u32 portid, u32 seq,
                                   int flags, u8 cmd)
 {
        /* since there is no private header just add the generic one */
-       return genlmsg_put(skb, pid, seq, &nl80211_fam, flags, cmd);
+       return genlmsg_put(skb, portid, seq, &nl80211_fam, flags, cmd);
 }
 
 static int nl80211_msg_put_channel(struct sk_buff *msg,
@@ -851,7 +851,7 @@ nla_put_failure:
        return -ENOBUFS;
 }
 
-static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
+static int nl80211_send_wiphy(struct sk_buff *msg, u32 portid, u32 seq, int flags,
                              struct cfg80211_registered_device *dev)
 {
        void *hdr;
@@ -866,7 +866,7 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
        const struct ieee80211_txrx_stypes *mgmt_stypes =
                                dev->wiphy.mgmt_stypes;
 
-       hdr = nl80211hdr_put(msg, pid, seq, flags, NL80211_CMD_NEW_WIPHY);
+       hdr = nl80211hdr_put(msg, portid, seq, flags, NL80211_CMD_NEW_WIPHY);
        if (!hdr)
                return -1;
 
@@ -1100,6 +1100,7 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
                if (nla_put_u32(msg, i, NL80211_CMD_REGISTER_BEACONS))
                        goto nla_put_failure;
        }
+       CMD(start_p2p_device, START_P2P_DEVICE);
 
 #ifdef CONFIG_NL80211_TESTMODE
        CMD(testmode_cmd, TESTMODE);
@@ -1266,7 +1267,7 @@ static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb)
                        continue;
                if (++idx <= start)
                        continue;
-               if (nl80211_send_wiphy(skb, NETLINK_CB(cb->skb).pid,
+               if (nl80211_send_wiphy(skb, NETLINK_CB(cb->skb).portid,
                                       cb->nlh->nlmsg_seq, NLM_F_MULTI,
                                       dev) < 0) {
                        idx--;
@@ -1289,7 +1290,7 @@ static int nl80211_get_wiphy(struct sk_buff *skb, struct genl_info *info)
        if (!msg)
                return -ENOMEM;
 
-       if (nl80211_send_wiphy(msg, info->snd_pid, info->snd_seq, 0, dev) < 0) {
+       if (nl80211_send_wiphy(msg, info->snd_portid, info->snd_seq, 0, dev) < 0) {
                nlmsg_free(msg);
                return -ENOBUFS;
        }
@@ -1735,26 +1736,26 @@ static inline u64 wdev_id(struct wireless_dev *wdev)
               ((u64)wiphy_to_dev(wdev->wiphy)->wiphy_idx << 32);
 }
 
-static int nl80211_send_iface(struct sk_buff *msg, u32 pid, u32 seq, int flags,
+static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flags,
                              struct cfg80211_registered_device *rdev,
                              struct wireless_dev *wdev)
 {
        struct net_device *dev = wdev->netdev;
        void *hdr;
 
-       hdr = nl80211hdr_put(msg, pid, seq, flags, NL80211_CMD_NEW_INTERFACE);
+       hdr = nl80211hdr_put(msg, portid, seq, flags, NL80211_CMD_NEW_INTERFACE);
        if (!hdr)
                return -1;
 
        if (dev &&
            (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
-            nla_put_string(msg, NL80211_ATTR_IFNAME, dev->name) ||
-            nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, dev->dev_addr)))
+            nla_put_string(msg, NL80211_ATTR_IFNAME, dev->name)))
                goto nla_put_failure;
 
        if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
            nla_put_u32(msg, NL80211_ATTR_IFTYPE, wdev->iftype) ||
            nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev)) ||
+           nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, wdev_address(wdev)) ||
            nla_put_u32(msg, NL80211_ATTR_GENERATION,
                        rdev->devlist_generation ^
                        (cfg80211_rdev_list_generation << 2)))
@@ -1806,7 +1807,7 @@ static int nl80211_dump_interface(struct sk_buff *skb, struct netlink_callback *
                                if_idx++;
                                continue;
                        }
-                       if (nl80211_send_iface(skb, NETLINK_CB(cb->skb).pid,
+                       if (nl80211_send_iface(skb, NETLINK_CB(cb->skb).portid,
                                               cb->nlh->nlmsg_seq, NLM_F_MULTI,
                                               rdev, wdev) < 0) {
                                mutex_unlock(&rdev->devlist_mtx);
@@ -1837,7 +1838,7 @@ static int nl80211_get_interface(struct sk_buff *skb, struct genl_info *info)
        if (!msg)
                return -ENOMEM;
 
-       if (nl80211_send_iface(msg, info->snd_pid, info->snd_seq, 0,
+       if (nl80211_send_iface(msg, info->snd_portid, info->snd_seq, 0,
                               dev, wdev) < 0) {
                nlmsg_free(msg);
                return -ENOBUFS;
@@ -2021,8 +2022,10 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
                return PTR_ERR(wdev);
        }
 
-       if (type == NL80211_IFTYPE_MESH_POINT &&
-           info->attrs[NL80211_ATTR_MESH_ID]) {
+       switch (type) {
+       case NL80211_IFTYPE_MESH_POINT:
+               if (!info->attrs[NL80211_ATTR_MESH_ID])
+                       break;
                wdev_lock(wdev);
                BUILD_BUG_ON(IEEE80211_MAX_SSID_LEN !=
                             IEEE80211_MAX_MESH_ID_LEN);
@@ -2031,9 +2034,29 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
                memcpy(wdev->ssid, nla_data(info->attrs[NL80211_ATTR_MESH_ID]),
                       wdev->mesh_id_up_len);
                wdev_unlock(wdev);
+               break;
+       case NL80211_IFTYPE_P2P_DEVICE:
+               /*
+                * P2P Device doesn't have a netdev, so doesn't go
+                * through the netdev notifier and must be added here
+                */
+               mutex_init(&wdev->mtx);
+               INIT_LIST_HEAD(&wdev->event_list);
+               spin_lock_init(&wdev->event_lock);
+               INIT_LIST_HEAD(&wdev->mgmt_registrations);
+               spin_lock_init(&wdev->mgmt_registrations_lock);
+
+               mutex_lock(&rdev->devlist_mtx);
+               wdev->identifier = ++rdev->wdev_id;
+               list_add_rcu(&wdev->list, &rdev->wdev_list);
+               rdev->devlist_generation++;
+               mutex_unlock(&rdev->devlist_mtx);
+               break;
+       default:
+               break;
        }
 
-       if (nl80211_send_iface(msg, info->snd_pid, info->snd_seq, 0,
+       if (nl80211_send_iface(msg, info->snd_portid, info->snd_seq, 0,
                               rdev, wdev) < 0) {
                nlmsg_free(msg);
                return -ENOBUFS;
@@ -2168,7 +2191,7 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info)
        if (!msg)
                return -ENOMEM;
 
-       hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0,
+       hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
                             NL80211_CMD_NEW_KEY);
        if (IS_ERR(hdr))
                return PTR_ERR(hdr);
@@ -2746,7 +2769,7 @@ nla_put_failure:
        return false;
 }
 
-static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq,
+static int nl80211_send_station(struct sk_buff *msg, u32 portid, u32 seq,
                                int flags,
                                struct cfg80211_registered_device *rdev,
                                struct net_device *dev,
@@ -2755,7 +2778,7 @@ static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq,
        void *hdr;
        struct nlattr *sinfoattr, *bss_param;
 
-       hdr = nl80211hdr_put(msg, pid, seq, flags, NL80211_CMD_NEW_STATION);
+       hdr = nl80211hdr_put(msg, portid, seq, flags, NL80211_CMD_NEW_STATION);
        if (!hdr)
                return -1;
 
@@ -2908,7 +2931,7 @@ static int nl80211_dump_station(struct sk_buff *skb,
                        goto out_err;
 
                if (nl80211_send_station(skb,
-                               NETLINK_CB(cb->skb).pid,
+                               NETLINK_CB(cb->skb).portid,
                                cb->nlh->nlmsg_seq, NLM_F_MULTI,
                                dev, netdev, mac_addr,
                                &sinfo) < 0)
@@ -2954,7 +2977,7 @@ static int nl80211_get_station(struct sk_buff *skb, struct genl_info *info)
        if (!msg)
                return -ENOMEM;
 
-       if (nl80211_send_station(msg, info->snd_pid, info->snd_seq, 0,
+       if (nl80211_send_station(msg, info->snd_portid, info->snd_seq, 0,
                                 rdev, dev, mac_addr, &sinfo) < 0) {
                nlmsg_free(msg);
                return -ENOBUFS;
@@ -3280,7 +3303,7 @@ static int nl80211_del_station(struct sk_buff *skb, struct genl_info *info)
        return rdev->ops->del_station(&rdev->wiphy, dev, mac_addr);
 }
 
-static int nl80211_send_mpath(struct sk_buff *msg, u32 pid, u32 seq,
+static int nl80211_send_mpath(struct sk_buff *msg, u32 portid, u32 seq,
                                int flags, struct net_device *dev,
                                u8 *dst, u8 *next_hop,
                                struct mpath_info *pinfo)
@@ -3288,7 +3311,7 @@ static int nl80211_send_mpath(struct sk_buff *msg, u32 pid, u32 seq,
        void *hdr;
        struct nlattr *pinfoattr;
 
-       hdr = nl80211hdr_put(msg, pid, seq, flags, NL80211_CMD_NEW_STATION);
+       hdr = nl80211hdr_put(msg, portid, seq, flags, NL80211_CMD_NEW_STATION);
        if (!hdr)
                return -1;
 
@@ -3366,7 +3389,7 @@ static int nl80211_dump_mpath(struct sk_buff *skb,
                if (err)
                        goto out_err;
 
-               if (nl80211_send_mpath(skb, NETLINK_CB(cb->skb).pid,
+               if (nl80211_send_mpath(skb, NETLINK_CB(cb->skb).portid,
                                       cb->nlh->nlmsg_seq, NLM_F_MULTI,
                                       netdev, dst, next_hop,
                                       &pinfo) < 0)
@@ -3415,7 +3438,7 @@ static int nl80211_get_mpath(struct sk_buff *skb, struct genl_info *info)
        if (!msg)
                return -ENOMEM;
 
-       if (nl80211_send_mpath(msg, info->snd_pid, info->snd_seq, 0,
+       if (nl80211_send_mpath(msg, info->snd_portid, info->snd_seq, 0,
                                 dev, dst, next_hop, &pinfo) < 0) {
                nlmsg_free(msg);
                return -ENOBUFS;
@@ -3656,7 +3679,7 @@ static int nl80211_get_mesh_config(struct sk_buff *skb,
        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!msg)
                return -ENOMEM;
-       hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0,
+       hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
                             NL80211_CMD_GET_MESH_CONFIG);
        if (!hdr)
                goto out;
@@ -3975,7 +3998,7 @@ static int nl80211_get_reg(struct sk_buff *skb, struct genl_info *info)
                goto out;
        }
 
-       hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0,
+       hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
                             NL80211_CMD_GET_REG);
        if (!hdr)
                goto put_failure;
@@ -4593,7 +4616,7 @@ static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb,
 
        ASSERT_WDEV_LOCK(wdev);
 
-       hdr = nl80211hdr_put(msg, NETLINK_CB(cb->skb).pid, seq, flags,
+       hdr = nl80211hdr_put(msg, NETLINK_CB(cb->skb).portid, seq, flags,
                             NL80211_CMD_NEW_SCAN_RESULTS);
        if (!hdr)
                return -1;
@@ -4712,14 +4735,14 @@ static int nl80211_dump_scan(struct sk_buff *skb,
        return skb->len;
 }
 
-static int nl80211_send_survey(struct sk_buff *msg, u32 pid, u32 seq,
+static int nl80211_send_survey(struct sk_buff *msg, u32 portid, u32 seq,
                                int flags, struct net_device *dev,
                                struct survey_info *survey)
 {
        void *hdr;
        struct nlattr *infoattr;
 
-       hdr = nl80211hdr_put(msg, pid, seq, flags,
+       hdr = nl80211hdr_put(msg, portid, seq, flags,
                             NL80211_CMD_NEW_SURVEY_RESULTS);
        if (!hdr)
                return -ENOMEM;
@@ -4813,7 +4836,7 @@ static int nl80211_dump_survey(struct sk_buff *skb,
                }
 
                if (nl80211_send_survey(skb,
-                               NETLINK_CB(cb->skb).pid,
+                               NETLINK_CB(cb->skb).portid,
                                cb->nlh->nlmsg_seq, NLM_F_MULTI,
                                netdev,
                                &survey) < 0)
@@ -5428,7 +5451,7 @@ static int nl80211_testmode_dump(struct sk_buff *skb,
        }
 
        while (1) {
-               void *hdr = nl80211hdr_put(skb, NETLINK_CB(cb->skb).pid,
+               void *hdr = nl80211hdr_put(skb, NETLINK_CB(cb->skb).portid,
                                           cb->nlh->nlmsg_seq, NLM_F_MULTI,
                                           NL80211_CMD_TESTMODE);
                struct nlattr *tmdata;
@@ -5468,7 +5491,7 @@ static int nl80211_testmode_dump(struct sk_buff *skb,
 
 static struct sk_buff *
 __cfg80211_testmode_alloc_skb(struct cfg80211_registered_device *rdev,
-                             int approxlen, u32 pid, u32 seq, gfp_t gfp)
+                             int approxlen, u32 portid, u32 seq, gfp_t gfp)
 {
        struct sk_buff *skb;
        void *hdr;
@@ -5478,7 +5501,7 @@ __cfg80211_testmode_alloc_skb(struct cfg80211_registered_device *rdev,
        if (!skb)
                return NULL;
 
-       hdr = nl80211hdr_put(skb, pid, seq, 0, NL80211_CMD_TESTMODE);
+       hdr = nl80211hdr_put(skb, portid, seq, 0, NL80211_CMD_TESTMODE);
        if (!hdr) {
                kfree_skb(skb);
                return NULL;
@@ -5508,7 +5531,7 @@ struct sk_buff *cfg80211_testmode_alloc_reply_skb(struct wiphy *wiphy,
                return NULL;
 
        return __cfg80211_testmode_alloc_skb(rdev, approxlen,
-                               rdev->testmode_info->snd_pid,
+                               rdev->testmode_info->snd_portid,
                                rdev->testmode_info->snd_seq,
                                GFP_KERNEL);
 }
@@ -5846,7 +5869,7 @@ static int nl80211_remain_on_channel(struct sk_buff *skb,
        if (!msg)
                return -ENOMEM;
 
-       hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0,
+       hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
                             NL80211_CMD_REMAIN_ON_CHANNEL);
 
        if (IS_ERR(hdr)) {
@@ -6055,6 +6078,7 @@ static int nl80211_register_mgmt(struct sk_buff *skb, struct genl_info *info)
        case NL80211_IFTYPE_AP_VLAN:
        case NL80211_IFTYPE_MESH_POINT:
        case NL80211_IFTYPE_P2P_GO:
+       case NL80211_IFTYPE_P2P_DEVICE:
                break;
        default:
                return -EOPNOTSUPP;
@@ -6064,7 +6088,7 @@ static int nl80211_register_mgmt(struct sk_buff *skb, struct genl_info *info)
        if (!rdev->ops->mgmt_tx)
                return -EOPNOTSUPP;
 
-       return cfg80211_mlme_register_mgmt(wdev, info->snd_pid, frame_type,
+       return cfg80211_mlme_register_mgmt(wdev, info->snd_portid, frame_type,
                        nla_data(info->attrs[NL80211_ATTR_FRAME_MATCH]),
                        nla_len(info->attrs[NL80211_ATTR_FRAME_MATCH]));
 }
@@ -6101,6 +6125,7 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
        case NL80211_IFTYPE_AP_VLAN:
        case NL80211_IFTYPE_MESH_POINT:
        case NL80211_IFTYPE_P2P_GO:
+       case NL80211_IFTYPE_P2P_DEVICE:
                break;
        default:
                return -EOPNOTSUPP;
@@ -6144,7 +6169,7 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
                if (!msg)
                        return -ENOMEM;
 
-               hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0,
+               hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
                                     NL80211_CMD_FRAME);
 
                if (IS_ERR(hdr)) {
@@ -6197,6 +6222,7 @@ static int nl80211_tx_mgmt_cancel_wait(struct sk_buff *skb, struct genl_info *in
        case NL80211_IFTYPE_AP:
        case NL80211_IFTYPE_AP_VLAN:
        case NL80211_IFTYPE_P2P_GO:
+       case NL80211_IFTYPE_P2P_DEVICE:
                break;
        default:
                return -EOPNOTSUPP;
@@ -6260,7 +6286,7 @@ static int nl80211_get_power_save(struct sk_buff *skb, struct genl_info *info)
        if (!msg)
                return -ENOMEM;
 
-       hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0,
+       hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
                             NL80211_CMD_GET_POWER_SAVE);
        if (!hdr) {
                err = -ENOBUFS;
@@ -6462,7 +6488,7 @@ static int nl80211_get_wowlan(struct sk_buff *skb, struct genl_info *info)
        if (!msg)
                return -ENOMEM;
 
-       hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0,
+       hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
                             NL80211_CMD_GET_WOWLAN);
        if (!hdr)
                goto nla_put_failure;
@@ -6736,10 +6762,10 @@ static int nl80211_register_unexpected_frame(struct sk_buff *skb,
            wdev->iftype != NL80211_IFTYPE_P2P_GO)
                return -EINVAL;
 
-       if (wdev->ap_unexpected_nlpid)
+       if (wdev->ap_unexpected_nlportid)
                return -EBUSY;
 
-       wdev->ap_unexpected_nlpid = info->snd_pid;
+       wdev->ap_unexpected_nlportid = info->snd_portid;
        return 0;
 }
 
@@ -6769,7 +6795,7 @@ static int nl80211_probe_client(struct sk_buff *skb,
        if (!msg)
                return -ENOMEM;
 
-       hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0,
+       hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
                             NL80211_CMD_PROBE_CLIENT);
 
        if (IS_ERR(hdr)) {
@@ -6804,10 +6830,72 @@ static int nl80211_register_beacons(struct sk_buff *skb, struct genl_info *info)
        if (!(rdev->wiphy.flags & WIPHY_FLAG_REPORTS_OBSS))
                return -EOPNOTSUPP;
 
-       if (rdev->ap_beacons_nlpid)
+       if (rdev->ap_beacons_nlportid)
                return -EBUSY;
 
-       rdev->ap_beacons_nlpid = info->snd_pid;
+       rdev->ap_beacons_nlportid = info->snd_portid;
+
+       return 0;
+}
+
+static int nl80211_start_p2p_device(struct sk_buff *skb, struct genl_info *info)
+{
+       struct cfg80211_registered_device *rdev = info->user_ptr[0];
+       struct wireless_dev *wdev = info->user_ptr[1];
+       int err;
+
+       if (!rdev->ops->start_p2p_device)
+               return -EOPNOTSUPP;
+
+       if (wdev->iftype != NL80211_IFTYPE_P2P_DEVICE)
+               return -EOPNOTSUPP;
+
+       if (wdev->p2p_started)
+               return 0;
+
+       mutex_lock(&rdev->devlist_mtx);
+       err = cfg80211_can_add_interface(rdev, wdev->iftype);
+       mutex_unlock(&rdev->devlist_mtx);
+       if (err)
+               return err;
+
+       err = rdev->ops->start_p2p_device(&rdev->wiphy, wdev);
+       if (err)
+               return err;
+
+       wdev->p2p_started = true;
+       mutex_lock(&rdev->devlist_mtx);
+       rdev->opencount++;
+       mutex_unlock(&rdev->devlist_mtx);
+
+       return 0;
+}
+
+static int nl80211_stop_p2p_device(struct sk_buff *skb, struct genl_info *info)
+{
+       struct cfg80211_registered_device *rdev = info->user_ptr[0];
+       struct wireless_dev *wdev = info->user_ptr[1];
+
+       if (wdev->iftype != NL80211_IFTYPE_P2P_DEVICE)
+               return -EOPNOTSUPP;
+
+       if (!rdev->ops->stop_p2p_device)
+               return -EOPNOTSUPP;
+
+       if (!wdev->p2p_started)
+               return 0;
+
+       rdev->ops->stop_p2p_device(&rdev->wiphy, wdev);
+       wdev->p2p_started = false;
+
+       mutex_lock(&rdev->devlist_mtx);
+       rdev->opencount--;
+       mutex_unlock(&rdev->devlist_mtx);
+
+       if (WARN_ON(rdev->scan_req && rdev->scan_req->wdev == wdev)) {
+               rdev->scan_req->aborted = true;
+               ___cfg80211_scan_done(rdev, true);
+       }
 
        return 0;
 }
@@ -6819,7 +6907,7 @@ static int nl80211_register_beacons(struct sk_buff *skb, struct genl_info *info)
 #define NL80211_FLAG_NEED_NETDEV_UP    (NL80211_FLAG_NEED_NETDEV |\
                                         NL80211_FLAG_CHECK_NETDEV_UP)
 #define NL80211_FLAG_NEED_WDEV         0x10
-/* If a netdev is associated, it must be UP */
+/* If a netdev is associated, it must be UP, P2P must be started */
 #define NL80211_FLAG_NEED_WDEV_UP      (NL80211_FLAG_NEED_WDEV |\
                                         NL80211_FLAG_CHECK_NETDEV_UP)
 
@@ -6880,6 +6968,13 @@ static int nl80211_pre_doit(struct genl_ops *ops, struct sk_buff *skb,
                        }
 
                        dev_hold(dev);
+               } else if (ops->internal_flags & NL80211_FLAG_CHECK_NETDEV_UP) {
+                       if (!wdev->p2p_started) {
+                               mutex_unlock(&cfg80211_mutex);
+                               if (rtnl)
+                                       rtnl_unlock();
+                               return -ENETDOWN;
+                       }
                }
 
                cfg80211_lock_rdev(rdev);
@@ -7441,7 +7536,22 @@ static struct genl_ops nl80211_ops[] = {
                .internal_flags = NL80211_FLAG_NEED_NETDEV |
                                  NL80211_FLAG_NEED_RTNL,
        },
-
+       {
+               .cmd = NL80211_CMD_START_P2P_DEVICE,
+               .doit = nl80211_start_p2p_device,
+               .policy = nl80211_policy,
+               .flags = GENL_ADMIN_PERM,
+               .internal_flags = NL80211_FLAG_NEED_WDEV |
+                                 NL80211_FLAG_NEED_RTNL,
+       },
+       {
+               .cmd = NL80211_CMD_STOP_P2P_DEVICE,
+               .doit = nl80211_stop_p2p_device,
+               .policy = nl80211_policy,
+               .flags = GENL_ADMIN_PERM,
+               .internal_flags = NL80211_FLAG_NEED_WDEV_UP |
+                                 NL80211_FLAG_NEED_RTNL,
+       },
 };
 
 static struct genl_multicast_group nl80211_mlme_mcgrp = {
@@ -7520,12 +7630,12 @@ static int nl80211_add_scan_req(struct sk_buff *msg,
 static int nl80211_send_scan_msg(struct sk_buff *msg,
                                 struct cfg80211_registered_device *rdev,
                                 struct wireless_dev *wdev,
-                                u32 pid, u32 seq, int flags,
+                                u32 portid, u32 seq, int flags,
                                 u32 cmd)
 {
        void *hdr;
 
-       hdr = nl80211hdr_put(msg, pid, seq, flags, cmd);
+       hdr = nl80211hdr_put(msg, portid, seq, flags, cmd);
        if (!hdr)
                return -1;
 
@@ -7549,11 +7659,11 @@ static int
 nl80211_send_sched_scan_msg(struct sk_buff *msg,
                            struct cfg80211_registered_device *rdev,
                            struct net_device *netdev,
-                           u32 pid, u32 seq, int flags, u32 cmd)
+                           u32 portid, u32 seq, int flags, u32 cmd)
 {
        void *hdr;
 
-       hdr = nl80211hdr_put(msg, pid, seq, flags, cmd);
+       hdr = nl80211hdr_put(msg, portid, seq, flags, cmd);
        if (!hdr)
                return -1;
 
@@ -8254,6 +8364,40 @@ void nl80211_send_sta_del_event(struct cfg80211_registered_device *rdev,
        nlmsg_free(msg);
 }
 
+void nl80211_send_conn_failed_event(struct cfg80211_registered_device *rdev,
+                                   struct net_device *dev, const u8 *mac_addr,
+                                   enum nl80211_connect_failed_reason reason,
+                                   gfp_t gfp)
+{
+       struct sk_buff *msg;
+       void *hdr;
+
+       msg = nlmsg_new(NLMSG_GOODSIZE, gfp);
+       if (!msg)
+               return;
+
+       hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_CONN_FAILED);
+       if (!hdr) {
+               nlmsg_free(msg);
+               return;
+       }
+
+       if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
+           nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr) ||
+           nla_put_u32(msg, NL80211_ATTR_CONN_FAILED_REASON, reason))
+               goto nla_put_failure;
+
+       genlmsg_end(msg, hdr);
+
+       genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
+                               nl80211_mlme_mcgrp.id, gfp);
+       return;
+
+ nla_put_failure:
+       genlmsg_cancel(msg, hdr);
+       nlmsg_free(msg);
+}
+
 static bool __nl80211_unexpected_frame(struct net_device *dev, u8 cmd,
                                       const u8 *addr, gfp_t gfp)
 {
@@ -8262,9 +8406,9 @@ static bool __nl80211_unexpected_frame(struct net_device *dev, u8 cmd,
        struct sk_buff *msg;
        void *hdr;
        int err;
-       u32 nlpid = ACCESS_ONCE(wdev->ap_unexpected_nlpid);
+       u32 nlportid = ACCESS_ONCE(wdev->ap_unexpected_nlportid);
 
-       if (!nlpid)
+       if (!nlportid)
                return false;
 
        msg = nlmsg_new(100, gfp);
@@ -8288,7 +8432,7 @@ static bool __nl80211_unexpected_frame(struct net_device *dev, u8 cmd,
                return true;
        }
 
-       genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlpid);
+       genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlportid);
        return true;
 
  nla_put_failure:
@@ -8312,7 +8456,7 @@ bool nl80211_unexpected_4addr_frame(struct net_device *dev,
 }
 
 int nl80211_send_mgmt(struct cfg80211_registered_device *rdev,
-                     struct wireless_dev *wdev, u32 nlpid,
+                     struct wireless_dev *wdev, u32 nlportid,
                      int freq, int sig_dbm,
                      const u8 *buf, size_t len, gfp_t gfp)
 {
@@ -8341,7 +8485,7 @@ int nl80211_send_mgmt(struct cfg80211_registered_device *rdev,
 
        genlmsg_end(msg, hdr);
 
-       return genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlpid);
+       return genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlportid);
 
  nla_put_failure:
        genlmsg_cancel(msg, hdr);
@@ -8696,9 +8840,9 @@ void cfg80211_report_obss_beacon(struct wiphy *wiphy,
        struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
        struct sk_buff *msg;
        void *hdr;
-       u32 nlpid = ACCESS_ONCE(rdev->ap_beacons_nlpid);
+       u32 nlportid = ACCESS_ONCE(rdev->ap_beacons_nlportid);
 
-       if (!nlpid)
+       if (!nlportid)
                return;
 
        msg = nlmsg_new(len + 100, gfp);
@@ -8721,7 +8865,7 @@ void cfg80211_report_obss_beacon(struct wiphy *wiphy,
 
        genlmsg_end(msg, hdr);
 
-       genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlpid);
+       genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlportid);
        return;
 
  nla_put_failure:
@@ -8745,9 +8889,9 @@ static int nl80211_netlink_notify(struct notifier_block * nb,
 
        list_for_each_entry_rcu(rdev, &cfg80211_rdev_list, list) {
                list_for_each_entry_rcu(wdev, &rdev->wdev_list, list)
-                       cfg80211_mlme_unregister_socket(wdev, notify->pid);
-               if (rdev->ap_beacons_nlpid == notify->pid)
-                       rdev->ap_beacons_nlpid = 0;
+                       cfg80211_mlme_unregister_socket(wdev, notify->portid);
+               if (rdev->ap_beacons_nlportid == notify->portid)
+                       rdev->ap_beacons_nlportid = 0;
        }
 
        rcu_read_unlock();
index 9f2616fffb4001958600d72dac222ff7edc7a9b4..f6153516068c30dce59e4283d9abfc0591adebfb 100644 (file)
@@ -91,6 +91,11 @@ void nl80211_send_sta_del_event(struct cfg80211_registered_device *rdev,
                                struct net_device *dev, const u8 *mac_addr,
                                gfp_t gfp);
 
+void nl80211_send_conn_failed_event(struct cfg80211_registered_device *rdev,
+                                   struct net_device *dev, const u8 *mac_addr,
+                                   enum nl80211_connect_failed_reason reason,
+                                   gfp_t gfp);
+
 int nl80211_send_mgmt(struct cfg80211_registered_device *rdev,
                      struct wireless_dev *wdev, u32 nlpid,
                      int freq, int sig_dbm,
index c4ad7958af52e84754b073915d009531299b9864..7d604c06c3dc38d1155366a52f184971be1197e3 100644 (file)
@@ -41,6 +41,8 @@ static const struct radiotap_align_size rtap_namespace_sizes[] = {
        [IEEE80211_RADIOTAP_TX_FLAGS] = { .align = 2, .size = 2, },
        [IEEE80211_RADIOTAP_RTS_RETRIES] = { .align = 1, .size = 1, },
        [IEEE80211_RADIOTAP_DATA_RETRIES] = { .align = 1, .size = 1, },
+       [IEEE80211_RADIOTAP_MCS] = { .align = 1, .size = 3, },
+       [IEEE80211_RADIOTAP_AMPDU_STATUS] = { .align = 4, .size = 8, },
        /*
         * add more here as they are defined in radiotap.h
         */
index 72d170ca340665ea5c893cc1bec2316219abbf7c..3b8cbbc214db563ba962ecda1e49fe6929c263cc 100644 (file)
@@ -510,9 +510,11 @@ static bool reg_does_bw_fit(const struct ieee80211_freq_range *freq_range,
  *
  * This lets us know if a specific frequency rule is or is not relevant to
  * a specific frequency's band. Bands are device specific and artificial
- * definitions (the "2.4 GHz band" and the "5 GHz band"), however it is
- * safe for now to assume that a frequency rule should not be part of a
- * frequency's band if the start freq or end freq are off by more than 2 GHz.
+ * definitions (the "2.4 GHz band", the "5 GHz band" and the "60GHz band"),
+ * however it is safe for now to assume that a frequency rule should not be
+ * part of a frequency's band if the start freq or end freq are off by more
+ * than 2 GHz for the 2.4 and 5 GHz bands, and by more than 10 GHz for the
+ * 60 GHz band.
  * This resolution can be lowered and should be considered as we add
  * regulatory rule support for other "bands".
  **/
@@ -520,9 +522,16 @@ static bool freq_in_rule_band(const struct ieee80211_freq_range *freq_range,
        u32 freq_khz)
 {
 #define ONE_GHZ_IN_KHZ 1000000
-       if (abs(freq_khz - freq_range->start_freq_khz) <= (2 * ONE_GHZ_IN_KHZ))
+       /*
+        * From 802.11ad: directional multi-gigabit (DMG):
+        * Pertaining to operation in a frequency band containing a channel
+        * with the Channel starting frequency above 45 GHz.
+        */
+       u32 limit = freq_khz > 45 * ONE_GHZ_IN_KHZ ?
+                       10 * ONE_GHZ_IN_KHZ : 2 * ONE_GHZ_IN_KHZ;
+       if (abs(freq_khz - freq_range->start_freq_khz) <= limit)
                return true;
-       if (abs(freq_khz - freq_range->end_freq_khz) <= (2 * ONE_GHZ_IN_KHZ))
+       if (abs(freq_khz - freq_range->end_freq_khz) <= limit)
                return true;
        return false;
 #undef ONE_GHZ_IN_KHZ
@@ -1955,8 +1964,7 @@ static void restore_regulatory_settings(bool reset_user)
                        if (reg_request->initiator !=
                            NL80211_REGDOM_SET_BY_USER)
                                continue;
-                       list_del(&reg_request->list);
-                       list_add_tail(&reg_request->list, &tmp_reg_req_list);
+                       list_move_tail(&reg_request->list, &tmp_reg_req_list);
                }
        }
        spin_unlock(&reg_requests_lock);
@@ -2015,8 +2023,7 @@ static void restore_regulatory_settings(bool reset_user)
                              "into the queue\n",
                              reg_request->alpha2[0],
                              reg_request->alpha2[1]);
-               list_del(&reg_request->list);
-               list_add_tail(&reg_request->list, &reg_requests_list);
+               list_move_tail(&reg_request->list, &reg_requests_list);
        }
        spin_unlock(&reg_requests_lock);
 
@@ -2201,7 +2208,6 @@ static void print_regdomain_info(const struct ieee80211_regdomain *rd)
 static int __set_regdom(const struct ieee80211_regdomain *rd)
 {
        const struct ieee80211_regdomain *intersected_rd = NULL;
-       struct cfg80211_registered_device *rdev = NULL;
        struct wiphy *request_wiphy;
        /* Some basic sanity checks first */
 
@@ -2313,24 +2319,7 @@ static int __set_regdom(const struct ieee80211_regdomain *rd)
                return 0;
        }
 
-       if (!intersected_rd)
-               return -EINVAL;
-
-       rdev = wiphy_to_dev(request_wiphy);
-
-       rdev->country_ie_alpha2[0] = rd->alpha2[0];
-       rdev->country_ie_alpha2[1] = rd->alpha2[1];
-       rdev->env = last_request->country_ie_env;
-
-       BUG_ON(intersected_rd == rd);
-
-       kfree(rd);
-       rd = NULL;
-
-       reset_regdomains(false);
-       cfg80211_regdomain = intersected_rd;
-
-       return 0;
+       return -EINVAL;
 }
 
 
index 848523a2b22f02c9a8975ff942073c0794671b54..9730c9862bdcfd624af15641deefe95c0b12c8de 100644 (file)
@@ -815,7 +815,7 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy,
                return NULL;
 
        if (WARN_ON(wiphy->signal_type == CFG80211_SIGNAL_TYPE_UNSPEC &&
-                   (signal < 0 || signal > 100)))
+                   (signal < 0 || signal > 100)))
                return NULL;
 
        if (WARN_ON(len < offsetof(struct ieee80211_mgmt, u.probe_resp.variable)))
index 994e2f0cc7a8a12fc34cbe61fdee97afde3df10b..ef35f4ef2aa623d16f3556a5e3f4709fba363db4 100644 (file)
@@ -684,22 +684,10 @@ EXPORT_SYMBOL(cfg80211_classify8021d);
 
 const u8 *ieee80211_bss_get_ie(struct cfg80211_bss *bss, u8 ie)
 {
-       u8 *end, *pos;
-
-       pos = bss->information_elements;
-       if (pos == NULL)
+       if (bss->information_elements == NULL)
                return NULL;
-       end = pos + bss->len_information_elements;
-
-       while (pos + 1 < end) {
-               if (pos + 2 + pos[1] > end)
-                       break;
-               if (pos[0] == ie)
-                       return pos;
-               pos += 2 + pos[1];
-       }
-
-       return NULL;
+       return cfg80211_find_ie(ie, bss->information_elements,
+                                bss->len_information_elements);
 }
 EXPORT_SYMBOL(ieee80211_bss_get_ie);
 
@@ -812,6 +800,10 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
        if (otype == NL80211_IFTYPE_AP_VLAN)
                return -EOPNOTSUPP;
 
+       /* cannot change into P2P device type */
+       if (ntype == NL80211_IFTYPE_P2P_DEVICE)
+               return -EOPNOTSUPP;
+
        if (!rdev->ops->change_virtual_intf ||
            !(rdev->wiphy.interface_modes & (1 << ntype)))
                return -EOPNOTSUPP;
@@ -889,6 +881,9 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
                case NUM_NL80211_IFTYPES:
                        /* not happening */
                        break;
+               case NL80211_IFTYPE_P2P_DEVICE:
+                       WARN_ON(1);
+                       break;
                }
        }
 
@@ -1053,8 +1048,15 @@ int cfg80211_can_use_iftype_chan(struct cfg80211_registered_device *rdev,
        list_for_each_entry(wdev_iter, &rdev->wdev_list, list) {
                if (wdev_iter == wdev)
                        continue;
-               if (!netif_running(wdev_iter->netdev))
-                       continue;
+               if (wdev_iter->netdev) {
+                       if (!netif_running(wdev_iter->netdev))
+                               continue;
+               } else if (wdev_iter->iftype == NL80211_IFTYPE_P2P_DEVICE) {
+                       if (!wdev_iter->p2p_started)
+                               continue;
+               } else {
+                       WARN_ON(1);
+               }
 
                if (rdev->wiphy.software_iftypes & BIT(wdev_iter->iftype))
                        continue;
index b0eb7aa49b60a7c87d242213a4630c5d3402310f..c8717c1d082e702f9b071c480e873b408b400daf 100644 (file)
@@ -478,13 +478,13 @@ void wireless_send_event(struct net_device *      dev,
        if (descr->header_type == IW_HEADER_TYPE_POINT) {
                /* Check if number of token fits within bounds */
                if (wrqu->data.length > descr->max_tokens) {
-                       netdev_err(dev, "(WE) : Wireless Event too big (%d)\n",
-                                  wrqu->data.length);
+                       netdev_err(dev, "(WE) : Wireless Event (cmd=0x%04X) too big (%d)\n",
+                                  cmd, wrqu->data.length);
                        return;
                }
                if (wrqu->data.length < descr->min_tokens) {
-                       netdev_err(dev, "(WE) : Wireless Event too small (%d)\n",
-                                  wrqu->data.length);
+                       netdev_err(dev, "(WE) : Wireless Event (cmd=0x%04X) too small (%d)\n",
+                                  cmd, wrqu->data.length);
                        return;
                }
                /* Calculate extra_len - extra is NULL for restricted events */
index 387848e900783f6e6862c6d2c0b52ab45bbcfe50..41eabc46f110d9cb607cb24f8af68054b91bcccd 100644 (file)
@@ -42,13 +42,12 @@ static DEFINE_SPINLOCK(xfrm_policy_sk_bundle_lock);
 static struct dst_entry *xfrm_policy_sk_bundles;
 static DEFINE_RWLOCK(xfrm_policy_lock);
 
-static DEFINE_RWLOCK(xfrm_policy_afinfo_lock);
-static struct xfrm_policy_afinfo *xfrm_policy_afinfo[NPROTO];
+static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock);
+static struct xfrm_policy_afinfo __rcu *xfrm_policy_afinfo[NPROTO]
+                                               __read_mostly;
 
 static struct kmem_cache *xfrm_dst_cache __read_mostly;
 
-static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family);
-static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo);
 static void xfrm_init_pmtu(struct dst_entry *dst);
 static int stale_bundle(struct dst_entry *dst);
 static int xfrm_bundle_ok(struct xfrm_dst *xdst);
@@ -95,6 +94,24 @@ bool xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl
        return false;
 }
 
+static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
+{
+       struct xfrm_policy_afinfo *afinfo;
+
+       if (unlikely(family >= NPROTO))
+               return NULL;
+       rcu_read_lock();
+       afinfo = rcu_dereference(xfrm_policy_afinfo[family]);
+       if (unlikely(!afinfo))
+               rcu_read_unlock();
+       return afinfo;
+}
+
+static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo)
+{
+       rcu_read_unlock();
+}
+
 static inline struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos,
                                                  const xfrm_address_t *saddr,
                                                  const xfrm_address_t *daddr,
@@ -2421,7 +2438,7 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
                return -EINVAL;
        if (unlikely(afinfo->family >= NPROTO))
                return -EAFNOSUPPORT;
-       write_lock_bh(&xfrm_policy_afinfo_lock);
+       spin_lock(&xfrm_policy_afinfo_lock);
        if (unlikely(xfrm_policy_afinfo[afinfo->family] != NULL))
                err = -ENOBUFS;
        else {
@@ -2442,9 +2459,9 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
                        dst_ops->neigh_lookup = xfrm_neigh_lookup;
                if (likely(afinfo->garbage_collect == NULL))
                        afinfo->garbage_collect = xfrm_garbage_collect_deferred;
-               xfrm_policy_afinfo[afinfo->family] = afinfo;
+               rcu_assign_pointer(xfrm_policy_afinfo[afinfo->family], afinfo);
        }
-       write_unlock_bh(&xfrm_policy_afinfo_lock);
+       spin_unlock(&xfrm_policy_afinfo_lock);
 
        rtnl_lock();
        for_each_net(net) {
@@ -2477,21 +2494,26 @@ int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
                return -EINVAL;
        if (unlikely(afinfo->family >= NPROTO))
                return -EAFNOSUPPORT;
-       write_lock_bh(&xfrm_policy_afinfo_lock);
+       spin_lock(&xfrm_policy_afinfo_lock);
        if (likely(xfrm_policy_afinfo[afinfo->family] != NULL)) {
                if (unlikely(xfrm_policy_afinfo[afinfo->family] != afinfo))
                        err = -EINVAL;
-               else {
-                       struct dst_ops *dst_ops = afinfo->dst_ops;
-                       xfrm_policy_afinfo[afinfo->family] = NULL;
-                       dst_ops->kmem_cachep = NULL;
-                       dst_ops->check = NULL;
-                       dst_ops->negative_advice = NULL;
-                       dst_ops->link_failure = NULL;
-                       afinfo->garbage_collect = NULL;
-               }
+               else
+                       RCU_INIT_POINTER(xfrm_policy_afinfo[afinfo->family],
+                                        NULL);
+       }
+       spin_unlock(&xfrm_policy_afinfo_lock);
+       if (!err) {
+               struct dst_ops *dst_ops = afinfo->dst_ops;
+
+               synchronize_rcu();
+
+               dst_ops->kmem_cachep = NULL;
+               dst_ops->check = NULL;
+               dst_ops->negative_advice = NULL;
+               dst_ops->link_failure = NULL;
+               afinfo->garbage_collect = NULL;
        }
-       write_unlock_bh(&xfrm_policy_afinfo_lock);
        return err;
 }
 EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);
@@ -2500,33 +2522,16 @@ static void __net_init xfrm_dst_ops_init(struct net *net)
 {
        struct xfrm_policy_afinfo *afinfo;
 
-       read_lock_bh(&xfrm_policy_afinfo_lock);
-       afinfo = xfrm_policy_afinfo[AF_INET];
+       rcu_read_lock();
+       afinfo = rcu_dereference(xfrm_policy_afinfo[AF_INET]);
        if (afinfo)
                net->xfrm.xfrm4_dst_ops = *afinfo->dst_ops;
 #if IS_ENABLED(CONFIG_IPV6)
-       afinfo = xfrm_policy_afinfo[AF_INET6];
+       afinfo = rcu_dereference(xfrm_policy_afinfo[AF_INET6]);
        if (afinfo)
                net->xfrm.xfrm6_dst_ops = *afinfo->dst_ops;
 #endif
-       read_unlock_bh(&xfrm_policy_afinfo_lock);
-}
-
-static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
-{
-       struct xfrm_policy_afinfo *afinfo;
-       if (unlikely(family >= NPROTO))
-               return NULL;
-       read_lock(&xfrm_policy_afinfo_lock);
-       afinfo = xfrm_policy_afinfo[family];
-       if (unlikely(!afinfo))
-               read_unlock(&xfrm_policy_afinfo_lock);
-       return afinfo;
-}
-
-static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo)
-{
-       read_unlock(&xfrm_policy_afinfo_lock);
+       rcu_read_unlock();
 }
 
 static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
@@ -2633,12 +2638,12 @@ static void xfrm_policy_fini(struct net *net)
 
        flush_work(&net->xfrm.policy_hash_work);
 #ifdef CONFIG_XFRM_SUB_POLICY
-       audit_info.loginuid = -1;
+       audit_info.loginuid = INVALID_UID;
        audit_info.sessionid = -1;
        audit_info.secid = 0;
        xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, &audit_info);
 #endif
-       audit_info.loginuid = -1;
+       audit_info.loginuid = INVALID_UID;
        audit_info.sessionid = -1;
        audit_info.secid = 0;
        xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, &audit_info);
@@ -2745,7 +2750,7 @@ static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
 }
 
 void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
-                          uid_t auid, u32 sessionid, u32 secid)
+                          kuid_t auid, u32 sessionid, u32 secid)
 {
        struct audit_buffer *audit_buf;
 
@@ -2760,7 +2765,7 @@ void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
 EXPORT_SYMBOL_GPL(xfrm_audit_policy_add);
 
 void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
-                             uid_t auid, u32 sessionid, u32 secid)
+                             kuid_t auid, u32 sessionid, u32 secid)
 {
        struct audit_buffer *audit_buf;
 
index 210be48d8ae3c295a3f9e9642c356acd9866c92c..3459692092ec1f44220d02a4d73d82613896793d 100644 (file)
@@ -166,7 +166,7 @@ static DEFINE_SPINLOCK(xfrm_state_gc_lock);
 int __xfrm_state_delete(struct xfrm_state *x);
 
 int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
-void km_state_expired(struct xfrm_state *x, int hard, u32 pid);
+void km_state_expired(struct xfrm_state *x, int hard, u32 portid);
 
 static struct xfrm_state_afinfo *xfrm_state_lock_afinfo(unsigned int family)
 {
@@ -1674,13 +1674,13 @@ void km_state_notify(struct xfrm_state *x, const struct km_event *c)
 EXPORT_SYMBOL(km_policy_notify);
 EXPORT_SYMBOL(km_state_notify);
 
-void km_state_expired(struct xfrm_state *x, int hard, u32 pid)
+void km_state_expired(struct xfrm_state *x, int hard, u32 portid)
 {
        struct net *net = xs_net(x);
        struct km_event c;
 
        c.data.hard = hard;
-       c.pid = pid;
+       c.portid = portid;
        c.event = XFRM_MSG_EXPIRE;
        km_state_notify(x, &c);
 
@@ -1700,7 +1700,7 @@ int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
 
        read_lock(&xfrm_km_lock);
        list_for_each_entry(km, &xfrm_km_list, list) {
-               acqret = km->acquire(x, t, pol, XFRM_POLICY_OUT);
+               acqret = km->acquire(x, t, pol);
                if (!acqret)
                        err = acqret;
        }
@@ -1726,13 +1726,13 @@ int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport)
 }
 EXPORT_SYMBOL(km_new_mapping);
 
-void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 pid)
+void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 portid)
 {
        struct net *net = xp_net(pol);
        struct km_event c;
 
        c.data.hard = hard;
-       c.pid = pid;
+       c.portid = portid;
        c.event = XFRM_MSG_POLEXPIRE;
        km_policy_notify(pol, dir, &c);
 
@@ -2060,7 +2060,7 @@ void xfrm_state_fini(struct net *net)
        unsigned int sz;
 
        flush_work(&net->xfrm.state_hash_work);
-       audit_info.loginuid = -1;
+       audit_info.loginuid = INVALID_UID;
        audit_info.sessionid = -1;
        audit_info.secid = 0;
        xfrm_state_flush(net, IPSEC_PROTO_ANY, &audit_info);
@@ -2127,7 +2127,7 @@ static void xfrm_audit_helper_pktinfo(struct sk_buff *skb, u16 family,
 }
 
 void xfrm_audit_state_add(struct xfrm_state *x, int result,
-                         uid_t auid, u32 sessionid, u32 secid)
+                         kuid_t auid, u32 sessionid, u32 secid)
 {
        struct audit_buffer *audit_buf;
 
@@ -2142,7 +2142,7 @@ void xfrm_audit_state_add(struct xfrm_state *x, int result,
 EXPORT_SYMBOL_GPL(xfrm_audit_state_add);
 
 void xfrm_audit_state_delete(struct xfrm_state *x, int result,
-                            uid_t auid, u32 sessionid, u32 secid)
+                            kuid_t auid, u32 sessionid, u32 secid)
 {
        struct audit_buffer *audit_buf;
 
index 289f4bf18ff05751c5938c8722b098f6a560ee00..421f9844433519eac0c41e24265d2be9c1155b49 100644 (file)
@@ -595,7 +595,7 @@ static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
        struct xfrm_state *x;
        int err;
        struct km_event c;
-       uid_t loginuid = audit_get_loginuid(current);
+       kuid_t loginuid = audit_get_loginuid(current);
        u32 sessionid = audit_get_sessionid(current);
        u32 sid;
 
@@ -623,7 +623,7 @@ static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
        }
 
        c.seq = nlh->nlmsg_seq;
-       c.pid = nlh->nlmsg_pid;
+       c.portid = nlh->nlmsg_pid;
        c.event = nlh->nlmsg_type;
 
        km_state_notify(x, &c);
@@ -674,7 +674,7 @@ static int xfrm_del_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
        int err = -ESRCH;
        struct km_event c;
        struct xfrm_usersa_id *p = nlmsg_data(nlh);
-       uid_t loginuid = audit_get_loginuid(current);
+       kuid_t loginuid = audit_get_loginuid(current);
        u32 sessionid = audit_get_sessionid(current);
        u32 sid;
 
@@ -696,7 +696,7 @@ static int xfrm_del_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
                goto out;
 
        c.seq = nlh->nlmsg_seq;
-       c.pid = nlh->nlmsg_pid;
+       c.portid = nlh->nlmsg_pid;
        c.event = nlh->nlmsg_type;
        km_state_notify(x, &c);
 
@@ -847,7 +847,7 @@ static int dump_one_state(struct xfrm_state *x, int count, void *ptr)
        struct nlmsghdr *nlh;
        int err;
 
-       nlh = nlmsg_put(skb, NETLINK_CB(in_skb).pid, sp->nlmsg_seq,
+       nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, sp->nlmsg_seq,
                        XFRM_MSG_NEWSA, sizeof(*p), sp->nlmsg_flags);
        if (nlh == NULL)
                return -EMSGSIZE;
@@ -927,7 +927,7 @@ static inline size_t xfrm_spdinfo_msgsize(void)
 }
 
 static int build_spdinfo(struct sk_buff *skb, struct net *net,
-                        u32 pid, u32 seq, u32 flags)
+                        u32 portid, u32 seq, u32 flags)
 {
        struct xfrmk_spdinfo si;
        struct xfrmu_spdinfo spc;
@@ -936,7 +936,7 @@ static int build_spdinfo(struct sk_buff *skb, struct net *net,
        int err;
        u32 *f;
 
-       nlh = nlmsg_put(skb, pid, seq, XFRM_MSG_NEWSPDINFO, sizeof(u32), 0);
+       nlh = nlmsg_put(skb, portid, seq, XFRM_MSG_NEWSPDINFO, sizeof(u32), 0);
        if (nlh == NULL) /* shouldn't really happen ... */
                return -EMSGSIZE;
 
@@ -969,17 +969,17 @@ static int xfrm_get_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
        struct net *net = sock_net(skb->sk);
        struct sk_buff *r_skb;
        u32 *flags = nlmsg_data(nlh);
-       u32 spid = NETLINK_CB(skb).pid;
+       u32 sportid = NETLINK_CB(skb).portid;
        u32 seq = nlh->nlmsg_seq;
 
        r_skb = nlmsg_new(xfrm_spdinfo_msgsize(), GFP_ATOMIC);
        if (r_skb == NULL)
                return -ENOMEM;
 
-       if (build_spdinfo(r_skb, net, spid, seq, *flags) < 0)
+       if (build_spdinfo(r_skb, net, sportid, seq, *flags) < 0)
                BUG();
 
-       return nlmsg_unicast(net->xfrm.nlsk, r_skb, spid);
+       return nlmsg_unicast(net->xfrm.nlsk, r_skb, sportid);
 }
 
 static inline size_t xfrm_sadinfo_msgsize(void)
@@ -990,7 +990,7 @@ static inline size_t xfrm_sadinfo_msgsize(void)
 }
 
 static int build_sadinfo(struct sk_buff *skb, struct net *net,
-                        u32 pid, u32 seq, u32 flags)
+                        u32 portid, u32 seq, u32 flags)
 {
        struct xfrmk_sadinfo si;
        struct xfrmu_sadhinfo sh;
@@ -998,7 +998,7 @@ static int build_sadinfo(struct sk_buff *skb, struct net *net,
        int err;
        u32 *f;
 
-       nlh = nlmsg_put(skb, pid, seq, XFRM_MSG_NEWSADINFO, sizeof(u32), 0);
+       nlh = nlmsg_put(skb, portid, seq, XFRM_MSG_NEWSADINFO, sizeof(u32), 0);
        if (nlh == NULL) /* shouldn't really happen ... */
                return -EMSGSIZE;
 
@@ -1026,17 +1026,17 @@ static int xfrm_get_sadinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
        struct net *net = sock_net(skb->sk);
        struct sk_buff *r_skb;
        u32 *flags = nlmsg_data(nlh);
-       u32 spid = NETLINK_CB(skb).pid;
+       u32 sportid = NETLINK_CB(skb).portid;
        u32 seq = nlh->nlmsg_seq;
 
        r_skb = nlmsg_new(xfrm_sadinfo_msgsize(), GFP_ATOMIC);
        if (r_skb == NULL)
                return -ENOMEM;
 
-       if (build_sadinfo(r_skb, net, spid, seq, *flags) < 0)
+       if (build_sadinfo(r_skb, net, sportid, seq, *flags) < 0)
                BUG();
 
-       return nlmsg_unicast(net->xfrm.nlsk, r_skb, spid);
+       return nlmsg_unicast(net->xfrm.nlsk, r_skb, sportid);
 }
 
 static int xfrm_get_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
@@ -1056,7 +1056,7 @@ static int xfrm_get_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
        if (IS_ERR(resp_skb)) {
                err = PTR_ERR(resp_skb);
        } else {
-               err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).pid);
+               err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).portid);
        }
        xfrm_state_put(x);
 out_noput:
@@ -1137,7 +1137,7 @@ static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh,
                goto out;
        }
 
-       err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).pid);
+       err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).portid);
 
 out:
        xfrm_state_put(x);
@@ -1393,7 +1393,7 @@ static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
        struct km_event c;
        int err;
        int excl;
-       uid_t loginuid = audit_get_loginuid(current);
+       kuid_t loginuid = audit_get_loginuid(current);
        u32 sessionid = audit_get_sessionid(current);
        u32 sid;
 
@@ -1425,7 +1425,7 @@ static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
 
        c.event = nlh->nlmsg_type;
        c.seq = nlh->nlmsg_seq;
-       c.pid = nlh->nlmsg_pid;
+       c.portid = nlh->nlmsg_pid;
        km_policy_notify(xp, p->dir, &c);
 
        xfrm_pol_put(xp);
@@ -1511,7 +1511,7 @@ static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr
        struct nlmsghdr *nlh;
        int err;
 
-       nlh = nlmsg_put(skb, NETLINK_CB(in_skb).pid, sp->nlmsg_seq,
+       nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, sp->nlmsg_seq,
                        XFRM_MSG_NEWPOLICY, sizeof(*p), sp->nlmsg_flags);
        if (nlh == NULL)
                return -EMSGSIZE;
@@ -1648,10 +1648,10 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
                        err = PTR_ERR(resp_skb);
                } else {
                        err = nlmsg_unicast(net->xfrm.nlsk, resp_skb,
-                                           NETLINK_CB(skb).pid);
+                                           NETLINK_CB(skb).portid);
                }
        } else {
-               uid_t loginuid = audit_get_loginuid(current);
+               kuid_t loginuid = audit_get_loginuid(current);
                u32 sessionid = audit_get_sessionid(current);
                u32 sid;
 
@@ -1665,7 +1665,7 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
                c.data.byid = p->index;
                c.event = nlh->nlmsg_type;
                c.seq = nlh->nlmsg_seq;
-               c.pid = nlh->nlmsg_pid;
+               c.portid = nlh->nlmsg_pid;
                km_policy_notify(xp, p->dir, &c);
        }
 
@@ -1695,7 +1695,7 @@ static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
        c.data.proto = p->proto;
        c.event = nlh->nlmsg_type;
        c.seq = nlh->nlmsg_seq;
-       c.pid = nlh->nlmsg_pid;
+       c.portid = nlh->nlmsg_pid;
        c.net = net;
        km_state_notify(NULL, &c);
 
@@ -1722,7 +1722,7 @@ static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, const struct
        struct nlmsghdr *nlh;
        int err;
 
-       nlh = nlmsg_put(skb, c->pid, c->seq, XFRM_MSG_NEWAE, sizeof(*id), 0);
+       nlh = nlmsg_put(skb, c->portid, c->seq, XFRM_MSG_NEWAE, sizeof(*id), 0);
        if (nlh == NULL)
                return -EMSGSIZE;
 
@@ -1804,11 +1804,11 @@ static int xfrm_get_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
        spin_lock_bh(&x->lock);
        c.data.aevent = p->flags;
        c.seq = nlh->nlmsg_seq;
-       c.pid = nlh->nlmsg_pid;
+       c.portid = nlh->nlmsg_pid;
 
        if (build_aevent(r_skb, x, &c) < 0)
                BUG();
-       err = nlmsg_unicast(net->xfrm.nlsk, r_skb, NETLINK_CB(skb).pid);
+       err = nlmsg_unicast(net->xfrm.nlsk, r_skb, NETLINK_CB(skb).portid);
        spin_unlock_bh(&x->lock);
        xfrm_state_put(x);
        return err;
@@ -1854,7 +1854,7 @@ static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
 
        c.event = nlh->nlmsg_type;
        c.seq = nlh->nlmsg_seq;
-       c.pid = nlh->nlmsg_pid;
+       c.portid = nlh->nlmsg_pid;
        c.data.aevent = XFRM_AE_CU;
        km_state_notify(x, &c);
        err = 0;
@@ -1889,7 +1889,7 @@ static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
        c.data.type = type;
        c.event = nlh->nlmsg_type;
        c.seq = nlh->nlmsg_seq;
-       c.pid = nlh->nlmsg_pid;
+       c.portid = nlh->nlmsg_pid;
        c.net = net;
        km_policy_notify(NULL, 0, &c);
        return 0;
@@ -1945,7 +1945,7 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
 
        err = 0;
        if (up->hard) {
-               uid_t loginuid = audit_get_loginuid(current);
+               kuid_t loginuid = audit_get_loginuid(current);
                u32 sessionid = audit_get_sessionid(current);
                u32 sid;
 
@@ -1957,7 +1957,7 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
                // reset the timers here?
                WARN(1, "Dont know what to do with soft policy expire\n");
        }
-       km_policy_expired(xp, p->dir, up->hard, current->pid);
+       km_policy_expired(xp, p->dir, up->hard, nlh->nlmsg_pid);
 
 out:
        xfrm_pol_put(xp);
@@ -1985,10 +1985,10 @@ static int xfrm_add_sa_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
        err = -EINVAL;
        if (x->km.state != XFRM_STATE_VALID)
                goto out;
-       km_state_expired(x, ue->hard, current->pid);
+       km_state_expired(x, ue->hard, nlh->nlmsg_pid);
 
        if (ue->hard) {
-               uid_t loginuid = audit_get_loginuid(current);
+               kuid_t loginuid = audit_get_loginuid(current);
                u32 sessionid = audit_get_sessionid(current);
                u32 sid;
 
@@ -2397,7 +2397,7 @@ static int build_expire(struct sk_buff *skb, struct xfrm_state *x, const struct
        struct nlmsghdr *nlh;
        int err;
 
-       nlh = nlmsg_put(skb, c->pid, 0, XFRM_MSG_EXPIRE, sizeof(*ue), 0);
+       nlh = nlmsg_put(skb, c->portid, 0, XFRM_MSG_EXPIRE, sizeof(*ue), 0);
        if (nlh == NULL)
                return -EMSGSIZE;
 
@@ -2456,7 +2456,7 @@ static int xfrm_notify_sa_flush(const struct km_event *c)
        if (skb == NULL)
                return -ENOMEM;
 
-       nlh = nlmsg_put(skb, c->pid, c->seq, XFRM_MSG_FLUSHSA, sizeof(*p), 0);
+       nlh = nlmsg_put(skb, c->portid, c->seq, XFRM_MSG_FLUSHSA, sizeof(*p), 0);
        if (nlh == NULL) {
                kfree_skb(skb);
                return -EMSGSIZE;
@@ -2524,7 +2524,7 @@ static int xfrm_notify_sa(struct xfrm_state *x, const struct km_event *c)
        if (skb == NULL)
                return -ENOMEM;
 
-       nlh = nlmsg_put(skb, c->pid, c->seq, c->event, headlen, 0);
+       nlh = nlmsg_put(skb, c->portid, c->seq, c->event, headlen, 0);
        err = -EMSGSIZE;
        if (nlh == NULL)
                goto out_free_skb;
@@ -2594,8 +2594,7 @@ static inline size_t xfrm_acquire_msgsize(struct xfrm_state *x,
 }
 
 static int build_acquire(struct sk_buff *skb, struct xfrm_state *x,
-                        struct xfrm_tmpl *xt, struct xfrm_policy *xp,
-                        int dir)
+                        struct xfrm_tmpl *xt, struct xfrm_policy *xp)
 {
        __u32 seq = xfrm_get_acqseq();
        struct xfrm_user_acquire *ua;
@@ -2610,7 +2609,7 @@ static int build_acquire(struct sk_buff *skb, struct xfrm_state *x,
        memcpy(&ua->id, &x->id, sizeof(ua->id));
        memcpy(&ua->saddr, &x->props.saddr, sizeof(ua->saddr));
        memcpy(&ua->sel, &x->sel, sizeof(ua->sel));
-       copy_to_user_policy(xp, &ua->policy, dir);
+       copy_to_user_policy(xp, &ua->policy, XFRM_POLICY_OUT);
        ua->aalgos = xt->aalgos;
        ua->ealgos = xt->ealgos;
        ua->calgos = xt->calgos;
@@ -2632,7 +2631,7 @@ static int build_acquire(struct sk_buff *skb, struct xfrm_state *x,
 }
 
 static int xfrm_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *xt,
-                            struct xfrm_policy *xp, int dir)
+                            struct xfrm_policy *xp)
 {
        struct net *net = xs_net(x);
        struct sk_buff *skb;
@@ -2641,7 +2640,7 @@ static int xfrm_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *xt,
        if (skb == NULL)
                return -ENOMEM;
 
-       if (build_acquire(skb, x, xt, xp, dir) < 0)
+       if (build_acquire(skb, x, xt, xp) < 0)
                BUG();
 
        return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_ACQUIRE, GFP_ATOMIC);
@@ -2724,7 +2723,7 @@ static int build_polexpire(struct sk_buff *skb, struct xfrm_policy *xp,
        struct nlmsghdr *nlh;
        int err;
 
-       nlh = nlmsg_put(skb, c->pid, 0, XFRM_MSG_POLEXPIRE, sizeof(*upe), 0);
+       nlh = nlmsg_put(skb, c->portid, 0, XFRM_MSG_POLEXPIRE, sizeof(*upe), 0);
        if (nlh == NULL)
                return -EMSGSIZE;
 
@@ -2784,7 +2783,7 @@ static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, const struct km_e
        if (skb == NULL)
                return -ENOMEM;
 
-       nlh = nlmsg_put(skb, c->pid, c->seq, c->event, headlen, 0);
+       nlh = nlmsg_put(skb, c->portid, c->seq, c->event, headlen, 0);
        err = -EMSGSIZE;
        if (nlh == NULL)
                goto out_free_skb;
@@ -2838,7 +2837,7 @@ static int xfrm_notify_policy_flush(const struct km_event *c)
        if (skb == NULL)
                return -ENOMEM;
 
-       nlh = nlmsg_put(skb, c->pid, c->seq, XFRM_MSG_FLUSHPOLICY, 0, 0);
+       nlh = nlmsg_put(skb, c->portid, c->seq, XFRM_MSG_FLUSHPOLICY, 0, 0);
        err = -EMSGSIZE;
        if (nlh == NULL)
                goto out_free_skb;
@@ -2991,7 +2990,7 @@ static int __net_init xfrm_user_net_init(struct net *net)
                .input  = xfrm_netlink_rcv,
        };
 
-       nlsk = netlink_kernel_create(net, NETLINK_XFRM, THIS_MODULE, &cfg);
+       nlsk = netlink_kernel_create(net, NETLINK_XFRM, &cfg);
        if (nlsk == NULL)
                return -ENOMEM;
        net->xfrm.nlsk_stash = nlsk; /* Don't set to NULL */
index 16aa2d424985fa524e4eb8a60122be8d7ad817b0..bbbd276659ba5edad0e01b915d03639c7980803f 100644 (file)
@@ -18,14 +18,22 @@ HOSTCFLAGS_bpf-direct.o += -idirafter $(objtree)/include
 bpf-direct-objs := bpf-direct.o
 
 # Try to match the kernel target.
-ifeq ($(CONFIG_64BIT),)
-HOSTCFLAGS_bpf-direct.o += -m32
-HOSTCFLAGS_dropper.o += -m32
-HOSTCFLAGS_bpf-helper.o += -m32
-HOSTCFLAGS_bpf-fancy.o += -m32
-HOSTLOADLIBES_bpf-direct += -m32
-HOSTLOADLIBES_bpf-fancy += -m32
-HOSTLOADLIBES_dropper += -m32
+ifndef CONFIG_64BIT
+
+# s390 has -m31 flag to build 31 bit binaries
+ifndef CONFIG_S390
+MFLAG = -m32
+else
+MFLAG = -m31
+endif
+
+HOSTCFLAGS_bpf-direct.o += $(MFLAG)
+HOSTCFLAGS_dropper.o += $(MFLAG)
+HOSTCFLAGS_bpf-helper.o += $(MFLAG)
+HOSTCFLAGS_bpf-fancy.o += $(MFLAG)
+HOSTLOADLIBES_bpf-direct += $(MFLAG)
+HOSTLOADLIBES_bpf-fancy += $(MFLAG)
+HOSTLOADLIBES_dropper += $(MFLAG)
 endif
 
 # Tell kbuild to always build the programs
index 643279dd30fbbe6151578e328be864745cc03216..38ee70f3cd5b970101edd6d2edb52b4df8d73d14 100644 (file)
@@ -59,6 +59,16 @@ void seccomp_bpf_print(struct sock_filter *filter, size_t count);
 #define FIND_LABEL(labels, label) seccomp_bpf_label((labels), #label)
 
 #define EXPAND(...) __VA_ARGS__
+
+/* Ensure that we load the logically correct offset. */
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+#define LO_ARG(idx) offsetof(struct seccomp_data, args[(idx)])
+#elif __BYTE_ORDER == __BIG_ENDIAN
+#define LO_ARG(idx) offsetof(struct seccomp_data, args[(idx)]) + sizeof(__u32)
+#else
+#error "Unknown endianness"
+#endif
+
 /* Map all width-sensitive operations */
 #if __BITS_PER_LONG == 32
 
@@ -70,21 +80,16 @@ void seccomp_bpf_print(struct sock_filter *filter, size_t count);
 #define JLE(x, jt) JLE32(x, EXPAND(jt))
 #define JA(x, jt) JA32(x, EXPAND(jt))
 #define ARG(i) ARG_32(i)
-#define LO_ARG(idx) offsetof(struct seccomp_data, args[(idx)])
 
 #elif __BITS_PER_LONG == 64
 
 /* Ensure that we load the logically correct offset. */
 #if __BYTE_ORDER == __LITTLE_ENDIAN
 #define ENDIAN(_lo, _hi) _lo, _hi
-#define LO_ARG(idx) offsetof(struct seccomp_data, args[(idx)])
 #define HI_ARG(idx) offsetof(struct seccomp_data, args[(idx)]) + sizeof(__u32)
 #elif __BYTE_ORDER == __BIG_ENDIAN
 #define ENDIAN(_lo, _hi) _hi, _lo
-#define LO_ARG(idx) offsetof(struct seccomp_data, args[(idx)]) + sizeof(__u32)
 #define HI_ARG(idx) offsetof(struct seccomp_data, args[(idx)])
-#else
-#error "Unknown endianness"
 #endif
 
 union arg64 {
index 6ddf9ecac669861511d9dbbeed7b4b7b831d1883..bece49b3553596cc330f5d47cf20f5b3dedf524e 100644 (file)
@@ -3,7 +3,16 @@
 # This is not a complete Makefile of itself.  Instead, it is designed to
 # be easily embeddable into other systems of Makefiles.
 #
-DTC_SRCS = dtc.c flattree.c fstree.c data.c livetree.c treesource.c srcpos.c \
-       checks.c
+DTC_SRCS = \
+       checks.c \
+       data.c \
+       dtc.c \
+       flattree.c \
+       fstree.c \
+       livetree.c \
+       srcpos.c \
+       treesource.c \
+       util.c
+
 DTC_GEN_SRCS = dtc-lexer.lex.c dtc-parser.tab.c
 DTC_OBJS = $(DTC_SRCS:%.c=%.o) $(DTC_GEN_SRCS:%.c=%.o)
index a662a0044798f3b8f58fbd5f9011e82979f82bd2..ee96a2519eff6f39d2f093ae88774c612d71906e 100644 (file)
 #define TRACE(c, fmt, ...)     do { } while (0)
 #endif
 
-enum checklevel {
-       IGNORE = 0,
-       WARN = 1,
-       ERROR = 2,
-};
-
 enum checkstatus {
        UNCHECKED = 0,
        PREREQ,
@@ -57,14 +51,14 @@ struct check {
        node_check_fn node_fn;
        prop_check_fn prop_fn;
        void *data;
-       enum checklevel level;
+       bool warn, error;
        enum checkstatus status;
        int inprogress;
        int num_prereqs;
        struct check **prereq;
 };
 
-#define CHECK(nm, tfn, nfn, pfn, d, lvl, ...) \
+#define CHECK_ENTRY(nm, tfn, nfn, pfn, d, w, e, ...)          \
        static struct check *nm##_prereqs[] = { __VA_ARGS__ }; \
        static struct check nm = { \
                .name = #nm, \
@@ -72,20 +66,37 @@ struct check {
                .node_fn = (nfn), \
                .prop_fn = (pfn), \
                .data = (d), \
-               .level = (lvl), \
+               .warn = (w), \
+               .error = (e), \
                .status = UNCHECKED, \
                .num_prereqs = ARRAY_SIZE(nm##_prereqs), \
                .prereq = nm##_prereqs, \
        };
-
-#define TREE_CHECK(nm, d, lvl, ...) \
-       CHECK(nm, check_##nm, NULL, NULL, d, lvl, __VA_ARGS__)
-#define NODE_CHECK(nm, d, lvl, ...) \
-       CHECK(nm, NULL, check_##nm, NULL, d, lvl, __VA_ARGS__)
-#define PROP_CHECK(nm, d, lvl, ...) \
-       CHECK(nm, NULL, NULL, check_##nm, d, lvl, __VA_ARGS__)
-#define BATCH_CHECK(nm, lvl, ...) \
-       CHECK(nm, NULL, NULL, NULL, NULL, lvl, __VA_ARGS__)
+#define WARNING(nm, tfn, nfn, pfn, d, ...) \
+       CHECK_ENTRY(nm, tfn, nfn, pfn, d, true, false, __VA_ARGS__)
+#define ERROR(nm, tfn, nfn, pfn, d, ...) \
+       CHECK_ENTRY(nm, tfn, nfn, pfn, d, false, true, __VA_ARGS__)
+#define CHECK(nm, tfn, nfn, pfn, d, ...) \
+       CHECK_ENTRY(nm, tfn, nfn, pfn, d, false, false, __VA_ARGS__)
+
+#define TREE_WARNING(nm, d, ...) \
+       WARNING(nm, check_##nm, NULL, NULL, d, __VA_ARGS__)
+#define TREE_ERROR(nm, d, ...) \
+       ERROR(nm, check_##nm, NULL, NULL, d, __VA_ARGS__)
+#define TREE_CHECK(nm, d, ...) \
+       CHECK(nm, check_##nm, NULL, NULL, d, __VA_ARGS__)
+#define NODE_WARNING(nm, d, ...) \
+       WARNING(nm, NULL, check_##nm, NULL, d,  __VA_ARGS__)
+#define NODE_ERROR(nm, d, ...) \
+       ERROR(nm, NULL, check_##nm, NULL, d, __VA_ARGS__)
+#define NODE_CHECK(nm, d, ...) \
+       CHECK(nm, NULL, check_##nm, NULL, d, __VA_ARGS__)
+#define PROP_WARNING(nm, d, ...) \
+       WARNING(nm, NULL, NULL, check_##nm, d, __VA_ARGS__)
+#define PROP_ERROR(nm, d, ...) \
+       ERROR(nm, NULL, NULL, check_##nm, d, __VA_ARGS__)
+#define PROP_CHECK(nm, d, ...) \
+       CHECK(nm, NULL, NULL, check_##nm, d, __VA_ARGS__)
 
 #ifdef __GNUC__
 static inline void check_msg(struct check *c, const char *fmt, ...) __attribute__((format (printf, 2, 3)));
@@ -95,13 +106,13 @@ static inline void check_msg(struct check *c, const char *fmt, ...)
        va_list ap;
        va_start(ap, fmt);
 
-       if ((c->level < WARN) || (c->level <= quiet))
-               return; /* Suppress message */
-
-       fprintf(stderr, "%s (%s): ",
-               (c->level == ERROR) ? "ERROR" : "Warning", c->name);
-       vfprintf(stderr, fmt, ap);
-       fprintf(stderr, "\n");
+       if ((c->warn && (quiet < 1))
+           || (c->error && (quiet < 2))) {
+               fprintf(stderr, "%s (%s): ",
+                       (c->error) ? "ERROR" : "Warning", c->name);
+               vfprintf(stderr, fmt, ap);
+               fprintf(stderr, "\n");
+       }
 }
 
 #define FAIL(c, ...) \
@@ -167,7 +178,7 @@ static int run_check(struct check *c, struct node *dt)
 
 out:
        c->inprogress = 0;
-       if ((c->status != PASSED) && (c->level == ERROR))
+       if ((c->status != PASSED) && (c->error))
                error = 1;
        return error;
 }
@@ -176,6 +187,13 @@ out:
  * Utility check functions
  */
 
+/* A check which always fails, for testing purposes only */
+static inline void check_always_fail(struct check *c, struct node *dt)
+{
+       FAIL(c, "always_fail check");
+}
+TREE_CHECK(always_fail, NULL);
+
 static void check_is_string(struct check *c, struct node *root,
                            struct node *node)
 {
@@ -190,8 +208,10 @@ static void check_is_string(struct check *c, struct node *root,
                FAIL(c, "\"%s\" property in %s is not a string",
                     propname, node->fullpath);
 }
-#define CHECK_IS_STRING(nm, propname, lvl) \
-       CHECK(nm, NULL, check_is_string, NULL, (propname), (lvl))
+#define WARNING_IF_NOT_STRING(nm, propname) \
+       WARNING(nm, NULL, check_is_string, NULL, (propname))
+#define ERROR_IF_NOT_STRING(nm, propname) \
+       ERROR(nm, NULL, check_is_string, NULL, (propname))
 
 static void check_is_cell(struct check *c, struct node *root,
                          struct node *node)
@@ -207,8 +227,10 @@ static void check_is_cell(struct check *c, struct node *root,
                FAIL(c, "\"%s\" property in %s is not a single cell",
                     propname, node->fullpath);
 }
-#define CHECK_IS_CELL(nm, propname, lvl) \
-       CHECK(nm, NULL, check_is_cell, NULL, (propname), (lvl))
+#define WARNING_IF_NOT_CELL(nm, propname) \
+       WARNING(nm, NULL, check_is_cell, NULL, (propname))
+#define ERROR_IF_NOT_CELL(nm, propname) \
+       ERROR(nm, NULL, check_is_cell, NULL, (propname))
 
 /*
  * Structural check functions
@@ -227,20 +249,24 @@ static void check_duplicate_node_names(struct check *c, struct node *dt,
                                FAIL(c, "Duplicate node name %s",
                                     child->fullpath);
 }
-NODE_CHECK(duplicate_node_names, NULL, ERROR);
+NODE_ERROR(duplicate_node_names, NULL);
 
 static void check_duplicate_property_names(struct check *c, struct node *dt,
                                           struct node *node)
 {
        struct property *prop, *prop2;
 
-       for_each_property(node, prop)
-               for (prop2 = prop->next; prop2; prop2 = prop2->next)
+       for_each_property(node, prop) {
+               for (prop2 = prop->next; prop2; prop2 = prop2->next) {
+                       if (prop2->deleted)
+                               continue;
                        if (streq(prop->name, prop2->name))
                                FAIL(c, "Duplicate property name %s in %s",
                                     prop->name, node->fullpath);
+               }
+       }
 }
-NODE_CHECK(duplicate_property_names, NULL, ERROR);
+NODE_ERROR(duplicate_property_names, NULL);
 
 #define LOWERCASE      "abcdefghijklmnopqrstuvwxyz"
 #define UPPERCASE      "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
@@ -256,7 +282,7 @@ static void check_node_name_chars(struct check *c, struct node *dt,
                FAIL(c, "Bad character '%c' in node %s",
                     node->name[n], node->fullpath);
 }
-NODE_CHECK(node_name_chars, PROPNODECHARS "@", ERROR);
+NODE_ERROR(node_name_chars, PROPNODECHARS "@");
 
 static void check_node_name_format(struct check *c, struct node *dt,
                                   struct node *node)
@@ -265,7 +291,7 @@ static void check_node_name_format(struct check *c, struct node *dt,
                FAIL(c, "Node %s has multiple '@' characters in name",
                     node->fullpath);
 }
-NODE_CHECK(node_name_format, NULL, ERROR, &node_name_chars);
+NODE_ERROR(node_name_format, NULL, &node_name_chars);
 
 static void check_property_name_chars(struct check *c, struct node *dt,
                                      struct node *node, struct property *prop)
@@ -276,7 +302,7 @@ static void check_property_name_chars(struct check *c, struct node *dt,
                FAIL(c, "Bad character '%c' in property name \"%s\", node %s",
                     prop->name[n], prop->name, node->fullpath);
 }
-PROP_CHECK(property_name_chars, PROPNODECHARS, ERROR);
+PROP_ERROR(property_name_chars, PROPNODECHARS);
 
 #define DESCLABEL_FMT  "%s%s%s%s%s"
 #define DESCLABEL_ARGS(node,prop,mark)         \
@@ -331,8 +357,8 @@ static void check_duplicate_label_prop(struct check *c, struct node *dt,
        for_each_marker_of_type(m, LABEL)
                check_duplicate_label(c, dt, m->ref, node, prop, m);
 }
-CHECK(duplicate_label, NULL, check_duplicate_label_node,
-      check_duplicate_label_prop, NULL, ERROR);
+ERROR(duplicate_label, NULL, check_duplicate_label_node,
+      check_duplicate_label_prop, NULL);
 
 static void check_explicit_phandles(struct check *c, struct node *root,
                                    struct node *node, struct property *prop)
@@ -391,7 +417,7 @@ static void check_explicit_phandles(struct check *c, struct node *root,
 
        node->phandle = phandle;
 }
-PROP_CHECK(explicit_phandles, NULL, ERROR);
+PROP_ERROR(explicit_phandles, NULL);
 
 static void check_name_properties(struct check *c, struct node *root,
                                  struct node *node)
@@ -420,8 +446,8 @@ static void check_name_properties(struct check *c, struct node *root,
                free(prop);
        }
 }
-CHECK_IS_STRING(name_is_string, "name", ERROR);
-NODE_CHECK(name_properties, NULL, ERROR, &name_is_string);
+ERROR_IF_NOT_STRING(name_is_string, "name");
+NODE_ERROR(name_properties, NULL, &name_is_string);
 
 /*
  * Reference fixup functions
@@ -448,7 +474,7 @@ static void fixup_phandle_references(struct check *c, struct node *dt,
                *((cell_t *)(prop->val.val + m->offset)) = cpu_to_fdt32(phandle);
        }
 }
-CHECK(phandle_references, NULL, NULL, fixup_phandle_references, NULL, ERROR,
+ERROR(phandle_references, NULL, NULL, fixup_phandle_references, NULL,
       &duplicate_node_names, &explicit_phandles);
 
 static void fixup_path_references(struct check *c, struct node *dt,
@@ -473,19 +499,19 @@ static void fixup_path_references(struct check *c, struct node *dt,
                                                  strlen(path) + 1);
        }
 }
-CHECK(path_references, NULL, NULL, fixup_path_references, NULL, ERROR,
+ERROR(path_references, NULL, NULL, fixup_path_references, NULL,
       &duplicate_node_names);
 
 /*
  * Semantic checks
  */
-CHECK_IS_CELL(address_cells_is_cell, "#address-cells", WARN);
-CHECK_IS_CELL(size_cells_is_cell, "#size-cells", WARN);
-CHECK_IS_CELL(interrupt_cells_is_cell, "#interrupt-cells", WARN);
+WARNING_IF_NOT_CELL(address_cells_is_cell, "#address-cells");
+WARNING_IF_NOT_CELL(size_cells_is_cell, "#size-cells");
+WARNING_IF_NOT_CELL(interrupt_cells_is_cell, "#interrupt-cells");
 
-CHECK_IS_STRING(device_type_is_string, "device_type", WARN);
-CHECK_IS_STRING(model_is_string, "model", WARN);
-CHECK_IS_STRING(status_is_string, "status", WARN);
+WARNING_IF_NOT_STRING(device_type_is_string, "device_type");
+WARNING_IF_NOT_STRING(model_is_string, "model");
+WARNING_IF_NOT_STRING(status_is_string, "status");
 
 static void fixup_addr_size_cells(struct check *c, struct node *dt,
                                  struct node *node)
@@ -503,8 +529,8 @@ static void fixup_addr_size_cells(struct check *c, struct node *dt,
        if (prop)
                node->size_cells = propval_cell(prop);
 }
-CHECK(addr_size_cells, NULL, fixup_addr_size_cells, NULL, NULL, WARN,
-      &address_cells_is_cell, &size_cells_is_cell);
+WARNING(addr_size_cells, NULL, fixup_addr_size_cells, NULL, NULL,
+       &address_cells_is_cell, &size_cells_is_cell);
 
 #define node_addr_cells(n) \
        (((n)->addr_cells == -1) ? 2 : (n)->addr_cells)
@@ -538,7 +564,7 @@ static void check_reg_format(struct check *c, struct node *dt,
                     "(#address-cells == %d, #size-cells == %d)",
                     node->fullpath, prop->val.len, addr_cells, size_cells);
 }
-NODE_CHECK(reg_format, NULL, WARN, &addr_size_cells);
+NODE_WARNING(reg_format, NULL, &addr_size_cells);
 
 static void check_ranges_format(struct check *c, struct node *dt,
                                struct node *node)
@@ -579,7 +605,7 @@ static void check_ranges_format(struct check *c, struct node *dt,
                     p_addr_cells, c_addr_cells, c_size_cells);
        }
 }
-NODE_CHECK(ranges_format, NULL, WARN, &addr_size_cells);
+NODE_WARNING(ranges_format, NULL, &addr_size_cells);
 
 /*
  * Style checks
@@ -606,7 +632,7 @@ static void check_avoid_default_addr_size(struct check *c, struct node *dt,
                FAIL(c, "Relying on default #size-cells value for %s",
                     node->fullpath);
 }
-NODE_CHECK(avoid_default_addr_size, NULL, WARN, &addr_size_cells);
+NODE_WARNING(avoid_default_addr_size, NULL, &addr_size_cells);
 
 static void check_obsolete_chosen_interrupt_controller(struct check *c,
                                                       struct node *dt)
@@ -623,7 +649,7 @@ static void check_obsolete_chosen_interrupt_controller(struct check *c,
                FAIL(c, "/chosen has obsolete \"interrupt-controller\" "
                     "property");
 }
-TREE_CHECK(obsolete_chosen_interrupt_controller, NULL, WARN);
+TREE_WARNING(obsolete_chosen_interrupt_controller, NULL);
 
 static struct check *check_table[] = {
        &duplicate_node_names, &duplicate_property_names,
@@ -642,8 +668,71 @@ static struct check *check_table[] = {
 
        &avoid_default_addr_size,
        &obsolete_chosen_interrupt_controller,
+
+       &always_fail,
 };
 
+static void enable_warning_error(struct check *c, bool warn, bool error)
+{
+       int i;
+
+       /* Raising level, also raise it for prereqs */
+       if ((warn && !c->warn) || (error && !c->error))
+               for (i = 0; i < c->num_prereqs; i++)
+                       enable_warning_error(c->prereq[i], warn, error);
+
+       c->warn = c->warn || warn;
+       c->error = c->error || error;
+}
+
+static void disable_warning_error(struct check *c, bool warn, bool error)
+{
+       int i;
+
+       /* Lowering level, also lower it for things this is the prereq
+        * for */
+       if ((warn && c->warn) || (error && c->error)) {
+               for (i = 0; i < ARRAY_SIZE(check_table); i++) {
+                       struct check *cc = check_table[i];
+                       int j;
+
+                       for (j = 0; j < cc->num_prereqs; j++)
+                               if (cc->prereq[j] == c)
+                                       disable_warning_error(cc, warn, error);
+               }
+       }
+
+       c->warn = c->warn && !warn;
+       c->error = c->error && !error;
+}
+
+void parse_checks_option(bool warn, bool error, const char *optarg)
+{
+       int i;
+       const char *name = optarg;
+       bool enable = true;
+
+       if ((strncmp(optarg, "no-", 3) == 0)
+           || (strncmp(optarg, "no_", 3) == 0)) {
+               name = optarg + 3;
+               enable = false;
+       }
+
+       for (i = 0; i < ARRAY_SIZE(check_table); i++) {
+               struct check *c = check_table[i];
+
+               if (streq(c->name, name)) {
+                       if (enable)
+                               enable_warning_error(c, warn, error);
+                       else
+                               disable_warning_error(c, warn, error);
+                       return;
+               }
+       }
+
+       die("Unrecognized check name \"%s\"\n", name);
+}
+
 void process_checks(int force, struct boot_info *bi)
 {
        struct node *dt = bi->dt;
@@ -653,7 +742,7 @@ void process_checks(int force, struct boot_info *bi)
        for (i = 0; i < ARRAY_SIZE(check_table); i++) {
                struct check *c = check_table[i];
 
-               if (c->level != IGNORE)
+               if (c->warn || c->error)
                        error = error || run_check(c, dt);
        }
 
index fe555e819bf84e298891e4717dd0a6f06bef715c..4a40c5b92474fede88049a2de4876ad641e9e063 100644 (file)
@@ -68,40 +68,6 @@ struct data data_copy_mem(const char *mem, int len)
        return d;
 }
 
-static char get_oct_char(const char *s, int *i)
-{
-       char x[4];
-       char *endx;
-       long val;
-
-       x[3] = '\0';
-       strncpy(x, s + *i, 3);
-
-       val = strtol(x, &endx, 8);
-
-       assert(endx > x);
-
-       (*i) += endx - x;
-       return val;
-}
-
-static char get_hex_char(const char *s, int *i)
-{
-       char x[3];
-       char *endx;
-       long val;
-
-       x[2] = '\0';
-       strncpy(x, s + *i, 2);
-
-       val = strtol(x, &endx, 16);
-       if (!(endx  > x))
-               die("\\x used with no following hex digits\n");
-
-       (*i) += endx - x;
-       return val;
-}
-
 struct data data_copy_escape_string(const char *s, int len)
 {
        int i = 0;
@@ -114,53 +80,10 @@ struct data data_copy_escape_string(const char *s, int len)
        while (i < len) {
                char c = s[i++];
 
-               if (c != '\\') {
-                       q[d.len++] = c;
-                       continue;
-               }
-
-               c = s[i++];
-               assert(c);
-               switch (c) {
-               case 'a':
-                       q[d.len++] = '\a';
-                       break;
-               case 'b':
-                       q[d.len++] = '\b';
-                       break;
-               case 't':
-                       q[d.len++] = '\t';
-                       break;
-               case 'n':
-                       q[d.len++] = '\n';
-                       break;
-               case 'v':
-                       q[d.len++] = '\v';
-                       break;
-               case 'f':
-                       q[d.len++] = '\f';
-                       break;
-               case 'r':
-                       q[d.len++] = '\r';
-                       break;
-               case '0':
-               case '1':
-               case '2':
-               case '3':
-               case '4':
-               case '5':
-               case '6':
-               case '7':
-                       i--; /* need to re-read the first digit as
-                             * part of the octal value */
-                       q[d.len++] = get_oct_char(s, &i);
-                       break;
-               case 'x':
-                       q[d.len++] = get_hex_char(s, &i);
-                       break;
-               default:
-                       q[d.len++] = c;
-               }
+               if (c == '\\')
+                       c = get_escape_char(s, &i);
+
+               q[d.len++] = c;
        }
 
        q[d.len++] = '\0';
@@ -245,11 +168,33 @@ struct data data_merge(struct data d1, struct data d2)
        return d;
 }
 
-struct data data_append_cell(struct data d, cell_t word)
+struct data data_append_integer(struct data d, uint64_t value, int bits)
 {
-       cell_t beword = cpu_to_fdt32(word);
-
-       return data_append_data(d, &beword, sizeof(beword));
+       uint8_t value_8;
+       uint16_t value_16;
+       uint32_t value_32;
+       uint64_t value_64;
+
+       switch (bits) {
+       case 8:
+               value_8 = value;
+               return data_append_data(d, &value_8, 1);
+
+       case 16:
+               value_16 = cpu_to_fdt16(value);
+               return data_append_data(d, &value_16, 2);
+
+       case 32:
+               value_32 = cpu_to_fdt32(value);
+               return data_append_data(d, &value_32, 4);
+
+       case 64:
+               value_64 = cpu_to_fdt64(value);
+               return data_append_data(d, &value_64, 8);
+
+       default:
+               die("Invalid literal size (%d)\n", bits);
+       }
 }
 
 struct data data_append_re(struct data d, const struct fdt_reserve_entry *re)
@@ -262,11 +207,14 @@ struct data data_append_re(struct data d, const struct fdt_reserve_entry *re)
        return data_append_data(d, &bere, sizeof(bere));
 }
 
-struct data data_append_addr(struct data d, uint64_t addr)
+struct data data_append_cell(struct data d, cell_t word)
 {
-       uint64_t beaddr = cpu_to_fdt64(addr);
+       return data_append_integer(d, word, sizeof(word) * 8);
+}
 
-       return data_append_data(d, &beaddr, sizeof(beaddr));
+struct data data_append_addr(struct data d, uint64_t addr)
+{
+       return data_append_integer(d, addr, sizeof(addr) * 8);
 }
 
 struct data data_append_byte(struct data d, uint8_t byte)
index e866ea5166ac40a140092c4987170b48a04ef398..254d5af889562f5e5987db2de1f45f42249d7723 100644 (file)
@@ -29,6 +29,7 @@ PROPNODECHAR  [a-zA-Z0-9,._+*#?@-]
 PATHCHAR       ({PROPNODECHAR}|[/])
 LABEL          [a-zA-Z_][a-zA-Z0-9_]*
 STRING         \"([^\\"]|\\.)*\"
+CHAR_LITERAL   '([^']|\\')*'
 WS             [[:space:]]
 COMMENT                "/*"([^*]|\*+[^*/])*\*+"/"
 LINECOMMENT    "//".*\n
@@ -70,6 +71,27 @@ static int pop_input_file(void);
                        push_input_file(name);
                }
 
+<*>^"#"(line)?{WS}+[0-9]+{WS}+{STRING}({WS}+[0-9]+)? {
+                       char *line, *tmp, *fn;
+                       /* skip text before line # */
+                       line = yytext;
+                       while (!isdigit(*line))
+                               line++;
+                       /* skip digits in line # */
+                       tmp = line;
+                       while (!isspace(*tmp))
+                               tmp++;
+                       /* "NULL"-terminate line # */
+                       *tmp = '\0';
+                       /* start of filename */
+                       fn = strchr(tmp + 1, '"') + 1;
+                       /* strip trailing " from filename */
+                       tmp = strchr(fn, '"');
+                       *tmp = 0;
+                       /* -1 since #line is the number of the next line */
+                       srcpos_set_line(xstrdup(fn), atoi(line) - 1);
+               }
+
 <*><<EOF>>             {
                        if (!pop_input_file()) {
                                yyterminate();
@@ -96,6 +118,26 @@ static int pop_input_file(void);
                        return DT_MEMRESERVE;
                }
 
+<*>"/bits/"    {
+                       DPRINT("Keyword: /bits/\n");
+                       BEGIN_DEFAULT();
+                       return DT_BITS;
+               }
+
+<*>"/delete-property/" {
+                       DPRINT("Keyword: /delete-property/\n");
+                       DPRINT("<PROPNODENAME>\n");
+                       BEGIN(PROPNODENAME);
+                       return DT_DEL_PROP;
+               }
+
+<*>"/delete-node/"     {
+                       DPRINT("Keyword: /delete-node/\n");
+                       DPRINT("<PROPNODENAME>\n");
+                       BEGIN(PROPNODENAME);
+                       return DT_DEL_NODE;
+               }
+
 <*>{LABEL}:    {
                        DPRINT("Label: %s\n", yytext);
                        yylval.labelref = xstrdup(yytext);
@@ -103,12 +145,19 @@ static int pop_input_file(void);
                        return DT_LABEL;
                }
 
-<V1>[0-9]+|0[xX][0-9a-fA-F]+      {
+<V1>([0-9]+|0[xX][0-9a-fA-F]+)(U|L|UL|LL|ULL)? {
                        yylval.literal = xstrdup(yytext);
                        DPRINT("Literal: '%s'\n", yylval.literal);
                        return DT_LITERAL;
                }
 
+<*>{CHAR_LITERAL}      {
+                       yytext[yyleng-1] = '\0';
+                       yylval.literal = xstrdup(yytext+1);
+                       DPRINT("Character literal: %s\n", yylval.literal);
+                       return DT_CHAR_LITERAL;
+               }
+
 <*>\&{LABEL}   {       /* label reference */
                        DPRINT("Ref: %s\n", yytext+1);
                        yylval.labelref = xstrdup(yytext+1);
@@ -134,9 +183,10 @@ static int pop_input_file(void);
                        return ']';
                }
 
-<PROPNODENAME>{PROPNODECHAR}+ {
+<PROPNODENAME>\\?{PROPNODECHAR}+ {
                        DPRINT("PropNodeName: %s\n", yytext);
-                       yylval.propnodename = xstrdup(yytext);
+                       yylval.propnodename = xstrdup((yytext[0] == '\\') ?
+                                                       yytext + 1 : yytext);
                        BEGIN_DEFAULT();
                        return DT_PROPNODENAME;
                }
@@ -150,6 +200,15 @@ static int pop_input_file(void);
 <*>{COMMENT}+  /* eat C-style comments */
 <*>{LINECOMMENT}+ /* eat C++-style comments */
 
+<*>"<<"                { return DT_LSHIFT; };
+<*>">>"                { return DT_RSHIFT; };
+<*>"<="                { return DT_LE; };
+<*>">="                { return DT_GE; };
+<*>"=="                { return DT_EQ; };
+<*>"!="                { return DT_NE; };
+<*>"&&"                { return DT_AND; };
+<*>"||"                { return DT_OR; };
+
 <*>.           {
                        DPRINT("Char: %c (\\x%02x)\n", yytext[0],
                                (unsigned)yytext[0]);
index 8bbe128170505175f3182fb192ff5d9a55caf988..a6c5fcdfc032d6103e329bd0e5d9f6ad96de48f5 100644 (file)
@@ -1,5 +1,6 @@
+#line 2 "dtc-lexer.lex.c"
 
-#line 3 "scripts/dtc/dtc-lexer.lex.c_shipped"
+#line 4 "dtc-lexer.lex.c"
 
 #define  YY_INT_ALIGNED short int
 
@@ -53,7 +54,6 @@ typedef int flex_int32_t;
 typedef unsigned char flex_uint8_t; 
 typedef unsigned short int flex_uint16_t;
 typedef unsigned int flex_uint32_t;
-#endif /* ! C99 */
 
 /* Limits of integral types. */
 #ifndef INT8_MIN
@@ -84,6 +84,8 @@ typedef unsigned int flex_uint32_t;
 #define UINT32_MAX             (4294967295U)
 #endif
 
+#endif /* ! C99 */
+
 #endif /* ! FLEXINT_H */
 
 #ifdef __cplusplus
@@ -140,7 +142,15 @@ typedef unsigned int flex_uint32_t;
 
 /* Size of default input buffer. */
 #ifndef YY_BUF_SIZE
+#ifdef __ia64__
+/* On IA-64, the buffer size is 16k, not 8k.
+ * Moreover, YY_BUF_SIZE is 2*YY_READ_BUF_SIZE in the general case.
+ * Ditto for the __ia64__ case accordingly.
+ */
+#define YY_BUF_SIZE 32768
+#else
 #define YY_BUF_SIZE 16384
+#endif /* __ia64__ */
 #endif
 
 /* The state buf must be large enough to hold one state per character in the main buffer.
@@ -362,8 +372,8 @@ static void yy_fatal_error (yyconst char msg[]  );
        *yy_cp = '\0'; \
        (yy_c_buf_p) = yy_cp;
 
-#define YY_NUM_RULES 17
-#define YY_END_OF_BUFFER 18
+#define YY_NUM_RULES 30
+#define YY_END_OF_BUFFER 31
 /* This struct is not used in this scanner,
    but its presence is necessary. */
 struct yy_trans_info
@@ -371,19 +381,25 @@ struct yy_trans_info
        flex_int32_t yy_verify;
        flex_int32_t yy_nxt;
        };
-static yyconst flex_int16_t yy_accept[94] =
+static yyconst flex_int16_t yy_accept[161] =
     {   0,
         0,    0,    0,    0,    0,    0,    0,    0,    0,    0,
-       18,   16,   13,   13,   16,   16,   16,   16,   16,   16,
-       16,   10,   11,   11,    6,    6,   13,    0,    2,    0,
-        7,    0,    0,    0,    0,    0,    0,    0,    5,    0,
-        9,    9,   11,   11,    6,    0,    7,    0,    0,    0,
-        0,   15,    0,    0,    0,    0,    6,    0,   14,    0,
-        0,    0,    0,    0,    8,    0,    0,    0,    0,    0,
-        0,    0,    0,    0,    0,    0,    0,    0,    3,   12,
-        0,    0,    0,    0,    0,    0,    0,    0,    1,    0,
-        0,    4,    0
-
+       31,   29,   18,   18,   29,   29,   29,   29,   29,   29,
+       29,   29,   29,   29,   29,   29,   29,   29,   15,   16,
+       16,   29,   16,   10,   10,   18,   26,    0,    3,    0,
+       27,   12,    0,    0,   11,    0,    0,    0,    0,    0,
+        0,    0,   21,   23,   25,   24,   22,    0,    9,   28,
+        0,    0,    0,   14,   14,   16,   16,   16,   10,   10,
+       10,    0,   12,    0,   11,    0,    0,    0,   20,    0,
+        0,    0,    0,    0,    0,    0,    0,   16,   10,   10,
+       10,    0,   19,    0,    0,    0,    0,    0,    0,    0,
+
+        0,    0,   16,   13,    0,    0,    0,    0,    0,    0,
+        0,    0,    0,   16,    6,    0,    0,    0,    0,    0,
+        0,    2,    0,    0,    0,    0,    0,    0,    0,    0,
+        4,   17,    0,    0,    2,    0,    0,    0,    0,    0,
+        0,    0,    0,    0,    0,    0,    0,    1,    0,    0,
+        0,    0,    5,    8,    0,    0,    0,    0,    7,    0
     } ;
 
 static yyconst flex_int32_t yy_ec[256] =
@@ -391,17 +407,17 @@ static yyconst flex_int32_t yy_ec[256] =
         1,    1,    1,    1,    1,    1,    1,    1,    2,    3,
         2,    2,    2,    1,    1,    1,    1,    1,    1,    1,
         1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
-        1,    2,    1,    4,    5,    1,    1,    6,    1,    1,
-        1,    7,    5,    5,    8,    5,    9,   10,   11,   12,
-       12,   12,   12,   12,   12,   12,   12,   13,    1,    1,
-        1,    1,    5,    5,   14,   14,   14,   14,   14,   14,
-       15,   15,   15,   15,   15,   15,   15,   15,   15,   15,
-       15,   15,   15,   15,   15,   15,   15,   16,   15,   15,
-        1,   17,   18,    1,   15,    1,   14,   19,   20,   21,
-
-       22,   14,   15,   15,   23,   15,   15,   24,   25,   26,
-       15,   15,   15,   27,   28,   29,   30,   31,   15,   16,
-       15,   15,   32,    1,   33,    1,    1,    1,    1,    1,
+        1,    2,    4,    5,    6,    1,    1,    7,    8,    1,
+        1,    9,   10,   10,   11,   10,   12,   13,   14,   15,
+       15,   15,   15,   15,   15,   15,   15,   16,    1,   17,
+       18,   19,   10,   10,   20,   20,   20,   20,   20,   20,
+       21,   21,   21,   21,   21,   22,   21,   21,   21,   21,
+       21,   21,   21,   21,   23,   21,   21,   24,   21,   21,
+        1,   25,   26,    1,   21,    1,   20,   27,   28,   29,
+
+       30,   20,   21,   21,   31,   21,   21,   32,   33,   34,
+       35,   36,   21,   37,   38,   39,   40,   41,   21,   24,
+       42,   21,   43,   44,   45,    1,    1,    1,    1,    1,
         1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
         1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
         1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
@@ -418,112 +434,163 @@ static yyconst flex_int32_t yy_ec[256] =
         1,    1,    1,    1,    1
     } ;
 
-static yyconst flex_int32_t yy_meta[34] =
+static yyconst flex_int32_t yy_meta[46] =
     {   0,
-        1,    1,    1,    1,    2,    1,    2,    2,    3,    4,
-        4,    4,    5,    6,    7,    7,    1,    1,    6,    6,
-        6,    6,    7,    7,    7,    7,    7,    7,    7,    7,
-        7,    8,    1
+        1,    1,    1,    1,    1,    2,    3,    1,    2,    2,
+        2,    4,    5,    5,    5,    6,    1,    1,    1,    7,
+        8,    8,    8,    8,    1,    1,    7,    7,    7,    7,
+        8,    8,    8,    8,    8,    8,    8,    8,    8,    8,
+        8,    8,    3,    1,    1
     } ;
 
-static yyconst flex_int16_t yy_base[106] =
+static yyconst flex_int16_t yy_base[175] =
     {   0,
-        0,    0,  237,  236,   25,    0,   47,    0,   30,   71,
-      244,  247,   82,   84,   84,  211,   95,  229,  218,    0,
-      111,  247,    0,   84,   83,   95,  106,   86,  247,  237,
-        0,  230,  231,  234,  207,  209,  212,  220,  247,  206,
-      247,  218,    0,  106,  116,    0,    0,    0,  223,   89,
-      226,  219,  199,  206,  200,  204,    0,  190,  213,  212,
-      202,   91,  178,  161,  247,  172,  144,  150,  140,  130,
-      140,  124,  128,  120,  138,  137,  123,  122,  247,  247,
-      134,  114,  132,   86,  135,  125,   90,  136,  247,   97,
-       29,  247,  247,  153,  156,  161,  165,  170,  176,  180,
-
-      187,  195,  200,  205,  212
+        0,  388,  381,   40,   41,  386,   71,  385,   34,   44,
+      390,  395,   60,   62,  371,  112,  111,  111,  111,  104,
+      370,  106,  371,  342,  124,  119,    0,  144,  395,    0,
+      123,    0,  159,  153,  165,  167,  395,  130,  395,  382,
+      395,    0,  372,  122,  395,  157,  374,  379,  350,   21,
+      346,  349,  395,  395,  395,  395,  395,  362,  395,  395,
+      181,  346,  342,  395,  359,    0,  191,  343,  190,  351,
+      350,    0,    0,    0,  173,  362,  177,  367,  357,  329,
+      335,  328,  337,  331,  206,  329,  334,  327,  395,  338,
+      170,  314,  346,  345,  318,  325,  343,  158,  316,  212,
+
+      322,  319,  320,  395,  340,  336,  308,  305,  314,  304,
+      295,  138,  208,  220,  395,  292,  305,  265,  264,  254,
+      201,  222,  285,  275,  273,  270,  236,  235,  225,  115,
+      395,  395,  252,  216,  216,  217,  214,  230,  209,  220,
+      213,  239,  211,  217,  216,  209,  229,  395,  240,  225,
+      206,  169,  395,  395,  116,  106,   99,   54,  395,  395,
+      254,  260,  268,  272,  276,  282,  289,  293,  301,  309,
+      313,  319,  327,  335
     } ;
 
-static yyconst flex_int16_t yy_def[106] =
+static yyconst flex_int16_t yy_def[175] =
     {   0,
-       93,    1,    1,    1,    1,    5,   93,    7,    1,    1,
-       93,   93,   93,   93,   94,   95,   93,   96,   17,   97,
-       96,   93,   98,   99,   93,   93,   93,   94,   93,   94,
-      100,   93,  101,  102,   93,   93,   93,   96,   93,   93,
-       93,   96,   98,   99,   93,  103,  100,  104,  101,  101,
-      102,   93,   93,   93,   93,   93,  103,  104,   93,   93,
-       93,   93,   93,   93,   93,   93,   93,   93,   93,   93,
-       93,   93,   93,   93,   93,   93,   93,   93,   93,   93,
-       93,   93,   93,   93,   93,  105,   93,  105,   93,  105,
-       93,   93,    0,   93,   93,   93,   93,   93,   93,   93,
-
-       93,   93,   93,   93,   93
+      160,    1,    1,    1,    1,    5,  160,    7,    1,    1,
+      160,  160,  160,  160,  160,  161,  162,  163,  160,  160,
+      160,  160,  164,  160,  160,  160,  165,  164,  160,  166,
+      167,  166,  166,  160,  160,  160,  160,  161,  160,  161,
+      160,  168,  160,  163,  160,  163,  169,  170,  160,  160,
+      160,  160,  160,  160,  160,  160,  160,  164,  160,  160,
+      160,  160,  160,  160,  164,  166,  167,  166,  160,  160,
+      160,  171,  168,  172,  163,  169,  169,  170,  160,  160,
+      160,  160,  160,  160,  160,  160,  160,  166,  160,  160,
+      171,  172,  160,  160,  160,  160,  160,  160,  160,  160,
+
+      160,  160,  166,  160,  160,  160,  160,  160,  160,  160,
+      160,  173,  160,  166,  160,  160,  160,  160,  160,  160,
+      173,  160,  173,  160,  160,  160,  160,  160,  160,  160,
+      160,  160,  160,  160,  160,  160,  160,  160,  160,  160,
+      160,  160,  174,  160,  160,  160,  174,  160,  174,  160,
+      160,  160,  160,  160,  160,  160,  160,  160,  160,    0,
+      160,  160,  160,  160,  160,  160,  160,  160,  160,  160,
+      160,  160,  160,  160
     } ;
 
-static yyconst flex_int16_t yy_nxt[281] =
+static yyconst flex_int16_t yy_nxt[441] =
     {   0,
-       12,   13,   14,   15,   12,   16,   12,   12,   17,   12,
-       12,   12,   12,   18,   18,   18,   12,   12,   18,   18,
-       18,   18,   18,   18,   18,   18,   18,   18,   18,   18,
-       18,   12,   12,   19,   20,   20,   20,   92,   21,   25,
-       26,   26,   22,   21,   21,   21,   21,   12,   13,   14,
-       15,   23,   16,   23,   23,   19,   23,   23,   23,   12,
-       24,   24,   24,   12,   12,   24,   24,   24,   24,   24,
-       24,   24,   24,   24,   24,   24,   24,   24,   12,   12,
-       25,   26,   26,   27,   27,   27,   27,   29,   43,   29,
-       43,   43,   45,   45,   45,   50,   39,   59,   46,   93,
-
-       30,   33,   30,   34,   45,   45,   45,   27,   27,   68,
-       43,   91,   43,   43,   69,   35,   87,   36,   39,   37,
-       42,   42,   42,   39,   42,   45,   45,   45,   89,   42,
-       42,   42,   42,   85,   85,   86,   85,   85,   86,   89,
-       84,   90,   83,   82,   81,   80,   79,   78,   77,   76,
-       75,   74,   90,   28,   28,   28,   28,   28,   28,   28,
-       28,   31,   31,   31,   38,   38,   38,   38,   41,   73,
-       41,   43,   72,   43,   71,   43,   43,   44,   33,   44,
-       44,   44,   44,   47,   69,   47,   47,   49,   49,   49,
-       49,   49,   49,   49,   49,   51,   51,   51,   51,   51,
-
-       51,   51,   51,   57,   70,   57,   58,   58,   58,   67,
-       58,   58,   88,   88,   88,   88,   88,   88,   88,   88,
-       34,   66,   65,   64,   63,   62,   61,   60,   52,   50,
-       39,   56,   39,   55,   54,   53,   52,   50,   48,   93,
-       40,   39,   32,   93,   19,   19,   11,   93,   93,   93,
-       93,   93,   93,   93,   93,   93,   93,   93,   93,   93,
-       93,   93,   93,   93,   93,   93,   93,   93,   93,   93,
-       93,   93,   93,   93,   93,   93,   93,   93,   93,   93
+       12,   13,   14,   15,   16,   12,   17,   18,   12,   12,
+       12,   19,   12,   12,   12,   12,   20,   21,   22,   23,
+       23,   23,   23,   23,   12,   12,   23,   23,   23,   23,
+       23,   23,   23,   23,   23,   23,   23,   23,   23,   23,
+       23,   23,   12,   24,   12,   25,   34,   35,   35,   25,
+       81,   26,   26,   27,   27,   27,   34,   35,   35,   82,
+       28,   36,   36,   36,   36,  159,   29,   28,   28,   28,
+       28,   12,   13,   14,   15,   16,   30,   17,   18,   30,
+       30,   30,   26,   30,   30,   30,   12,   20,   21,   22,
+       31,   31,   31,   31,   31,   32,   12,   31,   31,   31,
+
+       31,   31,   31,   31,   31,   31,   31,   31,   31,   31,
+       31,   31,   31,   12,   24,   12,   39,   41,   45,   47,
+       53,   54,   48,   56,   57,   61,   61,   47,   66,   45,
+       48,   66,   66,   66,   39,   46,   40,   49,   59,   50,
+      158,   51,  122,   52,  157,   49,   46,   50,  136,   63,
+      137,   52,  156,   43,   40,   62,   65,   65,   65,   59,
+       61,   61,  123,   65,   75,   69,   69,   69,   36,   36,
+       65,   65,   65,   65,   70,   71,   72,   69,   69,   69,
+       45,   46,   61,   61,  109,   77,   70,   71,   93,  110,
+       68,   70,   71,   85,   85,   85,   66,   46,  155,   66,
+
+       66,   66,   69,   69,   69,  122,   59,  100,  100,   61,
+       61,   70,   71,  100,  100,  148,  112,  154,   85,   85,
+       85,   61,   61,  129,  129,  123,  129,  129,  135,  135,
+      135,  142,  142,  148,  143,  149,  153,  135,  135,  135,
+      142,  142,  160,  143,  152,  151,  150,  146,  145,  144,
+      141,  140,  139,  149,   38,   38,   38,   38,   38,   38,
+       38,   38,   42,  138,  134,  133,   42,   42,   44,   44,
+       44,   44,   44,   44,   44,   44,   58,   58,   58,   58,
+       64,  132,   64,   66,  131,  130,   66,  160,   66,   66,
+       67,  128,  127,   67,   67,   67,   67,   73,  126,   73,
+
+       73,   76,   76,   76,   76,   76,   76,   76,   76,   78,
+       78,   78,   78,   78,   78,   78,   78,   91,  125,   91,
+       92,  124,   92,   92,  120,   92,   92,  121,  121,  121,
+      121,  121,  121,  121,  121,  147,  147,  147,  147,  147,
+      147,  147,  147,  119,  118,  117,  116,  115,   47,  114,
+      110,  113,  111,  108,  107,  106,   48,  105,  104,   89,
+      103,  102,  101,   99,   98,   97,   96,   95,   94,   79,
+       77,   90,   89,   88,   59,   87,   86,   59,   84,   83,
+       80,   79,   77,   74,  160,   60,   59,   55,   37,  160,
+       33,   25,   26,   25,   11,  160,  160,  160,  160,  160,
+
+      160,  160,  160,  160,  160,  160,  160,  160,  160,  160,
+      160,  160,  160,  160,  160,  160,  160,  160,  160,  160,
+      160,  160,  160,  160,  160,  160,  160,  160,  160,  160,
+      160,  160,  160,  160,  160,  160,  160,  160,  160,  160
     } ;
 
-static yyconst flex_int16_t yy_chk[281] =
+static yyconst flex_int16_t yy_chk[441] =
     {   0,
         1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
         1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
         1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
-        1,    1,    1,    5,    5,    5,    5,   91,    5,    9,
-        9,    9,    5,    5,    5,    5,    5,    7,    7,    7,
+        1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
+        1,    1,    1,    1,    1,    4,    9,    9,    9,   10,
+       50,    4,    5,    5,    5,    5,   10,   10,   10,   50,
+        5,   13,   13,   14,   14,  158,    5,    5,    5,    5,
+        5,    7,    7,    7,    7,    7,    7,    7,    7,    7,
         7,    7,    7,    7,    7,    7,    7,    7,    7,    7,
         7,    7,    7,    7,    7,    7,    7,    7,    7,    7,
+
         7,    7,    7,    7,    7,    7,    7,    7,    7,    7,
-       10,   10,   10,   13,   13,   14,   14,   15,   24,   28,
-       24,   24,   25,   25,   25,   50,   24,   50,   25,   90,
-
-       15,   17,   28,   17,   26,   26,   26,   27,   27,   62,
-       44,   87,   44,   44,   62,   17,   84,   17,   44,   17,
-       21,   21,   21,   21,   21,   45,   45,   45,   86,   21,
-       21,   21,   21,   83,   83,   83,   85,   85,   85,   88,
-       82,   86,   81,   78,   77,   76,   75,   74,   73,   72,
-       71,   70,   88,   94,   94,   94,   94,   94,   94,   94,
-       94,   95,   95,   95,   96,   96,   96,   96,   97,   69,
-       97,   98,   68,   98,   67,   98,   98,   99,   66,   99,
-       99,   99,   99,  100,   64,  100,  100,  101,  101,  101,
-      101,  101,  101,  101,  101,  102,  102,  102,  102,  102,
-
-      102,  102,  102,  103,   63,  103,  104,  104,  104,   61,
-      104,  104,  105,  105,  105,  105,  105,  105,  105,  105,
-       60,   59,   58,   56,   55,   54,   53,   52,   51,   49,
-       42,   40,   38,   37,   36,   35,   34,   33,   32,   30,
-       19,   18,   16,   11,    4,    3,   93,   93,   93,   93,
-       93,   93,   93,   93,   93,   93,   93,   93,   93,   93,
-       93,   93,   93,   93,   93,   93,   93,   93,   93,   93,
-       93,   93,   93,   93,   93,   93,   93,   93,   93,   93
+        7,    7,    7,    7,    7,    7,   16,   17,   18,   19,
+       20,   20,   19,   22,   22,   25,   25,   26,   31,   44,
+       26,   31,   31,   31,   38,   18,   16,   19,   31,   19,
+      157,   19,  112,   19,  156,   26,   44,   26,  130,   26,
+      130,   26,  155,   17,   38,   25,   28,   28,   28,   28,
+       33,   33,  112,   28,   46,   34,   34,   34,   36,   36,
+       28,   28,   28,   28,   34,   34,   34,   35,   35,   35,
+       75,   46,   61,   61,   98,   77,   35,   35,   77,   98,
+       33,   91,   91,   61,   61,   61,   67,   75,  152,   67,
+
+       67,   67,   69,   69,   69,  121,   67,   85,   85,  113,
+      113,   69,   69,  100,  100,  143,  100,  151,   85,   85,
+       85,  114,  114,  122,  122,  121,  129,  129,  135,  135,
+      135,  138,  138,  147,  138,  143,  150,  129,  129,  129,
+      142,  142,  149,  142,  146,  145,  144,  141,  140,  139,
+      137,  136,  134,  147,  161,  161,  161,  161,  161,  161,
+      161,  161,  162,  133,  128,  127,  162,  162,  163,  163,
+      163,  163,  163,  163,  163,  163,  164,  164,  164,  164,
+      165,  126,  165,  166,  125,  124,  166,  123,  166,  166,
+      167,  120,  119,  167,  167,  167,  167,  168,  118,  168,
+
+      168,  169,  169,  169,  169,  169,  169,  169,  169,  170,
+      170,  170,  170,  170,  170,  170,  170,  171,  117,  171,
+      172,  116,  172,  172,  111,  172,  172,  173,  173,  173,
+      173,  173,  173,  173,  173,  174,  174,  174,  174,  174,
+      174,  174,  174,  110,  109,  108,  107,  106,  105,  103,
+      102,  101,   99,   97,   96,   95,   94,   93,   92,   90,
+       88,   87,   86,   84,   83,   82,   81,   80,   79,   78,
+       76,   71,   70,   68,   65,   63,   62,   58,   52,   51,
+       49,   48,   47,   43,   40,   24,   23,   21,   15,   11,
+        8,    6,    3,    2,  160,  160,  160,  160,  160,  160,
+
+      160,  160,  160,  160,  160,  160,  160,  160,  160,  160,
+      160,  160,  160,  160,  160,  160,  160,  160,  160,  160,
+      160,  160,  160,  160,  160,  160,  160,  160,  160,  160,
+      160,  160,  160,  160,  160,  160,  160,  160,  160,  160
     } ;
 
 static yy_state_type yy_last_accepting_state;
@@ -540,6 +607,7 @@ int yy_flex_debug = 0;
 #define YY_MORE_ADJ 0
 #define YY_RESTORE_YY_MORE_OFFSET
 char *yytext;
+#line 1 "dtc-lexer.l"
 /*
  * (C) Copyright David Gibson <dwg@au1.ibm.com>, IBM Corporation.  2005.
  *
@@ -561,6 +629,10 @@ char *yytext;
  */
 #define YY_NO_INPUT 1
 
+
+
+
+#line 38 "dtc-lexer.l"
 #include "dtc.h"
 #include "srcpos.h"
 #include "dtc-parser.tab.h"
@@ -588,6 +660,7 @@ static int dts_version = 1;
 
 static void push_input_file(const char *filename);
 static int pop_input_file(void);
+#line 664 "dtc-lexer.lex.c"
 
 #define INITIAL 0
 #define INCLUDE 1
@@ -670,7 +743,12 @@ static int input (void );
 
 /* Amount of stuff to slurp up with each read. */
 #ifndef YY_READ_BUF_SIZE
+#ifdef __ia64__
+/* On IA-64, the buffer size is 16k, not 8k */
+#define YY_READ_BUF_SIZE 16384
+#else
 #define YY_READ_BUF_SIZE 8192
+#endif /* __ia64__ */
 #endif
 
 /* Copy whatever the last rule matched to the standard output. */
@@ -689,7 +767,7 @@ static int input (void );
        if ( YY_CURRENT_BUFFER_LVALUE->yy_is_interactive ) \
                { \
                int c = '*'; \
-               unsigned n; \
+               size_t n; \
                for ( n = 0; n < max_size && \
                             (c = getc( yyin )) != EOF && c != '\n'; ++n ) \
                        buf[n] = (char) c; \
@@ -761,6 +839,9 @@ extern int yylex (void);
 #endif
 
 #define YY_RULE_SETUP \
+       if ( yyleng > 0 ) \
+               YY_CURRENT_BUFFER_LVALUE->yy_at_bol = \
+                               (yytext[yyleng - 1] == '\n'); \
        YY_USER_ACTION
 
 /** The main scanner function which does all the work.
@@ -771,6 +852,10 @@ YY_DECL
        register char *yy_cp, *yy_bp;
        register int yy_act;
     
+#line 67 "dtc-lexer.l"
+
+#line 858 "dtc-lexer.lex.c"
+
        if ( !(yy_init) )
                {
                (yy_init) = 1;
@@ -810,6 +895,7 @@ YY_DECL
                yy_bp = yy_cp;
 
                yy_current_state = (yy_start);
+               yy_current_state += YY_AT_BOL();
 yy_match:
                do
                        {
@@ -822,13 +908,13 @@ yy_match:
                        while ( yy_chk[yy_base[yy_current_state] + yy_c] != yy_current_state )
                                {
                                yy_current_state = (int) yy_def[yy_current_state];
-                               if ( yy_current_state >= 94 )
+                               if ( yy_current_state >= 161 )
                                        yy_c = yy_meta[(unsigned int) yy_c];
                                }
                        yy_current_state = yy_nxt[yy_base[yy_current_state] + (unsigned int) yy_c];
                        ++yy_cp;
                        }
-               while ( yy_current_state != 93 );
+               while ( yy_current_state != 160 );
                yy_cp = (yy_last_accepting_cpos);
                yy_current_state = (yy_last_accepting_state);
 
@@ -851,26 +937,54 @@ do_action:        /* This label is used only to access EOF actions. */
 case 1:
 /* rule 1 can match eol */
 YY_RULE_SETUP
+#line 68 "dtc-lexer.l"
 {
                        char *name = strchr(yytext, '\"') + 1;
                        yytext[yyleng-1] = '\0';
                        push_input_file(name);
                }
        YY_BREAK
+case 2:
+/* rule 2 can match eol */
+YY_RULE_SETUP
+#line 74 "dtc-lexer.l"
+{
+                       char *line, *tmp, *fn;
+                       /* skip text before line # */
+                       line = yytext;
+                       while (!isdigit(*line))
+                               line++;
+                       /* skip digits in line # */
+                       tmp = line;
+                       while (!isspace(*tmp))
+                               tmp++;
+                       /* "NULL"-terminate line # */
+                       *tmp = '\0';
+                       /* start of filename */
+                       fn = strchr(tmp + 1, '"') + 1;
+                       /* strip trailing " from filename */
+                       tmp = strchr(fn, '"');
+                       *tmp = 0;
+                       /* -1 since #line is the number of the next line */
+                       srcpos_set_line(xstrdup(fn), atoi(line) - 1);
+               }
+       YY_BREAK
 case YY_STATE_EOF(INITIAL):
 case YY_STATE_EOF(INCLUDE):
 case YY_STATE_EOF(BYTESTRING):
 case YY_STATE_EOF(PROPNODENAME):
 case YY_STATE_EOF(V1):
+#line 95 "dtc-lexer.l"
 {
                        if (!pop_input_file()) {
                                yyterminate();
                        }
                }
        YY_BREAK
-case 2:
-/* rule 2 can match eol */
+case 3:
+/* rule 3 can match eol */
 YY_RULE_SETUP
+#line 101 "dtc-lexer.l"
 {
                        DPRINT("String: %s\n", yytext);
                        yylval.data = data_copy_escape_string(yytext+1,
@@ -878,8 +992,9 @@ YY_RULE_SETUP
                        return DT_STRING;
                }
        YY_BREAK
-case 3:
+case 4:
 YY_RULE_SETUP
+#line 108 "dtc-lexer.l"
 {
                        DPRINT("Keyword: /dts-v1/\n");
                        dts_version = 1;
@@ -887,16 +1002,47 @@ YY_RULE_SETUP
                        return DT_V1;
                }
        YY_BREAK
-case 4:
+case 5:
 YY_RULE_SETUP
+#line 115 "dtc-lexer.l"
 {
                        DPRINT("Keyword: /memreserve/\n");
                        BEGIN_DEFAULT();
                        return DT_MEMRESERVE;
                }
        YY_BREAK
-case 5:
+case 6:
+YY_RULE_SETUP
+#line 121 "dtc-lexer.l"
+{
+                       DPRINT("Keyword: /bits/\n");
+                       BEGIN_DEFAULT();
+                       return DT_BITS;
+               }
+       YY_BREAK
+case 7:
 YY_RULE_SETUP
+#line 127 "dtc-lexer.l"
+{
+                       DPRINT("Keyword: /delete-property/\n");
+                       DPRINT("<PROPNODENAME>\n");
+                       BEGIN(PROPNODENAME);
+                       return DT_DEL_PROP;
+               }
+       YY_BREAK
+case 8:
+YY_RULE_SETUP
+#line 134 "dtc-lexer.l"
+{
+                       DPRINT("Keyword: /delete-node/\n");
+                       DPRINT("<PROPNODENAME>\n");
+                       BEGIN(PROPNODENAME);
+                       return DT_DEL_NODE;
+               }
+       YY_BREAK
+case 9:
+YY_RULE_SETUP
+#line 141 "dtc-lexer.l"
 {
                        DPRINT("Label: %s\n", yytext);
                        yylval.labelref = xstrdup(yytext);
@@ -904,24 +1050,38 @@ YY_RULE_SETUP
                        return DT_LABEL;
                }
        YY_BREAK
-case 6:
+case 10:
 YY_RULE_SETUP
+#line 148 "dtc-lexer.l"
 {
                        yylval.literal = xstrdup(yytext);
                        DPRINT("Literal: '%s'\n", yylval.literal);
                        return DT_LITERAL;
                }
        YY_BREAK
-case 7:
+case 11:
+/* rule 11 can match eol */
+YY_RULE_SETUP
+#line 154 "dtc-lexer.l"
+{
+                       yytext[yyleng-1] = '\0';
+                       yylval.literal = xstrdup(yytext+1);
+                       DPRINT("Character literal: %s\n", yylval.literal);
+                       return DT_CHAR_LITERAL;
+               }
+       YY_BREAK
+case 12:
 YY_RULE_SETUP
+#line 161 "dtc-lexer.l"
 {      /* label reference */
                        DPRINT("Ref: %s\n", yytext+1);
                        yylval.labelref = xstrdup(yytext+1);
                        return DT_REF;
                }
        YY_BREAK
-case 8:
+case 13:
 YY_RULE_SETUP
+#line 167 "dtc-lexer.l"
 {      /* new-style path reference */
                        yytext[yyleng-1] = '\0';
                        DPRINT("Ref: %s\n", yytext+2);
@@ -929,55 +1089,104 @@ YY_RULE_SETUP
                        return DT_REF;
                }
        YY_BREAK
-case 9:
+case 14:
 YY_RULE_SETUP
+#line 174 "dtc-lexer.l"
 {
                        yylval.byte = strtol(yytext, NULL, 16);
                        DPRINT("Byte: %02x\n", (int)yylval.byte);
                        return DT_BYTE;
                }
        YY_BREAK
-case 10:
+case 15:
 YY_RULE_SETUP
+#line 180 "dtc-lexer.l"
 {
                        DPRINT("/BYTESTRING\n");
                        BEGIN_DEFAULT();
                        return ']';
                }
        YY_BREAK
-case 11:
+case 16:
 YY_RULE_SETUP
+#line 186 "dtc-lexer.l"
 {
                        DPRINT("PropNodeName: %s\n", yytext);
-                       yylval.propnodename = xstrdup(yytext);
+                       yylval.propnodename = xstrdup((yytext[0] == '\\') ?
+                                                       yytext + 1 : yytext);
                        BEGIN_DEFAULT();
                        return DT_PROPNODENAME;
                }
        YY_BREAK
-case 12:
+case 17:
 YY_RULE_SETUP
+#line 194 "dtc-lexer.l"
 {
                        DPRINT("Binary Include\n");
                        return DT_INCBIN;
                }
        YY_BREAK
-case 13:
-/* rule 13 can match eol */
+case 18:
+/* rule 18 can match eol */
 YY_RULE_SETUP
+#line 199 "dtc-lexer.l"
 /* eat whitespace */
        YY_BREAK
-case 14:
-/* rule 14 can match eol */
+case 19:
+/* rule 19 can match eol */
 YY_RULE_SETUP
+#line 200 "dtc-lexer.l"
 /* eat C-style comments */
        YY_BREAK
-case 15:
-/* rule 15 can match eol */
+case 20:
+/* rule 20 can match eol */
 YY_RULE_SETUP
+#line 201 "dtc-lexer.l"
 /* eat C++-style comments */
        YY_BREAK
-case 16:
+case 21:
 YY_RULE_SETUP
+#line 203 "dtc-lexer.l"
+{ return DT_LSHIFT; };
+       YY_BREAK
+case 22:
+YY_RULE_SETUP
+#line 204 "dtc-lexer.l"
+{ return DT_RSHIFT; };
+       YY_BREAK
+case 23:
+YY_RULE_SETUP
+#line 205 "dtc-lexer.l"
+{ return DT_LE; };
+       YY_BREAK
+case 24:
+YY_RULE_SETUP
+#line 206 "dtc-lexer.l"
+{ return DT_GE; };
+       YY_BREAK
+case 25:
+YY_RULE_SETUP
+#line 207 "dtc-lexer.l"
+{ return DT_EQ; };
+       YY_BREAK
+case 26:
+YY_RULE_SETUP
+#line 208 "dtc-lexer.l"
+{ return DT_NE; };
+       YY_BREAK
+case 27:
+YY_RULE_SETUP
+#line 209 "dtc-lexer.l"
+{ return DT_AND; };
+       YY_BREAK
+case 28:
+YY_RULE_SETUP
+#line 210 "dtc-lexer.l"
+{ return DT_OR; };
+       YY_BREAK
+case 29:
+YY_RULE_SETUP
+#line 212 "dtc-lexer.l"
 {
                        DPRINT("Char: %c (\\x%02x)\n", yytext[0],
                                (unsigned)yytext[0]);
@@ -993,10 +1202,12 @@ YY_RULE_SETUP
                        return yytext[0];
                }
        YY_BREAK
-case 17:
+case 30:
 YY_RULE_SETUP
+#line 227 "dtc-lexer.l"
 ECHO;
        YY_BREAK
+#line 1211 "dtc-lexer.lex.c"
 
        case YY_END_OF_BUFFER:
                {
@@ -1275,6 +1486,7 @@ static int yy_get_next_buffer (void)
        register char *yy_cp;
     
        yy_current_state = (yy_start);
+       yy_current_state += YY_AT_BOL();
 
        for ( yy_cp = (yytext_ptr) + YY_MORE_ADJ; yy_cp < (yy_c_buf_p); ++yy_cp )
                {
@@ -1287,7 +1499,7 @@ static int yy_get_next_buffer (void)
                while ( yy_chk[yy_base[yy_current_state] + yy_c] != yy_current_state )
                        {
                        yy_current_state = (int) yy_def[yy_current_state];
-                       if ( yy_current_state >= 94 )
+                       if ( yy_current_state >= 161 )
                                yy_c = yy_meta[(unsigned int) yy_c];
                        }
                yy_current_state = yy_nxt[yy_base[yy_current_state] + (unsigned int) yy_c];
@@ -1315,11 +1527,11 @@ static int yy_get_next_buffer (void)
        while ( yy_chk[yy_base[yy_current_state] + yy_c] != yy_current_state )
                {
                yy_current_state = (int) yy_def[yy_current_state];
-               if ( yy_current_state >= 94 )
+               if ( yy_current_state >= 161 )
                        yy_c = yy_meta[(unsigned int) yy_c];
                }
        yy_current_state = yy_nxt[yy_base[yy_current_state] + (unsigned int) yy_c];
-       yy_is_jam = (yy_current_state == 93);
+       yy_is_jam = (yy_current_state == 160);
 
        return yy_is_jam ? 0 : yy_current_state;
 }
@@ -1394,6 +1606,8 @@ static int yy_get_next_buffer (void)
        *(yy_c_buf_p) = '\0';   /* preserve yytext */
        (yy_hold_char) = *++(yy_c_buf_p);
 
+       YY_CURRENT_BUFFER_LVALUE->yy_at_bol = (c == '\n');
+
        return c;
 }
 #endif /* ifndef YY_NO_INPUT */
@@ -1712,8 +1926,8 @@ YY_BUFFER_STATE yy_scan_string (yyconst char * yystr )
 
 /** Setup the input buffer state to scan the given bytes. The next call to yylex() will
  * scan from a @e copy of @a bytes.
- * @param bytes the byte buffer to scan
- * @param len the number of bytes in the buffer pointed to by @a bytes.
+ * @param yybytes the byte buffer to scan
+ * @param _yybytes_len the number of bytes in the buffer pointed to by @a bytes.
  * 
  * @return the newly allocated buffer state object.
  */
@@ -1952,6 +2166,10 @@ void yyfree (void * ptr )
 
 #define YYTABLES_NAME "yytables"
 
+#line 227 "dtc-lexer.l"
+
+
+
 static void push_input_file(const char *filename)
 {
        assert(filename);
@@ -1963,6 +2181,7 @@ static void push_input_file(const char *filename)
        yypush_buffer_state(yy_create_buffer(yyin,YY_BUF_SIZE));
 }
 
+
 static int pop_input_file(void)
 {
        if (srcfile_pop() == 0)
index b05921e1e848c3e682eedcdeb8c901ea54b8b2eb..4af55900a15ba22f1c5b37239038e7d8bb4090ed 100644 (file)
@@ -1,9 +1,10 @@
-/* A Bison parser, made by GNU Bison 2.4.3.  */
+
+/* A Bison parser, made by GNU Bison 2.4.1.  */
 
 /* Skeleton implementation for Bison's Yacc-like parsers in C
    
-      Copyright (C) 1984, 1989, 1990, 2000, 2001, 2002, 2003, 2004, 2005, 2006,
-   2009, 2010 Free Software Foundation, Inc.
+      Copyright (C) 1984, 1989, 1990, 2000, 2001, 2002, 2003, 2004, 2005, 2006
+   Free Software Foundation, Inc.
    
    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
@@ -45,7 +46,7 @@
 #define YYBISON 1
 
 /* Bison version.  */
-#define YYBISON_VERSION "2.4.3"
+#define YYBISON_VERSION "2.4.1"
 
 /* Skeleton name.  */
 #define YYSKELETON_NAME "yacc.c"
@@ -66,6 +67,8 @@
 
 /* Copy the first part of user declarations.  */
 
+/* Line 189 of yacc.c  */
+#line 21 "dtc-parser.y"
 
 #include <stdio.h>
 
@@ -82,12 +85,15 @@ extern struct boot_info *the_boot_info;
 extern int treesource_error;
 
 static unsigned long long eval_literal(const char *s, int base, int bits);
+static unsigned char eval_char_literal(const char *s);
 
 
+/* Line 189 of yacc.c  */
+#line 93 "dtc-parser.tab.c"
 
 /* Enabling traces.  */
 #ifndef YYDEBUG
-# define YYDEBUG 1
+# define YYDEBUG 0
 #endif
 
 /* Enabling verbose error messages.  */
@@ -112,14 +118,26 @@ static unsigned long long eval_literal(const char *s, int base, int bits);
    enum yytokentype {
      DT_V1 = 258,
      DT_MEMRESERVE = 259,
-     DT_PROPNODENAME = 260,
-     DT_LITERAL = 261,
-     DT_BASE = 262,
-     DT_BYTE = 263,
-     DT_STRING = 264,
-     DT_LABEL = 265,
-     DT_REF = 266,
-     DT_INCBIN = 267
+     DT_LSHIFT = 260,
+     DT_RSHIFT = 261,
+     DT_LE = 262,
+     DT_GE = 263,
+     DT_EQ = 264,
+     DT_NE = 265,
+     DT_AND = 266,
+     DT_OR = 267,
+     DT_BITS = 268,
+     DT_DEL_PROP = 269,
+     DT_DEL_NODE = 270,
+     DT_PROPNODENAME = 271,
+     DT_LITERAL = 272,
+     DT_CHAR_LITERAL = 273,
+     DT_BASE = 274,
+     DT_BYTE = 275,
+     DT_STRING = 276,
+     DT_LABEL = 277,
+     DT_REF = 278,
+     DT_INCBIN = 279
    };
 #endif
 
@@ -129,6 +147,8 @@ static unsigned long long eval_literal(const char *s, int base, int bits);
 typedef union YYSTYPE
 {
 
+/* Line 214 of yacc.c  */
+#line 40 "dtc-parser.y"
 
        char *propnodename;
        char *literal;
@@ -137,16 +157,22 @@ typedef union YYSTYPE
        uint8_t byte;
        struct data data;
 
-       uint64_t addr;
-       cell_t cell;
+       struct {
+               struct data     data;
+               int             bits;
+       } array;
+
        struct property *prop;
        struct property *proplist;
        struct node *node;
        struct node *nodelist;
        struct reserve_info *re;
+       uint64_t integer;
 
 
 
+/* Line 214 of yacc.c  */
+#line 176 "dtc-parser.tab.c"
 } YYSTYPE;
 # define YYSTYPE_IS_TRIVIAL 1
 # define yystype YYSTYPE /* obsolescent; will be withdrawn */
@@ -157,6 +183,8 @@ typedef union YYSTYPE
 /* Copy the second part of user declarations.  */
 
 
+/* Line 264 of yacc.c  */
+#line 188 "dtc-parser.tab.c"
 
 #ifdef short
 # undef short
@@ -206,7 +234,7 @@ typedef short int yytype_int16;
 #define YYSIZE_MAXIMUM ((YYSIZE_T) -1)
 
 #ifndef YY_
-# if defined YYENABLE_NLS && YYENABLE_NLS
+# if YYENABLE_NLS
 #  if ENABLE_NLS
 #   include <libintl.h> /* INFRINGES ON USER NAME SPACE */
 #   define YY_(msgid) dgettext ("bison-runtime", msgid)
@@ -371,20 +399,20 @@ union yyalloc
 /* YYFINAL -- State number of the termination state.  */
 #define YYFINAL  4
 /* YYLAST -- Last index in YYTABLE.  */
-#define YYLAST   56
+#define YYLAST   133
 
 /* YYNTOKENS -- Number of terminals.  */
-#define YYNTOKENS  25
+#define YYNTOKENS  48
 /* YYNNTS -- Number of nonterminals.  */
-#define YYNNTS  16
+#define YYNNTS  28
 /* YYNRULES -- Number of rules.  */
-#define YYNRULES  39
+#define YYNRULES  79
 /* YYNRULES -- Number of states.  */
-#define YYNSTATES  67
+#define YYNSTATES  141
 
 /* YYTRANSLATE(YYLEX) -- Bison symbol number corresponding to YYLEX.  */
 #define YYUNDEFTOK  2
-#define YYMAXUTOK   267
+#define YYMAXUTOK   279
 
 #define YYTRANSLATE(YYX)                                               \
   ((unsigned int) (YYX) <= YYMAXUTOK ? yytranslate[YYX] : YYUNDEFTOK)
@@ -395,16 +423,16 @@ static const yytype_uint8 yytranslate[] =
        0,     2,     2,     2,     2,     2,     2,     2,     2,     2,
        2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
        2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
-       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
-      22,    24,     2,     2,    23,     2,     2,    14,     2,     2,
-       2,     2,     2,     2,     2,     2,     2,     2,     2,    13,
-      18,    17,    19,     2,     2,     2,     2,     2,     2,     2,
+       2,     2,     2,    47,     2,     2,     2,    45,    41,     2,
+      33,    35,    44,    42,    34,    43,     2,    26,     2,     2,
+       2,     2,     2,     2,     2,     2,     2,     2,    38,    25,
+      36,    29,    30,    37,     2,     2,     2,     2,     2,     2,
        2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
        2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
-       2,    20,     2,    21,     2,     2,     2,     2,     2,     2,
+       2,    31,     2,    32,    40,     2,     2,     2,     2,     2,
        2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
        2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
-       2,     2,     2,    15,     2,    16,     2,     2,     2,     2,
+       2,     2,     2,    27,    39,    28,    46,     2,     2,     2,
        2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
        2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
        2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
@@ -418,45 +446,68 @@ static const yytype_uint8 yytranslate[] =
        2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
        2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
        2,     2,     2,     2,     2,     2,     1,     2,     3,     4,
-       5,     6,     7,     8,     9,    10,    11,    12
+       5,     6,     7,     8,     9,    10,    11,    12,    13,    14,
+      15,    16,    17,    18,    19,    20,    21,    22,    23,    24
 };
 
 #if YYDEBUG
 /* YYPRHS[YYN] -- Index of the first RHS symbol of rule number YYN in
    YYRHS.  */
-static const yytype_uint8 yyprhs[] =
+static const yytype_uint16 yyprhs[] =
 {
-       0,     0,     3,     8,     9,    12,    17,    20,    22,    25,
-      29,    33,    39,    40,    43,    48,    51,    54,    57,    62,
-      67,    70,    80,    86,    89,    90,    93,    96,    97,   100,
-     103,   106,   108,   109,   112,   115,   116,   119,   122,   125
+       0,     0,     3,     8,     9,    12,    17,    20,    23,    27,
+      31,    36,    42,    43,    46,    51,    54,    58,    61,    64,
+      68,    73,    76,    86,    92,    95,    96,    99,   102,   106,
+     108,   111,   114,   117,   119,   121,   125,   127,   129,   135,
+     137,   141,   143,   147,   149,   153,   155,   159,   161,   165,
+     167,   171,   175,   177,   181,   185,   189,   193,   197,   201,
+     203,   207,   211,   213,   217,   221,   225,   227,   229,   232,
+     235,   238,   239,   242,   245,   246,   249,   252,   255,   259
 };
 
 /* YYRHS -- A `-1'-separated list of the rules' RHS.  */
 static const yytype_int8 yyrhs[] =
 {
-      26,     0,    -1,     3,    13,    27,    30,    -1,    -1,    28,
-      27,    -1,     4,    29,    29,    13,    -1,    10,    28,    -1,
-       6,    -1,    14,    31,    -1,    30,    14,    31,    -1,    30,
-      11,    31,    -1,    15,    32,    39,    16,    13,    -1,    -1,
-      32,    33,    -1,     5,    17,    34,    13,    -1,     5,    13,
-      -1,    10,    33,    -1,    35,     9,    -1,    35,    18,    36,
-      19,    -1,    35,    20,    38,    21,    -1,    35,    11,    -1,
-      35,    12,    22,     9,    23,    29,    23,    29,    24,    -1,
-      35,    12,    22,     9,    24,    -1,    34,    10,    -1,    -1,
-      34,    23,    -1,    35,    10,    -1,    -1,    36,    37,    -1,
-      36,    11,    -1,    36,    10,    -1,     6,    -1,    -1,    38,
-       8,    -1,    38,    10,    -1,    -1,    40,    39,    -1,    40,
-      33,    -1,     5,    31,    -1,    10,    40,    -1
+      49,     0,    -1,     3,    25,    50,    52,    -1,    -1,    51,
+      50,    -1,     4,    59,    59,    25,    -1,    22,    51,    -1,
+      26,    53,    -1,    52,    26,    53,    -1,    52,    23,    53,
+      -1,    52,    15,    23,    25,    -1,    27,    54,    74,    28,
+      25,    -1,    -1,    54,    55,    -1,    16,    29,    56,    25,
+      -1,    16,    25,    -1,    14,    16,    25,    -1,    22,    55,
+      -1,    57,    21,    -1,    57,    58,    30,    -1,    57,    31,
+      73,    32,    -1,    57,    23,    -1,    57,    24,    33,    21,
+      34,    59,    34,    59,    35,    -1,    57,    24,    33,    21,
+      35,    -1,    56,    22,    -1,    -1,    56,    34,    -1,    57,
+      22,    -1,    13,    17,    36,    -1,    36,    -1,    58,    59,
+      -1,    58,    23,    -1,    58,    22,    -1,    17,    -1,    18,
+      -1,    33,    60,    35,    -1,    61,    -1,    62,    -1,    62,
+      37,    60,    38,    61,    -1,    63,    -1,    62,    12,    63,
+      -1,    64,    -1,    63,    11,    64,    -1,    65,    -1,    64,
+      39,    65,    -1,    66,    -1,    65,    40,    66,    -1,    67,
+      -1,    66,    41,    67,    -1,    68,    -1,    67,     9,    68,
+      -1,    67,    10,    68,    -1,    69,    -1,    68,    36,    69,
+      -1,    68,    30,    69,    -1,    68,     7,    69,    -1,    68,
+       8,    69,    -1,    69,     5,    70,    -1,    69,     6,    70,
+      -1,    70,    -1,    70,    42,    71,    -1,    70,    43,    71,
+      -1,    71,    -1,    71,    44,    72,    -1,    71,    26,    72,
+      -1,    71,    45,    72,    -1,    72,    -1,    59,    -1,    43,
+      72,    -1,    46,    72,    -1,    47,    72,    -1,    -1,    73,
+      20,    -1,    73,    22,    -1,    -1,    75,    74,    -1,    75,
+      55,    -1,    16,    53,    -1,    15,    16,    25,    -1,    22,
+      75,    -1
 };
 
 /* YYRLINE[YYN] -- source line where rule number YYN was defined.  */
 static const yytype_uint16 yyrline[] =
 {
-       0,    86,    86,    95,    98,   105,   109,   117,   124,   128,
-     132,   145,   153,   156,   163,   167,   171,   179,   183,   187,
-     191,   195,   212,   222,   230,   233,   237,   245,   248,   252,
-     257,   264,   272,   275,   279,   287,   290,   294,   302,   306
+       0,   109,   109,   118,   121,   128,   132,   140,   144,   148,
+     158,   172,   180,   183,   190,   194,   198,   202,   210,   214,
+     218,   222,   226,   243,   253,   261,   264,   268,   275,   290,
+     295,   315,   329,   336,   340,   344,   351,   355,   356,   360,
+     361,   365,   366,   370,   371,   375,   376,   380,   381,   385,
+     386,   387,   391,   392,   393,   394,   395,   399,   400,   401,
+     405,   406,   407,   411,   412,   413,   414,   418,   419,   420,
+     421,   426,   429,   433,   441,   444,   448,   456,   460,   464
 };
 #endif
 
@@ -465,13 +516,19 @@ static const yytype_uint16 yyrline[] =
    First, the terminals, then, starting at YYNTOKENS, nonterminals.  */
 static const char *const yytname[] =
 {
-  "$end", "error", "$undefined", "DT_V1", "DT_MEMRESERVE",
-  "DT_PROPNODENAME", "DT_LITERAL", "DT_BASE", "DT_BYTE", "DT_STRING",
-  "DT_LABEL", "DT_REF", "DT_INCBIN", "';'", "'/'", "'{'", "'}'", "'='",
-  "'<'", "'>'", "'['", "']'", "'('", "','", "')'", "$accept", "sourcefile",
-  "memreserves", "memreserve", "addr", "devicetree", "nodedef", "proplist",
-  "propdef", "propdata", "propdataprefix", "celllist", "cellval",
-  "bytestring", "subnodes", "subnode", 0
+  "$end", "error", "$undefined", "DT_V1", "DT_MEMRESERVE", "DT_LSHIFT",
+  "DT_RSHIFT", "DT_LE", "DT_GE", "DT_EQ", "DT_NE", "DT_AND", "DT_OR",
+  "DT_BITS", "DT_DEL_PROP", "DT_DEL_NODE", "DT_PROPNODENAME", "DT_LITERAL",
+  "DT_CHAR_LITERAL", "DT_BASE", "DT_BYTE", "DT_STRING", "DT_LABEL",
+  "DT_REF", "DT_INCBIN", "';'", "'/'", "'{'", "'}'", "'='", "'>'", "'['",
+  "']'", "'('", "','", "')'", "'<'", "'?'", "':'", "'|'", "'^'", "'&'",
+  "'+'", "'-'", "'*'", "'%'", "'~'", "'!'", "$accept", "sourcefile",
+  "memreserves", "memreserve", "devicetree", "nodedef", "proplist",
+  "propdef", "propdata", "propdataprefix", "arrayprefix", "integer_prim",
+  "integer_expr", "integer_trinary", "integer_or", "integer_and",
+  "integer_bitor", "integer_bitxor", "integer_bitand", "integer_eq",
+  "integer_rela", "integer_shift", "integer_add", "integer_mul",
+  "integer_unary", "bytestring", "subnodes", "subnode", 0
 };
 #endif
 
@@ -481,27 +538,37 @@ static const char *const yytname[] =
 static const yytype_uint16 yytoknum[] =
 {
        0,   256,   257,   258,   259,   260,   261,   262,   263,   264,
-     265,   266,   267,    59,    47,   123,   125,    61,    60,    62,
-      91,    93,    40,    44,    41
+     265,   266,   267,   268,   269,   270,   271,   272,   273,   274,
+     275,   276,   277,   278,   279,    59,    47,   123,   125,    61,
+      62,    91,    93,    40,    44,    41,    60,    63,    58,   124,
+      94,    38,    43,    45,    42,    37,   126,    33
 };
 # endif
 
 /* YYR1[YYN] -- Symbol number of symbol that rule YYN derives.  */
 static const yytype_uint8 yyr1[] =
 {
-       0,    25,    26,    27,    27,    28,    28,    29,    30,    30,
-      30,    31,    32,    32,    33,    33,    33,    34,    34,    34,
-      34,    34,    34,    34,    35,    35,    35,    36,    36,    36,
-      36,    37,    38,    38,    38,    39,    39,    39,    40,    40
+       0,    48,    49,    50,    50,    51,    51,    52,    52,    52,
+      52,    53,    54,    54,    55,    55,    55,    55,    56,    56,
+      56,    56,    56,    56,    56,    57,    57,    57,    58,    58,
+      58,    58,    58,    59,    59,    59,    60,    61,    61,    62,
+      62,    63,    63,    64,    64,    65,    65,    66,    66,    67,
+      67,    67,    68,    68,    68,    68,    68,    69,    69,    69,
+      70,    70,    70,    71,    71,    71,    71,    72,    72,    72,
+      72,    73,    73,    73,    74,    74,    74,    75,    75,    75
 };
 
 /* YYR2[YYN] -- Number of symbols composing right hand side of rule YYN.  */
 static const yytype_uint8 yyr2[] =
 {
-       0,     2,     4,     0,     2,     4,     2,     1,     2,     3,
-       3,     5,     0,     2,     4,     2,     2,     2,     4,     4,
-       2,     9,     5,     2,     0,     2,     2,     0,     2,     2,
-       2,     1,     0,     2,     2,     0,     2,     2,     2,     2
+       0,     2,     4,     0,     2,     4,     2,     2,     3,     3,
+       4,     5,     0,     2,     4,     2,     3,     2,     2,     3,
+       4,     2,     9,     5,     2,     0,     2,     2,     3,     1,
+       2,     2,     2,     1,     1,     3,     1,     1,     5,     1,
+       3,     1,     3,     1,     3,     1,     3,     1,     3,     1,
+       3,     3,     1,     3,     3,     3,     3,     3,     3,     1,
+       3,     3,     1,     3,     3,     3,     1,     1,     2,     2,
+       2,     0,     2,     2,     0,     2,     2,     2,     3,     2
 };
 
 /* YYDEFACT[STATE-NAME] -- Default rule to reduce with in state
@@ -509,41 +576,59 @@ static const yytype_uint8 yyr2[] =
    means the default is an error.  */
 static const yytype_uint8 yydefact[] =
 {
-       0,     0,     0,     3,     1,     0,     0,     0,     3,     7,
-       0,     6,     0,     2,     4,     0,    12,     8,     0,     0,
-       5,    35,    10,     9,     0,     0,    13,     0,    35,    15,
-      24,    38,    16,    39,     0,    37,    36,     0,     0,    11,
-      23,    14,    25,    17,    26,    20,     0,    27,    32,     0,
-       0,     0,     0,    31,    30,    29,    18,    28,    33,    34,
-      19,     0,    22,     0,     0,     0,    21
+       0,     0,     0,     3,     1,     0,     0,     0,     3,    33,
+      34,     0,     0,     6,     0,     2,     4,     0,     0,     0,
+      67,     0,    36,    37,    39,    41,    43,    45,    47,    49,
+      52,    59,    62,    66,     0,    12,     7,     0,     0,     0,
+      68,    69,    70,    35,     0,     0,     0,     0,     0,     0,
+       0,     0,     0,     0,     0,     0,     0,     0,     0,     0,
+       0,     0,     0,     5,    74,     0,     9,     8,    40,     0,
+      42,    44,    46,    48,    50,    51,    55,    56,    54,    53,
+      57,    58,    60,    61,    64,    63,    65,     0,     0,     0,
+       0,    13,     0,    74,    10,     0,     0,     0,    15,    25,
+      77,    17,    79,     0,    76,    75,    38,    16,    78,     0,
+       0,    11,    24,    14,    26,     0,    18,    27,    21,     0,
+      71,    29,     0,     0,     0,     0,    32,    31,    19,    30,
+      28,     0,    72,    73,    20,     0,    23,     0,     0,     0,
+      22
 };
 
 /* YYDEFGOTO[NTERM-NUM].  */
 static const yytype_int8 yydefgoto[] =
 {
-      -1,     2,     7,     8,    10,    13,    17,    21,    26,    37,
-      38,    50,    57,    51,    27,    28
+      -1,     2,     7,     8,    15,    36,    64,    91,   109,   110,
+     122,    20,    21,    22,    23,    24,    25,    26,    27,    28,
+      29,    30,    31,    32,    33,   125,    92,    93
 };
 
 /* YYPACT[STATE-NUM] -- Index in YYTABLE of the portion describing
    STATE-NUM.  */
-#define YYPACT_NINF -12
+#define YYPACT_NINF -78
 static const yytype_int8 yypact[] =
 {
-      10,   -11,    18,    -1,   -12,    22,    -1,    15,    -1,   -12,
-      22,   -12,    20,     1,   -12,    17,   -12,   -12,    20,    20,
-     -12,     6,   -12,   -12,    21,     6,   -12,    23,     6,   -12,
-     -12,   -12,   -12,   -12,    28,   -12,   -12,    -6,    13,   -12,
-     -12,   -12,   -12,   -12,   -12,   -12,    24,   -12,   -12,    33,
-      -5,     0,    -4,   -12,   -12,   -12,   -12,   -12,   -12,   -12,
-     -12,    22,   -12,    25,    22,    19,   -12
+      22,    11,    51,    10,   -78,    23,    10,     2,    10,   -78,
+     -78,    -9,    23,   -78,    30,    38,   -78,    -9,    -9,    -9,
+     -78,    35,   -78,    -6,    52,    29,    48,    49,    33,     3,
+      71,    36,     0,   -78,    64,   -78,   -78,    68,    30,    30,
+     -78,   -78,   -78,   -78,    -9,    -9,    -9,    -9,    -9,    -9,
+      -9,    -9,    -9,    -9,    -9,    -9,    -9,    -9,    -9,    -9,
+      -9,    -9,    -9,   -78,    44,    67,   -78,   -78,    52,    55,
+      29,    48,    49,    33,     3,     3,    71,    71,    71,    71,
+      36,    36,     0,     0,   -78,   -78,   -78,    78,    79,    42,
+      44,   -78,    69,    44,   -78,    -9,    73,    74,   -78,   -78,
+     -78,   -78,   -78,    75,   -78,   -78,   -78,   -78,   -78,    -7,
+      -1,   -78,   -78,   -78,   -78,    84,   -78,   -78,   -78,    63,
+     -78,   -78,    32,    66,    82,    -3,   -78,   -78,   -78,   -78,
+     -78,    46,   -78,   -78,   -78,    23,   -78,    70,    23,    72,
+     -78
 };
 
 /* YYPGOTO[NTERM-NUM].  */
 static const yytype_int8 yypgoto[] =
 {
-     -12,   -12,    36,    39,   -10,   -12,     8,   -12,    12,   -12,
-     -12,   -12,   -12,   -12,    27,    31
+     -78,   -78,    97,   100,   -78,   -37,   -78,   -77,   -78,   -78,
+     -78,    -5,    65,    13,   -78,    76,    77,    62,    80,    83,
+      34,    20,    26,    28,   -14,   -78,    18,    24
 };
 
 /* YYTABLE[YYPACT[STATE-NUM]].  What to do in state STATE-NUM.  If
@@ -553,35 +638,59 @@ static const yytype_int8 yypgoto[] =
 #define YYTABLE_NINF -1
 static const yytype_uint8 yytable[] =
 {
-      15,    53,     3,     5,    40,    54,    55,    41,    58,     6,
-      59,    24,    18,     1,    56,    19,    25,    42,     4,    61,
-      62,    60,    43,    44,    45,    46,    22,    23,     9,    12,
-      20,    47,    31,    48,    29,    16,    16,    32,    30,    34,
-      35,    39,    52,    66,    14,    11,    49,     0,    64,     0,
-       0,    63,     0,     0,    65,    36,    33
+      12,    66,    67,    40,    41,    42,    44,    34,     9,    10,
+      52,    53,   115,   101,     5,   112,   104,   132,   113,   133,
+     116,   117,   118,   119,    11,     1,    60,   114,    14,   134,
+     120,    45,     6,    54,    17,   121,     3,    18,    19,    55,
+       9,    10,    50,    51,    61,    62,    84,    85,    86,     9,
+      10,     4,   100,    37,   126,   127,    11,    35,    87,    88,
+      89,    38,   128,    46,    39,    11,    90,    98,    47,    35,
+      43,    99,    76,    77,    78,    79,    56,    57,    58,    59,
+     135,   136,    80,    81,    74,    75,    82,    83,    48,    63,
+      49,    65,    94,    95,    96,    97,   124,   103,   107,   108,
+     111,   123,   130,   131,   138,    16,    13,   140,   106,    71,
+      69,   105,     0,     0,   102,     0,     0,   129,     0,     0,
+      68,     0,     0,    70,     0,     0,     0,     0,    72,     0,
+     137,     0,    73,   139
 };
 
-static const yytype_int8 yycheck[] =
+static const yytype_int16 yycheck[] =
 {
-      10,     6,    13,     4,    10,    10,    11,    13,     8,    10,
-      10,     5,    11,     3,    19,    14,    10,    23,     0,    23,
-      24,    21,     9,    10,    11,    12,    18,    19,     6,    14,
-      13,    18,    24,    20,    13,    15,    15,    25,    17,    16,
-      28,    13,     9,    24,     8,     6,    22,    -1,    23,    -1,
-      -1,    61,    -1,    -1,    64,    28,    25
+       5,    38,    39,    17,    18,    19,    12,    12,    17,    18,
+       7,     8,    13,    90,     4,    22,    93,    20,    25,    22,
+      21,    22,    23,    24,    33,     3,    26,    34,    26,    32,
+      31,    37,    22,    30,    43,    36,    25,    46,    47,    36,
+      17,    18,     9,    10,    44,    45,    60,    61,    62,    17,
+      18,     0,    89,    15,    22,    23,    33,    27,    14,    15,
+      16,    23,    30,    11,    26,    33,    22,    25,    39,    27,
+      35,    29,    52,    53,    54,    55,     5,     6,    42,    43,
+      34,    35,    56,    57,    50,    51,    58,    59,    40,    25,
+      41,    23,    25,    38,    16,    16,    33,    28,    25,    25,
+      25,    17,    36,    21,    34,     8,     6,    35,    95,    47,
+      45,    93,    -1,    -1,    90,    -1,    -1,   122,    -1,    -1,
+      44,    -1,    -1,    46,    -1,    -1,    -1,    -1,    48,    -1,
+     135,    -1,    49,   138
 };
 
 /* YYSTOS[STATE-NUM] -- The (internal number of the) accessing
    symbol of state STATE-NUM.  */
 static const yytype_uint8 yystos[] =
 {
-       0,     3,    26,    13,     0,     4,    10,    27,    28,     6,
-      29,    28,    14,    30,    27,    29,    15,    31,    11,    14,
-      13,    32,    31,    31,     5,    10,    33,    39,    40,    13,
-      17,    31,    33,    40,    16,    33,    39,    34,    35,    13,
-      10,    13,    23,     9,    10,    11,    12,    18,    20,    22,
-      36,    38,     9,     6,    10,    11,    19,    37,     8,    10,
-      21,    23,    24,    29,    23,    29,    24
+       0,     3,    49,    25,     0,     4,    22,    50,    51,    17,
+      18,    33,    59,    51,    26,    52,    50,    43,    46,    47,
+      59,    60,    61,    62,    63,    64,    65,    66,    67,    68,
+      69,    70,    71,    72,    59,    27,    53,    15,    23,    26,
+      72,    72,    72,    35,    12,    37,    11,    39,    40,    41,
+       9,    10,     7,     8,    30,    36,     5,     6,    42,    43,
+      26,    44,    45,    25,    54,    23,    53,    53,    63,    60,
+      64,    65,    66,    67,    68,    68,    69,    69,    69,    69,
+      70,    70,    71,    71,    72,    72,    72,    14,    15,    16,
+      22,    55,    74,    75,    25,    38,    16,    16,    25,    29,
+      53,    55,    75,    28,    55,    74,    61,    25,    25,    56,
+      57,    25,    22,    25,    34,    13,    21,    22,    23,    24,
+      31,    36,    58,    17,    33,    73,    22,    23,    30,    59,
+      36,    21,    20,    22,    32,    34,    35,    59,    34,    59,
+      35
 };
 
 #define yyerrok                (yyerrstatus = 0)
@@ -596,18 +705,9 @@ static const yytype_uint8 yystos[] =
 
 /* Like YYERROR except do call yyerror.  This remains here temporarily
    to ease the transition to the new meaning of YYERROR, for GCC.
-   Once GCC version 2 has supplanted version 1, this can go.  However,
-   YYFAIL appears to be in use.  Nevertheless, it is formally deprecated
-   in Bison 2.4.2's NEWS entry, where a plan to phase it out is
-   discussed.  */
+   Once GCC version 2 has supplanted version 1, this can go.  */
 
 #define YYFAIL         goto yyerrlab
-#if defined YYFAIL
-  /* This is here to suppress warnings from the GCC cpp's
-     -Wunused-macros.  Normally we don't worry about that warning, but
-     some users do, and we want to make it easy for users to remove
-     YYFAIL uses, which will produce warnings from Bison 2.5.  */
-#endif
 
 #define YYRECOVERING()  (!!yyerrstatus)
 
@@ -664,7 +764,7 @@ while (YYID (0))
    we won't break user code: when these are the locations we know.  */
 
 #ifndef YY_LOCATION_PRINT
-# if defined YYLTYPE_IS_TRIVIAL && YYLTYPE_IS_TRIVIAL
+# if YYLTYPE_IS_TRIVIAL
 #  define YY_LOCATION_PRINT(File, Loc)                 \
      fprintf (File, "%d.%d-%d.%d",                     \
              (Loc).first_line, (Loc).first_column,     \
@@ -1403,6 +1503,8 @@ yyreduce:
     {
         case 2:
 
+/* Line 1455 of yacc.c  */
+#line 110 "dtc-parser.y"
     {
                        the_boot_info = build_boot_info((yyvsp[(3) - (4)].re), (yyvsp[(4) - (4)].node),
                                                        guess_boot_cpuid((yyvsp[(4) - (4)].node)));
@@ -1411,6 +1513,8 @@ yyreduce:
 
   case 3:
 
+/* Line 1455 of yacc.c  */
+#line 118 "dtc-parser.y"
     {
                        (yyval.re) = NULL;
                ;}
@@ -1418,6 +1522,8 @@ yyreduce:
 
   case 4:
 
+/* Line 1455 of yacc.c  */
+#line 122 "dtc-parser.y"
     {
                        (yyval.re) = chain_reserve_entry((yyvsp[(1) - (2)].re), (yyvsp[(2) - (2)].re));
                ;}
@@ -1425,13 +1531,17 @@ yyreduce:
 
   case 5:
 
+/* Line 1455 of yacc.c  */
+#line 129 "dtc-parser.y"
     {
-                       (yyval.re) = build_reserve_entry((yyvsp[(2) - (4)].addr), (yyvsp[(3) - (4)].addr));
+                       (yyval.re) = build_reserve_entry((yyvsp[(2) - (4)].integer), (yyvsp[(3) - (4)].integer));
                ;}
     break;
 
   case 6:
 
+/* Line 1455 of yacc.c  */
+#line 133 "dtc-parser.y"
     {
                        add_label(&(yyvsp[(2) - (2)].re)->labels, (yyvsp[(1) - (2)].labelref));
                        (yyval.re) = (yyvsp[(2) - (2)].re);
@@ -1440,40 +1550,57 @@ yyreduce:
 
   case 7:
 
+/* Line 1455 of yacc.c  */
+#line 141 "dtc-parser.y"
     {
-                       (yyval.addr) = eval_literal((yyvsp[(1) - (1)].literal), 0, 64);
+                       (yyval.node) = name_node((yyvsp[(2) - (2)].node), "");
                ;}
     break;
 
   case 8:
 
+/* Line 1455 of yacc.c  */
+#line 145 "dtc-parser.y"
     {
-                       (yyval.node) = name_node((yyvsp[(2) - (2)].node), "");
+                       (yyval.node) = merge_nodes((yyvsp[(1) - (3)].node), (yyvsp[(3) - (3)].node));
                ;}
     break;
 
   case 9:
 
+/* Line 1455 of yacc.c  */
+#line 149 "dtc-parser.y"
     {
-                       (yyval.node) = merge_nodes((yyvsp[(1) - (3)].node), (yyvsp[(3) - (3)].node));
+                       struct node *target = get_node_by_ref((yyvsp[(1) - (3)].node), (yyvsp[(2) - (3)].labelref));
+
+                       if (target)
+                               merge_nodes(target, (yyvsp[(3) - (3)].node));
+                       else
+                               print_error("label or path, '%s', not found", (yyvsp[(2) - (3)].labelref));
+                       (yyval.node) = (yyvsp[(1) - (3)].node);
                ;}
     break;
 
   case 10:
 
+/* Line 1455 of yacc.c  */
+#line 159 "dtc-parser.y"
     {
-                       struct node *target = get_node_by_ref((yyvsp[(1) - (3)].node), (yyvsp[(2) - (3)].labelref));
+                       struct node *target = get_node_by_ref((yyvsp[(1) - (4)].node), (yyvsp[(3) - (4)].labelref));
 
-                       if (target)
-                               merge_nodes(target, (yyvsp[(3) - (3)].node));
+                       if (!target)
+                               print_error("label or path, '%s', not found", (yyvsp[(3) - (4)].labelref));
                        else
-                               print_error("label or path, '%s', not found", (yyvsp[(2) - (3)].labelref));
-                       (yyval.node) = (yyvsp[(1) - (3)].node);
+                               delete_node(target);
+
+                       (yyval.node) = (yyvsp[(1) - (4)].node);
                ;}
     break;
 
   case 11:
 
+/* Line 1455 of yacc.c  */
+#line 173 "dtc-parser.y"
     {
                        (yyval.node) = build_node((yyvsp[(2) - (5)].proplist), (yyvsp[(3) - (5)].nodelist));
                ;}
@@ -1481,6 +1608,8 @@ yyreduce:
 
   case 12:
 
+/* Line 1455 of yacc.c  */
+#line 180 "dtc-parser.y"
     {
                        (yyval.proplist) = NULL;
                ;}
@@ -1488,6 +1617,8 @@ yyreduce:
 
   case 13:
 
+/* Line 1455 of yacc.c  */
+#line 184 "dtc-parser.y"
     {
                        (yyval.proplist) = chain_property((yyvsp[(2) - (2)].prop), (yyvsp[(1) - (2)].proplist));
                ;}
@@ -1495,6 +1626,8 @@ yyreduce:
 
   case 14:
 
+/* Line 1455 of yacc.c  */
+#line 191 "dtc-parser.y"
     {
                        (yyval.prop) = build_property((yyvsp[(1) - (4)].propnodename), (yyvsp[(3) - (4)].data));
                ;}
@@ -1502,6 +1635,8 @@ yyreduce:
 
   case 15:
 
+/* Line 1455 of yacc.c  */
+#line 195 "dtc-parser.y"
     {
                        (yyval.prop) = build_property((yyvsp[(1) - (2)].propnodename), empty_data);
                ;}
@@ -1509,62 +1644,85 @@ yyreduce:
 
   case 16:
 
+/* Line 1455 of yacc.c  */
+#line 199 "dtc-parser.y"
     {
-                       add_label(&(yyvsp[(2) - (2)].prop)->labels, (yyvsp[(1) - (2)].labelref));
-                       (yyval.prop) = (yyvsp[(2) - (2)].prop);
+                       (yyval.prop) = build_property_delete((yyvsp[(2) - (3)].propnodename));
                ;}
     break;
 
   case 17:
 
+/* Line 1455 of yacc.c  */
+#line 203 "dtc-parser.y"
     {
-                       (yyval.data) = data_merge((yyvsp[(1) - (2)].data), (yyvsp[(2) - (2)].data));
+                       add_label(&(yyvsp[(2) - (2)].prop)->labels, (yyvsp[(1) - (2)].labelref));
+                       (yyval.prop) = (yyvsp[(2) - (2)].prop);
                ;}
     break;
 
   case 18:
 
+/* Line 1455 of yacc.c  */
+#line 211 "dtc-parser.y"
     {
-                       (yyval.data) = data_merge((yyvsp[(1) - (4)].data), (yyvsp[(3) - (4)].data));
+                       (yyval.data) = data_merge((yyvsp[(1) - (2)].data), (yyvsp[(2) - (2)].data));
                ;}
     break;
 
   case 19:
 
+/* Line 1455 of yacc.c  */
+#line 215 "dtc-parser.y"
     {
-                       (yyval.data) = data_merge((yyvsp[(1) - (4)].data), (yyvsp[(3) - (4)].data));
+                       (yyval.data) = data_merge((yyvsp[(1) - (3)].data), (yyvsp[(2) - (3)].array).data);
                ;}
     break;
 
   case 20:
 
+/* Line 1455 of yacc.c  */
+#line 219 "dtc-parser.y"
     {
-                       (yyval.data) = data_add_marker((yyvsp[(1) - (2)].data), REF_PATH, (yyvsp[(2) - (2)].labelref));
+                       (yyval.data) = data_merge((yyvsp[(1) - (4)].data), (yyvsp[(3) - (4)].data));
                ;}
     break;
 
   case 21:
 
+/* Line 1455 of yacc.c  */
+#line 223 "dtc-parser.y"
+    {
+                       (yyval.data) = data_add_marker((yyvsp[(1) - (2)].data), REF_PATH, (yyvsp[(2) - (2)].labelref));
+               ;}
+    break;
+
+  case 22:
+
+/* Line 1455 of yacc.c  */
+#line 227 "dtc-parser.y"
     {
                        FILE *f = srcfile_relative_open((yyvsp[(4) - (9)].data).val, NULL);
                        struct data d;
 
-                       if ((yyvsp[(6) - (9)].addr) != 0)
-                               if (fseek(f, (yyvsp[(6) - (9)].addr), SEEK_SET) != 0)
+                       if ((yyvsp[(6) - (9)].integer) != 0)
+                               if (fseek(f, (yyvsp[(6) - (9)].integer), SEEK_SET) != 0)
                                        print_error("Couldn't seek to offset %llu in \"%s\": %s",
-                                                    (unsigned long long)(yyvsp[(6) - (9)].addr),
+                                                    (unsigned long long)(yyvsp[(6) - (9)].integer),
                                                     (yyvsp[(4) - (9)].data).val,
                                                     strerror(errno));
 
-                       d = data_copy_file(f, (yyvsp[(8) - (9)].addr));
+                       d = data_copy_file(f, (yyvsp[(8) - (9)].integer));
 
                        (yyval.data) = data_merge((yyvsp[(1) - (9)].data), d);
                        fclose(f);
                ;}
     break;
 
-  case 22:
+  case 23:
 
+/* Line 1455 of yacc.c  */
+#line 244 "dtc-parser.y"
     {
                        FILE *f = srcfile_relative_open((yyvsp[(4) - (5)].data).val, NULL);
                        struct data d = empty_data;
@@ -1576,122 +1734,383 @@ yyreduce:
                ;}
     break;
 
-  case 23:
-
-    {
-                       (yyval.data) = data_add_marker((yyvsp[(1) - (2)].data), LABEL, (yyvsp[(2) - (2)].labelref));
-               ;}
-    break;
-
   case 24:
 
+/* Line 1455 of yacc.c  */
+#line 254 "dtc-parser.y"
     {
-                       (yyval.data) = empty_data;
+                       (yyval.data) = data_add_marker((yyvsp[(1) - (2)].data), LABEL, (yyvsp[(2) - (2)].labelref));
                ;}
     break;
 
   case 25:
 
+/* Line 1455 of yacc.c  */
+#line 261 "dtc-parser.y"
     {
-                       (yyval.data) = (yyvsp[(1) - (2)].data);
+                       (yyval.data) = empty_data;
                ;}
     break;
 
   case 26:
 
+/* Line 1455 of yacc.c  */
+#line 265 "dtc-parser.y"
     {
-                       (yyval.data) = data_add_marker((yyvsp[(1) - (2)].data), LABEL, (yyvsp[(2) - (2)].labelref));
+                       (yyval.data) = (yyvsp[(1) - (2)].data);
                ;}
     break;
 
   case 27:
 
+/* Line 1455 of yacc.c  */
+#line 269 "dtc-parser.y"
     {
-                       (yyval.data) = empty_data;
+                       (yyval.data) = data_add_marker((yyvsp[(1) - (2)].data), LABEL, (yyvsp[(2) - (2)].labelref));
                ;}
     break;
 
   case 28:
 
+/* Line 1455 of yacc.c  */
+#line 276 "dtc-parser.y"
     {
-                       (yyval.data) = data_append_cell((yyvsp[(1) - (2)].data), (yyvsp[(2) - (2)].cell));
+                       (yyval.array).data = empty_data;
+                       (yyval.array).bits = eval_literal((yyvsp[(2) - (3)].literal), 0, 7);
+
+                       if (((yyval.array).bits !=  8) &&
+                           ((yyval.array).bits != 16) &&
+                           ((yyval.array).bits != 32) &&
+                           ((yyval.array).bits != 64))
+                       {
+                               print_error("Only 8, 16, 32 and 64-bit elements"
+                                           " are currently supported");
+                               (yyval.array).bits = 32;
+                       }
                ;}
     break;
 
   case 29:
 
+/* Line 1455 of yacc.c  */
+#line 291 "dtc-parser.y"
     {
-                       (yyval.data) = data_append_cell(data_add_marker((yyvsp[(1) - (2)].data), REF_PHANDLE,
-                                                             (yyvsp[(2) - (2)].labelref)), -1);
+                       (yyval.array).data = empty_data;
+                       (yyval.array).bits = 32;
                ;}
     break;
 
   case 30:
 
+/* Line 1455 of yacc.c  */
+#line 296 "dtc-parser.y"
     {
-                       (yyval.data) = data_add_marker((yyvsp[(1) - (2)].data), LABEL, (yyvsp[(2) - (2)].labelref));
+                       if ((yyvsp[(1) - (2)].array).bits < 64) {
+                               uint64_t mask = (1ULL << (yyvsp[(1) - (2)].array).bits) - 1;
+                               /*
+                                * Bits above mask must either be all zero
+                                * (positive within range of mask) or all one
+                                * (negative and sign-extended). The second
+                                * condition is true if when we set all bits
+                                * within the mask to one (i.e. | in the
+                                * mask), all bits are one.
+                                */
+                               if (((yyvsp[(2) - (2)].integer) > mask) && (((yyvsp[(2) - (2)].integer) | mask) != -1ULL))
+                                       print_error(
+                                               "integer value out of range "
+                                               "%016lx (%d bits)", (yyvsp[(1) - (2)].array).bits);
+                       }
+
+                       (yyval.array).data = data_append_integer((yyvsp[(1) - (2)].array).data, (yyvsp[(2) - (2)].integer), (yyvsp[(1) - (2)].array).bits);
                ;}
     break;
 
   case 31:
 
+/* Line 1455 of yacc.c  */
+#line 316 "dtc-parser.y"
     {
-                       (yyval.cell) = eval_literal((yyvsp[(1) - (1)].literal), 0, 32);
+                       uint64_t val = ~0ULL >> (64 - (yyvsp[(1) - (2)].array).bits);
+
+                       if ((yyvsp[(1) - (2)].array).bits == 32)
+                               (yyvsp[(1) - (2)].array).data = data_add_marker((yyvsp[(1) - (2)].array).data,
+                                                         REF_PHANDLE,
+                                                         (yyvsp[(2) - (2)].labelref));
+                       else
+                               print_error("References are only allowed in "
+                                           "arrays with 32-bit elements.");
+
+                       (yyval.array).data = data_append_integer((yyvsp[(1) - (2)].array).data, val, (yyvsp[(1) - (2)].array).bits);
                ;}
     break;
 
   case 32:
 
+/* Line 1455 of yacc.c  */
+#line 330 "dtc-parser.y"
     {
-                       (yyval.data) = empty_data;
+                       (yyval.array).data = data_add_marker((yyvsp[(1) - (2)].array).data, LABEL, (yyvsp[(2) - (2)].labelref));
                ;}
     break;
 
   case 33:
 
+/* Line 1455 of yacc.c  */
+#line 337 "dtc-parser.y"
     {
-                       (yyval.data) = data_append_byte((yyvsp[(1) - (2)].data), (yyvsp[(2) - (2)].byte));
+                       (yyval.integer) = eval_literal((yyvsp[(1) - (1)].literal), 0, 64);
                ;}
     break;
 
   case 34:
 
+/* Line 1455 of yacc.c  */
+#line 341 "dtc-parser.y"
     {
-                       (yyval.data) = data_add_marker((yyvsp[(1) - (2)].data), LABEL, (yyvsp[(2) - (2)].labelref));
+                       (yyval.integer) = eval_char_literal((yyvsp[(1) - (1)].literal));
                ;}
     break;
 
   case 35:
 
+/* Line 1455 of yacc.c  */
+#line 345 "dtc-parser.y"
+    {
+                       (yyval.integer) = (yyvsp[(2) - (3)].integer);
+               ;}
+    break;
+
+  case 38:
+
+/* Line 1455 of yacc.c  */
+#line 356 "dtc-parser.y"
+    { (yyval.integer) = (yyvsp[(1) - (5)].integer) ? (yyvsp[(3) - (5)].integer) : (yyvsp[(5) - (5)].integer); ;}
+    break;
+
+  case 40:
+
+/* Line 1455 of yacc.c  */
+#line 361 "dtc-parser.y"
+    { (yyval.integer) = (yyvsp[(1) - (3)].integer) || (yyvsp[(3) - (3)].integer); ;}
+    break;
+
+  case 42:
+
+/* Line 1455 of yacc.c  */
+#line 366 "dtc-parser.y"
+    { (yyval.integer) = (yyvsp[(1) - (3)].integer) && (yyvsp[(3) - (3)].integer); ;}
+    break;
+
+  case 44:
+
+/* Line 1455 of yacc.c  */
+#line 371 "dtc-parser.y"
+    { (yyval.integer) = (yyvsp[(1) - (3)].integer) | (yyvsp[(3) - (3)].integer); ;}
+    break;
+
+  case 46:
+
+/* Line 1455 of yacc.c  */
+#line 376 "dtc-parser.y"
+    { (yyval.integer) = (yyvsp[(1) - (3)].integer) ^ (yyvsp[(3) - (3)].integer); ;}
+    break;
+
+  case 48:
+
+/* Line 1455 of yacc.c  */
+#line 381 "dtc-parser.y"
+    { (yyval.integer) = (yyvsp[(1) - (3)].integer) & (yyvsp[(3) - (3)].integer); ;}
+    break;
+
+  case 50:
+
+/* Line 1455 of yacc.c  */
+#line 386 "dtc-parser.y"
+    { (yyval.integer) = (yyvsp[(1) - (3)].integer) == (yyvsp[(3) - (3)].integer); ;}
+    break;
+
+  case 51:
+
+/* Line 1455 of yacc.c  */
+#line 387 "dtc-parser.y"
+    { (yyval.integer) = (yyvsp[(1) - (3)].integer) != (yyvsp[(3) - (3)].integer); ;}
+    break;
+
+  case 53:
+
+/* Line 1455 of yacc.c  */
+#line 392 "dtc-parser.y"
+    { (yyval.integer) = (yyvsp[(1) - (3)].integer) < (yyvsp[(3) - (3)].integer); ;}
+    break;
+
+  case 54:
+
+/* Line 1455 of yacc.c  */
+#line 393 "dtc-parser.y"
+    { (yyval.integer) = (yyvsp[(1) - (3)].integer) > (yyvsp[(3) - (3)].integer); ;}
+    break;
+
+  case 55:
+
+/* Line 1455 of yacc.c  */
+#line 394 "dtc-parser.y"
+    { (yyval.integer) = (yyvsp[(1) - (3)].integer) <= (yyvsp[(3) - (3)].integer); ;}
+    break;
+
+  case 56:
+
+/* Line 1455 of yacc.c  */
+#line 395 "dtc-parser.y"
+    { (yyval.integer) = (yyvsp[(1) - (3)].integer) >= (yyvsp[(3) - (3)].integer); ;}
+    break;
+
+  case 57:
+
+/* Line 1455 of yacc.c  */
+#line 399 "dtc-parser.y"
+    { (yyval.integer) = (yyvsp[(1) - (3)].integer) << (yyvsp[(3) - (3)].integer); ;}
+    break;
+
+  case 58:
+
+/* Line 1455 of yacc.c  */
+#line 400 "dtc-parser.y"
+    { (yyval.integer) = (yyvsp[(1) - (3)].integer) >> (yyvsp[(3) - (3)].integer); ;}
+    break;
+
+  case 60:
+
+/* Line 1455 of yacc.c  */
+#line 405 "dtc-parser.y"
+    { (yyval.integer) = (yyvsp[(1) - (3)].integer) + (yyvsp[(3) - (3)].integer); ;}
+    break;
+
+  case 61:
+
+/* Line 1455 of yacc.c  */
+#line 406 "dtc-parser.y"
+    { (yyval.integer) = (yyvsp[(1) - (3)].integer) - (yyvsp[(3) - (3)].integer); ;}
+    break;
+
+  case 63:
+
+/* Line 1455 of yacc.c  */
+#line 411 "dtc-parser.y"
+    { (yyval.integer) = (yyvsp[(1) - (3)].integer) * (yyvsp[(3) - (3)].integer); ;}
+    break;
+
+  case 64:
+
+/* Line 1455 of yacc.c  */
+#line 412 "dtc-parser.y"
+    { (yyval.integer) = (yyvsp[(1) - (3)].integer) / (yyvsp[(3) - (3)].integer); ;}
+    break;
+
+  case 65:
+
+/* Line 1455 of yacc.c  */
+#line 413 "dtc-parser.y"
+    { (yyval.integer) = (yyvsp[(1) - (3)].integer) % (yyvsp[(3) - (3)].integer); ;}
+    break;
+
+  case 68:
+
+/* Line 1455 of yacc.c  */
+#line 419 "dtc-parser.y"
+    { (yyval.integer) = -(yyvsp[(2) - (2)].integer); ;}
+    break;
+
+  case 69:
+
+/* Line 1455 of yacc.c  */
+#line 420 "dtc-parser.y"
+    { (yyval.integer) = ~(yyvsp[(2) - (2)].integer); ;}
+    break;
+
+  case 70:
+
+/* Line 1455 of yacc.c  */
+#line 421 "dtc-parser.y"
+    { (yyval.integer) = !(yyvsp[(2) - (2)].integer); ;}
+    break;
+
+  case 71:
+
+/* Line 1455 of yacc.c  */
+#line 426 "dtc-parser.y"
+    {
+                       (yyval.data) = empty_data;
+               ;}
+    break;
+
+  case 72:
+
+/* Line 1455 of yacc.c  */
+#line 430 "dtc-parser.y"
+    {
+                       (yyval.data) = data_append_byte((yyvsp[(1) - (2)].data), (yyvsp[(2) - (2)].byte));
+               ;}
+    break;
+
+  case 73:
+
+/* Line 1455 of yacc.c  */
+#line 434 "dtc-parser.y"
+    {
+                       (yyval.data) = data_add_marker((yyvsp[(1) - (2)].data), LABEL, (yyvsp[(2) - (2)].labelref));
+               ;}
+    break;
+
+  case 74:
+
+/* Line 1455 of yacc.c  */
+#line 441 "dtc-parser.y"
     {
                        (yyval.nodelist) = NULL;
                ;}
     break;
 
-  case 36:
+  case 75:
 
+/* Line 1455 of yacc.c  */
+#line 445 "dtc-parser.y"
     {
                        (yyval.nodelist) = chain_node((yyvsp[(1) - (2)].node), (yyvsp[(2) - (2)].nodelist));
                ;}
     break;
 
-  case 37:
+  case 76:
 
+/* Line 1455 of yacc.c  */
+#line 449 "dtc-parser.y"
     {
                        print_error("syntax error: properties must precede subnodes");
                        YYERROR;
                ;}
     break;
 
-  case 38:
+  case 77:
 
+/* Line 1455 of yacc.c  */
+#line 457 "dtc-parser.y"
     {
                        (yyval.node) = name_node((yyvsp[(2) - (2)].node), (yyvsp[(1) - (2)].propnodename));
                ;}
     break;
 
-  case 39:
+  case 78:
+
+/* Line 1455 of yacc.c  */
+#line 461 "dtc-parser.y"
+    {
+                       (yyval.node) = name_node(build_node_delete(), (yyvsp[(2) - (3)].propnodename));
+               ;}
+    break;
+
+  case 79:
 
+/* Line 1455 of yacc.c  */
+#line 465 "dtc-parser.y"
     {
                        add_label(&(yyvsp[(2) - (2)].node)->labels, (yyvsp[(1) - (2)].labelref));
                        (yyval.node) = (yyvsp[(2) - (2)].node);
@@ -1700,6 +2119,8 @@ yyreduce:
 
 
 
+/* Line 1455 of yacc.c  */
+#line 2124 "dtc-parser.tab.c"
       default: break;
     }
   YY_SYMBOL_PRINT ("-> $$ =", yyr1[yyn], &yyval, &yyloc);
@@ -1910,6 +2331,8 @@ yyreturn:
 
 
 
+/* Line 1675 of yacc.c  */
+#line 471 "dtc-parser.y"
 
 
 void print_error(char const *fmt, ...)
@@ -1934,9 +2357,12 @@ static unsigned long long eval_literal(const char *s, int base, int bits)
 
        errno = 0;
        val = strtoull(s, &e, base);
-       if (*e)
-               print_error("bad characters in literal");
-       else if ((errno == ERANGE)
+       if (*e) {
+               size_t uls = strspn(e, "UL");
+               if (e[uls])
+                       print_error("bad characters in literal");
+       }
+       if ((errno == ERANGE)
                 || ((bits < 64) && (val >= (1ULL << bits))))
                print_error("literal out of range");
        else if (errno != 0)
@@ -1944,3 +2370,29 @@ static unsigned long long eval_literal(const char *s, int base, int bits)
        return val;
 }
 
+static unsigned char eval_char_literal(const char *s)
+{
+       int i = 1;
+       char c = s[0];
+
+       if (c == '\0')
+       {
+               print_error("empty character literal");
+               return 0;
+       }
+
+       /*
+        * If the first character in the character literal is a \ then process
+        * the remaining characters as an escape encoding. If the first
+        * character is neither an escape or a terminator it should be the only
+        * character in the literal and will be returned.
+        */
+       if (c == '\\')
+               c = get_escape_char(s, &i);
+
+       if (s[i] != '\0')
+               print_error("malformed character literal");
+
+       return c;
+}
+
index 4ee682bb7d332e1488b3b30f5e3a368424c1c5f9..9d2dce41211f05774ee7c26425fd3c5e1b3e3609 100644 (file)
@@ -1,9 +1,10 @@
-/* A Bison parser, made by GNU Bison 2.4.3.  */
+
+/* A Bison parser, made by GNU Bison 2.4.1.  */
 
 /* Skeleton interface for Bison's Yacc-like parsers in C
    
-      Copyright (C) 1984, 1989, 1990, 2000, 2001, 2002, 2003, 2004, 2005, 2006,
-   2009, 2010 Free Software Foundation, Inc.
+      Copyright (C) 1984, 1989, 1990, 2000, 2001, 2002, 2003, 2004, 2005, 2006
+   Free Software Foundation, Inc.
    
    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    enum yytokentype {
      DT_V1 = 258,
      DT_MEMRESERVE = 259,
-     DT_PROPNODENAME = 260,
-     DT_LITERAL = 261,
-     DT_BASE = 262,
-     DT_BYTE = 263,
-     DT_STRING = 264,
-     DT_LABEL = 265,
-     DT_REF = 266,
-     DT_INCBIN = 267
+     DT_LSHIFT = 260,
+     DT_RSHIFT = 261,
+     DT_LE = 262,
+     DT_GE = 263,
+     DT_EQ = 264,
+     DT_NE = 265,
+     DT_AND = 266,
+     DT_OR = 267,
+     DT_BITS = 268,
+     DT_DEL_PROP = 269,
+     DT_DEL_NODE = 270,
+     DT_PROPNODENAME = 271,
+     DT_LITERAL = 272,
+     DT_CHAR_LITERAL = 273,
+     DT_BASE = 274,
+     DT_BYTE = 275,
+     DT_STRING = 276,
+     DT_LABEL = 277,
+     DT_REF = 278,
+     DT_INCBIN = 279
    };
 #endif
 
@@ -57,6 +70,8 @@
 typedef union YYSTYPE
 {
 
+/* Line 1676 of yacc.c  */
+#line 40 "dtc-parser.y"
 
        char *propnodename;
        char *literal;
@@ -65,16 +80,22 @@ typedef union YYSTYPE
        uint8_t byte;
        struct data data;
 
-       uint64_t addr;
-       cell_t cell;
+       struct {
+               struct data     data;
+               int             bits;
+       } array;
+
        struct property *prop;
        struct property *proplist;
        struct node *node;
        struct node *nodelist;
        struct reserve_info *re;
+       uint64_t integer;
 
 
 
+/* Line 1676 of yacc.c  */
+#line 99 "dtc-parser.tab.h"
 } YYSTYPE;
 # define YYSTYPE_IS_TRIVIAL 1
 # define yystype YYSTYPE /* obsolescent; will be withdrawn */
index 5e84a67fc1d2bf110d0d33040d9d3178f5bbddb6..f412460f94d7478ad3fb9a81951f5c6a5e60c4ad 100644 (file)
@@ -34,6 +34,7 @@ extern struct boot_info *the_boot_info;
 extern int treesource_error;
 
 static unsigned long long eval_literal(const char *s, int base, int bits);
+static unsigned char eval_char_literal(const char *s);
 %}
 
 %union {
@@ -44,19 +45,28 @@ static unsigned long long eval_literal(const char *s, int base, int bits);
        uint8_t byte;
        struct data data;
 
-       uint64_t addr;
-       cell_t cell;
+       struct {
+               struct data     data;
+               int             bits;
+       } array;
+
        struct property *prop;
        struct property *proplist;
        struct node *node;
        struct node *nodelist;
        struct reserve_info *re;
+       uint64_t integer;
 }
 
 %token DT_V1
 %token DT_MEMRESERVE
+%token DT_LSHIFT DT_RSHIFT DT_LE DT_GE DT_EQ DT_NE DT_AND DT_OR
+%token DT_BITS
+%token DT_DEL_PROP
+%token DT_DEL_NODE
 %token <propnodename> DT_PROPNODENAME
 %token <literal> DT_LITERAL
+%token <literal> DT_CHAR_LITERAL
 %token <cbase> DT_BASE
 %token <byte> DT_BYTE
 %token <data> DT_STRING
@@ -68,9 +78,7 @@ static unsigned long long eval_literal(const char *s, int base, int bits);
 %type <data> propdataprefix
 %type <re> memreserve
 %type <re> memreserves
-%type <addr> addr
-%type <data> celllist
-%type <cell> cellval
+%type <array> arrayprefix
 %type <data> bytestring
 %type <prop> propdef
 %type <proplist> proplist
@@ -80,6 +88,21 @@ static unsigned long long eval_literal(const char *s, int base, int bits);
 %type <node> subnode
 %type <nodelist> subnodes
 
+%type <integer> integer_prim
+%type <integer> integer_unary
+%type <integer> integer_mul
+%type <integer> integer_add
+%type <integer> integer_shift
+%type <integer> integer_rela
+%type <integer> integer_eq
+%type <integer> integer_bitand
+%type <integer> integer_bitxor
+%type <integer> integer_bitor
+%type <integer> integer_and
+%type <integer> integer_or
+%type <integer> integer_trinary
+%type <integer> integer_expr
+
 %%
 
 sourcefile:
@@ -102,7 +125,7 @@ memreserves:
        ;
 
 memreserve:
-         DT_MEMRESERVE addr addr ';'
+         DT_MEMRESERVE integer_prim integer_prim ';'
                {
                        $$ = build_reserve_entry($2, $3);
                }
@@ -113,13 +136,6 @@ memreserve:
                }
        ;
 
-addr:
-         DT_LITERAL
-               {
-                       $$ = eval_literal($1, 0, 64);
-               }
-         ;
-
 devicetree:
          '/' nodedef
                {
@@ -139,6 +155,17 @@ devicetree:
                                print_error("label or path, '%s', not found", $2);
                        $$ = $1;
                }
+       | devicetree DT_DEL_NODE DT_REF ';'
+               {
+                       struct node *target = get_node_by_ref($1, $3);
+
+                       if (!target)
+                               print_error("label or path, '%s', not found", $3);
+                       else
+                               delete_node(target);
+
+                       $$ = $1;
+               }
        ;
 
 nodedef:
@@ -168,6 +195,10 @@ propdef:
                {
                        $$ = build_property($1, empty_data);
                }
+       | DT_DEL_PROP DT_PROPNODENAME ';'
+               {
+                       $$ = build_property_delete($2);
+               }
        | DT_LABEL propdef
                {
                        add_label(&$2->labels, $1);
@@ -180,9 +211,9 @@ propdata:
                {
                        $$ = data_merge($1, $2);
                }
-       | propdataprefix '<' celllist '>'
+       | propdataprefix arrayprefix '>'
                {
-                       $$ = data_merge($1, $3);
+                       $$ = data_merge($1, $2.data);
                }
        | propdataprefix '[' bytestring ']'
                {
@@ -192,7 +223,7 @@ propdata:
                {
                        $$ = data_add_marker($1, REF_PATH, $2);
                }
-       | propdataprefix DT_INCBIN '(' DT_STRING ',' addr ',' addr ')'
+       | propdataprefix DT_INCBIN '(' DT_STRING ',' integer_prim ',' integer_prim ')'
                {
                        FILE *f = srcfile_relative_open($4.val, NULL);
                        struct data d;
@@ -240,31 +271,154 @@ propdataprefix:
                }
        ;
 
-celllist:
-         /* empty */
+arrayprefix:
+       DT_BITS DT_LITERAL '<'
+               {
+                       $$.data = empty_data;
+                       $$.bits = eval_literal($2, 0, 7);
+
+                       if (($$.bits !=  8) &&
+                           ($$.bits != 16) &&
+                           ($$.bits != 32) &&
+                           ($$.bits != 64))
+                       {
+                               print_error("Only 8, 16, 32 and 64-bit elements"
+                                           " are currently supported");
+                               $$.bits = 32;
+                       }
+               }
+       | '<'
+               {
+                       $$.data = empty_data;
+                       $$.bits = 32;
+               }
+       | arrayprefix integer_prim
+               {
+                       if ($1.bits < 64) {
+                               uint64_t mask = (1ULL << $1.bits) - 1;
+                               /*
+                                * Bits above mask must either be all zero
+                                * (positive within range of mask) or all one
+                                * (negative and sign-extended). The second
+                                * condition is true if when we set all bits
+                                * within the mask to one (i.e. | in the
+                                * mask), all bits are one.
+                                */
+                               if (($2 > mask) && (($2 | mask) != -1ULL))
+                                       print_error(
+                                               "integer value out of range "
+                                               "%016lx (%d bits)", $1.bits);
+                       }
+
+                       $$.data = data_append_integer($1.data, $2, $1.bits);
+               }
+       | arrayprefix DT_REF
+               {
+                       uint64_t val = ~0ULL >> (64 - $1.bits);
+
+                       if ($1.bits == 32)
+                               $1.data = data_add_marker($1.data,
+                                                         REF_PHANDLE,
+                                                         $2);
+                       else
+                               print_error("References are only allowed in "
+                                           "arrays with 32-bit elements.");
+
+                       $$.data = data_append_integer($1.data, val, $1.bits);
+               }
+       | arrayprefix DT_LABEL
                {
-                       $$ = empty_data;
+                       $$.data = data_add_marker($1.data, LABEL, $2);
                }
-       | celllist cellval
+       ;
+
+integer_prim:
+         DT_LITERAL
                {
-                       $$ = data_append_cell($1, $2);
+                       $$ = eval_literal($1, 0, 64);
                }
-       | celllist DT_REF
+       | DT_CHAR_LITERAL
                {
-                       $$ = data_append_cell(data_add_marker($1, REF_PHANDLE,
-                                                             $2), -1);
+                       $$ = eval_char_literal($1);
                }
-       | celllist DT_LABEL
+       | '(' integer_expr ')'
                {
-                       $$ = data_add_marker($1, LABEL, $2);
+                       $$ = $2;
                }
        ;
 
-cellval:
-         DT_LITERAL
-               {
-                       $$ = eval_literal($1, 0, 32);
-               }
+integer_expr:
+       integer_trinary
+       ;
+
+integer_trinary:
+         integer_or
+       | integer_or '?' integer_expr ':' integer_trinary { $$ = $1 ? $3 : $5; }
+       ;
+
+integer_or:
+         integer_and
+       | integer_or DT_OR integer_and { $$ = $1 || $3; }
+       ;
+
+integer_and:
+         integer_bitor
+       | integer_and DT_AND integer_bitor { $$ = $1 && $3; }
+       ;
+
+integer_bitor:
+         integer_bitxor
+       | integer_bitor '|' integer_bitxor { $$ = $1 | $3; }
+       ;
+
+integer_bitxor:
+         integer_bitand
+       | integer_bitxor '^' integer_bitand { $$ = $1 ^ $3; }
+       ;
+
+integer_bitand:
+         integer_eq
+       | integer_bitand '&' integer_eq { $$ = $1 & $3; }
+       ;
+
+integer_eq:
+         integer_rela
+       | integer_eq DT_EQ integer_rela { $$ = $1 == $3; }
+       | integer_eq DT_NE integer_rela { $$ = $1 != $3; }
+       ;
+
+integer_rela:
+         integer_shift
+       | integer_rela '<' integer_shift { $$ = $1 < $3; }
+       | integer_rela '>' integer_shift { $$ = $1 > $3; }
+       | integer_rela DT_LE integer_shift { $$ = $1 <= $3; }
+       | integer_rela DT_GE integer_shift { $$ = $1 >= $3; }
+       ;
+
+integer_shift:
+         integer_shift DT_LSHIFT integer_add { $$ = $1 << $3; }
+       | integer_shift DT_RSHIFT integer_add { $$ = $1 >> $3; }
+       | integer_add
+       ;
+
+integer_add:
+         integer_add '+' integer_mul { $$ = $1 + $3; }
+       | integer_add '-' integer_mul { $$ = $1 - $3; }
+       | integer_mul
+       ;
+
+integer_mul:
+         integer_mul '*' integer_unary { $$ = $1 * $3; }
+       | integer_mul '/' integer_unary { $$ = $1 / $3; }
+       | integer_mul '%' integer_unary { $$ = $1 % $3; }
+       | integer_unary
+       ;
+
+integer_unary:
+         integer_prim
+       | '-' integer_unary { $$ = -$2; }
+       | '~' integer_unary { $$ = ~$2; }
+       | '!' integer_unary { $$ = !$2; }
        ;
 
 bytestring:
@@ -303,6 +457,10 @@ subnode:
                {
                        $$ = name_node($2, $1);
                }
+       | DT_DEL_NODE DT_PROPNODENAME ';'
+               {
+                       $$ = name_node(build_node_delete(), $2);
+               }
        | DT_LABEL subnode
                {
                        add_label(&$2->labels, $1);
@@ -334,12 +492,41 @@ static unsigned long long eval_literal(const char *s, int base, int bits)
 
        errno = 0;
        val = strtoull(s, &e, base);
-       if (*e)
-               print_error("bad characters in literal");
-       else if ((errno == ERANGE)
+       if (*e) {
+               size_t uls = strspn(e, "UL");
+               if (e[uls])
+                       print_error("bad characters in literal");
+       }
+       if ((errno == ERANGE)
                 || ((bits < 64) && (val >= (1ULL << bits))))
                print_error("literal out of range");
        else if (errno != 0)
                print_error("bad literal");
        return val;
 }
+
+static unsigned char eval_char_literal(const char *s)
+{
+       int i = 1;
+       char c = s[0];
+
+       if (c == '\0')
+       {
+               print_error("empty character literal");
+               return 0;
+       }
+
+       /*
+        * If the first character in the character literal is a \ then process
+        * the remaining characters as an escape encoding. If the first
+        * character is neither an escape or a terminator it should be the only
+        * character in the literal and will be returned.
+        */
+       if (c == '\\')
+               c = get_escape_char(s, &i);
+
+       if (s[i] != '\0')
+               print_error("malformed character literal");
+
+       return c;
+}
index 2ef5e2e3dd38f781ef92fdd1d702e30c96fd7b20..a375683c1534f2a1a3791ab34d75f8c15450f58b 100644 (file)
@@ -82,6 +82,8 @@ static void  __attribute__ ((noreturn)) usage(void)
        fprintf(stderr, "\t\tSet the physical boot cpu\n");
        fprintf(stderr, "\t-f\n");
        fprintf(stderr, "\t\tForce - try to produce output even if the input tree has errors\n");
+       fprintf(stderr, "\t-i\n");
+       fprintf(stderr, "\t\tAdd a path to search for include files\n");
        fprintf(stderr, "\t-s\n");
        fprintf(stderr, "\t\tSort nodes and properties before outputting (only useful for\n\t\tcomparing trees)\n");
        fprintf(stderr, "\t-v\n");
@@ -91,6 +93,9 @@ static void  __attribute__ ((noreturn)) usage(void)
        fprintf(stderr, "\t\t\tlegacy - \"linux,phandle\" properties only\n");
        fprintf(stderr, "\t\t\tepapr - \"phandle\" properties only\n");
        fprintf(stderr, "\t\t\tboth - Both \"linux,phandle\" and \"phandle\" properties\n");
+       fprintf(stderr, "\t-W [no-]<checkname>\n");
+       fprintf(stderr, "\t-E [no-]<checkname>\n");
+       fprintf(stderr, "\t\t\tenable or disable warnings and errors\n");
        exit(3);
 }
 
@@ -113,7 +118,7 @@ int main(int argc, char *argv[])
        minsize    = 0;
        padsize    = 0;
 
-       while ((opt = getopt(argc, argv, "hI:O:o:V:d:R:S:p:fcqb:vH:s"))
+       while ((opt = getopt(argc, argv, "hI:O:o:V:d:R:S:p:fqb:i:vH:sW:E:"))
                        != EOF) {
                switch (opt) {
                case 'I':
@@ -149,6 +154,9 @@ int main(int argc, char *argv[])
                case 'b':
                        cmdline_boot_cpuid = strtoll(optarg, NULL, 0);
                        break;
+               case 'i':
+                       srcfile_add_search_path(optarg);
+                       break;
                case 'v':
                        printf("Version: %s\n", DTC_VERSION);
                        exit(0);
@@ -168,6 +176,14 @@ int main(int argc, char *argv[])
                        sort = 1;
                        break;
 
+               case 'W':
+                       parse_checks_option(true, false, optarg);
+                       break;
+
+               case 'E':
+                       parse_checks_option(false, true, optarg);
+                       break;
+
                case 'h':
                default:
                        usage();
@@ -188,9 +204,6 @@ int main(int argc, char *argv[])
        if (minsize)
                fprintf(stderr, "DTC: Use of \"-S\" is deprecated; it will be removed soon, use \"-p\" instead\n");
 
-       fprintf(stderr, "DTC: %s->%s  on file \"%s\"\n",
-               inform, outform, arg);
-
        if (depname) {
                depfile = fopen(depname, "w");
                if (!depfile)
index f37c97eb3dfc2ca1694e6c3a2bb053949faa12e5..d501c8605f2670627b4a03fe92d47c2be9b65af1 100644 (file)
@@ -25,6 +25,7 @@
 #include <string.h>
 #include <stdlib.h>
 #include <stdint.h>
+#include <stdbool.h>
 #include <stdarg.h>
 #include <assert.h>
 #include <ctype.h>
@@ -109,6 +110,7 @@ struct data data_insert_at_marker(struct data d, struct marker *m,
                                  const void *p, int len);
 struct data data_merge(struct data d1, struct data d2);
 struct data data_append_cell(struct data d, cell_t word);
+struct data data_append_integer(struct data d, uint64_t word, int bits);
 struct data data_append_re(struct data d, const struct fdt_reserve_entry *re);
 struct data data_append_addr(struct data d, uint64_t addr);
 struct data data_append_byte(struct data d, uint8_t byte);
@@ -126,11 +128,13 @@ int data_is_one_string(struct data d);
 
 /* Live trees */
 struct label {
+       int deleted;
        char *label;
        struct label *next;
 };
 
 struct property {
+       int deleted;
        char *name;
        struct data val;
 
@@ -140,6 +144,7 @@ struct property {
 };
 
 struct node {
+       int deleted;
        char *name;
        struct property *proplist;
        struct node *children;
@@ -156,28 +161,71 @@ struct node {
        struct label *labels;
 };
 
+static inline struct label *for_each_label_next(struct label *l)
+{
+       do {
+               l = l->next;
+       } while (l && l->deleted);
+
+       return l;
+}
+
 #define for_each_label(l0, l) \
+       for ((l) = (l0); (l); (l) = for_each_label_next(l))
+
+#define for_each_label_withdel(l0, l) \
        for ((l) = (l0); (l); (l) = (l)->next)
 
+static inline struct property *for_each_property_next(struct property *p)
+{
+       do {
+               p = p->next;
+       } while (p && p->deleted);
+
+       return p;
+}
+
 #define for_each_property(n, p) \
+       for ((p) = (n)->proplist; (p); (p) = for_each_property_next(p))
+
+#define for_each_property_withdel(n, p) \
        for ((p) = (n)->proplist; (p); (p) = (p)->next)
 
-#define for_each_child(n, c)   \
+static inline struct node *for_each_child_next(struct node *c)
+{
+       do {
+               c = c->next_sibling;
+       } while (c && c->deleted);
+
+       return c;
+}
+
+#define for_each_child(n, c) \
+       for ((c) = (n)->children; (c); (c) = for_each_child_next(c))
+
+#define for_each_child_withdel(n, c) \
        for ((c) = (n)->children; (c); (c) = (c)->next_sibling)
 
 void add_label(struct label **labels, char *label);
+void delete_labels(struct label **labels);
 
 struct property *build_property(char *name, struct data val);
+struct property *build_property_delete(char *name);
 struct property *chain_property(struct property *first, struct property *list);
 struct property *reverse_properties(struct property *first);
 
 struct node *build_node(struct property *proplist, struct node *children);
+struct node *build_node_delete(void);
 struct node *name_node(struct node *node, char *name);
 struct node *chain_node(struct node *first, struct node *list);
 struct node *merge_nodes(struct node *old_node, struct node *new_node);
 
 void add_property(struct node *node, struct property *prop);
+void delete_property_by_name(struct node *node, char *name);
+void delete_property(struct property *prop);
 void add_child(struct node *parent, struct node *child);
+void delete_node_by_name(struct node *parent, char *name);
+void delete_node(struct node *node);
 
 const char *get_unitname(struct node *node);
 struct property *get_property(struct node *node, const char *propname);
@@ -224,6 +272,7 @@ void sort_tree(struct boot_info *bi);
 
 /* Checks */
 
+void parse_checks_option(bool warn, bool error, const char *optarg);
 void process_checks(int force, struct boot_info *bi);
 
 /* Flattened trees */
diff --git a/scripts/dtc/fdtdump.c b/scripts/dtc/fdtdump.c
new file mode 100644 (file)
index 0000000..207a46d
--- /dev/null
@@ -0,0 +1,162 @@
+/*
+ * fdtdump.c - Contributed by Pantelis Antoniou <pantelis.antoniou AT gmail.com>
+ */
+
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <ctype.h>
+
+#include <fdt.h>
+#include <libfdt_env.h>
+
+#include "util.h"
+
+#define ALIGN(x, a)    (((x) + ((a) - 1)) & ~((a) - 1))
+#define PALIGN(p, a)   ((void *)(ALIGN((unsigned long)(p), (a))))
+#define GET_CELL(p)    (p += 4, *((const uint32_t *)(p-4)))
+
+static void print_data(const char *data, int len)
+{
+       int i;
+       const char *p = data;
+
+       /* no data, don't print */
+       if (len == 0)
+               return;
+
+       if (util_is_printable_string(data, len)) {
+               printf(" = \"%s\"", (const char *)data);
+       } else if ((len % 4) == 0) {
+               printf(" = <");
+               for (i = 0; i < len; i += 4)
+                       printf("0x%08x%s", fdt32_to_cpu(GET_CELL(p)),
+                              i < (len - 4) ? " " : "");
+               printf(">");
+       } else {
+               printf(" = [");
+               for (i = 0; i < len; i++)
+                       printf("%02x%s", *p++, i < len - 1 ? " " : "");
+               printf("]");
+       }
+}
+
+static void dump_blob(void *blob)
+{
+       struct fdt_header *bph = blob;
+       uint32_t off_mem_rsvmap = fdt32_to_cpu(bph->off_mem_rsvmap);
+       uint32_t off_dt = fdt32_to_cpu(bph->off_dt_struct);
+       uint32_t off_str = fdt32_to_cpu(bph->off_dt_strings);
+       struct fdt_reserve_entry *p_rsvmap =
+               (struct fdt_reserve_entry *)((char *)blob + off_mem_rsvmap);
+       const char *p_struct = (const char *)blob + off_dt;
+       const char *p_strings = (const char *)blob + off_str;
+       uint32_t version = fdt32_to_cpu(bph->version);
+       uint32_t totalsize = fdt32_to_cpu(bph->totalsize);
+       uint32_t tag;
+       const char *p, *s, *t;
+       int depth, sz, shift;
+       int i;
+       uint64_t addr, size;
+
+       depth = 0;
+       shift = 4;
+
+       printf("/dts-v1/;\n");
+       printf("// magic:\t\t0x%x\n", fdt32_to_cpu(bph->magic));
+       printf("// totalsize:\t\t0x%x (%d)\n", totalsize, totalsize);
+       printf("// off_dt_struct:\t0x%x\n", off_dt);
+       printf("// off_dt_strings:\t0x%x\n", off_str);
+       printf("// off_mem_rsvmap:\t0x%x\n", off_mem_rsvmap);
+       printf("// version:\t\t%d\n", version);
+       printf("// last_comp_version:\t%d\n",
+              fdt32_to_cpu(bph->last_comp_version));
+       if (version >= 2)
+               printf("// boot_cpuid_phys:\t0x%x\n",
+                      fdt32_to_cpu(bph->boot_cpuid_phys));
+
+       if (version >= 3)
+               printf("// size_dt_strings:\t0x%x\n",
+                      fdt32_to_cpu(bph->size_dt_strings));
+       if (version >= 17)
+               printf("// size_dt_struct:\t0x%x\n",
+                      fdt32_to_cpu(bph->size_dt_struct));
+       printf("\n");
+
+       for (i = 0; ; i++) {
+               addr = fdt64_to_cpu(p_rsvmap[i].address);
+               size = fdt64_to_cpu(p_rsvmap[i].size);
+               if (addr == 0 && size == 0)
+                       break;
+
+               printf("/memreserve/ %llx %llx;\n",
+                      (unsigned long long)addr, (unsigned long long)size);
+       }
+
+       p = p_struct;
+       while ((tag = fdt32_to_cpu(GET_CELL(p))) != FDT_END) {
+
+               /* printf("tag: 0x%08x (%d)\n", tag, p - p_struct); */
+
+               if (tag == FDT_BEGIN_NODE) {
+                       s = p;
+                       p = PALIGN(p + strlen(s) + 1, 4);
+
+                       if (*s == '\0')
+                               s = "/";
+
+                       printf("%*s%s {\n", depth * shift, "", s);
+
+                       depth++;
+                       continue;
+               }
+
+               if (tag == FDT_END_NODE) {
+                       depth--;
+
+                       printf("%*s};\n", depth * shift, "");
+                       continue;
+               }
+
+               if (tag == FDT_NOP) {
+                       printf("%*s// [NOP]\n", depth * shift, "");
+                       continue;
+               }
+
+               if (tag != FDT_PROP) {
+                       fprintf(stderr, "%*s ** Unknown tag 0x%08x\n", depth * shift, "", tag);
+                       break;
+               }
+               sz = fdt32_to_cpu(GET_CELL(p));
+               s = p_strings + fdt32_to_cpu(GET_CELL(p));
+               if (version < 16 && sz >= 8)
+                       p = PALIGN(p, 8);
+               t = p;
+
+               p = PALIGN(p + sz, 4);
+
+               printf("%*s%s", depth * shift, "", s);
+               print_data(t, sz);
+               printf(";\n");
+       }
+}
+
+
+int main(int argc, char *argv[])
+{
+       char *buf;
+
+       if (argc < 2) {
+               fprintf(stderr, "supply input filename\n");
+               return 5;
+       }
+
+       buf = utilfdt_read(argv[1]);
+       if (buf)
+               dump_blob(buf);
+       else
+               return 10;
+
+       return 0;
+}
diff --git a/scripts/dtc/fdtget.c b/scripts/dtc/fdtget.c
new file mode 100644 (file)
index 0000000..c2fbab2
--- /dev/null
@@ -0,0 +1,366 @@
+/*
+ * Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
+ *
+ * Portions from U-Boot cmd_fdt.c (C) Copyright 2007
+ * Gerald Van Baren, Custom IDEAS, vanbaren@cideas.com
+ * Based on code written by:
+ *   Pantelis Antoniou <pantelis.antoniou@gmail.com> and
+ *   Matthew McClintock <msm@freescale.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <assert.h>
+#include <ctype.h>
+#include <getopt.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <libfdt.h>
+
+#include "util.h"
+
+enum display_mode {
+       MODE_SHOW_VALUE,        /* show values for node properties */
+       MODE_LIST_PROPS,        /* list the properties for a node */
+       MODE_LIST_SUBNODES,     /* list the subnodes of a node */
+};
+
+/* Holds information which controls our output and options */
+struct display_info {
+       int type;               /* data type (s/i/u/x or 0 for default) */
+       int size;               /* data size (1/2/4) */
+       enum display_mode mode; /* display mode that we are using */
+       const char *default_val; /* default value if node/property not found */
+};
+
+static void report_error(const char *where, int err)
+{
+       fprintf(stderr, "Error at '%s': %s\n", where, fdt_strerror(err));
+}
+
+/**
+ * Displays data of a given length according to selected options
+ *
+ * If a specific data type is provided in disp, then this is used. Otherwise
+ * we try to guess the data type / size from the contents.
+ *
+ * @param disp         Display information / options
+ * @param data         Data to display
+ * @param len          Maximum length of buffer
+ * @return 0 if ok, -1 if data does not match format
+ */
+static int show_data(struct display_info *disp, const char *data, int len)
+{
+       int i, size;
+       const uint8_t *p = (const uint8_t *)data;
+       const char *s;
+       int value;
+       int is_string;
+       char fmt[3];
+
+       /* no data, don't print */
+       if (len == 0)
+               return 0;
+
+       is_string = (disp->type) == 's' ||
+               (!disp->type && util_is_printable_string(data, len));
+       if (is_string) {
+               if (data[len - 1] != '\0') {
+                       fprintf(stderr, "Unterminated string\n");
+                       return -1;
+               }
+               for (s = data; s - data < len; s += strlen(s) + 1) {
+                       if (s != data)
+                               printf(" ");
+                       printf("%s", (const char *)s);
+               }
+               return 0;
+       }
+       size = disp->size;
+       if (size == -1) {
+               size = (len % 4) == 0 ? 4 : 1;
+       } else if (len % size) {
+               fprintf(stderr, "Property length must be a multiple of "
+                               "selected data size\n");
+               return -1;
+       }
+       fmt[0] = '%';
+       fmt[1] = disp->type ? disp->type : 'd';
+       fmt[2] = '\0';
+       for (i = 0; i < len; i += size, p += size) {
+               if (i)
+                       printf(" ");
+               value = size == 4 ? fdt32_to_cpu(*(const uint32_t *)p) :
+                       size == 2 ? (*p << 8) | p[1] : *p;
+               printf(fmt, value);
+       }
+       return 0;
+}
+
+/**
+ * List all properties in a node, one per line.
+ *
+ * @param blob         FDT blob
+ * @param node         Node to display
+ * @return 0 if ok, or FDT_ERR... if not.
+ */
+static int list_properties(const void *blob, int node)
+{
+       const struct fdt_property *data;
+       const char *name;
+       int prop;
+
+       prop = fdt_first_property_offset(blob, node);
+       do {
+               /* Stop silently when there are no more properties */
+               if (prop < 0)
+                       return prop == -FDT_ERR_NOTFOUND ? 0 : prop;
+               data = fdt_get_property_by_offset(blob, prop, NULL);
+               name = fdt_string(blob, fdt32_to_cpu(data->nameoff));
+               if (name)
+                       puts(name);
+               prop = fdt_next_property_offset(blob, prop);
+       } while (1);
+}
+
+#define MAX_LEVEL      32              /* how deeply nested we will go */
+
+/**
+ * List all subnodes in a node, one per line
+ *
+ * @param blob         FDT blob
+ * @param node         Node to display
+ * @return 0 if ok, or FDT_ERR... if not.
+ */
+static int list_subnodes(const void *blob, int node)
+{
+       int nextoffset;         /* next node offset from libfdt */
+       uint32_t tag;           /* current tag */
+       int level = 0;          /* keep track of nesting level */
+       const char *pathp;
+       int depth = 1;          /* the assumed depth of this node */
+
+       while (level >= 0) {
+               tag = fdt_next_tag(blob, node, &nextoffset);
+               switch (tag) {
+               case FDT_BEGIN_NODE:
+                       pathp = fdt_get_name(blob, node, NULL);
+                       if (level <= depth) {
+                               if (pathp == NULL)
+                                       pathp = "/* NULL pointer error */";
+                               if (*pathp == '\0')
+                                       pathp = "/";    /* root is nameless */
+                               if (level == 1)
+                                       puts(pathp);
+                       }
+                       level++;
+                       if (level >= MAX_LEVEL) {
+                               printf("Nested too deep, aborting.\n");
+                               return 1;
+                       }
+                       break;
+               case FDT_END_NODE:
+                       level--;
+                       if (level == 0)
+                               level = -1;             /* exit the loop */
+                       break;
+               case FDT_END:
+                       return 1;
+               case FDT_PROP:
+                       break;
+               default:
+                       if (level <= depth)
+                               printf("Unknown tag 0x%08X\n", tag);
+                       return 1;
+               }
+               node = nextoffset;
+       }
+       return 0;
+}
+
+/**
+ * Show the data for a given node (and perhaps property) according to the
+ * display option provided.
+ *
+ * @param blob         FDT blob
+ * @param disp         Display information / options
+ * @param node         Node to display
+ * @param property     Name of property to display, or NULL if none
+ * @return 0 if ok, -ve on error
+ */
+static int show_data_for_item(const void *blob, struct display_info *disp,
+               int node, const char *property)
+{
+       const void *value = NULL;
+       int len, err = 0;
+
+       switch (disp->mode) {
+       case MODE_LIST_PROPS:
+               err = list_properties(blob, node);
+               break;
+
+       case MODE_LIST_SUBNODES:
+               err = list_subnodes(blob, node);
+               break;
+
+       default:
+               assert(property);
+               value = fdt_getprop(blob, node, property, &len);
+               if (value) {
+                       if (show_data(disp, value, len))
+                               err = -1;
+                       else
+                               printf("\n");
+               } else if (disp->default_val) {
+                       puts(disp->default_val);
+               } else {
+                       report_error(property, len);
+                       err = -1;
+               }
+               break;
+       }
+
+       return err;
+}
+
+/**
+ * Run the main fdtget operation, given a filename and valid arguments
+ *
+ * @param disp         Display information / options
+ * @param filename     Filename of blob file
+ * @param arg          List of arguments to process
+ * @param arg_count    Number of arguments
+ * @return 0 if ok, -ve on error
+ */
+static int do_fdtget(struct display_info *disp, const char *filename,
+                    char **arg, int arg_count, int args_per_step)
+{
+       char *blob;
+       const char *prop;
+       int i, node;
+
+       blob = utilfdt_read(filename);
+       if (!blob)
+               return -1;
+
+       for (i = 0; i + args_per_step <= arg_count; i += args_per_step) {
+               node = fdt_path_offset(blob, arg[i]);
+               if (node < 0) {
+                       if (disp->default_val) {
+                               puts(disp->default_val);
+                               continue;
+                       } else {
+                               report_error(arg[i], node);
+                               return -1;
+                       }
+               }
+               prop = args_per_step == 1 ? NULL : arg[i + 1];
+
+               if (show_data_for_item(blob, disp, node, prop))
+                       return -1;
+       }
+       return 0;
+}
+
+static const char *usage_msg =
+       "fdtget - read values from device tree\n"
+       "\n"
+       "Each value is printed on a new line.\n\n"
+       "Usage:\n"
+       "       fdtget <options> <dt file> [<node> <property>]...\n"
+       "       fdtget -p <options> <dt file> [<node> ]...\n"
+       "Options:\n"
+       "\t-t <type>\tType of data\n"
+       "\t-p\t\tList properties for each node\n"
+       "\t-l\t\tList subnodes for each node\n"
+       "\t-d\t\tDefault value to display when the property is "
+                       "missing\n"
+       "\t-h\t\tPrint this help\n\n"
+       USAGE_TYPE_MSG;
+
+static void usage(const char *msg)
+{
+       if (msg)
+               fprintf(stderr, "Error: %s\n\n", msg);
+
+       fprintf(stderr, "%s", usage_msg);
+       exit(2);
+}
+
+int main(int argc, char *argv[])
+{
+       char *filename = NULL;
+       struct display_info disp;
+       int args_per_step = 2;
+
+       /* set defaults */
+       memset(&disp, '\0', sizeof(disp));
+       disp.size = -1;
+       disp.mode = MODE_SHOW_VALUE;
+       for (;;) {
+               int c = getopt(argc, argv, "d:hlpt:");
+               if (c == -1)
+                       break;
+
+               switch (c) {
+               case 'h':
+               case '?':
+                       usage(NULL);
+
+               case 't':
+                       if (utilfdt_decode_type(optarg, &disp.type,
+                                       &disp.size))
+                               usage("Invalid type string");
+                       break;
+
+               case 'p':
+                       disp.mode = MODE_LIST_PROPS;
+                       args_per_step = 1;
+                       break;
+
+               case 'l':
+                       disp.mode = MODE_LIST_SUBNODES;
+                       args_per_step = 1;
+                       break;
+
+               case 'd':
+                       disp.default_val = optarg;
+                       break;
+               }
+       }
+
+       if (optind < argc)
+               filename = argv[optind++];
+       if (!filename)
+               usage("Missing filename");
+
+       argv += optind;
+       argc -= optind;
+
+       /* Allow no arguments, and silently succeed */
+       if (!argc)
+               return 0;
+
+       /* Check for node, property arguments */
+       if (args_per_step == 2 && (argc % 2))
+               usage("Must have an even number of arguments");
+
+       if (do_fdtget(&disp, filename, argv, argc, args_per_step))
+               return 1;
+       return 0;
+}
diff --git a/scripts/dtc/fdtput.c b/scripts/dtc/fdtput.c
new file mode 100644 (file)
index 0000000..f2197f5
--- /dev/null
@@ -0,0 +1,362 @@
+/*
+ * Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <assert.h>
+#include <ctype.h>
+#include <getopt.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <libfdt.h>
+
+#include "util.h"
+
+/* These are the operations we support */
+enum oper_type {
+       OPER_WRITE_PROP,                /* Write a property in a node */
+       OPER_CREATE_NODE,               /* Create a new node */
+};
+
+struct display_info {
+       enum oper_type oper;    /* operation to perform */
+       int type;               /* data type (s/i/u/x or 0 for default) */
+       int size;               /* data size (1/2/4) */
+       int verbose;            /* verbose output */
+       int auto_path;          /* automatically create all path components */
+};
+
+
+/**
+ * Report an error with a particular node.
+ *
+ * @param name         Node name to report error on
+ * @param namelen      Length of node name, or -1 to use entire string
+ * @param err          Error number to report (-FDT_ERR_...)
+ */
+static void report_error(const char *name, int namelen, int err)
+{
+       if (namelen == -1)
+               namelen = strlen(name);
+       fprintf(stderr, "Error at '%1.*s': %s\n", namelen, name,
+               fdt_strerror(err));
+}
+
+/**
+ * Encode a series of arguments in a property value.
+ *
+ * @param disp         Display information / options
+ * @param arg          List of arguments from command line
+ * @param arg_count    Number of arguments (may be 0)
+ * @param valuep       Returns buffer containing value
+ * @param *value_len   Returns length of value encoded
+ */
+static int encode_value(struct display_info *disp, char **arg, int arg_count,
+                       char **valuep, int *value_len)
+{
+       char *value = NULL;     /* holding area for value */
+       int value_size = 0;     /* size of holding area */
+       char *ptr;              /* pointer to current value position */
+       int len;                /* length of this cell/string/byte */
+       int ival;
+       int upto;       /* the number of bytes we have written to buf */
+       char fmt[3];
+
+       upto = 0;
+
+       if (disp->verbose)
+               fprintf(stderr, "Decoding value:\n");
+
+       fmt[0] = '%';
+       fmt[1] = disp->type ? disp->type : 'd';
+       fmt[2] = '\0';
+       for (; arg_count > 0; arg++, arg_count--, upto += len) {
+               /* assume integer unless told otherwise */
+               if (disp->type == 's')
+                       len = strlen(*arg) + 1;
+               else
+                       len = disp->size == -1 ? 4 : disp->size;
+
+               /* enlarge our value buffer by a suitable margin if needed */
+               if (upto + len > value_size) {
+                       value_size = (upto + len) + 500;
+                       value = realloc(value, value_size);
+                       if (!value) {
+                               fprintf(stderr, "Out of memory: cannot alloc "
+                                       "%d bytes\n", value_size);
+                               return -1;
+                       }
+               }
+
+               ptr = value + upto;
+               if (disp->type == 's') {
+                       memcpy(ptr, *arg, len);
+                       if (disp->verbose)
+                               fprintf(stderr, "\tstring: '%s'\n", ptr);
+               } else {
+                       int *iptr = (int *)ptr;
+                       sscanf(*arg, fmt, &ival);
+                       if (len == 4)
+                               *iptr = cpu_to_fdt32(ival);
+                       else
+                               *ptr = (uint8_t)ival;
+                       if (disp->verbose) {
+                               fprintf(stderr, "\t%s: %d\n",
+                                       disp->size == 1 ? "byte" :
+                                       disp->size == 2 ? "short" : "int",
+                                       ival);
+                       }
+               }
+       }
+       *value_len = upto;
+       *valuep = value;
+       if (disp->verbose)
+               fprintf(stderr, "Value size %d\n", upto);
+       return 0;
+}
+
+static int store_key_value(void *blob, const char *node_name,
+               const char *property, const char *buf, int len)
+{
+       int node;
+       int err;
+
+       node = fdt_path_offset(blob, node_name);
+       if (node < 0) {
+               report_error(node_name, -1, node);
+               return -1;
+       }
+
+       err = fdt_setprop(blob, node, property, buf, len);
+       if (err) {
+               report_error(property, -1, err);
+               return -1;
+       }
+       return 0;
+}
+
+/**
+ * Create paths as needed for all components of a path
+ *
+ * Any components of the path that do not exist are created. Errors are
+ * reported.
+ *
+ * @param blob         FDT blob to write into
+ * @param in_path      Path to process
+ * @return 0 if ok, -1 on error
+ */
+static int create_paths(void *blob, const char *in_path)
+{
+       const char *path = in_path;
+       const char *sep;
+       int node, offset = 0;
+
+       /* skip leading '/' */
+       while (*path == '/')
+               path++;
+
+       for (sep = path; *sep; path = sep + 1, offset = node) {
+               /* equivalent to strchrnul(), but it requires _GNU_SOURCE */
+               sep = strchr(path, '/');
+               if (!sep)
+                       sep = path + strlen(path);
+
+               node = fdt_subnode_offset_namelen(blob, offset, path,
+                               sep - path);
+               if (node == -FDT_ERR_NOTFOUND) {
+                       node = fdt_add_subnode_namelen(blob, offset, path,
+                                                      sep - path);
+               }
+               if (node < 0) {
+                       report_error(path, sep - path, node);
+                       return -1;
+               }
+       }
+
+       return 0;
+}
+
+/**
+ * Create a new node in the fdt.
+ *
+ * This will overwrite the node_name string. Any error is reported.
+ *
+ * TODO: Perhaps create fdt_path_offset_namelen() so we don't need to do this.
+ *
+ * @param blob         FDT blob to write into
+ * @param node_name    Name of node to create
+ * @return new node offset if found, or -1 on failure
+ */
+static int create_node(void *blob, const char *node_name)
+{
+       int node = 0;
+       char *p;
+
+       p = strrchr(node_name, '/');
+       if (!p) {
+               report_error(node_name, -1, -FDT_ERR_BADPATH);
+               return -1;
+       }
+       *p = '\0';
+
+       if (p > node_name) {
+               node = fdt_path_offset(blob, node_name);
+               if (node < 0) {
+                       report_error(node_name, -1, node);
+                       return -1;
+               }
+       }
+
+       node = fdt_add_subnode(blob, node, p + 1);
+       if (node < 0) {
+               report_error(p + 1, -1, node);
+               return -1;
+       }
+
+       return 0;
+}
+
+static int do_fdtput(struct display_info *disp, const char *filename,
+                   char **arg, int arg_count)
+{
+       char *value;
+       char *blob;
+       int len, ret = 0;
+
+       blob = utilfdt_read(filename);
+       if (!blob)
+               return -1;
+
+       switch (disp->oper) {
+       case OPER_WRITE_PROP:
+               /*
+                * Convert the arguments into a single binary value, then
+                * store them into the property.
+                */
+               assert(arg_count >= 2);
+               if (disp->auto_path && create_paths(blob, *arg))
+                       return -1;
+               if (encode_value(disp, arg + 2, arg_count - 2, &value, &len) ||
+                       store_key_value(blob, *arg, arg[1], value, len))
+                       ret = -1;
+               break;
+       case OPER_CREATE_NODE:
+               for (; ret >= 0 && arg_count--; arg++) {
+                       if (disp->auto_path)
+                               ret = create_paths(blob, *arg);
+                       else
+                               ret = create_node(blob, *arg);
+               }
+               break;
+       }
+       if (ret >= 0)
+               ret = utilfdt_write(filename, blob);
+
+       free(blob);
+       return ret;
+}
+
+static const char *usage_msg =
+       "fdtput - write a property value to a device tree\n"
+       "\n"
+       "The command line arguments are joined together into a single value.\n"
+       "\n"
+       "Usage:\n"
+       "       fdtput <options> <dt file> <node> <property> [<value>...]\n"
+       "       fdtput -c <options> <dt file> [<node>...]\n"
+       "Options:\n"
+       "\t-c\t\tCreate nodes if they don't already exist\n"
+       "\t-p\t\tAutomatically create nodes as needed for the node path\n"
+       "\t-t <type>\tType of data\n"
+       "\t-v\t\tVerbose: display each value decoded from command line\n"
+       "\t-h\t\tPrint this help\n\n"
+       USAGE_TYPE_MSG;
+
+static void usage(const char *msg)
+{
+       if (msg)
+               fprintf(stderr, "Error: %s\n\n", msg);
+
+       fprintf(stderr, "%s", usage_msg);
+       exit(2);
+}
+
+int main(int argc, char *argv[])
+{
+       struct display_info disp;
+       char *filename = NULL;
+
+       memset(&disp, '\0', sizeof(disp));
+       disp.size = -1;
+       disp.oper = OPER_WRITE_PROP;
+       for (;;) {
+               int c = getopt(argc, argv, "chpt:v");
+               if (c == -1)
+                       break;
+
+               /*
+                * TODO: add options to:
+                * - delete property
+                * - delete node (optionally recursively)
+                * - rename node
+                * - pack fdt before writing
+                * - set amount of free space when writing
+                * - expand fdt if value doesn't fit
+                */
+               switch (c) {
+               case 'c':
+                       disp.oper = OPER_CREATE_NODE;
+                       break;
+               case 'h':
+               case '?':
+                       usage(NULL);
+               case 'p':
+                       disp.auto_path = 1;
+                       break;
+               case 't':
+                       if (utilfdt_decode_type(optarg, &disp.type,
+                                       &disp.size))
+                               usage("Invalid type string");
+                       break;
+
+               case 'v':
+                       disp.verbose = 1;
+                       break;
+               }
+       }
+
+       if (optind < argc)
+               filename = argv[optind++];
+       if (!filename)
+               usage("Missing filename");
+
+       argv += optind;
+       argc -= optind;
+
+       if (disp.oper == OPER_WRITE_PROP) {
+               if (argc < 1)
+                       usage("Missing node");
+               if (argc < 2)
+                       usage("Missing property");
+       }
+
+       if (do_fdtput(&disp, filename, argv, argc))
+               return 1;
+       return 0;
+}
index 28d0b2381df6e526a09043da4b79247ce1998c41..665dad7bb465b474387af9996e1d36e96a579d6b 100644 (file)
@@ -263,6 +263,9 @@ static void flatten_tree(struct node *tree, struct emitter *emit,
        struct node *child;
        int seen_name_prop = 0;
 
+       if (tree->deleted)
+               return;
+
        emit->beginnode(etarget, tree->labels);
 
        if (vi->flags & FTF_FULLPATH)
index 6c42acfa21ec61282dd53f67dbb70c795c5cc649..91126c000a1ed82dbf58d1d2c7cd9ff59a4e96ae 100644 (file)
@@ -3,6 +3,8 @@
 # This is not a complete Makefile of itself.  Instead, it is designed to
 # be easily embeddable into other systems of Makefiles.
 #
-LIBFDT_INCLUDES = fdt.h libfdt.h
-LIBFDT_SRCS = fdt.c fdt_ro.c fdt_wip.c fdt_sw.c fdt_rw.c fdt_strerror.c
+LIBFDT_soname = libfdt.$(SHAREDLIB_EXT).1
+LIBFDT_INCLUDES = fdt.h libfdt.h libfdt_env.h
+LIBFDT_VERSION = version.lds
+LIBFDT_SRCS = fdt.c fdt_ro.c fdt_wip.c fdt_sw.c fdt_rw.c fdt_strerror.c fdt_empty_tree.c
 LIBFDT_OBJS = $(LIBFDT_SRCS:%.c=%.o)
index 2acaec5923aea4fc5cb3f8360a1e6105f7c99897..e56833ae9b6ffaee67281dc959b0c263ccc24c15 100644 (file)
@@ -74,7 +74,7 @@ int fdt_check_header(const void *fdt)
        return 0;
 }
 
-const void *fdt_offset_ptr(const void *fdt, int offset, int len)
+const void *fdt_offset_ptr(const void *fdt, int offset, unsigned int len)
 {
        const char *p;
 
@@ -90,42 +90,53 @@ const void *fdt_offset_ptr(const void *fdt, int offset, int len)
        return p;
 }
 
-uint32_t fdt_next_tag(const void *fdt, int offset, int *nextoffset)
+uint32_t fdt_next_tag(const void *fdt, int startoffset, int *nextoffset)
 {
        const uint32_t *tagp, *lenp;
        uint32_t tag;
+       int offset = startoffset;
        const char *p;
 
-       if (offset % FDT_TAGSIZE)
-               return -1;
-
+       *nextoffset = -FDT_ERR_TRUNCATED;
        tagp = fdt_offset_ptr(fdt, offset, FDT_TAGSIZE);
-       if (! tagp)
+       if (!tagp)
                return FDT_END; /* premature end */
        tag = fdt32_to_cpu(*tagp);
        offset += FDT_TAGSIZE;
 
+       *nextoffset = -FDT_ERR_BADSTRUCTURE;
        switch (tag) {
        case FDT_BEGIN_NODE:
                /* skip name */
                do {
                        p = fdt_offset_ptr(fdt, offset++, 1);
                } while (p && (*p != '\0'));
-               if (! p)
-                       return FDT_END;
+               if (!p)
+                       return FDT_END; /* premature end */
                break;
+
        case FDT_PROP:
                lenp = fdt_offset_ptr(fdt, offset, sizeof(*lenp));
-               if (! lenp)
-                       return FDT_END;
-               /* skip name offset, length and value */
-               offset += 2*FDT_TAGSIZE + fdt32_to_cpu(*lenp);
+               if (!lenp)
+                       return FDT_END; /* premature end */
+                       /* skip name offset, length and value */
+               offset += sizeof(struct fdt_property) - FDT_TAGSIZE
+                       + fdt32_to_cpu(*lenp);
+               break;
+
+       case FDT_END:
+       case FDT_END_NODE:
+       case FDT_NOP:
                break;
+
+       default:
+               return FDT_END;
        }
 
-       if (nextoffset)
-               *nextoffset = FDT_TAGALIGN(offset);
+       if (!fdt_offset_ptr(fdt, startoffset, offset - startoffset))
+               return FDT_END; /* premature end */
 
+       *nextoffset = FDT_TAGALIGN(offset);
        return tag;
 }
 
@@ -138,6 +149,15 @@ int _fdt_check_node_offset(const void *fdt, int offset)
        return offset;
 }
 
+int _fdt_check_prop_offset(const void *fdt, int offset)
+{
+       if ((offset < 0) || (offset % FDT_TAGSIZE)
+           || (fdt_next_tag(fdt, offset, &offset) != FDT_PROP))
+               return -FDT_ERR_BADOFFSET;
+
+       return offset;
+}
+
 int fdt_next_node(const void *fdt, int offset, int *depth)
 {
        int nextoffset = 0;
@@ -162,15 +182,16 @@ int fdt_next_node(const void *fdt, int offset, int *depth)
                        break;
 
                case FDT_END_NODE:
-                       if (depth)
-                               (*depth)--;
+                       if (depth && ((--(*depth)) < 0))
+                               return nextoffset;
                        break;
 
                case FDT_END:
-                       return -FDT_ERR_NOTFOUND;
-
-               default:
-                       return -FDT_ERR_BADSTRUCTURE;
+                       if ((nextoffset >= 0)
+                           || ((nextoffset == -FDT_ERR_TRUNCATED) && !depth))
+                               return -FDT_ERR_NOTFOUND;
+                       else
+                               return nextoffset;
                }
        } while (tag != FDT_BEGIN_NODE);
 
diff --git a/scripts/dtc/libfdt/fdt_empty_tree.c b/scripts/dtc/libfdt/fdt_empty_tree.c
new file mode 100644 (file)
index 0000000..f72d13b
--- /dev/null
@@ -0,0 +1,84 @@
+/*
+ * libfdt - Flat Device Tree manipulation
+ * Copyright (C) 2012 David Gibson, IBM Corporation.
+ *
+ * libfdt is dual licensed: you can use it either under the terms of
+ * the GPL, or the BSD license, at your option.
+ *
+ *  a) This library is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This library is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ *     You should have received a copy of the GNU General Public
+ *     License along with this library; if not, write to the Free
+ *     Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
+ *     MA 02110-1301 USA
+ *
+ * Alternatively,
+ *
+ *  b) Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *     1. Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *     2. Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ *     THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ *     CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ *     INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ *     MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ *     DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ *     CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *     SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *     NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ *     LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ *     HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ *     OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ *     EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "libfdt_env.h"
+
+#include <fdt.h>
+#include <libfdt.h>
+
+#include "libfdt_internal.h"
+
+int fdt_create_empty_tree(void *buf, int bufsize)
+{
+       int err;
+
+       err = fdt_create(buf, bufsize);
+       if (err)
+               return err;
+
+       err = fdt_finish_reservemap(buf);
+       if (err)
+               return err;
+
+       err = fdt_begin_node(buf, "");
+       if (err)
+               return err;
+
+       err =  fdt_end_node(buf);
+       if (err)
+               return err;
+
+       err = fdt_finish(buf);
+       if (err)
+               return err;
+
+       return fdt_open_into(buf, buf, bufsize);
+}
+
index 22e692919ff9ccba391480514c66ff3e86f62c58..02b6d687537fac11e13a835502b1c16c98fb7603 100644 (file)
@@ -80,6 +80,14 @@ const char *fdt_string(const void *fdt, int stroffset)
        return (const char *)fdt + fdt_off_dt_strings(fdt) + stroffset;
 }
 
+static int _fdt_string_eq(const void *fdt, int stroffset,
+                         const char *s, int len)
+{
+       const char *p = fdt_string(fdt, stroffset);
+
+       return (strlen(p) == len) && (memcmp(p, s, len) == 0);
+}
+
 int fdt_get_mem_rsv(const void *fdt, int n, uint64_t *address, uint64_t *size)
 {
        FDT_CHECK_HEADER(fdt);
@@ -97,6 +105,30 @@ int fdt_num_mem_rsv(const void *fdt)
        return i;
 }
 
+static int _nextprop(const void *fdt, int offset)
+{
+       uint32_t tag;
+       int nextoffset;
+
+       do {
+               tag = fdt_next_tag(fdt, offset, &nextoffset);
+
+               switch (tag) {
+               case FDT_END:
+                       if (nextoffset >= 0)
+                               return -FDT_ERR_BADSTRUCTURE;
+                       else
+                               return nextoffset;
+
+               case FDT_PROP:
+                       return offset;
+               }
+               offset = nextoffset;
+       } while (tag == FDT_NOP);
+
+       return -FDT_ERR_NOTFOUND;
+}
+
 int fdt_subnode_offset_namelen(const void *fdt, int offset,
                               const char *name, int namelen)
 {
@@ -104,20 +136,16 @@ int fdt_subnode_offset_namelen(const void *fdt, int offset,
 
        FDT_CHECK_HEADER(fdt);
 
-       for (depth = 0, offset = fdt_next_node(fdt, offset, &depth);
-            (offset >= 0) && (depth > 0);
-            offset = fdt_next_node(fdt, offset, &depth)) {
-               if (depth < 0)
-                       return -FDT_ERR_NOTFOUND;
-               else if ((depth == 1)
-                        && _fdt_nodename_eq(fdt, offset, name, namelen))
+       for (depth = 0;
+            (offset >= 0) && (depth >= 0);
+            offset = fdt_next_node(fdt, offset, &depth))
+               if ((depth == 1)
+                   && _fdt_nodename_eq(fdt, offset, name, namelen))
                        return offset;
-       }
 
-       if (offset < 0)
-               return offset; /* error */
-       else
+       if (depth < 0)
                return -FDT_ERR_NOTFOUND;
+       return offset; /* error */
 }
 
 int fdt_subnode_offset(const void *fdt, int parentoffset,
@@ -134,8 +162,20 @@ int fdt_path_offset(const void *fdt, const char *path)
 
        FDT_CHECK_HEADER(fdt);
 
-       if (*path != '/')
-               return -FDT_ERR_BADPATH;
+       /* see if we have an alias */
+       if (*path != '/') {
+               const char *q = strchr(path, '/');
+
+               if (!q)
+                       q = end;
+
+               p = fdt_get_alias_namelen(fdt, p, q - p);
+               if (!p)
+                       return -FDT_ERR_BADPATH;
+               offset = fdt_path_offset(fdt, p);
+
+               p = q;
+       }
 
        while (*p) {
                const char *q;
@@ -178,93 +218,142 @@ const char *fdt_get_name(const void *fdt, int nodeoffset, int *len)
        return NULL;
 }
 
-const struct fdt_property *fdt_get_property(const void *fdt,
-                                           int nodeoffset,
-                                           const char *name, int *lenp)
+int fdt_first_property_offset(const void *fdt, int nodeoffset)
+{
+       int offset;
+
+       if ((offset = _fdt_check_node_offset(fdt, nodeoffset)) < 0)
+               return offset;
+
+       return _nextprop(fdt, offset);
+}
+
+int fdt_next_property_offset(const void *fdt, int offset)
+{
+       if ((offset = _fdt_check_prop_offset(fdt, offset)) < 0)
+               return offset;
+
+       return _nextprop(fdt, offset);
+}
+
+const struct fdt_property *fdt_get_property_by_offset(const void *fdt,
+                                                     int offset,
+                                                     int *lenp)
 {
-       uint32_t tag;
-       const struct fdt_property *prop;
-       int namestroff;
-       int offset, nextoffset;
        int err;
+       const struct fdt_property *prop;
 
-       if (((err = fdt_check_header(fdt)) != 0)
-           || ((err = _fdt_check_node_offset(fdt, nodeoffset)) < 0))
-                       goto fail;
+       if ((err = _fdt_check_prop_offset(fdt, offset)) < 0) {
+               if (lenp)
+                       *lenp = err;
+               return NULL;
+       }
 
-       nextoffset = err;
-       do {
-               offset = nextoffset;
+       prop = _fdt_offset_ptr(fdt, offset);
 
-               tag = fdt_next_tag(fdt, offset, &nextoffset);
-               switch (tag) {
-               case FDT_END:
-                       err = -FDT_ERR_TRUNCATED;
-                       goto fail;
+       if (lenp)
+               *lenp = fdt32_to_cpu(prop->len);
 
-               case FDT_BEGIN_NODE:
-               case FDT_END_NODE:
-               case FDT_NOP:
-                       break;
+       return prop;
+}
 
-               case FDT_PROP:
-                       err = -FDT_ERR_BADSTRUCTURE;
-                       prop = fdt_offset_ptr(fdt, offset, sizeof(*prop));
-                       if (! prop)
-                               goto fail;
-                       namestroff = fdt32_to_cpu(prop->nameoff);
-                       if (strcmp(fdt_string(fdt, namestroff), name) == 0) {
-                               /* Found it! */
-                               int len = fdt32_to_cpu(prop->len);
-                               prop = fdt_offset_ptr(fdt, offset,
-                                                     sizeof(*prop)+len);
-                               if (! prop)
-                                       goto fail;
-
-                               if (lenp)
-                                       *lenp = len;
-
-                               return prop;
-                       }
-                       break;
+const struct fdt_property *fdt_get_property_namelen(const void *fdt,
+                                                   int offset,
+                                                   const char *name,
+                                                   int namelen, int *lenp)
+{
+       for (offset = fdt_first_property_offset(fdt, offset);
+            (offset >= 0);
+            (offset = fdt_next_property_offset(fdt, offset))) {
+               const struct fdt_property *prop;
 
-               default:
-                       err = -FDT_ERR_BADSTRUCTURE;
-                       goto fail;
+               if (!(prop = fdt_get_property_by_offset(fdt, offset, lenp))) {
+                       offset = -FDT_ERR_INTERNAL;
+                       break;
                }
-       } while ((tag != FDT_BEGIN_NODE) && (tag != FDT_END_NODE));
+               if (_fdt_string_eq(fdt, fdt32_to_cpu(prop->nameoff),
+                                  name, namelen))
+                       return prop;
+       }
 
-       err = -FDT_ERR_NOTFOUND;
- fail:
        if (lenp)
-               *lenp = err;
+               *lenp = offset;
        return NULL;
 }
 
-const void *fdt_getprop(const void *fdt, int nodeoffset,
-                 const char *name, int *lenp)
+const struct fdt_property *fdt_get_property(const void *fdt,
+                                           int nodeoffset,
+                                           const char *name, int *lenp)
+{
+       return fdt_get_property_namelen(fdt, nodeoffset, name,
+                                       strlen(name), lenp);
+}
+
+const void *fdt_getprop_namelen(const void *fdt, int nodeoffset,
+                               const char *name, int namelen, int *lenp)
 {
        const struct fdt_property *prop;
 
-       prop = fdt_get_property(fdt, nodeoffset, name, lenp);
+       prop = fdt_get_property_namelen(fdt, nodeoffset, name, namelen, lenp);
        if (! prop)
                return NULL;
 
        return prop->data;
 }
 
+const void *fdt_getprop_by_offset(const void *fdt, int offset,
+                                 const char **namep, int *lenp)
+{
+       const struct fdt_property *prop;
+
+       prop = fdt_get_property_by_offset(fdt, offset, lenp);
+       if (!prop)
+               return NULL;
+       if (namep)
+               *namep = fdt_string(fdt, fdt32_to_cpu(prop->nameoff));
+       return prop->data;
+}
+
+const void *fdt_getprop(const void *fdt, int nodeoffset,
+                       const char *name, int *lenp)
+{
+       return fdt_getprop_namelen(fdt, nodeoffset, name, strlen(name), lenp);
+}
+
 uint32_t fdt_get_phandle(const void *fdt, int nodeoffset)
 {
        const uint32_t *php;
        int len;
 
-       php = fdt_getprop(fdt, nodeoffset, "linux,phandle", &len);
-       if (!php || (len != sizeof(*php)))
-               return 0;
+       /* FIXME: This is a bit sub-optimal, since we potentially scan
+        * over all the properties twice. */
+       php = fdt_getprop(fdt, nodeoffset, "phandle", &len);
+       if (!php || (len != sizeof(*php))) {
+               php = fdt_getprop(fdt, nodeoffset, "linux,phandle", &len);
+               if (!php || (len != sizeof(*php)))
+                       return 0;
+       }
 
        return fdt32_to_cpu(*php);
 }
 
+const char *fdt_get_alias_namelen(const void *fdt,
+                                 const char *name, int namelen)
+{
+       int aliasoffset;
+
+       aliasoffset = fdt_path_offset(fdt, "/aliases");
+       if (aliasoffset < 0)
+               return NULL;
+
+       return fdt_getprop_namelen(fdt, aliasoffset, name, namelen, NULL);
+}
+
+const char *fdt_get_alias(const void *fdt, const char *name)
+{
+       return fdt_get_alias_namelen(fdt, name, strlen(name));
+}
+
 int fdt_get_path(const void *fdt, int nodeoffset, char *buf, int buflen)
 {
        int pdepth = 0, p = 0;
@@ -279,9 +368,6 @@ int fdt_get_path(const void *fdt, int nodeoffset, char *buf, int buflen)
        for (offset = 0, depth = 0;
             (offset >= 0) && (offset <= nodeoffset);
             offset = fdt_next_node(fdt, offset, &depth)) {
-               if (pdepth < depth)
-                       continue; /* overflowed buffer */
-
                while (pdepth > depth) {
                        do {
                                p--;
@@ -289,14 +375,16 @@ int fdt_get_path(const void *fdt, int nodeoffset, char *buf, int buflen)
                        pdepth--;
                }
 
-               name = fdt_get_name(fdt, offset, &namelen);
-               if (!name)
-                       return namelen;
-               if ((p + namelen + 1) <= buflen) {
-                       memcpy(buf + p, name, namelen);
-                       p += namelen;
-                       buf[p++] = '/';
-                       pdepth++;
+               if (pdepth >= depth) {
+                       name = fdt_get_name(fdt, offset, &namelen);
+                       if (!name)
+                               return namelen;
+                       if ((p + namelen + 1) <= buflen) {
+                               memcpy(buf + p, name, namelen);
+                               p += namelen;
+                               buf[p++] = '/';
+                               pdepth++;
+                       }
                }
 
                if (offset == nodeoffset) {
@@ -306,7 +394,7 @@ int fdt_get_path(const void *fdt, int nodeoffset, char *buf, int buflen)
                        if (p > 1) /* special case so that root path is "/", not "" */
                                p--;
                        buf[p] = '\0';
-                       return p;
+                       return 0;
                }
        }
 
@@ -404,14 +492,31 @@ int fdt_node_offset_by_prop_value(const void *fdt, int startoffset,
 
 int fdt_node_offset_by_phandle(const void *fdt, uint32_t phandle)
 {
+       int offset;
+
        if ((phandle == 0) || (phandle == -1))
                return -FDT_ERR_BADPHANDLE;
-       phandle = cpu_to_fdt32(phandle);
-       return fdt_node_offset_by_prop_value(fdt, -1, "linux,phandle",
-                                            &phandle, sizeof(phandle));
+
+       FDT_CHECK_HEADER(fdt);
+
+       /* FIXME: The algorithm here is pretty horrible: we
+        * potentially scan each property of a node in
+        * fdt_get_phandle(), then if that didn't find what
+        * we want, we scan over them again making our way to the next
+        * node.  Still it's the easiest to implement approach;
+        * performance can come later. */
+       for (offset = fdt_next_node(fdt, -1, NULL);
+            offset >= 0;
+            offset = fdt_next_node(fdt, offset, NULL)) {
+               if (fdt_get_phandle(fdt, offset) == phandle)
+                       return offset;
+       }
+
+       return offset; /* error from fdt_next_node() */
 }
 
-static int _stringlist_contains(const char *strlist, int listlen, const char *str)
+static int _fdt_stringlist_contains(const char *strlist, int listlen,
+                                   const char *str)
 {
        int len = strlen(str);
        const char *p;
@@ -437,7 +542,7 @@ int fdt_node_check_compatible(const void *fdt, int nodeoffset,
        prop = fdt_getprop(fdt, nodeoffset, "compatible", &len);
        if (!prop)
                return len;
-       if (_stringlist_contains(prop, len, compatible))
+       if (_fdt_stringlist_contains(prop, len, compatible))
                return 0;
        else
                return 1;
index 8e7ec4cb7bcdc91abe0e4b77e5f2ffdd0b5d0a00..24437dfc32b8843f22ddc6f123867f8ee0f3987c 100644 (file)
@@ -289,6 +289,33 @@ int fdt_setprop(void *fdt, int nodeoffset, const char *name,
        return 0;
 }
 
+int fdt_appendprop(void *fdt, int nodeoffset, const char *name,
+                  const void *val, int len)
+{
+       struct fdt_property *prop;
+       int err, oldlen, newlen;
+
+       FDT_RW_CHECK_HEADER(fdt);
+
+       prop = fdt_get_property_w(fdt, nodeoffset, name, &oldlen);
+       if (prop) {
+               newlen = len + oldlen;
+               err = _fdt_splice_struct(fdt, prop->data,
+                                        FDT_TAGALIGN(oldlen),
+                                        FDT_TAGALIGN(newlen));
+               if (err)
+                       return err;
+               prop->len = cpu_to_fdt32(newlen);
+               memcpy(prop->data + oldlen, val, len);
+       } else {
+               err = _fdt_add_property(fdt, nodeoffset, name, len, &prop);
+               if (err)
+                       return err;
+               memcpy(prop->data, val, len);
+       }
+       return 0;
+}
+
 int fdt_delprop(void *fdt, int nodeoffset, const char *name)
 {
        struct fdt_property *prop;
@@ -406,6 +433,8 @@ int fdt_open_into(const void *fdt, void *buf, int bufsize)
                struct_size = 0;
                while (fdt_next_tag(fdt, struct_size, &struct_size) != FDT_END)
                        ;
+               if (struct_size < 0)
+                       return struct_size;
        }
 
        if (!_fdt_blocks_misordered(fdt, mem_rsv_size, struct_size)) {
index 698329e0ccaf8f94d2e333b277a78ab3dcff9adc..55ebebf1eb20e8c7f9200881f99ecf03632db9a3 100644 (file)
@@ -70,7 +70,7 @@ static int _fdt_sw_check_header(void *fdt)
                        return err; \
        }
 
-static void *_fdt_grab_space(void *fdt, int len)
+static void *_fdt_grab_space(void *fdt, size_t len)
 {
        int offset = fdt_size_dt_struct(fdt);
        int spaceleft;
@@ -82,7 +82,7 @@ static void *_fdt_grab_space(void *fdt, int len)
                return NULL;
 
        fdt_set_size_dt_struct(fdt, offset + len);
-       return fdt_offset_ptr_w(fdt, offset, len);
+       return _fdt_offset_ptr_w(fdt, offset);
 }
 
 int fdt_create(void *buf, int bufsize)
@@ -237,18 +237,17 @@ int fdt_finish(void *fdt)
        while ((tag = fdt_next_tag(fdt, offset, &nextoffset)) != FDT_END) {
                if (tag == FDT_PROP) {
                        struct fdt_property *prop =
-                               fdt_offset_ptr_w(fdt, offset, sizeof(*prop));
+                               _fdt_offset_ptr_w(fdt, offset);
                        int nameoff;
 
-                       if (! prop)
-                               return -FDT_ERR_BADSTRUCTURE;
-
                        nameoff = fdt32_to_cpu(prop->nameoff);
                        nameoff += fdt_size_dt_strings(fdt);
                        prop->nameoff = cpu_to_fdt32(nameoff);
                }
                offset = nextoffset;
        }
+       if (nextoffset < 0)
+               return nextoffset;
 
        /* Finally, adjust the header */
        fdt_set_totalsize(fdt, newstroffset + fdt_size_dt_strings(fdt));
index a4652c6e787ec8ef6823454eaa4c8932343863e2..6025fa1fe8feeb046a68605776d0d1ecac1efe9f 100644 (file)
@@ -94,41 +94,14 @@ int fdt_nop_property(void *fdt, int nodeoffset, const char *name)
        return 0;
 }
 
-int _fdt_node_end_offset(void *fdt, int nodeoffset)
+int _fdt_node_end_offset(void *fdt, int offset)
 {
-       int level = 0;
-       uint32_t tag;
-       int offset, nextoffset;
-
-       tag = fdt_next_tag(fdt, nodeoffset, &nextoffset);
-       if (tag != FDT_BEGIN_NODE)
-               return -FDT_ERR_BADOFFSET;
-       do {
-               offset = nextoffset;
-               tag = fdt_next_tag(fdt, offset, &nextoffset);
-
-               switch (tag) {
-               case FDT_END:
-                       return offset;
-
-               case FDT_BEGIN_NODE:
-                       level++;
-                       break;
-
-               case FDT_END_NODE:
-                       level--;
-                       break;
-
-               case FDT_PROP:
-               case FDT_NOP:
-                       break;
-
-               default:
-                       return -FDT_ERR_BADSTRUCTURE;
-               }
-       } while (level >= 0);
-
-       return nextoffset;
+       int depth = 0;
+
+       while ((offset >= 0) && (depth >= 0))
+               offset = fdt_next_node(fdt, offset, &depth);
+
+       return offset;
 }
 
 int fdt_nop_node(void *fdt, int nodeoffset)
index ff6246f000ce1267f3efaa5432772b7550273623..73f49759a5e71b79aa5bb4ae172681ce329cd294 100644 (file)
@@ -61,7 +61,7 @@
 #define FDT_ERR_NOTFOUND       1
        /* FDT_ERR_NOTFOUND: The requested node or property does not exist */
 #define FDT_ERR_EXISTS         2
-       /* FDT_ERR_EXISTS: Attempted to create a node or property which
+       /* FDT_ERR_EXISTS: Attempted to create a node or property which
         * already exists */
 #define FDT_ERR_NOSPACE                3
        /* FDT_ERR_NOSPACE: Operation needed to expand the device
 /* Low-level functions (you probably don't need these)                */
 /**********************************************************************/
 
-const void *fdt_offset_ptr(const void *fdt, int offset, int checklen);
+const void *fdt_offset_ptr(const void *fdt, int offset, unsigned int checklen);
 static inline void *fdt_offset_ptr_w(void *fdt, int offset, int checklen)
 {
        return (void *)(uintptr_t)fdt_offset_ptr(fdt, offset, checklen);
@@ -156,7 +156,7 @@ int fdt_next_node(const void *fdt, int offset, int *depth);
 #define __fdt_set_hdr(name) \
        static inline void fdt_set_##name(void *fdt, uint32_t val) \
        { \
-               struct fdt_header *fdth = fdt; \
+               struct fdt_header *fdth = (struct fdt_header*)fdt; \
                fdth->name = cpu_to_fdt32(val); \
        }
 __fdt_set_hdr(magic);
@@ -342,6 +342,91 @@ int fdt_path_offset(const void *fdt, const char *path);
  */
 const char *fdt_get_name(const void *fdt, int nodeoffset, int *lenp);
 
+/**
+ * fdt_first_property_offset - find the offset of a node's first property
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: structure block offset of a node
+ *
+ * fdt_first_property_offset() finds the first property of the node at
+ * the given structure block offset.
+ *
+ * returns:
+ *     structure block offset of the property (>=0), on success
+ *     -FDT_ERR_NOTFOUND, if the requested node has no properties
+ *     -FDT_ERR_BADOFFSET, if nodeoffset did not point to an FDT_BEGIN_NODE tag
+ *      -FDT_ERR_BADMAGIC,
+ *     -FDT_ERR_BADVERSION,
+ *     -FDT_ERR_BADSTATE,
+ *     -FDT_ERR_BADSTRUCTURE,
+ *     -FDT_ERR_TRUNCATED, standard meanings.
+ */
+int fdt_first_property_offset(const void *fdt, int nodeoffset);
+
+/**
+ * fdt_next_property_offset - step through a node's properties
+ * @fdt: pointer to the device tree blob
+ * @offset: structure block offset of a property
+ *
+ * fdt_next_property_offset() finds the property immediately after the
+ * one at the given structure block offset.  This will be a property
+ * of the same node as the given property.
+ *
+ * returns:
+ *     structure block offset of the next property (>=0), on success
+ *     -FDT_ERR_NOTFOUND, if the given property is the last in its node
+ *     -FDT_ERR_BADOFFSET, if nodeoffset did not point to an FDT_PROP tag
+ *      -FDT_ERR_BADMAGIC,
+ *     -FDT_ERR_BADVERSION,
+ *     -FDT_ERR_BADSTATE,
+ *     -FDT_ERR_BADSTRUCTURE,
+ *     -FDT_ERR_TRUNCATED, standard meanings.
+ */
+int fdt_next_property_offset(const void *fdt, int offset);
+
+/**
+ * fdt_get_property_by_offset - retrieve the property at a given offset
+ * @fdt: pointer to the device tree blob
+ * @offset: offset of the property to retrieve
+ * @lenp: pointer to an integer variable (will be overwritten) or NULL
+ *
+ * fdt_get_property_by_offset() retrieves a pointer to the
+ * fdt_property structure within the device tree blob at the given
+ * offset.  If lenp is non-NULL, the length of the property value is
+ * also returned, in the integer pointed to by lenp.
+ *
+ * returns:
+ *     pointer to the structure representing the property
+ *             if lenp is non-NULL, *lenp contains the length of the property
+ *             value (>=0)
+ *     NULL, on error
+ *             if lenp is non-NULL, *lenp contains an error code (<0):
+ *             -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_PROP tag
+ *             -FDT_ERR_BADMAGIC,
+ *             -FDT_ERR_BADVERSION,
+ *             -FDT_ERR_BADSTATE,
+ *             -FDT_ERR_BADSTRUCTURE,
+ *             -FDT_ERR_TRUNCATED, standard meanings
+ */
+const struct fdt_property *fdt_get_property_by_offset(const void *fdt,
+                                                     int offset,
+                                                     int *lenp);
+
+/**
+ * fdt_get_property_namelen - find a property based on substring
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose property to find
+ * @name: name of the property to find
+ * @namelen: number of characters of name to consider
+ * @lenp: pointer to an integer variable (will be overwritten) or NULL
+ *
+ * Identical to fdt_get_property_namelen(), but only examine the first
+ * namelen characters of name for matching the property name.
+ */
+const struct fdt_property *fdt_get_property_namelen(const void *fdt,
+                                                   int nodeoffset,
+                                                   const char *name,
+                                                   int namelen, int *lenp);
+
 /**
  * fdt_get_property - find a given property in a given node
  * @fdt: pointer to the device tree blob
@@ -379,6 +464,54 @@ static inline struct fdt_property *fdt_get_property_w(void *fdt, int nodeoffset,
                fdt_get_property(fdt, nodeoffset, name, lenp);
 }
 
+/**
+ * fdt_getprop_by_offset - retrieve the value of a property at a given offset
+ * @fdt: pointer to the device tree blob
+ * @offset: offset of the property to read
+ * @namep: pointer to a string variable (will be overwritten) or NULL
+ * @lenp: pointer to an integer variable (will be overwritten) or NULL
+ *
+ * fdt_getprop_by_offset() retrieves a pointer to the value of the
+ * property at structure block offset 'offset' (this will be a pointer
+ * to within the device blob itself, not a copy of the value).  If
+ * lenp is non-NULL, the length of the property value is also
+ * returned, in the integer pointed to by lenp.  If namep is non-NULL,
+ * the property's name will also be returned in the char * pointed to
+ * by namep (this will be a pointer to within the device tree's string
+ * block, not a new copy of the name).
+ *
+ * returns:
+ *     pointer to the property's value
+ *             if lenp is non-NULL, *lenp contains the length of the property
+ *             value (>=0)
+ *             if namep is non-NULL *namep contains a pointer to the property
+ *             name.
+ *     NULL, on error
+ *             if lenp is non-NULL, *lenp contains an error code (<0):
+ *             -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_PROP tag
+ *             -FDT_ERR_BADMAGIC,
+ *             -FDT_ERR_BADVERSION,
+ *             -FDT_ERR_BADSTATE,
+ *             -FDT_ERR_BADSTRUCTURE,
+ *             -FDT_ERR_TRUNCATED, standard meanings
+ */
+const void *fdt_getprop_by_offset(const void *fdt, int offset,
+                                 const char **namep, int *lenp);
+
+/**
+ * fdt_getprop_namelen - get property value based on substring
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose property to find
+ * @name: name of the property to find
+ * @namelen: number of characters of name to consider
+ * @lenp: pointer to an integer variable (will be overwritten) or NULL
+ *
+ * Identical to fdt_getprop(), but only examine the first namelen
+ * characters of name for matching the property name.
+ */
+const void *fdt_getprop_namelen(const void *fdt, int nodeoffset,
+                               const char *name, int namelen, int *lenp);
+
 /**
  * fdt_getprop - retrieve the value of a given property
  * @fdt: pointer to the device tree blob
@@ -428,6 +561,32 @@ static inline void *fdt_getprop_w(void *fdt, int nodeoffset,
  */
 uint32_t fdt_get_phandle(const void *fdt, int nodeoffset);
 
+/**
+ * fdt_get_alias_namelen - get alias based on substring
+ * @fdt: pointer to the device tree blob
+ * @name: name of the alias to look up
+ * @namelen: number of characters of name to consider
+ *
+ * Identical to fdt_get_alias(), but only examine the first namelen
+ * characters of name for matching the alias name.
+ */
+const char *fdt_get_alias_namelen(const void *fdt,
+                                 const char *name, int namelen);
+
+/**
+ * fdt_get_alias - retrieve the path referenced by a given alias
+ * @fdt: pointer to the device tree blob
+ * @name: name of the alias to look up
+ *
+ * fdt_get_alias() retrieves the value of a given alias.  That is, the
+ * value of the property named 'name' in the node /aliases.
+ *
+ * returns:
+ *     a pointer to the expansion of the alias named 'name', if it exists
+ *     NULL, if the given alias or the /aliases node does not exist
+ */
+const char *fdt_get_alias(const void *fdt, const char *name);
+
 /**
  * fdt_get_path - determine the full path of a node
  * @fdt: pointer to the device tree blob
@@ -693,17 +852,17 @@ int fdt_setprop_inplace(void *fdt, int nodeoffset, const char *name,
                        const void *val, int len);
 
 /**
- * fdt_setprop_inplace_cell - change the value of a single-cell property
+ * fdt_setprop_inplace_u32 - change the value of a 32-bit integer property
  * @fdt: pointer to the device tree blob
  * @nodeoffset: offset of the node whose property to change
  * @name: name of the property to change
- * @val: cell (32-bit integer) value to replace the property with
+ * @val: 32-bit integer value to replace the property with
  *
- * fdt_setprop_inplace_cell() replaces the value of a given property
- * with the 32-bit integer cell value in val, converting val to
- * big-endian if necessary.  This function cannot change the size of a
- * property, and so will only work if the property already exists and
- * has length 4.
+ * fdt_setprop_inplace_u32() replaces the value of a given property
+ * with the 32-bit integer value in val, converting val to big-endian
+ * if necessary.  This function cannot change the size of a property,
+ * and so will only work if the property already exists and has length
+ * 4.
  *
  * This function will alter only the bytes in the blob which contain
  * the given property value, and will not alter or move any other part
@@ -712,7 +871,7 @@ int fdt_setprop_inplace(void *fdt, int nodeoffset, const char *name,
  * returns:
  *     0, on success
  *     -FDT_ERR_NOSPACE, if the property's length is not equal to 4
 *     -FDT_ERR_NOTFOUND, node does not have the named property
  *     -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
  *     -FDT_ERR_BADMAGIC,
  *     -FDT_ERR_BADVERSION,
@@ -720,13 +879,59 @@ int fdt_setprop_inplace(void *fdt, int nodeoffset, const char *name,
  *     -FDT_ERR_BADSTRUCTURE,
  *     -FDT_ERR_TRUNCATED, standard meanings
  */
-static inline int fdt_setprop_inplace_cell(void *fdt, int nodeoffset,
-                                          const char *name, uint32_t val)
+static inline int fdt_setprop_inplace_u32(void *fdt, int nodeoffset,
+                                         const char *name, uint32_t val)
 {
        val = cpu_to_fdt32(val);
        return fdt_setprop_inplace(fdt, nodeoffset, name, &val, sizeof(val));
 }
 
+/**
+ * fdt_setprop_inplace_u64 - change the value of a 64-bit integer property
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose property to change
+ * @name: name of the property to change
+ * @val: 64-bit integer value to replace the property with
+ *
+ * fdt_setprop_inplace_u64() replaces the value of a given property
+ * with the 64-bit integer value in val, converting val to big-endian
+ * if necessary.  This function cannot change the size of a property,
+ * and so will only work if the property already exists and has length
+ * 8.
+ *
+ * This function will alter only the bytes in the blob which contain
+ * the given property value, and will not alter or move any other part
+ * of the tree.
+ *
+ * returns:
+ *     0, on success
+ *     -FDT_ERR_NOSPACE, if the property's length is not equal to 8
+ *     -FDT_ERR_NOTFOUND, node does not have the named property
+ *     -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ *     -FDT_ERR_BADMAGIC,
+ *     -FDT_ERR_BADVERSION,
+ *     -FDT_ERR_BADSTATE,
+ *     -FDT_ERR_BADSTRUCTURE,
+ *     -FDT_ERR_TRUNCATED, standard meanings
+ */
+static inline int fdt_setprop_inplace_u64(void *fdt, int nodeoffset,
+                                         const char *name, uint64_t val)
+{
+       val = cpu_to_fdt64(val);
+       return fdt_setprop_inplace(fdt, nodeoffset, name, &val, sizeof(val));
+}
+
+/**
+ * fdt_setprop_inplace_cell - change the value of a single-cell property
+ *
+ * This is an alternative name for fdt_setprop_inplace_u32()
+ */
+static inline int fdt_setprop_inplace_cell(void *fdt, int nodeoffset,
+                                          const char *name, uint32_t val)
+{
+       return fdt_setprop_inplace_u32(fdt, nodeoffset, name, val);
+}
+
 /**
  * fdt_nop_property - replace a property with nop tags
  * @fdt: pointer to the device tree blob
@@ -786,11 +991,20 @@ int fdt_add_reservemap_entry(void *fdt, uint64_t addr, uint64_t size);
 int fdt_finish_reservemap(void *fdt);
 int fdt_begin_node(void *fdt, const char *name);
 int fdt_property(void *fdt, const char *name, const void *val, int len);
-static inline int fdt_property_cell(void *fdt, const char *name, uint32_t val)
+static inline int fdt_property_u32(void *fdt, const char *name, uint32_t val)
 {
        val = cpu_to_fdt32(val);
        return fdt_property(fdt, name, &val, sizeof(val));
 }
+static inline int fdt_property_u64(void *fdt, const char *name, uint64_t val)
+{
+       val = cpu_to_fdt64(val);
+       return fdt_property(fdt, name, &val, sizeof(val));
+}
+static inline int fdt_property_cell(void *fdt, const char *name, uint32_t val)
+{
+       return fdt_property_u32(fdt, name, val);
+}
 #define fdt_property_string(fdt, name, str) \
        fdt_property(fdt, name, str, strlen(str)+1)
 int fdt_end_node(void *fdt);
@@ -800,6 +1014,7 @@ int fdt_finish(void *fdt);
 /* Read-write functions                                               */
 /**********************************************************************/
 
+int fdt_create_empty_tree(void *buf, int bufsize);
 int fdt_open_into(const void *fdt, void *buf, int bufsize);
 int fdt_pack(void *fdt);
 
@@ -909,14 +1124,14 @@ int fdt_setprop(void *fdt, int nodeoffset, const char *name,
                const void *val, int len);
 
 /**
- * fdt_setprop_cell - set a property to a single cell value
+ * fdt_setprop_u32 - set a property to a 32-bit integer
  * @fdt: pointer to the device tree blob
  * @nodeoffset: offset of the node whose property to change
  * @name: name of the property to change
  * @val: 32-bit integer value for the property (native endian)
  *
- * fdt_setprop_cell() sets the value of the named property in the
- * given node to the given cell value (converting to big-endian if
+ * fdt_setprop_u32() sets the value of the named property in the given
+ * node to the given 32-bit integer value (converting to big-endian if
  * necessary), or creates a new property with that value if it does
  * not already exist.
  *
@@ -936,13 +1151,59 @@ int fdt_setprop(void *fdt, int nodeoffset, const char *name,
  *     -FDT_ERR_BADLAYOUT,
  *     -FDT_ERR_TRUNCATED, standard meanings
  */
-static inline int fdt_setprop_cell(void *fdt, int nodeoffset, const char *name,
-                                  uint32_t val)
+static inline int fdt_setprop_u32(void *fdt, int nodeoffset, const char *name,
+                                 uint32_t val)
 {
        val = cpu_to_fdt32(val);
        return fdt_setprop(fdt, nodeoffset, name, &val, sizeof(val));
 }
 
+/**
+ * fdt_setprop_u64 - set a property to a 64-bit integer
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose property to change
+ * @name: name of the property to change
+ * @val: 64-bit integer value for the property (native endian)
+ *
+ * fdt_setprop_u64() sets the value of the named property in the given
+ * node to the given 64-bit integer value (converting to big-endian if
+ * necessary), or creates a new property with that value if it does
+ * not already exist.
+ *
+ * This function may insert or delete data from the blob, and will
+ * therefore change the offsets of some existing nodes.
+ *
+ * returns:
+ *     0, on success
+ *     -FDT_ERR_NOSPACE, there is insufficient free space in the blob to
+ *             contain the new property value
+ *     -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ *     -FDT_ERR_BADLAYOUT,
+ *     -FDT_ERR_BADMAGIC,
+ *     -FDT_ERR_BADVERSION,
+ *     -FDT_ERR_BADSTATE,
+ *     -FDT_ERR_BADSTRUCTURE,
+ *     -FDT_ERR_BADLAYOUT,
+ *     -FDT_ERR_TRUNCATED, standard meanings
+ */
+static inline int fdt_setprop_u64(void *fdt, int nodeoffset, const char *name,
+                                 uint64_t val)
+{
+       val = cpu_to_fdt64(val);
+       return fdt_setprop(fdt, nodeoffset, name, &val, sizeof(val));
+}
+
+/**
+ * fdt_setprop_cell - set a property to a single cell value
+ *
+ * This is an alternative name for fdt_setprop_u32()
+ */
+static inline int fdt_setprop_cell(void *fdt, int nodeoffset, const char *name,
+                                  uint32_t val)
+{
+       return fdt_setprop_u32(fdt, nodeoffset, name, val);
+}
+
 /**
  * fdt_setprop_string - set a property to a string value
  * @fdt: pointer to the device tree blob
@@ -974,6 +1235,147 @@ static inline int fdt_setprop_cell(void *fdt, int nodeoffset, const char *name,
 #define fdt_setprop_string(fdt, nodeoffset, name, str) \
        fdt_setprop((fdt), (nodeoffset), (name), (str), strlen(str)+1)
 
+/**
+ * fdt_appendprop - append to or create a property
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose property to change
+ * @name: name of the property to append to
+ * @val: pointer to data to append to the property value
+ * @len: length of the data to append to the property value
+ *
+ * fdt_appendprop() appends the value to the named property in the
+ * given node, creating the property if it does not already exist.
+ *
+ * This function may insert data into the blob, and will therefore
+ * change the offsets of some existing nodes.
+ *
+ * returns:
+ *     0, on success
+ *     -FDT_ERR_NOSPACE, there is insufficient free space in the blob to
+ *             contain the new property value
+ *     -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ *     -FDT_ERR_BADLAYOUT,
+ *     -FDT_ERR_BADMAGIC,
+ *     -FDT_ERR_BADVERSION,
+ *     -FDT_ERR_BADSTATE,
+ *     -FDT_ERR_BADSTRUCTURE,
+ *     -FDT_ERR_BADLAYOUT,
+ *     -FDT_ERR_TRUNCATED, standard meanings
+ */
+int fdt_appendprop(void *fdt, int nodeoffset, const char *name,
+                  const void *val, int len);
+
+/**
+ * fdt_appendprop_u32 - append a 32-bit integer value to a property
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose property to change
+ * @name: name of the property to change
+ * @val: 32-bit integer value to append to the property (native endian)
+ *
+ * fdt_appendprop_u32() appends the given 32-bit integer value
+ * (converting to big-endian if necessary) to the value of the named
+ * property in the given node, or creates a new property with that
+ * value if it does not already exist.
+ *
+ * This function may insert data into the blob, and will therefore
+ * change the offsets of some existing nodes.
+ *
+ * returns:
+ *     0, on success
+ *     -FDT_ERR_NOSPACE, there is insufficient free space in the blob to
+ *             contain the new property value
+ *     -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ *     -FDT_ERR_BADLAYOUT,
+ *     -FDT_ERR_BADMAGIC,
+ *     -FDT_ERR_BADVERSION,
+ *     -FDT_ERR_BADSTATE,
+ *     -FDT_ERR_BADSTRUCTURE,
+ *     -FDT_ERR_BADLAYOUT,
+ *     -FDT_ERR_TRUNCATED, standard meanings
+ */
+static inline int fdt_appendprop_u32(void *fdt, int nodeoffset,
+                                    const char *name, uint32_t val)
+{
+       val = cpu_to_fdt32(val);
+       return fdt_appendprop(fdt, nodeoffset, name, &val, sizeof(val));
+}
+
+/**
+ * fdt_appendprop_u64 - append a 64-bit integer value to a property
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose property to change
+ * @name: name of the property to change
+ * @val: 64-bit integer value to append to the property (native endian)
+ *
+ * fdt_appendprop_u64() appends the given 64-bit integer value
+ * (converting to big-endian if necessary) to the value of the named
+ * property in the given node, or creates a new property with that
+ * value if it does not already exist.
+ *
+ * This function may insert data into the blob, and will therefore
+ * change the offsets of some existing nodes.
+ *
+ * returns:
+ *     0, on success
+ *     -FDT_ERR_NOSPACE, there is insufficient free space in the blob to
+ *             contain the new property value
+ *     -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ *     -FDT_ERR_BADLAYOUT,
+ *     -FDT_ERR_BADMAGIC,
+ *     -FDT_ERR_BADVERSION,
+ *     -FDT_ERR_BADSTATE,
+ *     -FDT_ERR_BADSTRUCTURE,
+ *     -FDT_ERR_BADLAYOUT,
+ *     -FDT_ERR_TRUNCATED, standard meanings
+ */
+static inline int fdt_appendprop_u64(void *fdt, int nodeoffset,
+                                    const char *name, uint64_t val)
+{
+       val = cpu_to_fdt64(val);
+       return fdt_appendprop(fdt, nodeoffset, name, &val, sizeof(val));
+}
+
+/**
+ * fdt_appendprop_cell - append a single cell value to a property
+ *
+ * This is an alternative name for fdt_appendprop_u32()
+ */
+static inline int fdt_appendprop_cell(void *fdt, int nodeoffset,
+                                     const char *name, uint32_t val)
+{
+       return fdt_appendprop_u32(fdt, nodeoffset, name, val);
+}
+
+/**
+ * fdt_appendprop_string - append a string to a property
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose property to change
+ * @name: name of the property to change
+ * @str: string value to append to the property
+ *
+ * fdt_appendprop_string() appends the given string to the value of
+ * the named property in the given node, or creates a new property
+ * with that value if it does not already exist.
+ *
+ * This function may insert data into the blob, and will therefore
+ * change the offsets of some existing nodes.
+ *
+ * returns:
+ *     0, on success
+ *     -FDT_ERR_NOSPACE, there is insufficient free space in the blob to
+ *             contain the new property value
+ *     -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ *     -FDT_ERR_BADLAYOUT,
+ *     -FDT_ERR_BADMAGIC,
+ *     -FDT_ERR_BADVERSION,
+ *     -FDT_ERR_BADSTATE,
+ *     -FDT_ERR_BADSTRUCTURE,
+ *     -FDT_ERR_BADLAYOUT,
+ *     -FDT_ERR_TRUNCATED, standard meanings
+ */
+#define fdt_appendprop_string(fdt, nodeoffset, name, str) \
+       fdt_appendprop((fdt), (nodeoffset), (name), (str), strlen(str)+1)
+
 /**
  * fdt_delprop - delete a property
  * @fdt: pointer to the device tree blob
index 449bf602daf1ff59a1cfef48bc4b50c15940112b..213d7fb81c4218ba516bbbf59fa35b1f36d1b6ad 100644 (file)
@@ -5,19 +5,25 @@
 #include <stdint.h>
 #include <string.h>
 
-#define _B(n)  ((unsigned long long)((uint8_t *)&x)[n])
+#define EXTRACT_BYTE(n)        ((unsigned long long)((uint8_t *)&x)[n])
+static inline uint16_t fdt16_to_cpu(uint16_t x)
+{
+       return (EXTRACT_BYTE(0) << 8) | EXTRACT_BYTE(1);
+}
+#define cpu_to_fdt16(x) fdt16_to_cpu(x)
+
 static inline uint32_t fdt32_to_cpu(uint32_t x)
 {
-       return (_B(0) << 24) | (_B(1) << 16) | (_B(2) << 8) | _B(3);
+       return (EXTRACT_BYTE(0) << 24) | (EXTRACT_BYTE(1) << 16) | (EXTRACT_BYTE(2) << 8) | EXTRACT_BYTE(3);
 }
 #define cpu_to_fdt32(x) fdt32_to_cpu(x)
 
 static inline uint64_t fdt64_to_cpu(uint64_t x)
 {
-       return (_B(0) << 56) | (_B(1) << 48) | (_B(2) << 40) | (_B(3) << 32)
-               | (_B(4) << 24) | (_B(5) << 16) | (_B(6) << 8) | _B(7);
+       return (EXTRACT_BYTE(0) << 56) | (EXTRACT_BYTE(1) << 48) | (EXTRACT_BYTE(2) << 40) | (EXTRACT_BYTE(3) << 32)
+               | (EXTRACT_BYTE(4) << 24) | (EXTRACT_BYTE(5) << 16) | (EXTRACT_BYTE(6) << 8) | EXTRACT_BYTE(7);
 }
 #define cpu_to_fdt64(x) fdt64_to_cpu(x)
-#undef _B
+#undef EXTRACT_BYTE
 
 #endif /* _LIBFDT_ENV_H */
index 46eb93e4af5c471ecac7bc470065c11f6ae6f9e6..381133ba81df7d02c375d7a1462d650136abd6d1 100644 (file)
@@ -62,8 +62,8 @@
                        return err; \
        }
 
-uint32_t _fdt_next_tag(const void *fdt, int startoffset, int *nextoffset);
 int _fdt_check_node_offset(const void *fdt, int offset);
+int _fdt_check_prop_offset(const void *fdt, int offset);
 const char *_fdt_find_string(const char *strtab, int tabsize, const char *s);
 int _fdt_node_end_offset(void *fdt, int nodeoffset);
 
index 26d0e1e60c0ca546fda76f1a6efcd1e07acf7acf..b61465fb2f33813ac78959c22fb3ded4452c9efa 100644 (file)
@@ -29,16 +29,27 @@ void add_label(struct label **labels, char *label)
        struct label *new;
 
        /* Make sure the label isn't already there */
-       for_each_label(*labels, new)
-               if (streq(new->label, label))
+       for_each_label_withdel(*labels, new)
+               if (streq(new->label, label)) {
+                       new->deleted = 0;
                        return;
+               }
 
        new = xmalloc(sizeof(*new));
+       memset(new, 0, sizeof(*new));
        new->label = label;
        new->next = *labels;
        *labels = new;
 }
 
+void delete_labels(struct label **labels)
+{
+       struct label *label;
+
+       for_each_label(*labels, label)
+               label->deleted = 1;
+}
+
 struct property *build_property(char *name, struct data val)
 {
        struct property *new = xmalloc(sizeof(*new));
@@ -51,6 +62,18 @@ struct property *build_property(char *name, struct data val)
        return new;
 }
 
+struct property *build_property_delete(char *name)
+{
+       struct property *new = xmalloc(sizeof(*new));
+
+       memset(new, 0, sizeof(*new));
+
+       new->name = name;
+       new->deleted = 1;
+
+       return new;
+}
+
 struct property *chain_property(struct property *first, struct property *list)
 {
        assert(first->next == NULL);
@@ -91,6 +114,17 @@ struct node *build_node(struct property *proplist, struct node *children)
        return new;
 }
 
+struct node *build_node_delete(void)
+{
+       struct node *new = xmalloc(sizeof(*new));
+
+       memset(new, 0, sizeof(*new));
+
+       new->deleted = 1;
+
+       return new;
+}
+
 struct node *name_node(struct node *node, char *name)
 {
        assert(node->name == NULL);
@@ -106,8 +140,10 @@ struct node *merge_nodes(struct node *old_node, struct node *new_node)
        struct node *new_child, *old_child;
        struct label *l;
 
+       old_node->deleted = 0;
+
        /* Add new node labels to old node */
-       for_each_label(new_node->labels, l)
+       for_each_label_withdel(new_node->labels, l)
                add_label(&old_node->labels, l->label);
 
        /* Move properties from the new node to the old node.  If there
@@ -118,14 +154,21 @@ struct node *merge_nodes(struct node *old_node, struct node *new_node)
                new_node->proplist = new_prop->next;
                new_prop->next = NULL;
 
+               if (new_prop->deleted) {
+                       delete_property_by_name(old_node, new_prop->name);
+                       free(new_prop);
+                       continue;
+               }
+
                /* Look for a collision, set new value if there is */
-               for_each_property(old_node, old_prop) {
+               for_each_property_withdel(old_node, old_prop) {
                        if (streq(old_prop->name, new_prop->name)) {
                                /* Add new labels to old property */
-                               for_each_label(new_prop->labels, l)
+                               for_each_label_withdel(new_prop->labels, l)
                                        add_label(&old_prop->labels, l->label);
 
                                old_prop->val = new_prop->val;
+                               old_prop->deleted = 0;
                                free(new_prop);
                                new_prop = NULL;
                                break;
@@ -146,8 +189,14 @@ struct node *merge_nodes(struct node *old_node, struct node *new_node)
                new_child->parent = NULL;
                new_child->next_sibling = NULL;
 
+               if (new_child->deleted) {
+                       delete_node_by_name(old_node, new_child->name);
+                       free(new_child);
+                       continue;
+               }
+
                /* Search for a collision.  Merge if there is */
-               for_each_child(old_node, old_child) {
+               for_each_child_withdel(old_node, old_child) {
                        if (streq(old_child->name, new_child->name)) {
                                merge_nodes(old_child, new_child);
                                new_child = NULL;
@@ -155,7 +204,7 @@ struct node *merge_nodes(struct node *old_node, struct node *new_node)
                        }
                }
 
-               /* if no collision occurred, add child to the old node. */
+       /* if no collision occurred, add child to the old node. */
                if (new_child)
                        add_child(old_node, new_child);
        }
@@ -188,6 +237,25 @@ void add_property(struct node *node, struct property *prop)
        *p = prop;
 }
 
+void delete_property_by_name(struct node *node, char *name)
+{
+       struct property *prop = node->proplist;
+
+       while (prop) {
+               if (!strcmp(prop->name, name)) {
+                       delete_property(prop);
+                       return;
+               }
+               prop = prop->next;
+       }
+}
+
+void delete_property(struct property *prop)
+{
+       prop->deleted = 1;
+       delete_labels(&prop->labels);
+}
+
 void add_child(struct node *parent, struct node *child)
 {
        struct node **p;
@@ -202,6 +270,32 @@ void add_child(struct node *parent, struct node *child)
        *p = child;
 }
 
+void delete_node_by_name(struct node *parent, char *name)
+{
+       struct node *node = parent->children;
+
+       while (node) {
+               if (!strcmp(node->name, name)) {
+                       delete_node(node);
+                       return;
+               }
+               node = node->next_sibling;
+       }
+}
+
+void delete_node(struct node *node)
+{
+       struct property *prop;
+       struct node *child;
+
+       node->deleted = 1;
+       for_each_child(node, child)
+               delete_node(child);
+       for_each_property(node, prop)
+               delete_property(prop);
+       delete_labels(&node->labels);
+}
+
 struct reserve_info *build_reserve_entry(uint64_t address, uint64_t size)
 {
        struct reserve_info *new = xmalloc(sizeof(*new));
@@ -353,8 +447,11 @@ struct node *get_node_by_path(struct node *tree, const char *path)
        const char *p;
        struct node *child;
 
-       if (!path || ! (*path))
+       if (!path || ! (*path)) {
+               if (tree->deleted)
+                       return NULL;
                return tree;
+       }
 
        while (path[0] == '/')
                path++;
@@ -397,8 +494,11 @@ struct node *get_node_by_phandle(struct node *tree, cell_t phandle)
 
        assert((phandle != 0) && (phandle != -1));
 
-       if (tree->phandle == phandle)
+       if (tree->phandle == phandle) {
+               if (tree->deleted)
+                       return NULL;
                return tree;
+       }
 
        for_each_child(tree, child) {
                node = get_node_by_phandle(child, phandle);
@@ -535,7 +635,7 @@ static void sort_properties(struct node *node)
        int n = 0, i = 0;
        struct property *prop, **tbl;
 
-       for_each_property(node, prop)
+       for_each_property_withdel(node, prop)
                n++;
 
        if (n == 0)
@@ -543,7 +643,7 @@ static void sort_properties(struct node *node)
 
        tbl = xmalloc(n * sizeof(*tbl));
 
-       for_each_property(node, prop)
+       for_each_property_withdel(node, prop)
                tbl[i++] = prop;
 
        qsort(tbl, n, sizeof(*tbl), cmp_prop);
@@ -571,7 +671,7 @@ static void sort_subnodes(struct node *node)
        int n = 0, i = 0;
        struct node *subnode, **tbl;
 
-       for_each_child(node, subnode)
+       for_each_child_withdel(node, subnode)
                n++;
 
        if (n == 0)
@@ -579,7 +679,7 @@ static void sort_subnodes(struct node *node)
 
        tbl = xmalloc(n * sizeof(*tbl));
 
-       for_each_child(node, subnode)
+       for_each_child_withdel(node, subnode)
                tbl[i++] = subnode;
 
        qsort(tbl, n, sizeof(*tbl), cmp_subnode);
@@ -598,7 +698,7 @@ static void sort_node(struct node *node)
 
        sort_properties(node);
        sort_subnodes(node);
-       for_each_child(node, c)
+       for_each_child_withdel(node, c)
                sort_node(c);
 }
 
index 36a38e9f1a2cb9200ee07bfce22eab489f1f1127..246ab4bc0d9d8c6e8830e18d35941213730e51ef 100644 (file)
 #include "dtc.h"
 #include "srcpos.h"
 
+/* A node in our list of directories to search for source/include files */
+struct search_path {
+       struct search_path *next;       /* next node in list, NULL for end */
+       const char *dirname;            /* name of directory to search */
+};
+
+/* This is the list of directories that we search for source files */
+static struct search_path *search_path_head, **search_path_tail;
+
 
 static char *dirname(const char *path)
 {
@@ -47,6 +56,64 @@ struct srcfile_state *current_srcfile; /* = NULL */
 #define MAX_SRCFILE_DEPTH     (100)
 static int srcfile_depth; /* = 0 */
 
+
+/**
+ * Try to open a file in a given directory.
+ *
+ * If the filename is an absolute path, then dirname is ignored. If it is a
+ * relative path, then we look in that directory for the file.
+ *
+ * @param dirname      Directory to look in, or NULL for none
+ * @param fname                Filename to look for
+ * @param fp           Set to NULL if file did not open
+ * @return allocated filename on success (caller must free), NULL on failure
+ */
+static char *try_open(const char *dirname, const char *fname, FILE **fp)
+{
+       char *fullname;
+
+       if (!dirname || fname[0] == '/')
+               fullname = xstrdup(fname);
+       else
+               fullname = join_path(dirname, fname);
+
+       *fp = fopen(fullname, "r");
+       if (!*fp) {
+               free(fullname);
+               fullname = NULL;
+       }
+
+       return fullname;
+}
+
+/**
+ * Open a file for read access
+ *
+ * If it is a relative filename, we search the full search path for it.
+ *
+ * @param fname        Filename to open
+ * @param fp   Returns pointer to opened FILE, or NULL on failure
+ * @return pointer to allocated filename, which caller must free
+ */
+static char *fopen_any_on_path(const char *fname, FILE **fp)
+{
+       const char *cur_dir = NULL;
+       struct search_path *node;
+       char *fullname;
+
+       /* Try current directory first */
+       assert(fp);
+       if (current_srcfile)
+               cur_dir = current_srcfile->dir;
+       fullname = try_open(cur_dir, fname, fp);
+
+       /* Failing that, try each search path in turn */
+       for (node = search_path_head; !*fp && node; node = node->next)
+               fullname = try_open(node->dirname, fname, fp);
+
+       return fullname;
+}
+
 FILE *srcfile_relative_open(const char *fname, char **fullnamep)
 {
        FILE *f;
@@ -56,13 +123,7 @@ FILE *srcfile_relative_open(const char *fname, char **fullnamep)
                f = stdin;
                fullname = xstrdup("<stdin>");
        } else {
-               if (!current_srcfile || !current_srcfile->dir
-                   || (fname[0] == '/'))
-                       fullname = xstrdup(fname);
-               else
-                       fullname = join_path(current_srcfile->dir, fname);
-
-               f = fopen(fullname, "r");
+               fullname = fopen_any_on_path(fname, &f);
                if (!f)
                        die("Couldn't open \"%s\": %s\n", fname,
                            strerror(errno));
@@ -119,6 +180,23 @@ int srcfile_pop(void)
        return current_srcfile ? 1 : 0;
 }
 
+void srcfile_add_search_path(const char *dirname)
+{
+       struct search_path *node;
+
+       /* Create the node */
+       node = xmalloc(sizeof(*node));
+       node->next = NULL;
+       node->dirname = xstrdup(dirname);
+
+       /* Add to the end of our list */
+       if (search_path_tail)
+               *search_path_tail = node;
+       else
+               search_path_head = node;
+       search_path_tail = &node->next;
+}
+
 /*
  * The empty source position.
  */
@@ -250,3 +328,9 @@ srcpos_warn(struct srcpos *pos, char const *fmt, ...)
 
        va_end(va);
 }
+
+void srcpos_set_line(char *f, int l)
+{
+       current_srcfile->name = f;
+       current_srcfile->lineno = l;
+}
index ce980cafe58867d64d7a142b00914034f24f0d50..93a27123c2e9c17ec27c728243860121447feb7e 100644 (file)
@@ -33,10 +33,39 @@ struct srcfile_state {
 extern FILE *depfile; /* = NULL */
 extern struct srcfile_state *current_srcfile; /* = NULL */
 
+/**
+ * Open a source file.
+ *
+ * If the source file is a relative pathname, then it is searched for in the
+ * current directory (the directory of the last source file read) and after
+ * that in the search path.
+ *
+ * We work through the search path in order from the first path specified to
+ * the last.
+ *
+ * If the file is not found, then this function does not return, but calls
+ * die().
+ *
+ * @param fname                Filename to search
+ * @param fullnamep    If non-NULL, it is set to the allocated filename of the
+ *                     file that was opened. The caller is then responsible
+ *                     for freeing the pointer.
+ * @return pointer to opened FILE
+ */
 FILE *srcfile_relative_open(const char *fname, char **fullnamep);
+
 void srcfile_push(const char *fname);
 int srcfile_pop(void);
 
+/**
+ * Add a new directory to the search path for input files
+ *
+ * The new path is added at the end of the list.
+ *
+ * @param dirname      Directory to add
+ */
+void srcfile_add_search_path(const char *dirname);
+
 struct srcpos {
     int first_line;
     int first_column;
@@ -84,4 +113,6 @@ extern void srcpos_error(struct srcpos *pos, char const *, ...)
 extern void srcpos_warn(struct srcpos *pos, char const *, ...)
      __attribute__((format(printf, 2, 3)));
 
+extern void srcpos_set_line(char *f, int l);
+
 #endif /* _SRCPOS_H_ */
index c09aafade313509740d662ed2c5f36a0d895a004..33eeba55fb4dd6482193902e784dbb1a62368f3e 100644 (file)
@@ -23,6 +23,7 @@
 
 extern FILE *yyin;
 extern int yyparse(void);
+extern YYLTYPE yylloc;
 
 struct boot_info *the_boot_info;
 int treesource_error;
@@ -34,6 +35,7 @@ struct boot_info *dt_from_source(const char *fname)
 
        srcfile_push(fname);
        yyin = current_srcfile->f;
+       yylloc.file = current_srcfile;
 
        if (yyparse() != 0)
                die("Unable to parse input tree\n");
index d7ac27d2ae15469d09e476d4193d120e6ac97f6b..2422c34e11dfd381b26d11c1fec39d9369b17d62 100644 (file)
@@ -1,6 +1,10 @@
 /*
+ * Copyright 2011 The Chromium Authors, All Rights Reserved.
  * Copyright 2008 Jon Loeliger, Freescale Semiconductor, Inc.
  *
+ * util_is_printable_string contributed by
+ *     Pantelis Antoniou <pantelis.antoniou AT gmail.com>
+ *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License as
  * published by the Free Software Foundation; either version 2 of the
  *                                                                   USA
  */
 
+#include <ctype.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <stdarg.h>
 #include <string.h>
+#include <assert.h>
+
+#include <errno.h>
+#include <fcntl.h>
+#include <unistd.h>
 
+#include "libfdt.h"
 #include "util.h"
 
 char *xstrdup(const char *s)
@@ -57,3 +68,264 @@ char *join_path(const char *path, const char *name)
        memcpy(str+lenp, name, lenn+1);
        return str;
 }
+
+int util_is_printable_string(const void *data, int len)
+{
+       const char *s = data;
+       const char *ss;
+
+       /* zero length is not */
+       if (len == 0)
+               return 0;
+
+       /* must terminate with zero */
+       if (s[len - 1] != '\0')
+               return 0;
+
+       ss = s;
+       while (*s && isprint(*s))
+               s++;
+
+       /* not zero, or not done yet */
+       if (*s != '\0' || (s + 1 - ss) < len)
+               return 0;
+
+       return 1;
+}
+
+/*
+ * Parse an octal encoded character starting at index i in string s.  The
+ * resulting character will be returned and the index i will be updated to
+ * point at the character directly after the end of the encoding, this may be
+ * the '\0' terminator of the string.
+ */
+static char get_oct_char(const char *s, int *i)
+{
+       char x[4];
+       char *endx;
+       long val;
+
+       x[3] = '\0';
+       strncpy(x, s + *i, 3);
+
+       val = strtol(x, &endx, 8);
+
+       assert(endx > x);
+
+       (*i) += endx - x;
+       return val;
+}
+
+/*
+ * Parse a hexadecimal encoded character starting at index i in string s.  The
+ * resulting character will be returned and the index i will be updated to
+ * point at the character directly after the end of the encoding, this may be
+ * the '\0' terminator of the string.
+ */
+static char get_hex_char(const char *s, int *i)
+{
+       char x[3];
+       char *endx;
+       long val;
+
+       x[2] = '\0';
+       strncpy(x, s + *i, 2);
+
+       val = strtol(x, &endx, 16);
+       if (!(endx  > x))
+               die("\\x used with no following hex digits\n");
+
+       (*i) += endx - x;
+       return val;
+}
+
+char get_escape_char(const char *s, int *i)
+{
+       char    c = s[*i];
+       int     j = *i + 1;
+       char    val;
+
+       assert(c);
+       switch (c) {
+       case 'a':
+               val = '\a';
+               break;
+       case 'b':
+               val = '\b';
+               break;
+       case 't':
+               val = '\t';
+               break;
+       case 'n':
+               val = '\n';
+               break;
+       case 'v':
+               val = '\v';
+               break;
+       case 'f':
+               val = '\f';
+               break;
+       case 'r':
+               val = '\r';
+               break;
+       case '0':
+       case '1':
+       case '2':
+       case '3':
+       case '4':
+       case '5':
+       case '6':
+       case '7':
+               j--; /* need to re-read the first digit as
+                     * part of the octal value */
+               val = get_oct_char(s, &j);
+               break;
+       case 'x':
+               val = get_hex_char(s, &j);
+               break;
+       default:
+               val = c;
+       }
+
+       (*i) = j;
+       return val;
+}
+
+int utilfdt_read_err(const char *filename, char **buffp)
+{
+       int fd = 0;     /* assume stdin */
+       char *buf = NULL;
+       off_t bufsize = 1024, offset = 0;
+       int ret = 0;
+
+       *buffp = NULL;
+       if (strcmp(filename, "-") != 0) {
+               fd = open(filename, O_RDONLY);
+               if (fd < 0)
+                       return errno;
+       }
+
+       /* Loop until we have read everything */
+       buf = malloc(bufsize);
+       do {
+               /* Expand the buffer to hold the next chunk */
+               if (offset == bufsize) {
+                       bufsize *= 2;
+                       buf = realloc(buf, bufsize);
+                       if (!buf) {
+                               ret = ENOMEM;
+                               break;
+                       }
+               }
+
+               ret = read(fd, &buf[offset], bufsize - offset);
+               if (ret < 0) {
+                       ret = errno;
+                       break;
+               }
+               offset += ret;
+       } while (ret != 0);
+
+       /* Clean up, including closing stdin; return errno on error */
+       close(fd);
+       if (ret)
+               free(buf);
+       else
+               *buffp = buf;
+       return ret;
+}
+
+char *utilfdt_read(const char *filename)
+{
+       char *buff;
+       int ret = utilfdt_read_err(filename, &buff);
+
+       if (ret) {
+               fprintf(stderr, "Couldn't open blob from '%s': %s\n", filename,
+                       strerror(ret));
+               return NULL;
+       }
+       /* Successful read */
+       return buff;
+}
+
+int utilfdt_write_err(const char *filename, const void *blob)
+{
+       int fd = 1;     /* assume stdout */
+       int totalsize;
+       int offset;
+       int ret = 0;
+       const char *ptr = blob;
+
+       if (strcmp(filename, "-") != 0) {
+               fd = open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
+               if (fd < 0)
+                       return errno;
+       }
+
+       totalsize = fdt_totalsize(blob);
+       offset = 0;
+
+       while (offset < totalsize) {
+               ret = write(fd, ptr + offset, totalsize - offset);
+               if (ret < 0) {
+                       ret = -errno;
+                       break;
+               }
+               offset += ret;
+       }
+       /* Close the file (leave stdout open); return errno on error */
+       if (fd != 1)
+               close(fd);
+       return ret < 0 ? -ret : 0;
+}
+
+
+int utilfdt_write(const char *filename, const void *blob)
+{
+       int ret = utilfdt_write_err(filename, blob);
+
+       if (ret) {
+               fprintf(stderr, "Couldn't write blob to '%s': %s\n", filename,
+                       strerror(ret));
+       }
+       return ret ? -1 : 0;
+}
+
+int utilfdt_decode_type(const char *fmt, int *type, int *size)
+{
+       int qualifier = 0;
+
+       if (!*fmt)
+               return -1;
+
+       /* get the conversion qualifier */
+       *size = -1;
+       if (strchr("hlLb", *fmt)) {
+               qualifier = *fmt++;
+               if (qualifier == *fmt) {
+                       switch (*fmt++) {
+/* TODO:               case 'l': qualifier = 'L'; break;*/
+                       case 'h':
+                               qualifier = 'b';
+                               break;
+                       }
+               }
+       }
+
+       /* we should now have a type */
+       if ((*fmt == '\0') || !strchr("iuxs", *fmt))
+               return -1;
+
+       /* convert qualifier (bhL) to byte size */
+       if (*fmt != 's')
+               *size = qualifier == 'b' ? 1 :
+                               qualifier == 'h' ? 2 :
+                               qualifier == 'l' ? 4 : -1;
+       *type = *fmt++;
+
+       /* that should be it! */
+       if (*fmt)
+               return -1;
+       return 0;
+}
index 9cead842c11e48a0843a52d2825eef83837271d3..c8eb45d9f04b718d12c8315356960f5d2976779f 100644 (file)
@@ -1,7 +1,10 @@
 #ifndef _UTIL_H
 #define _UTIL_H
 
+#include <stdarg.h>
+
 /*
+ * Copyright 2011 The Chromium Authors, All Rights Reserved.
  * Copyright 2008 Jon Loeliger, Freescale Semiconductor, Inc.
  *
  * This program is free software; you can redistribute it and/or
@@ -53,4 +56,98 @@ static inline void *xrealloc(void *p, size_t len)
 extern char *xstrdup(const char *s);
 extern char *join_path(const char *path, const char *name);
 
+/**
+ * Check a string of a given length to see if it is all printable and
+ * has a valid terminator.
+ *
+ * @param data The string to check
+ * @param len  The string length including terminator
+ * @return 1 if a valid printable string, 0 if not */
+int util_is_printable_string(const void *data, int len);
+
+/*
+ * Parse an escaped character starting at index i in string s.  The resulting
+ * character will be returned and the index i will be updated to point at the
+ * character directly after the end of the encoding, this may be the '\0'
+ * terminator of the string.
+ */
+char get_escape_char(const char *s, int *i);
+
+/**
+ * Read a device tree file into a buffer. This will report any errors on
+ * stderr.
+ *
+ * @param filename     The filename to read, or - for stdin
+ * @return Pointer to allocated buffer containing fdt, or NULL on error
+ */
+char *utilfdt_read(const char *filename);
+
+/**
+ * Read a device tree file into a buffer. Does not report errors, but only
+ * returns them. The value returned can be passed to strerror() to obtain
+ * an error message for the user.
+ *
+ * @param filename     The filename to read, or - for stdin
+ * @param buffp                Returns pointer to buffer containing fdt
+ * @return 0 if ok, else an errno value representing the error
+ */
+int utilfdt_read_err(const char *filename, char **buffp);
+
+
+/**
+ * Write a device tree buffer to a file. This will report any errors on
+ * stderr.
+ *
+ * @param filename     The filename to write, or - for stdout
+ * @param blob         Pointer to buffer containing fdt
+ * @return 0 if ok, -1 on error
+ */
+int utilfdt_write(const char *filename, const void *blob);
+
+/**
+ * Write a device tree buffer to a file. Does not report errors, but only
+ * returns them. The value returned can be passed to strerror() to obtain
+ * an error message for the user.
+ *
+ * @param filename     The filename to write, or - for stdout
+ * @param blob         Pointer to buffer containing fdt
+ * @return 0 if ok, else an errno value representing the error
+ */
+int utilfdt_write_err(const char *filename, const void *blob);
+
+/**
+ * Decode a data type string.
+ *
+ * The string consists of an optional character followed by the type:
+ *     Modifier characters:
+ *             hh or b 1 byte
+ *             h       2 byte
+ *             l       4 byte, default
+ *
+ *     Type character:
+ *             s       string
+ *             i       signed integer
+ *             u       unsigned integer
+ *             x       hex
+ *
+ * TODO: Implement ll modifier (8 bytes)
+ * TODO: Implement o type (octal)
+ *
+ * @param fmt          Format string to process
+ * @param type         Returns type found (s/i/u/x)
+ * @param size         Returns size found (1, 2 or 4) in bytes, or -1 if none
+ * @return 0 if ok, -1 on error (no type given, or other invalid format)
+ */
+int utilfdt_decode_type(const char *fmt, int *type, int *size);
+
+/*
+ * This is a usage message fragment for the -t option. It is the format
+ * supported by utilfdt_decode_type.
+ */
+
+#define USAGE_TYPE_MSG \
+       "<type>\ts=string, i=int, u=unsigned, x=hex\n" \
+       "\tOptional modifier prefix:\n" \
+       "\t\thh or b=byte, h=2 byte, l=4 byte (default)\n";
+
 #endif /* _UTIL_H */
index b81ea10a17a3a12981b166894709f2601b51f587..60f0c76a27d3cec1afe576104ef831fe577c54e1 100644 (file)
@@ -721,7 +721,7 @@ audit:
        if (!permtest)
                error = aa_audit_file(profile, &perms, GFP_KERNEL,
                                      OP_CHANGE_HAT, AA_MAY_CHANGEHAT, NULL,
-                                     target, 0, info, error);
+                                     target, GLOBAL_ROOT_UID, info, error);
 
 out:
        aa_put_profile(hat);
@@ -848,7 +848,7 @@ int aa_change_profile(const char *ns_name, const char *hname, bool onexec,
 audit:
        if (!permtest)
                error = aa_audit_file(profile, &perms, GFP_KERNEL, op, request,
-                                     name, hname, 0, info, error);
+                                     name, hname, GLOBAL_ROOT_UID, info, error);
 
        aa_put_namespace(ns);
        aa_put_profile(target);
index cf19d4093ca4df3be3ce7535e851181dc8031840..cd21ec5b90afcca345b52b892b9f847e9b55621f 100644 (file)
@@ -65,7 +65,7 @@ static void audit_file_mask(struct audit_buffer *ab, u32 mask)
 static void file_audit_cb(struct audit_buffer *ab, void *va)
 {
        struct common_audit_data *sa = va;
-       uid_t fsuid = current_fsuid();
+       kuid_t fsuid = current_fsuid();
 
        if (sa->aad->fs.request & AA_AUDIT_FILE_MASK) {
                audit_log_format(ab, " requested_mask=");
@@ -76,8 +76,10 @@ static void file_audit_cb(struct audit_buffer *ab, void *va)
                audit_file_mask(ab, sa->aad->fs.denied);
        }
        if (sa->aad->fs.request & AA_AUDIT_FILE_MASK) {
-               audit_log_format(ab, " fsuid=%d", fsuid);
-               audit_log_format(ab, " ouid=%d", sa->aad->fs.ouid);
+               audit_log_format(ab, " fsuid=%d",
+                                from_kuid(&init_user_ns, fsuid));
+               audit_log_format(ab, " ouid=%d",
+                                from_kuid(&init_user_ns, sa->aad->fs.ouid));
        }
 
        if (sa->aad->fs.target) {
@@ -103,7 +105,7 @@ static void file_audit_cb(struct audit_buffer *ab, void *va)
  */
 int aa_audit_file(struct aa_profile *profile, struct file_perms *perms,
                  gfp_t gfp, int op, u32 request, const char *name,
-                 const char *target, uid_t ouid, const char *info, int error)
+                 const char *target, kuid_t ouid, const char *info, int error)
 {
        int type = AUDIT_APPARMOR_AUTO;
        struct common_audit_data sa;
@@ -201,7 +203,7 @@ static struct file_perms compute_perms(struct aa_dfa *dfa, unsigned int state,
         */
        perms.kill = 0;
 
-       if (current_fsuid() == cond->uid) {
+       if (uid_eq(current_fsuid(), cond->uid)) {
                perms.allow = map_old_perms(dfa_user_allow(dfa, state));
                perms.audit = map_old_perms(dfa_user_audit(dfa, state));
                perms.quiet = map_old_perms(dfa_user_quiet(dfa, state));
index 4b7e18951aea31da2fd90cf7635f1d2707df1034..69d8cae634e7bef79ea136520b404a730914d9d4 100644 (file)
@@ -125,7 +125,7 @@ struct apparmor_audit_data {
                        const char *target;
                        u32 request;
                        u32 denied;
-                       uid_t ouid;
+                       kuid_t ouid;
                } fs;
        };
 };
index f98fd4701d800f5eec9c39d01f94e3447a7eeb33..967b2deda376a2b3ff63821301b26ba600260fc6 100644 (file)
@@ -71,7 +71,7 @@ struct path;
 
 /* need to make conditional which ones are being set */
 struct path_cond {
-       uid_t uid;
+       kuid_t uid;
        umode_t mode;
 };
 
@@ -146,7 +146,7 @@ static inline u16 dfa_map_xindex(u16 mask)
 
 int aa_audit_file(struct aa_profile *profile, struct file_perms *perms,
                  gfp_t gfp, int op, u32 request, const char *name,
-                 const char *target, uid_t ouid, const char *info, int error);
+                 const char *target, kuid_t ouid, const char *info, int error);
 
 /**
  * struct aa_file_rules - components used for file rule permissions
index 8ea39aabe94889a224c868757196afc084c578b8..8c2a7f6b35e2c4463a92508af7580ec4240c4656 100644 (file)
@@ -352,7 +352,7 @@ static int apparmor_path_chmod(struct path *path, umode_t mode)
        return common_perm_mnt_dentry(OP_CHMOD, path->mnt, path->dentry, AA_MAY_CHMOD);
 }
 
-static int apparmor_path_chown(struct path *path, uid_t uid, gid_t gid)
+static int apparmor_path_chown(struct path *path, kuid_t uid, kgid_t gid)
 {
        struct path_cond cond =  { path->dentry->d_inode->i_uid,
                                   path->dentry->d_inode->i_mode
index 61095df8b89ac452d50528144a67dca751d4f992..a40aac677c722b15dd30b89cc57c518d8764b020 100644 (file)
@@ -284,7 +284,7 @@ static int cap_path_chmod(struct path *path, umode_t mode)
        return 0;
 }
 
-static int cap_path_chown(struct path *path, uid_t uid, gid_t gid)
+static int cap_path_chown(struct path *path, kuid_t uid, kgid_t gid)
 {
        return 0;
 }
index 442204cc22d91772251043f3a051d6cce93bcfc7..4b877a92a7ea3dc3a0307f5c5efb9e78c3289b17 100644 (file)
@@ -457,6 +457,15 @@ struct cgroup_subsys devices_subsys = {
        .destroy = devcgroup_destroy,
        .subsys_id = devices_subsys_id,
        .base_cftypes = dev_cgroup_files,
+
+       /*
+        * While devices cgroup has the rudimentary hierarchy support which
+        * checks the parent's restriction, it doesn't properly propagate
+        * config changes in ancestors to their descendants.  A child
+        * should only be allowed to add more restrictions to the parent's
+        * configuration.  Fix it and remove the following.
+        */
+       .broken_hierarchy = true,
 };
 
 int __devcgroup_inode_permission(struct inode *inode, int mask)
index 49a464f5595b99f46a52da35bb331f546082b31f..dfb26918699c5be5cffcfe4f37295914561096d4 100644 (file)
@@ -106,8 +106,8 @@ static void hmac_add_misc(struct shash_desc *desc, struct inode *inode,
        memset(&hmac_misc, 0, sizeof hmac_misc);
        hmac_misc.ino = inode->i_ino;
        hmac_misc.generation = inode->i_generation;
-       hmac_misc.uid = inode->i_uid;
-       hmac_misc.gid = inode->i_gid;
+       hmac_misc.uid = from_kuid(&init_user_ns, inode->i_uid);
+       hmac_misc.gid = from_kgid(&init_user_ns, inode->i_gid);
        hmac_misc.mode = inode->i_mode;
        crypto_shash_update(desc, (const u8 *)&hmac_misc, sizeof hmac_misc);
        crypto_shash_final(desc, digest);
index 8901501425f42beb8a0e23dade6b4e4200019bf1..eb5484504f506490eb7c4f90a81cba92b7020c40 100644 (file)
@@ -33,6 +33,9 @@ char *evm_config_xattrnames[] = {
 #endif
 #ifdef CONFIG_SECURITY_SMACK
        XATTR_NAME_SMACK,
+#endif
+#ifdef CONFIG_IMA_APPRAISE
+       XATTR_NAME_IMA,
 #endif
        XATTR_NAME_CAPS,
        NULL
index 399641c3e84644821e711c0ade263a2ee89cdc49..d82a5a13d8551ca1b857a13c401f706340b88850 100644 (file)
@@ -22,7 +22,7 @@
 #include "integrity.h"
 
 static struct rb_root integrity_iint_tree = RB_ROOT;
-static DEFINE_SPINLOCK(integrity_iint_lock);
+static DEFINE_RWLOCK(integrity_iint_lock);
 static struct kmem_cache *iint_cache __read_mostly;
 
 int iint_initialized;
@@ -35,8 +35,6 @@ static struct integrity_iint_cache *__integrity_iint_find(struct inode *inode)
        struct integrity_iint_cache *iint;
        struct rb_node *n = integrity_iint_tree.rb_node;
 
-       assert_spin_locked(&integrity_iint_lock);
-
        while (n) {
                iint = rb_entry(n, struct integrity_iint_cache, rb_node);
 
@@ -63,9 +61,9 @@ struct integrity_iint_cache *integrity_iint_find(struct inode *inode)
        if (!IS_IMA(inode))
                return NULL;
 
-       spin_lock(&integrity_iint_lock);
+       read_lock(&integrity_iint_lock);
        iint = __integrity_iint_find(inode);
-       spin_unlock(&integrity_iint_lock);
+       read_unlock(&integrity_iint_lock);
 
        return iint;
 }
@@ -74,59 +72,53 @@ static void iint_free(struct integrity_iint_cache *iint)
 {
        iint->version = 0;
        iint->flags = 0UL;
+       iint->ima_status = INTEGRITY_UNKNOWN;
        iint->evm_status = INTEGRITY_UNKNOWN;
        kmem_cache_free(iint_cache, iint);
 }
 
 /**
- * integrity_inode_alloc - allocate an iint associated with an inode
+ * integrity_inode_get - find or allocate an iint associated with an inode
  * @inode: pointer to the inode
+ * @return: allocated iint
+ *
+ * Caller must lock i_mutex
  */
-int integrity_inode_alloc(struct inode *inode)
+struct integrity_iint_cache *integrity_inode_get(struct inode *inode)
 {
        struct rb_node **p;
-       struct rb_node *new_node, *parent = NULL;
-       struct integrity_iint_cache *new_iint, *test_iint;
-       int rc;
+       struct rb_node *node, *parent = NULL;
+       struct integrity_iint_cache *iint, *test_iint;
 
-       new_iint = kmem_cache_alloc(iint_cache, GFP_NOFS);
-       if (!new_iint)
-               return -ENOMEM;
+       iint = integrity_iint_find(inode);
+       if (iint)
+               return iint;
 
-       new_iint->inode = inode;
-       new_node = &new_iint->rb_node;
+       iint = kmem_cache_alloc(iint_cache, GFP_NOFS);
+       if (!iint)
+               return NULL;
 
-       mutex_lock(&inode->i_mutex);    /* i_flags */
-       spin_lock(&integrity_iint_lock);
+       write_lock(&integrity_iint_lock);
 
        p = &integrity_iint_tree.rb_node;
        while (*p) {
                parent = *p;
                test_iint = rb_entry(parent, struct integrity_iint_cache,
                                     rb_node);
-               rc = -EEXIST;
                if (inode < test_iint->inode)
                        p = &(*p)->rb_left;
-               else if (inode > test_iint->inode)
-                       p = &(*p)->rb_right;
                else
-                       goto out_err;
+                       p = &(*p)->rb_right;
        }
 
+       iint->inode = inode;
+       node = &iint->rb_node;
        inode->i_flags |= S_IMA;
-       rb_link_node(new_node, parent, p);
-       rb_insert_color(new_node, &integrity_iint_tree);
+       rb_link_node(node, parent, p);
+       rb_insert_color(node, &integrity_iint_tree);
 
-       spin_unlock(&integrity_iint_lock);
-       mutex_unlock(&inode->i_mutex);  /* i_flags */
-
-       return 0;
-out_err:
-       spin_unlock(&integrity_iint_lock);
-       mutex_unlock(&inode->i_mutex);  /* i_flags */
-       iint_free(new_iint);
-
-       return rc;
+       write_unlock(&integrity_iint_lock);
+       return iint;
 }
 
 /**
@@ -142,10 +134,10 @@ void integrity_inode_free(struct inode *inode)
        if (!IS_IMA(inode))
                return;
 
-       spin_lock(&integrity_iint_lock);
+       write_lock(&integrity_iint_lock);
        iint = __integrity_iint_find(inode);
        rb_erase(&iint->rb_node, &integrity_iint_tree);
-       spin_unlock(&integrity_iint_lock);
+       write_unlock(&integrity_iint_lock);
 
        iint_free(iint);
 }
@@ -157,7 +149,7 @@ static void init_once(void *foo)
        memset(iint, 0, sizeof *iint);
        iint->version = 0;
        iint->flags = 0UL;
-       mutex_init(&iint->mutex);
+       iint->ima_status = INTEGRITY_UNKNOWN;
        iint->evm_status = INTEGRITY_UNKNOWN;
 }
 
index b9c1219924f197b7195650ea99cc0c28b79aaa56..d232c73647ae46ee0f8295a4965419f5b044ca99 100644 (file)
@@ -11,6 +11,7 @@ config IMA
        select CRYPTO_SHA1
        select TCG_TPM if HAS_IOMEM && !UML
        select TCG_TIS if TCG_TPM && X86
+       select TCG_IBMVTPM if TCG_TPM && PPC64
        help
          The Trusted Computing Group(TCG) runtime Integrity
          Measurement Architecture(IMA) maintains a list of hash
@@ -55,3 +56,18 @@ config IMA_LSM_RULES
        default y
        help
          Disabling this option will disregard LSM based policy rules.
+
+config IMA_APPRAISE
+       bool "Appraise integrity measurements"
+       depends on IMA
+       default n
+       help
+         This option enables local measurement integrity appraisal.
+         It requires the system to be labeled with a security extended
+         attribute containing the file hash measurement.  To protect
+         the security extended attributes from offline attack, enable
+         and configure EVM.
+
+         For more information on integrity appraisal refer to:
+         <http://linux-ima.sourceforge.net>
+         If unsure, say N.
index 5f740f6971e1f59c6d3e7bfb6254d6350f3f55fa..3f2ca6bdc384ecbb5b3de4dc53d32c9a802d5958 100644 (file)
@@ -8,3 +8,4 @@ obj-$(CONFIG_IMA) += ima.o
 ima-y := ima_fs.o ima_queue.o ima_init.o ima_main.o ima_crypto.o ima_api.o \
         ima_policy.o
 ima-$(CONFIG_IMA_AUDIT) += ima_audit.o
+ima-$(CONFIG_IMA_APPRAISE) += ima_appraise.o
index e7c99fd0d2236e7d3a923733bdf7e622f8cd993a..8180adde10b72486644ba9eb1902c02b855a06c2 100644 (file)
@@ -40,6 +40,7 @@ enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8 };
 extern int ima_initialized;
 extern int ima_used_chip;
 extern char *ima_hash;
+extern int ima_appraise;
 
 /* IMA inode template definition */
 struct ima_template_data {
@@ -107,11 +108,14 @@ static inline unsigned long ima_hash_key(u8 *digest)
 }
 
 /* LIM API function definitions */
+int ima_get_action(struct inode *inode, int mask, int function);
 int ima_must_measure(struct inode *inode, int mask, int function);
 int ima_collect_measurement(struct integrity_iint_cache *iint,
                            struct file *file);
 void ima_store_measurement(struct integrity_iint_cache *iint, struct file *file,
                           const unsigned char *filename);
+void ima_audit_measurement(struct integrity_iint_cache *iint,
+                          const unsigned char *filename);
 int ima_store_template(struct ima_template_entry *entry, int violation,
                       struct inode *inode);
 void ima_template_show(struct seq_file *m, void *e, enum ima_show_type show);
@@ -123,14 +127,45 @@ struct integrity_iint_cache *integrity_iint_insert(struct inode *inode);
 struct integrity_iint_cache *integrity_iint_find(struct inode *inode);
 
 /* IMA policy related functions */
-enum ima_hooks { FILE_CHECK = 1, FILE_MMAP, BPRM_CHECK };
+enum ima_hooks { FILE_CHECK = 1, FILE_MMAP, BPRM_CHECK, POST_SETATTR };
 
-int ima_match_policy(struct inode *inode, enum ima_hooks func, int mask);
+int ima_match_policy(struct inode *inode, enum ima_hooks func, int mask,
+                    int flags);
 void ima_init_policy(void);
 void ima_update_policy(void);
 ssize_t ima_parse_add_rule(char *);
 void ima_delete_rules(void);
 
+/* Appraise integrity measurements */
+#define IMA_APPRAISE_ENFORCE   0x01
+#define IMA_APPRAISE_FIX       0x02
+
+#ifdef CONFIG_IMA_APPRAISE
+int ima_appraise_measurement(struct integrity_iint_cache *iint,
+                            struct file *file, const unsigned char *filename);
+int ima_must_appraise(struct inode *inode, enum ima_hooks func, int mask);
+void ima_update_xattr(struct integrity_iint_cache *iint, struct file *file);
+
+#else
+static inline int ima_appraise_measurement(struct integrity_iint_cache *iint,
+                                          struct file *file,
+                                          const unsigned char *filename)
+{
+       return INTEGRITY_UNKNOWN;
+}
+
+static inline int ima_must_appraise(struct inode *inode,
+                                   enum ima_hooks func, int mask)
+{
+       return 0;
+}
+
+static inline void ima_update_xattr(struct integrity_iint_cache *iint,
+                                   struct file *file)
+{
+}
+#endif
+
 /* LSM based policy rules require audit */
 #ifdef CONFIG_IMA_LSM_RULES
 
index 032ff03ad9078bc956faa6c6e6d6759b6e2a0420..b356884fb3ef9c17dbc3d71396ac01c98e20a400 100644 (file)
@@ -9,13 +9,17 @@
  * License.
  *
  * File: ima_api.c
- *     Implements must_measure, collect_measurement, store_measurement,
- *     and store_template.
+ *     Implements must_appraise_or_measure, collect_measurement,
+ *     appraise_measurement, store_measurement and store_template.
  */
 #include <linux/module.h>
 #include <linux/slab.h>
-
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/xattr.h>
+#include <linux/evm.h>
 #include "ima.h"
+
 static const char *IMA_TEMPLATE_NAME = "ima";
 
 /*
@@ -93,7 +97,7 @@ err_out:
 }
 
 /**
- * ima_must_measure - measure decision based on policy.
+ * ima_get_action - appraise & measure decision based on policy.
  * @inode: pointer to inode to measure
  * @mask: contains the permission mask (MAY_READ, MAY_WRITE, MAY_EXECUTE)
  * @function: calling function (FILE_CHECK, BPRM_CHECK, FILE_MMAP)
@@ -105,15 +109,22 @@ err_out:
  *     mask: contains the permission mask
  *     fsmagic: hex value
  *
- * Return 0 to measure. For matching a DONT_MEASURE policy, no policy,
- * or other error, return an error code.
-*/
-int ima_must_measure(struct inode *inode, int mask, int function)
+ * Returns IMA_MEASURE, IMA_APPRAISE mask.
+ *
+ */
+int ima_get_action(struct inode *inode, int mask, int function)
 {
-       int must_measure;
+       int flags = IMA_MEASURE | IMA_AUDIT | IMA_APPRAISE;
+
+       if (!ima_appraise)
+               flags &= ~IMA_APPRAISE;
 
-       must_measure = ima_match_policy(inode, function, mask);
-       return must_measure ? 0 : -EACCES;
+       return ima_match_policy(inode, function, mask, flags);
+}
+
+int ima_must_measure(struct inode *inode, int mask, int function)
+{
+       return ima_match_policy(inode, function, mask, IMA_MEASURE);
 }
 
 /*
@@ -129,16 +140,24 @@ int ima_must_measure(struct inode *inode, int mask, int function)
 int ima_collect_measurement(struct integrity_iint_cache *iint,
                            struct file *file)
 {
-       int result = -EEXIST;
+       struct inode *inode = file->f_dentry->d_inode;
+       const char *filename = file->f_dentry->d_name.name;
+       int result = 0;
 
-       if (!(iint->flags & IMA_MEASURED)) {
+       if (!(iint->flags & IMA_COLLECTED)) {
                u64 i_version = file->f_dentry->d_inode->i_version;
 
-               memset(iint->digest, 0, IMA_DIGEST_SIZE);
-               result = ima_calc_hash(file, iint->digest);
-               if (!result)
+               iint->ima_xattr.type = IMA_XATTR_DIGEST;
+               result = ima_calc_hash(file, iint->ima_xattr.digest);
+               if (!result) {
                        iint->version = i_version;
+                       iint->flags |= IMA_COLLECTED;
+               }
        }
+       if (result)
+               integrity_audit_msg(AUDIT_INTEGRITY_DATA, inode,
+                                   filename, "collect_data", "failed",
+                                   result, 0);
        return result;
 }
 
@@ -167,6 +186,9 @@ void ima_store_measurement(struct integrity_iint_cache *iint,
        struct ima_template_entry *entry;
        int violation = 0;
 
+       if (iint->flags & IMA_MEASURED)
+               return;
+
        entry = kmalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry) {
                integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode, filename,
@@ -174,7 +196,7 @@ void ima_store_measurement(struct integrity_iint_cache *iint,
                return;
        }
        memset(&entry->template, 0, sizeof(entry->template));
-       memcpy(entry->template.digest, iint->digest, IMA_DIGEST_SIZE);
+       memcpy(entry->template.digest, iint->ima_xattr.digest, IMA_DIGEST_SIZE);
        strcpy(entry->template.file_name,
               (strlen(filename) > IMA_EVENT_NAME_LEN_MAX) ?
               file->f_dentry->d_name.name : filename);
@@ -185,3 +207,33 @@ void ima_store_measurement(struct integrity_iint_cache *iint,
        if (result < 0)
                kfree(entry);
 }
+
+void ima_audit_measurement(struct integrity_iint_cache *iint,
+                          const unsigned char *filename)
+{
+       struct audit_buffer *ab;
+       char hash[(IMA_DIGEST_SIZE * 2) + 1];
+       int i;
+
+       if (iint->flags & IMA_AUDITED)
+               return;
+
+       for (i = 0; i < IMA_DIGEST_SIZE; i++)
+               hex_byte_pack(hash + (i * 2), iint->ima_xattr.digest[i]);
+       hash[i * 2] = '\0';
+
+       ab = audit_log_start(current->audit_context, GFP_KERNEL,
+                            AUDIT_INTEGRITY_RULE);
+       if (!ab)
+               return;
+
+       audit_log_format(ab, "file=");
+       audit_log_untrustedstring(ab, filename);
+       audit_log_format(ab, " hash=");
+       audit_log_untrustedstring(ab, hash);
+
+       audit_log_task_info(ab, current);
+       audit_log_end(ab);
+
+       iint->flags |= IMA_AUDITED;
+}
diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c
new file mode 100644 (file)
index 0000000..0aa43bd
--- /dev/null
@@ -0,0 +1,263 @@
+/*
+ * Copyright (C) 2011 IBM Corporation
+ *
+ * Author:
+ * Mimi Zohar <zohar@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ */
+#include <linux/module.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/xattr.h>
+#include <linux/magic.h>
+#include <linux/ima.h>
+#include <linux/evm.h>
+
+#include "ima.h"
+
+static int __init default_appraise_setup(char *str)
+{
+       if (strncmp(str, "off", 3) == 0)
+               ima_appraise = 0;
+       else if (strncmp(str, "fix", 3) == 0)
+               ima_appraise = IMA_APPRAISE_FIX;
+       return 1;
+}
+
+__setup("ima_appraise=", default_appraise_setup);
+
+/*
+ * ima_must_appraise - set appraise flag
+ *
+ * Return 1 to appraise
+ */
+int ima_must_appraise(struct inode *inode, enum ima_hooks func, int mask)
+{
+       if (!ima_appraise)
+               return 0;
+
+       return ima_match_policy(inode, func, mask, IMA_APPRAISE);
+}
+
+static void ima_fix_xattr(struct dentry *dentry,
+                         struct integrity_iint_cache *iint)
+{
+       iint->ima_xattr.type = IMA_XATTR_DIGEST;
+       __vfs_setxattr_noperm(dentry, XATTR_NAME_IMA, (u8 *)&iint->ima_xattr,
+                             sizeof iint->ima_xattr, 0);
+}
+
+/*
+ * ima_appraise_measurement - appraise file measurement
+ *
+ * Call evm_verifyxattr() to verify the integrity of 'security.ima'.
+ * Assuming success, compare the xattr hash with the collected measurement.
+ *
+ * Return 0 on success, error code otherwise
+ */
+int ima_appraise_measurement(struct integrity_iint_cache *iint,
+                            struct file *file, const unsigned char *filename)
+{
+       struct dentry *dentry = file->f_dentry;
+       struct inode *inode = dentry->d_inode;
+       struct evm_ima_xattr_data *xattr_value = NULL;
+       enum integrity_status status = INTEGRITY_UNKNOWN;
+       const char *op = "appraise_data";
+       char *cause = "unknown";
+       int rc;
+
+       if (!ima_appraise)
+               return 0;
+       if (!inode->i_op->getxattr)
+               return INTEGRITY_UNKNOWN;
+
+       if (iint->flags & IMA_APPRAISED)
+               return iint->ima_status;
+
+       rc = vfs_getxattr_alloc(dentry, XATTR_NAME_IMA, (char **)&xattr_value,
+                               0, GFP_NOFS);
+       if (rc <= 0) {
+               if (rc && rc != -ENODATA)
+                       goto out;
+
+               cause = "missing-hash";
+               status =
+                   (inode->i_size == 0) ? INTEGRITY_PASS : INTEGRITY_NOLABEL;
+               goto out;
+       }
+
+       status = evm_verifyxattr(dentry, XATTR_NAME_IMA, xattr_value, rc, iint);
+       if ((status != INTEGRITY_PASS) && (status != INTEGRITY_UNKNOWN)) {
+               if ((status == INTEGRITY_NOLABEL)
+                   || (status == INTEGRITY_NOXATTRS))
+                       cause = "missing-HMAC";
+               else if (status == INTEGRITY_FAIL)
+                       cause = "invalid-HMAC";
+               goto out;
+       }
+
+       switch (xattr_value->type) {
+       case IMA_XATTR_DIGEST:
+               rc = memcmp(xattr_value->digest, iint->ima_xattr.digest,
+                           IMA_DIGEST_SIZE);
+               if (rc) {
+                       cause = "invalid-hash";
+                       status = INTEGRITY_FAIL;
+                       print_hex_dump_bytes("security.ima: ", DUMP_PREFIX_NONE,
+                                            xattr_value, sizeof(*xattr_value));
+                       print_hex_dump_bytes("collected: ", DUMP_PREFIX_NONE,
+                                            (u8 *)&iint->ima_xattr,
+                                            sizeof iint->ima_xattr);
+                       break;
+               }
+               status = INTEGRITY_PASS;
+               break;
+       case EVM_IMA_XATTR_DIGSIG:
+               iint->flags |= IMA_DIGSIG;
+               rc = integrity_digsig_verify(INTEGRITY_KEYRING_IMA,
+                                            xattr_value->digest, rc - 1,
+                                            iint->ima_xattr.digest,
+                                            IMA_DIGEST_SIZE);
+               if (rc == -EOPNOTSUPP) {
+                       status = INTEGRITY_UNKNOWN;
+               } else if (rc) {
+                       cause = "invalid-signature";
+                       status = INTEGRITY_FAIL;
+               } else {
+                       status = INTEGRITY_PASS;
+               }
+               break;
+       default:
+               status = INTEGRITY_UNKNOWN;
+               cause = "unknown-ima-data";
+               break;
+       }
+
+out:
+       if (status != INTEGRITY_PASS) {
+               if ((ima_appraise & IMA_APPRAISE_FIX) &&
+                   (!xattr_value ||
+                    xattr_value->type != EVM_IMA_XATTR_DIGSIG)) {
+                       ima_fix_xattr(dentry, iint);
+                       status = INTEGRITY_PASS;
+               }
+               integrity_audit_msg(AUDIT_INTEGRITY_DATA, inode, filename,
+                                   op, cause, rc, 0);
+       } else {
+               iint->flags |= IMA_APPRAISED;
+       }
+       iint->ima_status = status;
+       kfree(xattr_value);
+       return status;
+}
+
+/*
+ * ima_update_xattr - update 'security.ima' hash value
+ */
+void ima_update_xattr(struct integrity_iint_cache *iint, struct file *file)
+{
+       struct dentry *dentry = file->f_dentry;
+       int rc = 0;
+
+       /* do not collect and update hash for digital signatures */
+       if (iint->flags & IMA_DIGSIG)
+               return;
+
+       rc = ima_collect_measurement(iint, file);
+       if (rc < 0)
+               return;
+
+       ima_fix_xattr(dentry, iint);
+}
+
+/**
+ * ima_inode_post_setattr - reflect file metadata changes
+ * @dentry: pointer to the affected dentry
+ *
+ * Changes to a dentry's metadata might result in needing to appraise.
+ *
+ * This function is called from notify_change(), which expects the caller
+ * to lock the inode's i_mutex.
+ */
+void ima_inode_post_setattr(struct dentry *dentry)
+{
+       struct inode *inode = dentry->d_inode;
+       struct integrity_iint_cache *iint;
+       int must_appraise, rc;
+
+       if (!ima_initialized || !ima_appraise || !S_ISREG(inode->i_mode)
+           || !inode->i_op->removexattr)
+               return;
+
+       must_appraise = ima_must_appraise(inode, MAY_ACCESS, POST_SETATTR);
+       iint = integrity_iint_find(inode);
+       if (iint) {
+               if (must_appraise)
+                       iint->flags |= IMA_APPRAISE;
+               else
+                       iint->flags &= ~(IMA_APPRAISE | IMA_APPRAISED);
+       }
+       if (!must_appraise)
+               rc = inode->i_op->removexattr(dentry, XATTR_NAME_IMA);
+       return;
+}
+
+/*
+ * ima_protect_xattr - protect 'security.ima'
+ *
+ * Ensure that not just anyone can modify or remove 'security.ima'.
+ */
+static int ima_protect_xattr(struct dentry *dentry, const char *xattr_name,
+                            const void *xattr_value, size_t xattr_value_len)
+{
+       if (strcmp(xattr_name, XATTR_NAME_IMA) == 0) {
+               if (!capable(CAP_SYS_ADMIN))
+                       return -EPERM;
+               return 1;
+       }
+       return 0;
+}
+
+static void ima_reset_appraise_flags(struct inode *inode)
+{
+       struct integrity_iint_cache *iint;
+
+       if (!ima_initialized || !ima_appraise || !S_ISREG(inode->i_mode))
+               return;
+
+       iint = integrity_iint_find(inode);
+       if (!iint)
+               return;
+
+       iint->flags &= ~IMA_DONE_MASK;
+       return;
+}
+
+int ima_inode_setxattr(struct dentry *dentry, const char *xattr_name,
+                      const void *xattr_value, size_t xattr_value_len)
+{
+       int result;
+
+       result = ima_protect_xattr(dentry, xattr_name, xattr_value,
+                                  xattr_value_len);
+       if (result == 1) {
+               ima_reset_appraise_flags(dentry->d_inode);
+               result = 0;
+       }
+       return result;
+}
+
+int ima_inode_removexattr(struct dentry *dentry, const char *xattr_name)
+{
+       int result;
+
+       result = ima_protect_xattr(dentry, xattr_name, NULL, 0);
+       if (result == 1) {
+               ima_reset_appraise_flags(dentry->d_inode);
+               result = 0;
+       }
+       return result;
+}
index 7a57f6769e9cffd276e5e33e64efa7f388561cd4..c586faae8fd6c7901baf6b37b3db0500157f8644 100644 (file)
@@ -39,8 +39,9 @@ void integrity_audit_msg(int audit_msgno, struct inode *inode,
 
        ab = audit_log_start(current->audit_context, GFP_KERNEL, audit_msgno);
        audit_log_format(ab, "pid=%d uid=%u auid=%u ses=%u",
-                        current->pid, current_cred()->uid,
-                        audit_get_loginuid(current),
+                        current->pid,
+                        from_kuid(&init_user_ns, current_cred()->uid),
+                        from_kuid(&init_user_ns, audit_get_loginuid(current)),
                         audit_get_sessionid(current));
        audit_log_task_context(ab);
        audit_log_format(ab, " op=");
index 9b3ade7468b283412fa962fa291f0c3f6413812f..b21ee5b5495a8a4a3bf3e5626cd367d16158757f 100644 (file)
@@ -48,7 +48,7 @@ int ima_calc_hash(struct file *file, char *digest)
        struct scatterlist sg[1];
        loff_t i_size, offset = 0;
        char *rbuf;
-       int rc;
+       int rc, read = 0;
 
        rc = init_desc(&desc);
        if (rc != 0)
@@ -59,6 +59,10 @@ int ima_calc_hash(struct file *file, char *digest)
                rc = -ENOMEM;
                goto out;
        }
+       if (!(file->f_mode & FMODE_READ)) {
+               file->f_mode |= FMODE_READ;
+               read = 1;
+       }
        i_size = i_size_read(file->f_dentry->d_inode);
        while (offset < i_size) {
                int rbuf_len;
@@ -80,6 +84,8 @@ int ima_calc_hash(struct file *file, char *digest)
        kfree(rbuf);
        if (!rc)
                rc = crypto_hash_final(&desc, digest);
+       if (read)
+               file->f_mode &= ~FMODE_READ;
 out:
        crypto_free_hash(desc.tfm);
        return rc;
index be8294915cf7f65524a21ab2dd3101ecb0a6edd9..73c9a268253e44718c1f02395f2ffb1f905511ca 100644 (file)
 #include <linux/mount.h>
 #include <linux/mman.h>
 #include <linux/slab.h>
+#include <linux/xattr.h>
 #include <linux/ima.h>
 
 #include "ima.h"
 
 int ima_initialized;
 
+#ifdef CONFIG_IMA_APPRAISE
+int ima_appraise = IMA_APPRAISE_ENFORCE;
+#else
+int ima_appraise;
+#endif
+
 char *ima_hash = "sha1";
 static int __init hash_setup(char *str)
 {
@@ -52,7 +59,7 @@ static void ima_rdwr_violation_check(struct file *file)
        struct dentry *dentry = file->f_path.dentry;
        struct inode *inode = dentry->d_inode;
        fmode_t mode = file->f_mode;
-       int rc;
+       int must_measure;
        bool send_tomtou = false, send_writers = false;
        unsigned char *pathname = NULL, *pathbuf = NULL;
 
@@ -67,8 +74,8 @@ static void ima_rdwr_violation_check(struct file *file)
                goto out;
        }
 
-       rc = ima_must_measure(inode, MAY_READ, FILE_CHECK);
-       if (rc < 0)
+       must_measure = ima_must_measure(inode, MAY_READ, FILE_CHECK);
+       if (!must_measure)
                goto out;
 
        if (atomic_read(&inode->i_writecount) > 0)
@@ -100,17 +107,21 @@ out:
 }
 
 static void ima_check_last_writer(struct integrity_iint_cache *iint,
-                                 struct inode *inode,
-                                 struct file *file)
+                                 struct inode *inode, struct file *file)
 {
        fmode_t mode = file->f_mode;
 
-       mutex_lock(&iint->mutex);
-       if (mode & FMODE_WRITE &&
-           atomic_read(&inode->i_writecount) == 1 &&
-           iint->version != inode->i_version)
-               iint->flags &= ~IMA_MEASURED;
-       mutex_unlock(&iint->mutex);
+       if (!(mode & FMODE_WRITE))
+               return;
+
+       mutex_lock(&inode->i_mutex);
+       if (atomic_read(&inode->i_writecount) == 1 &&
+           iint->version != inode->i_version) {
+               iint->flags &= ~IMA_DONE_MASK;
+               if (iint->flags & IMA_APPRAISE)
+                       ima_update_xattr(iint, file);
+       }
+       mutex_unlock(&inode->i_mutex);
 }
 
 /**
@@ -140,28 +151,37 @@ static int process_measurement(struct file *file, const unsigned char *filename,
        struct inode *inode = file->f_dentry->d_inode;
        struct integrity_iint_cache *iint;
        unsigned char *pathname = NULL, *pathbuf = NULL;
-       int rc = 0;
+       int rc = -ENOMEM, action, must_appraise;
 
        if (!ima_initialized || !S_ISREG(inode->i_mode))
                return 0;
 
-       rc = ima_must_measure(inode, mask, function);
-       if (rc != 0)
-               return rc;
-retry:
-       iint = integrity_iint_find(inode);
-       if (!iint) {
-               rc = integrity_inode_alloc(inode);
-               if (!rc || rc == -EEXIST)
-                       goto retry;
-               return rc;
-       }
+       /* Determine if in appraise/audit/measurement policy,
+        * returns IMA_MEASURE, IMA_APPRAISE, IMA_AUDIT bitmask.  */
+       action = ima_get_action(inode, mask, function);
+       if (!action)
+               return 0;
 
-       mutex_lock(&iint->mutex);
+       must_appraise = action & IMA_APPRAISE;
 
-       rc = iint->flags & IMA_MEASURED ? 1 : 0;
-       if (rc != 0)
+       mutex_lock(&inode->i_mutex);
+
+       iint = integrity_inode_get(inode);
+       if (!iint)
+               goto out;
+
+       /* Determine if already appraised/measured based on bitmask
+        * (IMA_MEASURE, IMA_MEASURED, IMA_APPRAISE, IMA_APPRAISED,
+        *  IMA_AUDIT, IMA_AUDITED) */
+       iint->flags |= action;
+       action &= ~((iint->flags & IMA_DONE_MASK) >> 1);
+
+       /* Nothing to do, just return existing appraised status */
+       if (!action) {
+               if (iint->flags & IMA_APPRAISED)
+                       rc = iint->ima_status;
                goto out;
+       }
 
        rc = ima_collect_measurement(iint, file);
        if (rc != 0)
@@ -177,11 +197,18 @@ retry:
                                pathname = NULL;
                }
        }
-       ima_store_measurement(iint, file, !pathname ? filename : pathname);
+       if (action & IMA_MEASURE)
+               ima_store_measurement(iint, file,
+                                     !pathname ? filename : pathname);
+       if (action & IMA_APPRAISE)
+               rc = ima_appraise_measurement(iint, file,
+                                             !pathname ? filename : pathname);
+       if (action & IMA_AUDIT)
+               ima_audit_measurement(iint, !pathname ? filename : pathname);
        kfree(pathbuf);
 out:
-       mutex_unlock(&iint->mutex);
-       return rc;
+       mutex_unlock(&inode->i_mutex);
+       return (rc && must_appraise) ? -EACCES : 0;
 }
 
 /**
@@ -197,14 +224,14 @@ out:
  */
 int ima_file_mmap(struct file *file, unsigned long prot)
 {
-       int rc;
+       int rc = 0;
 
        if (!file)
                return 0;
        if (prot & PROT_EXEC)
                rc = process_measurement(file, file->f_dentry->d_name.name,
                                         MAY_EXEC, FILE_MMAP);
-       return 0;
+       return (ima_appraise & IMA_APPRAISE_ENFORCE) ? rc : 0;
 }
 
 /**
@@ -228,7 +255,7 @@ int ima_bprm_check(struct linux_binprm *bprm)
                                 (strcmp(bprm->filename, bprm->interp) == 0) ?
                                 bprm->filename : bprm->interp,
                                 MAY_EXEC, BPRM_CHECK);
-       return 0;
+       return (ima_appraise & IMA_APPRAISE_ENFORCE) ? rc : 0;
 }
 
 /**
@@ -249,7 +276,7 @@ int ima_file_check(struct file *file, int mask)
        rc = process_measurement(file, file->f_dentry->d_name.name,
                                 mask & (MAY_READ | MAY_WRITE | MAY_EXEC),
                                 FILE_CHECK);
-       return 0;
+       return (ima_appraise & IMA_APPRAISE_ENFORCE) ? rc : 0;
 }
 EXPORT_SYMBOL_GPL(ima_file_check);
 
index 1a9583008aaef1bf1fe6868f921408ac83d06a63..c7dacd2eab7a8eb59de3b00d4963b59b58396b96 100644 (file)
 #define IMA_MASK       0x0002
 #define IMA_FSMAGIC    0x0004
 #define IMA_UID                0x0008
+#define IMA_FOWNER     0x0010
 
-enum ima_action { UNKNOWN = -1, DONT_MEASURE = 0, MEASURE };
+#define UNKNOWN                0
+#define MEASURE                0x0001  /* same as IMA_MEASURE */
+#define DONT_MEASURE   0x0002
+#define APPRAISE       0x0004  /* same as IMA_APPRAISE */
+#define DONT_APPRAISE  0x0008
+#define AUDIT          0x0040
 
 #define MAX_LSM_RULES 6
 enum lsm_rule_types { LSM_OBJ_USER, LSM_OBJ_ROLE, LSM_OBJ_TYPE,
        LSM_SUBJ_USER, LSM_SUBJ_ROLE, LSM_SUBJ_TYPE
 };
 
-struct ima_measure_rule_entry {
+struct ima_rule_entry {
        struct list_head list;
-       enum ima_action action;
+       int action;
        unsigned int flags;
        enum ima_hooks func;
        int mask;
        unsigned long fsmagic;
-       uid_t uid;
+       kuid_t uid;
+       kuid_t fowner;
        struct {
                void *rule;     /* LSM file metadata specific */
                int type;       /* audit type */
@@ -48,7 +55,7 @@ struct ima_measure_rule_entry {
 
 /*
  * Without LSM specific knowledge, the default policy can only be
- * written in terms of .action, .func, .mask, .fsmagic, and .uid
+ * written in terms of .action, .func, .mask, .fsmagic, .uid, and .fowner
  */
 
 /*
@@ -57,7 +64,7 @@ struct ima_measure_rule_entry {
  * normal users can easily run the machine out of memory simply building
  * and running executables.
  */
-static struct ima_measure_rule_entry default_rules[] = {
+static struct ima_rule_entry default_rules[] = {
        {.action = DONT_MEASURE,.fsmagic = PROC_SUPER_MAGIC,.flags = IMA_FSMAGIC},
        {.action = DONT_MEASURE,.fsmagic = SYSFS_MAGIC,.flags = IMA_FSMAGIC},
        {.action = DONT_MEASURE,.fsmagic = DEBUGFS_MAGIC,.flags = IMA_FSMAGIC},
@@ -71,23 +78,45 @@ static struct ima_measure_rule_entry default_rules[] = {
         .flags = IMA_FUNC | IMA_MASK},
        {.action = MEASURE,.func = BPRM_CHECK,.mask = MAY_EXEC,
         .flags = IMA_FUNC | IMA_MASK},
-       {.action = MEASURE,.func = FILE_CHECK,.mask = MAY_READ,.uid = 0,
+       {.action = MEASURE,.func = FILE_CHECK,.mask = MAY_READ,.uid = GLOBAL_ROOT_UID,
         .flags = IMA_FUNC | IMA_MASK | IMA_UID},
 };
 
-static LIST_HEAD(measure_default_rules);
-static LIST_HEAD(measure_policy_rules);
-static struct list_head *ima_measure;
+static struct ima_rule_entry default_appraise_rules[] = {
+       {.action = DONT_APPRAISE,.fsmagic = PROC_SUPER_MAGIC,.flags = IMA_FSMAGIC},
+       {.action = DONT_APPRAISE,.fsmagic = SYSFS_MAGIC,.flags = IMA_FSMAGIC},
+       {.action = DONT_APPRAISE,.fsmagic = DEBUGFS_MAGIC,.flags = IMA_FSMAGIC},
+       {.action = DONT_APPRAISE,.fsmagic = TMPFS_MAGIC,.flags = IMA_FSMAGIC},
+       {.action = DONT_APPRAISE,.fsmagic = RAMFS_MAGIC,.flags = IMA_FSMAGIC},
+       {.action = DONT_APPRAISE,.fsmagic = DEVPTS_SUPER_MAGIC,.flags = IMA_FSMAGIC},
+       {.action = DONT_APPRAISE,.fsmagic = BINFMTFS_MAGIC,.flags = IMA_FSMAGIC},
+       {.action = DONT_APPRAISE,.fsmagic = SECURITYFS_MAGIC,.flags = IMA_FSMAGIC},
+       {.action = DONT_APPRAISE,.fsmagic = SELINUX_MAGIC,.flags = IMA_FSMAGIC},
+       {.action = DONT_APPRAISE,.fsmagic = CGROUP_SUPER_MAGIC,.flags = IMA_FSMAGIC},
+       {.action = APPRAISE,.fowner = GLOBAL_ROOT_UID,.flags = IMA_FOWNER},
+};
+
+static LIST_HEAD(ima_default_rules);
+static LIST_HEAD(ima_policy_rules);
+static struct list_head *ima_rules;
 
-static DEFINE_MUTEX(ima_measure_mutex);
+static DEFINE_MUTEX(ima_rules_mutex);
 
 static bool ima_use_tcb __initdata;
-static int __init default_policy_setup(char *str)
+static int __init default_measure_policy_setup(char *str)
 {
        ima_use_tcb = 1;
        return 1;
 }
-__setup("ima_tcb", default_policy_setup);
+__setup("ima_tcb", default_measure_policy_setup);
+
+static bool ima_use_appraise_tcb __initdata;
+static int __init default_appraise_policy_setup(char *str)
+{
+       ima_use_appraise_tcb = 1;
+       return 1;
+}
+__setup("ima_appraise_tcb", default_appraise_policy_setup);
 
 /**
  * ima_match_rules - determine whether an inode matches the measure rule.
@@ -98,7 +127,7 @@ __setup("ima_tcb", default_policy_setup);
  *
  * Returns true on rule match, false on failure.
  */
-static bool ima_match_rules(struct ima_measure_rule_entry *rule,
+static bool ima_match_rules(struct ima_rule_entry *rule,
                            struct inode *inode, enum ima_hooks func, int mask)
 {
        struct task_struct *tsk = current;
@@ -112,7 +141,9 @@ static bool ima_match_rules(struct ima_measure_rule_entry *rule,
        if ((rule->flags & IMA_FSMAGIC)
            && rule->fsmagic != inode->i_sb->s_magic)
                return false;
-       if ((rule->flags & IMA_UID) && rule->uid != cred->uid)
+       if ((rule->flags & IMA_UID) && !uid_eq(rule->uid, cred->uid))
+               return false;
+       if ((rule->flags & IMA_FOWNER) && !uid_eq(rule->fowner, inode->i_uid))
                return false;
        for (i = 0; i < MAX_LSM_RULES; i++) {
                int rc = 0;
@@ -163,39 +194,61 @@ static bool ima_match_rules(struct ima_measure_rule_entry *rule,
  * as elements in the list are never deleted, nor does the list
  * change.)
  */
-int ima_match_policy(struct inode *inode, enum ima_hooks func, int mask)
+int ima_match_policy(struct inode *inode, enum ima_hooks func, int mask,
+                    int flags)
 {
-       struct ima_measure_rule_entry *entry;
+       struct ima_rule_entry *entry;
+       int action = 0, actmask = flags | (flags << 1);
+
+       list_for_each_entry(entry, ima_rules, list) {
+
+               if (!(entry->action & actmask))
+                       continue;
+
+               if (!ima_match_rules(entry, inode, func, mask))
+                       continue;
 
-       list_for_each_entry(entry, ima_measure, list) {
-               bool rc;
+               action |= entry->action & IMA_DO_MASK;
+               if (entry->action & IMA_DO_MASK)
+                       actmask &= ~(entry->action | entry->action << 1);
+               else
+                       actmask &= ~(entry->action | entry->action >> 1);
 
-               rc = ima_match_rules(entry, inode, func, mask);
-               if (rc)
-                       return entry->action;
+               if (!actmask)
+                       break;
        }
-       return 0;
+
+       return action;
 }
 
 /**
  * ima_init_policy - initialize the default measure rules.
  *
- * ima_measure points to either the measure_default_rules or the
- * the new measure_policy_rules.
+ * ima_rules points to either the ima_default_rules or the
+ * the new ima_policy_rules.
  */
 void __init ima_init_policy(void)
 {
-       int i, entries;
+       int i, measure_entries, appraise_entries;
 
        /* if !ima_use_tcb set entries = 0 so we load NO default rules */
-       if (ima_use_tcb)
-               entries = ARRAY_SIZE(default_rules);
-       else
-               entries = 0;
-
-       for (i = 0; i < entries; i++)
-               list_add_tail(&default_rules[i].list, &measure_default_rules);
-       ima_measure = &measure_default_rules;
+       measure_entries = ima_use_tcb ? ARRAY_SIZE(default_rules) : 0;
+       appraise_entries = ima_use_appraise_tcb ?
+                        ARRAY_SIZE(default_appraise_rules) : 0;
+
+       for (i = 0; i < measure_entries + appraise_entries; i++) {
+               if (i < measure_entries)
+                       list_add_tail(&default_rules[i].list,
+                                     &ima_default_rules);
+               else {
+                       int j = i - measure_entries;
+
+                       list_add_tail(&default_appraise_rules[j].list,
+                                     &ima_default_rules);
+               }
+       }
+
+       ima_rules = &ima_default_rules;
 }
 
 /**
@@ -212,8 +265,8 @@ void ima_update_policy(void)
        int result = 1;
        int audit_info = 0;
 
-       if (ima_measure == &measure_default_rules) {
-               ima_measure = &measure_policy_rules;
+       if (ima_rules == &ima_default_rules) {
+               ima_rules = &ima_policy_rules;
                cause = "complete";
                result = 0;
        }
@@ -224,14 +277,19 @@ void ima_update_policy(void)
 enum {
        Opt_err = -1,
        Opt_measure = 1, Opt_dont_measure,
+       Opt_appraise, Opt_dont_appraise,
+       Opt_audit,
        Opt_obj_user, Opt_obj_role, Opt_obj_type,
        Opt_subj_user, Opt_subj_role, Opt_subj_type,
-       Opt_func, Opt_mask, Opt_fsmagic, Opt_uid
+       Opt_func, Opt_mask, Opt_fsmagic, Opt_uid, Opt_fowner
 };
 
 static match_table_t policy_tokens = {
        {Opt_measure, "measure"},
        {Opt_dont_measure, "dont_measure"},
+       {Opt_appraise, "appraise"},
+       {Opt_dont_appraise, "dont_appraise"},
+       {Opt_audit, "audit"},
        {Opt_obj_user, "obj_user=%s"},
        {Opt_obj_role, "obj_role=%s"},
        {Opt_obj_type, "obj_type=%s"},
@@ -242,10 +300,11 @@ static match_table_t policy_tokens = {
        {Opt_mask, "mask=%s"},
        {Opt_fsmagic, "fsmagic=%s"},
        {Opt_uid, "uid=%s"},
+       {Opt_fowner, "fowner=%s"},
        {Opt_err, NULL}
 };
 
-static int ima_lsm_rule_init(struct ima_measure_rule_entry *entry,
+static int ima_lsm_rule_init(struct ima_rule_entry *entry,
                             char *args, int lsm_rule, int audit_type)
 {
        int result;
@@ -269,7 +328,7 @@ static void ima_log_string(struct audit_buffer *ab, char *key, char *value)
        audit_log_format(ab, " ");
 }
 
-static int ima_parse_rule(char *rule, struct ima_measure_rule_entry *entry)
+static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
 {
        struct audit_buffer *ab;
        char *p;
@@ -277,7 +336,8 @@ static int ima_parse_rule(char *rule, struct ima_measure_rule_entry *entry)
 
        ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_INTEGRITY_RULE);
 
-       entry->uid = -1;
+       entry->uid = INVALID_UID;
+       entry->fowner = INVALID_UID;
        entry->action = UNKNOWN;
        while ((p = strsep(&rule, " \t")) != NULL) {
                substring_t args[MAX_OPT_ARGS];
@@ -306,11 +366,35 @@ static int ima_parse_rule(char *rule, struct ima_measure_rule_entry *entry)
 
                        entry->action = DONT_MEASURE;
                        break;
+               case Opt_appraise:
+                       ima_log_string(ab, "action", "appraise");
+
+                       if (entry->action != UNKNOWN)
+                               result = -EINVAL;
+
+                       entry->action = APPRAISE;
+                       break;
+               case Opt_dont_appraise:
+                       ima_log_string(ab, "action", "dont_appraise");
+
+                       if (entry->action != UNKNOWN)
+                               result = -EINVAL;
+
+                       entry->action = DONT_APPRAISE;
+                       break;
+               case Opt_audit:
+                       ima_log_string(ab, "action", "audit");
+
+                       if (entry->action != UNKNOWN)
+                               result = -EINVAL;
+
+                       entry->action = AUDIT;
+                       break;
                case Opt_func:
                        ima_log_string(ab, "func", args[0].from);
 
                        if (entry->func)
-                               result  = -EINVAL;
+                               result = -EINVAL;
 
                        if (strcmp(args[0].from, "FILE_CHECK") == 0)
                                entry->func = FILE_CHECK;
@@ -361,20 +445,37 @@ static int ima_parse_rule(char *rule, struct ima_measure_rule_entry *entry)
                case Opt_uid:
                        ima_log_string(ab, "uid", args[0].from);
 
-                       if (entry->uid != -1) {
+                       if (uid_valid(entry->uid)) {
                                result = -EINVAL;
                                break;
                        }
 
                        result = strict_strtoul(args[0].from, 10, &lnum);
                        if (!result) {
-                               entry->uid = (uid_t) lnum;
-                               if (entry->uid != lnum)
+                               entry->uid = make_kuid(current_user_ns(), (uid_t)lnum);
+                               if (!uid_valid(entry->uid) || (((uid_t)lnum) != lnum))
                                        result = -EINVAL;
                                else
                                        entry->flags |= IMA_UID;
                        }
                        break;
+               case Opt_fowner:
+                       ima_log_string(ab, "fowner", args[0].from);
+
+                       if (uid_valid(entry->fowner)) {
+                               result = -EINVAL;
+                               break;
+                       }
+
+                       result = strict_strtoul(args[0].from, 10, &lnum);
+                       if (!result) {
+                               entry->fowner = make_kuid(current_user_ns(), (uid_t)lnum);
+                               if (!uid_valid(entry->fowner) || (((uid_t)lnum) != lnum))
+                                       result = -EINVAL;
+                               else
+                                       entry->flags |= IMA_FOWNER;
+                       }
+                       break;
                case Opt_obj_user:
                        ima_log_string(ab, "obj_user", args[0].from);
                        result = ima_lsm_rule_init(entry, args[0].from,
@@ -426,7 +527,7 @@ static int ima_parse_rule(char *rule, struct ima_measure_rule_entry *entry)
 }
 
 /**
- * ima_parse_add_rule - add a rule to measure_policy_rules
+ * ima_parse_add_rule - add a rule to ima_policy_rules
  * @rule - ima measurement policy rule
  *
  * Uses a mutex to protect the policy list from multiple concurrent writers.
@@ -436,12 +537,12 @@ ssize_t ima_parse_add_rule(char *rule)
 {
        const char *op = "update_policy";
        char *p;
-       struct ima_measure_rule_entry *entry;
+       struct ima_rule_entry *entry;
        ssize_t result, len;
        int audit_info = 0;
 
        /* Prevent installed policy from changing */
-       if (ima_measure != &measure_default_rules) {
+       if (ima_rules != &ima_default_rules) {
                integrity_audit_msg(AUDIT_INTEGRITY_STATUS, NULL,
                                    NULL, op, "already exists",
                                    -EACCES, audit_info);
@@ -474,9 +575,9 @@ ssize_t ima_parse_add_rule(char *rule)
                return result;
        }
 
-       mutex_lock(&ima_measure_mutex);
-       list_add_tail(&entry->list, &measure_policy_rules);
-       mutex_unlock(&ima_measure_mutex);
+       mutex_lock(&ima_rules_mutex);
+       list_add_tail(&entry->list, &ima_policy_rules);
+       mutex_unlock(&ima_rules_mutex);
 
        return len;
 }
@@ -484,12 +585,12 @@ ssize_t ima_parse_add_rule(char *rule)
 /* ima_delete_rules called to cleanup invalid policy */
 void ima_delete_rules(void)
 {
-       struct ima_measure_rule_entry *entry, *tmp;
+       struct ima_rule_entry *entry, *tmp;
 
-       mutex_lock(&ima_measure_mutex);
-       list_for_each_entry_safe(entry, tmp, &measure_policy_rules, list) {
+       mutex_lock(&ima_rules_mutex);
+       list_for_each_entry_safe(entry, tmp, &ima_policy_rules, list) {
                list_del(&entry->list);
                kfree(entry);
        }
-       mutex_unlock(&ima_measure_mutex);
+       mutex_unlock(&ima_rules_mutex);
 }
index 7a25ecec5aaac6b8d00c2bf0f926deaeea561163..e9db763a875e80f8fc4bd528974922d7e98ae97e 100644 (file)
 #include <linux/integrity.h>
 #include <crypto/sha.h>
 
+/* iint action cache flags */
+#define IMA_MEASURE            0x0001
+#define IMA_MEASURED           0x0002
+#define IMA_APPRAISE           0x0004
+#define IMA_APPRAISED          0x0008
+/*#define IMA_COLLECT          0x0010  do not use this flag */
+#define IMA_COLLECTED          0x0020
+#define IMA_AUDIT              0x0040
+#define IMA_AUDITED            0x0080
+
 /* iint cache flags */
-#define IMA_MEASURED           0x01
+#define IMA_DIGSIG             0x0100
+
+#define IMA_DO_MASK            (IMA_MEASURE | IMA_APPRAISE | IMA_AUDIT)
+#define IMA_DONE_MASK          (IMA_MEASURED | IMA_APPRAISED | IMA_AUDITED \
+                                | IMA_COLLECTED)
 
 enum evm_ima_xattr_type {
        IMA_XATTR_DIGEST = 0x01,
@@ -34,9 +48,9 @@ struct integrity_iint_cache {
        struct rb_node rb_node; /* rooted in integrity_iint_tree */
        struct inode *inode;    /* back pointer to inode in question */
        u64 version;            /* track inode changes */
-       unsigned char flags;
-       u8 digest[SHA1_DIGEST_SIZE];
-       struct mutex mutex;     /* protects: version, flags, digest */
+       unsigned short flags;
+       struct evm_ima_xattr_data ima_xattr;
+       enum integrity_status ima_status;
        enum integrity_status evm_status;
 };
 
index 61ab7c82ebb12f1470d26d6aa29519f9c33a3ce9..d67c97bb10256d5dc5a9b74b3b8aaa37022f96b1 100644 (file)
@@ -62,7 +62,7 @@ void key_schedule_gc(time_t gc_at)
 
        if (gc_at <= now || test_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags)) {
                kdebug("IMMEDIATE");
-               queue_work(system_nrt_wq, &key_gc_work);
+               schedule_work(&key_gc_work);
        } else if (gc_at < key_gc_next_run) {
                kdebug("DEFERRED");
                key_gc_next_run = gc_at;
@@ -77,7 +77,7 @@ void key_schedule_gc(time_t gc_at)
 void key_schedule_gc_links(void)
 {
        set_bit(KEY_GC_KEY_EXPIRED, &key_gc_flags);
-       queue_work(system_nrt_wq, &key_gc_work);
+       schedule_work(&key_gc_work);
 }
 
 /*
@@ -120,7 +120,7 @@ void key_gc_keytype(struct key_type *ktype)
        set_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags);
 
        kdebug("schedule");
-       queue_work(system_nrt_wq, &key_gc_work);
+       schedule_work(&key_gc_work);
 
        kdebug("sleep");
        wait_on_bit(&key_gc_flags, KEY_GC_REAPING_KEYTYPE, key_gc_wait_bit,
@@ -369,7 +369,7 @@ maybe_resched:
        }
 
        if (gc_state & KEY_GC_REAP_AGAIN)
-               queue_work(system_nrt_wq, &key_gc_work);
+               schedule_work(&key_gc_work);
        kleave(" [end %x]", gc_state);
        return;
 
index 22ff05269e3d5a35ec00750816e662f9709209db..8bbefc3b55d42961f94c1c8727b4db8f9026f3f9 100644 (file)
@@ -52,8 +52,7 @@ struct key_user {
        atomic_t                usage;          /* for accessing qnkeys & qnbytes */
        atomic_t                nkeys;          /* number of keys */
        atomic_t                nikeys;         /* number of instantiated keys */
-       uid_t                   uid;
-       struct user_namespace   *user_ns;
+       kuid_t                  uid;
        int                     qnkeys;         /* number of keys allocated to this user */
        int                     qnbytes;        /* number of bytes allocated to this user */
 };
@@ -62,8 +61,7 @@ extern struct rb_root key_user_tree;
 extern spinlock_t      key_user_lock;
 extern struct key_user root_key_user;
 
-extern struct key_user *key_user_lookup(uid_t uid,
-                                       struct user_namespace *user_ns);
+extern struct key_user *key_user_lookup(kuid_t uid);
 extern void key_user_put(struct key_user *user);
 
 /*
index 50d96d4e06f235c3e8950255c4b8bd5fd64aa7d2..a30e927349051ce651e6f0872861cdda881e5bce 100644 (file)
@@ -18,7 +18,6 @@
 #include <linux/workqueue.h>
 #include <linux/random.h>
 #include <linux/err.h>
-#include <linux/user_namespace.h>
 #include "internal.h"
 
 struct kmem_cache *key_jar;
@@ -52,7 +51,7 @@ void __key_check(const struct key *key)
  * Get the key quota record for a user, allocating a new record if one doesn't
  * already exist.
  */
-struct key_user *key_user_lookup(uid_t uid, struct user_namespace *user_ns)
+struct key_user *key_user_lookup(kuid_t uid)
 {
        struct key_user *candidate = NULL, *user;
        struct rb_node *parent = NULL;
@@ -67,13 +66,9 @@ try_again:
                parent = *p;
                user = rb_entry(parent, struct key_user, node);
 
-               if (uid < user->uid)
+               if (uid_lt(uid, user->uid))
                        p = &(*p)->rb_left;
-               else if (uid > user->uid)
-                       p = &(*p)->rb_right;
-               else if (user_ns < user->user_ns)
-                       p = &(*p)->rb_left;
-               else if (user_ns > user->user_ns)
+               else if (uid_gt(uid, user->uid))
                        p = &(*p)->rb_right;
                else
                        goto found;
@@ -102,7 +97,6 @@ try_again:
        atomic_set(&candidate->nkeys, 0);
        atomic_set(&candidate->nikeys, 0);
        candidate->uid = uid;
-       candidate->user_ns = get_user_ns(user_ns);
        candidate->qnkeys = 0;
        candidate->qnbytes = 0;
        spin_lock_init(&candidate->lock);
@@ -131,7 +125,6 @@ void key_user_put(struct key_user *user)
        if (atomic_dec_and_lock(&user->usage, &key_user_lock)) {
                rb_erase(&user->node, &key_user_tree);
                spin_unlock(&key_user_lock);
-               put_user_ns(user->user_ns);
 
                kfree(user);
        }
@@ -229,7 +222,7 @@ serial_exists:
  * key_alloc() calls don't race with module unloading.
  */
 struct key *key_alloc(struct key_type *type, const char *desc,
-                     uid_t uid, gid_t gid, const struct cred *cred,
+                     kuid_t uid, kgid_t gid, const struct cred *cred,
                      key_perm_t perm, unsigned long flags)
 {
        struct key_user *user = NULL;
@@ -253,16 +246,16 @@ struct key *key_alloc(struct key_type *type, const char *desc,
        quotalen = desclen + type->def_datalen;
 
        /* get hold of the key tracking for this user */
-       user = key_user_lookup(uid, cred->user_ns);
+       user = key_user_lookup(uid);
        if (!user)
                goto no_memory_1;
 
        /* check that the user's quota permits allocation of another key and
         * its description */
        if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
-               unsigned maxkeys = (uid == 0) ?
+               unsigned maxkeys = uid_eq(uid, GLOBAL_ROOT_UID) ?
                        key_quota_root_maxkeys : key_quota_maxkeys;
-               unsigned maxbytes = (uid == 0) ?
+               unsigned maxbytes = uid_eq(uid, GLOBAL_ROOT_UID) ?
                        key_quota_root_maxbytes : key_quota_maxbytes;
 
                spin_lock(&user->lock);
@@ -380,7 +373,7 @@ int key_payload_reserve(struct key *key, size_t datalen)
 
        /* contemplate the quota adjustment */
        if (delta != 0 && test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
-               unsigned maxbytes = (key->user->uid == 0) ?
+               unsigned maxbytes = uid_eq(key->user->uid, GLOBAL_ROOT_UID) ?
                        key_quota_root_maxbytes : key_quota_maxbytes;
 
                spin_lock(&key->user->lock);
@@ -598,7 +591,7 @@ void key_put(struct key *key)
                key_check(key);
 
                if (atomic_dec_and_test(&key->usage))
-                       queue_work(system_nrt_wq, &key_gc_work);
+                       schedule_work(&key_gc_work);
        }
 }
 EXPORT_SYMBOL(key_put);
index 6cfc6478863efa8545ffe463687ed3a3a081167f..305ecb76519c8a0aafa115d1847a8b24f3fb71bc 100644 (file)
@@ -569,8 +569,8 @@ okay:
        ret = snprintf(tmpbuf, PAGE_SIZE - 1,
                       "%s;%d;%d;%08x;%s",
                       key->type->name,
-                      key->uid,
-                      key->gid,
+                      from_kuid_munged(current_user_ns(), key->uid),
+                      from_kgid_munged(current_user_ns(), key->gid),
                       key->perm,
                       key->description ?: "");
 
@@ -766,15 +766,25 @@ error:
  *
  * If successful, 0 will be returned.
  */
-long keyctl_chown_key(key_serial_t id, uid_t uid, gid_t gid)
+long keyctl_chown_key(key_serial_t id, uid_t user, gid_t group)
 {
        struct key_user *newowner, *zapowner = NULL;
        struct key *key;
        key_ref_t key_ref;
        long ret;
+       kuid_t uid;
+       kgid_t gid;
+
+       uid = make_kuid(current_user_ns(), user);
+       gid = make_kgid(current_user_ns(), group);
+       ret = -EINVAL;
+       if ((user != (uid_t) -1) && !uid_valid(uid))
+               goto error;
+       if ((group != (gid_t) -1) && !gid_valid(gid))
+               goto error;
 
        ret = 0;
-       if (uid == (uid_t) -1 && gid == (gid_t) -1)
+       if (user == (uid_t) -1 && group == (gid_t) -1)
                goto error;
 
        key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE | KEY_LOOKUP_PARTIAL,
@@ -792,27 +802,27 @@ long keyctl_chown_key(key_serial_t id, uid_t uid, gid_t gid)
 
        if (!capable(CAP_SYS_ADMIN)) {
                /* only the sysadmin can chown a key to some other UID */
-               if (uid != (uid_t) -1 && key->uid != uid)
+               if (user != (uid_t) -1 && !uid_eq(key->uid, uid))
                        goto error_put;
 
                /* only the sysadmin can set the key's GID to a group other
                 * than one of those that the current process subscribes to */
-               if (gid != (gid_t) -1 && gid != key->gid && !in_group_p(gid))
+               if (group != (gid_t) -1 && !gid_eq(gid, key->gid) && !in_group_p(gid))
                        goto error_put;
        }
 
        /* change the UID */
-       if (uid != (uid_t) -1 && uid != key->uid) {
+       if (user != (uid_t) -1 && !uid_eq(uid, key->uid)) {
                ret = -ENOMEM;
-               newowner = key_user_lookup(uid, current_user_ns());
+               newowner = key_user_lookup(uid);
                if (!newowner)
                        goto error_put;
 
                /* transfer the quota burden to the new user */
                if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
-                       unsigned maxkeys = (uid == 0) ?
+                       unsigned maxkeys = uid_eq(uid, GLOBAL_ROOT_UID) ?
                                key_quota_root_maxkeys : key_quota_maxkeys;
-                       unsigned maxbytes = (uid == 0) ?
+                       unsigned maxbytes = uid_eq(uid, GLOBAL_ROOT_UID) ?
                                key_quota_root_maxbytes : key_quota_maxbytes;
 
                        spin_lock(&newowner->lock);
@@ -846,7 +856,7 @@ long keyctl_chown_key(key_serial_t id, uid_t uid, gid_t gid)
        }
 
        /* change the GID */
-       if (gid != (gid_t) -1)
+       if (group != (gid_t) -1)
                key->gid = gid;
 
        ret = 0;
@@ -897,7 +907,7 @@ long keyctl_setperm_key(key_serial_t id, key_perm_t perm)
        down_write(&key->sem);
 
        /* if we're not the sysadmin, we can only change a key that we own */
-       if (capable(CAP_SYS_ADMIN) || key->uid == current_fsuid()) {
+       if (capable(CAP_SYS_ADMIN) || uid_eq(key->uid, current_fsuid())) {
                key->perm = perm;
                ret = 0;
        }
@@ -1506,18 +1516,18 @@ long keyctl_session_to_parent(void)
 
        /* the parent must have the same effective ownership and mustn't be
         * SUID/SGID */
-       if (pcred->uid  != mycred->euid ||
-           pcred->euid != mycred->euid ||
-           pcred->suid != mycred->euid ||
-           pcred->gid  != mycred->egid ||
-           pcred->egid != mycred->egid ||
-           pcred->sgid != mycred->egid)
+       if (!uid_eq(pcred->uid,  mycred->euid) ||
+           !uid_eq(pcred->euid, mycred->euid) ||
+           !uid_eq(pcred->suid, mycred->euid) ||
+           !gid_eq(pcred->gid,  mycred->egid) ||
+           !gid_eq(pcred->egid, mycred->egid) ||
+           !gid_eq(pcred->sgid, mycred->egid))
                goto unlock;
 
        /* the keyrings must have the same UID */
        if ((pcred->tgcred->session_keyring &&
-            pcred->tgcred->session_keyring->uid != mycred->euid) ||
-           mycred->tgcred->session_keyring->uid != mycred->euid)
+            !uid_eq(pcred->tgcred->session_keyring->uid, mycred->euid)) ||
+           !uid_eq(mycred->tgcred->session_keyring->uid, mycred->euid))
                goto unlock;
 
        /* cancel an already pending keyring replacement */
index 81e7852d281d51d4faa2600b15b16ab290defa11..a5f5c4b6edc5c521b9e65508fcd50310980fce15 100644 (file)
@@ -256,7 +256,7 @@ error:
 /*
  * Allocate a keyring and link into the destination keyring.
  */
-struct key *keyring_alloc(const char *description, uid_t uid, gid_t gid,
+struct key *keyring_alloc(const char *description, kuid_t uid, kgid_t gid,
                          const struct cred *cred, unsigned long flags,
                          struct key *dest)
 {
@@ -612,7 +612,7 @@ struct key *find_keyring_by_name(const char *name, bool skip_perm_check)
                                    &keyring_name_hash[bucket],
                                    type_data.link
                                    ) {
-                       if (keyring->user->user_ns != current_user_ns())
+                       if (!kuid_has_mapping(current_user_ns(), keyring->user->uid))
                                continue;
 
                        if (test_bit(KEY_FLAG_REVOKED, &keyring->flags))
index 0b4d019e027d187d42272bbc906b70ca106776b6..efcc0c855a0db1284ee3debcb9be64823d9be3ed 100644 (file)
@@ -36,33 +36,27 @@ int key_task_permission(const key_ref_t key_ref, const struct cred *cred,
 
        key = key_ref_to_ptr(key_ref);
 
-       if (key->user->user_ns != cred->user_ns)
-               goto use_other_perms;
-
        /* use the second 8-bits of permissions for keys the caller owns */
-       if (key->uid == cred->fsuid) {
+       if (uid_eq(key->uid, cred->fsuid)) {
                kperm = key->perm >> 16;
                goto use_these_perms;
        }
 
        /* use the third 8-bits of permissions for keys the caller has a group
         * membership in common with */
-       if (key->gid != -1 && key->perm & KEY_GRP_ALL) {
-               if (key->gid == cred->fsgid) {
+       if (gid_valid(key->gid) && key->perm & KEY_GRP_ALL) {
+               if (gid_eq(key->gid, cred->fsgid)) {
                        kperm = key->perm >> 8;
                        goto use_these_perms;
                }
 
-               ret = groups_search(cred->group_info,
-                                   make_kgid(current_user_ns(), key->gid));
+               ret = groups_search(cred->group_info, key->gid);
                if (ret) {
                        kperm = key->perm >> 8;
                        goto use_these_perms;
                }
        }
 
-use_other_perms:
-
        /* otherwise use the least-significant 8-bits */
        kperm = key->perm;
 
index 30d1ddfd9cef07508e19b08a44b1be493705ec3c..217b6855e815cb851153fa08646d2bf145cee579 100644 (file)
@@ -88,14 +88,14 @@ __initcall(key_proc_init);
  */
 #ifdef CONFIG_KEYS_DEBUG_PROC_KEYS
 
-static struct rb_node *key_serial_next(struct rb_node *n)
+static struct rb_node *key_serial_next(struct seq_file *p, struct rb_node *n)
 {
-       struct user_namespace *user_ns = current_user_ns();
+       struct user_namespace *user_ns = seq_user_ns(p);
 
        n = rb_next(n);
        while (n) {
                struct key *key = rb_entry(n, struct key, serial_node);
-               if (key->user->user_ns == user_ns)
+               if (kuid_has_mapping(user_ns, key->user->uid))
                        break;
                n = rb_next(n);
        }
@@ -107,9 +107,9 @@ static int proc_keys_open(struct inode *inode, struct file *file)
        return seq_open(file, &proc_keys_ops);
 }
 
-static struct key *find_ge_key(key_serial_t id)
+static struct key *find_ge_key(struct seq_file *p, key_serial_t id)
 {
-       struct user_namespace *user_ns = current_user_ns();
+       struct user_namespace *user_ns = seq_user_ns(p);
        struct rb_node *n = key_serial_tree.rb_node;
        struct key *minkey = NULL;
 
@@ -132,7 +132,7 @@ static struct key *find_ge_key(key_serial_t id)
                return NULL;
 
        for (;;) {
-               if (minkey->user->user_ns == user_ns)
+               if (kuid_has_mapping(user_ns, minkey->user->uid))
                        return minkey;
                n = rb_next(&minkey->serial_node);
                if (!n)
@@ -151,7 +151,7 @@ static void *proc_keys_start(struct seq_file *p, loff_t *_pos)
 
        if (*_pos > INT_MAX)
                return NULL;
-       key = find_ge_key(pos);
+       key = find_ge_key(p, pos);
        if (!key)
                return NULL;
        *_pos = key->serial;
@@ -168,7 +168,7 @@ static void *proc_keys_next(struct seq_file *p, void *v, loff_t *_pos)
 {
        struct rb_node *n;
 
-       n = key_serial_next(v);
+       n = key_serial_next(p, v);
        if (n)
                *_pos = key_node_serial(n);
        return n;
@@ -254,8 +254,8 @@ static int proc_keys_show(struct seq_file *m, void *v)
                   atomic_read(&key->usage),
                   xbuf,
                   key->perm,
-                  key->uid,
-                  key->gid,
+                  from_kuid_munged(seq_user_ns(m), key->uid),
+                  from_kgid_munged(seq_user_ns(m), key->gid),
                   key->type->name);
 
 #undef showflag
@@ -270,26 +270,26 @@ static int proc_keys_show(struct seq_file *m, void *v)
 
 #endif /* CONFIG_KEYS_DEBUG_PROC_KEYS */
 
-static struct rb_node *__key_user_next(struct rb_node *n)
+static struct rb_node *__key_user_next(struct user_namespace *user_ns, struct rb_node *n)
 {
        while (n) {
                struct key_user *user = rb_entry(n, struct key_user, node);
-               if (user->user_ns == current_user_ns())
+               if (kuid_has_mapping(user_ns, user->uid))
                        break;
                n = rb_next(n);
        }
        return n;
 }
 
-static struct rb_node *key_user_next(struct rb_node *n)
+static struct rb_node *key_user_next(struct user_namespace *user_ns, struct rb_node *n)
 {
-       return __key_user_next(rb_next(n));
+       return __key_user_next(user_ns, rb_next(n));
 }
 
-static struct rb_node *key_user_first(struct rb_root *r)
+static struct rb_node *key_user_first(struct user_namespace *user_ns, struct rb_root *r)
 {
        struct rb_node *n = rb_first(r);
-       return __key_user_next(n);
+       return __key_user_next(user_ns, n);
 }
 
 /*
@@ -309,10 +309,10 @@ static void *proc_key_users_start(struct seq_file *p, loff_t *_pos)
 
        spin_lock(&key_user_lock);
 
-       _p = key_user_first(&key_user_tree);
+       _p = key_user_first(seq_user_ns(p), &key_user_tree);
        while (pos > 0 && _p) {
                pos--;
-               _p = key_user_next(_p);
+               _p = key_user_next(seq_user_ns(p), _p);
        }
 
        return _p;
@@ -321,7 +321,7 @@ static void *proc_key_users_start(struct seq_file *p, loff_t *_pos)
 static void *proc_key_users_next(struct seq_file *p, void *v, loff_t *_pos)
 {
        (*_pos)++;
-       return key_user_next((struct rb_node *)v);
+       return key_user_next(seq_user_ns(p), (struct rb_node *)v);
 }
 
 static void proc_key_users_stop(struct seq_file *p, void *v)
@@ -334,13 +334,13 @@ static int proc_key_users_show(struct seq_file *m, void *v)
 {
        struct rb_node *_p = v;
        struct key_user *user = rb_entry(_p, struct key_user, node);
-       unsigned maxkeys = (user->uid == 0) ?
+       unsigned maxkeys = uid_eq(user->uid, GLOBAL_ROOT_UID) ?
                key_quota_root_maxkeys : key_quota_maxkeys;
-       unsigned maxbytes = (user->uid == 0) ?
+       unsigned maxbytes = uid_eq(user->uid, GLOBAL_ROOT_UID) ?
                key_quota_root_maxbytes : key_quota_maxbytes;
 
        seq_printf(m, "%5u: %5d %d/%d %d/%d %d/%d\n",
-                  user->uid,
+                  from_kuid_munged(seq_user_ns(m), user->uid),
                   atomic_read(&user->usage),
                   atomic_read(&user->nkeys),
                   atomic_read(&user->nikeys),
index 54339cfd6734d46927e8e9228019891e7130c272..a58f712605d83105b6d32e1b966436a454331a90 100644 (file)
@@ -34,8 +34,7 @@ struct key_user root_key_user = {
        .lock           = __SPIN_LOCK_UNLOCKED(root_key_user.lock),
        .nkeys          = ATOMIC_INIT(2),
        .nikeys         = ATOMIC_INIT(2),
-       .uid            = 0,
-       .user_ns        = &init_user_ns,
+       .uid            = GLOBAL_ROOT_UID,
 };
 
 /*
@@ -48,11 +47,13 @@ int install_user_keyrings(void)
        struct key *uid_keyring, *session_keyring;
        char buf[20];
        int ret;
+       uid_t uid;
 
        cred = current_cred();
        user = cred->user;
+       uid = from_kuid(cred->user_ns, user->uid);
 
-       kenter("%p{%u}", user, user->uid);
+       kenter("%p{%u}", user, uid);
 
        if (user->uid_keyring) {
                kleave(" = 0 [exist]");
@@ -67,11 +68,11 @@ int install_user_keyrings(void)
                 * - there may be one in existence already as it may have been
                 *   pinned by a session, but the user_struct pointing to it
                 *   may have been destroyed by setuid */
-               sprintf(buf, "_uid.%u", user->uid);
+               sprintf(buf, "_uid.%u", uid);
 
                uid_keyring = find_keyring_by_name(buf, true);
                if (IS_ERR(uid_keyring)) {
-                       uid_keyring = keyring_alloc(buf, user->uid, (gid_t) -1,
+                       uid_keyring = keyring_alloc(buf, user->uid, INVALID_GID,
                                                    cred, KEY_ALLOC_IN_QUOTA,
                                                    NULL);
                        if (IS_ERR(uid_keyring)) {
@@ -82,12 +83,12 @@ int install_user_keyrings(void)
 
                /* get a default session keyring (which might also exist
                 * already) */
-               sprintf(buf, "_uid_ses.%u", user->uid);
+               sprintf(buf, "_uid_ses.%u", uid);
 
                session_keyring = find_keyring_by_name(buf, true);
                if (IS_ERR(session_keyring)) {
                        session_keyring =
-                               keyring_alloc(buf, user->uid, (gid_t) -1,
+                               keyring_alloc(buf, user->uid, INVALID_GID,
                                              cred, KEY_ALLOC_IN_QUOTA, NULL);
                        if (IS_ERR(session_keyring)) {
                                ret = PTR_ERR(session_keyring);
index 000e7501752022089b82efeb153115498e55da60..66e21184b559e2f0097fe73c970a7da17e2b951d 100644 (file)
@@ -139,8 +139,8 @@ static int call_sbin_request_key(struct key_construction *cons,
                goto error_link;
 
        /* record the UID and GID */
-       sprintf(uid_str, "%d", cred->fsuid);
-       sprintf(gid_str, "%d", cred->fsgid);
+       sprintf(uid_str, "%d", from_kuid(&init_user_ns, cred->fsuid));
+       sprintf(gid_str, "%d", from_kgid(&init_user_ns, cred->fsgid));
 
        /* we say which key is under construction */
        sprintf(key_str, "%d", key->serial);
@@ -442,7 +442,7 @@ static struct key *construct_key_and_link(struct key_type *type,
 
        kenter("");
 
-       user = key_user_lookup(current_fsuid(), current_user_ns());
+       user = key_user_lookup(current_fsuid());
        if (!user)
                return ERR_PTR(-ENOMEM);
 
index 2d5d041f2049f323e5072c701f068d695f3f2c6c..3f163d0489ad2e8705a8f57c4751038b9d7d6f36 100644 (file)
@@ -368,38 +368,6 @@ static int trusted_tpm_send(const u32 chip_num, unsigned char *cmd,
        return rc;
 }
 
-/*
- * get a random value from TPM
- */
-static int tpm_get_random(struct tpm_buf *tb, unsigned char *buf, uint32_t len)
-{
-       int ret;
-
-       INIT_BUF(tb);
-       store16(tb, TPM_TAG_RQU_COMMAND);
-       store32(tb, TPM_GETRANDOM_SIZE);
-       store32(tb, TPM_ORD_GETRANDOM);
-       store32(tb, len);
-       ret = trusted_tpm_send(TPM_ANY_NUM, tb->data, sizeof tb->data);
-       if (!ret)
-               memcpy(buf, tb->data + TPM_GETRANDOM_SIZE, len);
-       return ret;
-}
-
-static int my_get_random(unsigned char *buf, int len)
-{
-       struct tpm_buf *tb;
-       int ret;
-
-       tb = kmalloc(sizeof *tb, GFP_KERNEL);
-       if (!tb)
-               return -ENOMEM;
-       ret = tpm_get_random(tb, buf, len);
-
-       kfree(tb);
-       return ret;
-}
-
 /*
  * Lock a trusted key, by extending a selected PCR.
  *
@@ -413,8 +381,8 @@ static int pcrlock(const int pcrnum)
 
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
-       ret = my_get_random(hash, SHA1_DIGEST_SIZE);
-       if (ret < 0)
+       ret = tpm_get_random(TPM_ANY_NUM, hash, SHA1_DIGEST_SIZE);
+       if (ret != SHA1_DIGEST_SIZE)
                return ret;
        return tpm_pcr_extend(TPM_ANY_NUM, pcrnum, hash) ? -EINVAL : 0;
 }
@@ -429,8 +397,8 @@ static int osap(struct tpm_buf *tb, struct osapsess *s,
        unsigned char ononce[TPM_NONCE_SIZE];
        int ret;
 
-       ret = tpm_get_random(tb, ononce, TPM_NONCE_SIZE);
-       if (ret < 0)
+       ret = tpm_get_random(TPM_ANY_NUM, ononce, TPM_NONCE_SIZE);
+       if (ret != TPM_NONCE_SIZE)
                return ret;
 
        INIT_BUF(tb);
@@ -524,8 +492,8 @@ static int tpm_seal(struct tpm_buf *tb, uint16_t keytype,
        if (ret < 0)
                goto out;
 
-       ret = tpm_get_random(tb, td->nonceodd, TPM_NONCE_SIZE);
-       if (ret < 0)
+       ret = tpm_get_random(TPM_ANY_NUM, td->nonceodd, TPM_NONCE_SIZE);
+       if (ret != TPM_NONCE_SIZE)
                goto out;
        ordinal = htonl(TPM_ORD_SEAL);
        datsize = htonl(datalen);
@@ -634,8 +602,8 @@ static int tpm_unseal(struct tpm_buf *tb,
 
        ordinal = htonl(TPM_ORD_UNSEAL);
        keyhndl = htonl(SRKHANDLE);
-       ret = tpm_get_random(tb, nonceodd, TPM_NONCE_SIZE);
-       if (ret < 0) {
+       ret = tpm_get_random(TPM_ANY_NUM, nonceodd, TPM_NONCE_SIZE);
+       if (ret != TPM_NONCE_SIZE) {
                pr_info("trusted_key: tpm_get_random failed (%d)\n", ret);
                return ret;
        }
@@ -935,6 +903,7 @@ static int trusted_instantiate(struct key *key, const void *data,
        char *datablob;
        int ret = 0;
        int key_cmd;
+       size_t key_len;
 
        if (datalen <= 0 || datalen > 32767 || !data)
                return -EINVAL;
@@ -974,8 +943,9 @@ static int trusted_instantiate(struct key *key, const void *data,
                        pr_info("trusted_key: key_unseal failed (%d)\n", ret);
                break;
        case Opt_new:
-               ret = my_get_random(payload->key, payload->key_len);
-               if (ret < 0) {
+               key_len = payload->key_len;
+               ret = tpm_get_random(TPM_ANY_NUM, payload->key, key_len);
+               if (ret != key_len) {
                        pr_info("trusted_key: key_create failed (%d)\n", ret);
                        goto out;
                }
index 860aeb349cb337bbccf4346d99120d4d1fd51c90..3724029d0f6dd4407a46ff7e4fff23e05f47d445 100644 (file)
@@ -136,11 +136,23 @@ int __init register_security(struct security_operations *ops)
 
 int security_ptrace_access_check(struct task_struct *child, unsigned int mode)
 {
+#ifdef CONFIG_SECURITY_YAMA_STACKED
+       int rc;
+       rc = yama_ptrace_access_check(child, mode);
+       if (rc)
+               return rc;
+#endif
        return security_ops->ptrace_access_check(child, mode);
 }
 
 int security_ptrace_traceme(struct task_struct *parent)
 {
+#ifdef CONFIG_SECURITY_YAMA_STACKED
+       int rc;
+       rc = yama_ptrace_traceme(parent);
+       if (rc)
+               return rc;
+#endif
        return security_ops->ptrace_traceme(parent);
 }
 
@@ -434,7 +446,7 @@ int security_path_chmod(struct path *path, umode_t mode)
        return security_ops->path_chmod(path, mode);
 }
 
-int security_path_chown(struct path *path, uid_t uid, gid_t gid)
+int security_path_chown(struct path *path, kuid_t uid, kgid_t gid)
 {
        if (unlikely(IS_PRIVATE(path->dentry->d_inode)))
                return 0;
@@ -559,6 +571,9 @@ int security_inode_setxattr(struct dentry *dentry, const char *name,
        if (unlikely(IS_PRIVATE(dentry->d_inode)))
                return 0;
        ret = security_ops->inode_setxattr(dentry, name, value, size, flags);
+       if (ret)
+               return ret;
+       ret = ima_inode_setxattr(dentry, name, value, size);
        if (ret)
                return ret;
        return evm_inode_setxattr(dentry, name, value, size);
@@ -594,6 +609,9 @@ int security_inode_removexattr(struct dentry *dentry, const char *name)
        if (unlikely(IS_PRIVATE(dentry->d_inode)))
                return 0;
        ret = security_ops->inode_removexattr(dentry, name);
+       if (ret)
+               return ret;
+       ret = ima_inode_removexattr(dentry, name);
        if (ret)
                return ret;
        return evm_inode_removexattr(dentry, name);
@@ -761,6 +779,9 @@ int security_task_create(unsigned long clone_flags)
 
 void security_task_free(struct task_struct *task)
 {
+#ifdef CONFIG_SECURITY_YAMA_STACKED
+       yama_task_free(task);
+#endif
        security_ops->task_free(task);
 }
 
@@ -876,6 +897,12 @@ int security_task_wait(struct task_struct *p)
 int security_task_prctl(int option, unsigned long arg2, unsigned long arg3,
                         unsigned long arg4, unsigned long arg5)
 {
+#ifdef CONFIG_SECURITY_YAMA_STACKED
+       int rc;
+       rc = yama_task_prctl(option, arg2, arg3, arg4, arg5);
+       if (rc != -ENOSYS)
+               return rc;
+#endif
        return security_ops->task_prctl(option, arg2, arg3, arg4, arg5);
 }
 
index 6c77f63c759198ead061712fb3bad1166c4ace2a..651d8456611a87c4a8b77141574a81b992b80f53 100644 (file)
@@ -2088,15 +2088,19 @@ static int selinux_bprm_secureexec(struct linux_binprm *bprm)
        return (atsecure || cap_bprm_secureexec(bprm));
 }
 
+static int match_file(const void *p, struct file *file, unsigned fd)
+{
+       return file_has_perm(p, file, file_to_av(file)) ? fd + 1 : 0;
+}
+
 /* Derived from fs/exec.c:flush_old_files. */
 static inline void flush_unauthorized_files(const struct cred *cred,
                                            struct files_struct *files)
 {
        struct file *file, *devnull = NULL;
        struct tty_struct *tty;
-       struct fdtable *fdt;
-       long j = -1;
        int drop_tty = 0;
+       unsigned n;
 
        tty = get_current_tty();
        if (tty) {
@@ -2123,58 +2127,23 @@ static inline void flush_unauthorized_files(const struct cred *cred,
                no_tty();
 
        /* Revalidate access to inherited open files. */
-       spin_lock(&files->file_lock);
-       for (;;) {
-               unsigned long set, i;
-               int fd;
-
-               j++;
-               i = j * BITS_PER_LONG;
-               fdt = files_fdtable(files);
-               if (i >= fdt->max_fds)
-                       break;
-               set = fdt->open_fds[j];
-               if (!set)
-                       continue;
-               spin_unlock(&files->file_lock);
-               for ( ; set ; i++, set >>= 1) {
-                       if (set & 1) {
-                               file = fget(i);
-                               if (!file)
-                                       continue;
-                               if (file_has_perm(cred,
-                                                 file,
-                                                 file_to_av(file))) {
-                                       sys_close(i);
-                                       fd = get_unused_fd();
-                                       if (fd != i) {
-                                               if (fd >= 0)
-                                                       put_unused_fd(fd);
-                                               fput(file);
-                                               continue;
-                                       }
-                                       if (devnull) {
-                                               get_file(devnull);
-                                       } else {
-                                               devnull = dentry_open(
-                                                       &selinux_null,
-                                                       O_RDWR, cred);
-                                               if (IS_ERR(devnull)) {
-                                                       devnull = NULL;
-                                                       put_unused_fd(fd);
-                                                       fput(file);
-                                                       continue;
-                                               }
-                                       }
-                                       fd_install(fd, devnull);
-                               }
-                               fput(file);
-                       }
-               }
-               spin_lock(&files->file_lock);
+       n = iterate_fd(files, 0, match_file, cred);
+       if (!n) /* none found? */
+               return;
 
+       devnull = dentry_open(&selinux_null, O_RDWR, cred);
+       if (!IS_ERR(devnull)) {
+               /* replace all the matching ones with this */
+               do {
+                       replace_fd(n - 1, get_file(devnull), 0);
+               } while ((n = iterate_fd(files, n, match_file, cred)) != 0);
+               fput(devnull);
+       } else {
+               /* just close all the matching ones */
+               do {
+                       replace_fd(n - 1, NULL, 0);
+               } while ((n = iterate_fd(files, n, match_file, cred)) != 0);
        }
-       spin_unlock(&files->file_lock);
 }
 
 /*
index 8a77725423e0848e671a1f5bdb021fa414de6059..14d810ead42078482807666a41902e4cb24161c0 100644 (file)
@@ -113,13 +113,12 @@ static int __init selnl_init(void)
 {
        struct netlink_kernel_cfg cfg = {
                .groups = SELNLGRP_MAX,
+               .flags  = NL_CFG_F_NONROOT_RECV,
        };
 
-       selnl = netlink_kernel_create(&init_net, NETLINK_SELINUX,
-                                     THIS_MODULE, &cfg);
+       selnl = netlink_kernel_create(&init_net, NETLINK_SELINUX, &cfg);
        if (selnl == NULL)
                panic("SELinux:  Cannot create netlink socket.");
-       netlink_set_nonroot(NETLINK_SELINUX, NL_NONROOT_RECV);
        return 0;
 }
 
index 298e695d6822577e80e5a03a3b5b77d78fe9ebc8..55af8c5b57e645d821073e197c8dd5e8cb4c3b9a 100644 (file)
@@ -174,7 +174,7 @@ static ssize_t sel_write_enforce(struct file *file, const char __user *buf,
                audit_log(current->audit_context, GFP_KERNEL, AUDIT_MAC_STATUS,
                        "enforcing=%d old_enforcing=%d auid=%u ses=%u",
                        new_value, selinux_enforcing,
-                       audit_get_loginuid(current),
+                       from_kuid(&init_user_ns, audit_get_loginuid(current)),
                        audit_get_sessionid(current));
                selinux_enforcing = new_value;
                if (selinux_enforcing)
@@ -305,7 +305,7 @@ static ssize_t sel_write_disable(struct file *file, const char __user *buf,
                        goto out;
                audit_log(current->audit_context, GFP_KERNEL, AUDIT_MAC_STATUS,
                        "selinux=0 auid=%u ses=%u",
-                       audit_get_loginuid(current),
+                       from_kuid(&init_user_ns, audit_get_loginuid(current)),
                        audit_get_sessionid(current));
        }
 
@@ -551,7 +551,7 @@ static ssize_t sel_write_load(struct file *file, const char __user *buf,
 out1:
        audit_log(current->audit_context, GFP_KERNEL, AUDIT_MAC_POLICY_LOAD,
                "policy loaded auid=%u ses=%u",
-               audit_get_loginuid(current),
+               from_kuid(&init_user_ns, audit_get_loginuid(current)),
                audit_get_sessionid(current));
 out:
        mutex_unlock(&sel_mutex);
index 4321b8fc886337946bd3366410a4815dfbd6dc10..b4feecc3fe0110d10bbdc183c369a03ab8495a6c 100644 (file)
@@ -2440,7 +2440,7 @@ int security_set_bools(int len, int *values)
                                sym_name(&policydb, SYM_BOOLS, i),
                                !!values[i],
                                policydb.bool_val_to_struct[i]->state,
-                               audit_get_loginuid(current),
+                               from_kuid(&init_user_ns, audit_get_loginuid(current)),
                                audit_get_sessionid(current));
                }
                if (values[i])
index 8221514cc997303f5d5d8844198c233fe79b64ac..2874c73167831f6f72cdb8aefffbf29c3aa0f7a2 100644 (file)
@@ -1691,40 +1691,19 @@ static int smack_task_kill(struct task_struct *p, struct siginfo *info,
  * smack_task_wait - Smack access check for waiting
  * @p: task to wait for
  *
- * Returns 0 if current can wait for p, error code otherwise
+ * Returns 0
  */
 static int smack_task_wait(struct task_struct *p)
 {
-       struct smk_audit_info ad;
-       char *sp = smk_of_current();
-       char *tsp = smk_of_forked(task_security(p));
-       int rc;
-
-       /* we don't log here, we can be overriden */
-       rc = smk_access(tsp, sp, MAY_WRITE, NULL);
-       if (rc == 0)
-               goto out_log;
-
        /*
-        * Allow the operation to succeed if either task
-        * has privilege to perform operations that might
-        * account for the smack labels having gotten to
-        * be different in the first place.
-        *
-        * This breaks the strict subject/object access
-        * control ideal, taking the object's privilege
-        * state into account in the decision as well as
-        * the smack value.
+        * Allow the operation to succeed.
+        * Zombies are bad.
+        * In userless environments (e.g. phones) programs
+        * get marked with SMACK64EXEC and even if the parent
+        * and child shouldn't be talking the parent still
+        * may expect to know when the child exits.
         */
-       if (smack_privileged(CAP_MAC_OVERRIDE) ||
-           has_capability(p, CAP_MAC_OVERRIDE))
-               rc = 0;
-       /* we log only if we didn't get overriden */
- out_log:
-       smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_TASK);
-       smk_ad_setfield_u_tsk(&ad, p);
-       smack_log(tsp, sp, MAY_WRITE, rc, &ad);
-       return rc;
+       return 0;
 }
 
 /**
@@ -2705,9 +2684,7 @@ static int smack_getprocattr(struct task_struct *p, char *name, char **value)
 static int smack_setprocattr(struct task_struct *p, char *name,
                             void *value, size_t size)
 {
-       int rc;
        struct task_smack *tsp;
-       struct task_smack *oldtsp;
        struct cred *new;
        char *newsmack;
 
@@ -2737,21 +2714,13 @@ static int smack_setprocattr(struct task_struct *p, char *name,
        if (newsmack == smack_known_web.smk_known)
                return -EPERM;
 
-       oldtsp = p->cred->security;
        new = prepare_creds();
        if (new == NULL)
                return -ENOMEM;
 
-       tsp = new_task_smack(newsmack, oldtsp->smk_forked, GFP_KERNEL);
-       if (tsp == NULL) {
-               kfree(new);
-               return -ENOMEM;
-       }
-       rc = smk_copy_rules(&tsp->smk_rules, &oldtsp->smk_rules, GFP_KERNEL);
-       if (rc != 0)
-               return rc;
+       tsp = new->security;
+       tsp->smk_task = newsmack;
 
-       new->security = tsp;
        commit_creds(new);
        return size;
 }
index b1b768e4049af3304d457451f06c8fa834909b89..99929a50093aa38fdd2917974f68df9c267b9933 100644 (file)
@@ -49,6 +49,7 @@ enum smk_inos {
        SMK_LOAD_SELF2  = 15,   /* load task specific rules with long labels */
        SMK_ACCESS2     = 16,   /* make an access check with long labels */
        SMK_CIPSO2      = 17,   /* load long label -> CIPSO mapping */
+       SMK_REVOKE_SUBJ = 18,   /* set rules with subject label to '-' */
 };
 
 /*
@@ -1991,6 +1992,77 @@ static const struct file_operations smk_access2_ops = {
        .llseek         = generic_file_llseek,
 };
 
+/**
+ * smk_write_revoke_subj - write() for /smack/revoke-subject
+ * @file: file pointer
+ * @buf: data from user space
+ * @count: bytes sent
+ * @ppos: where to start - must be 0
+ */
+static ssize_t smk_write_revoke_subj(struct file *file, const char __user *buf,
+                               size_t count, loff_t *ppos)
+{
+       char *data = NULL;
+       const char *cp = NULL;
+       struct smack_known *skp;
+       struct smack_rule *sp;
+       struct list_head *rule_list;
+       struct mutex *rule_lock;
+       int rc = count;
+
+       if (*ppos != 0)
+               return -EINVAL;
+
+       if (!smack_privileged(CAP_MAC_ADMIN))
+               return -EPERM;
+
+       if (count == 0 || count > SMK_LONGLABEL)
+               return -EINVAL;
+
+       data = kzalloc(count, GFP_KERNEL);
+       if (data == NULL)
+               return -ENOMEM;
+
+       if (copy_from_user(data, buf, count) != 0) {
+               rc = -EFAULT;
+               goto free_out;
+       }
+
+       cp = smk_parse_smack(data, count);
+       if (cp == NULL) {
+               rc = -EINVAL;
+               goto free_out;
+       }
+
+       skp = smk_find_entry(cp);
+       if (skp == NULL) {
+               rc = -EINVAL;
+               goto free_out;
+       }
+
+       rule_list = &skp->smk_rules;
+       rule_lock = &skp->smk_rules_lock;
+
+       mutex_lock(rule_lock);
+
+       list_for_each_entry_rcu(sp, rule_list, list)
+               sp->smk_access = 0;
+
+       mutex_unlock(rule_lock);
+
+free_out:
+       kfree(data);
+       kfree(cp);
+       return rc;
+}
+
+static const struct file_operations smk_revoke_subj_ops = {
+       .write          = smk_write_revoke_subj,
+       .read           = simple_transaction_read,
+       .release        = simple_transaction_release,
+       .llseek         = generic_file_llseek,
+};
+
 /**
  * smk_fill_super - fill the /smackfs superblock
  * @sb: the empty superblock
@@ -2037,6 +2109,9 @@ static int smk_fill_super(struct super_block *sb, void *data, int silent)
                        "access2", &smk_access2_ops, S_IRUGO|S_IWUGO},
                [SMK_CIPSO2] = {
                        "cipso2", &smk_cipso2_ops, S_IRUGO|S_IWUSR},
+               [SMK_REVOKE_SUBJ] = {
+                       "revoke-subject", &smk_revoke_subj_ops,
+                       S_IRUGO|S_IWUSR},
                /* last one */
                        {""}
        };
index 7ef9fa3e37e0c2c9f9c259b66952e25b827d52e4..c1b00375c9ad8c23edcf27ab0f5b80820d0ad99e 100644 (file)
@@ -168,9 +168,14 @@ static char *tomoyo_print_header(struct tomoyo_request_info *r)
                       stamp.day, stamp.hour, stamp.min, stamp.sec, r->profile,
                       tomoyo_mode[r->mode], tomoyo_yesno(r->granted), gpid,
                       tomoyo_sys_getpid(), tomoyo_sys_getppid(),
-                      current_uid(), current_gid(), current_euid(),
-                      current_egid(), current_suid(), current_sgid(),
-                      current_fsuid(), current_fsgid());
+                      from_kuid(&init_user_ns, current_uid()),
+                      from_kgid(&init_user_ns, current_gid()),
+                      from_kuid(&init_user_ns, current_euid()),
+                      from_kgid(&init_user_ns, current_egid()),
+                      from_kuid(&init_user_ns, current_suid()),
+                      from_kgid(&init_user_ns, current_sgid()),
+                      from_kuid(&init_user_ns, current_fsuid()),
+                      from_kgid(&init_user_ns, current_fsgid()));
        if (!obj)
                goto no_obj_info;
        if (!obj->validate_done) {
@@ -191,15 +196,19 @@ static char *tomoyo_print_header(struct tomoyo_request_info *r)
                                        tomoyo_buffer_len - 1 - pos,
                                        " path%u.parent={ uid=%u gid=%u "
                                        "ino=%lu perm=0%o }", (i >> 1) + 1,
-                                       stat->uid, stat->gid, (unsigned long)
-                                       stat->ino, stat->mode & S_IALLUGO);
+                                       from_kuid(&init_user_ns, stat->uid),
+                                       from_kgid(&init_user_ns, stat->gid),
+                                       (unsigned long)stat->ino,
+                                       stat->mode & S_IALLUGO);
                        continue;
                }
                pos += snprintf(buffer + pos, tomoyo_buffer_len - 1 - pos,
                                " path%u={ uid=%u gid=%u ino=%lu major=%u"
                                " minor=%u perm=0%o type=%s", (i >> 1) + 1,
-                               stat->uid, stat->gid, (unsigned long)
-                               stat->ino, MAJOR(dev), MINOR(dev),
+                               from_kuid(&init_user_ns, stat->uid),
+                               from_kgid(&init_user_ns, stat->gid),
+                               (unsigned long)stat->ino,
+                               MAJOR(dev), MINOR(dev),
                                mode & S_IALLUGO, tomoyo_filetype(mode));
                if (S_ISCHR(mode) || S_ISBLK(mode)) {
                        dev = stat->rdev;
index 2e0f12c629386da35b3b26fc37dfb973db57440d..f89a0333b8134b25d71e53bed63acfe3643043dd 100644 (file)
@@ -925,7 +925,9 @@ static bool tomoyo_manager(void)
 
        if (!tomoyo_policy_loaded)
                return true;
-       if (!tomoyo_manage_by_non_root && (task->cred->uid || task->cred->euid))
+       if (!tomoyo_manage_by_non_root &&
+           (!uid_eq(task->cred->uid,  GLOBAL_ROOT_UID) ||
+            !uid_eq(task->cred->euid, GLOBAL_ROOT_UID)))
                return false;
        exe = tomoyo_get_exe();
        if (!exe)
index 75e4dc1c02a089da337d3f241b5ead704aae1c1c..af010b62d544207dd93502a4dd66179684dbc7e5 100644 (file)
@@ -561,8 +561,8 @@ struct tomoyo_address_group {
 
 /* Subset of "struct stat". Used by conditional ACL and audit logs. */
 struct tomoyo_mini_stat {
-       uid_t uid;
-       gid_t gid;
+       kuid_t uid;
+       kgid_t gid;
        ino_t ino;
        umode_t mode;
        dev_t dev;
index 986330b8c73ef024bff71d0cd74b7c2d7701358c..63681e8be62868cb69e691619d9aa4068babb3f8 100644 (file)
@@ -813,28 +813,28 @@ bool tomoyo_condition(struct tomoyo_request_info *r,
                        unsigned long value = 0;
                        switch (index) {
                        case TOMOYO_TASK_UID:
-                               value = current_uid();
+                               value = from_kuid(&init_user_ns, current_uid());
                                break;
                        case TOMOYO_TASK_EUID:
-                               value = current_euid();
+                               value = from_kuid(&init_user_ns, current_euid());
                                break;
                        case TOMOYO_TASK_SUID:
-                               value = current_suid();
+                               value = from_kuid(&init_user_ns, current_suid());
                                break;
                        case TOMOYO_TASK_FSUID:
-                               value = current_fsuid();
+                               value = from_kuid(&init_user_ns, current_fsuid());
                                break;
                        case TOMOYO_TASK_GID:
-                               value = current_gid();
+                               value = from_kgid(&init_user_ns, current_gid());
                                break;
                        case TOMOYO_TASK_EGID:
-                               value = current_egid();
+                               value = from_kgid(&init_user_ns, current_egid());
                                break;
                        case TOMOYO_TASK_SGID:
-                               value = current_sgid();
+                               value = from_kgid(&init_user_ns, current_sgid());
                                break;
                        case TOMOYO_TASK_FSGID:
-                               value = current_fsgid();
+                               value = from_kgid(&init_user_ns, current_fsgid());
                                break;
                        case TOMOYO_TASK_PID:
                                value = tomoyo_sys_getpid();
@@ -970,13 +970,13 @@ bool tomoyo_condition(struct tomoyo_request_info *r,
                                        case TOMOYO_PATH2_UID:
                                        case TOMOYO_PATH1_PARENT_UID:
                                        case TOMOYO_PATH2_PARENT_UID:
-                                               value = stat->uid;
+                                               value = from_kuid(&init_user_ns, stat->uid);
                                                break;
                                        case TOMOYO_PATH1_GID:
                                        case TOMOYO_PATH2_GID:
                                        case TOMOYO_PATH1_PARENT_GID:
                                        case TOMOYO_PATH2_PARENT_GID:
-                                               value = stat->gid;
+                                               value = from_kgid(&init_user_ns, stat->gid);
                                                break;
                                        case TOMOYO_PATH1_INO:
                                        case TOMOYO_PATH2_INO:
index c2d04a50f76af8b364040c7255e924c4e35cb910..d88eb3a046ed87fed289a4689e74aab68e969e2f 100644 (file)
@@ -373,13 +373,15 @@ static int tomoyo_path_chmod(struct path *path, umode_t mode)
  *
  * Returns 0 on success, negative value otherwise.
  */
-static int tomoyo_path_chown(struct path *path, uid_t uid, gid_t gid)
+static int tomoyo_path_chown(struct path *path, kuid_t uid, kgid_t gid)
 {
        int error = 0;
-       if (uid != (uid_t) -1)
-               error = tomoyo_path_number_perm(TOMOYO_TYPE_CHOWN, path, uid);
-       if (!error && gid != (gid_t) -1)
-               error = tomoyo_path_number_perm(TOMOYO_TYPE_CHGRP, path, gid);
+       if (uid_valid(uid))
+               error = tomoyo_path_number_perm(TOMOYO_TYPE_CHOWN, path,
+                                               from_kuid(&init_user_ns, uid));
+       if (!error && gid_valid(gid))
+               error = tomoyo_path_number_perm(TOMOYO_TYPE_CHGRP, path,
+                                               from_kgid(&init_user_ns, gid));
        return error;
 }
 
index 51d6709d8bbd3fdd065ae119c3608149ee94d1cd..20ef5143c0c06bbedbfaf4114bf08c4579d75011 100644 (file)
@@ -11,3 +11,11 @@ config SECURITY_YAMA
          Further information can be found in Documentation/security/Yama.txt.
 
          If you are unsure how to answer this question, answer N.
+
+config SECURITY_YAMA_STACKED
+       bool "Yama stacked with other LSMs"
+       depends on SECURITY_YAMA
+       default n
+       help
+         When Yama is built into the kernel, force it to stack with the
+         selected primary LSM.
index 0cc99a3ea42d65c81188c302626681fae6044473..b4c29848b49d2ab2741ade9f1aece147ac02e6a3 100644 (file)
@@ -100,7 +100,7 @@ static void yama_ptracer_del(struct task_struct *tracer,
  * yama_task_free - check for task_pid to remove from exception list
  * @task: task being removed
  */
-static void yama_task_free(struct task_struct *task)
+void yama_task_free(struct task_struct *task)
 {
        yama_ptracer_del(task, task);
 }
@@ -116,7 +116,7 @@ static void yama_task_free(struct task_struct *task)
  * Return 0 on success, -ve on error.  -ENOSYS is returned when Yama
  * does not handle the given option.
  */
-static int yama_task_prctl(int option, unsigned long arg2, unsigned long arg3,
+int yama_task_prctl(int option, unsigned long arg2, unsigned long arg3,
                           unsigned long arg4, unsigned long arg5)
 {
        int rc;
@@ -143,7 +143,7 @@ static int yama_task_prctl(int option, unsigned long arg2, unsigned long arg3,
                if (arg2 == 0) {
                        yama_ptracer_del(NULL, myself);
                        rc = 0;
-               } else if (arg2 == PR_SET_PTRACER_ANY) {
+               } else if (arg2 == PR_SET_PTRACER_ANY || (int)arg2 == -1) {
                        rc = yama_ptracer_add(NULL, myself);
                } else {
                        struct task_struct *tracer;
@@ -243,7 +243,7 @@ static int ptracer_exception_found(struct task_struct *tracer,
  *
  * Returns 0 if following the ptrace is allowed, -ve on error.
  */
-static int yama_ptrace_access_check(struct task_struct *child,
+int yama_ptrace_access_check(struct task_struct *child,
                                    unsigned int mode)
 {
        int rc;
@@ -293,7 +293,7 @@ static int yama_ptrace_access_check(struct task_struct *child,
  *
  * Returns 0 if following the ptrace is allowed, -ve on error.
  */
-static int yama_ptrace_traceme(struct task_struct *parent)
+int yama_ptrace_traceme(struct task_struct *parent)
 {
        int rc;
 
@@ -324,6 +324,7 @@ static int yama_ptrace_traceme(struct task_struct *parent)
        return rc;
 }
 
+#ifndef CONFIG_SECURITY_YAMA_STACKED
 static struct security_operations yama_ops = {
        .name =                 "yama",
 
@@ -332,6 +333,7 @@ static struct security_operations yama_ops = {
        .task_prctl =           yama_task_prctl,
        .task_free =            yama_task_free,
 };
+#endif
 
 #ifdef CONFIG_SYSCTL
 static int yama_dointvec_minmax(struct ctl_table *table, int write,
@@ -378,13 +380,17 @@ static struct ctl_table yama_sysctl_table[] = {
 
 static __init int yama_init(void)
 {
+#ifndef CONFIG_SECURITY_YAMA_STACKED
        if (!security_module_enable(&yama_ops))
                return 0;
+#endif
 
        printk(KERN_INFO "Yama: becoming mindful.\n");
 
+#ifndef CONFIG_SECURITY_YAMA_STACKED
        if (register_security(&yama_ops))
                panic("Yama: kernel registration failed.\n");
+#endif
 
 #ifdef CONFIG_SYSCTL
        if (!register_sysctl_paths(yama_sysctl_path, yama_sysctl_table))
index 53b5ada8f7c36fd5199366662cc0ce5c5233e66c..20554eff5a21947fe3806902d6825534868e702e 100644 (file)
@@ -1563,25 +1563,25 @@ static int snd_pcm_drop(struct snd_pcm_substream *substream)
 
 
 /* WARNING: Don't forget to fput back the file */
-static struct file *snd_pcm_file_fd(int fd)
+static struct file *snd_pcm_file_fd(int fd, int *fput_needed)
 {
        struct file *file;
        struct inode *inode;
        unsigned int minor;
 
-       file = fget(fd);
+       file = fget_light(fd, fput_needed);
        if (!file)
                return NULL;
        inode = file->f_path.dentry->d_inode;
        if (!S_ISCHR(inode->i_mode) ||
            imajor(inode) != snd_major) {
-               fput(file);
+               fput_light(file, *fput_needed);
                return NULL;
        }
        minor = iminor(inode);
        if (!snd_lookup_minor_data(minor, SNDRV_DEVICE_TYPE_PCM_PLAYBACK) &&
            !snd_lookup_minor_data(minor, SNDRV_DEVICE_TYPE_PCM_CAPTURE)) {
-               fput(file);
+               fput_light(file, *fput_needed);
                return NULL;
        }
        return file;
@@ -1597,8 +1597,9 @@ static int snd_pcm_link(struct snd_pcm_substream *substream, int fd)
        struct snd_pcm_file *pcm_file;
        struct snd_pcm_substream *substream1;
        struct snd_pcm_group *group;
+       int fput_needed;
 
-       file = snd_pcm_file_fd(fd);
+       file = snd_pcm_file_fd(fd, &fput_needed);
        if (!file)
                return -EBADFD;
        pcm_file = file->private_data;
@@ -1633,7 +1634,7 @@ static int snd_pcm_link(struct snd_pcm_substream *substream, int fd)
        write_unlock_irq(&snd_pcm_link_rwlock);
        up_write(&snd_pcm_link_rwsem);
  _nolock:
-       fput(file);
+       fput_light(file, fput_needed);
        if (res < 0)
                kfree(group);
        return res;
index dde5c9c92132a0f9b353915fc4f33cc9b1aa99a2..ef68d710d08cfc13121089ae5f86f97dfb813ebb 100644 (file)
@@ -141,7 +141,7 @@ void snd_ak4113_reinit(struct ak4113 *chip)
 {
        chip->init = 1;
        mb();
-       flush_delayed_work_sync(&chip->work);
+       flush_delayed_work(&chip->work);
        ak4113_init_regs(chip);
        /* bring up statistics / event queing */
        chip->init = 0;
index fdf3c1b65e388414c995a597ef2b351b4ec2a220..816e7d225fb0a626147eb6d339d141edede00dc9 100644 (file)
@@ -154,7 +154,7 @@ void snd_ak4114_reinit(struct ak4114 *chip)
 {
        chip->init = 1;
        mb();
-       flush_delayed_work_sync(&chip->work);
+       flush_delayed_work(&chip->work);
        ak4114_init_regs(chip);
        /* bring up statistics / event queing */
        chip->init = 0;
index ab8738e21ad1378d569e349244d6e7bc6fac270c..e9fa2d07951d8e2059f3c0a233dd5258e954eb50 100644 (file)
@@ -573,8 +573,8 @@ static void oxygen_card_free(struct snd_card *card)
        oxygen_shutdown(chip);
        if (chip->irq >= 0)
                free_irq(chip->irq, chip);
-       flush_work_sync(&chip->spdif_input_bits_work);
-       flush_work_sync(&chip->gpio_work);
+       flush_work(&chip->spdif_input_bits_work);
+       flush_work(&chip->gpio_work);
        chip->model.cleanup(chip);
        kfree(chip->model_data);
        mutex_destroy(&chip->mutex);
@@ -751,8 +751,8 @@ static int oxygen_pci_suspend(struct device *dev)
        spin_unlock_irq(&chip->reg_lock);
 
        synchronize_irq(chip->irq);
-       flush_work_sync(&chip->spdif_input_bits_work);
-       flush_work_sync(&chip->gpio_work);
+       flush_work(&chip->spdif_input_bits_work);
+       flush_work(&chip->gpio_work);
        chip->interrupt_mask = saved_interrupt_mask;
 
        pci_disable_device(pci);
index d26c8ae4e6d937f8ccd856e6e7b9dbb2e0adcaab..a4cae060bf2626de702f88c4b62dee0e3f110860 100644 (file)
@@ -1601,7 +1601,7 @@ static int  wm8350_codec_remove(struct snd_soc_codec *codec)
 
        /* if there was any work waiting then we run it now and
         * wait for its completion */
-       flush_delayed_work_sync(&codec->dapm.delayed_work);
+       flush_delayed_work(&codec->dapm.delayed_work);
 
        wm8350_set_bias_level(codec, SND_SOC_BIAS_OFF);
 
index 13bff87ddcf57853c1163f6307b603d7d25a1ad1..2e4a775ae560861543f9d2d1bb74aef096e5181d 100644 (file)
@@ -1509,7 +1509,7 @@ static int wm8753_probe(struct snd_soc_codec *codec)
 /* power down chip */
 static int wm8753_remove(struct snd_soc_codec *codec)
 {
-       flush_delayed_work_sync(&codec->dapm.delayed_work);
+       flush_delayed_work(&codec->dapm.delayed_work);
        wm8753_set_bias_level(codec, SND_SOC_BIAS_OFF);
 
        return 0;
index c501af6d8dbefac0ce233e30d884f02d491594f7..cf3d0b0c71b9ac4bd249d4b3a5d3cabb2641dece 100644 (file)
@@ -591,7 +591,7 @@ int snd_soc_suspend(struct device *dev)
 
        /* close any waiting streams and save state */
        for (i = 0; i < card->num_rtd; i++) {
-               flush_delayed_work_sync(&card->rtd[i].delayed_work);
+               flush_delayed_work(&card->rtd[i].delayed_work);
                card->rtd[i].codec->dapm.suspend_bias_level = card->rtd[i].codec->dapm.bias_level;
        }
 
@@ -1848,7 +1848,7 @@ static int soc_cleanup_card_resources(struct snd_soc_card *card)
        /* make sure any delayed work runs */
        for (i = 0; i < card->num_rtd; i++) {
                struct snd_soc_pcm_runtime *rtd = &card->rtd[i];
-               flush_delayed_work_sync(&rtd->delayed_work);
+               flush_delayed_work(&rtd->delayed_work);
        }
 
        /* remove auxiliary devices */
@@ -1892,7 +1892,7 @@ int snd_soc_poweroff(struct device *dev)
         * now, we're shutting down so no imminent restart. */
        for (i = 0; i < card->num_rtd; i++) {
                struct snd_soc_pcm_runtime *rtd = &card->rtd[i];
-               flush_delayed_work_sync(&rtd->delayed_work);
+               flush_delayed_work(&rtd->delayed_work);
        }
 
        snd_soc_dapm_shutdown(card);
index 7d7e2aaffece234a81cef3f4181cc7ffe5bb14cb..67a35e90384c0ae665923bc707c4ebd5fe3c8c22 100644 (file)
@@ -90,7 +90,7 @@ irqfd_shutdown(struct work_struct *work)
         * We know no new events will be scheduled at this point, so block
         * until all previously outstanding events have completed
         */
-       flush_work_sync(&irqfd->inject);
+       flush_work(&irqfd->inject);
 
        /*
         * It is now safe to release the object's resources